mirror of https://github.com/rocboss/paopao-ce
commit 220d1f8732 (parent d1d65144ef)
@ -0,0 +1,37 @@
# syntax=docker/dockerfile:experimental

# build frontend
FROM node:19-alpine AS frontend
ARG API_HOST
ARG USE_API_HOST=yes
ARG EMBED_UI=yes
ARG USE_DIST=no
WORKDIR /web
COPY web/ ./
RUN [ "$EMBED_UI" != yes ] || [ "$USE_API_HOST" != yes ] || echo "VITE_HOST='$API_HOST'" > .env.local
RUN [ "$EMBED_UI" != yes ] || [ "$USE_DIST" != no ] || (yarn && yarn build)
RUN [ "$EMBED_UI" = yes ] || mkdir -p dist

# build backend
FROM bitbus/paopao-ce-backend-builder:latest AS backend
ARG API_HOST
ARG USE_API_HOST=yes
ARG EMBED_UI=yes
ARG USE_DIST=no

WORKDIR /paopao-ce
COPY . .
COPY --from=frontend /web/dist ./web/dist
ENV GOPROXY=https://goproxy.cn
# build with the embedded UI ('embed' tag) when EMBED_UI=yes, otherwise build the slim variant
RUN [ "$EMBED_UI" != yes ] || make build TAGS='embed go_json'
RUN [ "$EMBED_UI" = yes ] || make build TAGS='slim go_json'

# assemble the all-in-one runtime image
FROM bitbus/paopao-ce-allinone-runner:latest
ARG API_HOST
ARG USE_API_HOST=yes
ARG EMBED_UI=yes
ARG USE_DIST=no
ENV TZ=Asia/Shanghai

WORKDIR /app
COPY --from=backend /paopao-ce/release/paopao .
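A minimal build invocation for the Dockerfile above; the image tag and API host are illustrative placeholders, and the build args are the ones declared in the file:

```sh
# BuildKit is required by the syntax directive at the top of the Dockerfile
DOCKER_BUILDKIT=1 docker build \
    --build-arg EMBED_UI=yes \
    --build-arg USE_API_HOST=yes \
    --build-arg API_HOST=https://api.example.com \
    -t bitbus/paopao-ce:latest .
```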
@ -0,0 +1,3 @@
# Docker for paopao-ce

TODO
@ -0,0 +1,14 @@
#!/bin/sh

set -xe

# Create paopao user for paopao-ce
addgroup -S paopao
adduser -G paopao -H -D -g 'paopao User' paopao -h /data/paopao -s /bin/bash && usermod -p '*' paopao && passwd -u paopao
echo "export PAOPAO_CUSTOM=${PAOPAO_CUSTOM}" >> /etc/profile

# Final cleaning
mv /app/docker/paopao-ce_config.yaml /app/config.yaml
rm -rf /app/docker/build
rm /app/docker/nsswitch.conf
rm /app/docker/README.md
@ -0,0 +1,16 @@
# /etc/nsswitch.conf

passwd: compat
group: compat
shadow: compat

hosts: files dns
networks: files

protocols: db files
services: db files
ethers: db files
rpc: db files

netgroup: nis
@ -0,0 +1,144 @@
#!/usr/bin/env bash
set -e

BACKUP_PATH="/backup"

# Make sure that required directories exist
mkdir -p "${BACKUP_PATH}"
mkdir -p "/etc/crontabs"
chown git:git /backup
chmod 2770 /backup

# [string] BACKUP_INTERVAL Period expression
# [string] BACKUP_RETENTION Period expression
if [ -z "${BACKUP_INTERVAL}" ]; then
    echo "Backup disabled: BACKUP_INTERVAL is not set" 1>&2
    exit 1
fi

if [ -z "${BACKUP_RETENTION}" ]; then
    echo "Backup retention period is not defined, defaulting to 7 days" 1>&2
    BACKUP_RETENTION='7d'
fi

# Parse the BACKUP_INTERVAL environment variable and generate the matching cron expression.
# The backup cron task will run on that schedule.
# Expected format: nu (n - number, u - unit), e.g. 3d means 3 days
# Supported units: h - hours, d - days, M - months
parse_generate_cron_expression() {
    CRON_EXPR_MINUTES="*"
    CRON_EXPR_HOURS="*"
    CRON_EXPR_DAYS="*"
    CRON_EXPR_MONTHS="*"

    # shellcheck disable=SC2001
    TIME_INTERVAL=$(echo "${BACKUP_INTERVAL}" | sed -e 's/[hdM]$//')
    # shellcheck disable=SC2001
    TIME_UNIT=$(echo "${BACKUP_INTERVAL}" | sed -e 's/^[0-9]\+//')

    if [ "${TIME_UNIT}" = "h" ]; then
        if [ ! "${TIME_INTERVAL}" -le 23 ]; then
            echo "Parse error: Time unit 'h' (hour) cannot be greater than 23" 1>&2
            exit 1
        fi

        CRON_EXPR_MINUTES=0
        CRON_EXPR_HOURS="*/${TIME_INTERVAL}"
    elif [ "${TIME_UNIT}" = "d" ]; then
        if [ ! "${TIME_INTERVAL}" -le 30 ]; then
            echo "Parse error: Time unit 'd' (day) cannot be greater than 30" 1>&2
            exit 1
        fi

        CRON_EXPR_MINUTES=0
        CRON_EXPR_HOURS=0
        CRON_EXPR_DAYS="*/${TIME_INTERVAL}"
    elif [ "${TIME_UNIT}" = "M" ]; then
        if [ ! "${TIME_INTERVAL}" -le 12 ]; then
            echo "Parse error: Time unit 'M' (month) cannot be greater than 12" 1>&2
            exit 1
        fi

        CRON_EXPR_MINUTES=0
        CRON_EXPR_HOURS=0
        CRON_EXPR_DAYS="1"
        CRON_EXPR_MONTHS="*/${TIME_INTERVAL}"
    else
        echo "Parse error: BACKUP_INTERVAL expression is invalid" 1>&2
        exit 1
    fi

    echo "${CRON_EXPR_MINUTES} ${CRON_EXPR_HOURS} ${CRON_EXPR_DAYS} ${CRON_EXPR_MONTHS} *"
}
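# Example mappings (derived from the parsing above, values are illustrative):
#   BACKUP_INTERVAL=12h  ->  "0 */12 * * *"
#   BACKUP_INTERVAL=3d   ->  "0 0 */3 * *"
#   BACKUP_INTERVAL=1M   ->  "0 0 1 */1 *"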

# Parse the BACKUP_RETENTION environment variable and generate the matching find(1) time expression.
# Expected format: nu (n - number, u - unit), e.g. 3d means 3 days
# Supported units: m - minutes, d - days
parse_generate_retention_expression() {
    FIND_TIME_EXPR='mtime'

    # shellcheck disable=SC2001
    TIME_INTERVAL=$(echo "${BACKUP_RETENTION}" | sed -e 's/[mhdM]$//')
    # shellcheck disable=SC2001
    TIME_UNIT=$(echo "${BACKUP_RETENTION}" | sed -e 's/^[0-9]\+//')

    if [ "${TIME_UNIT}" = "m" ]; then
        if [ "${TIME_INTERVAL}" -le 59 ]; then
            echo "Warning: Minimum retention is 60m. Value set to 60m" 1>&2
            TIME_INTERVAL=60
        fi

        FIND_TIME_EXPR="mmin"
    elif [ "${TIME_UNIT}" = "h" ]; then
        echo "Error: Unsupported unit 'h' - use minutes instead, e.g. 120m for 2 hours." 1>&2
        exit 1
    elif [ "${TIME_UNIT}" = "d" ]; then
        FIND_TIME_EXPR="mtime"
    elif [ "${TIME_UNIT}" = "M" ]; then
        echo "Error: Unsupported unit 'M' - use days instead, e.g. 60d for 2 months." 1>&2
        exit 1
    else
        echo "Parse error: BACKUP_RETENTION expression is invalid" 1>&2
        exit 1
    fi

    echo "${FIND_TIME_EXPR} +${TIME_INTERVAL:-7}"
}
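# Example mappings (derived from the parsing above, values are illustrative):
#   BACKUP_RETENTION=90m  ->  "mmin +90"
#   BACKUP_RETENTION=30d  ->  "mtime +30"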

add_backup_cronjob() {
    CRONTAB_USER="${1:-git}"
    CRONTAB_FILE="/etc/crontabs/${CRONTAB_USER}"
    CRONJOB_EXPRESSION="${2:-}"
    CRONJOB_EXECUTOR="${3:-}"
    CRONJOB_EXECUTOR_ARGUMENTS="${4:-}"
    CRONJOB_TASK="${CRONJOB_EXPRESSION} /bin/sh ${CRONJOB_EXECUTOR} ${CRONJOB_EXECUTOR_ARGUMENTS}"

    if [ -f "${CRONTAB_FILE}" ]; then
        CRONJOB_EXECUTOR_COUNT=$(grep -c "${CRONJOB_EXECUTOR}" "${CRONTAB_FILE}" || exit 0)
        if [ "${CRONJOB_EXECUTOR_COUNT}" != "0" ]; then
            echo "Cron job already exists for ${CRONJOB_EXECUTOR}. Updating existing." 1>&2
            CRONJOB_TASK=$(echo "${CRONJOB_TASK}" | sed 's/\//\\\//g')
            CRONJOB_EXECUTOR=$(echo "${CRONJOB_EXECUTOR}" | sed 's/\//\\\//g')
            sed -i "/${CRONJOB_EXECUTOR}/c\\${CRONJOB_TASK}" "${CRONTAB_FILE}"
            return 0
        fi
    fi

    # Finally append new line with cron task expression
    echo "${CRONJOB_TASK}" >>"${CRONTAB_FILE}"
}

CRONTAB_USER=$(awk -v val="${PUID}" -F ":" '$3==val{print $1}' /etc/passwd)

# Up to this point it was desirable that the interpreter handles command errors and halts execution on any error.
# From now on, we handle the errors ourselves.
set +e
RETENTION_EXPRESSION="$(parse_generate_retention_expression)"

if [ -z "${RETENTION_EXPRESSION}" ]; then
    echo "Couldn't generate backup retention expression. Aborting backup setup" 1>&2
    exit 1
fi

# The backup rotator cron job runs every 5 minutes
add_backup_cronjob "${CRONTAB_USER}" "*/5 * * * *" "/app/gogs/docker/runtime/backup-rotator.sh" "'${BACKUP_PATH}' '${RETENTION_EXPRESSION}'"
add_backup_cronjob "${CRONTAB_USER}" "$(parse_generate_cron_expression)" "/app/gogs/docker/runtime/backup-job.sh" "'${BACKUP_PATH}'"
@ -0,0 +1,40 @@
#!/usr/bin/env sh

execute_backup_job() {
    BACKUP_ARG_PATH="${1:-}"
    BACKUP_ARG_CONFIG="${BACKUP_ARG_CONFIG:-}"
    BACKUP_ARG_EXCLUDE_REPOS="${BACKUP_ARG_EXCLUDE_REPOS:-}"
    BACKUP_EXTRA_ARGS="${BACKUP_EXTRA_ARGS:-}"
    cd "/app/gogs" || exit 1

    BACKUP_ARGS="--target=${BACKUP_ARG_PATH}"

    if [ -n "${BACKUP_ARG_CONFIG}" ]; then
        BACKUP_ARGS="${BACKUP_ARGS} --config=${BACKUP_ARG_CONFIG}"
    fi

    if [ -n "${BACKUP_ARG_EXCLUDE_REPOS}" ]; then
        BACKUP_ARGS="${BACKUP_ARGS} --exclude-repos=${BACKUP_ARG_EXCLUDE_REPOS}"
    fi

    if [ -n "${BACKUP_EXTRA_ARGS}" ]; then
        BACKUP_ARGS="${BACKUP_ARGS} ${BACKUP_EXTRA_ARGS}"
    fi

    # NOTE: We actually need word splitting to be able to pass multiple arguments.
    # shellcheck disable=SC2086
    ./gogs backup ${BACKUP_ARGS} || { echo "Error: Backup job returned non-successful code." 1>&2; exit 1; }
}

main() {
    BACKUP_PATH="${1:-}"

    if [ -z "${BACKUP_PATH}" ]; then
        echo "Error: Required argument BACKUP_PATH is missing" 1>&2
        exit 1
    fi

    execute_backup_job "${BACKUP_PATH}"
}

main "$@"
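A minimal manual run of the job above, mirroring how backup-init.sh schedules it; the config path and excluded repository name are illustrative, and the optional settings are passed through the environment variables the script reads:

```sh
BACKUP_ARG_CONFIG=/app/gogs/custom/conf/app.ini \
BACKUP_ARG_EXCLUDE_REPOS=owner/huge-repo \
/bin/sh /app/gogs/docker/runtime/backup-job.sh /backup
```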
@ -0,0 +1,28 @@
#!/usr/bin/env sh

# This is a very simple, yet effective, backup rotation script.
# Using find, all backup archives older than the retention period (the find expression passed as the second argument) are collected and deleted with rm.
main() {
    BACKUP_PATH="${1:-}"
    FIND_EXPRESSION="${2:-mtime +7}"

    if [ -z "${BACKUP_PATH}" ]; then
        echo "Error: Required argument BACKUP_PATH is missing" 1>&2
        exit 1
    fi

    if [ "$(realpath "${BACKUP_PATH}")" = "/" ]; then
        echo "Error: Dangerous BACKUP_PATH: /" 1>&2
        exit 1
    fi

    if [ ! -d "${BACKUP_PATH}" ]; then
        echo "Error: BACKUP_PATH doesn't exist or is not a directory" 1>&2
        exit 1
    fi

    # shellcheck disable=SC2086
    find "${BACKUP_PATH}/" -type f -name "gogs-backup-*.zip" -${FIND_EXPRESSION} -print -exec rm "{}" +
}

main "$@"
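A minimal manual run of the rotator above (values are illustrative; the second argument is the find(1) time expression that backup-init.sh generates):

```sh
# delete gogs-backup-*.zip archives under /backup that are older than 30 days
/bin/sh /app/gogs/docker/runtime/backup-rotator.sh /backup "mtime +30"
```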
@ -0,0 +1,5 @@
#!/bin/sh

# Cleanup SOCAT services and s6 event folder
rm -rf "$(find /app/docker/s6/ -name 'event')"
rm -rf /app/docker/s6/SOCAT_*
@ -0,0 +1,8 @@
#!/bin/sh

if test -f ./setup; then
    # shellcheck disable=SC2039,SC1091,SC3046
    source ./setup
fi

exec gosu paopao /bin/meilisearch
@ -0,0 +1,8 @@
#!/bin/sh

if test -f ./setup; then
    # shellcheck disable=SC2039,SC1091,SC3046
    source ./setup
fi

exec gosu paopao /app/paopao serve
@ -0,0 +1,3 @@
#!/bin/sh

cd /app || exit 1
@ -0,0 +1,8 @@
#!/bin/sh

if test -f ./setup; then
    # shellcheck disable=SC2039,SC1091,SC3046
    source ./setup
fi

exec docker-entrypoint.sh redis-server
@ -0,0 +1,15 @@
Port 22
AddressFamily any
ListenAddress 0.0.0.0
ListenAddress ::
Protocol 2
LogLevel INFO
HostKey /data/ssh/ssh_host_rsa_key
HostKey /data/ssh/ssh_host_dsa_key
HostKey /data/ssh/ssh_host_ecdsa_key
HostKey /data/ssh/ssh_host_ed25519_key
PermitRootLogin no
AuthorizedKeysFile .ssh/authorized_keys
PasswordAuthentication no
PermitUserEnvironment yes
AllowUsers paopao
@ -0,0 +1,83 @@
#!/bin/sh

create_socat_links() {
    # Bind linked docker containers to localhost sockets using socat
    USED_PORT="3000:22"
    while read -r NAME ADDR PORT; do
        if test -z "$NAME$ADDR$PORT"; then
            continue
        elif echo "$USED_PORT" | grep -E "(^|:)$PORT($|:)" > /dev/null; then
            echo "init:socat | Can't bind linked container ${NAME} to localhost, port ${PORT} already in use" 1>&2
        else
            SERV_FOLDER=/app/gogs/docker/s6/SOCAT_${NAME}_${PORT}
            mkdir -p "${SERV_FOLDER}"
            CMD="socat -ls TCP4-LISTEN:${PORT},fork,reuseaddr TCP4:${ADDR}:${PORT}"
            # shellcheck disable=SC2039,SC3037
            echo -e "#!/bin/sh\nexec $CMD" > "${SERV_FOLDER}"/run
            chmod +x "${SERV_FOLDER}"/run
            USED_PORT="${USED_PORT}:${PORT}"
            echo "init:socat | Linked container ${NAME} will be bound to localhost on port ${PORT}" 1>&2
        fi
    done << EOT
$(env | sed -En 's|(.*)_PORT_([0-9]+)_TCP=tcp://(.*):([0-9]+)|\1 \3 \4|p')
EOT
}
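# Example (illustrative values): a legacy docker link such as DB_PORT_3306_TCP=tcp://172.17.0.2:3306
# is rewritten by the sed expression above to "DB 172.17.0.2 3306", which produces an s6 service
# running "socat -ls TCP4-LISTEN:3306,fork,reuseaddr TCP4:172.17.0.2:3306".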

cleanup() {
    # Cleanup SOCAT services and s6 event folder
    # Runs on start, and on shutdown in case the container has been killed
    rm -rf "$(find /app/gogs/docker/s6/ -name 'event')"
    rm -rf /app/gogs/docker/s6/SOCAT_*
}

create_volume_subfolder() {
    # Only change ownership if needed; with an NFS mount this could be expensive
    if [ "$USER:$USER" != "$(stat /data -c '%U:%G')" ]
    then
        # Change the owner of the /data dir so the $USER (git) user can create sub-directories in /data
        chown -R "$USER:$USER" /data
    fi

    # Create VOLUME subfolders
    for f in /data/gogs/data /data/gogs/conf /data/gogs/log /data/git /data/ssh; do
        if ! test -d $f; then
            gosu "$USER" mkdir -p $f
        fi
    done
}

setids() {
    export USER=git
    PUID=${PUID:-1000}
    PGID=${PGID:-1000}
    groupmod -o -g "$PGID" $USER
    usermod -o -u "$PUID" $USER
}

setids
cleanup
create_volume_subfolder

LINK=$(echo "$SOCAT_LINK" | tr '[:upper:]' '[:lower:]')
if [ "$LINK" = "false" ] || [ "$LINK" = "0" ]; then
    echo "init:socat | Will not try to create socat links as requested" 1>&2
else
    create_socat_links
fi

CROND=$(echo "$RUN_CROND" | tr '[:upper:]' '[:lower:]')
if [ "$CROND" = "true" ] || [ "$CROND" = "1" ]; then
    echo "init:crond | Cron Daemon (crond) will be run as requested by s6" 1>&2
    rm -f /app/gogs/docker/s6/crond/down
    /bin/sh /app/gogs/docker/runtime/backup-init.sh "${PUID}"
else
    # Tell s6 not to run the crond service
    touch /app/gogs/docker/s6/crond/down
fi

# Exec CMD, or s6 by default if no command is given
if [ $# -gt 0 ]; then
    exec "$@"
else
    exec /bin/s6-svscan /app/gogs/docker/s6/
fi
@ -0,0 +1,36 @@
FROM getmeili/meilisearch:v1.4 AS meilisearch

FROM redis:7.2-alpine
ENV TZ=Asia/Shanghai
RUN apk update && apk add --no-cache ca-certificates && update-ca-certificates
RUN apk update --quiet \
    && apk -q --no-cache --no-progress add \
    ca-certificates \
    libgcc \
    curl \
    s6 \
    socat \
    && update-ca-certificates

WORKDIR /app
COPY ./docker ./docker

# Add meilisearch and meilitool to /bin so they can be run from anywhere
# and are easy to find.
COPY --from=meilisearch /bin/meilisearch /bin/meilisearch
COPY --from=meilisearch /bin/meilitool /bin/meilitool
# To stay compatible with the older version of the container (pre v0.27.0) we're
# going to symlink the meilisearch binary in the path to /meilisearch
RUN ln -s /bin/meilisearch /meilisearch

ENV MEILI_DB_PATH=/meili_data
VOLUME ["/meili_data"]

RUN ./docker/build/finalize.sh

# Configure Docker Container
VOLUME ["/meili_data", "/app/custom"]
EXPOSE 8008 7700/tcp
HEALTHCHECK --interval=5s --timeout=3s --retries=3 CMD ps -ef | grep paopao || exit 1
ENTRYPOINT ["/app/docker/start.sh"]
CMD ["/bin/s6-svscan", "/app/docker/s6/"]
@ -1,6 +1,7 @@
### Dockerfile builder pre-build images

```sh
docker build -t bitbus/paopao-ce-backend-builder:latest -f Dockerfile.backend-builder .
docker build -t bitbus/paopao-ce-backend-runner:latest -f Dockerfile.backend-runner .
docker build -t bitbus/paopao-ce-allinone-runner:latest -f Dockerfile.allinone-runner .
```