Compare commits

..

8 commits

Author SHA1 Message Date
LinuxServer-CI 13b7c12c86 Bot Updating Templated Files 2019-10-12 07:27:43 -04:00
LinuxServer-CI cd7bf5d960 Bot Updating Templated Files 2019-10-12 07:26:51 -04:00
LinuxServer-CI 2f8a95f0bf Bot Updating Package Versions 2019-10-05 07:27:22 -04:00
LinuxServer-CI 97958c63e8 Bot Updating Templated Files 2019-09-07 07:28:11 -04:00
LinuxServer-CI 60b2181c1c Bot Updating Templated Files 2019-09-07 12:25:52 +01:00
LinuxServer-CI 408021d19e Bot Updating Package Versions 2019-08-24 13:29:16 +02:00
LinuxServer-CI 9cbeb86174 Bot Updating Package Versions 2019-08-10 11:28:58 +00:00
thelamer 7088022081 branching 3.9 to bump master to 3.10 2019-06-28 14:06:16 -07:00
80 changed files with 970 additions and 1162 deletions

View file

@ -1,20 +0,0 @@
# This file is globally distributed to all container image projects from
# https://github.com/linuxserver/docker-jenkins-builder/blob/master/.editorconfig
# top-most EditorConfig file
root = true
# Unix-style newlines with a newline ending every file
[*]
end_of_line = lf
insert_final_newline = true
# trim_trailing_whitespace may cause unintended issues and should not be globally set true
trim_trailing_whitespace = false
[{Dockerfile*,**.yml}]
indent_style = space
indent_size = 2
[{**.sh,root/etc/s6-overlay/s6-rc.d/**,root/etc/cont-init.d/**,root/etc/services.d/**}]
indent_style = space
indent_size = 4

View file

@ -1,15 +0,0 @@
name: build
run-name: ${{ gitea.actor }} is building baseimage-alpine
on:
push:
branches:
- 'master'
jobs:
build:
runs-on: podman
steps:
- uses: actions/checkout@v3
- run: podman login --username registry --password ${{ secrets.MEATBAG_REGISTRY_TOKEN }} registry.meatbag.se
- run: podman build -t meatbag/baseimage-alpine:latest .
- run: podman push meatbag/baseimage-alpine registry.meatbag.se/meatbag/baseimage-alpine

.github/FUNDING.yml vendored Executable file (1 changed line)
View file

@ -0,0 +1 @@
open_collective: linuxserver

.github/ISSUE_TEMPLATE.md vendored Normal file (34 changed lines)
View file

@ -0,0 +1,34 @@
[linuxserverurl]: https://linuxserver.io
[![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)][linuxserverurl]
If you are new to Docker or this application our issue tracker is **ONLY** used for reporting bugs or requesting features. Please use [our discord server](https://discord.gg/YWrKVTn) for general support.
<!--- Provide a general summary of the issue in the Title above -->
------------------------------
## Expected Behavior
<!--- Tell us what should happen -->
## Current Behavior
<!--- Tell us what happens instead of the expected behavior -->
## Steps to Reproduce
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
1.
2.
3.
4.
## Environment
**OS:**
**CPU architecture:** x86_64/arm32/arm64
**How docker service was installed:**
<!--- Providing context helps us come up with a solution that is most useful in the real world -->
## Command used to create docker container (run/create/compose/screenshot)
<!--- Provide your docker create/run command or compose yaml snippet, or a screenshot of settings if using a gui to create the container -->
## Docker logs
<!--- Provide a full docker log, output of "docker logs baseimage-alpine" -->

.github/PULL_REQUEST_TEMPLATE.md vendored Normal file (39 changed lines)
View file

@ -0,0 +1,39 @@
<!--- Provide a general summary of your changes in the Title above -->
[linuxserverurl]: https://linuxserver.io
[![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)][linuxserverurl]
<!--- Before submitting a pull request please check the following -->
<!--- If this is a fix for a typo in code or documentation in the README please file an issue and let us sort it out we do not need a PR -->
<!--- Ask yourself if this modification is something the whole userbase will benefit from, if this is a specific change for corner case functionality or plugins please look at making a Docker Mod or local script https://blog.linuxserver.io/2019/09/14/customizing-our-containers/ -->
<!--- That if the PR is addressing an existing issue include, closes #<issue number> , in the body of the PR commit message -->
<!--- You have included links to any files / patches etc your PR may be using in the body of the PR commit message -->
<!--- We maintain a changelog of major revisions to the container at the end of readme-vars.yml in the root of this repository, please add your changes there if appropriate -->
<!--- Coding guidelines: -->
<!--- 1. Installed packages in the Dockerfiles should be in alphabetical order -->
<!--- 2. Changes to Dockerfile should be replicated in Dockerfile.armhf and Dockerfile.aarch64 if applicable -->
<!--- 3. Indentation style (tabs vs 4 spaces vs 1 space) should match the rest of the document -->
<!--- 4. Readme is auto generated from readme-vars.yml, make your changes there -->
------------------------------
We welcome all PRs though this doesnt guarantee it will be accepted.
## Description:
<!--- Describe your changes in detail -->
## Benefits of this PR and context:
<!--- Please explain why we should accept this PR. If this fixes an outstanding bug, please reference the issue # -->
## How Has This Been Tested?
<!--- Please describe in detail how you tested your changes. -->
<!--- Include details of your testing environment, and the tests you ran to -->
<!--- see how your change affects other areas of the code, etc. -->
## Source / References:
<!--- Please include any forum posts/github links relevant to the PR -->

.gitignore vendored (1 changed line)
View file

@ -41,4 +41,3 @@ $RECYCLE.BIN/
 Network Trash Folder
 Temporary Items
 .apdisk
-.jenkins-external

View file

@ -1,92 +1,74 @@
-# syntax=docker/dockerfile:1
-FROM alpine:3.20 AS rootfs-stage
+FROM alpine:3.9 as rootfs-stage
+MAINTAINER sparkyballs,thelamer
 # environment
-ENV ROOTFS=/root-out
-ENV REL=v3.21
+ENV REL=v3.9
 ENV ARCH=x86_64
 ENV MIRROR=http://dl-cdn.alpinelinux.org/alpine
 ENV PACKAGES=alpine-baselayout,\
 alpine-keys,\
 apk-tools,\
 busybox,\
-libc-utils
+libc-utils,\
+xz
 # install packages
 RUN \
 apk add --no-cache \
 bash \
+curl \
+tzdata \
 xz
-# build rootfs
+# fetch builder script from gliderlabs
 RUN \
-mkdir -p "$ROOTFS/etc/apk" && \
-{ \
-echo "$MIRROR/$REL/main"; \
-echo "$MIRROR/$REL/community"; \
-} > "$ROOTFS/etc/apk/repositories" && \
-apk --root "$ROOTFS" --no-cache --keys-dir /etc/apk/keys add --arch $ARCH --initdb ${PACKAGES//,/ } && \
+curl -o \
+/mkimage-alpine.bash -L \
+https://raw.githubusercontent.com/gliderlabs/docker-alpine/master/builder/scripts/mkimage-alpine.bash && \
+chmod +x \
+/mkimage-alpine.bash && \
+./mkimage-alpine.bash && \
+mkdir /root-out && \
+tar xf \
+/rootfs.tar.xz -C \
+/root-out && \
 sed -i -e 's/^root::/root:!:/' /root-out/etc/shadow
-# set version for s6 overlay
-ARG S6_OVERLAY_VERSION="3.2.0.2"
-ARG S6_OVERLAY_ARCH="x86_64"
-# add s6 overlay
-ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-noarch.tar.xz /tmp
-RUN tar -C /root-out -Jxpf /tmp/s6-overlay-noarch.tar.xz
-ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-${S6_OVERLAY_ARCH}.tar.xz /tmp
-RUN tar -C /root-out -Jxpf /tmp/s6-overlay-${S6_OVERLAY_ARCH}.tar.xz
-# add s6 optional symlinks
-ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-noarch.tar.xz /tmp
-RUN tar -C /root-out -Jxpf /tmp/s6-overlay-symlinks-noarch.tar.xz && unlink /root-out/usr/bin/with-contenv
-ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-arch.tar.xz /tmp
-RUN tar -C /root-out -Jxpf /tmp/s6-overlay-symlinks-arch.tar.xz
 # Runtime stage
 FROM scratch
 COPY --from=rootfs-stage /root-out/ /
 ARG BUILD_DATE
 ARG VERSION
-ARG MODS_VERSION="v3"
-ARG PKG_INST_VERSION="v1"
-ARG LSIOWN_VERSION="v1"
-ARG WITHCONTENV_VERSION="v1"
 LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}"
-LABEL maintainer="TheLamer"
-ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/docker-mods.${MODS_VERSION}" "/docker-mods"
-ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/package-install.${PKG_INST_VERSION}" "/etc/s6-overlay/s6-rc.d/init-mods-package-install/run"
-ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/lsiown.${LSIOWN_VERSION}" "/usr/bin/lsiown"
-ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/with-contenv.${WITHCONTENV_VERSION}" "/usr/bin/with-contenv"
+LABEL MAINTAINER="sparkyballs,TheLamer"
+# set version for s6 overlay
+ARG OVERLAY_VERSION="v1.22.0.0"
+ARG OVERLAY_ARCH="amd64"
 # environment variables
 ENV PS1="$(whoami)@$(hostname):$(pwd)\\$ " \
 HOME="/root" \
-TERM="xterm" \
-S6_CMD_WAIT_FOR_SERVICES_MAXTIME="0" \
-S6_VERBOSITY=1 \
-S6_STAGE2_HOOK=/docker-mods \
-VIRTUAL_ENV=/lsiopy \
-PATH="/lsiopy/bin:$PATH"
+TERM="xterm"
 RUN \
+echo "**** install build packages ****" && \
+apk add --no-cache --virtual=build-dependencies \
+curl \
+tar && \
 echo "**** install runtime packages ****" && \
 apk add --no-cache \
-alpine-release \
 bash \
 ca-certificates \
-catatonit \
 coreutils \
-curl \
-findutils \
-jq \
-netcat-openbsd \
-procps-ng \
 shadow \
 tzdata && \
+echo "**** add s6 overlay ****" && \
+curl -o \
+/tmp/s6-overlay.tar.gz -L \
+"https://github.com/just-containers/s6-overlay/releases/download/${OVERLAY_VERSION}/s6-overlay-${OVERLAY_ARCH}.tar.gz" && \
+tar xfz \
+/tmp/s6-overlay.tar.gz -C / && \
 echo "**** create abc user and make our folders ****" && \
 groupmod -g 1000 users && \
 useradd -u 911 -U -d /config -s /bin/false abc && \
@ -94,9 +76,10 @@ RUN \
 mkdir -p \
 /app \
 /config \
-/defaults \
-/lsiopy && \
+/defaults && \
 echo "**** cleanup ****" && \
+apk del --purge \
+build-dependencies && \
 rm -rf \
 /tmp/*
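
For context, both revisions of this Dockerfile consume only the BUILD_DATE and VERSION build arguments (the newer revision's remaining ARGs all carry defaults), so a local build of either revision can be sketched as follows; the image tag here is illustrative, not an official one:

docker build \
  --build-arg BUILD_DATE="$(date -u +%Y-%m-%dT%H:%M:%S%z)" \
  --build-arg VERSION="local" \
  -t lsiobase/alpine:local .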

View file

@ -1,92 +1,74 @@
-# syntax=docker/dockerfile:1
-FROM alpine:3.20 AS rootfs-stage
+FROM alpine:3.9 as rootfs-stage
+MAINTAINER sparkyballs,thelamer
 # environment
-ENV ROOTFS=/root-out
-ENV REL=v3.21
+ENV REL=v3.9
 ENV ARCH=aarch64
 ENV MIRROR=http://dl-cdn.alpinelinux.org/alpine
 ENV PACKAGES=alpine-baselayout,\
 alpine-keys,\
 apk-tools,\
 busybox,\
-libc-utils
+libc-utils,\
+xz
 # install packages
 RUN \
 apk add --no-cache \
 bash \
+curl \
+tzdata \
 xz
-# build rootfs
+# fetch builder script from gliderlabs
 RUN \
-mkdir -p "$ROOTFS/etc/apk" && \
-{ \
-echo "$MIRROR/$REL/main"; \
-echo "$MIRROR/$REL/community"; \
-} > "$ROOTFS/etc/apk/repositories" && \
-apk --root "$ROOTFS" --no-cache --keys-dir /etc/apk/keys add --arch $ARCH --initdb ${PACKAGES//,/ } && \
+curl -o \
+/mkimage-alpine.bash -L \
+https://raw.githubusercontent.com/gliderlabs/docker-alpine/master/builder/scripts/mkimage-alpine.bash && \
+chmod +x \
+/mkimage-alpine.bash && \
+./mkimage-alpine.bash && \
+mkdir /root-out && \
+tar xf \
+/rootfs.tar.xz -C \
+/root-out && \
 sed -i -e 's/^root::/root:!:/' /root-out/etc/shadow
-# set version for s6 overlay
-ARG S6_OVERLAY_VERSION="3.2.0.2"
-ARG S6_OVERLAY_ARCH="aarch64"
-# add s6 overlay
-ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-noarch.tar.xz /tmp
-RUN tar -C /root-out -Jxpf /tmp/s6-overlay-noarch.tar.xz
-ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-${S6_OVERLAY_ARCH}.tar.xz /tmp
-RUN tar -C /root-out -Jxpf /tmp/s6-overlay-${S6_OVERLAY_ARCH}.tar.xz
-# add s6 optional symlinks
-ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-noarch.tar.xz /tmp
-RUN tar -C /root-out -Jxpf /tmp/s6-overlay-symlinks-noarch.tar.xz && unlink /root-out/usr/bin/with-contenv
-ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-arch.tar.xz /tmp
-RUN tar -C /root-out -Jxpf /tmp/s6-overlay-symlinks-arch.tar.xz
 # Runtime stage
 FROM scratch
 COPY --from=rootfs-stage /root-out/ /
 ARG BUILD_DATE
 ARG VERSION
-ARG MODS_VERSION="v3"
-ARG PKG_INST_VERSION="v1"
-ARG LSIOWN_VERSION="v1"
-ARG WITHCONTENV_VERSION="v1"
 LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}"
-LABEL maintainer="TheLamer"
-ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/docker-mods.${MODS_VERSION}" "/docker-mods"
-ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/package-install.${PKG_INST_VERSION}" "/etc/s6-overlay/s6-rc.d/init-mods-package-install/run"
-ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/lsiown.${LSIOWN_VERSION}" "/usr/bin/lsiown"
-ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/with-contenv.${WITHCONTENV_VERSION}" "/usr/bin/with-contenv"
+LABEL MAINTAINER="sparkyballs,TheLamer"
+# set version for s6 overlay
+ARG OVERLAY_VERSION="v1.22.0.0"
+ARG OVERLAY_ARCH="aarch64"
 # environment variables
 ENV PS1="$(whoami)@$(hostname):$(pwd)\\$ " \
 HOME="/root" \
-TERM="xterm" \
-S6_CMD_WAIT_FOR_SERVICES_MAXTIME="0" \
-S6_VERBOSITY=1 \
-S6_STAGE2_HOOK=/docker-mods \
-VIRTUAL_ENV=/lsiopy \
-PATH="/lsiopy/bin:$PATH"
+TERM="xterm"
 RUN \
+echo "**** install build packages ****" && \
+apk add --no-cache --virtual=build-dependencies \
+curl \
+tar && \
 echo "**** install runtime packages ****" && \
 apk add --no-cache \
-alpine-release \
 bash \
 ca-certificates \
-catatonit \
 coreutils \
-curl \
-findutils \
-jq \
-netcat-openbsd \
-procps-ng \
 shadow \
 tzdata && \
+echo "**** add s6 overlay ****" && \
+curl -o \
+/tmp/s6-overlay.tar.gz -L \
+"https://github.com/just-containers/s6-overlay/releases/download/${OVERLAY_VERSION}/s6-overlay-${OVERLAY_ARCH}.tar.gz" && \
+tar xfz \
+/tmp/s6-overlay.tar.gz -C / && \
 echo "**** create abc user and make our folders ****" && \
 groupmod -g 1000 users && \
 useradd -u 911 -U -d /config -s /bin/false abc && \
@ -94,9 +76,15 @@ RUN \
 mkdir -p \
 /app \
 /config \
-/defaults \
-/lsiopy && \
+/defaults && \
+echo "**** add qemu ****" && \
+curl -o \
+/usr/bin/qemu-aarch64-static -L \
+"https://lsio-ci.ams3.digitaloceanspaces.com/qemu-aarch64-static" && \
+chmod +x /usr/bin/qemu-aarch64-static && \
 echo "**** cleanup ****" && \
+apk del --purge \
+build-dependencies && \
 rm -rf \
 /tmp/*

Dockerfile.armhf Normal file (94 changed lines)
View file

@ -0,0 +1,94 @@
FROM alpine:3.9 as rootfs-stage
MAINTAINER sparkyballs,thelamer
# environment
ENV REL=v3.9
ENV ARCH=armv7
ENV MIRROR=http://dl-cdn.alpinelinux.org/alpine
ENV PACKAGES=alpine-baselayout,\
alpine-keys,\
apk-tools,\
busybox,\
libc-utils,\
xz
# install packages
RUN \
apk add --no-cache \
bash \
curl \
tzdata \
xz
# fetch builder script from gliderlabs
RUN \
curl -o \
/mkimage-alpine.bash -L \
https://raw.githubusercontent.com/gliderlabs/docker-alpine/master/builder/scripts/mkimage-alpine.bash && \
chmod +x \
/mkimage-alpine.bash && \
./mkimage-alpine.bash && \
mkdir /root-out && \
tar xf \
/rootfs.tar.xz -C \
/root-out && \
sed -i -e 's/^root::/root:!:/' /root-out/etc/shadow
# Runtime stage
FROM scratch
COPY --from=rootfs-stage /root-out/ /
ARG BUILD_DATE
ARG VERSION
LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}"
LABEL MAINTAINER="sparkyballs,TheLamer"
# set version for s6 overlay
ARG OVERLAY_VERSION="v1.22.0.0"
ARG OVERLAY_ARCH="arm"
# environment variables
ENV PS1="$(whoami)@$(hostname):$(pwd)\\$ " \
HOME="/root" \
TERM="xterm"
RUN \
echo "**** install build packages ****" && \
apk add --no-cache --virtual=build-dependencies \
curl \
tar && \
echo "**** install runtime packages ****" && \
apk add --no-cache \
bash \
ca-certificates \
coreutils \
shadow \
tzdata && \
echo "**** add s6 overlay ****" && \
curl -o \
/tmp/s6-overlay.tar.gz -L \
"https://github.com/just-containers/s6-overlay/releases/download/${OVERLAY_VERSION}/s6-overlay-${OVERLAY_ARCH}.tar.gz" && \
tar xfz \
/tmp/s6-overlay.tar.gz -C / && \
echo "**** create abc user and make our folders ****" && \
groupmod -g 1000 users && \
useradd -u 911 -U -d /config -s /bin/false abc && \
usermod -G users abc && \
mkdir -p \
/app \
/config \
/defaults && \
echo "**** add qemu ****" && \
curl -o \
/usr/bin/qemu-arm-static -L \
"https://lsio-ci.ams3.digitaloceanspaces.com/qemu-arm-static" && \
chmod +x /usr/bin/qemu-arm-static && \
echo "**** cleanup ****" && \
apk del --purge \
build-dependencies && \
rm -rf \
/tmp/*
# add local files
COPY root/ /
ENTRYPOINT ["/init"]

Jenkinsfile vendored (891 changed lines)

File diff suppressed because it is too large

View file

@ -1 +1,27 @@
-A custom base image built with [Alpine Linux](https://alpinelinux.org) and [s6-overlay](https://github.com/just-containers/s6-overlay).
+[linuxserverurl]: https://linuxserver.io
+[forumurl]: https://forum.linuxserver.io
+[ircurl]: https://www.linuxserver.io/irc/
+[appurl]: https://alpinelinux.org
+[![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png?v=4&s=4000)][linuxserverurl]
+## Contact information:-
+| Type | Address/Details |
+| :---: | --- |
+| Discord | [Discord](https://discord.gg/YWrKVTn) |
+| IRC | freenode at `#linuxserver.io` more information at:- [IRC][ircurl]
+| Forum | [Linuserver.io forum][forumurl] |
+&nbsp;
+&nbsp;
+[![](https://images.microbadger.com/badges/image/lsiobase/alpine.svg)](https://microbadger.com/images/lsiobase/alpine "Get your own image badge on microbadger.com")
+[![](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/Dockerfile-Link-green.png)](https://github.com/linuxserver/docker-baseimage-alpine/blob/master/Dockerfile)
+A custom base image built with [Alpine linux][appurl] and [S6 overlay](https://github.com/just-containers/s6-overlay)..
+The following line is only in this repo for loop testing:
+- { date: "01.01.50:", desc: "I am the release message for this internal repo." }

View file

@ -3,11 +3,9 @@
 # jenkins variables
 project_name: docker-baseimage-alpine
 external_type: os
-release_type: stable
-release_tag: "3.21"
-ls_branch: master
-image_sbom: true
-image_provenance: true
+release_type: prerelease
+release_tag: 3.9
+ls_branch: 3.9
 repo_vars:
 - BUILD_VERSION_ARG = 'OS'
 - LS_USER = 'linuxserver'
@ -23,6 +21,6 @@ repo_vars:
 - CI_PORT='80'
 - CI_SSL='true'
 - CI_DELAY='30'
-- CI_DOCKERENV='LSIO_FIRST_PARTY=true'
-- CI_AUTH=''
+- CI_DOCKERENV='TZ=US/Pacific'
+- CI_AUTH='user:password'
 - CI_WEBPATH=''

View file

@ -1,51 +1,28 @@
-NAME VERSION TYPE
-acl-libs 2.3.2-r1 apk
-alpine-baselayout 3.6.8-r1 apk
-alpine-baselayout-data 3.6.8-r1 apk
-alpine-keys 2.5-r0 apk
-alpine-release 3.21.3-r0 apk
-apk-tools 2.14.6-r3 apk
-bash 5.2.37-r0 apk
-brotli-libs 1.1.0-r2 apk
-busybox 1.37.0-r12 apk
-busybox-binsh 1.37.0-r12 apk
-c-ares 1.34.5-r0 apk
-ca-certificates 20241121-r1 apk
-ca-certificates-bundle 20241121-r1 apk
-catatonit 0.2.0-r0 apk
-coreutils 9.5-r2 apk
-coreutils-env 9.5-r2 apk
-coreutils-fmt 9.5-r2 apk
-coreutils-sha512sum 9.5-r2 apk
-curl 8.12.1-r1 apk
-findutils 4.10.0-r0 apk
-jq 1.7.1-r0 apk
-libattr 2.5.2-r2 apk
-libbsd 0.12.2-r0 apk
-libcrypto3 3.3.3-r0 apk
-libcurl 8.12.1-r1 apk
-libidn2 2.3.7-r0 apk
-libintl 0.22.5-r0 apk
-libmd 1.1.0-r0 apk
-libncursesw 6.5_p20241006-r3 apk
-libproc2 4.0.4-r2 apk
-libpsl 0.21.5-r3 apk
-libssl3 3.3.3-r0 apk
-libunistring 1.2-r0 apk
-linux-pam 1.6.1-r1 apk
-musl 1.2.5-r9 apk
-musl-utils 1.2.5-r9 apk
-ncurses-terminfo-base 6.5_p20241006-r3 apk
-netcat-openbsd 1.226.1.1-r0 apk
-nghttp2-libs 1.64.0-r0 apk
-oniguruma 6.9.9-r0 apk
-procps-ng 4.0.4-r2 apk
-readline 8.2.13-r0 apk
-scanelf 1.3.8-r1 apk
-shadow 4.16.0-r1 apk
-skalibs-libs 2.14.3.0-r0 apk
-ssl_client 1.37.0-r12 apk
-tzdata 2025b-r0 apk
-utmps-libs 0.1.2.3-r2 apk
-zlib 1.3.1-r2 apk
-zstd-libs 1.5.6-r2 apk
+alpine-baselayout-3.1.0-r3
+alpine-keys-2.1-r1
+apk-tools-2.10.3-r1
+bash-4.4.19-r1
+busybox-1.29.3-r10
+ca-certificates-20190108-r0
+ca-certificates-cacert-20190108-r0
+coreutils-8.30-r0
+libacl-2.2.52-r5
+libattr-2.4.47-r7
+libc-utils-0.7.1-r0
+libcrypto1.1-1.1.1d-r0
+libssl1.1-1.1.1d-r0
+libtls-standalone-2.7.4-r6
+linux-pam-1.3.0-r0
+musl-1.1.20-r5
+musl-utils-1.1.20-r5
+ncurses-libs-6.1_p20190105-r0
+ncurses-terminfo-6.1_p20190105-r0
+ncurses-terminfo-base-6.1_p20190105-r0
+readline-7.0.003-r1
+scanelf-1.2.3-r0
+shadow-4.5-r2
+ssl_client-1.29.3-r10
+tzdata-2019b-r0
+xz-5.2.4-r0
+xz-libs-5.2.4-r0
+zlib-1.2.11-r1

View file

@ -4,22 +4,29 @@
 project_name: baseimage-alpine
 full_custom_readme: |
 {% raw -%}
-[![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)](https://linuxserver.io)
+[linuxserverurl]: https://linuxserver.io
+[forumurl]: https://forum.linuxserver.io
+[ircurl]: https://www.linuxserver.io/irc/
+[appurl]: https://alpinelinux.org
+[![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png?v=4&s=4000)][linuxserverurl]
 ## Contact information:-
 | Type | Address/Details |
 | :---: | --- |
-| Discord | [Discord](https://linuxserver.io/discord) |
-| IRC | `#linuxserver.io` on irc.libera.chat |
-| Forum | [Discourse](https://discourse.linuxserver.io/) |
-A custom base image built with [Alpine Linux](https://alpinelinux.org) and [s6-overlay](https://github.com/just-containers/s6-overlay).
-- Support for using our base images in your own projects is provided on a Reasonable Endeavours basis, please see our [Support Policy](https://www.linuxserver.io/supportpolicy) for details.
-- There is no `latest` tag for any of our base images, by design. We often make breaking changes between versions, and we don't publish release notes like we do for the downstream images.
-- If you're intending to distribute an image using one of our bases, please read our [docs on container branding](https://docs.linuxserver.io/general/container-branding/) first.
-- Alpine releases are supported for 2 years, after which we will stop building new base images for that version.
+| Discord | [Discord](https://discord.gg/YWrKVTn) |
+| IRC | freenode at `#linuxserver.io` more information at:- [IRC][ircurl]
+| Forum | [Linuserver.io forum][forumurl] |
+&nbsp;
+&nbsp;
+[![](https://images.microbadger.com/badges/image/lsiobase/alpine.svg)](https://microbadger.com/images/lsiobase/alpine "Get your own image badge on microbadger.com")
+[![](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/Dockerfile-Link-green.png)](https://github.com/linuxserver/docker-baseimage-alpine/blob/master/Dockerfile)
+A custom base image built with [Alpine linux][appurl] and [S6 overlay](https://github.com/just-containers/s6-overlay)..
 The following line is only in this repo for loop testing:

root/docker-mods Executable file (80 changed lines)
View file

@ -0,0 +1,80 @@
#!/usr/bin/with-contenv bash
# Exit if mods is not set
if [ -z ${DOCKER_MODS+x} ]; then
exit 0
fi
# Check for curl
if [ ! -f /usr/bin/curl ]; then
echo "[mod-init] Curl was not found on this system for Docker mods installing"
if [ -f /usr/bin/apt ]; then
## Ubuntu
apt-get update
apt-get install --no-install-recommends -y \
curl
elif [ -f /sbin/apk ]; then
# Alpine
apk add --no-cache \
curl
fi
fi
# Main run logic
echo "[mod-init] Attempting to run Docker Modification Logic"
IFS='|'
DOCKER_MODS=(${DOCKER_MODS})
for DOCKER_MOD in "${DOCKER_MODS[@]}"; do
FILENAME=$(echo ${DOCKER_MOD} | sed 's/[:\/]/./g')
ENDPOINT=$(echo ${DOCKER_MOD} | awk -F: '{print $1}')
USERNAME=$(echo ${ENDPOINT} | awk -F/ '{print $1}')
TAG=$(echo ${DOCKER_MOD} | awk -F: '{print $2}')
# Kill off modification logic if any of the usernames are banned
BLACKLIST=$(curl -s https://raw.githubusercontent.com/linuxserver/docker-mods/master/blacklist.txt)
IFS=$'\n'
BLACKLIST=(${BLACKLIST})
for BANNED in "${BLACKLIST[@]}"; do
if [ "${BANNED}" == "${USERNAME,,}" ]; then
if [ -z ${RUN_BANNED_MODS+x} ]; then
echo "[mod-init] ${DOCKER_MOD} is banned from use due to reported abuse aborting mod logic"
exit 0
else
echo "[mod-init] You have chosen to run banned mods ${DOCKER_MOD} will be applied"
fi
fi
done
echo "[mod-init] Applying ${DOCKER_MOD} files to container"
# Get Dockerhub token for api operations
TOKEN=\
"$(curl \
--silent \
--header 'GET' \
"https://auth.docker.io/token?service=registry.docker.io&scope=repository:${ENDPOINT}:pull" \
| awk -F'"' '{print $4}' \
)"
# Determine first and only layer of image
SHALAYER=\
"$(curl \
--silent \
--location \
--request GET \
--header "Authorization: Bearer ${TOKEN}" \
https://registry-1.docker.io/v2/${ENDPOINT}/manifests/${TAG} \
|grep -m1 "blobSum" \
| awk -F'"' '{print $4}' \
)"
# Check if we have allready applied this layer
if [ -f "/${FILENAME}" ] && [ "${SHALAYER}" == "$(cat /${FILENAME})" ]; then
echo "[mod-init] ${DOCKER_MOD} at ${SHALAYER} has been previously applied skipping"
else
# Download and extract layer to /
curl \
--silent \
--location \
--request GET \
--header "Authorization: Bearer ${TOKEN}" \
"https://registry-1.docker.io/v2/${ENDPOINT}/blobs/${SHALAYER}" \
| tar xz -C /
echo ${SHALAYER} > "/${FILENAME}"
fi
done
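
As a usage sketch (the mod names below are placeholders, not real mods, and the image tag is illustrative), the script above is driven entirely by the DOCKER_MODS environment variable, with multiple mods separated by a pipe and each given as dockerhubuser/image:tag:

docker run -d \
  -e DOCKER_MODS="someuser/some-mod:latest|otheruser/other-mod:latest" \
  lsiobase/alpine:3.9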

View file

@ -0,0 +1,31 @@
#!/usr/bin/with-contenv bash
PUID=${PUID:-911}
PGID=${PGID:-911}
groupmod -o -g "$PGID" abc
usermod -o -u "$PUID" abc
echo '
-------------------------------------
_ ()
| | ___ _ __
| | / __| | | / \
| | \__ \ | | | () |
|_| |___/ |_| \__/
Brought to you by linuxserver.io
We gratefully accept donations at:
https://www.linuxserver.io/donate/
-------------------------------------
GID/UID
-------------------------------------'
echo "
User uid: $(id -u abc)
User gid: $(id -g abc)
-------------------------------------
"
chown abc:abc /app
chown abc:abc /config
chown abc:abc /defaults
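
A minimal run sketch for the PUID/PGID remapping handled above (the values and image tag are illustrative; 911 remains the default when the variables are unset):

docker run -d \
  -e PUID=1000 \
  -e PGID=1000 \
  lsiobase/alpine:3.9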

View file

@ -0,0 +1,49 @@
#!/usr/bin/with-contenv bash
# Directories
SCRIPTS_DIR="/config/custom-cont-init.d"
SERVICES_DIR="/config/custom-services.d"
# Remove all existing custom services before continuing to ensure
# we aren't running anything the user may have removed
if [ -n "$(/bin/ls -A /etc/services.d/custom-service-* 2>/dev/null)" ]; then
echo "[custom-init] removing existing custom services..."
rm -rf /etc/services.d/custom-service-*
fi
# Make sure custom init directory exists and has files in it
if ([ -e "${SCRIPTS_DIR}" ] && \
[ -n "$(/bin/ls -A ${SCRIPTS_DIR} 2>/dev/null)" ]) || \
([ -e "${SERVICES_DIR}" ] && \
[ -n "$(/bin/ls -A ${SERVICES_DIR} 2>/dev/null)" ]); then
if [ -n "$(/bin/ls -A ${SCRIPTS_DIR} 2>/dev/null)" ]; then
echo "[custom-init] files found in ${SCRIPTS_DIR} executing"
for SCRIPT in ${SCRIPTS_DIR}/*; do
NAME="$(basename "${SCRIPT}")"
if [ -f "${SCRIPT}" ]; then
echo "[custom-init] ${NAME}: executing..."
/bin/bash ${SCRIPT}
echo "[custom-init] ${NAME}: exited $?"
elif [ ! -f "${SCRIPT}" ]; then
echo "[custom-init] ${NAME}: is not a file"
fi
done
fi
if [ -n "$(/bin/ls -A ${SERVICES_DIR} 2>/dev/null)" ]; then
echo "[custom-init] service files found in ${SERVICES_DIR}"
for SERVICE in ${SERVICES_DIR}/*; do
NAME="$(basename "${SERVICE}")"
if [ -f "${SERVICE}" ]; then
echo "[custom-init] ${NAME}: service detected, copying..."
mkdir -p /etc/services.d/custom-service-${NAME}/
cp ${SERVICE} /etc/services.d/custom-service-${NAME}/run
chmod +x /etc/services.d/custom-service-${NAME}/run
echo "[custom-init] ${NAME}: copied"
elif [ ! -f "${SERVICE}" ]; then
echo "[custom-init] ${NAME}: is not a file"
fi
done
fi
else
echo "[custom-init] no custom files found exiting..."
fi
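
A sketch of how the two directories above are typically populated from the host (the host paths and image tag are illustrative):

docker run -d \
  -v /host/path/custom-cont-init.d:/config/custom-cont-init.d \
  -v /host/path/custom-services.d:/config/custom-services.d \
  lsiobase/alpine:3.9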

View file

@ -1 +0,0 @@
oneshot

View file

@ -1 +0,0 @@
echo "[ls.io-init] done."

View file

@ -1,3 +0,0 @@
───────
meatbag
───────

View file

@ -1,57 +0,0 @@
#!/usr/bin/with-contenv bash
# shellcheck shell=bash
PUID=${PUID:-911}
PGID=${PGID:-911}
if [[ -z ${LSIO_READ_ONLY_FS} ]] && [[ -z ${LSIO_NON_ROOT_USER} ]]; then
USERHOME=$(grep abc /etc/passwd | cut -d ":" -f6)
usermod -d "/root" abc
groupmod -o -g "${PGID}" abc
usermod -o -u "${PUID}" abc
usermod -d "${USERHOME}" abc
fi
if { [[ -z ${LSIO_READ_ONLY_FS} ]] && [[ -z ${LSIO_NON_ROOT_USER} ]]; } || [[ ! ${LSIO_FIRST_PARTY} = "true" ]]; then
cat /etc/s6-overlay/s6-rc.d/init-adduser/branding
else
cat /run/branding
fi
if [[ -f /donate.txt ]]; then
echo '
To support the app dev(s) visit:'
cat /donate.txt
fi
echo '
To support LSIO projects visit:
https://www.linuxserver.io/donate/
───────────────────────────────────────
GID/UID
───────────────────────────────────────'
if [[ -z ${LSIO_NON_ROOT_USER} ]]; then
echo "
User UID: $(id -u abc)
User GID: $(id -g abc)
───────────────────────────────────────"
else
echo "
User UID: $(stat /run -c %u)
User GID: $(stat /run -c %g)
───────────────────────────────────────"
fi
if [[ -f /build_version ]]; then
cat /build_version
echo '
───────────────────────────────────────
'
fi
if [[ -z ${LSIO_READ_ONLY_FS} ]] && [[ -z ${LSIO_NON_ROOT_USER} ]]; then
lsiown abc:abc /app
lsiown abc:abc /config
lsiown abc:abc /defaults
fi

View file

@ -1 +0,0 @@
oneshot

View file

@ -1 +0,0 @@
/etc/s6-overlay/s6-rc.d/init-adduser/run

View file

@ -1 +0,0 @@
oneshot

View file

@ -1 +0,0 @@
# This file doesn't do anything, it's just the end of the downstream image init process

View file

@ -1 +0,0 @@
oneshot

View file

@ -1 +0,0 @@
# This file doesn't do anything, it's just the start of the downstream image init process

View file

@ -1,33 +0,0 @@
#!/usr/bin/with-contenv bash
# shellcheck shell=bash
for cron_user in abc root; do
if [[ -z ${LSIO_READ_ONLY_FS} ]] && [[ -z ${LSIO_NON_ROOT_USER} ]]; then
if [[ -f "/etc/crontabs/${cron_user}" ]]; then
lsiown "${cron_user}":"${cron_user}" "/etc/crontabs/${cron_user}"
crontab -u "${cron_user}" "/etc/crontabs/${cron_user}"
fi
fi
if [[ -f "/defaults/crontabs/${cron_user}" ]]; then
# make folders
mkdir -p \
/config/crontabs
# if crontabs do not exist in config
if [[ ! -f "/config/crontabs/${cron_user}" ]]; then
# copy crontab from system
if crontab -l -u "${cron_user}" >/dev/null 2>&1; then
crontab -l -u "${cron_user}" >"/config/crontabs/${cron_user}"
fi
# if crontabs still do not exist in config (were not copied from system)
# copy crontab from image defaults (using -n, do not overwrite an existing file)
cp -n "/defaults/crontabs/${cron_user}" /config/crontabs/
fi
# set permissions and import user crontabs
lsiown "${cron_user}":"${cron_user}" "/config/crontabs/${cron_user}"
crontab -u "${cron_user}" "/config/crontabs/${cron_user}"
fi
done

View file

@ -1 +0,0 @@
oneshot

View file

@ -1 +0,0 @@
/etc/s6-overlay/s6-rc.d/init-crontab-config/run

View file

@ -1,22 +0,0 @@
#!/usr/bin/with-contenv bash
# shellcheck shell=bash
# Directories
SCRIPTS_DIR="/custom-cont-init.d"
# Make sure custom init directory exists and has files in it
if [[ -e "${SCRIPTS_DIR}" ]] && [[ -n "$(/bin/ls -A ${SCRIPTS_DIR} 2>/dev/null)" ]]; then
echo "[custom-init] Files found, executing"
for SCRIPT in "${SCRIPTS_DIR}"/*; do
NAME="$(basename "${SCRIPT}")"
if [[ -f "${SCRIPT}" ]]; then
echo "[custom-init] ${NAME}: executing..."
/bin/bash "${SCRIPT}"
echo "[custom-init] ${NAME}: exited $?"
elif [[ ! -f "${SCRIPT}" ]]; then
echo "[custom-init] ${NAME}: is not a file"
fi
done
else
echo "[custom-init] No custom files found, skipping..."
fi

View file

@ -1 +0,0 @@
oneshot

View file

@ -1 +0,0 @@
/etc/s6-overlay/s6-rc.d/init-custom-files/run

View file

@ -1,19 +0,0 @@
#!/usr/bin/with-contenv bash
# shellcheck shell=bash
if find /run/s6/container_environment/FILE__* -maxdepth 1 > /dev/null 2>&1; then
for FILENAME in /run/s6/container_environment/FILE__*; do
SECRETFILE=$(cat "${FILENAME}")
if [[ -f ${SECRETFILE} ]]; then
FILESTRIP=${FILENAME//FILE__/}
if [[ $(tail -n1 "${SECRETFILE}" | wc -l) != 0 ]]; then
echo "[env-init] Your secret: ${FILENAME##*/}"
echo " contains a trailing newline and may not work as expected"
fi
cat "${SECRETFILE}" >"${FILESTRIP}"
echo "[env-init] ${FILESTRIP##*/} set from ${FILENAME##*/}"
else
echo "[env-init] cannot find secret in ${FILENAME##*/}"
fi
done
fi
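
A sketch of the FILE__ convention handled above, assuming a secret file mounted into the container (the variable name, paths, and image tag are illustrative):

docker run -d \
  -e FILE__MYSECRET=/run/secrets/mysecret \
  -v /host/path/mysecret:/run/secrets/mysecret:ro \
  lsiobase/alpine:3.21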

View file

@ -1 +0,0 @@
oneshot

View file

@ -1 +0,0 @@
/etc/s6-overlay/s6-rc.d/init-envfile/run

View file

@ -1,32 +0,0 @@
#!/usr/bin/with-contenv bash
# shellcheck shell=bash
MIGRATIONS_DIR="/migrations"
MIGRATIONS_HISTORY="/config/.migrations"
echo "[migrations] started"
if [[ ! -d ${MIGRATIONS_DIR} ]]; then
echo "[migrations] no migrations found"
exit
fi
for MIGRATION in $(find ${MIGRATIONS_DIR}/* | sort -n); do
NAME="$(basename "${MIGRATION}")"
if [[ -f ${MIGRATIONS_HISTORY} ]] && grep -Fxq "${NAME}" ${MIGRATIONS_HISTORY}; then
echo "[migrations] ${NAME}: skipped"
continue
fi
echo "[migrations] ${NAME}: executing..."
# Execute migration script in a subshell to prevent it from modifying the current environment
("${MIGRATION}")
EXIT_CODE=$?
if [[ ${EXIT_CODE} -ne 0 ]]; then
echo "[migrations] ${NAME}: failed with exit code ${EXIT_CODE}, contact support"
exit "${EXIT_CODE}"
fi
echo "${NAME}" >>${MIGRATIONS_HISTORY}
echo "[migrations] ${NAME}: succeeded"
done
echo "[migrations] done"

View file

@ -1 +0,0 @@
oneshot

View file

@ -1 +0,0 @@
/etc/s6-overlay/s6-rc.d/init-migrations/run

View file

@ -1 +0,0 @@
oneshot

View file

@ -1 +0,0 @@
# This file doesn't do anything, it's just the end of the mod init process

View file

@ -1 +0,0 @@
/etc/s6-overlay/s6-rc.d/init-mods-package-install/run

View file

@ -1 +0,0 @@
oneshot

View file

@ -1 +0,0 @@
# This file doesn't do anything, it's just the start of the mod init process

View file

@ -1 +0,0 @@
oneshot

View file

@ -1 +0,0 @@
# This file doesn't do anything, it's just the end of the mod init process

View file

@ -1 +0,0 @@
oneshot

View file

@ -1 +0,0 @@
# This file doesn't do anything, it just signals that services can start

View file

@ -1,15 +0,0 @@
#!/usr/bin/with-contenv bash
# shellcheck shell=bash
if builtin command -v crontab >/dev/null 2>&1 && [[ -n "$(crontab -l -u abc 2>/dev/null || true)" || -n "$(crontab -l -u root 2>/dev/null || true)" ]]; then
if builtin command -v busybox >/dev/null 2>&1 && [[ $(busybox || true) =~ [[:space:]](crond)([,]|$) ]]; then
exec busybox crond -f -S -l 5
elif [[ -f /usr/bin/apt ]] && [[ -f /usr/sbin/cron ]]; then
exec /usr/sbin/cron -f -L 5
else
echo "**** cron not found ****"
sleep infinity
fi
else
sleep infinity
fi

View file

@ -1 +0,0 @@
longrun

root/etc/s6/init/init-stage2 Executable file (226 changed lines)
View file

@ -0,0 +1,226 @@
#!/bin/execlineb -S0
# This file is executed (not as process 1!) as soon as s6-svscan
# starts, with the original stdin/out/err, but NOT the original
# environment.
# Purpose of this file: to perform all the one-time initialization tasks.
# Merge environments from our custom stage into current context
s6-envdir -I /var/run/s6/env-stage2
# This env decides what to do if stage2 fails
backtick -D 0 -n S6_BEHAVIOUR_IF_STAGE2_FAILS { printcontenv S6_BEHAVIOUR_IF_STAGE2_FAILS }
importas -u S6_BEHAVIOUR_IF_STAGE2_FAILS S6_BEHAVIOUR_IF_STAGE2_FAILS
# This env determines whether user provided files in /etc should be linked
# or copied into /var/run/s6
backtick -D 0 -n S6_READ_ONLY_ROOT { printcontenv S6_READ_ONLY_ROOT }
importas -u S6_READ_ONLY_ROOT S6_READ_ONLY_ROOT
# Docker Mods run logic
foreground
{
/docker-mods
}
foreground
{
if
{
/etc/s6/init/init-stage2-redirfd
foreground
{
##
## copy user provided files to /var/run/s6/etc, depending on S6_RUNTIME_PROFILE env,
## /etc (if not defined) or /etc/cont-profile.d/${S6_RUNTIME_PROFILE} will be used
## as copying source.
##
if
{
if { s6-echo -n -- "[s6-init] making user provided files available at /var/run/s6/etc..." }
foreground
{
backtick -n S6_RUNTIME_PROFILE { printcontenv S6_RUNTIME_PROFILE }
importas -u S6_RUNTIME_PROFILE S6_RUNTIME_PROFILE
backtick -n S6_RUNTIME_PROFILE_SRC {
ifte { s6-echo "/etc/cont-profile.d/${S6_RUNTIME_PROFILE}" } { s6-echo "/etc" }
s6-test -n ${S6_RUNTIME_PROFILE}
}
importas -u S6_RUNTIME_PROFILE_SRC S6_RUNTIME_PROFILE_SRC
if { s6-rmrf /var/run/s6/etc }
if { s6-mkdir -pm 0755 /var/run/s6/etc }
forx i { "fix-attrs.d" "cont-init.d" "cont-finish.d" "services.d" }
importas -u i i
if { s6-test -d ${S6_RUNTIME_PROFILE_SRC}/${i} }
# although s6-hiercopy is prefered, and until it doesn't support 'follow symlinks'
# option, there is no clean way to allow symlinks between user provided runcoms.
ifelse { s6-test ${S6_READ_ONLY_ROOT} -eq 0 } {
s6-ln -s ${S6_RUNTIME_PROFILE_SRC}/${i} /var/run/s6/etc/${i}
}
if { s6-hiercopy ${S6_RUNTIME_PROFILE_SRC}/${i} /var/run/s6/etc/${i} }
}
importas -u ? ?
if { s6-echo -- "exited ${?}." }
ifelse { s6-test ${S6_BEHAVIOUR_IF_STAGE2_FAILS} -eq 0 } { exit 0 }
exit ${?}
}
##
## fix-attrs: ensure user-provided files have correct ownership & perms
##
if
{
if { s6-echo -n -- "[s6-init] ensuring user provided files have correct perms..." }
foreground { redirfd -r 0 /etc/s6/init/init-stage2-fixattrs.txt fix-attrs }
importas -u ? ?
if { s6-echo -- "exited ${?}." }
ifelse { s6-test ${S6_BEHAVIOUR_IF_STAGE2_FAILS} -eq 0 } { exit 0 }
exit ${?}
}
##
## fix-attrs.d: apply user-provided ownership & permission fixes
##
if
{
if -t { s6-test -d /var/run/s6/etc/fix-attrs.d }
if { s6-echo "[fix-attrs.d] applying ownership & permissions fixes..." }
if
{
pipeline { s6-ls -0 -- /var/run/s6/etc/fix-attrs.d }
pipeline { s6-sort -0 -- }
forstdin -0 -- i
importas -u i i
if { s6-echo -- "[fix-attrs.d] ${i}: applying... " }
foreground { redirfd -r 0 /var/run/s6/etc/fix-attrs.d/${i} fix-attrs }
importas -u ? ?
if { s6-echo -- "[fix-attrs.d] ${i}: exited ${?}." }
ifelse { s6-test ${S6_BEHAVIOUR_IF_STAGE2_FAILS} -eq 0 } { exit 0 }
exit ${?}
}
if { s6-echo -- "[fix-attrs.d] done." }
}
##
## cont-init.d: one-time init scripts
##
if
{
if -t { s6-test -d /var/run/s6/etc/cont-init.d }
if { s6-echo "[cont-init.d] executing container initialization scripts..." }
if
{
pipeline { s6-ls -0 -- /var/run/s6/etc/cont-init.d }
pipeline { s6-sort -0 -- }
forstdin -o 0 -0 -- i
importas -u i i
if { s6-echo -- "[cont-init.d] ${i}: executing... " }
foreground { /var/run/s6/etc/cont-init.d/${i} }
importas -u ? ?
if { s6-echo -- "[cont-init.d] ${i}: exited ${?}." }
ifelse { s6-test ${S6_BEHAVIOUR_IF_STAGE2_FAILS} -eq 0 } { exit 0 }
exit ${?}
}
if { s6-echo -- "[cont-init.d] done." }
}
##
## services.d: long-lived processes to be supervised
##
if
{
if -t { s6-test -d /var/run/s6/etc/services.d }
if { s6-echo "[services.d] starting services" }
if
{
pipeline { s6-ls -0 -- /var/run/s6/etc/services.d }
forstdin -0 -p -- i
importas -u i i
if { s6-test -d /var/run/s6/etc/services.d/${i} }
s6-hiercopy /var/run/s6/etc/services.d/${i} /var/run/s6/services/${i}
}
if { s6-svscanctl -a /var/run/s6/services }
if
{
# This envs decide if CMD should wait until services are up
backtick -D 0 -n S6_CMD_WAIT_FOR_SERVICES { printcontenv S6_CMD_WAIT_FOR_SERVICES }
importas -u S6_CMD_WAIT_FOR_SERVICES S6_CMD_WAIT_FOR_SERVICES
backtick -D 5000 -n S6_CMD_WAIT_FOR_SERVICES_MAXTIME { printcontenv S6_CMD_WAIT_FOR_SERVICES_MAXTIME }
importas -u S6_CMD_WAIT_FOR_SERVICES_MAXTIME S6_CMD_WAIT_FOR_SERVICES_MAXTIME
if -t { if { s6-test ${S6_CMD_WAIT_FOR_SERVICES} -ne 0 } s6-test $# -ne 0 }
s6-maximumtime -t ${S6_CMD_WAIT_FOR_SERVICES_MAXTIME}
pipeline { s6-ls -0 -- /var/run/s6/etc/services.d }
forstdin -0 -o 0 -- i
importas -u i i
ifelse { s6-test -f /var/run/s6/services/${i}/down } { exit 0 }
ifelse { s6-test -f /var/run/s6/services/${i}/notification-fd }
{
s6-svwait -t ${S6_CMD_WAIT_FOR_SERVICES_MAXTIME} -U /var/run/s6/services/${i}
}
s6-svwait -t ${S6_CMD_WAIT_FOR_SERVICES_MAXTIME} -u /var/run/s6/services/${i}
}
if { s6-echo -- "[services.d] done." }
}
}
importas -u ? ?
ifelse { s6-test ${S6_BEHAVIOUR_IF_STAGE2_FAILS} -eq 0 } { exit 0 }
# Make stage2 exit code available in stage3
foreground { redirfd -w 1 /var/run/s6/env-stage3/S6_STAGE2_EXITED s6-echo -n -- "${?}" }
exit ${?}
}
##
## The init is complete, If the user has a given CMD, run it now, then
## kill everything when it exits.
##
if -t { s6-test $# -ne 0 }
foreground {
s6-setsid -gq -- with-contenv
backtick -D 0 -n S6_LOGGING { printcontenv S6_LOGGING }
importas S6_LOGGING S6_LOGGING
ifelse { s6-test ${S6_LOGGING} -eq 2 }
{
redirfd -w 1 /var/run/s6/uncaught-logs-fifo
fdmove -c 2 1
$@
}
$@
}
importas -u ? ?
foreground {
/etc/s6/init/init-stage2-redirfd
s6-echo -- "[cmd] ${1} exited ${?}"
}
# Make CMD exit code available in stage3
foreground { redirfd -w 1 /var/run/s6/env-stage3/S6_STAGE2_EXITED s6-echo -n -- "${?}" }
# Stop supervision tree
foreground { s6-svscanctl -t /var/run/s6/services }
# Wait to be nuked
s6-pause -th
}
importas -u ? ?
if { s6-test ${?} -ne 0 }
if { s6-test ${S6_BEHAVIOUR_IF_STAGE2_FAILS} -ne 0 }
ifelse { s6-test ${S6_BEHAVIOUR_IF_STAGE2_FAILS} -ne 1 }
{
s6-svscanctl -t /var/run/s6/services
}
s6-echo -- "\n!!!!!\n init-stage2 failed.\n!!!!!"