diff --git a/CHANGELOG.md b/CHANGELOG.md index f0b045c0004..cf9955c5bf4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,14 @@ +# [2022-12-09] (Chart Release ) + +## Bug fixes and other updates + +* Prevention of storing unnecessary data in the database if adding a bot to a conversation fails. (#2870) + +## Internal changes + +* bump nginx-module-vts from v0.1.15 to v0.2.1 (#2827) +* Build nginz and nginz_disco docker images using nix (#2796) + # [2022-11-03] (Chart Release 4.26.0) ## Release notes diff --git a/changelog.d/5-internal/bump-nginx-module-vts b/changelog.d/5-internal/bump-nginx-module-vts deleted file mode 100644 index a2e4ab6582d..00000000000 --- a/changelog.d/5-internal/bump-nginx-module-vts +++ /dev/null @@ -1 +0,0 @@ -bump nginx-module-vts from v0.1.15 to v0.2.1 (#2827) diff --git a/charts/nginz/templates/conf/_nginx.conf.tpl b/charts/nginz/templates/conf/_nginx.conf.tpl index 7b28c77493c..29f8e28e8fd 100644 --- a/charts/nginz/templates/conf/_nginx.conf.tpl +++ b/charts/nginz/templates/conf/_nginx.conf.tpl @@ -37,6 +37,8 @@ http { types_hash_max_size 2048; map_hash_bucket_size 128; + variables_hash_bucket_size 256; + server_names_hash_bucket_size 64; server_name_in_redirect off; diff --git a/hack/bin/upload-image.sh b/hack/bin/upload-image.sh index 9540158d342..e49eaca08b1 100755 --- a/hack/bin/upload-image.sh +++ b/hack/bin/upload-image.sh @@ -17,8 +17,8 @@ readonly DOCKER_TAG=${DOCKER_TAG:?"Please set the DOCKER_TAG env variable"} readonly usage="USAGE: $0 " readonly IMAGE_ATTR=${1:?$usage} -SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -ROOT_DIR=$(cd -- "$SCRIPT_DIR/../../" &> /dev/null && pwd) +SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) +ROOT_DIR=$(cd -- "$SCRIPT_DIR/../../" &>/dev/null && pwd) readonly SCRIPT_DIR ROOT_DIR credsArgs="" @@ -27,6 +27,39 @@ if [[ "${DOCKER_USER+x}" != "" ]]; then credsArgs="--dest-creds=$DOCKER_USER:$DOCKER_PASSWORD" fi +# Retry a command with exponential backoff +# quay.io sometimes rate-limits us, so try again. +# Also, skopeo's retry logic doesn't properly work, look here if you want to see very badly written go code: +# https://github.com/containers/skopeo/blob/869d496f185cc086f22d6bbb79bb57ac3a415617/vendor/github.com/containers/common/pkg/retry/retry.go#L52-L113 +function retry { + local maxAttempts=$1 + local secondsDelay=1 + local attemptCount=1 + local output= + shift 1 + + while [ $attemptCount -le "$maxAttempts" ]; do + output=$("$@") + local status=$? + + if [ $status -eq 0 ]; then + break + fi + + if [ $attemptCount -lt "$maxAttempts" ]; then + echo "Command [$*] failed after attempt $attemptCount of $maxAttempts. Retrying in $secondsDelay second(s)." >&2 + sleep $secondsDelay + elif [ $attemptCount -eq "$maxAttempts" ]; then + echo "Command [$*] failed after $attemptCount attempt(s)" >&2 + return $status + fi + attemptCount=$((attemptCount + 1)) + secondsDelay=$((secondsDelay * 2)) + done + + echo "$output" +} + tmp_link_store=$(mktemp -d) # Using dockerTools.streamLayeredImage outputs an executable which prints the # image tar on stdout when executed. 
This is done so we don't store large images @@ -38,8 +71,8 @@ tmp_link_store=$(mktemp -d) image_stream_file="$tmp_link_store/image_stream" nix -v --show-trace -L build -f "$ROOT_DIR/nix" "$IMAGE_ATTR" -o "$image_stream_file" image_file="$tmp_link_store/image" -"$image_stream_file" > "$image_file" +"$image_stream_file" >"$image_file" repo=$(skopeo list-tags "docker-archive://$image_file" | jq -r '.Tags[0] | split(":") | .[0]') -printf "*** Uploading $image_file to %s:%s" "$repo" "$DOCKER_TAG" +printf "*** Uploading $image_file to %s:%s\n" "$repo" "$DOCKER_TAG" # shellcheck disable=SC2086 -skopeo --insecure-policy copy --retry-times 5 $credsArgs "docker-archive://$image_file" "docker://$repo:$DOCKER_TAG" +retry 5 skopeo --insecure-policy copy --retry-times 5 $credsArgs "docker-archive://$image_file" "docker://$repo:$DOCKER_TAG" diff --git a/hack/bin/upload-images.sh b/hack/bin/upload-images.sh index 205a5dfab09..79c0798f2ce 100755 --- a/hack/bin/upload-images.sh +++ b/hack/bin/upload-images.sh @@ -17,8 +17,8 @@ readonly usage="USAGE: $0 " # nix attribute under wireServer from "$ROOT_DIR/nix" containing all the images readonly IMAGES_ATTR=${1:?$usage} -SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -ROOT_DIR=$(cd -- "$SCRIPT_DIR/../../" &> /dev/null && pwd) +SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) +ROOT_DIR=$(cd -- "$SCRIPT_DIR/../../" &>/dev/null && pwd) readonly SCRIPT_DIR ROOT_DIR tmp_link_store=$(mktemp -d) @@ -28,8 +28,12 @@ nix -v --show-trace -L build -f "$ROOT_DIR/nix" wireServer.imagesList -o "$image # Build everything first so we can benefit the most from having many cores. nix -v --show-trace -L build -f "$ROOT_DIR/nix" "wireServer.$IMAGES_ATTR" --no-link -while IFS="" read -r image_name || [ -n "$image_name" ] -do +while IFS="" read -r image_name || [ -n "$image_name" ]; do printf '*** Uploading image %s\n' "$image_name" "$SCRIPT_DIR/upload-image.sh" "wireServer.$IMAGES_ATTR.$image_name" -done < "$image_list_file" +done <"$image_list_file" + +for image_name in nginz nginz-disco; do + printf '*** Uploading image %s\n' "$image_name" + "$SCRIPT_DIR/upload-image.sh" "$image_name" +done diff --git a/nix/default.nix b/nix/default.nix index 11ed4ac483d..34f37250e00 100644 --- a/nix/default.nix +++ b/nix/default.nix @@ -11,16 +11,18 @@ let }; profileEnv = pkgs.writeTextFile { - name = "profile-env"; - destination = "/.profile"; - # This gets sourced by direnv. Set NIX_PATH, so `nix-shell` uses the same nixpkgs as here. - text = '' - export NIX_PATH=nixpkgs=${toString pkgs.path} - export LOCALE_ARCHIVE=${pkgs.glibcLocales}/lib/locale/locale-archive - ''; - }; + name = "profile-env"; + destination = "/.profile"; + # This gets sourced by direnv. Set NIX_PATH, so `nix-shell` uses the same nixpkgs as here. 
+ text = '' + export NIX_PATH=nixpkgs=${toString pkgs.path} + export LOCALE_ARCHIVE=${pkgs.glibcLocales}/lib/locale/locale-archive + ''; + }; wireServer = import ./wire-server.nix pkgs; + nginz = pkgs.callPackage ./nginz.nix { }; + nginz-disco = pkgs.callPackage ./nginz-disco.nix { }; # packages necessary to build wire-server docs docsPkgs = [ @@ -65,4 +67,5 @@ let }; mls-test-cli = pkgs.mls-test-cli; rusty-jwt-tools = pkgs.rusty-jwt-tools; -in {inherit pkgs profileEnv wireServer docs docsEnv mls-test-cli;} +in +{ inherit pkgs profileEnv wireServer docs docsEnv mls-test-cli nginz nginz-disco; } diff --git a/nix/nginz-disco.nix b/nix/nginz-disco.nix new file mode 100644 index 00000000000..be3dabac474 --- /dev/null +++ b/nix/nginz-disco.nix @@ -0,0 +1,42 @@ +{ stdenv +, dockerTools +, gnugrep +, coreutils +, which +, dumb-init +, bashInteractive +, lib +, makeWrapper +, writers +, dig +, gawk +, diffutils +}: +let + nginz-disco = stdenv.mkDerivation { + name = "nginz-disco"; + src = (writers.writeBash "nginz_disco.sh" ../tools/nginz_disco/nginz_disco.sh); + phases = "installPhase"; + nativeBuildInputs = [ makeWrapper ]; + installPhase = '' + mkdir -p $out/bin + cp $src $out/bin/nginz_disco.sh + wrapProgram $out/bin/nginz_disco.sh \ + --prefix PATH : "${lib.makeBinPath [ gnugrep gawk dig diffutils ]}" + ''; + }; + + nginz-disco-image = dockerTools.streamLayeredImage { + name = "quay.io/wire/nginz_disco"; + maxLayers = 10; + contents = [ + bashInteractive + coreutils + which + ]; + config = { + Entrypoint = [ "${dumb-init}/bin/dumb-init" "--" "${nginz-disco}/bin/nginz_disco.sh" ]; + }; + }; +in +nginz-disco-image diff --git a/nix/nginz.nix b/nix/nginz.nix new file mode 100644 index 00000000000..67636b9180e --- /dev/null +++ b/nix/nginz.nix @@ -0,0 +1,80 @@ +{ stdenv +, symlinkJoin +, dockerTools +, writeTextDir +, runCommand +, gnugrep +, coreutils +, which +, inotify-tools +, dumb-init +, cacert +, bashInteractive +, lib +, makeWrapper +, writers +, nginz +}: +let + + nginzWithReloader = stdenv.mkDerivation { + name = "reload-script"; + src = (writers.writeBash "nginz_reload.sh" ../services/nginz/nginz_reload.sh); + phases = "installPhase"; + nativeBuildInputs = [ makeWrapper ]; + installPhase = '' + mkdir -p $out/bin + cp $src $out/bin/nginz_reload.sh + wrapProgram $out/bin/nginz_reload.sh \ + --prefix PATH : "${lib.makeBinPath [ inotify-tools nginz ]}" + ''; + }; + + # copied from nixpkgs fakeNss, but using nginx as username + nginxFakeNss = symlinkJoin { + name = "fake-nss"; + paths = [ + (writeTextDir "etc/passwd" '' + root:x:0:0:root user:/var/empty:/bin/sh + nginx:x:101:101:nginx:/var/empty:/bin/sh + nobody:x:65534:65534:nobody:/var/empty:/bin/sh + '') + (writeTextDir "etc/group" '' + root:x:0: + nginx:x:101: + nobody:x:65534: + '') + (writeTextDir "etc/nsswitch.conf" '' + hosts: files dns + '') + (runCommand "var-empty" { } '' + mkdir -p $out/var/empty + '') + # it seems nginx still tries to log, and doesn't create + # these directories automatically + (runCommand "nginx-misc" { } '' + mkdir -p $out/var/log/nginx + mkdir -p $out/var/cache/nginx + '') + ]; + }; + + nginzImage = dockerTools.streamLayeredImage { + name = "quay.io/wire/nginz"; + maxLayers = 10; + contents = [ + cacert + bashInteractive + gnugrep + which + coreutils + nginxFakeNss + nginz # so preStop lifecycle hook in cannon can nginx -c … quit + ]; + config = { + Entrypoint = [ "${dumb-init}/bin/dumb-init" "--" "${nginzWithReloader}/bin/nginz_reload.sh" "-g" "daemon off;" "-c" "/etc/wire/nginz/conf/nginx.conf" ]; + Env = 
[ "SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.crt" ]; + }; + }; +in +nginzImage diff --git a/nix/overlay.nix b/nix/overlay.nix index 917301f96d3..d63b1214324 100644 --- a/nix/overlay.nix +++ b/nix/overlay.nix @@ -30,10 +30,11 @@ let src = if stdenv.isDarwin then - fetchurl { - url = darwinAmd64Url; - sha256 = darwinAmd64Sha256; - } + fetchurl + { + url = darwinAmd64Url; + sha256 = darwinAmd64Sha256; + } else fetchurl { url = linuxAmd64Url; diff --git a/services/brig/src/Brig/Provider/API.hs b/services/brig/src/Brig/Provider/API.hs index 26cbdc0bae1..84ea17a47a3 100644 --- a/services/brig/src/Brig/Provider/API.hs +++ b/services/brig/src/Brig/Provider/API.hs @@ -898,6 +898,14 @@ addBot zuid zcon cid add = do let sid = addBotService add -- Get the conversation and check preconditions cnv <- lift (liftSem $ GalleyProvider.getConv zuid cid) >>= maybeConvNotFound + -- Check that the user is a conversation admin and therefore is allowed to add a bot to this conversation. + -- Note that this precondition is also checked in the internal galley API, + -- but by having this check here we prevent any (useless) data to be written to the database + -- as well as the unnecessary creation of the bot via the external service API call. + -- However, in case we refine the roles model in the future, this check might not be granular enough. + -- In that case we should rather do an internal call to galley to check for the correct permissions. + -- Also see `removeBot` for a similar check. + guardConvAdmin cnv let mems = cnvMembers cnv unless (cnvType cnv == RegularConv) $ throwStd invalidConv @@ -974,6 +982,12 @@ removeBot :: Members '[GalleyProvider] r => UserId -> ConnId -> ConvId -> BotId removeBot zusr zcon cid bid = do -- Get the conversation and check preconditions cnv <- lift (liftSem $ GalleyProvider.getConv zusr cid) >>= maybeConvNotFound + -- Check that the user is a conversation admin and therefore is allowed to remove a bot from the conversation. + -- Note that this precondition is also checked in the internal galley API. + -- However, in case we refine the roles model in the future, this check might not be granular enough. + -- In that case we should rather do an internal call to galley to check for the correct permissions. + -- Also see `addBot` for a similar check. + guardConvAdmin cnv let mems = cnvMembers cnv unless (cnvType cnv == RegularConv) $ throwStd invalidConv @@ -985,6 +999,11 @@ removeBot zusr zcon cid bid = do Just _ -> do lift $ Public.RemoveBotResponse <$$> wrapHttpClient (deleteBot zusr (Just zcon) bid cid) +guardConvAdmin :: Conversation -> ExceptT Error (AppT r) () +guardConvAdmin conv = do + let selfMember = cmSelf . 
cnvMembers $ conv + unless (memConvRoleName selfMember == roleNameWireAdmin) $ throwStd accessDenied + -------------------------------------------------------------------------------- -- Bot API diff --git a/services/brig/test/integration/API/Provider.hs b/services/brig/test/integration/API/Provider.hs index 1449ce29c53..ebadad1e62c 100644 --- a/services/brig/test/integration/API/Provider.hs +++ b/services/brig/test/integration/API/Provider.hs @@ -148,6 +148,7 @@ tests dom conf p db b c g = do testGroup "bot-teams" [ test p "add-remove" $ testAddRemoveBotTeam conf db b g c, + test p "add-remove-access-denied-for-non-conv-admin" $ testNonConvAdminCannotAddRemoveBot conf db b g, test p "team-only" $ testBotTeamOnlyConv conf db b g c, test p "message" $ testMessageBotTeam conf db b g c, test p "delete conv" $ testDeleteConvBotTeam conf db b g c, @@ -566,6 +567,30 @@ testAddBotBlocked config db brig galley = withTestService config db brig defServ const 403 === statusCode const (Just "access-denied") === fmap Error.label . responseJsonMaybe +testNonConvAdminCannotAddRemoveBot :: Config -> DB.ClientState -> Brig -> Galley -> Http () +testNonConvAdminCannotAddRemoveBot config db brig galley = withTestService config db brig defServiceApp $ \sref _buf -> do + let pid = sref ^. serviceRefProvider + let sid = sref ^. serviceRefId + (ownerId, tid) <- Team.createUserWithTeam brig + member <- Team.createTeamMember brig galley ownerId tid fullPermissions + let memberId = userId member + whitelistService brig ownerId tid pid sid + cid <- Team.createTeamConvWithRole roleNameWireMember galley tid ownerId [memberId] Nothing + addBot brig memberId pid sid cid !!! do + const 403 === statusCode + const (Just "access-denied") === fmap Error.label . responseJsonMaybe + rs <- responseJsonError =<< addBot brig ownerId pid sid cid DB.ClientState -> Brig -> Galley -> Cannon -> Http () testGetBotConvBlocked config db brig galley cannon = withTestService config db brig defServiceApp $ \sref buf -> do (user1, userId -> u2, _, tid, cid, pid, sid) <- prepareBotUsersTeam brig galley sref @@ -1305,6 +1330,31 @@ removeBot brig uid cid bid = . header "Z-User" (toByteString' uid) . header "Z-Connection" "conn" +data RemoveBot = RemoveBot + { _rmBotConv :: !ConvId, + _rmBotId :: !BotId + } + +instance ToJSON RemoveBot where + toJSON a = + object + [ "conversation" .= _rmBotConv a, + "bot" .= _rmBotId a + ] + +removeBotInternal :: + Galley -> + UserId -> + ConvId -> + BotId -> + Http ResponseLBS +removeBotInternal galley uid cid bid = + delete $ + galley + . paths ["i", "bots"] + . header "Z-User" (toByteString' uid) + . 
Bilge.json (RemoveBot cid bid) + createConv :: Galley -> UserId -> diff --git a/services/brig/test/integration/API/Team/Util.hs b/services/brig/test/integration/API/Team/Util.hs index 6fa61788ab3..8bf86c2aa6b 100644 --- a/services/brig/test/integration/API/Team/Util.hs +++ b/services/brig/test/integration/API/Team/Util.hs @@ -214,10 +214,24 @@ updatePermissions from tid (to, perm) galley = changeMember = Member.mkNewTeamMember to perm Nothing createTeamConv :: HasCallStack => Galley -> TeamId -> UserId -> [UserId] -> Maybe Milliseconds -> Http ConvId -createTeamConv g tid u us mtimer = do +createTeamConv = createTeamConvWithRole roleNameWireAdmin + +createTeamConvWithRole :: HasCallStack => RoleName -> Galley -> TeamId -> UserId -> [UserId] -> Maybe Milliseconds -> Http ConvId +createTeamConvWithRole role g tid u us mtimer = do let tinfo = Just $ ConvTeamInfo tid let conv = - NewConv us [] Nothing (Set.fromList []) Nothing tinfo mtimer Nothing roleNameWireAdmin ProtocolProteusTag Nothing + NewConv + us + [] + Nothing + (Set.fromList []) + Nothing + tinfo + mtimer + Nothing + role + ProtocolProteusTag + Nothing r <- post ( g diff --git a/services/galley/src/Galley/API/Update.hs b/services/galley/src/Galley/API/Update.hs index 0ee2281513a..5349bb128f4 100644 --- a/services/galley/src/Galley/API/Update.hs +++ b/services/galley/src/Galley/API/Update.hs @@ -1571,6 +1571,8 @@ addBot lusr zcon b = do unless (tUnqualified lusr `isMember` users) $ throwS @'ConvNotFound ensureGroupConversation c self <- getSelfMemberFromLocals (tUnqualified lusr) users + -- Note that in brig from where this internal handler is called, we additionally check for conversation admin role. + -- Remember to change this if we ever want to allow non admins to add bots. ensureActionAllowed SAddConversationMember self unless (any ((== b ^. addBotId) . botMemId) bots) $ do let botId = qualifyAs lusr (botUserId (b ^. addBotId)) @@ -1587,7 +1589,8 @@ rmBotH :: Input (Local ()), Input UTCTime, MemberStore, - WaiRoutes + WaiRoutes, + ErrorS ('ActionDenied 'RemoveConversationMember) ] r => UserId ::: Maybe ConnId ::: JsonRequest RemoveBot -> @@ -1605,7 +1608,8 @@ rmBot :: ExternalAccess, GundeckAccess, Input UTCTime, - MemberStore + MemberStore, + ErrorS ('ActionDenied 'RemoveConversationMember) ] r => Local UserId -> @@ -1615,10 +1619,17 @@ rmBot :: rmBot lusr zcon b = do c <- E.getConversation (b ^. rmBotConv) >>= noteS @'ConvNotFound - let lcnv = qualifyAs lusr (Data.convId c) + let (bots, users) = localBotsAndUsers (Data.convLocalMembers c) unless (tUnqualified lusr `isMember` Data.convLocalMembers c) $ throwS @'ConvNotFound - let (bots, users) = localBotsAndUsers (Data.convLocalMembers c) + -- A bot can remove itself (which will internally be triggered when a service is deleted), + -- otherwise we have to check for the correct permissions + unless (botUserId (b ^. rmBotId) == tUnqualified lusr) $ do + -- Note that in brig from where this internal handler is called, we additionally check for conversation admin role. + -- Remember to change this if we ever want to allow non admins to remove bots. + self <- getSelfMemberFromLocals (tUnqualified lusr) users + ensureActionAllowed SRemoveConversationMember self + let lcnv = qualifyAs lusr (Data.convId c) if not (any ((== b ^. rmBotId) . 
botMemId) bots) then pure Unchanged else do diff --git a/services/nginz/Dockerfile b/services/nginz/Dockerfile deleted file mode 100644 index e608fe96246..00000000000 --- a/services/nginz/Dockerfile +++ /dev/null @@ -1,154 +0,0 @@ -# Requires docker >= 17.05 (requires support for multi-stage builds) -FROM alpine:3.15 as libzauth-builder - -# Compile libzauth -COPY libs/libzauth /src/libzauth -RUN cd /src/libzauth/libzauth-c \ - && apk add --no-cache make bash cargo libsodium-dev \ - && make install - -# Nginz container -FROM alpine:3.15 - -# Install libzauth -COPY --from=libzauth-builder /usr/local/include/zauth.h /usr/local/include/zauth.h -COPY --from=libzauth-builder /usr/local/lib/libzauth.so /usr/local/lib/libzauth.so -COPY --from=libzauth-builder /usr/local/lib/pkgconfig/libzauth.pc /usr/local/lib/pkgconfig/libzauth.pc - -COPY services/nginz/third_party /src/third_party - -ENV CONFIG --prefix=/etc/nginx \ - --sbin-path=/usr/sbin/nginx \ - --modules-path=/usr/lib/nginx/modules \ - --conf-path=/etc/nginx/nginx.conf \ - --error-log-path=/var/log/nginx/error.log \ - --http-log-path=/var/log/nginx/access.log \ - --pid-path=/var/run/nginx.pid \ - --lock-path=/var/run/nginx.lock \ - --http-client-body-temp-path=/var/cache/nginx/client_temp \ - --http-proxy-temp-path=/var/cache/nginx/proxy_temp \ - --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp \ - --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp \ - --http-scgi-temp-path=/var/cache/nginx/scgi_temp \ - --user=nginx \ - --group=nginx \ - --with-http_ssl_module \ - --with-http_v2_module \ - --with-http_stub_status_module \ - --with-http_realip_module \ - --with-http_gunzip_module \ - --add-module=/src/third_party/nginx-zauth-module \ - --add-module=/src/third_party/headers-more-nginx-module \ - --add-module=/src/third_party/nginx-module-vts - -################# similar block as upstream ######################################## -# see https://github.com/nginxinc/docker-nginx/blob/master/stable/alpine/Dockerfile -# This uses dockerfile logic from before 1.16 -#################################################################################### - -ENV NGINX_VERSION 1.22.1 - -RUN apk update - -RUN apk add -vv --virtual .build-deps \ - libsodium-dev \ - llvm-libunwind-dev \ - gcc \ - libc-dev \ - make \ - openssl-dev \ - pcre-dev \ - zlib-dev \ - linux-headers \ - curl \ - gnupg1 \ - libxslt-dev \ - gd-dev \ - geoip-dev - -# This line checks whether the 'apk add' succeeded, sometimes it doesn't work. 
-RUN curl -h - -RUN set -x \ - && addgroup -g 101 -S nginx \ - && adduser -S -D -H -u 101 -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx \ - && export GPG_KEYS=13C82A63B603576156E30A4EA0EA981B66B0D967 \ - && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz -o nginx.tar.gz \ - && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz.asc -o nginx.tar.gz.asc \ - && found=''; \ - for server in \ - ha.pool.sks-keyservers.net \ - hkp://keyserver.ubuntu.com:80 \ - hkp://p80.pool.sks-keyservers.net:80 \ - pgp.mit.edu \ - ; do \ - echo "Fetching GPG key $GPG_KEYS from $server"; \ - gpg --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$GPG_KEYS" && found=yes && break; \ - done; \ - test -z "$found" && echo >&2 "error: failed to fetch GPG key $GPG_KEYS" && exit 1; \ - gpg --batch --verify nginx.tar.gz.asc nginx.tar.gz \ - && rm -rf "$GNUPGHOME" nginx.tar.gz.asc \ - && mkdir -p /usr/src \ - && tar -zxC /usr/src -f nginx.tar.gz \ - && rm nginx.tar.gz \ - && cd /usr/src/nginx-$NGINX_VERSION \ - && ./configure $CONFIG --with-debug \ - && make -j$(getconf _NPROCESSORS_ONLN) \ - && mv objs/nginx objs/nginx-debug \ - && ./configure $CONFIG \ - && make -j$(getconf _NPROCESSORS_ONLN) \ - && make install \ - && rm -rf /etc/nginx/html/ \ - && mkdir /etc/nginx/conf.d/ \ - && mkdir -p /usr/share/nginx/html/ \ - && install -m644 html/index.html /usr/share/nginx/html/ \ - && install -m644 html/50x.html /usr/share/nginx/html/ \ - && install -m755 objs/nginx-debug /usr/sbin/nginx-debug \ - && ln -s ../../usr/lib/nginx/modules /etc/nginx/modules \ - && strip /usr/sbin/nginx* \ - && rm -rf /usr/src/nginx-$NGINX_VERSION \ - \ - # Bring in gettext so we can get `envsubst`, then throw - # the rest away. To do this, we need to install `gettext` - # then move `envsubst` out of the way so `gettext` can - # be deleted completely, then move `envsubst` back. 
- && apk add --no-cache --virtual .gettext gettext \ - && mv /usr/bin/envsubst /tmp/ \ - \ - && runDepsTmp="$( \ - scanelf --needed --nobanner --format '%n#p' /usr/sbin/nginx /usr/lib/nginx/modules/*.so /tmp/envsubst \ - | tr ',' '\n' \ - | sort -u \ - | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \ - )" \ - \ - # exclude libzauth from runDeps - && runDeps=${runDepsTmp/so:libzauth.so/''} \ - && apk add --no-cache --virtual .nginx-rundeps $runDeps \ - && apk del .build-deps \ - && apk del .gettext \ - && mv /tmp/envsubst /usr/local/bin/ \ - \ - # Bring in tzdata so users could set the timezones through the environment - # variables - && apk add --no-cache tzdata \ - \ - # forward request and error logs to docker log collector - && ln -sf /dev/stdout /var/log/nginx/access.log \ - && ln -sf /dev/stderr /var/log/nginx/error.log - -################# wire/nginz specific ###################### - -# Fix file permissions -RUN mkdir -p /var/cache/nginx/client_temp && chown -R nginx:nginx /var/cache/nginx - -RUN apk add --no-cache inotify-tools dumb-init bash curl && \ - # add libzauth runtime dependencies back in - apk add --no-cache libsodium llvm-libunwind libgcc && \ - # add openssl runtime dependencies for TLS/SSL certificate support - apk add --no-cache openssl - -COPY services/nginz/nginz_reload.sh /usr/bin/nginz_reload.sh - -ENTRYPOINT ["/usr/bin/dumb-init", "--"] -CMD ["/usr/bin/nginz_reload.sh", "-g", "daemon off;", "-c", "/etc/wire/nginz/conf/nginx.conf"] diff --git a/services/nginz/Makefile b/services/nginz/Makefile deleted file mode 100644 index 1803bdb8a66..00000000000 --- a/services/nginz/Makefile +++ /dev/null @@ -1,133 +0,0 @@ -LANG := en_US.UTF-8 -SHELL := /usr/bin/env bash -NAME := nginz -NGINX_VERSION = 1.22.1 -NGINZ_VERSION ?= -SWAGGER_VERSION:= 2.2.10 -SHELL := /usr/bin/env bash -DIST := build -BIN := src/objs/nginx -ifeq ($(DEBUG), 1) -WITH_DEBUG = --with-debug -endif -DOCKER_REGISTRY ?= quay.io -DOCKER_USER ?= quay.io/wire -DOCKER_TAG ?= local - -DEST_PATH ?= /opt/nginz -# Use a folder that can be written to since errors during startup do not respect -# your config and will use the `LOG_PATH` defined here -LOG_PATH ?= /var/log/nginz -CONF_PATH ?= /etc/nginz -PID_PATH ?= /var/run - -# You may need to use this if you have some dependencies in non-standard -# locations. For macOS, we use Brew default directories for OpenSSL (if they -# exist). These variables can be always overridden when running the -# Makefile, though. -ifeq ($(wildcard /usr/local/opt/openssl/.),) - EXTRA_CC_INC ?= - EXTRA_CC_LIB ?= -else - EXTRA_CC_INC ?= -I/usr/local/opt/openssl/include - EXTRA_CC_LIB ?= -L/usr/local/opt/openssl/lib -endif - -# Where should we look for packages, locally or globally? 
-EXTRA_PKG_PATH := $(shell [ -w /usr/local ] && echo /usr/local || echo "$(HOME)/.wire-dev")/lib/pkgconfig - -CONFIG_OPTIONS = \ - --prefix=$(DEST_PATH) \ - $(WITH_DEBUG) \ - --with-cc-opt="-std=gnu99 $(EXTRA_CC_INC)" \ - --with-ld-opt="$(EXTRA_CC_LIB)" \ - --error-log-path=$(LOG_PATH)/error.log \ - --http-log-path=$(LOG_PATH)/access.log \ - --conf-path=$(CONF_PATH)/nginx.conf \ - --pid-path=$(PID_PATH) - -ADDITIONAL_MODULES = \ - --with-http_ssl_module \ - --with-http_v2_module \ - --with-http_stub_status_module \ - --with-http_realip_module \ - --with-http_gunzip_module \ - --add-module=../third_party/nginx-zauth-module \ - --add-module=../third_party/headers-more-nginx-module \ - --add-module=../third_party/nginx-module-vts - -guard-%: - @ if [ "${${*}}" = "" ]; then \ - echo "Environment variable $* not set"; \ - exit 1; \ - fi - -default: compile - -.PHONY: clean -clean: - -rm -rf src $(DIST) .metadata zwagger-ui/swagger-ui - -.PHONY: compile -compile: $(BIN) - mkdir -p ../../dist - cp src/objs/nginx ../../dist/ - -$(BIN): src zwagger-ui/swagger-ui integration-test/conf/nginz/zwagger-ui - PKG_CONFIG_PATH=$(EXTRA_PKG_PATH) pkg-config --exists libzauth || { echo -e "\n\033[0;31m The 'libzauth' library was not found\033[0m\n pkg-config path = $(EXTRA_PKG_PATH)\n\n Attempting to install it...\n" && $(MAKE) libzauth; } - git submodule update --init - (cd src; PKG_CONFIG_PATH=$(EXTRA_PKG_PATH) ./configure $(CONFIG_OPTIONS) $(ADDITIONAL_MODULES)) - make -C src - -$(DIST): - mkdir -p $(DIST) - -# -# Dependencies -# - -NGINX_BUNDLE=nginx-$(NGINX_VERSION).tar.gz -SWAGGER_BUNDLE=swagger-$(SWAGGER_VERSION).tar.gz - -.PHONY: integration-test/conf/nginz/zwagger-ui -integration-test/conf/nginz/zwagger-ui: zwagger-ui/swagger-ui - cp -r "zwagger-ui/." integration-test/conf/nginz/zwagger-ui/ - -.PHONY: zwagger-ui/swagger-ui -zwagger-ui/swagger-ui: $(SWAGGER_BUNDLE) - tar zxf $(SWAGGER_BUNDLE) - rm -rf zwagger-ui/swagger-ui - mv -v swagger-ui-$(SWAGGER_VERSION)/dist zwagger-ui/swagger-ui - touch zwagger-ui/swagger-ui - rm -rf swagger-ui-$(SWAGGER_VERSION) - -$(SWAGGER_BUNDLE): - curl -L https://github.com/swagger-api/swagger-ui/archive/v$(SWAGGER_VERSION).tar.gz -o $(SWAGGER_BUNDLE) - -src: $(NGINX_BUNDLE) - #Find keys on https://nginx.org/en/pgp_keys.html - gpg --verify $(NGINX_BUNDLE).asc $(NGINX_BUNDLE) - tar zxf $(NGINX_BUNDLE) - rm -rf src && mv nginx-$(NGINX_VERSION) src - -$(NGINX_BUNDLE): - curl -O https://nginx.org/download/$(NGINX_BUNDLE).asc - curl -O https://nginx.org/download/$(NGINX_BUNDLE) - -.PHONY: docker -docker: - git submodule update --init - docker build -t $(DOCKER_USER)/nginz:$(DOCKER_TAG) -f Dockerfile ../.. 
- docker tag $(DOCKER_USER)/nginz:$(DOCKER_TAG) $(DOCKER_USER)/nginz:latest - if test -n "$$DOCKER_PUSH"; then docker login $(DOCKER_REGISTRY); docker push $(DOCKER_USER)/nginz:$(DOCKER_TAG); docker push $(DOCKER_USER)/nginz:latest; fi; - -.PHONY: libzauth -libzauth: - $(MAKE) -C ../../libs/libzauth install - -# a target to start the locally-compiled docker image (tagged 'local') -# using the configuration in wire-server/deploy/services-demo -# can aid when updating nginx versions and configuration -.PHONY: docker-run-demo-local -docker-run-demo: - docker run --network=host -it -v $$(pwd)/../../deploy/services-demo:/configs --entrypoint /usr/sbin/nginx quay.io/wire/nginz:local -p /configs -c /configs/conf/nginz/nginx-docker.conf diff --git a/services/nginz/README.md b/services/nginz/README.md deleted file mode 100644 index fb1832268a4..00000000000 --- a/services/nginz/README.md +++ /dev/null @@ -1,76 +0,0 @@ -# NGINX build with extra modules - -## Compile natively - -To build nginz natively, you will need to have the usual C compiler toolchains installed, along with the following dependencies: - -* gpg (needed to verify nginx's signatures) -* openssl -* libossp-uuid -* libpcre3 -* [libzauth](../../libs/libzauth) - * depends on the rust compiler, libsodium23 - -### Alpine -If you're on alpine, see the [Dockerfile](Dockerfile) for the precise dependency names. - -### Ubuntu / Debian (backports / testing / unstable) - -_Note_: Debian packages are only used as part of wire's infrastructure, and as such, you do not need to install them to run the integration tests or the demo. - -_Note_: Debian stable does not contain a new enough version of libsodium. you must get it from backports, testing, or unstable. - -_Note_: On some Ubuntu versions, upstart is installed in addition to systemd, causing runit to fail with an error like "Unable to connect to Upstart: Failed to connect to socket". Luckily, there is [a simple fix](https://forum.peppermintos.com/index.php?topic=5210.0). - -#### Build Dependencies: -```bash -sudo apt install libossp-uuid-dev libpcre3-dev libsodium23 runit gnupg -``` - -#### Building -```bash -make -``` - -### Compile with docker - -`make docker` - -### Generic -If you're on another platform, the names of the dependencies might differ slightly. - -Once you have all necessary dependencies, `make` in this directory should work. - -## Common problems while compiling - -``` -gpg: Can't check signature: public key not found -``` - -This means that you haven't imported the public key that was used to sign nginx. Look for the keys at https://nginx.org/en/pgp_keys.html and make sure to import ALL of them with: - -`gpg --import ` - -Alternatively, you can ask GPG to find the key by its ID (printed in the error message): - -`gpg --recv-keys KEY_ID` - ---- - -``` -checking for OpenSSL library ... not found -[...] -./configure: error: SSL modules require the OpenSSL library. -You can either do not enable the modules, or install the OpenSSL library -into the system, or build the OpenSSL library statically from the source -with nginx by using --with-openssl= option. -``` - -openssl is required to compile nginx and it may be installed in a "non-standard" path in your system. Once you are sure you have installed it, look for `EXTRA_CC_INC` and `EXTRA_CC_LIB` in the `Makefile` and point them to the correct location in your system. - -If you are using macOS and you used `brew` to install openssl, the `Makefile` already contains the right paths so you should not be seeing that error. 
- -## How to run it - -Have a look at our demo config in [./integration-test/conf/nginz/](./integration-test/conf/nginz/) -
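The `retry` wrapper added to `hack/bin/upload-image.sh` exists because quay.io occasionally rate-limits pushes and skopeo's own `--retry-times` does not recover reliably, so the whole `skopeo copy` invocation is retried with exponential backoff. Below is a minimal, self-contained sketch of that behaviour: the helper body is an abbreviated copy of the function from the diff, while `flaky` and its temp-file attempt counter are invented purely for illustration.

```bash
#!/usr/bin/env bash
# Illustrative sketch only: the retry helper from hack/bin/upload-image.sh
# (abbreviated) driving a deliberately flaky command. Delays grow 1s, 2s, 4s, ...
# and the command's stdout is printed once, after it finally succeeds.
set -u

retry() {
  local maxAttempts=$1 secondsDelay=1 attemptCount=1 output=
  shift 1
  while [ "$attemptCount" -le "$maxAttempts" ]; do
    output=$("$@")
    local status=$?
    [ "$status" -eq 0 ] && break
    if [ "$attemptCount" -lt "$maxAttempts" ]; then
      echo "Command [$*] failed on attempt $attemptCount/$maxAttempts; retrying in ${secondsDelay}s." >&2
      sleep "$secondsDelay"
    else
      return "$status"                 # last attempt failed: propagate its status
    fi
    attemptCount=$((attemptCount + 1))
    secondsDelay=$((secondsDelay * 2)) # exponential backoff
  done
  echo "$output"
}

counter=$(mktemp) && echo 0 >"$counter"
flaky() {                              # invented test command: fails twice, then succeeds
  local n
  n=$(($(cat "$counter") + 1))
  echo "$n" >"$counter"
  if [ "$n" -ge 3 ]; then echo "succeeded on attempt $n"; else return 1; fi
}

retry 5 flaky                          # prints "succeeded on attempt 3" after about 3s of backoff
```

Note that the command runs inside a command substitution, so its output is captured and only emitted once the retry loop has finished, and intermediate failures are reported on stderr.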
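With `services/nginz/Dockerfile` and `Makefile` removed, the nginz and nginz-disco images now come out of `nix/nginz.nix` and `nix/nginz-disco.nix` via `dockerTools.streamLayeredImage`, and `hack/bin/upload-images.sh` pushes them alongside the Haskell service images. A rough local-build sketch under stated assumptions (experimental `nix` CLI enabled, run from a checkout root, `jq` plus either a docker daemon or `skopeo` available; the `/tmp` paths and the `local` tag are placeholders):

```bash
#!/usr/bin/env bash
# Illustrative sketch only: building the nix-based nginz image locally.
# Paths and the "local" tag are placeholders, not values used by CI.
set -euo pipefail

# streamLayeredImage yields an executable that writes the image tar to stdout,
# so no large image archive is stored in the nix store.
nix -L build -f ./nix nginz -o /tmp/nginz-stream

# Option 1: load it into a local docker daemon (nix derives the tag).
/tmp/nginz-stream | docker load

# Option 2: push it with skopeo, mirroring hack/bin/upload-image.sh
# (a real registry push would additionally need --dest-creds).
/tmp/nginz-stream >/tmp/nginz.tar
repo=$(skopeo list-tags "docker-archive:///tmp/nginz.tar" | jq -r '.Tags[0] | split(":") | .[0]')
skopeo --insecure-policy copy "docker-archive:///tmp/nginz.tar" "docker://$repo:local"
```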