diff --git a/.github/workflows/build-push-ar.yml b/.github/workflows/build-push-ar.yml new file mode 100644 index 0000000..99315f9 --- /dev/null +++ b/.github/workflows/build-push-ar.yml @@ -0,0 +1,57 @@ +name: build-push-ar +on: + workflow_call: + inputs: + image: + required: true + type: string + build-args: + required: false + type: string + default: "" + context: + required: false + type: string + default: "." +jobs: + build-push-ar: + runs-on: ubuntu-latest + timeout-minutes: 15 + permissions: + contents: read + id-token: write + steps: + - uses: 'actions/checkout@v4' + - name: Extract branch name + shell: bash + run: echo "branch=${GITHUB_REF#refs/heads/}" >> $GITHUB_OUTPUT + id: extract_branch + - name: Extract tag name + shell: bash + run: |- + t=$(echo ${GITHUB_SHA} | cut -c1-7) + echo "tag=$t" >> $GITHUB_OUTPUT + id: extract_tag + - id: 'auth' + name: 'Authenticate to Google Cloud' + uses: 'google-github-actions/auth@v1' + with: + workload_identity_provider: ${{ secrets.GCLOUD_OIDC_POOL }} + create_credentials_file: true + service_account: ${{ secrets.GSA }} + token_format: 'access_token' + - uses: 'docker/login-action@v3' + name: 'Docker login' + with: + registry: 'us-docker.pkg.dev' + username: 'oauth2accesstoken' + password: '${{ steps.auth.outputs.access_token }}' + - name: Build and push + uses: docker/build-push-action@v5 + with: + context: ${{ inputs.context }} + push: true + build-args: ${{ inputs.build-args }} + tags: | + us-docker.pkg.dev/${{ secrets.GCLOUD_PROJECT }}/${{ inputs.image }}:${{steps.extract_branch.outputs.branch}}-${{steps.extract_tag.outputs.tag}} + us-docker.pkg.dev/${{ secrets.GCLOUD_PROJECT }}/${{ inputs.image }}:${{steps.extract_branch.outputs.branch}} diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index 664f84c..e6a18c2 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -1,76 +1,56 @@ -name: Workflow Template - Build and push +name: build-push on: - workflow_call: - inputs: - dir: - required: true - type: string - major_version: - required: true - type: string - build_arg: - type: string + push: jobs: - build-push: + find-jobs: + name: Find changed directories runs-on: ubuntu-latest - timeout-minutes: 15 + outputs: + folders: ${{ steps.jobs.outputs.folders }} + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - id: jobs + run: | + folders=$(git diff --diff-filter="ACMR" --name-only ${{ github.event.before }} ${{ github.event.after }} | xargs dirname | awk -F '/' '{print $1}' | sort | uniq | grep -Ev "^\." | awk NF | jq -c --raw-input --slurp 'split("\n") | .[0:-1]') + echo $folders | jq . + echo "folders=$folders" >> $GITHUB_OUTPUT + + get-tags: + needs: [find-jobs] + runs-on: ubuntu-latest + outputs: + tags: ${{ steps.tags.outputs.tags }} + steps: + - uses: actions/checkout@v4 + - id: tags + run: | + json_array=$(echo '${{ needs.find-jobs.outputs.folders }}' | jq -rc '.[]') + TAGS=$(while read -r REPO; do + if [ ! -d $REPO/.build-args ]; then + continue; + fi + for TAG in $(ls $REPO/.build-args); do + echo '{"context":"'${REPO}'", "image":"'${REPO}'-'${TAG}'", "args":"'$(cat ${REPO}/.build-args/${TAG})'"},' + done + done <<< "$json_array") + TAGS=$(echo "[${TAGS%,}]" | tr -d '\n') + echo $TAGS | jq . 
+ echo "tags=$TAGS" >> $GITHUB_OUTPUT + + build-push-ar: + name: "Build and push ${{ matrix.tags.image }} to Google Artifact Registry" + needs: [get-tags] + strategy: + matrix: + tags: ${{ fromJson(needs.get-tags.outputs.tags) }} + uses: ./.github/workflows/build-push-ar.yml + with: + image: "internal/${{ matrix.tags.image }}" + context: ${{ matrix.tags.context }} + build-args: ${{ matrix.tags.args }} permissions: contents: read id-token: write - steps: - - - name: Extract branch name - shell: bash - run: echo "branch=${GITHUB_REF#refs/heads/}" >> $GITHUB_OUTPUT - id: extract_branch - - - name: Extract tag name - shell: bash - run: |- - t=$(echo ${GITHUB_SHA} | cut -c1-7) - echo "tag=$t" >> $GITHUB_OUTPUT - id: extract_tag - - - uses: 'actions/checkout@v3' - - - name: 'Authenticate to Google Cloud' - id: 'auth' - uses: 'google-github-actions/auth@v0' - with: - workload_identity_provider: ${{ secrets.GCLOUD_OIDC_POOL }} - create_credentials_file: true - service_account: ${{ secrets.GSA }} - token_format: 'access_token' - - - name: 'Docker login init' - uses: 'docker/login-action@v1' - with: - registry: 'us-docker.pkg.dev' - username: 'oauth2accesstoken' - password: '${{ steps.auth.outputs.access_token }}' - - - name: 'Docker login' - run: echo '${{ steps.auth.outputs.access_token }}' | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev - - - name: Lint Dockerfile - uses: hadolint/hadolint-action@v3.1.0 - with: - dockerfile: ${{ inputs.dir }}/Dockerfile - - - name: Lint shell - working-directory: ${{ inputs.dir }} - run: |- - echo "Running shellcheck on" - find . -name "*.sh" -exec ls -l {} \; - echo "Starting..." - find . -name "*.sh" -exec shellcheck {} \; - - - name: Build and push - uses: docker/build-push-action@v3 - with: - context: ${{ inputs.dir }} - push: true - build-args: ${{ inputs.build_arg }} - tags: | - us-docker.pkg.dev/${{ secrets.GCLOUD_PROJECT }}/${{ secrets.GCLOUD_AR_REPO }}/${{ inputs.dir }}:${{ inputs.major_version }}-${{steps.extract_branch.outputs.branch}}-${{steps.extract_tag.outputs.tag}} - us-docker.pkg.dev/${{ secrets.GCLOUD_PROJECT }}/${{ secrets.GCLOUD_AR_REPO }}/${{ inputs.dir }}:${{ inputs.major_version }}-${{steps.extract_branch.outputs.branch}} + secrets: inherit diff --git a/.github/workflows/gulp.yml b/.github/workflows/gulp.yml deleted file mode 100644 index fcc12de..0000000 --- a/.github/workflows/gulp.yml +++ /dev/null @@ -1,18 +0,0 @@ -name: Build and push gulp 2.3 -on: - push: - paths: - - 'gulp/**' - - '.github/workflows/gulp.yml' - -jobs: - deploy: - uses: ./.github/workflows/build-push.yml - with: - dir: gulp - major_version: 2 - permissions: - contents: read - id-token: write - secrets: inherit - diff --git a/.github/workflows/loris-3.2.yml b/.github/workflows/loris-3.2.yml deleted file mode 100644 index 472ede7..0000000 --- a/.github/workflows/loris-3.2.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Build and push loris 3.2 -on: - push: - paths: - - 'loris/**' - - '.github/workflows/loris-3.2.yml' - - '.github/workflows/build-push.yml' - -jobs: - deploy: - uses: ./.github/workflows/build-push.yml - with: - dir: loris - major_version: 3.2 - build_arg: LORIS_VERSION=3.2.1 - permissions: - contents: read - id-token: write - secrets: inherit diff --git a/.github/workflows/nginx-1.25.yml b/.github/workflows/nginx-1.25.yml deleted file mode 100644 index 4427176..0000000 --- a/.github/workflows/nginx-1.25.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Build and push nginx 1.25 -on: - push: - paths: - - 'nginx/**' - - 
'.github/workflows/nginx-1.25.yml' - - '.github/workflows/build-push.yml' - -jobs: - deploy: - uses: ./.github/workflows/build-push.yml - with: - dir: nginx - major_version: 1.25 - build_arg: NGINX_VERSION=1.25.1 - permissions: - contents: read - id-token: write - secrets: inherit diff --git a/.github/workflows/php-8.yml b/.github/workflows/php-8.yml deleted file mode 100644 index 0be2e96..0000000 --- a/.github/workflows/php-8.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: CI -on: - schedule: - - cron: '43 12 * * 6' - push: - paths: - - 'php/**' - - '.github/workflows/php-8.yml' - - '.github/workflows/build-push.yml' - -jobs: - deploy: - strategy: - fail-fast: false - matrix: - version: ["8.1", "8.2"] - - name: Build and push PHP ${{ matrix.version }} - - uses: ./.github/workflows/build-push.yml - - with: - dir: php - major_version: ${{ matrix.version }} - build_arg: PHP_VERSION=${{ matrix.version }} - - permissions: - contents: read - id-token: write - - secrets: inherit diff --git a/README.md b/README.md index d3f83aa..1d69e7d 100644 --- a/README.md +++ b/README.md @@ -5,23 +5,22 @@ Various docker containers used within Lehigh Libraries infrastructure. ## Structure ``` -|-- ./.github/workflows/image1.yml -|-- ./.github/workflows/image2.yml -... -... -... -|-- ./.github/workflows/imageN.yml |-- ./image1 | `-- ./image1/Dockerfile +| `-- ./image1/.build-args/TAG1 +| `-- ./image1/.build-args/TAG2 |-- ./image2 | `-- ./image2/Dockerfile +| `-- ./image2/.build-args/TAG1 ... ... ... |-- ./imageN | `-- ./imageN/Dockerfile +| `-- ./imageN/.build-args/TAG1 + ``` Each docker image is defined within its own directory. -The image then has a GitHub action defined in [.github/workflows](./.github/workflows) that uses the base [build-push GitHub Action workflow](./.github/workflows/build-push.yml) to push images to Google Artifact Registry. +The image then has a `.build-args` directory. Each file in that directory represents a specific version tag, and it contains any `build-args` that may be needed for the docker build.
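As a rough illustration of how those pieces fit together: the `get-tags` job in `build-push.yml` walks each changed directory's `.build-args` folder and emits one matrix entry per file, and the reusable `build-push-ar.yml` workflow passes the entry's `args` to `docker/build-push-action`. Below is a minimal local sketch of that loop, not the workflow itself; it assumes it is run from the repository root, and the folder names are simply the ones present in this repository.

```
# Sketch of the get-tags loop: one JSON object per <image>/.build-args/<tag> file.
# Run from the repository root; folder names are illustrative.
for REPO in gulp nginx php; do
  [ -d "$REPO/.build-args" ] || continue
  for TAG_FILE in "$REPO"/.build-args/*; do
    TAG=$(basename "$TAG_FILE")
    ARGS=$(cat "$TAG_FILE")   # e.g. PHP_VERSION=8.2; may be empty (gulp/2.3)
    printf '{"context":"%s", "image":"%s-%s", "args":"%s"}\n' "$REPO" "$REPO" "$TAG" "$ARGS"
  done
done | jq -s .   # collect the objects into a JSON array, like the workflow's tags output
```

So `php/.build-args/8.2` (containing `PHP_VERSION=8.2`) becomes `{"context":"php", "image":"php-8.2", "args":"PHP_VERSION=8.2"}`, and the matrix job builds the `php` directory with that build argument.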
diff --git a/gulp/.build-args/2.3 b/gulp/.build-args/2.3 new file mode 100644 index 0000000..e69de29 diff --git a/gulp/README.md b/gulp/README.md new file mode 100644 index 0000000..b6f8229 --- /dev/null +++ b/gulp/README.md @@ -0,0 +1,7 @@ +# gulp + +For use in themes using gulp for asset generation + +``` +Rebuild count: 0 +``` diff --git a/loris/Dockerfile b/loris/Dockerfile deleted file mode 100644 index c37b796..0000000 --- a/loris/Dockerfile +++ /dev/null @@ -1,52 +0,0 @@ -FROM ubuntu:focal - -ENV TZ=Etc/UTC -ENV PROXY_URL=https://loris.iiif.example.com -ENV CORS_REGEX='' - -RUN ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && \ - echo "$TZ" > /etc/timezone && \ - apt-get -y upgrade && \ - apt-get update && \ - apt-get -y install python3-pip=20.0.2-5ubuntu1.9 \ - wget=1.20.3-1ubuntu2 \ - libjpeg-turbo8-dev=2.0.3-0ubuntu1.20.04.3 \ - libfreetype6-dev=2.10.1-2ubuntu0.3 \ - zlib1g-dev=1:1.2.11.dfsg-2ubuntu1.5 \ - liblcms2-dev=2.9-4 \ - liblcms2-utils=2.9-4 \ - libtiff5-dev=4.1.0+git191117-2ubuntu0.20.04.8 \ - libwebp-dev=0.6.1-2ubuntu0.20.04.2 \ - libopenjp2-tools=2.3.1-1ubuntu4.20.04.1 \ - python3-dev=3.8.2-0ubuntu2 \ - libssl-dev=1.1.1f-1ubuntu2.19 \ - gcc=4:9.3.0-1ubuntu2 \ - --no-install-recommends && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* && \ - pip3 install --no-cache-dir \ - Pillow==9.5.0 \ - configobj==5.0.8 \ - requests==2.31.0 \ - mock==5.0.2 \ - responses==0.23.1 && \ - adduser --disabled-password --gecos "" loris - -WORKDIR /opt -RUN wget --quiet https://github.com/loris-imageserver/loris/archive/refs/tags/v3.2.1.tar.gz && \ - tar -zxvf v3.2.1.tar.gz && \ - mv loris-3.2.1 loris - -WORKDIR /opt/loris -RUN python3 setup.py install && \ - python3 bin/setup_directories.py && \ - mkdir -p /opt/loris/cache/info && \ - chown -R loris.loris /opt/loris/cache && \ - chmod -R 700 /opt/loris/cache - -COPY conf/loris.conf /opt/loris/etc/loris.conf -COPY webapp.py /opt/loris/loris/webapp.py - -ENTRYPOINT ["python3"] - -CMD ["/opt/loris/loris/webapp.py"] diff --git a/loris/conf/loris.conf b/loris/conf/loris.conf deleted file mode 100644 index 287c5f4..0000000 --- a/loris/conf/loris.conf +++ /dev/null @@ -1,84 +0,0 @@ -# loris.conf -# -# This file is parsed by the ConfigObj library: -# -# -# -# ConfigObj uses an ini-like syntax with a few important changes and extensions, -# which are explained here: -# -# -# -# Note that 'unrepr' mode is used, which means that values are parsed as Python -# datatypes, e.g. strings are in quotes, integers are not, True is used for the -# boolean value TRUE, False for the boolean value FALSE, and lists are in [] -# with commas (',') as the separators. -# -# -# -# String interpolation is enabled using the "template" style. OS environment -# variables are available for interpolation, e.g., run_as_user='$USER' -# -# -# - -[loris.Loris] -tmp_dp = '/tmp/loris/tmp' # r-- -www_dp = '/opt/loris/loris/data/www' # r-x -run_as_user = 'loris' -run_as_group = 'loris' -enable_caching = True -redirect_canonical_image_request = False -redirect_id_slash_to_info = True - -# max_size_above_full restricts interpolation of images on the server. -# Default value 200 means that a user cannot request image sizes greater than -# 200% of original image size (width or height). -# Set this value to 100 to disallow interpolation. Set to 0 to remove -# size restriction. 
-max_size_above_full = 100 - -proxy_path='$PROXY_URL' -cors_regex = '$CORS_REGEX' -# NOTE: If supplied, cors_regex is passed to re.search(): -# https://docs.python.org/2/library/re.html#re.search -# Any url_root: -# http://werkzeug.pocoo.org/docs/latest/wrappers/#werkzeug.wrappers.BaseRequest.url_root -# (i.e., https?://domain.edu(:port)?/) that matches will be -# set to the value of Access-Control-Allow-Origin. - -[logging] -log_to = 'console' -log_level = 'INFO' -format = '%(asctime)s (%(name)s) [%(levelname)s]: %(message)s' - -[resolver] -impl = 'loris.resolver.SimpleHTTPResolver' -cache_root='/opt/loris/cache' -uri_resolvable = True - -[img.ImageCache] -cache_dp = '/opt/loris/cache' # rwx - -[img_info.InfoCache] -cache_dp = '/opt/loris/cache/info' # rwx - -[transforms] -dither_bitonal_images = False -# To enable TIFF output, add "tif" here: -target_formats = ['jpg','png','gif','webp'] - -# By default PIL throws a DecompressionBombError for images that are larger than -# 2x its MAX_IMAGE_PIXELS property (this limit is 2 * 89478485 = 178956970px). -# This property can be overridden by this config value. If set to 0, MAX_IMAGE_PIXELS -# is set to `None` and there is no limit on image size. -# pil_max_image_pixels = 250000000 - - [[jpg]] - impl = 'JPG_Transformer' - - [[tif]] - impl = 'TIF_Transformer' - - [[png]] - impl = 'PNG_Transformer' diff --git a/loris/webapp.py b/loris/webapp.py deleted file mode 100644 index 3d180ee..0000000 --- a/loris/webapp.py +++ /dev/null @@ -1,823 +0,0 @@ -#!/usr/bin/env python3 -""" -webapp.py -========= -Implements IIIF 2.0 level 2 -""" -from datetime import datetime -from decimal import getcontext -import logging -from logging.handlers import RotatingFileHandler -import os -from os import path, unlink -import re -from subprocess import CalledProcessError -from tempfile import NamedTemporaryFile -from urllib.parse import unquote - -import sys - -sys.path.append(".") - -from configobj import ConfigObj -from PIL import Image -from werkzeug.http import parse_date, http_date - -from werkzeug.wrappers import ( - Request, - Response, - BaseResponse, - CommonResponseDescriptorsMixin, -) - -from loris import constants, img, transforms -from loris.img_info import InfoCache -from loris.loris_exception import ( - ConfigError, - ImageInfoException, - RequestException, - ResolverException, - SyntaxException, - TransformException, -) - - -getcontext().prec = 25 # Decimal precision. This should be plenty. - - -def get_debug_config(debug_jp2_transformer): - # change a few things, read the config and set up logging - project_dp = path.dirname(path.dirname(path.realpath(__file__))) - data_directory = path.join(project_dp, "loris", "data") - config_file_path = path.join(data_directory, "loris.conf") - - config = read_config(config_file_path) - - config["logging"]["log_to"] = "console" - config["logging"]["log_level"] = "DEBUG" - - # override some stuff to look at relative or tmp directories. 
- config["loris.Loris"]["www_dp"] = path.join(data_directory, "www") - config["loris.Loris"]["tmp_dp"] = "/tmp/loris/tmp" - config["loris.Loris"]["enable_caching"] = True - config["img.ImageCache"]["cache_dp"] = "/tmp/loris/cache/img" - config["img_info.InfoCache"]["cache_dp"] = "/tmp/loris/cache/info" - config["resolver"]["impl"] = "loris.resolver.SimpleFSResolver" - config["resolver"]["src_img_root"] = path.join(project_dp, "tests", "img") - config["transforms"]["target_formats"] = ["jpg", "png", "gif", "webp", "tif"] - - if debug_jp2_transformer == "opj": - config["transforms"]["jp2"]["impl"] = "OPJ_JP2Transformer" - config["transforms"]["jp2"]["opj_decompress"] = "/usr/bin/opj_decompress" - elif debug_jp2_transformer == "kdu": - from loris.transforms import KakaduJP2Transformer - - config["transforms"]["jp2"]["impl"] = "KakaduJP2Transformer" - kdu_expand = KakaduJP2Transformer.local_kdu_expand_path() - config["transforms"]["jp2"]["kdu_expand"] = path.join(project_dp, kdu_expand) - libkdu_dir = KakaduJP2Transformer.local_libkdu_dir() - config["transforms"]["jp2"]["kdu_libs"] = path.join(project_dp, libkdu_dir) - else: - raise ConfigError( - "Unrecognized debug JP2 transformer: %r" % debug_jp2_transformer - ) - - config["authorizer"] = { - "impl": "loris.authorizer.RulesAuthorizer", - "cookie_secret": b"4rakTQJDyhaYgoew802q78pNnsXR7ClvbYtAF1YC87o", - "token_secret": b"hyQijpEEe9z1OB9NOkHvmSA4lC1B4lu1n80bKNx0Uz0=", - "roles_key": "roles", - "id_key": "sub", - } - - return config - - -def create_app(debug=False, debug_jp2_transformer="kdu", config_file_path=""): - if debug: - config = get_debug_config(debug_jp2_transformer) - else: - config = read_config(config_file_path) - - return Loris(config) - - -def read_config(config_file_path): - config = ConfigObj(config_file_path, unrepr=True, interpolation="template") - # add the OS environment variables as the DEFAULT section to support - # interpolating their values into other keys - # make a copy of the os.environ dictionary so that the config object can't - # inadvertently modify the environment - config["DEFAULT"] = { - key: val for (key, val) in os.environ.items() if key not in ("PS1") - } - return config - - -def _validate_logging_config(config): - """ - Validate the logging config before setting up a logger. 
- """ - mandatory_keys = ["log_to", "log_level", "format"] - missing_keys = [key for key in mandatory_keys if key not in config] - - if missing_keys: - raise ConfigError( - "Missing mandatory logging parameters: %r" % ",".join(missing_keys) - ) - - if config["log_to"] not in ("file", "console"): - raise ConfigError( - "logging.log_to=%r, expected one of file/console" % config["log_to"] - ) - - if config["log_to"] == "file": - mandatory_keys = ["log_dir", "max_size", "max_backups"] - missing_keys = [] - for key in mandatory_keys: - if key not in config: - missing_keys.append(key) - - if missing_keys: - raise ConfigError( - "When log_to=file, the following parameters are required: %r" - % ",".join(missing_keys) - ) - - -def configure_logging(config): - _validate_logging_config(config) - - logger = logging.getLogger() - - try: - logger.setLevel(config["log_level"]) - except ValueError: - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter(fmt=config["format"]) - - if not getattr(logger, "handler_set", None): - if config["log_to"] == "file": - fp = "%s.log" % (path.join(config["log_dir"], "loris"),) - handler = RotatingFileHandler( - fp, - maxBytes=config["max_size"], - backupCount=config["max_backups"], - delay=True, - ) - handler.setFormatter(formatter) - logger.addHandler(handler) - elif config["log_to"] == "console": - from sys import __stderr__, __stdout__ - - # STDERR - err_handler = logging.StreamHandler(__stderr__) - err_handler.addFilter(StdErrFilter()) - err_handler.setFormatter(formatter) - logger.addHandler(err_handler) - - # STDOUT - out_handler = logging.StreamHandler(__stdout__) - out_handler.addFilter(StdOutFilter()) - out_handler.setFormatter(formatter) - logger.addHandler(out_handler) - else: - # This should be protected by ``_validate_logging_config()``. - assert False, "Should not be reachable" - - logger.handler_set = True - return logger - - -class StdErrFilter(logging.Filter): - """Logging filter for stderr""" - - def filter(self, record): - return 1 if record.levelno >= 30 else 0 - - -class StdOutFilter(logging.Filter): - """Logging filter for stdout""" - - def filter(self, record): - return 1 if record.levelno <= 20 else 0 - - -class LorisResponse(BaseResponse, CommonResponseDescriptorsMixin): - """Similar to Response, but IIIF Compliance Link and - Access-Control-Allow-Origin Headers are added and none of the - ETagResponseMixin, ResponseStreamMixin, or WWWAuthenticateMixin - capabilities are included. 
- See: http://werkzeug.pocoo.org/docs/wrappers/#werkzeug.wrappers.Response - """ - - def __init__(self, response=None, status=None, content_type=None): - super(LorisResponse, self).__init__( - response=response, status=status, content_type=content_type - ) - self.headers["Link"] = '<%s>;rel="profile"' % (constants.COMPLIANCE,) - - def set_acao(self, request, regex=None): - if regex: - if regex.search(request.url_root): - self.headers["Access-Control-Allow-Origin"] = request.url_root - else: - self.headers["Access-Control-Allow-Origin"] = "*" - self.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - self.headers["Access-Control-Allow-Headers"] = "Authorization" - - -class BadRequestResponse(LorisResponse): - def __init__(self, message=None): - if message is None: - message = "request does not match the IIIF syntax" - status = 400 - message = "Bad Request: %s (%d)" % (message, status) - super(BadRequestResponse, self).__init__(message, status, "text/plain") - - -class NotFoundResponse(LorisResponse): - def __init__(self, message): - status = 404 - message = "Not Found: %s (%d)" % (message, status) - super(NotFoundResponse, self).__init__(message, status, "text/plain") - - -class ServerSideErrorResponse(LorisResponse): - def __init__(self, message): - status = 500 - message = "Server Side Error: %s (%d)" % (message, status) - super(ServerSideErrorResponse, self).__init__(message, status, "text/plain") - - -class LorisRequest: - def __init__(self, request, redirect_id_slash_to_info=True, proxy_path=None): - # make sure path is unquoted, so we know what we're working with - self._path = unquote(request.path) - self._request = request - self._redirect_id_slash_to_info = redirect_id_slash_to_info - self._proxy_path = proxy_path - self._dissect_uri() - - @property - def base_uri(self): - if self._proxy_path is not None: - uri = "%s%s" % (self._proxy_path, self.ident) - elif self._request.script_root != "": - uri = "%s%s" % (self._request.url_root, self.ident) - else: - uri = "%s%s" % (self._request.host_url, self.ident) - return uri - - def _dissect_uri(self): - self.ident = "" - self.params = "" - # handle some initial static views first - if self._path == "/": - self.request_type = "index" - return - - elif self._path[1:] == "favicon.ico": - self.request_type = "favicon" - return - - # check for image request - # Note: this doesn't guarantee that all the parameters have valid - # values - see regexes in constants.py. - image_match = constants.IMAGE_RE.match(self._path) - - # check for info request - info_match = constants.INFO_RE.match(self._path) - - # process image request - if image_match: - groups = image_match.groupdict() - self.ident = groups["ident"] - self.params = { - "region": groups["region"], - "size": groups["size"], - "rotation": groups["rotation"], - "quality": groups["quality"], - "format": groups["format"], - } - self.request_type = "image" - - # process info request - elif info_match: - groups = info_match.groupdict() - self.ident = groups["ident"] - self.params = "info.json" - self.request_type = "info" - - # if the request didn't match the stricter regexes above, but it does - # match this one, we know we have an invalid image request, so we can - # return a 400 BadRequest to the user. 
- elif constants.LOOSER_IMAGE_RE.match(self._path): - self.request_type = "bad_image_request" - - else: # treat it as a redirect_info - ident = self._path[1:] - if ident.endswith("/") and self._redirect_id_slash_to_info: - ident = ident[:-1] - self.ident = ident - self.request_type = "redirect_info" - - -def set_content_disposition_header(image_request, response): - """ - Set the HTTP Content-Disposition header. - - This indicates to the browser what filename it should use to download the - file. We use the identifier from the request, which is generally more - useful than `default.jpg`. - - See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition - """ - ident = image_request.ident - img_format = image_request.format - - # If the identifier has an associated image format and it matches the - # request, pass through the identifier directly. - # - # If the request is for a different format, include both formats. - # - # e.g. /cats.jpg/.../default.jpg ~> cats.jpg - # /cats.jpg/.../default.png ~> cats.jpg.png - # - if ident.endswith(".%s" % img_format): - download_filename = ident - else: - download_filename = "%s.%s" % (ident, img_format) - - response.headers["Content-Disposition"] = "filename*=utf-8''%s" % download_filename - - -class Loris: - def __init__(self, app_configs={}): - """The WSGI Application. - Args: - app_configs ({}): - A dictionary of dictionaries that represents the loris.conf - file. - """ - self.app_configs = app_configs - self.logger = configure_logging(app_configs["logging"]) - self.logger.debug("Loris initialized with these settings:") - [ - self.logger.debug("%s.%s=%s", key, sub_key, self.app_configs[key][sub_key]) - for key in self.app_configs - for sub_key in self.app_configs[key] - ] - - # make the loris.Loris configs attrs for easier access - _loris_config = self.app_configs["loris.Loris"] - self.tmp_dp = _loris_config["tmp_dp"] - - try: - os.makedirs(self.tmp_dp, exist_ok=True) - except Exception as exc: - raise ConfigError("Error creating tmp_dp %s: %r" % (self.tmp_dp, exc)) - - self.www_dp = _loris_config["www_dp"] - self.enable_caching = _loris_config["enable_caching"] - self.redirect_canonical_image_request = _loris_config[ - "redirect_canonical_image_request" - ] - self.redirect_id_slash_to_info = _loris_config["redirect_id_slash_to_info"] - self.proxy_path = _loris_config.get("proxy_path", None) - self.cors_regex = _loris_config.get("cors_regex", None) - if self.cors_regex: - self.cors_regex = re.compile(self.cors_regex) - - self.transformers = self._load_transformers() - self.resolver = self._load_resolver() - self.authorizer = self._load_authorizer() - self.max_size_above_full = _loris_config.get("max_size_above_full", 200) - - if self.enable_caching: - self.info_cache = InfoCache( - self.app_configs["img_info.InfoCache"]["cache_dp"] - ) - cache_dp = self.app_configs["img.ImageCache"]["cache_dp"] - self.img_cache = img.ImageCache(cache_dp) - - def _load_transformers(self): - tforms = self.app_configs["transforms"] - source_formats = [k for k in tforms if isinstance(tforms[k], dict)] - self.logger.debug("Source formats: %r", source_formats) - global_transform_options = dict( - (k, v) for k, v in tforms.items() if not isinstance(v, dict) - ) - self.logger.debug("Global transform options: %r", global_transform_options) - - pil_max_image_pixels = tforms.get( - "pil_max_image_pixels", Image.MAX_IMAGE_PIXELS - ) - if pil_max_image_pixels != 0: - Image.MAX_IMAGE_PIXELS = pil_max_image_pixels - self.logger.debug( - "PIL maximum image 
pixels set to: %d", pil_max_image_pixels - ) - else: - Image.MAX_IMAGE_PIXELS = None - self.logger.debug("PIL maximum image pixels limit removed.") - - transformers = {} - for sf in source_formats: - # merge [transforms] options and [transforms][source_format]] options - config = dict( - list(self.app_configs["transforms"][sf].items()) - + list(global_transform_options.items()) - ) - transformers[sf] = self._load_transformer(config) - return transformers - - def _load_transformer(self, config): - Klass = getattr(transforms, config["impl"]) - instance = Klass(config) - self.logger.debug("Loaded Transformer %s", config["impl"]) - return instance - - def _load_resolver(self): - impl = self.app_configs["resolver"]["impl"] - ResolverClass = self._import_class(impl) - resolver_config = self.app_configs["resolver"] - return ResolverClass(resolver_config) - - def _load_authorizer(self): - try: - impl = self.app_configs["authorizer"]["impl"] - except KeyError: - return None - else: - AuthorizerClass = self._import_class(impl) - return AuthorizerClass(self.app_configs["authorizer"]) - - def _import_class(self, qname): - """Imports a class AND returns it (the class, not an instance).""" - module_name = ".".join(qname.split(".")[:-1]) - class_name = qname.split(".")[-1] - module = __import__(module_name, fromlist=[class_name]) - self.logger.debug("Imported %s", qname) - return getattr(module, class_name) - - def wsgi_app(self, environ, start_response): - request = Request(environ) - response = self.route(request) - return response(environ, start_response) - - def route(self, request): - loris_request = LorisRequest( - request, self.redirect_id_slash_to_info, self.proxy_path - ) - request_type = loris_request.request_type - - if request_type == "index": - return self.get_index(request) - - if request_type == "favicon": - return self.get_favicon(request) - - if request_type == "bad_image_request": - return BadRequestResponse() - - ident = loris_request.ident - base_uri = loris_request.base_uri - - if request_type == "redirect_info": - if not self.resolver.is_resolvable(ident): - msg = "could not resolve identifier: %s " % (ident) - return NotFoundResponse(msg) - - r = LorisResponse() - r.headers["Location"] = "%s/info.json" % (base_uri,) - r.set_acao(request) - r.status_code = 303 - return r - - elif request_type == "info": - if request.method == "OPTIONS": - # never redirect - r = LorisResponse() - r.set_acao(request) - r.status_code = 200 - return r - - return self.get_info(request, ident, base_uri) - - else: # request_type == 'image': - params = loris_request.params - fmt = params["format"] - if fmt not in self.app_configs["transforms"]["target_formats"]: - return BadRequestResponse('"%s" is not a supported format' % (fmt,)) - quality = params["quality"] - rotation = params["rotation"] - size = params["size"] - region = params["region"] - - return self.get_img( - request, ident, region, size, rotation, quality, fmt, base_uri - ) - - def __call__(self, environ, start_response): - """ - This makes Loris executable. - """ - return self.wsgi_app(environ, start_response) - - def get_index(self, request): - """ - Just so there's something at /. 
- """ - f = open(path.join(self.www_dp, "index.txt"), "rb") - r = Response(f, content_type="text/plain") - if self.enable_caching: - r.add_etag() - r.make_conditional(request) - return r - - def get_favicon(self, request): - f = path.join(self.www_dp, "icons", "loris-icon.png") - r = Response(open(f, "rb"), content_type="image/x-icon") - if self.enable_caching: - r.add_etag() - r.make_conditional(request) - return r - - def get_info(self, request, ident, base_uri): - try: - info, last_mod = self._get_info(ident, request, base_uri) - except ResolverException as re: - return NotFoundResponse(str(re)) - except ImageInfoException as ie: - return ServerSideErrorResponse(str(ie)) - except IOError as e: - msg = "%s \n(This is likely a permissions problem)" % e - return ServerSideErrorResponse(msg) - - r = LorisResponse() - r.set_acao(request, self.cors_regex) - ims_hdr = request.headers.get("If-Modified-Since") - ims = parse_date(ims_hdr) - last_mod = parse_date(http_date(last_mod)) # see note under get_img - - if self.authorizer and self.authorizer.is_protected(info): - authed = self.authorizer.is_authorized(info, request) - if authed["status"] == "deny": - r.status_code = 401 - # trash If-Mod-Since to ensure no 304 - ims = None - elif authed["status"] == "redirect": - r.status_code = 302 - r.location = authed["location"] - # Otherwise we're okay - - if ims and ims >= last_mod: - self.logger.debug("Sent 304 for %s ", ident) - r.status_code = 304 - else: - if last_mod: - r.last_modified = last_mod - callback = request.args.get("callback", None) - if callback: - r.mimetype = "application/javascript" - r.data = "%s(%s);" % (callback, info.to_iiif_json(base_uri)) - else: - if request.headers.get("accept") == "application/ld+json": - r.content_type = "application/ld+json" - else: - r.content_type = "application/json" - l = ';rel="http://www.w3.org/ns/json-ld#context";type="application/ld+json"' - r.headers["Link"] = "%s,%s" % (r.headers["Link"], l) - r.data = info.to_iiif_json(base_uri) - return r - - def _get_info(self, ident, request, base_uri): - # return info from the cache if we can - if self.enable_caching: - try: - return self.info_cache[ident] - except KeyError: - pass - - # otherwise construct it - info = self.resolver.resolve(self, ident, base_uri) - - # Maybe inject services before caching - if self.authorizer and self.authorizer.is_protected(info): - # Call get_services to inject - svcs = self.authorizer.get_services_info(info) - if svcs and "service" in svcs: - info.service = svcs["service"] - - # cache new info if needed - if self.enable_caching: - self.info_cache[ident] = info - # pick up the timestamp... :() - info, last_mod = self.info_cache[ident] - else: - last_mod = None - - return (info, last_mod) - - def _set_canonical_link(self, request, response, image_request, image_info): - if self.proxy_path: - root = self.proxy_path - else: - root = request.url_root - - canonical_path = image_request.canonical_request_path(image_info) - canonical_uri = "%s%s" % (root, canonical_path) - response.headers["Link"] = '%s,<%s>;rel="canonical"' % ( - response.headers["Link"], - canonical_uri, - ) - - def get_img( - self, request, ident, region, size, rotation, quality, target_fmt, base_uri - ): - """Get an Image. - Args: - request (Request): - Forwarded by dispatch_request - ident (str): - The identifier portion of the IIIF URI syntax - - """ - r = LorisResponse() - r.set_acao(request, self.cors_regex) - # ImageRequest's Parameter attributes, i.e. RegionParameter etc. 
are - # decorated with @property and not constructed until they are first - # accessed, which mean we don't have to catch any exceptions here. - image_request = img.ImageRequest( - ident, region, size, rotation, quality, target_fmt - ) - - self.logger.debug("Image Request Path: %s", image_request.request_path) - - if self.enable_caching: - in_cache = image_request in self.img_cache - else: - in_cache = False - - try: - # We need the info to check authorization, - # ... still cheaper than always resolving as likely to be cached - info = self._get_info(ident, request, base_uri)[0] - except ResolverException as re: - return NotFoundResponse(str(re)) - except ImageInfoException as ie: - return ServerSideErrorResponse(ie) - - if self.authorizer and self.authorizer.is_protected(info): - authed = self.authorizer.is_authorized(info, request) - - if authed["status"] != "ok": - # Images don't redirect, they just deny out - r.status_code = 401 - return r - - set_content_disposition_header(image_request=image_request, response=r) - - if in_cache: - fp, img_last_mod = self.img_cache[image_request] - ims_hdr = request.headers.get("If-Modified-Since") - # The stamp from the FS needs to be rounded using the same precision - # as when went sent it, so for an accurate comparison turn it into - # an http date and then parse it again :-( : - img_last_mod = parse_date(http_date(img_last_mod)) - self.logger.debug("Time from FS (default, rounded): %s", img_last_mod) - self.logger.debug("Time from IMS Header (parsed): %s", parse_date(ims_hdr)) - # ims_hdr = parse_date(ims_hdr) # catch parsing errors? - if ims_hdr and parse_date(ims_hdr) >= img_last_mod: - self.logger.debug("Sent 304 for %s ", fp) - r.status_code = 304 - return r - else: - r.content_type = constants.FORMATS_BY_EXTENSION[target_fmt] - r.status_code = 200 - r.last_modified = img_last_mod - r.headers["Content-Length"] = path.getsize(fp) - r.response = open(fp, "rb") - - # hand the Image object its info - info = self._get_info(ident, request, base_uri)[0] - - self._set_canonical_link( - request=request, - response=r, - image_request=image_request, - image_info=info, - ) - return r - else: - try: - # Check that we can make the quality requested - if image_request.quality not in info.profile.description["qualities"]: - return BadRequestResponse( - '"%s" quality is not available for this image' - % (image_request.quality,) - ) - - # Check if requested size is allowed - if image_request.request_resolution_too_large( - max_size_above_full=self.max_size_above_full, image_info=info - ): - return NotFoundResponse("Resolution not available") - - # Redirect if appropriate - if self.redirect_canonical_image_request: - if not image_request.is_canonical(info): - self.logger.debug( - "Attempting redirect to %s", - image_request.canonical_request_path, - ) - r.headers["Location"] = image_request.canonical_request_path - r.status_code = 301 - return r - - # Make an image - fp = self._make_image(image_request=image_request, image_info=info) - - except TransformException as te: - return ServerSideErrorResponse( - "error generating derivative image: see log" - ) - except (RequestException, SyntaxException) as e: - return BadRequestResponse(str(e)) - except (CalledProcessError, IOError) as e: - # CalledProcessError and IOError typically happen when there are - # permissions problems with one of the files or directories - # used by the transformer. 
- msg = """%s \n\nThis is likely a permissions problem, though it\'s -possible that there was a problem with the source file -(%s).""" % ( - str(e), - info.src_img_fp, - ) - return ServerSideErrorResponse(msg) - r.content_type = constants.FORMATS_BY_EXTENSION[target_fmt] - r.status_code = 200 - r.last_modified = datetime.utcfromtimestamp(path.getctime(fp)) - r.headers["Content-Length"] = path.getsize(fp) - self._set_canonical_link( - request=request, response=r, image_request=image_request, image_info=info - ) - r.response = open(fp, "rb") - - if not self.enable_caching: - r.call_on_close(lambda: unlink(fp)) - - return r - - def _make_image(self, image_request, image_info): - """Call the appropriate transformer to create the image. - - Args: - image_request (ImageRequest) - image_info (ImageInfo) - Returns: - (str) the file path of the new image - - """ - temp_file = NamedTemporaryFile( - dir=self.tmp_dp, suffix=".%s" % image_request.format, delete=False - ) - temp_fp = temp_file.name - - try: - transformer = self.transformers[image_info.src_format] - transformer.transform( - target_fp=temp_fp, image_request=image_request, image_info=image_info - ) - derivative_size = os.stat(temp_fp).st_size - if derivative_size < 1: - self.logger.error( - "empty derivative file created for %s" % image_info.src_img_fp - ) - raise TransformException() - except Exception: - unlink(temp_fp) - raise - - if self.enable_caching: - canonical_cache_fp = self.img_cache.upsert( - image_request=image_request, temp_fp=temp_fp, image_info=image_info - ) - return canonical_cache_fp - else: - return temp_fp - - -if __name__ == "__main__": - from werkzeug.serving import run_simple - import sys - - project_dp = path.dirname(path.dirname(path.realpath(__file__))) - - sys.path.append(path.join(project_dp)) # to find any local resolvers - - app = create_app(config_file_path="/opt/loris/etc/loris.conf") # or 'opj' - - run_simple("0.0.0.0", 8080, app, use_debugger=True, use_reloader=True) diff --git a/nginx/.build-args/1.25 b/nginx/.build-args/1.25 new file mode 100644 index 0000000..6828ea5 --- /dev/null +++ b/nginx/.build-args/1.25 @@ -0,0 +1 @@ +NGINX_VERSION=1.25.1 diff --git a/nginx/README.md b/nginx/README.md new file mode 100644 index 0000000..dfbc232 --- /dev/null +++ b/nginx/README.md @@ -0,0 +1,6 @@ +# nginx + + +``` +Rebuild count: 0 +``` diff --git a/php/.build-args/8.1 b/php/.build-args/8.1 new file mode 100644 index 0000000..c1e3042 --- /dev/null +++ b/php/.build-args/8.1 @@ -0,0 +1 @@ +PHP_VERSION=8.1 diff --git a/php/.build-args/8.2 b/php/.build-args/8.2 new file mode 100644 index 0000000..f47646e --- /dev/null +++ b/php/.build-args/8.2 @@ -0,0 +1 @@ +PHP_VERSION=8.2 diff --git a/php/.build-args/8.3 b/php/.build-args/8.3 new file mode 100644 index 0000000..e1b749c --- /dev/null +++ b/php/.build-args/8.3 @@ -0,0 +1 @@ +PHP_VERSION=8.3 diff --git a/php/README.md b/php/README.md new file mode 100644 index 0000000..913a640 --- /dev/null +++ b/php/README.md @@ -0,0 +1,6 @@ +# php + + +``` +Rebuild count: 0 +```