# https://circleci.com/docs/2.0/

# We use version 2.1 of CircleCI's configuration format (the docs are still at
# the 2.0 link) in order to have access to Windows executors. This means we
# can't use dots in job names anymore. They have a new "parameters" feature
# that is supposed to remove the need to have version numbers in job names (the
# source of our dots), but switching to that is going to be a bigger refactor:
#
#   https://discuss.circleci.com/t/v2-1-job-name-validation/31123
#   https://circleci.com/docs/2.0/reusing-config/
#
version: 2.1

# Every job that pulls an image from (or pushes an image to) Docker Hub must
# authenticate to it.  Define a couple of YAML anchors that can be used to
# supply the necessary credentials.

# First is a CircleCI job context which makes Docker Hub credentials available
# in the environment.
#
# Contexts are managed in the CircleCI web interface:
#
#  https://app.circleci.com/settings/organization/github/tahoe-lafs/contexts
dockerhub-context-template: &DOCKERHUB_CONTEXT
  context: "dockerhub-auth"

# Next is a Docker executor template that gets the credentials from the
# environment and supplies them to the executor.
dockerhub-auth-template: &DOCKERHUB_AUTH
  - auth:
      username: $DOCKERHUB_USERNAME
      password: $DOCKERHUB_PASSWORD
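
# For example, a docker executor entry written as
#
#   - <<: *DOCKERHUB_AUTH
#     image: "cimg/python:3.9"
#
# merges into roughly the equivalent of spelling the credentials out inline:
#
#   - image: "cimg/python:3.9"
#     auth:
#       username: $DOCKERHUB_USERNAME
#       password: $DOCKERHUB_PASSWORD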

# A template for the list of image-building jobs, shared with the
# image-building workflow below.
.images: &IMAGES
  jobs:
    - "build-image-debian-11":
        <<: *DOCKERHUB_CONTEXT
    - "build-image-ubuntu-20-04":
        <<: *DOCKERHUB_CONTEXT
    - "build-image-ubuntu-22-04":
        <<: *DOCKERHUB_CONTEXT
    - "build-image-fedora-35":
        <<: *DOCKERHUB_CONTEXT
    - "build-image-oraclelinux-8":
        <<: *DOCKERHUB_CONTEXT
    # Restore later as PyPy38
    #- "build-image-pypy27-buster":
    #    <<: *DOCKERHUB_CONTEXT

parameters:
  # Control whether the image-building workflow runs as part of this pipeline.
  # Generally we do not want this to run because we don't need our
  # dependencies to move around all the time and because building the image
  # takes a couple of minutes.
  #
  # An easy way to trigger a pipeline with this set to true is with the
  # rebuild-images.sh tool in this directory.  You can also do so via the
  # CircleCI web UI.
  build-images:
    default: false
    type: "boolean"

  # Control whether the test-running workflow runs as part of this pipeline.
  # Generally we do want this to run because running the tests is the primary
  # purpose of this pipeline.
  run-tests:
    default: true
    type: "boolean"

workflows:
  ci:
    when: "<< pipeline.parameters.run-tests >>"
    jobs:
      # Start with jobs testing various platforms.
      - "debian-11":
          {}

      - "ubuntu-20-04":
          {}

      - "ubuntu-22-04":
          {}

      # Equivalent to RHEL 8; CentOS 8 is dead.
      - "oraclelinux-8":
          {}

      - "nixos":
          name: "<<matrix.pythonVersion>>"
          nixpkgs: "22.11"
          matrix:
            parameters:
              pythonVersion:
                - "python38"
                - "python39"
                - "python310"

      - "nixos":
          name: "<<matrix.pythonVersion>>"
          nixpkgs: "unstable"
          matrix:
            parameters:
              pythonVersion:
                - "python311"

      # Eventually, test against PyPy 3.8
      #- "pypy27-buster":
      #    {}

      # Other assorted tasks and configurations
      - "codechecks":
          {}
      - "pyinstaller":
          {}
      - "c-locale":
          {}
      # Any locale other than C or UTF-8.
      - "another-locale":
          {}

      - "integration":
          # Run even the slow integration tests here.  We need the `--` to
          # sneak past tox and get to pytest.
          tox-args: "-- --runslow integration"
          requires:
            # If the unit test suite doesn't pass, don't bother running the
            # integration tests.
            - "debian-11"

      - "typechecks":
          {}
      - "docs":
          {}

  images:
    <<: *IMAGES

    # Build as part of the workflow but only if requested.
    when: "<< pipeline.parameters.build-images >>"

jobs:
  codechecks:
    docker:
      - <<: *DOCKERHUB_AUTH
        image: "cimg/python:3.9"

    steps:
      - "checkout"

      - run: &INSTALL_TOX
          name: "Install tox"
          command: |
            pip install --user 'tox~=3.0'

      - run:
          name: "Static-ish code checks"
          command: |
            ~/.local/bin/tox -e codechecks

  pyinstaller:
    docker:
      - <<: *DOCKERHUB_AUTH
        image: "cimg/python:3.9"

    steps:
      - "checkout"

      - run:
          <<: *INSTALL_TOX

      - run:
          name: "Make PyInstaller executable"
          command: |
            ~/.local/bin/tox -e pyinstaller

      - run:
          # Verify that the PyInstaller-generated binary executes cleanly,
          # i.e., that it terminates with an exit code of 0 and isn't failing
          # due to import/packaging-related errors.
          name: "Test PyInstaller executable"
          command: |
            dist/Tahoe-LAFS/tahoe --version

  debian-11: &DEBIAN
    environment: &UTF_8_ENVIRONMENT
      # By default the job fails if the test suite fails.  Set this to "yes"
      # to let the job succeed even when the test suite fails.
      ALLOWED_FAILURE: "no"
      # Tell Hypothesis which configuration we want it to use.
      TAHOE_LAFS_HYPOTHESIS_PROFILE: "ci"
      # Tell the C runtime things about character encoding (mainly to do with
      # filenames and argv).
      LANG: "en_US.UTF-8"
      # Select a tox environment to run for this job.
      TAHOE_LAFS_TOX_ENVIRONMENT: "py39"
      # Additional arguments to pass to tox.
      TAHOE_LAFS_TOX_ARGS: ""
      # The path in which test artifacts will be placed.
      ARTIFACTS_OUTPUT_PATH: "/tmp/artifacts"
      # Convince all of our pip invocations to look at the cached wheelhouse
      # we maintain.
      WHEELHOUSE_PATH: &WHEELHOUSE_PATH "/tmp/wheelhouse"
      PIP_FIND_LINKS: "file:///tmp/wheelhouse"
      # Upload the coverage report.
      UPLOAD_COVERAGE: ""

    # pip cannot install packages if the working directory is not readable.
    # We want to run a lot of steps as nobody instead of as root.
    working_directory: "/tmp/project"

    steps:
      - "checkout"
      - run: &SETUP_VIRTUALENV
          name: "Setup virtualenv"
          command: |
            /tmp/project/.circleci/setup-virtualenv.sh \
                "/tmp/venv" \
                "/tmp/project" \
                "${WHEELHOUSE_PATH}" \
                "${TAHOE_LAFS_TOX_ENVIRONMENT}" \
                "${TAHOE_LAFS_TOX_ARGS}"

      - run: &RUN_TESTS
          name: "Run test suite"
          command: |
            /tmp/project/.circleci/run-tests.sh \
                "/tmp/venv" \
                "/tmp/project" \
                "${ALLOWED_FAILURE}" \
                "${ARTIFACTS_OUTPUT_PATH}" \
                "${TAHOE_LAFS_TOX_ENVIRONMENT}" \
                "${TAHOE_LAFS_TOX_ARGS}"
          # Trial output goes straight to a log file rather than stdout, so
          # raise the no-output timeout to keep CircleCI from killing the job
          # while the test suite runs.
          no_output_timeout: "20m"

      - store_test_results: &STORE_TEST_RESULTS
          path: "/tmp/artifacts/junit"

      - store_artifacts: &STORE_TEST_LOG
          # Despite passing --workdir /tmp to tox above, it still runs trial
          # in the project source checkout.
          path: "/tmp/project/_trial_temp/test.log"

      - store_artifacts: &STORE_ELIOT_LOG
          # Despite passing --workdir /tmp to tox above, it still runs trial
          # in the project source checkout.
          path: "/tmp/project/eliot.log"

      - store_artifacts: &STORE_OTHER_ARTIFACTS
          # Store any other artifacts, too.  This is handy to allow other jobs
          # sharing most of the definition of this one to be able to
          # contribute artifacts easily.
          path: "/tmp/artifacts"

      - run: &SUBMIT_COVERAGE
          name: "Submit coverage results"
          command: |
            if [ -n "${UPLOAD_COVERAGE}" ]; then
              echo "TODO: Need a new coverage solution, see https://tahoe-lafs.org/trac/tahoe-lafs/ticket/4011"
            fi

    docker:
      - <<: *DOCKERHUB_AUTH
        image: "tahoelafsci/debian:11-py3.9"
        user: "nobody"


  # Restore later using PyPy3.8
  # pypy27-buster:
  #   <<: *DEBIAN
  #   docker:
  #     - <<: *DOCKERHUB_AUTH
  #       image: "tahoelafsci/pypy:buster-py2"
  #       user: "nobody"
  #   environment:
  #     <<: *UTF_8_ENVIRONMENT
  #     # We don't do coverage since it makes PyPy far too slow:
  #     TAHOE_LAFS_TOX_ENVIRONMENT: "pypy27"
  #     # Since we didn't collect it, don't upload it.
  #     UPLOAD_COVERAGE: ""

  c-locale:
    <<: *DEBIAN

    environment:
      <<: *UTF_8_ENVIRONMENT
      LANG: "C"


  another-locale:
    <<: *DEBIAN

    environment:
      <<: *UTF_8_ENVIRONMENT
      # aka "Latin 1"
      LANG: "en_US.ISO-8859-1"

  integration:
    <<: *DEBIAN

    parameters:
      tox-args:
        description: >-
          Additional arguments to pass to the tox command.
        type: "string"
        default: ""

    docker:
      - <<: *DOCKERHUB_AUTH
        image: "tahoelafsci/debian:11-py3.9"
        user: "nobody"

    environment:
      <<: *UTF_8_ENVIRONMENT
      # Select the integration tests tox environment.
      TAHOE_LAFS_TOX_ENVIRONMENT: "integration"
      # Disable artifact collection because py.test can't produce any.
      ARTIFACTS_OUTPUT_PATH: ""

      # Pass on anything we got in our parameters.
      TAHOE_LAFS_TOX_ARGS: "<< parameters.tox-args >>"

    steps:
      - "checkout"
      # DRY, YAML-style.  See the debian-11 steps above.
      - run: *SETUP_VIRTUALENV
      - run: *RUN_TESTS

  ubuntu-20-04:
    <<: *DEBIAN
    docker:
      - <<: *DOCKERHUB_AUTH
        image: "tahoelafsci/ubuntu:20.04-py3.9"
        user: "nobody"
    environment:
      <<: *UTF_8_ENVIRONMENT
      TAHOE_LAFS_TOX_ENVIRONMENT: "py39"

  ubuntu-22-04:
    <<: *DEBIAN
    docker:
      - <<: *DOCKERHUB_AUTH
        image: "tahoelafsci/ubuntu:22.04-py3.10"
        user: "nobody"
    environment:
      <<: *UTF_8_ENVIRONMENT
      TAHOE_LAFS_TOX_ENVIRONMENT: "py310"

  oraclelinux-8: &RHEL_DERIV
    docker:
      - <<: *DOCKERHUB_AUTH
        image: "tahoelafsci/oraclelinux:8-py3.8"
        user: "nobody"

    environment:
      <<: *UTF_8_ENVIRONMENT
      TAHOE_LAFS_TOX_ENVIRONMENT: "py38"

    # pip cannot install packages if the working directory is not readable.
    # We want to run a lot of steps as nobody instead of as root.
    working_directory: "/tmp/project"

    steps:
      - "checkout"
      - run: *SETUP_VIRTUALENV
      - run: *RUN_TESTS
      - store_test_results: *STORE_TEST_RESULTS
      - store_artifacts: *STORE_TEST_LOG
      - store_artifacts: *STORE_ELIOT_LOG
      - store_artifacts: *STORE_OTHER_ARTIFACTS
      - run: *SUBMIT_COVERAGE

  fedora-35:
    <<: *RHEL_DERIV
    docker:
      - <<: *DOCKERHUB_AUTH
        image: "tahoelafsci/fedora:35-py3"
        user: "nobody"

  nixos:
    parameters:
      nixpkgs:
        description: >-
          Reference the name of a niv-managed nixpkgs source (see `niv show`
          and nix/sources.json)
        type: "string"
      pythonVersion:
        description: >-
          Reference the name of a Python package in nixpkgs to use.
        type: "string"

    executor: "nix"

    steps:
      - "nix-build":
          nixpkgs: "<<parameters.nixpkgs>>"
          pythonVersion: "<<parameters.pythonVersion>>"
          buildSteps:
            - "run":
                name: "Unit Test"
                command: |
                  # The dependencies are all built so we can allow more
                  # parallelism here.
                  source .circleci/lib.sh
                  cache_if_able nix-build \
                    --cores 8 \
                    --argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>" \
                    --argstr pythonVersion "<<parameters.pythonVersion>>" \
                    nix/tests.nix

  typechecks:
    docker:
      - <<: *DOCKERHUB_AUTH
        image: "tahoelafsci/ubuntu:20.04-py3.9"

    steps:
      - "checkout"
      - run:
          name: "Validate Types"
          command: |
            /tmp/venv/bin/tox -e typechecks

  docs:
    docker:
      - <<: *DOCKERHUB_AUTH
        image: "tahoelafsci/ubuntu:20.04-py3.9"

    steps:
      - "checkout"
      - run:
          name: "Build documentation"
          command: |
            /tmp/venv/bin/tox -e docs

  build-image: &BUILD_IMAGE
    # This is a template for a job to build a Docker image that has as much of
    # the setup as we can manage already done and baked in.  This cuts down on
    # the per-job setup time the actual testing jobs have to perform, by
    # perhaps 10% to 20%.
    #
    # https://circleci.com/blog/how-to-build-a-docker-image-on-circleci-2-0/
    docker:
      - <<: *DOCKERHUB_AUTH
        # CircleCI build images; see
        # https://github.com/CircleCI-Public/cimg-base for details.
        image: "cimg/base:2022.01"

    environment:
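      # The values below are placeholders; every concrete build-image-* job
      # later in this file overrides all three.  They appear to just sketch
      # where each variable lands in the final image coordinates,
      # i.e. tahoelafsci/<DISTRO>:<TAG>-py<PYTHON_VERSION>.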
      DISTRO: "tahoelafsci/<DISTRO>:foo-py3.9"
      TAG: "tahoelafsci/distro:<TAG>-py3.9"
      PYTHON_VERSION: "tahoelafsci/distro:tag-py<PYTHON_VERSION}"

    steps:
      - "checkout"
      - setup_remote_docker:
          version: "20.10.11"
      - run:
          name: "Log in to Dockerhub"
          command: |
            docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
      - run:
          name: "Build image"
          command: |
            docker \
                build \
                --build-arg TAG=${TAG} \
                --build-arg PYTHON_VERSION=${PYTHON_VERSION} \
                -t tahoelafsci/${DISTRO}:${TAG}-py${PYTHON_VERSION} \
                -f ~/project/.circleci/Dockerfile.${DISTRO} \
                ~/project/
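      # As a concrete example, with the build-image-debian-11 values below
      # (DISTRO=debian, TAG=11, PYTHON_VERSION=3.9) the command above expands
      # to roughly:
      #
      #   docker build \
      #       --build-arg TAG=11 \
      #       --build-arg PYTHON_VERSION=3.9 \
      #       -t tahoelafsci/debian:11-py3.9 \
      #       -f ~/project/.circleci/Dockerfile.debian \
      #       ~/project/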
      - run:
          name: "Push image"
          command: |
            docker push tahoelafsci/${DISTRO}:${TAG}-py${PYTHON_VERSION}


  build-image-debian-11:
    <<: *BUILD_IMAGE

    environment:
      DISTRO: "debian"
      TAG: "11"
      PYTHON_VERSION: "3.9"


  build-image-ubuntu-20-04:
    <<: *BUILD_IMAGE

    environment:
      DISTRO: "ubuntu"
      TAG: "20.04"
      PYTHON_VERSION: "3.9"


  build-image-ubuntu-22-04:
    <<: *BUILD_IMAGE

    environment:
      DISTRO: "ubuntu"
      TAG: "22.04"
      PYTHON_VERSION: "3.10"


  build-image-oraclelinux-8:
    <<: *BUILD_IMAGE

    environment:
      DISTRO: "oraclelinux"
      TAG: "8"
      PYTHON_VERSION: "3.8"

  build-image-fedora-35:
    <<: *BUILD_IMAGE

    environment:
      DISTRO: "fedora"
      TAG: "35"
      PYTHON_VERSION: "3"

  # build-image-pypy27-buster:
  #   <<: *BUILD_IMAGE
  #   environment:
  #     DISTRO: "pypy"
  #     TAG: "buster"
  #     # We only have Python 2 for PyPy right now so there's no support for
  #     # setting up PyPy 3 in the image building toolchain.  This value is just
  #     # for constructing the right Docker image tag.
  #     PYTHON_VERSION: "2"

executors:
  nix:
    docker:
      # Run in a highly Nix-capable environment.
      - <<: *DOCKERHUB_AUTH
        image: "nixos/nix:2.10.3"
    environment:
      # CACHIX_AUTH_TOKEN is manually set in the CircleCI web UI and allows us
      # to push to CACHIX_NAME.  CACHIX_NAME tells cachix which cache to push
      # to.
      CACHIX_NAME: "tahoe-lafs-opensource"

commands:
  nix-build:
    parameters:
      nixpkgs:
        description: >-
          Reference the name of a niv-managed nixpkgs source (see `niv show`
          and nix/sources.json)
        type: "string"
      pythonVersion:
        description: >-
          Reference the name of a Python package in nixpkgs to use.
        type: "string"
      buildSteps:
        description: >-
          The build steps to execute after setting up the build environment.
        type: "steps"

    steps:
      - "run":
          # Get cachix for Nix-friendly caching.
          name: "Install Basic Dependencies"
          command: |
            NIXPKGS="https://github.com/nixos/nixpkgs/archive/nixos-<<parameters.nixpkgs>>.tar.gz"
            nix-env \
              --file $NIXPKGS \
              --install \
              -A cachix bash
              # Activate it for "binary substitution".  This sets up
              # configuration tht lets Nix download something from the cache
              # instead of building it locally, if possible.
              cachix use "${CACHIX_NAME}"

      - "checkout"

      - "run":
          # The Nix package doesn't know how to do this part, unfortunately.
          name: "Generate version"
          command: |
            nix-shell \
              -p 'python3.withPackages (ps: [ ps.setuptools ])' \
              --run 'python setup.py update_version'

      - "run":
          name: "Build Dependencies"
          command: |
            # CircleCI build environment looks like it has a zillion and a
            # half cores.  Don't let Nix autodetect this high core count
            # because it blows up memory usage and fails the test run.  Pick a
            # number of cores that suits the build environment we're paying
            # for (the free one!).
            source .circleci/lib.sh
            # nix-shell will build all of the dependencies of the target but
            # not the target itself.
            cache_if_able nix-shell \
              --run "" \
              --cores 3 \
              --argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>" \
              --argstr pythonVersion "<<parameters.pythonVersion>>" \
              ./default.nix

      - "run":
          name: "Build Package"
          command: |
            source .circleci/lib.sh
            cache_if_able nix-build \
              --cores 4 \
              --argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>" \
              --argstr pythonVersion "<<parameters.pythonVersion>>" \
              ./default.nix

      - steps: "<<parameters.buildSteps>>"