diff --git a/.devcontainer.json b/.devcontainer.json deleted file mode 100644 index 7553c7f72..000000000 --- a/.devcontainer.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "image": "webis/tira-application:basis-0.0.96", - "customizations": { - "vscode": { - "extensions": [ - "ms-python.python", - "ms-python.vscode-pylance", - "ms-toolsai.jupyter", - "42Crunch.vscode-openapi", - ] - } - }, - "runArgs": ["--privileged"], - "mounts": ["source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind"] -} diff --git a/.devcontainer/Dockerfile.dev b/.devcontainer/Dockerfile.dev new file mode 100644 index 000000000..dc75fa13d --- /dev/null +++ b/.devcontainer/Dockerfile.dev @@ -0,0 +1,92 @@ +FROM ubuntu:24.04 + +ENV TZ=Europe/Berlin +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +RUN apt-get update && apt-get install -y sudo git locales +RUN echo "en_US.UTF-8 UTF-8" | tee -a /etc/locale.gen && locale-gen + +######################################################################################################################## +# Create Users # +######################################################################################################################## +RUN < /dev/null + apt-get update -y + apt-get install -y docker-ce-cli + touch /var/run/docker.sock + chown root:docker /var/run/docker.sock +EOF + + + +######################################################################################################################## +# Documentation # +######################################################################################################################## +ENV PIP_BREAK_SYSTEM_PACKAGES 1 +USER root +RUN apt-get update && apt-get install -y python3 python3-pip pkg-config plantuml wget +# Install umlet +RUN mkdir -p /usr/share/umlet \ + && cd /usr/share/umlet \ + && wget https://www.umlet.com/download/umlet_15_1/umlet-standalone-15.1.zip -O download.zip \ + && cd /usr/share/umlet \ + && unzip download.zip +USER dev +RUN pip3 install sphinx furo myst-parser sphinx-toolbox sphinx-design sphinxcontrib-plantuml sphinxcontrib-umlet diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 000000000..6436ae7a8 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,152 @@ +// For format details, see https://aka.ms/devcontainer.json. For config options, see the +// README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-docker-compose +{ + "name": "Existing Docker Compose (Extend)", + + // Update the 'dockerComposeFile' list if you have more compose files or use different names. + // The .devcontainer/docker-compose.yml file contains any overrides you need/want to make. + "dockerComposeFile": [ + "./docker-compose.dev.yml", + "docker-compose.yml" + ], + + // The 'service' property is the name of the service for the container that VS Code should + // use. Update this value and .devcontainer/docker-compose.yml to the real service name. + "service": "devenv", + + // The optional 'workspaceFolder' property is the path VS Code should open by default when + // connected. This is typically a file mount in .devcontainer/docker-compose.yml + "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}", + + // Features to add to the dev container. More info: https://containers.dev/features. + // "features": {}, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. 
+ "forwardPorts": [ + "nginx:8080", + "nginx:8081", + "nginx:8082" + ], + + "portsAttributes": { + "nginx:8080": { + "label": "TIRA Backend", + "protocol": "https" + }, + "nginx:8081": { + "label": "Authentication", + "protocol": "https" + }, + "nginx:8082": { + "label": "TIRA Frontend", + "protocol": "https" + } + }, + + // Uncomment the next line if you want start specific services in your Docker Compose config. + // "runServices": [], + + // Uncomment the next line if you want to keep your containers running after VS Code shuts down. + // "shutdownAction": "none", + + // Uncomment the next line to run commands after the container is created. + "postCreateCommand": "cd frontend; yarn; cd ../python-client; pip install -e .[test,dev]; cd ../application/; pip install -e .[test,dev]; make setup; make import-mock-data", + + // Configure tool-specific properties. + "customizations": { + "vscode": { + "settings": { + "editor.formatOnSave": true, + "json.format.keepLines": true, + "livePreview.portNumber": 3080, + "remote.autoForwardPorts": false, + "files.exclude": { + "**/__pycache__": true, + "**/.mypy_cache": true, + "**/.pytest_cache": true, + "**/*.egg-info": true, + "**/node_modules": true, + "application/.data-dumps": true + }, + "launch": { + "version": "0.2.0", + "configurations": [ + { + "name": "TIRA Frontend", + "type": "node", + "request": "launch", + "runtimeExecutable": "yarn", + "cwd": "${workspaceFolder:Frontend}/", + "runtimeArgs": [ + "dev" + ] + }, + { + "name": "TIRA Backend", + "type": "debugpy", + "request": "launch", + "args": [ + "run_develop" + ], + "django": true, + "autoStartBrowser": false, + "program": "${workspaceFolder:Backend}/src/manage.py" + }, + { + "name": "TIRA Backend Tests", + "type": "debugpy", + "request": "launch", + "program": "${workspaceFolder:Backend}/src/manage.py", + "cwd": "${workspaceFolder:Backend}/test", + "args": [ "test", "--failfast", "--settings=settings_test" ], + "django": true, + "env": { "PYTHONPATH": ":../src:.", "DJANGO_SETTINGS_MODULE": "settings_test" }, + "justMyCode": false + }, + { + "name": "Frontend Tests", + "type": "node", + "request": "launch", + "runtimeExecutable": "yarn", + "args": [ "test" ], + "cwd": "${workspaceFolder:Frontend}" + } + ], + "compounds": [ + { + "name": "TIRA Stack", + "configurations": [ + "TIRA Backend", + "TIRA Frontend" + ], + "stopAll": true + } + ] + }, + "[python]": { + "editor.codeActionsOnSave": { "source.organizeImports": "explicit" } + }, + "python.formatting.provider": "black", + "isort.args": [ "--profile", "black" ] + }, + "extensions": [ + "Vue.volar", + "ms-vscode.live-server", + "ms-python.python", + "ms-python.isort", + "ms-python.black-formatter", + "ms-python.flake8", + "ms-python.mypy-type-checker", + "42Crunch.vscode-openapi", + "vuetifyjs.vuetify-vscode" + ] + } + }, + + // Uncomment to connect as an existing user other than the container default. More info: https://aka.ms/dev-containers-non-root. + // "remoteUser": "devcontainer" + + // Needed for parts of the python-client that build and run docker containers + "privileged": true, + "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ] +} diff --git a/.devcontainer/devfiles/README.md b/.devcontainer/devfiles/README.md new file mode 100644 index 000000000..674bfca31 --- /dev/null +++ b/.devcontainer/devfiles/README.md @@ -0,0 +1,13 @@ +This folder contains files that are **only** relevant for setting up a mock environment for development. 
The subfolder +`authelia` contains an customized +[configuration file](https://github.com/authelia/authelia/blob/v4.38.8/config.template.yml) for +[Authelia](https://www.authelia.com/) and a user database containing some mock users and admins. The configuration files +for the reverse proxy (inside `nginx`) are copied with only slight modifications from Authelia's +[Guide](https://www.authelia.com/integration/proxies/nginx/) and the certificates are self-signed since we need https +for the auth provider (Authelia) to be happy. + +If the certificates expired, you can run +```sh +sudo openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tira-dev-selfsigned.key -out tira-dev-selfsigned.crt +``` +to generate new ones. \ No newline at end of file diff --git a/.devcontainer/devfiles/authelia/configuration.dev.yml b/.devcontainer/devfiles/authelia/configuration.dev.yml new file mode 100644 index 000000000..4053f2eda --- /dev/null +++ b/.devcontainer/devfiles/authelia/configuration.dev.yml @@ -0,0 +1,1422 @@ +# Configuration template from https://github.com/authelia/authelia/blob/v4.38.8/config.template.yml + +# yamllint disable rule:comments-indentation +--- +############################################################################### +## Authelia Configuration ## +############################################################################### + +## +## Notes: +## +## - the default location of this file is assumed to be configuration.yml unless otherwise noted +## - when using docker the container expects this by default to be at /config/configuration.yml +## - the default location where this file is loaded from can be overridden with the X_AUTHELIA_CONFIG environment var +## - the comments in this configuration file are helpful but users should consult the official documentation on the +## website at https://www.authelia.com/ or https://www.authelia.com/configuration/prologue/introduction/ +## - this configuration file template is not automatically updated +## + +## Certificates directory specifies where Authelia will load trusted certificates (public portion) from in addition to +## the system certificates store. +## They should be in base64 format, and have one of the following extensions: *.cer, *.crt, *.pem. +# certificates_directory: '/config/certificates/' + +## The theme to display: light, dark, grey, auto. +# theme: 'light' + +## Set the default 2FA method for new users and for when a user has a preferred method configured that has been +## disabled. This setting must be a method that is enabled. +## Options are totp, webauthn, mobile_push. +# default_2fa_method: '' + +## +## Server Configuration +## +server: + ## The address for the Main server to listen on in the address common syntax. + ## Formats: + ## - [://][:][/] + ## - [://][hostname]:[/] + ## Square brackets indicate optional portions of the format. Scheme must be 'tcp', 'tcp4', 'tcp6', or 'unix'. + ## The default scheme is 'unix' if the address is an absolute path otherwise it's 'tcp'. The default port is '9091'. + ## If the path is specified this configures the router to handle both the `/` path and the configured path. + address: 'tcp4://:9091/' + + ## Set the path on disk to Authelia assets. + ## Useful to allow overriding of specific static assets. + # asset_path: '/config/assets/' + + ## Disables writing the health check vars to /app/.healthcheck.env which makes healthcheck.sh return exit code 0. + ## This is disabled by default if either /app/.healthcheck.env or /app/healthcheck.sh do not exist. 
+ # disable_healthcheck: false + + ## Authelia by default doesn't accept TLS communication on the server port. This section overrides this behaviour. + # tls: + ## The path to the DER base64/PEM format private key. + # key: '' + + ## The path to the DER base64/PEM format public certificate. + # certificate: '' + + ## The list of certificates for client authentication. + # client_certificates: [] + + ## Server headers configuration/customization. + # headers: + + ## The CSP Template. Read the docs. + # csp_template: '' + + ## Server Buffers configuration. + # buffers: + + ## Buffers usually should be configured to be the same value. + ## Explanation at https://www.authelia.com/c/server#buffer-sizes + ## Read buffer size adjusts the server's max incoming request size in bytes. + ## Write buffer size does the same for outgoing responses. + + ## Read buffer. + # read: 4096 + + ## Write buffer. + # write: 4096 + + ## Server Timeouts configuration. + # timeouts: + + ## Read timeout in the duration common syntax. + # read: '6 seconds' + + ## Write timeout in the duration common syntax. + # write: '6 seconds' + + ## Idle timeout in the duration common syntax. + # idle: '30 seconds' + + ## Server Endpoints configuration. + ## This section is considered advanced and it SHOULD NOT be configured unless you've read the relevant documentation. + endpoints: + ## Enables the pprof endpoint. + # enable_pprof: false + + ## Enables the expvars endpoint. + # enable_expvars: false + + ## Configure the authz endpoints. + authz: + # forward-auth: + # implementation: 'ForwardAuth' + # authn_strategies: [] + # ext-authz: + # implementation: 'ExtAuthz' + # authn_strategies: [] + auth-request: + implementation: 'AuthRequest' + # authn_strategies: [] + # legacy: + # implementation: 'Legacy' + # authn_strategies: [] + +## +## Log Configuration +## +log: + ## Level of verbosity for logs: info, debug, trace. + level: 'debug' + + ## Format the logs are written as: json, text. + # format: 'json' + + ## File path where the logs will be written. If not set logs are written to stdout. + # file_path: '/config/authelia.log' + + ## Whether to also log to stdout when a log_file_path is defined. + # keep_stdout: false + +## +## Telemetry Configuration +## +telemetry: + + ## + ## Metrics Configuration + ## + metrics: + ## Enable Metrics. + enabled: false + + ## The address for the Metrics server to listen on in the address common syntax. + ## Formats: + ## - [://][:][/] + ## - [://][hostname]:[/] + ## Square brackets indicate optional portions of the format. Scheme must be 'tcp', 'tcp4', 'tcp6', or 'unix'. + ## The default scheme is 'unix' if the address is an absolute path otherwise it's 'tcp'. The default port is '9959'. + ## If the path is not specified it defaults to `/metrics`. + # address: 'tcp://:9959/metrics' + + ## Metrics Server Buffers configuration. + # buffers: + + ## Read buffer. + # read: 4096 + + ## Write buffer. + # write: 4096 + + ## Metrics Server Timeouts configuration. + # timeouts: + + ## Read timeout in the duration common syntax. + # read: '6 seconds' + + ## Write timeout in the duration common syntax. + # write: '6 seconds' + + ## Idle timeout in the duration common syntax. + # idle: '30 seconds' + +## +## TOTP Configuration +## +## Parameters used for TOTP generation. +totp: + ## Disable TOTP. + disable: false + + ## The issuer name displayed in the Authenticator application of your choice. + # issuer: 'authelia.com' + + ## The TOTP algorithm to use. 
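The only endpoint switched on above is the `AuthRequest` authz implementation, which is what the `auth_request` setup in `devfiles/nginx` (taken from Authelia's nginx guide) calls into. If one wanted to state the authentication strategies explicitly instead of relying on the defaults, a sketch could look like the following; the strategy names come from the Authelia 4.38 documentation and are illustrative, not part of this diff:

```yaml
# Sketch: explicit authn strategies for the nginx auth_request integration.
# Strategy names per the Authelia 4.38 docs; this dev config relies on the defaults.
server:
  endpoints:
    authz:
      auth-request:
        implementation: 'AuthRequest'
        authn_strategies:
          - name: 'HeaderAuthRequestProxyAuthorization'  # Proxy-Authorization header passed by nginx
          - name: 'CookieSession'                        # the regular browser session cookie
```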
+ ## It is CRITICAL you read the documentation before changing this option: + ## https://www.authelia.com/c/totp#algorithm + # algorithm: 'SHA1' + + ## The number of digits a user has to input. Must either be 6 or 8. + ## Changing this option only affects newly generated TOTP configurations. + ## It is CRITICAL you read the documentation before changing this option: + ## https://www.authelia.com/c/totp#digits + # digits: 6 + + ## The period in seconds a Time-based One-Time Password is valid for. + ## Changing this option only affects newly generated TOTP configurations. + # period: 30 + + ## The skew controls number of Time-based One-Time Passwords either side of the current one that are valid. + ## Warning: before changing skew read the docs link below. + # skew: 1 + ## See: https://www.authelia.com/c/totp#input-validation to read + ## the documentation. + + ## The size of the generated shared secrets. Default is 32 and is sufficient in most use cases, minimum is 20. + # secret_size: 32 + + ## The allowed algorithms for a user to pick from. + # allowed_algorithms: + # - 'SHA1' + + ## The allowed digits for a user to pick from. + # allowed_digits: + # - 6 + + ## The allowed periods for a user to pick from. + # allowed_periods: + # - 30 + + ## Disable the reuse security policy which prevents replays of one-time password code values. + # disable_reuse_security_policy: false + +## +## WebAuthn Configuration +## +## Parameters used for WebAuthn. +webauthn: + ## Disable WebAuthn. + disable: false + + ## The interaction timeout for WebAuthn dialogues in the duration common syntax. + # timeout: '60 seconds' + + ## The display name the browser should show the user for when using WebAuthn to login/register. + # display_name: 'Authelia' + + ## Conveyance preference controls if we collect the attestation statement including the AAGUID from the device. + ## Options are none, indirect, direct. + # attestation_conveyance_preference: 'indirect' + + ## User verification controls if the user must make a gesture or action to confirm they are present. + ## Options are required, preferred, discouraged. + # user_verification: 'preferred' + +## +## Duo Push API Configuration +## +## Parameters used to contact the Duo API. Those are generated when you protect an application of type +## "Partner Auth API" in the management panel. +# duo_api: + # disable: false + # hostname: 'api-123456789.example.com' + # integration_key: 'ABCDEF' + ## Secret can also be set using a secret: https://www.authelia.com/c/secrets + # secret_key: '1234567890abcdefghifjkl' + # enable_self_enrollment: false + +## +## Identity Validation Configuration +## +## This configuration tunes the identity validation flows. +identity_validation: + + ## Reset Password flow. Adjusts how the reset password flow operates. + reset_password: + ## Maximum allowed time before the JWT is generated and when the user uses it in the duration common syntax. + # jwt_lifespan: '5 minutes' + + ## The algorithm used for the Reset Password JWT. + # jwt_algorithm: 'HS256' + + ## The secret key used to sign and verify the JWT. + jwt_secret: 'a_very_important_secret' + + ## Elevated Session flows. Adjusts the flow which require elevated sessions for example managing credentials, adding, + ## removing, etc. + # elevated_session: + ## Maximum allowed lifetime after the One-Time Code is generated that it is considered valid. 
+ # code_lifespan: '5 minutes' + + ## Maximum allowed lifetime after the user uses the One-Time Code and the user must perform the validation again in + ## the duration common syntax. + # elevation_lifespan: '10 minutes' + + ## Number of characters the one-time password contains. + # characters: 8 + + ## In addition to the One-Time Code requires the user performs a second factor authentication. + # require_second_factor: false + + ## Skips the elevation requirement and entry of the One-Time Code if the user has performed second factor + ## authentication. + # skip_second_factor: false + +## +## NTP Configuration +## +## This is used to validate the servers time is accurate enough to validate TOTP. +# ntp: + ## The address of the NTP server to connect to in the address common syntax. + ## Format: [://][:]. + ## Square brackets indicate optional portions of the format. Scheme must be 'udp', 'udp4', or 'udp6'. + ## The default scheme is 'udp'. The default port is '123'. + # address: 'udp://time.cloudflare.com:123' + + ## NTP version. + # version: 4 + + ## Maximum allowed time offset between the host and the NTP server in the duration common syntax. + # max_desync: '3 seconds' + + ## Disables the NTP check on startup entirely. This means Authelia will not contact a remote service at all if you + ## set this to true, and can operate in a truly offline mode. + # disable_startup_check: false + + ## The default of false will prevent startup only if we can contact the NTP server and the time is out of sync with + ## the NTP server more than the configured max_desync. If you set this to true, an error will be logged but startup + ## will continue regardless of results. + # disable_failure: false + +## +## Authentication Backend Provider Configuration +## +## Used for verifying user passwords and retrieve information such as email address and groups users belong to. +## +## The available providers are: `file`, `ldap`. You must use only one of these providers. +authentication_backend: + + ## Password Reset Options. + password_reset: + ## Disable both the HTML element and the API for reset password functionality. + disable: false + + ## External reset password url that redirects the user to an external reset portal. This disables the internal reset + ## functionality. + # custom_url: '' + + ## The amount of time to wait before we refresh data from the authentication backend in the duration common syntax. + ## To disable this feature set it to 'disable', this will slightly reduce security because for Authelia, users will + ## always belong to groups they belonged to at the time of login even if they have been removed from them in LDAP. + ## To force update on every request you can set this to '0' or 'always', this will increase processor demand. + ## See the below documentation for more information. + ## Refresh Interval docs: https://www.authelia.com/c/1fa#refresh-interval + # refresh_interval: '5 minutes' + + ## + ## LDAP (Authentication Provider) + ## + ## This is the recommended Authentication Provider in production + ## because it allows Authelia to offload the stateful operations + ## onto the LDAP service. + # ldap: + ## The address of the directory server to connect to in the address common syntax. + ## Format: [://][:]. + ## Square brackets indicate optional portions of the format. Scheme must be 'ldap', 'ldaps', or 'ldapi`. + ## The default scheme is 'ldapi' if the address is an absolute path otherwise it's 'ldaps'. 
+ ## The default port is '636', unless the scheme is 'ldap' in which case it's '389'. + # address: 'ldaps://127.0.0.1:636' + + ## The LDAP implementation, this affects elements like the attribute utilised for resetting a password. + ## Acceptable options are as follows: + ## - 'activedirectory' - for Microsoft Active Directory. + ## - 'freeipa' - for FreeIPA. + ## - 'lldap' - for lldap. + ## - 'custom' - for custom specifications of attributes and filters. + ## This currently defaults to 'custom' to maintain existing behaviour. + ## + ## Depending on the option here certain other values in this section have a default value, notably all of the + ## attribute mappings have a default value that this config overrides, you can read more about these default values + ## at https://www.authelia.com/c/ldap#defaults + # implementation: 'custom' + + ## The dial timeout for LDAP in the duration common syntax. + # timeout: '5 seconds' + + ## Use StartTLS with the LDAP connection. + # start_tls: false + + # tls: + ## The server subject name to check the servers certificate against during the validation process. + ## This option is not required if the certificate has a SAN which matches the address options hostname. + # server_name: 'ldap.example.com' + + ## Skip verifying the server certificate entirely. In preference to setting this we strongly recommend you add the + ## certificate or the certificate of the authority signing the certificate to the certificates directory which is + ## defined by the `certificates_directory` option at the top of the configuration. + ## It's important to note the public key should be added to the directory, not the private key. + ## This option is strongly discouraged but may be useful in some self-signed situations where validation is not + ## important to the administrator. + # skip_verify: false + + ## Minimum TLS version for the connection. + # minimum_version: 'TLS1.2' + + ## Maximum TLS version for the connection. + # maximum_version: 'TLS1.3' + + ## The certificate chain used with the private_key if the server requests TLS Client Authentication + ## i.e. Mutual TLS. + # certificate_chain: | + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- + + ## The private key used with the certificate_chain if the server requests TLS Client Authentication + ## i.e. Mutual TLS. + # private_key: | + # -----BEGIN RSA PRIVATE KEY----- + # ... + # -----END RSA PRIVATE KEY----- + + ## The distinguished name of the container searched for objects in the directory information tree. + ## See also: additional_users_dn, additional_groups_dn. + # base_dn: 'dc=example,dc=com' + + ## The additional_users_dn is prefixed to base_dn and delimited by a comma when searching for users. + ## i.e. with this set to OU=Users and base_dn set to DC=a,DC=com; OU=Users,DC=a,DC=com is searched for users. + # additional_users_dn: 'ou=users' + + ## The users filter used in search queries to find the user profile based on input filled in login form. 
+ ## Various placeholders are available in the user filter which you can read about in the documentation which can + ## be found at: https://www.authelia.com/c/ldap#users-filter-replacements + ## + ## Recommended settings are as follows: + ## - Microsoft Active Directory: (&({username_attribute}={input})(objectCategory=person)(objectClass=user)) + ## - OpenLDAP: + ## - (&({username_attribute}={input})(objectClass=person)) + ## - (&({username_attribute}={input})(objectClass=inetOrgPerson)) + ## + ## To allow sign in both with username and email, one can use a filter like + ## (&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=person)) + # users_filter: '(&({username_attribute}={input})(objectClass=person))' + + ## The additional_groups_dn is prefixed to base_dn and delimited by a comma when searching for groups. + ## i.e. with this set to OU=Groups and base_dn set to DC=a,DC=com; OU=Groups,DC=a,DC=com is searched for groups. + # additional_groups_dn: 'ou=groups' + + ## The groups filter used in search queries to find the groups based on relevant authenticated user. + ## Various placeholders are available in the groups filter which you can read about in the documentation which can + ## be found at: https://www.authelia.com/c/ldap#groups-filter-replacements + ## + ## If your groups use the `groupOfUniqueNames` structure use this instead: + ## (&(uniqueMember={dn})(objectClass=groupOfUniqueNames)) + # groups_filter: '(&(member={dn})(objectClass=groupOfNames))' + + ## The group search mode to use. Options are 'filter' or 'memberof'. It's essential to read the docs if you wish to + ## use 'memberof'. Also 'filter' is the best choice for most use cases. + # group_search_mode: 'filter' + + ## Follow referrals returned by the server. + ## This is especially useful for environments where read-only servers exist. Only implemented for write operations. + # permit_referrals: false + + ## The username and password of the admin user. + # user: 'cn=admin,dc=example,dc=com' + ## Password can also be set using a secret: https://www.authelia.com/c/secrets + # password: 'password' + + ## The attributes for users and objects from the directory server. + # attributes: + + ## The distinguished name attribute if your directory server supports it. Users should read the docs before + ## configuring. Only used for the 'memberof' group search mode. + # distinguished_name: '' + + ## The attribute holding the username of the user. This attribute is used to populate the username in the session + ## information. For your information, Microsoft Active Directory usually uses 'sAMAccountName' and OpenLDAP + ## usually uses 'uid'. Beware that this attribute holds the unique identifiers for the users binding the user and + ## the configuration stored in database; therefore only single value attributes are allowed and the value must + ## never be changed once attributed to a user otherwise it would break the configuration for that user. + ## Technically non-unique attributes like 'mail' can also be used but we don't recommend using them, we instead + ## advise to use a filter to perform alternative lookups and the attributes mentioned above + ## (sAMAccountName and uid) to follow https://datatracker.ietf.org/doc/html/rfc2307. + # username: 'uid' + + ## The attribute holding the display name of the user. This will be used to greet an authenticated user. + # display_name: 'displayName' + + ## The attribute holding the mail address of the user. 
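Read together, the commented options in this section amount to a fairly small amount of real configuration. As a consolidated illustration only (the dev environment itself authenticates against the file backend further down, and every value here is one of the template's own examples), a minimal LDAP backend would look roughly like this:

```yaml
# Sketch: minimal LDAP backend assembled from this template's example values.
# Not used by the dev setup, which relies on the file backend below.
authentication_backend:
  ldap:
    address: 'ldaps://127.0.0.1:636'
    implementation: 'custom'
    base_dn: 'dc=example,dc=com'
    additional_users_dn: 'ou=users'
    # allow sign-in with either username or email, as described above
    users_filter: '(&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=person))'
    additional_groups_dn: 'ou=groups'
    groups_filter: '(&(member={dn})(objectClass=groupOfNames))'
    user: 'cn=admin,dc=example,dc=com'
    password: 'password'          # better supplied as a secret in production
    attributes:
      username: 'uid'
      display_name: 'displayName'
      mail: 'mail'
      group_name: 'cn'
```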
If multiple email addresses are defined for a user, only + ## the first one returned by the directory server is used. + # mail: 'mail' + + ## The attribute which provides distinguished names of groups an object is a member of. + ## Only used for the 'memberof' group search mode. + # member_of: 'memberOf' + + ## The attribute holding the name of the group. + # group_name: 'cn' + + ## + ## File (Authentication Provider) + ## + ## With this backend, the users database is stored in a file which is updated when users reset their passwords. + ## Therefore, this backend is meant to be used in a dev environment and not in production since it prevents Authelia + ## to be scaled to more than one instance. The options under 'password' have sane defaults, and as it has security + ## implications it is highly recommended you leave the default values. Before considering changing these settings + ## please read the docs page below: + ## https://www.authelia.com/r/passwords#tuning + ## + ## Important: Kubernetes (or HA) users must read https://www.authelia.com/t/statelessness + ## + file: + path: '/config/users_database.yml' + # watch: false + # search: + # email: false + # case_insensitive: false + # password: + # algorithm: 'argon2' + # argon2: + # variant: 'argon2id' + # iterations: 3 + # memory: 65536 + # parallelism: 4 + # key_length: 32 + # salt_length: 16 + # scrypt: + # iterations: 16 + # block_size: 8 + # parallelism: 1 + # key_length: 32 + # salt_length: 16 + # pbkdf2: + # variant: 'sha512' + # iterations: 310000 + # salt_length: 16 + # sha2crypt: + # variant: 'sha512' + # iterations: 50000 + # salt_length: 16 + # bcrypt: + # variant: 'standard' + # cost: 12 + +## +## Password Policy Configuration. +## +password_policy: + + ## The standard policy allows you to tune individual settings manually. + standard: + enabled: false + + ## Require a minimum length for passwords. + min_length: 8 + + ## Require a maximum length for passwords. + max_length: 0 + + ## Require uppercase characters. + require_uppercase: true + + ## Require lowercase characters. + require_lowercase: true + + ## Require numeric characters. + require_number: true + + ## Require special characters. + require_special: true + + ## zxcvbn is a well known and used password strength algorithm. It does not have tunable settings. + zxcvbn: + enabled: false + + ## Configures the minimum score allowed. + min_score: 3 + +## +## Privacy Policy Configuration +## +## Parameters used for displaying the privacy policy link and drawer. +privacy_policy: + + ## Enables the display of the privacy policy using the policy_url. + enabled: false + + ## Enables the display of the privacy policy drawer which requires users accept the privacy policy + ## on a per-browser basis. + require_user_acceptance: false + + ## The URL of the privacy policy document. Must be an absolute URL and must have the 'https://' scheme. + ## If the privacy policy enabled option is true, this MUST be provided. + policy_url: '' + +## +## Access Control Configuration +## +## Access control is a list of rules defining the authorizations applied for one resource to users or group of users. +## +## If 'access_control' is not defined, ACL rules are disabled and the 'bypass' rule is applied, i.e., access is allowed +## to anyone. Otherwise restrictions follow the rules defined. +## +## Note: One can use the wildcard * to match any subdomain. +## It must stand at the beginning of the pattern. 
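The file backend above reads its users from `/config/users_database.yml`; in this dev environment that is the mock user database shipped in `.devcontainer/devfiles/authelia` (not reproduced in this diff). For orientation, an Authelia user database has the following shape, where every name, group, and the truncated hash are purely illustrative placeholders:

```yaml
# Illustrative shape of users_database.yml; the actual mock users and admins live
# in .devcontainer/devfiles/authelia and are not shown here.
users:
  mock-admin:
    disabled: false
    displayname: 'Mock Admin'
    password: '$argon2id$v=19$m=65536,t=3,p=4$...'   # placeholder hash, not a credential
    email: 'mock-admin@example.com'
    groups:
      - 'admins'
  mock-user:
    disabled: false
    displayname: 'Mock User'
    password: '$argon2id$v=19$m=65536,t=3,p=4$...'
    email: 'mock-user@example.com'
    groups: []
```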
(example: *.example.com) +## +## Note: You must put patterns containing wildcards between simple quotes for the YAML to be syntactically correct. +## +## Definition: A 'rule' is an object with the following keys: 'domain', 'subject', 'policy' and 'resources'. +## +## - 'domain' defines which domain or set of domains the rule applies to. +## +## - 'subject' defines the subject to apply authorizations to. This parameter is optional and matching any user if not +## provided. If provided, the parameter represents either a user or a group. It should be of the form +## 'user:' or 'group:'. +## +## - 'policy' is the policy to apply to resources. It must be either 'bypass', 'one_factor', 'two_factor' or 'deny'. +## +## - 'resources' is a list of regular expressions that matches a set of resources to apply the policy to. This parameter +## is optional and matches any resource if not provided. +## +## Note: the order of the rules is important. The first policy matching (domain, resource, subject) applies. +access_control: + ## Default policy can either be 'bypass', 'one_factor', 'two_factor' or 'deny'. It is the policy applied to any + ## resource if there is no policy to be applied to the user. + default_policy: 'bypass' + + # networks: + # - name: 'internal' + # networks: + # - '10.10.0.0/16' + # - '192.168.2.0/24' + # - name: 'VPN' + # networks: '10.9.0.0/16' + + rules: + ## Rules applied to everyone + - domain: '127.0.0.1' + resources: + - '/login' + policy: 'one_factor' + + ## Domain Regex examples. Generally we recommend just using a standard domain. + # - domain_regex: '^(?P\w+)\.example\.com$' + # policy: 'one_factor' + # - domain_regex: '^(?P\w+)\.example\.com$' + # policy: 'one_factor' + # - domain_regex: + # - '^appgroup-.*\.example\.com$' + # - '^appgroup2-.*\.example\.com$' + # policy: 'one_factor' + # - domain_regex: '^.*\.example\.com$' + # policy: 'two_factor' + + # - domain: 'secure.example.com' + # policy: 'one_factor' + ## Network based rule, if not provided any network matches. + # networks: + # - 'internal' + # - 'VPN' + # - '192.168.1.0/24' + # - '10.0.0.1' + + # - domain: + # - 'secure.example.com' + # - 'private.example.com' + # policy: 'two_factor' + + # - domain: 'singlefactor.example.com' + # policy: 'one_factor' + + ## Rules applied to 'admins' group + # - domain: 'mx2.mail.example.com' + # subject: 'group:admins' + # policy: 'deny' + + # - domain: '*.example.com' + # subject: + # - 'group:admins' + # - 'group:moderators' + # policy: 'two_factor' + + ## Rules applied to 'dev' group + # - domain: 'dev.example.com' + # resources: + # - '^/groups/dev/.*$' + # subject: 'group:dev' + # policy: 'two_factor' + + ## Rules applied to user 'john' + # - domain: 'dev.example.com' + # resources: + # - '^/users/john/.*$' + # subject: 'user:john' + # policy: 'two_factor' + + ## Rules applied to user 'harry' + # - domain: 'dev.example.com' + # resources: + # - '^/users/harry/.*$' + # subject: 'user:harry' + # policy: 'two_factor' + + ## Rules applied to user 'bob' + # - domain: '*.mail.example.com' + # subject: 'user:bob' + # policy: 'two_factor' + # - domain: 'dev.example.com' + # resources: + # - '^/users/bob/.*$' + # subject: 'user:bob' + # policy: 'two_factor' + +## +## Session Provider Configuration +## +## The session cookies identify the user once logged in. +## The available providers are: `memory`, `redis`. Memory is the provider unless redis is defined. +session: + ## The secret to encrypt the session data. This is only used with Redis / Redis Sentinel. 
+ ## Secret can also be set using a secret: https://www.authelia.com/c/secrets + secret: 'insecure_session_secret' + + ## Cookies configures the list of allowed cookie domains for sessions to be created on. + ## Undefined values will default to the values below. + cookies: + - + ## The name of the session cookie. + name: 'authelia_session' + + ## The domain to protect. + ## Note: the Authelia portal must also be in that domain. + domain: '127.0.0.1' + + ## Required. The fully qualified URI of the portal to redirect users to on proxies that support redirections. + ## Rules: + ## - MUST use the secure scheme 'https://' + ## - The above 'domain' option MUST either: + ## - Match the host portion of this URI. + ## - Match the suffix of the host portion when prefixed with '.'. + authelia_url: 'https://127.0.0.1:8081' + + ## Optional. The fully qualified URI used as the redirection location if the portal is accessed directly. Not + ## configuring this option disables the automatic redirection behaviour. + ## + ## Note: this parameter is optional. If not provided, user won't be redirected upon successful authentication + ## unless they were redirected to Authelia by the proxy. + ## + ## Rules: + ## - MUST use the secure scheme 'https://' + ## - MUST not match the 'authelia_url' option. + ## - The above 'domain' option MUST either: + ## - Match the host portion of this URI. + ## - Match the suffix of the host portion when prefixed with '.'. + default_redirection_url: 'https://127.0.0.1:8082' + + ## Sets the Cookie SameSite value. Possible options are none, lax, or strict. + ## Please read https://www.authelia.com/c/session#same_site + # same_site: 'lax' + + ## The value for inactivity, expiration, and remember_me are in seconds or the duration common syntax. + ## All three of these values affect the cookie/session validity period. Longer periods are considered less secure + ## because a stolen cookie will last longer giving attackers more time to spy or attack. + + ## The inactivity time before the session is reset. If expiration is set to 1h, and this is set to 5m, if the user + ## does not select the remember me option their session will get destroyed after 1h, or after 5m since the last + ## time Authelia detected user activity. + # inactivity: '5 minutes' + + ## The time before the session cookie expires and the session is destroyed if remember me IS NOT selected by the + ## user. + expiration: '1 hour' + + ## The time before the cookie expires and the session is destroyed if remember me IS selected by the user. Setting + ## this value to -1 disables remember me for this session cookie domain. If allowed and the user uses the remember + ## me checkbox this overrides the expiration option and disables the inactivity option. + # remember_me: '1 month' + + ## Cookie Session Domain default 'name' value. + name: 'authelia_session' + + ## Cookie Session Domain default 'same_site' value. + same_site: 'lax' + + ## Cookie Session Domain default 'inactivity' value. + inactivity: '5m' + + ## Cookie Session Domain default 'expiration' value. + expiration: '1h' + + ## Cookie Session Domain default 'remember_me' value. + remember_me: '1M' + + ## + ## Redis Provider + ## + ## Important: Kubernetes (or HA) users must read https://www.authelia.com/t/statelessness + ## + # redis: + # host: '127.0.0.1' + # port: 6379 + ## Use a unix socket instead + # host: '/var/run/redis/redis.sock' + + ## Username used for redis authentication. This is optional and a new feature in redis 6.0. 
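The active cookie block above is what lets the mock stack run on `127.0.0.1`: the cookie `domain` has to cover both the Authelia portal behind port 8081 (`authelia_url`) and the protected frontend behind port 8082 (`default_redirection_url`), and both URLs must use https, which is why the self-signed certificates exist. Moving to a real deployment would mean swapping the same three values to a proper domain; a sketch with placeholder hostnames:

```yaml
# Sketch for a non-dev deployment; the hostnames are placeholders, not TIRA's.
session:
  cookies:
    - name: 'authelia_session'
      domain: 'example.com'                         # must cover the portal and the apps
      authelia_url: 'https://auth.example.com'      # the Authelia portal
      default_redirection_url: 'https://www.example.com'
      expiration: '1 hour'
      inactivity: '5 minutes'
```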
+ # username: 'authelia' + + ## Password can also be set using a secret: https://www.authelia.com/c/secrets + # password: 'authelia' + + ## This is the Redis DB Index https://redis.io/commands/select (sometimes referred to as database number, DB, etc). + # database_index: 0 + + ## The maximum number of concurrent active connections to Redis. + # maximum_active_connections: 8 + + ## The target number of idle connections to have open ready for work. Useful when opening connections is slow. + # minimum_idle_connections: 0 + + ## The Redis TLS configuration. If defined will require a TLS connection to the Redis instance(s). + # tls: + ## The server subject name to check the servers certificate against during the validation process. + ## This option is not required if the certificate has a SAN which matches the host option. + # server_name: 'myredis.example.com' + + ## Skip verifying the server certificate entirely. In preference to setting this we strongly recommend you add the + ## certificate or the certificate of the authority signing the certificate to the certificates directory which is + ## defined by the `certificates_directory` option at the top of the configuration. + ## It's important to note the public key should be added to the directory, not the private key. + ## This option is strongly discouraged but may be useful in some self-signed situations where validation is not + ## important to the administrator. + # skip_verify: false + + ## Minimum TLS version for the connection. + # minimum_version: 'TLS1.2' + + ## Maximum TLS version for the connection. + # maximum_version: 'TLS1.3' + + ## The certificate chain used with the private_key if the server requests TLS Client Authentication + ## i.e. Mutual TLS. + # certificate_chain: | + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- + + ## The private key used with the certificate_chain if the server requests TLS Client Authentication + ## i.e. Mutual TLS. + # private_key: | + # -----BEGIN RSA PRIVATE KEY----- + # ... + # -----END RSA PRIVATE KEY----- + + ## The Redis HA configuration options. + ## This provides specific options to Redis Sentinel, sentinel_name must be defined (Master Name). + # high_availability: + ## Sentinel Name / Master Name. + # sentinel_name: 'mysentinel' + + ## Specific username for Redis Sentinel. The node username and password is configured above. + # sentinel_username: 'sentinel_specific_user' + + ## Specific password for Redis Sentinel. The node username and password is configured above. + # sentinel_password: 'sentinel_specific_pass' + + ## The additional nodes to pre-seed the redis provider with (for sentinel). + ## If the host in the above section is defined, it will be combined with this list to connect to sentinel. + ## For high availability to be used you must have either defined; the host above or at least one node below. + # nodes: + # - host: 'sentinel-node1' + # port: 6379 + # - host: 'sentinel-node2' + # port: 6379 + + ## Choose the host with the lowest latency. + # route_by_latency: false + + ## Choose the host randomly. + # route_randomly: false + +## +## Regulation Configuration +## +## This mechanism prevents attackers from brute forcing the first factor. It bans the user if too many attempts are made +## in a short period of time. +# regulation: + ## The number of failed login attempts before user is banned. Set it to 0 to disable regulation. 
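Regulation is left commented out in this dev configuration, which is reasonable for mock users, but enabling brute-force protection is just a matter of uncommenting the block with its documented defaults:

```yaml
# Sketch: the regulation block from this template with its documented defaults enabled.
regulation:
  max_retries: 3          # failed attempts before a ban; 0 disables regulation
  find_time: '2 minutes'  # window in which the failed attempts must occur
  ban_time: '5 minutes'   # how long the ban lasts
```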
+ # max_retries: 3 + + ## The time range during which the user can attempt login before being banned in the duration common syntax. The user + ## is banned if the authentication failed 'max_retries' times in a 'find_time' seconds window. + # find_time: '2 minutes' + + ## The length of time before a banned user can login again in the duration common syntax. + # ban_time: '5 minutes' + +## +## Storage Provider Configuration +## +## The available providers are: `local`, `mysql`, `postgres`. You must use one and only one of these providers. +storage: + ## The encryption key that is used to encrypt sensitive information in the database. Must be a string with a minimum + ## length of 20. Please see the docs if you configure this with an undesirable key and need to change it, you MUST use + ## the CLI to change this in the database if you want to change it from a previously configured value. + encryption_key: 'you_must_generate_a_random_string_of_more_than_twenty_chars_and_configure_this' + + ## + ## Local (Storage Provider) + ## + ## This stores the data in a SQLite3 Database. + ## This is only recommended for lightweight non-stateful installations. + ## + ## Important: Kubernetes (or HA) users must read https://www.authelia.com/t/statelessness + ## + local: + ## Path to the SQLite3 Database. + path: '/config/db.sqlite3' + + ## + ## MySQL / MariaDB (Storage Provider) + ## + # mysql: + ## The address of the MySQL server to connect to in the address common syntax. + ## Format: [://][:]. + ## Square brackets indicate optional portions of the format. Scheme must be 'tcp', 'tcp4', 'tcp6', or 'unix`. + ## The default scheme is 'unix' if the address is an absolute path otherwise it's 'tcp'. The default port is '3306'. + # address: 'tcp://127.0.0.1:3306' + + ## The database name to use. + # database: 'authelia' + + ## The username used for SQL authentication. + # username: 'authelia' + + ## The password used for SQL authentication. + ## Can also be set using a secret: https://www.authelia.com/c/secrets + # password: 'mypassword' + + ## The connection timeout in the duration common syntax. + # timeout: '5 seconds' + + ## MySQL TLS settings. Configuring this requires TLS. + # tls: + ## The server subject name to check the servers certificate against during the validation process. + ## This option is not required if the certificate has a SAN which matches the address options hostname. + # server_name: 'mysql.example.com' + + ## Skip verifying the server certificate entirely. In preference to setting this we strongly recommend you add the + ## certificate or the certificate of the authority signing the certificate to the certificates directory which is + ## defined by the `certificates_directory` option at the top of the configuration. + ## It's important to note the public key should be added to the directory, not the private key. + ## This option is strongly discouraged but may be useful in some self-signed situations where validation is not + ## important to the administrator. + # skip_verify: false + + ## Minimum TLS version for the connection. + # minimum_version: 'TLS1.2' + + ## Maximum TLS version for the connection. + # maximum_version: 'TLS1.3' + + ## The certificate chain used with the private_key if the server requests TLS Client Authentication + ## i.e. Mutual TLS. + # certificate_chain: | + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- + # -----BEGIN CERTIFICATE----- + # ... 
+ # -----END CERTIFICATE----- + + ## The private key used with the certificate_chain if the server requests TLS Client Authentication + ## i.e. Mutual TLS. + # private_key: | + # -----BEGIN RSA PRIVATE KEY----- + # ... + # -----END RSA PRIVATE KEY----- + + ## + ## PostgreSQL (Storage Provider) + ## + # postgres: + ## The address of the PostgreSQL server to connect to in the address common syntax. + ## Format: [://][:]. + ## Square brackets indicate optional portions of the format. Scheme must be 'tcp', 'tcp4', 'tcp6', or 'unix`. + ## The default scheme is 'unix' if the address is an absolute path otherwise it's 'tcp'. The default port is '5432'. + # address: 'tcp://127.0.0.1:5432' + + ## The database name to use. + # database: 'authelia' + + ## The schema name to use. + # schema: 'public' + + ## The username used for SQL authentication. + # username: 'authelia' + + ## The password used for SQL authentication. + ## Can also be set using a secret: https://www.authelia.com/c/secrets + # password: 'mypassword' + + ## The connection timeout in the duration common syntax. + # timeout: '5 seconds' + + ## PostgreSQL TLS settings. Configuring this requires TLS. + # tls: + ## The server subject name to check the servers certificate against during the validation process. + ## This option is not required if the certificate has a SAN which matches the address options hostname. + # server_name: 'postgres.example.com' + + ## Skip verifying the server certificate entirely. In preference to setting this we strongly recommend you add the + ## certificate or the certificate of the authority signing the certificate to the certificates directory which is + ## defined by the `certificates_directory` option at the top of the configuration. + ## It's important to note the public key should be added to the directory, not the private key. + ## This option is strongly discouraged but may be useful in some self-signed situations where validation is not + ## important to the administrator. + # skip_verify: false + + ## Minimum TLS version for the connection. + # minimum_version: 'TLS1.2' + + ## Maximum TLS version for the connection. + # maximum_version: 'TLS1.3' + + ## The certificate chain used with the private_key if the server requests TLS Client Authentication + ## i.e. Mutual TLS. + # certificate_chain: | + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- + + ## The private key used with the certificate_chain if the server requests TLS Client Authentication + ## i.e. Mutual TLS. + # private_key: | + # -----BEGIN RSA PRIVATE KEY----- + # ... + # -----END RSA PRIVATE KEY----- + +## +## Notification Provider +## +## Notifications are sent to users when they require a password reset, a WebAuthn registration or a TOTP registration. +## The available providers are: filesystem, smtp. You must use only one of these providers. +notifier: + ## You can disable the notifier startup check by setting this to true. + disable_startup_check: false + + ## + ## File System (Notification Provider) + ## + ## Important: Kubernetes (or HA) users must read https://www.authelia.com/t/statelessness + ## + filesystem: + filename: '/config/notification.txt' + + ## + ## SMTP (Notification Provider) + ## + ## Use a SMTP server for sending notifications. Authelia uses the PLAIN or LOGIN methods to authenticate. 
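The dev environment keeps the filesystem notifier, so password-reset notifications for the mock users simply land in `/config/notification.txt`. Switching to a real mail server would mean replacing that block with an SMTP notifier; the sketch below only recombines the template's own example values and is not a working mail setup:

```yaml
# Sketch: SMTP notifier built from this template's example values (illustrative only).
# Only one notification provider may be configured, so the filesystem block would be removed.
notifier:
  smtp:
    address: 'smtp://127.0.0.1:25'
    username: 'test'
    password: 'password'                      # better supplied as a secret
    sender: 'Authelia <admin@example.com>'
    subject: '[Authelia] {title}'
    startup_check_address: 'test@authelia.com'
```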
+ ## [Security] By default Authelia will: + ## - force all SMTP connections over TLS including unauthenticated connections + ## - use the disable_require_tls boolean value to disable this requirement + ## (only works for unauthenticated connections) + ## - validate the SMTP server x509 certificate during the TLS handshake against the hosts trusted certificates + ## (configure in tls section) + # smtp: + ## The address of the SMTP server to connect to in the address common syntax. + # address: 'smtp://127.0.0.1:25' + + ## The connection timeout in the duration common syntax. + # timeout: '5 seconds' + + ## The username used for SMTP authentication. + # username: 'test' + + ## The password used for SMTP authentication. + ## Can also be set using a secret: https://www.authelia.com/c/secrets + # password: 'password' + + ## The sender is used to is used for the MAIL FROM command and the FROM header. + ## If this is not defined and the username is an email, we use the username as this value. This can either be just + ## an email address or the RFC5322 'Name ' format. + # sender: 'Authelia ' + + ## HELO/EHLO Identifier. Some SMTP Servers may reject the default of localhost. + # identifier: 'localhost' + + ## Subject configuration of the emails sent. {title} is replaced by the text from the notifier. + # subject: '[Authelia] {title}' + + ## This address is used during the startup check to verify the email configuration is correct. + ## It's not important what it is except if your email server only allows local delivery. + # startup_check_address: 'test@authelia.com' + + ## By default we require some form of TLS. This disables this check though is not advised. + # disable_require_tls: false + + ## Disables sending HTML formatted emails. + # disable_html_emails: false + + # tls: + ## The server subject name to check the servers certificate against during the validation process. + ## This option is not required if the certificate has a SAN which matches the address options hostname. + # server_name: 'smtp.example.com' + + ## Skip verifying the server certificate entirely. In preference to setting this we strongly recommend you add the + ## certificate or the certificate of the authority signing the certificate to the certificates directory which is + ## defined by the `certificates_directory` option at the top of the configuration. + ## It's important to note the public key should be added to the directory, not the private key. + ## This option is strongly discouraged but may be useful in some self-signed situations where validation is not + ## important to the administrator. + # skip_verify: false + + ## Minimum TLS version for the connection. + # minimum_version: 'TLS1.2' + + ## Maximum TLS version for the connection. + # maximum_version: 'TLS1.3' + + ## The certificate chain used with the private_key if the server requests TLS Client Authentication + ## i.e. Mutual TLS. + # certificate_chain: | + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- + + ## The private key used with the certificate_chain if the server requests TLS Client Authentication + ## i.e. Mutual TLS. + # private_key: | + # -----BEGIN RSA PRIVATE KEY----- + # ... 
+ # -----END RSA PRIVATE KEY----- + +## +## Identity Providers +## +# identity_providers: + + ## + ## OpenID Connect (Identity Provider) + ## + ## It's recommended you read the documentation before configuration of this section: + ## https://www.authelia.com/c/oidc + # oidc: + ## The hmac_secret is used to sign OAuth2 tokens (authorization code, access tokens and refresh tokens). + ## HMAC Secret can also be set using a secret: https://www.authelia.com/c/secrets + # hmac_secret: 'this_is_a_secret_abc123abc123abc' + + ## The JWK's issuer option configures multiple JSON Web Keys. It's required that at least one of the JWK's + ## configured has the RS256 algorithm. For RSA keys (RS or PS) the minimum is a 2048 bit key. + # jwks: + # - + ## Key ID embedded into the JWT header for key matching. Must be an alphanumeric string with 7 or less characters. + ## This value is automatically generated if not provided. It's recommended to not configure this. + # key_id: 'example' + + ## The key algorithm used with this key. + # algorithm: 'RS256' + + ## The key use expected with this key. Currently only 'sig' is supported. + # use: 'sig' + + ## Required Private Key in PEM DER form. + # key: | + # -----BEGIN RSA PRIVATE KEY----- + # ... + # -----END RSA PRIVATE KEY----- + + + ## Optional matching certificate chain in PEM DER form that matches the key. All certificates within the chain + ## must be valid and current, and from top to bottom each certificate must be signed by the subsequent one. + # certificate_chain: | + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- + + ## Enables additional debug messages. + # enable_client_debug_messages: false + + ## SECURITY NOTICE: It's not recommended changing this option and values below 8 are strongly discouraged. + # minimum_parameter_entropy: 8 + + ## SECURITY NOTICE: It's not recommended changing this option, and highly discouraged to have it set to 'never' + ## for security reasons. + # enforce_pkce: 'public_clients_only' + + ## SECURITY NOTICE: It's not recommended changing this option. We encourage you to read the documentation and fully + ## understanding it before enabling this option. + # enable_jwt_access_token_stateless_introspection: false + + ## The signing algorithm used for signing the discovery and metadata responses. An issuer JWK with a matching + ## algorithm must be available when configured. Most clients completely ignore this and it has a performance cost. + # discovery_signed_response_alg: 'none' + + ## The signing key id used for signing the discovery and metadata responses. An issuer JWK with a matching key id + ## must be available when configured. Most clients completely ignore this and it has a performance cost. + # discovery_signed_response_key_id: '' + + ## Authorization Policies which can be utilized by clients. The 'policy_name' is an arbitrary value that you pick + ## which is utilized as the value for the 'authorization_policy' on the client. + # authorization_policies: + # policy_name: + # default_policy: 'two_factor' + # rules: + # - policy: 'one_factor' + # subject: 'group:services' + + ## The lifespans configure the expiration for these token types in the duration common syntax. In addition to this + ## syntax the lifespans can be customized per-client. + # lifespans: + ## Configures the default/fallback lifespan for given token types. 
This behaviour applies to all clients and all + ## grant types but you can override this behaviour using the custom lifespans. + # access_token: '1 hour' + # authorize_code: '1 minute' + # id_token: '1 hour' + # refresh_token: '90 minutes' + + ## Cross-Origin Resource Sharing (CORS) settings. + # cors: + ## List of endpoints in addition to the metadata endpoints to permit cross-origin requests on. + # endpoints: + # - 'authorization' + # - 'pushed-authorization-request' + # - 'token' + # - 'revocation' + # - 'introspection' + # - 'userinfo' + + ## List of allowed origins. + ## Any origin with https is permitted unless this option is configured or the + ## allowed_origins_from_client_redirect_uris option is enabled. + # allowed_origins: + # - 'https://example.com' + + ## Automatically adds the origin portion of all redirect URI's on all clients to the list of allowed_origins, + ## provided they have the scheme http or https and do not have the hostname of localhost. + # allowed_origins_from_client_redirect_uris: false + + ## Clients is a list of known clients and their configuration. + # clients: + # - + ## The Client ID is the OAuth 2.0 and OpenID Connect 1.0 Client ID which is used to link an application to a + ## configuration. + # client_id: 'myapp' + + ## The description to show to users when they end up on the consent screen. Defaults to the ID above. + # client_name: 'My Application' + + ## The client secret is a shared secret between Authelia and the consumer of this client. + # yamllint disable-line rule:line-length + # client_secret: '$pbkdf2-sha512$310000$c8p78n7pUMln0jzvd4aK4Q$JNRBzwAo0ek5qKn50cFzzvE9RXV88h1wJn5KGiHrD0YKtZaR/nCb2CJPOsKaPK0hjf.9yHxzQGZziziccp6Yng' # The digest of 'insecure_secret'. + + ## Sector Identifiers are occasionally used to generate pairwise subject identifiers. In most cases this is not + ## necessary. It is critical to read the documentation for more information. + # sector_identifier_uri: 'https://example.com/sector.json' + + ## Sets the client to public. This should typically not be set, please see the documentation for usage. + # public: false + + ## Redirect URI's specifies a list of valid case-sensitive callbacks for this client. + # redirect_uris: + # - 'https://oidc.example.com:8080/oauth2/callback' + + ## Request URI's specifies a list of valid case-sensitive TLS-secured URIs for this client for use as + ## URIs to fetch Request Objects. + # request_uris: + # - 'https://oidc.example.com:8080/oidc/request-object.jwk' + + ## Audience this client is allowed to request. + # audience: [] + + ## Scopes this client is allowed to request. + # scopes: + # - 'openid' + # - 'groups' + # - 'email' + # - 'profile' + + ## Grant Types configures which grants this client can obtain. + ## It's not recommended to define this unless you know what you're doing. + # grant_types: + # - 'authorization_code' + + ## Response Types configures which responses this client can be sent. + ## It's not recommended to define this unless you know what you're doing. + # response_types: + # - 'code' + + ## Response Modes configures which response modes this client supports. + # response_modes: + # - 'form_post' + # - 'query' + + ## The policy to require for this client; one_factor or two_factor. Can also be the key names for the + ## authorization policies section. + # authorization_policy: 'two_factor' + + ## The custom lifespan name to use for this client. This must be configured independent of the client before + ## utilization. 
Custom lifespans are reusable similar to authorization policies. + # lifespan: '' + + ## The consent mode controls how consent is obtained. + # consent_mode: 'auto' + + ## This value controls the duration a consent on this client remains remembered when the consent mode is + ## configured as 'auto' or 'pre-configured' in the duration common syntax. + # pre_configured_consent_duration: '1 week' + + ## Requires the use of Pushed Authorization Requests for this client when set to true. + # require_pushed_authorization_requests: false + + ## Enforces the use of PKCE for this client when set to true. + # require_pkce: false + + ## Enforces the use of PKCE for this client when configured, and enforces the specified challenge method. + ## Options are 'plain' and 'S256'. + # pkce_challenge_method: 'S256' + + ## The permitted client authentication method for the Token Endpoint for this client. + ## For confidential client types this value defaults to 'client_secret_basic' and for the public client types it + ## defaults to 'none' per the specifications. + # token_endpoint_auth_method: 'client_secret_basic' + + ## The permitted client authentication signing algorithm for the Token Endpoint for this client when using + ## the 'client_secret_jwt' or 'private_key_jwt' token_endpoint_auth_method. + # token_endpoint_auth_signing_alg: 'RS256' + + ## The signing algorithm which must be used for request objects. A client JWK with a matching algorithm must be + ## available if configured. + # request_object_signing_alg: 'RS256' + + ## The signing algorithm used for signing the authorization response. An issuer JWK with a matching algorithm + ## must be available when configured. Configuring this value enables the JWT Secured Authorization Response + ## Mode (JARM) for this client. JARM is not understood by a majority of clients so you should only configure + ## this when you know it's supported. + ## Has no effect if authorization_signed_response_key_id is configured. + # authorization_signed_response_alg: 'none' + + ## The signing key id used for signing the authorization response. An issuer JWK with a matching key id must be + ## available when configured. Configuring this value enables the JWT Secured Authorization Response Mode (JARM) + ## for this client. JARM is not understood by a majority of clients so you should only configure this when you + ## know it's supported. + # authorization_signed_response_key_id: '' + + ## The signing algorithm used for ID Tokens. An issuer JWK with a matching algorithm must be available when + ## configured. Has no effect if id_token_signed_response_key_id is configured. + # id_token_signed_response_alg: 'RS256' + + ## The signing key id used for ID Tokens. An issuer JWK with a matching key id must be available when + ## configured. + # id_token_signed_response_key_id: '' + + ## The signing algorithm used for Access Tokens. An issuer JWK with a matching algorithm must be available. + ## Has no effect if access_token_signed_response_key_id is configured. Values other than 'none' enable RFC9068 + ## for this client. + # access_token_signed_response_alg: 'none' + + ## The signing key id used for Access Tokens. An issuer JWK with a matching key id must be available when + ## configured. Values other than a blank value enable RFC9068 for this client. + # access_token_signed_response_key_id: '' + + ## The signing algorithm used for User Info responses. An issuer JWK with a matching algorithm must be + ## available. Has no effect if userinfo_signing_key_id is configured. 
+ # userinfo_signed_response_alg: 'none' + + ## The signing key id used for User Info responses. An issuer JWK with a matching key id must be available when + ## configured. + # userinfo_signed_response_key_id: '' + + ## The signing algorithm used for Introspection responses. An issuer JWK with a matching algorithm must be + ## available when configured. Has no effect if introspection_signed_response_key_id is configured. + # introspection_signed_response_alg: 'none' + + ## The signing key id used for Introspection responses. An issuer JWK with a matching key id must be available + ## when configured. + # introspection_signed_response_key_id: '' + + ## Trusted public keys configuration for request object signing for things such as 'private_key_jwt'. + ## URL of the HTTPS endpoint which serves the keys. Please note the 'jwks_uri' and the 'jwks' option below + ## are mutually exclusive. + # jwks_uri: 'https://app.example.com/jwks.json' + + ## Trusted public keys configuration for request object signing for things such as 'private_key_jwt'. + ## List of JWKs known and registered with this client. It's recommended to use the 'jwks_uri' option if + ## available due to key rotation. Please note the 'jwks' and the 'jwks_uri' option above are mutually exclusive. + # jwks: + # - + ## Key ID used to match the JWT's to an individual identifier. This option is required if configured. + # key_id: 'example' + + ## The key algorithm expected with this key. + # algorithm: 'RS256' + + ## The key use expected with this key. Currently only 'sig' is supported. + # use: 'sig' + + ## Required Public Key in PEM DER form. + # key: | + # -----BEGIN RSA PUBLIC KEY----- + # ... + # -----END RSA PUBLIC KEY----- + + ## The matching certificate chain in PEM DER form that matches the key if available. + # certificate_chain: | + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- +... 
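+ ## A hedged sketch (assuming OpenSSL is available; file names are illustrative, not required by Authelia): if the
+ ## oidc section above is enabled, the secrets it references could be generated for local development with, e.g.:
+ ##   openssl rand -hex 64                           # candidate value for 'hmac_secret'
+ ##   openssl genrsa -out oidc.issuer.key.pem 2048   # 2048-bit RSA private key for an issuer 'jwks' entry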
\ No newline at end of file diff --git a/.devcontainer/devfiles/authelia/users-database.yml b/.devcontainer/devfiles/authelia/users-database.yml new file mode 100644 index 000000000..992c63460 --- /dev/null +++ b/.devcontainer/devfiles/authelia/users-database.yml @@ -0,0 +1,37 @@ +# https://www.authelia.com/reference/guides/passwords/#yaml-format + +# All passwords in this file are "password" + +users: + admin: + disabled: false + displayname: 'Ed Ministraitor' + password: '$argon2id$v=19$m=65536,t=3,p=4$965dgqVamsIBA0SOboHemw$wa3VhfmAjrzTc17yDiKLsjodVCR62REwNkctxyuFIc4' + email: 'admin@tira.local' + groups: + - 'admins' + - 'tira_reviewer' + organizer1: + disabled: false + displayname: 'Big Wig' + password: '$argon2id$v=19$m=65536,t=3,p=4$965dgqVamsIBA0SOboHemw$wa3VhfmAjrzTc17yDiKLsjodVCR62REwNkctxyuFIc4' + email: 'org1@tira.local' + groups: [] + organizer2: + disabled: false + displayname: 'Even Longer' + password: '$argon2id$v=19$m=65536,t=3,p=4$965dgqVamsIBA0SOboHemw$wa3VhfmAjrzTc17yDiKLsjodVCR62REwNkctxyuFIc4' + email: 'org2@tira.local' + groups: [] + user1: + disabled: false + displayname: 'John Doe' + password: '$argon2id$v=19$m=65536,t=3,p=4$965dgqVamsIBA0SOboHemw$wa3VhfmAjrzTc17yDiKLsjodVCR62REwNkctxyuFIc4' + email: 'user1@tira.local' + groups: [] + user2: + disabled: false + displayname: 'Jane Doe' + password: '$argon2id$v=19$m=65536,t=3,p=4$965dgqVamsIBA0SOboHemw$wa3VhfmAjrzTc17yDiKLsjodVCR62REwNkctxyuFIc4' + email: 'user2@tira.local' + groups: [] \ No newline at end of file diff --git a/.devcontainer/devfiles/nginx/auth.conf b/.devcontainer/devfiles/nginx/auth.conf new file mode 100644 index 000000000..752f5614f --- /dev/null +++ b/.devcontainer/devfiles/nginx/auth.conf @@ -0,0 +1,36 @@ +#server { +# listen 8081; +# +# return 301 https://$server_name$request_uri; +#} + +# Upgrade WebSocket if requested, otherwise use keepalive +map $http_upgrade $connection_upgrade_keepalive { + default upgrade; + '' ''; +} + +server { + listen 8081 ssl http2; + + include /config/nginx/snippets/ssl.conf; + + # Support for websocket + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade_keepalive; + + set $upstream http://auth.tira.local:9091; + + location / { + include /config/nginx/snippets/proxy.conf; + proxy_pass $upstream; + } + + location /api/verify { + proxy_pass $upstream; + } + + location /api/authz/ { + proxy_pass $upstream; + } +} diff --git a/.devcontainer/devfiles/nginx/certs/tira-dev-selfsigned.crt b/.devcontainer/devfiles/nginx/certs/tira-dev-selfsigned.crt new file mode 100644 index 000000000..6b785827c --- /dev/null +++ b/.devcontainer/devfiles/nginx/certs/tira-dev-selfsigned.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDSzCCAjOgAwIBAgIUDLi/PlIIVovVNR9vUDB8YoYdFzgwDQYJKoZIhvcNAQEL +BQAwNTELMAkGA1UEBhMCREUxEzARBgNVBAgMClNvbWUtU3RhdGUxETAPBgNVBAoM +CFRJUkEtZGV2MB4XDTI0MDUwMzA5MzcwMFoXDTI1MDUwMzA5MzcwMFowNTELMAkG +A1UEBhMCREUxEzARBgNVBAgMClNvbWUtU3RhdGUxETAPBgNVBAoMCFRJUkEtZGV2 +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtnYiWxJpVv7Zjm1+VDPl +IvqJ7guZ7TBrqaksBpQiePrz9Kv34rXIabPPq/WsYvPMVA+1Dq/e5VYhEIABvf8K +T3tNLkt2U75aLT8l+s6dTd2fkgt3NJSTn7wCHXhB/7VUPbZ/WOq3v++4DA2y2dYV +pdfvW625lINMDCOt/Fr2dDNTD//QJsymV2TEFqSQTAhuenQfCaU/6uUnmwFXWwb3 +NvthTvlqAQgCmv50UFCOCA/WNTBehQ4Js0JC+6L1YCIAmFOJNcTRubkHX/sKZCrZ +C0sK9WhgwY6CHyQ2UvlmY5K106wxtFpqT7Nd8PSPHcXMMQlyLJL/GLVXeq1UvSuN +9wIDAQABo1MwUTAdBgNVHQ4EFgQUgOLjS7OONM7laiEJACGZoeLZAYEwHwYDVR0j +BBgwFoAUgOLjS7OONM7laiEJACGZoeLZAYEwDwYDVR0TAQH/BAUwAwEB/zANBgkq 
+hkiG9w0BAQsFAAOCAQEAJvqT49sWyOm/NL+gabNcxFXK+9iEl34tmnnrkmN4s1mt +K/kPUWQorpVCgFqyU9gp4LHCks/bEoN+f7Tgka6p6Jxc+I+IJk0j3hqCn+9CM2cN +sRNtJM6tsS/xpH9VbhFzAq3LIuH3dlmWQoFBoSsVXNww2lDUuF1e4tnDrvx5GeFa +6+aurONgSvh0fJXs2Bkv7TimncP8eEj7Q45oIgB0qqXx7FyW7BuLvS5XSPadiLL0 +Pu3t7w883GvW1Y27qE9pBzhPnj6S4Xt0TzUln9z/2xy8xiqGOQFz2srAWsJGX/k4 +M4BQ1n5EZnsmKKmWZFbTmMuHwwzOkHyaFXMCCgSQWQ== +-----END CERTIFICATE----- diff --git a/.devcontainer/devfiles/nginx/certs/tira-dev-selfsigned.key b/.devcontainer/devfiles/nginx/certs/tira-dev-selfsigned.key new file mode 100644 index 000000000..d110c714e --- /dev/null +++ b/.devcontainer/devfiles/nginx/certs/tira-dev-selfsigned.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC2diJbEmlW/tmO +bX5UM+Ui+onuC5ntMGupqSwGlCJ4+vP0q/fitchps8+r9axi88xUD7UOr97lViEQ +gAG9/wpPe00uS3ZTvlotPyX6zp1N3Z+SC3c0lJOfvAIdeEH/tVQ9tn9Y6re/77gM +DbLZ1hWl1+9brbmUg0wMI638WvZ0M1MP/9AmzKZXZMQWpJBMCG56dB8JpT/q5Seb +AVdbBvc2+2FO+WoBCAKa/nRQUI4ID9Y1MF6FDgmzQkL7ovVgIgCYU4k1xNG5uQdf ++wpkKtkLSwr1aGDBjoIfJDZS+WZjkrXTrDG0WmpPs13w9I8dxcwxCXIskv8YtVd6 +rVS9K433AgMBAAECggEAFxL7HEOd9lugkp5D1A0uBHg6OimAa++W0JataSA8Tcqv ++uNGeVvzaMlNpsRDlSNEqzrDSFoIWOiaoitgoluJkmyYNusMdIxa0Lmu1A1jSjtl +LcMXw5qod+epktAoSdiaEHsu6+/e5ehrANHjVMtPOIZauuAjhbr8QoAT8BkRbWHw +fyAFE+f+s+iU646TpHAB6vOt78tqwx5i3fjHsc2B1QxQrscd70r4LUgwM2KK4pZ7 +ODiWz/12YH8qpWawZI+0N1OZMGG8+Y3SyepWmpZwo/spYzNrOUQ3FWmuJsMLw4Fz +zFz9VVStx6/kj3f90E1jN89qoMCSojtlBjYycJz8sQKBgQDzaaAA7NMZSxO2PBUM +hde0WicFVuj7JrYZcbsOpiUSb5Ns9g4gW3D6nA10YqWiuEckENpB3xpbjbsosGXh +B6sZJ9bYn+HkH46pdlaZfF4MIhuYhFFwYfTVa+Xte7UIBRNyy6wJSVy3knHMZYIq +rLnAk77Fw1ZisfVv4r4+y0JtUQKBgQC/5Z5Gh/DPrt1QgdaUBrzjh00w+9+8weIX +bqVaqR4t0uBsY1hVYIP+lR79AdSXBdXkKj30uONv2xG99JCibkd3SXMVO0wjqezI +7ef9IHVvZIzo7LD+SHYgW9iTlotJxmz0wTsg6EEhPg/GBg7IfbxvNCCLE7cfvvYr +CRu3E4pUxwKBgAJzidz4hZe3ndcz+7JIlseGMOZo3afJ2AOhlZSsL6tj3EQ4piPF +R/n+IFSPucNNv8mpMcmACa/qWXwraps9jIeQUyWTVp8JglJKksyw5/GPVl/O1VMg +aWrMz1/44vXVt0BstKrTRk/C9cptK0ZqvuQlmMI9GV2dFS+vROM1ANJBAoGAXoGj +VXEVs/KWpFONbhFzbUrxRTKGmduSkZ+4Z9HqPv8f78Fte36xAuxF+qywGkCRenEj +WibEPA5Tyo1rvdM59xGBT++IK/g/2STxn8Qgkd+fVgdbk5/0LTOOkgEOUCMc/d08 +VZSMnuaFtiy9kxf5eXrenB/1i/jsG3283IkPE3kCgYBDGZ/NHAVGNKq93SEFgZyo +/vRCj3AfXMf2jn8+7AK1lr/UB10vu9KSxUEFiyt6vvo76YET0AmvQPTIXVcVt7wK +8TDlU8Jv8wYHPqz/idAGWj9RbRF9yU+djcNZp+U7qBjNvO6mvJrlejaGWpgTFBAa +8XB/pod/p7EexTwD0kNrKA== +-----END PRIVATE KEY----- diff --git a/.devcontainer/devfiles/nginx/snippets/authelia-authrequest.conf b/.devcontainer/devfiles/nginx/snippets/authelia-authrequest.conf new file mode 100644 index 000000000..c6111f973 --- /dev/null +++ b/.devcontainer/devfiles/nginx/snippets/authelia-authrequest.conf @@ -0,0 +1,32 @@ +## Send a subrequest to Authelia to verify if the user is authenticated and has permission to access the resource. +auth_request /internal/authelia/authz; + +## Save the upstream metadata response headers from Authelia to variables. +auth_request_set $user $upstream_http_remote_user; +auth_request_set $groups $upstream_http_remote_groups; +auth_request_set $name $upstream_http_remote_name; +auth_request_set $email $upstream_http_remote_email; + +## Inject the metadata response headers from the variables into the request made to the backend. +proxy_set_header X-Disraptor-User $user; +proxy_set_header X-Disraptor-Groups $groups; +proxy_set_header Remote-Email $email; +proxy_set_header Remote-Name $name; + +## Configure the redirection when the authz failure occurs. 
Lines starting with 'Modern Method' and 'Legacy Method' +## should be commented / uncommented as pairs. The modern method uses the session cookies configuration's authelia_url +## value to determine the redirection URL here. It's much simpler and compatible with the mutli-cookie domain easily. + +## Modern Method: Set the $redirection_url to the Location header of the response to the Authz endpoint. +auth_request_set $redirection_url $upstream_http_location; + +## Modern Method: When there is a 401 response code from the authz endpoint redirect to the $redirection_url. +error_page 401 =302 $redirection_url; + +## Legacy Method: Set $target_url to the original requested URL. +## This requires http_set_misc module, replace 'set_escape_uri' with 'set' if you don't have this module. +# set_escape_uri $target_url $scheme://$http_host$request_uri; + +## Legacy Method: When there is a 401 response code from the authz endpoint redirect to the portal with the 'rd' +## URL parameter set to $target_url. This requires users update 'auth.example.com/' with their external authelia URL. +# error_page 401 =302 https://auth.example.com/?rd=$target_url; \ No newline at end of file diff --git a/.devcontainer/devfiles/nginx/snippets/authelia-location.conf b/.devcontainer/devfiles/nginx/snippets/authelia-location.conf new file mode 100644 index 000000000..841e525a8 --- /dev/null +++ b/.devcontainer/devfiles/nginx/snippets/authelia-location.conf @@ -0,0 +1,32 @@ +set $upstream_authelia http://auth.tira.local:9091/api/authz/auth-request; + +## Virtual endpoint created by nginx to forward auth requests. +location /internal/authelia/authz { + ## Essential Proxy Configuration + internal; + proxy_pass $upstream_authelia; + + ## Headers + ## The headers starting with X-* are required. + proxy_set_header X-Original-Method $request_method; + proxy_set_header X-Original-URL $scheme://$http_host$request_uri; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header Content-Length ""; + proxy_set_header Connection ""; + + ## Basic Proxy Configuration + proxy_pass_request_body off; + proxy_next_upstream error timeout invalid_header http_500 http_502 http_503; # Timeout if the real server is dead + proxy_redirect http:// $scheme://; + proxy_http_version 1.1; + proxy_cache_bypass $cookie_session; + proxy_no_cache $cookie_session; + proxy_buffers 4 32k; + client_body_buffer_size 128k; + + ## Advanced Proxy Configuration + send_timeout 5m; + proxy_read_timeout 240; + proxy_send_timeout 240; + proxy_connect_timeout 240; +} \ No newline at end of file diff --git a/.devcontainer/devfiles/nginx/snippets/proxy.conf b/.devcontainer/devfiles/nginx/snippets/proxy.conf new file mode 100644 index 000000000..0551bcd60 --- /dev/null +++ b/.devcontainer/devfiles/nginx/snippets/proxy.conf @@ -0,0 +1,34 @@ +## Headers +proxy_set_header Host $host; +proxy_set_header X-Original-URL $scheme://$http_host$request_uri; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_set_header X-Forwarded-Host $http_host; +proxy_set_header X-Forwarded-URI $request_uri; +proxy_set_header X-Forwarded-Ssl on; +proxy_set_header X-Forwarded-For $remote_addr; +proxy_set_header X-Real-IP $remote_addr; + +## Basic Proxy Configuration +client_body_buffer_size 128k; +proxy_next_upstream error timeout invalid_header http_500 http_502 http_503; ## Timeout if the real server is dead. 
+proxy_redirect http:// $scheme://; +proxy_http_version 1.1; +proxy_cache_bypass $cookie_session; +proxy_no_cache $cookie_session; +proxy_buffers 64 256k; + +## Trusted Proxies Configuration +## Please read the following documentation before configuring this: +## https://www.authelia.com/integration/proxies/nginx/#trusted-proxies +# set_real_ip_from 10.0.0.0/8; +# set_real_ip_from 172.16.0.0/12; +# set_real_ip_from 192.168.0.0/16; +# set_real_ip_from fc00::/7; +real_ip_header X-Forwarded-For; +real_ip_recursive on; + +## Advanced Proxy Configuration +send_timeout 5m; +proxy_read_timeout 360; +proxy_send_timeout 360; +proxy_connect_timeout 360; \ No newline at end of file diff --git a/.devcontainer/devfiles/nginx/snippets/ssl.conf b/.devcontainer/devfiles/nginx/snippets/ssl.conf new file mode 100644 index 000000000..c52b458b7 --- /dev/null +++ b/.devcontainer/devfiles/nginx/snippets/ssl.conf @@ -0,0 +1,2 @@ +ssl_certificate certs/tira-dev-selfsigned.crt; +ssl_certificate_key certs/tira-dev-selfsigned.key; \ No newline at end of file diff --git a/.devcontainer/devfiles/nginx/tira-backend.conf b/.devcontainer/devfiles/nginx/tira-backend.conf new file mode 100644 index 000000000..7e481df87 --- /dev/null +++ b/.devcontainer/devfiles/nginx/tira-backend.conf @@ -0,0 +1,24 @@ +#server { +# listen 8080; +# +# return 301 https://$server_name$request_uri; +#} + + +server { + listen 8080 ssl http2; + # server api.tira.local; + + include /config/nginx/snippets/ssl.conf; + include /config/nginx/snippets/authelia-location.conf; + + set $upstream http://www.tira.local:8080; + + location / { + include /config/nginx/snippets/proxy.conf; + include /config/nginx/snippets/authelia-authrequest.conf; + add_header 'Access-Control-Allow-Origin' 'https://127.0.0.1:8082'; + add_header 'Access-Control-Allow-Credentials' 'true'; + proxy_pass $upstream; + } +} \ No newline at end of file diff --git a/.devcontainer/devfiles/nginx/tira.conf b/.devcontainer/devfiles/nginx/tira.conf new file mode 100644 index 000000000..95970c20f --- /dev/null +++ b/.devcontainer/devfiles/nginx/tira.conf @@ -0,0 +1,30 @@ +#server { +# listen 8082; +# +# return 301 https://$server_name$request_uri; +#} + +# Upgrade WebSocket if requested, otherwise use keepalive +map $http_upgrade $connection_upgrade_keepalive { + default upgrade; + '' ''; +} + +server { + listen 8082 ssl http2; + + include /config/nginx/snippets/ssl.conf; + include /config/nginx/snippets/authelia-location.conf; + + # Support for websocket + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade_keepalive; + + set $upstream http://www.tira.local:3000; + + location / { + include /config/nginx/snippets/proxy.conf; + include /config/nginx/snippets/authelia-authrequest.conf; + proxy_pass $upstream; + } +} \ No newline at end of file diff --git a/.devcontainer/docker-compose.dev.yml b/.devcontainer/docker-compose.dev.yml new file mode 100644 index 000000000..7b7cc8261 --- /dev/null +++ b/.devcontainer/docker-compose.dev.yml @@ -0,0 +1,35 @@ +services: + devenv: + build: + context: ./ + dockerfile: Dockerfile.dev + restart: unless-stopped + environment: + # TIRA Frontend Environment Variables + VITE_TIRA_REST_BASE_URL: https://127.0.0.1:8080 + VITE_TIRA_GRPC_BASE_URL: https://127.0.0.1:8080 + external_links: + - "auth:auth.tira.local" + - "nginx:api.tira.local" + auth: + image: ghcr.io/authelia/authelia + restart: unless-stopped + volumes: + - ./devfiles/authelia/configuration.dev.yml:/config/configuration.yml + - 
./devfiles/authelia/users-database.yml:/config/users_database.yml + nginx: + image: lscr.io/linuxserver/nginx + restart: unless-stopped + #ports: + # - "8080:8080" + # - "8081:8081" + # - "8082:8082" + external_links: + - "auth:auth.tira.local" + - "devenv:www.tira.local" + volumes: + - ./devfiles/nginx/tira.conf:/config/nginx/site-confs/tira.conf + - ./devfiles/nginx/tira-backend.conf:/config/nginx/site-confs/tira-backend.conf + - ./devfiles/nginx/auth.conf:/config/nginx/site-confs/auth.conf + - ./devfiles/nginx/snippets/:/config/nginx/snippets/ + - ./devfiles/nginx/certs/:/etc/nginx/certs/ diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml new file mode 100644 index 000000000..829d5f3a4 --- /dev/null +++ b/.devcontainer/docker-compose.yml @@ -0,0 +1,25 @@ +services: + # Update this to the name of the service you want to work with in your docker-compose.yml file + devenv: + # Uncomment if you want to override the service's Dockerfile to one in the .devcontainer + # folder. Note that the path of the Dockerfile and context is relative to the *primary* + # docker-compose.yml file (the first in the devcontainer.json "dockerComposeFile" + # array). The sample below assumes your primary file is in the root of your project. + # + # build: + # context: . + # dockerfile: .devcontainer/Dockerfile + + volumes: + # Update this to wherever you want VS Code to mount the folder of your project + - ..:/workspaces/tira:cached + + # Uncomment the next four lines if you will use a ptrace-based debugger like C++, Go, and Rust. + # cap_add: + # - SYS_PTRACE + # security_opt: + # - seccomp:unconfined + + # Overrides default command so things don't shut down after the process ends. + command: /bin/sh -c "while sleep 1000; do :; done" + diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml deleted file mode 100644 index ca176da92..000000000 --- a/.github/workflows/build-docker-image.yml +++ /dev/null @@ -1,27 +0,0 @@ -name: Docker Image -on: [push] - -jobs: - image: - - runs-on: ubuntu-latest - timeout-minutes: 15 - steps: - - - name: Checkout - uses: actions/checkout@v3 - - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - name: Build Image - run: | - cd application - echo running on branch ${GITHUB_REF##*/} - docker login --username ${{ secrets.DOCKER_USERNAME }} --password ${{ secrets.DOCKER_TOKEN }} registry.webis.de - docker build --tag webis/tira-application:build-from-branch-${GITHUB_REF##*/} -f Dockerfile.application .. - docker tag webis/tira-application:build-from-branch-${GITHUB_REF##*/} registry.webis.de/code-lib/public-images/tira-application:build-from-branch-${GITHUB_REF##*/} - docker push registry.webis.de/code-lib/public-images/tira-application:build-from-branch-${GITHUB_REF##*/} - diff --git a/.github/workflows/linters.yml b/.github/workflows/linters.yml new file mode 100644 index 000000000..a8f3b1f9c --- /dev/null +++ b/.github/workflows/linters.yml @@ -0,0 +1,83 @@ +name: Linters + +on: + push: {} + workflow_dispatch: {} + +jobs: + mypy: + runs-on: ubuntu-latest + timeout-minutes: 15 + + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.9 + uses: actions/setup-python@v4 + with: + python-version: 3.9 + - name: Install mypy + run: pip3 install mypy + - name: Run mypy on backend + working-directory: ${{github.workspace}}/application + run: | + mkdir .mypy_cache + mypy . 
--non-interactive --cache-dir=.mypy_cache/ + - name: Run mypy on python-client + working-directory: ${{github.workspace}}/python-client + run: | + mkdir .mypy_cache + mypy .--non-interactive --cache-dir=.mypy_cache/ + + flake8: + runs-on: ubuntu-latest + timeout-minutes: 15 + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.9 + uses: actions/setup-python@v4 + with: + python-version: 3.9 + - name: Install flake8 + run: pip3 install flake8 + - name: Run flake8 on backend + working-directory: ${{github.workspace}}/application + run: flake8 . + - name: Run flake8 on python-client + working-directory: ${{github.workspace}}/python-client + run: flake8 . + + black: + runs-on: ubuntu-latest + timeout-minutes: 15 + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.9 + uses: actions/setup-python@v4 + with: + python-version: 3.9 + - name: Install black + run: pip3 install black + - name: Run black on backend + working-directory: ${{github.workspace}}/application + run: black --check . + - name: Run black on python-client + working-directory: ${{github.workspace}}/python-client + run: black --check . + + isort: + runs-on: ubuntu-latest + timeout-minutes: 15 + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.9 + uses: actions/setup-python@v4 + with: + python-version: 3.9 + - name: Install isort + run: pip3 install isort + - name: Run isort on backend + working-directory: ${{github.workspace}}/application + run: isort --check . + - name: Run isort on python-client + working-directory: ${{github.workspace}}/python-client + run: isort --check . \ No newline at end of file diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 000000000..eb889403f --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,52 @@ +name: Publish Docker images + +on: + release: + types: [published] + +env: + REGISTRY: ghcr.io + +jobs: + build-and-push: + name: Push Docker image to GHCR + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Log in to GHCR + uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Extract metadata (tags, labels) for Docker + id: meta-frontend + uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7 + with: + images: ${{ env.REGISTRY }}/tira-io/tira-frontend + - name: Publish tira-frontend image + uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4 + with: + context: ${{ github.workspace }}/frontend + file: ${{ github.workspace }}/frontend/Dockerfile.prod + push: true + tags: ${{ steps.meta-frontend.outputs.tags }} + labels: ${{ steps.meta-frontend.outputs.labels }} + + - name: Extract metadata (tags, labels) for Docker + id: meta-backend + uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7 + with: + images: ${{ env.REGISTRY }}/tira-io/tira-backend + - name: Publish tira-backend image + uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4 + with: + context: ${{ github.workspace }}/ + file: ${{ github.workspace }}/application/Dockerfile.prod + push: true + tags: ${{ steps.meta-backend.outputs.tags }} + labels: ${{ steps.meta-backend.outputs.labels }} diff --git a/.github/workflows/run-all-tests.yml b/.github/workflows/run-all-tests.yml deleted file mode 100644 index 687842bb2..000000000 --- 
a/.github/workflows/run-all-tests.yml +++ /dev/null @@ -1,25 +0,0 @@ -name: Unit Tests - -on: [push] - -jobs: - build: - - runs-on: ubuntu-latest - timeout-minutes: 15 - strategy: - matrix: - python-version: ["3.8", "3.9"] - - steps: - - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - name: Run tests - run: | - mkdir -p application/src/tira/static/tira/frontend-vuetify/ - make setup - make tests - diff --git a/.github/workflows/test-python-client-on-many-python-versions.yml b/.github/workflows/test-python-client-on-many-python-versions.yml deleted file mode 100644 index 7f61cb613..000000000 --- a/.github/workflows/test-python-client-on-many-python-versions.yml +++ /dev/null @@ -1,27 +0,0 @@ -name: Test Python Client on Many Python Versions -on: [push] - -jobs: - image: - runs-on: ubuntu-latest - timeout-minutes: 15 - strategy: - matrix: - python-version: ["3.8", "3.9", "3.10", "3.11"] - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - name: Install Dependencies - working-directory: ${{github.workspace}}/python-client - run: | - sudo apt-get install -y openjdk-11-jdk - pip3 install .[test,dev] - - name: Running Tests - working-directory: ${{github.workspace}}/python-client - run: | - echo running on branch ${GITHUB_REF##*/} - pytest diff --git a/.github/workflows/test-python-client.yml b/.github/workflows/test-python-client.yml deleted file mode 100644 index e122df229..000000000 --- a/.github/workflows/test-python-client.yml +++ /dev/null @@ -1,20 +0,0 @@ -name: Test Python Client -on: [push] - -jobs: - image: - runs-on: ubuntu-latest - timeout-minutes: 15 - steps: - - name: Checkout - uses: actions/checkout@v3 - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - name: Build Image - run: | - cd python-client - echo running on branch ${GITHUB_REF##*/} - make run-tests - diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 000000000..37c382528 --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,77 @@ +name: Unit Tests + +on: [push] + +jobs: + backend-tests: + runs-on: ubuntu-latest + timeout-minutes: 15 + strategy: + matrix: + python-version: ["3.11", "3.12"] + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Install TIRA Python Client from Repo + working-directory: ${{github.workspace}}/python-client + run: | + # Install tira from the repository since the pip version may not be up-to-date enough. 
+ # The install musst be editable (-e) since importing from tira fails otherwise + pip3 install .[dev,test] + - name: Install dependencies + working-directory: ${{github.workspace}}/application + run: | + pip3 install .[dev,test] + make setup + env: + TIRA_ROOT: ${{github.workspace}}/model/src + TIRA_CONFIG: ${{github.workspace}}/application/config/tira-application-config.yml + DISCOURSE_API_KEY: "I am so secret" + - name: Run backend tests + working-directory: ${{github.workspace}}/application/test + run: pytest + + frontend-tests: + runs-on: ubuntu-latest + timeout-minutes: 15 + steps: + - uses: actions/checkout@v4 + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 20.x + - name: Install dependencies + working-directory: ${{github.workspace}}/frontend + run: yarn --frozen-lockfile + - name: Run frontend tests + working-directory: ${{github.workspace}}/frontend + run: yarn test + + python-client-test: + runs-on: ubuntu-latest + timeout-minutes: 15 + strategy: + matrix: + python-version: ["3.8", "3.9", "3.10", "3.11"] + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - name: Install Dependencies + working-directory: ${{github.workspace}}/python-client + run: | + sudo apt-get install -y openjdk-11-jdk + pip3 install .[test,dev] + - name: Running Tests + working-directory: ${{github.workspace}}/python-client + run: | + echo running on branch ${GITHUB_REF##*/} + pytest diff --git a/.gitignore b/.gitignore index fc890c24d..568f9df59 100644 --- a/.gitignore +++ b/.gitignore @@ -288,4 +288,4 @@ tira-web/tira-web/lib/ .DS_Store # TextMate -application/src/tira/frontend-vuetify/.editorconfig +frontend/.editorconfig diff --git a/Makefile b/Makefile deleted file mode 100644 index 88a6672e3..000000000 --- a/Makefile +++ /dev/null @@ -1,31 +0,0 @@ -.PHONY: help setup run-develop build-docker clean - -.DEFAULT: help -help: - @echo "make setup" - @echo " setup your environment" - @echo "make run-develop" - @echo " run the tira server" - @echo "make tests" - @echo " run all tests (automatically done in Github Actions on each commit)" - @echo "make vite-build" - @echo " build and test the frontnend client code" - @echo "make clean" - @echo " clean the environment" - - -setup: - @cd application && make setup - -run-develop: - @cd application && make run-develop - -tests: - @cd application && make tests - -vite-build: - @cd application && make vite-build - -clean: - @cd application && make clean - diff --git a/README.md b/README.md index 80b11d5a3..b0a329d5f 100644 --- a/README.md +++ b/README.md @@ -1,67 +1,68 @@ -

-[old centered README header: logo with alt text "TIRA Integrated Research Architecture"]

- -This repository contains the source code for all components of the [TIRA](https://www.tira.io) shared task platform. - -Components: - -- [Backend](application) (test coverage: ![test coverage backend](application/test/test-coverage/coverage.svg)) -- [Frontend](application/src/tira/frontend-vuetify) (test coverage: ![Coverage of the frontend](application/src/tira/frontend-vuetify/coverage/badge-lines.svg)) -- [Python Client](python-client) (test coverage: ![Coverage of the python client](python-client/tests/test-coverage/coverage.svg)) - - -## Resources -* [Wiki](https://tira-io.github.io/tira/): Getting started with TIRA as a developer/administrator -* [User Docs](https://www.tira.io/t/getting-started/1364): Getting started with TIRA as a user -* [Papers](https://webis.de/publications.html?q=tira): List of publications -* [Contribution Guide](CONTRIBUTING.md): How to contribute to the TIRA project - - -## Setup Your Development Environment - -We use [devcontainers](https://code.visualstudio.com/docs/devcontainers/containers) for development. To start your environment, either use Github Codespaces (click on "Code" -> "Codespaces" in Github to open one) as easiest way to get started, or [devpod](https://github.com/loft-sh/devpod) as open source alternative (directly pointing to our Kubernetes or your local docker installation). - -Run `make` to get an overview of all commands that will setup a self-contained tira application in your dev environment. - -1. Setup the database and compile the vuetify frontend - ```bash - ~$ make setup - ``` - -2. Start the local environment, point your browser to the specified URL - ```bash - ~$ make run-develop - ``` - -3. Optionally: To work on real data, initialize your development database from a database dump via - ```bash - ~$ make import-data-from-dump - ``` - or to work with mock data run: - ```bash - ~$ cd application - ~$ make import-mock-data - ``` - - -## Paper +

+[new centered README header: "TIRA" logo; badges: GPL 2.0 License, Current Release, Deployment, Tests, Linters; links: Documentation | Backend | Frontend | API & CLI | Publications | Citation]

+ +--- + +TIRA **I**ntegrated **R**esearch **A**rchitecture is a free and open source research platform designed for hosting and +partaking in shared tasks of any form. + +## I want to... +### ... organize a shared task +You can find the documentation for organizing your own task +[here](https://tira-io.github.io/tira/organizers/organizing-tasks.html). + +### ... deploy my own instance +> Stay tuned, we are currently working on making this process as simple as possible. + +### ... join a shared task +Awesome! Have a look [at this guide](https://tira-io.github.io/tira/users/participate.html). + +### ... contribute (bug reports, feature requests, code, documentation) +Great! Check out our Contribution Guide and see [how to get started](https://tira-io.github.io/tira/development/devenvironment.html); + + +## Citation If you use TIRA in your own research, please cite our paper ``` @InProceedings{froebe:2023b, - address = {Berlin Heidelberg New York}, - author = {Maik Fr{\"o}be and Matti Wiegmann and Nikolay Kolyada and Bastian Grahm and Theresa Elstner and Frank Loebe and Matthias Hagen and Benno Stein and Martin Potthast}, - booktitle = {Advances in Information Retrieval. 45th European Conference on {IR} Research ({ECIR} 2023)}, - month = apr, - publisher = {Springer}, - series = {Lecture Notes in Computer Science}, - site = {Dublin, Irland}, - title = {{Continuous Integration for Reproducible Shared Tasks with TIRA.io}}, - todo = {doi, month, pages, code}, - year = 2023 + address = {Berlin Heidelberg New York}, + author = {Maik Fr{\"o}be and Matti Wiegmann and Nikolay Kolyada and Bastian Grahm and Theresa Elstner and Frank Loebe and Matthias Hagen and Benno Stein and Martin Potthast}, + booktitle = {Advances in Information Retrieval. 45th European Conference on {IR} Research ({ECIR} 2023)}, + month = apr, + publisher = {Springer}, + series = {Lecture Notes in Computer Science}, + site = {Dublin, Irland}, + title = {{Continuous Integration for Reproducible Shared Tasks with TIRA.io}}, + year = 2023 } ``` -## License - -[MIT License](LICENSE) diff --git a/application/.devcontainer.json b/application/.devcontainer.json deleted file mode 100644 index 99ded8ed7..000000000 --- a/application/.devcontainer.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "image": "webis/tira-application:basis-0.0.96", - "customizations": { - "vscode": { - "extensions": ["ms-python.python", "ms-python.vscode-pylance", "ms-toolsai.jupyter"] - } - } -} diff --git a/application/.dockerignore b/application/.dockerignore index f83a4abb6..bac451f72 100644 --- a/application/.dockerignore +++ b/application/.dockerignore @@ -1,2 +1,303 @@ +mock-data + +.dockerignore +.gitignore +Dockerfile.* +Makefile +README.md + +test/tira-root +.data-dumps + + +# Gitignore contentn +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +#build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ venv/ -**/node_modules/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+ .idea/artifacts + .idea/compiler.xml + .idea/jarRepositories.xml + .idea/modules.xml + .idea/*.iml + .idea/modules + *.iml + *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser +tira.iml + +### Eclipse ### +.metadata +bin/ +tmp/ +*.tmp +*.bak +*.swp +*~.nib +local.properties +.settings/ +.loadpath +.recommenders + +# External tool builders +.externalToolBuilders/ + +# Locally stored "Eclipse launch configurations" +*.launch + +# PyDev specific (Python IDE for Eclipse) +*.pydevproject + +# CDT-specific (C/C++ Development Tooling) +.cproject + +# CDT- autotools +.autotools + +# Java annotation processor (APT) +.factorypath + +# PDT-specific (PHP Development Tools) +.buildpath + +# sbteclipse plugin +.target + +# Tern plugin +.tern-project + +# TeXlipse plugin +.texlipse + +# STS (Spring Tool Suite) +.springBeans + +# Code Recommenders +.recommenders/ + +# Annotation Processing +.apt_generated/ +.apt_generated_test/ + +# Scala IDE specific (Scala & Java development for Eclipse) +.cache-main +.scala_dependencies +.worksheet + +# Uncomment this line if you wish to ignore the project description file. +# Typically, this file would be tracked if it contains build/dependency configurations: +#.project + +### Eclipse Patch ### +# Spring Boot Tooling +.sts4-cache/ + +# End of https://www.toptal.com/developers/gitignore/api/eclipse + +/bin/ +conf/disraptor.properties +*.swp +/info/ +tira-web/tira-web/lib/ + +# Mac +.DS_Store + +# TextMate +frontend/.editorconfig + diff --git a/application/.gitignore b/application/.gitignore index 71f8f6c1f..117dbbb48 100644 --- a/application/.gitignore +++ b/application/.gitignore @@ -3,15 +3,12 @@ src/tira_vm_states.sqlite3 src/venv src/__init__.py src/config -src/tira/migrations/*.py +src/tira_app/migrations/*.py test/test-database -src/tira/frontend/node_modules/ -src/tira/static/tira/dist/* +src/tira_app/static/tira/dist/* package-lock.json -src/tira/frontend/webpack-stats.json test/tira-root/ **/*.received.txt .data-dumps/ -src/tira/static/tira/frontend-vuetify/ diff --git a/application/.vscode/settings.json b/application/.vscode/settings.json new file mode 100644 index 000000000..7020c77da --- /dev/null +++ b/application/.vscode/settings.json @@ -0,0 +1,6 @@ +{ + "python.testing.pytestArgs": [ ], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true, + "python.testing.cwd": "${workspaceFolder}/test" +} \ No newline at end of file diff --git a/application/Dockerfile.application b/application/Dockerfile.application index 223ad2d4e..d0140e093 100644 --- a/application/Dockerfile.application +++ b/application/Dockerfile.application @@ -1,13 +1,3 @@ -#Build Vuetify in first stage: Only change in case of new / updated dependencies -FROM webis/tira-application:basis-0.0.97 - -COPY application/src/tira/frontend-vuetify /src/tira/frontend-vuetify - -RUN cd /src/tira/frontend-vuetify \ - && ln -s /usr/local/lib/node_modules node_modules \ - && yarn build - - # Only change in case of new / updated dependencies FROM 
webis/tira-application:basis-0.0.96 @@ -27,9 +17,6 @@ RUN cd /tira/application/src && \ chmod +x /tira/application/src/tira/endpoints/aha && \ cp /tira/application/src/tira/management/commands/irds_cli.sh /irds_cli.sh && \ rm -f ./config/settings.yml ./config/config.yml ./config/tira-application-config.dev.yml && \ - rm -Rf tira/static/tira/frontend-vuetify/ && rm -Rf /tira/application/src/tira/static/tira/frontend-vuetify/ - -COPY --from=0 /src/tira/static/tira/frontend-vuetify/ /tira/application/src/tira/static/tira/frontend-vuetify/ RUN cd /tira/application/ && \ ./test/run_all_tests.sh diff --git a/application/Dockerfile.application-dev b/application/Dockerfile.application-dev index e7e1c2c25..f9307a3a3 100644 --- a/application/Dockerfile.application-dev +++ b/application/Dockerfile.application-dev @@ -62,7 +62,7 @@ RUN wget 'https://nodejs.org/dist/v20.10.0/node-v20.10.0-linux-x64.tar.xz' \ && mv node-v20.10.0-linux-x64/lib/node_modules/ /usr/local/lib/ \ && npm install --global yarn -ADD src/tira/frontend-vuetify/package.json src/tira/frontend-vuetify/yarn.lock src/tira/frontend-vuetify/vite.config.ts src/tira/frontend-vuetify/jest.config.js src/tira/frontend-vuetify/babel.config.js /tmp-del/ +ADD frontend/package.json frontend/yarn.lock frontend/vite.config.ts frontend/jest.config.js frontend/babel.config.js /tmp-del/ RUN cd /tmp-del \ && yarn create vuetify \ diff --git a/application/Dockerfile.basis b/application/Dockerfile.basis index 4ed08e167..f7d24bb07 100644 --- a/application/Dockerfile.basis +++ b/application/Dockerfile.basis @@ -23,7 +23,7 @@ RUN addgroup --gid 1010 tira && \ COPY application/requirements.txt /tira-dependencies/ COPY application/src/tira/frontend /tira-dependencies/webpack-frontend -COPY application/src/tira/frontend-vuetify /tira-dependencies/vite-frontend +COPY frontend /tira-dependencies/vite-frontend COPY protocol/build/python/* /tira/application/src/tira/proto/ COPY model/src/* /mnt/ceph/tira/model/ COPY application/src/tira_nginx.conf /etc/nginx/nginx.conf diff --git a/application/Dockerfile.prod b/application/Dockerfile.prod new file mode 100644 index 000000000..c9fd9e723 --- /dev/null +++ b/application/Dockerfile.prod @@ -0,0 +1,126 @@ +# TODO: this dockerfile is the latest and greatest out of the large quantity found in this folder +# At some point, Dockerfile.application* and Dockerfile.basis should be obsolete and can be deleted. If you read this in +# the future and that is the case, then "Hello Future Person" and also please delete these dockerfiles. + +# !!! This Dockerfile needs to be build from the project root and NOT the application folder !!! 
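+#
+# As a hedged example (the image tag below is illustrative, not an official name), a build from the repository root
+# could look like:
+#   docker build -f application/Dockerfile.prod -t tira-backend:local .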
+ +######################################################################################################################## +# Build Container # +######################################################################################################################## +FROM debian:stable-slim AS build + +ENV TZ=Europe/Berlin +RUN < /etc/timezone +apt-get -qq update && apt-get -qq install -y locales +echo "en_US.UTF-8 UTF-8" | tee -a /etc/locale.gen && locale-gen +EOF + +######################################################################################################################## +# Create User # +######################################################################################################################## +RUN useradd -ms /bin/bash tira + +######################################################################################################################## +# Copy all neccessary files over # +######################################################################################################################## +USER tira +WORKDIR /tira/ +COPY --chown=tira:tira ./application ./ +COPY --chown=tira:tira ./python-client ../python-client + +######################################################################################################################## +# Install Python and Dependencies # +######################################################################################################################## +USER root +ENV PIP_BREAK_SYSTEM_PACKAGES=1 +# For faster build of GRPCIO (TODO: remove when GRPC is not used anymore) +ENV GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS=16 + +RUN < /etc/timezone +apt-get -qq update && apt-get -qq install -y locales +echo "en_US.UTF-8 UTF-8" | tee -a /etc/locale.gen && locale-gen + +# Tools +apt-get -qq install -y python3 python3-dev +# For MariaDB +apt-get -qq install -y mariadb-client +# +mkdir -p /tira/application/src +chown -R tira:tira /tira +EOF + + +######################################################################################################################## +# Copy Data & Install Python and Dependencies # +######################################################################################################################## +COPY --from=build --chown=tira:tira /tira/src/manage.py /home/tira/manage.py + +RUN <.zip django-db-dump.zip If there are problems with the precompiled protobuf parser, you can recompile them from the `tira/protocol` repository and copy them to `tira/application/src/tira/proto`. If you run into `django.db.utils.OperationalError: (1050, "Table already exists")`, skip migrations using `./venv/bin/python3 src/manage.py migrate --fake` . - -Windows users using WSL: If you run into `setup.sh: line 3: $'\r'`: command not found' when executing make setup: - 1. run `sudo apt-get install dos2unix` - 2. run `dos2unix setup.sh` - 3. run `cd tests && dos2unix setup.sh` - 4. 
Now make setup should work - - Error running vite-dev: `00h00m00s 0/0: : ERROR: [Errno 2] No such file or directory: 'install'`: - run `apt remove cmdtest - sudo apt remove yarn - curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add - - echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list - sudo apt-get update - sudo apt-get install yarn -y` diff --git a/application/config/tira-application-config.dev.yml b/application/config/tira-application-config.dev.yml deleted file mode 100644 index 2820dcbfc..000000000 --- a/application/config/tira-application-config.dev.yml +++ /dev/null @@ -1,27 +0,0 @@ -# ---task/celebrity-profiling/user/hodge20a -debug: true -allowed_hosts: - - "127.0.0.1" -django_secret: 'not-so-secret' -# --- -# tira_root: /mnt/ceph/tira -# deployment = {disraptor, legacy} -deployment: legacy -# in the legacy deployment, a path to the users file is needed. With the default tira model this is TIRA_ROOT/model/users/users.prototext -# legacy_users_file: model/users/users.prototext -# disraptor_secret_file: /etc/discourse/client-api-key -# The directory where logs are written to. Defaults to TIRA_ROOT/log/tira-application -# logging_dir: /mnt/ceph/tira/log/tira-application -# grpc_host can be local or remote. If local, it will call localhost (i.e. for testing). If remote, it will call the vm-host -# When developing, set this option to local, otherwise you might accidentally remote-control the live-vms. -grpc_host: local -host_grpc_port: 50051 -application_grpc_port: 50052 -database: - engine: django.db.backends.sqlite3 # django.db.backends.mysql or django.db.backends.sqlite3 - name: tira # when backend is sqlite, this will be the name of the database below TIRA_ROOT/state - user: tira # ignored when using sqlite3 - password: TODO-ENTER-PASSWORD # ignored when using sqlite3 - host: tira-mariadb # ignored when using sqlite3 - port: 3306 # ignored when using sqlite3 -github_token: TODO-ENTER-TOKEN diff --git a/application/config/tira-application-config.docker.yml b/application/config/tira-application-config.docker.yml deleted file mode 100644 index 3295f9389..000000000 --- a/application/config/tira-application-config.docker.yml +++ /dev/null @@ -1,27 +0,0 @@ -# ---task/celebrity-profiling/user/hodge20a -debug: true -allowed_hosts: - - "127.0.0.1" -django_secret: 'not-so-secret' -# --- -# tira_root: /mnt/ceph/tira -# deployment = {disraptor, legacy} -deployment: legacy -# in the legacy deployment, a path to the users file is needed. With the default tira model this is TIRA_ROOT/model/users/users.prototext -# legacy_users_file: model/users/users.prototext -# disraptor_secret_file: /etc/discourse/client-api-key -# The directory where logs are written to. Defaults to TIRA_ROOT/log/tira-application -# logging_dir: /mnt/ceph/tira/log/tira-application -# grpc_host can be local or remote. If local, it will call localhost (i.e. for testing). If remote, it will call the vm-host -# When developing, set this option to local, otherwise you might accidentally remote-control the live-vms. 
-grpc_host: local -host_grpc_port: 50051 -application_grpc_port: 50052 -database: - engine: django.db.backends.sqlite3 # django.db.backends.mysql or django.db.backends.sqlite3 - name: tira # when backend is sqlite, this will be the name of the database below TIRA_ROOT/state - user: tira # ignored when using sqlite3 - password: TODO-ENTER-PASSWORD # ignored when using sqlite3 - host: tira-mariadb # ignored when using sqlite3 - port: 3306 # ignored when using sqlite3 - diff --git a/application/config/tira-application-config.yml b/application/config/tira-application-config.yml new file mode 100644 index 000000000..bdb93a62c --- /dev/null +++ b/application/config/tira-application-config.yml @@ -0,0 +1,61 @@ +# For your convenience, we marked secrets with [SECRET]. Make sure, you change these +# values from their defaults! + +########################################################################################## +# TIRA # +########################################################################################## +# Enables debug mode. This means more verbose output in the console and for the REST-API. +# Settings this value to true in production is a security risk! +debug: !ENV ${TIRA_DEBUG:false} + +tira_root: !ENV ${TIRA_ROOT:/tira} +# The directory where logs are written to. Defaults to TIRA_ROOT/log/tira-application +# logging_dir: /mnt/ceph/tira/log/tira-application + +# [SECRET] +github_token: !ENV ${TIRA_GITHUB_TOKEN} + +########################################################################################## +# Database # +########################################################################################## +database: + # django.db.backends.mysql or django.db.backends.sqlite3 + engine: !ENV ${TIRA_DB_ENGINE:django.db.backends.sqlite3} + # when backend is sqlite, this will be the name of the database below TIRA_ROOT/state + name: !ENV ${TIRA_DB_NAME:tira} + user: !ENV ${TIRA_DB_USER:tira} # ignored when using sqlite3 + password: !ENV ${TIRA_DB_PASSWORD} # ignored when using sqlite3 + host: !ENV ${TIRA_DB_HOST:tira-mariadb} # ignored when using sqlite3 + port: !ENV ${TIRA_DB_PORT:3306} # ignored when using sqlite3 + +########################################################################################## +# Discourse # +########################################################################################## +discourse_api_url: !ENV ${DISCOURSE_API_URL:https://www.tira.io} + +# [SECRET] +discourse_api_key: !ENV ${DISCOURSE_API_KEY:""} + +########################################################################################## +# Django # +########################################################################################## +# A list of hostnames using which the backend may be addressed. The value "*" denotes any +# address. A value of ["tira.example.com", "example.com"] would only allow requests made +# addressing these hostnames. See +# https://docs.djangoproject.com/en/5.1/ref/settings/#allowed-hosts for more information. +allowed_hosts: + - "*" + +# [SECRET] See https://docs.djangoproject.com/en/5.1/ref/settings/#std-setting-SECRET_KEY +# for more information. +django_secret: !ENV ${DJANGO_SECRET:change-me!} + +########################################################################################## +# Deprecated and removed soon (we hope) # +########################################################################################## +# grpc_host can be local or remote. If local, it will call localhost (i.e., for testing). +# If remote, it will call the vm-host. 
When developing, set this option to local, +# otherwise you might accidentally remote-control the live-vms. +grpc_host: local +host_grpc_port: 50051 +application_grpc_port: 50052 diff --git a/application/pyproject.toml b/application/pyproject.toml new file mode 100644 index 000000000..d3de6c31c --- /dev/null +++ b/application/pyproject.toml @@ -0,0 +1,43 @@ +[tool.black] +line-length = 120 +exclude = '''/( + src/tira_app/migrations + | src/tira_app/proto +)''' + +[tool.isort] +profile = "black" +multi_line_output = 3 +line_length = 120 +include_trailing_comma = true +skip = [ + "src/tira_app/migrations", + "src/tira_app/proto", +] + +[tool.mypy] +disallow_untyped_calls = true +explicit_package_bases = true +ignore_missing_imports = true +install_types = true +exclude = [ + "^src/tira_app/proto/.*\\.py$", + "^src/tira_app/migrations/.*\\.py$", +] + +[tool.pytest.ini_options] +DJANGO_SETTINGS_MODULE = "settings_test" +pythonpath = ["./src", "./test"] +python_files = "test_*.py" + +[tool.pytest_env] +HF_HOME = "./tira-root/huggingface" +TIRA_ROOT = "./tira-root" +TIRA_CONFIG = "../config/tira-application-config.yml" +TIRA_DEBUG = true +TIRA_DB_ENGINE = "django.db.backends.sqlite3" +TIRA_DB_NAME = "test-database/sqlite3" +TIRA_DB_USER = "tira" +TIRA_DB_PASSWORD = "replace-with-db-password" +TIRA_DB_HOST = "tira-mariadb" +TIRA_DB_PORT = 3306 diff --git a/application/requirements.txt b/application/requirements.txt deleted file mode 100644 index 6da3e0958..000000000 --- a/application/requirements.txt +++ /dev/null @@ -1,30 +0,0 @@ -grpcio==1.53.2 -grpcio-tools==1.36.1 -protobuf<4.0dev -pyuwsgi -Django -pyyaml -requests -randomname -tqdm -mysql -mysqlclient -django-webpack-loader==0.6.0 -python-gitlab -GitPython -python-slugify -git+https://github.com/allenai/ir_datasets -git+https://github.com/mam10eks/diffir -pandas -markdown -PyGithub==1.59.1 -ghapi -parameterized -mockito -approvaltests==7.3.0 -django-extensions -coverage -coverage-badge -discourse-client-in-disraptor==0.0.8 -tira>=0.0.97 -huggingface-hub diff --git a/application/setup.cfg b/application/setup.cfg new file mode 100644 index 000000000..755f27756 --- /dev/null +++ b/application/setup.cfg @@ -0,0 +1,71 @@ +[metadata] +name = tira_app + +[options] +python_requires = >=3.9 +include_package_data = True +package_dir = + = src +packages = find_namespace: +install_requires = + grpcio>=1.53.2 + # grpcio-tools==1.36.1 # still needed? 
+ protobuf<4.0dev + Django==5.0.9 + pyyaml + requests + randomname + tqdm + mysqlclient + python-gitlab==4.10.0 + GitPython + python-slugify + ir-datasets + diffir@git+https://github.com/mam10eks/diffir + pandas + markdown + PyGithub==1.59.1 + django-extensions + discourse-client-in-disraptor==0.0.8 + # tira>=0.0.97 + huggingface-hub + djangorestframework==3.15.1 + django-filter==24.2 + djangorestframework-jsonapi==7.0.0 + pyaml-env==1.2.1 + +[options.extras_require] +test = + mockito + parameterized + approvaltests==7.3.0 + pytest-django + pytest-env==1.1.3 +dev = + coverage + coverage-badge + black + flake8 + isort + mypy +deploy = + uwsgi +postgreqsql = + psycopg2-binary + +[options.packages.find] +where = src +include = + tira_app + tira_app.* + django_admin + +[options.package_data] +tira_app.res = *.yml + +[flake8] +max-line-length = 120 +extend-ignore = E203 +include = src,test +exclude = src/tira/migrations,src/tira/proto +max-complexity = 10 diff --git a/application/setup.py b/application/setup.py new file mode 100644 index 000000000..606849326 --- /dev/null +++ b/application/setup.py @@ -0,0 +1,3 @@ +from setuptools import setup + +setup() diff --git a/application/setup.sh b/application/setup.sh deleted file mode 100755 index c26eaba97..000000000 --- a/application/setup.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -# setup.sh does steps in the tira setup. - -exit_with () { - echo $1 - echo "Exiting setup.sh; Please install the required software."; - exit 1 -} - -which mysql_config > '\dev\null' || exit_with "Please install the sql driver before the tira-application setup: - sudo apt install libmysqlclient-dev - sudo apk add mariadb-dev" - -which npm > '\dev\null' || exit_with "Please install npm to build the vue frontend - https://nodejs.org/en/download/" diff --git a/application/src/django_admin/asgi.py b/application/src/django_admin/asgi.py index fb6359fce..b4a2e5b4f 100644 --- a/application/src/django_admin/asgi.py +++ b/application/src/django_admin/asgi.py @@ -11,6 +11,6 @@ from django.core.asgi import get_asgi_application -os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_admin.settings') +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_admin.settings") application = get_asgi_application() diff --git a/application/src/django_admin/settings.py b/application/src/django_admin/settings.py index 570e395a8..491d7be0e 100644 --- a/application/src/django_admin/settings.py +++ b/application/src/django_admin/settings.py @@ -9,270 +9,341 @@ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.1/ref/settings/ """ -import json + +import importlib.resources as resources import logging -from pathlib import Path import os +from pathlib import Path + import yaml +from pyaml_env import parse_config + +from tira_app.util import str2bool # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent custom_settings = {} -for cfg in (BASE_DIR / "config").glob("*.yml"): - print(f'Load settings from {cfg}.') - custom_settings.update(yaml.load(open(cfg, "r").read(), Loader=yaml.FullLoader)) +cfgpath = os.environ.get("TIRA_CONFIG", str(BASE_DIR / "config" / "tira-application-config.yml")) +logging.info(f"Load settings from {cfgpath}.") +config = parse_config(cfgpath, default_value=None, loader=yaml.FullLoader) +custom_settings.update(config) -if 'database' not in custom_settings: - custom_settings['database'] = {} +# SECURITY WARNING: don't run with debug turned on in production! 
+DEBUG = str2bool(custom_settings["debug"]) + +if DEBUG: + logging.basicConfig(level=logging.DEBUG, force=True) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! -SECRET_KEY = custom_settings.get("django_secret", 'not-so-secret') +# https://docs.djangoproject.com/en/5.1/ref/settings/#std-setting-SECRET_KEY +SECRET_KEY = custom_settings["django_secret"] -# SECURITY WARNING: don't run with debug turned on in production! -DEBUG = custom_settings.get("debug", True) -ALLOWED_HOSTS = custom_settings.get("allowed_hosts", []) +ALLOWED_HOSTS = custom_settings["allowed_hosts"] -TIRA_ROOT = Path(custom_settings.get("tira_root", BASE_DIR.parents[1] / "model" / "src")) +TIRA_ROOT = Path(custom_settings["tira_root"]) if not TIRA_ROOT.is_dir(): logging.warning(f"{TIRA_ROOT} does not exists and will be created now.") (TIRA_ROOT / "state").mkdir(parents=True, exist_ok=True) -DEPLOYMENT = custom_settings.get("deployment", "legacy") -LEGACY_USER_FILE = Path(custom_settings.get("legacy_users_file", TIRA_ROOT / "model" / "users" / "users.prototext")) -DISRAPTOR_SECRET_FILE = Path(custom_settings.get("disraptor_secret_file", "/etc/discourse/client-api-key")) HOST_GRPC_PORT = custom_settings.get("host_grpc_port", "50051") APPLICATION_GRPC_PORT = custom_settings.get("application_grpc_port", "50052") GRPC_HOST = custom_settings.get("grpc_host", "local") # can be local or remote -TIRA_DB_NAME = Path(TIRA_ROOT / "state") / f"{custom_settings['database'].get('name', 'tira')}.sqlite3" \ - if custom_settings['database'].get('engine', 'django.db.backends.sqlite3') == 'django.db.backends.sqlite3' \ - else custom_settings['database'].get('name', 'tira') +TIRA_DB_NAME = ( + Path(TIRA_ROOT / "state") / f"{custom_settings['database']['name']}.sqlite3" + if custom_settings["database"]["engine"] == "django.db.backends.sqlite3" + else custom_settings["database"]["name"] +) TIRA_DB = { - 'ENGINE': custom_settings['database'].get('engine', 'django.db.backends.sqlite3'), - 'NAME': TIRA_DB_NAME, - 'USER': custom_settings['database'].get('user', 'tira'), - 'PASSWORD': custom_settings['database'].get('password', 'replace-with-db-password'), - 'HOST': custom_settings['database'].get('host', 'tira-mariadb'), - 'PORT': int(custom_settings['database'].get('port', 3306)), - 'TEST': { - 'NAME': "test_tira", - 'ENGINE': 'django.db.backends.sqlite3', - } + "ENGINE": custom_settings["database"]["engine"], + "NAME": TIRA_DB_NAME, + "USER": custom_settings["database"]["user"], + "PASSWORD": custom_settings["database"]["password"], + "HOST": custom_settings["database"]["host"], + "PORT": int(custom_settings["database"]["port"]), + "TEST": { + "NAME": "test_tira", + "ENGINE": "django.db.backends.sqlite3", + }, } # Application definition INSTALLED_APPS = [ - 'tira.apps.TiraConfig', - 'django.contrib.admin', - 'django.contrib.auth', - 'django.contrib.contenttypes', - 'django.contrib.sessions', - 'django.contrib.messages', - 'django.contrib.staticfiles', - 'webpack_loader', + "tira_app.apps.TiraConfig", + "django.contrib.auth", + "django.contrib.contenttypes", + "django.contrib.sessions", + "django.contrib.messages", + "django_filters", + "rest_framework", + "rest_framework_json_api", ] MIDDLEWARE = [ - 'django.middleware.security.SecurityMiddleware', - 'django.contrib.sessions.middleware.SessionMiddleware', - 'django.middleware.common.CommonMiddleware', - 'django.middleware.csrf.CsrfViewMiddleware', 
- 'django.contrib.auth.middleware.AuthenticationMiddleware', - 'django.contrib.messages.middleware.MessageMiddleware', - 'django.middleware.clickjacking.XFrameOptionsMiddleware', + "django.middleware.security.SecurityMiddleware", + "django.contrib.sessions.middleware.SessionMiddleware", + "django.middleware.common.CommonMiddleware", + "django.middleware.csrf.CsrfViewMiddleware", + "django.contrib.auth.middleware.AuthenticationMiddleware", + "django.contrib.messages.middleware.MessageMiddleware", + "django.middleware.clickjacking.XFrameOptionsMiddleware", ] -ROOT_URLCONF = 'django_admin.urls' +REST_FRAMEWORK = { + "DEFAULT_AUTHENTICATION_CLASSES": ("tira_app.authentication.TrustedHeaderAuthentication",), + "DEFAULT_FILTER_BACKENDS": ("rest_framework_json_api.django_filters.DjangoFilterBackend",), + "DEFAULT_RENDERER_CLASSES": ("rest_framework.renderers.JSONRenderer",), +} + +ROOT_URLCONF = "django_admin.urls" TEMPLATES = [ { - 'BACKEND': 'django.template.backends.django.DjangoTemplates', - 'DIRS': [BASE_DIR / 'templates'] - , - 'APP_DIRS': True, - 'OPTIONS': { - 'context_processors': [ - 'django.template.context_processors.debug', - 'django.template.context_processors.request', - 'django.contrib.auth.context_processors.auth', - 'django.contrib.messages.context_processors.messages', + "BACKEND": "django.template.backends.django.DjangoTemplates", + "DIRS": [BASE_DIR / "templates"], + "APP_DIRS": True, + "OPTIONS": { + "context_processors": [ + "django.template.context_processors.debug", + "django.template.context_processors.request", + "django.contrib.auth.context_processors.auth", + "django.contrib.messages.context_processors.messages", ], }, }, ] -WSGI_APPLICATION = 'django_admin.wsgi.application' +WSGI_APPLICATION = "django_admin.wsgi.application" # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases -DATABASES = { - 'default': TIRA_DB -} +DATABASES = {"default": TIRA_DB} SESSION_ENGINE = "django.contrib.sessions.backends.cached_db" -DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' +DEFAULT_AUTO_FIELD = "django.db.models.AutoField" def logger_config(log_dir: Path): return { - 'version': 1, - 'disable_existing_loggers': False, - 'formatters': { - 'verbose': { - 'format': '{levelname} {asctime} {module} {process:d} {thread:d} {message}', - 'style': '{', + "version": 1, + "disable_existing_loggers": False, + "formatters": { + "verbose": { + "format": "{levelname} {asctime} {module} {process:d} {thread:d} {message}", + "style": "{", }, - 'default': { - 'format': '{levelname} {asctime} {module}: {message}', - 'style': '{', + "default": { + "format": "{levelname} {asctime} {module}: {message}", + "style": "{", }, - 'simple': { - 'format': '{levelname} {message}', - 'style': '{', + "simple": { + "format": "{levelname} {message}", + "style": "{", }, }, - 'filters': { - 'require_debug_true': { - '()': 'django.utils.log.RequireDebugTrue', + "filters": { + "require_debug_true": { + "()": "django.utils.log.RequireDebugTrue", }, }, - 'handlers': { - 'console': { - 'level': 'DEBUG', - 'filters': ['require_debug_true'], - 'class': 'logging.StreamHandler', - 'formatter': 'default' + "handlers": { + "console": { + "level": "DEBUG", + "filters": ["require_debug_true"], + "class": "logging.StreamHandler", + "formatter": "default", }, - 'ceph_django_debug': { - 'level': 'DEBUG', - 'class': 'logging.FileHandler', - 'filters': ['require_debug_true'], - 'filename': log_dir / 'django-debug.log', - 'formatter': 'default' + "ceph_django_debug": { + "level": "DEBUG", + "class": 
"logging.FileHandler", + "filters": ["require_debug_true"], + "filename": log_dir / "django-debug.log", + "formatter": "default", }, - 'ceph_django_info': { - 'level': 'INFO', - 'class': 'logging.FileHandler', - 'filename': log_dir / 'django-info.log', - 'formatter': 'default' + "ceph_django_info": { + "level": "INFO", + "class": "logging.FileHandler", + "filename": log_dir / "django-info.log", + "formatter": "default", }, - 'ceph_django_warn': { - 'level': 'WARNING', - 'class': 'logging.FileHandler', - 'filename': log_dir / 'django-warning.log', - 'formatter': 'default' + "ceph_django_warn": { + "level": "WARNING", + "class": "logging.FileHandler", + "filename": log_dir / "django-warning.log", + "formatter": "default", }, - 'ceph_tira_debug': { - 'level': 'DEBUG', - 'class': 'logging.FileHandler', - 'filters': ['require_debug_true'], - 'filename': log_dir / 'tira-debug.log', - 'formatter': 'default' + "ceph_tira_debug": { + "level": "DEBUG", + "class": "logging.FileHandler", + "filters": ["require_debug_true"], + "filename": log_dir / "tira-debug.log", + "formatter": "default", }, - 'ceph_tira_info': { - 'level': 'INFO', - 'class': 'logging.FileHandler', - 'filename': log_dir / 'tira-info.log', - 'formatter': 'default' + "ceph_tira_info": { + "level": "INFO", + "class": "logging.FileHandler", + "filename": log_dir / "tira-info.log", + "formatter": "default", }, - 'ceph_tira_warn': { - 'level': 'WARNING', - 'class': 'logging.FileHandler', - 'filename': log_dir / 'tira-warning.log', - 'formatter': 'default' + "ceph_tira_warn": { + "level": "WARNING", + "class": "logging.FileHandler", + "filename": log_dir / "tira-warning.log", + "formatter": "default", }, - 'ceph_tira_db': { - 'level': 'INFO', - 'class': 'logging.FileHandler', - 'filename': log_dir / 'tira-db.log', - 'formatter': 'default' + "ceph_tira_db": { + "level": "INFO", + "class": "logging.FileHandler", + "filename": log_dir / "tira-db.log", + "formatter": "default", }, - 'ceph_grpc_debug': { - 'level': 'DEBUG', - 'class': 'logging.FileHandler', - 'filters': ['require_debug_true'], - 'filename': log_dir / 'grpc-debug.log', - 'formatter': 'default' + "ceph_grpc_debug": { + "level": "DEBUG", + "class": "logging.FileHandler", + "filters": ["require_debug_true"], + "filename": log_dir / "grpc-debug.log", + "formatter": "default", }, - 'ceph_grpc_info': { - 'level': 'INFO', - 'class': 'logging.FileHandler', - 'filename': log_dir / 'grpc-info.log', - 'formatter': 'default' + "ceph_grpc_info": { + "level": "INFO", + "class": "logging.FileHandler", + "filename": log_dir / "grpc-info.log", + "formatter": "default", }, - 'ceph_grpc_warn': { - 'level': 'WARNING', - 'class': 'logging.FileHandler', - 'filename': log_dir / 'grpc-warning.log', - 'formatter': 'default' + "ceph_grpc_warn": { + "level": "WARNING", + "class": "logging.FileHandler", + "filename": log_dir / "grpc-warning.log", + "formatter": "default", }, }, - 'loggers': { - 'django': { - 'handlers': ['console', 'ceph_django_debug', 'ceph_django_warn', 'ceph_django_info'], - 'propagate': True, + "loggers": { + "django": { + "handlers": ["console", "ceph_django_debug", "ceph_django_warn", "ceph_django_info"], + "propagate": True, }, - 'django.requests': { - 'handlers': ['console', 'ceph_django_debug', 'ceph_django_warn', 'ceph_django_info'], - 'propagate': True, + "django.requests": { + "handlers": ["console", "ceph_django_debug", "ceph_django_warn", "ceph_django_info"], + "propagate": True, }, - 'django.server': { - 'handlers': ['console', 'ceph_django_debug', 'ceph_django_warn', 
'ceph_django_info'], - 'propagate': True, + "django.server": { + "handlers": ["console", "ceph_django_debug", "ceph_django_warn", "ceph_django_info"], + "propagate": True, }, - 'tira': { - 'handlers': ['console', 'ceph_tira_debug', 'ceph_tira_warn', 'ceph_tira_info'], - 'propagate': True, + "tira": { + "handlers": ["console", "ceph_tira_debug", "ceph_tira_warn", "ceph_tira_info"], + "propagate": True, }, - 'tira_db': { - 'handlers': ['console', 'ceph_tira_db'], - 'propagate': True, + "tira_db": { + "handlers": ["console", "ceph_tira_db"], + "propagate": True, }, - 'grpc_server': { - 'handlers': ['console', 'ceph_grpc_debug', 'ceph_grpc_warn', 'ceph_grpc_info'], - 'propagate': True, + "grpc_server": { + "handlers": ["console", "ceph_grpc_debug", "ceph_grpc_warn", "ceph_grpc_info"], + "propagate": True, }, - } + }, } + # Git Integration GIT_CI_AVAILABLE_RESOURCES = { - 'small-resources': {'cores': 1, 'ram': 10, 'gpu': 0, 'data': 'no', 'description': 'Small (1 CPU Cores, 10GB of RAM)', 'key': 'small-resources'}, - 'medium-resources': {'cores': 2, 'ram': 20, 'gpu': 0, 'data': 'no', 'description': 'Medium (2 CPU Cores, 20GB of RAM)', 'key': 'medium-resources'}, - 'large-resources': {'cores': 4, 'ram': 40, 'gpu': 0, 'data': 'no', 'description': 'Large (4 CPU Cores, 40GB of RAM)', 'key': 'large-resources'}, - 'small-resources-gpu': {'cores': 1, 'ram': 10, 'gpu': '1-nvidia-1080', 'data': 'no', 'description': 'Small w. GPU (1 CPU Cores, 10GB of RAM, 1 Nvidia GTX 1080 with 8GB)', 'key': 'small-resources-gpu'}, - 'medium-resources-gpu': {'cores': 2, 'ram': 20, 'gpu': '1-nvidia-1080', 'data': 'no', 'description': 'Medium w. GPU (2 CPU Cores, 20GB of RAM, 1 Nvidia GTX 1080 with 8GB)', 'key': 'medium-resources-gpu'}, - 'large-resources-gpu': {'cores': 4, 'ram': 40, 'gpu': '1-nvidia-1080', 'data': 'no', 'description': 'Large w. GPU (4 CPU Cores, 40GB of RAM, 1 Nvidia GTX 1080 with 8GB)', 'key': 'large-resources-gpu'}, - 'xl-resources-gpu': {'cores': 8, 'ram': 80, 'gpu': '1-nvidia-1080', 'data': 'no', 'description': 'XL w. GPU (8 CPU Cores, 80GB of RAM, 1 Nvidia GTX 1080 with 8GB)', 'key': 'xl-resources-gpu'}, - 'a100-resources-gpu': {'cores': 5, 'ram': 50, 'gpu': '1-nvidia-a100', 'data': 'no', 'description': 'A100 GPU (5 CPU Cores, 50GB of RAM, 1 Nvidia A100 with 40GB)', 'key': 'a100-resources-gpu'} + "small-resources": { + "cores": 1, + "ram": 10, + "gpu": 0, + "data": "no", + "description": "Small (1 CPU Cores, 10GB of RAM)", + "key": "small-resources", + }, + "medium-resources": { + "cores": 2, + "ram": 20, + "gpu": 0, + "data": "no", + "description": "Medium (2 CPU Cores, 20GB of RAM)", + "key": "medium-resources", + }, + "large-resources": { + "cores": 4, + "ram": 40, + "gpu": 0, + "data": "no", + "description": "Large (4 CPU Cores, 40GB of RAM)", + "key": "large-resources", + }, + "small-resources-gpu": { + "cores": 1, + "ram": 10, + "gpu": "1-nvidia-1080", + "data": "no", + "description": "Small w. GPU (1 CPU Cores, 10GB of RAM, 1 Nvidia GTX 1080 with 8GB)", + "key": "small-resources-gpu", + }, + "medium-resources-gpu": { + "cores": 2, + "ram": 20, + "gpu": "1-nvidia-1080", + "data": "no", + "description": "Medium w. GPU (2 CPU Cores, 20GB of RAM, 1 Nvidia GTX 1080 with 8GB)", + "key": "medium-resources-gpu", + }, + "large-resources-gpu": { + "cores": 4, + "ram": 40, + "gpu": "1-nvidia-1080", + "data": "no", + "description": "Large w. 
GPU (4 CPU Cores, 40GB of RAM, 1 Nvidia GTX 1080 with 8GB)", + "key": "large-resources-gpu", + }, + "xl-resources-gpu": { + "cores": 8, + "ram": 80, + "gpu": "1-nvidia-1080", + "data": "no", + "description": "XL w. GPU (8 CPU Cores, 80GB of RAM, 1 Nvidia GTX 1080 with 8GB)", + "key": "xl-resources-gpu", + }, + "a100-resources-gpu": { + "cores": 5, + "ram": 50, + "gpu": "1-nvidia-a100", + "data": "no", + "description": "A100 GPU (5 CPU Cores, 50GB of RAM, 1 Nvidia A100 with 40GB)", + "key": "a100-resources-gpu", + }, } -DEFAULT_GIT_INTEGRATION_URL = 'https://git.webis.de/code-research/tira' +DEFAULT_GIT_INTEGRATION_URL = "https://git.webis.de/code-research/tira" -IR_MEASURES_IMAGE = custom_settings.get('IR_MEASURES_IMAGE', 'webis/tira-ir-measures-evaluator:0.0.1') -IR_MEASURES_COMMAND = custom_settings.get('IR_MEASURES_COMMAND', '/ir_measures_evaluator.py --run ${inputRun}/run.txt --topics ${inputDataset}/queries.jsonl --qrels ${inputDataset}/qrels.txt --output ${outputDir} --measures "P@10" "nDCG@10" "MRR"') +IR_MEASURES_IMAGE = custom_settings.get("IR_MEASURES_IMAGE", "webis/tira-ir-measures-evaluator:0.0.1") +IR_MEASURES_COMMAND = custom_settings.get( + "IR_MEASURES_COMMAND", + "/ir_measures_evaluator.py --run ${inputRun}/run.txt --topics ${inputDataset}/queries.jsonl --qrels" + ' ${inputDataset}/qrels.txt --output ${outputDir} --measures "P@10" "nDCG@10" "MRR"', +) -GITHUB_TOKEN = custom_settings.get("github_token", '') +GITHUB_TOKEN = custom_settings["github_token"] # Caching CACHES = { - 'default': { - 'BACKEND': 'django.core.cache.backends.db.DatabaseCache', - 'LOCATION': 'tira_database_cache_table', - 'TIMEOUT': 43200, # 43200 seconds (i.e.,12 hours) as timeout, to use for the cache - 'OPTIONS': { - 'MAX_ENTRIES': 100000 - } + "default": { + "BACKEND": "django.core.cache.backends.db.DatabaseCache", + "LOCATION": "tira_database_cache_table", + "TIMEOUT": 43200, # 43200 seconds (i.e.,12 hours) as timeout, to use for the cache + "OPTIONS": {"MAX_ENTRIES": 100000}, } } -TIREX_COMPONENTS = yaml.load(open(BASE_DIR / 'tirex-components.yml').read(), Loader=yaml.FullLoader) +TIREX_COMPONENTS = yaml.load( + (resources.files("tira_app.res") / "tirex-components.yml").read_bytes(), Loader=yaml.FullLoader +) # Logging ld = Path(custom_settings.get("logging_dir", TIRA_ROOT / "log" / "tira-application")) @@ -291,30 +362,12 @@ def logger_config(log_dir: Path): raise PermissionError(f"Can not write to {ld} in production mode.") -# Password validation -# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators - -AUTH_PASSWORD_VALIDATORS = [ - { - 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', - }, - { - 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', - }, - { - 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', - }, - { - 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', - }, -] - # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ -LANGUAGE_CODE = 'en-us' +LANGUAGE_CODE = "en-us" -TIME_ZONE = 'Europe/Berlin' +TIME_ZONE = "Europe/Berlin" USE_I18N = True @@ -322,48 +375,27 @@ def logger_config(log_dir: Path): USE_TZ = True -# Static files (CSS, JavaScript, Images) -# https://docs.djangoproject.com/en/3.1/howto/static-files/ - -STATIC_URL = '/public/' - -STATICFILES_DIRS = [ - BASE_DIR / "static/", - BASE_DIR / "tira/static/" -] - -STATIC_ROOT = "/var/www/public" - -WEBPACK_LOADER = { - 'DEFAULT': { - 'CACHE': DEBUG, - 'BUNDLE_DIR_NAME': 
'/bundles/', - 'STATS_FILE': BASE_DIR / 'tira' / 'frontend' / 'webpack-stats.json' - } -} - -DISCOURSE_API_URL = 'https://www.tira.io' -PUBLIC_TRAINING_DATA = set(['jena-topics-20231026-test', 'leipzig-topics-20231025-test']) +DISCOURSE_API_URL = custom_settings["discourse_api_url"] +DISRAPTOR_API_KEY = custom_settings["discourse_api_key"] +PUBLIC_TRAINING_DATA = set(["jena-topics-20231026-test", "leipzig-topics-20231025-test"]) CODE_SUBMISSION_REFERENCE_REPOSITORIES = { - 'ir-lab-jena-leipzig-wise-2023': 'tira-io/tira-ir-lab-wise-submission-template', - 'ir-lab-sose-2024': 'webis-de/information-retrieval-exercise', - 'ir-benchmarks': 'tira-io/tira-ir-lab-wise-submission-template', - 'webpage-classification': 'OpenWebSearch/irixys23-tira-submission-template', - 'valueeval-2024-human-value-detection': 'touche-webis-de/valueeval24-tira-software-submission-template', - 'workshop-on-open-web-search': 'tira-io/wows24-submission-template', - 'nlpbuw-fsu-sose-24': 'webis-de/natural-language-processing-exercises', + "ir-lab-jena-leipzig-wise-2023": "tira-io/tira-ir-lab-wise-submission-template", + "ir-lab-sose-2024": "webis-de/information-retrieval-exercise", + "ir-benchmarks": "tira-io/tira-ir-lab-wise-submission-template", + "webpage-classification": "OpenWebSearch/irixys23-tira-submission-template", + "valueeval-2024-human-value-detection": "touche-webis-de/valueeval24-tira-software-submission-template", + "workshop-on-open-web-search": "tira-io/wows24-submission-template", + "nlpbuw-fsu-sose-24": "webis-de/natural-language-processing-exercises", } REFERENCE_DATASETS = { - 'ir-lab-padua-2024': 'ir-lab-padua-2024/spot-check-20240424-training', - 'ir-benchmarks': 'ir-benchmarks/cranfield-20230107-training', - 'workshop-on-open-web-search': 'workshop-on-open-web-search/retrieval-20231027-training', - 'generative-ai-authorship-verification-panclef-2024': 'generative-ai-authorship-verification-panclef-2024/pan24-generative-authorship-tiny-smoke-20240417-training', + "ir-lab-padua-2024": "ir-lab-padua-2024/spot-check-20240424-training", + "ir-benchmarks": "ir-benchmarks/cranfield-20230107-training", + "workshop-on-open-web-search": "workshop-on-open-web-search/retrieval-20231027-training", + "generative-ai-authorship-verification-panclef-2024": ( + "generative-ai-authorship-verification-panclef-2024/pan24-generative-authorship-tiny-smoke-20240417-training" + ), } -CODE_SUBMISSION_REPOSITORY_NAMESPACE = 'tira-io' -try: - DISRAPTOR_API_KEY = open(DISRAPTOR_SECRET_FILE, "r").read().strip() -except: - pass +CODE_SUBMISSION_REPOSITORY_NAMESPACE = "tira-io" diff --git a/application/src/django_admin/urls.py b/application/src/django_admin/urls.py index 4368d747a..02850af19 100644 --- a/application/src/django_admin/urls.py +++ b/application/src/django_admin/urls.py @@ -13,12 +13,9 @@ 1. Import the include() function: from django.urls import include, path 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ -from django.contrib import admin -from django.urls import path, include -from django.conf import settings -from django.conf.urls.static import static + +from django.urls import include, path urlpatterns = [ - path('admin/', admin.site.urls), - path('', include('tira.urls')), -] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + path("", include("tira_app.urls")), +] diff --git a/application/src/django_admin/wsgi.py b/application/src/django_admin/wsgi.py index 59b755597..f473d6bc5 100644 --- a/application/src/django_admin/wsgi.py +++ b/application/src/django_admin/wsgi.py @@ -9,8 +9,12 @@ import os +from django.core.management import call_command from django.core.wsgi import get_wsgi_application -os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_admin.settings') +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_admin.settings") application = get_wsgi_application() + +# https://stackoverflow.com/a/58182766 +call_command("migrate") diff --git a/application/src/manage.py b/application/src/manage.py index ed42e76e7..fa29ef217 100755 --- a/application/src/manage.py +++ b/application/src/manage.py @@ -4,9 +4,9 @@ import sys -def main(): +def main() -> None: """Run administrative tasks.""" - os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_admin.settings') + os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_admin.settings") try: from django.core.management import execute_from_command_line except ImportError as exc: @@ -18,5 +18,5 @@ def main(): execute_from_command_line(sys.argv) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/application/src/run_grpc_server.py b/application/src/run_grpc_server.py index 164489115..b1aa43ddd 100644 --- a/application/src/run_grpc_server.py +++ b/application/src/run_grpc_server.py @@ -1,12 +1,11 @@ -import django import os -os.environ.setdefault( - "DJANGO_SETTINGS_MODULE", - "django_admin.settings" -) -from django_admin.settings import DATABASES, TIME_ZONE, INSTALLED_APPS +import django + +from tira_app.grpc import grpc_server + +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_admin.settings") + django.setup() -from tira.grpc import grpc_server grpc_server.serve() diff --git a/application/src/static/admin/css/autocomplete.css b/application/src/static/admin/css/autocomplete.css deleted file mode 100644 index 3ef95d15f..000000000 --- a/application/src/static/admin/css/autocomplete.css +++ /dev/null @@ -1,260 +0,0 @@ -select.admin-autocomplete { - width: 20em; -} - -.select2-container--admin-autocomplete.select2-container { - min-height: 30px; -} - -.select2-container--admin-autocomplete .select2-selection--single, -.select2-container--admin-autocomplete .select2-selection--multiple { - min-height: 30px; - padding: 0; -} - -.select2-container--admin-autocomplete.select2-container--focus .select2-selection, -.select2-container--admin-autocomplete.select2-container--open .select2-selection { - border-color: #999; - min-height: 30px; -} - -.select2-container--admin-autocomplete.select2-container--focus .select2-selection.select2-selection--single, -.select2-container--admin-autocomplete.select2-container--open .select2-selection.select2-selection--single { - padding: 0; -} - -.select2-container--admin-autocomplete.select2-container--focus .select2-selection.select2-selection--multiple, -.select2-container--admin-autocomplete.select2-container--open .select2-selection.select2-selection--multiple { - padding: 0; -} - 
-.select2-container--admin-autocomplete .select2-selection--single { - background-color: #fff; - border: 1px solid #ccc; - border-radius: 4px; -} - -.select2-container--admin-autocomplete .select2-selection--single .select2-selection__rendered { - color: #444; - line-height: 30px; -} - -.select2-container--admin-autocomplete .select2-selection--single .select2-selection__clear { - cursor: pointer; - float: right; - font-weight: bold; -} - -.select2-container--admin-autocomplete .select2-selection--single .select2-selection__placeholder { - color: #999; -} - -.select2-container--admin-autocomplete .select2-selection--single .select2-selection__arrow { - height: 26px; - position: absolute; - top: 1px; - right: 1px; - width: 20px; -} - -.select2-container--admin-autocomplete .select2-selection--single .select2-selection__arrow b { - border-color: #888 transparent transparent transparent; - border-style: solid; - border-width: 5px 4px 0 4px; - height: 0; - left: 50%; - margin-left: -4px; - margin-top: -2px; - position: absolute; - top: 50%; - width: 0; -} - -.select2-container--admin-autocomplete[dir="rtl"] .select2-selection--single .select2-selection__clear { - float: left; -} - -.select2-container--admin-autocomplete[dir="rtl"] .select2-selection--single .select2-selection__arrow { - left: 1px; - right: auto; -} - -.select2-container--admin-autocomplete.select2-container--disabled .select2-selection--single { - background-color: #eee; - cursor: default; -} - -.select2-container--admin-autocomplete.select2-container--disabled .select2-selection--single .select2-selection__clear { - display: none; -} - -.select2-container--admin-autocomplete.select2-container--open .select2-selection--single .select2-selection__arrow b { - border-color: transparent transparent #888 transparent; - border-width: 0 4px 5px 4px; -} - -.select2-container--admin-autocomplete .select2-selection--multiple { - background-color: white; - border: 1px solid #ccc; - border-radius: 4px; - cursor: text; -} - -.select2-container--admin-autocomplete .select2-selection--multiple .select2-selection__rendered { - box-sizing: border-box; - list-style: none; - margin: 0; - padding: 0 5px; - width: 100%; -} - -.select2-container--admin-autocomplete .select2-selection--multiple .select2-selection__rendered li { - list-style: none; -} - -.select2-container--admin-autocomplete .select2-selection--multiple .select2-selection__placeholder { - color: #999; - margin-top: 5px; - float: left; -} - -.select2-container--admin-autocomplete .select2-selection--multiple .select2-selection__clear { - cursor: pointer; - float: right; - font-weight: bold; - margin: 5px; -} - -.select2-container--admin-autocomplete .select2-selection--multiple .select2-selection__choice { - background-color: #e4e4e4; - border: 1px solid #ccc; - border-radius: 4px; - cursor: default; - float: left; - margin-right: 5px; - margin-top: 5px; - padding: 0 5px; -} - -.select2-container--admin-autocomplete .select2-selection--multiple .select2-selection__choice__remove { - color: #999; - cursor: pointer; - display: inline-block; - font-weight: bold; - margin-right: 2px; -} - -.select2-container--admin-autocomplete .select2-selection--multiple .select2-selection__choice__remove:hover { - color: #333; -} - -.select2-container--admin-autocomplete[dir="rtl"] .select2-selection--multiple .select2-selection__choice, .select2-container--admin-autocomplete[dir="rtl"] .select2-selection--multiple .select2-selection__placeholder, .select2-container--admin-autocomplete[dir="rtl"] 
.select2-selection--multiple .select2-search--inline { - float: right; -} - -.select2-container--admin-autocomplete[dir="rtl"] .select2-selection--multiple .select2-selection__choice { - margin-left: 5px; - margin-right: auto; -} - -.select2-container--admin-autocomplete[dir="rtl"] .select2-selection--multiple .select2-selection__choice__remove { - margin-left: 2px; - margin-right: auto; -} - -.select2-container--admin-autocomplete.select2-container--focus .select2-selection--multiple { - border: solid #999 1px; - outline: 0; -} - -.select2-container--admin-autocomplete.select2-container--disabled .select2-selection--multiple { - background-color: #eee; - cursor: default; -} - -.select2-container--admin-autocomplete.select2-container--disabled .select2-selection__choice__remove { - display: none; -} - -.select2-container--admin-autocomplete.select2-container--open.select2-container--above .select2-selection--single, .select2-container--admin-autocomplete.select2-container--open.select2-container--above .select2-selection--multiple { - border-top-left-radius: 0; - border-top-right-radius: 0; -} - -.select2-container--admin-autocomplete.select2-container--open.select2-container--below .select2-selection--single, .select2-container--admin-autocomplete.select2-container--open.select2-container--below .select2-selection--multiple { - border-bottom-left-radius: 0; - border-bottom-right-radius: 0; -} - -.select2-container--admin-autocomplete .select2-search--dropdown .select2-search__field { - border: 1px solid #ccc; -} - -.select2-container--admin-autocomplete .select2-search--inline .select2-search__field { - background: transparent; - border: none; - outline: 0; - box-shadow: none; - -webkit-appearance: textfield; -} - -.select2-container--admin-autocomplete .select2-results > .select2-results__options { - max-height: 200px; - overflow-y: auto; -} - -.select2-container--admin-autocomplete .select2-results__option[role=group] { - padding: 0; -} - -.select2-container--admin-autocomplete .select2-results__option[aria-disabled=true] { - color: #999; -} - -.select2-container--admin-autocomplete .select2-results__option[aria-selected=true] { - background-color: #ddd; -} - -.select2-container--admin-autocomplete .select2-results__option .select2-results__option { - padding-left: 1em; -} - -.select2-container--admin-autocomplete .select2-results__option .select2-results__option .select2-results__group { - padding-left: 0; -} - -.select2-container--admin-autocomplete .select2-results__option .select2-results__option .select2-results__option { - margin-left: -1em; - padding-left: 2em; -} - -.select2-container--admin-autocomplete .select2-results__option .select2-results__option .select2-results__option .select2-results__option { - margin-left: -2em; - padding-left: 3em; -} - -.select2-container--admin-autocomplete .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option { - margin-left: -3em; - padding-left: 4em; -} - -.select2-container--admin-autocomplete .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option { - margin-left: -4em; - padding-left: 5em; -} - -.select2-container--admin-autocomplete .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option { - margin-left: -5em; - padding-left: 6em; -} - 
-.select2-container--admin-autocomplete .select2-results__option--highlighted[aria-selected] { - background-color: #79aec8; - color: white; -} - -.select2-container--admin-autocomplete .select2-results__group { - cursor: default; - display: block; - padding: 6px; -} diff --git a/application/src/static/admin/css/base.css b/application/src/static/admin/css/base.css deleted file mode 100644 index c4285195f..000000000 --- a/application/src/static/admin/css/base.css +++ /dev/null @@ -1,966 +0,0 @@ -/* - DJANGO Admin styles -*/ - -@import url(fonts.css); - -html, body { - height: 100%; -} - -body { - margin: 0; - padding: 0; - font-size: 14px; - font-family: "Roboto","Lucida Grande","DejaVu Sans","Bitstream Vera Sans",Verdana,Arial,sans-serif; - color: #333; - background: #fff; -} - -/* LINKS */ - -a:link, a:visited { - color: #447e9b; - text-decoration: none; -} - -a:focus, a:hover { - color: #036; -} - -a:focus { - text-decoration: underline; -} - -a img { - border: none; -} - -a.section:link, a.section:visited { - color: #fff; - text-decoration: none; -} - -a.section:focus, a.section:hover { - text-decoration: underline; -} - -/* GLOBAL DEFAULTS */ - -p, ol, ul, dl { - margin: .2em 0 .8em 0; -} - -p { - padding: 0; - line-height: 140%; -} - -h1,h2,h3,h4,h5 { - font-weight: bold; -} - -h1 { - margin: 0 0 20px; - font-weight: 300; - font-size: 20px; - color: #666; -} - -h2 { - font-size: 16px; - margin: 1em 0 .5em 0; -} - -h2.subhead { - font-weight: normal; - margin-top: 0; -} - -h3 { - font-size: 14px; - margin: .8em 0 .3em 0; - color: #666; - font-weight: bold; -} - -h4 { - font-size: 12px; - margin: 1em 0 .8em 0; - padding-bottom: 3px; -} - -h5 { - font-size: 10px; - margin: 1.5em 0 .5em 0; - color: #666; - text-transform: uppercase; - letter-spacing: 1px; -} - -ul > li { - list-style-type: square; - padding: 1px 0; -} - -li ul { - margin-bottom: 0; -} - -li, dt, dd { - font-size: 13px; - line-height: 20px; -} - -dt { - font-weight: bold; - margin-top: 4px; -} - -dd { - margin-left: 0; -} - -form { - margin: 0; - padding: 0; -} - -fieldset { - margin: 0; - min-width: 0; - padding: 0; - border: none; - border-top: 1px solid #eee; -} - -blockquote { - font-size: 11px; - color: #777; - margin-left: 2px; - padding-left: 10px; - border-left: 5px solid #ddd; -} - -code, pre { - font-family: "Bitstream Vera Sans Mono", Monaco, "Courier New", Courier, monospace; - color: #666; - font-size: 12px; - overflow-x: auto; -} - -pre.literal-block { - margin: 10px; - background: #eee; - padding: 6px 8px; -} - -code strong { - color: #930; -} - -hr { - clear: both; - color: #eee; - background-color: #eee; - height: 1px; - border: none; - margin: 0; - padding: 0; - font-size: 1px; - line-height: 1px; -} - -/* TEXT STYLES & MODIFIERS */ - -.small { - font-size: 11px; -} - -.mini { - font-size: 10px; -} - -.help, p.help, form p.help, div.help, form div.help, div.help li { - font-size: 11px; - color: #999; -} - -div.help ul { - margin-bottom: 0; -} - -.help-tooltip { - cursor: help; -} - -p img, h1 img, h2 img, h3 img, h4 img, td img { - vertical-align: middle; -} - -.quiet, a.quiet:link, a.quiet:visited { - color: #999; - font-weight: normal; -} - -.clear { - clear: both; -} - -.nowrap { - white-space: nowrap; -} - -/* TABLES */ - -table { - border-collapse: collapse; - border-color: #ccc; -} - -td, th { - font-size: 13px; - line-height: 16px; - border-bottom: 1px solid #eee; - vertical-align: top; - padding: 8px; - font-family: "Roboto", "Lucida Grande", Verdana, Arial, sans-serif; -} - -th { - font-weight: 
600; - text-align: left; -} - -thead th, -tfoot td { - color: #666; - padding: 5px 10px; - font-size: 11px; - background: #fff; - border: none; - border-top: 1px solid #eee; - border-bottom: 1px solid #eee; -} - -tfoot td { - border-bottom: none; - border-top: 1px solid #eee; -} - -thead th.required { - color: #000; -} - -tr.alt { - background: #f6f6f6; -} - -tr:nth-child(odd), .row-form-errors { - background: #fff; -} - -tr:nth-child(even), -tr:nth-child(even) .errorlist, -tr:nth-child(odd) + .row-form-errors, -tr:nth-child(odd) + .row-form-errors .errorlist { - background: #f9f9f9; -} - -/* SORTABLE TABLES */ - -thead th { - padding: 5px 10px; - line-height: normal; - text-transform: uppercase; - background: #f6f6f6; -} - -thead th a:link, thead th a:visited { - color: #666; -} - -thead th.sorted { - background: #eee; -} - -thead th.sorted .text { - padding-right: 42px; -} - -table thead th .text span { - padding: 8px 10px; - display: block; -} - -table thead th .text a { - display: block; - cursor: pointer; - padding: 8px 10px; -} - -table thead th .text a:focus, table thead th .text a:hover { - background: #eee; -} - -thead th.sorted a.sortremove { - visibility: hidden; -} - -table thead th.sorted:hover a.sortremove { - visibility: visible; -} - -table thead th.sorted .sortoptions { - display: block; - padding: 9px 5px 0 5px; - float: right; - text-align: right; -} - -table thead th.sorted .sortpriority { - font-size: .8em; - min-width: 12px; - text-align: center; - vertical-align: 3px; - margin-left: 2px; - margin-right: 2px; -} - -table thead th.sorted .sortoptions a { - position: relative; - width: 14px; - height: 14px; - display: inline-block; - background: url(../img/sorting-icons.svg) 0 0 no-repeat; - background-size: 14px auto; -} - -table thead th.sorted .sortoptions a.sortremove { - background-position: 0 0; -} - -table thead th.sorted .sortoptions a.sortremove:after { - content: '\\'; - position: absolute; - top: -6px; - left: 3px; - font-weight: 200; - font-size: 18px; - color: #999; -} - -table thead th.sorted .sortoptions a.sortremove:focus:after, -table thead th.sorted .sortoptions a.sortremove:hover:after { - color: #447e9b; -} - -table thead th.sorted .sortoptions a.sortremove:focus, -table thead th.sorted .sortoptions a.sortremove:hover { - background-position: 0 -14px; -} - -table thead th.sorted .sortoptions a.ascending { - background-position: 0 -28px; -} - -table thead th.sorted .sortoptions a.ascending:focus, -table thead th.sorted .sortoptions a.ascending:hover { - background-position: 0 -42px; -} - -table thead th.sorted .sortoptions a.descending { - top: 1px; - background-position: 0 -56px; -} - -table thead th.sorted .sortoptions a.descending:focus, -table thead th.sorted .sortoptions a.descending:hover { - background-position: 0 -70px; -} - -/* FORM DEFAULTS */ - -input, textarea, select, .form-row p, form .button { - margin: 2px 0; - padding: 2px 3px; - vertical-align: middle; - font-family: "Roboto", "Lucida Grande", Verdana, Arial, sans-serif; - font-weight: normal; - font-size: 13px; -} -.form-row div.help { - padding: 2px 3px; -} - -textarea { - vertical-align: top; -} - -input[type=text], input[type=password], input[type=email], input[type=url], -input[type=number], input[type=tel], textarea, select, .vTextField { - border: 1px solid #ccc; - border-radius: 4px; - padding: 5px 6px; - margin-top: 0; -} - -input[type=text]:focus, input[type=password]:focus, input[type=email]:focus, -input[type=url]:focus, input[type=number]:focus, input[type=tel]:focus, 
-textarea:focus, select:focus, .vTextField:focus { - border-color: #999; -} - -select { - height: 30px; -} - -select[multiple] { - /* Allow HTML size attribute to override the height in the rule above. */ - height: auto; - min-height: 150px; -} - -/* FORM BUTTONS */ - -.button, input[type=submit], input[type=button], .submit-row input, a.button { - background: #79aec8; - padding: 10px 15px; - border: none; - border-radius: 4px; - color: #fff; - cursor: pointer; -} - -a.button { - padding: 4px 5px; -} - -.button:active, input[type=submit]:active, input[type=button]:active, -.button:focus, input[type=submit]:focus, input[type=button]:focus, -.button:hover, input[type=submit]:hover, input[type=button]:hover { - background: #609ab6; -} - -.button[disabled], input[type=submit][disabled], input[type=button][disabled] { - opacity: 0.4; -} - -.button.default, input[type=submit].default, .submit-row input.default { - float: right; - border: none; - font-weight: 400; - background: #417690; -} - -.button.default:active, input[type=submit].default:active, -.button.default:focus, input[type=submit].default:focus, -.button.default:hover, input[type=submit].default:hover { - background: #205067; -} - -.button[disabled].default, -input[type=submit][disabled].default, -input[type=button][disabled].default { - opacity: 0.4; -} - - -/* MODULES */ - -.module { - border: none; - margin-bottom: 30px; - background: #fff; -} - -.module p, .module ul, .module h3, .module h4, .module dl, .module pre { - padding-left: 10px; - padding-right: 10px; -} - -.module blockquote { - margin-left: 12px; -} - -.module ul, .module ol { - margin-left: 1.5em; -} - -.module h3 { - margin-top: .6em; -} - -.module h2, .module caption, .inline-group h2 { - margin: 0; - padding: 8px; - font-weight: 400; - font-size: 13px; - text-align: left; - background: #79aec8; - color: #fff; -} - -.module caption, -.inline-group h2 { - font-size: 12px; - letter-spacing: 0.5px; - text-transform: uppercase; -} - -.module table { - border-collapse: collapse; -} - -/* MESSAGES & ERRORS */ - -ul.messagelist { - padding: 0; - margin: 0; -} - -ul.messagelist li { - display: block; - font-weight: 400; - font-size: 13px; - padding: 10px 10px 10px 65px; - margin: 0 0 10px 0; - background: #dfd url(../img/icon-yes.svg) 40px 12px no-repeat; - background-size: 16px auto; - color: #333; -} - -ul.messagelist li.warning { - background: #ffc url(../img/icon-alert.svg) 40px 14px no-repeat; - background-size: 14px auto; -} - -ul.messagelist li.error { - background: #ffefef url(../img/icon-no.svg) 40px 12px no-repeat; - background-size: 16px auto; -} - -.errornote { - font-size: 14px; - font-weight: 700; - display: block; - padding: 10px 12px; - margin: 0 0 10px 0; - color: #ba2121; - border: 1px solid #ba2121; - border-radius: 4px; - background-color: #fff; - background-position: 5px 12px; -} - -ul.errorlist { - margin: 0 0 4px; - padding: 0; - color: #ba2121; - background: #fff; -} - -ul.errorlist li { - font-size: 13px; - display: block; - margin-bottom: 4px; -} - -ul.errorlist li:first-child { - margin-top: 0; -} - -ul.errorlist li a { - color: inherit; - text-decoration: underline; -} - -td ul.errorlist { - margin: 0; - padding: 0; -} - -td ul.errorlist li { - margin: 0; -} - -.form-row.errors { - margin: 0; - border: none; - border-bottom: 1px solid #eee; - background: none; -} - -.form-row.errors ul.errorlist li { - padding-left: 0; -} - -.errors input, .errors select, .errors textarea, -td ul.errorlist + input, td ul.errorlist + select, td ul.errorlist + 
textarea { - border: 1px solid #ba2121; -} - -.description { - font-size: 12px; - padding: 5px 0 0 12px; -} - -/* BREADCRUMBS */ - -div.breadcrumbs { - background: #79aec8; - padding: 10px 40px; - border: none; - font-size: 14px; - color: #c4dce8; - text-align: left; -} - -div.breadcrumbs a { - color: #fff; -} - -div.breadcrumbs a:focus, div.breadcrumbs a:hover { - color: #c4dce8; -} - -/* ACTION ICONS */ - -.viewlink, .inlineviewlink { - padding-left: 16px; - background: url(../img/icon-viewlink.svg) 0 1px no-repeat; -} - -.addlink { - padding-left: 16px; - background: url(../img/icon-addlink.svg) 0 1px no-repeat; -} - -.changelink, .inlinechangelink { - padding-left: 16px; - background: url(../img/icon-changelink.svg) 0 1px no-repeat; -} - -.deletelink { - padding-left: 16px; - background: url(../img/icon-deletelink.svg) 0 1px no-repeat; -} - -a.deletelink:link, a.deletelink:visited { - color: #CC3434; -} - -a.deletelink:focus, a.deletelink:hover { - color: #993333; - text-decoration: none; -} - -/* OBJECT TOOLS */ - -.object-tools { - font-size: 10px; - font-weight: bold; - padding-left: 0; - float: right; - position: relative; - margin-top: -48px; -} - -.form-row .object-tools { - margin-top: 5px; - margin-bottom: 5px; - float: none; - height: 2em; - padding-left: 3.5em; -} - -.object-tools li { - display: block; - float: left; - margin-left: 5px; - height: 16px; -} - -.object-tools a { - border-radius: 15px; -} - -.object-tools a:link, .object-tools a:visited { - display: block; - float: left; - padding: 3px 12px; - background: #999; - font-weight: 400; - font-size: 11px; - text-transform: uppercase; - letter-spacing: 0.5px; - color: #fff; -} - -.object-tools a:focus, .object-tools a:hover { - background-color: #417690; -} - -.object-tools a:focus{ - text-decoration: none; -} - -.object-tools a.viewsitelink, .object-tools a.golink,.object-tools a.addlink { - background-repeat: no-repeat; - background-position: right 7px center; - padding-right: 26px; -} - -.object-tools a.viewsitelink, .object-tools a.golink { - background-image: url(../img/tooltag-arrowright.svg); -} - -.object-tools a.addlink { - background-image: url(../img/tooltag-add.svg); -} - -/* OBJECT HISTORY */ - -table#change-history { - width: 100%; -} - -table#change-history tbody th { - width: 16em; -} - -/* PAGE STRUCTURE */ - -#container { - position: relative; - width: 100%; - min-width: 980px; - padding: 0; - display: flex; - flex-direction: column; - height: 100%; -} - -#container > div { - flex-shrink: 0; -} - -#container > .main { - display: flex; - flex: 1 0 auto; -} - -.main > .content { - flex: 1 0; - max-width: 100%; -} - -#content { - padding: 20px 40px; -} - -.dashboard #content { - width: 600px; -} - -#content-main { - float: left; - width: 100%; -} - -#content-related { - float: right; - width: 260px; - position: relative; - margin-right: -300px; -} - -#footer { - clear: both; - padding: 10px; -} - -/* COLUMN TYPES */ - -.colMS { - margin-right: 300px; -} - -.colSM { - margin-left: 300px; -} - -.colSM #content-related { - float: left; - margin-right: 0; - margin-left: -300px; -} - -.colSM #content-main { - float: right; -} - -.popup .colM { - width: auto; -} - -/* HEADER */ - -#header { - width: auto; - height: auto; - display: flex; - justify-content: space-between; - align-items: center; - padding: 10px 40px; - background: #417690; - color: #ffc; - overflow: hidden; -} - -#header a:link, #header a:visited { - color: #fff; -} - -#header a:focus , #header a:hover { - text-decoration: underline; -} - 
-#branding { - float: left; -} - -#branding h1 { - padding: 0; - margin: 0 20px 0 0; - font-weight: 300; - font-size: 24px; - color: #f5dd5d; -} - -#branding h1, #branding h1 a:link, #branding h1 a:visited { - color: #f5dd5d; -} - -#branding h2 { - padding: 0 10px; - font-size: 14px; - margin: -8px 0 8px 0; - font-weight: normal; - color: #ffc; -} - -#branding a:hover { - text-decoration: none; -} - -#user-tools { - float: right; - padding: 0; - margin: 0 0 0 20px; - font-weight: 300; - font-size: 11px; - letter-spacing: 0.5px; - text-transform: uppercase; - text-align: right; -} - -#user-tools a { - border-bottom: 1px solid rgba(255, 255, 255, 0.25); -} - -#user-tools a:focus, #user-tools a:hover { - text-decoration: none; - border-bottom-color: #79aec8; - color: #79aec8; -} - -/* SIDEBAR */ - -#content-related { - background: #f8f8f8; -} - -#content-related .module { - background: none; -} - -#content-related h3 { - font-size: 14px; - color: #666; - padding: 0 16px; - margin: 0 0 16px; -} - -#content-related h4 { - font-size: 13px; -} - -#content-related p { - padding-left: 16px; - padding-right: 16px; -} - -#content-related .actionlist { - padding: 0; - margin: 16px; -} - -#content-related .actionlist li { - line-height: 1.2; - margin-bottom: 10px; - padding-left: 18px; -} - -#content-related .module h2 { - background: none; - padding: 16px; - margin-bottom: 16px; - border-bottom: 1px solid #eaeaea; - font-size: 18px; - color: #333; -} - -.delete-confirmation form input[type="submit"] { - background: #ba2121; - border-radius: 4px; - padding: 10px 15px; - color: #fff; -} - -.delete-confirmation form input[type="submit"]:active, -.delete-confirmation form input[type="submit"]:focus, -.delete-confirmation form input[type="submit"]:hover { - background: #a41515; -} - -.delete-confirmation form .cancel-link { - display: inline-block; - vertical-align: middle; - height: 15px; - line-height: 15px; - background: #ddd; - border-radius: 4px; - padding: 10px 15px; - color: #333; - margin: 0 0 0 10px; -} - -.delete-confirmation form .cancel-link:active, -.delete-confirmation form .cancel-link:focus, -.delete-confirmation form .cancel-link:hover { - background: #ccc; -} - -/* POPUP */ -.popup #content { - padding: 20px; -} - -.popup #container { - min-width: 0; -} - -.popup #header { - padding: 10px 20px; -} diff --git a/application/src/static/admin/css/changelists.css b/application/src/static/admin/css/changelists.css deleted file mode 100644 index 7b8b9c771..000000000 --- a/application/src/static/admin/css/changelists.css +++ /dev/null @@ -1,354 +0,0 @@ -/* CHANGELISTS */ - -#changelist { - display: flex; - align-items: flex-start; - justify-content: space-between; -} - -#changelist .changelist-form-container { - flex: 1 1 auto; - min-width: 0; -} - -#changelist table { - width: 100%; -} - -.change-list .hiddenfields { display:none; } - -.change-list .filtered table { - border-right: none; -} - -.change-list .filtered { - min-height: 400px; -} - -.change-list .filtered .results, .change-list .filtered .paginator, -.filtered #toolbar, .filtered div.xfull { - width: auto; -} - -.change-list .filtered table tbody th { - padding-right: 1em; -} - -#changelist-form .results { - overflow-x: auto; - width: 100%; -} - -#changelist .toplinks { - border-bottom: 1px solid #ddd; -} - -#changelist .paginator { - color: #666; - border-bottom: 1px solid #eee; - background: #fff; - overflow: hidden; -} - -/* CHANGELIST TABLES */ - -#changelist table thead th { - padding: 0; - white-space: nowrap; - vertical-align: 
middle; -} - -#changelist table thead th.action-checkbox-column { - width: 1.5em; - text-align: center; -} - -#changelist table tbody td.action-checkbox { - text-align: center; -} - -#changelist table tfoot { - color: #666; -} - -/* TOOLBAR */ - -#toolbar { - padding: 8px 10px; - margin-bottom: 15px; - border-top: 1px solid #eee; - border-bottom: 1px solid #eee; - background: #f8f8f8; - color: #666; -} - -#toolbar form input { - border-radius: 4px; - font-size: 14px; - padding: 5px; - color: #333; -} - -#toolbar #searchbar { - height: 19px; - border: 1px solid #ccc; - padding: 2px 5px; - margin: 0; - vertical-align: top; - font-size: 13px; - max-width: 100%; -} - -#toolbar #searchbar:focus { - border-color: #999; -} - -#toolbar form input[type="submit"] { - border: 1px solid #ccc; - font-size: 13px; - padding: 4px 8px; - margin: 0; - vertical-align: middle; - background: #fff; - box-shadow: 0 -15px 20px -10px rgba(0, 0, 0, 0.15) inset; - cursor: pointer; - color: #333; -} - -#toolbar form input[type="submit"]:focus, -#toolbar form input[type="submit"]:hover { - border-color: #999; -} - -#changelist-search img { - vertical-align: middle; - margin-right: 4px; -} - -/* FILTER COLUMN */ - -#changelist-filter { - order: 1; - width: 240px; - background: #f8f8f8; - border-left: none; - margin: 0 0 0 30px; -} - -#changelist-filter h2 { - font-size: 14px; - text-transform: uppercase; - letter-spacing: 0.5px; - padding: 5px 15px; - margin-bottom: 12px; - border-bottom: none; -} - -#changelist-filter h3 { - font-weight: 400; - font-size: 14px; - padding: 0 15px; - margin-bottom: 10px; -} - -#changelist-filter ul { - margin: 5px 0; - padding: 0 15px 15px; - border-bottom: 1px solid #eaeaea; -} - -#changelist-filter ul:last-child { - border-bottom: none; -} - -#changelist-filter li { - list-style-type: none; - margin-left: 0; - padding-left: 0; -} - -#changelist-filter a { - display: block; - color: #999; - text-overflow: ellipsis; - overflow-x: hidden; -} - -#changelist-filter li.selected { - border-left: 5px solid #eaeaea; - padding-left: 10px; - margin-left: -15px; -} - -#changelist-filter li.selected a { - color: #5b80b2; -} - -#changelist-filter a:focus, #changelist-filter a:hover, -#changelist-filter li.selected a:focus, -#changelist-filter li.selected a:hover { - color: #036; -} - -#changelist-filter #changelist-filter-clear a { - font-size: 13px; - padding-bottom: 10px; - border-bottom: 1px solid #eaeaea; -} - -/* DATE DRILLDOWN */ - -.change-list ul.toplinks { - display: block; - float: left; - padding: 0; - margin: 0; - width: 100%; -} - -.change-list ul.toplinks li { - padding: 3px 6px; - font-weight: bold; - list-style-type: none; - display: inline-block; -} - -.change-list ul.toplinks .date-back a { - color: #999; -} - -.change-list ul.toplinks .date-back a:focus, -.change-list ul.toplinks .date-back a:hover { - color: #036; -} - -/* PAGINATOR */ - -.paginator { - font-size: 13px; - padding-top: 10px; - padding-bottom: 10px; - line-height: 22px; - margin: 0; - border-top: 1px solid #ddd; - width: 100%; -} - -.paginator a:link, .paginator a:visited { - padding: 2px 6px; - background: #79aec8; - text-decoration: none; - color: #fff; -} - -.paginator a.showall { - border: none; - background: none; - color: #5b80b2; -} - -.paginator a.showall:focus, .paginator a.showall:hover { - background: none; - color: #036; -} - -.paginator .end { - margin-right: 6px; -} - -.paginator .this-page { - padding: 2px 6px; - font-weight: bold; - font-size: 13px; - vertical-align: top; -} - -.paginator a:focus, 
.paginator a:hover { - color: white; - background: #036; -} - -/* ACTIONS */ - -.filtered .actions { - border-right: none; -} - -#changelist table input { - margin: 0; - vertical-align: baseline; -} - -#changelist table tbody tr.selected { - background-color: #FFFFCC; -} - -#changelist .actions { - padding: 10px; - background: #fff; - border-top: none; - border-bottom: none; - line-height: 24px; - color: #999; - width: 100%; -} - -#changelist .actions.selected { - background: #fffccf; - border-top: 1px solid #fffee8; - border-bottom: 1px solid #edecd6; -} - -#changelist .actions span.all, -#changelist .actions span.action-counter, -#changelist .actions span.clear, -#changelist .actions span.question { - font-size: 13px; - margin: 0 0.5em; - display: none; -} - -#changelist .actions:last-child { - border-bottom: none; -} - -#changelist .actions select { - vertical-align: top; - height: 24px; - background: none; - color: #000; - border: 1px solid #ccc; - border-radius: 4px; - font-size: 14px; - padding: 0 0 0 4px; - margin: 0; - margin-left: 10px; -} - -#changelist .actions select:focus { - border-color: #999; -} - -#changelist .actions label { - display: inline-block; - vertical-align: middle; - font-size: 13px; -} - -#changelist .actions .button { - font-size: 13px; - border: 1px solid #ccc; - border-radius: 4px; - background: #fff; - box-shadow: 0 -15px 20px -10px rgba(0, 0, 0, 0.15) inset; - cursor: pointer; - height: 24px; - line-height: 1; - padding: 4px 8px; - margin: 0; - color: #333; -} - -#changelist .actions .button:focus, #changelist .actions .button:hover { - border-color: #999; -} diff --git a/application/src/static/admin/css/dashboard.css b/application/src/static/admin/css/dashboard.css deleted file mode 100644 index 91d6efde8..000000000 --- a/application/src/static/admin/css/dashboard.css +++ /dev/null @@ -1,26 +0,0 @@ -/* DASHBOARD */ - -.dashboard .module table th { - width: 100%; -} - -.dashboard .module table td { - white-space: nowrap; -} - -.dashboard .module table td a { - display: block; - padding-right: .6em; -} - -/* RECENT ACTIONS MODULE */ - -.module ul.actionlist { - margin-left: 0; -} - -ul.actionlist li { - list-style-type: none; - overflow: hidden; - text-overflow: ellipsis; -} diff --git a/application/src/static/admin/css/fonts.css b/application/src/static/admin/css/fonts.css deleted file mode 100644 index c837e017c..000000000 --- a/application/src/static/admin/css/fonts.css +++ /dev/null @@ -1,20 +0,0 @@ -@font-face { - font-family: 'Roboto'; - src: url('../fonts/Roboto-Bold-webfont.woff'); - font-weight: 700; - font-style: normal; -} - -@font-face { - font-family: 'Roboto'; - src: url('../fonts/Roboto-Regular-webfont.woff'); - font-weight: 400; - font-style: normal; -} - -@font-face { - font-family: 'Roboto'; - src: url('../fonts/Roboto-Light-webfont.woff'); - font-weight: 300; - font-style: normal; -} diff --git a/application/src/static/admin/css/forms.css b/application/src/static/admin/css/forms.css deleted file mode 100644 index 89d57482f..000000000 --- a/application/src/static/admin/css/forms.css +++ /dev/null @@ -1,527 +0,0 @@ -@import url('widgets.css'); - -/* FORM ROWS */ - -.form-row { - overflow: hidden; - padding: 10px; - font-size: 13px; - border-bottom: 1px solid #eee; -} - -.form-row img, .form-row input { - vertical-align: middle; -} - -.form-row label input[type="checkbox"] { - margin-top: 0; - vertical-align: 0; -} - -form .form-row p { - padding-left: 0; -} - -.hidden { - display: none; -} - -/* FORM LABELS */ - -label { - font-weight: 
normal; - color: #666; - font-size: 13px; -} - -.required label, label.required { - font-weight: bold; - color: #333; -} - -/* RADIO BUTTONS */ - -form ul.radiolist li { - list-style-type: none; -} - -form ul.radiolist label { - float: none; - display: inline; -} - -form ul.radiolist input[type="radio"] { - margin: -2px 4px 0 0; - padding: 0; -} - -form ul.inline { - margin-left: 0; - padding: 0; -} - -form ul.inline li { - float: left; - padding-right: 7px; -} - -/* ALIGNED FIELDSETS */ - -.aligned label { - display: block; - padding: 4px 10px 0 0; - float: left; - width: 160px; - word-wrap: break-word; - line-height: 1; -} - -.aligned label:not(.vCheckboxLabel):after { - content: ''; - display: inline-block; - vertical-align: middle; - height: 26px; -} - -.aligned label + p, .aligned label + div.help, .aligned label + div.readonly { - padding: 6px 0; - margin-top: 0; - margin-bottom: 0; - margin-left: 170px; -} - -.aligned ul label { - display: inline; - float: none; - width: auto; -} - -.aligned .form-row input { - margin-bottom: 0; -} - -.colMS .aligned .vLargeTextField, .colMS .aligned .vXMLLargeTextField { - width: 350px; -} - -form .aligned ul { - margin-left: 160px; - padding-left: 10px; -} - -form .aligned ul.radiolist { - display: inline-block; - margin: 0; - padding: 0; -} - -form .aligned p.help, -form .aligned div.help { - clear: left; - margin-top: 0; - margin-left: 160px; - padding-left: 10px; -} - -form .aligned label + p.help, -form .aligned label + div.help { - margin-left: 0; - padding-left: 0; -} - -form .aligned p.help:last-child, -form .aligned div.help:last-child { - margin-bottom: 0; - padding-bottom: 0; -} - -form .aligned input + p.help, -form .aligned textarea + p.help, -form .aligned select + p.help, -form .aligned input + div.help, -form .aligned textarea + div.help, -form .aligned select + div.help { - margin-left: 160px; - padding-left: 10px; -} - -form .aligned ul li { - list-style: none; -} - -form .aligned table p { - margin-left: 0; - padding-left: 0; -} - -.aligned .vCheckboxLabel { - float: none; - width: auto; - display: inline-block; - vertical-align: -3px; - padding: 0 0 5px 5px; -} - -.aligned .vCheckboxLabel + p.help, -.aligned .vCheckboxLabel + div.help { - margin-top: -4px; -} - -.colM .aligned .vLargeTextField, .colM .aligned .vXMLLargeTextField { - width: 610px; -} - -.checkbox-row p.help, -.checkbox-row div.help { - margin-left: 0; - padding-left: 0; -} - -fieldset .fieldBox { - float: left; - margin-right: 20px; -} - -/* WIDE FIELDSETS */ - -.wide label { - width: 200px; -} - -form .wide p, -form .wide input + p.help, -form .wide input + div.help { - margin-left: 200px; -} - -form .wide p.help, -form .wide div.help { - padding-left: 38px; -} - -form div.help ul { - padding-left: 0; - margin-left: 0; -} - -.colM fieldset.wide .vLargeTextField, .colM fieldset.wide .vXMLLargeTextField { - width: 450px; -} - -/* COLLAPSED FIELDSETS */ - -fieldset.collapsed * { - display: none; -} - -fieldset.collapsed h2, fieldset.collapsed { - display: block; -} - -fieldset.collapsed { - border: 1px solid #eee; - border-radius: 4px; - overflow: hidden; -} - -fieldset.collapsed h2 { - background: #f8f8f8; - color: #666; -} - -fieldset .collapse-toggle { - color: #fff; -} - -fieldset.collapsed .collapse-toggle { - background: transparent; - display: inline; - color: #447e9b; -} - -/* MONOSPACE TEXTAREAS */ - -fieldset.monospace textarea { - font-family: "Bitstream Vera Sans Mono", Monaco, "Courier New", Courier, monospace; -} - -/* SUBMIT ROW */ - -.submit-row { - 
padding: 12px 14px; - margin: 0 0 20px; - background: #f8f8f8; - border: 1px solid #eee; - border-radius: 4px; - text-align: right; - overflow: hidden; -} - -body.popup .submit-row { - overflow: auto; -} - -.submit-row input { - height: 35px; - line-height: 15px; - margin: 0 0 0 5px; -} - -.submit-row input.default { - margin: 0 0 0 8px; - text-transform: uppercase; -} - -.submit-row p { - margin: 0.3em; -} - -.submit-row p.deletelink-box { - float: left; - margin: 0; -} - -.submit-row a.deletelink { - display: block; - background: #ba2121; - border-radius: 4px; - padding: 10px 15px; - height: 15px; - line-height: 15px; - color: #fff; -} - -.submit-row a.closelink { - display: inline-block; - background: #bbbbbb; - border-radius: 4px; - padding: 10px 15px; - height: 15px; - line-height: 15px; - margin: 0 0 0 5px; - color: #fff; -} - -.submit-row a.deletelink:focus, -.submit-row a.deletelink:hover, -.submit-row a.deletelink:active { - background: #a41515; -} - -.submit-row a.closelink:focus, -.submit-row a.closelink:hover, -.submit-row a.closelink:active { - background: #aaaaaa; -} - -/* CUSTOM FORM FIELDS */ - -.vSelectMultipleField { - vertical-align: top; -} - -.vCheckboxField { - border: none; -} - -.vDateField, .vTimeField { - margin-right: 2px; - margin-bottom: 4px; -} - -.vDateField { - min-width: 6.85em; -} - -.vTimeField { - min-width: 4.7em; -} - -.vURLField { - width: 30em; -} - -.vLargeTextField, .vXMLLargeTextField { - width: 48em; -} - -.flatpages-flatpage #id_content { - height: 40.2em; -} - -.module table .vPositiveSmallIntegerField { - width: 2.2em; -} - -.vTextField, .vUUIDField { - width: 20em; -} - -.vIntegerField { - width: 5em; -} - -.vBigIntegerField { - width: 10em; -} - -.vForeignKeyRawIdAdminField { - width: 5em; -} - -/* INLINES */ - -.inline-group { - padding: 0; - margin: 0 0 30px; -} - -.inline-group thead th { - padding: 8px 10px; -} - -.inline-group .aligned label { - width: 160px; -} - -.inline-related { - position: relative; -} - -.inline-related h3 { - margin: 0; - color: #666; - padding: 5px; - font-size: 13px; - background: #f8f8f8; - border-top: 1px solid #eee; - border-bottom: 1px solid #eee; -} - -.inline-related h3 span.delete { - float: right; -} - -.inline-related h3 span.delete label { - margin-left: 2px; - font-size: 11px; -} - -.inline-related fieldset { - margin: 0; - background: #fff; - border: none; - width: 100%; -} - -.inline-related fieldset.module h3 { - margin: 0; - padding: 2px 5px 3px 5px; - font-size: 11px; - text-align: left; - font-weight: bold; - background: #bcd; - color: #fff; -} - -.inline-group .tabular fieldset.module { - border: none; -} - -.inline-related.tabular fieldset.module table { - width: 100%; - overflow-x: scroll; -} - -.last-related fieldset { - border: none; -} - -.inline-group .tabular tr.has_original td { - padding-top: 2em; -} - -.inline-group .tabular tr td.original { - padding: 2px 0 0 0; - width: 0; - _position: relative; -} - -.inline-group .tabular th.original { - width: 0px; - padding: 0; -} - -.inline-group .tabular td.original p { - position: absolute; - left: 0; - height: 1.1em; - padding: 2px 9px; - overflow: hidden; - font-size: 9px; - font-weight: bold; - color: #666; - _width: 700px; -} - -.inline-group ul.tools { - padding: 0; - margin: 0; - list-style: none; -} - -.inline-group ul.tools li { - display: inline; - padding: 0 5px; -} - -.inline-group div.add-row, -.inline-group .tabular tr.add-row td { - color: #666; - background: #f8f8f8; - padding: 8px 10px; - border-bottom: 1px solid #eee; -} - 
-.inline-group .tabular tr.add-row td { - padding: 8px 10px; - border-bottom: 1px solid #eee; -} - -.inline-group ul.tools a.add, -.inline-group div.add-row a, -.inline-group .tabular tr.add-row td a { - background: url(../img/icon-addlink.svg) 0 1px no-repeat; - padding-left: 16px; - font-size: 12px; -} - -.empty-form { - display: none; -} - -/* RELATED FIELD ADD ONE / LOOKUP */ - -.related-lookup { - margin-left: 5px; - display: inline-block; - vertical-align: middle; - background-repeat: no-repeat; - background-size: 14px; -} - -.related-lookup { - width: 16px; - height: 16px; - background-image: url(../img/search.svg); -} - -form .related-widget-wrapper ul { - display: inline-block; - margin-left: 0; - padding-left: 0; -} - -.clearable-file-input input { - margin-top: 0; -} diff --git a/application/src/static/admin/css/login.css b/application/src/static/admin/css/login.css deleted file mode 100644 index 062b36e05..000000000 --- a/application/src/static/admin/css/login.css +++ /dev/null @@ -1,79 +0,0 @@ -/* LOGIN FORM */ - -.login { - background: #f8f8f8; - height: auto; -} - -.login #header { - height: auto; - padding: 15px 16px; - justify-content: center; -} - -.login #header h1 { - font-size: 18px; -} - -.login #header h1 a { - color: #fff; -} - -.login #content { - padding: 20px 20px 0; -} - -.login #container { - background: #fff; - border: 1px solid #eaeaea; - border-radius: 4px; - overflow: hidden; - width: 28em; - min-width: 300px; - margin: 100px auto; - height: auto; -} - -.login #content-main { - width: 100%; -} - -.login .form-row { - padding: 4px 0; - float: left; - width: 100%; - border-bottom: none; -} - -.login .form-row label { - padding-right: 0.5em; - line-height: 2em; - font-size: 1em; - clear: both; - color: #333; -} - -.login .form-row #id_username, .login .form-row #id_password { - clear: both; - padding: 8px; - width: 100%; - box-sizing: border-box; -} - -.login span.help { - font-size: 10px; - display: block; -} - -.login .submit-row { - clear: both; - padding: 1em 0 0 9.4em; - margin: 0; - border: none; - background: none; - text-align: left; -} - -.login .password-reset-link { - text-align: center; -} diff --git a/application/src/static/admin/css/nav_sidebar.css b/application/src/static/admin/css/nav_sidebar.css deleted file mode 100644 index 784d08741..000000000 --- a/application/src/static/admin/css/nav_sidebar.css +++ /dev/null @@ -1,119 +0,0 @@ -.sticky { - position: sticky; - top: 0; - max-height: 100vh; -} - -.toggle-nav-sidebar { - z-index: 20; - left: 0; - display: flex; - align-items: center; - justify-content: center; - flex: 0 0 23px; - width: 23px; - border-right: 1px solid #eaeaea; - background-color: #ffffff; - cursor: pointer; - font-size: 20px; - color: #447e9b; - padding: 0; -} - -[dir="rtl"] .toggle-nav-sidebar { - border-left: 1px solid #eaeaea; - border-right: 0; -} - -.toggle-nav-sidebar:hover, -.toggle-nav-sidebar:focus { - background-color: #f6f6f6; -} - -#nav-sidebar { - z-index: 15; - flex: 0 0 275px; - left: -276px; - margin-left: -276px; - border-top: 1px solid transparent; - border-right: 1px solid #eaeaea; - background-color: #ffffff; - overflow: auto; -} - -[dir="rtl"] #nav-sidebar { - border-left: 1px solid #eaeaea; - border-right: 0; - left: 0; - margin-left: 0; - right: -276px; - margin-right: -276px; -} - -.toggle-nav-sidebar::before { - content: '\00BB'; -} - -.main.shifted .toggle-nav-sidebar::before { - content: '\00AB'; -} - -.main.shifted > #nav-sidebar { - left: 24px; - margin-left: 0; -} - -[dir="rtl"] .main.shifted > 
#nav-sidebar { - left: 0; - right: 24px; - margin-right: 0; -} - -#nav-sidebar .module th { - width: 100%; - overflow-wrap: anywhere; -} - -#nav-sidebar .module th, -#nav-sidebar .module caption { - padding-left: 16px; -} - -#nav-sidebar .module td { - white-space: nowrap; -} - -[dir="rtl"] #nav-sidebar .module th, -[dir="rtl"] #nav-sidebar .module caption { - padding-left: 8px; - padding-right: 16px; -} - -#nav-sidebar .current-app .section:link, -#nav-sidebar .current-app .section:visited { - color: #ffc; - font-weight: bold; -} - -#nav-sidebar .current-model { - background: #ffc; -} - -.main > #nav-sidebar + .content { - max-width: calc(100% - 23px); -} - -.main.shifted > #nav-sidebar + .content { - max-width: calc(100% - 299px); -} - -@media (max-width: 767px) { - #nav-sidebar, #toggle-nav-sidebar { - display: none; - } - - .main > #nav-sidebar + .content, - .main.shifted > #nav-sidebar + .content { - max-width: 100%; - } -} diff --git a/application/src/static/admin/css/responsive.css b/application/src/static/admin/css/responsive.css deleted file mode 100644 index ef968c239..000000000 --- a/application/src/static/admin/css/responsive.css +++ /dev/null @@ -1,1004 +0,0 @@ -/* Tablets */ - -input[type="submit"], button { - -webkit-appearance: none; - appearance: none; -} - -@media (max-width: 1024px) { - /* Basic */ - - html { - -webkit-text-size-adjust: 100%; - } - - td, th { - padding: 10px; - font-size: 14px; - } - - .small { - font-size: 12px; - } - - /* Layout */ - - #container { - min-width: 0; - } - - #content { - padding: 20px 30px 30px; - } - - div.breadcrumbs { - padding: 10px 30px; - } - - /* Header */ - - #header { - flex-direction: column; - padding: 15px 30px; - justify-content: flex-start; - } - - #branding h1 { - margin: 0 0 8px; - font-size: 20px; - line-height: 1.2; - } - - #user-tools { - margin: 0; - font-weight: 400; - line-height: 1.85; - text-align: left; - } - - #user-tools a { - display: inline-block; - line-height: 1.4; - } - - /* Dashboard */ - - .dashboard #content { - width: auto; - } - - #content-related { - margin-right: -290px; - } - - .colSM #content-related { - margin-left: -290px; - } - - .colMS { - margin-right: 290px; - } - - .colSM { - margin-left: 290px; - } - - .dashboard .module table td a { - padding-right: 0; - } - - td .changelink, td .addlink { - font-size: 13px; - } - - /* Changelist */ - - #toolbar { - border: none; - padding: 15px; - } - - #changelist-search > div { - display: flex; - flex-wrap: nowrap; - max-width: 480px; - } - - #changelist-search label { - line-height: 22px; - } - - #toolbar form #searchbar { - flex: 1 0 auto; - width: 0; - height: 22px; - margin: 0 10px 0 6px; - } - - #toolbar form input[type=submit] { - flex: 0 1 auto; - } - - #changelist-search .quiet { - width: 0; - flex: 1 0 auto; - margin: 5px 0 0 25px; - } - - #changelist .actions { - display: flex; - flex-wrap: wrap; - padding: 15px 0; - } - - #changelist .actions.selected { - border: none; - } - - #changelist .actions label { - display: flex; - } - - #changelist .actions select { - background: #fff; - } - - #changelist .actions .button { - min-width: 48px; - margin: 0 10px; - } - - #changelist .actions span.all, - #changelist .actions span.clear, - #changelist .actions span.question, - #changelist .actions span.action-counter { - font-size: 11px; - margin: 0 10px 0 0; - } - - #changelist-filter { - width: 200px; - } - - .change-list .filtered .results, - .change-list .filtered .paginator, - .filtered #toolbar, - .filtered .actions, - - #changelist .paginator { - 
border-top-color: #eee; - } - - #changelist .results + .paginator { - border-top: none; - } - - /* Forms */ - - label { - font-size: 14px; - } - - .form-row input[type=text], - .form-row input[type=password], - .form-row input[type=email], - .form-row input[type=url], - .form-row input[type=tel], - .form-row input[type=number], - .form-row textarea, - .form-row select, - .form-row .vTextField { - box-sizing: border-box; - margin: 0; - padding: 6px 8px; - min-height: 36px; - font-size: 14px; - } - - .form-row select { - height: 36px; - } - - .form-row select[multiple] { - height: auto; - min-height: 0; - } - - fieldset .fieldBox { - float: none; - margin: 0 -10px; - padding: 0 10px; - } - - fieldset .fieldBox + .fieldBox { - margin-top: 10px; - padding-top: 10px; - border-top: 1px solid #eee; - } - - textarea { - max-width: 100%; - max-height: 120px; - } - - .aligned label { - padding-top: 6px; - } - - .aligned .related-lookup, - .aligned .datetimeshortcuts, - .aligned .related-lookup + strong { - align-self: center; - margin-left: 15px; - } - - form .aligned ul.radiolist { - margin-left: 2px; - } - - /* Related widget */ - - .related-widget-wrapper { - float: none; - } - - .related-widget-wrapper-link + .selector { - max-width: calc(100% - 30px); - margin-right: 15px; - } - - select + .related-widget-wrapper-link, - .related-widget-wrapper-link + .related-widget-wrapper-link { - margin-left: 10px; - } - - /* Selector */ - - .selector { - display: flex; - width: 100%; - } - - .selector .selector-filter { - display: flex; - align-items: center; - } - - .selector .selector-filter label { - margin: 0 8px 0 0; - } - - .selector .selector-filter input { - width: auto; - min-height: 0; - flex: 1 1; - } - - .selector-available, .selector-chosen { - width: auto; - flex: 1 1; - display: flex; - flex-direction: column; - } - - .selector select { - width: 100%; - flex: 1 0 auto; - margin-bottom: 5px; - } - - .selector ul.selector-chooser { - width: 26px; - height: 52px; - padding: 2px 0; - margin: auto 15px; - border-radius: 20px; - transform: translateY(-10px); - } - - .selector-add, .selector-remove { - width: 20px; - height: 20px; - background-size: 20px auto; - } - - .selector-add { - background-position: 0 -120px; - } - - .selector-remove { - background-position: 0 -80px; - } - - a.selector-chooseall, a.selector-clearall { - align-self: center; - } - - .stacked { - flex-direction: column; - max-width: 480px; - } - - .stacked > * { - flex: 0 1 auto; - } - - .stacked select { - margin-bottom: 0; - } - - .stacked .selector-available, .stacked .selector-chosen { - width: auto; - } - - .stacked ul.selector-chooser { - width: 52px; - height: 26px; - padding: 0 2px; - margin: 15px auto; - transform: none; - } - - .stacked .selector-chooser li { - padding: 3px; - } - - .stacked .selector-add, .stacked .selector-remove { - background-size: 20px auto; - } - - .stacked .selector-add { - background-position: 0 -40px; - } - - .stacked .active.selector-add { - background-position: 0 -40px; - } - - .active.selector-add:focus, .active.selector-add:hover { - background-position: 0 -140px; - } - - .stacked .active.selector-add:focus, .stacked .active.selector-add:hover { - background-position: 0 -60px; - } - - .stacked .selector-remove { - background-position: 0 0; - } - - .stacked .active.selector-remove { - background-position: 0 0; - } - - .active.selector-remove:focus, .active.selector-remove:hover { - background-position: 0 -100px; - } - - .stacked .active.selector-remove:focus, .stacked 
.active.selector-remove:hover { - background-position: 0 -20px; - } - - .help-tooltip, .selector .help-icon { - display: none; - } - - form .form-row p.datetime { - width: 100%; - } - - .datetime input { - width: 50%; - max-width: 120px; - } - - .datetime span { - font-size: 13px; - } - - .datetime .timezonewarning { - display: block; - font-size: 11px; - color: #999; - } - - .datetimeshortcuts { - color: #ccc; - } - - .form-row .datetime input.vDateField, .form-row .datetime input.vTimeField { - width: 75%; - } - - .inline-group { - overflow: auto; - } - - /* Messages */ - - ul.messagelist li { - padding-left: 55px; - background-position: 30px 12px; - } - - ul.messagelist li.error { - background-position: 30px 12px; - } - - ul.messagelist li.warning { - background-position: 30px 14px; - } - - /* Login */ - - .login #header { - padding: 15px 20px; - } - - .login #branding h1 { - margin: 0; - } - - /* GIS */ - - div.olMap { - max-width: calc(100vw - 30px); - max-height: 300px; - } - - .olMap + .clear_features { - display: block; - margin-top: 10px; - } - - /* Docs */ - - .module table.xfull { - width: 100%; - } - - pre.literal-block { - overflow: auto; - } -} - -/* Mobile */ - -@media (max-width: 767px) { - /* Layout */ - - #header, #content, #footer { - padding: 15px; - } - - #footer:empty { - padding: 0; - } - - div.breadcrumbs { - padding: 10px 15px; - } - - /* Dashboard */ - - .colMS, .colSM { - margin: 0; - } - - #content-related, .colSM #content-related { - width: 100%; - margin: 0; - } - - #content-related .module { - margin-bottom: 0; - } - - #content-related .module h2 { - padding: 10px 15px; - font-size: 16px; - } - - /* Changelist */ - - #changelist { - align-items: stretch; - flex-direction: column; - } - - #toolbar { - padding: 10px; - } - - #changelist-filter { - margin-left: 0; - } - - #changelist .actions label { - flex: 1 1; - } - - #changelist .actions select { - flex: 1 0; - width: 100%; - } - - #changelist .actions span { - flex: 1 0 100%; - } - - #changelist-filter { - position: static; - width: auto; - margin-top: 30px; - } - - .object-tools { - float: none; - margin: 0 0 15px; - padding: 0; - overflow: hidden; - } - - .object-tools li { - height: auto; - margin-left: 0; - } - - .object-tools li + li { - margin-left: 15px; - } - - /* Forms */ - - .form-row { - padding: 15px 0; - } - - .aligned .form-row, - .aligned .form-row > div { - display: flex; - flex-wrap: wrap; - max-width: 100vw; - } - - .aligned .form-row > div { - width: calc(100vw - 30px); - } - - textarea { - max-width: none; - } - - .vURLField { - width: auto; - } - - fieldset .fieldBox + .fieldBox { - margin-top: 15px; - padding-top: 15px; - } - - fieldset.collapsed .form-row { - display: none; - } - - .aligned label { - width: 100%; - padding: 0 0 10px; - } - - .aligned label:after { - max-height: 0; - } - - .aligned .form-row input, - .aligned .form-row select, - .aligned .form-row textarea { - flex: 1 1 auto; - max-width: 100%; - } - - .aligned .checkbox-row { - align-items: center; - } - - .aligned .checkbox-row input { - flex: 0 1 auto; - margin: 0; - } - - .aligned .vCheckboxLabel { - flex: 1 0; - padding: 1px 0 0 5px; - } - - .aligned label + p, - .aligned label + div.help, - .aligned label + div.readonly { - padding: 0; - margin-left: 0; - } - - .aligned p.file-upload { - margin-left: 0; - font-size: 13px; - } - - span.clearable-file-input { - margin-left: 15px; - } - - span.clearable-file-input label { - font-size: 13px; - padding-bottom: 0; - } - - .aligned .timezonewarning { - flex: 1 0 100%; - 
margin-top: 5px; - } - - form .aligned .form-row div.help { - width: 100%; - margin: 5px 0 0; - padding: 0; - } - - form .aligned ul { - margin-left: 0; - padding-left: 0; - } - - form .aligned ul.radiolist { - margin-right: 15px; - margin-bottom: -3px; - } - - form .aligned ul.radiolist li + li { - margin-top: 5px; - } - - /* Related widget */ - - .related-widget-wrapper { - width: 100%; - display: flex; - align-items: flex-start; - } - - .related-widget-wrapper .selector { - order: 1; - } - - .related-widget-wrapper > a { - order: 2; - } - - .related-widget-wrapper .radiolist ~ a { - align-self: flex-end; - } - - .related-widget-wrapper > select ~ a { - align-self: center; - } - - select + .related-widget-wrapper-link, - .related-widget-wrapper-link + .related-widget-wrapper-link { - margin-left: 15px; - } - - /* Selector */ - - .selector { - flex-direction: column; - } - - .selector > * { - float: none; - } - - .selector-available, .selector-chosen { - margin-bottom: 0; - flex: 1 1 auto; - } - - .selector select { - max-height: 96px; - } - - .selector ul.selector-chooser { - display: block; - float: none; - width: 52px; - height: 26px; - padding: 0 2px; - margin: 15px auto 20px; - transform: none; - } - - .selector ul.selector-chooser li { - float: left; - } - - .selector-remove { - background-position: 0 0; - } - - .active.selector-remove:focus, .active.selector-remove:hover { - background-position: 0 -20px; - } - - .selector-add { - background-position: 0 -40px; - } - - .active.selector-add:focus, .active.selector-add:hover { - background-position: 0 -60px; - } - - /* Inlines */ - - .inline-group[data-inline-type="stacked"] .inline-related { - border: 2px solid #eee; - border-radius: 4px; - margin-top: 15px; - overflow: auto; - } - - .inline-group[data-inline-type="stacked"] .inline-related > * { - box-sizing: border-box; - } - - .inline-group[data-inline-type="stacked"] .inline-related + .inline-related { - margin-top: 30px; - } - - .inline-group[data-inline-type="stacked"] .inline-related .module { - padding: 0 10px; - } - - .inline-group[data-inline-type="stacked"] .inline-related .module .form-row:last-child { - border-bottom: none; - } - - .inline-group[data-inline-type="stacked"] .inline-related h3 { - padding: 10px; - border-top-width: 0; - border-bottom-width: 2px; - display: flex; - flex-wrap: wrap; - align-items: center; - } - - .inline-group[data-inline-type="stacked"] .inline-related h3 .inline_label { - margin-right: auto; - } - - .inline-group[data-inline-type="stacked"] .inline-related h3 span.delete { - float: none; - flex: 1 1 100%; - margin-top: 5px; - } - - .inline-group[data-inline-type="stacked"] .aligned .form-row > div:not([class]) { - width: 100%; - } - - .inline-group[data-inline-type="stacked"] .aligned label { - width: 100%; - } - - .inline-group[data-inline-type="stacked"] div.add-row { - margin-top: 15px; - border: 1px solid #eee; - border-radius: 4px; - } - - .inline-group div.add-row, - .inline-group .tabular tr.add-row td { - padding: 0; - } - - .inline-group div.add-row a, - .inline-group .tabular tr.add-row td a { - display: block; - padding: 8px 10px 8px 26px; - background-position: 8px 9px; - } - - /* Submit row */ - - .submit-row { - padding: 10px 10px 0; - margin: 0 0 15px; - display: flex; - flex-direction: column; - } - - .submit-row > * { - width: 100%; - } - - .submit-row input, .submit-row input.default, .submit-row a, .submit-row a.closelink { - float: none; - margin: 0 0 10px; - text-align: center; - } - - .submit-row a.closelink { - 
padding: 10px 0; - } - - .submit-row p.deletelink-box { - order: 4; - } - - /* Messages */ - - ul.messagelist li { - padding-left: 40px; - background-position: 15px 12px; - } - - ul.messagelist li.error { - background-position: 15px 12px; - } - - ul.messagelist li.warning { - background-position: 15px 14px; - } - - /* Paginator */ - - .paginator .this-page, .paginator a:link, .paginator a:visited { - padding: 4px 10px; - } - - /* Login */ - - body.login { - padding: 0 15px; - } - - .login #container { - width: auto; - max-width: 480px; - margin: 50px auto; - } - - .login #header, - .login #content { - padding: 15px; - } - - .login #content-main { - float: none; - } - - .login .form-row { - padding: 0; - } - - .login .form-row + .form-row { - margin-top: 15px; - } - - .login .form-row label { - display: block; - margin: 0 0 5px; - padding: 0; - line-height: 1.2; - } - - .login .submit-row { - padding: 15px 0 0; - } - - .login br, .login .submit-row label { - display: none; - } - - .login .submit-row input { - margin: 0; - text-transform: uppercase; - } - - .errornote { - margin: 0 0 20px; - padding: 8px 12px; - font-size: 13px; - } - - /* Calendar and clock */ - - .calendarbox, .clockbox { - position: fixed !important; - top: 50% !important; - left: 50% !important; - transform: translate(-50%, -50%); - margin: 0; - border: none; - overflow: visible; - } - - .calendarbox:before, .clockbox:before { - content: ''; - position: fixed; - top: 50%; - left: 50%; - width: 100vw; - height: 100vh; - background: rgba(0, 0, 0, 0.75); - transform: translate(-50%, -50%); - } - - .calendarbox > *, .clockbox > * { - position: relative; - z-index: 1; - } - - .calendarbox > div:first-child { - z-index: 2; - } - - .calendarbox .calendar, .clockbox h2 { - border-radius: 4px 4px 0 0; - overflow: hidden; - } - - .calendarbox .calendar-cancel, .clockbox .calendar-cancel { - border-radius: 0 0 4px 4px; - overflow: hidden; - } - - .calendar-shortcuts { - padding: 10px 0; - font-size: 12px; - line-height: 12px; - } - - .calendar-shortcuts a { - margin: 0 4px; - } - - .timelist a { - background: #fff; - padding: 4px; - } - - .calendar-cancel { - padding: 8px 10px; - } - - .clockbox h2 { - padding: 8px 15px; - } - - .calendar caption { - padding: 10px; - } - - .calendarbox .calendarnav-previous, .calendarbox .calendarnav-next { - z-index: 1; - top: 10px; - } - - /* History */ - - table#change-history tbody th, table#change-history tbody td { - font-size: 13px; - word-break: break-word; - } - - table#change-history tbody th { - width: auto; - } - - /* Docs */ - - table.model tbody th, table.model tbody td { - font-size: 13px; - word-break: break-word; - } -} diff --git a/application/src/static/admin/css/responsive_rtl.css b/application/src/static/admin/css/responsive_rtl.css deleted file mode 100644 index 66d3c2f9b..000000000 --- a/application/src/static/admin/css/responsive_rtl.css +++ /dev/null @@ -1,80 +0,0 @@ -/* TABLETS */ - -@media (max-width: 1024px) { - [dir="rtl"] .colMS { - margin-right: 0; - } - - [dir="rtl"] #user-tools { - text-align: right; - } - - [dir="rtl"] #changelist .actions label { - padding-left: 10px; - padding-right: 0; - } - - [dir="rtl"] #changelist .actions select { - margin-left: 0; - margin-right: 15px; - } - - [dir="rtl"] .change-list .filtered .results, - [dir="rtl"] .change-list .filtered .paginator, - [dir="rtl"] .filtered #toolbar, - [dir="rtl"] .filtered div.xfull, - [dir="rtl"] .filtered .actions, - [dir="rtl"] #changelist-filter { - margin-left: 0; - } - - [dir="rtl"] .inline-group 
ul.tools a.add, - [dir="rtl"] .inline-group div.add-row a, - [dir="rtl"] .inline-group .tabular tr.add-row td a { - padding: 8px 26px 8px 10px; - background-position: calc(100% - 8px) 9px; - } - - [dir="rtl"] .related-widget-wrapper-link + .selector { - margin-right: 0; - margin-left: 15px; - } - - [dir="rtl"] .selector .selector-filter label { - margin-right: 0; - margin-left: 8px; - } - - [dir="rtl"] .object-tools li { - float: right; - } - - [dir="rtl"] .object-tools li + li { - margin-left: 0; - margin-right: 15px; - } - - [dir="rtl"] .dashboard .module table td a { - padding-left: 0; - padding-right: 16px; - } -} - -/* MOBILE */ - -@media (max-width: 767px) { - [dir="rtl"] .aligned .related-lookup, - [dir="rtl"] .aligned .datetimeshortcuts { - margin-left: 0; - margin-right: 15px; - } - - [dir="rtl"] .aligned ul { - margin-right: 0; - } - - [dir="rtl"] #changelist-filter { - margin-left: 0; - margin-right: 0; - } -} diff --git a/application/src/static/admin/css/rtl.css b/application/src/static/admin/css/rtl.css deleted file mode 100644 index a40aad0c8..000000000 --- a/application/src/static/admin/css/rtl.css +++ /dev/null @@ -1,249 +0,0 @@ -body { - direction: rtl; -} - -/* LOGIN */ - -.login .form-row { - float: right; -} - -.login .form-row label { - float: right; - padding-left: 0.5em; - padding-right: 0; - text-align: left; -} - -.login .submit-row { - clear: both; - padding: 1em 9.4em 0 0; -} - -/* GLOBAL */ - -th { - text-align: right; -} - -.module h2, .module caption { - text-align: right; -} - -.module ul, .module ol { - margin-left: 0; - margin-right: 1.5em; -} - -.viewlink, .addlink, .changelink { - padding-left: 0; - padding-right: 16px; - background-position: 100% 1px; -} - -.deletelink { - padding-left: 0; - padding-right: 16px; - background-position: 100% 1px; -} - -.object-tools { - float: left; -} - -thead th:first-child, -tfoot td:first-child { - border-left: none; -} - -/* LAYOUT */ - -#user-tools { - right: auto; - left: 0; - text-align: left; -} - -div.breadcrumbs { - text-align: right; -} - -#content-main { - float: right; -} - -#content-related { - float: left; - margin-left: -300px; - margin-right: auto; -} - -.colMS { - margin-left: 300px; - margin-right: 0; -} - -/* SORTABLE TABLES */ - -table thead th.sorted .sortoptions { - float: left; -} - -thead th.sorted .text { - padding-right: 0; - padding-left: 42px; -} - -/* dashboard styles */ - -.dashboard .module table td a { - padding-left: .6em; - padding-right: 16px; -} - -/* changelists styles */ - -.change-list .filtered table { - border-left: none; - border-right: 0px none; -} - -#changelist-filter { - border-left: none; - border-right: none; - margin-left: 0; - margin-right: 30px; -} - -#changelist-filter li.selected { - border-left: none; - padding-left: 10px; - margin-left: 0; - border-right: 5px solid #eaeaea; - padding-right: 10px; - margin-right: -15px; -} - -#changelist table tbody td:first-child, #changelist table tbody th:first-child { - border-right: none; - border-left: none; -} - -/* FORMS */ - -.aligned label { - padding: 0 0 3px 1em; - float: right; -} - -.submit-row { - text-align: left -} - -.submit-row p.deletelink-box { - float: right; -} - -.submit-row input.default { - margin-left: 0; -} - -.vDateField, .vTimeField { - margin-left: 2px; -} - -.aligned .form-row input { - margin-left: 5px; -} - -form .aligned p.help, form .aligned div.help { - clear: right; -} - -form .aligned ul { - margin-right: 163px; - margin-left: 0; -} - -form ul.inline li { - float: right; - padding-right: 0; - 
padding-left: 7px; -} - -input[type=submit].default, .submit-row input.default { - float: left; -} - -fieldset .fieldBox { - float: right; - margin-left: 20px; - margin-right: 0; -} - -.errorlist li { - background-position: 100% 12px; - padding: 0; -} - -.errornote { - background-position: 100% 12px; - padding: 10px 12px; -} - -/* WIDGETS */ - -.calendarnav-previous { - top: 0; - left: auto; - right: 10px; -} - -.calendarnav-next { - top: 0; - right: auto; - left: 10px; -} - -.calendar caption, .calendarbox h2 { - text-align: center; -} - -.selector { - float: right; -} - -.selector .selector-filter { - text-align: right; -} - -.inline-deletelink { - float: left; -} - -form .form-row p.datetime { - overflow: hidden; -} - -.related-widget-wrapper { - float: right; -} - -/* MISC */ - -.inline-related h2, .inline-group h2 { - text-align: right -} - -.inline-related h3 span.delete { - padding-right: 20px; - padding-left: inherit; - left: 10px; - right: inherit; - float:left; -} - -.inline-related h3 span.delete label { - margin-left: inherit; - margin-right: 2px; -} diff --git a/application/src/static/admin/css/vendor/select2/select2.css b/application/src/static/admin/css/vendor/select2/select2.css deleted file mode 100644 index 750b3207a..000000000 --- a/application/src/static/admin/css/vendor/select2/select2.css +++ /dev/null @@ -1,481 +0,0 @@ -.select2-container { - box-sizing: border-box; - display: inline-block; - margin: 0; - position: relative; - vertical-align: middle; } - .select2-container .select2-selection--single { - box-sizing: border-box; - cursor: pointer; - display: block; - height: 28px; - user-select: none; - -webkit-user-select: none; } - .select2-container .select2-selection--single .select2-selection__rendered { - display: block; - padding-left: 8px; - padding-right: 20px; - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; } - .select2-container .select2-selection--single .select2-selection__clear { - position: relative; } - .select2-container[dir="rtl"] .select2-selection--single .select2-selection__rendered { - padding-right: 8px; - padding-left: 20px; } - .select2-container .select2-selection--multiple { - box-sizing: border-box; - cursor: pointer; - display: block; - min-height: 32px; - user-select: none; - -webkit-user-select: none; } - .select2-container .select2-selection--multiple .select2-selection__rendered { - display: inline-block; - overflow: hidden; - padding-left: 8px; - text-overflow: ellipsis; - white-space: nowrap; } - .select2-container .select2-search--inline { - float: left; } - .select2-container .select2-search--inline .select2-search__field { - box-sizing: border-box; - border: none; - font-size: 100%; - margin-top: 5px; - padding: 0; } - .select2-container .select2-search--inline .select2-search__field::-webkit-search-cancel-button { - -webkit-appearance: none; } - -.select2-dropdown { - background-color: white; - border: 1px solid #aaa; - border-radius: 4px; - box-sizing: border-box; - display: block; - position: absolute; - left: -100000px; - width: 100%; - z-index: 1051; } - -.select2-results { - display: block; } - -.select2-results__options { - list-style: none; - margin: 0; - padding: 0; } - -.select2-results__option { - padding: 6px; - user-select: none; - -webkit-user-select: none; } - .select2-results__option[aria-selected] { - cursor: pointer; } - -.select2-container--open .select2-dropdown { - left: 0; } - -.select2-container--open .select2-dropdown--above { - border-bottom: none; - border-bottom-left-radius: 0; - 
border-bottom-right-radius: 0; } - -.select2-container--open .select2-dropdown--below { - border-top: none; - border-top-left-radius: 0; - border-top-right-radius: 0; } - -.select2-search--dropdown { - display: block; - padding: 4px; } - .select2-search--dropdown .select2-search__field { - padding: 4px; - width: 100%; - box-sizing: border-box; } - .select2-search--dropdown .select2-search__field::-webkit-search-cancel-button { - -webkit-appearance: none; } - .select2-search--dropdown.select2-search--hide { - display: none; } - -.select2-close-mask { - border: 0; - margin: 0; - padding: 0; - display: block; - position: fixed; - left: 0; - top: 0; - min-height: 100%; - min-width: 100%; - height: auto; - width: auto; - opacity: 0; - z-index: 99; - background-color: #fff; - filter: alpha(opacity=0); } - -.select2-hidden-accessible { - border: 0 !important; - clip: rect(0 0 0 0) !important; - -webkit-clip-path: inset(50%) !important; - clip-path: inset(50%) !important; - height: 1px !important; - overflow: hidden !important; - padding: 0 !important; - position: absolute !important; - width: 1px !important; - white-space: nowrap !important; } - -.select2-container--default .select2-selection--single { - background-color: #fff; - border: 1px solid #aaa; - border-radius: 4px; } - .select2-container--default .select2-selection--single .select2-selection__rendered { - color: #444; - line-height: 28px; } - .select2-container--default .select2-selection--single .select2-selection__clear { - cursor: pointer; - float: right; - font-weight: bold; } - .select2-container--default .select2-selection--single .select2-selection__placeholder { - color: #999; } - .select2-container--default .select2-selection--single .select2-selection__arrow { - height: 26px; - position: absolute; - top: 1px; - right: 1px; - width: 20px; } - .select2-container--default .select2-selection--single .select2-selection__arrow b { - border-color: #888 transparent transparent transparent; - border-style: solid; - border-width: 5px 4px 0 4px; - height: 0; - left: 50%; - margin-left: -4px; - margin-top: -2px; - position: absolute; - top: 50%; - width: 0; } - -.select2-container--default[dir="rtl"] .select2-selection--single .select2-selection__clear { - float: left; } - -.select2-container--default[dir="rtl"] .select2-selection--single .select2-selection__arrow { - left: 1px; - right: auto; } - -.select2-container--default.select2-container--disabled .select2-selection--single { - background-color: #eee; - cursor: default; } - .select2-container--default.select2-container--disabled .select2-selection--single .select2-selection__clear { - display: none; } - -.select2-container--default.select2-container--open .select2-selection--single .select2-selection__arrow b { - border-color: transparent transparent #888 transparent; - border-width: 0 4px 5px 4px; } - -.select2-container--default .select2-selection--multiple { - background-color: white; - border: 1px solid #aaa; - border-radius: 4px; - cursor: text; } - .select2-container--default .select2-selection--multiple .select2-selection__rendered { - box-sizing: border-box; - list-style: none; - margin: 0; - padding: 0 5px; - width: 100%; } - .select2-container--default .select2-selection--multiple .select2-selection__rendered li { - list-style: none; } - .select2-container--default .select2-selection--multiple .select2-selection__clear { - cursor: pointer; - float: right; - font-weight: bold; - margin-top: 5px; - margin-right: 10px; - padding: 1px; } - .select2-container--default 
.select2-selection--multiple .select2-selection__choice { - background-color: #e4e4e4; - border: 1px solid #aaa; - border-radius: 4px; - cursor: default; - float: left; - margin-right: 5px; - margin-top: 5px; - padding: 0 5px; } - .select2-container--default .select2-selection--multiple .select2-selection__choice__remove { - color: #999; - cursor: pointer; - display: inline-block; - font-weight: bold; - margin-right: 2px; } - .select2-container--default .select2-selection--multiple .select2-selection__choice__remove:hover { - color: #333; } - -.select2-container--default[dir="rtl"] .select2-selection--multiple .select2-selection__choice, .select2-container--default[dir="rtl"] .select2-selection--multiple .select2-search--inline { - float: right; } - -.select2-container--default[dir="rtl"] .select2-selection--multiple .select2-selection__choice { - margin-left: 5px; - margin-right: auto; } - -.select2-container--default[dir="rtl"] .select2-selection--multiple .select2-selection__choice__remove { - margin-left: 2px; - margin-right: auto; } - -.select2-container--default.select2-container--focus .select2-selection--multiple { - border: solid black 1px; - outline: 0; } - -.select2-container--default.select2-container--disabled .select2-selection--multiple { - background-color: #eee; - cursor: default; } - -.select2-container--default.select2-container--disabled .select2-selection__choice__remove { - display: none; } - -.select2-container--default.select2-container--open.select2-container--above .select2-selection--single, .select2-container--default.select2-container--open.select2-container--above .select2-selection--multiple { - border-top-left-radius: 0; - border-top-right-radius: 0; } - -.select2-container--default.select2-container--open.select2-container--below .select2-selection--single, .select2-container--default.select2-container--open.select2-container--below .select2-selection--multiple { - border-bottom-left-radius: 0; - border-bottom-right-radius: 0; } - -.select2-container--default .select2-search--dropdown .select2-search__field { - border: 1px solid #aaa; } - -.select2-container--default .select2-search--inline .select2-search__field { - background: transparent; - border: none; - outline: 0; - box-shadow: none; - -webkit-appearance: textfield; } - -.select2-container--default .select2-results > .select2-results__options { - max-height: 200px; - overflow-y: auto; } - -.select2-container--default .select2-results__option[role=group] { - padding: 0; } - -.select2-container--default .select2-results__option[aria-disabled=true] { - color: #999; } - -.select2-container--default .select2-results__option[aria-selected=true] { - background-color: #ddd; } - -.select2-container--default .select2-results__option .select2-results__option { - padding-left: 1em; } - .select2-container--default .select2-results__option .select2-results__option .select2-results__group { - padding-left: 0; } - .select2-container--default .select2-results__option .select2-results__option .select2-results__option { - margin-left: -1em; - padding-left: 2em; } - .select2-container--default .select2-results__option .select2-results__option .select2-results__option .select2-results__option { - margin-left: -2em; - padding-left: 3em; } - .select2-container--default .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option { - margin-left: -3em; - padding-left: 4em; } - .select2-container--default .select2-results__option .select2-results__option 
.select2-results__option .select2-results__option .select2-results__option .select2-results__option { - margin-left: -4em; - padding-left: 5em; } - .select2-container--default .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option { - margin-left: -5em; - padding-left: 6em; } - -.select2-container--default .select2-results__option--highlighted[aria-selected] { - background-color: #5897fb; - color: white; } - -.select2-container--default .select2-results__group { - cursor: default; - display: block; - padding: 6px; } - -.select2-container--classic .select2-selection--single { - background-color: #f7f7f7; - border: 1px solid #aaa; - border-radius: 4px; - outline: 0; - background-image: -webkit-linear-gradient(top, white 50%, #eeeeee 100%); - background-image: -o-linear-gradient(top, white 50%, #eeeeee 100%); - background-image: linear-gradient(to bottom, white 50%, #eeeeee 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#FFFFFFFF', endColorstr='#FFEEEEEE', GradientType=0); } - .select2-container--classic .select2-selection--single:focus { - border: 1px solid #5897fb; } - .select2-container--classic .select2-selection--single .select2-selection__rendered { - color: #444; - line-height: 28px; } - .select2-container--classic .select2-selection--single .select2-selection__clear { - cursor: pointer; - float: right; - font-weight: bold; - margin-right: 10px; } - .select2-container--classic .select2-selection--single .select2-selection__placeholder { - color: #999; } - .select2-container--classic .select2-selection--single .select2-selection__arrow { - background-color: #ddd; - border: none; - border-left: 1px solid #aaa; - border-top-right-radius: 4px; - border-bottom-right-radius: 4px; - height: 26px; - position: absolute; - top: 1px; - right: 1px; - width: 20px; - background-image: -webkit-linear-gradient(top, #eeeeee 50%, #cccccc 100%); - background-image: -o-linear-gradient(top, #eeeeee 50%, #cccccc 100%); - background-image: linear-gradient(to bottom, #eeeeee 50%, #cccccc 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#FFEEEEEE', endColorstr='#FFCCCCCC', GradientType=0); } - .select2-container--classic .select2-selection--single .select2-selection__arrow b { - border-color: #888 transparent transparent transparent; - border-style: solid; - border-width: 5px 4px 0 4px; - height: 0; - left: 50%; - margin-left: -4px; - margin-top: -2px; - position: absolute; - top: 50%; - width: 0; } - -.select2-container--classic[dir="rtl"] .select2-selection--single .select2-selection__clear { - float: left; } - -.select2-container--classic[dir="rtl"] .select2-selection--single .select2-selection__arrow { - border: none; - border-right: 1px solid #aaa; - border-radius: 0; - border-top-left-radius: 4px; - border-bottom-left-radius: 4px; - left: 1px; - right: auto; } - -.select2-container--classic.select2-container--open .select2-selection--single { - border: 1px solid #5897fb; } - .select2-container--classic.select2-container--open .select2-selection--single .select2-selection__arrow { - background: transparent; - border: none; } - .select2-container--classic.select2-container--open .select2-selection--single .select2-selection__arrow b { - border-color: transparent transparent #888 transparent; - border-width: 0 4px 5px 4px; } - 
-.select2-container--classic.select2-container--open.select2-container--above .select2-selection--single { - border-top: none; - border-top-left-radius: 0; - border-top-right-radius: 0; - background-image: -webkit-linear-gradient(top, white 0%, #eeeeee 50%); - background-image: -o-linear-gradient(top, white 0%, #eeeeee 50%); - background-image: linear-gradient(to bottom, white 0%, #eeeeee 50%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#FFFFFFFF', endColorstr='#FFEEEEEE', GradientType=0); } - -.select2-container--classic.select2-container--open.select2-container--below .select2-selection--single { - border-bottom: none; - border-bottom-left-radius: 0; - border-bottom-right-radius: 0; - background-image: -webkit-linear-gradient(top, #eeeeee 50%, white 100%); - background-image: -o-linear-gradient(top, #eeeeee 50%, white 100%); - background-image: linear-gradient(to bottom, #eeeeee 50%, white 100%); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#FFEEEEEE', endColorstr='#FFFFFFFF', GradientType=0); } - -.select2-container--classic .select2-selection--multiple { - background-color: white; - border: 1px solid #aaa; - border-radius: 4px; - cursor: text; - outline: 0; } - .select2-container--classic .select2-selection--multiple:focus { - border: 1px solid #5897fb; } - .select2-container--classic .select2-selection--multiple .select2-selection__rendered { - list-style: none; - margin: 0; - padding: 0 5px; } - .select2-container--classic .select2-selection--multiple .select2-selection__clear { - display: none; } - .select2-container--classic .select2-selection--multiple .select2-selection__choice { - background-color: #e4e4e4; - border: 1px solid #aaa; - border-radius: 4px; - cursor: default; - float: left; - margin-right: 5px; - margin-top: 5px; - padding: 0 5px; } - .select2-container--classic .select2-selection--multiple .select2-selection__choice__remove { - color: #888; - cursor: pointer; - display: inline-block; - font-weight: bold; - margin-right: 2px; } - .select2-container--classic .select2-selection--multiple .select2-selection__choice__remove:hover { - color: #555; } - -.select2-container--classic[dir="rtl"] .select2-selection--multiple .select2-selection__choice { - float: right; - margin-left: 5px; - margin-right: auto; } - -.select2-container--classic[dir="rtl"] .select2-selection--multiple .select2-selection__choice__remove { - margin-left: 2px; - margin-right: auto; } - -.select2-container--classic.select2-container--open .select2-selection--multiple { - border: 1px solid #5897fb; } - -.select2-container--classic.select2-container--open.select2-container--above .select2-selection--multiple { - border-top: none; - border-top-left-radius: 0; - border-top-right-radius: 0; } - -.select2-container--classic.select2-container--open.select2-container--below .select2-selection--multiple { - border-bottom: none; - border-bottom-left-radius: 0; - border-bottom-right-radius: 0; } - -.select2-container--classic .select2-search--dropdown .select2-search__field { - border: 1px solid #aaa; - outline: 0; } - -.select2-container--classic .select2-search--inline .select2-search__field { - outline: 0; - box-shadow: none; } - -.select2-container--classic .select2-dropdown { - background-color: white; - border: 1px solid transparent; } - -.select2-container--classic .select2-dropdown--above { - border-bottom: none; } - -.select2-container--classic .select2-dropdown--below { - border-top: none; } 
- -.select2-container--classic .select2-results > .select2-results__options { - max-height: 200px; - overflow-y: auto; } - -.select2-container--classic .select2-results__option[role=group] { - padding: 0; } - -.select2-container--classic .select2-results__option[aria-disabled=true] { - color: grey; } - -.select2-container--classic .select2-results__option--highlighted[aria-selected] { - background-color: #3875d7; - color: white; } - -.select2-container--classic .select2-results__group { - cursor: default; - display: block; - padding: 6px; } - -.select2-container--classic.select2-container--open .select2-dropdown { - border-color: #5897fb; } diff --git a/application/src/static/admin/css/vendor/select2/select2.min.css b/application/src/static/admin/css/vendor/select2/select2.min.css deleted file mode 100644 index 7c18ad59d..000000000 --- a/application/src/static/admin/css/vendor/select2/select2.min.css +++ /dev/null @@ -1 +0,0 @@ -.select2-container{box-sizing:border-box;display:inline-block;margin:0;position:relative;vertical-align:middle}.select2-container .select2-selection--single{box-sizing:border-box;cursor:pointer;display:block;height:28px;user-select:none;-webkit-user-select:none}.select2-container .select2-selection--single .select2-selection__rendered{display:block;padding-left:8px;padding-right:20px;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.select2-container .select2-selection--single .select2-selection__clear{position:relative}.select2-container[dir="rtl"] .select2-selection--single .select2-selection__rendered{padding-right:8px;padding-left:20px}.select2-container .select2-selection--multiple{box-sizing:border-box;cursor:pointer;display:block;min-height:32px;user-select:none;-webkit-user-select:none}.select2-container .select2-selection--multiple .select2-selection__rendered{display:inline-block;overflow:hidden;padding-left:8px;text-overflow:ellipsis;white-space:nowrap}.select2-container .select2-search--inline{float:left}.select2-container .select2-search--inline .select2-search__field{box-sizing:border-box;border:none;font-size:100%;margin-top:5px;padding:0}.select2-container .select2-search--inline .select2-search__field::-webkit-search-cancel-button{-webkit-appearance:none}.select2-dropdown{background-color:white;border:1px solid #aaa;border-radius:4px;box-sizing:border-box;display:block;position:absolute;left:-100000px;width:100%;z-index:1051}.select2-results{display:block}.select2-results__options{list-style:none;margin:0;padding:0}.select2-results__option{padding:6px;user-select:none;-webkit-user-select:none}.select2-results__option[aria-selected]{cursor:pointer}.select2-container--open .select2-dropdown{left:0}.select2-container--open .select2-dropdown--above{border-bottom:none;border-bottom-left-radius:0;border-bottom-right-radius:0}.select2-container--open .select2-dropdown--below{border-top:none;border-top-left-radius:0;border-top-right-radius:0}.select2-search--dropdown{display:block;padding:4px}.select2-search--dropdown .select2-search__field{padding:4px;width:100%;box-sizing:border-box}.select2-search--dropdown .select2-search__field::-webkit-search-cancel-button{-webkit-appearance:none}.select2-search--dropdown.select2-search--hide{display:none}.select2-close-mask{border:0;margin:0;padding:0;display:block;position:fixed;left:0;top:0;min-height:100%;min-width:100%;height:auto;width:auto;opacity:0;z-index:99;background-color:#fff;filter:alpha(opacity=0)}.select2-hidden-accessible{border:0 !important;clip:rect(0 0 0 0) 
!important;-webkit-clip-path:inset(50%) !important;clip-path:inset(50%) !important;height:1px !important;overflow:hidden !important;padding:0 !important;position:absolute !important;width:1px !important;white-space:nowrap !important}.select2-container--default .select2-selection--single{background-color:#fff;border:1px solid #aaa;border-radius:4px}.select2-container--default .select2-selection--single .select2-selection__rendered{color:#444;line-height:28px}.select2-container--default .select2-selection--single .select2-selection__clear{cursor:pointer;float:right;font-weight:bold}.select2-container--default .select2-selection--single .select2-selection__placeholder{color:#999}.select2-container--default .select2-selection--single .select2-selection__arrow{height:26px;position:absolute;top:1px;right:1px;width:20px}.select2-container--default .select2-selection--single .select2-selection__arrow b{border-color:#888 transparent transparent transparent;border-style:solid;border-width:5px 4px 0 4px;height:0;left:50%;margin-left:-4px;margin-top:-2px;position:absolute;top:50%;width:0}.select2-container--default[dir="rtl"] .select2-selection--single .select2-selection__clear{float:left}.select2-container--default[dir="rtl"] .select2-selection--single .select2-selection__arrow{left:1px;right:auto}.select2-container--default.select2-container--disabled .select2-selection--single{background-color:#eee;cursor:default}.select2-container--default.select2-container--disabled .select2-selection--single .select2-selection__clear{display:none}.select2-container--default.select2-container--open .select2-selection--single .select2-selection__arrow b{border-color:transparent transparent #888 transparent;border-width:0 4px 5px 4px}.select2-container--default .select2-selection--multiple{background-color:white;border:1px solid #aaa;border-radius:4px;cursor:text}.select2-container--default .select2-selection--multiple .select2-selection__rendered{box-sizing:border-box;list-style:none;margin:0;padding:0 5px;width:100%}.select2-container--default .select2-selection--multiple .select2-selection__rendered li{list-style:none}.select2-container--default .select2-selection--multiple .select2-selection__clear{cursor:pointer;float:right;font-weight:bold;margin-top:5px;margin-right:10px;padding:1px}.select2-container--default .select2-selection--multiple .select2-selection__choice{background-color:#e4e4e4;border:1px solid #aaa;border-radius:4px;cursor:default;float:left;margin-right:5px;margin-top:5px;padding:0 5px}.select2-container--default .select2-selection--multiple .select2-selection__choice__remove{color:#999;cursor:pointer;display:inline-block;font-weight:bold;margin-right:2px}.select2-container--default .select2-selection--multiple .select2-selection__choice__remove:hover{color:#333}.select2-container--default[dir="rtl"] .select2-selection--multiple .select2-selection__choice,.select2-container--default[dir="rtl"] .select2-selection--multiple .select2-search--inline{float:right}.select2-container--default[dir="rtl"] .select2-selection--multiple .select2-selection__choice{margin-left:5px;margin-right:auto}.select2-container--default[dir="rtl"] .select2-selection--multiple .select2-selection__choice__remove{margin-left:2px;margin-right:auto}.select2-container--default.select2-container--focus .select2-selection--multiple{border:solid black 1px;outline:0}.select2-container--default.select2-container--disabled 
.select2-selection--multiple{background-color:#eee;cursor:default}.select2-container--default.select2-container--disabled .select2-selection__choice__remove{display:none}.select2-container--default.select2-container--open.select2-container--above .select2-selection--single,.select2-container--default.select2-container--open.select2-container--above .select2-selection--multiple{border-top-left-radius:0;border-top-right-radius:0}.select2-container--default.select2-container--open.select2-container--below .select2-selection--single,.select2-container--default.select2-container--open.select2-container--below .select2-selection--multiple{border-bottom-left-radius:0;border-bottom-right-radius:0}.select2-container--default .select2-search--dropdown .select2-search__field{border:1px solid #aaa}.select2-container--default .select2-search--inline .select2-search__field{background:transparent;border:none;outline:0;box-shadow:none;-webkit-appearance:textfield}.select2-container--default .select2-results>.select2-results__options{max-height:200px;overflow-y:auto}.select2-container--default .select2-results__option[role=group]{padding:0}.select2-container--default .select2-results__option[aria-disabled=true]{color:#999}.select2-container--default .select2-results__option[aria-selected=true]{background-color:#ddd}.select2-container--default .select2-results__option .select2-results__option{padding-left:1em}.select2-container--default .select2-results__option .select2-results__option .select2-results__group{padding-left:0}.select2-container--default .select2-results__option .select2-results__option .select2-results__option{margin-left:-1em;padding-left:2em}.select2-container--default .select2-results__option .select2-results__option .select2-results__option .select2-results__option{margin-left:-2em;padding-left:3em}.select2-container--default .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option{margin-left:-3em;padding-left:4em}.select2-container--default .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option{margin-left:-4em;padding-left:5em}.select2-container--default .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option{margin-left:-5em;padding-left:6em}.select2-container--default .select2-results__option--highlighted[aria-selected]{background-color:#5897fb;color:white}.select2-container--default .select2-results__group{cursor:default;display:block;padding:6px}.select2-container--classic .select2-selection--single{background-color:#f7f7f7;border:1px solid #aaa;border-radius:4px;outline:0;background-image:-webkit-linear-gradient(top, #fff 50%, #eee 100%);background-image:-o-linear-gradient(top, #fff 50%, #eee 100%);background-image:linear-gradient(to bottom, #fff 50%, #eee 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#FFFFFFFF', endColorstr='#FFEEEEEE', GradientType=0)}.select2-container--classic .select2-selection--single:focus{border:1px solid #5897fb}.select2-container--classic .select2-selection--single .select2-selection__rendered{color:#444;line-height:28px}.select2-container--classic .select2-selection--single .select2-selection__clear{cursor:pointer;float:right;font-weight:bold;margin-right:10px}.select2-container--classic .select2-selection--single 
.select2-selection__placeholder{color:#999}.select2-container--classic .select2-selection--single .select2-selection__arrow{background-color:#ddd;border:none;border-left:1px solid #aaa;border-top-right-radius:4px;border-bottom-right-radius:4px;height:26px;position:absolute;top:1px;right:1px;width:20px;background-image:-webkit-linear-gradient(top, #eee 50%, #ccc 100%);background-image:-o-linear-gradient(top, #eee 50%, #ccc 100%);background-image:linear-gradient(to bottom, #eee 50%, #ccc 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#FFEEEEEE', endColorstr='#FFCCCCCC', GradientType=0)}.select2-container--classic .select2-selection--single .select2-selection__arrow b{border-color:#888 transparent transparent transparent;border-style:solid;border-width:5px 4px 0 4px;height:0;left:50%;margin-left:-4px;margin-top:-2px;position:absolute;top:50%;width:0}.select2-container--classic[dir="rtl"] .select2-selection--single .select2-selection__clear{float:left}.select2-container--classic[dir="rtl"] .select2-selection--single .select2-selection__arrow{border:none;border-right:1px solid #aaa;border-radius:0;border-top-left-radius:4px;border-bottom-left-radius:4px;left:1px;right:auto}.select2-container--classic.select2-container--open .select2-selection--single{border:1px solid #5897fb}.select2-container--classic.select2-container--open .select2-selection--single .select2-selection__arrow{background:transparent;border:none}.select2-container--classic.select2-container--open .select2-selection--single .select2-selection__arrow b{border-color:transparent transparent #888 transparent;border-width:0 4px 5px 4px}.select2-container--classic.select2-container--open.select2-container--above .select2-selection--single{border-top:none;border-top-left-radius:0;border-top-right-radius:0;background-image:-webkit-linear-gradient(top, #fff 0%, #eee 50%);background-image:-o-linear-gradient(top, #fff 0%, #eee 50%);background-image:linear-gradient(to bottom, #fff 0%, #eee 50%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#FFFFFFFF', endColorstr='#FFEEEEEE', GradientType=0)}.select2-container--classic.select2-container--open.select2-container--below .select2-selection--single{border-bottom:none;border-bottom-left-radius:0;border-bottom-right-radius:0;background-image:-webkit-linear-gradient(top, #eee 50%, #fff 100%);background-image:-o-linear-gradient(top, #eee 50%, #fff 100%);background-image:linear-gradient(to bottom, #eee 50%, #fff 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#FFEEEEEE', endColorstr='#FFFFFFFF', GradientType=0)}.select2-container--classic .select2-selection--multiple{background-color:white;border:1px solid #aaa;border-radius:4px;cursor:text;outline:0}.select2-container--classic .select2-selection--multiple:focus{border:1px solid #5897fb}.select2-container--classic .select2-selection--multiple .select2-selection__rendered{list-style:none;margin:0;padding:0 5px}.select2-container--classic .select2-selection--multiple .select2-selection__clear{display:none}.select2-container--classic .select2-selection--multiple .select2-selection__choice{background-color:#e4e4e4;border:1px solid #aaa;border-radius:4px;cursor:default;float:left;margin-right:5px;margin-top:5px;padding:0 5px}.select2-container--classic .select2-selection--multiple 
.select2-selection__choice__remove{color:#888;cursor:pointer;display:inline-block;font-weight:bold;margin-right:2px}.select2-container--classic .select2-selection--multiple .select2-selection__choice__remove:hover{color:#555}.select2-container--classic[dir="rtl"] .select2-selection--multiple .select2-selection__choice{float:right;margin-left:5px;margin-right:auto}.select2-container--classic[dir="rtl"] .select2-selection--multiple .select2-selection__choice__remove{margin-left:2px;margin-right:auto}.select2-container--classic.select2-container--open .select2-selection--multiple{border:1px solid #5897fb}.select2-container--classic.select2-container--open.select2-container--above .select2-selection--multiple{border-top:none;border-top-left-radius:0;border-top-right-radius:0}.select2-container--classic.select2-container--open.select2-container--below .select2-selection--multiple{border-bottom:none;border-bottom-left-radius:0;border-bottom-right-radius:0}.select2-container--classic .select2-search--dropdown .select2-search__field{border:1px solid #aaa;outline:0}.select2-container--classic .select2-search--inline .select2-search__field{outline:0;box-shadow:none}.select2-container--classic .select2-dropdown{background-color:#fff;border:1px solid transparent}.select2-container--classic .select2-dropdown--above{border-bottom:none}.select2-container--classic .select2-dropdown--below{border-top:none}.select2-container--classic .select2-results>.select2-results__options{max-height:200px;overflow-y:auto}.select2-container--classic .select2-results__option[role=group]{padding:0}.select2-container--classic .select2-results__option[aria-disabled=true]{color:grey}.select2-container--classic .select2-results__option--highlighted[aria-selected]{background-color:#3875d7;color:#fff}.select2-container--classic .select2-results__group{cursor:default;display:block;padding:6px}.select2-container--classic.select2-container--open .select2-dropdown{border-color:#5897fb} diff --git a/application/src/static/admin/css/widgets.css b/application/src/static/admin/css/widgets.css deleted file mode 100644 index 14ef12db9..000000000 --- a/application/src/static/admin/css/widgets.css +++ /dev/null @@ -1,574 +0,0 @@ -/* SELECTOR (FILTER INTERFACE) */ - -.selector { - width: 800px; - float: left; -} - -.selector select { - width: 380px; - height: 17.2em; -} - -.selector-available, .selector-chosen { - float: left; - width: 380px; - text-align: center; - margin-bottom: 5px; -} - -.selector-chosen select { - border-top: none; -} - -.selector-available h2, .selector-chosen h2 { - border: 1px solid #ccc; - border-radius: 4px 4px 0 0; -} - -.selector-chosen h2 { - background: #79aec8; - color: #fff; -} - -.selector .selector-available h2 { - background: #f8f8f8; - color: #666; -} - -.selector .selector-filter { - background: white; - border: 1px solid #ccc; - border-width: 0 1px; - padding: 8px; - color: #999; - font-size: 10px; - margin: 0; - text-align: left; -} - -.selector .selector-filter label, -.inline-group .aligned .selector .selector-filter label { - float: left; - margin: 7px 0 0; - width: 18px; - height: 18px; - padding: 0; - overflow: hidden; - line-height: 1; -} - -.selector .selector-available input { - width: 320px; - margin-left: 8px; -} - -.selector ul.selector-chooser { - float: left; - width: 22px; - background-color: #eee; - border-radius: 10px; - margin: 10em 5px 0 5px; - padding: 0; -} - -.selector-chooser li { - margin: 0; - padding: 3px; - list-style-type: none; -} - -.selector select { - padding: 0 10px; - 
margin: 0 0 10px; - border-radius: 0 0 4px 4px; -} - -.selector-add, .selector-remove { - width: 16px; - height: 16px; - display: block; - text-indent: -3000px; - overflow: hidden; - cursor: default; - opacity: 0.3; -} - -.active.selector-add, .active.selector-remove { - opacity: 1; -} - -.active.selector-add:hover, .active.selector-remove:hover { - cursor: pointer; -} - -.selector-add { - background: url(../img/selector-icons.svg) 0 -96px no-repeat; -} - -.active.selector-add:focus, .active.selector-add:hover { - background-position: 0 -112px; -} - -.selector-remove { - background: url(../img/selector-icons.svg) 0 -64px no-repeat; -} - -.active.selector-remove:focus, .active.selector-remove:hover { - background-position: 0 -80px; -} - -a.selector-chooseall, a.selector-clearall { - display: inline-block; - height: 16px; - text-align: left; - margin: 1px auto 3px; - overflow: hidden; - font-weight: bold; - line-height: 16px; - color: #666; - text-decoration: none; - opacity: 0.3; -} - -a.active.selector-chooseall:focus, a.active.selector-clearall:focus, -a.active.selector-chooseall:hover, a.active.selector-clearall:hover { - color: #447e9b; -} - -a.active.selector-chooseall, a.active.selector-clearall { - opacity: 1; -} - -a.active.selector-chooseall:hover, a.active.selector-clearall:hover { - cursor: pointer; -} - -a.selector-chooseall { - padding: 0 18px 0 0; - background: url(../img/selector-icons.svg) right -160px no-repeat; - cursor: default; -} - -a.active.selector-chooseall:focus, a.active.selector-chooseall:hover { - background-position: 100% -176px; -} - -a.selector-clearall { - padding: 0 0 0 18px; - background: url(../img/selector-icons.svg) 0 -128px no-repeat; - cursor: default; -} - -a.active.selector-clearall:focus, a.active.selector-clearall:hover { - background-position: 0 -144px; -} - -/* STACKED SELECTORS */ - -.stacked { - float: left; - width: 490px; -} - -.stacked select { - width: 480px; - height: 10.1em; -} - -.stacked .selector-available, .stacked .selector-chosen { - width: 480px; -} - -.stacked .selector-available { - margin-bottom: 0; -} - -.stacked .selector-available input { - width: 422px; -} - -.stacked ul.selector-chooser { - height: 22px; - width: 50px; - margin: 0 0 10px 40%; - background-color: #eee; - border-radius: 10px; -} - -.stacked .selector-chooser li { - float: left; - padding: 3px 3px 3px 5px; -} - -.stacked .selector-chooseall, .stacked .selector-clearall { - display: none; -} - -.stacked .selector-add { - background: url(../img/selector-icons.svg) 0 -32px no-repeat; - cursor: default; -} - -.stacked .active.selector-add { - background-position: 0 -32px; - cursor: pointer; -} - -.stacked .active.selector-add:focus, .stacked .active.selector-add:hover { - background-position: 0 -48px; - cursor: pointer; -} - -.stacked .selector-remove { - background: url(../img/selector-icons.svg) 0 0 no-repeat; - cursor: default; -} - -.stacked .active.selector-remove { - background-position: 0 0px; - cursor: pointer; -} - -.stacked .active.selector-remove:focus, .stacked .active.selector-remove:hover { - background-position: 0 -16px; - cursor: pointer; -} - -.selector .help-icon { - background: url(../img/icon-unknown.svg) 0 0 no-repeat; - display: inline-block; - vertical-align: middle; - margin: -2px 0 0 2px; - width: 13px; - height: 13px; -} - -.selector .selector-chosen .help-icon { - background: url(../img/icon-unknown-alt.svg) 0 0 no-repeat; -} - -.selector .search-label-icon { - background: url(../img/search.svg) 0 0 no-repeat; - display: inline-block; - 
height: 18px; - width: 18px; -} - -/* DATE AND TIME */ - -p.datetime { - line-height: 20px; - margin: 0; - padding: 0; - color: #666; - font-weight: bold; -} - -.datetime span { - white-space: nowrap; - font-weight: normal; - font-size: 11px; - color: #ccc; -} - -.datetime input, .form-row .datetime input.vDateField, .form-row .datetime input.vTimeField { - margin-left: 5px; - margin-bottom: 4px; -} - -table p.datetime { - font-size: 11px; - margin-left: 0; - padding-left: 0; -} - -.datetimeshortcuts .clock-icon, .datetimeshortcuts .date-icon { - position: relative; - display: inline-block; - vertical-align: middle; - height: 16px; - width: 16px; - overflow: hidden; -} - -.datetimeshortcuts .clock-icon { - background: url(../img/icon-clock.svg) 0 0 no-repeat; -} - -.datetimeshortcuts a:focus .clock-icon, -.datetimeshortcuts a:hover .clock-icon { - background-position: 0 -16px; -} - -.datetimeshortcuts .date-icon { - background: url(../img/icon-calendar.svg) 0 0 no-repeat; - top: -1px; -} - -.datetimeshortcuts a:focus .date-icon, -.datetimeshortcuts a:hover .date-icon { - background-position: 0 -16px; -} - -.timezonewarning { - font-size: 11px; - color: #999; -} - -/* URL */ - -p.url { - line-height: 20px; - margin: 0; - padding: 0; - color: #666; - font-size: 11px; - font-weight: bold; -} - -.url a { - font-weight: normal; -} - -/* FILE UPLOADS */ - -p.file-upload { - line-height: 20px; - margin: 0; - padding: 0; - color: #666; - font-size: 11px; - font-weight: bold; -} - -.aligned p.file-upload { - margin-left: 170px; -} - -.file-upload a { - font-weight: normal; -} - -.file-upload .deletelink { - margin-left: 5px; -} - -span.clearable-file-input label { - color: #333; - font-size: 11px; - display: inline; - float: none; -} - -/* CALENDARS & CLOCKS */ - -.calendarbox, .clockbox { - margin: 5px auto; - font-size: 12px; - width: 19em; - text-align: center; - background: white; - border: 1px solid #ddd; - border-radius: 4px; - box-shadow: 0 2px 4px rgba(0, 0, 0, 0.15); - overflow: hidden; - position: relative; -} - -.clockbox { - width: auto; -} - -.calendar { - margin: 0; - padding: 0; -} - -.calendar table { - margin: 0; - padding: 0; - border-collapse: collapse; - background: white; - width: 100%; -} - -.calendar caption, .calendarbox h2 { - margin: 0; - text-align: center; - border-top: none; - background: #f5dd5d; - font-weight: 700; - font-size: 12px; - color: #333; -} - -.calendar th { - padding: 8px 5px; - background: #f8f8f8; - border-bottom: 1px solid #ddd; - font-weight: 400; - font-size: 12px; - text-align: center; - color: #666; -} - -.calendar td { - font-weight: 400; - font-size: 12px; - text-align: center; - padding: 0; - border-top: 1px solid #eee; - border-bottom: none; -} - -.calendar td.selected a { - background: #79aec8; - color: #fff; -} - -.calendar td.nonday { - background: #f8f8f8; -} - -.calendar td.today a { - font-weight: 700; -} - -.calendar td a, .timelist a { - display: block; - font-weight: 400; - padding: 6px; - text-decoration: none; - color: #444; -} - -.calendar td a:focus, .timelist a:focus, -.calendar td a:hover, .timelist a:hover { - background: #79aec8; - color: white; -} - -.calendar td a:active, .timelist a:active { - background: #417690; - color: white; -} - -.calendarnav { - font-size: 10px; - text-align: center; - color: #ccc; - margin: 0; - padding: 1px 3px; -} - -.calendarnav a:link, #calendarnav a:visited, -#calendarnav a:focus, #calendarnav a:hover { - color: #999; -} - -.calendar-shortcuts { - background: white; - font-size: 11px; - 
line-height: 11px; - border-top: 1px solid #eee; - padding: 8px 0; - color: #ccc; -} - -.calendarbox .calendarnav-previous, .calendarbox .calendarnav-next { - display: block; - position: absolute; - top: 8px; - width: 15px; - height: 15px; - text-indent: -9999px; - padding: 0; -} - -.calendarnav-previous { - left: 10px; - background: url(../img/calendar-icons.svg) 0 0 no-repeat; -} - -.calendarbox .calendarnav-previous:focus, -.calendarbox .calendarnav-previous:hover { - background-position: 0 -15px; -} - -.calendarnav-next { - right: 10px; - background: url(../img/calendar-icons.svg) 0 -30px no-repeat; -} - -.calendarbox .calendarnav-next:focus, -.calendarbox .calendarnav-next:hover { - background-position: 0 -45px; -} - -.calendar-cancel { - margin: 0; - padding: 4px 0; - font-size: 12px; - background: #eee; - border-top: 1px solid #ddd; - color: #333; -} - -.calendar-cancel:focus, .calendar-cancel:hover { - background: #ddd; -} - -.calendar-cancel a { - color: black; - display: block; -} - -ul.timelist, .timelist li { - list-style-type: none; - margin: 0; - padding: 0; -} - -.timelist a { - padding: 2px; -} - -/* EDIT INLINE */ - -.inline-deletelink { - float: right; - text-indent: -9999px; - background: url(../img/inline-delete.svg) 0 0 no-repeat; - width: 16px; - height: 16px; - border: 0px none; -} - -.inline-deletelink:focus, .inline-deletelink:hover { - cursor: pointer; -} - -/* RELATED WIDGET WRAPPER */ -.related-widget-wrapper { - float: left; /* display properly in form rows with multiple fields */ - overflow: hidden; /* clear floated contents */ -} - -.related-widget-wrapper-link { - opacity: 0.3; -} - -.related-widget-wrapper-link:link { - opacity: .8; -} - -.related-widget-wrapper-link:link:focus, -.related-widget-wrapper-link:link:hover { - opacity: 1; -} - -select + .related-widget-wrapper-link, -.related-widget-wrapper-link + .related-widget-wrapper-link { - margin-left: 7px; -} diff --git a/application/src/static/admin/fonts/LICENSE.txt b/application/src/static/admin/fonts/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/application/src/static/admin/fonts/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/application/src/static/admin/fonts/README.txt b/application/src/static/admin/fonts/README.txt deleted file mode 100644 index b247bef33..000000000 --- a/application/src/static/admin/fonts/README.txt +++ /dev/null @@ -1,3 +0,0 @@ -Roboto webfont source: https://www.google.com/fonts/specimen/Roboto -WOFF files extracted using https://github.com/majodev/google-webfonts-helper -Weights used in this project: Light (300), Regular (400), Bold (700) diff --git a/application/src/static/admin/fonts/Roboto-Bold-webfont.woff b/application/src/static/admin/fonts/Roboto-Bold-webfont.woff deleted file mode 100644 index 6e0f56267..000000000 Binary files a/application/src/static/admin/fonts/Roboto-Bold-webfont.woff and /dev/null differ diff --git a/application/src/static/admin/fonts/Roboto-Light-webfont.woff b/application/src/static/admin/fonts/Roboto-Light-webfont.woff deleted file mode 100644 index b9e99185c..000000000 Binary files a/application/src/static/admin/fonts/Roboto-Light-webfont.woff and /dev/null differ diff --git a/application/src/static/admin/fonts/Roboto-Regular-webfont.woff b/application/src/static/admin/fonts/Roboto-Regular-webfont.woff deleted file mode 100644 index 96c1986f0..000000000 Binary files a/application/src/static/admin/fonts/Roboto-Regular-webfont.woff and /dev/null differ diff --git a/application/src/static/admin/img/LICENSE b/application/src/static/admin/img/LICENSE deleted file mode 100644 index a4faaa1df..000000000 --- a/application/src/static/admin/img/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Code Charm Ltd - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/application/src/static/admin/img/README.txt b/application/src/static/admin/img/README.txt deleted file mode 100644 index 4eb2e492a..000000000 --- a/application/src/static/admin/img/README.txt +++ /dev/null @@ -1,7 +0,0 @@ -All icons are taken from Font Awesome (http://fontawesome.io/) project. -The Font Awesome font is licensed under the SIL OFL 1.1: -- https://scripts.sil.org/OFL - -SVG icons source: https://github.com/encharm/Font-Awesome-SVG-PNG -Font-Awesome-SVG-PNG is licensed under the MIT license (see file license -in current folder). 
diff --git a/application/src/static/admin/img/calendar-icons.svg b/application/src/static/admin/img/calendar-icons.svg deleted file mode 100644 index dbf21c39d..000000000 --- a/application/src/static/admin/img/calendar-icons.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - - - - - - - - - diff --git a/application/src/static/admin/img/gis/move_vertex_off.svg b/application/src/static/admin/img/gis/move_vertex_off.svg deleted file mode 100644 index 228854f3b..000000000 --- a/application/src/static/admin/img/gis/move_vertex_off.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/application/src/static/admin/img/gis/move_vertex_on.svg b/application/src/static/admin/img/gis/move_vertex_on.svg deleted file mode 100644 index 96b87fdd7..000000000 --- a/application/src/static/admin/img/gis/move_vertex_on.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/application/src/static/admin/img/icon-addlink.svg b/application/src/static/admin/img/icon-addlink.svg deleted file mode 100644 index e004fb162..000000000 --- a/application/src/static/admin/img/icon-addlink.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/application/src/static/admin/img/icon-alert.svg b/application/src/static/admin/img/icon-alert.svg deleted file mode 100644 index e51ea83f5..000000000 --- a/application/src/static/admin/img/icon-alert.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/application/src/static/admin/img/icon-calendar.svg b/application/src/static/admin/img/icon-calendar.svg deleted file mode 100644 index 97910a994..000000000 --- a/application/src/static/admin/img/icon-calendar.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - diff --git a/application/src/static/admin/img/icon-changelink.svg b/application/src/static/admin/img/icon-changelink.svg deleted file mode 100644 index bbb137aa0..000000000 --- a/application/src/static/admin/img/icon-changelink.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/application/src/static/admin/img/icon-clock.svg b/application/src/static/admin/img/icon-clock.svg deleted file mode 100644 index bf9985d3f..000000000 --- a/application/src/static/admin/img/icon-clock.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - diff --git a/application/src/static/admin/img/icon-deletelink.svg b/application/src/static/admin/img/icon-deletelink.svg deleted file mode 100644 index 4059b1554..000000000 --- a/application/src/static/admin/img/icon-deletelink.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/application/src/static/admin/img/icon-no.svg b/application/src/static/admin/img/icon-no.svg deleted file mode 100644 index 2e0d3832c..000000000 --- a/application/src/static/admin/img/icon-no.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/application/src/static/admin/img/icon-unknown-alt.svg b/application/src/static/admin/img/icon-unknown-alt.svg deleted file mode 100644 index 1c6b99fc0..000000000 --- a/application/src/static/admin/img/icon-unknown-alt.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/application/src/static/admin/img/icon-unknown.svg b/application/src/static/admin/img/icon-unknown.svg deleted file mode 100644 index 50b4f9727..000000000 --- a/application/src/static/admin/img/icon-unknown.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/application/src/static/admin/img/icon-viewlink.svg b/application/src/static/admin/img/icon-viewlink.svg deleted file mode 100644 index a1ca1d3f4..000000000 --- a/application/src/static/admin/img/icon-viewlink.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git 
a/application/src/static/admin/img/icon-yes.svg b/application/src/static/admin/img/icon-yes.svg deleted file mode 100644 index 5883d877e..000000000 --- a/application/src/static/admin/img/icon-yes.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/application/src/static/admin/img/inline-delete.svg b/application/src/static/admin/img/inline-delete.svg deleted file mode 100644 index 17d1ad67c..000000000 --- a/application/src/static/admin/img/inline-delete.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/application/src/static/admin/img/search.svg b/application/src/static/admin/img/search.svg deleted file mode 100644 index c8c69b2ac..000000000 --- a/application/src/static/admin/img/search.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/application/src/static/admin/img/selector-icons.svg b/application/src/static/admin/img/selector-icons.svg deleted file mode 100644 index 926b8e21b..000000000 --- a/application/src/static/admin/img/selector-icons.svg +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/application/src/static/admin/img/sorting-icons.svg b/application/src/static/admin/img/sorting-icons.svg deleted file mode 100644 index 7c31ec911..000000000 --- a/application/src/static/admin/img/sorting-icons.svg +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - - - - - - - - - - - - - - diff --git a/application/src/static/admin/img/tooltag-add.svg b/application/src/static/admin/img/tooltag-add.svg deleted file mode 100644 index 1ca64ae5b..000000000 --- a/application/src/static/admin/img/tooltag-add.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/application/src/static/admin/img/tooltag-arrowright.svg b/application/src/static/admin/img/tooltag-arrowright.svg deleted file mode 100644 index b664d6193..000000000 --- a/application/src/static/admin/img/tooltag-arrowright.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/application/src/static/admin/js/SelectBox.js b/application/src/static/admin/js/SelectBox.js deleted file mode 100644 index 1927b4cef..000000000 --- a/application/src/static/admin/js/SelectBox.js +++ /dev/null @@ -1,110 +0,0 @@ -'use strict'; -{ - const SelectBox = { - cache: {}, - init: function(id) { - const box = document.getElementById(id); - SelectBox.cache[id] = []; - const cache = SelectBox.cache[id]; - for (const node of box.options) { - cache.push({value: node.value, text: node.text, displayed: 1}); - } - }, - redisplay: function(id) { - // Repopulate HTML select box from cache - const box = document.getElementById(id); - box.innerHTML = ''; - for (const node of SelectBox.cache[id]) { - if (node.displayed) { - const new_option = new Option(node.text, node.value, false, false); - // Shows a tooltip when hovering over the option - new_option.title = node.text; - box.appendChild(new_option); - } - } - }, - filter: function(id, text) { - // Redisplay the HTML select box, displaying only the choices containing ALL - // the words in text. (It's an AND search.) 
- const tokens = text.toLowerCase().split(/\s+/); - for (const node of SelectBox.cache[id]) { - node.displayed = 1; - const node_text = node.text.toLowerCase(); - for (const token of tokens) { - if (node_text.indexOf(token) === -1) { - node.displayed = 0; - break; // Once the first token isn't found we're done - } - } - } - SelectBox.redisplay(id); - }, - delete_from_cache: function(id, value) { - let delete_index = null; - const cache = SelectBox.cache[id]; - for (const [i, node] of cache.entries()) { - if (node.value === value) { - delete_index = i; - break; - } - } - cache.splice(delete_index, 1); - }, - add_to_cache: function(id, option) { - SelectBox.cache[id].push({value: option.value, text: option.text, displayed: 1}); - }, - cache_contains: function(id, value) { - // Check if an item is contained in the cache - for (const node of SelectBox.cache[id]) { - if (node.value === value) { - return true; - } - } - return false; - }, - move: function(from, to) { - const from_box = document.getElementById(from); - for (const option of from_box.options) { - const option_value = option.value; - if (option.selected && SelectBox.cache_contains(from, option_value)) { - SelectBox.add_to_cache(to, {value: option_value, text: option.text, displayed: 1}); - SelectBox.delete_from_cache(from, option_value); - } - } - SelectBox.redisplay(from); - SelectBox.redisplay(to); - }, - move_all: function(from, to) { - const from_box = document.getElementById(from); - for (const option of from_box.options) { - const option_value = option.value; - if (SelectBox.cache_contains(from, option_value)) { - SelectBox.add_to_cache(to, {value: option_value, text: option.text, displayed: 1}); - SelectBox.delete_from_cache(from, option_value); - } - } - SelectBox.redisplay(from); - SelectBox.redisplay(to); - }, - sort: function(id) { - SelectBox.cache[id].sort(function(a, b) { - a = a.text.toLowerCase(); - b = b.text.toLowerCase(); - if (a > b) { - return 1; - } - if (a < b) { - return -1; - } - return 0; - } ); - }, - select_all: function(id) { - const box = document.getElementById(id); - for (const option of box.options) { - option.selected = true; - } - } - }; - window.SelectBox = SelectBox; -} diff --git a/application/src/static/admin/js/SelectFilter2.js b/application/src/static/admin/js/SelectFilter2.js deleted file mode 100644 index 6c709a08c..000000000 --- a/application/src/static/admin/js/SelectFilter2.js +++ /dev/null @@ -1,236 +0,0 @@ -/*global SelectBox, gettext, interpolate, quickElement, SelectFilter*/ -/* -SelectFilter2 - Turns a multiple-select box into a filter interface. - -Requires core.js and SelectBox.js. -*/ -'use strict'; -{ - window.SelectFilter = { - init: function(field_id, field_name, is_stacked) { - if (field_id.match(/__prefix__/)) { - // Don't initialize on empty forms. - return; - } - const from_box = document.getElementById(field_id); - from_box.id += '_from'; // change its ID - from_box.className = 'filtered'; - - for (const p of from_box.parentNode.getElementsByTagName('p')) { - if (p.classList.contains("info")) { - // Remove
<p class="info">
, because it just gets in the way. - from_box.parentNode.removeChild(p); - } else if (p.classList.contains("help")) { - // Move help text up to the top so it isn't below the select - // boxes or wrapped off on the side to the right of the add - // button: - from_box.parentNode.insertBefore(p, from_box.parentNode.firstChild); - } - } - - //

<div class="selector"> or <div class="selector stacked">
- const selector_div = quickElement('div', from_box.parentNode); - selector_div.className = is_stacked ? 'selector stacked' : 'selector'; - - //
- const selector_available = quickElement('div', selector_div); - selector_available.className = 'selector-available'; - const title_available = quickElement('h2', selector_available, interpolate(gettext('Available %s') + ' ', [field_name])); - quickElement( - 'span', title_available, '', - 'class', 'help help-tooltip help-icon', - 'title', interpolate( - gettext( - 'This is the list of available %s. You may choose some by ' + - 'selecting them in the box below and then clicking the ' + - '"Choose" arrow between the two boxes.' - ), - [field_name] - ) - ); - - const filter_p = quickElement('p', selector_available, '', 'id', field_id + '_filter'); - filter_p.className = 'selector-filter'; - - const search_filter_label = quickElement('label', filter_p, '', 'for', field_id + '_input'); - - quickElement( - 'span', search_filter_label, '', - 'class', 'help-tooltip search-label-icon', - 'title', interpolate(gettext("Type into this box to filter down the list of available %s."), [field_name]) - ); - - filter_p.appendChild(document.createTextNode(' ')); - - const filter_input = quickElement('input', filter_p, '', 'type', 'text', 'placeholder', gettext("Filter")); - filter_input.id = field_id + '_input'; - - selector_available.appendChild(from_box); - const choose_all = quickElement('a', selector_available, gettext('Choose all'), 'title', interpolate(gettext('Click to choose all %s at once.'), [field_name]), 'href', '#', 'id', field_id + '_add_all_link'); - choose_all.className = 'selector-chooseall'; - - //
    - const selector_chooser = quickElement('ul', selector_div); - selector_chooser.className = 'selector-chooser'; - const add_link = quickElement('a', quickElement('li', selector_chooser), gettext('Choose'), 'title', gettext('Choose'), 'href', '#', 'id', field_id + '_add_link'); - add_link.className = 'selector-add'; - const remove_link = quickElement('a', quickElement('li', selector_chooser), gettext('Remove'), 'title', gettext('Remove'), 'href', '#', 'id', field_id + '_remove_link'); - remove_link.className = 'selector-remove'; - - //
    - const selector_chosen = quickElement('div', selector_div); - selector_chosen.className = 'selector-chosen'; - const title_chosen = quickElement('h2', selector_chosen, interpolate(gettext('Chosen %s') + ' ', [field_name])); - quickElement( - 'span', title_chosen, '', - 'class', 'help help-tooltip help-icon', - 'title', interpolate( - gettext( - 'This is the list of chosen %s. You may remove some by ' + - 'selecting them in the box below and then clicking the ' + - '"Remove" arrow between the two boxes.' - ), - [field_name] - ) - ); - - const to_box = quickElement('select', selector_chosen, '', 'id', field_id + '_to', 'multiple', '', 'size', from_box.size, 'name', from_box.name); - to_box.className = 'filtered'; - const clear_all = quickElement('a', selector_chosen, gettext('Remove all'), 'title', interpolate(gettext('Click to remove all chosen %s at once.'), [field_name]), 'href', '#', 'id', field_id + '_remove_all_link'); - clear_all.className = 'selector-clearall'; - - from_box.name = from_box.name + '_old'; - - // Set up the JavaScript event handlers for the select box filter interface - const move_selection = function(e, elem, move_func, from, to) { - if (elem.classList.contains('active')) { - move_func(from, to); - SelectFilter.refresh_icons(field_id); - } - e.preventDefault(); - }; - choose_all.addEventListener('click', function(e) { - move_selection(e, this, SelectBox.move_all, field_id + '_from', field_id + '_to'); - }); - add_link.addEventListener('click', function(e) { - move_selection(e, this, SelectBox.move, field_id + '_from', field_id + '_to'); - }); - remove_link.addEventListener('click', function(e) { - move_selection(e, this, SelectBox.move, field_id + '_to', field_id + '_from'); - }); - clear_all.addEventListener('click', function(e) { - move_selection(e, this, SelectBox.move_all, field_id + '_to', field_id + '_from'); - }); - filter_input.addEventListener('keypress', function(e) { - SelectFilter.filter_key_press(e, field_id); - }); - filter_input.addEventListener('keyup', function(e) { - SelectFilter.filter_key_up(e, field_id); - }); - filter_input.addEventListener('keydown', function(e) { - SelectFilter.filter_key_down(e, field_id); - }); - selector_div.addEventListener('change', function(e) { - if (e.target.tagName === 'SELECT') { - SelectFilter.refresh_icons(field_id); - } - }); - selector_div.addEventListener('dblclick', function(e) { - if (e.target.tagName === 'OPTION') { - if (e.target.closest('select').id === field_id + '_to') { - SelectBox.move(field_id + '_to', field_id + '_from'); - } else { - SelectBox.move(field_id + '_from', field_id + '_to'); - } - SelectFilter.refresh_icons(field_id); - } - }); - from_box.closest('form').addEventListener('submit', function() { - SelectBox.select_all(field_id + '_to'); - }); - SelectBox.init(field_id + '_from'); - SelectBox.init(field_id + '_to'); - // Move selected from_box options to to_box - SelectBox.move(field_id + '_from', field_id + '_to'); - - if (!is_stacked) { - // In horizontal mode, give the same height to the two boxes. - const j_from_box = document.getElementById(field_id + '_from'); - const j_to_box = document.getElementById(field_id + '_to'); - let height = filter_p.offsetHeight + j_from_box.offsetHeight; - - const j_to_box_style = window.getComputedStyle(j_to_box); - if (j_to_box_style.getPropertyValue('box-sizing') === 'border-box') { - // Add the padding and border to the final height. 
- height += parseInt(j_to_box_style.getPropertyValue('padding-top'), 10) - + parseInt(j_to_box_style.getPropertyValue('padding-bottom'), 10) - + parseInt(j_to_box_style.getPropertyValue('border-top-width'), 10) - + parseInt(j_to_box_style.getPropertyValue('border-bottom-width'), 10); - } - - j_to_box.style.height = height + 'px'; - } - - // Initial icon refresh - SelectFilter.refresh_icons(field_id); - }, - any_selected: function(field) { - // Temporarily add the required attribute and check validity. - field.required = true; - const any_selected = field.checkValidity(); - field.required = false; - return any_selected; - }, - refresh_icons: function(field_id) { - const from = document.getElementById(field_id + '_from'); - const to = document.getElementById(field_id + '_to'); - // Active if at least one item is selected - document.getElementById(field_id + '_add_link').classList.toggle('active', SelectFilter.any_selected(from)); - document.getElementById(field_id + '_remove_link').classList.toggle('active', SelectFilter.any_selected(to)); - // Active if the corresponding box isn't empty - document.getElementById(field_id + '_add_all_link').classList.toggle('active', from.querySelector('option')); - document.getElementById(field_id + '_remove_all_link').classList.toggle('active', to.querySelector('option')); - }, - filter_key_press: function(event, field_id) { - const from = document.getElementById(field_id + '_from'); - // don't submit form if user pressed Enter - if ((event.which && event.which === 13) || (event.keyCode && event.keyCode === 13)) { - from.selectedIndex = 0; - SelectBox.move(field_id + '_from', field_id + '_to'); - from.selectedIndex = 0; - event.preventDefault(); - } - }, - filter_key_up: function(event, field_id) { - const from = document.getElementById(field_id + '_from'); - const temp = from.selectedIndex; - SelectBox.filter(field_id + '_from', document.getElementById(field_id + '_input').value); - from.selectedIndex = temp; - }, - filter_key_down: function(event, field_id) { - const from = document.getElementById(field_id + '_from'); - // right arrow -- move across - if ((event.which && event.which === 39) || (event.keyCode && event.keyCode === 39)) { - const old_index = from.selectedIndex; - SelectBox.move(field_id + '_from', field_id + '_to'); - from.selectedIndex = (old_index === from.length) ? from.length - 1 : old_index; - return; - } - // down arrow -- wrap around - if ((event.which && event.which === 40) || (event.keyCode && event.keyCode === 40)) { - from.selectedIndex = (from.length === from.selectedIndex + 1) ? 0 : from.selectedIndex + 1; - } - // up arrow -- wrap around - if ((event.which && event.which === 38) || (event.keyCode && event.keyCode === 38)) { - from.selectedIndex = (from.selectedIndex === 0) ? 
from.length - 1 : from.selectedIndex - 1; - } - } - }; - - window.addEventListener('load', function(e) { - document.querySelectorAll('select.selectfilter, select.selectfilterstacked').forEach(function(el) { - const data = el.dataset; - SelectFilter.init(el.id, data.fieldName, parseInt(data.isStacked, 10)); - }); - }); -} diff --git a/application/src/static/admin/js/actions.js b/application/src/static/admin/js/actions.js deleted file mode 100644 index dae69920b..000000000 --- a/application/src/static/admin/js/actions.js +++ /dev/null @@ -1,154 +0,0 @@ -/*global gettext, interpolate, ngettext*/ -'use strict'; -{ - const $ = django.jQuery; - let lastChecked; - - $.fn.actions = function(opts) { - const options = $.extend({}, $.fn.actions.defaults, opts); - const actionCheckboxes = $(this); - let list_editable_changed = false; - const showQuestion = function() { - $(options.acrossClears).hide(); - $(options.acrossQuestions).show(); - $(options.allContainer).hide(); - }, - showClear = function() { - $(options.acrossClears).show(); - $(options.acrossQuestions).hide(); - $(options.actionContainer).toggleClass(options.selectedClass); - $(options.allContainer).show(); - $(options.counterContainer).hide(); - }, - reset = function() { - $(options.acrossClears).hide(); - $(options.acrossQuestions).hide(); - $(options.allContainer).hide(); - $(options.counterContainer).show(); - }, - clearAcross = function() { - reset(); - $(options.acrossInput).val(0); - $(options.actionContainer).removeClass(options.selectedClass); - }, - checker = function(checked) { - if (checked) { - showQuestion(); - } else { - reset(); - } - $(actionCheckboxes).prop("checked", checked) - .parent().parent().toggleClass(options.selectedClass, checked); - }, - updateCounter = function() { - const sel = $(actionCheckboxes).filter(":checked").length; - // data-actions-icnt is defined in the generated HTML - // and contains the total amount of objects in the queryset - const actions_icnt = $('.action-counter').data('actionsIcnt'); - $(options.counterContainer).html(interpolate( - ngettext('%(sel)s of %(cnt)s selected', '%(sel)s of %(cnt)s selected', sel), { - sel: sel, - cnt: actions_icnt - }, true)); - $(options.allToggle).prop("checked", function() { - let value; - if (sel === actionCheckboxes.length) { - value = true; - showQuestion(); - } else { - value = false; - clearAcross(); - } - return value; - }); - }; - // Show counter by default - $(options.counterContainer).show(); - // Check state of checkboxes and reinit state if needed - $(this).filter(":checked").each(function(i) { - $(this).parent().parent().toggleClass(options.selectedClass); - updateCounter(); - if ($(options.acrossInput).val() === 1) { - showClear(); - } - }); - $(options.allToggle).show().on('click', function() { - checker($(this).prop("checked")); - updateCounter(); - }); - $("a", options.acrossQuestions).on('click', function(event) { - event.preventDefault(); - $(options.acrossInput).val(1); - showClear(); - }); - $("a", options.acrossClears).on('click', function(event) { - event.preventDefault(); - $(options.allToggle).prop("checked", false); - clearAcross(); - checker(0); - updateCounter(); - }); - lastChecked = null; - $(actionCheckboxes).on('click', function(event) { - if (!event) { event = window.event; } - const target = event.target ? 
event.target : event.srcElement; - if (lastChecked && $.data(lastChecked) !== $.data(target) && event.shiftKey === true) { - let inrange = false; - $(lastChecked).prop("checked", target.checked) - .parent().parent().toggleClass(options.selectedClass, target.checked); - $(actionCheckboxes).each(function() { - if ($.data(this) === $.data(lastChecked) || $.data(this) === $.data(target)) { - inrange = (inrange) ? false : true; - } - if (inrange) { - $(this).prop("checked", target.checked) - .parent().parent().toggleClass(options.selectedClass, target.checked); - } - }); - } - $(target).parent().parent().toggleClass(options.selectedClass, target.checked); - lastChecked = target; - updateCounter(); - }); - $('form#changelist-form table#result_list tr').on('change', 'td:gt(0) :input', function() { - list_editable_changed = true; - }); - $('form#changelist-form button[name="index"]').on('click', function(event) { - if (list_editable_changed) { - return confirm(gettext("You have unsaved changes on individual editable fields. If you run an action, your unsaved changes will be lost.")); - } - }); - $('form#changelist-form input[name="_save"]').on('click', function(event) { - let action_changed = false; - $('select option:selected', options.actionContainer).each(function() { - if ($(this).val()) { - action_changed = true; - } - }); - if (action_changed) { - if (list_editable_changed) { - return confirm(gettext("You have selected an action, but you haven’t saved your changes to individual fields yet. Please click OK to save. You’ll need to re-run the action.")); - } else { - return confirm(gettext("You have selected an action, and you haven’t made any changes on individual fields. You’re probably looking for the Go button rather than the Save button.")); - } - } - }); - }; - /* Setup plugin defaults */ - $.fn.actions.defaults = { - actionContainer: "div.actions", - counterContainer: "span.action-counter", - allContainer: "div.actions span.all", - acrossInput: "div.actions input.select-across", - acrossQuestions: "div.actions span.question", - acrossClears: "div.actions span.clear", - allToggle: "#action-toggle", - selectedClass: "selected" - }; - $(document).ready(function() { - const $actionsEls = $('tr input.action-select'); - if ($actionsEls.length > 0) { - $actionsEls.actions(); - } - }); -} diff --git a/application/src/static/admin/js/actions.min.js b/application/src/static/admin/js/actions.min.js deleted file mode 100644 index 29fd0d8c2..000000000 --- a/application/src/static/admin/js/actions.min.js +++ /dev/null @@ -1,7 +0,0 @@ -'use strict';{const a=django.jQuery;let e;a.fn.actions=function(g){const b=a.extend({},a.fn.actions.defaults,g),f=a(this);let k=!1;const l=function(){a(b.acrossClears).hide();a(b.acrossQuestions).show();a(b.allContainer).hide()},m=function(){a(b.acrossClears).show();a(b.acrossQuestions).hide();a(b.actionContainer).toggleClass(b.selectedClass);a(b.allContainer).show();a(b.counterContainer).hide()},n=function(){a(b.acrossClears).hide();a(b.acrossQuestions).hide();a(b.allContainer).hide();a(b.counterContainer).show()}, -p=function(){n();a(b.acrossInput).val(0);a(b.actionContainer).removeClass(b.selectedClass)},q=function(c){c?l():n();a(f).prop("checked",c).parent().parent().toggleClass(b.selectedClass,c)},h=function(){const c=a(f).filter(":checked").length,d=a(".action-counter").data("actionsIcnt");a(b.counterContainer).html(interpolate(ngettext("%(sel)s of %(cnt)s selected","%(sel)s of %(cnt)s selected",c),{sel:c,cnt:d},!0));a(b.allToggle).prop("checked",function(){let 
a;c===f.length?(a=!0,l()):(a=!1,p());return a})}; -a(b.counterContainer).show();a(this).filter(":checked").each(function(c){a(this).parent().parent().toggleClass(b.selectedClass);h();1===a(b.acrossInput).val()&&m()});a(b.allToggle).show().on("click",function(){q(a(this).prop("checked"));h()});a("a",b.acrossQuestions).on("click",function(c){c.preventDefault();a(b.acrossInput).val(1);m()});a("a",b.acrossClears).on("click",function(c){c.preventDefault();a(b.allToggle).prop("checked",!1);p();q(0);h()});e=null;a(f).on("click",function(c){c||(c=window.event); -const d=c.target?c.target:c.srcElement;if(e&&a.data(e)!==a.data(d)&&!0===c.shiftKey){let c=!1;a(e).prop("checked",d.checked).parent().parent().toggleClass(b.selectedClass,d.checked);a(f).each(function(){if(a.data(this)===a.data(e)||a.data(this)===a.data(d))c=c?!1:!0;c&&a(this).prop("checked",d.checked).parent().parent().toggleClass(b.selectedClass,d.checked)})}a(d).parent().parent().toggleClass(b.selectedClass,d.checked);e=d;h()});a("form#changelist-form table#result_list tr").on("change","td:gt(0) :input", -function(){k=!0});a('form#changelist-form button[name="index"]').on("click",function(a){if(k)return confirm(gettext("You have unsaved changes on individual editable fields. If you run an action, your unsaved changes will be lost."))});a('form#changelist-form input[name="_save"]').on("click",function(c){let d=!1;a("select option:selected",b.actionContainer).each(function(){a(this).val()&&(d=!0)});if(d)return k?confirm(gettext("You have selected an action, but you haven\u2019t saved your changes to individual fields yet. Please click OK to save. You\u2019ll need to re-run the action.")): -confirm(gettext("You have selected an action, and you haven\u2019t made any changes on individual fields. You\u2019re probably looking for the Go button rather than the Save button."))})};a.fn.actions.defaults={actionContainer:"div.actions",counterContainer:"span.action-counter",allContainer:"div.actions span.all",acrossInput:"div.actions input.select-across",acrossQuestions:"div.actions span.question",acrossClears:"div.actions span.clear",allToggle:"#action-toggle",selectedClass:"selected"};a(document).ready(function(){const g= -a("tr input.action-select");0 -// -'use strict'; -{ - const DateTimeShortcuts = { - calendars: [], - calendarInputs: [], - clockInputs: [], - clockHours: { - default_: [ - [gettext_noop('Now'), -1], - [gettext_noop('Midnight'), 0], - [gettext_noop('6 a.m.'), 6], - [gettext_noop('Noon'), 12], - [gettext_noop('6 p.m.'), 18] - ] - }, - dismissClockFunc: [], - dismissCalendarFunc: [], - calendarDivName1: 'calendarbox', // name of calendar
<div> that gets toggled - calendarDivName2: 'calendarin', // name of <div>
    that contains calendar - calendarLinkName: 'calendarlink', // name of the link that is used to toggle - clockDivName: 'clockbox', // name of clock <div>
    that gets toggled - clockLinkName: 'clocklink', // name of the link that is used to toggle - shortCutsClass: 'datetimeshortcuts', // class of the clock and cal shortcuts - timezoneWarningClass: 'timezonewarning', // class of the warning for timezone mismatch - timezoneOffset: 0, - init: function() { - const body = document.getElementsByTagName('body')[0]; - const serverOffset = body.dataset.adminUtcOffset; - if (serverOffset) { - const localOffset = new Date().getTimezoneOffset() * -60; - DateTimeShortcuts.timezoneOffset = localOffset - serverOffset; - } - - for (const inp of document.getElementsByTagName('input')) { - if (inp.type === 'text' && inp.classList.contains('vTimeField')) { - DateTimeShortcuts.addClock(inp); - DateTimeShortcuts.addTimezoneWarning(inp); - } - else if (inp.type === 'text' && inp.classList.contains('vDateField')) { - DateTimeShortcuts.addCalendar(inp); - DateTimeShortcuts.addTimezoneWarning(inp); - } - } - }, - // Return the current time while accounting for the server timezone. - now: function() { - const body = document.getElementsByTagName('body')[0]; - const serverOffset = body.dataset.adminUtcOffset; - if (serverOffset) { - const localNow = new Date(); - const localOffset = localNow.getTimezoneOffset() * -60; - localNow.setTime(localNow.getTime() + 1000 * (serverOffset - localOffset)); - return localNow; - } else { - return new Date(); - } - }, - // Add a warning when the time zone in the browser and backend do not match. - addTimezoneWarning: function(inp) { - const warningClass = DateTimeShortcuts.timezoneWarningClass; - let timezoneOffset = DateTimeShortcuts.timezoneOffset / 3600; - - // Only warn if there is a time zone mismatch. - if (!timezoneOffset) { - return; - } - - // Check if warning is already there. - if (inp.parentNode.querySelectorAll('.' 
+ warningClass).length) { - return; - } - - let message; - if (timezoneOffset > 0) { - message = ngettext( - 'Note: You are %s hour ahead of server time.', - 'Note: You are %s hours ahead of server time.', - timezoneOffset - ); - } - else { - timezoneOffset *= -1; - message = ngettext( - 'Note: You are %s hour behind server time.', - 'Note: You are %s hours behind server time.', - timezoneOffset - ); - } - message = interpolate(message, [timezoneOffset]); - - const warning = document.createElement('span'); - warning.className = warningClass; - warning.textContent = message; - inp.parentNode.appendChild(document.createElement('br')); - inp.parentNode.appendChild(warning); - }, - // Add clock widget to a given field - addClock: function(inp) { - const num = DateTimeShortcuts.clockInputs.length; - DateTimeShortcuts.clockInputs[num] = inp; - DateTimeShortcuts.dismissClockFunc[num] = function() { DateTimeShortcuts.dismissClock(num); return true; }; - - // Shortcut links (clock icon and "Now" link) - const shortcuts_span = document.createElement('span'); - shortcuts_span.className = DateTimeShortcuts.shortCutsClass; - inp.parentNode.insertBefore(shortcuts_span, inp.nextSibling); - const now_link = document.createElement('a'); - now_link.href = "#"; - now_link.textContent = gettext('Now'); - now_link.addEventListener('click', function(e) { - e.preventDefault(); - DateTimeShortcuts.handleClockQuicklink(num, -1); - }); - const clock_link = document.createElement('a'); - clock_link.href = '#'; - clock_link.id = DateTimeShortcuts.clockLinkName + num; - clock_link.addEventListener('click', function(e) { - e.preventDefault(); - // avoid triggering the document click handler to dismiss the clock - e.stopPropagation(); - DateTimeShortcuts.openClock(num); - }); - - quickElement( - 'span', clock_link, '', - 'class', 'clock-icon', - 'title', gettext('Choose a Time') - ); - shortcuts_span.appendChild(document.createTextNode('\u00A0')); - shortcuts_span.appendChild(now_link); - shortcuts_span.appendChild(document.createTextNode('\u00A0|\u00A0')); - shortcuts_span.appendChild(clock_link); - - // Create clock link div - // - // Markup looks like: - //
<div id="clockbox1" class="clockbox module">
- //     <h2>Choose a time</h2>
- //     <ul class="timelist">
- //         <li><a href="#">Now</a></li>
- //         ...
- //     </ul>
- //     <p class="calendar-cancel"><a href="#">Cancel</a></p>
- // </div>
    - - const clock_box = document.createElement('div'); - clock_box.style.display = 'none'; - clock_box.style.position = 'absolute'; - clock_box.className = 'clockbox module'; - clock_box.id = DateTimeShortcuts.clockDivName + num; - document.body.appendChild(clock_box); - clock_box.addEventListener('click', function(e) { e.stopPropagation(); }); - - quickElement('h2', clock_box, gettext('Choose a time')); - const time_list = quickElement('ul', clock_box); - time_list.className = 'timelist'; - // The list of choices can be overridden in JavaScript like this: - // DateTimeShortcuts.clockHours.name = [['3 a.m.', 3]]; - // where name is the name attribute of the . - const name = typeof DateTimeShortcuts.clockHours[inp.name] === 'undefined' ? 'default_' : inp.name; - DateTimeShortcuts.clockHours[name].forEach(function(element) { - const time_link = quickElement('a', quickElement('li', time_list), gettext(element[0]), 'href', '#'); - time_link.addEventListener('click', function(e) { - e.preventDefault(); - DateTimeShortcuts.handleClockQuicklink(num, element[1]); - }); - }); - - const cancel_p = quickElement('p', clock_box); - cancel_p.className = 'calendar-cancel'; - const cancel_link = quickElement('a', cancel_p, gettext('Cancel'), 'href', '#'); - cancel_link.addEventListener('click', function(e) { - e.preventDefault(); - DateTimeShortcuts.dismissClock(num); - }); - - document.addEventListener('keyup', function(event) { - if (event.which === 27) { - // ESC key closes popup - DateTimeShortcuts.dismissClock(num); - event.preventDefault(); - } - }); - }, - openClock: function(num) { - const clock_box = document.getElementById(DateTimeShortcuts.clockDivName + num); - const clock_link = document.getElementById(DateTimeShortcuts.clockLinkName + num); - - // Recalculate the clockbox position - // is it left-to-right or right-to-left layout ? - if (window.getComputedStyle(document.body).direction !== 'rtl') { - clock_box.style.left = findPosX(clock_link) + 17 + 'px'; - } - else { - // since style's width is in em, it'd be tough to calculate - // px value of it. let's use an estimated px for now - clock_box.style.left = findPosX(clock_link) - 110 + 'px'; - } - clock_box.style.top = Math.max(0, findPosY(clock_link) - 30) + 'px'; - - // Show the clock box - clock_box.style.display = 'block'; - document.addEventListener('click', DateTimeShortcuts.dismissClockFunc[num]); - }, - dismissClock: function(num) { - document.getElementById(DateTimeShortcuts.clockDivName + num).style.display = 'none'; - document.removeEventListener('click', DateTimeShortcuts.dismissClockFunc[num]); - }, - handleClockQuicklink: function(num, val) { - let d; - if (val === -1) { - d = DateTimeShortcuts.now(); - } - else { - d = new Date(1970, 1, 1, val, 0, 0, 0); - } - DateTimeShortcuts.clockInputs[num].value = d.strftime(get_format('TIME_INPUT_FORMATS')[0]); - DateTimeShortcuts.clockInputs[num].focus(); - DateTimeShortcuts.dismissClock(num); - }, - // Add calendar widget to a given field. 
- addCalendar: function(inp) { - const num = DateTimeShortcuts.calendars.length; - - DateTimeShortcuts.calendarInputs[num] = inp; - DateTimeShortcuts.dismissCalendarFunc[num] = function() { DateTimeShortcuts.dismissCalendar(num); return true; }; - - // Shortcut links (calendar icon and "Today" link) - const shortcuts_span = document.createElement('span'); - shortcuts_span.className = DateTimeShortcuts.shortCutsClass; - inp.parentNode.insertBefore(shortcuts_span, inp.nextSibling); - const today_link = document.createElement('a'); - today_link.href = '#'; - today_link.appendChild(document.createTextNode(gettext('Today'))); - today_link.addEventListener('click', function(e) { - e.preventDefault(); - DateTimeShortcuts.handleCalendarQuickLink(num, 0); - }); - const cal_link = document.createElement('a'); - cal_link.href = '#'; - cal_link.id = DateTimeShortcuts.calendarLinkName + num; - cal_link.addEventListener('click', function(e) { - e.preventDefault(); - // avoid triggering the document click handler to dismiss the calendar - e.stopPropagation(); - DateTimeShortcuts.openCalendar(num); - }); - quickElement( - 'span', cal_link, '', - 'class', 'date-icon', - 'title', gettext('Choose a Date') - ); - shortcuts_span.appendChild(document.createTextNode('\u00A0')); - shortcuts_span.appendChild(today_link); - shortcuts_span.appendChild(document.createTextNode('\u00A0|\u00A0')); - shortcuts_span.appendChild(cal_link); - - // Create calendarbox div. - // - // Markup looks like: - // - //
<div id="calendarbox1" class="calendarbox module">
- //     <h2>
- //         <a class="calendarnav-previous" href="#">&lsaquo;</a>
- //         <a class="calendarnav-next" href="#">&rsaquo;</a> February 2003
- //     </h2>
- //     <div class="calendar" id="calendarin1"></div>
- //     <div class="calendar-shortcuts">
- //         <a href="#">Yesterday</a> | <a href="#">Today</a> | <a href="#">Tomorrow</a>
- //     </div>
- //     <p class="calendar-cancel">
- //         <a href="#">Cancel</a>
- //     </p>
- // </div>
    - const cal_box = document.createElement('div'); - cal_box.style.display = 'none'; - cal_box.style.position = 'absolute'; - cal_box.className = 'calendarbox module'; - cal_box.id = DateTimeShortcuts.calendarDivName1 + num; - document.body.appendChild(cal_box); - cal_box.addEventListener('click', function(e) { e.stopPropagation(); }); - - // next-prev links - const cal_nav = quickElement('div', cal_box); - const cal_nav_prev = quickElement('a', cal_nav, '<', 'href', '#'); - cal_nav_prev.className = 'calendarnav-previous'; - cal_nav_prev.addEventListener('click', function(e) { - e.preventDefault(); - DateTimeShortcuts.drawPrev(num); - }); - - const cal_nav_next = quickElement('a', cal_nav, '>', 'href', '#'); - cal_nav_next.className = 'calendarnav-next'; - cal_nav_next.addEventListener('click', function(e) { - e.preventDefault(); - DateTimeShortcuts.drawNext(num); - }); - - // main box - const cal_main = quickElement('div', cal_box, '', 'id', DateTimeShortcuts.calendarDivName2 + num); - cal_main.className = 'calendar'; - DateTimeShortcuts.calendars[num] = new Calendar(DateTimeShortcuts.calendarDivName2 + num, DateTimeShortcuts.handleCalendarCallback(num)); - DateTimeShortcuts.calendars[num].drawCurrent(); - - // calendar shortcuts - const shortcuts = quickElement('div', cal_box); - shortcuts.className = 'calendar-shortcuts'; - let day_link = quickElement('a', shortcuts, gettext('Yesterday'), 'href', '#'); - day_link.addEventListener('click', function(e) { - e.preventDefault(); - DateTimeShortcuts.handleCalendarQuickLink(num, -1); - }); - shortcuts.appendChild(document.createTextNode('\u00A0|\u00A0')); - day_link = quickElement('a', shortcuts, gettext('Today'), 'href', '#'); - day_link.addEventListener('click', function(e) { - e.preventDefault(); - DateTimeShortcuts.handleCalendarQuickLink(num, 0); - }); - shortcuts.appendChild(document.createTextNode('\u00A0|\u00A0')); - day_link = quickElement('a', shortcuts, gettext('Tomorrow'), 'href', '#'); - day_link.addEventListener('click', function(e) { - e.preventDefault(); - DateTimeShortcuts.handleCalendarQuickLink(num, +1); - }); - - // cancel bar - const cancel_p = quickElement('p', cal_box); - cancel_p.className = 'calendar-cancel'; - const cancel_link = quickElement('a', cancel_p, gettext('Cancel'), 'href', '#'); - cancel_link.addEventListener('click', function(e) { - e.preventDefault(); - DateTimeShortcuts.dismissCalendar(num); - }); - document.addEventListener('keyup', function(event) { - if (event.which === 27) { - // ESC key closes popup - DateTimeShortcuts.dismissCalendar(num); - event.preventDefault(); - } - }); - }, - openCalendar: function(num) { - const cal_box = document.getElementById(DateTimeShortcuts.calendarDivName1 + num); - const cal_link = document.getElementById(DateTimeShortcuts.calendarLinkName + num); - const inp = DateTimeShortcuts.calendarInputs[num]; - - // Determine if the current value in the input has a valid date. - // If so, draw the calendar with that date's year and month. - if (inp.value) { - const format = get_format('DATE_INPUT_FORMATS')[0]; - const selected = inp.value.strptime(format); - const year = selected.getUTCFullYear(); - const month = selected.getUTCMonth() + 1; - const re = /\d{4}/; - if (re.test(year.toString()) && month >= 1 && month <= 12) { - DateTimeShortcuts.calendars[num].drawDate(month, year, selected); - } - } - - // Recalculate the clockbox position - // is it left-to-right or right-to-left layout ? 
- if (window.getComputedStyle(document.body).direction !== 'rtl') { - cal_box.style.left = findPosX(cal_link) + 17 + 'px'; - } - else { - // since style's width is in em, it'd be tough to calculate - // px value of it. let's use an estimated px for now - cal_box.style.left = findPosX(cal_link) - 180 + 'px'; - } - cal_box.style.top = Math.max(0, findPosY(cal_link) - 75) + 'px'; - - cal_box.style.display = 'block'; - document.addEventListener('click', DateTimeShortcuts.dismissCalendarFunc[num]); - }, - dismissCalendar: function(num) { - document.getElementById(DateTimeShortcuts.calendarDivName1 + num).style.display = 'none'; - document.removeEventListener('click', DateTimeShortcuts.dismissCalendarFunc[num]); - }, - drawPrev: function(num) { - DateTimeShortcuts.calendars[num].drawPreviousMonth(); - }, - drawNext: function(num) { - DateTimeShortcuts.calendars[num].drawNextMonth(); - }, - handleCalendarCallback: function(num) { - let format = get_format('DATE_INPUT_FORMATS')[0]; - // the format needs to be escaped a little - format = format.replace('\\', '\\\\') - .replace('\r', '\\r') - .replace('\n', '\\n') - .replace('\t', '\\t') - .replace("'", "\\'"); - return function(y, m, d) { - DateTimeShortcuts.calendarInputs[num].value = new Date(y, m - 1, d).strftime(format); - DateTimeShortcuts.calendarInputs[num].focus(); - document.getElementById(DateTimeShortcuts.calendarDivName1 + num).style.display = 'none'; - }; - }, - handleCalendarQuickLink: function(num, offset) { - const d = DateTimeShortcuts.now(); - d.setDate(d.getDate() + offset); - DateTimeShortcuts.calendarInputs[num].value = d.strftime(get_format('DATE_INPUT_FORMATS')[0]); - DateTimeShortcuts.calendarInputs[num].focus(); - DateTimeShortcuts.dismissCalendar(num); - } - }; - - window.addEventListener('load', DateTimeShortcuts.init); - window.DateTimeShortcuts = DateTimeShortcuts; -} diff --git a/application/src/static/admin/js/admin/RelatedObjectLookups.js b/application/src/static/admin/js/admin/RelatedObjectLookups.js deleted file mode 100644 index 8c95df7c1..000000000 --- a/application/src/static/admin/js/admin/RelatedObjectLookups.js +++ /dev/null @@ -1,159 +0,0 @@ -/*global SelectBox, interpolate*/ -// Handles related-objects functionality: lookup link for raw_id_fields -// and Add Another links. 
-'use strict'; -{ - const $ = django.jQuery; - - function showAdminPopup(triggeringLink, name_regexp, add_popup) { - const name = triggeringLink.id.replace(name_regexp, ''); - let href = triggeringLink.href; - if (add_popup) { - if (href.indexOf('?') === -1) { - href += '?_popup=1'; - } else { - href += '&_popup=1'; - } - } - const win = window.open(href, name, 'height=500,width=800,resizable=yes,scrollbars=yes'); - win.focus(); - return false; - } - - function showRelatedObjectLookupPopup(triggeringLink) { - return showAdminPopup(triggeringLink, /^lookup_/, true); - } - - function dismissRelatedLookupPopup(win, chosenId) { - const name = win.name; - const elem = document.getElementById(name); - if (elem.classList.contains('vManyToManyRawIdAdminField') && elem.value) { - elem.value += ',' + chosenId; - } else { - document.getElementById(name).value = chosenId; - } - win.close(); - } - - function showRelatedObjectPopup(triggeringLink) { - return showAdminPopup(triggeringLink, /^(change|add|delete)_/, false); - } - - function updateRelatedObjectLinks(triggeringLink) { - const $this = $(triggeringLink); - const siblings = $this.nextAll('.view-related, .change-related, .delete-related'); - if (!siblings.length) { - return; - } - const value = $this.val(); - if (value) { - siblings.each(function() { - const elm = $(this); - elm.attr('href', elm.attr('data-href-template').replace('__fk__', value)); - }); - } else { - siblings.removeAttr('href'); - } - } - - function dismissAddRelatedObjectPopup(win, newId, newRepr) { - const name = win.name; - const elem = document.getElementById(name); - if (elem) { - const elemName = elem.nodeName.toUpperCase(); - if (elemName === 'SELECT') { - elem.options[elem.options.length] = new Option(newRepr, newId, true, true); - } else if (elemName === 'INPUT') { - if (elem.classList.contains('vManyToManyRawIdAdminField') && elem.value) { - elem.value += ',' + newId; - } else { - elem.value = newId; - } - } - // Trigger a change event to update related links if required. - $(elem).trigger('change'); - } else { - const toId = name + "_to"; - const o = new Option(newRepr, newId); - SelectBox.add_to_cache(toId, o); - SelectBox.redisplay(toId); - } - win.close(); - } - - function dismissChangeRelatedObjectPopup(win, objId, newRepr, newId) { - const id = win.name.replace(/^edit_/, ''); - const selectsSelector = interpolate('#%s, #%s_from, #%s_to', [id, id, id]); - const selects = $(selectsSelector); - selects.find('option').each(function() { - if (this.value === objId) { - this.textContent = newRepr; - this.value = newId; - } - }); - selects.next().find('.select2-selection__rendered').each(function() { - // The element can have a clear button as a child. - // Use the lastChild to modify only the displayed value. 
- this.lastChild.textContent = newRepr; - this.title = newRepr; - }); - win.close(); - } - - function dismissDeleteRelatedObjectPopup(win, objId) { - const id = win.name.replace(/^delete_/, ''); - const selectsSelector = interpolate('#%s, #%s_from, #%s_to', [id, id, id]); - const selects = $(selectsSelector); - selects.find('option').each(function() { - if (this.value === objId) { - $(this).remove(); - } - }).trigger('change'); - win.close(); - } - - window.showRelatedObjectLookupPopup = showRelatedObjectLookupPopup; - window.dismissRelatedLookupPopup = dismissRelatedLookupPopup; - window.showRelatedObjectPopup = showRelatedObjectPopup; - window.updateRelatedObjectLinks = updateRelatedObjectLinks; - window.dismissAddRelatedObjectPopup = dismissAddRelatedObjectPopup; - window.dismissChangeRelatedObjectPopup = dismissChangeRelatedObjectPopup; - window.dismissDeleteRelatedObjectPopup = dismissDeleteRelatedObjectPopup; - - // Kept for backward compatibility - window.showAddAnotherPopup = showRelatedObjectPopup; - window.dismissAddAnotherPopup = dismissAddRelatedObjectPopup; - - $(document).ready(function() { - $("a[data-popup-opener]").on('click', function(event) { - event.preventDefault(); - opener.dismissRelatedLookupPopup(window, $(this).data("popup-opener")); - }); - $('body').on('click', '.related-widget-wrapper-link', function(e) { - e.preventDefault(); - if (this.href) { - const event = $.Event('django:show-related', {href: this.href}); - $(this).trigger(event); - if (!event.isDefaultPrevented()) { - showRelatedObjectPopup(this); - } - } - }); - $('body').on('change', '.related-widget-wrapper select', function(e) { - const event = $.Event('django:update-related'); - $(this).trigger(event); - if (!event.isDefaultPrevented()) { - updateRelatedObjectLinks(this); - } - }); - $('.related-widget-wrapper select').trigger('change'); - $('body').on('click', '.related-lookup', function(e) { - e.preventDefault(); - const event = $.Event('django:lookup-related'); - $(this).trigger(event); - if (!event.isDefaultPrevented()) { - showRelatedObjectLookupPopup(this); - } - }); - }); -} diff --git a/application/src/static/admin/js/autocomplete.js b/application/src/static/admin/js/autocomplete.js deleted file mode 100644 index c922b303a..000000000 --- a/application/src/static/admin/js/autocomplete.js +++ /dev/null @@ -1,38 +0,0 @@ -'use strict'; -{ - const $ = django.jQuery; - const init = function($element, options) { - const settings = $.extend({ - ajax: { - data: function(params) { - return { - term: params.term, - page: params.page - }; - } - } - }, options); - $element.select2(settings); - }; - - $.fn.djangoAdminSelect2 = function(options) { - const settings = $.extend({}, options); - $.each(this, function(i, element) { - const $element = $(element); - init($element, settings); - }); - return this; - }; - - $(function() { - // Initialize all autocomplete widgets except the one in the template - // form used when a new formset is added. 
- $('.admin-autocomplete').not('[name*=__prefix__]').djangoAdminSelect2(); - }); - - $(document).on('formset:added', (function() { - return function(event, $newFormset) { - return $newFormset.find('.admin-autocomplete').djangoAdminSelect2(); - }; - })(this)); -} diff --git a/application/src/static/admin/js/calendar.js b/application/src/static/admin/js/calendar.js deleted file mode 100644 index 64598bbb6..000000000 --- a/application/src/static/admin/js/calendar.js +++ /dev/null @@ -1,207 +0,0 @@ -/*global gettext, pgettext, get_format, quickElement, removeChildren*/ -/* -calendar.js - Calendar functions by Adrian Holovaty -depends on core.js for utility functions like removeChildren or quickElement -*/ -'use strict'; -{ - // CalendarNamespace -- Provides a collection of HTML calendar-related helper functions - const CalendarNamespace = { - monthsOfYear: [ - gettext('January'), - gettext('February'), - gettext('March'), - gettext('April'), - gettext('May'), - gettext('June'), - gettext('July'), - gettext('August'), - gettext('September'), - gettext('October'), - gettext('November'), - gettext('December') - ], - daysOfWeek: [ - pgettext('one letter Sunday', 'S'), - pgettext('one letter Monday', 'M'), - pgettext('one letter Tuesday', 'T'), - pgettext('one letter Wednesday', 'W'), - pgettext('one letter Thursday', 'T'), - pgettext('one letter Friday', 'F'), - pgettext('one letter Saturday', 'S') - ], - firstDayOfWeek: parseInt(get_format('FIRST_DAY_OF_WEEK')), - isLeapYear: function(year) { - return (((year % 4) === 0) && ((year % 100) !== 0 ) || ((year % 400) === 0)); - }, - getDaysInMonth: function(month, year) { - let days; - if (month === 1 || month === 3 || month === 5 || month === 7 || month === 8 || month === 10 || month === 12) { - days = 31; - } - else if (month === 4 || month === 6 || month === 9 || month === 11) { - days = 30; - } - else if (month === 2 && CalendarNamespace.isLeapYear(year)) { - days = 29; - } - else { - days = 28; - } - return days; - }, - draw: function(month, year, div_id, callback, selected) { // month = 1-12, year = 1-9999 - const today = new Date(); - const todayDay = today.getDate(); - const todayMonth = today.getMonth() + 1; - const todayYear = today.getFullYear(); - let todayClass = ''; - - // Use UTC functions here because the date field does not contain time - // and using the UTC function variants prevent the local time offset - // from altering the date, specifically the day field. For example: - // - // ``` - // var x = new Date('2013-10-02'); - // var day = x.getDate(); - // ``` - // - // The day variable above will be 1 instead of 2 in, say, US Pacific time - // zone. 
- let isSelectedMonth = false; - if (typeof selected !== 'undefined') { - isSelectedMonth = (selected.getUTCFullYear() === year && (selected.getUTCMonth() + 1) === month); - } - - month = parseInt(month); - year = parseInt(year); - const calDiv = document.getElementById(div_id); - removeChildren(calDiv); - const calTable = document.createElement('table'); - quickElement('caption', calTable, CalendarNamespace.monthsOfYear[month - 1] + ' ' + year); - const tableBody = quickElement('tbody', calTable); - - // Draw days-of-week header - let tableRow = quickElement('tr', tableBody); - for (let i = 0; i < 7; i++) { - quickElement('th', tableRow, CalendarNamespace.daysOfWeek[(i + CalendarNamespace.firstDayOfWeek) % 7]); - } - - const startingPos = new Date(year, month - 1, 1 - CalendarNamespace.firstDayOfWeek).getDay(); - const days = CalendarNamespace.getDaysInMonth(month, year); - - let nonDayCell; - - // Draw blanks before first of month - tableRow = quickElement('tr', tableBody); - for (let i = 0; i < startingPos; i++) { - nonDayCell = quickElement('td', tableRow, ' '); - nonDayCell.className = "nonday"; - } - - function calendarMonth(y, m) { - function onClick(e) { - e.preventDefault(); - callback(y, m, this.textContent); - } - return onClick; - } - - // Draw days of month - let currentDay = 1; - for (let i = startingPos; currentDay <= days; i++) { - if (i % 7 === 0 && currentDay !== 1) { - tableRow = quickElement('tr', tableBody); - } - if ((currentDay === todayDay) && (month === todayMonth) && (year === todayYear)) { - todayClass = 'today'; - } else { - todayClass = ''; - } - - // use UTC function; see above for explanation. - if (isSelectedMonth && currentDay === selected.getUTCDate()) { - if (todayClass !== '') { - todayClass += " "; - } - todayClass += "selected"; - } - - const cell = quickElement('td', tableRow, '', 'class', todayClass); - const link = quickElement('a', cell, currentDay, 'href', '#'); - link.addEventListener('click', calendarMonth(year, month)); - currentDay++; - } - - // Draw blanks after end of month (optional, but makes for valid code) - while (tableRow.childNodes.length < 7) { - nonDayCell = quickElement('td', tableRow, ' '); - nonDayCell.className = "nonday"; - } - - calDiv.appendChild(calTable); - } - }; - - // Calendar -- A calendar instance - function Calendar(div_id, callback, selected) { - // div_id (string) is the ID of the element in which the calendar will - // be displayed - // callback (string) is the name of a JavaScript function that will be - // called with the parameters (year, month, day) when a day in the - // calendar is clicked - this.div_id = div_id; - this.callback = callback; - this.today = new Date(); - this.currentMonth = this.today.getMonth() + 1; - this.currentYear = this.today.getFullYear(); - if (typeof selected !== 'undefined') { - this.selected = selected; - } - } - Calendar.prototype = { - drawCurrent: function() { - CalendarNamespace.draw(this.currentMonth, this.currentYear, this.div_id, this.callback, this.selected); - }, - drawDate: function(month, year, selected) { - this.currentMonth = month; - this.currentYear = year; - - if(selected) { - this.selected = selected; - } - - this.drawCurrent(); - }, - drawPreviousMonth: function() { - if (this.currentMonth === 1) { - this.currentMonth = 12; - this.currentYear--; - } - else { - this.currentMonth--; - } - this.drawCurrent(); - }, - drawNextMonth: function() { - if (this.currentMonth === 12) { - this.currentMonth = 1; - this.currentYear++; - } - else { - this.currentMonth++; - } - 
this.drawCurrent(); - }, - drawPreviousYear: function() { - this.currentYear--; - this.drawCurrent(); - }, - drawNextYear: function() { - this.currentYear++; - this.drawCurrent(); - } - }; - window.Calendar = Calendar; - window.CalendarNamespace = CalendarNamespace; -} diff --git a/application/src/static/admin/js/cancel.js b/application/src/static/admin/js/cancel.js deleted file mode 100644 index cfe06c279..000000000 --- a/application/src/static/admin/js/cancel.js +++ /dev/null @@ -1,28 +0,0 @@ -'use strict'; -{ - // Call function fn when the DOM is loaded and ready. If it is already - // loaded, call the function now. - // http://youmightnotneedjquery.com/#ready - function ready(fn) { - if (document.readyState !== 'loading') { - fn(); - } else { - document.addEventListener('DOMContentLoaded', fn); - } - } - - ready(function() { - function handleClick(event) { - event.preventDefault(); - if (window.location.search.indexOf('&_popup=1') === -1) { - window.history.back(); // Go back if not a popup. - } else { - window.close(); // Otherwise, close the popup. - } - } - - document.querySelectorAll('.cancel-link').forEach(function(el) { - el.addEventListener('click', handleClick); - }); - }); -} diff --git a/application/src/static/admin/js/change_form.js b/application/src/static/admin/js/change_form.js deleted file mode 100644 index 96a4c62ef..000000000 --- a/application/src/static/admin/js/change_form.js +++ /dev/null @@ -1,16 +0,0 @@ -'use strict'; -{ - const inputTags = ['BUTTON', 'INPUT', 'SELECT', 'TEXTAREA']; - const modelName = document.getElementById('django-admin-form-add-constants').dataset.modelName; - if (modelName) { - const form = document.getElementById(modelName + '_form'); - for (const element of form.elements) { - // HTMLElement.offsetParent returns null when the element is not - // rendered. 
- if (inputTags.includes(element.tagName) && !element.disabled && element.offsetParent) { - element.focus(); - break; - } - } - } -} diff --git a/application/src/static/admin/js/collapse.js b/application/src/static/admin/js/collapse.js deleted file mode 100644 index c6c7b0f68..000000000 --- a/application/src/static/admin/js/collapse.js +++ /dev/null @@ -1,43 +0,0 @@ -/*global gettext*/ -'use strict'; -{ - window.addEventListener('load', function() { - // Add anchor tag for Show/Hide link - const fieldsets = document.querySelectorAll('fieldset.collapse'); - for (const [i, elem] of fieldsets.entries()) { - // Don't hide if fields in this fieldset have errors - if (elem.querySelectorAll('div.errors, ul.errorlist').length === 0) { - elem.classList.add('collapsed'); - const h2 = elem.querySelector('h2'); - const link = document.createElement('a'); - link.id = 'fieldsetcollapser' + i; - link.className = 'collapse-toggle'; - link.href = '#'; - link.textContent = gettext('Show'); - h2.appendChild(document.createTextNode(' (')); - h2.appendChild(link); - h2.appendChild(document.createTextNode(')')); - } - } - // Add toggle to hide/show anchor tag - const toggleFunc = function(ev) { - if (ev.target.matches('.collapse-toggle')) { - ev.preventDefault(); - ev.stopPropagation(); - const fieldset = ev.target.closest('fieldset'); - if (fieldset.classList.contains('collapsed')) { - // Show - ev.target.textContent = gettext('Hide'); - fieldset.classList.remove('collapsed'); - } else { - // Hide - ev.target.textContent = gettext('Show'); - fieldset.classList.add('collapsed'); - } - } - }; - document.querySelectorAll('fieldset.module').forEach(function(el) { - el.addEventListener('click', toggleFunc); - }); - }); -} diff --git a/application/src/static/admin/js/collapse.min.js b/application/src/static/admin/js/collapse.min.js deleted file mode 100644 index 06201c597..000000000 --- a/application/src/static/admin/js/collapse.min.js +++ /dev/null @@ -1,2 +0,0 @@ -'use strict';window.addEventListener("load",function(){var c=document.querySelectorAll("fieldset.collapse");for(const [a,b]of c.entries())if(0===b.querySelectorAll("div.errors, ul.errorlist").length){b.classList.add("collapsed");c=b.querySelector("h2");const d=document.createElement("a");d.id="fieldsetcollapser"+a;d.className="collapse-toggle";d.href="#";d.textContent=gettext("Show");c.appendChild(document.createTextNode(" ("));c.appendChild(d);c.appendChild(document.createTextNode(")"))}const e= -function(a){if(a.target.matches(".collapse-toggle")){a.preventDefault();a.stopPropagation();const b=a.target.closest("fieldset");b.classList.contains("collapsed")?(a.target.textContent=gettext("Hide"),b.classList.remove("collapsed")):(a.target.textContent=gettext("Show"),b.classList.add("collapsed"))}};document.querySelectorAll("fieldset.module").forEach(function(a){a.addEventListener("click",e)})}); diff --git a/application/src/static/admin/js/core.js b/application/src/static/admin/js/core.js deleted file mode 100644 index 8ef27b348..000000000 --- a/application/src/static/admin/js/core.js +++ /dev/null @@ -1,163 +0,0 @@ -// Core javascript helper functions -'use strict'; - -// quickElement(tagType, parentReference [, textInChildNode, attribute, attributeValue ...]); -function quickElement() { - const obj = document.createElement(arguments[0]); - if (arguments[2]) { - const textNode = document.createTextNode(arguments[2]); - obj.appendChild(textNode); - } - const len = arguments.length; - for (let i = 3; i < len; i += 2) { - obj.setAttribute(arguments[i], 
arguments[i + 1]); - } - arguments[1].appendChild(obj); - return obj; -} - -// "a" is reference to an object -function removeChildren(a) { - while (a.hasChildNodes()) { - a.removeChild(a.lastChild); - } -} - -// ---------------------------------------------------------------------------- -// Find-position functions by PPK -// See https://www.quirksmode.org/js/findpos.html -// ---------------------------------------------------------------------------- -function findPosX(obj) { - let curleft = 0; - if (obj.offsetParent) { - while (obj.offsetParent) { - curleft += obj.offsetLeft - obj.scrollLeft; - obj = obj.offsetParent; - } - } else if (obj.x) { - curleft += obj.x; - } - return curleft; -} - -function findPosY(obj) { - let curtop = 0; - if (obj.offsetParent) { - while (obj.offsetParent) { - curtop += obj.offsetTop - obj.scrollTop; - obj = obj.offsetParent; - } - } else if (obj.y) { - curtop += obj.y; - } - return curtop; -} - -//----------------------------------------------------------------------------- -// Date object extensions -// ---------------------------------------------------------------------------- -{ - Date.prototype.getTwelveHours = function() { - return this.getHours() % 12 || 12; - }; - - Date.prototype.getTwoDigitMonth = function() { - return (this.getMonth() < 9) ? '0' + (this.getMonth() + 1) : (this.getMonth() + 1); - }; - - Date.prototype.getTwoDigitDate = function() { - return (this.getDate() < 10) ? '0' + this.getDate() : this.getDate(); - }; - - Date.prototype.getTwoDigitTwelveHour = function() { - return (this.getTwelveHours() < 10) ? '0' + this.getTwelveHours() : this.getTwelveHours(); - }; - - Date.prototype.getTwoDigitHour = function() { - return (this.getHours() < 10) ? '0' + this.getHours() : this.getHours(); - }; - - Date.prototype.getTwoDigitMinute = function() { - return (this.getMinutes() < 10) ? '0' + this.getMinutes() : this.getMinutes(); - }; - - Date.prototype.getTwoDigitSecond = function() { - return (this.getSeconds() < 10) ? '0' + this.getSeconds() : this.getSeconds(); - }; - - Date.prototype.getFullMonthName = function() { - return typeof window.CalendarNamespace === "undefined" - ? this.getTwoDigitMonth() - : window.CalendarNamespace.monthsOfYear[this.getMonth()]; - }; - - Date.prototype.strftime = function(format) { - const fields = { - B: this.getFullMonthName(), - c: this.toString(), - d: this.getTwoDigitDate(), - H: this.getTwoDigitHour(), - I: this.getTwoDigitTwelveHour(), - m: this.getTwoDigitMonth(), - M: this.getTwoDigitMinute(), - p: (this.getHours() >= 12) ? 
'PM' : 'AM', - S: this.getTwoDigitSecond(), - w: '0' + this.getDay(), - x: this.toLocaleDateString(), - X: this.toLocaleTimeString(), - y: ('' + this.getFullYear()).substr(2, 4), - Y: '' + this.getFullYear(), - '%': '%' - }; - let result = '', i = 0; - while (i < format.length) { - if (format.charAt(i) === '%') { - result = result + fields[format.charAt(i + 1)]; - ++i; - } - else { - result = result + format.charAt(i); - } - ++i; - } - return result; - }; - - // ---------------------------------------------------------------------------- - // String object extensions - // ---------------------------------------------------------------------------- - String.prototype.strptime = function(format) { - const split_format = format.split(/[.\-/]/); - const date = this.split(/[.\-/]/); - let i = 0; - let day, month, year; - while (i < split_format.length) { - switch (split_format[i]) { - case "%d": - day = date[i]; - break; - case "%m": - month = date[i] - 1; - break; - case "%Y": - year = date[i]; - break; - case "%y": - // A %y value in the range of [00, 68] is in the current - // century, while [69, 99] is in the previous century, - // according to the Open Group Specification. - if (parseInt(date[i], 10) >= 69) { - year = date[i]; - } else { - year = (new Date(Date.UTC(date[i], 0))).getUTCFullYear() + 100; - } - break; - } - ++i; - } - // Create Date object from UTC since the parsed value is supposed to be - // in UTC, not local time. Also, the calendar uses UTC functions for - // date extraction. - return new Date(Date.UTC(year, month, day)); - }; -} diff --git a/application/src/static/admin/js/inlines.js b/application/src/static/admin/js/inlines.js deleted file mode 100644 index 82ec02723..000000000 --- a/application/src/static/admin/js/inlines.js +++ /dev/null @@ -1,348 +0,0 @@ -/*global DateTimeShortcuts, SelectFilter*/ -/** - * Django admin inlines - * - * Based on jQuery Formset 1.1 - * @author Stanislaus Madueke (stan DOT madueke AT gmail DOT com) - * @requires jQuery 1.2.6 or later - * - * Copyright (c) 2009, Stanislaus Madueke - * All rights reserved. - * - * Spiced up with Code from Zain Memon's GSoC project 2009 - * and modified for Django by Jannis Leidel, Travis Swicegood and Julien Phalip. - * - * Licensed under the New BSD License - * See: https://opensource.org/licenses/bsd-license.php - */ -'use strict'; -{ - const $ = django.jQuery; - $.fn.formset = function(opts) { - const options = $.extend({}, $.fn.formset.defaults, opts); - const $this = $(this); - const $parent = $this.parent(); - const updateElementIndex = function(el, prefix, ndx) { - const id_regex = new RegExp("(" + prefix + "-(\\d+|__prefix__))"); - const replacement = prefix + "-" + ndx; - if ($(el).prop("for")) { - $(el).prop("for", $(el).prop("for").replace(id_regex, replacement)); - } - if (el.id) { - el.id = el.id.replace(id_regex, replacement); - } - if (el.name) { - el.name = el.name.replace(id_regex, replacement); - } - }; - const totalForms = $("#id_" + options.prefix + "-TOTAL_FORMS").prop("autocomplete", "off"); - let nextIndex = parseInt(totalForms.val(), 10); - const maxForms = $("#id_" + options.prefix + "-MAX_NUM_FORMS").prop("autocomplete", "off"); - const minForms = $("#id_" + options.prefix + "-MIN_NUM_FORMS").prop("autocomplete", "off"); - let addButton; - - /** - * The "Add another MyModel" button below the inline forms. 
- */ - const addInlineAddButton = function() { - if (addButton === null) { - if ($this.prop("tagName") === "TR") { - // If forms are laid out as table rows, insert the - // "add" button in a new table row: - const numCols = $this.eq(-1).children().length; - $parent.append('' + options.addText + ""); - addButton = $parent.find("tr:last a"); - } else { - // Otherwise, insert it immediately after the last form: - $this.filter(":last").after('"); - addButton = $this.filter(":last").next().find("a"); - } - } - addButton.on('click', addInlineClickHandler); - }; - - const addInlineClickHandler = function(e) { - e.preventDefault(); - const template = $("#" + options.prefix + "-empty"); - const row = template.clone(true); - row.removeClass(options.emptyCssClass) - .addClass(options.formCssClass) - .attr("id", options.prefix + "-" + nextIndex); - addInlineDeleteButton(row); - row.find("*").each(function() { - updateElementIndex(this, options.prefix, totalForms.val()); - }); - // Insert the new form when it has been fully edited. - row.insertBefore($(template)); - // Update number of total forms. - $(totalForms).val(parseInt(totalForms.val(), 10) + 1); - nextIndex += 1; - // Hide the add button if there's a limit and it's been reached. - if ((maxForms.val() !== '') && (maxForms.val() - totalForms.val()) <= 0) { - addButton.parent().hide(); - } - // Show the remove buttons if there are more than min_num. - toggleDeleteButtonVisibility(row.closest('.inline-group')); - - // Pass the new form to the post-add callback, if provided. - if (options.added) { - options.added(row); - } - $(document).trigger('formset:added', [row, options.prefix]); - }; - - /** - * The "X" button that is part of every unsaved inline. - * (When saved, it is replaced with a "Delete" checkbox.) - */ - const addInlineDeleteButton = function(row) { - if (row.is("tr")) { - // If the forms are laid out in table rows, insert - // the remove button into the last table cell: - row.children(":last").append('"); - } else if (row.is("ul") || row.is("ol")) { - // If they're laid out as an ordered/unordered list, - // insert an
  • after the last list item: - row.append('
  • ' + options.deleteText + "
  • "); - } else { - // Otherwise, just insert the remove button as the - // last child element of the form's container: - row.children(":first").append('' + options.deleteText + ""); - } - // Add delete handler for each row. - row.find("a." + options.deleteCssClass).on('click', inlineDeleteHandler.bind(this)); - }; - - const inlineDeleteHandler = function(e1) { - e1.preventDefault(); - const deleteButton = $(e1.target); - const row = deleteButton.closest('.' + options.formCssClass); - const inlineGroup = row.closest('.inline-group'); - // Remove the parent form containing this button, - // and also remove the relevant row with non-field errors: - const prevRow = row.prev(); - if (prevRow.length && prevRow.hasClass('row-form-errors')) { - prevRow.remove(); - } - row.remove(); - nextIndex -= 1; - // Pass the deleted form to the post-delete callback, if provided. - if (options.removed) { - options.removed(row); - } - $(document).trigger('formset:removed', [row, options.prefix]); - // Update the TOTAL_FORMS form count. - const forms = $("." + options.formCssClass); - $("#id_" + options.prefix + "-TOTAL_FORMS").val(forms.length); - // Show add button again once below maximum number. - if ((maxForms.val() === '') || (maxForms.val() - forms.length) > 0) { - addButton.parent().show(); - } - // Hide the remove buttons if at min_num. - toggleDeleteButtonVisibility(inlineGroup); - // Also, update names and ids for all remaining form controls so - // they remain in sequence: - let i, formCount; - const updateElementCallback = function() { - updateElementIndex(this, options.prefix, i); - }; - for (i = 0, formCount = forms.length; i < formCount; i++) { - updateElementIndex($(forms).get(i), options.prefix, i); - $(forms.get(i)).find("*").each(updateElementCallback); - } - }; - - const toggleDeleteButtonVisibility = function(inlineGroup) { - if ((minForms.val() !== '') && (minForms.val() - totalForms.val()) >= 0) { - inlineGroup.find('.inline-deletelink').hide(); - } else { - inlineGroup.find('.inline-deletelink').show(); - } - }; - - $this.each(function(i) { - $(this).not("." + options.emptyCssClass).addClass(options.formCssClass); - }); - - // Create the delete buttons for all unsaved inlines: - $this.filter('.' + options.formCssClass + ':not(.has_original):not(.' + options.emptyCssClass + ')').each(function() { - addInlineDeleteButton($(this)); - }); - toggleDeleteButtonVisibility($this); - - // Create the add button, initially hidden. - addButton = options.addButton; - addInlineAddButton(); - - // Show the add button if allowed to add more items. - // Note that max_num = None translates to a blank string. 
- const showAddButton = maxForms.val() === '' || (maxForms.val() - totalForms.val()) > 0; - if ($this.length && showAddButton) { - addButton.parent().show(); - } else { - addButton.parent().hide(); - } - - return this; - }; - - /* Setup plugin defaults */ - $.fn.formset.defaults = { - prefix: "form", // The form prefix for your django formset - addText: "add another", // Text for the add link - deleteText: "remove", // Text for the delete link - addCssClass: "add-row", // CSS class applied to the add link - deleteCssClass: "delete-row", // CSS class applied to the delete link - emptyCssClass: "empty-row", // CSS class applied to the empty row - formCssClass: "dynamic-form", // CSS class applied to each form in a formset - added: null, // Function called each time a new form is added - removed: null, // Function called each time a form is deleted - addButton: null // Existing add button to use - }; - - - // Tabular inlines --------------------------------------------------------- - $.fn.tabularFormset = function(selector, options) { - const $rows = $(this); - - const reinitDateTimeShortCuts = function() { - // Reinitialize the calendar and clock widgets by force - if (typeof DateTimeShortcuts !== "undefined") { - $(".datetimeshortcuts").remove(); - DateTimeShortcuts.init(); - } - }; - - const updateSelectFilter = function() { - // If any SelectFilter widgets are a part of the new form, - // instantiate a new SelectFilter instance for it. - if (typeof SelectFilter !== 'undefined') { - $('.selectfilter').each(function(index, value) { - const namearr = value.name.split('-'); - SelectFilter.init(value.id, namearr[namearr.length - 1], false); - }); - $('.selectfilterstacked').each(function(index, value) { - const namearr = value.name.split('-'); - SelectFilter.init(value.id, namearr[namearr.length - 1], true); - }); - } - }; - - const initPrepopulatedFields = function(row) { - row.find('.prepopulated_field').each(function() { - const field = $(this), - input = field.find('input, select, textarea'), - dependency_list = input.data('dependency_list') || [], - dependencies = []; - $.each(dependency_list, function(i, field_name) { - dependencies.push('#' + row.find('.field-' + field_name).find('input, select, textarea').attr('id')); - }); - if (dependencies.length) { - input.prepopulate(dependencies, input.attr('maxlength')); - } - }); - }; - - $rows.formset({ - prefix: options.prefix, - addText: options.addText, - formCssClass: "dynamic-" + options.prefix, - deleteCssClass: "inline-deletelink", - deleteText: options.deleteText, - emptyCssClass: "empty-form", - added: function(row) { - initPrepopulatedFields(row); - reinitDateTimeShortCuts(); - updateSelectFilter(); - }, - addButton: options.addButton - }); - - return $rows; - }; - - // Stacked inlines --------------------------------------------------------- - $.fn.stackedFormset = function(selector, options) { - const $rows = $(this); - const updateInlineLabel = function(row) { - $(selector).find(".inline_label").each(function(i) { - const count = i + 1; - $(this).html($(this).html().replace(/(#\d+)/g, "#" + count)); - }); - }; - - const reinitDateTimeShortCuts = function() { - // Reinitialize the calendar and clock widgets by force, yuck. - if (typeof DateTimeShortcuts !== "undefined") { - $(".datetimeshortcuts").remove(); - DateTimeShortcuts.init(); - } - }; - - const updateSelectFilter = function() { - // If any SelectFilter widgets were added, instantiate a new instance. 
- if (typeof SelectFilter !== "undefined") { - $(".selectfilter").each(function(index, value) { - const namearr = value.name.split('-'); - SelectFilter.init(value.id, namearr[namearr.length - 1], false); - }); - $(".selectfilterstacked").each(function(index, value) { - const namearr = value.name.split('-'); - SelectFilter.init(value.id, namearr[namearr.length - 1], true); - }); - } - }; - - const initPrepopulatedFields = function(row) { - row.find('.prepopulated_field').each(function() { - const field = $(this), - input = field.find('input, select, textarea'), - dependency_list = input.data('dependency_list') || [], - dependencies = []; - $.each(dependency_list, function(i, field_name) { - dependencies.push('#' + row.find('.form-row .field-' + field_name).find('input, select, textarea').attr('id')); - }); - if (dependencies.length) { - input.prepopulate(dependencies, input.attr('maxlength')); - } - }); - }; - - $rows.formset({ - prefix: options.prefix, - addText: options.addText, - formCssClass: "dynamic-" + options.prefix, - deleteCssClass: "inline-deletelink", - deleteText: options.deleteText, - emptyCssClass: "empty-form", - removed: updateInlineLabel, - added: function(row) { - initPrepopulatedFields(row); - reinitDateTimeShortCuts(); - updateSelectFilter(); - updateInlineLabel(row); - }, - addButton: options.addButton - }); - - return $rows; - }; - - $(document).ready(function() { - $(".js-inline-admin-formset").each(function() { - const data = $(this).data(), - inlineOptions = data.inlineFormset; - let selector; - switch(data.inlineType) { - case "stacked": - selector = inlineOptions.name + "-group .inline-related"; - $(selector).stackedFormset(selector, inlineOptions.options); - break; - case "tabular": - selector = inlineOptions.name + "-group .tabular.inline-related tbody:first > tr.form-row"; - $(selector).tabularFormset(selector, inlineOptions.options); - break; - } - }); - }); -} diff --git a/application/src/static/admin/js/inlines.min.js b/application/src/static/admin/js/inlines.min.js deleted file mode 100644 index fc6dddc6b..000000000 --- a/application/src/static/admin/js/inlines.min.js +++ /dev/null @@ -1,11 +0,0 @@ -'use strict';{const b=django.jQuery;b.fn.formset=function(c){const a=b.extend({},b.fn.formset.defaults,c),e=b(this),l=e.parent(),m=function(a,d,h){const g=new RegExp("("+d+"-(\\d+|__prefix__))");d=d+"-"+h;b(a).prop("for")&&b(a).prop("for",b(a).prop("for").replace(g,d));a.id&&(a.id=a.id.replace(g,d));a.name&&(a.name=a.name.replace(g,d))},f=b("#id_"+a.prefix+"-TOTAL_FORMS").prop("autocomplete","off");let n=parseInt(f.val(),10);const h=b("#id_"+a.prefix+"-MAX_NUM_FORMS").prop("autocomplete","off"),q= -b("#id_"+a.prefix+"-MIN_NUM_FORMS").prop("autocomplete","off");let k;const t=function(g){g.preventDefault();g=b("#"+a.prefix+"-empty");const d=g.clone(!0);d.removeClass(a.emptyCssClass).addClass(a.formCssClass).attr("id",a.prefix+"-"+n);r(d);d.find("*").each(function(){m(this,a.prefix,f.val())});d.insertBefore(b(g));b(f).val(parseInt(f.val(),10)+1);n+=1;""!==h.val()&&0>=h.val()-f.val()&&k.parent().hide();p(d.closest(".inline-group"));a.added&&a.added(d);b(document).trigger("formset:added",[d,a.prefix])}, -r=function(b){b.is("tr")?b.children(":last").append('"):b.is("ul")||b.is("ol")?b.append('
  • '+a.deleteText+"
  • "):b.children(":first").append(''+a.deleteText+"");b.find("a."+a.deleteCssClass).on("click",u.bind(this))},u=function(g){g.preventDefault();var d=b(g.target).closest("."+a.formCssClass);g=d.closest(".inline-group"); -var f=d.prev();f.length&&f.hasClass("row-form-errors")&&f.remove();d.remove();--n;a.removed&&a.removed(d);b(document).trigger("formset:removed",[d,a.prefix]);d=b("."+a.formCssClass);b("#id_"+a.prefix+"-TOTAL_FORMS").val(d.length);(""===h.val()||0'+a.addText+"");k=l.find("tr:last a")}else e.filter(":last").after('"), -k=e.filter(":last").next().find("a");k.on("click",t)})();c=""===h.val()||0 tr.form-row",b(c).tabularFormset(c,a.options)}})})}; diff --git a/application/src/static/admin/js/jquery.init.js b/application/src/static/admin/js/jquery.init.js deleted file mode 100644 index f40b27f47..000000000 --- a/application/src/static/admin/js/jquery.init.js +++ /dev/null @@ -1,8 +0,0 @@ -/*global jQuery:false*/ -'use strict'; -/* Puts the included jQuery into our own namespace using noConflict and passing - * it 'true'. This ensures that the included jQuery doesn't pollute the global - * namespace (i.e. this preserves pre-existing values for both window.$ and - * window.jQuery). - */ -window.django = {jQuery: jQuery.noConflict(true)}; diff --git a/application/src/static/admin/js/nav_sidebar.js b/application/src/static/admin/js/nav_sidebar.js deleted file mode 100644 index efaa7214b..000000000 --- a/application/src/static/admin/js/nav_sidebar.js +++ /dev/null @@ -1,39 +0,0 @@ -'use strict'; -{ - const toggleNavSidebar = document.getElementById('toggle-nav-sidebar'); - if (toggleNavSidebar !== null) { - const navLinks = document.querySelectorAll('#nav-sidebar a'); - function disableNavLinkTabbing() { - for (const navLink of navLinks) { - navLink.tabIndex = -1; - } - } - function enableNavLinkTabbing() { - for (const navLink of navLinks) { - navLink.tabIndex = 0; - } - } - - const main = document.getElementById('main'); - let navSidebarIsOpen = localStorage.getItem('django.admin.navSidebarIsOpen'); - if (navSidebarIsOpen === null) { - navSidebarIsOpen = 'true'; - } - if (navSidebarIsOpen === 'false') { - disableNavLinkTabbing(); - } - main.classList.toggle('shifted', navSidebarIsOpen === 'true'); - - toggleNavSidebar.addEventListener('click', function() { - if (navSidebarIsOpen === 'true') { - navSidebarIsOpen = 'false'; - disableNavLinkTabbing(); - } else { - navSidebarIsOpen = 'true'; - enableNavLinkTabbing(); - } - localStorage.setItem('django.admin.navSidebarIsOpen', navSidebarIsOpen); - main.classList.toggle('shifted'); - }); - } -} diff --git a/application/src/static/admin/js/popup_response.js b/application/src/static/admin/js/popup_response.js deleted file mode 100644 index 2b1d3dd31..000000000 --- a/application/src/static/admin/js/popup_response.js +++ /dev/null @@ -1,16 +0,0 @@ -/*global opener */ -'use strict'; -{ - const initData = JSON.parse(document.getElementById('django-admin-popup-response-constants').dataset.popupResponse); - switch(initData.action) { - case 'change': - opener.dismissChangeRelatedObjectPopup(window, initData.value, initData.obj, initData.new_value); - break; - case 'delete': - opener.dismissDeleteRelatedObjectPopup(window, initData.value); - break; - default: - opener.dismissAddRelatedObjectPopup(window, initData.value, initData.obj); - break; - } -} diff --git a/application/src/static/admin/js/prepopulate.js b/application/src/static/admin/js/prepopulate.js deleted file mode 100644 index 89e95ab44..000000000 --- 
a/application/src/static/admin/js/prepopulate.js +++ /dev/null @@ -1,43 +0,0 @@ -/*global URLify*/ -'use strict'; -{ - const $ = django.jQuery; - $.fn.prepopulate = function(dependencies, maxLength, allowUnicode) { - /* - Depends on urlify.js - Populates a selected field with the values of the dependent fields, - URLifies and shortens the string. - dependencies - array of dependent fields ids - maxLength - maximum length of the URLify'd string - allowUnicode - Unicode support of the URLify'd string - */ - return this.each(function() { - const prepopulatedField = $(this); - - const populate = function() { - // Bail if the field's value has been changed by the user - if (prepopulatedField.data('_changed')) { - return; - } - - const values = []; - $.each(dependencies, function(i, field) { - field = $(field); - if (field.val().length > 0) { - values.push(field.val()); - } - }); - prepopulatedField.val(URLify(values.join(' '), maxLength, allowUnicode)); - }; - - prepopulatedField.data('_changed', false); - prepopulatedField.on('change', function() { - prepopulatedField.data('_changed', true); - }); - - if (!prepopulatedField.val()) { - $(dependencies.join(',')).on('keyup change focus', populate); - } - }); - }; -} diff --git a/application/src/static/admin/js/prepopulate.min.js b/application/src/static/admin/js/prepopulate.min.js deleted file mode 100644 index 11ead4990..000000000 --- a/application/src/static/admin/js/prepopulate.min.js +++ /dev/null @@ -1 +0,0 @@ -'use strict';{const b=django.jQuery;b.fn.prepopulate=function(d,f,g){return this.each(function(){const a=b(this),h=function(){if(!a.data("_changed")){var e=[];b.each(d,function(a,c){c=b(c);0 elements - // (i.e., `typeof document.createElement( "object" ) === "function"`). - // We don't want to classify *any* DOM node as a function. - return typeof obj === "function" && typeof obj.nodeType !== "number"; - }; - - -var isWindow = function isWindow( obj ) { - return obj != null && obj === obj.window; - }; - - -var document = window.document; - - - - var preservedScriptAttributes = { - type: true, - src: true, - nonce: true, - noModule: true - }; - - function DOMEval( code, node, doc ) { - doc = doc || document; - - var i, val, - script = doc.createElement( "script" ); - - script.text = code; - if ( node ) { - for ( i in preservedScriptAttributes ) { - - // Support: Firefox 64+, Edge 18+ - // Some browsers don't support the "nonce" property on scripts. - // On the other hand, just using `getAttribute` is not enough as - // the `nonce` attribute is reset to an empty string whenever it - // becomes browsing-context connected. - // See https://github.com/whatwg/html/issues/2369 - // See https://html.spec.whatwg.org/#nonce-attributes - // The `node.getAttribute` check was added for the sake of - // `jQuery.globalEval` so that it can fake a nonce-containing node - // via an object. - val = node[ i ] || node.getAttribute && node.getAttribute( i ); - if ( val ) { - script.setAttribute( i, val ); - } - } - } - doc.head.appendChild( script ).parentNode.removeChild( script ); - } - - -function toType( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? 
- class2type[ toString.call( obj ) ] || "object" : - typeof obj; -} -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.5.1", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - even: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return ( i + 1 ) % 2; - } ) ); - }, - - odd: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return i % 2; - } ) ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. 
- push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - copy = options[ name ]; - - // Prevent Object.prototype pollution - // Prevent never-ending loop - if ( name === "__proto__" || target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - src = target[ name ]; - - // Ensure proper type for the source value - if ( copyIsArray && !Array.isArray( src ) ) { - clone = []; - } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { - clone = {}; - } else { - clone = src; - } - copyIsArray = false; - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - // Evaluates a script in a provided context; falls back to the global one - // if not specified. 
- globalEval: function( code, options, doc ) { - DOMEval( code, { nonce: options && options.nonce }, doc ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return flat( ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( _i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = toType( obj ); - - if ( isFunction( obj ) || isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.5 - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://js.foundation/ - * - * Date: 2020-03-14 - */ -( function( window ) { -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - nonnativeSelectorCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ( {} ).hasOwnProperty, - arr = [], - pop = arr.pop, - pushNative = arr.push, - push = arr.push, - slice = arr.slice, - - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[ i ] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + - "ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram - identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + - "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - - // "Attribute values must be CSS identifiers [capture 5] - // or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + - whitespace + "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + - whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + - "*" ), - rdescend = new RegExp( whitespace + "|>" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + - whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + - whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + - "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + - "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rhtml = /HTML$/i, - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), - funescape = function( escape, nonHex ) { - var high = "0x" + escape.slice( 1 ) - 0x10000; - - return nonHex ? - - // Strip the backslash prefix from a non-hex escape sequence - nonHex : - - // Replace a hexadecimal escape sequence with the encoded Unicode code point - // Support: IE <=11+ - // For values outside the Basic Multilingual Plane (BMP), manually construct a - // surrogate pair - high < 0 ? 
- String.fromCharCode( high + 0x10000 ) : - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + - ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - inDisabledFieldset = addCombinator( - function( elem ) { - return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - ( arr = slice.call( preferredDoc.childNodes ) ), - preferredDoc.childNodes - ); - - // Support: Android<4.0 - // Detect silently failing push.apply - // eslint-disable-next-line no-unused-expressions - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - pushNative.apply( target, slice.call( els ) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - - // Can't trust NodeList.length - while ( ( target[ j++ ] = els[ i++ ] ) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? 
context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - setDocument( context ); - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { - - // ID selector - if ( ( m = match[ 1 ] ) ) { - - // Document context - if ( nodeType === 9 ) { - if ( ( elem = context.getElementById( m ) ) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && ( elem = newContext.getElementById( m ) ) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[ 2 ] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !nonnativeSelectorCache[ selector + " " ] && - ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && - - // Support: IE 8 only - // Exclude object elements - ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { - - newSelector = selector; - newContext = context; - - // qSA considers elements outside a scoping root when evaluating child or - // descendant combinators, which is not what we want. - // In such cases, we work around the behavior by prefixing every selector in the - // list with an ID selector referencing the scope context. - // The technique has to be used as well when a leading combinator is used - // as such selectors are not recognized by querySelectorAll. - // Thanks to Andrew Dupont for this technique. - if ( nodeType === 1 && - ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - - // We can use :scope instead of the ID hack if the browser - // supports it & if we're not changing the context. - if ( newContext !== context || !support.scope ) { - - // Capture the context ID, setting it first if necessary - if ( ( nid = context.getAttribute( "id" ) ) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", ( nid = expando ) ); - } - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + - toSelector( groups[ i ] ); - } - newSelector = groups.join( "," ); - } - - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - nonnativeSelectorCache( selector, true ); - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return ( cache[ key + " " ] = value ); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ -function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement( "fieldset" ); - - try { - return !!fn( el ); - } catch ( e ) { - return false; - } finally { - - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split( "|" ), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[ i ] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( ( cur = cur.nextSibling ) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 
1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return ( name === "input" || name === "button" ) && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. - if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - inDisabledFieldset( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. 
- } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction( function( argument ) { - argument = +argument; - return markFunction( function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ ( j = matchIndexes[ i ] ) ] ) { - seed[ j ] = !( matches[ j ] = seed[ j ] ); - } - } - } ); - } ); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - var namespace = elem.namespaceURI, - docElem = ( elem.ownerDocument || elem ).documentElement; - - // Support: IE <=8 - // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes - // https://bugs.jquery.com/ticket/4833 - return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9 - 11+, Edge 12 - 18+ - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( preferredDoc != document && - ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, - // Safari 4 - 5 only, Opera <=11.6 - 12.x only - // IE/Edge & older browsers don't support the :scope pseudo-class. - // Support: Safari 6.0 only - // Safari 6.0 supports :scope but it's an alias of :root there. 
- support.scope = assert( function( el ) { - docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); - return typeof el.querySelectorAll !== "undefined" && - !el.querySelectorAll( ":scope fieldset div" ).length; - } ); - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert( function( el ) { - el.className = "i"; - return !el.getAttribute( "className" ); - } ); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert( function( el ) { - el.appendChild( document.createComment( "" ) ); - return !el.getElementsByTagName( "*" ).length; - } ); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert( function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - } ); - - // ID filter and find - if ( support.getById ) { - Expr.filter[ "ID" ] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute( "id" ) === attrId; - }; - }; - Expr.find[ "ID" ] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter[ "ID" ] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode( "id" ); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find[ "ID" ] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( ( elem = elems[ i++ ] ) ) { - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find[ "TAG" ] = support.getElementsByTagName ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== "undefined" ) { - return context.getElementsByTagName( tag ); - - // DocumentFragment nodes don't have gEBTN - } else if ( support.qsa ) { - return context.querySelectorAll( tag ); - } - } : - - function( tag, context ) { - var elem, - tmp = [], - i = 0, - - // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too - results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - while ( ( elem = results[ i++ ] ) ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }; - - // Class - Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { - return context.getElementsByClassName( className ); - } - }; - - /* QSA/matchesSelector - ---------------------------------------------------------------------- */ - - // QSA and matchesSelector support - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - rbuggyMatches = []; - - // qSa(:focus) reports false when true (Chrome 21) - // We allow this because of a bug in IE8/9 that throws an error - // whenever `document.activeElement` is accessed on an iframe - // So, we allow :focus to pass through QSA all the time to avoid the IE error - // See https://bugs.jquery.com/ticket/13378 - rbuggyQSA = []; - - if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) { - - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert( function( el ) { - - var input; - - // Select is set to empty string on purpose - // This is to test IE's treatment of not explicitly - // setting a boolean content attribute, - // since its presence should be enough - // https://bugs.jquery.com/ticket/12359 - docElem.appendChild( el ).innerHTML = "" + - ""; - - // Support: IE8, Opera 11-12.16 - // Nothing should be selected when empty strings follow ^= or $= or *= - // The test attribute must be unknown in Opera but "safe" for WinRT - // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section - if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); - } - - // Support: IE8 - // Boolean attributes and "value" are not treated correctly - if ( !el.querySelectorAll( "[selected]" ).length ) { - rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); - } - - // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ - if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { - rbuggyQSA.push( "~=" ); - } - - // Support: IE 11+, Edge 15 - 18+ - // IE 11/Edge don't find elements on a `[name='']` query in some cases. - // Adding a temporary attribute to the document before the selection works - // around the issue. - // Interestingly, IE 10 & older don't seem to have the issue. 
- input = document.createElement( "input" ); - input.setAttribute( "name", "" ); - el.appendChild( input ); - if ( !el.querySelectorAll( "[name='']" ).length ) { - rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" + - whitespace + "*(?:''|\"\")" ); - } - - // Webkit/Opera - :checked should return selected option elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here and will not see later tests - if ( !el.querySelectorAll( ":checked" ).length ) { - rbuggyQSA.push( ":checked" ); - } - - // Support: Safari 8+, iOS 8+ - // https://bugs.webkit.org/show_bug.cgi?id=136851 - // In-page `selector#id sibling-combinator selector` fails - if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { - rbuggyQSA.push( ".#.+[+~]" ); - } - - // Support: Firefox <=3.6 - 5 only - // Old Firefox doesn't throw on a badly-escaped identifier. - el.querySelectorAll( "\\\f" ); - rbuggyQSA.push( "[\\r\\n\\f]" ); - } ); - - assert( function( el ) { - el.innerHTML = "" + - ""; - - // Support: Windows 8 Native Apps - // The type and name attributes are restricted during .innerHTML assignment - var input = document.createElement( "input" ); - input.setAttribute( "type", "hidden" ); - el.appendChild( input ).setAttribute( "name", "D" ); - - // Support: IE8 - // Enforce case-sensitivity of name attribute - if ( el.querySelectorAll( "[name=d]" ).length ) { - rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( el.querySelectorAll( ":enabled" ).length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: IE9-11+ - // IE's :disabled selector does not pick up the children of disabled fieldsets - docElem.appendChild( el ).disabled = true; - if ( el.querySelectorAll( ":disabled" ).length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: Opera 10 - 11 only - // Opera 10-11 does not throw on post-comma invalid pseudos - el.querySelectorAll( "*,:x" ); - rbuggyQSA.push( ",.*:" ); - } ); - } - - if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches || - docElem.webkitMatchesSelector || - docElem.mozMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector ) ) ) ) { - - assert( function( el ) { - - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( el, "*" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( el, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - } ); - } - - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) ); - rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) ); - - /* Contains - ---------------------------------------------------------------------- */ - hasCompare = rnative.test( docElem.compareDocumentPosition ); - - // Element contains another - // Purposefully self-exclusive - // As in, an element does not contain itself - contains = hasCompare || rnative.test( docElem.contains ) ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? 
- adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - ) ); - } : - function( a, b ) { - if ( b ) { - while ( ( b = b.parentNode ) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { - - // Choose the first element that is related to our preferred document - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( a == document || a.ownerDocument == preferredDoc && - contains( preferredDoc, a ) ) { - return -1; - } - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( b == document || b.ownerDocument == preferredDoc && - contains( preferredDoc, b ) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? -1 : 1; - } : - function( a, b ) { - - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - /* eslint-disable eqeqeq */ - return a == document ? -1 : - b == document ? 1 : - /* eslint-enable eqeqeq */ - aup ? -1 : - bup ? 1 : - sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( ( cur = cur.parentNode ) ) { - ap.unshift( cur ); - } - cur = b; - while ( ( cur = cur.parentNode ) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[ i ] === bp[ i ] ) { - i++; - } - - return i ? - - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[ i ], bp[ i ] ) : - - // Otherwise nodes in our document sort first - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. 
- /* eslint-disable eqeqeq */ - ap[ i ] == preferredDoc ? -1 : - bp[ i ] == preferredDoc ? 1 : - /* eslint-enable eqeqeq */ - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - setDocument( elem ); - - if ( support.matchesSelector && documentIsHTML && - !nonnativeSelectorCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch ( e ) { - nonnativeSelectorCache( expr, true ); - } - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - - // Set document vars if needed - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( ( context.ownerDocument || context ) != document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - - // Set document vars if needed - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( ( elem.ownerDocument || elem ) != document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - ( val = elem.getAttributeNode( name ) ) && val.specified ? 
- val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return ( sel + "" ).replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( ( elem = results[ i++ ] ) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - - // If no nodeType, this is expected to be an array - while ( ( node = elem[ i++ ] ) ) { - - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - - // Use textContent for elements - // innerText usage removed for consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[ 1 ] = match[ 1 ].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[ 3 ] = ( match[ 3 ] || match[ 4 ] || - match[ 5 ] || "" ).replace( runescape, funescape ); - - if ( match[ 2 ] === "~=" ) { - match[ 3 ] = " " + match[ 3 ] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[ 1 ] = match[ 1 ].toLowerCase(); - - if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { - - // nth-* requires argument - if ( !match[ 3 ] ) { - Sizzle.error( match[ 0 ] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[ 4 ] = +( match[ 4 ] ? 
- match[ 5 ] + ( match[ 6 ] || 1 ) : - 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); - match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); - - // other types prohibit arguments - } else if ( match[ 3 ] ) { - Sizzle.error( match[ 0 ] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[ 6 ] && match[ 2 ]; - - if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[ 3 ] ) { - match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - - // Get excess from tokenize (recursively) - ( excess = tokenize( unquoted, true ) ) && - - // advance to the next closing parenthesis - ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { - - // excess is a negative index - match[ 0 ] = match[ 0 ].slice( 0, excess ); - match[ 2 ] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? - function() { - return true; - } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - ( pattern = new RegExp( "(^|" + whitespace + - ")" + className + "(" + whitespace + "|$)" ) ) && classCache( - className, function( elem ) { - return pattern.test( - typeof elem.className === "string" && elem.className || - typeof elem.getAttribute !== "undefined" && - elem.getAttribute( "class" ) || - "" - ); - } ); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - /* eslint-disable max-len */ - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - /* eslint-enable max-len */ - - }; - }, - - "CHILD": function( type, what, _argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, _context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( ( node = node[ dir ] ) ) { - if ( ofType ? 
- node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( ( node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - ( diff = nodeIndex = 0 ) || start.pop() ) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - - // Use previously-cached element index if available - if ( useCache ) { - - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - - // Use the same loop as above to seek `elem` from the start - while ( ( node = ++nodeIndex && node && node[ dir ] || - ( diff = nodeIndex = 0 ) || start.pop() ) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || - ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction( function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[ i ] ); - seed[ idx ] = !( matches[ idx ] = matched[ i ] ); - } - } ) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - - // Potentially complex pseudos - "not": markFunction( function( selector ) { - - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction( function( seed, matches, _context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( ( elem = unmatched[ i ] ) ) { - seed[ i ] = !( matches[ i ] = elem ); - } - } - } ) : - function( elem, _context, xml ) { - input[ 0 ] = elem; - matcher( input, null, xml, results ); - - // Don't keep the element (issue #299) - input[ 0 ] = null; - return !results.pop(); - }; - } ), - - "has": markFunction( function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - } ), - - "contains": markFunction( function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; - }; - } ), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - - // lang value must be a valid identifier - if ( !ridentifier.test( lang || "" ) ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( ( elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); - return false; - }; - } ), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && - ( !document.hasFocus || document.hasFocus() ) && - !!( elem.type || elem.href || ~elem.tabIndex ); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return ( nodeName === "input" && !!elem.checked ) || - ( nodeName === "option" && !!elem.selected ); - }, - - "selected": function( elem ) { - - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - // eslint-disable-next-line no-unused-expressions - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) - // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos[ "empty" ]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( ( attr = elem.getAttribute( "type" ) ) == null || - attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo( function() { - return [ 0 ]; - } ), - - "last": createPositionalPseudo( function( _matchIndexes, length ) { - return [ length - 1 ]; - } ), - - "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - } ), - - "even": createPositionalPseudo( function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "odd": createPositionalPseudo( function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { - var i = argument < 0 ? - argument + length : - argument > length ? 
- length : - argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ) - } -}; - -Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || ( match = rcomma.exec( soFar ) ) ) { - if ( match ) { - - // Don't consume trailing commas as valid - soFar = soFar.slice( match[ 0 ].length ) || soFar; - } - groups.push( ( tokens = [] ) ); - } - - matched = false; - - // Combinators - if ( ( match = rcombinators.exec( soFar ) ) ) { - matched = match.shift(); - tokens.push( { - value: matched, - - // Cast descendant combinators to space - type: match[ 0 ].replace( rtrim, " " ) - } ); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || - ( match = preFilters[ type ]( match ) ) ) ) { - matched = match.shift(); - tokens.push( { - value: matched, - type: type, - matches: match - } ); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[ i ].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
- - // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || ( elem[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || - ( outerCache[ elem.uniqueID ] = {} ); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( ( oldCache = uniqueCache[ key ] ) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return ( newCache[ 2 ] = oldCache[ 2 ] ); - } else { - - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[ i ]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[ 0 ]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[ i ], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( ( elem = unmatched[ i ] ) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction( function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( - selector || "*", - context.nodeType ? [ context ] : context, - [] - ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( ( elem = temp[ i ] ) ) { - matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( ( elem = matcherOut[ i ] ) ) { - - // Restore matcherIn since elem is not yet a final match - temp.push( ( matcherIn[ i ] = elem ) ); - } - } - postFinder( null, ( matcherOut = [] ), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( ( elem = matcherOut[ i ] ) && - ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) { - - seed[ temp ] = !( results[ temp ] = elem ); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - } ); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[ 0 ].type ], - implicitRelative = leadingRelative || Expr.relative[ " " ], - i = leadingRelative ? 1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - ( checkContext = context ).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - - // Avoid hanging onto element (issue #299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) { - matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; - } else { - matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[ j ].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens - .slice( 0, i - 1 ) - .concat( { value: tokens[ i - 2 ].type === " " ? 
"*" : "" } ) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ), - - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), - len = elems.length; - - if ( outermost ) { - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - outermostContext = context == document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( !context && elem.ownerDocument != document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( ( matcher = elementMatchers[ j++ ] ) ) { - if ( matcher( elem, context || document, xml ) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - - // They will have gone through all possible matchers - if ( ( elem = !matcher && elem ) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. 
- if ( bySet && i !== matchedCount ) { - j = 0; - while ( ( matcher = setMatchers[ j++ ] ) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !( unmatched[ i ] || setMatched[ i ] ) ) { - setMatched[ i ] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? - markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[ i ] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( - selector, - matcherFromGroupMatchers( elementMatchers, setMatchers ) - ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( ( selector = compiled.selector || selector ) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[ 0 ] = match[ 0 ].slice( 0 ); - if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { - - context = ( Expr.find[ "ID" ]( token.matches[ 0 ] - .replace( runescape, funescape ), context ) || [] )[ 0 ]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr[ "needsContext" ].test( selector ) ? 
0 : tokens.length; - while ( i-- ) { - token = tokens[ i ]; - - // Abort if we hit a combinator - if ( Expr.relative[ ( type = token.type ) ] ) { - break; - } - if ( ( find = Expr.find[ type ] ) ) { - - // Search, expanding context for leading sibling combinators - if ( ( seed = find( - token.matches[ 0 ].replace( runescape, funescape ), - rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) || - context - ) ) ) { - - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && toSelector( tokens ); - if ( !selector ) { - push.apply( results, seed ); - return results; - } - - break; - } - } - } - } - - // Compile and execute a filtering function if one is not provided - // Provide `match` to avoid retokenization if we modified the selector above - ( compiled || compile( selector, match ) )( - seed, - context, - !documentIsHTML, - results, - !context || rsibling.test( selector ) && testContext( context.parentNode ) || context - ); - return results; -}; - -// One-time assignments - -// Sort stability -support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando; - -// Support: Chrome 14-35+ -// Always assume duplicates if they aren't passed to the comparison function -support.detectDuplicates = !!hasDuplicate; - -// Initialize against the default document -setDocument(); - -// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) -// Detached nodes confoundingly follow *each other* -support.sortDetached = assert( function( el ) { - - // Should return 1, but returns 4 (following) - return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1; -} ); - -// Support: IE<8 -// Prevent attribute/property "interpolation" -// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx -if ( !assert( function( el ) { - el.innerHTML = "<a href='#'></a>"; - return el.firstChild.getAttribute( "href" ) === "#"; -} ) ) { - addHandle( "type|href|height|width", function( elem, name, isXML ) { - if ( !isXML ) { - return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); - } - } ); -} - -// Support: IE<9 -// Use defaultValue in place of getAttribute("value") -if ( !support.attributes || !assert( function( el ) { - el.innerHTML = "<input/>"; - el.firstChild.setAttribute( "value", "" ); - return el.firstChild.getAttribute( "value" ) === ""; -} ) ) { - addHandle( "value", function( elem, _name, isXML ) { - if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { - return elem.defaultValue; - } - } ); -} - -// Support: IE<9 -// Use getAttributeNode to fetch booleans when getAttribute lies -if ( !assert( function( el ) { - return el.getAttribute( "disabled" ) == null; -} ) ) { - addHandle( booleans, function( elem, name, isXML ) { - var val; - if ( !isXML ) { - return elem[ name ] === true ? name.toLowerCase() : - ( val = elem.getAttributeNode( name ) ) && val.specified ?
- val.value : - null; - } - } ); -} - -return Sizzle; - -} )( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -}; -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Filtered directly for both simple and complex selectors - return jQuery.filter( qualifier, elements, not ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? 
- jQuery( selector ) : - selector || [], - false - ).length; - } -} ); - - -// Initialize a jQuery object - - -// A central reference to the root jQuery(document) -var rootjQuery, - - // A simple way to check for HTML strings - // Prioritize #id over to avoid XSS via location.hash (#9521) - // Strict HTML recognition (#11290: must start with <) - // Shortcut simple #id case for speed - rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, - - init = jQuery.fn.init = function( selector, context, root ) { - var match, elem; - - // HANDLE: $(""), $(null), $(undefined), $(false) - if ( !selector ) { - return this; - } - - // Method init() accepts an alternate rootjQuery - // so migrate can support jQuery.sub (gh-2101) - root = root || rootjQuery; - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector[ 0 ] === "<" && - selector[ selector.length - 1 ] === ">" && - selector.length >= 3 ) { - - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && ( match[ 1 ] || !context ) ) { - - // HANDLE: $(html) -> $(array) - if ( match[ 1 ] ) { - context = context instanceof jQuery ? context[ 0 ] : context; - - // Option to run scripts is true for back-compat - // Intentionally let the error be thrown if parseHTML is not present - jQuery.merge( this, jQuery.parseHTML( - match[ 1 ], - context && context.nodeType ? context.ownerDocument || context : document, - true - ) ); - - // HANDLE: $(html, props) - if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { - for ( match in context ) { - - // Properties of context are called as methods if possible - if ( isFunction( this[ match ] ) ) { - this[ match ]( context[ match ] ); - - // ...and otherwise set as attributes - } else { - this.attr( match, context[ match ] ); - } - } - } - - return this; - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[ 2 ] ); - - if ( elem ) { - - // Inject the element directly into the jQuery object - this[ 0 ] = elem; - this.length = 1; - } - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || root ).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(DOMElement) - } else if ( selector.nodeType ) { - this[ 0 ] = selector; - this.length = 1; - return this; - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( isFunction( selector ) ) { - return root.ready !== undefined ? 
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null; - }, - parents: function( elem ) { - return dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, _i, until ) { - return dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, _i, until ) { - return dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, _i, until ) { - return dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return siblings( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return siblings( elem.firstChild ); - }, - contents: function( elem ) { - if ( elem.contentDocument != null && - - // Support: IE 11+ - // elements with no `data` attribute has an object - // `contentDocument` with a `null` prototype. - getProto( elem.contentDocument ) ) { - - return elem.contentDocument; - } - - // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only - // Treat the template element as a regular one in browsers that - // don't support it. - if ( nodeName( elem, "template" ) ) { - elem = elem.content || elem; - } - - return jQuery.merge( [], elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var matched = jQuery.map( this, fn, until ); - - if ( name.slice( -5 ) !== "Until" ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - matched = jQuery.filter( selector, matched ); - } - - if ( this.length > 1 ) { - - // Remove duplicates - if ( !guaranteedUnique[ name ] ) { - jQuery.uniqueSort( matched ); - } - - // Reverse order for parents* and prev-derivatives - if ( rparentsprev.test( name ) ) { - matched.reverse(); - } - } - - return this.pushStack( matched ); - }; -} ); -var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); - - - -// Convert String-formatted options into Object-formatted ones -function createOptions( options ) { - var object = {}; - jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { - object[ flag ] = true; - } ); - return object; -} - -/* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. - * - * Possible options: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ -jQuery.Callbacks = function( options ) { - - // Convert options from String-formatted to Object-formatted if needed - // (we check in cache first) - options = typeof options === "string" ? 
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && toType( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( _i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // rejected_handlers.disable - // fulfilled_handlers.disable - tuples[ 3 - i ][ 3 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock, - - // progress_handlers.lock - tuples[ 0 ][ 3 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the master Deferred - master = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - master.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( master.state() === "pending" || - isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return master.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); - } - - return master.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( toType( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, _key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; - - -// Matches dashed string for camelizing -var rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g; - -// Used by camelCase as callback to replace() -function fcamelCase( _all, letter ) { - return letter.toUpperCase(); -} - -// Convert dashed to camelCase; used by the css and data modules -// Support: IE <=9 - 11, Edge 12 - 15 -// Microsoft forgot to hump their vendor prefix (#9572) -function camelCase( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); -} -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? 
value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( camelCase ); - } else { - key = camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? - [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var documentElement = document.documentElement; - - - - var isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ); - }, - composed = { composed: true }; - - // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only - // Check attachment across shadow DOM boundaries when possible (gh-3504) - // Support: iOS 10.0-10.2 only - // Early iOS 10 versions support `attachShadow` but not `getRootNode`, - // leading to errors. We need to check for `getRootNode`. - if ( documentElement.getRootNode ) { - isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ) || - elem.getRootNode( composed ) === elem.ownerDocument; - }; - } -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - isAttached( elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, scale, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = elem.nodeType && - ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Support: Firefox <=54 - // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) - initial = initial / 2; - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - while ( maxIterations-- ) { - - // Evaluate and update our best guess (doubling guesses that zero out). - // Finish if the scale equals or crosses 1 (making the old*new product non-positive). - jQuery.style( elem, prop, initialInUnit + unit ); - if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { - maxIterations = 0; - } - initialInUnit = initialInUnit / scale; - - } - - initialInUnit = initialInUnit * 2; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? 
this.show() : this.hide(); - } - - return this.each( function() { - if ( isHiddenWithinTree( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - } ); - } -} ); -var rcheckableType = ( /^(?:checkbox|radio)$/i ); - -var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i ); - -var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i ); - - - -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (#11217) - // Support: Windows Web Apps (WWA) - // `name` and `type` must use .setAttribute for WWA (#14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = "<textarea>x</textarea>"; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; - - // Support: IE <=9 only - // IE <=9 replaces <option> tags with their contents when inserted outside of - // the select element. - div.innerHTML = "<option></option>"; - support.option = !!div.lastChild; -} )(); - - -// We have to close these tags to support XHTML (#13200) -var wrapMap = { - - // XHTML parsers do not magically insert elements in the - // same way that tag soup parsers do. So we cannot shorten - // this by omitting <tbody> or other required elements. - thead: [ 1, "<table>", "</table>" ], - col: [ 2, "<table><colgroup>", "</colgroup></table>" ], - tr: [ 2, "<table><tbody>", "</tbody></table>" ], - td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>
    " ], - - _default: [ 0, "", "" ] -}; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - -// Support: IE <=9 only -if ( !support.option ) { - wrapMap.optgroup = wrapMap.option = [ 1, "" ]; -} - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, attached, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( toType( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - attached = isAttached( elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( attached ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -var - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, - rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 - 11+ -// focus() and blur() are 
asynchronous, except when they are no-op. -// So expect focus to be synchronous when the element is already active, -// and blur to be synchronous when the element is not already active. -// (focus and blur are always synchronous in other supported browsers, -// this just defines when we can count on it). -function expectSync( elem, type ) { - return ( elem === safeActiveElement() ) === ( type === "focus" ); -} - -// Support: IE <=9 only -// Accessing document.activeElement can throw unexpectedly -// https://bugs.jquery.com/ticket/13393 -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. - */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Only attach events to objects that accept data - if ( !acceptData( elem ) ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = Object.create( null ); - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
- jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." ) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - - // Make a writable jQuery.Event from the native event object - event = jQuery.event.fix( nativeEvent ), - - handlers = ( - dataPriv.get( this, "events" ) || Object.create( null ) - )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // If the event is namespaced, then each handler is only invoked if it is - // specially universal or its namespaces are a superset of the event's. 
- if ( !event.rnamespace || handleObj.namespace === false || - event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? - originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - click: { - - // Utilize native event to ensure correct state for checkable inputs - setup: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. 
- var el = this || data; - - // Claim the first handler - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - // dataPriv.set( el, "click", ... ) - leverageNative( el, "click", returnTrue ); - } - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. - var el = this || data; - - // Force setup before triggering a click - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - leverageNative( el, "click" ); - } - - // Return non-false to allow normal event-path propagation - return true; - }, - - // For cross-browser consistency, suppress native .click() on links - // Also prevent it if we're currently inside a leveraged native-event stack - _default: function( event ) { - var target = event.target; - return rcheckableType.test( target.type ) && - target.click && nodeName( target, "input" ) && - dataPriv.get( target, "click" ) || - nodeName( target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -// Ensure the presence of an event listener that handles manually-triggered -// synthetic events by interrupting progress until reinvoked in response to -// *native* events that it fires directly, ensuring that state changes have -// already occurred before other listeners are invoked. -function leverageNative( el, type, expectSync ) { - - // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add - if ( !expectSync ) { - if ( dataPriv.get( el, type ) === undefined ) { - jQuery.event.add( el, type, returnTrue ); - } - return; - } - - // Register the controller as a special universal handler for all event namespaces - dataPriv.set( el, type, false ); - jQuery.event.add( el, type, { - namespace: false, - handler: function( event ) { - var notAsync, result, - saved = dataPriv.get( this, type ); - - if ( ( event.isTrigger & 1 ) && this[ type ] ) { - - // Interrupt processing of the outer synthetic .trigger()ed event - // Saved data should be false in such cases, but might be a leftover capture object - // from an async native handler (gh-4350) - if ( !saved.length ) { - - // Store arguments for use when handling the inner native event - // There will always be at least one argument (an event object), so this array - // will not be confused with a leftover capture object. 
- saved = slice.call( arguments ); - dataPriv.set( this, type, saved ); - - // Trigger the native event and capture its result - // Support: IE <=9 - 11+ - // focus() and blur() are asynchronous - notAsync = expectSync( this, type ); - this[ type ](); - result = dataPriv.get( this, type ); - if ( saved !== result || notAsync ) { - dataPriv.set( this, type, false ); - } else { - result = {}; - } - if ( saved !== result ) { - - // Cancel the outer synthetic event - event.stopImmediatePropagation(); - event.preventDefault(); - return result.value; - } - - // If this is an inner synthetic event for an event with a bubbling surrogate - // (focus or blur), assume that the surrogate already propagated from triggering the - // native event and prevent that from happening again here. - // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the - // bubbling surrogate propagates *after* the non-bubbling base), but that seems - // less bad than duplication. - } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { - event.stopPropagation(); - } - - // If this is a native event triggered above, everything is now in order - // Fire an inner synthetic event with the original arguments - } else if ( saved.length ) { - - // ...and capture the result - dataPriv.set( this, type, { - value: jQuery.event.trigger( - - // Support: IE <=9 - 11+ - // Extend with the prototype to reset the above stopImmediatePropagation() - jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), - saved.slice( 1 ), - this - ) - } ); - - // Abort handling of the native event - event.stopImmediatePropagation(); - } - } - } ); -} - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || Date.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - code: true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - - which: function( event ) { - var button = event.button; - - // Add which for key events - if ( event.which == null && rkeyEvent.test( event.type ) ) { - return event.charCode != null ? event.charCode : event.keyCode; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { - if ( button & 1 ) { - return 1; - } - - if ( button & 2 ) { - return 3; - } - - if ( button & 4 ) { - return 2; - } - - return 0; - } - - return event.which; - } -}, jQuery.event.addProp ); - -jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { - jQuery.event.special[ type ] = { - - // Utilize native event if possible so blur/focus sequence is correct - setup: function() { - - // Claim the first handler - // dataPriv.set( this, "focus", ... ) - // dataPriv.set( this, "blur", ... ) - leverageNative( this, type, expectSync ); - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function() { - - // Force setup before trigger - leverageNative( this, type ); - - // Return non-false to allow normal event-path propagation - return true; - }, - - delegateType: delegateType - }; -} ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. 
-// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). -jQuery.each( { - mouseenter: "mouseover", - mouseleave: "mouseout", - pointerenter: "pointerover", - pointerleave: "pointerout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mouseenter/leave call the handler if related is outside the target. - // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -} ); - -jQuery.fn.extend( { - - on: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn ); - }, - one: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - handleObj.namespace ? - handleObj.origType + "." + handleObj.namespace : - handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each( function() { - jQuery.event.remove( this, types, fn, selector ); - } ); - } -} ); - - -var - - // Support: IE <=10 - 11, Edge 12 - 13 only - // In IE/Edge using regex groups here causes severe slowdowns. - // See https://connect.microsoft.com/IE/feedback/details/1736512/ - rnoInnerhtml = /\s*$/g; - -// Prefer a tbody over its parent table for containing new rows -function manipulationTarget( elem, content ) { - if ( nodeName( elem, "table" ) && - nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { - - return jQuery( elem ).children( "tbody" )[ 0 ] || elem; - } - - return elem; -} - -// Replace/restore the type attribute of script elements for safe DOM manipulation -function disableScript( elem ) { - elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; - return elem; -} -function restoreScript( elem ) { - if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) { - elem.type = elem.type.slice( 5 ); - } else { - elem.removeAttribute( "type" ); - } - - return elem; -} - -function cloneCopyEvent( src, dest ) { - var i, l, type, pdataOld, udataOld, udataCur, events; - - if ( dest.nodeType !== 1 ) { - return; - } - - // 1. Copy private data: events, handlers, etc. 
- if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.get( src ); - events = pdataOld.events; - - if ( events ) { - dataPriv.remove( dest, "handle events" ); - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - } - - // 2. Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. - if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = flat( args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - valueIsFunction = isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( valueIsFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( valueIsFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). 
- for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl && !node.noModule ) { - jQuery._evalUrl( node.src, { - nonce: node.nonce || node.getAttribute( "nonce" ) - }, doc ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && isAttached( node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html; - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = isAttached( elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove 
- elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? - jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? 
dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - -var swap = function( elem, options, callback ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.call( elem ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - -var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. 
- function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - container.style.cssText = "position:absolute;left:-11111px;width:60px;" + - "margin-top:1px;padding:0;border:0"; - div.style.cssText = - "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + - "margin:auto;border:1px;padding:1px;" + - "width:60%;top:1%"; - documentElement.appendChild( container ).appendChild( div ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; - - // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 - // Some styles come back with percentage values, even though they shouldn't - div.style.right = "60%"; - pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; - - // Support: IE 9 - 11 only - // Detect misreporting of content dimensions for box-sizing:border-box elements - boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; - - // Support: IE 9 only - // Detect overflow:scroll screwiness (gh-3699) - // Support: Chrome <=64 - // Don't get tricked when zoom affects offsetWidth (gh-4029) - div.style.position = "absolute"; - scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - function roundPixelMeasures( measure ) { - return Math.round( parseFloat( measure ) ); - } - - var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, - reliableTrDimensionsVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - jQuery.extend( support, { - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelBoxStyles: function() { - computeStyleTests(); - return pixelBoxStylesVal; - }, - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - }, - scrollboxSize: function() { - computeStyleTests(); - return scrollboxSizeVal; - }, - - // Support: IE 9 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Behavior in IE 9 is more subtle than in newer versions & it passes - // some versions of this test; make sure not to make it pass there! 
- reliableTrDimensions: function() { - var table, tr, trChild, trStyle; - if ( reliableTrDimensionsVal == null ) { - table = document.createElement( "table" ); - tr = document.createElement( "tr" ); - trChild = document.createElement( "div" ); - - table.style.cssText = "position:absolute;left:-11111px"; - tr.style.height = "1px"; - trChild.style.height = "9px"; - - documentElement - .appendChild( table ) - .appendChild( tr ) - .appendChild( trChild ); - - trStyle = window.getComputedStyle( tr ); - reliableTrDimensionsVal = parseInt( trStyle.height ) > 3; - - documentElement.removeChild( table ); - } - return reliableTrDimensionsVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( ret === "" && !isAttached( elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. - // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. 
- return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style, - vendorProps = {}; - -// Return a vendor-prefixed property or undefined -function vendorPropName( name ) { - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a potentially-mapped jQuery.cssProps or vendor prefixed property -function finalPropName( name ) { - var final = jQuery.cssProps[ name ] || vendorProps[ name ]; - - if ( final ) { - return final; - } - if ( name in emptyStyle ) { - return name; - } - return vendorProps[ name ] = vendorPropName( name ) || name; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }; - -function setPositiveNumber( _elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { - var i = dimension === "width" ? 1 : 0, - extra = 0, - delta = 0; - - // Adjustment may not be necessary - if ( box === ( isBorderBox ? 
"border" : "content" ) ) { - return 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin - if ( box === "margin" ) { - delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); - } - - // If we get here with a content-box, we're seeking "padding" or "border" or "margin" - if ( !isBorderBox ) { - - // Add padding - delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // For "border" or "margin", add border - if ( box !== "padding" ) { - delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - - // But still keep track of it otherwise - } else { - extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - - // If we get here with a border-box (content + padding + border), we're seeking "content" or - // "padding" or "margin" - } else { - - // For "content", subtract padding - if ( box === "content" ) { - delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // For "content" or "padding", subtract border - if ( box !== "margin" ) { - delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - // Account for positive content-box scroll gutter when requested by providing computedVal - if ( !isBorderBox && computedVal >= 0 ) { - - // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border - // Assuming integer scroll gutter, subtract the rest and round down - delta += Math.max( 0, Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - computedVal - - delta - - extra - - 0.5 - - // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter - // Use an explicit zero to avoid NaN (gh-3964) - ) ) || 0; - } - - return delta; -} - -function getWidthOrHeight( elem, dimension, extra ) { - - // Start with computed style - var styles = getStyles( elem ), - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). - // Fake content-box until we know it's needed to know the true value. - boxSizingNeeded = !support.boxSizingReliable() || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - valueIsBorderBox = isBorderBox, - - val = curCSS( elem, dimension, styles ), - offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); - - // Support: Firefox <=54 - // Return a confounding non-pixel value or feign ignorance, as appropriate. - if ( rnumnonpx.test( val ) ) { - if ( !extra ) { - return val; - } - val = "auto"; - } - - - // Support: IE 9 - 11 only - // Use offsetWidth/offsetHeight for when box sizing is unreliable. - // In those cases, the computed value can be trusted to be border-box. - if ( ( !support.boxSizingReliable() && isBorderBox || - - // Support: IE 10 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
- !support.reliableTrDimensions() && nodeName( elem, "tr" ) || - - // Fall back to offsetWidth/offsetHeight when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - val === "auto" || - - // Support: Android <=4.1 - 4.3 only - // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) - !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && - - // Make sure the element is visible & connected - elem.getClientRects().length ) { - - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Where available, offsetWidth/offsetHeight approximate border box dimensions. - // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the - // retrieved value as a content box dimension. - valueIsBorderBox = offsetProp in elem; - if ( valueIsBorderBox ) { - val = elem[ offsetProp ]; - } - } - - // Normalize "" and auto - val = parseFloat( val ) || 0; - - // Adjust for the element's box model - return ( val + - boxModelAdjustment( - elem, - dimension, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles, - - // Provide the current computed size to request scroll gutter calculation (gh-3589) - val - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "gridArea": true, - "gridColumn": true, - "gridColumnEnd": true, - "gridColumnStart": true, - "gridRow": true, - "gridRowEnd": true, - "gridRowStart": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: {}, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. 
- if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append - // "px" to a few hardcoded values. - if ( type === "number" && !isCustomProp ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( _i, dimension ) { - jQuery.cssHooks[ dimension ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. 
- ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? - swap( elem, cssShow, function() { - return getWidthOrHeight( elem, dimension, extra ); - } ) : - getWidthOrHeight( elem, dimension, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = getStyles( elem ), - - // Only read styles.position if the test has a chance to fail - // to avoid forcing a reflow. - scrollboxSizeBuggy = !support.scrollboxSize() && - styles.position === "absolute", - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) - boxSizingNeeded = scrollboxSizeBuggy || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - subtract = extra ? - boxModelAdjustment( - elem, - dimension, - extra, - isBorderBox, - styles - ) : - 0; - - // Account for unreliable border-box dimensions by comparing offset* to computed and - // faking a content-box to get border and padding (gh-3699) - if ( isBorderBox && scrollboxSizeBuggy ) { - subtract -= Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - parseFloat( styles[ dimension ] ) - - boxModelAdjustment( elem, dimension, "border", false, styles ) - - 0.5 - ); - } - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ dimension ] = value; - value = jQuery.css( elem, dimension ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( prefix !== "margin" ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? - hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && ( - jQuery.cssHooks[ tween.prop ] || - tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = Date.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 15 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY and Edge just mirrors - // the overflowX value there. 
- opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - result.stop.bind( result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = Date.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -function classesToArray( value ) { - if ( Array.isArray( value ) ) { - return value; - } - if ( typeof value === "string" ) { - return value.match( rnothtmlwhite ) || []; - } - return []; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value, - isValidValue = type === "string" || Array.isArray( value ); - - if ( typeof stateVal === "boolean" && isValidValue ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( isValidValue ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = classesToArray( value ); - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, valueIsFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - valueIsFunction = isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( valueIsFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -support.focusin = "onfocusin" in window; - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, - stopPropagationCallback = function( e ) { - e.stopPropagation(); - }; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = lastElement = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? 
- event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - lastElement = cur; - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( - dataPriv.get( cur, "events" ) || Object.create( null ) - )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - - if ( event.isPropagationStopped() ) { - lastElement.addEventListener( type, stopPropagationCallback ); - } - - elem[ type ](); - - if ( event.isPropagationStopped() ) { - lastElement.removeEventListener( type, stopPropagationCallback ); - } - - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - - // Handle: regular nodes (via `this.ownerDocument`), window - // (via `this.document`) & document (via `this`). - var doc = this.ownerDocument || this.document || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this.document || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = { guid: Date.now() }; - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. 
- try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) { - xml = undefined; - } - - if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && toType( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - if ( a == null ) { - return ""; - } - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ) - .filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ) - .map( function( _i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport ); - } 
- } ); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - // If 
prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() + " " ] = - ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) - .concat( match[ 2 ] ); - } - } - match = responseHeaders[ key.toLowerCase() + " " ]; - } - return match == null ? null : match.join( ", " ); - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 15 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available and should be processed, append data to url - if ( s.data && ( s.processData || typeof s.data === "string" ) ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + - uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Use a noop converter for missing script - if ( !isSuccess && jQuery.inArray( "script", s.dataTypes ) > -1 ) { - s.converters[ "text script" ] = function() {}; - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( _i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - -jQuery.ajaxPrefilter( function( s ) { - var i; - for ( i in s.headers ) { - if ( i.toLowerCase() === "content-type" ) { - s.contentType = s.headers[ i ] || ""; - } - } -} ); - - -jQuery._evalUrl = function( url, options, doc ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - - // Only evaluate the response if it is successful (gh-4126) - // dataFilter is not invoked for failure responses, so using it instead - // of the default converter is kludgy but it works. 
- converters: { - "text script": function() {} - }, - dataFilter: function( response ) { - jQuery.globalEval( response, options, doc ); - } - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var htmlIsFunction = isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. 
- if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.ontimeout = - xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? - { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain or forced-by-attrs requests - if ( s.crossDomain || s.scriptAttrs ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " - [[92mo[0m] Spoiler generations have correct format. 
Found 800 -''')) - - print(beautify_ansi_text('''[0;32m/opt/conda/lib/python3.7/site-packages/requests/adapters.py in [0;36msend[0;34m(self, request, stream, timeout, verify, cert, proxies) -[1;32m 517 [0;32mraise SSLError[0;34m(e[0;34m, request[0;34m=request[0;34m)[0;34m[0;34m -[1;32m 518 [0;34m -[0;32m--> 519[0;31m [0;32mraise ConnectionError[0;34m(e[0;34m, request[0;34m=request[0;34m)[0;34m[0;34m -[1;32m 520 [0;34m -[1;32m 521 [0;32mexcept ClosedPoolError [0;32mas e[0;34m:[0;34m[0;34m''')) diff --git a/application/src/tira/endpoints/vm_api.py b/application/src/tira/endpoints/vm_api.py deleted file mode 100644 index 1897042f9..000000000 --- a/application/src/tira/endpoints/vm_api.py +++ /dev/null @@ -1,976 +0,0 @@ -from django.template.loader import render_to_string -from django.db.utils import IntegrityError -from django.core.cache import cache -from django.views.decorators.csrf import csrf_exempt -import logging - -from grpc import RpcError, StatusCode -from tira.authentication import auth -from tira.checks import check_permissions, check_resources_exist, check_conditional_permissions -from tira.forms import * -from django.http import JsonResponse, HttpResponseNotAllowed -from django.conf import settings -from http import HTTPStatus - -from tira.model import TransitionLog, EvaluationLog, TransactionLog -from tira.grpc_client import GrpcClient -import tira.tira_model as model -from tira.util import get_tira_id, reroute_host, link_to_discourse_team -from tira.views import add_context -from functools import wraps -import json -from markdown import markdown - -include_navigation = True if settings.DEPLOYMENT == "legacy" else False -from discourse_client_in_disraptor.discourse_api_client import get_disraptor_user - -logger = logging.getLogger("tira") -logger.info("ajax_routes: Logger active") - - -def host_call(func): - """ This is a decorator for methods that connect to a host. It handles all exceptions that can occur - in the grpc communication. It also adds a reply consistent with the return status of the grpc call. """ - - @wraps(func) - def func_wrapper(request, *args, **kwargs): - try: - response = func(request, *args, **kwargs) - except RpcError as e: - ex_message = "FAILED" - try: - logger.exception(f"{request.get_full_path()}: connection failed with {e}") - if e.code() == StatusCode.UNAVAILABLE: # .code() is implemented by the _channel._InteractiveRpcError - logger.exception(f"Connection Unavailable: {e.debug_error_string()}") - ex_message = f"The requested host is unavailable. If you think this is a mistake, please contact " \ - "your task organizer." # This happens if the GRPC Server is not running - if e.code() == StatusCode.INVALID_ARGUMENT: - logger.exception(f"Invalid Argument: {e.debug_error_string()}") - ex_message = f"Response returned with an invalid argument: {e.debug_error_string()}" # - except Exception as e2: # There is a RpcError but not an Interactive one. 
This should not happen - logger.exception(f"{request.get_full_path()}: Unexpected Exception occurred: {e2}") - ex_message = f"An unexpected exception occurred: {e2}" - return JsonResponse({'status': "2", 'message': ex_message}, status=HTTPStatus.INTERNAL_SERVER_ERROR) - - except Exception as e: - logger.exception(f"{request.get_full_path()}: Server Error: {e}") - return JsonResponse({'status': "1", 'message': f"An unexpected exception occurred: {e}"}, - status=HTTPStatus.INTERNAL_SERVER_ERROR) - - if response.status == 0: - return JsonResponse({'status': 0, - 'message': response.transactionId}, - status=HTTPStatus.ACCEPTED) - if response.status == 2: - return JsonResponse({'status': 2, - 'message': f"Virtual machine not found on host: {response.message}"}, - status=HTTPStatus.NOT_FOUND) - if response.status == 3: - return JsonResponse({'status': 1, - 'message': f"Virtual machine is in the wrong state for your request: {response.message}"}, - status=HTTPStatus.BAD_REQUEST) - if response.status == 4: - return JsonResponse({'status': 1, - 'message': f"VM is archived: {response.message}"}, - status=HTTPStatus.NOT_FOUND) - if response.status == 5: - return JsonResponse({'status': 2, - 'message': f"VM is not accessible: {response.message}"}, - status=HTTPStatus.NOT_FOUND) - if response.status == 6: - return JsonResponse({'status': 1, - 'message': f"Requested input run was not found: {response.message}"}, - status=HTTPStatus.NOT_FOUND) - if response.status == 7: - return JsonResponse({'status': 1, - 'message': f"Evaluation failed due to malformed run output: {response.message}"}, - status=HTTPStatus.BAD_REQUEST) - if response.status == 8: - return JsonResponse({'status': 1, - 'message': f"Input malformed: {response.message}"}, - status=HTTPStatus.BAD_REQUEST) - if response.status == 9: - return JsonResponse({'status': 2, - 'message': f"Host ist busy: {response.message}"}, - status=HTTPStatus.SERVICE_UNAVAILABLE) - - return JsonResponse( - {'status': 2, - 'message': f"{response.transactionId} was rejected by the host: {response.message}"}, - status=HTTPStatus.INTERNAL_SERVER_ERROR) - - return func_wrapper - - -# --------------------------------------------------------------------- -# VM actions -# --------------------------------------------------------------------- - -@check_permissions -@check_resources_exist('json') -def vm_state(request, vm_id): - try: - state = TransitionLog.objects.get_or_create(vm_id=vm_id, defaults={'vm_state': 0})[0].vm_state - except IntegrityError as e: - logger.warning(f"failed to read state for vm {vm_id} with {e}") - state = 0 - return JsonResponse({'status': 0, 'state': state}) - - -@check_permissions -@check_resources_exist('json') -def vm_running_evaluations(request, vm_id): - results = EvaluationLog.objects.filter(vm_id=vm_id) - return JsonResponse({'status': 0, 'running_evaluations': True if results else False}) - - -@check_permissions -@check_resources_exist('json') -def get_running_evaluations(request, vm_id): - results = EvaluationLog.objects.filter(vm_id=vm_id) - return JsonResponse({'status': 0, 'running_evaluations': [{"vm_id": r.vm_id, "run_id": r.run_id, - "running_on": r.running_on, "last_update": r.last_update} - for r in results]}) - - -@add_context -@check_permissions -def docker_software_details(request, context, vm_id, docker_software_id): - context['docker_software_details'] = model.get_docker_software(int(docker_software_id)) - - if 'mount_hf_model' in context['docker_software_details'] and context['docker_software_details']['mount_hf_model']: - 
mount_hf_model = [] - for i in context['docker_software_details']['mount_hf_model'].split(): - mount_hf_model += [{'href': f'https://huggingface.co/{i}', 'display_name': i}] - - context['docker_software_details']['mount_hf_model_display'] = mount_hf_model - - return JsonResponse({'status': 0, "context": context}) - -@check_permissions -def huggingface_model_mounts(request, vm_id, hf_model): - from tira.huggingface_hub_integration import huggingface_model_mounts, snapshot_download_hf_model - context = {'hf_model_available': False, 'hf_model_for_vm': vm_id} - - try: - context['hf_model_available'] = huggingface_model_mounts([hf_model.replace('--', '/')]) is not None - except: - pass - - if not context['hf_model_available']: - try: - snapshot_download_hf_model(hf_model) - context['hf_model_available'] = True - except Exception as e: - logger.warning(e) - return JsonResponse({'status': '1', 'message': str(e)}) - - return JsonResponse({'status': 0, "context": context}) - - -@add_context -@check_permissions -def upload_group_details(request, context, task_id, vm_id, upload_id): - try: - context['upload_group_details'] = model.get_upload(task_id, vm_id, upload_id) - except Exception as e: - return JsonResponse({'status': "1", 'message': f"An unexpected exception occurred: {e}"}) - - return JsonResponse({'status': 0, "context": context}) - - -@check_conditional_permissions(restricted=True) -@host_call -def vm_create(request, hostname, vm_id, ova_file): - uid = auth.get_user_id(request) - host = reroute_host(hostname) - return GrpcClient(host).vm_create(vm_id=vm_id, ova_file=ova_file, user_id=uid, hostname=host) - - -@check_permissions -@check_resources_exist('json') -@host_call -def vm_start(request, vm_id): - vm = model.get_vm(vm_id) - # NOTE vm_id is different from vm.vmName (latter one includes the 01-tira-ubuntu-... - return GrpcClient(reroute_host(vm['host'])).vm_start(vm_id=vm_id) - - -@check_permissions -@check_resources_exist('json') -@host_call -def vm_shutdown(request, vm_id): - vm = model.get_vm(vm_id) - return GrpcClient(reroute_host(vm['host'])).vm_shutdown(vm_id=vm_id) - - -@check_permissions -@check_resources_exist('json') -@host_call -def vm_stop(request, vm_id): - vm = model.get_vm(vm_id) - return GrpcClient(reroute_host(vm['host'])).vm_stop(vm_id=vm_id) - - -@check_permissions -@check_resources_exist('json') -def vm_info(request, vm_id): - vm = model.get_vm(vm_id) - host = reroute_host(vm['host']) - if not host: - logger.exception(f"/grpc/{vm_id}/vm-info: connection to {host} failed, because host is empty") - return JsonResponse({'status': 'Rejected', 'message': "SERVER_ERROR"}, status=HTTPStatus.INTERNAL_SERVER_ERROR) - try: - grpc_client = GrpcClient(host) - response_vm_info = grpc_client.vm_info(vm_id=vm_id) - _ = TransitionLog.objects.update_or_create(vm_id=vm_id, defaults={'vm_state': response_vm_info.state}) - del grpc_client - except RpcError as e: - ex_message = "FAILED" - try: - if e.code() == StatusCode.UNAVAILABLE: # .code() is implemented by the _channel._InteractiveRpcError - logger.exception(f"/grpc/{vm_id}/vm-info: connection to {host} failed with {e}") - ex_message = "Host Unavailable" # This happens if the GRPC Server is not running - if e.code() == StatusCode.INVALID_ARGUMENT: # .code() is implemented by the _channel._InteractiveRpcError - ex_message = "VM is archived" # If there is no VM with the requested name on the host. 
- _ = TransitionLog.objects.update_or_create(vm_id=vm_id, defaults={'vm_state': 8}) - except Exception as e2: # There is a RpcError but not an Interactive one. This should not happen - logger.exception(f"/grpc/{vm_id}/vm-info: Unexpected Execption occured: {e2}") - return JsonResponse({'status': 1, 'message': ex_message}, status=HTTPStatus.INTERNAL_SERVER_ERROR) - except Exception as e: - logger.exception(f"/grpc/{vm_id}/vm-info: connection to {host} failed with {e}") - return JsonResponse({'status': 1, 'message': "SERVER_ERROR"}, status=HTTPStatus.INTERNAL_SERVER_ERROR) - - return JsonResponse({'status': 0, 'context': { - "guestOs": response_vm_info.guestOs, - "memorySize": response_vm_info.memorySize, - "numberOfCpus": response_vm_info.numberOfCpus, - "sshPort": response_vm_info.sshPort, - "rdpPort": response_vm_info.rdpPort, - "host": response_vm_info.host, - "sshPortStatus": response_vm_info.sshPortStatus, - "rdpPortStatus": response_vm_info.rdpPortStatus, - "state": response_vm_info.state, - }}) - - -# --------------------------------------------------------------------- -# Software actions -# --------------------------------------------------------------------- -@check_permissions -@check_resources_exist("json") -def software_add(request, task_id, vm_id): - if request.method == "GET": - if not task_id or task_id is None or task_id == 'None': - return JsonResponse({"status": 1, "message": "Please specify the associated task_id."}) - - software = model.add_software(task_id, vm_id) - if not software: - return JsonResponse({'status': 1, 'message': 'Failed to create a new Software.'}, - status=HTTPStatus.INTERNAL_SERVER_ERROR) - - context = { - "task": task_id, - "vm_id": vm_id, - "software": { - "id": software['id'], - "command": software['command'], - "working_dir": software['working_directory'], - "dataset": software['dataset'], - "creation_date": software['creation_date'], - "last_edit": software['last_edit'] - } - } - return JsonResponse({"status": 0, "message": "ok", "context": context}) - else: - return JsonResponse({"status": 1, "message": "POST is not allowed here."}) - -@check_permissions -@check_resources_exist('json') -def software_save(request, task_id, vm_id, software_id): - if request.method == "POST": - data = json.loads(request.body) - new_dataset = data.get("input_dataset") - if not model.dataset_exists(new_dataset): - return JsonResponse({'status': 1, 'message': f"Cannot save, the dataset {new_dataset} does not exist."}) - - software = model.update_software(task_id, vm_id, software_id, - data.get("command"), - data.get("working_dir"), - data.get("input_dataset"), - data.get("input_run")) - - message = "failed to save software for an unknown reasons" - try: - if software: - return JsonResponse({'status': 0, "message": f"Saved {software_id}", 'last_edit': software.lastEditDate}, - status=HTTPStatus.ACCEPTED) - except Exception as e: - message = str(e) - - return JsonResponse({'status': 1, "message": message}, status=HTTPStatus.BAD_REQUEST) - return JsonResponse({'status': 1, 'message': f"GET is not implemented for add dataset"}) - - -@check_permissions -@check_resources_exist('json') -def software_delete(request, task_id, vm_id, software_id): - delete_ok = model.delete_software(task_id, vm_id, software_id) - - if delete_ok: - return JsonResponse({'status': 0}, status=HTTPStatus.ACCEPTED) - else: - return JsonResponse({'status': 1, 'message': 'Cannot delete software, because it has a valid ' - 'evaluation assigned (or it does not exist.)'}, - 
status=HTTPStatus.INTERNAL_SERVER_ERROR) - - -@check_permissions -@check_resources_exist('json') -@host_call -def run_execute(request, task_id, vm_id, software_id): - vm = model.get_vm(vm_id) - software = model.get_software(task_id, vm_id, software_id=software_id) - if not model.dataset_exists(software["dataset"]): - return JsonResponse({'status': 1, 'message': f'The dataset {software["dataset"]} does not exist'}) - host = reroute_host(vm['host']) - future_run_id = get_tira_id() - grpc_client = GrpcClient(host) - response = grpc_client.run_execute(vm_id=vm_id, - dataset_id=software["dataset"], - run_id=future_run_id, - input_run_vm_id="", - input_run_dataset_id="", - input_run_run_id=software["run"], - optional_parameters="", - task_id=task_id, - software_id=software_id) - del grpc_client - return response - - -@host_call -def _master_vm_eval_call(vm_id, dataset_id, run_id, evaluator): - """ Called when the evaluation is done via master vm. - This method calls the grpc client """ - host = reroute_host(evaluator["host"]) - grpc_client = GrpcClient(host) - response = grpc_client.run_eval(vm_id=evaluator["vm_id"], - dataset_id=dataset_id, - run_id=get_tira_id(), - input_run_vm_id=vm_id, - input_run_dataset_id=dataset_id, - input_run_run_id=run_id, - optional_parameters="") - del grpc_client - return response - - -def _git_runner_vm_eval_call(vm_id, dataset_id, run_id, evaluator): - """ called when the evaluation is done via git runner. - This method calls the git utilities in git_runner.py to start the git CI - """ - try: - transaction_id = model.get_git_integration(dataset_id=dataset_id)\ - .run_evaluate_with_git_workflow(evaluator['task_id'], dataset_id, vm_id, run_id, - evaluator['git_runner_image'], evaluator['git_runner_command'], - evaluator['git_repository_id'], evaluator['evaluator_id']) - except Exception as e: - return JsonResponse({'status': 1, 'message': str(e)}, status=HTTPStatus.INTERNAL_SERVER_ERROR) - - return JsonResponse({'status': 0, 'message': transaction_id}, status=HTTPStatus.ACCEPTED) - - -@check_conditional_permissions(private_run_ok=True) -@check_resources_exist('json') -def run_eval(request, vm_id, dataset_id, run_id): - """ Get the evaluator for dataset_id from the model. - Then, send a GRPC-call to the host running the evaluator with the run data. - Then, log vm_id and run_id to the evaluation log as ongoing. 
- """ - # check if evaluation already exists - existing_evaluations = EvaluationLog.objects.filter(run_id=run_id) - if existing_evaluations and len(existing_evaluations) > 5: - return JsonResponse({'status': '1', 'message': "An evaluation is already in progress."}, - status=HTTPStatus.PRECONDITION_FAILED) - - evaluator = model.get_evaluator(dataset_id) - if 'is_git_runner' in evaluator and evaluator['is_git_runner']: - ret = _git_runner_vm_eval_call(vm_id, dataset_id, run_id, evaluator) - git_runner = model.get_git_integration(dataset_id=dataset_id) - running_pipelines = git_runner.all_running_pipelines_for_repository( - evaluator['git_repository_id'], - cache, - force_cache_refresh=True - ) - - return ret - - return _master_vm_eval_call(vm_id, dataset_id, run_id, evaluator) - - -@check_conditional_permissions(private_run_ok=True) -def run_delete(request, dataset_id, vm_id, run_id): - delete_ok = model.delete_run(dataset_id, vm_id, run_id) - if delete_ok: - return JsonResponse({'status': 0}, status=HTTPStatus.ACCEPTED) - return JsonResponse({'status': 1, 'message': f"Can not delete run {run_id} since it is used as an input run."}, status=HTTPStatus.ACCEPTED) - - -@check_permissions -@check_resources_exist('json') -@host_call -def run_abort(request, vm_id): - """ """ - vm = model.get_vm(vm_id) - host = reroute_host(vm['host']) - - grpc_client = GrpcClient(host) - response = grpc_client.run_abort(vm_id=vm_id) - del grpc_client - return response - -@csrf_exempt -@check_permissions -@check_resources_exist("json") -def upload(request, task_id, vm_id, dataset_id, upload_id): - if request.method == 'POST': - if not dataset_id or dataset_id is None or dataset_id == 'None': - return JsonResponse({"status": 1, "message": "Please specify the associated dataset."}) - - uploaded_file = request.FILES['file'] - new_run = model.add_uploaded_run(task_id, vm_id, dataset_id, upload_id, uploaded_file) - if model.git_pipeline_is_enabled_for_task(task_id, cache): - run_eval(request=request, vm_id=vm_id, dataset_id=dataset_id, run_id=new_run["run"]["run_id"]) - - return JsonResponse({"status": 0, "message": "ok", "new_run": new_run, "started_evaluation": True}) - return JsonResponse({"status": 0, "message": "ok", "new_run": new_run, "started_evaluation": False}) - else: - return JsonResponse({"status": 1, "message": "GET is not allowed here."}) - - -@check_permissions -@check_resources_exist("json") -def delete_upload(request, task_id, vm_id, upload_id): - try: - model.delete_upload(task_id, vm_id, upload_id) - return JsonResponse({"status": 0, "message": "ok"}) - except Exception as e: - logger.warning('Failed to delete upload: ' + str(e)) - logger.exception(e) - return JsonResponse({"status": 0, "message": "Failed" + str(e)}) - - -@check_permissions -@check_resources_exist("json") -def add_upload(request, task_id, vm_id): - if request.method == "GET": - if not task_id or task_id is None or task_id == 'None': - return JsonResponse({"status": 1, "message": "Please specify the associated task_id."}) - rename_to = request.GET.get('rename_to', None) - rename_to = None if not rename_to or not rename_to.strip() else rename_to - - upload = model.add_upload(task_id, vm_id, rename_to) - if not upload: - return JsonResponse({'status': 1, 'message': 'Failed to create a new Upload.'}, - status=HTTPStatus.INTERNAL_SERVER_ERROR) - - context = { - "task": task_id, - "vm_id": vm_id, - "upload": upload - } - return JsonResponse({"status": 0, "message": "ok", "context": context}) - else: - return JsonResponse({"status": 1, 
"message": "POST is not allowed here."}) - - -@csrf_exempt -@check_permissions -@check_resources_exist("json") -def docker_software_add(request, task_id, vm_id): - if request.method == 'POST': - if not task_id or task_id is None or task_id == 'None': - return JsonResponse({"status": 1, "message": "Please specify the associated task_id."}) - - data = json.loads(request.body) - if not data.get('image'): - return JsonResponse({"status": 1, "message": "Please specify the associated docker image."}) - - if not data.get('command'): - return JsonResponse({"status": 1, "message": "Please specify the associated docker command."}) - - submission_git_repo = None - build_environment = None - if data.get('code_repository_id'): - submission_git_repo = model.model.get_submission_git_repo_or_none(data.get('code_repository_id'), vm_id, True) - - if not submission_git_repo: - return JsonResponse({"status": 1, "message": f"The code repository '{data.get('code_repository_id'):}' does not exist."}) - - if not data.get('build_environment'): - return JsonResponse({"status": 1, "message": f"Please specify the build_environment for linking the code."}) - - build_environment = json.dumps(data.get('build_environment')) - - new_docker_software = model.add_docker_software(task_id, vm_id, - data.get('image'), data.get('command'), - data.get('inputJob', None), - submission_git_repo, build_environment - ) - - if data.get('mount_hf_model'): - try: - from tira.huggingface_hub_integration import huggingface_model_mounts - mounts = huggingface_model_mounts(data.get('mount_hf_model')) - model.add_docker_software_mounts(new_docker_software, mounts) - - except Exception as e: - return JsonResponse({"status": 1, "message": str(e)}) - - return JsonResponse({"status": 0, "message": "ok", "context": new_docker_software}) - else: - return JsonResponse({"status": 1, "message": "GET is not allowed here."}) - -@check_permissions -@check_resources_exist('json') -def docker_software_save(request, task_id, vm_id, docker_software_id): - if request.method == "POST": - try: - data = json.loads(request.body) - model.update_docker_software_metadata(docker_software_id, - data.get("display_name"), - data.get("description"), - data.get("paper_link"), - data.get("ir_re_ranker", False), - data.get("ir_re_ranking_input", False)) - return JsonResponse({'status': 0, "message": "Software edited successfully"}) - except Exception as e: - return JsonResponse({'status': 1, 'message': f"Error while editing software: " + str(e)}) - return JsonResponse({'status': 1, 'message': f"GET is not implemented for edit software"}) - - -@check_permissions -def add_software_submission_git_repository(request, task_id, vm_id): - if request.method != "POST": - return JsonResponse({'status': 1, 'message': f"GET is not implemented for edit upload"}) - - try: - data = json.loads(request.body) - external_owner = data['external_owner'] - private = not data.get('allow_public_repo', False) - disraptor_user = get_disraptor_user(request, allow_unauthenticated_user=False) - - if not disraptor_user or not type(disraptor_user) == str: - return JsonResponse({'status': 1, 'message': f"Please authenticate."}) - - if not model.github_user_exists(external_owner): - return JsonResponse({'status': 1, 'message': f"The user '{external_owner}' does not exist on Github, maybe a typo?"}) - - software_submission_git_repo = model.get_submission_git_repo(vm_id, task_id, disraptor_user, external_owner, - private) - - return JsonResponse({'status': 0, "context": software_submission_git_repo}) - 
except Exception as e: - logger.exception(e) - logger.warning('Error while adding your git repository: ' + str(e)) - return JsonResponse({'status': 1, 'message': f"Error while adding your git repository: " + str(e)}) - -@check_permissions -def get_token(request, vm_id): - disraptor_user = get_disraptor_user(request, allow_unauthenticated_user=False) - - if not disraptor_user or not type(disraptor_user) == str: - return JsonResponse({'status': 1, 'message': f"Please authenticate."}) - - try: - return JsonResponse({'status': 0, "context": {'token': model.get_discourse_token_for_user(vm_id, disraptor_user)}}) - except: - return JsonResponse({'status': 1, 'message': f"Could not extract the discourse/disraptor user, please authenticate."}) - -@check_permissions -def get_software_submission_git_repository(request, task_id, vm_id): - try: - if task_id not in settings.CODE_SUBMISSION_REFERENCE_REPOSITORIES or not model.load_docker_data(task_id, vm_id, cache, force_cache_refresh=False): - return JsonResponse({'status': 0, "context": {'disabled': True}}) - - return JsonResponse({'status': 0, "context": model.get_submission_git_repo(vm_id, task_id)}) - except Exception as e: - logger.exception(e) - logger.warning('Error while getting your git repository: ' + str(e)) - return JsonResponse({'status': 1, 'message': f"Error while getting your git repository: " + str(e)}) - - -@check_permissions -@check_resources_exist('json') -def upload_save(request, task_id, vm_id, upload_id): - if request.method == "POST": - try: - data = json.loads(request.body) - model.update_upload_metadata(task_id, vm_id, upload_id, - data.get("display_name"), - data.get("description"), - data.get("paper_link")) - return JsonResponse({'status': 0, "message": "Software edited successfully"}) - except Exception as e: - logger.exception(e) - logger.warning('Error while editing upload: ' + str(e)) - return JsonResponse({'status': 1, 'message': f"Error while editing upload: " + str(e)}) - return JsonResponse({'status': 1, 'message': f"GET is not implemented for edit upload"}) - - -@check_permissions -@check_resources_exist('json') -def docker_software_delete(request, task_id, vm_id, docker_software_id): - delete_ok = model.delete_docker_software(task_id, vm_id, docker_software_id) - - if delete_ok: - return JsonResponse({'status': 0}, status=HTTPStatus.ACCEPTED) - else: - return JsonResponse({'status': 1, 'message': 'Cannot delete docker software, because it has a valid ' - 'evaluation assigned (or it does not exist.)'}, - status=HTTPStatus.INTERNAL_SERVER_ERROR) - - -def __normalize_command(cmd, evaluator): - to_normalize = {'inputRun': '/tira-data/input-run', - 'outputDir': '/tira-data/output', - 'inputDataset': '/tira-data/input' - } - - if 'inputRun' in cmd and evaluator: - to_normalize['outputDir'] = '/tira-data/eval_output' - to_normalize['inputDataset'] = '/tira-data/input_truth' - - for k, v in to_normalize.items(): - cmd = cmd.replace('$' + k, v).replace('${' + k + '}', v) - - return cmd - - -def construct_verbosity_output(image, command, approach, task, dataset): - command = __normalize_command(command, '') - return { - 'tira_run_export': f'tira-run --export-dataset {task}/{dataset} --output-directory tira-dataset', - 'cli_command': 'tira-run \\\n --input-directory tira-dataset \\\n --output-directory tira-output \\\n ' + - '--approach \'' + approach + '\'', - 'python_command': f'tira.run("{approach}", "tira-dataset")', - 'docker_command': 'docker run --rm -ti \\\n -v ${PWD}/tira-dataset:/tira-data/input:ro \\\n -v ' - 
'${PWD}/tira-output:/tira-data/output:rw -\\\n -entrypoint sh ' + f'\\\n ' - f't{image} \\\n -c \'{command}\'', - 'image': image, 'command': command - } - - -def __rendered_references(task_id, vm_id, run): - task = model.get_task(task_id) - bib_references = { - 'run': '@Comment {No bib entry specified for the run, please contact the team/organizers for clarification.}', - 'task': '@Comment {No bib entry specified for the task, please contact the organizers for clarification.}', - 'dataset': '@Comment {No bib entry specified for the dataset, please contact the organizers for clarification.}', - } - markdown_references = {'run': None, 'task': None, 'dataset': None} - - if run['dataset'] == 'antique-test-20230107-training': - markdown_references['dataset'] = '[ANTIQUE](https://ir.webis.de/anthology/2020.ecir_conference-20202.21/) ' + \ - 'is a non-factoid quesiton answering dataset based on the questions and ' + \ - 'answers of Yahoo! Webscope L6.' - bib_references['dataset'] = '''@inproceedings{Hashemi2020Antique, - title = {ANTIQUE: A Non-Factoid Question Answering Benchmark}, - author = {Helia Hashemi and Mohammad Aliannejadi and Hamed Zamani and Bruce Croft}, - booktitle = {ECIR}, - year = {2020} -}''' - - if task_id == 'ir-benchmarks': - markdown_references['task'] = '[TIRA](https://webis.de/publications?q=TIRA#froebe_2023b) ' + \ - 'respectively [TIREx](https://webis.de/publications#froebe_2023e) ' + \ - 'is used to enable reprodicible and blinded experiments.' - bib_references['task'] = '''@InProceedings{froebe:2023b, - address = {Berlin Heidelberg New York}, - author = {Maik Fr{\"o}be and Matti Wiegmann and Nikolay Kolyada and Bastian Grahm and Theresa Elstner and Frank Loebe and Matthias Hagen and Benno Stein and Martin Potthast}, - booktitle = {Advances in Information Retrieval. 45th European Conference on {IR} Research ({ECIR} 2023)}, - doi = {10.1007/978-3-031-28241-6_20}, - editor = {Jaap Kamps and Lorraine Goeuriot and Fabio Crestani and Maria Maistro and Hideo Joho and Brian Davis and Cathal Gurrin and Udo Kruschwitz and Annalina Caputo}, - month = apr, - pages = {236--241}, - publisher = {Springer}, - series = {Lecture Notes in Computer Science}, - site = {Dublin, Irland}, - title = {{Continuous Integration for Reproducible Shared Tasks with TIRA.io}}, - todo = {pages, code}, - url = {https://doi.org/10.1007/978-3-031-28241-6_20}, - year = 2023 -} - -@InProceedings{froebe:2023e, - author = {Maik Fr{\"o}be and {Jan Heinrich} Reimer and Sean MacAvaney and Niklas Deckers and Simon Reich and Janek Bevendorff and Benno Stein and Matthias Hagen and Martin Potthast}, - booktitle = {46th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR 2023)}, - month = jul, - publisher = {ACM}, - site = {Taipei, Taiwan}, - title = {{The Information Retrieval Experiment Platform}}, - todo = {annote, doi, editor, pages, url, videourl}, - year = 2023 -}''' - - if run['software'] == 'MonoT5 3b (tira-ir-starter-gygaggle)': - markdown_references['run'] = 'The implementation of [MonoT5](https://arxiv.org/abs/2101.05667) in [PyGaggle](https://ir.webis.de/anthology/2021.sigirconf_conference-2021.304/).' 
- bib_references['run'] = '''@article{DBLP:journals/corr/abs-2101-05667, - author = {Ronak Pradeep and Rodrigo Frassetto Nogueira and Jimmy Lin}, - title = {The Expando-Mono-Duo Design Pattern for Text Ranking with Pretrained Sequence-to-Sequence Models}, - journal = {CoRR}, - volume = {abs/2101.05667}, - year = {2021}, - url = {https://arxiv.org/abs/2101.05667}, - eprinttype = {arXiv}, - eprint = {2101.05667}, - timestamp = {Mon, 20 Mar 2023 15:35:34 +0100}, - biburl = {https://dblp.org/rec/journals/corr/abs-2101-05667.bib}, - bibsource = {dblp computer science bibliography, https://dblp.org} -} - -@inproceedings{lin-2021-pyserini, - author = {Jimmy Lin and Xueguang Ma and Sheng{-}Chieh Lin and Jheng{-}Hong Yang and Ronak Pradeep and Rodrigo Frassetto Nogueira}, - editor = {Fernando Diaz and Chirag Shah and Torsten Suel and Pablo Castells and Rosie Jones and Tetsuya Sakai}, - title = {Pyserini: {A} Python Toolkit for Reproducible Information Retrieval Research with Sparse and Dense Representations}, - booktitle = {{SIGIR} '21: The 44th International {ACM} {SIGIR} Conference on Research and Development in Information Retrieval, Virtual Event, Canada, July 11-15, 2021}, - pages = {2356--2362}, - publisher = {{ACM}}, - year = {2021}, - url = {https://doi.org/10.1145/3404835.3463238}, - doi = {10.1145/3404835.3463238}, - timestamp = {Mon, 20 Mar 2023 15:35:34 +0100}, - biburl = {https://dblp.org/rec/conf/sigir/LinMLYPN21.bib}, - bibsource = {dblp computer science bibliography, https://dblp.org} -}''' - - if run['software'] == 'DLH (tira-ir-starter-pyterrier)': - markdown_references['run'] = 'The implementation of [DLH](https://ir.webis.de/anthology/2006.ecir_conference-2006.3/) in [PyTerrier](https://ir.webis.de/anthology/2021.cikm_conference-2021.533/).' - bib_references['run'] = '''@inproceedings{amati-2006-frequentist, - author = {Giambattista Amati}, - editor = {Mounia Lalmas and Andy MacFarlane and Stefan M. R{\"{u}}ger and Anastasios Tombros and Theodora Tsikrika and Alexei Yavlinsky}, - title = {Frequentist and Bayesian Approach to Information Retrieval}, - booktitle = {Advances in Information Retrieval, 28th European Conference on {IR} Research, {ECIR} 2006, London, UK, April 10-12, 2006, Proceedings}, - series = {Lecture Notes in Computer Science}, - volume = {3936}, - pages = {13--24}, - publisher = {Springer}, - year = {2006}, - url = {https://doi.org/10.1007/11735106\_3}, - doi = {10.1007/11735106\_3}, - timestamp = {Tue, 14 May 2019 10:00:37 +0200}, - biburl = {https://dblp.org/rec/conf/ecir/Amati06.bib}, - bibsource = {dblp computer science bibliography, https://dblp.org} -} - -@inproceedings{macdonald-2021-pyterrier, - author = {Craig Macdonald and Nicola Tonellotto and Sean MacAvaney and Iadh Ounis}, - editor = {Gianluca Demartini and Guido Zuccon and J. 
Shane Culpepper and Zi Huang and Hanghang Tong}, - title = {PyTerrier: Declarative Experimentation in Python from {BM25} to Dense - Retrieval}, - booktitle = {{CIKM} '21: The 30th {ACM} International Conference on Information and Knowledge Management, Virtual Event, Queensland, Australia, November 1 - 5, 2021}, - pages = {4526--4533}, - publisher = {{ACM}}, - year = {2021}, - url = {https://doi.org/10.1145/3459637.3482013}, - doi = {10.1145/3459637.3482013}, - timestamp = {Tue, 02 Nov 2021 12:01:17 +0100}, - biburl = {https://dblp.org/rec/conf/cikm/MacdonaldTMO21.bib}, - bibsource = {dblp computer science bibliography, https://dblp.org} -}''' - - - print(run) - ret_bib = '' - ret_markdown = ['Please cite the approach / resources if you use them. Potential candidates are:'] - missing_references = [] - for t in ['run', 'dataset', 'task']: - ret_bib += bib_references[t] + '\n\n' - if markdown_references[t]: - ret_markdown += [markdown_references[t]] - else: - missing_references += [t] - - if missing_references: - ret_markdown += ['There are missing references for ' + (', '.join(missing_references)) + '. ' + - 'Please contact the organizers ' + - f'[{task["organizer"]}](https://www.tira.io/g/tira_org_{task["organizer_id"]}) or the team ' + - f'[{vm_id}]({link_to_discourse_team(vm_id)}) for clarification.' - ] - - return ret_bib.strip(), markdown('
    '.join(ret_markdown).strip()) - -@check_permissions -@check_resources_exist('json') -def run_details(request, task_id, vm_id, run_id): - run = model.get_run(dataset_id=None, vm_id=vm_id, run_id=run_id) - software, docker_software, run_upload = None, None, None - vm_id_from_run = None - - repro_details = {'tira-run-export': None, 'tira-run-cli': None, 'tira-run-python': None, 'docker': None} - - - if 'software_id' in run and run['software_id']: - software = model.get_software(software) - vm_id_from_run = software['vm'] - elif 'docker_software_id' in run and run['docker_software_id']: - docker_software = model.get_docker_software(run['docker_software_id']) - print(docker_software) - vm_id_from_run = docker_software['vm_id'] - - if docker_software['public_image_name']: - repro_details = construct_verbosity_output(docker_software['public_image_name'], docker_software['command'], - task_id + '/' + vm_id + '/' + docker_software['display_name'], - task_id, run['dataset']) - - elif 'upload_id' in run and run['upload_id']: - import tira.model as modeldb - run_upload = modeldb.Upload.objects.filter(vm__vm_id=vm_id, id=run['upload_id']).get() - vm_id_from_run = run_upload.vm.vm_id - - if not vm_id_from_run or vm_id != vm_id_from_run: - return HttpResponseNotAllowed(f"Access forbidden.") - - ret = {'description': 'No description is available.', 'previous_stage': None, - 'cli_command': None, 'docker_command': None, 'python_command': None - } - - ret['references_bibtex'], ret['references_markdown'] = __rendered_references(task_id, vm_id, run) - - for k, v in repro_details.items(): - ret[k] = v - - return JsonResponse({'status': 0, 'context': ret}) - - -@check_permissions -@check_resources_exist('json') -def software_details(request, task_id, vm_id, software_name): - docker_software = model.get_docker_software_by_name(software_name, vm_id, task_id) - - if not docker_software: - return JsonResponse({'status': 0, 'message': f'Could not find a software with name "{software_name}"'}) - - repro_details = {'tira-run-export': None, 'tira-run-cli': None, 'tira-run-python': None, 'docker': None, 'image': None, 'command': None} - if docker_software['public_image_name']: - repro_details = construct_verbosity_output(docker_software['public_image_name'], docker_software['command'], - task_id + '/' + vm_id + '/' + docker_software['display_name'], - task_id, '') - - ret = {'description': 'No description is available.', 'previous_stage': None, - 'cli_command': 'TBD cli.', 'docker_command': 'TBD docker.', 'python_command': 'TBD python.' 
- } - - for k, v in repro_details.items(): - ret[k] = v - - return JsonResponse({'status': 0, 'context': ret}) - - -@check_permissions -@check_resources_exist('json') -def run_execute_docker_software(request, task_id, vm_id, dataset_id, docker_software_id, docker_resources, rerank_dataset=None): - if not task_id or task_id is None or task_id == 'None': - return JsonResponse({"status": 1, "message": "Please specify the associated task_id."}) - - if not vm_id or vm_id is None or vm_id == 'None': - return JsonResponse({"status": 1, "message": "Please specify the associated vm_id."}) - - if not docker_software_id or docker_software_id is None or docker_software_id == 'None': - return JsonResponse({"status": 1, "message": "Please specify the associated docker_software_id."}) - - docker_software = model.get_docker_software(docker_software_id) - - if not docker_software: - return JsonResponse({"status": 1, "message": f"There is no docker image with id {docker_software_id}"}) - - input_run = None - if 'ir_re_ranker' in docker_software and docker_software.get('ir_re_ranker', False) and rerank_dataset and rerank_dataset.lower() != 'none': - reranking_datasets = model.get_all_reranking_datasets() - - if rerank_dataset not in reranking_datasets: - background_process = None - try: - background_process = model.create_re_rank_output_on_dataset(task_id, vm_id, software_id=None, - docker_software_id=docker_software_id, - dataset_id=dataset_id) - except Exception as e: - logger.warning(e) - - visit_job_message = 'Failed to start job.' - - if background_process: - visit_job_message = f'Please visit https://tira.io/background_jobs/{task_id}/{background_process} ' + \ - ' to view the progress of the job that creates the rerank output.' - - return JsonResponse({"status": 1, "message": - f"The execution of your software depends on the reranking dataset {rerank_dataset}" - f", but {rerank_dataset} was never executed on the dataset {dataset_id}. " - f"Please execute first the software on the specified dataset so that you can re-rank it. " - f"{visit_job_message}"}) - - input_run = reranking_datasets[rerank_dataset] - input_run['replace_original_dataset'] = True - - if dataset_id != input_run['dataset_id']: - return JsonResponse({"status": 1, "message": "There seems to be a configuration error:" + - f" The reranking dataset {input_run['dataset_id']} is not" + - f" the specified dataset {dataset_id}."}) - - assert dataset_id == input_run['dataset_id'] - - if not dataset_id or dataset_id is None or dataset_id == 'None': - return JsonResponse({"status": 1, "message": "Please specify the associated dataset_id."}) - - evaluator = model.get_evaluator(dataset_id) - - if not evaluator or 'is_git_runner' not in evaluator or not evaluator['is_git_runner'] or 'git_runner_image' not in evaluator or not evaluator['git_runner_image'] or 'git_runner_command' not in evaluator or not evaluator['git_runner_command'] or 'git_repository_id' not in evaluator or not evaluator['git_repository_id']: - return JsonResponse({"status": 1, "message": "The dataset is misconfigured. 
Docker-execute only available for git-evaluators"}) - - input_runs, errors = model.get_ordered_input_runs_of_software(docker_software, task_id, dataset_id, vm_id) - - if errors: - return JsonResponse({"status": 1, "message": errors[0]}) - - git_runner = model.get_git_integration(task_id=task_id) - git_runner.run_docker_software_with_git_workflow( - task_id, dataset_id, vm_id, get_tira_id(), evaluator['git_runner_image'], - evaluator['git_runner_command'], evaluator['git_repository_id'], evaluator['evaluator_id'], - docker_software['tira_image_name'], docker_software['command'], - 'docker-software-' + docker_software_id, docker_resources, - input_run if input_run else input_runs, - docker_software.get('mount_hf_model', None), - docker_software.get('tira_image_workdir', None), - ) - - running_pipelines = git_runner.all_running_pipelines_for_repository( - evaluator['git_repository_id'], - cache, - force_cache_refresh=True - ) - print('Refreshed Cache for repo ' + str(evaluator['git_repository_id']) + ' with ' + - str(len(running_pipelines)) + ' jobs.') - - return JsonResponse({'status': 0}, status=HTTPStatus.ACCEPTED) - - -@check_permissions -def stop_docker_software(request, task_id, user_id, run_id): - if not request.method == 'GET': - return JsonResponse({"status": 1, "message": "Only GET is allowed here"}) - else: - datasets = model.get_datasets_by_task(task_id) - git_runner = model.get_git_integration(task_id=task_id) - - if not git_runner: - return JsonResponse({"status": 1, "message": f"No git integration found for task {task_id}"}) - - for dataset in datasets: - git_runner.stop_job_and_clean_up( - model.get_evaluator(dataset["dataset_id"])["git_repository_id"], - user_id, run_id, cache - ) - - return JsonResponse({"status": 0, "message": "Run successfully stopped"}) - diff --git a/application/src/tira/forms.py b/application/src/tira/forms.py deleted file mode 100644 index 051ff8e4e..000000000 --- a/application/src/tira/forms.py +++ /dev/null @@ -1,109 +0,0 @@ -from django import forms - - -class LoginForm(forms.Form): - user_id = forms.CharField(label="User ID", max_length=100, - widget=forms.TextInput(attrs={"class": "uk-input", "placeholder": "Enter Tira User ID"})) - password = forms.CharField(label="Password", max_length=100, - widget=forms.PasswordInput(attrs={"class": "uk-input", "placeholder": "Enter Password"})) - - -class CreateVmForm(forms.Form): - """ hostname,vm_id,ova_id""" - bulk_create = forms.CharField(label="Enter VMs to be created (newline separated)", - widget=forms.Textarea(attrs={"class": "uk-textarea", "rows": "5", - "placeholder": "hostname,vm_id_1,ova_id\nhostname,vm_id_2,..."})) - - -class AdminCreateGroupForm(forms.Form): - vm_id = forms.CharField(label="Group ID", max_length=100, - widget=forms.TextInput(attrs={"class": "uk-input", "placeholder": "Enter a group ID (same as userName for VM)"})) - - -class ArchiveVmForm(forms.Form): - bulk_archive = forms.CharField(label="Enter VM_IDs to be archived (newline separated)", - widget=forms.Textarea(attrs={"class": "uk-textarea", "rows": "5", - "placeholder": "vm_id_1\nvm_id_2\n..."})) - - -class ModifyVmForm(forms.Form): - vm_id = forms.CharField(label="VM ID", max_length=200, - widget=forms.TextInput(attrs={"class": "uk-input", "placeholder": "Enter Tira VM ID"})) - memory = forms.IntegerField(label="Memory (GB)", min_value=1, max_value=128, - widget=forms.NumberInput(attrs={"class": "uk-input"})) - cpu = forms.IntegerField(label="CPU", min_value=1, max_value=32, - widget=forms.NumberInput(attrs={"class": 
"uk-input"})) - storage = forms.IntegerField(label="Storage (GB)", min_value=1, max_value=4000, - widget=forms.NumberInput(attrs={"class": "uk-input"})) - storage_type = forms.ChoiceField(label="Storage Type", choices=[("HDD", "HDD"), ("SFTP", "SFTP")], - widget=forms.Select(attrs={"class": "uk-select"})) - - -class CreateTaskForm(forms.Form): - """ task_id, task_name, task_description, organizer, website """ - task_id = forms.CharField(label="Task ID", max_length=100, - widget=forms.TextInput(attrs={"class": "uk-input", - "placeholder": "task-id-lowercase-with-dashes"})) - task_name = forms.CharField(label="Task Name", max_length=200, - widget=forms.TextInput(attrs={"class": "uk-input", - "placeholder": "Titlecase Name of the Task."})) - master_vm_id = forms.CharField(label="Master VM_ID", max_length=200, - widget=forms.TextInput(attrs={"class": "uk-input", - "placeholder": "id-lowercase-with-dashes"})) - organizer = forms.CharField(label="Host_ID", max_length=100, - widget=forms.TextInput(attrs={"class": "uk-input"})) - website = forms.URLField(label="Task Website", max_length=200, initial='http://', - widget=forms.URLInput(attrs={"class": "uk-input"})) - task_description = forms.CharField(label="Task Description", - widget=forms.Textarea(attrs={"class": "uk-textarea", "rows": "3", - "placeholder": "Describe your task"})) - - -class AddDatasetForm(forms.Form): - """ id_prefix, dataset_name, evaluator: master_vm_id, command, workingDirectory, measures, measureKeys """ - task_id = forms.CharField(label="For Task ID", max_length=100, required=True, - widget=forms.TextInput(attrs={"class": "uk-input", - "placeholder": "task-id-lowercase-with-dashes"})) - - dataset_id_prefix = forms.SlugField(label="Dataset ID prefix", max_length=200, required=True, - widget=forms.TextInput(attrs={"class": "uk-input"})) - - dataset_name = forms.CharField(label="Dataset Name", max_length=200, required=True, - widget=forms.TextInput(attrs={"class": "uk-input", - "placeholder": "Titlecase Name of the Dataset."})) - create_training = forms.BooleanField(label="training", required=False, initial=True, - widget=forms.CheckboxInput(attrs={"class": "uk-checkbox"})) - create_test = forms.BooleanField(label="test", required=False, initial=True, - widget=forms.CheckboxInput(attrs={"class": "uk-checkbox"})) - create_dev = forms.BooleanField(label="dev", required=False, - widget=forms.CheckboxInput(attrs={"class": "uk-checkbox"})) - - master_vm_id = forms.CharField(label="Master VM_ID", max_length=200, required=True, - widget=forms.TextInput(attrs={"class": "uk-input", - "placeholder": "id-lowercase-with-dashes"})) - command = forms.CharField(label="Evaluator Command", max_length=200, required=False, - widget=forms.TextInput(attrs={"class": "uk-input", - "placeholder": "Command to be run from working directory."})) - working_directory = forms.CharField(label="Evaluator Working Directory", max_length=200, required=False, - widget=forms.TextInput(attrs={"class": "uk-input", - "placeholder": "/path/to/directory. Defaults to home."})) - measures = forms.CharField(label="Measures (separate by newline)", required=False, - widget=forms.Textarea(attrs={"class": "uk-textarea", "rows": "5", - "placeholder": "Measure Name,measure_key\n" - "Name will be displayed to the users.\n" - "measure_key must be as output by the evaluation software."})) - - -class ReviewForm(forms.Form): - """ Form to create Reviews. 
Delivered on the tira.review route and handeled by the review view.""" - no_errors = forms.BooleanField(label="No Errors", required=False, - widget=forms.CheckboxInput( - attrs={"id": "no-error-checkbox", "class": "uk-checkbox"})) - output_error = forms.BooleanField(label="Output Error", required=False, - widget=forms.CheckboxInput( - attrs={"id": "output-error-checkbox", "class": "uk-checkbox"})) - software_error = forms.BooleanField(label="Software Error", required=False, - widget=forms.CheckboxInput( - attrs={"id": "software-error-checkbox", "class": "uk-checkbox"})) - comment = forms.CharField(label="Comment", required=False, - widget=forms.Textarea(attrs={"class": "uk-textarea", "rows": "6"})) diff --git a/application/src/tira/frontend-vuetify/.devcontainer.json b/application/src/tira/frontend-vuetify/.devcontainer.json deleted file mode 100644 index 2df3fe9f6..000000000 --- a/application/src/tira/frontend-vuetify/.devcontainer.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "image": "webis/tira:vuetify-dev-0.0.1", - "customizations": { - "vscode": { - "extensions": [] - } - } -} diff --git a/application/src/tira/frontend-vuetify/.gitignore b/application/src/tira/frontend-vuetify/.gitignore deleted file mode 100644 index 8fddcf370..000000000 --- a/application/src/tira/frontend-vuetify/.gitignore +++ /dev/null @@ -1,32 +0,0 @@ -.DS_Store -node_modules -/dist - - -# local env files -.env.local -.env.*.local - -# Log files -npm-debug.log* -yarn-debug.log* -yarn-error.log* -pnpm-debug.log* - -# Editor directories and files -.idea -.vscode -*.suo -*.ntvs* -*.njsproj -*.sln -*.sw? -coverage/badge-branches.svg -coverage/badge-functions.svg -coverage/badge-statements.svg -coverage/clover.xml -coverage/coverage-final.json -coverage/coverage-summary.json -coverage/lcov-report/ -coverage/lcov.info - diff --git a/application/src/tira/frontend-vuetify/Dockerfile.dev b/application/src/tira/frontend-vuetify/Dockerfile.dev deleted file mode 100644 index 4fb1d3a77..000000000 --- a/application/src/tira/frontend-vuetify/Dockerfile.dev +++ /dev/null @@ -1,15 +0,0 @@ -# docker build -t webis/tira:vuetify-dev-0.0.1 -f Dockerfile.dev . -FROM node - -ADD package.json yarn.lock vite.config.ts jest.config.js babel.config.js /tmp-del/ - -RUN cd /tmp-del \ - && yarn create vuetify \ - && yarn install \ - && yarn install --dev \ - && mv node_modules/* /usr/local/lib/node_modules \ - && mv node_modules/.bin /usr/local/lib/node_modules/.bin \ - && mv node_modules/.yarn-integrity /usr/local/lib/node_modules/.yarn-integrity \ - && cd / \ - && rm -R /tmp-del/ - diff --git a/application/src/tira/frontend-vuetify/README.md b/application/src/tira/frontend-vuetify/README.md deleted file mode 100644 index 136e38b33..000000000 --- a/application/src/tira/frontend-vuetify/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# TIRA Vuetify Frontend - -Start with `yarn dev`. - -Go to [http://127.0.0.1:3000/#/tasks](http://127.0.0.1:3000/#/tasks) for plain and to [http://127.0.0.1:3000/index-discourse.html#/tasks](http://127.0.0.1:3000/index-discourse.html#/tasks) for discourse adjusted development. - -## Project setup - -``` -# yarn -yarn -``` - -### Compiles and hot-reloads for development - -``` -# yarn -yarn dev -``` - -### Compiles and minifies for production - -``` -# yarn -yarn build -``` - -### Customize configuration - -See [Configuration Reference](https://vitejs.dev/config/). 
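Editor's note on the deleted `forms.py` above: it defined plain server-rendered Django forms (for example `ReviewForm` for the review workflow). As a hedged illustration only, and not code from this diff, the sketch below shows how such a form is conventionally validated inside a Django view; the view name `save_review` is hypothetical, and the import path `tira.forms` is assumed from the deleted module's location.

```python
# Hypothetical sketch (not part of this diff): conventional validation of a
# Django form such as the deleted ReviewForm inside a view.
from django.http import JsonResponse

from tira.forms import ReviewForm  # module removed in this diff; path assumed


def save_review(request):
    form = ReviewForm(request.POST)
    if not form.is_valid():
        # form.errors maps field names to their validation messages
        return JsonResponse({"status": 1, "message": form.errors.as_json()})

    # cleaned_data holds the typed values, e.g. booleans for the checkbox fields
    return JsonResponse({
        "status": 0,
        "context": {
            "no_errors": form.cleaned_data["no_errors"],
            "comment": form.cleaned_data["comment"],
        },
    })
```

Going through `is_valid()` and `cleaned_data` keeps widget attributes and validation rules in one declarative place, which is presumably why these forms existed instead of parsing `request.POST` ad hoc.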
diff --git a/application/src/tira/frontend-vuetify/coverage/badge-lines.svg b/application/src/tira/frontend-vuetify/coverage/badge-lines.svg deleted file mode 100644 index 9febb8bf5..000000000 --- a/application/src/tira/frontend-vuetify/coverage/badge-lines.svg +++ /dev/null @@ -1 +0,0 @@ -Coverage:lines: 52.99%Coverage:lines52.99% \ No newline at end of file diff --git a/application/src/tira/frontend-vuetify/index.html b/application/src/tira/frontend-vuetify/index.html deleted file mode 100644 index bfffae889..000000000 --- a/application/src/tira/frontend-vuetify/index.html +++ /dev/null @@ -1,16 +0,0 @@ - - - - - - - - TIRA Reproducible Experiments - - - -
    - - - - diff --git a/application/src/tira/frontend-vuetify/node_modules b/application/src/tira/frontend-vuetify/node_modules deleted file mode 120000 index 23dd0736e..000000000 --- a/application/src/tira/frontend-vuetify/node_modules +++ /dev/null @@ -1 +0,0 @@ -/usr/local/lib/node_modules \ No newline at end of file diff --git a/application/src/tira/frontend-vuetify/src/TaskOverview.vue b/application/src/tira/frontend-vuetify/src/TaskOverview.vue deleted file mode 100644 index 923032019..000000000 --- a/application/src/tira/frontend-vuetify/src/TaskOverview.vue +++ /dev/null @@ -1,160 +0,0 @@ - - - diff --git a/application/src/tira/frontend-vuetify/src/Tasks.vue b/application/src/tira/frontend-vuetify/src/Tasks.vue deleted file mode 100644 index 24e9eb42c..000000000 --- a/application/src/tira/frontend-vuetify/src/Tasks.vue +++ /dev/null @@ -1,117 +0,0 @@ - - - diff --git a/application/src/tira/frontend-vuetify/src/__tests__/user-metadata-extraction-test.ts b/application/src/tira/frontend-vuetify/src/__tests__/user-metadata-extraction-test.ts deleted file mode 100644 index 58bf31719..000000000 --- a/application/src/tira/frontend-vuetify/src/__tests__/user-metadata-extraction-test.ts +++ /dev/null @@ -1,54 +0,0 @@ -import { extractRole, extractCsrf } from '../utils' - - -function doc(html: string) { - return new DOMParser().parseFromString(html, 'text/html') -} - -test('Guest is the default role if a non-existing role is specified.', () => { - let d = doc('') - - expect(extractRole(d)).toBe('guest') -}); - -test('Guest is the default role if the json can not be parsed.', () => { - let d = doc('') - - expect(extractRole(d)).toBe('guest') -}); - -test('Guest is the default role if a valid json is in a different tag.', () => { - let d = doc('') - - expect(extractRole(d)).toBe('guest') -}); - -test('Extract admin if admin is specified.', () => { - let d = doc('') - - expect(extractRole(d)).toBe('admin') -}); - -test('Extract participant if participant is specified.', () => { - let d = doc('') - - expect(extractRole(d)).toBe('participant') -}); - -test('Csrf Token can be extracted.', () => { - let d = doc('
    ') - - expect(extractCsrf(d)).toBe('xyz') -}) - -test('Csrf Token can be extracted and is string.', () => { - let d = doc('
    ') - - expect(extractCsrf(d)).toBe('1234') -}) - -test('Csrf Token is empty string if not available.', () => { - let d = doc('
    ') - - expect(extractCsrf(d)).toBe('') -}) diff --git a/application/src/tira/frontend-vuetify/src/components/EditTask.vue b/application/src/tira/frontend-vuetify/src/components/EditTask.vue deleted file mode 100644 index 63499430a..000000000 --- a/application/src/tira/frontend-vuetify/src/components/EditTask.vue +++ /dev/null @@ -1,201 +0,0 @@ - - - diff --git a/application/src/tira/frontend-vuetify/src/components/RunActions.vue b/application/src/tira/frontend-vuetify/src/components/RunActions.vue deleted file mode 100644 index 56c23dea0..000000000 --- a/application/src/tira/frontend-vuetify/src/components/RunActions.vue +++ /dev/null @@ -1,148 +0,0 @@ - - diff --git a/application/src/tira/frontend-vuetify/src/components/TiraTaskAdmin.vue b/application/src/tira/frontend-vuetify/src/components/TiraTaskAdmin.vue deleted file mode 100644 index 335b16fd8..000000000 --- a/application/src/tira/frontend-vuetify/src/components/TiraTaskAdmin.vue +++ /dev/null @@ -1,106 +0,0 @@ - - - diff --git a/application/src/tira/frontend-vuetify/src/submission-components/DockerSubmission.vue b/application/src/tira/frontend-vuetify/src/submission-components/DockerSubmission.vue deleted file mode 100644 index d9ecd4019..000000000 --- a/application/src/tira/frontend-vuetify/src/submission-components/DockerSubmission.vue +++ /dev/null @@ -1,128 +0,0 @@ - - - \ No newline at end of file diff --git a/application/src/tira/frontend-vuetify/src/submission-components/UploadSubmission.vue b/application/src/tira/frontend-vuetify/src/submission-components/UploadSubmission.vue deleted file mode 100644 index ef422f0e3..000000000 --- a/application/src/tira/frontend-vuetify/src/submission-components/UploadSubmission.vue +++ /dev/null @@ -1,352 +0,0 @@ - - - diff --git a/application/src/tira/git_runner.py b/application/src/tira/git_runner.py deleted file mode 100644 index cb787e926..000000000 --- a/application/src/tira/git_runner.py +++ /dev/null @@ -1,88 +0,0 @@ -from django.conf import settings -from django.template.loader import render_to_string -from git import Repo -import tempfile -import logging -import gitlab -from pathlib import Path -import shutil -from datetime import datetime as dt -import os -import stat -import string -import json -from slugify import slugify -from tqdm import tqdm -from glob import glob -import subprocess -import markdown -from itertools import chain - -from copy import deepcopy -from tira.grpc_client import new_transaction -from tira.model import TransactionLog, EvaluationLog -from .proto import tira_host_pb2, tira_host_pb2_grpc -import requests - -logger = logging.getLogger('tira') - - -def all_git_runners(): - from tira.tira_model import model - ret = [] - for git_integration in model.all_git_integrations(return_dict=True): - try: - ret += [get_git_runner(git_integration)] - except Exception as e: - print(f'Could not load git integration: {git_integration}. Skip') - logger.warn(f'Could not load git integration: {git_integration}. 
Skip') - - return ret - - -def check_that_git_integration_is_valid(namespace_url, private_token): - import tira.model as modeldb - from tira.tira_model import model - git_integration = {'namespace_url': namespace_url, 'private_token': private_token} - - try: - git_integration = modeldb.GitIntegration.objects.get(namespace_url=namespace_url) - git_integration = model._git_integration_to_dict(git_integration) - git_integration['private_token'] = private_token - except: - pass - - try: - git_runner = get_git_runner(git_integration) - - if not git_runner: - return (False, 'Invalid Parameters.') - - all_user_repositories = git_runner.all_user_repositories() - if all_user_repositories is not None and len(all_user_repositories) >= 0: - return (True, 'The git credentials are valid (tested by counting repositories).') - else: - return (False, 'The git credentials are not valid (tested by counting repositories).') - except Exception as e: - return (False, f'The Git credentials are not valid: {e}') - - -def get_git_runner(git_integration): - from tira.git_runner_integration import GitLabRunner, GithubRunner - - if not git_integration or 'namespace_url' not in git_integration: - return None - - if 'github.com' in git_integration['namespace_url']: - return GithubRunner(git_integration['private_token']) - else: - return GitLabRunner( - git_integration['private_token'], git_integration['host'], git_integration['user_name'], - git_integration['user_password'], git_integration['gitlab_repository_namespace_id'], - git_integration['image_registry_prefix'], git_integration['user_repository_branch'] - ) - -def get_git_runner_for_software_integration(): - from tira.git_runner_integration import GithubRunner - return GithubRunner(settings.GITHUB_TOKEN) - diff --git a/application/src/tira/git_runner_integration.py b/application/src/tira/git_runner_integration.py deleted file mode 100644 index a4bc1d8b7..000000000 --- a/application/src/tira/git_runner_integration.py +++ /dev/null @@ -1,1251 +0,0 @@ -from django.conf import settings -from django.template.loader import render_to_string -from git import Repo -import tempfile -import logging -import gitlab -from github import Github -from pathlib import Path -import shutil -from datetime import datetime as dt -import os -import stat -import string -import json -from slugify import slugify -from tqdm import tqdm -from glob import glob -import markdown -from itertools import chain - -from copy import deepcopy -from tira.grpc_client import new_transaction -from tira.model import TransactionLog, EvaluationLog -from .proto import tira_host_pb2, tira_host_pb2_grpc -import requests - -logger = logging.getLogger('tira') - - -def normalize_file(file_content, tira_user_name, task_id): - default_datasets = {'webpage-classification': 'webpage-classification/tiny-sample-20231023-training', - 'ir-lab-jena-leipzig-wise-2023': 'workshop-on-open-web-search/retrieval-20231027-training', - 'ir-lab-jena-leipzig-sose-2023': 'workshop-on-open-web-search/retrieval-20231027-training', - 'workshop-on-open-web-search': 'workshop-on-open-web-search/retrieval-20231027-training', - 'ir-benchmarks': 'workshop-on-open-web-search/retrieval-20231027-training', - } - - return file_content.replace('TIRA_USER_FOR_AUTOMATIC_REPLACEMENT', tira_user_name) \ - .replace('TIRA_TASK_ID_FOR_AUTOMATIC_REPLACEMENT', task_id) \ - .replace('TIRA_DATASET_FOR_AUTOMATIC_REPLACEMENT', default_datasets.get(task_id, '')) - - -def convert_size(size_bytes): - import math - if size_bytes == 0: - return "0B" - size_name = 
("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") - i = int(math.floor(math.log(size_bytes, 1024))) - p = math.pow(1024, i) - s = round(size_bytes / p, 2) - - return f"{s} {size_name[i]}" - - -def write_to_file(file_name, content): - open(file_name, 'w').write(content) - - -class GitRunner: - def create_task_repository(self, task_id): - """ - Create the repository with the name "task_id" in the organization. - An organization has task repositories (execute and evaluate submissions) - and multiple user repositories (hosts docker images). - Does nothing, if the repository already exists. - - Parameters - ---------- - task_id: str - Name of the task repository - """ - logger.info(f"Creating task repository for task {task_id} ...") - repo = self.existing_repository(task_id) - if repo: - return int(repo.id) - - project = self._create_task_repository_on_gitHoster(task_id) - - with tempfile.TemporaryDirectory() as tmp_dir: - repo = Repo.init(tmp_dir) - write_to_file(str(tmp_dir) + '/' + self.template_ci_file_name(), self.template_ci()) - write_to_file(str(tmp_dir) + '/README.md', self.template_readme(task_id)) - write_to_file(str(tmp_dir) + '/tira', self.template_tira_cmd_script(project)) - os.chmod(str(tmp_dir) + '/tira', os.stat(str(tmp_dir) + '/tira').st_mode | stat.S_IEXEC) - - repo.create_remote('origin', self.repo_url(project.id)) - self.ensure_branch_is_main(repo) - repo.index.add(['README.md', self.template_ci_file_name(), 'tira']) - repo.index.commit('Initial commit') - repo.remote().push(self.user_repository_branch, o='ci.skip') - - logger.info(f"Created task repository for task {task_id} with new id {project.id}") - return project.id - - def template_ci_file_name(self): - raise ValueError('ToDo: Implement.') - - def _create_task_repository_on_gitHoster(self, task_id): - raise ValueError('ToDo: Implement.') - - def repo_url(self, repo_id): - raise ValueError('ToDo: Implement.') - - def ensure_branch_is_main(self, repo): - try: - # for some git versions we need to manually switch, may fail if the branch is already correct - repo.git.checkout('-b', self.user_repository_branch) - except: - pass - - def clone_repository_and_create_new_branch(self, repo_url, branch_name, directory): - repo = Repo.clone_from(repo_url, directory, branch='main') - repo.head.reference = repo.create_head(branch_name) - - return repo - - def dict_to_key_value_file(self, d): - return '\n'.join([(k + '=' + str(v)).strip() for (k,v) in d.items()]) - - def write_metadata_for_ci_job_to_repository(self, tmp_dir, task_id, transaction_id, dataset_id, vm_id, run_id, identifier, - git_runner_image, git_runner_command, evaluator_id, - user_image_to_execute, user_command_to_execute, tira_software_id, - resources, input_run, mount_hf_model, workdir_in_user_image): - job_dir = Path(tmp_dir) / dataset_id / vm_id / run_id - job_dir.mkdir(parents=True, exist_ok=True) - - metadata = { - # The pipeline executed first a pseudo software so the following three values are - # only dummy values so that the software runs successful. 
- 'TIRA_IMAGE_TO_EXECUTE': user_image_to_execute, - 'TIRA_VM_ID': vm_id, - 'TIRA_COMMAND_TO_EXECUTE': user_command_to_execute, - 'TIRA_SOFTWARE_ID': tira_software_id, - 'TIRA_DATASET_ID': dataset_id, - 'TIRA_RUN_ID': run_id, - 'TIRA_CPU_COUNT': str(settings.GIT_CI_AVAILABLE_RESOURCES[resources]['cores']), - 'TIRA_MEMORY_IN_GIBIBYTE': str(settings.GIT_CI_AVAILABLE_RESOURCES[resources]['ram']), - 'TIRA_GPU': str(settings.GIT_CI_AVAILABLE_RESOURCES[resources]['gpu']), - 'TIRA_DATA': str(settings.GIT_CI_AVAILABLE_RESOURCES[resources]['data']), - 'TIRA_DATASET_TYPE': 'training' if 'training' in dataset_id else 'test', - - # The actual important stuff for the evaluator: - 'TIRA_TASK_ID': task_id, - 'TIRA_EVALUATOR_TRANSACTION_ID': transaction_id, - 'TIRA_GIT_ID': identifier, - 'TIRA_EVALUATION_IMAGE_TO_EXECUTE': git_runner_image, - 'TIRA_EVALUATION_COMMAND_TO_EXECUTE': git_runner_command, - 'TIRA_EVALUATION_SOFTWARE_ID': evaluator_id, - } - - if mount_hf_model and type(mount_hf_model) == str and len(mount_hf_model.strip()) > 0: - metadata['TIRA_MOUNT_HF_MODEL'] = mount_hf_model.strip() - - if workdir_in_user_image and type(workdir_in_user_image) == str and len(workdir_in_user_image.strip()) > 0: - metadata['TIRA_WORKDIR'] = workdir_in_user_image.strip() - - if input_run and type(input_run) != list: - metadata['TIRA_INPUT_RUN_DATASET_ID'] = input_run['dataset_id'] - metadata['TIRA_INPUT_RUN_VM_ID'] = input_run['vm_id'] - metadata['TIRA_INPUT_RUN_RUN_ID'] = input_run['run_id'] - if input_run.get('replace_original_dataset', False): - metadata['TIRA_INPUT_RUN_REPLACES_ORIGINAL_DATASET'] = 'true' - elif input_run and type(input_run) == list and len(input_run) > 0: - metadata['TIRA_INPUT_RUN_DATASET_IDS'] = json.dumps([i['dataset_id'] for i in input_run]) - metadata['TIRA_INPUT_RUN_VM_IDS'] = json.dumps([i['vm_id'] for i in input_run]) - metadata['TIRA_INPUT_RUN_RUN_IDS'] = json.dumps([i['run_id'] for i in input_run]) - - open(job_dir / 'job-to-execute.txt', 'w').write(self.dict_to_key_value_file(metadata)) - - def create_user_repository(self, user_name): - """ - Create the repository for user with the name "user_name" in the organization. - An organization has task repositories (execute and evaluate submissions) - and multiple user repositories (hosts docker images). - Creates an authentication token, that allows the user to upload images to this repository. - Does nothing, if the repository already exists. - - Parameters - ---------- - user_name: str - Name of the user. 
The created repository has the name tira-user-${user_name} - """ - client = self.gitHoster_client - repo = 'tira-user-' + user_name - existing_repo = self.existing_repository(repo) - if existing_repo: - return existing_repo.id - - project = self._create_task_repository_on_gitHoster(repo) - - token = self._create_access_token_gitHoster(project, repo) - - self.initialize_user_repository(project.id, repo, token.token) - - return project.id - - def initialize_user_repository(self, git_repository_id, repo_name, token): - project_readme = render_to_string('tira/git_user_repository_readme.md', context={ - 'user_name': repo_name.replace('tira-user-', ''), - 'repo_name': repo_name, - 'token': token, - 'image_prefix': self.image_registry_prefix + '/' + repo_name + '/' - }) - - with tempfile.TemporaryDirectory() as tmp_dir: - repo = Repo.init(tmp_dir) - write_to_file(str(tmp_dir) + '/README.md', project_readme) - - repo.create_remote('origin', self.repo_url(git_repository_id)) - self.ensure_branch_is_main(repo) - repo.index.add(['README.md']) - repo.index.commit('Initial commit') - repo.remote().push(self.user_repository_branch) - - def docker_images_in_user_repository(self, user_name, cache=None, force_cache_refresh=False): - """ TODO Dane - List all docker images uploaded by the user with the name "user_name" to his user repository - - Parameters - ---------- - user_name: str - Name of the user. - - Return - ---------- - images: Iterable[str] - The images uploaded by the user. - """ - cache_key = 'docker-images-in-user-repository-tira-user-' + user_name - if cache: - ret = cache.get(cache_key) - if ret is not None and not force_cache_refresh: - return ret - - ret = [] - repo = self.existing_repository('tira-user-' + user_name) - if not repo: - self.create_user_repository(user_name) - return ret - - covered_images = set() - for registry_repository in repo.repositories.list(): - for registry in registry_repository.manager.list(): - for image in registry.tags.list(get_all=True): - if image.location in covered_images: - continue - covered_images.add(image.location) - image_manifest = self.get_manifest_of_docker_image_image_repository(image.location.split(':')[0], image.location.split(':')[1], cache, force_cache_refresh) - - ret += [{'image': image.location, - 'architecture': image_manifest['architecture'], - 'created': image_manifest['created'].split('.')[0], - 'size': image_manifest['size'], - 'raw_size': image_manifest['raw_size'], - 'digest': image_manifest['digest'] - }] - - ret = sorted(list(ret), key=lambda i: i['image']) - - if cache: - logger.info(f"Cache refreshed for key {cache_key} ...") - cache.set(cache_key, ret) - - return ret - - def help_on_uploading_docker_image(self, user_name, cache=None, force_cache_refresh=False): - """ TODO - Each user repository has a readme.md , that contains instructions on - how to upload images to the repository. - This method extracts those instructions from the readme and returns them. - - Parameters - ---------- - user_name: str - Name of the user. - - Return - ---------- - help: [str] - The personalized instructions on how to upload images - to be shown in the webinterface. 
- """ - cache_key = 'help-on-uploading-docker-image-tira-user-' + user_name - if cache: - ret = cache.get(cache_key) - if ret is not None and not force_cache_refresh: - return ret - - repo = self.existing_repository('tira-user-' + user_name) - if not repo: - self.create_user_repository(user_name) - return self.help_on_uploading_docker_image(user_name, cache) - - # Hacky at the moment - ret = repo.files.get('README.md', ref='main').decode().decode('UTF-8').split('## Create an docker image')[1] - ret = '## Create an docker image\n\n' + ret - - ret = markdown.markdown(ret) - - if cache: - logger.info(f"Cache refreshed for key {cache_key} ...") - cache.set(cache_key, ret) - - return ret - - def add_new_tag_to_docker_image_repository(self, repository_name, existing_tag, new_tag): - """ TODO Niklas - The repository with the name "repository_name" contains an docker image - with the tag "existing_tag". - This method adds the tag "new_tag" to the image with the tag "existing_tag". - - Parameters - ---------- - repository_name: str - Name of the repository with an docker image with the tag "existing_tag". - - existing_tag: str - Tag of the docker image. - - new_tag: str - The to be added tag of the docker image. - """ - raise ValueError('ToDo: Implement.') - - def extract_configuration_of_finished_job(self, git_repository_id, dataset_id, vm_id, run_id): - gl_project = self.gitHoster_client.projects.get(int(git_repository_id)) - with tempfile.TemporaryDirectory() as tmp_dir: - repo = self.clone_repository_and_create_new_branch(self.repo_url(git_repository_id), 'dummy-br', tmp_dir) - f = glob(tmp_dir + '/' + dataset_id + '/' + vm_id + '/' + run_id + '/job-executed-on-*.txt') - - if len(f) != 1: - return None - - return open(f[0]).read() - - def all_user_repositories(self): - """ - Lists all user repositories in the organization. - - Return - ---------- - user_repositories: Iterable[str] - List of all user repositories in the organization. - """ - raise ValueError('ToDo: Implement.') - - def run_and_evaluate_user_software(self, task_id, dataset_id, - user_name, run_id, user_software_id, user_docker_image, user_command, - git_repository_id, evaluator_id, evaluator_software_id, - evaluator_docker_image, evaluator_command): - """ TODO - Execute the specified software (docker image and a command) - on a dataset and evaluate the output. - - Erzeugt neue Datei und commited diese als Trigger für Workflow/CI. - - Parameters - ---------- - task_id: str - Name of the task repository. - - dataset_id: str - Dataset on which the software is to be executed. - - user_name: str - Name of the user. The repository of the user has the name tira-user-${user_name}. - - run_id: str - Identifier of the resulting run. - - user_software_id: str - ID of the to be executed software. - (identifies docker images and command) - - user_docker_image: str - The to be execued docker image. - - user_command: str - The to be executed command in "user_docker_image". - - git_repository_id: str - Identifier of the task repository - (gitlab: int; github: ???) - - evaluator_id: str - Identifier of the resulting evaluation. - - evaluator_software_id: str - ID of the to be executed evaluation software. - (identifies the evaluation docker images and evaluation command) - - - evaluator_docker_image: str - The to be execued docker image used for evaluation. - - evaluator_command: str - The to be executed evaluation command in "evaluation_docker_image". - - Return - ---------- - transaction_id: str - ID of the running transaction. 
- """ - raise ValueError('ToDo: Implement.') - - def stop_job_and_clean_up(self, git_repository_id, user_name, run_id): - """ - All runs that are currently running, pending, or failed - life in a dedicated branch. - Every successfully (without errors/failures and with evaluation) - executed software is merged into the main branch. - This method stops a potentially running pipeline identified by the run_id - of the user "user_id" and deletes the branch. - - Parameters - ---------- - git_repository_id: str - Identifier of the task repository. - (gitlab: int; github: int) - - user_name: str - Name of the user. The repository of the user has the name "tira-user-${user_name}". - - run_id: str - Identifier of the to be stopped run. - - Return - ---------- - - - """ - raise ValueError('ToDo: Implement.') - - def yield_all_running_pipelines(self, git_repository_id): - """ TODO - Yield all pipelines/workflows that are currently running, pending, or failed. - - - Parameters - ---------- - git_repository_id: str - Identifier of the task repository. - (gitlab: int; github: int) - - Return - ---------- - jobs: Iteratable[dict] - all pipelines/workflows that are currently running, pending, or failed. - Each entry has the following fields: - 'run_id', - 'execution', - 'stdOutput', - 'started_at', - 'pipeline_name', - 'job_config', - 'pipeline' - """ - raise ValueError('ToDo: Implement.') - - def archive_software(self, working_directory, software_definition, download_images, persist_images, upload_images): - from tira.util import run_cmd, docker_image_details - image = software_definition['TIRA_EVALUATION_IMAGE_TO_EXECUTE'] if 'TIRA_EVALUATION_IMAGE_TO_EXECUTE' in software_definition else software_definition['TIRA_IMAGE_TO_EXECUTE'] - dockerhub_image = software_definition['TIRA_IMAGE_TO_EXECUTE_IN_DOCKERHUB'] if 'TIRA_IMAGE_TO_EXECUTE_IN_DOCKERHUB' in software_definition else None - - - if download_images: - print(f'Run docker pull {image}.') - run_cmd(['podman', 'pull', image]) - - description = docker_image_details(image) - - Path(working_directory + '/docker-softwares').mkdir(parents=True, exist_ok=True) - image_name = working_directory + '/docker-softwares/' + description['image_id'] + '.tar' - - if persist_images and not os.path.isfile(image_name): - print(f'Run image save {image} -o {image_name}.') - run_cmd(['podman', 'image', 'save', image, '-o', image_name]) - - if upload_images and dockerhub_image: - run_cmd(['podman', 'tag', image, dockerhub_image]) - print(f'Run image push {dockerhub_image}.') - run_cmd(['podman', 'push', dockerhub_image]) - - description['local_image'] = image_name - software_definition['image_details'] = description - - return software_definition - - def archive_all_softwares(self, working_directory, download_images=True, persist_images=True, upload_images=True): - existing_software = [json.loads(i) for i in open(working_directory + '/.tira/submitted-software.jsonl', 'r')] - existing_evaluators = [json.loads(i) for i in open(working_directory + '/.tira/evaluators.jsonl', 'r')] - - software, evaluators = [], [] - - for s in tqdm(existing_software, 'Software'): - software += [json.dumps(self.archive_software(working_directory, s, download_images, persist_images, upload_images))] - - for e in tqdm(existing_evaluators, 'Evaluators'): - evaluators += [json.dumps(self.archive_software(working_directory, e, download_images, persist_images, upload_images))] - - open((Path(working_directory) / '.tira' / 'submitted-software.jsonl').absolute(), 'w').write('\n'.join(software)) - 
open((Path(working_directory) / '.tira' / 'evaluators.jsonl').absolute(), 'w').write('\n'.join(evaluators)) - - def archive_repository(self, repo_name, working_directory, copy_runs=True, download_images=True, persist_images=True, upload_images=True, persist_datasets=True): - from tira.tira_model import get_docker_software, get_docker_softwares_with_runs, get_dataset - from django.template.loader import render_to_string - - softwares = set() - evaluations = set() - datasets = {} - - if not os.path.isdir(working_directory + '/.git'): - repo = self.existing_repository(repo_name) - print(f'Clone repository {repo.name}. Working in {working_directory}') - repo = Repo.clone_from(self.repo_url(repo.id), working_directory, branch='main') - else: - print(f'Use existing repo in {working_directory}.') - self.archive_all_softwares(working_directory, download_images, persist_images, upload_images) - return - - Path(working_directory + '/docker-softwares').mkdir(parents=True, exist_ok=True) - - print("Exporting docker images...") - downloaded_images = set() - for job_file in tqdm(sorted(list(glob(working_directory + '/*/*/*/job-executed-on*.txt'))), "Export Docker Images"): - job = [i.split('=', 1) for i in open(job_file, 'r')] - job = {k.strip(): v.strip() for k, v in job} - image = job['TIRA_IMAGE_TO_EXECUTE'].strip() - - if self.image_registry_prefix.lower() not in image.lower(): - continue - - - datasets[job['TIRA_DATASET_ID']] = get_dataset(job['TIRA_DATASET_ID']) - - try: - software_metadata = get_docker_software( - int(job["TIRA_SOFTWARE_ID"].replace('docker-software-', ''))) - if copy_runs: - runs = get_docker_softwares_with_runs(job["TIRA_TASK_ID"], job["TIRA_VM_ID"]) - except: - continue - - if copy_runs: - - runs = [i for i in runs if - int(i['docker_software_id']) == (int(job["TIRA_SOFTWARE_ID"].replace('docker-software-', '')))] - runs = list(chain(*[i['runs'] for i in runs])) - runs = [i for i in runs if - (i['input_run_id'] == job['TIRA_RUN_ID'] or i['run_id'] == job['TIRA_RUN_ID'])] - - for run in runs: - result_out_dir = (Path(job_file.split('/job-executed-on')[0]) / ( - 'evaluation' if run['is_evaluation'] else 'run')) - result_out_dir.mkdir(parents=True, exist_ok=True) - shutil.copytree( - Path(settings.TIRA_ROOT) / 'data' / 'runs' / job['TIRA_DATASET_ID'] / job['TIRA_VM_ID'] / run[ - 'run_id'], result_out_dir / run['run_id'] - ) - - image_name = (slugify(image) + '.tar').replace('/', '-') - - dockerhub_image = f'docker.io/webis/{job["TIRA_TASK_ID"]}-submissions:' + ( - image_name.split('-tira-user-')[1]).replace('.tar', '').strip() - - downloaded_images.add(image) - softwares.add(json.dumps({ - "TIRA_IMAGE_TO_EXECUTE": image, - "TIRA_IMAGE_TO_EXECUTE_IN_DOCKERHUB": dockerhub_image, - "TIRA_VM_ID": job["TIRA_VM_ID"], - "TIRA_COMMAND_TO_EXECUTE": job["TIRA_COMMAND_TO_EXECUTE"], - "TIRA_TASK_ID": job["TIRA_TASK_ID"], - "TIRA_SOFTWARE_ID": job["TIRA_SOFTWARE_ID"], - "TIRA_SOFTWARE_NAME": software_metadata['display_name'], - "TIRA_IDS_OF_PREVIOUS_STAGES": [] if 'input_docker_software_id' not in software_metadata or not software_metadata['input_docker_software_id'] else [software_metadata['input_docker_software_id']] - })) - - evaluations.add(json.dumps({ - "TIRA_DATASET_ID": job['TIRA_DATASET_ID'].strip(), - "TIRA_EVALUATION_IMAGE_TO_EXECUTE": job["TIRA_EVALUATION_IMAGE_TO_EXECUTE"].strip(), - "TIRA_EVALUATION_COMMAND_TO_EXECUTE": job["TIRA_EVALUATION_COMMAND_TO_EXECUTE"].strip() - })) - - (Path(working_directory) / '.tira').mkdir(parents=True, exist_ok=True) - 
open((Path(working_directory) / '.tira' / 'submitted-software.jsonl').absolute(), 'w').write('\n'.join(softwares)) - open((Path(working_directory) / '.tira' / 'evaluators.jsonl').absolute(), 'w').write('\n'.join(evaluations)) - open((Path(working_directory) / 'tira.py').absolute(), 'w').write( - render_to_string('tira/tira_git_cmd.py', context={})) - open((Path(working_directory) / 'requirements.txt').absolute(), 'w').write('docker==5.0.3\npandas\njupyterlab') - open((Path(working_directory) / 'Makefile').absolute(), 'w').write( - render_to_string('tira/tira_git_makefile', context={})) - open((Path(working_directory) / 'Tutorial.ipynb').absolute(), 'w').write( - render_to_string('tira/tira_git_tutorial.ipynb', context={})) - # open((Path(working_directory) / 'README.md').absolute(), 'a+').write(render_to_string('tira/tira_git_cmd.py', context={})) - - if persist_datasets: - logger.info(f'Archive datasets') - for dataset_name, dataset_definition in tqdm(datasets.items(), 'Archive Datasets'): - if 'is_confidential' in dataset_definition and not dataset_definition['is_confidential']: - for i in ['training-datasets', 'training-datasets-truth']: - shutil.copytree( - Path(settings.TIRA_ROOT) / 'data' / 'datasets' / i / job['TIRA_TASK_ID'] / dataset_name, - Path(working_directory) / dataset_name / i) - - self.archive_all_softwares(working_directory, download_images, persist_images, upload_images) - #logger.info(f'Archive repository into {repo_name}.zip') - #shutil.make_archive(repo_name, 'zip', working_directory) - logger.info(f'The repository is archived into {working_directory}') - - -class GitLabRunner(GitRunner): - - def __init__(self, private_token, host, user_name, user_password, gitlab_repository_namespace_id, image_registry_prefix, user_repository_branch): - self.git_token = private_token - self.user_name = user_name - self.host = host - self.user_password = user_password - self.namespace_id = int(gitlab_repository_namespace_id) - self.image_registry_prefix = image_registry_prefix - self.user_repository_branch = user_repository_branch - self.gitHoster_client = gitlab.Gitlab('https://' + host, private_token=self.git_token) - #self.gitHoster_client = gitlab.Gitlab('https://' + host, private_token=json.load(open('/home/maik/.tira/.tira-settings.json'))['access_token']) - - def template_ci(self): - """ - returns the CI-Pipeline template file as string - """ - return render_to_string('tira/git_task_repository_gitlab_ci.yml', context={}) - - def template_ci_file_name(self): - return '.gitlab-ci.yml' - - def template_readme(self, task_id): - """ - returns the readme template file for Gitlab as string - """ - return render_to_string('tira/git_task_repository_readme.md', context={'task_name': task_id}) - - def template_tira_cmd_script(self, project): - return render_to_string('tira/tira_git_cmd.sh', context={'project_id': project.id, - 'ci_server_host': self.host}) - - def repo_url(self, git_repository_id): - project = self.gitHoster_client.projects.get(git_repository_id) - - return project.http_url_to_repo.replace( - self.host, - self.user_name + ':' + self.git_token + '@' + self.host - ) - - def get_manifest_of_docker_image_image_repository(self, repository_name, tag, cache, force_cache_refresh): - """ - Background for the implementation: - https://dille.name/blog/2018/09/20/how-to-tag-docker-images-without-pulling-them/ - https://gitlab.com/gitlab-org/gitlab/-/issues/23156 - """ - original_repository_name = repository_name - registry_host = self.image_registry_prefix.split('/')[0] - 
repository_name = repository_name.split(registry_host + '/')[-1] - - cache_key = f'docker-manifest-for-repo-{repository_name}-{tag}' - if cache: - ret = cache.get(cache_key) - if ret is not None: - return ret - - try: - token = requests.get(f'https://{self.host}:{self.git_token}@git.webis.de/jwt/auth?client_id=docker&offline_token=true&service=container_registry&scope=repository:{repository_name}:push,pull,blob,upload') - - if not token.ok: - raise ValueError(token.content.decode('UTF-8')) - - token = json.loads(token.content.decode('UTF-8'))['token'] - headers = {'Accept': 'application/vnd.docker.distribution.manifest.v2+json', - 'Content-Type': 'application/vnd.docker.distribution.manifest.v2+json', - 'Authorization': 'Bearer ' + token} - manifest = requests.get(f'https://{registry_host}/v2/{repository_name}/manifests/{tag}', headers=headers) - - if not manifest.ok: - raise ValueError('-->' + manifest.content.decode('UTF-8')) - - image_metadata = json.loads(manifest.content.decode('UTF-8')) - raw_size = image_metadata['config']['size'] + sum([i['size'] for i in image_metadata['layers']]) - size = convert_size(raw_size) - - image_config = requests.get(f'https://{registry_host}/v2/{repository_name}/blobs/{image_metadata["config"]["digest"]}', headers=headers) - - if not image_config.ok: - raise ValueError('-->' + image_config.content.decode('UTF-8')) - - image_config = json.loads(image_config.content.decode('UTF-8')) - - ret = { - 'architecture': image_config['architecture'], - 'created': image_config['created'], - 'size': size, - 'raw_size': raw_size, - 'digest': image_metadata["config"]["digest"].split(':')[-1][:12] - } - except Exception as e: - logger.warn('Exception during loading of metadata for docker image', e) - ret = { - 'architecture': 'Loading...', - 'created': 'Loading...', - 'size': 'Loading...', - 'digest': 'Loading...', - 'raw_size': 'Loading...', - } - - if cache: - logger.info(f"Cache refreshed for key {cache_key} ...") - cache.set(cache_key, ret) - - return ret - - def run_evaluate_with_git_workflow(self, task_id, dataset_id, vm_id, run_id, git_runner_image, - git_runner_command, git_repository_id, evaluator_id): - msg = f"start run_eval with git: {task_id} - {dataset_id} - {vm_id} - {run_id}" - transaction_id = self.start_git_workflow(task_id, dataset_id, vm_id, run_id, git_runner_image, - git_runner_command, git_repository_id, evaluator_id, - 'ubuntu:18.04', - 'echo \'No software to execute. 
Only evaluation\'', - '-1', list(settings.GIT_CI_AVAILABLE_RESOURCES.keys())[0], None, None, None) - - t = TransactionLog.objects.get(transaction_id=transaction_id) - _ = EvaluationLog.objects.update_or_create(vm_id=vm_id, run_id=run_id, running_on=vm_id, - transaction=t) - - return transaction_id - - def run_docker_software_with_git_workflow(self, task_id, dataset_id, vm_id, run_id, git_runner_image, - git_runner_command, git_repository_id, evaluator_id, - user_image_to_execute, user_command_to_execute, tira_software_id, - resources, input_run, mount_hf_model, workdir_in_user_image): - msg = f"start run_docker_image with git: {task_id} - {dataset_id} - {vm_id} - {run_id}" - transaction_id = self.start_git_workflow(task_id, dataset_id, vm_id, run_id, git_runner_image, - git_runner_command, git_repository_id, evaluator_id, - user_image_to_execute, user_command_to_execute, tira_software_id, resources, input_run, mount_hf_model, workdir_in_user_image) - - # TODO: add transaction to log - - return transaction_id - - def start_git_workflow(self, task_id, dataset_id, vm_id, run_id, git_runner_image, - git_runner_command, git_repository_id, evaluator_id, - user_image_to_execute, user_command_to_execute, tira_software_id, resources, input_run, mount_hf_model, workdir_in_user_image): - msg = f"start git-workflow with git: {task_id} - {dataset_id} - {vm_id} - {run_id}" - transaction_id = new_transaction(msg, in_grpc=False) - logger.info(msg) - - identifier = f"eval---{dataset_id}---{vm_id}---{run_id}---started-{str(dt.now().strftime('%Y-%m-%d-%H-%M-%S'))}" - - with tempfile.TemporaryDirectory() as tmp_dir: - repo = self.clone_repository_and_create_new_branch(self.repo_url(git_repository_id), identifier, tmp_dir) - - self.write_metadata_for_ci_job_to_repository(tmp_dir, task_id, transaction_id, dataset_id, vm_id, run_id, - identifier, git_runner_image, git_runner_command, evaluator_id, - user_image_to_execute, user_command_to_execute, tira_software_id, - resources, input_run, mount_hf_model, workdir_in_user_image) - - self.commit_and_push(repo, dataset_id, vm_id, run_id, identifier, git_repository_id, resources) - - t = TransactionLog.objects.get(transaction_id=transaction_id) - _ = EvaluationLog.objects.update_or_create(vm_id=vm_id, run_id=run_id, running_on=vm_id, - transaction=t) - - return transaction_id - - def commit_and_push(self, repo, dataset_id, vm_id, run_id, identifier, git_repository_id, resources): - repo.index.add([str(Path(dataset_id) / vm_id / run_id / 'job-to-execute.txt')]) - repo.index.commit("Evaluate software: " + identifier) - gpu_resources = str(settings.GIT_CI_AVAILABLE_RESOURCES[resources]['gpu']).strip() - data_resources = str(settings.GIT_CI_AVAILABLE_RESOURCES[resources]['data']).strip() - - if gpu_resources == '0' and data_resources == 'no': - repo.remote().push(identifier) - else: - repo.remote().push(identifier, **{'o': 'ci.skip'}) - - gl_project = self.gitHoster_client.projects.get(int(git_repository_id)) - gl_project.pipelines.create({'ref': identifier, 'variables': [ - {'key': 'TIRA_GPU', 'value': gpu_resources}, - {'key': 'TIRA_DATA', 'value': data_resources}, - ]}) - - def add_new_tag_to_docker_image_repository(self, repository_name, old_tag, new_tag): - """ - Background for the implementation: - https://dille.name/blog/2018/09/20/how-to-tag-docker-images-without-pulling-them/ - https://gitlab.com/gitlab-org/gitlab/-/issues/23156 - """ - original_repository_name = repository_name - registry_host = self.image_registry_prefix.split('/')[0] - repository_name = 
repository_name.split(registry_host + '/')[-1] - - token = requests.get(f'https://{self.host}:{self.git_token}@git.webis.de/jwt/auth?client_id=docker&offline_token=true&service=container_registry&scope=repository:{repository_name}:push,pull') - - if not token.ok: - raise ValueError(token.content.decode('UTF-8')) - - token = json.loads(token.content.decode('UTF-8'))['token'] - headers = {'Accept': 'application/vnd.docker.distribution.manifest.v2+json', - 'Content-Type': 'application/vnd.docker.distribution.manifest.v2+json', - 'Authorization': 'Bearer ' + token} - - manifest = requests.get(f'https://{registry_host}/v2/{repository_name}/manifests/{old_tag}', headers=headers) - - if not manifest.ok: - raise ValueError('-->' + manifest.content.decode('UTF-8')) - manifest = manifest.content.decode('UTF-8') - - manifest = requests.put(f'https://{registry_host}/v2/{repository_name}/manifests/{new_tag}', headers=headers, data=manifest) - - if not manifest.ok: - raise ValueError(manifest.content.decode('UTF-8')) - - return original_repository_name + ':' + new_tag - - def all_user_repositories(self): - """ - Lists all user repositories in the organization. - - Return - ---------- - user_repositories: Iterable[str] - List of all user repositories in the organization. - """ - - ret = [] - for potential_existing_projects in self.gitHoster_client.projects.list(search='tira-user-', get_all=True): - if 'tira-user-' in potential_existing_projects.name and int(potential_existing_projects.namespace['id']) == self.namespace_id: - ret += [potential_existing_projects.name] - return set(ret) - - def existing_repository(self, repo): - for potential_existing_projects in self.gitHoster_client.projects.list(search=repo): - if potential_existing_projects.name == repo and int(potential_existing_projects.namespace['id']) == self.namespace_id: - return potential_existing_projects - - def clean_task_repository(self, task_id): - project = self.existing_repository(task_id) - for pipeline in project.pipelines.list(get_all=True): - print('Delete Pipeline: ' + str(pipeline.id)) - if pipeline.status not in {'skipped', 'canceled', 'failed', 'success'}: - print('Skip running pipeline ' + str(pipeline.id)) - continue - pipeline.delete() - - def _create_task_repository_on_gitHoster(self, task_id): - project = self.existing_repository(task_id) - if project: - print(f'Repository found "{task_id}".') - return project - - project = self.gitHoster_client.projects.create( - {'name': task_id, 'namespace_id': str(self.namespace_id), - "default_branch": self.user_repository_branch}) - return project - - def _create_access_token_gitHoster(self, project, repo): - return project.access_tokens.create( - {"name": repo, "scopes": ['read_registry', 'write_registry'], "access_level": 30, "expires_at": "2024-10-08"}) - - def stop_job_and_clean_up(self, git_repository_id, user_name, run_id, cache=None): - """ - All runs that are currently running, pending, or failed - life in a dedicated branch. - Every successfully (without errors/failures and with evaluation) - executed software is merged into the main branch. - This method stops a potentially running pipeline identified by the run_id - of the user "user_id" and deletes the branch. - - Parameters - ---------- - git_repository_id: str - Identifier of the task repository. - (gitlab: int; github: int) - - user_name: str - Name of the user. The repository of the user has the name "tira-user-${user_name}". - - run_id: str - Identifier of the to be stopped run. 
- - Return - ---------- - - - """ - gl = self.gitHoster_client - gl_project = gl.projects.get(int(git_repository_id)) - - for pipeline in self.yield_all_running_pipelines(git_repository_id, user_name, cache, True): - if run_id == pipeline['run_id']: - branch = pipeline['branch'] if 'branch' in pipeline else pipeline['pipeline'].ref - if ('---' + user_name + '---') not in branch: - continue - if ('---' + run_id + '---') not in branch: - continue - - if 'pipeline' in pipeline: - pipeline['pipeline'].cancel() - gl_project.branches.delete(branch) - - - def yield_all_running_pipelines(self, git_repository_id, user_id, cache=None, force_cache_refresh=False): - for pipeline in self.all_running_pipelines_for_repository(git_repository_id, cache, force_cache_refresh): - pipeline = deepcopy(pipeline) - - if ('---' + user_id + '---') not in pipeline['pipeline_name']: - continue - - if ('-training---' + user_id + '---') not in pipeline['pipeline_name']: - pipeline['stdOutput'] = 'Output for runs on the test-data is hidden.' - - yield pipeline - - - def all_running_pipelines_for_repository(self, git_repository_id, cache=None, force_cache_refresh=False): - cache_key = 'all-running-pipelines-repo-' + str(git_repository_id) - if cache: - try: - ret = cache.get(cache_key) - if ret is not None and not force_cache_refresh: - logger.debug('get ret from cache', ret) - return ret - except ModuleNotFoundError as e: - logger.exception(f"Could not find cache module {cache_key}.") - - ret = [] - gl = self.gitHoster_client - gl_project = gl.projects.get(int(git_repository_id)) - already_covered_run_ids = set() - for status in ['scheduled', 'running', 'pending', 'created', 'waiting_for_resource', 'preparing']: - for pipeline in gl_project.pipelines.list(status=status): - user_software_job = None - evaluation_job = None - for job in pipeline.jobs.list(): - if 'run-user-software' == job.name: - user_software_job = job - if 'evaluate-software-result' == job.name: - evaluation_job = job - - p = (pipeline.ref + '---started-').split('---started-')[0] - - execution = {'scheduling': 'running', 'execution': 'pending', 'evaluation': 'pending'} - if user_software_job.status == 'running': - execution = {'scheduling': 'done', 'execution': 'running', 'evaluation': 'pending'} - elif user_software_job.status != 'created': - execution = {'scheduling': 'done', 'execution': 'done', 'evaluation': 'running'} - - stdout = 'Output for runs on the test-data is hidden.' 
- if '-training---' in p: - try: - stdout = '' - user_software_job = gl_project.jobs.get(user_software_job.id) - stdout = self.clean_job_output(user_software_job.trace().decode('UTF-8')) - except: - # Job is not started or similar - pass - - run_id = p.split('---')[-1] - - already_covered_run_ids.add(run_id) - job_config = self.extract_job_configuration(gl_project, pipeline.ref) - if job_config: - ret += [{ - 'run_id': run_id, - 'execution': execution, - 'stdOutput': stdout, - 'started_at': p.split('---')[-1], - 'pipeline_name': p, - 'job_config': job_config, - 'pipeline': pipeline - }] - - ret += self.__all_failed_pipelines_for_repository(gl_project, already_covered_run_ids) - - if cache: - logger.info(f"Cache refreshed for key {cache_key} ...") - cache.set(cache_key, ret) - - return ret - - def clean_job_output(self, ret): - ret = ''.join(filter(lambda x: x in string.printable, ret.strip())) - if '$ eval "${TIRA_COMMAND_TO_EXECUTE}"[0;m' in ret: - return self.clean_job_suffix(ret.split('$ eval "${TIRA_COMMAND_TO_EXECUTE}"[0;m')[1]) - elif '$ eval "${TIRA_EVALUATION_COMMAND_TO_EXECUTE}"[0;m' in ret: - return self.clean_job_suffix(ret.split('$ eval "${TIRA_EVALUATION_COMMAND_TO_EXECUTE}"[0;m')[1]) - else: - # Job not jet started. - return '' - - def clean_job_suffix(self, ret): - if "[32;1m$ env|grep 'TIRA' > task.env" in ret: - ret = ret.split("[32;1m$ env|grep 'TIRA' > task.env")[0] - if "section_end:" in ret: - ret = ret.split("section_end:")[0] - - return ret.strip() - - def extract_job_configuration(self, gl_project, branch): - ret = {} - - if not branch or branch.strip().lower() == 'main': - return None - - try: - for commit in gl_project.commits.list(ref_name=branch, page=0, per_page=3): - if len(ret) > 0: - break - - if branch in commit.title and 'Merge' not in commit.title: - for diff_entry in commit.diff(): - if len(ret) > 0: - break - - if diff_entry['old_path'] == diff_entry['new_path'] and diff_entry['new_path'].endswith('/job-to-execute.txt'): - diff_entry = diff_entry['diff'].replace('\n+', '\n').split('\n') - ret = {i.split('=')[0].strip():i.split('=')[1].strip() for i in diff_entry if len(i.split('=')) == 2} - except Exception as e: - logger.warn(f'Could not extract job configuration on "{branch}".', e) - pass - - if 'TIRA_COMMAND_TO_EXECUTE' in ret and "'No software to execute. 
Only evaluation'" in ret['TIRA_COMMAND_TO_EXECUTE'] and ('TIRA_SOFTWARE_ID' not in ret or '-1' == ret['TIRA_SOFTWARE_ID']): - software_from_db = {'display_name': 'Evaluate Run', 'image': 'evaluator', 'command': 'evaluator'} - else: - try: - from tira.tira_model import model - software_from_db = model.get_docker_software(int(ret['TIRA_SOFTWARE_ID'].split('docker-software-')[-1])) - except Exception as e: - logger.warn(f'Could not extract the software from the database for "{json.dumps(ret)}": {str(e)}') - software_from_db = {} - - return { - 'software_name': software_from_db.get('display_name', 'Loading...'), - 'image': software_from_db.get('user_image_name', 'Loading...'), - 'command': software_from_db.get('command', 'Loading...'), - 'cores': str(ret.get('TIRA_CPU_COUNT', 'Loading...')) + ' CPU Cores', - 'ram': str(ret.get('TIRA_MEMORY_IN_GIBIBYTE', 'Loading...')) + 'GB of RAM', - 'gpu': str(ret.get('TIRA_GPU', 'Loading...')) + ' GPUs', - 'data': str(ret.get('TIRA_DATA', 'Loading...')) + ' Mounts', - 'dataset_type': ret.get('TIRA_DATASET_TYPE', 'Loading...'), - 'dataset': ret.get('TIRA_DATASET_ID', 'Loading...'), - 'software_id': ret.get('TIRA_SOFTWARE_ID', 'Loading...'), - 'task_id': ret.get('TIRA_TASK_ID', 'Loading...'), - } - - def __all_failed_pipelines_for_repository(self, gl_project, already_covered_run_ids): - ret = [] - - for branch in gl_project.branches.list(): - branch = branch.name - p = (branch + '---started-').split('---started-')[0] - run_id = p.split('---')[-1] - - if run_id in already_covered_run_ids: - continue - - job_config = self.extract_job_configuration(gl_project, branch) - if not job_config: - continue - - ret += [{'run_id': run_id, 'execution': {'scheduling': 'failed', 'execution': 'failed', 'evaluation': 'failed'}, 'pipeline_name': p, 'stdOutput': 'Job did not run. (Maybe it is still submitted to the cluster or failed to start. It might take up to 5 minutes to submit a Job to the cluster.)', 'started_at': p.split('---')[-1], 'branch': branch, 'job_config': job_config}] - - return ret - - -class GithubRunner(GitRunner): - - def __init__(self, github_token): - self.git_token = github_token - self.gitHoster_client = Github(self.git_token) - - def _convert_repository_id_to_repository_name(self, repository_id): - for repo in self.gitHoster_client.get_user().get_repos(): - if repo.id == repository_id: - return repo.name - - def template_ci(self): - """ - returns the Workflow template file as string - """ - # TODO: create workflow template file at tira/application/src/tira/templates/tira/git_task_repository_github_workflow.yml - return render_to_string('tira/git_task_repository_github_workflow.yml', context={}) - - def template_readme(self, task_id): - """ - returns the readme template file for Github as string - """ - # TODO: create readme template file for Github at tira/application/src/tira/templates/tira/git_task_repository_github_readme.md - return render_to_string('tira/git_task_repository_github_readme.md', context={'task_name': task_id}) - - def template_tira_cmd_script(self, project_id): - return render_to_string('tira/tira_git_cmd.sh', context={'project_id': project_id, - 'ci_server_host': "https://github.com"}) - - def add_new_tag_to_docker_image_repository(self, repository_name, old_tag, new_tag): - for repo in self.gitHoster_client.get_user().get_repos(): - if repo.name == repository_name: - tags = repo.tags - if new_tag not in tags: - repo.create_tag(new_tag) - repo.git.push(tags=True) # Do we need this? 
- else: - logger.info(f"Tag: {new_tag} already exists with the same name") - - def all_user_repositories(self): - """ - Lists all user repositories in the organization. - - Return - ---------- - user_repositories: Iterable[str] - List of all user repositories in the organization. - """ - - ret = [] - for repo in self.gitHoster_client.get_user().get_repos(): - ret.append(repo.name) - - return set(ret) - - def stop_job_and_clean_up(self, git_repository_id, user_name, run_id): - """ - All runs that are currently running, pending, or failed - live in a dedicated branch. - Every successfully (without errors/failures and with evaluation) - executed software is merged into the main branch. - This method stops a potentially running pipeline identified by the run_id - of the user "user_name" and deletes the branch. - - Parameters - ---------- - git_repository_id: str - Identifier of the task repository. - (gitlab: int; github: int) - - user_name: str - Name of the user. The repository of the user has the name "tira-user-${user_name}". - - run_id: str - Identifier of the run to be stopped. - - Return - ---------- - - - """ - repository_name = self._convert_repository_id_to_repository_name(git_repository_id) - - # cancel workflow run - run = self.gitHoster_client.get_user().get_repo(repository_name).get_workflow_run(run_id) - run.cancel() - - # delete branch - branch_name = run.head_branch - self.gitHoster_client.get_user().get_repo(repository_name).get_git_ref(f"heads/{branch_name}").delete() - - def _create_task_repository_on_gitHoster(self, task_id): - # create new repository and rename the default branch - project = self.gitHoster_client.get_user().create_repo(name = task_id) - for branch in project.get_branches(): - project.rename_branch(branch=branch, new_name= self.user_repository_branch) - return project - - def _create_access_token_gitHoster(self, project ,repo): - raise ValueError('ToDo: Implement this.') - - def yield_all_running_pipelines(self, git_repository_id): - """ - Yield all pipelines/workflows that are currently running, pending, or failed. - - - Parameters - ---------- - git_repository_id: str - Identifier of the task repository. - (gitlab: int; github: int) - - Return - ---------- - jobs: Iterable[dict] - all pipelines/workflows that are currently running, pending, or failed. - Each entry has the following fields: - 'run_id', - 'execution', - 'stdOutput', - 'started_at', - 'pipeline_name', - 'job_config', - 'pipeline' - """ - # https://docs.github.com/en/rest/actions/workflow-jobs?apiVersion=2022-11-28#get-a-job-for-a-workflow-run - pass - - def git_user_exists(self, user_name): - try: - return self.gitHoster_client.get_user(user_name) is not None - except: - return False - - def get_git_runner_for_software_integration(self, reference_repository_name, user_repository_name, - user_repository_namespace, github_user, tira_user_name, - dockerhub_token, dockerhub_user, tira_client_token, - repository_search_prefix, tira_task_id, tira_code_repository_id, - tira_client_user, private): - user = self.gitHoster_client.get_user() - try: - user_repo = user.get_repo(f'{user_repository_namespace}/{user_repository_name}') - if user_repo: - return user_repo - except: - # repository does not exist. 
- pass - - return self.create_software_submission_repository_for_user(reference_repository_name, user_repository_name, - user_repository_namespace, github_user, - tira_user_name, dockerhub_token, dockerhub_user, - tira_client_token, repository_search_prefix, - tira_task_id, tira_code_repository_id, - tira_client_user, private) - - def create_software_submission_repository_for_user(self, reference_repository_name, user_repository_name, - user_repository_namespace, github_user, tira_user_name, - dockerhub_token, dockerhub_user, tira_client_token, - repository_search_prefix, tira_task_id, tira_code_repository_id, - tira_client_user, private): - reference_repo = self.gitHoster_client.get_repo(reference_repository_name) - - org = self.gitHoster_client.get_organization(user_repository_namespace) - repo = org.create_repo(user_repository_name, - f'The repository of user {tira_user_name} for code submissions in TIRA.', private=private) - repo.add_to_collaborators(github_user, 'admin') - - repo.create_secret('TIRA_DOCKER_REGISTRY_TOKEN', dockerhub_token) - repo.create_secret('TIRA_DOCKER_REGISTRY_USER', dockerhub_user) - repo.create_secret('TIRA_CLIENT_TOKEN', tira_client_token) - repo.create_secret('TIRA_CLIENT_USER', tira_client_user) - repo.create_secret('TIRA_CODE_REPOSITORY_ID', tira_code_repository_id) - - contents = reference_repo.get_contents(repository_search_prefix) - while contents: - file_content = contents.pop(0) - if file_content.type == "dir": - contents.extend(reference_repo.get_contents(file_content.path)) - else: - decoded_content = file_content.decoded_content.decode() - decoded_content = normalize_file(decoded_content, tira_user_name, tira_task_id) - repo.create_file(file_content.path, 'Initial Commit.', decoded_content) - - return repo - diff --git a/application/src/tira/grpc/grpc_server.py b/application/src/tira/grpc/grpc_server.py deleted file mode 100644 index a926e499a..000000000 --- a/application/src/tira/grpc/grpc_server.py +++ /dev/null @@ -1,165 +0,0 @@ -from django.conf import settings -from concurrent import futures -import grpc -import logging - -from tira.proto import tira_host_pb2, tira_host_pb2_grpc -from tira.model import TransitionLog, EvaluationLog, TransactionLog -import tira.tira_model as model -import django - -grpc_port = settings.APPLICATION_GRPC_PORT - -logger = logging.getLogger("tira") - - -class TiraApplicationService(tira_host_pb2_grpc.TiraApplicationService): - def set_state(self, request, context): - """ TODO error handling """ - django.db.connection.close() - logger.debug(f" Application Server received vm-state {request.state} for {request.vmId}") - print(f"Application Server received vm-state {request.state} for {request.vmId}. 
Transaction: {request.transaction.transactionId}") - try: - TransactionLog.objects.filter(transaction_id=request.transaction.transactionId).update( - last_status=request.transaction.status, - last_message=f"TiraApplicationService:set_state:{request.transaction.message}" - ) - - t = TransactionLog.objects.get(transaction_id=request.transaction.transactionId) - - _ = TransitionLog.objects.update_or_create(vm_id=request.vmId, defaults={'vm_state': request.state, - 'transaction': t}) - except Exception as e: - logger.warning(e) - return tira_host_pb2.Transaction(status=tira_host_pb2.Status.FAILED, - message=f"TiraApplicationService:set_state:FAILED with {e}", - transactionId=request.transaction.transactionId) - - return tira_host_pb2.Transaction(status=tira_host_pb2.Status.SUCCESS, - message="TiraApplicationService:set_state:SUCCESS", - transactionId=request.transaction.transactionId) - - def complete_transaction(self, request, context): - """ Marks a transaction as completed. - This is basically the final stage of a TIRA message exchange. - """ - django.db.connection.close() - logger.debug(f" Application Server received complete_transaction for {request.transactionId}") - print(f" Application Server received complete_transaction for {request.transactionId}") - - try: - _ = TransactionLog.objects.filter(transaction_id=request.transactionId).update( - completed=True, - last_status=str(request.status), - last_message=f"TiraApplicationService:complete_transaction:{request.message}") - - except Exception as e: - logger.warning(e) - return tira_host_pb2.Transaction(status=tira_host_pb2.Status.FAILED, - message=f"TiraApplicationService:complete_transaction:FAILED with {e}", - transactionId=request.transactionId) - - return tira_host_pb2.Transaction(status=tira_host_pb2.Status.SUCCESS, - message="TiraApplicationService:complete_transaction:SUCCESS", - transactionId=request.transactionId) - - def confirm_vm_create(self, request, context): - """ This gets called if a vm was successfully created. Right now it just says 'yes' when called. - See tira_host.proto for request specification. - """ - django.db.connection.close() - logger.debug(f" Application Server received vm-create confirmation with \n" - f"{request.vmID}, {request.userName}, {request.initialUserPw}, {request.ip}, {request.sshPort}, " - f"{request.rdpPort}") - - _ = TransactionLog.objects.filter(transaction_id=request.transaction.transactionId).update( - completed=False, - last_status=str(request.transaction.status), - last_message=request.transaction.message) - - if request.transaction.status == tira_host_pb2.Status.SUCCESS: - model.add_vm(request.vmId, request.userName, request.initialUserPw, - request.ip, request.host, request.sshPort, request.rdpPort) - - else: - logger.error("Application received confirm_vm_create with status Failed:\n" - f"{request.vmID}, {request.userName}, {request.initialUserPw}, {request.ip}, " - f"{request.sshPort}, {request.rdpPort}") - - return tira_host_pb2.Transaction(status=tira_host_pb2.Status.SUCCESS, - message="Application accepted vm create confirmation", - transactionId=request.transaction.transactionId) - - def confirm_vm_delete(self, request, context): - """ This gets called if a vm was successfully deleted. - Right now it just says 'yes' when called. See tira_host.proto for request specification. - TODO this should remove the deleted vm from the model. 
- """ - django.db.connection.close() - print(f" Application Server received vm_delete confirmation with: \n" - f"{request.vmId.vmId} measures.") - - return tira_host_pb2.Transaction(status=tira_host_pb2.Status.SUCCESS, - message="Application accepted vm delete confirmation", - transactionId=request.transaction.transactionId) - - def confirm_run_eval(self, request, context): - """ This gets called if a run_eval finishes and receives the EvaluationResults. - We use this to load a new evaluation run into the database. - See tira_host.proto for request specification. - """ - django.db.connection.close() - logger.debug(f" Application Server received run-eval confirmation with: \n" - f"{request.runId.runId} - {request.runId.vmId} - {request.transaction.transactionId} and {len(request.measures)} measures.") - print(f" Application Server received run-eval confirmation with: \n" - f"{request.runId.runId} - {request.runId.vmId} - {request.transaction.transactionId} and {len(request.measures)} measures.") - - result = model.add_run(request.runId.datasetId, request.runId.vmId, request.runId.runId) - - EvaluationLog.objects.filter(vm_id=request.runId.vmId, run_id=request.runId.runId).delete() - EvaluationLog.objects.filter(transaction__transaction_id=request.transaction.transactionId).delete() - - _ = TransactionLog.objects.filter(transaction_id=request.transaction.transactionId).update( - completed=False, - last_status=str(request.transaction.status), - last_message=request.transaction.message) - - return tira_host_pb2.Transaction(status=tira_host_pb2.Status.SUCCESS, - message=f"Application accepted evaluation confirmation with request.runId.datasetId={request.runId.datasetId}, request.runId.vmId={request.runId.vmId}, request.runId.runId={request.runId.runId}. Result {result}.", - transactionId=request.transaction.transactionId) - - def confirm_run_execute(self, request, context): - """ This gets called if a run_execute finishes. We use this to load the new run in the database. - See tira_host.proto for request specification. - """ - django.db.connection.close() - logger.debug(f" Application Server received run-eval confirmation with: \n" - f"{request.runId.runId}.") - - result = model.add_run(request.runId.datasetId, request.runId.vmId, request.runId.runId) - EvaluationLog.objects.filter(vm_id=request.runId.vmId, run_id=request.runId.runId).delete() - _ = TransactionLog.objects.filter(transaction_id=request.transaction.transactionId).update( - completed=False, - last_status=str(request.transaction.status), - last_message=request.transaction.message) - - return tira_host_pb2.Transaction(status=tira_host_pb2.Status.SUCCESS, - message=f"Application accepted run execute confirmation with: request.runId.datasetId={request.runId.datasetId}, request.runId.vmId={request.runId.vmId}, request.runId.runId={request.runId.runId}. 
Result {result}.", - transactionId=request.transaction.transactionId) - - def heartbeat(self, request, context): - """ - - """ - pass - - -def serve(): - server = grpc.server(futures.ThreadPoolExecutor(max_workers=50)) - tira_host_pb2_grpc.add_TiraApplicationServiceServicer_to_server(TiraApplicationService(), server) - listen_addr = f'[::]:{grpc_port}' - server.add_insecure_port(listen_addr) - server.start() - print("Starting tira-application server on %s", listen_addr) - logger.info("Starting tira-application server on %s", listen_addr) - server.wait_for_termination() diff --git a/application/src/tira/huggingface_hub_integration.py b/application/src/tira/huggingface_hub_integration.py deleted file mode 100644 index 2f2691857..000000000 --- a/application/src/tira/huggingface_hub_integration.py +++ /dev/null @@ -1,50 +0,0 @@ -import importlib -from typing import Iterable -from huggingface_hub import scan_cache_dir, snapshot_download -import os -import sys - -tira_cli_io_utils = None - -for p in sys.path: - p = str(os.path.abspath(p)) + '/' - if ('-packages/') in p: - p = p.split('-packages/')[0] + '-packages/' - - if os.path.exists(f"{p}/tira/io_utils.py"): - tira_cli_io_utils_spec = importlib.util.spec_from_file_location("tira_cli.io_utils", f"{p}/tira/io_utils.py") - tira_cli_io_utils = importlib.util.module_from_spec(tira_cli_io_utils_spec) - tira_cli_io_utils_spec.loader.exec_module(tira_cli_io_utils) - continue - -TIRA_HOST_HF_HOME = tira_cli_io_utils._default_hf_home_in_tira_host() -HF_CACHE = None - -def _hf_repos(): - global HF_CACHE - if HF_CACHE is None: - HF_CACHE = scan_cache_dir() - return {i.repo_id: str(i) for i in HF_CACHE.repos} - - -def huggingface_model_mounts(models:Iterable[str]): - if not models: - return [] - - mounts = tira_cli_io_utils.huggingface_model_mounts(models) - repos = _hf_repos() - print(repos) - print(models) - - ret = [] - for model in models: - if model in repos: - ret.append(repos[model]) - else: - raise Exception(f"Model {model} is not available in the Huggingface cache") - - return {'MOUNT_HF_MODEL': ' '.join(models), 'HF_HOME': TIRA_HOST_HF_HOME, 'HF_CACHE_SCAN': ret} - -def snapshot_download_hf_model(model: str): - os.environ['HF_HOME'] = TIRA_HOST_HF_HOME - snapshot_download(repo_id=model.replace('--', '/')) diff --git a/application/src/tira/management/commands/archive_runs_to_zenodo.py b/application/src/tira/management/commands/archive_runs_to_zenodo.py deleted file mode 100644 index c1d2e928c..000000000 --- a/application/src/tira/management/commands/archive_runs_to_zenodo.py +++ /dev/null @@ -1,171 +0,0 @@ -from django.core.management.base import BaseCommand, CommandError -from django.core.management import call_command -from django.apps import apps -from tira.views import zip_run, zip_runs -from tira.endpoints.data_api import model, public_submission_or_none -from tqdm import tqdm -import json -import shutil - -def md5(filename): - import hashlib - return hashlib.md5(open(filename,'rb').read()).hexdigest() - -class Command(BaseCommand): - help = 'Dump software outputs for Zenodo' - - def handle(self, *args, **options): - dataset_groups = { - 'trec-recent': [ - 'msmarco-passage-trec-dl-2019-judged-20230107-training', 'msmarco-passage-trec-dl-2020-judged-20230107-training', 'trec-tip-of-the-tongue-dev-20230607-training' - ], - 'tiny-test-collections': [ - 'antique-test-20230107-training', 'vaswani-20230107-training', 'cranfield-20230107-training', 'nfcorpus-test-20230107-training' - ], - 'trec-medical': [ - 
'medline-2004-trec-genomics-2004-20230107-training', 'medline-2017-trec-pm-2017-20230211-training', 'cord19-fulltext-trec-covid-20230107-training', 'medline-2017-trec-pm-2018-20230211-training', 'medline-2004-trec-genomics-2005-20230107-training' - ], - 'clef-labs': [ - 'argsme-touche-2020-task-1-20230209-training', 'argsme-touche-2021-task-1-20230209-training', 'longeval-short-july-20230513-training', 'longeval-heldout-20230513-training', 'longeval-long-september-20230513-training', 'longeval-train-20230513-training' - ], - 'clueweb': [ - 'clueweb09-en-trec-web-2009-20230107-training', 'clueweb09-en-trec-web-2010-20230107-training', 'clueweb09-en-trec-web-2011-20230107-training', 'clueweb09-en-trec-web-2012-20230107-training', 'clueweb12-touche-2020-task-2-20230209-training', 'clueweb12-touche-2021-task-2-20230209-training', 'clueweb12-trec-misinfo-2019-20240214-training', 'clueweb12-trec-web-2013-20230107-training', 'clueweb12-trec-web-2014-20230107-training', 'gov-trec-web-2002-20230209-training', 'gov-trec-web-2003-20230209-training', 'gov-trec-web-2004-20230209-training', 'gov2-trec-tb-2004-20230209-training', 'gov2-trec-tb-2005-20230209-training', 'gov2-trec-tb-2006-20230209-training' - ], - 'trec-core': [ - 'wapo-v2-trec-core-2018-20230107-training', 'disks45-nocr-trec8-20230209-training', 'disks45-nocr-trec7-20230209-training', 'disks45-nocr-trec-robust-2004-20230209-training' - ], - 'ir-lab': [ - 'anthology-20240411-training', 'ir-acl-anthology-20240504-training' - ] - } - - #we publish document processors only for fully public datasets, query processors can be published on all groups - fully_public_datasets = dataset_groups['trec-recent'] + dataset_groups['tiny-test-collections'] + dataset_groups['trec-medical'] + dataset_groups['clef-labs'] + dataset_groups['ir-lab'] - - systems = { - 'ir-benchmarks': { - 'tira-ir-starter': { - 'Index (tira-ir-starter-pyterrier)': 'pyterrier-indexes' - }, - 'seanmacavaney': { - 'DocT5Query': 'doc-t5-query', - 'corpus-graph': 'corpus-graph', - }, - 'ows': { - 'pyterrier-anceindex': 'pyterrier-anceindex' - }, - 'ir-lab-sose-2024': { - 'tira-ir-starter': { - 'Index (tira-ir-starter-pyterrier)': 'ir-lab-sose-2024', - 'Index (pyterrier-stanford-lemmatizer)': 'ir-lab-sose-2024', - }, - 'seanmacavaney': { - 'DocT5Query': 'ir-lab-sose-2024', - 'corpus-graph': 'ir-lab-sose-2024', - }, - 'ows': { - 'pyterrier-anceindex': 'ir-lab-sose-2024' - }, - 'naverlabseurope': { - 'Splade (Index)': 'ir-lab-sose-2024' - } - }, - } - - aggregated_systems = { - 'ir-benchmarks': { - 'qpptk': { - 'all-predictors': 'qpptk-all-predictors', - }, - 'salamander': { - 'classify-comparative-queries': 'qpptk-all-predictors', - }, - 'ows': { - 'query-segmentation-hyb-a': 'qpptk-all-predictors', - }, - 'dossier': { - 'pre-retrieval-query-intent': 'qpptk-all-predictors' - }, - 'tu-dresden-03': { - 'qe-gpt3.5-sq-zs': 'qpptk-all-predictors', - 'qe-llama-sq-zs': 'qpptk-all-predictors', - 'qe-llama-sq-fs': 'qpptk-all-predictors', - 'qe-llama-cot': 'qpptk-all-predictors', - 'qe-flan-ul2-sq-zs': 'qpptk-all-predictors', - 'qe-flan-ul2-sq-fs': 'qpptk-all-predictors', - 'qe-flan-ul2-cot': 'qpptk-all-predictors', - } - - # pre-retrieval query intent, post-retrieval query intent - # splade - # comparative questions - # entity linking - }, - - 'workshop-on-open-web-search': { - 'tu-dresden-03': { - - 'qe-gpt3.5-cot': 'qpptk-all-predictors', - 'qe-gpt3.5-sq-fs': 'qpptk-all-predictors', - }, - 'marcel-gohsen': { - 'query-interpretation': 'qpptk-all-predictors', - 'entity-linking': 
'qpptk-all-predictors', - } - } - } - - ret = {} - - #for task_id in systems.keys(): - for task_id in []: - ret[task_id] = {} - for user_id in systems[task_id].keys(): - ret[task_id][user_id] = {} - for display_name in systems[task_id][user_id].keys(): - ret[task_id][user_id][display_name] = {} - output_dir = systems[task_id][user_id][display_name] - for i in tqdm(fully_public_datasets): - run_id = model.runs(task_id, i, user_id, display_name)[0]['run_id'] - target_file = f'{output_dir}/{run_id}.zip' - - zip_file = zip_run(i, user_id, run_id) - shutil.copyfile(zip_file, target_file) - ret[task_id][user_id][display_name][i] = {'run_id': run_id, 'md5': md5(target_file)} - - print(json.dumps(ret)) - - ret = {} - - for task_id in aggregated_systems.keys(): - ret[task_id] = {} - for user_id in aggregated_systems[task_id].keys(): - ret[task_id][user_id] = {} - for display_name, output_dir in aggregated_systems[task_id][user_id].items(): - ret[task_id][user_id][display_name] = {} - - for dataset_group, datasets in tqdm(dataset_groups.items(), display_name): - run_ids = {} - file_name = f'{user_id}-{display_name}-{dataset_group}' - target_file = f'{output_dir}/{file_name}.zip' - - for dataset in datasets: - runs_on_dataset = model.runs(task_id, dataset, user_id, display_name) - if len(runs_on_dataset) > 0: - run_ids[dataset] = runs_on_dataset[0] - else: - print(f'skip dataset {dataset} for {display_name}') - - - if len(run_ids) == 0: - print(f'Skip group {dataset_group} for {display_name}.') - continue - - zip_file = zip_runs(user_id, [(k, v) for k,v in run_ids.items()], file_name) - shutil.copyfile(zip_file, target_file) - ret[task_id][user_id][display_name][dataset_group] = {'dataset_group': dataset_group, 'md5': md5(target_file), 'run_ids': run_ids} - - print(json.dumps(ret)) - - - diff --git a/application/src/tira/management/commands/cache_daemon.py b/application/src/tira/management/commands/cache_daemon.py deleted file mode 100644 index c03b0cc45..000000000 --- a/application/src/tira/management/commands/cache_daemon.py +++ /dev/null @@ -1,102 +0,0 @@ -from django.conf import settings -from django.core.cache import cache -import logging -from django.core.management.base import BaseCommand, CommandError -from django.core.management import call_command -import tira.tira_model as model - -import time -import datetime - -logger = logging.getLogger("cache_daemon") -from tira.tira_model import get_git_integration, get_all_reranking_datasets -from tira.git_runner import all_git_runners - - -class Command(BaseCommand): - help = 'cache daemon' - - def keep_running_softwares_fresh(self, sleep_time): - while True: - time.sleep(int(sleep_time)) - print(str(datetime.datetime.now()) + ': Start loop to keep the running softwares fresh (sleeped ' + str(int(sleep_time)) + ' seconds) ...') - for task in model.get_tasks(): - if task is None: - continue - if model.git_pipeline_is_enabled_for_task(task['task_id'], cache): - if 'featured' not in task or not task['featured']: - print(f'Skip inactive task {task["task_id"]}') - continue - - evaluators_for_task = model.get_evaluators_for_task(task['task_id'], cache) - repositories = set([i['git_repository_id'] for i in evaluators_for_task if i['is_git_runner'] and i['git_repository_id']]) - - for git_repository_id in repositories: - try: - print(task['task_id'] + '--->' + str(git_repository_id)) - git_integration = get_git_integration(task_id=task['task_id']) - running_pipelines = git_integration.all_running_pipelines_for_repository(git_repository_id, cache, 
force_cache_refresh=True) - print('Refreshed Cache (' + str(datetime.datetime.now()) + '): ' + task['task_id'] + ' on repo ' + str(git_repository_id) + ' has ' + str(len(running_pipelines)) + ' jobs.') - except Exception as e: - print(f'Exception during refreshing the repository {git_repository_id}: {e}') - logger.warn(f'Exception during refreshing the repository {git_repository_id}', e) - continue - - time.sleep(0.1) - - def refresh_user_images_in_repo(self, git_runner, sleep_time): - users_of_active_tasks = set() - for task in model.get_tasks(): - if task is None: - continue - if 'featured' in task and task['featured'] and 'allowed_task_teams' in task and task['allowed_task_teams']: - users_of_active_tasks |= set([i.strip() for i in task['allowed_task_teams'].split('\n') if i and i.strip()]) - - print(str(datetime.datetime.now()) + ': Start loop to keep the user images fresh (slept ' + str(int(sleep_time)) + f' seconds) for {users_of_active_tasks} ...', flush=True) - - for user in users_of_active_tasks: - try: - images = git_runner.docker_images_in_user_repository(user, cache, force_cache_refresh=True) - print('Refreshed Cache (' + str(datetime.datetime.now()) + '): ' + user + ' has ' + str(len(images)) + ' images.', flush=True) - except Exception as e: - print(f'Exception during refreshing image repository {user}: {e}', flush=True) - continue - time.sleep(0.1) - - def keep_user_images_fresh(self, sleep_time): - while True: - time.sleep(int(sleep_time)) - print(str(datetime.datetime.now()) + ': Start loop over all git runners to keep user images fresh (slept ' + str(int(sleep_time)) + ' seconds) ...', flush=True) - for git_runner in all_git_runners(): - try: - self.refresh_user_images_in_repo(git_runner, sleep_time) - except Exception as e: - print(f'Exception in keep_user_images_fresh: {e}', flush=True) - continue - - def keep_reranking_datasets_fresh(self, sleep_time): - while True: - time.sleep(int(sleep_time)) - print(str(datetime.datetime.now()) + ': Start keep_reranking_datasets_fresh (slept ' + str(int(sleep_time)) + ' seconds) ...') - try: - get_all_reranking_datasets(True) - except Exception as e: - print(f'Exception in keep_reranking_datasets_fresh: {e}') - - def handle(self, *args, **options): - call_command('createcachetable') - - if 'keep_running_softwares_fresh' in options and options['keep_running_softwares_fresh']: - self.keep_running_softwares_fresh(options['keep_running_softwares_fresh']) - - if 'keep_user_images_fresh' in options and options['keep_user_images_fresh']: - self.keep_user_images_fresh(options['keep_user_images_fresh']) - - if 'keep_reranking_datasets_fresh' in options and options['keep_reranking_datasets_fresh']: - self.keep_reranking_datasets_fresh(options['keep_reranking_datasets_fresh']) - - def add_arguments(self, parser): - parser.add_argument('--keep_running_softwares_fresh', default=None, type=str) - parser.add_argument('--keep_reranking_datasets_fresh', default=None, type=str) - parser.add_argument('--keep_user_images_fresh', default=None, type=str) - diff --git a/application/src/tira/management/commands/dump_tira.py b/application/src/tira/management/commands/dump_tira.py deleted file mode 100644 index 909801dfb..000000000 --- a/application/src/tira/management/commands/dump_tira.py +++ /dev/null @@ -1,13 +0,0 @@ -from django.core.management.base import BaseCommand, CommandError -from django.core.management import call_command -from django.apps import apps - -class Command(BaseCommand): - help = 'dump all of tira' - - def handle(self, *args, 
**options): - tira_config = apps.get_app_config('tira') - models = [f'tira.{i}' for i in tira_config.models] - - cmd = ['dumpdata'] + models + ['--indent', '2'] - call_command(*cmd) diff --git a/application/src/tira/management/commands/git_runner_cli.py b/application/src/tira/management/commands/git_runner_cli.py deleted file mode 100644 index 0558fa255..000000000 --- a/application/src/tira/management/commands/git_runner_cli.py +++ /dev/null @@ -1,248 +0,0 @@ -import os -import django - -from django.conf import settings -import logging -import time -from contextlib import contextmanager -from django.core.management.base import BaseCommand, CommandError -from django.core.management import call_command -from django.core.cache import cache -from tqdm import tqdm -import json -from slugify import slugify - -from tira.git_runner import get_git_runner -from tira.tira_model import load_refresh_timestamp_for_cache_key, get_git_integration, create_re_rank_output_on_dataset, get_all_reranking_datasets, add_input_run_id_to_all_rerank_runs - -from tira.util import get_tira_id -logger = logging.getLogger("tira") - - -class Command(BaseCommand): - """Run git_runner via cli. - Later this will become a fully fledged cli tool that we use as wrapper in the repository. - At the moment, we just execute some predefined commands - """ - - def run_command_create_user_repository(self, options, git_runner): - print(f'Create a user repository for {options["create_user_repository"]}.') - repo_id = create_user_repository(options['create_user_repository']) - print(f'The new repository has the id ${repo_id}') - print(add_new_tag_to_docker_image_repository('registry.webis.de/code-research/tira/tira-user-del-maik-user-repo/my-software', '0.0.3', '0.0.1-tira-docker-software-id-name-x')) - print('Images: ' + str(git_runner.docker_images_in_user_repository(options['create_user_repository']))) - - def run_command_create_task_repository(self, options, git_runner): - print(f'Create a task-repository for {options["create_task_repository"]}.') - repo_id = git_runner.create_task_repository(options['create_task_repository']) - print(f'The new task-repository has the id ${repo_id}') - - def run_command_running_jobs(self, options, git_runner): - if 'user_id' not in options or not options['user_id']: - raise ValueError('Please pass --user_id as argument.') - - print(list(git_runner.yield_all_running_pipelines(options['running_jobs'], options['user_id'], cache, True))) - - print(load_refresh_timestamp_for_cache_key(cache, 'all-running-pipelines-repo-' +options['running_jobs'])) - - def run_command_stop_job_and_clean_up(self, options, git_runner): - if 'user_id' not in options or not options['user_id']: - raise ValueError('Please pass --user_id as argument.') - - if 'run_id' not in options or not options['run_id']: - raise ValueError('Please pass --run_id as argument.') - - git_runner.stop_job_and_clean_up(options['stop_job_and_clean_up'], options['user_id'], options['run_id']) - - def archive_repository_add_images_from_git_repo(self, options): - import tira.model as modeldb - with open(options['archive_repository_add_images_from_git_repo'], 'r') as f: - for l in tqdm(f): - l = json.loads(l) - if 'docker-software-' not in l['TIRA_SOFTWARE_ID']: - print('Skip') - continue - - docker_software_id = int(l['TIRA_SOFTWARE_ID'].split('docker-software-')[1]) - software = modeldb.DockerSoftware.objects.get(docker_software_id=docker_software_id) - if l['TIRA_COMMAND_TO_EXECUTE'] != software.command or not 
l['TIRA_IMAGE_TO_EXECUTE'].startswith(software.user_image_name) or l['TIRA_IMAGE_TO_EXECUTE'] != software.tira_image_name: - print('Skip') - continue - - software.public_image_name = l['TIRA_IMAGE_TO_EXECUTE_IN_DOCKERHUB'] - software.public_image_size = max(l['image_details']['size'], l['image_details']['virtual_size']) - software.save() - - def archive_docker_software(self, approach, git_runner): - import tira.model as modeldb - from tira.util import docker_image_details - - task_id, vm_id, name = approach.split('/') - software = modeldb.DockerSoftware.objects.filter(vm__vm_id=vm_id, task__task_id=task_id, display_name=name, deleted=False) - - if len(software) != 1: - raise ValueError(f'Found {software} but expected a single entry.') - - software = software[0] - if software.public_image_name and software.public_image_size: - print(f'Software "{approach}" is already public.') - return - - print(software) - image_name = (slugify(software.tira_image_name)).replace('/', '-') - dockerhub_image = f'docker.io/webis/{task_id}-submissions:' + image_name.split('-tira-user-')[1].strip() - - software_definition = {'TIRA_IMAGE_TO_EXECUTE': software.tira_image_name, 'TIRA_IMAGE_TO_EXECUTE_IN_DOCKERHUB': dockerhub_image} - git_runner.archive_software('/tmp/', software_definition, download_images=True, persist_images=False, upload_images=True) - image_metadata = docker_image_details(software.tira_image_name) - - print(image_metadata) - print(image_name) - print(dockerhub_image) - software.public_image_name = dockerhub_image - software.public_image_size = image_metadata['size'] - software.save() - - def handle(self, *args, **options): - if 'organization' not in options or not options['organization']: - raise ValueError('Please pass --organization') - - git_runner = get_git_integration(options['organization'], None) - print(f'Use {git_runner}.') - - if 'archive_repository' in options and options['archive_repository']: - git_runner.archive_repository( - repo_name=options['archive_repository'], - working_directory='./' + options['archive_repository'], - download_images=options['archive_repository_download_images'].lower() == 'true', - persist_images=options['archive_repository_persist_images'].lower() == 'true', - upload_images=options['archive_repository_upload_images'].lower() == 'true', - persist_datasets=options['archive_repository_persist_datasets'].lower() == 'true', - copy_runs=options['archive_repository_copy_runs'].lower() == 'true' - ) - - if 'create_task_repository' in options and options['create_task_repository']: - self.run_command_create_task_repository(options, git_runner) - - if 'create_user_repository' in options and options['create_user_repository']: - self.run_command_create_user_repository(options, git_runner) - - if 'running_jobs' in options and options['running_jobs']: - self.run_command_running_jobs(options, git_runner) - - if 'stop_job_and_clean_up' in options and options['stop_job_and_clean_up']: - self.run_command_stop_job_and_clean_up(options, git_runner) - - if 'archive_repository_add_images_from_git_repo' in options and options['archive_repository_add_images_from_git_repo']: - self.archive_repository_add_images_from_git_repo(options) - - if 'archive_docker_software' in options and options['archive_docker_software']: - self.archive_docker_software(options['archive_docker_software'], git_runner) - - if 'run_image' in options and options['run_image']: - git_runner.start_git_workflow(task_id='clickbait-spoiling', - dataset_id='task-1-type-classification-validation-20220924-training', 
- vm_id='princess-knight', - run_id=get_tira_id(), - git_runner_image='webis/pan-clickbait-spoiling-evaluator:0.0.10', - git_runner_command="""bash -c '/clickbait-spoiling-eval.py --task 2 --ground_truth_spoiler $inputDataset --input_run $inputRun --output_prototext ${outputDir}/evaluation.prototext'""", - git_repository_id=2761, - evaluator_id='task-2-spoiler-generation-validation-20220924-training-evaluator', - user_image_to_execute='registry.webis.de/code-research/tira/tira-user-princess-knight/naive-baseline-task2:0.0.1-tira-docker-software-id-genteel-upstream', - user_command_to_execute='/naive-baseline-task-2.py --input $inputDataset/input.jsonl --output $outputDir/run.jsonl', - tira_software_id='17', - resources='small-resources-gpu', - - ) - - if 'clean_repository' in options and options['clean_repository']: -# raise ValueError('ToDo: please insert the git authentication token with the name "tira-automation-bot-gitlab-admin-token" (maiks keepass) to git_runner.py method get_git_runner' - git_runner.clean_task_repository(options['clean_repository']) - - if 'docker_images_in_user_repository' in options and options['docker_images_in_user_repository']: - print(git_runner.docker_images_in_user_repository(options['docker_images_in_user_repository'])) - - if 'rerank' in options and options['rerank']: - docker_software_id = 244 # "BM25 (tira-ir-starter-pyterrier)" - # Execute once in k8s: ./manage.py git_runner_cli --organization webis --rerank true - # Copy File - # Comment out dataset id - # configure ir-dataset: add ir_datasets image, ir_datasets re-ranking command, ir_datasets resources - # For new datasets: INSERT INTO tira_dockersoftware (`command`, `display_name`, `user_image_name`, `tira_image_name`, `deleted`, `task_id`, `vm_id`, `description`, `paper_link`, `ir_re_ranker`, `ir_re_ranking_input`) VALUES ('tbd', 'Anserini MS-MARCO Dev', 'tbd', 'tbd', 0, 'reneuir-2024', 'froebe', 'tbd', '', 0, 1); - # db statement: SELECT * FROM tira_run WHERE run_id LIKE '%rerank-%'; - # re-run with update re-ranking jobs - datasets = [ - 'cranfield-20230107-training', 'antique-test-20230107-training', 'vaswani-20230107-training', - 'msmarco-passage-trec-dl-2019-judged-20230107-training', 'medline-2004-trec-genomics-2004-20230107-training', - 'wapo-v2-trec-core-2018-20230107-training', 'cord19-fulltext-trec-covid-20230107-training', - 'disks45-nocr-trec7-20230209-training', 'disks45-nocr-trec8-20230209-training', - 'disks45-nocr-trec-robust-2004-20230209-training', 'nfcorpus-test-20230107-training', - 'argsme-touche-2020-task-1-20230209-training', 'argsme-touche-2021-task-1-20230209-training', - 'msmarco-passage-trec-dl-2020-judged-20230107-training', 'medline-2004-trec-genomics-2005-20230107-training', - 'gov-trec-web-2002-20230209-training', 'gov-trec-web-2003-20230209-training', 'gov-trec-web-2004-20230209-training', - 'gov2-trec-tb-2006-20230209-training', 'gov2-trec-tb-2004-20230209-training', 'gov2-trec-tb-2005-20230209-training', - 'medline-2017-trec-pm-2017-20230211-training', 'medline-2017-trec-pm-2018-20230211-training', - 'clueweb12-trec-misinfo-2019-20240214-training', 'longeval-heldout-20230513-training', - 'longeval-long-september-20230513-training', 'longeval-short-july-20230513-training', - 'longeval-train-20230513-training', 'trec-tip-of-the-tongue-dev-20230607-training', - 'longeval-2023-06-20240418-training', 'longeval-2023-08-20240418-training', - 'ir-acl-anthology-topics-leipzig-20240423-test', 'ir-acl-anthology-topics-leipzig-20240423-test', 
'ir-acl-anthology-topics-augsburg-20240525_0-test', 'ir-acl-anthology-20240504-training', 'ir-acl-anthology-topics-koeln-20240614-test', - 'ms-marco-100-queries-20240629-training', # /mnt/ceph/tira/data/runs/ms-marco-100-queries-20240629-training/froebe/2024-06-30-22-13-09-rerank-2024-06-30-22-23-08 - 'ms-marco-1000-queries-20240629-training', #/mnt/ceph/tira/data/runs/ms-marco-1000-queries-20240629-training/froebe/2024-06-30-22-14-54-rerank-2024-06-30-23-07-44 - 'ms-marco-all-dev-queries-20240629-training', #/mnt/ceph/tira/data/runs/dl-top-10-docs-20240701-training/tira-ir-starter/2024-07-01-15-45-55-rerank-2024-07-02-10-40-56 - 'dl-top-10-docs-20240701-training', # /mnt/ceph/tira/data/runs/dl-top-10-docs-20240701-training/froebe/2024-07-01-15-45-55-rerank-2024-07-02-10-40-56 - 'dl-top-100-docs-20240701-training', #/mnt/ceph/tira/data/runs/dl-top-100-docs-20240701-training/tira-ir-starter/2024-07-01-15-46-44-rerank-2024-07-02-10-49-03 - 'dl-top-1000-docs-20240701-training', # /mnt/ceph/tira/data/runs/dl-top-1000-docs-20240701-training/froebe/2024-07-01-15-47-04-rerank-2024-07-02-10-53-30 - ] - for dataset in datasets: - print(dataset) - tmp = create_re_rank_output_on_dataset(task_id='ir-benchmarks', vm_id='tira-ir-starter', software_id=None, docker_software_id=docker_software_id, dataset_id=dataset) - if tmp: - print(f'/mnt/ceph/tira/data/runs/{tmp["dataset_id"]}/{tmp["vm_id"]}/{tmp["run_id"]}/') - - - docker_software_id = 242 # "ChatNoir" - datasets = [ - 'clueweb09-en-trec-web-2009-20230107-training', 'clueweb09-en-trec-web-2010-20230107-training', - 'clueweb09-en-trec-web-2011-20230107-training', 'clueweb09-en-trec-web-2012-20230107-training', - 'clueweb12-trec-web-2013-20230107-training', 'clueweb12-trec-web-2014-20230107-training', - 'clueweb12-touche-2020-task-2-20230209-training', - 'clueweb12-touche-2021-task-2-20230209-training' - ] - for dataset in datasets: - print(dataset) - tmp = create_re_rank_output_on_dataset(task_id='ir-benchmarks', vm_id='tira-ir-starter', software_id=None, docker_software_id=docker_software_id, dataset_id=dataset) - if tmp: - print(f'/mnt/ceph/tira/data/runs/{tmp["dataset_id"]}/{tmp["vm_id"]}/{tmp["run_id"]}/') - - print(git_runner.extract_configuration_of_finished_job(2979, dataset_id='clinicaltrials-2017-trec-pm-2017-20230107-training', vm_id='tira-ir-starter', run_id='2023-01-12-15-02-11')) - - print('\n\nReranking Datasets:\n\n') - - - #for i in get_all_reranking_datasets(True).items(): - # print(i) - - add_input_run_id_to_all_rerank_runs() - - def add_arguments(self, parser): - parser.add_argument('--create_task_repository', default=None, type=str) - parser.add_argument('--create_user_repository', default=None, type=str) - parser.add_argument('--clean_repository', default=None, type=str) - parser.add_argument('--run_image', default=None, type=str) - parser.add_argument('--archive_repository', default=None, type=str) - parser.add_argument('--archive_repository_download_images', default='false', type=str) - parser.add_argument('--archive_repository_persist_images', default='false', type=str) - parser.add_argument('--archive_repository_upload_images', default='false', type=str) - parser.add_argument('--archive_repository_add_images_from_git_repo', default=None, type=str) - parser.add_argument('--archive_docker_software', default=None, type=str) - parser.add_argument('--archive_repository_persist_datasets', default='false', type=str) - parser.add_argument('--archive_repository_copy_runs', default='false', type=str) - 
parser.add_argument('--running_jobs', default=None, type=str) - parser.add_argument('--stop_job_and_clean_up', default=None, type=str) - parser.add_argument('--user_id', default=None, type=str) - parser.add_argument('--run_id', default=None, type=str) - parser.add_argument('--docker_images_in_user_repository', default=None, type=str) - parser.add_argument('--organization', default=None, type=str) - parser.add_argument('--rerank', default=None, type=str) - diff --git a/application/src/tira/management/commands/ir_datasets_loader_cli.py b/application/src/tira/management/commands/ir_datasets_loader_cli.py deleted file mode 100644 index 1d9356154..000000000 --- a/application/src/tira/management/commands/ir_datasets_loader_cli.py +++ /dev/null @@ -1,94 +0,0 @@ -import os -import django - -from django.conf import settings -import logging -import time -from contextlib import contextmanager -from django.core.management.base import BaseCommand, CommandError -from django.core.management import call_command -from django.core.cache import cache -import json - -from pathlib import Path -from tira.ir_datasets_loader import IrDatasetsLoader - -logger = logging.getLogger("tira") - - -class Command(BaseCommand): - """Run ir_datasets_loader via cli. - Loads a dataset by a given ir_datasets ID and maps the data to standardized formats - in preparation to full-rank or re-rank operations with PyTerrier - - @param --ir_dataset_id: required, string: the dataset ID as used by ir_datasets - @param --output_dataset_path: optional, string: the path to the directory where the output will be stored - @param --output_dataset_truth_path: optional, string: the path to the directory where the output will be stored - @param --include_original {True}: optional, boolean: flag to signal, if the original data should be included - @param --rerank: optional, string: if used, mapping will be in preparation for re-ranking operations and a path to file - with TREC-run formatted data is required - """ - - def import_dataset_for_fullrank(self, ir_datasets_id: str, output_dataset_path: Path, output_dataset_truth_path: Path, include_original: bool, skip_documents: bool, skip_qrels: bool, skip_duplicate_ids: bool, allowlist_path_ids: bool): - print(f'Task: Full-Rank -> create files: \n documents.jsonl \n queries.jsonl \n qrels.txt \n at {output_dataset_path}/') - datasets_loader = IrDatasetsLoader() - datasets_loader.load_dataset_for_fullrank(ir_datasets_id, output_dataset_path, output_dataset_truth_path, include_original, skip_documents = skip_documents, skip_qrels = skip_qrels, skip_duplicate_ids = skip_duplicate_ids, allowlist_path_ids = allowlist_path_ids) - - - def import_dataset_for_rerank(self, ir_datasets_id: str, output_dataset_path: Path, output_dataset_truth_path: Path, include_original: bool, run_file: Path, skip_qrels: bool): - print(f'Task: Re-Rank -> create files: \n rerank.jsonl \n qrels.txt \n at {output_dataset_path}/') - datasets_loader = IrDatasetsLoader() - datasets_loader.load_dataset_for_rerank(ir_datasets_id, output_dataset_path, output_dataset_truth_path, include_original, run_file) - - - def contains_all_required_args(self, options): - if 'input_dataset_directory' in options and options['input_dataset_directory']: - metadata = json.load(open(options['input_dataset_directory'] + '/metadata.json')) - options['ir_datasets_id'] = metadata['ir_datasets_id'] - options['include_original'] = metadata.get('include_original', 'true') - - return 'ir_datasets_id' in options and options['ir_datasets_id'] - - def handle(self, 
*args, **options): - if not self.contains_all_required_args(options): - raise ValueError('Could not handle options' + str(options)) - return - - truth_path = Path(options['output_dataset_truth_path']) if 'output_dataset_truth_path' in options and options['output_dataset_truth_path'] else None - output_path = Path(options['output_dataset_path']) if 'output_dataset_path' in options and options['output_dataset_path'] else None - - skip_qrels = options['skip_qrels'] or str(options['output_dataset_truth_path']).strip() == '/tmp' - - if options['rerank']: - self.import_dataset_for_rerank( - options['ir_datasets_id'], - output_path, - truth_path, - options['include_original'].lower() == 'true', - options['rerank'], - skip_qrels = skip_qrels, - ) - else: - self.import_dataset_for_fullrank( - options['ir_datasets_id'], - output_path, - truth_path, - options['include_original'].lower() == 'true', - skip_documents = options['skip_documents'], - skip_qrels = skip_qrels, - skip_duplicate_ids = options['skip_duplicate_ids'], - allowlist_path_ids = options['allowlist_path_ids'] - ) - - def add_arguments(self, parser): - parser.add_argument('--ir_datasets_id', default=None, type=str) - parser.add_argument('--output_dataset_path', default=None, type=Path) - parser.add_argument('--output_dataset_truth_path', default='/tmp', type=Path) - parser.add_argument('--include_original', default='True', type=str) - parser.add_argument('--skip_documents', default=False, type=bool) - parser.add_argument('--skip_qrels', default=False, type=bool) - parser.add_argument('--input_dataset_directory', default=None, type=str) - parser.add_argument('--skip_duplicate_ids', default=True, type=bool) - parser.add_argument('--rerank', default=None, type=Path) - parser.add_argument('--allowlist_path_ids', default=None, type=Path, required=False) - diff --git a/application/src/tira/management/commands/playground.py b/application/src/tira/management/commands/playground.py deleted file mode 100644 index 9b858e9af..000000000 --- a/application/src/tira/management/commands/playground.py +++ /dev/null @@ -1,32 +0,0 @@ -from django.core.management.base import BaseCommand - - -class Command(BaseCommand): - """Runs some playground command. 
- """ - - def handle(self, *args, **options): - from tira.git_runner import all_git_runners - - g = all_git_runners() - assert len(g) == 1 - for i in []: #['ul-nostalgic-turing', 'ul-trusting-neumann', 'ul-dreamy-zuse', 'ul-lucid-lovelace', 'ul-dazzling-euclid', 'ul-kangaroo-query-crew', 'ul-graceful-galileo', 'ul-suspicious-shannon', 'ul-the-golden-retrievers', 'ul-confident-torvalds']: - g[0].create_user_repository(i) - - #class tmp(): - # body= '{"group": "ir-lab-sose-2023-armafira", "team": "a", "username": "mf2", "email": "del-me", "affiliation": "mf2", "country": "c", "employment": "e", "participation": "p", "instructorName": "i", "instructorEmail": "i", "questions": ""}' - # session = {} - - #print(tmp().body) - # - #request = tmp() - #context = {'user_id': 'mf2'} - #print(add_registration(request, context, 'ir-lab-jena-leipzig-sose-2023', 'del-me-maik')) - - #from tira.ir_datasets_loader import run_irds_command - #run_irds_command('tmp-test-maik', 'pssda', 'webis/tira-ir-datasets-starter:0.0.45-pangram', '/irds_cli.sh --skip_qrels true --ir_datasets_id pangrams --output_dataset_path $outputDir', '/tmp/sda-1/1/') - #run_irds_command('tmp-test-maik', 'pssda', 'webis/tira-ir-datasets-starter:0.0.45-pangram', '/irds_cli.sh --skip_documents true --ir_datasets_id pangrams --output_dataset_truth_path $outputDir', '/tmp/sda-1/2/') - - - def add_arguments(self, parser): - pass diff --git a/application/src/tira/management/commands/run_to_evaluations.py b/application/src/tira/management/commands/run_to_evaluations.py deleted file mode 100644 index 2a9e54233..000000000 --- a/application/src/tira/management/commands/run_to_evaluations.py +++ /dev/null @@ -1,25 +0,0 @@ -from django.core.management.base import BaseCommand, CommandError -from django.core.management import call_command -from django.apps import apps -import tira.tira_model as model -import json - -class Command(BaseCommand): - help = 'export run to evaluations' - - def handle(self, *args, **options): - ret = {} - for dataset in model.get_datasets_by_task('ir-benchmarks', return_only_names=True): - mapping = {} - submissions = model.get_vms_with_reviews(dataset['dataset_id']) - for submission in submissions: - for run in submission['runs']: - if 'is_evaluation' not in run or not run['is_evaluation']: - continue - if run['input_run_id'] not in mapping: - mapping[run['input_run_id']] = [] - mapping[run['input_run_id']] += [run['run_id']] - ret[dataset['dataset_id']] = mapping - with open('run-to-evaluations.json', 'w') as f: - f.write(json.dumps(ret)) - diff --git a/application/src/tira/models.py b/application/src/tira/models.py deleted file mode 100644 index 3e7e7d2f6..000000000 --- a/application/src/tira/models.py +++ /dev/null @@ -1 +0,0 @@ -from tira.model import * \ No newline at end of file diff --git a/application/src/tira/static/tira/css/tira-style.css b/application/src/tira/static/tira/css/tira-style.css deleted file mode 100644 index 7adacc84e..000000000 --- a/application/src/tira/static/tira/css/tira-style.css +++ /dev/null @@ -1,147 +0,0 @@ -.index-main-cover { - background: #323232 url("../img/background2.jpg") no-repeat top right; - background-size: 100%; - height: 250px; - width: 100%; - min-height: 200px; - font-size: 1.05rem; -} - -.index-main-cover h1 { - font-size: 2.65rem -} - -.cover-background-fade { - position: absolute; - top: 0; - bottom: 0; - left: 0; - right: 0; - background: linear-gradient(to bottom, rgba(0, 0, 0, 0.2) 0%, rgba(0, 0, 0, 0.2) 40%, rgba(0, 0, 0, 0.85) 85%) -} - -.dataset-detail-icon { - 
height: 15px; - width: 15px; - min-width: 20px; - min-height: 20px; -} - -.dropdown-scroll { - /*height: auto;*/ - max-height: calc(100vh - 52ex); - overflow-x: auto; - overflow-y: visible; -} - -.scrollable-table { - overflow-x: visible; - overflow-y: hidden; -} - -a.uk-button-primary { - color: #fff; -} - -.tira-content label { - display: inline; -} - -.uk-checkbox, .uk-radio{ - width: 16px !important; -} - -.uk-accordion table, .uk-card table { - border-collapse: inherit; -} - -.table-background-yellow { - background-color: #fff6ed; -} - -.table-background-yellow .uk-button { - border-color: darkorange; -} - -.table-background-red { - background-color: #ffe3e3; -} - -.table-background-red .uk-button{ - border-color: #a41515; -} - -.table-background-green { - background-color: #e5ffe3; -} - -.table-background-green .uk-button{ - border-color: #006b1b; -} - -.tira-button-selected, .tira-button-selected:hover { - border-color: #006b1b; - color: #006b1b; -} - -@keyframes flash-green-keyframes { - from {background-color: #006b1b;} - to {background-color: white;} -} - -@keyframes flash-red-keyframes { - from {background-color: darkred;} - to {background-color: white;} -} - -.flash-red { - animation-name: flash-red-keyframes; - animation-duration: 3s; -} - -.flash-green { - animation-name: flash-green-keyframes; - animation-duration: 3s; -} - -@media (max-width: 800px) { - .index-main-cover { - height: 200px - } -} - -@media (max-width: 400px) { - .index-main-cover { - height: 175px - } -} - -@media (max-width: 300px) { - .index-main-cover { - height: 150px - } -} - -@media (min-width: 960px) { - .index-main-cover { - height: 300px; - min-height: 200px; - } -} -/* strange disraptor hover fixes? */ -.disraptor-content .sortable:hover { - background-color: revert; -} -.disraptor-content .webis-data-table .sortable { - cursor: revert; -} - -.submit-button { - width: 100%; - border-radius: 5px; - margin-top: 5px; -} - -.hide-element { - display: none; -} diff --git a/application/src/tira/static/tira/img/background1.jpg b/application/src/tira/static/tira/img/background1.jpg deleted file mode 100644 index f40dfc3e0..000000000 Binary files a/application/src/tira/static/tira/img/background1.jpg and /dev/null differ diff --git a/application/src/tira/static/tira/img/background2.jpg b/application/src/tira/static/tira/img/background2.jpg deleted file mode 100644 index c90e49dd4..000000000 Binary files a/application/src/tira/static/tira/img/background2.jpg and /dev/null differ diff --git a/application/src/tira/static/tira/img/google-icon.png b/application/src/tira/static/tira/img/google-icon.png deleted file mode 100644 index 7c4c9f85f..000000000 Binary files a/application/src/tira/static/tira/img/google-icon.png and /dev/null differ diff --git a/application/src/tira/static/tira/img/ia-icon.png b/application/src/tira/static/tira/img/ia-icon.png deleted file mode 100644 index 4a5b2c67b..000000000 Binary files a/application/src/tira/static/tira/img/ia-icon.png and /dev/null differ diff --git a/application/src/tira/static/tira/img/image-licence.txt b/application/src/tira/static/tira/img/image-licence.txt deleted file mode 100644 index b27b52844..000000000 --- a/application/src/tira/static/tira/img/image-licence.txt +++ /dev/null @@ -1,2 +0,0 @@ -v1 - https://www.istockphoto.com/de/foto/blauen-und-wei%C3%9Fen-kirchen-von-oia-santorin-griechenland-gm164015369-23359009 -v2 - https://www.shutterstock.com/image-photo/sunset-oia-santorini-greece-1005762703 \ No newline at end of file diff --git 
a/application/src/tira/static/tira/img/logo-tira-32x32.png b/application/src/tira/static/tira/img/logo-tira-32x32.png deleted file mode 100644 index e69de29bb..000000000 diff --git a/application/src/tira/static/tira/img/logo-tira-40x40-transparent.png b/application/src/tira/static/tira/img/logo-tira-40x40-transparent.png deleted file mode 100644 index ae5bc7167..000000000 Binary files a/application/src/tira/static/tira/img/logo-tira-40x40-transparent.png and /dev/null differ diff --git a/application/src/tira/static/tira/img/logo-tira-40x40.png b/application/src/tira/static/tira/img/logo-tira-40x40.png deleted file mode 100644 index 3ead968de..000000000 Binary files a/application/src/tira/static/tira/img/logo-tira-40x40.png and /dev/null differ diff --git a/application/src/tira/static/tira/img/zenodo-icon.png b/application/src/tira/static/tira/img/zenodo-icon.png deleted file mode 100644 index 5578cb0a2..000000000 Binary files a/application/src/tira/static/tira/img/zenodo-icon.png and /dev/null differ diff --git a/application/src/tira/static/tira/js/review.js b/application/src/tira/static/tira/js/review.js deleted file mode 100644 index af2f29b67..000000000 --- a/application/src/tira/static/tira/js/review.js +++ /dev/null @@ -1,107 +0,0 @@ -let vm_id = null; -let dataset_id = null; -let run_id = null; - -// change view when blind state changes -function setBlindButton(blinded){ - if ( blinded === false ) { - $('#blind-button').show() - $('#blind-text').show() - $('#unblind-button').hide() - $('#unblind-text').hide() - } else { - $('#blind-button').hide() - $('#blind-text').hide() - $('#unblind-button').show() - $('#unblind-text').show() - } -} - -// change view when published state changes -function setPublishButton(published){ - if ( published === true ) { - $('#publish-button').hide(); - $('#publish-text').hide(); - $('#unpublish-button').show(); - $('#unpublish-text').show(); - } else { - $('#publish-button').show(); - $('#publish-text').show(); - $('#unpublish-button').hide(); - $('#unpublish-text').hide(); - } -} - -// when publish state changes: notify server and update view if successful -function publish(bool) { - $.ajax({ - type:"GET", - url: "/publish/" + vm_id + "/" + dataset_id + "/" + run_id + "/" + bool, - data: {}, - success: function( data ) - { - if(data.status === "0"){ - setPublishButton(data.published) - } - } - }) -} - -// when blind state changes: notify server and update view if successful -function blind(bool) { - $.ajax({ - type:"GET", - url: "/blind/" + vm_id + "/" + dataset_id + "/" + run_id + "/" + bool, - data:{}, - success: function( data ) - { - if(data.status === "0"){ - setBlindButton(data.blinded) - } - } - }) -} - -/* Init state and events for this page -* - initial state of blind, publish, and review -* - events for publishing and blinding -* - events to uncheck checkboxes -*/ -function addReviewEvents(p, b, vid, did, rid) { - vm_id = vid; - dataset_id =did; - run_id = rid; - p = p !== "False"; // Convert booleans to JS style - b = b !== "False"; - setPublishButton(p) - setBlindButton(b) - - $('#blind-button').click(function () { - blind(true) - }) - $('#unblind-button').click(function () { - blind(false) - }) - $('#publish-button').click(function () { - publish(true) - }) - $('#unpublish-button').click(function () { - publish(false) - }) - $('#no-error-checkbox').change(function () { - if(this.checked) { - $('#software-error-checkbox').prop('checked', false); - $('#output-error-checkbox').prop('checked', false); - } - }) - 
$('#software-error-checkbox').change(function () { - if(this.checked) { - $('#no-error-checkbox').prop('checked', false); - } - }) - $('#output-error-checkbox').change(function () { - if(this.checked) { - $('#no-error-checkbox').prop('checked', false); - } - }) -} \ No newline at end of file diff --git a/application/src/tira/templates/tira/background_jobs.html b/application/src/tira/templates/tira/background_jobs.html deleted file mode 100644 index b894ac7e5..000000000 --- a/application/src/tira/templates/tira/background_jobs.html +++ /dev/null @@ -1,48 +0,0 @@ -{% extends 'tira/base.html' %} -{% load render_bundle from webpack_loader %} -{% block title %}TIRA{% endblock %} -{% block navbar %}{% include "tira/navbar.html" with nav_active='tasks' %}{% endblock %} - -{% block content %} -{% csrf_token %} - -{% if include_navigation %} - - - -{% endif %} - - - - - -
    - -
    - -
    -
    -

    TIRA — Background Job: {{job.title}}

- - If your job is finished (i.e., you see the output that you expect and there is a successful exit code of 0), please go back to your task /task/{{task}}. - Please refresh the page to see updates. -

    Metadata

    -
      -
    • Last Contact to the Job: {{job.last_contact}}
    • -
    • Exit code: {{job.exit_code}}
    • -
    - -

    Stdout/Stderr of the job

    -
    -        {{job.stdout}}
    -        
    -
    - -
    - -{% endblock %} - - - diff --git a/application/src/tira/templates/tira/base.html b/application/src/tira/templates/tira/base.html deleted file mode 100644 index 5b6f0c536..000000000 --- a/application/src/tira/templates/tira/base.html +++ /dev/null @@ -1,44 +0,0 @@ - -{% load static %} - - - {% block title %}{% endblock %} - - - - - - - - - - - - - - - - - - -
    -{% block navbar %}{% endblock %} -{% block content %}{% endblock %} -
    -
    - - - - - diff --git a/application/src/tira/templates/tira/git-repo-template/script.py b/application/src/tira/templates/tira/git-repo-template/script.py deleted file mode 100644 index af43c5d21..000000000 --- a/application/src/tira/templates/tira/git-repo-template/script.py +++ /dev/null @@ -1,16 +0,0 @@ -import argparse - -def parse_args(): - parser = argparse.ArgumentParser(prog='script') - parser.add_argument('-i', '--input', required=True, help='the input to the script.') - parser.add_argument('-o', '--output', required=True, help='the output of the script.') - - return parser.parse_args() - -if __name__ == '__main__': - args = parse_args() - - print(f'This is a demo, I ignore the passed input {args.input} and write some content into the output file {args.output}.') - with open(args.output + '/predictions.jsonl', 'w') as f: - f.write('hello world') - print('Done. I wrote "hello world" to {args.output}/predictions.jsonl.') diff --git a/application/src/tira/templates/tira/index.html b/application/src/tira/templates/tira/index.html deleted file mode 100644 index 229b3d190..000000000 --- a/application/src/tira/templates/tira/index.html +++ /dev/null @@ -1,161 +0,0 @@ -{% extends 'tira/base.html' %} -{% load render_bundle from webpack_loader %} -{% block title %}TIRA{% endblock %} -{% block description %}TIRA List of Tasks{% endblock %} -{% block navbar %}{% include "tira/navbar.html" with nav_active='tasks' %}{% endblock %} - -{% block content %} -{% csrf_token %} -
    -
    - -
    - -
    -
    -
    -

    TIRA — Evaluation as a Service

    -

    Improving the replicability of shared tasks in computer science

    -
    - -
    -
    - - -
    -

    Shared Tasks

    -
    -
    -
    -
    - -
    - {% if role == 'admin' or organizer_teams %} - -
    -
    - - -
    -
    - -
    -
    - - -
    -
    - {% elif role == 'guest' %} - - {% else %} - -
    -
    - -

    Organize Your Task

    - -

    - To organize a task, please add your organization first. If your organization already exists (please see https://www.tira.io/g?filter=tira_org), please ask a member of this organization to invite you. -

    - - -
    -
    - {% endif %} -
    -
    - - -
    - - - - - - - - - - - - - - {% for task in tasks %} - - - - - - - - - - {% empty %} - - - - {% endfor %} - -
    TaskFeaturedLast ActivityLatest DatasetsCreatedOrganization
    - {{ task.task_name }} - ({{ task.software_count }} submissions) - - {% if task.featured %} - Featured! - {% endif %} - {{ task.last_modified }}{{ task.dataset_last_created }}{{ task.dataset_first_created }}{{ task.organizer }}
    Sorry, we could not find any tasks.
    - - -
    -
    - -
    - -{% if role == 'admin' or organizer_teams or vm_ids %} -
    - -
    -{% endif %} - -{% render_bundle 'index' %} - -{% if include_navigation %} - - - -{% endif %} - - - - - - -{% endblock %} diff --git a/application/src/tira/templates/tira/login.html b/application/src/tira/templates/tira/login.html deleted file mode 100644 index 00781c3f6..000000000 --- a/application/src/tira/templates/tira/login.html +++ /dev/null @@ -1,42 +0,0 @@ -{% extends 'tira/base.html' %} -{% load static %} -{% block title %}TIRA - Login{% endblock %} -{% block description %}TIRA - Login{% endblock %} -{% block navbar %}{% include "tira/navbar.html" with nav_active='index' %}{% endblock %} - -{% block content %} - - - -
    -
    -

    Login

    -
    - -
    - {% if form_error %} - {{ form_error }} - {% endif %} -
    - {% csrf_token %} - {{ form.non_field_errors }} - {% for field in form %} -
    - {{ field.errors }} - {{ field.label_tag }} - {{ field }} -
    - {% endfor %} - - -
    -
    - -
    - -{% endblock %} \ No newline at end of file diff --git a/application/src/tira/templates/tira/navbar.html b/application/src/tira/templates/tira/navbar.html deleted file mode 100644 index 86ea0f9b4..000000000 --- a/application/src/tira/templates/tira/navbar.html +++ /dev/null @@ -1,62 +0,0 @@ -{% load static %} -{% if include_navigation %} -
    -
    - - - - -
    -
    - - -{% endif %} \ No newline at end of file diff --git a/application/src/tira/templates/tira/tira_git_cmd.py b/application/src/tira/templates/tira/tira_git_cmd.py deleted file mode 100644 index 604c0b72e..000000000 --- a/application/src/tira/templates/tira/tira_git_cmd.py +++ /dev/null @@ -1,226 +0,0 @@ -import tempfile -import os -import sys -import json -from pathlib import Path -from glob import glob -import docker -import pandas as pd -from packaging import version -import shutil - - -def all_softwares(): - ret = [] - for software_id, software_definition in ___load_softwares().items(): - ret += [{'approach': software_id, 'team': software_definition['TIRA_VM_ID'], 'image': software_definition['TIRA_IMAGE_TO_EXECUTE'], 'command': software_definition['TIRA_COMMAND_TO_EXECUTE']}] - - return pd.DataFrame(ret) - - -def all_datasets(): - ret = [] - for i in glob('*/training-datasets/'): - cnt = 0 - for j in glob(i + '*'): - cnt += len(list(open(j))) - - ret += [{'dataset': i.split('/training-datasets/')[0], 'records': cnt}] - - return pd.DataFrame(ret).sort_values('dataset') - - -def ___load_softwares(): - softwares = [json.loads(i) for i in open('.tira/submitted-software.jsonl')] - - return {i['TIRA_TASK_ID'] + '/' + i['TIRA_VM_ID'] + '/' + i['TIRA_SOFTWARE_NAME']: i for i in softwares} - - -def load_data(approach): - ret = [] - - for i in glob(approach + '*/training-datasets-truth/*.json*'): - ret += [pd.read_json(i, orient='records', lines=True)] - - return pd.concat(ret) - - -def __num(s): - try: - return int(s) - except ValueError: - try: - return float(s) - except ValueError: - return s - - -def __load_evaluators(): - evaluators = [json.loads(i) for i in open('.tira/evaluators.jsonl')] - ret = {i['TIRA_DATASET_ID']: i for i in evaluators} - - for evaluator in evaluators: - dataset_id = evaluator['TIRA_DATASET_ID'] - current_version = version.parse(ret[dataset_id]['TIRA_EVALUATION_IMAGE_TO_EXECUTE'].split(':')[-1]) - available_version = version.parse(evaluator['TIRA_EVALUATION_IMAGE_TO_EXECUTE'].split(':')[-1]) - - if available_version > current_version: - ret[dataset_id] = evaluator - - return ret - - -def __load_job_data(job_file): - job = [i.split('=') for i in open(job_file, 'r')] - return {k.strip():v.strip() for k,v in job} - - -def all_evaluated_appraoches(): - id_to_software_name = {int(i['TIRA_SOFTWARE_ID'].split('docker-software-')[1]):i['TIRA_SOFTWARE_NAME'] for i in ___load_softwares().values()} - ret = [] - for evaluation in glob('*/*/*/evaluation'): - job_dir = glob(evaluation + '/../job-executed-on*.txt') - if len(job_dir) != 1: - raise ValueError('Can not handle multiple job definitions: ', job_dir) - - job_definition = __load_job_data(job_dir[0]) - job_identifier = job_definition['TIRA_TASK_ID'] + '/' + job_definition['TIRA_VM_ID'] + '/' + id_to_software_name[int(job_definition['TIRA_SOFTWARE_ID'].split('docker-software-')[1])] - - for eval_run in glob(f"{evaluation}/*/output/"): - - try: - i = {'approach': job_identifier, 'dataset': job_definition['TIRA_DATASET_ID']} - i.update(__load_output(eval_run, evaluation=True)) - ret += [i] - except: - pass - - return pd.DataFrame(ret) - - -def all_evaluators(): - ret = [] - for i in __load_evaluators().values(): - ret += [{'dataset': i['TIRA_DATASET_ID'], 'image': i['TIRA_EVALUATION_IMAGE_TO_EXECUTE'], 'command': i['TIRA_EVALUATION_COMMAND_TO_EXECUTE']}] - - return pd.DataFrame(ret) - - -def __extract_image_and_command(identifier, evaluator=False): - softwares = ___load_softwares() if not evaluator else __load_evaluators() 
- - if identifier in softwares and not evaluator: - return softwares[identifier]['TIRA_IMAGE_TO_EXECUTE'], softwares[identifier]['TIRA_COMMAND_TO_EXECUTE'] - if evaluator: - for k, v in softwares.items(): - if k.startswith(identifier): - return v['TIRA_DATASET_ID'], v['TIRA_EVALUATION_IMAGE_TO_EXECUTE'], v['TIRA_EVALUATION_COMMAND_TO_EXECUTE'] - - raise ValueError(f'There is no {("evaluator" if evaluator else "software")} identified by "{identifier}". Choices are: {sorted(list(softwares))}') - - -def __load_output(directory, evaluation=False, verbose=False): - files = glob(str(directory) + '/*' ) - - if evaluation: - files = [i for i in files if i.endswith('.prototext')] - - if len(files) != 1: - raise ValueError('Expected exactly one output file. Got: ', files) - - files = files[0] - - if verbose: - print(f'Read file from {files}') - - if evaluation: - ret = {} - for i in [i for i in open(files, 'r').read().split('measure') if 'key:' in i and 'value:' in i]: - key = i.split('key:')[1].split('value')[0].split('"')[1] - value = i.split('key:')[1].split('value')[1].split('"')[1] - - ret[key.strip()] = __num(value.strip()) - - return ret - else: - return pd.read_json(files, lines=True, orient='records') - - -def __normalize_command(cmd): - to_normalize = {'inputRun': '/tira-data/output', - 'outputDir': '/tira-data/output', - 'inputDataset': '/tira-data/input' - } - - if 'inputRun' in cmd: - to_normalize['outputDir'] = '/tira-data/eval_output' - to_normalize['inputDataset'] = '/tira-data/input_truth' - - for k,v in to_normalize.items(): - cmd = cmd.replace('$' + k, v).replace('${' + k + '}', v) - - return cmd - - -def persist_dataset(data, verbose): - tmp_dir = Path(tempfile.TemporaryDirectory().name) - input_dir = tmp_dir / 'input' - output_dir = tmp_dir / 'output' - eval_output_dir = tmp_dir / 'eval_output' - - os.makedirs(str(output_dir.absolute()), exist_ok=True) - os.makedirs(str(eval_output_dir.absolute()), exist_ok=True) - - if type(data) == pd.DataFrame: - if verbose: - print(f'Write {len(data)} records to {input_dir}/input.jsonl') - os.makedirs(str(input_dir.absolute()), exist_ok=True) - data.to_json(input_dir / 'input.jsonl', lines=True, orient='records') - shutil.copytree(input_dir, tmp_dir / 'input_truth') - else: - shutil.copytree(Path(data) / 'training-datasets', input_dir) - shutil.copytree(Path(data) / 'training-datasets-truth', tmp_dir / 'input_truth') - - return tmp_dir - - -def run(identifier=None, image=None, command=None, data=None, evaluate=False, verbose=False): - if image is None or command is None: - image, command = __extract_image_and_command(identifier) - try: - environ = os.environ.copy() - if sys.platform == "linux" and os.path.exists(os.path.expanduser("~/.docker/desktop/docker.sock")): - environ["DOCKER_HOST"] = "unix:///" + os.path.expanduser("~/.docker/desktop/docker.sock") - client = docker.from_env(environment=environ) - - assert len(client.images.list()) >= 0 - assert len(client.containers.list()) >= 0 - except Exception as e: - raise ValueError('It seems like docker is not installed?', e) - - data_dir = persist_dataset(data, verbose) - command = __normalize_command(command) - - if verbose: - print(f'Run software with: docker run --rm -ti -v {tmp_dir}:/tira-data --entrypoint sh {image} {command}') - - client.containers.run(image, entrypoint='sh', command=f'-c "{command}"', volumes={str(data_dir): {'bind': '/tira-data/', 'mode': 'rw'}}) - - if evaluate: - if type(evaluate) is not str: - evaluate = data - evaluate, image, command = 
__extract_image_and_command(evaluate, evaluator=True) - command = __normalize_command(command) - if verbose: - print(f'Evaluate software with: docker run --rm -ti -v {tmp_dir}:/tira-data --entrypoint sh {image} {command}') - - client.containers.run(image, entrypoint='sh', command=f'-c "{command}"', volumes={str(data_dir): {'bind': '/tira-data/', 'mode': 'rw'}}) - - if evaluate: - approach_name = identifier if identifier else f'"{command}"@{image}' - eval_results = {'approach': approach_name, 'evaluate': evaluate} - eval_results.update(__load_output(Path(data_dir) / 'eval_output', evaluation=True, verbose=verbose)) - return __load_output(Path(data_dir) / 'output', verbose=verbose), pd.DataFrame([eval_results]) - else: - return __load_output(Path(data_dir) / 'output', verbose=verbose) - diff --git a/application/src/tira/templates/tira/veutify_page.html b/application/src/tira/templates/tira/veutify_page.html deleted file mode 100644 index d4ed18a63..000000000 --- a/application/src/tira/templates/tira/veutify_page.html +++ /dev/null @@ -1,26 +0,0 @@ - -{% load static %} - - - - - - TIRA Reproducible Experiments - - - - - - {% csrf_token %} - -
    - - - diff --git a/application/src/tira/tests/tests.py b/application/src/tira/tests/tests.py deleted file mode 100644 index b9717b536..000000000 --- a/application/src/tira/tests/tests.py +++ /dev/null @@ -1,10 +0,0 @@ -from django.test import TestCase - - -class TestSetup(TestCase): - def setUp(self) -> None: - self.setup = True - - def test_setup_success(self): - """ test if tests work """ - self.assertTrue(self.setup) diff --git a/application/src/tira/urls.py b/application/src/tira/urls.py deleted file mode 100644 index bac1816dd..000000000 --- a/application/src/tira/urls.py +++ /dev/null @@ -1,129 +0,0 @@ -from django.urls import path, re_path - -from django.views.generic import TemplateView - -from . import views -from .endpoints import organizer_api, admin_api, vm_api, data_api, diffir_api, serp_api - -urlpatterns = [ - path('', views.veutify_page, name='index'), - path('task', views.veutify_page, name='index'), - path('tasks', views.veutify_page, name='index'), - path('background_jobs//', views.background_jobs, name='background_jobs'), - - path('task//user//dataset//download/.zip', views.download_rundir, name='download_rundir'), - path('data-download/git-repo-template//.zip', views.download_repo_template, name='download_repo_template'), - path('data-download///.zip', views.download_datadir, name='download_datadir'), - - re_path(r'^frontend-vuetify/.*', views.veutify_page, name='vuetify_page'), - re_path(r'^task-overview/.*', views.veutify_page, name='vuetify_page'), - path('task/', views.veutify_page, name='vuetify_page'), - path('task//', views.veutify_page, name='vuetify_page'), - path('task//', views.veutify_page, name='vuetify_page'), - re_path(r'^submit/.*', views.veutify_page, name='vuetify_page'), - re_path(r'^tirex/.*', views.veutify_page, name='tirex'), - path('login', views.login, name='login'), - path('logout', views.logout, name='logout'), - - # grpc client endpoints - path('task//vm//add_software/vm', vm_api.software_add, name='software_add'), - path('task//vm//add_software/docker', vm_api.docker_software_add, name='docker_software_add'), - path('task//vm//add_software/upload', vm_api.add_upload, name='add_upload'), - path('task//vm//save_software/docker/', vm_api.docker_software_save, name='docker_software_save'), - path('task//vm//save_software/upload/', vm_api.upload_save, name='docker_software_save'), - path('task//vm//save_software/vm/', vm_api.software_save, name='software_save'), - path('task//vm//delete_software/vm/', vm_api.software_delete, name='software_delete'), - path('task//vm//delete_software/docker/', vm_api.docker_software_delete, name='docker_delete'), - path('task//vm//run_details/', vm_api.run_details, name='run_details'), - path('task//vm//software_details/', vm_api.software_details, name='software_details'), - - path('task//vm//upload//', vm_api.upload, name='upload'), - path('task//vm//upload-delete/', vm_api.delete_upload, name='deleteupload'), - - path('grpc//vm_info', vm_api.vm_info, name='vm_info'), - path('grpc//vm_state', vm_api.vm_state, name='vm_state'), - path('grpc//vm_start', vm_api.vm_start, name='vm_start'), - path('grpc//vm_shutdown', vm_api.vm_shutdown, name="vm_shutdown"), - path('grpc//vm_stop', vm_api.vm_stop, name="vm_stop"), - path('grpc//vm_shutdown', vm_api.vm_shutdown, name="vm_shutdown"), - path('grpc//run_abort', vm_api.run_abort, name="run_abort"), - path('grpc//vm_running_evaluations', vm_api.vm_running_evaluations, name="vm_running_evaluations"), - path('grpc//get_running_evaluations', vm_api.get_running_evaluations, 
name="get_running_evaluations"), - path('grpc///run_execute/vm/', vm_api.run_execute, name="run_execute"), - path('grpc///run_execute/docker////', vm_api.run_execute_docker_software, name='run_execute_docker_software'), - - path('grpc//run_eval//', vm_api.run_eval, name="run_eval"), - path('grpc//run_delete//', vm_api.run_delete, name="run_delete"), - path('grpc///stop_docker_software/', vm_api.stop_docker_software, name="stop_docker_software"), - - path('tira-admin/reload/vms', admin_api.admin_reload_vms, name='tira-admin-reload-vms'), - path('tira-admin/reload/datasets', admin_api.admin_reload_datasets, name='tira-admin-reload-datasets'), - path('tira-admin/reload/tasks', admin_api.admin_reload_tasks, name='tira-admin-reload-tasks'), - path('tira-admin/reload-data', admin_api.admin_reload_data, name='tira-admin-reload-data'), - path('tira-admin/reload-runs/', admin_api.admin_reload_runs, name='tira-admin-reload-runs'), - - path('tira-admin/create-vm', admin_api.admin_create_vm, name='tira-admin-create-vm'), - path('tira-admin/archive-vm', admin_api.admin_archive_vm, name='tira-admin-archive-vm'), - path('tira-admin/modify-vm', admin_api.admin_modify_vm, name='tira-admin-modify-vm'), - path('tira-admin/export-participants/.csv', data_api.export_registrations, name='export_registrations'), - path('tira-admin//create-task', admin_api.admin_create_task, name='tira-admin-create-task'), - path('tira-admin/edit-task/', admin_api.admin_edit_task, name='tira-admin-edit-task'), - path('tira-admin/delete-task/', admin_api.admin_delete_task, name='tira-admin-delete-task'), - path('tira-admin/add-dataset/', admin_api.admin_add_dataset, name='tira-admin-add-dataset'), - path('tira-admin/upload-dataset///', admin_api.admin_upload_dataset, name='tira-admin-upload-dataset'), - path('tira-admin/import-irds-dataset/', admin_api.admin_import_ir_dataset, name='tira-admin-import-irds-dataset'), - path('tira-admin/edit-dataset/', admin_api.admin_edit_dataset, name='tira-admin-edit-dataset'), - path('tira-admin/delete-dataset/', admin_api.admin_delete_dataset, name='tira-admin-delete-dataset'), - path('tira-admin/add-organizer/', admin_api.admin_add_organizer, name='tira-admin-add-organizer'), - path('tira-admin/edit-organizer/', admin_api.admin_edit_organizer, name='tira-admin-edit-organizer'), - path('tira-admin/edit-review///', admin_api.admin_edit_review, name='tira-admin-edit-review'), - path('tira-admin/create-group/', admin_api.admin_create_group, name='tira-admin-create-group'), - - path('publish////', organizer_api.publish, name='publish'), - path('blind////', organizer_api.blind, name='blind'), - - path('api/evaluations//', data_api.get_evaluations_by_dataset, name='get_evaluations_by_dataset'), - path('api/evaluations-of-vm//', data_api.get_evaluations_by_vm, name='get_evaluations_by_vm'), - path('api/evaluation//', data_api.get_evaluation, name='get_evaluation'), - path('api/submissions//', data_api.get_submissions_by_dataset, name='get_submissions_by_dataset'), - path('api/docker-softwares-details//', vm_api.docker_software_details, name='software_details'), - path('api/huggingface_model_mounts/vm//', vm_api.huggingface_model_mounts, name='huggingface_model_mounts'), - path('api/upload-group-details///', vm_api.upload_group_details, name='upload_id'), - path('api/evaluations_of_run//', data_api.get_evaluations_of_run, name='evaluations_of_run'), - path('api/configuration-of-evaluation//', data_api.get_configuration_of_evaluation, name='get_configuration_of_evaluation'), - path('api/list-runs////', 
data_api.runs, name='runs'), - path('api/ova-list', data_api.get_ova_list, name='get_ova_list'), - path('api/host-list', data_api.get_host_list, name='get_host_list'), - path('api/organizer-list', data_api.get_organizer_list, name='get_organizer_list'), - path('api/task-list', data_api.get_task_list, name='get_task_list'), - path('api/task/', data_api.get_task, name='get_task'), - path('api/registration_formular/', data_api.get_registration_formular, name='get_registration_formular'), - path('api/dataset/', data_api.get_dataset, name='get_dataset'), - path('api/datasets_by_task/', data_api.get_dataset_for_task, name='get_dataset_for_task'), - path('api/organizer/', data_api.get_organizer, name='get_organizer'), - path('api/role', data_api.get_role, name='get_role'), - path('api/task//user/', data_api.get_user, name='get_user'), - path('api/task//user//refresh-docker-images', data_api.update_docker_images, name="get_updated_docker_images"), - path('api/count-of-team-submissions/', organizer_api.get_count_of_team_submissions, name='get_count_of_team_submissions'), - path('api/count-of-missing-reviews/', organizer_api.get_count_of_missing_reviews, name='get_count_of_missing_reviews'), - path('api/task//user//software/running/', data_api.get_running_software, name='get_running_software'), - path('api/task//public-submissions', data_api.public_submissions, name='public_submissions'), - path('api/task//submission-details//', data_api.public_submission, name='public_submission'), - path('api/review///', data_api.get_review, name='get_review'), - path('api/registration/add_registration//', data_api.add_registration, name='add_registration'), - path('api/submissions-for-task///', data_api.submissions_for_task, name="submissions_for_task"), - path('api/tirex-components', data_api.tirex_components, name='tirex_components'), - path('api/tirex-snippet', data_api.get_snippet_to_run_components, name='get_snippet_to_run_components'), - path('api/snippets-for-tirex-components', data_api.get_snippet_to_run_components, name='get_snippet_to_run_components'), - path('api/re-ranking-datasets/', data_api.reranking_datasets, name='reranking_datasets'), - path('api/submissions-of-user/', data_api.submissions_of_user, name='submissions_of_user'), - path('api/add_software_submission_git_repository//', vm_api.add_software_submission_git_repository, name='add_software_submission_git_repository'), - path('api/get_software_submission_git_repository//', vm_api.get_software_submission_git_repository, name='get_software_submission_git_repository'), - path('api/token/', vm_api.get_token, name='get_token'), - path('api/import-submission////', data_api.import_submission, name='import_submission'), - path('diffir////', diffir_api.diffir, name='diffir'), - path('serp//user//dataset///', serp_api.serp, name='serp'), - -] - -app_name = 'tira' diff --git a/application/src/tira/views.py b/application/src/tira/views.py deleted file mode 100644 index 5df2dd840..000000000 --- a/application/src/tira/views.py +++ /dev/null @@ -1,239 +0,0 @@ -from django.shortcuts import render, redirect -from django.template.loader import render_to_string -from django.http import JsonResponse, FileResponse -from django.conf import settings -from django.core.cache import cache -from django.utils.safestring import mark_safe -from django.core.serializers.json import DjangoJSONEncoder -import logging - -import tira.tira_model as model -from .tira_data import get_run_runtime, get_run_file_list, get_stderr, get_stdout, get_tira_log -from .authentication 
import auth -from .checks import check_permissions, check_resources_exist, check_conditional_permissions -from .forms import * -from pathlib import Path -from datetime import datetime as dt -import os -import zipfile -import json -from http import HTTPStatus -import tempfile - -logger = logging.getLogger("tira") -logger.info("Views: Logger active") - - -def add_context(func): - def func_wrapper(request, *args, **kwargs): - uid = auth.get_user_id(request) - vm_id = None - - if args and 'vm_id' in args: - vm_id = args['vm_id'] - elif kwargs and 'vm_id' in kwargs: - vm_id = kwargs['vm_id'] - - context = { - "include_navigation": True if settings.DEPLOYMENT == "legacy" else False, - "user_id": uid, - "role": auth.get_role(request, user_id=uid, vm_id=vm_id), - "organizer_teams": mark_safe(json.dumps(auth.get_organizer_ids(request))) - } - return func(request, context, *args, **kwargs, ) - - return func_wrapper - - -@add_context -def index(request, context): - context["tasks"] = model.get_tasks(include_dataset_stats=True) - context["organizer_teams"] = auth.get_organizer_ids(request) - context["vm_ids"] = auth.get_vm_ids(request, None) - - - if context["role"] != auth.ROLE_GUEST: - context["vm_id"] = auth.get_vm_id(request, context["user_id"]) - - return render(request, 'tira/index.html', context) - - -@check_permissions -@add_context -def background_jobs(request, context, task_id, job_id): - context['task'] = task_id - context['job'] = model.get_job_details(task_id, None, job_id) - - return render(request, 'tira/background_jobs.html', context) - - -@add_context -def veutify_page(request, context, **kwargs): - return render(request, 'tira/veutify_page.html', context) - - -@add_context -def login(request, context): - """ Hand out the login form - Note that this is only called in legacy deployment. 
Disraptor is supposed to catch the route to /login - """ - - if request.method == "POST": - form = LoginForm(request.POST) - if form.is_valid(): - # read form data, do auth.login(request, user_id, password) - valid = auth.login(request, user_id=form.cleaned_data["user_id"], password=form.cleaned_data["password"]) - if valid: - return redirect('tira:index') - else: - context["form_error"] = "Login Invalid" - else: - form = LoginForm() - - context["form"] = form - return render(request, 'tira/login.html', context) - - -def logout(request): - auth.logout(request) - return redirect('tira:index') - - -def _add_task_to_context(context, task_id, dataset_id): - datasets = model.get_datasets_by_task(task_id) - - context["datasets"] = json.dumps({ds['dataset_id']: ds for ds in datasets}, cls=DjangoJSONEncoder) - context['selected_dataset_id'] = dataset_id - context['test_dataset_ids'] = json.dumps([ds['dataset_id'] for ds in datasets if ds['is_confidential']], - cls=DjangoJSONEncoder) - context['training_dataset_ids'] = json.dumps([ds['dataset_id'] for ds in datasets if not ds['is_confidential']], - cls=DjangoJSONEncoder) - task = model.get_task(task_id) - context["task_id"] = task['task_id'] - context["task_name"] = json.dumps(task['task_name'], cls=DjangoJSONEncoder) - context["organizer"] = json.dumps(task['organizer'], cls=DjangoJSONEncoder) - context["task_description"] = json.dumps(task['task_description'], cls=DjangoJSONEncoder) - context["web"] = json.dumps(task['web'], cls=DjangoJSONEncoder) - - -def _add_user_vms_to_context(request, context, task_id, include_docker_details=True): - if context["role"] != auth.ROLE_GUEST: - allowed_vms_for_task = model.all_allowed_task_teams(task_id) - vm_id = auth.get_vm_id(request, context["user_id"]) - vm_ids = [] - - if allowed_vms_for_task is None or vm_id in allowed_vms_for_task: - context["vm_id"] = vm_id - - if getattr(auth, "get_vm_ids", None): - vm_ids = [i for i in auth.get_vm_ids(request, context["user_id"]) if allowed_vms_for_task is None or i in allowed_vms_for_task] - - context['user_vms_for_task'] = vm_ids - - docker = ['Your account has no docker registry. Please contact an organizer.'] - - if include_docker_details and len(vm_ids) > 0: - docker = model.load_docker_data(task_id, vm_ids[0], cache, force_cache_refresh=False) - - if not docker: - docker = ['Docker is not enabled for this task.'] - else: - docker = docker['docker_software_help'].split('\n') - docker = [i for i in docker if 'docker login' in i or 'docker push' in i or 'docker build -t' in i] - docker = [i.replace('/my-software:0.0.1', '/').replace('', '').replace('', '').replace('

    ', '').replace('

    ', '') for i in docker] - docker = [i if 'docker build -t' not in i else 'docker tag ' + i.split('docker build -t')[-1].split(' -f ')[0].strip() for i in docker] - - context['docker_documentation'] = docker - - -def zip_run(dataset_id, vm_id, run_id): - """ Zip the given run and hand it out for download. Deletes the zip on the server again. """ - path_to_be_zipped = Path(settings.TIRA_ROOT) / "data" / "runs" / dataset_id / vm_id / run_id - zipped = Path(f"{path_to_be_zipped.stem}.zip") - - with zipfile.ZipFile(zipped, "w", zipfile.ZIP_DEFLATED) as zipf: - for f in path_to_be_zipped.rglob('*'): - zipf.write(f, arcname=f.relative_to(path_to_be_zipped.parent)) - - return zipped - -def zip_runs(vm_id, dataset_ids_and_run_ids, name): - """ Zip the given run and hand it out for download. Deletes the zip on the server again. """ - - zipped = Path(f"{name}.zip") - - with zipfile.ZipFile(zipped, "w", zipfile.ZIP_DEFLATED) as zipf: - for dataset_id, run_id in dataset_ids_and_run_ids: - path_to_be_zipped = Path(settings.TIRA_ROOT) / "data" / "runs" / dataset_id / vm_id / run_id - for f in path_to_be_zipped.rglob('*'): - zipf.write(f, arcname=f.relative_to(path_to_be_zipped.parent)) - - return zipped - -@check_conditional_permissions(public_data_ok=True) -@check_resources_exist('json') -def download_rundir(request, task_id, dataset_id, vm_id, run_id): - """ Zip the given run and hand it out for download. Deletes the zip on the server again. """ - zipped = zip_run(dataset_id, vm_id, run_id) - - if zipped.exists(): - response = FileResponse(open(zipped, "rb"), as_attachment=True, filename=f"{run_id}-{zipped.stem}.zip") - os.remove(zipped) - return response - else: - return JsonResponse({'status': 1, 'reason': f'File does not exist: {zipped}'}, - status=HTTPStatus.INTERNAL_SERVER_ERROR) - - -@check_conditional_permissions(public_data_ok=True) -@check_resources_exist('json') -def download_input_rundir(request, task_id, dataset_id, vm_id, run_id): - return download_rundir(request, task_id, dataset_id, vm_id, input_run_id) - -def download_repo_template(request, task_id, vm_id): - with tempfile.TemporaryDirectory() as tmpdirname: - directory = Path(tmpdirname) / f'git-repo-template-{task_id}' - os.makedirs(directory, exist_ok=True) - os.makedirs(directory / '.github' / 'workflows', exist_ok=True) - context = { - 'task_id': task_id, - 'image': f'registry.webis.de/code-research/tira/tira-user-{vm_id}/github-action-submission:0.0.1', - 'input_dataset': model.reference_dataset(task_id), - } - - with (directory / 'README.md').open('w') as readme, (directory / 'script.py').open('w') as script, (directory / 'requirements.txt').open('w') as requirements, (directory / 'Dockerfile').open('w') as dockerfile, (directory / '.github' / 'workflows' / 'upload-software-to-tira.yml').open('w') as ci: - readme.write(render_to_string('tira/git-repo-template/README.md', context=context)) - dockerfile.write(render_to_string('tira/git-repo-template/Dockerfile', context=context)) - requirements.write('argparse') - script.write(render_to_string('tira/git-repo-template/script.py', context=context)) - ci.write(render_to_string('tira/git-repo-template/github-action.yml', context=context)) - - zipped = Path(tmpdirname) / f'{task_id}.zip' - with zipfile.ZipFile(zipped, "w") as zipf: - for f in directory.rglob('*'): - zipf.write(f, arcname=f.relative_to(directory)) - - return FileResponse(open(zipped, "rb"), as_attachment=True, filename=f"git-repo-template-{task_id}.zip") - -@check_permissions -def download_datadir(request, 
dataset_type, input_type, dataset_id): - input_type = input_type.lower().replace('input', '') - input_type = '' if len(input_type) < 2 else input_type - task_id = model.get_dataset(dataset_id)['task'] - - path = model.model.data_path / f'{dataset_type}-datasets{input_type}' / task_id / dataset_id - - if not path.exists(): - return JsonResponse({'status': 1, 'reason': f'File does not exist: {path}'}, - status=HTTPStatus.INTERNAL_SERVER_ERROR) - - zipped = Path(f"{path.stem}.zip") - with zipfile.ZipFile(zipped, "w") as zipf: - for f in path.rglob('*'): - zipf.write(f, arcname=f.relative_to(path.parent)) - - if zipped.exists(): - response = FileResponse(open(zipped, "rb"), as_attachment=True, filename=f"{dataset_id}-{dataset_type}{input_type}.zip") - os.remove(zipped) - return response - diff --git a/application/src/tira/__init__.py b/application/src/tira_app/__init__.py similarity index 100% rename from application/src/tira/__init__.py rename to application/src/tira_app/__init__.py diff --git a/application/src/tira/admin.py b/application/src/tira_app/admin.py similarity index 89% rename from application/src/tira/admin.py rename to application/src/tira_app/admin.py index 3fc8ecd07..8cf462a55 100644 --- a/application/src/tira/admin.py +++ b/application/src/tira_app/admin.py @@ -1,5 +1,6 @@ from django.contrib import admin -import tira.model as modeldb + +from . import model as modeldb # Register your models here. diff --git a/application/src/tira/apps.py b/application/src/tira_app/apps.py similarity index 61% rename from application/src/tira/apps.py rename to application/src/tira_app/apps.py index 452aa5575..926187884 100644 --- a/application/src/tira/apps.py +++ b/application/src/tira_app/apps.py @@ -2,4 +2,5 @@ class TiraConfig(AppConfig): - name = 'tira' + name = "tira_app" + label = "tira" diff --git a/application/src/tira_app/authentication.py b/application/src/tira_app/authentication.py new file mode 100644 index 000000000..90fef41bb --- /dev/null +++ b/application/src/tira_app/authentication.py @@ -0,0 +1,546 @@ +import json +import logging +import os +from functools import wraps +from typing import Optional + +from django.conf import settings +from django.http import HttpRequest, HttpResponseNotAllowed +from slugify import slugify + +from . 
import tira_model as model + +logger = logging.getLogger(__name__) + +# TODO: this file can be reduced significantly when the differen deployment configurations are removed + + +class Authentication(object): + """Base class for Authentication and Role Management""" + + subclasses: dict[str, type] = {} + _AUTH_SOURCE = "superclass" + ROLE_TIRA = "tira" # super admin if we ever need it + ROLE_ADMIN = "admin" # is admin for the requested resource, so all permissions + ROLE_PARTICIPANT = "participant" # has edit but not admin permissions - user-header is set, group is set + ROLE_USER = ( # is logged in, but has no edit permissions - user-header is set, group (tira-vm-vm_id) is not set + "user" + ) + ROLE_FORBIDDEN = "forbidden" + ROLE_GUEST = "guest" # not logged in -> user-header is not set + + def __init_subclass__(cls): + """Init base class based on parameter on creation""" + super().__init_subclass__() + cls.subclasses[cls._AUTH_SOURCE] = cls + + def __new__(cls, authentication_source=None): + """Create base class based on parameter of construction + :param api: the api type + :return: the instance + """ + return super(Authentication, cls).__new__(cls.subclasses[authentication_source]) + + def __init__(self, **kwargs): + pass + + @staticmethod + def get_default_vm_id(user_id: str) -> str: + return f"{user_id}-default" + + def get_role( + self, + request: HttpRequest, + user_id: Optional[str] = None, + vm_id: Optional[str] = None, + task_id: Optional[str] = None, + ): + """Determine the role of the user on the requested page (determined by the given directives). + + @param request: djangos request object associated to the http request + @param user_id: id of the user requesting the resource + @param resource_id: to check which role user_id has on the requested resource + @param resource_type: the type of the resource: {vm_id, task_id, dataset_id} + :return ROLE_GUEST: if disraptor token is wrong or user is not logged in + :return ROLE_ADMIN: if user is in group 'admins' + :return ROLE_PARTICIPANT: if user is in the vm-group of the given :param vm_id: + :return ROLE_USER: if user is logged in, but not in the group of :param vm_id: + """ + return self.ROLE_GUEST + + def get_auth_source(self): + return self._AUTH_SOURCE + + def get_user_id(self, request: HttpRequest): + return None + + def get_vm_id(self, request: HttpRequest, user_id): + return "None" + + def login(self, request: HttpRequest, **kwargs): + pass + + def logout(self, request: HttpRequest, **kwargs): + pass + + def create_group(self, vm_id): + return {"status": 0, "message": f"create_group is not implemented for {self._AUTH_SOURCE}"} + + def get_organizer_ids(self, request: HttpRequest, user_id=None): + pass + + def get_vm_ids(self, request: HttpRequest, user_id=None): + pass + + def user_is_organizer_for_endpoint( + self, + request, + path, + task_id, + organizer_id_from_params, + dataset_id_from_params, + run_id_from_params, + vm_id_from_params, + role, + ): + return False + + def is_admin_for_task(self, request): + """ + Returns true if the user is an admin for the task specified in the request (false if the request url does not + point to a task or if the user is only admin for some other task). 
+ """ + organizer_ids = auth.get_organizer_ids(request) + + if not organizer_ids or not isinstance(organizer_ids, list) or len(organizer_ids) < 1: + return False + + task_id = None + if request.path_info.startswith("submit/") or request.path_info.startswith("/submit/"): + task_id = (request.path_info + "/").split("submit/")[1].split("/")[0] + elif request.path_info.startswith("task-overview/") or request.path_info.startswith("/task-overview/"): + task_id = (request.path_info + "/").split("task-overview/")[1].split("/")[0] + + if not task_id: + return False + + try: + task = model.get_task(task_id) + except Exception: + return False + + if not task: + return False + + return task is not None and "organizer_id" in task and task["organizer_id"] in organizer_ids + + +def check_disraptor_token(func): + @wraps(func) + def func_wrapper(auth, request, *args, **kwargs): + _DISRAPTOR_APP_SECRET_KEY = os.getenv("DISRAPTOR_APP_SECRET_KEY") + + if not request.headers.get("X-Disraptor-App-Secret-Key", None) == _DISRAPTOR_APP_SECRET_KEY: + return HttpResponseNotAllowed("Access forbidden.") + + return func(auth, request, *args, **kwargs) + + return func_wrapper + + +class DisraptorAuthentication(Authentication): + _AUTH_SOURCE = "disraptor" + + def __init__(self, **kwargs): + """Disraptor authentication that delegates all authentication to discourse/disraptor. + @param kwargs: + unused, only for consistency to the LegacyAuthentication + """ + super(DisraptorAuthentication, self).__init__(**kwargs) + self.discourse_client = model.discourse_api_client() + + def _get_user_id(self, request: HttpRequest) -> Optional[str]: + """Return the content of the X-Disraptor-User header set in the http request""" + user_id = request.headers.get("X-Disraptor-User", None) + if user_id is not None: + vm_id = Authentication.get_default_vm_id(user_id) + _ = model.get_vm(vm_id, create_if_none=True) + return user_id + + def _is_in_group(self, request: HttpRequest, group_name="tira_reviewer") -> bool: + """return True if the user is in the given disraptor group""" + return group_name in request.headers.get("X-Disraptor-Groups", "").split(",") + + def _parse_tira_groups(self, groups: list[str]) -> dict[str, str]: + """find all groups with 'tira_' prefix and return key and value of the group. + Note: Groupnames should be in the format '[tira_]key[_value]' + """ + for group in groups: + g = group.split("_") + if g[0] == "tira": + try: + key = g[1] + except IndexError: + continue + try: + value = g[2] + except IndexError: + value = None + yield {"key": key, "value": value} + + def _get_user_groups(self, request: HttpRequest, group_type: str = "vm") -> list: + """read groups from the disraptor groups header. + @param group_type: {"vm", "org"}, indicate the class of groups. 
+ """ + all_groups = request.headers.get("X-Disraptor-Groups", "None").split(",") + user_id = f"{request.headers.get('X-Disraptor-User', None)}-default" + + if group_type == "vm": # if we check for groups of a virtual machine + ret = [group["value"] for group in self._parse_tira_groups(all_groups) if group["key"] == "vm"] + + # Some discourse vm groups are created manually, so we have to ensure that they also have a vm + for vm_id in ret: + _ = model.get_vm(vm_id, create_if_none=True) + + return ret + [user_id] + if group_type == "org": # if we check for organizer groups of a user + return [group["value"] for group in self._parse_tira_groups(all_groups) if group["key"] == "org"] + + raise ValueError(f"Can't handle group type {group_type}") + + @check_disraptor_token + def get_role( + self, + request: HttpRequest, + user_id: Optional[str] = None, + vm_id: Optional[str] = None, + task_id: Optional[str] = None, + ): + """Determine the role of the user on the requested page (determined by the given directives). + This is a minimalistic implementation that suffices for the current features of TIRA. + + This implementation relies only on the request object, since disraptor takes care of the rest. + + Currently only checks: (1) is user admin, (2) otherwise, is user owner of the vm (ROLE_PARTICIPANT) + """ + + if ( + self._is_in_group(request, "admins") + or self._is_in_group(request, "tira_reviewer") + or self.is_admin_for_task(request) + ): + return self.ROLE_ADMIN + + user_groups = self._get_user_groups(request, group_type="vm") + # Role for users with permissions for the vm + if vm_id in user_groups: + return self.ROLE_PARTICIPANT + elif user_id and not vm_id: + return self.ROLE_USER + # Role without permissions for the vm + elif user_id and vm_id in user_groups: + return self.ROLE_FORBIDDEN + return self.ROLE_GUEST + + @check_disraptor_token + def get_user_id(self, request: HttpRequest): + """public wrapper of _get_user_id that checks conditions""" + return self._get_user_id(request) + + @check_disraptor_token + def get_vm_id(self, request: HttpRequest, user_id=None): + """return the vm_id of the first vm_group ("tira-vm-") found. + If there is no vm-group, return "no-vm-assigned" + """ + + return self.get_vm_ids(request, user_id)[0] + + @check_disraptor_token + def get_organizer_ids(self, request: HttpRequest, user_id=None): + """return the organizer ids of all organizer teams that the user is found in ("tira-org-"). + If there is no vm-group, return the empty list + """ + + return self._get_user_groups(request, group_type="org") + + @check_disraptor_token + def get_vm_ids(self, request: HttpRequest, user_id=None): + """returns a list of all vm_ids of the all vm_groups ("tira-vm-") found. + If there is no vm-group, a list with "no-vm-assigned" is returned + """ + vms = self._get_user_groups(request) + user_id = self._get_user_id(request) + + if user_id is None: + return vms + + return vms if len(vms) >= 1 else [Authentication.get_default_vm_id(user_id)] + + def _create_discourse_vm_group(self, vm): + """Create the vm group in the distaptor. Members of this group will be owners of the vm and + have all permissions. + :param vm: a vm dict as returned by tira_model.get_vm + {"vm_id", "user_password", "roles", "host", "admin_name", "admin_pw", "ip", "ssh", "rdp", "archived"} + + """ + group_bio = f"""Members of this group have access to the virtual machine {vm['vm_id']}:

+    • Host: {vm['host']}
+    • User: {vm['vm_id']}
+    • Password: {vm['user_password']}
+    • SSH Port: {vm['ssh']}
+    • RDP Port: {vm['rdp']}
+    • SSH Example: sshpass -p {vm['user_password']} ssh {vm['vm_id']}@{vm['host']} -p {vm['ssh']} -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no


+ Please contact us when you have questions. + """ + return self.discourse_client.create_group(f"tira_vm_{vm['vm_id']}", group_bio, 2) + + def notify_organizers_of_new_participants(self, data, task_id): + task = model.get_task(task_id) + message = ( + """Dear Organizers """ + + task["organizer"] + + """ of """ + + task_id + + """ + +This message intends to inform you that your task """ + + task_id + + """ has a new registration: + +""" + + json.dumps(data) + + """ + +Best regards""" + ) + + self.discourse_client.write_message( + f'New Registration to {task_id} by {data["group"]}', + message, + "tira_org_" + slugify(task["organizer"].lower()), + ) + + def create_group(self, vm): + """Create the vm group in the disraptor. Members of this group will be owners of the vm and + have all permissions. + :param vm: a vm dict as returned by tira_model.get_vm + """ + vm_group = self._create_discourse_vm_group(vm) + invite_link = self.discourse_client.create_invite_link(vm_group) + message = f"""Invite Mail: Please use this link to create your login for TIRA: {invite_link}. + After login to TIRA, you can find the credentials and usage examples for your + dedicated virtual machine {vm['vm_id']} here: https://www.tira.io/g/tira_vm_{vm['vm_id']}""" + + return message + + def create_organizer_group(self, organizer_name, user_name): + group_bio = f"""Members of this team organize shared tasks in TIRA as {organizer_name}. +

+ + Please do not hesitate to design your page according to your needs.""" + + group_id = self.discourse_client.create_group(f"tira_org_{organizer_name}", group_bio, 0) + self.discourse_client.add_user_as_owner_to_group(group_id, user_name) + + def create_docker_group(self, team_name, user_name): + group_bio = f"""Members of this team participate in shared tasks as {team_name}.

    + + Please do not hesitate to design your team's page accorging to your needs.""" + + group_id = self.discourse_client.create_group(f"tira_vm_{slugify(team_name)}", group_bio, 0) + model.get_vm(team_name, create_if_none=True) + self.discourse_client.add_user_as_owner_to_group(group_id, user_name) + + def user_is_organizer_for_endpoint( + self, + request, + path, + task_id, + organizer_id_from_params, + dataset_id_from_params, + run_id_from_params, + vm_id_from_params, + role, + ): + if request is None or path is None: + return False + if not path.startswith("/"): + path = "/" + path + + organizer_ids = self.get_organizer_ids(request) + + if path == "/api/organizer-list" and ( + role == auth.ROLE_PARTICIPANT or role == auth.ROLE_ADMIN or role == auth.ROLE_USER or role == auth.ROLE_TIRA + ): + return True + + if path.startswith("/tira-admin/add-organizer/"): + existing_organizer_ids = set([i["organizer_id"] for i in model.get_organizer_list()]) + orga_name = path.split("/tira-admin/add-organizer/")[1] + + return ( + len(orga_name.split("/")) == 1 + and orga_name not in existing_organizer_ids + and organizer_id_from_params == orga_name + and ( + role == auth.ROLE_PARTICIPANT + or role == auth.ROLE_ADMIN + or role == auth.ROLE_USER + or role == auth.ROLE_TIRA + ) + ) + + if not organizer_ids or len(organizer_ids) < 1: + return False + + organizer_id_from_dataset_id, organizer_id_from_run_id = None, None + + if run_id_from_params: + try: + dataset_id_from_run = model.get_run(run_id=run_id_from_params, vm_id=None, dataset_id=None)["dataset"] + organizer_id_from_run_id = model.get_dataset(dataset_id_from_run)["organizer_id"] + except Exception: + return False + + if dataset_id_from_params: + try: + organizer_id_from_dataset_id = model.get_dataset(dataset_id_from_params).get("organizer_id", None) + except Exception: + return False + + potentially_inconsistent_ids = [ + organizer_id_from_params, + organizer_id_from_dataset_id, + organizer_id_from_run_id, + ] + if len(set([i for i in potentially_inconsistent_ids if i is not None])) > 1: + return False + + task = None + if task_id: + try: + task = model.get_task(task_id) + except Exception: + pass + + return ( + path == "/api/organizer-list" + or (task and "organizer_id" in task and task["organizer_id"] in organizer_ids) + or ( + organizer_id_from_params + and organizer_id_from_params in organizer_ids + and path in set(f"/tira-admin/{i}/create-task" for i in organizer_ids) + ) + or ( + organizer_id_from_params + and organizer_id_from_params in organizer_ids + and path in set(f"/tira-admin/edit-organizer/{i}" for i in organizer_ids) + ) + or ( + organizer_id_from_run_id + and organizer_id_from_run_id in organizer_ids + and path.startswith(f"/task/{organizer_id_from_run_id}/vm/") + ) + or ( + organizer_id_from_run_id + and organizer_id_from_run_id in organizer_ids + and organizer_id_from_dataset_id + and path == f"/api/review/{dataset_id_from_params}/{vm_id_from_params}/{run_id_from_params}" + ) + or ( + organizer_id_from_run_id + and organizer_id_from_run_id in organizer_ids + and organizer_id_from_dataset_id + and path == f"/tira-admin/edit-review/{dataset_id_from_params}/{vm_id_from_params}/{run_id_from_params}" + ) + or ( + organizer_id_from_dataset_id + and organizer_id_from_dataset_id in organizer_ids + and path == f"/tira-admin/edit-dataset/{dataset_id_from_params}" + ) + or ( + organizer_id_from_dataset_id + and organizer_id_from_dataset_id in organizer_ids + and path == f"/tira-admin/delete-dataset/{dataset_id_from_params}" + ) + or ( + 
organizer_id_from_dataset_id + and organizer_id_from_dataset_id in organizer_ids + and path.startswith("/data-download/") + and path.endswith(f"/{dataset_id_from_params}.zip") + ) + ) + + +auth = Authentication(authentication_source="disraptor") + + +""" +Trusted Header Authentication implementation to integrate with Django +""" + +from typing import NamedTuple + +from django.contrib.auth.models import AnonymousUser +from rest_framework import authentication + +_DISRAPTOR_APP_SECRET_KEY = os.getenv("DISRAPTOR_APP_SECRET_KEY") + + +class User(NamedTuple): + username: str + is_staff: bool + + +class TiraGuest(AnonymousUser): + def __init__(self) -> None: + super().__init__() + self.username = "guest" + self._groups: list[str] = [] + self.is_staff = False + + def __str__(self) -> str: + return self.username + + @property + def is_anonymous(self): + return True + + @property + def is_authenticated(self): + return False + + +class TiraUser(AnonymousUser): + def __init__(self, username: str, groups: list[str]) -> None: + super().__init__() + self.username = username + self._groups = groups + self.is_staff = "admins" in groups or "tira_reviewer" in groups + + def __str__(self) -> str: + return self.username + + @property + def is_anonymous(self): + return False + + @property + def is_authenticated(self): + return True + + +class TrustedHeaderAuthentication(authentication.BaseAuthentication): + + def authenticate(self, request) -> tuple[User, None]: + if not request.headers.get("X-Disraptor-App-Secret-Key", None) == _DISRAPTOR_APP_SECRET_KEY: + return HttpResponseNotAllowed("Access forbidden.") + username = request.headers.get("X-Disraptor-User") + groups = request.headers.get("X-Disraptor-Groups") + grouplist = [] if not groups else groups.split(",") + if not username: + return (TiraGuest(), None) + + return (TiraUser(username, grouplist), None) diff --git a/application/src/tira/checks.py b/application/src/tira_app/checks.py similarity index 55% rename from application/src/tira/checks.py rename to application/src/tira_app/checks.py index 35c9d900f..854acee0d 100644 --- a/application/src/tira/checks.py +++ b/application/src/tira_app/checks.py @@ -1,20 +1,27 @@ -from django.shortcuts import render, redirect -from django.urls import resolve -from .authentication import auth -import tira.tira_model as model -from django.core.exceptions import PermissionDenied -from django.http import JsonResponse, Http404, HttpResponseNotAllowed -from http import HTTPStatus +import logging from functools import wraps + from django.conf import settings -import logging +from django.http import Http404, HttpRequest, HttpResponseNotAllowed, HttpResponseRedirect, JsonResponse +from django.shortcuts import redirect +from django.urls import resolve +from . import tira_model as model +from .authentication import auth logger = logging.getLogger("tira") +def redirect_to_login() -> HttpResponseRedirect: + """ + Returns a redirection response that redirects the user to the login page ("/login"). Note that this URL does not + "exist" (what even is existance) but is redirected by the reverse proxy to the authentication page. + """ + return redirect("/login", permanent=False) + + def check_permissions(func): - """ A decorator that checks if the requesting user has the needed permissions to call the decorated function. + """A decorator that checks if the requesting user has the needed permissions to call the decorated function. This decorator redirects or blocks requests if the requesting user does not have permission. 
This decorator considers the resources requested in the decorated function: - vm_id: returns the permissions the requesting user has on the vm @@ -24,94 +31,140 @@ def check_permissions(func): # TODO at some point, this should not return http responses anymore but send a 'forbidden' json response :raises: django.core.exceptions.PermissionDenied """ + @wraps(func) - def func_wrapper(request, *args, **kwargs): - vm_id = kwargs.get('vm_id', None) - user_id = kwargs.get('user_id', None) + def func_wrapper(request: HttpRequest, *args, **kwargs): + vm_id = kwargs.get("vm_id", None) + user_id = kwargs.get("user_id", None) if vm_id is None and user_id is not None: # some endpoints say user_id instead of vm_id vm_id = user_id - dataset_id = kwargs.get('dataset_id', None) - run_id = kwargs.get('run_id', None) - task_id = kwargs.get('task_id', None) - organizer_id = kwargs.get('organizer_id', None) + dataset_id = kwargs.get("dataset_id", None) + run_id = kwargs.get("run_id", None) + task_id = kwargs.get("task_id", None) + organizer_id = kwargs.get("organizer_id", None) role = auth.get_role(request, user_id=auth.get_user_id(request)) if role == auth.ROLE_ADMIN or role == auth.ROLE_TIRA: return func(request, *args, **kwargs) - + # Listing runs for ir-lab at CLEF 2024 is public for the moment - if request.path_info.startswith('api/list-runs/ir-lab-padua-2024/') or request.path_info.startswith('/api/list-runs/ir-lab-padua-2024/') or request.path_info.startswith('api/list-runs/ir-lab-sose-2024/') or request.path_info.startswith('/api/list-runs/ir-lab-sose-2024/'): - return func(request, *args, **kwargs) - + if ( + request.path_info.startswith("api/list-runs/ir-lab-padua-2024/") + or request.path_info.startswith("/api/list-runs/ir-lab-padua-2024/") + or request.path_info.startswith("api/list-runs/ir-lab-sose-2024/") + or request.path_info.startswith("/api/list-runs/ir-lab-sose-2024/") + ): + return func(request, *args, **kwargs) + # Listing runs for ir-lab at CLEF 2024 is public for the moment - if (request.path_info.startswith('task/ir-lab-padua-2024/user/') or request.path_info.startswith('/task/ir-lab-padua-2024/user/') or request.path_info.startswith('task/ir-lab-sose-2024/user/') or request.path_info.startswith('/task/ir-lab-sose-2024/user/')) and request.path_info.endswith('.zip') and '/user/' in request.path_info and '/dataset/' in request.path_info and 'download' in request.path_info and request.path_info.split('/user/')[1].split('/')[1] == 'dataset' and request.path_info.split('/user/')[1].split('/')[3] == 'download': + if ( + ( + request.path_info.startswith("task/ir-lab-padua-2024/user/") + or request.path_info.startswith("/task/ir-lab-padua-2024/user/") + or request.path_info.startswith("task/ir-lab-sose-2024/user/") + or request.path_info.startswith("/task/ir-lab-sose-2024/user/") + ) + and request.path_info.endswith(".zip") + and "/user/" in request.path_info + and "/dataset/" in request.path_info + and "download" in request.path_info + and request.path_info.split("/user/")[1].split("/")[1] == "dataset" + and request.path_info.split("/user/")[1].split("/")[3] == "download" + ): return func(request, *args, **kwargs) # SERP endpoint is allowed for runs that are published and unblinded - if (request.path_info.startswith('serp/') or request.path_info.startswith('/serp/')) and run_id \ - and run_id in request.path_info and model.run_is_public_and_unblinded(run_id): + if ( + (request.path_info.startswith("serp/") or request.path_info.startswith("/serp/")) + and run_id + and run_id in 
request.path_info + and model.run_is_public_and_unblinded(run_id) + ): return func(request, *args, **kwargs) - if (request.path_info.startswith('data-download/') or request.path_info.startswith('/data-download/')) and dataset_is_public(dataset_id): + if ( + request.path_info.startswith("data-download/") or request.path_info.startswith("/data-download/") + ) and dataset_is_public(dataset_id): return func(request, *args, **kwargs) - if 'run_id_1' in kwargs or 'run_id_2' in kwargs: - return HttpResponseNotAllowed(f"Access forbidden.") + if "run_id_1" in kwargs or "run_id_2" in kwargs: + return HttpResponseNotAllowed("Access forbidden.") - if request.path_info.startswith(f'/task/{task_id}/vm/{vm_id}/software_details/'): - software_name = request.path_info.split(f'/task/{task_id}/vm/{vm_id}/software_details/')[1].split('/')[0] + if request.path_info.startswith(f"/task/{task_id}/vm/{vm_id}/software_details/"): + software_name = request.path_info.split(f"/task/{task_id}/vm/{vm_id}/software_details/")[1].split("/")[0] software = model.get_docker_software_by_name(software_name, vm_id, task_id) - if software and 'public_image_name' in software and software['public_image_name']: + if software and "public_image_name" in software and software["public_image_name"]: return func(request, *args, **kwargs) - if request.path_info.startswith(f'/task/{task_id}/vm/{vm_id}/run_details/'): + if request.path_info.startswith(f"/task/{task_id}/vm/{vm_id}/run_details/"): review = model.model.get_run_review(run_id=run_id, dataset_id=dataset_id, vm_id=vm_id) - logger.warning(f'Show run details for {run_id}: {review}.') - print(f'Show run details for {run_id}: {review}.') - if review and 'published' in review and 'blinded' in review and review['published'] and not review['blinded']: + logger.warning(f"Show run details for {run_id}: {review}.") + print(f"Show run details for {run_id}: {review}.") + if ( + review + and "published" in review + and "blinded" in review + and review["published"] + and not review["blinded"] + ): return func(request, *args, **kwargs) - if auth.user_is_organizer_for_endpoint(request=request, path=request.path_info, task_id=task_id, - organizer_id_from_params=organizer_id, dataset_id_from_params=dataset_id, - run_id_from_params=run_id, vm_id_from_params=vm_id, role=role): + if auth.user_is_organizer_for_endpoint( + request=request, + path=request.path_info, + task_id=task_id, + organizer_id_from_params=organizer_id, + dataset_id_from_params=dataset_id, + run_id_from_params=run_id, + vm_id_from_params=vm_id, + role=role, + ): return func(request, *args, **kwargs) if vm_id: if not model.vm_exists(vm_id): # If the resource does not exist - return redirect('tira:login') + return redirect_to_login() role = auth.get_role(request, user_id=auth.get_user_id(request), vm_id=vm_id) if run_id and dataset_id: # this prevents participants from viewing hidden runs if not model.run_exists(vm_id, dataset_id, run_id): - return Http404(f'The VM {vm_id} has no run with the id {run_id} on {dataset_id}.') + return Http404(f"The VM {vm_id} has no run with the id {run_id} on {dataset_id}.") review = model.get_run_review(dataset_id, vm_id, run_id) dataset = model.get_dataset(dataset_id) - is_review_visible = (not review['blinded']) or review['published'] or not dataset.get('is_confidential', True) + is_review_visible = ( + (not review["blinded"]) or review["published"] or not dataset.get("is_confidential", True) + ) if not is_review_visible: role = auth.ROLE_USER if task_id: # This checks if the registration requirement 
is fulfilled. if model.get_task(task_id)["require_registration"]: if not model.user_is_registered(task_id, request): - return HttpResponseNotAllowed(f"Access forbidden. You must register first.") + return HttpResponseNotAllowed("Access forbidden. You must register first.") if role == auth.ROLE_PARTICIPANT: return func(request, *args, **kwargs) - elif role == auth.ROLE_GUEST: # If guests access a restricted resource, we send them to login - return redirect('tira:login') - - if 'docker_software_id' in kwargs and vm_id: - docker_software = model.get_docker_software(int(kwargs['docker_software_id'])) - if docker_software and 'vm_id' in docker_software and auth.ROLE_PARTICIPANT == auth.get_role(request, user_id=auth.get_user_id(request), vm_id=docker_software['vm_id']): + elif role == auth.ROLE_GUEST: + return redirect_to_login() + + if "docker_software_id" in kwargs and vm_id: + docker_software = model.get_docker_software(int(kwargs["docker_software_id"])) + if ( + docker_software + and "vm_id" in docker_software + and auth.ROLE_PARTICIPANT + == auth.get_role(request, user_id=auth.get_user_id(request), vm_id=docker_software["vm_id"]) + ): return func(request, *args, **kwargs) - return HttpResponseNotAllowed(f"Access forbidden.") + return HttpResponseNotAllowed("Access forbidden.") return func_wrapper -def check_conditional_permissions(restricted=False, public_data_ok=False, private_run_ok=False, - not_registered_ok=False): - """ A decorator that checks if the requesting user has the needed permissions to call the decorated function. +def check_conditional_permissions( + restricted=False, public_data_ok=False, private_run_ok=False, not_registered_ok=False +): + """A decorator that checks if the requesting user has the needed permissions to call the decorated function. This decorator redirects or blocks requests if the requesting user does not have permission. 
This decorator considers the resources requested in the decorated function: - vm_id: returns the permissions the requesting user has on the vm @@ -127,43 +180,50 @@ def check_conditional_permissions(restricted=False, public_data_ok=False, privat :raises: django.core.exceptions.PermissionDenied """ + def decorator(func): @wraps(func) def func_wrapper(request, vm_id, *args, dataset_id=None, run_id=None, **kwargs): # Admins can access and do everything - kwargs['vm_id'] = vm_id - user_id = kwargs.get('user_id', None) - task_id = kwargs.get('task_id', None) + kwargs["vm_id"] = vm_id + user_id = kwargs.get("user_id", None) + task_id = kwargs.get("task_id", None) if vm_id is None and user_id is not None: # some endpoints say user_id instead of vm_id vm_id = user_id if dataset_id: - kwargs['dataset_id'] = dataset_id + kwargs["dataset_id"] = dataset_id if run_id: - kwargs['run_id'] = run_id + kwargs["run_id"] = run_id role = auth.get_role(request, user_id=auth.get_user_id(request)) if role == auth.ROLE_ADMIN or role == auth.ROLE_TIRA: return func(request, *args, **kwargs) - elif auth.user_is_organizer_for_endpoint(request=request, path=request.path_info, task_id=task_id, - organizer_id_from_params=None, - dataset_id_from_params=dataset_id, - run_id_from_params=run_id, vm_id_from_params=vm_id, role=role): + elif auth.user_is_organizer_for_endpoint( + request=request, + path=request.path_info, + task_id=task_id, + organizer_id_from_params=None, + dataset_id_from_params=dataset_id, + run_id_from_params=run_id, + vm_id_from_params=vm_id, + role=role, + ): return func(request, *args, **kwargs) elif restricted: - return HttpResponseNotAllowed(f"Access restricted.") + return HttpResponseNotAllowed("Access restricted.") if vm_id: # First we determine the role of the user on the resource he requests if not model.vm_exists(vm_id): - return redirect('tira:login') + return redirect_to_login() role_on_vm = auth.get_role(request, user_id=auth.get_user_id(request), vm_id=vm_id) if run_id and dataset_id: role = auth.ROLE_USER if not model.run_exists(vm_id, dataset_id, run_id): - return Http404(f'The VM {vm_id} has no run with the id {run_id} on {dataset_id}.') + return Http404(f"The VM {vm_id} has no run with the id {run_id} on {dataset_id}.") review = model.get_run_review(dataset_id, vm_id, run_id) - is_review_visible = (not review['blinded']) or review['published'] - is_dataset_confidential = model.get_dataset(dataset_id).get('is_confidential', True) + is_review_visible = (not review["blinded"]) or review["published"] + is_dataset_confidential = model.get_dataset(dataset_id).get("is_confidential", True) # if the run is visible OR if we make an exception for public datasets if is_review_visible: role = role_on_vm @@ -180,60 +240,70 @@ def func_wrapper(request, vm_id, *args, dataset_id=None, run_id=None, **kwargs): if task_id and not not_registered_ok: # This checks if the registration requirement is fulfilled. if model.get_task(task_id)["require_registration"]: if not model.user_is_registered(task_id, request): - return HttpResponseNotAllowed(f"Access forbidden. You must register first.") + return HttpResponseNotAllowed("Access forbidden. 
You must register first.") - if not restricted and role == auth.ROLE_PARTICIPANT: # Participants can access when it is their resource, the resource is visible to them, and the call is not restricted + # Participants can access when it is their resource, the resource is visible to them, and the call is not + # restricted + if not restricted and role == auth.ROLE_PARTICIPANT: return func(request, *args, **kwargs) if public_data_ok and run_is_public(run_id, vm_id, dataset_id): return func(request, *args, **kwargs) elif role == auth.ROLE_GUEST: # If guests access a restricted resource, we send them to login - return redirect('tira:login') + return redirect_to_login() - return HttpResponseNotAllowed(f"Access forbidden.") + return HttpResponseNotAllowed("Access forbidden.") return func_wrapper + return decorator def run_is_public(run_id, vm_id, dataset_id): - if not run_id or not vm_id or not dataset_id or (dataset_id not in settings.PUBLIC_TRAINING_DATA and not dataset_id.endswith('-training')): + if ( + not run_id + or not vm_id + or not dataset_id + or (dataset_id not in settings.PUBLIC_TRAINING_DATA and not dataset_id.endswith("-training")) + ): return False i = model.get_run_review(dataset_id, vm_id, run_id) - if not (i and 'blinded' in i and 'published' in i and not i['blinded'] and i['published']): + if not (i and "blinded" in i and "published" in i and not i["blinded"] and i["published"]): return False return dataset_is_public(dataset_id) -def dataset_is_public(dataset_id): - if not dataset_id or (dataset_id not in settings.PUBLIC_TRAINING_DATA and not dataset_id.endswith('-training')): +def dataset_is_public(dataset_id: str) -> bool: + if not dataset_id or (dataset_id not in settings.PUBLIC_TRAINING_DATA and not dataset_id.endswith("-training")): return False i = model.get_dataset(dataset_id) - return i and 'is_confidential' in i and not i['is_confidential'] and 'is_deprecated' in i and not i['is_deprecated'] + return bool(i) and ("is_confidential" in i) and not i["is_confidential"] and ("is_deprecated" in i) and not i["is_deprecated"] -def check_resources_exist(reply_as='json'): - """ A decorator that checks if the resources given as parameters actually exist. 
""" +def check_resources_exist(reply_as="json"): + """A decorator that checks if the resources given as parameters actually exist.""" + def decorator(func): @wraps(func) def func_wrapper(request, *args, **kwargs): def return_fail(message, request_vm_instead=False): logger.warning(message) - if reply_as == 'json': - response = JsonResponse({'status': 1, 'message': message}) + if reply_as == "json": + response = JsonResponse({"status": 1, "message": message}) return response if request_vm_instead: - return redirect('tira:login') + return redirect_to_login() return Http404(message) if "vm_id" in kwargs: if not model.vm_exists(kwargs["vm_id"]): logger.error(f"{resolve(request.path_info).url_name}: vm_id does not exist") if "task_id" in kwargs: - return return_fail(f'There is no vm with id {kwargs["vm_id"]} matching your request.', - request_vm_instead=True) + return return_fail( + f'There is no vm with id {kwargs["vm_id"]} matching your request.', request_vm_instead=True + ) return return_fail(f"vm_id {kwargs['vm_id']} does not exist", request_vm_instead=True) @@ -267,4 +337,5 @@ def return_fail(message, request_vm_instead=False): return func(request, *args, **kwargs) return func_wrapper + return decorator diff --git a/application/src/tira/data/FileDatabase.py b/application/src/tira_app/data/FileDatabase.py similarity index 64% rename from application/src/tira/data/FileDatabase.py rename to application/src/tira_app/data/FileDatabase.py index b9d288033..9a687075e 100644 --- a/application/src/tira/data/FileDatabase.py +++ b/application/src/tira_app/data/FileDatabase.py @@ -1,19 +1,33 @@ -from google.protobuf.text_format import Parse -from pathlib import Path import logging -from django.conf import settings -from datetime import datetime, timezone import re -from shutil import rmtree +from datetime import datetime from datetime import datetime as dt +from datetime import timezone +from pathlib import Path +from shutil import rmtree +from typing import Optional, TypeVar -from tira.util import TiraModelWriteError, TiraModelIntegrityError -from tira.proto import TiraClientWebMessages_pb2 as modelpb -from tira.util import auto_reviewer, extract_year_from_dataset_id +from django.conf import settings +from google.protobuf.text_format import Parse + +from ..proto import TiraClientWebMessages_pb2 as modelpb +from ..util import TiraModelWriteError, auto_reviewer, extract_year_from_dataset_id logger = logging.getLogger("tira") + +_T = TypeVar("_T") + + +def _coalesce(*args: Optional[_T]) -> Optional[_T]: + """ + Returns the first argument that is not None. Returns None if no such value exists. + """ + for x in args: + if x is not None: + return x + return None + + class FileDatabase(object): """ This is the class to interface a TIRA Filedatabase. 
@@ -26,6 +38,7 @@ class FileDatabase(object): add is the public IF to add to the model get is the public IF to get data from the model """ + tira_root = settings.TIRA_ROOT tasks_dir_path = tira_root / Path("model/tasks") users_file_path = tira_root / Path("model/users/users.prototext") @@ -60,7 +73,7 @@ def __init__(self): "software": dt.now(), "default_tasks": dt.now(), "software_by_vm": dt.now(), - "software_count_by_dataset": dt.now() + "software_count_by_dataset": dt.now(), } self.build_model() @@ -78,26 +91,26 @@ def build_model(self): self._build_software_counts() def reload_vms(self): - """ reload VM and user data from the export format of the model """ + """reload VM and user data from the export format of the model""" self._parse_vm_list() def reload_datasets(self): - """ reload dataset data from the export format of the model """ + """reload dataset data from the export format of the model""" self._parse_dataset_list() def reload_tasks(self): - """ reload task data from the export format of the model """ + """reload task data from the export format of the model""" self._parse_task_list() self._build_task_relations() self._build_software_relations() self._build_software_counts() def reload_runs(self, vm_id): - """ reload run data for a VM from the export format of the model """ + """reload run data for a VM from the export format of the model""" raise NotImplementedError("Not Implemented: Runs are loaded on access when using FileDatabase") def _parse_organizer_list(self): - """ Parse the PB Database and extract all hosts. + """Parse the PB Database and extract all hosts. :return: a dict {hostId: {"name", "years"} """ if (dt.now() - self.updates["organizers"]).seconds < 10 and self.organizers: @@ -116,7 +129,7 @@ def _parse_vm_list(self): self.vms = {user.userName: user for user in users.users} def _parse_task_list(self): - """ Parse the PB Database and extract all tasks. + """Parse the PB Database and extract all tasks. :return: 1. a dict with the tasks {"taskId": {"name", "description", "dataset_count", "organizer", "year", "web"}} 2. a dict with default tasks of datasets {"dataset_id": "task_id"} @@ -126,7 +139,7 @@ def _parse_task_list(self): print("parsing tasks") tasks = {} - logger.info('loading tasks') + logger.info("loading tasks") for task_path in self.tasks_dir_path.glob("*"): task = Parse(open(task_path, "r").read(), modelpb.Tasks.Task()) tasks[task.taskId] = task @@ -134,14 +147,14 @@ def _parse_task_list(self): self.tasks = tasks def _parse_dataset_list(self): - """ Load all the datasets from the Filedatabase. + """Load all the datasets from the Filedatabase. :return: a dict {dataset_id: dataset protobuf object} """ if (dt.now() - self.updates["datasets"]).seconds < 10 and self.datasets: return print("parsing datasets") datasets = {} - logger.info('loading datasets') + logger.info("loading datasets") for dataset_file in self.datasets_dir_path.rglob("*.prototext"): dataset = Parse(open(dataset_file, "r").read(), modelpb.Dataset()) datasets[dataset.datasetId] = dataset @@ -149,7 +162,7 @@ def _parse_dataset_list(self): self.datasets = datasets def _parse_software_list(self): - """ extract the software files. We invent a new id for the lookup since software has none: + """extract the software files. 
We invent a new id for the lookup since software has none: - $ Afterwards sets self.software: a dict with the new key and a list of software objects as value """ @@ -157,7 +170,7 @@ def _parse_software_list(self): return print("parsing software") software = {} - logger.info('loading softwares') + logger.info("loading softwares") for task_dir in self.softwares_dir_path.glob("*"): for user_dir in task_dir.glob("*"): s = Parse(open(user_dir / "softwares.prototext", "r").read(), modelpb.Softwares()) @@ -168,8 +181,7 @@ def _parse_software_list(self): # _build methods reconstruct the relations once per parse. This is a shortcut for frequent joins. def _build_task_relations(self): - """ parse the relation dicts self.default_tasks and self.task_organizers from self.tasks - """ + """parse the relation dicts self.default_tasks and self.task_organizers from self.tasks""" default_tasks = {} task_organizers = {} for task_id, task in self.tasks.items(): @@ -213,8 +225,8 @@ def _build_software_counts(self): self.software_count_by_dataset = counts # _load methods parse files on the fly when pages are called - def load_review(self, dataset_id, vm_id, run_id): - """ This method loads a review or toggles auto reviewer if it does not exist. """ + def load_review(self, dataset_id: str, vm_id: str, run_id: str) -> modelpb.RunReview: + """This method loads a review or toggles auto reviewer if it does not exist.""" review_path = self.RUNS_DIR_PATH / dataset_id / vm_id / run_id review_file = review_path / "run-review.bin" @@ -224,11 +236,12 @@ def load_review(self, dataset_id, vm_id, run_id): return review review = modelpb.RunReview() + # FIXME: I am not closing my file handle :((((((((( review.ParseFromString(open(review_file, "rb").read()) return review def _load_vm(self, vm_id): - """ load a vm object from vm_dir_path """ + """load a vm object from vm_dir_path""" return Parse(open(self.vm_dir_path / f"{vm_id}.prototext").read(), modelpb.VirtualMachine()) def _load_softwares(self, task_id, vm_id): @@ -238,15 +251,16 @@ def _load_softwares(self, task_id, vm_id): if not software_file.exists(): software_file.touch() - return Parse(open(self.softwares_dir_path / task_id / vm_id / "softwares.prototext", "r").read(), - modelpb.Softwares()) + return Parse( + open(self.softwares_dir_path / task_id / vm_id / "softwares.prototext", "r").read(), modelpb.Softwares() + ) - def _load_run(self, dataset_id, vm_id, run_id, return_deleted=False): + def _load_run(self, dataset_id, vm_id, run_id, return_deleted: bool = False): run_dir = self.RUNS_DIR_PATH / dataset_id / vm_id / run_id if not (run_dir / "run.bin").exists(): if (run_dir / "run.prototext").exists(): r = Parse(open(run_dir / "run.prototext", "r").read(), modelpb.Run()) - open(run_dir / "run.bin", 'wb').write(r.SerializeToString()) + open(run_dir / "run.bin", "wb").write(r.SerializeToString()) else: logger.error(f"Try to read a run without a run.bin: {dataset_id}-{vm_id}-{run_id}") run = modelpb.Run() @@ -265,7 +279,7 @@ def _load_run(self, dataset_id, vm_id, run_id, return_deleted=False): return run def _load_vm_evaluations(self, dataset_id, vm_id, only_published): - """ load all evaluations for a user on a given dataset + """load all evaluations for a user on a given dataset :param dataset_id: id/name of the dataset :param vm_id: id/name of the user :return: {run_id: modelpb.Evaluation} @@ -292,36 +306,36 @@ def get_evaluation_measures(self, evaluation): # --------------------------------------------------------------------- def _save_task(self, task_proto, 
overwrite=False): - """ makes persistant changes to task: store in memory and to file. - Returns false if task exists and overwrite is false. """ + """makes persistent changes to task: store in memory and to file. + Returns false if task exists and overwrite is false.""" # open(f'/home/tira/{task_id}.prototext', 'wb').write(new_task.SerializeToString()) - new_task_file_path = self.tasks_dir_path / f'{task_proto.taskId}.prototext' + new_task_file_path = self.tasks_dir_path / f"{task_proto.taskId}.prototext" if not overwrite and new_task_file_path.exists(): - raise TiraModelWriteError(f"Failed to write vm, vm exists and overwrite is not allowed here") + raise TiraModelWriteError("Failed to write task, task exists and overwrite is not allowed here") self.tasks[task_proto.taskId] = task_proto - open(new_task_file_path, 'w').write(str(task_proto)) + open(new_task_file_path, "w").write(str(task_proto)) self._build_task_relations() def _save_vm(self, vm_proto, overwrite=False): - new_vm_file_path = self.vm_dir_path / f'{vm_proto.virtualMachineId}.prototext' + new_vm_file_path = self.vm_dir_path / f"{vm_proto.virtualMachineId}.prototext" if not overwrite and new_vm_file_path.exists(): - raise TiraModelWriteError(f"Failed to write vm, vm exists and overwrite is not allowed here") + raise TiraModelWriteError("Failed to write vm, vm exists and overwrite is not allowed here") # self.vms[vm_proto.virtualMachineId] = vm_proto # TODO see issue:30 - open(new_vm_file_path, 'w').write(str(vm_proto)) + open(new_vm_file_path, "w").write(str(vm_proto)) def _save_dataset(self, dataset_proto, task_id, overwrite=False): - """ dataset_dir_path/task_id/dataset_id.prototext """ - new_dataset_file_path = self.datasets_dir_path / task_id / f'{dataset_proto.datasetId}.prototext' + """dataset_dir_path/task_id/dataset_id.prototext""" + new_dataset_file_path = self.datasets_dir_path / task_id / f"{dataset_proto.datasetId}.prototext" if not overwrite and new_dataset_file_path.exists(): - raise TiraModelWriteError(f"Failed to write dataset, dataset exists and overwrite is not allowed here") + raise TiraModelWriteError("Failed to write dataset, dataset exists and overwrite is not allowed here") (self.datasets_dir_path / task_id).mkdir(exist_ok=True, parents=True) - open(new_dataset_file_path, 'w').write(str(dataset_proto)) + open(new_dataset_file_path, "w").write(str(dataset_proto)) self.datasets[dataset_proto.datasetId] = dataset_proto def _save_review(self, dataset_id, vm_id, run_id, review): review_path = self.RUNS_DIR_PATH / dataset_id / vm_id / run_id - open(review_path / "run-review.prototext", 'w').write(str(review)) - open(review_path / "run-review.bin", 'wb').write(review.SerializeToString()) + open(review_path / "run-review.prototext", "w").write(str(review)) + open(review_path / "run-review.bin", "wb").write(review.SerializeToString()) def _save_softwares(self, task_id, vm_id, softwares): with open(self.softwares_dir_path / task_id / vm_id / "softwares.prototext", "w+") as prototext_file: @@ -329,18 +343,18 @@ def _save_softwares(self, task_id, vm_id, softwares): prototext_file.write(str(softwares)) def _save_run(self, dataset_id, vm_id, run_id, run): - run_dir = (self.RUNS_DIR_PATH / dataset_id / vm_id / run_id) + run_dir = self.RUNS_DIR_PATH / dataset_id / vm_id / run_id run_dir.mkdir(parents=True, exist_ok=True) - open(run_dir / "run.prototext", 'w').write(str(run)) - open(run_dir / "run.bin", 'wb').write(run.SerializeToString()) + open(run_dir / "run.prototext", "w").write(str(run)) + open(run_dir / "run.bin", 
"wb").write(run.SerializeToString()) # ------------------------------------------------------------ # add methods to add new data to the model # ------------------------------------------------------------ def add_vm(self, vm_id, user_name, initial_user_password, ip, host, ssh, rdp): - """ Add a new task to the database. + """Add a new task to the database. This will not overwrite existing files and instead do nothing and return false """ new_vm = modelpb.VirtualMachine() @@ -348,8 +362,8 @@ def add_vm(self, vm_id, user_name, initial_user_password, ip, host, ssh, rdp): new_vm.vmId = vm_id new_vm.vmName = vm_id new_vm.host = host - new_vm.adminName = 'admin' # Note these are required but deprecated - new_vm.adminPw = 'admin' # Note these are required but deprecated + new_vm.adminName = "admin" # Note these are required but deprecated + new_vm.adminPw = "admin" # Note these are required but deprecated new_vm.userName = user_name new_vm.userPw = initial_user_password new_vm.ip = ip @@ -358,8 +372,8 @@ def add_vm(self, vm_id, user_name, initial_user_password, ip, host, ssh, rdp): self._save_vm(new_vm) def create_task(self, task_id, task_name, task_description, master_vm_id, organizer, website): - """ Add a new task to the database. - CAUTION: This function does not do any sanity checks and will OVERWRITE existing tasks """ + """Add a new task to the database. + CAUTION: This function does not do any sanity checks and will OVERWRITE existing tasks""" new_task = modelpb.Tasks.Task() new_task.taskId = task_id new_task.taskName = task_name @@ -370,8 +384,7 @@ def create_task(self, task_id, task_name, task_description, master_vm_id, organi self._save_task(new_task) def add_dataset(self, task_id, dataset_id, dataset_type, dataset_name): - """ TODO documentation - """ + """TODO documentation""" # update task_dir_path/task_id.prototext: dataset_id = f"{dataset_id}-{dataset_type}" @@ -379,30 +392,30 @@ def add_dataset(self, task_id, dataset_id, dataset_type, dataset_name): if not for_task: raise KeyError(f"No task with id {task_id}") - if dataset_type == 'test' and dataset_id not in for_task.testDataset: + if dataset_type == "test" and dataset_id not in for_task.testDataset: for_task.testDataset.append(dataset_id) - elif dataset_type in {'training', 'dev'} and dataset_id not in for_task.trainingDataset: + elif dataset_type in {"training", "dev"} and dataset_id not in for_task.trainingDataset: for_task.trainingDataset.append(dataset_id) - elif dataset_type not in {'training', 'dev', 'test'}: + elif dataset_type not in {"training", "dev", "test"}: raise KeyError("dataset type must be test, training, or dev") - task_ok = self._save_task(for_task, overwrite=True) + self._save_task(for_task, overwrite=True) # create new dataset_dir_path/task_id/dataset_id.prototext ds = modelpb.Dataset() ds.datasetId = dataset_id ds.displayName = dataset_name ds.isDeprecated = False - ds.isConfidential = True if dataset_type == 'test' else False - dataset_ok = self._save_dataset(ds, task_id) + ds.isConfidential = True if dataset_type == "test" else False + self._save_dataset(ds, task_id) # create dirs data_path/dataset/test-dataset[-truth]/task_id/dataset-id-type new_dirs = [] - if dataset_type == 'test': - new_dirs.append((self.data_path / f'test-datasets' / task_id / dataset_id)) - new_dirs.append((self.data_path / f'test-datasets-truth' / task_id / dataset_id)) + if dataset_type == "test": + new_dirs.append((self.data_path / "test-datasets" / task_id / dataset_id)) + new_dirs.append((self.data_path / 
"test-datasets-truth" / task_id / dataset_id)) else: - new_dirs.append((self.data_path / f'training-datasets' / task_id / dataset_id)) - new_dirs.append((self.data_path / f'training-datasets-truth' / task_id / dataset_id)) + new_dirs.append((self.data_path / "training-datasets" / task_id / dataset_id)) + new_dirs.append((self.data_path / "training-datasets-truth" / task_id / dataset_id)) for d in new_dirs: d.mkdir(parents=True, exist_ok=True) @@ -413,7 +426,7 @@ def _add_software(self, task_id, vm_id): software = modelpb.Softwares.Software() s = self._load_softwares(task_id, vm_id) try: - last_software_count = re.search(r'\d+$', s.softwares[-1].id) + last_software_count = re.search(r"\d+$", s.softwares[-1].id) software_count = int(last_software_count.group()) + 1 if last_software_count else None if software_count is None: # invalid software id value @@ -443,8 +456,7 @@ def _add_software(self, task_id, vm_id): self.software[f"{task_id}${vm_id}"] = software_list def add_evaluator(self, vm_id, task_id, dataset_id, dataset_type, command, working_directory, measures): - """ TODO documentation - """ + """TODO documentation""" evaluator_id = f"{dataset_id}-evaluator" dataset_id = f"{dataset_id}-{dataset_type}" @@ -460,45 +472,56 @@ def add_evaluator(self, vm_id, task_id, dataset_id, dataset_type, command, worki ev.evaluatorId = evaluator_id ev.command = command ev.workingDirectory = working_directory - ev.measures = ",".join([x[0].strip('\r') for x in measures]) - ev.measureKeys.extend([x[1].strip('\r') for x in measures]) + ev.measures = ",".join([x[0].strip("\r") for x in measures]) + ev.measureKeys.extend([x[1].strip("\r") for x in measures]) vm.evaluators.append(ev) self._save_vm(vm, overwrite=True) - def _update_review(self, dataset_id, vm_id, run_id, - reviewer_id: str = None, review_date: str = None, has_errors: bool = None, - has_no_errors: bool = None, no_errors: bool = None, missing_output: bool = None, - extraneous_output: bool = None, invalid_output: bool = None, has_error_output: bool = None, - other_errors: bool = None, comment: str = None, published: bool = None, blinded: bool = None, - has_warnings: bool = False): - """ updates the review specified by dataset_id, vm_id, and run_id with the values given in the parameters. + def _update_review( + self, + dataset_id, + vm_id, + run_id, + reviewer_id: Optional[str] = None, + review_date: Optional[str] = None, + has_errors: Optional[bool] = None, + has_no_errors: Optional[bool] = None, + no_errors: Optional[bool] = None, + missing_output: Optional[bool] = None, + extraneous_output: Optional[bool] = None, + invalid_output: Optional[bool] = None, + has_error_output: Optional[bool] = None, + other_errors: Optional[bool] = None, + comment: Optional[str] = None, + published: Optional[bool] = None, + blinded: Optional[bool] = None, + has_warnings: Optional[bool] = False, + ): + """updates the review specified by dataset_id, vm_id, and run_id with the values given in the parameters. 
Required Parameters are also required in the function """ review = self.load_review(dataset_id, vm_id, run_id) - def update(x, y): - return y if y is not None else x - - review.reviewerId = update(review.reviewerId, reviewer_id) - review.reviewDate = update(review.reviewDate, review_date) - review.hasErrors = update(review.hasErrors, has_errors) - review.hasWarnings = update(review.hasWarnings, has_warnings) - review.hasNoErrors = update(review.hasNoErrors, has_no_errors) - review.noErrors = update(review.noErrors, no_errors) - review.missingOutput = update(review.missingOutput, missing_output) - review.extraneousOutput = update(review.extraneousOutput, extraneous_output) - review.invalidOutput = update(review.invalidOutput, invalid_output) - review.hasErrorOutput = update(review.hasErrorOutput, has_error_output) - review.otherErrors = update(review.otherErrors, other_errors) - review.comment = update(review.comment, comment) - review.published = update(review.published, published) - review.blinded = update(review.blinded, blinded) + review.reviewerId = _coalesce(reviewer_id, review.reviewerId) + review.reviewDate = _coalesce(review_date, review.reviewDate) + review.hasErrors = _coalesce(has_errors, review.hasErrors) + review.hasWarnings = _coalesce(has_warnings, review.hasWarnings) + review.hasNoErrors = _coalesce(has_no_errors, review.hasNoErrors) + review.noErrors = _coalesce(no_errors, review.noErrors) + review.missingOutput = _coalesce(missing_output, review.missingOutput) + review.extraneousOutput = _coalesce(extraneous_output, review.extraneousOutput) + review.invalidOutput = _coalesce(invalid_output, review.invalidOutput) + review.hasErrorOutput = _coalesce(has_error_output, review.hasErrorOutput) + review.otherErrors = _coalesce(other_errors, review.otherErrors) + review.comment = _coalesce(comment, review.comment) + review.published = _coalesce(published, review.published) + review.blinded = _coalesce(blinded, review.blinded) self._save_review(dataset_id, vm_id, run_id, review) - def _update_run(self, dataset_id, vm_id, run_id, deleted: bool = None): - """ updates the run specified by dataset_id, vm_id, and run_id with the values given in the parameters. - Required Parameters are also required in the function + def _update_run(self, dataset_id, vm_id, run_id, deleted: Optional[bool] = None): + """updates the run specified by dataset_id, vm_id, and run_id with the values given in the parameters. + Required Parameters are also required in the function """ run = self._load_run(dataset_id, vm_id, run_id) @@ -508,8 +531,17 @@ def update(x, y): run.deleted = update(run.deleted, deleted) self._save_run(dataset_id, vm_id, run_id, run) - def update_software(self, task_id, vm_id, software_id, command: str = None, working_directory: str = None, - dataset: str = None, run: str = None, deleted: bool = False): + def update_software( + self, + task_id, + vm_id, + software_id, + command: Optional[str] = None, + working_directory: Optional[str] = None, + dataset: Optional[str] = None, + run: Optional[str] = None, + deleted: bool = False, + ): def update(x, y): return y if y is not None else x @@ -531,7 +563,7 @@ def update(x, y): # TODO add option to truly delete the software. 
def delete_software(self, task_id, vm_id, software_id): s = self._load_softwares(task_id, vm_id) - found = False + for software in s.softwares: if software.id == software_id: break @@ -554,57 +586,75 @@ def get_vm(self, vm_id: str, create_if_none=False): return self.vms.get(vm_id, None) def get_tasks(self) -> list: - tasks = [self.get_task(task.taskId) - for task in self.tasks.values()] + tasks = [self.get_task(task.taskId) for task in self.tasks.values()] return tasks def get_run(self, dataset_id: str, vm_id: str, run_id: str, return_deleted: bool = False) -> dict: run = self._load_run(dataset_id, vm_id, run_id, return_deleted) - return {"software": run.softwareId, - "run_id": run.runId, "input_run_id": run.inputRun, - "dataset": run.inputDataset, "downloadable": run.downloadable} + return { + "software": run.softwareId, + "run_id": run.runId, + "input_run_id": run.inputRun, + "dataset": run.inputDataset, + "downloadable": run.downloadable, + } def get_task(self, task_id: str) -> dict: t = self.tasks[task_id] - return {"task_name": t.taskName, - "description": t.taskDescription, - "commandPlaceholder": "" if t.commandPlaceholder == "mySoftware -c $inputDataset -r $inputRun -o $outputDir" else t.commandPlaceholder, - "commandDescription": "" if t.commandDescription == "Available variables: $inputDataset, $inputRun, $outputDir, $dataServer, and $token." else t.commandDescription, - "task_id": t.taskId, - "dataset_count": len(t.trainingDataset) + len(t.testDataset), - "software_count": len(self.software_by_task.get(t.taskId, {0})), - "web": t.web, - "organizer": self.organizers.get(t.hostId, modelpb.Hosts.Host()).name, - "year": self.organizers.get(t.hostId, modelpb.Hosts.Host()).years - } + return { + "task_name": t.taskName, + "description": t.taskDescription, + "commandPlaceholder": ( + "" + if t.commandPlaceholder == "mySoftware -c $inputDataset -r $inputRun -o $outputDir" + else t.commandPlaceholder + ), + "commandDescription": ( + "" + if t.commandDescription + == "Available variables: $inputDataset, $inputRun, $outputDir," + " $dataServer, and $token." 
+ else t.commandDescription + ), + "task_id": t.taskId, + "dataset_count": len(t.trainingDataset) + len(t.testDataset), + "software_count": len(self.software_by_task.get(t.taskId, {0})), + "web": t.web, + "organizer": self.organizers.get(t.hostId, modelpb.Hosts.Host()).name, + "year": self.organizers.get(t.hostId, modelpb.Hosts.Host()).years, + } def get_dataset(self, dataset_id: str) -> dict: dataset = self.datasets[dataset_id] return { - "display_name": dataset.displayName, "evaluator_id": dataset.evaluatorId, + "display_name": dataset.displayName, + "evaluator_id": dataset.evaluatorId, "dataset_id": dataset.datasetId, - "is_confidential": dataset.isConfidential, "is_deprecated": dataset.isDeprecated, + "is_confidential": dataset.isConfidential, + "is_deprecated": dataset.isDeprecated, "year": extract_year_from_dataset_id(dataset_id), "task": self.default_tasks.get(dataset.datasetId, "None"), - 'organizer': self.task_organizers.get(dataset.datasetId, ""), - "software_count": self.software_count_by_dataset.get(dataset.datasetId, 0) + "organizer": self.task_organizers.get(dataset.datasetId, ""), + "software_count": self.software_count_by_dataset.get(dataset.datasetId, 0), } def get_datasets(self) -> dict: - """ Get a dict of dataset_id: dataset_json_descriptor """ + """Get a dict of dataset_id: dataset_json_descriptor""" return {dataset_id: self.get_dataset(dataset_id) for dataset_id in self.datasets} def get_datasets_by_task(self, task_id: str, include_deprecated=False) -> list: - """ return the list of datasets associated with this task_id + """return the list of datasets associated with this task_id @param task_id: id string of the task the dataset belongs to @param include_deprecated: Default False. If True, also returns datasets marked as deprecated. @return: a list of json-formatted datasets, as returned by get_dataset """ - return [self.get_dataset(dataset.datasetId) - for dataset in self.datasets.values() - if task_id == self.default_tasks.get(dataset.datasetId, "") and - not (dataset.isDeprecated and not include_deprecated)] + return [ + self.get_dataset(dataset.datasetId) + for dataset in self.datasets.values() + if task_id == self.default_tasks.get(dataset.datasetId, "") + and not (dataset.isDeprecated and not include_deprecated) + ] def get_organizer(self, organizer_id: str): # TODO should return as dict @@ -617,7 +667,7 @@ def get_ova_list(self) -> list: return [f"{ova_file.stem}.ova" for ova_file in self.ova_dir.glob("*.ova")] def get_vm_list(self): - """ load the vm-info file which stores all active vms as such: + """load the vm-info file which stores all active vms as such: \t[\t]\n ... 
@@ -628,16 +678,15 @@ def parse_vm_list(vm_list): for list_entry in vm_list: try: list_entry = list_entry.split("\t") - yield [list_entry[0], list_entry[1].strip(), list_entry[2].strip() if len(list_entry) > 2 else ''] + yield [list_entry[0], list_entry[1].strip(), list_entry[2].strip() if len(list_entry) > 2 else ""] except IndexError as e: logger.error(e, list_entry) - return list(parse_vm_list(open(self.vm_list_file, 'r'))) + return list(parse_vm_list(open(self.vm_list_file, "r"))) def get_vms_by_dataset(self, dataset_id: str) -> list: - """ return a list of vm_id's that have runs on this dataset """ - return [user_run_dir.stem - for user_run_dir in (self.RUNS_DIR_PATH / dataset_id).glob("*")] + """return a list of vm_id's that have runs on this dataset""" + return [user_run_dir.stem for user_run_dir in (self.RUNS_DIR_PATH / dataset_id).glob("*")] def get_vm_runs_by_dataset(self, dataset_id: str, vm_id: str, return_deleted: bool = False) -> list: runs = {} @@ -651,7 +700,7 @@ def get_vm_runs_by_dataset(self, dataset_id: str, vm_id: str, return_deleted: bo return list(runs.values()) def get_vm_runs_by_task(self, task_id: str, vm_id: str, return_deleted: bool = False) -> list: - """ returns a list of all the runs of a user over all datasets in json (as returned by _load_user_runs) """ + """returns a list of all the runs of a user over all datasets in json (as returned by _load_user_runs)""" relevant_datasets = {software["dataset"] for software in self.get_software(task_id, vm_id)} runs = [] for dataset_id in relevant_datasets: @@ -666,23 +715,36 @@ def get_vms_with_reviews(self, dataset_id): vms = [] for vm_id, run in vm_runs.items(): - runs = [{"run": run, "review": vm_reviews.get(vm_id, None).get(run["run_id"], None)} - for run in vm_runs.get(vm_id)] - unreviewed_count = len([1 for r in vm_reviews[vm_id].values() - if not r.get("hasErrors", None) and not r.get("hasNoErrors", None)]) - published_count = len([1 for r in vm_reviews[vm_id].values() - if r.get("published", None)]) - blinded_count = len([1 for r in vm_reviews[vm_id].values() - if r.get("blinded", None)]) - vms.append({"vm_id": vm_id, "runs": runs, "unreviewed_count": unreviewed_count, - "blinded_count": blinded_count, "published_count": published_count}) + runs = [ + {"run": run, "review": vm_reviews.get(vm_id, None).get(run["run_id"], None)} + for run in vm_runs.get(vm_id) + ] + unreviewed_count = len( + [ + 1 + for r in vm_reviews[vm_id].values() + if not r.get("hasErrors", None) and not r.get("hasNoErrors", None) + ] + ) + published_count = len([1 for r in vm_reviews[vm_id].values() if r.get("published", None)]) + blinded_count = len([1 for r in vm_reviews[vm_id].values() if r.get("blinded", None)]) + vms.append( + { + "vm_id": vm_id, + "runs": runs, + "unreviewed_count": unreviewed_count, + "blinded_count": blinded_count, + "published_count": published_count, + } + ) return vms def get_evaluations_with_keys_by_dataset(self, dataset_id, include_unpublished): vm_ids = self.get_vms_by_dataset(dataset_id) vm_evaluations = { vm_id: self.get_vm_evaluations_by_dataset(dataset_id, vm_id, only_public_results=not include_unpublished) - for vm_id in vm_ids} + for vm_id in vm_ids + } keys = set() for e1 in vm_evaluations.values(): for e2 in e1.values(): @@ -690,23 +752,27 @@ def get_evaluations_with_keys_by_dataset(self, dataset_id, include_unpublished): ev_keys = list(keys) if include_unpublished: vm_reviews = {vm_id: self.get_vm_reviews_by_dataset(dataset_id, vm_id) for vm_id in vm_ids} - evaluations = [{"vm_id": vm_id, - "run_id": 
run_id, - "blinded": vm_reviews.get(vm_id, {}).get(run_id, {}).get("blinded", False), - "published": vm_reviews.get(vm_id, {}).get(run_id, {}).get("published", False), - "measures": [measures.get(k, "-") for k in ev_keys]} - for vm_id, measures_by_runs in vm_evaluations.items() - for run_id, measures in measures_by_runs.items()] + evaluations = [ + { + "vm_id": vm_id, + "run_id": run_id, + "blinded": vm_reviews.get(vm_id, {}).get(run_id, {}).get("blinded", False), + "published": vm_reviews.get(vm_id, {}).get(run_id, {}).get("published", False), + "measures": [measures.get(k, "-") for k in ev_keys], + } + for vm_id, measures_by_runs in vm_evaluations.items() + for run_id, measures in measures_by_runs.items() + ] else: - evaluations = [{"vm_id": vm_id, - "run_id": run_id, - "measures": [measures.get(k, "-") for k in ev_keys]} - for vm_id, measures_by_runs in vm_evaluations.items() - for run_id, measures in measures_by_runs.items()] + evaluations = [ + {"vm_id": vm_id, "run_id": run_id, "measures": [measures.get(k, "-") for k in ev_keys]} + for vm_id, measures_by_runs in vm_evaluations.items() + for run_id, measures in measures_by_runs.items() + ] return ev_keys, evaluations def get_evaluator(self, dataset_id, task_id=None): - """ returns a dict containing the evaluator parameters: + """returns a dict containing the evaluator parameters: vm_id: id of the master vm running the evaluator host: ip or hostname of the host @@ -720,8 +786,7 @@ def get_evaluator(self, dataset_id, task_id=None): task = self.tasks.get(task_id) vm_id = task.virtualMachineId - master_vm = Parse(open(self.vm_dir_path / f"{vm_id}.prototext", "r").read(), - modelpb.VirtualMachine()) + master_vm = Parse(open(self.vm_dir_path / f"{vm_id}.prototext", "r").read(), modelpb.VirtualMachine()) result = {"vm_id": vm_id, "host": master_vm.host} for evaluator in master_vm.evaluators: @@ -733,36 +798,56 @@ def get_evaluator(self, dataset_id, task_id=None): return result def get_vm_evaluations_by_dataset(self, dataset_id, vm_id, only_public_results=True): - """ Return a dict of run_id: evaluation_results for the given vm on the given dataset + """Return a dict of run_id: evaluation_results for the given vm on the given dataset @param only_public_results: only return the measures for published datasets. 
""" - return {run_id: self.get_evaluation_measures(ev) - for run_id, ev in - self._load_vm_evaluations(dataset_id, vm_id, only_published=only_public_results).items()} + return { + run_id: self.get_evaluation_measures(ev) + for run_id, ev in self._load_vm_evaluations(dataset_id, vm_id, only_published=only_public_results).items() + } def get_run_review(self, dataset_id: str, vm_id: str, run_id: str) -> dict: review = self.load_review(dataset_id, vm_id, run_id) - return {"reviewer": review.reviewerId, "noErrors": review.noErrors, - "missingOutput": review.missingOutput, - "extraneousOutput": review.extraneousOutput, "invalidOutput": review.invalidOutput, - "hasErrorOutput": review.hasErrorOutput, "otherErrors": review.otherErrors, - "comment": review.comment, "hasErrors": review.hasErrors, "hasWarnings": review.hasWarnings, - "hasNoErrors": review.hasNoErrors, "published": review.published, "blinded": review.blinded - } + return { + "reviewer": review.reviewerId, + "noErrors": review.noErrors, + "missingOutput": review.missingOutput, + "extraneousOutput": review.extraneousOutput, + "invalidOutput": review.invalidOutput, + "hasErrorOutput": review.hasErrorOutput, + "otherErrors": review.otherErrors, + "comment": review.comment, + "hasErrors": review.hasErrors, + "hasWarnings": review.hasWarnings, + "hasNoErrors": review.hasNoErrors, + "published": review.published, + "blinded": review.blinded, + } def get_vm_reviews_by_dataset(self, dataset_id: str, vm_id: str) -> dict: - return {run_id_dir.stem: self.get_run_review(dataset_id, vm_id, run_id_dir.stem) - for run_id_dir in (self.RUNS_DIR_PATH / dataset_id / vm_id).glob("*")} + return { + run_id_dir.stem: self.get_run_review(dataset_id, vm_id, run_id_dir.stem) + for run_id_dir in (self.RUNS_DIR_PATH / dataset_id / vm_id).glob("*") + } def get_software(self, task_id, vm_id, software_id=None): - """ Returns the software with the given name of a vm on a task """ - sw = [{"id": software.id, "count": software.count, - "task_id": task_id, "vm_id": vm_id, - "command": software.command, "working_directory": software.workingDirectory, - "dataset": software.dataset, "run": software.run, "creation_date": software.creationDate, - "last_edit": software.lastEditDate} - for software in self.software.get(f"{task_id}${vm_id}", [])] + """Returns the software with the given name of a vm on a task""" + sw = [ + { + "id": software.id, + "count": software.count, + "task_id": task_id, + "vm_id": vm_id, + "command": software.command, + "working_directory": software.workingDirectory, + "dataset": software.dataset, + "run": software.run, + "creation_date": software.creationDate, + "last_edit": software.lastEditDate, + } + for software in self.software.get(f"{task_id}${vm_id}", []) + ] if software_id is None: return sw @@ -772,13 +857,22 @@ def get_software(self, task_id, vm_id, software_id=None): return s def get_software_by_vm(self, task_id, vm_id): - """ Returns the softwares of a vm on a task """ - return [{"id": software.id, "count": software.count, - "task_id": task_id, "vm_id": vm_id, - "command": software.command, "working_directory": software.workingDirectory, - "dataset": software.dataset, "run": software.run, "creation_date": software.creationDate, - "last_edit": software.lastEditDate} - for software in self.software.get(f"{task_id}${vm_id}", [])] + """Returns the softwares of a vm on a task""" + return [ + { + "id": software.id, + "count": software.count, + "task_id": task_id, + "vm_id": vm_id, + "command": software.command, + "working_directory": 
software.workingDirectory, + "dataset": software.dataset, + "run": software.run, + "creation_date": software.creationDate, + "last_edit": software.lastEditDate, + } + for software in self.software.get(f"{task_id}${vm_id}", []) + ] def get_software_with_runs(self, task_id, vm_id): softwares = self.get_software_by_vm(task_id, vm_id) @@ -787,8 +881,8 @@ def get_software_with_runs(self, task_id, vm_id): runs_with_input = {} # get the runs which have an input_run_id for r in runs: # if we loop once, might as well get the review-info here. - r['review'] = self.get_run_review(r.get("dataset"), vm_id, r.get("run_id")) - if r.get("input_run_id") == 'none': + r["review"] = self.get_run_review(r.get("dataset"), vm_id, r.get("run_id")) + if r.get("input_run_id") == "none": continue runs_with_input.setdefault(r.get("input_run_id"), []).append(r) @@ -802,13 +896,7 @@ def get_software_with_runs(self, task_id, vm_id): for r in v: runs_by_software.setdefault(r.get("software"), []).append(r) - return [{"software": sw, - "runs": runs_by_software.get(sw["id"]) - } for sw in softwares] - - def get_users_vms(self): - """ Return the users list. """ - return self.vms + return [{"software": sw, "runs": runs_by_software.get(sw["id"])} for sw in softwares] # ------------------------------------------------------------ # add methods to add new data to the model @@ -821,33 +909,62 @@ def add_software(self, task_id: str, vm_id: str): logger.exception(f"Exception while adding software ({task_id}, {vm_id}): {e}") raise TiraModelWriteError(f"Failed to write VM {vm_id}") - def update_review(self, dataset_id, vm_id, run_id, - reviewer_id: str = None, review_date: str = None, has_errors: bool = None, - has_no_errors: bool = None, no_errors: bool = None, missing_output: bool = None, - extraneous_output: bool = None, invalid_output: bool = None, has_error_output: bool = None, - other_errors: bool = None, comment: str = None, published: bool = None, blinded: bool = None, - has_warnings: bool = False) -> bool: - """ updates the review specified by dataset_id, vm_id, and run_id with the values given in the parameters. + def update_review( + self, + dataset_id, + vm_id, + run_id, + reviewer_id: Optional[str] = None, + review_date: Optional[str] = None, + has_errors: Optional[bool] = None, + has_no_errors: Optional[bool] = None, + no_errors: Optional[bool] = None, + missing_output: Optional[bool] = None, + extraneous_output: Optional[bool] = None, + invalid_output: Optional[bool] = None, + has_error_output: Optional[bool] = None, + other_errors: Optional[bool] = None, + comment: Optional[str] = None, + published: Optional[bool] = None, + blinded: Optional[bool] = None, + has_warnings: bool = False, + ) -> bool: + """updates the review specified by dataset_id, vm_id, and run_id with the values given in the parameters. 
Required Parameters are also required in the function """ try: - self._update_review(dataset_id, vm_id, run_id, reviewer_id, review_date, has_errors, has_no_errors, - no_errors, - missing_output, extraneous_output, invalid_output, has_error_output, - other_errors, comment, published, blinded, has_warnings) + self._update_review( + dataset_id, + vm_id, + run_id, + reviewer_id, + review_date, + has_errors, + has_no_errors, + no_errors, + missing_output, + extraneous_output, + invalid_output, + has_error_output, + other_errors, + comment, + published, + blinded, + has_warnings, + ) return True except Exception as e: logger.exception(f"Exception while saving review ({dataset_id}, {vm_id}, {run_id}): {e}") return False def add_run(self, *args, **kwargs): - """ The FileDatabase loads runs and reviews from the Protobuf files every time, - so this method currently does nothing. """ + """The FileDatabase loads runs and reviews from the Protobuf files every time, + so this method currently does nothing.""" pass - def update_run(self, dataset_id, vm_id, run_id, deleted: bool = None): - """ updates the run specified by dataset_id, vm_id, and run_id with the values given in the parameters. - Required Parameters are also required in the function + def update_run(self, dataset_id, vm_id, run_id, deleted: Optional[bool] = None): + """updates the run specified by dataset_id, vm_id, and run_id with the values given in the parameters. + Required Parameters are also required in the function """ try: self._update_run(dataset_id, vm_id, run_id, deleted) diff --git a/application/src/tira/data/HybridDatabase.py b/application/src/tira_app/data/HybridDatabase.py similarity index 53% rename from application/src/tira/data/HybridDatabase.py rename to application/src/tira_app/data/HybridDatabase.py index 01712d624..0ee0d8dff 100644 --- a/application/src/tira/data/HybridDatabase.py +++ b/application/src/tira_app/data/HybridDatabase.py @@ -1,26 +1,32 @@ -from google.protobuf.text_format import Parse -from pathlib import Path +import gzip +import json import logging -from django.conf import settings -from django.db import IntegrityError -from shutil import rmtree -from datetime import datetime as dt -import randomname import os import zipfile -import json -import gzip +from datetime import datetime as dt +from pathlib import Path +from typing import Any, Optional -from tira.util import TiraModelWriteError, TiraModelIntegrityError -from tira.proto import TiraClientWebMessages_pb2 as modelpb -from tira.util import auto_reviewer, now, get_today_timestamp, get_tira_id, link_to_discourse_team +import randomname +from django.conf import settings +from django.db import IntegrityError +from google.protobuf.text_format import Parse -import tira.model as modeldb -import tira.data.data as dbops +from .. import model as modeldb +from ..proto import TiraClientWebMessages_pb2 as modelpb +from ..util import ( + TiraModelIntegrityError, + TiraModelWriteError, + auto_reviewer, + get_tira_id, + get_today_timestamp, + link_to_discourse_team, + now, +) +from . 
import data as dbops logger = logging.getLogger("tira_db") -#SELECT tira_dockersoftware.vm_id, COUNT(DISTINCT(tira_dockersoftware.docker_software_id)) AS 'Software Count', SUM(tira_run.run_id IS NOT NULL) AS 'Executed Runs' FROM tira_dockersoftware LEFT JOIN tira_run ON tira_dockersoftware.docker_software_id = tira_run.docker_software_id where tira_dockersoftware.task_id = 'webpage-classification' GROUP BY tira_dockersoftware.vm_id; class HybridDatabase(object): """ @@ -36,6 +42,7 @@ class HybridDatabase(object): add is the public IF to add to the model get is the public IF to get data from the model """ + tira_root = settings.TIRA_ROOT tasks_dir_path = tira_root / Path("model/tasks") users_file_path = tira_root / Path("model/users/users.prototext") @@ -48,12 +55,12 @@ class HybridDatabase(object): softwares_dir_path = tira_root / Path("model/softwares") data_path = tira_root / Path("data/datasets") runs_dir_path = tira_root / Path("data/runs") - custom_irds_datasets_path = tira_root / "state" / "custom-ir-datasets" + custom_irds_datasets_path = tira_root / "state" / "custom-ir-datasets" def __init__(self): pass - def create_model(self, admin_user_name='admin', admin_password='admin'): + def create_model(self, admin_user_name="admin", admin_password="admin"): self.users_file_path.parent.mkdir(exist_ok=True, parents=True) self.tasks_dir_path.mkdir(exist_ok=True, parents=True) self.organizers_file_path.parent.mkdir(exist_ok=True, parents=True) @@ -72,34 +79,40 @@ def create_model(self, admin_user_name='admin', admin_password='admin'): self.host_list_file.touch(exist_ok=True) self.organizers_file_path.touch(exist_ok=True) - modeldb.VirtualMachine.objects.create(vm_id=admin_user_name, user_password=admin_password, - roles='reviewer') + modeldb.VirtualMachine.objects.create(vm_id=admin_user_name, user_password=admin_password, roles="reviewer") self._save_vm(vm_id=admin_user_name, user_name=admin_user_name, initial_user_password=admin_password) def index_model_from_files(self): self.vm_list_file.touch(exist_ok=True) - dbops.index(self.organizers_file_path, self.users_file_path, self.vm_dir_path, self.tasks_dir_path, - self.datasets_dir_path, self.softwares_dir_path, self.runs_dir_path) + dbops.index( + self.organizers_file_path, + self.users_file_path, + self.vm_dir_path, + self.tasks_dir_path, + self.datasets_dir_path, + self.softwares_dir_path, + self.runs_dir_path, + ) def reload_vms(self): - """ reload VM and user data from the export format of the model """ + """reload VM and user data from the export format of the model""" dbops.reload_vms(self.users_file_path, self.vm_dir_path) def reload_datasets(self): - """ reload dataset data from the export format of the model """ + """reload dataset data from the export format of the model""" dbops.reload_datasets(self.datasets_dir_path) def reload_tasks(self): - """ reload task data from the export format of the model """ + """reload task data from the export format of the model""" dbops.reload_tasks(self.tasks_dir_path) def reload_runs(self, vm_id): - """ reload run data for a VM from the export format of the model """ + """reload run data for a VM from the export format of the model""" dbops.reload_runs(self.runs_dir_path, vm_id) # _load methods parse files on the fly when pages are called def load_review(self, dataset_id, vm_id, run_id): - """ This method loads a review or toggles auto reviewer if it does not exist. 
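A brief sketch of the text/binary protobuf round trip that load_review, _load_run, and the _save_* helpers below all rely on; the file path and the absolute import path are illustrative, the message class is the one imported as modelpb above:

    from pathlib import Path
    from google.protobuf.text_format import Parse
    from tira_app.proto import TiraClientWebMessages_pb2 as modelpb  # hypothetical absolute import path

    software_file = Path("softwares.prototext")                        # placeholder path
    softwares = Parse(software_file.read_text(), modelpb.Softwares())  # human-readable text format -> message
    software_file.with_suffix(".bin").write_bytes(softwares.SerializeToString())  # message -> binary dump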
""" + """This method loads a review or toggles auto reviewer if it does not exist.""" review_path = self.runs_dir_path / dataset_id / vm_id / run_id review_file = review_path / "run-review.bin" @@ -120,16 +133,17 @@ def _load_softwares(self, task_id, vm_id): if not software_file.exists(): software_file.touch() - return Parse(open(self.softwares_dir_path / task_id / vm_id / "softwares.prototext", "r").read(), - modelpb.Softwares()) + return Parse( + open(self.softwares_dir_path / task_id / vm_id / "softwares.prototext", "r").read(), modelpb.Softwares() + ) def _load_run(self, dataset_id, vm_id, run_id, return_deleted=False): - """ Load a protobuf run file with some edge-case checks. """ + """Load a protobuf run file with some edge-case checks.""" run_dir = self.runs_dir_path / dataset_id / vm_id / run_id if not (run_dir / "run.bin").exists(): if (run_dir / "run.prototext").exists(): r = Parse(open(run_dir / "run.prototext", "r").read(), modelpb.Run()) - open(run_dir / "run.bin", 'wb').write(r.SerializeToString()) + open(run_dir / "run.bin", "wb").write(r.SerializeToString()) else: logger.error(f"Try to read a run without a run.bin: {dataset_id}-{vm_id}-{run_id}") run = modelpb.Run() @@ -151,12 +165,21 @@ def _load_run(self, dataset_id, vm_id, run_id, return_deleted=False): # ---- save methods to update protos # --------------------------------------------------------------------- - def _save_vm(self, vm_id=None, user_name=None, initial_user_password=None, ip=None, host=None, ssh=None, rdp=None, - overwrite=False): - new_vm_file_path = self.vm_dir_path / f'{vm_id}.prototext' + def _save_vm( + self, + vm_id=None, + user_name=None, + initial_user_password=None, + ip=None, + host=None, + ssh=None, + rdp=None, + overwrite=False, + ): + new_vm_file_path = self.vm_dir_path / f"{vm_id}.prototext" if not overwrite and new_vm_file_path.exists(): - raise TiraModelWriteError(f"Failed to write vm, vm exists and overwrite is not allowed here") + raise TiraModelWriteError("Failed to write vm, vm exists and overwrite is not allowed here") elif overwrite and new_vm_file_path.exists(): vm = Parse(open(new_vm_file_path).read(), modelpb.VirtualMachine()) else: @@ -165,65 +188,70 @@ def _save_vm(self, vm_id=None, user_name=None, initial_user_password=None, ip=No vm.vmId = vm_id if vm_id else vm.vmId vm.vmName = vm_id if vm_id else vm.vmName vm.host = host if host else vm.host - vm.adminName = vm.adminName if vm.adminName else 'admin' # Note these are required but deprecated - vm.adminPw = vm.adminPw if vm.adminPw else 'admin' # Note these are required but deprecated + vm.adminName = vm.adminName if vm.adminName else "admin" # Note these are required but deprecated + vm.adminPw = vm.adminPw if vm.adminPw else "admin" # Note these are required but deprecated vm.userName = user_name if user_name else vm.userName vm.userPw = initial_user_password if initial_user_password else vm.userPw vm.ip = ip if ip else vm.ip vm.portSsh = rdp if rdp else vm.portSsh vm.portRdp = ssh if ssh else vm.portRdp - open(new_vm_file_path, 'w').write(str(vm)) + open(new_vm_file_path, "w").write(str(vm)) return True def _save_review(self, dataset_id, vm_id, run_id, review): - """ Save the reivew to the protobuf dump. Create the file if it does not exist. """ + """Save the reivew to the protobuf dump. 
Create the file if it does not exist.""" review_path = self.runs_dir_path / dataset_id / vm_id / run_id - open(review_path / "run-review.prototext", 'w').write(str(review)) - open(review_path / "run-review.bin", 'wb').write(review.SerializeToString()) + open(review_path / "run-review.prototext", "w").write(str(review)) + open(review_path / "run-review.bin", "wb").write(review.SerializeToString()) def _save_softwares(self, task_id, vm_id, softwares): open(self.softwares_dir_path / task_id / vm_id / "softwares.prototext", "w+").write(str(softwares)) def _save_run(self, dataset_id, vm_id, run_id, run): - run_dir = (self.runs_dir_path / dataset_id / vm_id / run_id) + run_dir = self.runs_dir_path / dataset_id / vm_id / run_id run_dir.mkdir(parents=True, exist_ok=True) - open(run_dir / "run.prototext", 'w').write(str(run)) - open(run_dir / "run.bin", 'wb').write(run.SerializeToString()) + open(run_dir / "run.prototext", "w").write(str(run)) + open(run_dir / "run.bin", "wb").write(run.SerializeToString()) ######################################### # Public Interface Methods ################################### @staticmethod - def _vm_as_dict(vm): - return {"vm_id": vm.vm_id, "user_password": vm.user_password, "roles": vm.roles, - "host": vm.host, "admin_name": vm.admin_name, "admin_pw": vm.admin_pw, - "ip": vm.ip, "ssh": vm.ssh, "rdp": vm.rdp, "archived": vm.archived} + def _vm_as_dict(vm: modeldb.VirtualMachine) -> dict[str, Any]: + return { + "vm_id": vm.vm_id, + "user_password": vm.user_password, + "roles": vm.roles, + "host": vm.host, + "admin_name": vm.admin_name, + "admin_pw": vm.admin_pw, + "ip": vm.ip, + "ssh": vm.ssh, + "rdp": vm.rdp, + "archived": vm.archived, + } - def get_vm(self, vm_id: str, create_if_none=False): + def get_vm(self, vm_id: str, create_if_none: bool = False) -> dict[str, Any]: if create_if_none: vm, _ = modeldb.VirtualMachine.objects.get_or_create(vm_id=vm_id) else: vm = modeldb.VirtualMachine.objects.get(vm_id=vm_id) return self._vm_as_dict(vm) - def get_users_vms(self): - """ Return the users list. 
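A hypothetical lookup sketch for the get_vm accessor above; the id is a placeholder and the printed keys are those produced by _vm_as_dict:

    vm = db.get_vm("example-team", create_if_none=True)  # uses get_or_create when create_if_none is set
    print(vm["roles"], vm["host"], vm["archived"])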
""" - return [self._vm_as_dict(vm) for vm in modeldb.VirtualMachine.objects.all()] - def _task_to_dict(self, task, include_dataset_stats=False): def _add_dataset_stats(res, dataset_set): if not dataset_set: - res["dataset_last_created"] = '' - res["dataset_first_created"] = '' - res["dataset_last_modified"] = '' + res["dataset_last_created"] = "" + res["dataset_first_created"] = "" + res["dataset_last_modified"] = "" else: - res["dataset_last_created"] = dataset_set.latest('created').created.year - res["dataset_first_created"] = dataset_set.earliest('created').created.year - res["dataset_last_modified"] = dataset_set.latest('last_modified').created + res["dataset_last_created"] = dataset_set.latest("created").created.year + res["dataset_first_created"] = dataset_set.earliest("created").created.year + res["dataset_last_modified"] = dataset_set.latest("last_modified").created return res if task.organizer: @@ -236,35 +264,40 @@ def _add_dataset_stats(res, dataset_set): org_id = "" try: master_vm_id = task.vm.vm_id - except AttributeError as e: + except AttributeError: logger.error(f"Task with id {task.task_id} has no master vm associated") master_vm_id = "None" - result = {"task_id": task.task_id, "task_name": task.task_name, "task_description": task.task_description, - "organizer": org_name, - "organizer_id": org_id, - "web": task.web, - "year": org_year, - "featured": task.featured, - "require_registration": task.require_registration, - "require_groups": task.require_groups, - "restrict_groups": task.restrict_groups, - "allowed_task_teams": task.allowed_task_teams, - "master_vm_id": master_vm_id, - "is_ir_task": task.is_ir_task, - "irds_re_ranking_image": task.irds_re_ranking_image, - "irds_re_ranking_command": task.irds_re_ranking_command, - "irds_re_ranking_resource": task.irds_re_ranking_resource, - "dataset_count": task.dataset_set.count(), - "software_count": task.software_set.count(), - "max_std_out_chars_on_test_data": task.max_std_out_chars_on_test_data, - "max_std_err_chars_on_test_data": task.max_std_err_chars_on_test_data, - "max_file_list_chars_on_test_data": task.max_file_list_chars_on_test_data, - "command_placeholder": task.command_placeholder, "command_description": task.command_description, - "dataset_label": task.dataset_label, - "max_std_out_chars_on_test_data_eval": task.max_std_out_chars_on_test_data_eval, - "max_std_err_chars_on_test_data_eval": task.max_std_err_chars_on_test_data_eval, - "max_file_list_chars_on_test_data_eval": task.max_file_list_chars_on_test_data_eval} + result = { + "task_id": task.task_id, + "task_name": task.task_name, + "task_description": task.task_description, + "organizer": org_name, + "organizer_id": org_id, + "web": task.web, + "year": org_year, + "featured": task.featured, + "require_registration": task.require_registration, + "require_groups": task.require_groups, + "restrict_groups": task.restrict_groups, + "allowed_task_teams": task.allowed_task_teams, + "master_vm_id": master_vm_id, + "is_ir_task": task.is_ir_task, + "irds_re_ranking_image": task.irds_re_ranking_image, + "irds_re_ranking_command": task.irds_re_ranking_command, + "irds_re_ranking_resource": task.irds_re_ranking_resource, + "dataset_count": task.dataset_set.count(), + "software_count": task.software_set.count(), + "max_std_out_chars_on_test_data": task.max_std_out_chars_on_test_data, + "max_std_err_chars_on_test_data": task.max_std_err_chars_on_test_data, + "max_file_list_chars_on_test_data": task.max_file_list_chars_on_test_data, + "command_placeholder": 
task.command_placeholder, + "command_description": task.command_description, + "dataset_label": task.dataset_label, + "max_std_out_chars_on_test_data_eval": task.max_std_out_chars_on_test_data_eval, + "max_std_err_chars_on_test_data_eval": task.max_std_err_chars_on_test_data_eval, + "max_file_list_chars_on_test_data_eval": task.max_file_list_chars_on_test_data_eval, + } if include_dataset_stats: _add_dataset_stats(result, task.dataset_set.all()) @@ -278,13 +311,13 @@ def _tasks_to_dict(self, tasks, include_dataset_stats=False): yield self._task_to_dict(task, include_dataset_stats) - def get_tasks(self, include_dataset_stats=False) -> list: - return list(self._tasks_to_dict(modeldb.Task.objects.select_related('organizer').all(), - include_dataset_stats)) + def get_tasks(self, include_dataset_stats: str = False) -> list[dict[str, Any]]: + return list(self._tasks_to_dict(modeldb.Task.objects.select_related("organizer").all(), include_dataset_stats)) - def get_task(self, task_id: str, include_dataset_stats) -> dict: - return self._task_to_dict(modeldb.Task.objects.select_related('organizer').get(task_id=task_id), - include_dataset_stats) + def get_task(self, task_id: str, include_dataset_stats) -> dict[str, Any]: + return self._task_to_dict( + modeldb.Task.objects.select_related("organizer").get(task_id=task_id), include_dataset_stats + ) def _dataset_to_dict(self, dataset): evaluator_id = None if not dataset.evaluator else dataset.evaluator.evaluator_id @@ -294,16 +327,20 @@ def _dataset_to_dict(self, dataset): "display_name": dataset.display_name, "evaluator_id": evaluator_id, "dataset_id": dataset.dataset_id, - "is_confidential": dataset.is_confidential, "is_deprecated": dataset.is_deprecated, + "is_confidential": dataset.is_confidential, + "is_deprecated": dataset.is_deprecated, "year": dataset.released, "task": dataset.default_task.task_id, - 'organizer': dataset.default_task.organizer.name, - 'organizer_id': dataset.default_task.organizer.organizer_id, + "organizer": dataset.default_task.organizer.name, + "organizer_id": dataset.default_task.organizer.organizer_id, "software_count": modeldb.Software.objects.filter(dataset__dataset_id=dataset.dataset_id).count(), "runs_count": runs.count(), - 'evaluations_count': runs.filter(evaluator__isnull=False).count(), - 'evaluations_public_count': modeldb.Review.objects.filter(run__run_id__in=[r.run_id for r in runs.filter(evaluator__isnull=False)] - ).filter(published=True).count(), + "evaluations_count": runs.filter(evaluator__isnull=False).count(), + "evaluations_public_count": modeldb.Review.objects.filter( + run__run_id__in=[r.run_id for r in runs.filter(evaluator__isnull=False)] + ) + .filter(published=True) + .count(), "default_upload_name": dataset.default_upload_name, "created": dataset.created, "last_modified": dataset.last_modified, @@ -311,43 +348,56 @@ def _dataset_to_dict(self, dataset): "irds_import_command": dataset.irds_import_command, "irds_import_truth_command": dataset.irds_import_truth_command, "evaluator_git_runner_image": dataset.evaluator.git_runner_image if evaluator_id else None, - "evaluator_git_runner_command": dataset.evaluator.git_runner_command if evaluator_id else None, + "evaluator_git_runner_command": dataset.evaluator.git_runner_command if evaluator_id else None, } - def get_dataset(self, dataset_id: str) -> dict: + def get_dataset(self, dataset_id: str) -> dict[str, Any]: try: - return self._dataset_to_dict(modeldb.Dataset.objects.select_related('default_task', 'evaluator') - .get(dataset_id=dataset_id)) + return 
self._dataset_to_dict( + modeldb.Dataset.objects.select_related("default_task", "evaluator").get(dataset_id=dataset_id) + ) except modeldb.Dataset.DoesNotExist: return {} def get_datasets(self) -> dict: - """ Get a dict of dataset_id: dataset_json_descriptor """ - return {dataset.dataset_id: self._dataset_to_dict(dataset) - for dataset in modeldb.Dataset.objects.select_related('default_task', 'evaluator').all()} + """Get a dict of dataset_id: dataset_json_descriptor""" + return { + dataset.dataset_id: self._dataset_to_dict(dataset) + for dataset in modeldb.Dataset.objects.select_related("default_task", "evaluator").all() + } - def get_datasets_by_task(self, task_id: str, include_deprecated=False, return_only_names=False) -> list: - """ return the list of datasets associated with this task_id + def get_datasets_by_task( + self, task_id: str, include_deprecated=False, return_only_names=False + ) -> list[dict[str, Any]]: + """return the list of datasets associated with this task_id @param task_id: id string of the task the dataset belongs to @param include_deprecated: Default False. If True, also returns datasets marked as deprecated. @return: a list of json-formatted datasets, as returned by get_dataset """ - ret = [d for d in modeldb.TaskHasDataset.objects.filter(task=task_id) if not (d.dataset.is_deprecated and not include_deprecated)] + ret = [ + d + for d in modeldb.TaskHasDataset.objects.filter(task=task_id) + if not (d.dataset.is_deprecated and not include_deprecated) + ] if return_only_names: - return [{'dataset_id': d.dataset.dataset_id, 'display_name': d.dataset.display_name} for d in ret] + return [{"dataset_id": d.dataset.dataset_id, "display_name": d.dataset.display_name} for d in ret] else: return [self._dataset_to_dict(d.dataset) for d in ret] def get_docker_software(self, docker_software_id: str) -> dict: try: - return self._docker_software_to_dict(modeldb.DockerSoftware.objects.get(docker_software_id=docker_software_id)) + return self._docker_software_to_dict( + modeldb.DockerSoftware.objects.get(docker_software_id=docker_software_id) + ) except modeldb.Dataset.DoesNotExist: return {} def get_docker_software_by_name(self, name, vm_id, task_id) -> dict: try: - ret = modeldb.DockerSoftware.objects.filter(vm__vm_id=vm_id, task__task_id=task_id, display_name=name, deleted=False) + ret = modeldb.DockerSoftware.objects.filter( + vm__vm_id=vm_id, task__task_id=task_id, display_name=name, deleted=False + ) if len(ret) == 0: return {} @@ -361,7 +411,9 @@ def run_is_public_and_unblinded(self, run_id): return review.published and not review.blinded def get_reranking_docker_softwares(self): - return [self._docker_software_to_dict(i) for i in modeldb.DockerSoftware.objects.filter(ir_re_ranking_input=True)] + return [ + self._docker_software_to_dict(i) for i in modeldb.DockerSoftware.objects.filter(ir_re_ranking_input=True) + ] def get_all_docker_software_rerankers(self): return [self._docker_software_to_dict(i) for i in modeldb.DockerSoftware.objects.filter(ir_re_ranker=True)] @@ -369,10 +421,10 @@ def get_all_docker_software_rerankers(self): def get_runs_for_docker_software(self, docker_software_id): docker_software = modeldb.DockerSoftware.objects.get(docker_software_id=docker_software_id) - return [self._run_as_dict(i) for i in modeldb.Run.objects.filter(docker_software = docker_software)] + return [self._run_as_dict(i) for i in modeldb.Run.objects.filter(docker_software=docker_software)] def update_input_run_id_for_run(self, run_id, input_run_id): - print(f'Set input_run to 
{input_run_id} for run_id={run_id}') + print(f"Set input_run to {input_run_id} for run_id={run_id}") run = modeldb.Run.objects.get(run_id=run_id) run.input_run = modeldb.Run.objects.get(run_id=input_run_id) if input_run_id else None run.save() @@ -381,31 +433,44 @@ def _organizer_to_dict(self, organizer): git_integrations = [] for git_integration in organizer.git_integrations.all(): - git_integrations += [{'namespace_url': git_integration.namespace_url, 'private_token': ''}] + git_integrations += [{"namespace_url": git_integration.namespace_url, "private_token": ""}] - git_integrations += [{'namespace_url': '', 'private_token': ''}] + git_integrations += [{"namespace_url": "", "private_token": ""}] return { "organizer_id": organizer.organizer_id, "name": organizer.name, "years": organizer.years, "web": organizer.web, - "gitUrlToNamespace": git_integrations[0]['namespace_url'], - "gitPrivateToken": git_integrations[0]['private_token'], + "gitUrlToNamespace": git_integrations[0]["namespace_url"], + "gitPrivateToken": git_integrations[0]["private_token"], } def get_organizer(self, organizer_id: str): return self._organizer_to_dict(modeldb.Organizer.objects.get(organizer_id=organizer_id)) @staticmethod - def create_submission_git_repo(repo_url, vm_id, docker_registry_user, docker_registry_token, discourse_api_key, - reference_repository_url, external_owner, discourse_api_user, discourse_api_descr): + def create_submission_git_repo( + repo_url, + vm_id, + docker_registry_user, + docker_registry_token, + discourse_api_key, + reference_repository_url, + external_owner, + discourse_api_user, + discourse_api_descr, + ): return modeldb.SoftwareSubmissionGitRepository.objects.create( - repository_url=repo_url, vm=modeldb.VirtualMachine.objects.get(vm_id=vm_id), - reference_repository_url=reference_repository_url, external_owner=external_owner, - docker_registry_token=docker_registry_token, docker_registry_user=docker_registry_user, - tira_client_token=discourse_api_key, tira_client_user=discourse_api_user, - tira_client_description=discourse_api_descr + repository_url=repo_url, + vm=modeldb.VirtualMachine.objects.get(vm_id=vm_id), + reference_repository_url=reference_repository_url, + external_owner=external_owner, + docker_registry_token=docker_registry_token, + docker_registry_user=docker_registry_user, + tira_client_token=discourse_api_key, + tira_client_user=discourse_api_user, + tira_client_description=discourse_api_descr, ) def get_submission_git_repo_or_none(self, repository_url, vm_id, return_object=False): @@ -415,11 +480,14 @@ def get_submission_git_repo_or_none(self, repository_url, vm_id, return_object=F if return_object: return ret - return {'repo_url': ret.repository_url, 'http_repo_url': 'https://github.com/' + ret.repository_url, - 'ssh_repo_url': f'git@github.com:{ret.repository_url}.git', - 'owner_url': ret.external_owner, 'http_owner_url': 'https://github.com/' + ret.external_owner - } - except: + return { + "repo_url": ret.repository_url, + "http_repo_url": "https://github.com/" + ret.repository_url, + "ssh_repo_url": f"git@github.com:{ret.repository_url}.git", + "owner_url": ret.external_owner, + "http_owner_url": "https://github.com/" + ret.external_owner, + } + except Exception: return {} if not return_object else None def get_host_list(self) -> list: @@ -432,7 +500,7 @@ def get_organizer_list(self) -> list: return [self._organizer_to_dict(organizer) for organizer in modeldb.Organizer.objects.all()] def get_vm_list(self): - """ load the vm-info file which stores all active vms 
as such: + """load the vm-info file which stores all active vms as such: \t[\t]\n ... @@ -443,24 +511,29 @@ def parse_vm_list(vm_list): for list_entry in vm_list: try: list_entry = list_entry.split("\t") - yield [list_entry[0], list_entry[1].strip(), list_entry[2].strip() if len(list_entry) > 2 else ''] + yield [list_entry[0], list_entry[1].strip(), list_entry[2].strip() if len(list_entry) > 2 else ""] except IndexError as e: logger.error(e, list_entry) - return list(parse_vm_list(open(self.vm_list_file, 'r'))) + return list(parse_vm_list(open(self.vm_list_file, "r"))) @staticmethod def get_vms_by_dataset(dataset_id: str) -> list: - """ return a list of vm_id's that have runs on this dataset """ - return [run.software.vm.vm_id for run in modeldb.Run.objects.select_related('input_dataset', 'software') + """return a list of vm_id's that have runs on this dataset""" + return [ + run.software.vm.vm_id + for run in modeldb.Run.objects.select_related("input_dataset", "software") .exclude(input_dataset=None) .filter(input_dataset__dataset_id=dataset_id) .exclude(software=None) - .all()] + .all() + ] @staticmethod def _run_as_dict(run): - is_evaluation = False if not run.input_run or run.input_run.run_id == 'none' or run.input_run.run_id == 'None' else True + is_evaluation = ( + False if not run.input_run or run.input_run.run_id == "none" or run.input_run.run_id == "None" else True + ) software = None vm = None software_id, evaluator_id, docker_software_id, upload_id = None, None, None, None @@ -480,35 +553,42 @@ def _run_as_dict(run): upload_id = run.upload.id vm = run.upload.vm.vm_id - return {"software": software, - "vm": vm, - "run_id": run.run_id, - "input_run_id": "" if not run.input_run or run.input_run.run_id == 'none' or run.input_run.run_id == 'None' - else run.input_run.run_id, - "is_evaluation": is_evaluation, - "dataset": "" if not run.input_dataset else run.input_dataset.dataset_id, - "downloadable": run.downloadable, - "software_id": software_id, - "evaluator_id": evaluator_id, - "docker_software_id": docker_software_id, - "upload_id": upload_id, - } + return { + "software": software, + "vm": vm, + "run_id": run.run_id, + "input_run_id": ( + "" + if not run.input_run or run.input_run.run_id == "none" or run.input_run.run_id == "None" + else run.input_run.run_id + ), + "is_evaluation": is_evaluation, + "dataset": "" if not run.input_dataset else run.input_dataset.dataset_id, + "downloadable": run.downloadable, + "software_id": software_id, + "evaluator_id": evaluator_id, + "docker_software_id": docker_software_id, + "upload_id": upload_id, + } def get_run(self, dataset_id: str, vm_id: str, run_id: str, return_deleted: bool = False) -> dict: - run = modeldb.Run.objects.select_related('software', 'input_dataset').get(run_id=run_id) + run = modeldb.Run.objects.select_related("software", "input_dataset").get(run_id=run_id) if run.deleted and not return_deleted: return {} return self._run_as_dict(run) def get_vm_runs_by_dataset(self, dataset_id: str, vm_id: str, return_deleted: bool = False) -> list: - return [self._run_as_dict(run) for run in - modeldb.Run.objects.select_related('software', 'input_dataset') - .filter(input_dataset__dataset_id=dataset_id, software__vm__vm_id=vm_id) - if (run.deleted or not return_deleted)] + return [ + self._run_as_dict(run) + for run in modeldb.Run.objects.select_related("software", "input_dataset").filter( + input_dataset__dataset_id=dataset_id, software__vm__vm_id=vm_id + ) + if (run.deleted or not return_deleted) + ] def 
_get_ordered_runs_from_reviews(self, reviews, vm_id, preloaded=True, is_upload=False, is_docker=False): - """ yields all runs with reviews and their evaluation runs with reviews produced by software from a given vm + """yields all runs with reviews and their evaluation runs with reviews produced by software from a given vm evaluation runs (which have a run as input run) are yielded directly after the runs they use. :param reviews: a querySet of modeldb.Review objects to @@ -517,14 +597,17 @@ def _get_ordered_runs_from_reviews(self, reviews, vm_id, preloaded=True, is_uplo Otherwise assume they were preloaded :param is_upload: if true, get only uploaded runs """ + def _run_dict(review_obj): run = self._run_as_dict(review_obj.run) run["review"] = self._review_as_dict(review_obj) - run["reviewed"] = True if not review_obj.has_errors \ - and not review_obj.has_no_errors \ - and not review_obj.has_warnings else False - run['is_upload'] = is_upload - run['is_docker'] = is_docker + run["reviewed"] = ( + True + if not review_obj.has_errors and not review_obj.has_no_errors and not review_obj.has_warnings + else False + ) + run["is_upload"] = is_upload + run["is_docker"] = is_docker return run if is_upload: @@ -537,26 +620,39 @@ def _run_dict(review_obj): for review in reviews_qs: yield _run_dict(review) - r2 = reviews.filter(run__input_run__run_id=review.run.run_id).all() if preloaded \ - else modeldb.Review.objects.select_related('run').filter(run__input_run__run_id=review.run.run_id).all() + r2 = ( + reviews.filter(run__input_run__run_id=review.run.run_id).all() + if preloaded + else modeldb.Review.objects.select_related("run").filter(run__input_run__run_id=review.run.run_id).all() + ) for review2 in r2: yield _run_dict(review2) - def upload_to_dict(self, upload, vm_id): + def upload_to_dict(self, upload: modeldb.Upload, vm_id: str) -> dict[str, Any]: def _runs_by_upload(up): - reviews = modeldb.Review.objects.select_related("run", "run__upload", "run__evaluator", "run__input_run", - "run__input_dataset").filter(run__upload=up).all() + reviews = ( + modeldb.Review.objects.select_related( + "run", "run__upload", "run__evaluator", "run__input_run", "run__input_dataset" + ) + .filter(run__upload=up) + .all() + ) return list(self._get_ordered_runs_from_reviews(reviews, vm_id, preloaded=False, is_upload=True)) - return {"id": upload.id, "task_id": upload.task.task_id, "vm_id": upload.vm.vm_id, - "dataset": None if not upload.dataset else upload.dataset.dataset_id, - "last_edit": upload.last_edit_date, "runs": _runs_by_upload(upload), - "display_name": upload.display_name, "description": upload.description, - "paper_link": upload.paper_link, - "rename_to": upload.rename_to, - } + return { + "id": upload.id, + "task_id": upload.task.task_id, + "vm_id": upload.vm.vm_id, + "dataset": None if not upload.dataset else upload.dataset.dataset_id, + "last_edit": upload.last_edit_date, + "runs": _runs_by_upload(upload), + "display_name": upload.display_name, + "description": upload.description, + "paper_link": upload.paper_link, + "rename_to": upload.rename_to, + } def get_upload_with_runs(self, task_id, vm_id): ret = [] @@ -571,12 +667,13 @@ def get_upload(self, task_id, vm_id, upload_id): def get_discourse_token_for_user(self, vm_id): try: return modeldb.DiscourseTokenForUser.objects.get(vm_id__vm_id=vm_id).token - except: + except Exception: return None def create_discourse_token_for_user(self, vm_id, discourse_api_key): - 
modeldb.DiscourseTokenForUser.objects.create(vm_id=modeldb.VirtualMachine.objects.get(vm_id=vm_id), - token=discourse_api_key) + modeldb.DiscourseTokenForUser.objects.create( + vm_id=modeldb.VirtualMachine.objects.get(vm_id=vm_id), token=discourse_api_key + ) @staticmethod def get_uploads(task_id, vm_id, return_names_only=True): @@ -593,14 +690,15 @@ def _docker_software_to_dict(self, ds): if ds.input_docker_software: input_docker_software = ds.input_docker_software.display_name if ds.input_upload: - input_docker_software = 'Upload ' + ds.input_upload.display_name + input_docker_software = "Upload " + ds.input_upload.display_name if input_docker_software: previous_stages = [input_docker_software] - additional_inputs = modeldb.DockerSoftwareHasAdditionalInput.objects \ - .select_related('input_docker_software', 'input_upload') \ - .filter(docker_software__docker_software_id=ds.docker_software_id) \ - .order_by('position') + additional_inputs = ( + modeldb.DockerSoftwareHasAdditionalInput.objects.select_related("input_docker_software", "input_upload") + .filter(docker_software__docker_software_id=ds.docker_software_id) + .order_by("position") + ) for i in additional_inputs: if i.input_docker_software: previous_stages += [i.input_docker_software.display_name] @@ -609,40 +707,59 @@ def _docker_software_to_dict(self, ds): link_code = None try: - link_code = modeldb.LinkToSoftwareSubmissionGitRepository.objects.get(docker_software__docker_software_id=ds.docker_software_id) + link_code = modeldb.LinkToSoftwareSubmissionGitRepository.objects.get( + docker_software__docker_software_id=ds.docker_software_id + ) link_code = self.__link_to_code(link_code.build_environment) - except Exception as e: + except Exception: link_code = None mount_hf_model = None - hf_models = modeldb.HuggingFaceModelsOfSoftware.objects \ - .filter(docker_software__docker_software_id=ds.docker_software_id) \ - .only('mount_hf_model') + hf_models = modeldb.HuggingFaceModelsOfSoftware.objects.filter( + docker_software__docker_software_id=ds.docker_software_id + ).only("mount_hf_model") if hf_models and len(hf_models) > 0: mount_hf_model = hf_models[0].mount_hf_model - return {'docker_software_id': ds.docker_software_id, 'display_name': ds.display_name, - 'user_image_name': ds.user_image_name, 'command': ds.command, - 'tira_image_name': ds.tira_image_name, 'task_id': ds.task.task_id, - 'vm_id': ds.vm.vm_id, 'description': ds.description, 'paper_link': ds.paper_link, - 'input_docker_software': input_docker_software, - 'input_docker_software_id': ds.input_docker_software.docker_software_id if ds.input_docker_software else None, - 'input_upload_id': ds.input_upload.id if ds.input_upload else None, - "ir_re_ranker": True if ds.ir_re_ranker else False, - 'public_image_name': ds.public_image_name, - "ir_re_ranking_input": True if ds.ir_re_ranking_input else False, - 'previous_stages': previous_stages, - 'tira_image_workdir': ds.tira_image_workdir, - 'link_code': link_code, 'mount_hf_model': mount_hf_model - } + return { + "docker_software_id": ds.docker_software_id, + "display_name": ds.display_name, + "user_image_name": ds.user_image_name, + "command": ds.command, + "tira_image_name": ds.tira_image_name, + "task_id": ds.task.task_id, + "vm_id": ds.vm.vm_id, + "description": ds.description, + "paper_link": ds.paper_link, + "input_docker_software": input_docker_software, + "input_docker_software_id": ( + ds.input_docker_software.docker_software_id if ds.input_docker_software else None + ), + "input_upload_id": ds.input_upload.id if 
ds.input_upload else None, + "ir_re_ranker": True if ds.ir_re_ranker else False, + "public_image_name": ds.public_image_name, + "ir_re_ranking_input": True if ds.ir_re_ranking_input else False, + "previous_stages": previous_stages, + "tira_image_workdir": ds.tira_image_workdir, + "link_code": link_code, + "mount_hf_model": mount_hf_model, + } @staticmethod def cloned_submissions_of_user(vm_id, task_id): ret = [] - for i in modeldb.SoftwareClone.objects.filter(vm__vm_id=vm_id, task__task_id=task_id).select_related("docker_software", "upload"): + for i in modeldb.SoftwareClone.objects.filter(vm__vm_id=vm_id, task__task_id=task_id).select_related( + "docker_software", "upload" + ): if i.docker_software: - ret += [{"docker_software_id": i.docker_software.docker_software_id, "display_name": i.docker_software.display_name, "type": "docker"}] + ret += [ + { + "docker_software_id": i.docker_software.docker_software_id, + "display_name": i.docker_software.display_name, + "type": "docker", + } + ] elif i.upload: ret += [{"id": i.upload.id, "display_name": i.upload.display_name, "type": "upload"}] @@ -652,31 +769,43 @@ def cloned_submissions_of_user(vm_id, task_id): def submissions_of_user(vm_id): ret = [] for i in modeldb.DockerSoftware.objects.filter(vm__vm_id=vm_id, deleted=False): - ret += [{'title': i.task_id + '/' + i.display_name, 'task_id': i.task_id, 'type': 'docker', 'id': i.docker_software_id}] + ret += [ + { + "title": i.task_id + "/" + i.display_name, + "task_id": i.task_id, + "type": "docker", + "id": i.docker_software_id, + } + ] for i in modeldb.Upload.objects.filter(vm__vm_id=vm_id, deleted=False): - ret += [{'title': i.task_id + '/' + i.display_name, 'task_id': i.task_id, 'type': 'upload', 'id': i.id}] + ret += [{"title": i.task_id + "/" + i.display_name, "task_id": i.task_id, "type": "upload", "id": i.id}] return ret @staticmethod def import_submission(task_id, vm_id, submission_type, s_id): docker_software, upload = None, None - if submission_type == 'docker': - docker_software = modeldb.DockerSoftware.objects.filter(vm__vm_id=vm_id, docker_software_id=s_id, deleted=False)[0] + if submission_type == "docker": + docker_software = modeldb.DockerSoftware.objects.filter( + vm__vm_id=vm_id, docker_software_id=s_id, deleted=False + )[0] else: upload = modeldb.Upload.objects.filter(vm__vm_id=vm_id, id=s_id, deleted=False)[0] - modeldb.SoftwareClone.objects.create(vm=modeldb.VirtualMachine.objects.get(vm_id=vm_id), - task=modeldb.Task.objects.get(task_id=task_id), - docker_software=docker_software, upload=upload) - + modeldb.SoftwareClone.objects.create( + vm=modeldb.VirtualMachine.objects.get(vm_id=vm_id), + task=modeldb.Task.objects.get(task_id=task_id), + docker_software=docker_software, + upload=upload, + ) def get_count_of_missing_reviews(self, task_id): prepared_statement = """ SELECT tira_run.input_dataset_id, - SUM(CASE WHEN tira_review.has_errors = False AND tira_review.has_no_errors = FALSE AND tira_review.has_warnings = FALSE THEN 1 ELSE 0 END) as ToReview, + SUM(CASE WHEN tira_review.has_errors = False AND tira_review.has_no_errors = FALSE AND + tira_review.has_warnings = FALSE THEN 1 ELSE 0 END) as ToReview, COUNT(*) as submissions FROM tira_run @@ -693,18 +822,20 @@ def get_count_of_missing_reviews(self, task_id): ret = [] rows = self.execute_raw_sql_statement(prepared_statement, params=[task_id]) for dataset_id, to_review, submissions in rows: - ret += [{'dataset_id': dataset_id, 'to_review': to_review, 'submissions': submissions}] + ret += [{"dataset_id": dataset_id, 
"to_review": to_review, "submissions": submissions}] return ret def get_count_of_team_submissions(self, task_id): - task = self.get_task(task_id, False) - all_teams_on_task = set([i.strip() for i in task['allowed_task_teams'].split() if i.strip()]) - prepared_statement = """ + task = self.get_task(task_id, False) + all_teams_on_task = set([i.strip() for i in task["allowed_task_teams"].split() if i.strip()]) + prepared_statement = """ SELECT tira_dockersoftware.vm_id as vm, - SUM(CASE WHEN tira_review.has_errors = False AND tira_review.has_no_errors = FALSE AND tira_review.has_warnings = FALSE THEN 1 ELSE 0 END) as ToReview, - COUNT(*) - SUM(CASE WHEN tira_review.has_errors = False AND tira_review.has_no_errors = FALSE AND tira_review.has_warnings = FALSE THEN 1 ELSE 0 END) as submissions, + SUM(CASE WHEN tira_review.has_errors = False AND tira_review.has_no_errors = FALSE AND + tira_review.has_warnings = FALSE THEN 1 ELSE 0 END) as ToReview, + COUNT(*) - SUM(CASE WHEN tira_review.has_errors = False AND tira_review.has_no_errors = FALSE AND + tira_review.has_warnings = FALSE THEN 1 ELSE 0 END) as submissions, COUNT(*) as total FROM tira_run @@ -720,15 +851,23 @@ def get_count_of_team_submissions(self, task_id): tira_dockersoftware.vm_id; """ - ret = [] - rows = self.execute_raw_sql_statement(prepared_statement, params=[task_id]) - for vm, to_review, submissions, total in rows: - if vm is not None: - ret += [{'team': vm, 'reviewed': submissions, 'to_review': to_review, 'total': total, 'link': link_to_discourse_team(vm)}] - for team in all_teams_on_task: - if team not in [t['team'] for t in ret]: - ret += [{'team': team, 'reviewed': 0, 'to_review': 0, 'total': 0, 'link': link_to_discourse_team(team)}] - return ret + ret = [] + rows = self.execute_raw_sql_statement(prepared_statement, params=[task_id]) + for vm, to_review, submissions, total in rows: + if vm is not None: + ret += [ + { + "team": vm, + "reviewed": submissions, + "to_review": to_review, + "total": total, + "link": link_to_discourse_team(vm), + } + ] + for team in all_teams_on_task: + if team not in [t["team"] for t in ret]: + ret += [{"team": team, "reviewed": 0, "to_review": 0, "total": 0, "link": link_to_discourse_team(team)}] + return ret def runs(self, task_id, dataset_id, vm_id, software_id): prepared_statement = """ @@ -745,43 +884,74 @@ def runs(self, task_id, dataset_id, vm_id, software_id): LEFT JOIN tira_review as tira_run_review ON tira_run.run_id = tira_run_review.run_id LEFT JOIN - tira_softwareclone AS software_clone ON tira_dockersoftware.docker_software_id = software_clone.docker_software_id + tira_softwareclone AS software_clone ON + tira_dockersoftware.docker_software_id = software_clone.docker_software_id LEFT JOIN tira_softwareclone AS upload_clone ON tira_run.upload_id = upload_clone.upload_id WHERE - ((tira_run_review.published = TRUE AND tira_run_review.blinded = FALSE) OR tira_dockersoftware.task_id = 'ir-lab-padua-2024' OR tira_dockersoftware.task_id = 'ir-lab-sose-2024') + ((tira_run_review.published = TRUE AND tira_run_review.blinded = FALSE) OR + tira_dockersoftware.task_id = 'ir-lab-padua-2024' OR + tira_dockersoftware.task_id = 'ir-lab-sose-2024') AND tira_run.input_dataset_id = %s - AND (tira_dockersoftware.task_id = %s OR tira_upload.task_id = %s OR tira_software.task_id = %s or software_clone.task_id = %s or upload_clone.task_id = %s) + AND (tira_dockersoftware.task_id = %s OR tira_upload.task_id = %s OR tira_software.task_id = %s or + software_clone.task_id = %s or upload_clone.task_id = %s) AND 
(tira_dockersoftware.vm_id = %s OR tira_upload.vm_id = %s OR tira_software.vm_id = %s) - AND (tira_dockersoftware.display_name = %s OR tira_upload.display_name = %s OR tira_software.id = %s) - + AND (tira_dockersoftware.display_name = %s OR tira_upload.display_name = %s OR + tira_software.id = %s) + ORDER BY - tira_run.run_id ASC; + tira_run.run_id ASC; """ - params = [dataset_id, task_id, task_id, task_id, task_id, task_id, vm_id, vm_id, vm_id, software_id, software_id, software_id] - return [{'run_id': i[0], 'software_id': i[1], 'upload_id': i[2]} for i in self.execute_raw_sql_statement(prepared_statement, params)] - - def get_runs_for_vm(self, vm_id, docker_software_id, upload_id, include_unpublished=True, round_floats=True, show_only_unreviewed=False): + params = [ + dataset_id, + task_id, + task_id, + task_id, + task_id, + task_id, + vm_id, + vm_id, + vm_id, + software_id, + software_id, + software_id, + ] + return [ + {"run_id": i[0], "software_id": i[1], "upload_id": i[2]} + for i in self.execute_raw_sql_statement(prepared_statement, params) + ] + + def get_runs_for_vm( + self, + vm_id, + docker_software_id, + upload_id, + include_unpublished=True, + round_floats=True, + show_only_unreviewed=False, + ): prepared_statement = """ SELECT evaluation_run.input_dataset_id, evaluation_run.run_id, input_run.run_id, tira_upload.display_name, tira_upload.vm_id, tira_software.vm_id, tira_dockersoftware.display_name, tira_dockersoftware.vm_id, - tira_evaluation_review.published, tira_evaluation_review.blinded, tira_run_review.published, + tira_evaluation_review.published, tira_evaluation_review.blinded, tira_run_review.published, tira_run_review.blinded, tira_evaluation.measure_key, tira_evaluation.measure_value, tira_run_review.reviewer_id, tira_run_review.no_errors, tira_run_review.has_errors, - tira_run_review.has_no_errors, tira_evaluation_review.reviewer_id, tira_run_review.reviewer_id, tira_linktosoftwaresubmissiongitrepository.build_environment + tira_run_review.has_no_errors, tira_evaluation_review.reviewer_id, tira_run_review.reviewer_id, + tira_linktosoftwaresubmissiongitrepository.build_environment FROM tira_run as evaluation_run - INNER JOIN - tira_run as input_run ON evaluation_run.input_run_id = input_run.run_id + INNER JOIN + tira_run as input_run ON evaluation_run.input_run_id = input_run.run_id LEFT JOIN - tira_upload ON input_run.upload_id = tira_upload.id + tira_upload ON input_run.upload_id = tira_upload.id LEFT JOIN - tira_software ON input_run.software_id = tira_software.id + tira_software ON input_run.software_id = tira_software.id LEFT JOIN - tira_dockersoftware ON input_run.docker_software_id = tira_dockersoftware.docker_software_id + tira_dockersoftware ON input_run.docker_software_id = tira_dockersoftware.docker_software_id LEFT JOIN - tira_linktosoftwaresubmissiongitrepository ON tira_dockersoftware.docker_software_id = tira_linktosoftwaresubmissiongitrepository.docker_software_id + tira_linktosoftwaresubmissiongitrepository ON + tira_dockersoftware.docker_software_id = tira_linktosoftwaresubmissiongitrepository.docker_software_id LEFT JOIN tira_review as tira_evaluation_review ON evaluation_run.run_id = tira_evaluation_review.run_id LEFT JOIN @@ -789,20 +959,20 @@ def get_runs_for_vm(self, vm_id, docker_software_id, upload_id, include_unpublis LEFT JOIN tira_evaluation ON tira_evaluation.run_id = evaluation_run.run_id WHERE - evaluation_run.input_run_id is not null AND evaluation_run.deleted = FALSE - AND evaluation_run.evaluator_id IS NOT NULL AND input_run.deleted 
= False - AND (tira_dockersoftware.vm_id = %s OR tira_upload.vm_id = %s OR tira_software.vm_id = %s ) AND + evaluation_run.input_run_id is not null AND evaluation_run.deleted = FALSE + AND evaluation_run.evaluator_id IS NOT NULL AND input_run.deleted = False + AND (tira_dockersoftware.vm_id = %s OR tira_upload.vm_id = %s OR tira_software.vm_id = %s ) AND ORDER BY - tira_evaluation.id ASC; + tira_evaluation.id ASC; """ params = [vm_id, vm_id, vm_id] if upload_id: - prepared_statement = prepared_statement.replace('', 'tira_upload.id = %s') + prepared_statement = prepared_statement.replace("", "tira_upload.id = %s") params += [upload_id] else: - prepared_statement = prepared_statement.replace('', 'tira_dockersoftware.docker_software_id = %s') + prepared_statement = prepared_statement.replace("", "tira_dockersoftware.docker_software_id = %s") params += [docker_software_id] rows = self.execute_raw_sql_statement(prepared_statement, params) @@ -817,37 +987,54 @@ def get_runs_for_vm(self, vm_id, docker_software_id, upload_id, include_unpublis tira_run as input_run INNER JOIN tira_upload ON input_run.upload_id = tira_upload.id - LEFT JOIN + LEFT JOIN tira_run as evaluation_run ON evaluation_run.input_run_id = input_run.run_id WHERE - evaluation_run.input_run_id is null AND input_run.deleted = False + evaluation_run.input_run_id is null AND input_run.deleted = False AND tira_upload.vm_id = %s AND tira_upload.id = %s ORDER BY - input_run.run_id ASC; + input_run.run_id ASC; """ rows = self.execute_raw_sql_statement(prepared_statement, [vm_id, upload_id]) for dataset_id, run_id, display_name in rows: print(run_id) - from_uploads += [{'dataset_id': dataset_id, 'vm_id': vm_id, 'input_software_name': display_name, - 'run_id': run_id, 'input_run_id': run_id, 'published': False, 'blinded': True, - 'is_upload': True, 'is_software': False, 'review_state': 'no-review', 'measures': {} - }] + from_uploads += [ + { + "dataset_id": dataset_id, + "vm_id": vm_id, + "input_software_name": display_name, + "run_id": run_id, + "input_run_id": run_id, + "published": False, + "blinded": True, + "is_upload": True, + "is_software": False, + "review_state": "no-review", + "measures": {}, + } + ] return ret[0], (ret[1] + from_uploads) def get_docker_softwares_with_runs(self, task_id, vm_id): def _runs_by_docker_software(ds): - reviews = modeldb.Review.objects.select_related("run", "run__upload", "run__evaluator", "run__input_run", - "run__input_dataset").filter(run__docker_software=ds).all() + reviews = ( + modeldb.Review.objects.select_related( + "run", "run__upload", "run__evaluator", "run__input_run", "run__input_dataset" + ) + .filter(run__docker_software=ds) + .all() + ) return list(self._get_ordered_runs_from_reviews(reviews, vm_id, preloaded=False, is_docker=True)) docker_softwares = self.get_docker_softwares(task_id, vm_id, return_only_names=False) - docker_softwares = [{**self._docker_software_to_dict(ds), 'runs': _runs_by_docker_software(ds)} - for ds in docker_softwares] + docker_softwares = [ + {**self._docker_software_to_dict(ds), "runs": _runs_by_docker_software(ds)} for ds in docker_softwares + ] return docker_softwares @@ -856,32 +1043,38 @@ def get_docker_softwares(task_id, vm_id, return_only_names=True): ret = modeldb.DockerSoftware.objects.filter(vm__vm_id=vm_id, task__task_id=task_id, deleted=False) if return_only_names: - return [{'docker_software_id': i.docker_software_id, 'display_name': i.display_name} for i in ret] + return [{"docker_software_id": i.docker_software_id, "display_name": i.display_name} 
for i in ret] else: return ret def get_public_docker_softwares(self, task_id, return_only_names=True, return_details=True): - ret = modeldb.DockerSoftware.objects.filter(task__task_id=task_id, deleted=False, - public_image_name__isnull=False) + ret = modeldb.DockerSoftware.objects.filter( + task__task_id=task_id, deleted=False, public_image_name__isnull=False + ) ret = [i for i in ret if i.public_image_name and i.public_image_size] if return_only_names: - return [{'docker_software_id': i.docker_software_id, 'display_name': i.display_name, 'vm_id': i.vm_id - } - for i in ret] + return [ + {"docker_software_id": i.docker_software_id, "display_name": i.display_name, "vm_id": i.vm_id} + for i in ret + ] elif return_details: return [self._docker_software_to_dict(i) for i in ret] else: return ret def delete_docker_software(self, task_id, vm_id, docker_software_id): - software_qs = modeldb.DockerSoftware.objects.filter(vm_id=vm_id, task_id=task_id, - docker_software_id=docker_software_id) + software_qs = modeldb.DockerSoftware.objects.filter( + vm_id=vm_id, task_id=task_id, docker_software_id=docker_software_id + ) - reviews_qs = modeldb.Review.objects.filter(run__input_run__docker_software__docker_software_id=docker_software_id, - run__input_run__docker_software__task_id=task_id, - run__input_run__docker_software__vm_id=vm_id, no_errors=True) + reviews_qs = modeldb.Review.objects.filter( + run__input_run__docker_software__docker_software_id=docker_software_id, + run__input_run__docker_software__task_id=task_id, + run__input_run__docker_software__vm_id=vm_id, + no_errors=True, + ) if not reviews_qs.exists() and software_qs.exists(): software_qs.delete() @@ -896,18 +1089,24 @@ def get_irds_docker_software_id(self, task_id, vm_id, software_id, docker_softwa irds_re_ranking_image = task.get("irds_re_ranking_image", "") irds_re_ranking_command = task.get("irds_re_ranking_command", "") irds_re_ranking_resource = task.get("irds_re_ranking_resource", "") - irds_display_name = 'IRDS-Job For ' + task_id + f' (vm: {vm_id}, software: {software_id}, docker: {docker_software_id})' + irds_display_name = ( + "IRDS-Job For " + task_id + f" (vm: {vm_id}, software: {software_id}, docker: {docker_software_id})" + ) if not is_ir_task or not irds_re_ranking_image or not irds_re_ranking_command or not irds_re_ranking_resource: - raise ValueError('This is not a irds-re-ranking task:' + str(task)) + raise ValueError("This is not a irds-re-ranking task:" + str(task)) task = modeldb.Task.objects.get(task_id=task_id) - vm = modeldb.VirtualMachine.objects.get(vm_id='froebe') + vm = modeldb.VirtualMachine.objects.get(vm_id="froebe") - ret = modeldb.DockerSoftware.objects.filter(vm=vm, task=task, command=irds_re_ranking_command, - tira_image_name=irds_re_ranking_image, - user_image_name=irds_re_ranking_image, - display_name=irds_display_name) + ret = modeldb.DockerSoftware.objects.filter( + vm=vm, + task=task, + command=irds_re_ranking_command, + tira_image_name=irds_re_ranking_image, + user_image_name=irds_re_ranking_image, + display_name=irds_display_name, + ) if len(ret) > 0: return ret[0] @@ -918,23 +1117,27 @@ def get_irds_docker_software_id(self, task_id, vm_id, software_id, docker_softwa command=irds_re_ranking_command, tira_image_name=irds_re_ranking_image, user_image_name=irds_re_ranking_image, - display_name=irds_display_name + display_name=irds_display_name, ) - ret = modeldb.DockerSoftware.objects.filter(vm=vm, task=task, command=irds_re_ranking_command, - tira_image_name=irds_re_ranking_image, - 
user_image_name=irds_re_ranking_image, - display_name=irds_display_name) + ret = modeldb.DockerSoftware.objects.filter( + vm=vm, + task=task, + command=irds_re_ranking_command, + tira_image_name=irds_re_ranking_image, + user_image_name=irds_re_ranking_image, + display_name=irds_display_name, + ) return ret[0] if len(ret) > 0 else None def get_evaluations_of_run(self, vm_id, run_id): - prepared_statement = ''' + prepared_statement = """ SELECT evaluation_run.run_id FROM tira_run as evaluation_run - INNER JOIN + INNER JOIN tira_run as input_run ON evaluation_run.input_run_id = input_run.run_id LEFT JOIN tira_upload ON input_run.upload_id = tira_upload.id @@ -945,26 +1148,31 @@ def get_evaluations_of_run(self, vm_id, run_id): WHERE evaluation_run.input_run_id = %s and evaluation_run.evaluator_id IS NOT NULL AND (tira_upload.vm_id = %s OR tira_software.vm_id = %s OR tira_dockersoftware.vm_id = %s) - ''' + """ return [i[0] for i in self.execute_raw_sql_statement(prepared_statement, [run_id, vm_id, vm_id, vm_id])] def get_vms_with_reviews(self, dataset_id: str): - """ returns a list of dicts with: - {"vm_id": vm_id, - "runs": [{run, review}, ...], - "unreviewed_count": unreviewed_count, - "blinded_count": blinded_count, - "published_count": published_count} - """ + """returns a list of dicts with: + {"vm_id": vm_id, + "runs": [{run, review}, ...], + "unreviewed_count": unreviewed_count, + "blinded_count": blinded_count, + "published_count": published_count} + """ results = [] - reviews = modeldb.Review.objects.select_related('run', 'run__software', 'run__docker_software', - 'run__evaluator', 'run__upload', - 'run__input_run').filter( - run__input_dataset__dataset_id=dataset_id).all() + reviews = ( + modeldb.Review.objects.select_related( + "run", "run__software", "run__docker_software", "run__evaluator", "run__upload", "run__input_run" + ) + .filter(run__input_dataset__dataset_id=dataset_id) + .all() + ) - upload_vms = {vm_id["run__upload__vm__vm_id"] for vm_id in reviews.values('run__upload__vm__vm_id')} - software_vms = {vm_id["run__software__vm__vm_id"] for vm_id in reviews.values('run__software__vm__vm_id')} - docker_vms = {vm_id["run__docker_software__vm__vm_id"] for vm_id in reviews.values('run__docker_software__vm__vm_id')} + upload_vms = {vm_id["run__upload__vm__vm_id"] for vm_id in reviews.values("run__upload__vm__vm_id")} + software_vms = {vm_id["run__software__vm__vm_id"] for vm_id in reviews.values("run__software__vm__vm_id")} + docker_vms = { + vm_id["run__docker_software__vm__vm_id"] for vm_id in reviews.values("run__docker_software__vm__vm_id") + } for vm_id in upload_vms.union(software_vms).union(docker_vms): if not vm_id: @@ -977,24 +1185,31 @@ def get_vms_with_reviews(self, dataset_id: str): if vm_id in docker_vms: runs += list(self._get_ordered_runs_from_reviews(reviews, vm_id, is_docker=True)) - results.append({"vm_id": vm_id, - "runs": runs, - "unreviewed_count": len([_['reviewed'] for _ in runs if _['reviewed'] is True]), - "blinded_count": len([_['review']['blinded'] for _ in runs if _['review']['blinded'] is True]), - "published_count": len([_['review']['published'] for _ in runs if _['review']['published'] is True]), - }) + results.append( + { + "vm_id": vm_id, + "runs": runs, + "unreviewed_count": len([_["reviewed"] for _ in runs if _["reviewed"] is True]), + "blinded_count": len([_["review"]["blinded"] for _ in runs if _["review"]["blinded"] is True]), + "published_count": len( + [_["review"]["published"] for _ in runs if _["review"]["published"] is True] + ), + } + ) 
return results def get_vm_runs_by_task(self, task_id: str, vm_id: str, return_deleted: bool = False) -> list: - """ returns a list of all the runs of a user over all datasets in json (as returned by _load_user_runs) """ - return [self._run_as_dict(run) for run in - modeldb.Run.objects.select_related('software', 'input_dataset') - .filter(software__vm__vm_id=vm_id, input_dataset__default_task__task_id=task_id, - software__task__task_id=task_id) - if (run.deleted or not return_deleted)] + """returns a list of all the runs of a user over all datasets in json (as returned by _load_user_runs)""" + return [ + self._run_as_dict(run) + for run in modeldb.Run.objects.select_related("software", "input_dataset").filter( + software__vm__vm_id=vm_id, input_dataset__default_task__task_id=task_id, software__task__task_id=task_id + ) + if (run.deleted or not return_deleted) + ] - def get_evaluator(self, dataset_id, task_id=None): - """ returns a dict containing the evaluator parameters: + def get_evaluator(self, dataset_id: str, task_id: Optional[str] = None) -> dict[str, Any]: + """returns a dict containing the evaluator parameters: vm_id: id of the master vm running the evaluator host: ip or hostname of the host @@ -1008,37 +1223,49 @@ def get_evaluator(self, dataset_id, task_id=None): vm_id = master_vm.vm_id host = master_vm.host else: - vm_id = '' - host = '' + vm_id = "" + host = "" if not task_id: - dataset = modeldb.Dataset.objects.filter(evaluator=evaluator).latest('last_modified') + dataset = modeldb.Dataset.objects.filter(evaluator=evaluator).latest("last_modified") task_id = dataset.default_task.task_id - return {"vm_id": vm_id, "host": host, "command": evaluator.command, "task_id": task_id, - "evaluator_id": evaluator.evaluator_id, - "working_dir": evaluator.working_directory, 'measures': evaluator.measures, - "is_git_runner": evaluator.is_git_runner, "git_runner_image": evaluator.git_runner_image, - "git_runner_command": evaluator.git_runner_command, "git_repository_id": evaluator.git_repository_id, } + return { + "vm_id": vm_id, + "host": host, + "command": evaluator.command, + "task_id": task_id, + "evaluator_id": evaluator.evaluator_id, + "working_dir": evaluator.working_directory, + "measures": evaluator.measures, + "is_git_runner": evaluator.is_git_runner, + "git_runner_image": evaluator.git_runner_image, + "git_runner_command": evaluator.git_runner_command, + "git_repository_id": evaluator.git_repository_id, + } @staticmethod def get_vm_evaluations_by_dataset(dataset_id, vm_id, only_public_results=True): - """ Return a dict of run_id: evaluation_results for the given vm on the given dataset + """Return a dict of run_id: evaluation_results for the given vm on the given dataset {run_id: {measure.key: measure.value for measure in evaluation.measure}} @param only_public_results: only return the measures for published datasets. 
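A short sketch of consuming the mapping documented here, i.e. {run_id: {measure_key: measure_value}}; identifiers are placeholders:

    evaluations = db.get_vm_evaluations_by_dataset("example-dataset", "example-team", only_public_results=True)
    for run_id, measures in evaluations.items():
        for key, value in measures.items():
            print(run_id, key, value)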
""" result = {} - for run in modeldb.Run.objects.filter(software__vm__vm_id=vm_id, input_dataset__dataset_id=dataset_id, - deleted=False): + for run in modeldb.Run.objects.filter( + software__vm__vm_id=vm_id, input_dataset__dataset_id=dataset_id, deleted=False + ): if only_public_results and modeldb.Review.objects.filter(run=run).exists(): if not modeldb.Review.objects.get(run=run).published: continue - result[run.run_id] = {evaluation.measure_key: evaluation.measure_value - for evaluation in run.evaluation_set.all()} + result[run.run_id] = { + evaluation.measure_key: evaluation.measure_value for evaluation in run.evaluation_set.all() + } return result - def get_evaluations_with_keys_by_dataset(self, dataset_id, include_unpublished=False, round_floats=True, show_only_unreviewed=False): + def get_evaluations_with_keys_by_dataset( + self, dataset_id, include_unpublished=False, round_floats=True, show_only_unreviewed=False + ): """ This function returns the data to render the Leaderboards: A list of keys 'ev-keys' of the evaluation measures which will be the column titles, and a list of evaluations. Each evaluations contains the vm and run id @@ -1052,18 +1279,18 @@ def get_evaluations_with_keys_by_dataset(self, dataset_id, include_unpublished=F evaluation is a list of evaluations, each evaluation is a dict with {vm_id: str, run_id: str, measures: list} """ - prepared_statement = ''' + prepared_statement = """ SELECT - evaluation_run.input_dataset_id, evaluation_run.run_id, input_run.run_id, tira_upload.display_name, tira_upload.vm_id, tira_software.vm_id, - tira_dockersoftware.display_name, tira_dockersoftware.vm_id, tira_evaluation_review.published, - tira_evaluation_review.blinded, tira_run_review.published, tira_run_review.blinded, - tira_evaluation.measure_key, tira_evaluation.measure_value, tira_run_review.reviewer_id, - tira_run_review.no_errors, tira_run_review.has_errors, tira_run_review.has_no_errors, - tira_evaluation_review.reviewer_id, tira_run_review.reviewer_id, + evaluation_run.input_dataset_id, evaluation_run.run_id, input_run.run_id, tira_upload.display_name, + tira_upload.vm_id, tira_software.vm_id, tira_dockersoftware.display_name, tira_dockersoftware.vm_id, + tira_evaluation_review.published, tira_evaluation_review.blinded, tira_run_review.published, + tira_run_review.blinded, tira_evaluation.measure_key, tira_evaluation.measure_value, + tira_run_review.reviewer_id, tira_run_review.no_errors, tira_run_review.has_errors, + tira_run_review.has_no_errors, tira_evaluation_review.reviewer_id, tira_run_review.reviewer_id, tira_linktosoftwaresubmissiongitrepository.build_environment FROM tira_run as evaluation_run - INNER JOIN + INNER JOIN tira_run as input_run ON evaluation_run.input_run_id = input_run.run_id LEFT JOIN tira_upload ON input_run.upload_id = tira_upload.id @@ -1072,7 +1299,8 @@ def get_evaluations_with_keys_by_dataset(self, dataset_id, include_unpublished=F LEFT JOIN tira_dockersoftware ON input_run.docker_software_id = tira_dockersoftware.docker_software_id LEFT JOIN - tira_linktosoftwaresubmissiongitrepository ON tira_dockersoftware.docker_software_id = tira_linktosoftwaresubmissiongitrepository.docker_software_id + tira_linktosoftwaresubmissiongitrepository ON + tira_dockersoftware.docker_software_id = tira_linktosoftwaresubmissiongitrepository.docker_software_id LEFT JOIN tira_review as tira_evaluation_review ON evaluation_run.run_id = tira_evaluation_review.run_id LEFT JOIN @@ -1080,26 +1308,28 @@ def get_evaluations_with_keys_by_dataset(self, dataset_id, 
include_unpublished=F LEFT JOIN tira_evaluation ON tira_evaluation.run_id = evaluation_run.run_id WHERE - evaluation_run.input_run_id is not null AND evaluation_run.deleted = FALSE AND input_run.deleted = False + evaluation_run.input_run_id is not null AND evaluation_run.deleted = FALSE AND input_run.deleted = False AND evaluation_run.evaluator_id IS NOT NULL AND ORDER BY tira_evaluation.id ASC; - ''' + """ dataset_id_statement = [] dataset_ids = [dataset_id] additional_datasets = modeldb.Dataset.objects.get(dataset_id=dataset_id).meta_dataset_of if additional_datasets: - dataset_ids += [j.strip() for j in additional_datasets.split(',') if j.strip()] + dataset_ids += [j.strip() for j in additional_datasets.split(",") if j.strip()] for _ in dataset_ids: - dataset_id_statement += ['evaluation_run.input_dataset_id = %s'] - dataset_id_statement = ' OR '.join(dataset_id_statement) - prepared_statement = prepared_statement.replace('', f'({dataset_id_statement})') + dataset_id_statement += ["evaluation_run.input_dataset_id = %s"] + dataset_id_statement = " OR ".join(dataset_id_statement) + prepared_statement = prepared_statement.replace("", f"({dataset_id_statement})") rows = self.execute_raw_sql_statement(prepared_statement, params=dataset_ids) - return self.__parse_submissions(rows, include_unpublished, round_floats, show_only_unreviewed, show_only_unreviewed) + return self.__parse_submissions( + rows, include_unpublished, round_floats, show_only_unreviewed, show_only_unreviewed + ) @staticmethod def __link_to_code(build_environment): @@ -1108,27 +1338,36 @@ def __link_to_code(build_environment): try: build_environment = json.loads(json.loads(build_environment)) - except: + except Exception: return None - if 'GITHUB_REPOSITORY' not in build_environment or 'GITHUB_WORKFLOW' not in build_environment or 'GITHUB_SHA' not in build_environment: + if ( + "GITHUB_REPOSITORY" not in build_environment + or "GITHUB_WORKFLOW" not in build_environment + or "GITHUB_SHA" not in build_environment + ): return None - if build_environment['GITHUB_WORKFLOW'] == "Upload Docker Software to TIRA": - if 'TIRA_DOCKER_PATH' not in build_environment: + if build_environment["GITHUB_WORKFLOW"] == "Upload Docker Software to TIRA": + if "TIRA_DOCKER_PATH" not in build_environment: return None return f'https://github.com/{build_environment["GITHUB_REPOSITORY"]}/tree/{build_environment["GITHUB_SHA"]}/{build_environment["TIRA_DOCKER_PATH"]}' - if build_environment['GITHUB_WORKFLOW'] == ".github/workflows/upload-notebook-submission.yml" or build_environment['GITHUB_WORKFLOW'] == 'Upload Notebook to TIRA': - if 'TIRA_JUPYTER_NOTEBOOK' not in build_environment: + if ( + build_environment["GITHUB_WORKFLOW"] == ".github/workflows/upload-notebook-submission.yml" + or build_environment["GITHUB_WORKFLOW"] == "Upload Notebook to TIRA" + ): + if "TIRA_JUPYTER_NOTEBOOK" not in build_environment: return None return f'https://github.com/{build_environment["GITHUB_REPOSITORY"]}/tree/{build_environment["GITHUB_SHA"]}/jupyter-notebook-submissions/{build_environment["TIRA_JUPYTER_NOTEBOOK"]}' return None - def __parse_submissions(self, rows, include_unpublished, round_floats, include_without_evaluation=False, show_only_unreviewed=False): + def __parse_submissions( + self, rows, include_unpublished, round_floats, include_without_evaluation=False, show_only_unreviewed=False + ): keys = dict() input_run_to_evaluation = {} @@ -1140,22 +1379,42 @@ def round_if_float(fl): except ValueError: return fl - for dataset_id, run_id, input_run_id, 
upload_display_name, upload_vm_id, software_vm_id, docker_display_name, \ - docker_vm_id, eval_published, eval_blinded, run_published, run_blinded, m_key, m_value, \ - reviewer_id, no_errors, has_errors, has_no_errors, tira_evaluation_reviewer_id, tira_run_reviewer_id, build_environment in rows: + for ( + dataset_id, + run_id, + input_run_id, + upload_display_name, + upload_vm_id, + software_vm_id, + docker_display_name, + docker_vm_id, + eval_published, + eval_blinded, + run_published, + run_blinded, + m_key, + m_value, + reviewer_id, + no_errors, + has_errors, + has_no_errors, + tira_evaluation_reviewer_id, + tira_run_reviewer_id, + build_environment, + ) in rows: if (not include_without_evaluation and not m_key) or (not include_unpublished and not eval_published): continue - if show_only_unreviewed and tira_evaluation_reviewer_id != 'tira' and tira_run_reviewer_id != 'tira': + if show_only_unreviewed and tira_evaluation_reviewer_id != "tira" and tira_run_reviewer_id != "tira": continue if run_id not in input_run_to_evaluation: - input_run_to_evaluation[run_id] = {'measures': {}} + input_run_to_evaluation[run_id] = {"measures": {}} - vm_id = 'None' - software_name = '' - pretty_run_id = run_id if '-evaluated-run-' not in run_id else run_id.split('-evaluated-run-')[1] + vm_id = "None" + software_name = "" + pretty_run_id = run_id if "-evaluated-run-" not in run_id else run_id.split("-evaluated-run-")[1] is_upload, is_software = False, False if upload_display_name and upload_vm_id: vm_id = upload_vm_id @@ -1169,37 +1428,38 @@ def round_if_float(fl): vm_id = software_vm_id is_software = True - review_state = 'no-review' - if reviewer_id and reviewer_id != 'tira': - review_state = 'valid' if no_errors and has_no_errors and not has_errors else 'invalid' - - input_run_to_evaluation[run_id]['dataset_id'] = dataset_id - input_run_to_evaluation[run_id]['vm_id'] = vm_id - input_run_to_evaluation[run_id]['input_software_name'] = software_name - input_run_to_evaluation[run_id]['run_id'] = pretty_run_id - input_run_to_evaluation[run_id]['input_run_id'] = input_run_id - input_run_to_evaluation[run_id]['published'] = eval_published - input_run_to_evaluation[run_id]['blinded'] = eval_blinded or run_blinded - input_run_to_evaluation[run_id]['is_upload'] = is_upload - input_run_to_evaluation[run_id]['is_software'] = is_software - input_run_to_evaluation[run_id]['review_state'] = review_state - input_run_to_evaluation[run_id]['link_code'] = self.__link_to_code(build_environment) + review_state = "no-review" + if reviewer_id and reviewer_id != "tira": + review_state = "valid" if no_errors and has_no_errors and not has_errors else "invalid" + + input_run_to_evaluation[run_id]["dataset_id"] = dataset_id + input_run_to_evaluation[run_id]["vm_id"] = vm_id + input_run_to_evaluation[run_id]["input_software_name"] = software_name + input_run_to_evaluation[run_id]["run_id"] = pretty_run_id + input_run_to_evaluation[run_id]["input_run_id"] = input_run_id + input_run_to_evaluation[run_id]["published"] = eval_published + input_run_to_evaluation[run_id]["blinded"] = eval_blinded or run_blinded + input_run_to_evaluation[run_id]["is_upload"] = is_upload + input_run_to_evaluation[run_id]["is_software"] = is_software + input_run_to_evaluation[run_id]["review_state"] = review_state + input_run_to_evaluation[run_id]["link_code"] = self.__link_to_code(build_environment) if m_key: - input_run_to_evaluation[run_id]['measures'][m_key] = m_value - keys[m_key] = '' + input_run_to_evaluation[run_id]["measures"][m_key] = m_value + 
keys[m_key] = "" keys = list(keys.keys()) ret = [] for i in input_run_to_evaluation.values(): - i['measures'] = [round_if_float(i['measures'].get(k, '-')) for k in keys] + i["measures"] = [round_if_float(i["measures"].get(k, "-")) for k in keys] ret += [i] return keys, ret def execute_raw_sql_statement(self, prepared_statement, params): from django.db import connection + ret = [] with connection.cursor() as cursor: cursor.execute(prepared_statement, params=params) @@ -1220,105 +1480,148 @@ def get_evaluation(self, run_id): def get_software_with_runs(self, task_id, vm_id): def _runs_by_software(software): - reviews = modeldb.Review.objects.select_related("run", "run__software", "run__evaluator", "run__input_run", - "run__input_dataset").filter(run__software=software).all() + reviews = ( + modeldb.Review.objects.select_related( + "run", "run__software", "run__evaluator", "run__input_run", "run__input_dataset" + ) + .filter(run__software=software) + .all() + ) return list(self._get_ordered_runs_from_reviews(reviews, vm_id, preloaded=False)) - return [{"software": self._software_to_dict(s), - "runs": _runs_by_software(s) - } for s in modeldb.Software.objects.filter(vm__vm_id=vm_id, task__task_id=task_id, deleted=False)] + return [ + {"software": self._software_to_dict(s), "runs": _runs_by_software(s)} + for s in modeldb.Software.objects.filter(vm__vm_id=vm_id, task__task_id=task_id, deleted=False) + ] @staticmethod def _review_as_dict(review): - return {"reviewer": review.reviewer_id, "noErrors": review.no_errors, - "missingOutput": review.missing_output, - "extraneousOutput": review.extraneous_output, "invalidOutput": review.invalid_output, - "hasErrorOutput": review.has_error_output, "otherErrors": review.other_errors, - "comment": review.comment, "hasErrors": review.has_errors, "hasWarnings": review.has_warnings, - "hasNoErrors": review.has_no_errors, "published": review.published, "blinded": review.blinded} + return { + "reviewer": review.reviewer_id, + "noErrors": review.no_errors, + "missingOutput": review.missing_output, + "extraneousOutput": review.extraneous_output, + "invalidOutput": review.invalid_output, + "hasErrorOutput": review.has_error_output, + "otherErrors": review.other_errors, + "comment": review.comment, + "hasErrors": review.has_errors, + "hasWarnings": review.has_warnings, + "hasNoErrors": review.has_no_errors, + "published": review.published, + "blinded": review.blinded, + } def get_run_review(self, dataset_id: str, vm_id: str, run_id: str) -> dict: review = modeldb.Review.objects.get(run__run_id=run_id) return self._review_as_dict(review) def get_vm_reviews_by_dataset(self, dataset_id: str, vm_id: str) -> dict: - return {review.run.run_id: self._review_as_dict(review) - for review in modeldb.Review.objects.select_related('run'). - filter(run__input_dataset__dataset_id=dataset_id, run__software__vm__vm_id=vm_id)} + return { + review.run.run_id: self._review_as_dict(review) + for review in modeldb.Review.objects.select_related("run").filter( + run__input_dataset__dataset_id=dataset_id, run__software__vm__vm_id=vm_id + ) + } @staticmethod def _software_to_dict(software): - return {"id": software.software_id, "count": software.count, - "task_id": software.task.task_id, "vm_id": software.vm.vm_id, - "command": software.command, "working_directory": software.working_directory, - "dataset": None if not software.dataset else software.dataset.dataset_id, - "run": 'none', # always none, this is a relict from a past version we keep for compatibility. 
- "creation_date": software.creation_date, - "last_edit": software.last_edit_date} + return { + "id": software.software_id, + "count": software.count, + "task_id": software.task.task_id, + "vm_id": software.vm.vm_id, + "command": software.command, + "working_directory": software.working_directory, + "dataset": None if not software.dataset else software.dataset.dataset_id, + "run": "none", # always none, this is a relict from a past version we keep for compatibility. + "creation_date": software.creation_date, + "last_edit": software.last_edit_date, + } def get_software(self, task_id, vm_id, software_id): - """ Returns the software with the given name of a vm on a task """ + """Returns the software with the given name of a vm on a task""" return self._software_to_dict( - modeldb.Software.objects.get(vm__vm_id=vm_id, task__task_id=task_id, software_id=software_id)) + modeldb.Software.objects.get(vm__vm_id=vm_id, task__task_id=task_id, software_id=software_id) + ) def get_software_by_task(self, task_id, vm_id): - return [self._software_to_dict(sw) - for sw in modeldb.Software.objects.filter(vm__vm_id=vm_id, task__task_id=task_id, deleted=False)] + return [ + self._software_to_dict(sw) + for sw in modeldb.Software.objects.filter(vm__vm_id=vm_id, task__task_id=task_id, deleted=False) + ] def get_software_by_vm(self, task_id, vm_id): - """ Returns the software of a vm on a task in json """ - return [self._software_to_dict(software) - for software in modeldb.Software.objects.filter(vm__vm_id=vm_id, task__task_id=task_id, deleted=False)] + """Returns the software of a vm on a task in json""" + return [ + self._software_to_dict(software) + for software in modeldb.Software.objects.filter(vm__vm_id=vm_id, task__task_id=task_id, deleted=False) + ] def add_vm(self, vm_id, user_name, initial_user_password, ip, host, ssh, rdp): - """ Add a new task to the database. + """Add a new task to the database. This will not overwrite existing files and instead do nothing and return false """ if self._save_vm(vm_id, user_name, initial_user_password, ip, host, ssh, rdp): try: - modeldb.VirtualMachine.objects.create(vm_id=vm_id, user_password=initial_user_password, - roles='user', host=host, ip=ip, ssh=ssh, rdp=rdp) + modeldb.VirtualMachine.objects.create( + vm_id=vm_id, user_password=initial_user_password, roles="user", host=host, ip=ip, ssh=ssh, rdp=rdp + ) except IntegrityError as e: - logger.exception(f"Failed to add new vm {vm_id} with ", e) + logger.exception(f"Failed to add new vm {vm_id} with ", exc_info=e) raise TiraModelIntegrityError(e) else: raise TiraModelWriteError(f"Failed to write VM {vm_id}") def add_registration(self, data): - task = modeldb.Task.objects.select_related('organizer').get(task_id=data['task_id']) + task = modeldb.Task.objects.select_related("organizer").get(task_id=data["task_id"]) - if data['group'] not in task.allowed_task_teams and task.restrict_groups: + if data["group"] not in task.allowed_task_teams and task.restrict_groups: raise ValueError(f'Team name is not allowed "{data["group"]}". 
Allowed: {task.allowed_task_teams}') - if data['group'] and data['group'].strip() and data['group'] not in task.allowed_task_teams and not task.restrict_groups: + if ( + data["group"] + and data["group"].strip() + and data["group"] not in task.allowed_task_teams + and not task.restrict_groups + ): allowed_task_teams = task.allowed_task_teams - allowed_task_teams = '' if not allowed_task_teams else allowed_task_teams - allowed_task_teams += '\n' + (data['group'].strip()) + allowed_task_teams = "" if not allowed_task_teams else allowed_task_teams + allowed_task_teams += "\n" + (data["group"].strip()) task.allowed_task_teams = allowed_task_teams.strip() task.save() - - modeldb.Registration.objects.create(initial_owner=data['initial_owner'], - team_name=data['group'], - team_members=data['team'], - registered_on_task=task, - name=data['username'], - email=data['email'], - affiliation=data['affiliation'], - country=data['country'], - employment=data['employment'], - participates_for=data['participation'], - instructor_name=data['instructorName'], - instructor_email=data['instructorEmail'], - questions=data['questions']) - + modeldb.Registration.objects.create( + initial_owner=data["initial_owner"], + team_name=data["group"], + team_members=data["team"], + registered_on_task=task, + name=data["username"], + email=data["email"], + affiliation=data["affiliation"], + country=data["country"], + employment=data["employment"], + participates_for=data["participation"], + instructor_name=data["instructorName"], + instructor_email=data["instructorEmail"], + questions=data["questions"], + ) def all_registered_teams(self): - return set([i['team_name'] for i in modeldb.Registration.objects.values('team_name')]) - - def _fdb_create_task(self, task_id, task_name, task_description, master_vm_id, organizer_id, website, - help_command=None, help_text=None): - new_task_file_path = self.tasks_dir_path / f'{task_id}.prototext' + return set([i["team_name"] for i in modeldb.Registration.objects.values("team_name")]) + + def _fdb_create_task( + self, + task_id, + task_name, + task_description, + master_vm_id, + organizer_id, + website, + help_command=None, + help_text=None, + ): + new_task_file_path = self.tasks_dir_path / f"{task_id}.prototext" task = modelpb.Tasks.Task() task.taskId = task_id task.taskName = task_name @@ -1328,75 +1631,104 @@ def _fdb_create_task(self, task_id, task_name, task_description, master_vm_id, o task.web = website task.commandPlaceholder = help_command task.commandDescription = help_text - open(new_task_file_path, 'w').write(str(task)) - - def create_task(self, task_id, task_name, task_description, featured, master_vm_id, organizer, website, - require_registration, require_groups, restrict_groups, help_command=None, help_text=None, allowed_task_teams=None): - """ Add a new task to the database. 
- CAUTION: This function does not do any sanity checks and will OVERWRITE existing tasks """ - new_task = modeldb.Task.objects.create(task_id=task_id, - task_name=task_name, - vm=modeldb.VirtualMachine.objects.get(vm_id=master_vm_id), - task_description=task_description, - organizer=modeldb.Organizer.objects.get(organizer_id=organizer), - web=website, featured=featured, require_registration=require_registration, - require_groups=require_groups, - restrict_groups=restrict_groups, - allowed_task_teams=allowed_task_teams) + open(new_task_file_path, "w").write(str(task)) + + def create_task( + self, + task_id, + task_name, + task_description, + featured, + master_vm_id, + organizer, + website, + require_registration, + require_groups, + restrict_groups, + help_command=None, + help_text=None, + allowed_task_teams=None, + ): + """Add a new task to the database. + CAUTION: This function does not do any sanity checks and will OVERWRITE existing tasks""" + new_task = modeldb.Task.objects.create( + task_id=task_id, + task_name=task_name, + vm=modeldb.VirtualMachine.objects.get(vm_id=master_vm_id), + task_description=task_description, + organizer=modeldb.Organizer.objects.get(organizer_id=organizer), + web=website, + featured=featured, + require_registration=require_registration, + require_groups=require_groups, + restrict_groups=restrict_groups, + allowed_task_teams=allowed_task_teams, + ) if help_command: new_task.command_placeholder = help_command if help_text: new_task.command_description = help_text new_task.save() - self._fdb_create_task(task_id, task_name, task_description, master_vm_id, organizer, website, - help_command, help_text) + self._fdb_create_task( + task_id, task_name, task_description, master_vm_id, organizer, website, help_command, help_text + ) return self._task_to_dict(new_task) def _fdb_add_dataset_to_task(self, task_id, dataset_id, dataset_type): - task_file_path = self.tasks_dir_path / f'{task_id}.prototext' + task_file_path = self.tasks_dir_path / f"{task_id}.prototext" task = Parse(open(task_file_path, "r").read(), modelpb.Tasks.Task()) - if dataset_type == 'test': + if dataset_type == "test": task.testDataset.append(dataset_id) else: task.trainingDataset.append(dataset_id) - open(task_file_path, 'w').write(str(task)) + open(task_file_path, "w").write(str(task)) def _fdb_add_dataset(self, task_id, dataset_id, display_name, dataset_type, evaluator_id): - """ dataset_dir_path/task_id/dataset_id.prototext """ - new_dataset_file_path = self.datasets_dir_path / task_id / f'{dataset_id}.prototext' + """dataset_dir_path/task_id/dataset_id.prototext""" + new_dataset_file_path = self.datasets_dir_path / task_id / f"{dataset_id}.prototext" ds = modelpb.Dataset() ds.datasetId = dataset_id ds.displayName = display_name ds.evaluatorId = evaluator_id - if dataset_type == 'test': + if dataset_type == "test": ds.isConfidential = True else: ds.isConfidential = False (self.datasets_dir_path / task_id).mkdir(exist_ok=True, parents=True) - open(new_dataset_file_path, 'w').write(str(ds)) + open(new_dataset_file_path, "w").write(str(ds)) def get_new_dataset_id(self, dataset_id, task_id, dataset_type): - candidates = [''] + [f'_{i}' for i in range(100)] + candidates = [""] + [f"_{i}" for i in range(100)] for cand in candidates: dataset_id_candidate = f"{dataset_id}-{get_today_timestamp()}{cand}-{dataset_type}" - if self.dataset_exists(dataset_id_candidate) or \ - (self.data_path / f'{dataset_type}-datasets' / task_id / dataset_id_candidate).exists() or \ - (self.data_path / 
f'{dataset_type}-datasets-truth' / task_id / dataset_id_candidate).exists(): + if ( + self.dataset_exists(dataset_id_candidate) + or (self.data_path / f"{dataset_type}-datasets" / task_id / dataset_id_candidate).exists() + or (self.data_path / f"{dataset_type}-datasets-truth" / task_id / dataset_id_candidate).exists() + ): continue return dataset_id_candidate - raise ValueError('I could not find a dataset id.') - - - - def add_dataset(self, task_id, dataset_id, dataset_type, dataset_name, upload_name, irds_docker_image=None, irds_import_command=None, irds_import_truth_command=None): - """ Add a new dataset to a task - CAUTION: This function does not do any sanity (existence) checks and will OVERWRITE existing datasets """ + raise ValueError("I could not find a dataset id.") + + def add_dataset( + self, + task_id, + dataset_id, + dataset_type, + dataset_name, + upload_name, + irds_docker_image=None, + irds_import_command=None, + irds_import_truth_command=None, + ): + """Add a new dataset to a task + CAUTION: This function does not do any sanity (existence) checks and will OVERWRITE existing datasets""" dataset_id = self.get_new_dataset_id(dataset_id, task_id, dataset_type) if self.dataset_exists(dataset_id): @@ -1404,33 +1736,39 @@ def add_dataset(self, task_id, dataset_id, dataset_type, dataset_name, upload_na for_task = modeldb.Task.objects.get(task_id=task_id) - ds, _ = modeldb.Dataset.objects.update_or_create(dataset_id=dataset_id, defaults={ - 'default_task': for_task, - 'display_name': dataset_name, - 'is_confidential': True if dataset_type == 'test' else False, - 'released': str(dt.now()), - 'default_upload_name': upload_name, - 'irds_docker_image': irds_docker_image, - 'irds_import_command': irds_import_command, - 'irds_import_truth_command': irds_import_truth_command, - }) + ds, _ = modeldb.Dataset.objects.update_or_create( + dataset_id=dataset_id, + defaults={ + "default_task": for_task, + "display_name": dataset_name, + "is_confidential": True if dataset_type == "test" else False, + "released": str(dt.now()), + "default_upload_name": upload_name, + "irds_docker_image": irds_docker_image, + "irds_import_command": irds_import_command, + "irds_import_truth_command": irds_import_truth_command, + }, + ) - thds = modeldb.TaskHasDataset.objects.select_related('dataset').filter(task__task_id=task_id) + thds = modeldb.TaskHasDataset.objects.select_related("dataset").filter(task__task_id=task_id) - if dataset_type == 'test' and dataset_id not in {thd.dataset.dataset_id for thd in thds if thd.is_test}: + if dataset_type == "test" and dataset_id not in {thd.dataset.dataset_id for thd in thds if thd.is_test}: modeldb.TaskHasDataset.objects.create(task=for_task, dataset=ds, is_test=True) - elif dataset_type == 'training' and dataset_id not in {thd.dataset.dataset_id for thd in thds if - not thd.is_test}: + elif dataset_type == "training" and dataset_id not in { + thd.dataset.dataset_id for thd in thds if not thd.is_test + }: modeldb.TaskHasDataset.objects.create(task=for_task, dataset=ds, is_test=False) - elif dataset_type not in {'training', 'dev', 'test'}: + elif dataset_type not in {"training", "dev", "test"}: raise KeyError("dataset type must be test, training, or dev") self._fdb_add_dataset_to_task(task_id, dataset_id, dataset_type) - self._fdb_add_dataset(task_id, dataset_id, dataset_name, dataset_type, 'not-set') + self._fdb_add_dataset(task_id, dataset_id, dataset_name, dataset_type, "not-set") # create dirs data_path/dataset/test-dataset[-truth]/task_id/dataset-id-type - new_dirs = 
[(self.data_path / f'{dataset_type}-datasets' / task_id / dataset_id), - (self.data_path / f'{dataset_type}-datasets-truth' / task_id / dataset_id)] + new_dirs = [ + (self.data_path / f"{dataset_type}-datasets" / task_id / dataset_id), + (self.data_path / f"{dataset_type}-datasets-truth" / task_id / dataset_id), + ] for d in new_dirs: d.mkdir(parents=True, exist_ok=True) @@ -1438,11 +1776,11 @@ def add_dataset(self, task_id, dataset_id, dataset_type, dataset_name, upload_na return self._dataset_to_dict(ds), [str(nd) for nd in new_dirs] def _fdb_add_evaluator_to_vm(self, vm_id, evaluator_id, command, working_directory, measures): - """ Add the evaluator the the .prototext file in the Filedatabase - This file is potentially read by the host. - If it is not read by the host anymore, remove this function and all it's calls - """ - vm_file_path = self.vm_dir_path / f'{vm_id}.prototext' + """Add the evaluator the the .prototext file in the Filedatabase + This file is potentially read by the host. + If it is not read by the host anymore, remove this function and all it's calls + """ + vm_file_path = self.vm_dir_path / f"{vm_id}.prototext" vm = Parse(open(vm_file_path).read(), modelpb.VirtualMachine()) ev = modelpb.Evaluator() @@ -1452,21 +1790,32 @@ def _fdb_add_evaluator_to_vm(self, vm_id, evaluator_id, command, working_directo ev.measures = str(measures) # ",".join([x[0].strip('\r') for x in measures]) # ev.measureKeys.extend([x[1].strip('\r') for x in measures]) vm.evaluators.append(ev) - open(vm_file_path, 'w').write(str(vm)) + open(vm_file_path, "w").write(str(vm)) def _fdb_add_evaluator_to_dataset(self, task_id, dataset_id, evaluator_id): - """ Add the evaluator the the dataset.prototext file in the Filedatabase - This file is potentially read by the host. - If it is not read by the host anymore, remove this function and all it's calls - """ - dataset_file_path = self.datasets_dir_path / task_id / f'{dataset_id}.prototext' + """Add the evaluator the the dataset.prototext file in the Filedatabase + This file is potentially read by the host. 
+ If it is not read by the host anymore, remove this function and all it's calls + """ + dataset_file_path = self.datasets_dir_path / task_id / f"{dataset_id}.prototext" ds = Parse(open(dataset_file_path, "r").read(), modelpb.Dataset()) ds.evaluatorId = evaluator_id - open(dataset_file_path, 'w').write(str(ds)) - - def add_evaluator(self, vm_id, task_id, dataset_id, command, working_directory, measures, is_git_runner, - git_runner_image, git_runner_command, git_repository_id): - """ Add a new Evaluator to the model (and the filedatabase as long as needed) + open(dataset_file_path, "w").write(str(ds)) + + def add_evaluator( + self, + vm_id, + task_id, + dataset_id, + command, + working_directory, + measures, + is_git_runner, + git_runner_image, + git_runner_command, + git_repository_id, + ): + """Add a new Evaluator to the model (and the filedatabase as long as needed) @param vm_id: vm id as string as usual @param task_id: task_id as string as usual @@ -1478,18 +1827,21 @@ def add_evaluator(self, vm_id, task_id, dataset_id, command, working_directory, @param git_repository_id: the repo ID where the new run will be conducted @param git_runner_command: the command for the runner @param git_runner_image: which image should be run for the evalution - """ + """ evaluator_id = f"{dataset_id}-evaluator" - ev, _ = modeldb.Evaluator.objects.update_or_create(evaluator_id=evaluator_id, defaults={ - 'command': command, - 'working_directory': working_directory, - 'measures': measures, - 'is_git_runner': is_git_runner, - 'git_runner_image': git_runner_image, - 'git_runner_command': git_runner_command, - 'git_repository_id': git_repository_id - }) + ev, _ = modeldb.Evaluator.objects.update_or_create( + evaluator_id=evaluator_id, + defaults={ + "command": command, + "working_directory": working_directory, + "measures": measures, + "is_git_runner": is_git_runner, + "git_runner_image": git_runner_image, + "git_runner_command": git_runner_command, + "git_repository_id": git_repository_id, + }, + ) # add evaluator to master vm if vm_id and not is_git_runner: @@ -1509,8 +1861,13 @@ def get_job_details(self, task_id, vm_id, job_id): return None else: ret = ret[0] - return {'title': ret.title, 'last_contact': ret.last_contact, 'job_id': job_id, 'exit_code': ret.exit_code, 'stdout': ret.stdout} - + return { + "title": ret.title, + "last_contact": ret.last_contact, + "job_id": job_id, + "exit_code": ret.exit_code, + "stdout": ret.stdout, + } def add_software(self, task_id: str, vm_id: str): software = modelpb.Softwares.Software() @@ -1530,15 +1887,30 @@ def add_software(self, task_id: str, vm_id: str): s.softwares.append(software) self._save_softwares(task_id, vm_id, s) - sw = modeldb.Software.objects.create(software_id=new_software_id, - vm=modeldb.VirtualMachine.objects.get(vm_id=vm_id), - task=modeldb.Task.objects.get(task_id=task_id), - count="", command="", working_directory="", - dataset=None, creation_date=date, last_edit_date=date) + sw = modeldb.Software.objects.create( + software_id=new_software_id, + vm=modeldb.VirtualMachine.objects.get(vm_id=vm_id), + task=modeldb.Task.objects.get(task_id=task_id), + count="", + command="", + working_directory="", + dataset=None, + creation_date=date, + last_edit_date=date, + ) return self._software_to_dict(sw) - def update_software(self, task_id, vm_id, software_id, command: str = None, working_directory: str = None, - dataset: str = None, run: str = None, deleted: bool = False): + def update_software( + self, + task_id, + vm_id, + software_id, + command: 
Optional[str] = None, + working_directory: Optional[str] = None, + dataset: Optional[str] = None, + run: Optional[str] = None, + deleted: bool = False, + ): def update(x, y): return y if y is not None else x @@ -1555,26 +1927,43 @@ def update(x, y): self._save_softwares(task_id, vm_id, s) modeldb.Software.objects.filter(software_id=software_id, vm__vm_id=vm_id).update( - command=software.command, working_directory=software.workingDirectory, + command=software.command, + working_directory=software.workingDirectory, deleted=software.deleted, dataset=modeldb.Dataset.objects.get(dataset_id=software.dataset), - last_edit_date=date) + last_edit_date=date, + ) if run: modeldb.SoftwareHasInputRun.objects.filter( software=modeldb.Software.objects.get(software_id=software_id, vm__vm_id=vm_id), - input_run=modeldb.Run.objects.get(run_id=run)) + input_run=modeldb.Run.objects.get(run_id=run), + ) return software return False - def update_review(self, dataset_id, vm_id, run_id, - reviewer_id: str = None, review_date: str = None, has_errors: bool = None, - has_no_errors: bool = None, no_errors: bool = None, missing_output: bool = None, - extraneous_output: bool = None, invalid_output: bool = None, has_error_output: bool = None, - other_errors: bool = None, comment: str = None, published: bool = None, blinded: bool = None, - has_warnings: bool = False) -> bool: - """ updates the review specified by dataset_id, vm_id, and run_id with the values given in the parameters. + def update_review( + self, + dataset_id, + vm_id, + run_id, + reviewer_id: Optional[str] = None, + review_date: Optional[str] = None, + has_errors: Optional[bool] = None, + has_no_errors: Optional[bool] = None, + no_errors: Optional[bool] = None, + missing_output: Optional[bool] = None, + extraneous_output: Optional[bool] = None, + invalid_output: Optional[bool] = None, + has_error_output: Optional[bool] = None, + other_errors: Optional[bool] = None, + comment: Optional[str] = None, + published: Optional[bool] = None, + blinded: Optional[bool] = None, + has_warnings: bool = False, + ) -> bool: + """updates the review specified by dataset_id, vm_id, and run_id with the values given in the parameters. 
Required Parameters are also required in the function """ @@ -1583,23 +1972,24 @@ def __update(x, y): try: # This changes the contents in the protobuf files - review = modeldb.Review.objects.prefetch_related('run').get(run__run_id=run_id) - - review_proto = modelpb.RunReview(runId=run_id, - reviewerId=__update(review.reviewer_id, reviewer_id), - reviewDate=__update(review.review_date, review_date), - hasErrors=__update(review.has_errors, has_errors), - hasWarnings=__update(review.has_warnings, has_warnings), - hasNoErrors=__update(review.has_no_errors, has_no_errors), - noErrors=__update(review.no_errors, no_errors), - missingOutput=__update(review.missing_output, missing_output), - extraneousOutput=__update(review.extraneous_output, extraneous_output), - invalidOutput=__update(review.invalid_output, invalid_output), - hasErrorOutput=__update(review.has_error_output, has_error_output), - otherErrors=__update(review.other_errors, other_errors), - comment=__update(review.comment, comment), - published=__update(review.published, published), - blinded=__update(review.blinded, blinded), + review = modeldb.Review.objects.prefetch_related("run").get(run__run_id=run_id) + + review_proto = modelpb.RunReview( + runId=run_id, + reviewerId=__update(review.reviewer_id, reviewer_id), + reviewDate=__update(review.review_date, review_date), + hasErrors=__update(review.has_errors, has_errors), + hasWarnings=__update(review.has_warnings, has_warnings), + hasNoErrors=__update(review.has_no_errors, has_no_errors), + noErrors=__update(review.no_errors, no_errors), + missingOutput=__update(review.missing_output, missing_output), + extraneousOutput=__update(review.extraneous_output, extraneous_output), + invalidOutput=__update(review.invalid_output, invalid_output), + hasErrorOutput=__update(review.has_error_output, has_error_output), + otherErrors=__update(review.other_errors, other_errors), + comment=__update(review.comment, comment), + published=__update(review.published, published), + blinded=__update(review.blinded, blinded), ) modeldb.Review.objects.filter(run__run_id=run_id).update( @@ -1616,7 +2006,7 @@ def __update(x, y): has_warnings=review_proto.hasWarnings, has_no_errors=review_proto.hasNoErrors, published=review_proto.published, - blinded=review_proto.blinded + blinded=review_proto.blinded, ) self._save_review(dataset_id, vm_id, run_id, review_proto) @@ -1627,23 +2017,24 @@ def __update(x, y): return False def add_run(self, dataset_id, vm_id, run_id): - """ Parses the specified run and adds it to the model. Does nothing if the run does not exist in the + """Parses the specified run and adds it to the model. Does nothing if the run does not exist in the FileDB. Runs the auto reviewer to generate an initial review. Also loads evaluations if present - """ + """ return dbops.parse_run(self.runs_dir_path, dataset_id, vm_id, run_id) def _list_files(self, startpath): import os + tree = "" for root, dirs, files in os.walk(startpath): - level = root.replace(startpath, '').count(os.sep) - indent = '..' * 2 * (level) - tree += '{}|-- {}/\n'.format(indent, os.path.basename(root)) - subindent = '..' * 2 * (level + 1) + level = root.replace(startpath, "").count(os.sep) + indent = ".." * 2 * (level) + tree += "{}|-- {}/\n".format(indent, os.path.basename(root)) + subindent = ".." 
* 2 * (level + 1) for f in files: - tree += '{}|-- {}\n'.format(subindent, f) + tree += "{}|-- {}\n".format(subindent, f) return tree def _assess_uploaded_files(self, run_dir: Path, output_dir: Path): @@ -1653,12 +2044,12 @@ def _assess_uploaded_files(self, run_dir: Path, output_dir: Path): def count_lines(file_name): try: - if file_name.suffix == '.gz': - return len(gzip.open(file_name, 'r').readlines()) + if file_name.suffix == ".gz": + return len(gzip.open(file_name, "r").readlines()) else: - return len(open(file_name, 'r').readlines()) - except: - return '--' + return len(open(file_name, "r").readlines()) + except Exception: + return "--" if root_files and not root_files[0].is_dir(): lines = count_lines(root_files[0]) @@ -1666,28 +2057,32 @@ def count_lines(file_name): else: lines = "--" size = "--" - open(run_dir / 'size.txt', 'w').write(f"0\n{size}\n{lines}\n{files}\n{dirs}") - open(run_dir / 'file-list.txt', 'w').write(self._list_files(str(output_dir))) + open(run_dir / "size.txt", "w").write(f"0\n{size}\n{lines}\n{files}\n{dirs}") + open(run_dir / "file-list.txt", "w").write(self._list_files(str(output_dir))) - def add_upload(self, task_id: str, vm_id: str, rename_to: str = None): + def add_upload(self, task_id: str, vm_id: str, rename_to: Optional[str] = None): upload = modeldb.Upload.objects.create( vm=modeldb.VirtualMachine.objects.get(vm_id=vm_id), task=modeldb.Task.objects.get(task_id=task_id), rename_to=rename_to, display_name=randomname.get_name(), - description='Please add a description that describes uploads of this type.' + description="Please add a description that describes uploads of this type.", ) return self.upload_to_dict(upload, vm_id) def delete_upload(self, task_id, vm_id, upload_id): - modeldb.Upload.objects.filter(id= upload_id, vm__vm_id = vm_id, task__task_id = task_id, ).update(deleted=True) + modeldb.Upload.objects.filter( + id=upload_id, + vm__vm_id=vm_id, + task__task_id=task_id, + ).update(deleted=True) def add_uploaded_run(self, task_id, vm_id, dataset_id, upload_id, uploaded_file): # First add to data new_id = get_tira_id() run_dir = self.runs_dir_path / dataset_id / vm_id / new_id - (run_dir / 'output').mkdir(parents=True) + (run_dir / "output").mkdir(parents=True) # Second add to proto dump run = modelpb.Run() @@ -1700,70 +2095,75 @@ def add_uploaded_run(self, task_id, vm_id, dataset_id, upload_id, uploaded_file) # Third add to database try: upload = modeldb.Upload.objects.get(vm__vm_id=vm_id, task__task_id=task_id, id=upload_id) - except: + except Exception: upload = modeldb.Upload.objects.get(vm__vm_id=vm_id, id=upload_id) upload.last_edit_date = now() upload.save() - db_run = modeldb.Run.objects.create(run_id=new_id, upload=upload, - input_dataset=modeldb.Dataset.objects.get(dataset_id=dataset_id), - task=modeldb.Task.objects.get(task_id=task_id), - downloadable=True) + db_run = modeldb.Run.objects.create( + run_id=new_id, + upload=upload, + input_dataset=modeldb.Dataset.objects.get(dataset_id=dataset_id), + task=modeldb.Task.objects.get(task_id=task_id), + downloadable=True, + ) - open(run_dir / "run.bin", 'wb').write(run.SerializeToString()) - open(run_dir / "run.prototext", 'w').write(str(run)) + open(run_dir / "run.bin", "wb").write(run.SerializeToString()) + open(run_dir / "run.prototext", "w").write(str(run)) if uploaded_file.name.endswith(".zip"): - with open(run_dir / 'output' / uploaded_file.name, 'wb+') as destination: + with open(run_dir / "output" / uploaded_file.name, "wb+") as destination: for chunk in uploaded_file.chunks(): 
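# UploadedFile.chunks() streams the uploaded file in pieces (64 KiB per chunk by
# default in Django), so large uploads are written to disk without loading the
# whole file into memory.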
destination.write(chunk) - with zipfile.ZipFile(run_dir / 'output' / uploaded_file.name, 'r') as zip_ref: - zip_ref.extractall(run_dir / 'output') + with zipfile.ZipFile(run_dir / "output" / uploaded_file.name, "r") as zip_ref: + zip_ref.extractall(run_dir / "output") else: default_filename = modeldb.Dataset.objects.get(dataset_id=dataset_id).default_upload_name - if upload.rename_to and upload.rename_to.replace(' ', '').replace('\\', '').replace('/', '').strip(): - default_filename = upload.rename_to.replace(' ', '').replace('\\', '').replace('/', '').strip() + if upload.rename_to and upload.rename_to.replace(" ", "").replace("\\", "").replace("/", "").strip(): + default_filename = upload.rename_to.replace(" ", "").replace("\\", "").replace("/", "").strip() - if not (run_dir / 'output' / default_filename).is_file(): - with open(run_dir / 'output' / default_filename, 'wb+') as destination: + if not (run_dir / "output" / default_filename).is_file(): + with open(run_dir / "output" / default_filename, "wb+") as destination: for chunk in uploaded_file.chunks(): destination.write(chunk) # Add size.txt and stdout and stderr, and file-list.txt - self._assess_uploaded_files(run_dir, (run_dir / 'output')) - open(run_dir / 'stdout.txt', 'w').write("This run was successfully uploaded.") - open(run_dir / 'stderr.txt', 'w').write("No errors.") + self._assess_uploaded_files(run_dir, (run_dir / "output")) + open(run_dir / "stdout.txt", "w").write("This run was successfully uploaded.") + open(run_dir / "stderr.txt", "w").write("No errors.") # add the review review = auto_reviewer(run_dir, run_dir.stem) - open(run_dir / "run-review.prototext", 'w').write(str(review)) - open(run_dir / "run-review.bin", 'wb').write(review.SerializeToString()) - - modeldb.Review.objects.update_or_create(run=db_run, defaults={ - 'reviewer_id': review.reviewerId, - 'review_date': review.reviewDate, - 'no_errors': review.noErrors, - 'missing_output': review.missingOutput, - 'extraneous_output': review.extraneousOutput, - 'invalid_output': review.invalidOutput, - 'has_error_output': review.hasErrorOutput, - 'other_errors': review.otherErrors, - 'comment': review.comment, - 'has_errors': review.hasErrors, - 'has_warnings': review.hasWarnings, - 'has_no_errors': review.hasNoErrors, - 'published': review.published, - 'blinded': review.blinded - }) + open(run_dir / "run-review.prototext", "w").write(str(review)) + open(run_dir / "run-review.bin", "wb").write(review.SerializeToString()) + + modeldb.Review.objects.update_or_create( + run=db_run, + defaults={ + "reviewer_id": review.reviewerId, + "review_date": review.reviewDate, + "no_errors": review.noErrors, + "missing_output": review.missingOutput, + "extraneous_output": review.extraneousOutput, + "invalid_output": review.invalidOutput, + "has_error_output": review.hasErrorOutput, + "other_errors": review.otherErrors, + "comment": review.comment, + "has_errors": review.hasErrors, + "has_warnings": review.hasWarnings, + "has_no_errors": review.hasNoErrors, + "published": review.published, + "blinded": review.blinded, + }, + ) returned_run = self._run_as_dict(db_run) - returned_run['review'] = self.get_run_review(dataset_id, vm_id, run.runId) + returned_run["review"] = self.get_run_review(dataset_id, vm_id, run.runId) - return {"run": returned_run, - "last_edit_date": upload.last_edit_date} + return {"run": returned_run, "last_edit_date": upload.last_edit_date} def update_upload_metadata(self, task_id, vm_id, upload_id, display_name, description, paper_link): 
modeldb.Upload.objects.filter(vm__vm_id=vm_id, task__task_id=task_id, id=upload_id).update( @@ -1773,13 +2173,26 @@ def update_upload_metadata(self, task_id, vm_id, upload_id, display_name, descri ) def add_docker_software_mounts(self, docker_software, mounts): - docker_software = modeldb.DockerSoftware.objects.get(docker_software_id=docker_software['docker_software_id']) + docker_software = modeldb.DockerSoftware.objects.get(docker_software_id=docker_software["docker_software_id"]) modeldb.HuggingFaceModelsOfSoftware.objects.create( - docker_software=docker_software, hf_home = mounts['HF_HOME'], mount_hf_model = mounts['MOUNT_HF_MODEL'], models_scan= mounts['HF_CACHE_SCAN'] + docker_software=docker_software, + hf_home=mounts["HF_HOME"], + mount_hf_model=mounts["MOUNT_HF_MODEL"], + models_scan=mounts["HF_CACHE_SCAN"], ) - def add_docker_software(self, task_id, vm_id, user_image_name, command, tira_image_name, input_docker_job=None, - input_upload=None, submission_git_repo=None, build_environment=None): + def add_docker_software( + self, + task_id, + vm_id, + user_image_name, + command, + tira_image_name, + input_docker_job=None, + input_upload=None, + submission_git_repo=None, + build_environment=None, + ): input_docker_software, input_upload_software = None, None if input_docker_job and 0 in input_docker_job: input_docker_software = modeldb.DockerSoftware.objects.get(docker_software_id=input_docker_job[0]) @@ -1797,7 +2210,9 @@ def add_docker_software(self, task_id, vm_id, user_image_name, command, tira_ima input_upload=input_upload_software, ) - additional_inputs = range(1, (0 if not input_upload else len(input_upload)) + (0 if not input_docker_job else len(input_docker_job))) + additional_inputs = range( + 1, (0 if not input_upload else len(input_upload)) + (0 if not input_docker_job else len(input_docker_job)) + ) for i in additional_inputs: inp, upl = None, None if i in input_docker_job: @@ -1811,23 +2226,32 @@ def add_docker_software(self, task_id, vm_id, user_image_name, command, tira_ima if submission_git_repo: modeldb.LinkToSoftwareSubmissionGitRepository.objects.create( - docker_software=docker_software, software_submission_git_repository=submission_git_repo, - commit_hash='', link_to_file='', build_environment=build_environment + docker_software=docker_software, + software_submission_git_repository=submission_git_repo, + commit_hash="", + link_to_file="", + build_environment=build_environment, ) return self._docker_software_to_dict(docker_software) + def update_docker_software_metadata( + self, docker_software_id, display_name, description, paper_link, ir_re_ranker, ir_re_ranking_input + ): + modeldb.DockerSoftware.objects.update_or_create( + docker_software_id=docker_software_id, + defaults={ + "display_name": display_name, + "description": description, + "paper_link": paper_link, + "ir_re_ranker": ir_re_ranker, + "ir_re_ranking_input": ir_re_ranking_input, + }, + ) - def update_docker_software_metadata(self, docker_software_id, display_name, description, paper_link, - ir_re_ranker, ir_re_ranking_input): - software = modeldb.DockerSoftware.objects.update_or_create(docker_software_id = docker_software_id, - defaults={"display_name": display_name, "description": description, "paper_link": paper_link, - "ir_re_ranker": ir_re_ranker, "ir_re_ranking_input": ir_re_ranking_input}) - - - def update_run(self, dataset_id, vm_id, run_id, deleted: bool = None): - """ updates the run specified by dataset_id, vm_id, and run_id with the values given in the parameters. 
- Required Parameters are also required in the function + def update_run(self, dataset_id, vm_id, run_id, deleted: Optional[bool] = None): + """updates the run specified by dataset_id, vm_id, and run_id with the values given in the parameters. + Required Parameters are also required in the function """ try: run = self._load_run(dataset_id, vm_id, run_id) @@ -1840,16 +2264,28 @@ def update(x, y): self._save_run(dataset_id, vm_id, run_id, run) except Exception as e: - raise TiraModelWriteError(f"Exception while saving run ({dataset_id}, {vm_id}, {run_id})", e) - - def _fdb_edit_task(self, task_id, task_name, task_description, master_vm_id, organizer_id, website, - help_command=None, help_text=None): - task_file_path = self.tasks_dir_path / f'{task_id}.prototext' + raise TiraModelWriteError(f"Exception while saving run ({dataset_id}, {vm_id}, {run_id})", exc_info=e) + + def _fdb_edit_task( + self, + task_id, + task_name, + task_description, + master_vm_id, + organizer_id, + website, + help_command=None, + help_text=None, + ): + task_file_path = self.tasks_dir_path / f"{task_id}.prototext" if not task_file_path.exists(): logger.exception( - f"Can not save task {task_id} because the task file {task_file_path} does not exist. Creating this file now.") - self._fdb_create_task(task_id, task_name, task_description, master_vm_id, organizer_id, website, - help_command, help_text) + f"Can not save task {task_id} because the task file {task_file_path} does not exist. Creating this file" + " now." + ) + self._fdb_create_task( + task_id, task_name, task_description, master_vm_id, organizer_id, website, help_command, help_text + ) return task = Parse(open(task_file_path, "r").read(), modelpb.Tasks.Task()) task.taskId = task_id @@ -1860,14 +2296,28 @@ def _fdb_edit_task(self, task_id, task_name, task_description, master_vm_id, org task.web = website task.commandPlaceholder = help_command task.commandDescription = help_text - open(task_file_path, 'w').write(str(task)) - - def edit_task(self, task_id: str, task_name: str, task_description: str, featured: bool, master_vm_id, - organizer: str, website: str, require_registration: str, require_groups: str, restrict_groups: str, - help_command: str = None, help_text: str = None, allowed_task_teams: str = None, - is_ir_task: bool = False, irds_re_ranking_image: str = '', irds_re_ranking_command: str = '', - irds_re_ranking_resource: str = '' - ): + open(task_file_path, "w").write(str(task)) + + def edit_task( + self, + task_id: str, + task_name: str, + task_description: str, + featured: bool, + master_vm_id, + organizer: str, + website: str, + require_registration: str, + require_groups: str, + restrict_groups: str, + help_command: Optional[str] = None, + help_text: Optional[str] = None, + allowed_task_teams: Optional[str] = None, + is_ir_task: bool = False, + irds_re_ranking_image: str = "", + irds_re_ranking_command: str = "", + irds_re_ranking_resource: str = "", + ): task = modeldb.Task.objects.filter(task_id=task_id) vm = modeldb.VirtualMachine.objects.get(vm_id=master_vm_id) @@ -1885,7 +2335,7 @@ def edit_task(self, task_id: str, task_name: str, task_description: str, feature is_ir_task=is_ir_task, irds_re_ranking_image=irds_re_ranking_image, irds_re_ranking_command=irds_re_ranking_command, - irds_re_ranking_resource=irds_re_ranking_resource + irds_re_ranking_resource=irds_re_ranking_resource, ) if help_command: @@ -1893,30 +2343,31 @@ def edit_task(self, task_id: str, task_name: str, task_description: str, feature if help_text: 
task.update(command_description=help_text) - self._fdb_edit_task(task_id, task_name, task_description, master_vm_id, organizer, website, - help_command, help_text) + self._fdb_edit_task( + task_id, task_name, task_description, master_vm_id, organizer, website, help_command, help_text + ) return self._task_to_dict(modeldb.Task.objects.get(task_id=task_id)) def _fdb_edit_dataset(self, task_id, dataset_id, display_name, dataset_type, evaluator_id): - """ dataset_dir_path/task_id/dataset_id.prototext """ - dataset_file_path = self.datasets_dir_path / task_id / f'{dataset_id}.prototext' + """dataset_dir_path/task_id/dataset_id.prototext""" + dataset_file_path = self.datasets_dir_path / task_id / f"{dataset_id}.prototext" ds = Parse(open(dataset_file_path, "r").read(), modelpb.Dataset()) ds.displayName = display_name ds.evaluatorId = evaluator_id - if dataset_type == 'test': + if dataset_type == "test": ds.isConfidential = True else: ds.isConfidential = False - open(dataset_file_path, 'w').write(str(ds)) + open(dataset_file_path, "w").write(str(ds)) def _fdb_edit_evaluator_to_vm(self, vm_id, evaluator_id, command, working_directory, measures): - """ Edit the evaluator in the .prototext file in the Filedatabase - This file is potentially read by the host. - If it is not read by the host anymore, remove this function and all it's calls - """ - vm_file_path = self.vm_dir_path / f'{vm_id}.prototext' + """Edit the evaluator in the .prototext file in the Filedatabase + This file is potentially read by the host. + If it is not read by the host anymore, remove this function and all it's calls + """ + vm_file_path = self.vm_dir_path / f"{vm_id}.prototext" vm = Parse(open(vm_file_path).read(), modelpb.VirtualMachine()) for evaluator in vm.evaluators: @@ -1925,10 +2376,23 @@ def _fdb_edit_evaluator_to_vm(self, vm_id, evaluator_id, command, working_direct evaluator.workingDirectory = working_directory evaluator.measures = measures - open(vm_file_path, 'w').write(str(vm)) - - def edit_dataset(self, task_id, dataset_id, dataset_name, command, working_directory, measures, upload_name, - is_confidential, is_git_runner, git_runner_image, git_runner_command, git_repository_id): + open(vm_file_path, "w").write(str(vm)) + + def edit_dataset( + self, + task_id, + dataset_id, + dataset_name, + command, + working_directory, + measures, + upload_name, + is_confidential, + is_git_runner, + git_runner_image, + git_runner_command, + git_repository_id, + ): """ @param is_git_runner: a bool. 
If true, run_evaluations are done via git CI (see git_runner.py) @@ -1942,16 +2406,23 @@ def edit_dataset(self, task_id, dataset_id, dataset_name, command, working_direc default_task=for_task, display_name=dataset_name, default_upload_name=upload_name, - is_confidential=is_confidential) + is_confidential=is_confidential, + ) ds = modeldb.Dataset.objects.get(dataset_id=dataset_id) modeldb.TaskHasDataset.objects.filter(dataset=ds).update(task=for_task) - dataset_type = 'test' if is_confidential else 'training' + dataset_type = "test" if is_confidential else "training" ev = modeldb.Evaluator.objects.filter(dataset__dataset_id=dataset_id) - ev.update(command=command, working_directory=working_directory, measures=measures, - is_git_runner=is_git_runner, git_runner_image=git_runner_image, - git_runner_command=git_runner_command, git_repository_id=git_repository_id) + ev.update( + command=command, + working_directory=working_directory, + measures=measures, + is_git_runner=is_git_runner, + git_runner_image=git_runner_image, + git_runner_command=git_runner_command, + git_repository_id=git_repository_id, + ) ev_id = modeldb.Evaluator.objects.get(dataset__dataset_id=dataset_id).evaluator_id self._fdb_edit_dataset(task_id, dataset_id, dataset_name, dataset_type, ev_id) @@ -1961,19 +2432,24 @@ def edit_dataset(self, task_id, dataset_id, dataset_name, command, working_direc self._fdb_edit_evaluator_to_vm(vm_id, ev_id, command, working_directory, measures) except Exception as e: logger.exception( - f"failed to query 'VirtualMachineHasEvaluator' for evauator {ev_id}. Will not save changes made to the Filestore.", - e) + f"failed to query 'VirtualMachineHasEvaluator' for evauator {ev_id}. Will not save changes made to the" + " Filestore.", + e, + ) return self._dataset_to_dict(ds) def delete_software(self, task_id, vm_id, software_id): - """ Delete a software. - Deletion is denied when - - there is a successful evlauation assigned. + """Delete a software. + Deletion is denied when + - there is a successful evlauation assigned. """ - reviews_qs = modeldb.Review.objects.filter(run__input_run__software__software_id=software_id, - run__input_run__software__task_id=task_id, - run__input_run__software__vm_id=vm_id, no_errors=True) + reviews_qs = modeldb.Review.objects.filter( + run__input_run__software__software_id=software_id, + run__input_run__software__task_id=task_id, + run__input_run__software__vm_id=vm_id, + no_errors=True, + ) if reviews_qs.exists(): return False @@ -1990,14 +2466,14 @@ def delete_software(self, task_id, vm_id, software_id): return found def delete_run(self, dataset_id, vm_id, run_id): - """ delete the run in the database. + """delete the run in the database. Do not delete if: - the run is on the leaderboard. 
- the run is valid @return: true if it was deleted, false if it can not be deleted - """ + """ run = modeldb.Run.objects.get(run_id=run_id) review = modeldb.Review.objects.get(run=run) @@ -2009,7 +2485,7 @@ def delete_run(self, dataset_id, vm_id, run_id): return True def _fdb_delete_task(self, task_id): - task_file_path = self.tasks_dir_path / f'{task_id}.prototext' + task_file_path = self.tasks_dir_path / f"{task_id}.prototext" os.remove(task_file_path) def delete_task(self, task_id): @@ -2017,11 +2493,11 @@ def delete_task(self, task_id): self._fdb_delete_task(task_id) def _fdb_delete_dataset(self, task_id, dataset_id): - dataset_file_path = self.datasets_dir_path / task_id / f'{dataset_id}.prototext' + dataset_file_path = self.datasets_dir_path / task_id / f"{dataset_id}.prototext" os.remove(dataset_file_path) def _fdb_delete_dataset_from_task(self, task_id, dataset_id): - task_file_path = self.tasks_dir_path / f'{task_id}.prototext' + task_file_path = self.tasks_dir_path / f"{task_id}.prototext" task = Parse(open(task_file_path, "r").read(), modelpb.Tasks.Task()) for ind, ds in enumerate(task.testDataset): if ds == dataset_id: @@ -2031,36 +2507,37 @@ def _fdb_delete_dataset_from_task(self, task_id, dataset_id): if ds == dataset_id: del task.trainingDataset[ind] - open(task_file_path, 'w').write(str(task)) + open(task_file_path, "w").write(str(task)) def _fdb_delete_evaluator_from_vm(self, vm_id, evaluator_id): - vm_file_path = self.vm_dir_path / f'{vm_id}.prototext' + vm_file_path = self.vm_dir_path / f"{vm_id}.prototext" vm = Parse(open(vm_file_path).read(), modelpb.VirtualMachine()) for ind, ev in enumerate(vm.evaluators): if ev.evaluatorId == evaluator_id: del vm.evaluators[ind] - open(vm_file_path, 'w').write(str(vm)) + open(vm_file_path, "w").write(str(vm)) def delete_dataset(self, dataset_id): modeldb.Dataset.objects.filter(dataset_id=dataset_id).update(is_deprecated=True) - #ds = modeldb.Dataset.objects.select_related('default_task', 'evaluator').get(dataset_id=dataset_id) - #task_id = ds.default_task.task_id - #vm_id = ds.default_task.vm.vm_id - #try: + # ds = modeldb.Dataset.objects.select_related('default_task', 'evaluator').get(dataset_id=dataset_id) + # task_id = ds.default_task.task_id + # vm_id = ds.default_task.vm.vm_id + # try: # evaluator_id = ds.evaluator.evaluator_id # self._fdb_delete_evaluator_from_vm(vm_id, evaluator_id) - #except AttributeError as e: + # except AttributeError as e: # logger.exception(f"Exception deleting evaluator while deleting dataset {dataset_id}. 
" - # f"Maybe It never existed?", e) - #self._fdb_delete_dataset_from_task(task_id, dataset_id) - #self._fdb_delete_dataset(task_id, dataset_id) - #ds.delete() + # f"Maybe It never existed?", exc_info=e) + # self._fdb_delete_dataset_from_task(task_id, dataset_id) + # self._fdb_delete_dataset(task_id, dataset_id) + # ds.delete() def edit_organizer(self, organizer_id, name, years, web, git_integrations=[]): - org, _ = modeldb.Organizer.objects.update_or_create(organizer_id=organizer_id, defaults={ - 'name': name, 'years': years, 'web': web}) + org, _ = modeldb.Organizer.objects.update_or_create( + organizer_id=organizer_id, defaults={"name": name, "years": years, "web": web} + ) org.git_integrations.set(git_integrations) return org @@ -2081,13 +2558,15 @@ def get_git_integration(self, namespace_url, private_token, return_dict=False, c if not namespace_url or not namespace_url.strip(): return None - defaults = {'private_token': private_token} + defaults = {"private_token": private_token} - if not private_token or not private_token.strip or ''.lower() in private_token.lower(): + if not private_token or not private_token.strip or "".lower() in private_token.lower(): defaults = {} if create_if_not_exists: - git_integration, _ = modeldb.GitIntegration.objects.get_or_create(namespace_url=namespace_url, defaults=defaults) + git_integration, _ = modeldb.GitIntegration.objects.get_or_create( + namespace_url=namespace_url, defaults=defaults + ) else: git_integration = modeldb.GitIntegration.objects.get(namespace_url=namespace_url) @@ -2101,7 +2580,6 @@ def all_git_integrations(self, return_dict=False): return ret - def _registration_to_dict(self, registration): return { "team_name": registration.team_name, @@ -2119,7 +2597,6 @@ def _registration_to_dict(self, registration): "last_modified": registration.last_modified, } - # methods to check for existence @staticmethod def task_exists(task_id: str) -> bool: @@ -2146,45 +2623,66 @@ def software_exists(task_id: str, vm_id: str, software_id: str) -> bool: return modeldb.Software.objects.filter(software_id=software_id, vm__vm_id=vm_id).exists() @staticmethod - def all_matching_run_ids(vm_id: str, input_dataset_id: str, task_id: str, software_id: str, docker_software_id: int, upload_id: int): - ret = [] + def all_matching_run_ids( + vm_id: str, + input_dataset_id: str, + task_id: str, + software_id: Optional[str], + docker_software_id: Optional[int], + upload_id: Optional[int], + ) -> list[str]: + ret: list[str] = [] if software_id: - ret += [i.run_id for i in modeldb.Run.objects.filter( - software__software_id=software_id, task__task_id=task_id, input_dataset__dataset_id=input_dataset_id - )] + ret += [ + i.run_id + for i in modeldb.Run.objects.filter( + software__software_id=software_id, task__task_id=task_id, input_dataset__dataset_id=input_dataset_id + ) + ] if docker_software_id: - ret += [i.run_id for i in modeldb.Run.objects.filter( - docker_software__docker_software_id=docker_software_id, input_dataset__dataset_id=input_dataset_id - )] + ret += [ + i.run_id + for i in modeldb.Run.objects.filter( + docker_software__docker_software_id=docker_software_id, input_dataset__dataset_id=input_dataset_id + ) + ] if not software_id and not docker_software_id and vm_id: - ret += [i.run_id for i in modeldb.Run.objects.filter( - upload__vm__vm_id=vm_id, input_dataset__dataset_id=input_dataset_id, - )] + ret += [ + i.run_id + for i in modeldb.Run.objects.filter( + upload__vm__vm_id=vm_id, + input_dataset__dataset_id=input_dataset_id, + ) + ] if upload_id: - ret += 
[i.run_id for i in modeldb.Run.objects.filter( - upload__id=upload_id, input_dataset__dataset_id=input_dataset_id - )] + ret += [ + i.run_id + for i in modeldb.Run.objects.filter(upload__id=upload_id, input_dataset__dataset_id=input_dataset_id) + ] return [i for i in ret if i] def get_ordered_additional_input_runs_of_software(self, docker_software): ret = [] - if not docker_software or 'docker_software_id' not in docker_software: + if not docker_software or "docker_software_id" not in docker_software: return [] - additional_inputs = modeldb.DockerSoftwareHasAdditionalInput.objects \ - .filter(docker_software__docker_software_id=docker_software['docker_software_id']) \ - .order_by('position') + additional_inputs = modeldb.DockerSoftwareHasAdditionalInput.objects.filter( + docker_software__docker_software_id=docker_software["docker_software_id"] + ).order_by("position") for i in additional_inputs: - ret += [(i.input_docker_software.docker_software_id if i.input_docker_software else None, - i.input_upload.id if i.input_upload else None - )] + ret += [ + ( + i.input_docker_software.docker_software_id if i.input_docker_software else None, + i.input_upload.id if i.input_upload else None, + ) + ] return ret diff --git a/application/src/tira/data/data.py b/application/src/tira_app/data/data.py similarity index 59% rename from application/src/tira/data/data.py rename to application/src/tira_app/data/data.py index c261f7f6a..f57b3fcc9 100644 --- a/application/src/tira/data/data.py +++ b/application/src/tira_app/data/data.py @@ -1,24 +1,28 @@ """ These methods are utilities to parse Tira's Model from the protobuf files into a database. """ -from tira.proto import TiraClientWebMessages_pb2 as modelpb -from tira.proto import tira_host_pb2 as model_host -from google.protobuf.text_format import Parse -from tira.util import extract_year_from_dataset_id, auto_reviewer -from pathlib import Path -import tira.model as modeldb + import logging + +from google.protobuf.text_format import Parse from tqdm import tqdm -MODEL_ROOT = Path("/mnt/ceph/tira/model") -TASKS_DIR_PATH = MODEL_ROOT / Path("tasks") -ORGANIZERS_FILE_PATH = MODEL_ROOT / Path("organizers/organizers.prototext") +from .. import model as modeldb +from ..proto import TiraClientWebMessages_pb2 as modelpb +from ..util import auto_reviewer, extract_year_from_dataset_id logger = logging.getLogger("tira") -def index(organizers_file_path, users_file_path, vm_dir_path, tasks_dir_path, - datasets_dir_path, softwares_dir_path, runs_dir_path): +def index( + organizers_file_path, + users_file_path, + vm_dir_path, + tasks_dir_path, + datasets_dir_path, + softwares_dir_path, + runs_dir_path, +): _parse_organizer_list(organizers_file_path) _parse_vm_list(users_file_path, vm_dir_path) _parse_dataset_list(datasets_dir_path) @@ -52,17 +56,15 @@ def reload_runs(runs_dir_path, vm_id): def _parse_organizer_list(organizers_file_path): - """ Parse the PB Database and extract all hosts. + """Parse the PB Database and extract all hosts. 
:return: a dict {hostId: {"name", "years"} """ organizers = modelpb.Hosts() Parse(open(organizers_file_path, "r").read(), organizers) for org in organizers.hosts: - _, _ = modeldb.Organizer.objects.update_or_create(organizer_id=org.hostId, defaults={ - 'name': org.name, - 'years': org.years, - 'web': org.web - }) + _, _ = modeldb.Organizer.objects.update_or_create( + organizer_id=org.hostId, defaults={"name": org.name, "years": org.years, "web": org.web} + ) def _parse_vm_list(users_file_path, vm_dir_path): @@ -71,106 +73,116 @@ def _parse_vm_list(users_file_path, vm_dir_path): for user in users.users: try: vm = Parse(open(vm_dir_path / f"{user.userName}.prototext").read(), modelpb.VirtualMachine()) - vm2, _ = modeldb.VirtualMachine.objects.update_or_create(vm_id=user.userName, defaults={ - 'user_password': user.userPw, - 'roles': user.roles, - 'host': vm.host, - 'admin_name': vm.adminName, - 'admin_pw': vm.adminPw, - 'ip': vm.ip, - 'ssh': vm.portSsh, - 'rdp': vm.portRdp}) + vm2, _ = modeldb.VirtualMachine.objects.update_or_create( + vm_id=user.userName, + defaults={ + "user_password": user.userPw, + "roles": user.roles, + "host": vm.host, + "admin_name": vm.adminName, + "admin_pw": vm.adminPw, + "ip": vm.ip, + "ssh": vm.portSsh, + "rdp": vm.portRdp, + }, + ) for evaluator in vm.evaluators: ev, _ = modeldb.Evaluator.objects.update_or_create( - evaluator_id=evaluator.evaluatorId, defaults={ - 'command': evaluator.command, - 'working_directory': evaluator.workingDirectory, - 'measures': evaluator.measures, - 'is_deprecated': evaluator.isDeprecated - }) + evaluator_id=evaluator.evaluatorId, + defaults={ + "command": evaluator.command, + "working_directory": evaluator.workingDirectory, + "measures": evaluator.measures, + "is_deprecated": evaluator.isDeprecated, + }, + ) modeldb.VirtualMachineHasEvaluator.objects.update_or_create(evaluator=ev, vm=vm2) - except FileNotFoundError as e: + except FileNotFoundError: logger.exception(f"Could not find VM file for vm_id {user.userName}") - _, _ = modeldb.VirtualMachine.objects.update_or_create(vm_id=user.userName, defaults={ - 'user_password': user.userPw, - 'roles': user.roles}) + _, _ = modeldb.VirtualMachine.objects.update_or_create( + vm_id=user.userName, defaults={"user_password": user.userPw, "roles": user.roles} + ) def _parse_task_list(tasks_dir_path): - """ Parse the PB Database and extract all tasks. + """Parse the PB Database and extract all tasks. :return: 1. a dict with the tasks {"taskId": {"name", "description", "dataset_count", "organizer", "year", "web"}} 2. 
a dict with default tasks of datasets {"dataset_id": "task_id"} """ - logger.info('loading tasks') + logger.info("loading tasks") for task_path in tasks_dir_path.glob("*"): task = Parse(open(task_path, "r").read(), modelpb.Tasks.Task()) vm, _ = modeldb.VirtualMachine.objects.get_or_create(vm_id=task.virtualMachineId) organizer, _ = modeldb.Organizer.objects.get_or_create(organizer_id=task.hostId) - t, _ = modeldb.Task.objects.update_or_create(task_id=task.taskId, defaults={ - 'task_name': task.taskName, - 'task_description': task.taskDescription, - 'vm': vm, - 'organizer': organizer, - 'web': task.web, - 'max_std_out_chars_on_test_data': task.maxStdOutCharsOnTestData, - 'max_std_err_chars_on_test_data': task.maxStdErrCharsOnTestData, - 'max_file_list_chars_on_test_data': task.maxFileListCharsOnTestData, - 'command_placeholder': task.commandPlaceholder, - 'command_description': task.commandDescription, - 'dataset_label': task.datasetLabel, - 'max_std_out_chars_on_test_data_eval': task.maxStdOutCharsOnTestDataEval, - 'max_std_err_chars_on_test_data_eval': task.maxStdErrCharsOnTestDataEval, - 'max_file_list_chars_on_test_data_eval': task.maxFileListCharsOnTestDataEval}) + t, _ = modeldb.Task.objects.update_or_create( + task_id=task.taskId, + defaults={ + "task_name": task.taskName, + "task_description": task.taskDescription, + "vm": vm, + "organizer": organizer, + "web": task.web, + "max_std_out_chars_on_test_data": task.maxStdOutCharsOnTestData, + "max_std_err_chars_on_test_data": task.maxStdErrCharsOnTestData, + "max_file_list_chars_on_test_data": task.maxFileListCharsOnTestData, + "command_placeholder": task.commandPlaceholder, + "command_description": task.commandDescription, + "dataset_label": task.datasetLabel, + "max_std_out_chars_on_test_data_eval": task.maxStdOutCharsOnTestDataEval, + "max_std_err_chars_on_test_data_eval": task.maxStdErrCharsOnTestDataEval, + "max_file_list_chars_on_test_data_eval": task.maxFileListCharsOnTestDataEval, + }, + ) # allowed_servers for allowed_server in task.allowedServers: - modeldb.AllowedServer.objects.update_or_create( - task=t, - server_address=allowed_server) + modeldb.AllowedServer.objects.update_or_create(task=t, server_address=allowed_server) # datasets for train_dataset in task.trainingDataset: - dataset, _ = modeldb.Dataset.objects.update_or_create(dataset_id=train_dataset, defaults={ - 'default_task': t - }) + dataset, _ = modeldb.Dataset.objects.update_or_create( + dataset_id=train_dataset, defaults={"default_task": t} + ) # dataset.default_task = t # dataset.save() - modeldb.TaskHasDataset.objects.update_or_create(task=t, dataset=dataset, defaults={'is_test': False}) + modeldb.TaskHasDataset.objects.update_or_create(task=t, dataset=dataset, defaults={"is_test": False}) for test_dataset in task.testDataset: - dataset, _ = modeldb.Dataset.objects.update_or_create(dataset_id=test_dataset, defaults={ - 'default_task': t - }) - modeldb.TaskHasDataset.objects.update_or_create(task=t, dataset=dataset, defaults={'is_test': True}) + dataset, _ = modeldb.Dataset.objects.update_or_create(dataset_id=test_dataset, defaults={"default_task": t}) + modeldb.TaskHasDataset.objects.update_or_create(task=t, dataset=dataset, defaults={"is_test": True}) def _parse_dataset_list(datasets_dir_path): - """ Load all the datasets from the Filedatabase. + """Load all the datasets from the Filedatabase. 
:return: a dict {dataset_id: dataset protobuf object} """ - logger.info('loading datasets') + logger.info("loading datasets") for dataset_file in datasets_dir_path.rglob("*.prototext"): - logger.info('Process dataset: ' + str(dataset_file)) + logger.info("Process dataset: " + str(dataset_file)) dataset = Parse(open(dataset_file, "r").read(), modelpb.Dataset()) evaluator, _ = modeldb.Evaluator.objects.get_or_create(evaluator_id=dataset.evaluatorId) - modeldb.Dataset.objects.update_or_create(dataset_id=dataset.datasetId, defaults={ - 'display_name': dataset.displayName, - 'evaluator': evaluator, - 'is_confidential': dataset.isConfidential, - 'is_deprecated': dataset.isDeprecated, - 'data_server': dataset.dataServer, - 'released': extract_year_from_dataset_id(dataset.datasetId)}) + modeldb.Dataset.objects.update_or_create( + dataset_id=dataset.datasetId, + defaults={ + "display_name": dataset.displayName, + "evaluator": evaluator, + "is_confidential": dataset.isConfidential, + "is_deprecated": dataset.isDeprecated, + "data_server": dataset.dataServer, + "released": extract_year_from_dataset_id(dataset.datasetId), + }, + ) def _parse_software_list(softwares_dir_path): - """ extract the software files. We invent a new id for the lookup since software has none: + """extract the software files. We invent a new id for the lookup since software has none: - $ Afterwards sets self.software: a dict with the new key and a list of software objects as value """ # software = {} - logger.info('loading softwares') + logger.info("loading softwares") for task_dir in softwares_dir_path.glob("*"): for user_dir in task_dir.glob("*"): s = Parse(open(user_dir / "softwares.prototext", "r").read(), modelpb.Softwares()) @@ -178,15 +190,20 @@ def _parse_software_list(softwares_dir_path): vm, _ = modeldb.VirtualMachine.objects.get_or_create(vm_id=user_dir.stem) task, _ = modeldb.Task.objects.get_or_create(task_id=task_dir.stem) dataset, _ = modeldb.Dataset.objects.get_or_create(dataset_id=software.dataset) - modeldb.Software.objects.update_or_create(software_id=software.id, vm=vm, task=task, defaults={ - 'count': software.count, - 'command': software.command, - 'working_directory': software.workingDirectory, - 'dataset': dataset, - 'creation_date': software.creationDate, - 'last_edit_date': software.lastEditDate, - 'deleted': software.deleted - }) + modeldb.Software.objects.update_or_create( + software_id=software.id, + vm=vm, + task=task, + defaults={ + "count": software.count, + "command": software.command, + "working_directory": software.workingDirectory, + "dataset": dataset, + "creation_date": software.creationDate, + "last_edit_date": software.lastEditDate, + "deleted": software.deleted, + }, + ) # software_list = [user_software for user_software in s.softwares if not user_software.deleted] # software[f"{task_dir.stem}${user_dir.stem}"] = software_list @@ -194,24 +211,24 @@ def _parse_software_list(softwares_dir_path): def _parse_runs_evaluations(runs_dir_path): for dataset_dir in tqdm(runs_dir_path.glob("*")): dataset_id = dataset_dir.stem - for vm_dir in tqdm(dataset_dir.glob("*"), desc=f'{dataset_id}'): + for vm_dir in tqdm(dataset_dir.glob("*"), desc=f"{dataset_id}"): vm_id = vm_dir.stem parse_runs_for_vm(runs_dir_path, dataset_id, vm_id) def _parse_run(run_id, task_id, run_proto, vm, dataset): def __get_docker_software(): - if 'docker-software-' not in run_proto.softwareId: + if "docker-software-" not in run_proto.softwareId: return None try: - docker_software_id = 
str(int(run_proto.softwareId.split('docker-software-')[-1])) + docker_software_id = str(int(run_proto.softwareId.split("docker-software-")[-1])) return modeldb.DockerSoftware.objects.get(docker_software_id=docker_software_id) except modeldb.DockerSoftware.DoesNotExist: logger.exception(f"Run {run_id} lists a docker-software {run_proto.softwareId}, but None exists.") return None def __get_upload(): - if 'upload' not in run_proto.softwareId: + if "upload" not in run_proto.softwareId: return None try: upload, _ = modeldb.Upload.objects.get_or_create(vm=vm, task=modeldb.Task.objects.get(task_id=task_id)) @@ -222,11 +239,11 @@ def __get_upload(): return None def __get_evaluator(): - if 'eval' not in run_proto.softwareId: + if "eval" not in run_proto.softwareId: return None try: return modeldb.Evaluator.objects.get(evaluator_id=run_proto.softwareId) - except modeldb.Evaluator.DoesNotExist as e2: + except modeldb.Evaluator.DoesNotExist: logger.exception(f"Run {run_id} lists an evaluation software {run_proto.softwareId}, but None exists.") return None @@ -245,20 +262,22 @@ def __get_software(): software = __get_software() if not docker_software and not upload and not evaluator else None if not docker_software and not upload and not evaluator and not software: - logger.exception(f"Run {run_id} is dangling:" - f"{run_proto}") - - r, _ = modeldb.Run.objects.update_or_create(run_id=run_proto.runId, defaults={ - 'software': software, - 'docker_software': docker_software, - 'evaluator': evaluator, - 'upload': upload, - 'input_dataset': dataset, - 'task': modeldb.Task.objects.get(task_id=task_id), - 'downloadable': run_proto.downloadable, - 'deleted': run_proto.deleted, - 'access_token': run_proto.accessToken - }) + logger.exception(f"Run {run_id} is dangling:{run_proto}") + + r, _ = modeldb.Run.objects.update_or_create( + run_id=run_proto.runId, + defaults={ + "software": software, + "docker_software": docker_software, + "evaluator": evaluator, + "upload": upload, + "input_dataset": dataset, + "task": modeldb.Task.objects.get(task_id=task_id), + "downloadable": run_proto.downloadable, + "deleted": run_proto.deleted, + "access_token": run_proto.accessToken, + }, + ) return r @@ -269,34 +288,37 @@ def _parse_review(run_dir, run): # AutoReviewer action here if not review_file.exists(): review = auto_reviewer(run_dir, run_dir.stem) - open(run_dir / "run-review.prototext", 'w').write(str(review)) - open(run_dir / "run-review.bin", 'wb').write(review.SerializeToString()) + open(run_dir / "run-review.prototext", "w").write(str(review)) + open(run_dir / "run-review.bin", "wb").write(review.SerializeToString()) else: review = modelpb.RunReview() review.ParseFromString(open(review_file, "rb").read()) - modeldb.Review.objects.update_or_create(run=run, defaults={ - 'reviewer_id': review.reviewerId, - 'review_date': review.reviewDate, - 'no_errors': review.noErrors, - 'missing_output': review.missingOutput, - 'extraneous_output': review.extraneousOutput, - 'invalid_output': review.invalidOutput, - 'has_error_output': review.hasErrorOutput, - 'other_errors': review.otherErrors, - 'comment': review.comment, - 'has_errors': review.hasErrors, - 'has_warnings': review.hasWarnings, - 'has_no_errors': review.hasNoErrors, - 'published': review.published, - 'blinded': review.blinded - }) + modeldb.Review.objects.update_or_create( + run=run, + defaults={ + "reviewer_id": review.reviewerId, + "review_date": review.reviewDate, + "no_errors": review.noErrors, + "missing_output": review.missingOutput, + "extraneous_output": 
review.extraneousOutput, + "invalid_output": review.invalidOutput, + "has_error_output": review.hasErrorOutput, + "other_errors": review.otherErrors, + "comment": review.comment, + "has_errors": review.hasErrors, + "has_warnings": review.hasWarnings, + "has_no_errors": review.hasNoErrors, + "published": review.published, + "blinded": review.blinded, + }, + ) def _parse_evalutions(run_dir, run): if (run_dir / "output/evaluation.prototext").exists() and not (run_dir / "output/evaluation.bin").exists(): evaluation = Parse(open(run_dir / "output/evaluation.prototext", "r").read(), modelpb.Evaluation()) - open(run_dir / "output" / "evaluation.bin", 'wb').write(evaluation.SerializeToString()) + open(run_dir / "output" / "evaluation.bin", "wb").write(evaluation.SerializeToString()) # parse the runs if (run_dir / "output/evaluation.bin").exists(): @@ -308,7 +330,7 @@ def _parse_evalutions(run_dir, run): def parse_runs_for_vm(runs_dir_path, dataset_id, vm_id, verbose=False): vm_dir = runs_dir_path / dataset_id / vm_id - for run_dir in tqdm(vm_dir.glob('*'), desc=f'{vm_id}'): + for run_dir in tqdm(vm_dir.glob("*"), desc=f"{vm_id}"): try: result = parse_run(runs_dir_path, dataset_id, vm_id, run_dir.stem) if verbose: @@ -319,13 +341,13 @@ def parse_runs_for_vm(runs_dir_path, dataset_id, vm_id, verbose=False): def parse_run(runs_dir_path, dataset_id, vm_id, run_id): run_dir = runs_dir_path / dataset_id / vm_id / run_id - return_message = '' + return_message = "" # Error correction: normalize the proto files that are parsed # Skip this run if there is no run file if (run_dir / "run.prototext").exists(): run_proto = Parse(open(run_dir / "run.prototext", "r").read(), modelpb.Run()) - open(run_dir / "run.bin", 'wb').write(run_proto.SerializeToString()) + open(run_dir / "run.bin", "wb").write(run_proto.SerializeToString()) elif (run_dir / "run.bin").exists(): run_proto = modelpb.Run() run_proto.ParseFromString(open(run_dir / "run.bin", "rb").read()) @@ -340,7 +362,7 @@ def parse_run(runs_dir_path, dataset_id, vm_id, run_id): except modeldb.VirtualMachine.DoesNotExist as e: # If the vm was deleted but runs still exist, we land here. We skip indexing these runs. msg = f"Skip run {run_id}: VM {vm_id} does not exist" - logger.exception(msg, e) + logger.exception(msg, exc_info=e) return msg # Error Correction: Skip runs where Dataset no not exist anymore @@ -348,8 +370,8 @@ def parse_run(runs_dir_path, dataset_id, vm_id, run_id): dataset = modeldb.Dataset.objects.get(dataset_id=run_proto.inputDataset) except modeldb.Dataset.DoesNotExist as e: # If the dataset was deleted, but there are still runs left. - msg = f'Skip run {run_id}: Dataset {run_proto.inputDataset} does not exist {e}' - logger.exception(msg, e) + msg = f"Skip run {run_id}: Dataset {run_proto.inputDataset} does not exist {e}" + logger.exception(msg, exc_info=e) return msg # Error Correction. If run files dont add a task_id (which is optional), we use the default task of the dataset @@ -361,12 +383,11 @@ def parse_run(runs_dir_path, dataset_id, vm_id, run_id): run = None try: run = _parse_run(run_id, task_id, run_proto, vm, dataset) - return_message += f'|Run added: {run}|' - + return_message += f"|Run added: {run}|" + except Exception as e: - msg = f'Skip run {run_id}: Creation of run had an unexpected Error' \ - f'Run: {run_proto}' - logger.exception(msg, e) + msg = f"Skip run {run_id}: Creation of run had an unexpected ErrorRun: {run_proto}" + logger.exception(msg, exc_info=e) return msg # If this run has an input run (i.e. 
it's an evaluation) we set the reference here. @@ -375,14 +396,12 @@ def parse_run(runs_dir_path, dataset_id, vm_id, run_id): input_run, _ = modeldb.Run.objects.update_or_create(run_id=run_proto.inputRun) run.input_run = input_run run.save() - return_message += f'|Updated input_run of run |' + return_message += "|Updated input_run of run |" # parse the reviews _parse_review(run_dir, run) - return_message += f'|Run updated during parsing of reviews|' + return_message += "|Run updated during parsing of reviews|" _parse_evalutions(run_dir, run) return return_message - - diff --git a/application/src/tira_app/endpoints/admin_api.py b/application/src/tira_app/endpoints/admin_api.py new file mode 100644 index 000000000..4f5820399 --- /dev/null +++ b/application/src/tira_app/endpoints/admin_api.py @@ -0,0 +1,680 @@ +import json +import logging +import os +import tempfile +import traceback +import zipfile +from datetime import datetime as dt +from http import HTTPStatus + +from django.conf import settings +from django.core.serializers.json import DjangoJSONEncoder +from django.http import JsonResponse + +from .. import tira_model as model +from ..authentication import auth +from ..checks import check_conditional_permissions, check_permissions, check_resources_exist +from ..git_runner import check_that_git_integration_is_valid +from ..ir_datasets_loader import run_irds_command + +logger = logging.getLogger("tira") +logger.info("ajax_routes: Logger active") + + +def handle_get_model_exceptions(func): + def decorate(request, *args, **kwargs): + if request.method == "GET": + try: + msg = func(*args, **kwargs) + return JsonResponse({"status": 0, "message": msg}, status=HTTPStatus.OK) + except Exception as e: + logger.exception(f"{func.__name__} failed with {e}", exc_info=e) + return JsonResponse( + {"status": 1, "message": f"{func.__name__} failed with {e}"}, + status=HTTPStatus.INTERNAL_SERVER_ERROR, + ) + + return JsonResponse({"status": 1, "message": f"{request.method} is not allowed."}, status=HTTPStatus.FORBIDDEN) + + return decorate + + +@check_permissions +@handle_get_model_exceptions +def admin_reload_data(): + model.build_model() + return "Model data was reloaded successfully" + + +@check_permissions +@handle_get_model_exceptions +def admin_reload_vms(): + model.reload_vms() + return "VM data was reloaded successfully" + + +@check_permissions +@handle_get_model_exceptions +def admin_reload_datasets(): + model.reload_datasets() + return "Dataset data was reloaded successfully" + + +@check_permissions +@handle_get_model_exceptions +def admin_reload_tasks(): + model.reload_tasks() + return "Task data was reloaded successfully" + + +@check_conditional_permissions(restricted=True) +@handle_get_model_exceptions +def admin_reload_runs(vm_id): + model.reload_runs(vm_id) + return "Runs data was reloaded for {} on {} successfully" + + +@check_permissions +def admin_create_vm(request): # TODO implement + """Hook for create_vm posts. 
Responds with json objects indicating the state of the create process.""" + + if request.method == "POST": + data = json.loads(request.body) + + return JsonResponse({"status": 0, "message": f"Not implemented yet, received: {data}"}) + + return JsonResponse({"status": 1, "message": "GET is not implemented for vm create"}) + + +@check_permissions +def admin_archive_vm(request): + return JsonResponse({"status": 1, "message": "Not implemented"}, status=HTTPStatus.NOT_IMPLEMENTED) + + +@check_permissions +def admin_modify_vm(request): + if request.method == "POST": + data = json.loads(request.body) + + return JsonResponse({"status": 0, "message": f"Not implemented yet, received: {data}"}) + + return JsonResponse({"status": 1, "message": "GET is not implemented for modify vm"}) + + +@check_permissions +def admin_create_task(request, organizer_id): + """Create an entry in the model for the task. Use data supplied by a model. + Return a json status message.""" + + if request.method == "POST": + data = json.loads(request.body) + + task_id = data["task_id"] + featured = data["featured"] + master_vm_id = data.get("master_vm_id", "princess-knight") # dummy default VM + master_vm_id = master_vm_id if master_vm_id else "princess-knight" # default dummy vm + require_registration = data["require_registration"] + require_groups = data["require_groups"] + restrict_groups = data["restrict_groups"] + + if not model.organizer_exists(organizer_id): + return JsonResponse({"status": 1, "message": f"Organizer with ID {organizer_id} does not exist"}) + if model.task_exists(task_id): + return JsonResponse({"status": 1, "message": f"Task with ID {task_id} already exist"}) + if not model.vm_exists(master_vm_id): + return JsonResponse({"status": 1, "message": f"VM with ID {master_vm_id} does not exist"}) + + new_task = model.create_task( + task_id, + data["name"], + data["description"], + featured, + master_vm_id, + organizer_id, + data["website"], + require_registration, + require_groups, + restrict_groups, + help_command=data["help_command"], + help_text=data["help_text"], + allowed_task_teams=data["task_teams"], + ) + + new_task = json.dumps(new_task, cls=DjangoJSONEncoder) + return JsonResponse({"status": 0, "context": new_task, "message": f"Created Task with Id: {data['task_id']}"}) + + return JsonResponse( + {"status": 1, "message": "GET is not implemented for admin_create_task"}, status=HTTPStatus.NOT_IMPLEMENTED + ) + + +@check_permissions +@check_resources_exist("json") +def admin_edit_task(request, task_id): + """Edit a task. 
Expects a POST message with all task data.""" + if request.method == "POST": + data = json.loads(request.body) + organizer = data["organizer"] + featured = data["featured"] + master_vm_id = data.get("master_vm_id", "princess-knight") # default dummy vm + master_vm_id = master_vm_id if master_vm_id else "princess-knight" # default dummy vm + require_registration = data["require_registration"] + require_groups = data["require_groups"] + restrict_groups = data["restrict_groups"] + + if not model.organizer_exists(organizer): + return JsonResponse({"status": 1, "message": f"Organizer with ID {organizer} does not exist"}) + if not model.vm_exists(master_vm_id): + return JsonResponse({"status": 1, "message": f"VM with ID {master_vm_id} does not exist"}) + + task = model.edit_task( + task_id, + data["name"], + data["description"], + featured, + master_vm_id, + organizer, + data["website"], + require_registration, + require_groups, + restrict_groups, + help_command=data["help_command"], + help_text=data["help_text"], + allowed_task_teams=data["task_teams"], + is_ir_task=data.get("is_information_retrieval_task", False), + irds_re_ranking_image=data.get("irds_re_ranking_image", ""), + irds_re_ranking_command=data.get("irds_re_ranking_command", ""), + irds_re_ranking_resource=data.get("irds_re_ranking_resource", ""), + ) + + return JsonResponse( + { + "status": 0, + "context": json.dumps(task, cls=DjangoJSONEncoder), + "message": f"Edited Task with Id: {task_id}", + } + ) + + return JsonResponse({"status": 1, "message": "GET is not implemented for edit task"}) + + +@check_permissions +@check_resources_exist("json") +def admin_delete_task(request, task_id): + model.delete_task(task_id) + return JsonResponse({"status": 0, "message": f"Deleted task {task_id}"}) + + +@check_permissions +def admin_add_dataset(request, task_id): + """Create an entry in the model for the task. Use data supplied by a model. 
+ Return a json status message.""" + if request.method == "POST": + data = json.loads(request.body) + + if not all(k in data.keys() for k in ["dataset_id", "name", "task"]): + return JsonResponse({"status": 1, "message": "Error: Task, dataset name, and dataset ID must be set."}) + + dataset_id_prefix = data["dataset_id"] + dataset_name = data["name"] + task_id_from_data = data["task"] + + if task_id_from_data != task_id: + from django.http import HttpResponseNotAllowed + + return HttpResponseNotAllowed("Access forbidden.") + + upload_name = data.get("upload_name", "predictions.jsonl") + command = data.get("evaluator_command", "") + working_directory = data.get("evaluator_working_directory", "") + measures = data.get("evaluation_measures", "") + + is_git_runner = data.get("is_git_runner", False) + git_runner_image = data.get("git_runner_image", "") + git_runner_command = data.get("git_runner_command", "") + git_repository_id = data.get("git_repository_id", "") + + irds_docker_image = data.get("irds_docker_image", None) + irds_docker_image = None if not irds_docker_image else irds_docker_image + irds_import_command = data.get("irds_import_command", None) + irds_import_command = None if not irds_import_command else irds_import_command + irds_import_truth_command = data.get("irds_import_truth_command", None) + irds_import_truth_command = None if not irds_import_truth_command else irds_import_truth_command + + if not data.get("use_existing_repository", True): + git_repository_id = model.get_git_integration(task_id=task_id).create_task_repository(task_id) + + master_vm_id = model.get_task(task_id)["master_vm_id"] + + if not model.task_exists(task_id): + return JsonResponse({"status": 1, "message": f"Task with ID {task_id} does not exist"}) + if data["type"] not in {"test", "training"}: + return JsonResponse({"status": 1, "message": "Dataset type must be 'test' or 'training'"}) + + try: + if data["type"] == "training": + ds, paths = model.add_dataset( + task_id, + dataset_id_prefix, + "training", + dataset_name, + upload_name, + irds_docker_image, + irds_import_command, + irds_import_truth_command, + ) + elif data["type"] == "test": + ds, paths = model.add_dataset( + task_id, + dataset_id_prefix, + "test", + dataset_name, + upload_name, + irds_docker_image, + irds_import_command, + irds_import_truth_command, + ) + + model.add_evaluator( + master_vm_id, + task_id, + ds["dataset_id"], + command, + working_directory, + not measures, + is_git_runner, + git_runner_image, + git_runner_command, + git_repository_id, + ) + path_string = "\n ".join(paths) + return JsonResponse( + { + "status": 0, + "context": ds, + "message": ( + f"Created new dataset with id {ds['dataset_id']}. " + "Store your datasets in the following Paths:\n" + f"{path_string}" + ), + } + ) + except FileExistsError as e: + logger.exception(e) + return JsonResponse({"status": 1, "message": "A Dataset with this id already exists."}) + + return JsonResponse({"status": 1, "message": "GET is not implemented for add dataset"}) + + +@check_permissions +@check_resources_exist("json") +def admin_edit_dataset(request, dataset_id): + """Edit a dataset with the given dataset_id + Send the new data of the dataset via POST. 
All these keys must be given and will be set: + + - name: New display name of the dataset + - task: The associated task + - master_id: ID of the vm that runs the evaluator for this dataset + - type: 'training' or 'test' + - evaluator_working_directory: working directory of the evaluator on the master vm + - evaluator_command: command to be run on the master vm to evaluate the output of runs on the dataset + - evaluation_measures: (str) the measures output by the evaluator. Sent as a string with: + ` + Display Name of Measure1,key_of_measure_1\n + Display Name of Measure2,key_of_measure_2\n + ... + ` + - is_git_runner + - git_runner_image + - git_runner_command + - git_repository_id + """ + if request.method == "POST": + data = json.loads(request.body) + + dataset_name = data["name"] + task_id = data["task"] + is_confidential = not data["publish"] + + command = data["evaluator_command"] + working_directory = data["evaluator_working_directory"] + measures = "" # here for legacy reasons. TIRA uses the measures provided by the evaluator + + is_git_runner = data["is_git_runner"] + git_runner_image = data["git_runner_image"] + git_runner_command = data["git_runner_command"] + git_repository_id = data["git_repository_id"] + + print(data["use_existing_repository"]) + print(data["git_repository_id"]) + if not data["use_existing_repository"]: + git_repository_id = model.get_git_integration(task_id=task_id).create_task_repository(task_id) + + upload_name = data["upload_name"] + + if not model.task_exists(task_id): + return JsonResponse({"status": 1, "message": f"Task with ID {task_id} does not exist"}) + + ds = model.edit_dataset( + task_id, + dataset_id, + dataset_name, + command, + working_directory, + measures, + upload_name, + is_confidential, + is_git_runner, + git_runner_image, + git_runner_command, + git_repository_id, + ) + + from django.core.cache import cache + + model.git_pipeline_is_enabled_for_task(task_id, cache, force_cache_refresh=True) + + return JsonResponse({"status": 0, "context": ds, "message": f"Updated Dataset {ds['dataset_id']}."}) + + return JsonResponse({"status": 1, "message": "GET is not implemented for add dataset"}) + + +def call_django_command_failsave(cmd, args): + import sys + from io import StringIO + + from django.core.management import call_command + + captured_stdout = StringIO() + captured_stderr = StringIO() + + error = None + + sys.stdout = captured_stdout + sys.stderr = captured_stderr + + try: + call_command(cmd, **args) + except Exception as e: + error = str(e) + error += "\n\n" + traceback.format_exc() + + sys.stdout = sys.__stdout__ + sys.stderr = sys.__stderr__ + + return {"stdout": str(captured_stdout.getvalue()), "stderr": str(captured_stderr.getvalue()), "error": error} + + +@check_permissions +def admin_import_ir_dataset(request, task_id): + """Create multiple datasets for the pased ir-dataset. 
+ Return a json status message.""" + if request.method == "POST": + data = json.loads(request.body) + + if not all(k in data.keys() for k in ["dataset_id", "name", "image"]): + return JsonResponse({"status": 1, "message": "Error: dataset_id, name, and image must be set."}) + + dataset_id_prefix = data["dataset_id"] + dataset_name = data["name"] + + upload_name = data.get("upload_name", "run.txt") + evaluator_command = data.get("evaluator_command", "") + working_directory = data.get("evaluator_working_directory", "") + measures = data.get("evaluation_measures", "") + + is_git_runner = data.get("is_git_runner", True) + git_runner_image = data.get("git_runner_image", data.get("image")) + git_runner_command = data.get("git_runner_command", settings.IR_MEASURES_COMMAND) + git_repository_id = model.get_git_integration(task_id=task_id).create_task_repository(task_id) + irds_import_command = ( + f'/irds_cli.sh --skip_qrels true --ir_datasets_id {data["dataset_id"]} --output_dataset_path $outputDir' + ) + irds_import_truth_command = ( + f'/irds_cli.sh --skip_documents true --ir_datasets_id {data["dataset_id"]} --output_dataset_truth_path' + " $outputDir" + ) + + master_vm_id = None + + try: + if data["type"] == "training": + ds, (dataset_path, dataset_truth_path) = model.add_dataset( + task_id, + dataset_id_prefix, + "training", + dataset_name, + upload_name, + irds_docker_image=git_runner_image, + irds_import_command=irds_import_command, + irds_import_truth_command=irds_import_truth_command, + ) + elif data["type"] == "test": + ds, (dataset_path, dataset_truth_path) = model.add_dataset( + task_id, + dataset_id_prefix, + "test", + dataset_name, + upload_name, + irds_docker_image=git_runner_image, + irds_import_command=irds_import_command, + irds_import_truth_command=irds_import_truth_command, + ) + else: + return JsonResponse( + {"status": 1, "message": "Invalid data type. Expected training or test, got : " + data["type"]} + ) + except FileExistsError as e: + logger.exception(e) + return JsonResponse({"status": 1, "message": "A Dataset with this id already exists. Error: " + str(e)}) + + model.add_evaluator( + master_vm_id, + task_id, + ds["dataset_id"], + evaluator_command, + working_directory, + not measures, + is_git_runner, + git_runner_image, + git_runner_command, + git_repository_id, + ) + + # TODO: what is the up-to-date href for background_jobs? 
+ try: + process_id = run_irds_command( + ds["task"], + ds["dataset_id"], + ds["irds_docker_image"], + ds["irds_import_command"], + dataset_path, + ds["irds_import_truth_command"], + dataset_truth_path, + ) + return JsonResponse( + { + "status": 0, + "context": ds, + "message": "Imported dataset successfull.", + "href": f"/background_jobs/{task_id}/{process_id}", + } + ) + except Exception as e: + return JsonResponse({"status": 1, "context": {}, "message": f"Import of dataset failed with: {e}."}) + + return JsonResponse({"status": 1, "message": "GET is not implemented for add dataset"}) + + +@check_permissions +@check_resources_exist("json") +def admin_delete_dataset(request, dataset_id): + try: + model.delete_dataset(dataset_id) + return JsonResponse({"status": 0, "message": f"Deleted dataset {dataset_id}"}) + except Exception as e: + return JsonResponse({"status": 1, "message": f"Could not delete dataset {dataset_id}: {e}"}) + + +@check_permissions +def admin_add_organizer(request, organizer_id): + if request.method == "POST": + data = json.loads(request.body) + add_default_git_integrations = False + + if data["gitUrlToNamespace"]: + git_integration_is_valid, error_message = check_that_git_integration_is_valid( + data["gitUrlToNamespace"], data["gitPrivateToken"] + ) + + if not git_integration_is_valid: + return JsonResponse({"status": 1, "message": error_message}) + else: + add_default_git_integrations = True + + model.edit_organizer( + organizer_id, data["name"], data["years"], data["web"], data["gitUrlToNamespace"], data["gitPrivateToken"] + ) + + if add_default_git_integrations: + git_integrations = [model.model.get_git_integration(settings.DEFAULT_GIT_INTEGRATION_URL, "")] + model.model.edit_organizer( + organizer_id, data["name"], data["years"], data["web"], git_integrations=git_integrations + ) + + auth.create_organizer_group(organizer_id, auth.get_user_id(request)) + return JsonResponse({"status": 0, "message": f"Added Organizer {organizer_id}"}) + + return JsonResponse({"status": 1, "message": "GET is not implemented for add organizer"}) + + +@check_permissions +@check_resources_exist("json") +def admin_edit_organizer(request, organizer_id): + if request.method == "POST": + data = json.loads(request.body) + + if data["gitUrlToNamespace"]: + git_integration_is_valid, error_message = check_that_git_integration_is_valid( + data["gitUrlToNamespace"], data["gitPrivateToken"] + ) + + if not git_integration_is_valid: + return JsonResponse({"status": 1, "message": error_message}) + + model.edit_organizer( + organizer_id, data["name"], data["years"], data["web"], data["gitUrlToNamespace"], data["gitPrivateToken"] + ) + return JsonResponse({"status": 0, "message": f"Updated Organizer {organizer_id}"}) + + return JsonResponse({"status": 1, "message": "GET is not implemented for edit organizer"}) + + +@check_conditional_permissions(restricted=True) +@check_resources_exist("json") +def admin_create_group(request, vm_id): + """this is a rest endpoint to grant a user permissions on a vm""" + vm = model.get_vm(vm_id) + message = auth.create_group(vm) + return JsonResponse({"status": 0, "message": message}) + + +@check_conditional_permissions(restricted=True) +@check_resources_exist("json") +def admin_edit_review(request, dataset_id, vm_id, run_id): + if request.method == "POST": + data = json.loads(request.body) + no_errors = data.get("no_errors", True) + output_error = data.get("output_error", False) + software_error = data.get("software_error", False) + comment = data["comment"] + + # sanity 
checks + if no_errors and (output_error or software_error): + JsonResponse({"status": 1, "message": "Error type is not clearly selected."}) + + username = auth.get_user_id(request) + has_errors = output_error or software_error + has_no_errors = not has_errors + + model.update_review( + dataset_id, + vm_id, + run_id, + username, + str(dt.utcnow()), + has_errors, + has_no_errors, + no_errors=no_errors, + invalid_output=output_error, + has_error_output=output_error, + other_errors=software_error, + comment=comment, + ) + return JsonResponse({"status": 0, "message": f"Updated review for run {run_id}"}) + + return JsonResponse({"status": 1, "message": "GET is not implemented for edit organizer"}) + + +@check_permissions +def admin_upload_dataset(request, task_id, dataset_id, dataset_type): + if request.method != "POST": + return JsonResponse({"status": 1, "message": "GET is not allowed here."}) + + if not dataset_id or dataset_id is None or dataset_id == "None": + return JsonResponse({"status": 1, "message": "Please specify the associated dataset."}) + + if dataset_type not in ["input", "truth"]: + return JsonResponse( + {"status": 1, "message": f"Invalid dataset_type. Expected 'input' or 'truth', but got: '{dataset_type}'"} + ) + + dataset_suffix = "" if dataset_type == "input" else "-truth" + + uploaded_file = request.FILES["file"] + + if not uploaded_file.name.endswith(".zip"): + return JsonResponse( + {"status": 1, "message": f"Invalid Upload. I expect a zip file, but got '{uploaded_file.name}'."} + ) + + dataset = model.get_dataset(dataset_id) + + if "dataset_id" not in dataset or dataset_id != dataset["dataset_id"]: + return JsonResponse({"status": 1, "message": "Unknown dataset_id."}) + + if dataset_id.endswith("-test"): + dataset_prefix = "test-" + elif dataset_id.endswith("-training"): + dataset_prefix = "training-" + else: + return JsonResponse({"status": 1, "message": "Unknown dataset_id."}) + + target_directory = model.model.data_path / (dataset_prefix + "datasets" + dataset_suffix) / task_id / dataset_id + + if not os.path.exists(target_directory): + return JsonResponse({"status": 1, "message": "Dataset directory 'target_directory' does not exist."}) + + if len(os.listdir(target_directory)) > 0: + return JsonResponse( + { + "status": 1, + "message": ( + "There is already some dataset uploaded. We prevent to overwrite data. Please create a new dataset" + " (i.e., a new version) if you want to update the dataset. Please reach out to us if creating a" + " new dataset would not solve your problem." 
+ ), + } + ) + + with tempfile.TemporaryDirectory() as tmp_dir: + with open(tmp_dir + "/tmp.zip", "wb+") as fp_destination: + for chunk in uploaded_file.chunks(): + fp_destination.write(chunk) + + with zipfile.ZipFile(tmp_dir + "/tmp.zip", "r") as zip_ref: + zip_ref.extractall(target_directory) + + return JsonResponse( + {"status": 0, "message": f"Uploaded files '{os.listdir(target_directory)}' to '{target_directory}'."} + ) diff --git a/application/src/tira/endpoints/aha b/application/src/tira_app/endpoints/aha similarity index 100% rename from application/src/tira/endpoints/aha rename to application/src/tira_app/endpoints/aha diff --git a/application/src/tira/endpoints/data_api.py b/application/src/tira_app/endpoints/data_api.py similarity index 53% rename from application/src/tira/endpoints/data_api.py rename to application/src/tira_app/endpoints/data_api.py index 3f69a34a2..6ba47371c 100644 --- a/application/src/tira/endpoints/data_api.py +++ b/application/src/tira_app/endpoints/data_api.py @@ -1,45 +1,47 @@ -import logging +import csv +import datetime import json +import logging import textwrap +from copy import deepcopy +from http import HTTPStatus +from io import StringIO +from typing import Any, Union -from django.core.exceptions import BadRequest - -from tira.forms import * -import tira.tira_model as model -from tira.checks import check_permissions, check_resources_exist, check_conditional_permissions -from tira.tira_data import get_run_runtime, get_run_file_list, get_stderr, get_stdout, get_tira_log -from tira.views import add_context, _add_user_vms_to_context -from tira.authentication import auth - -from django.http import JsonResponse, HttpResponse from django.conf import settings from django.core.cache import cache from django.core.serializers.json import DjangoJSONEncoder -from http import HTTPStatus -import datetime -import csv -from io import StringIO -from copy import deepcopy -from tira.util import link_to_discourse_team +from django.http import HttpResponse, JsonResponse from slugify import slugify -include_navigation = True if settings.DEPLOYMENT == "legacy" else False +from .. 
import tira_model as model +from ..authentication import auth +from ..checks import check_permissions, check_resources_exist +from ..tira_data import get_run_file_list, get_run_runtime, get_stderr, get_stdout, get_tira_log +from ..util import link_to_discourse_team +from ..views import _add_user_vms_to_context, add_context + +include_navigation = False logger = logging.getLogger("tira") logger.info("ajax_routes: Logger active") -@check_resources_exist('json') +@check_resources_exist("json") @add_context def get_dataset_for_task(request, context, task_id): - if request.method == 'GET': + if request.method == "GET": try: datasets = model.get_datasets_by_task(task_id) - context['datasets'] = json.dumps({ds['dataset_id']: ds for ds in datasets}, cls=DjangoJSONEncoder) - context['selected_dataset_id'] = '' - context['test_dataset_ids'] = json.dumps([ds['dataset_id'] for ds in datasets if ds['is_confidential']], cls=DjangoJSONEncoder) - context['training_dataset_ids'] = json.dumps([ds['dataset_id'] for ds in datasets if not ds['is_confidential']], cls=DjangoJSONEncoder) + context["datasets"] = json.dumps({ds["dataset_id"]: ds for ds in datasets}, cls=DjangoJSONEncoder) + context["selected_dataset_id"] = "" + context["test_dataset_ids"] = json.dumps( + [ds["dataset_id"] for ds in datasets if ds["is_confidential"]], cls=DjangoJSONEncoder + ) + context["training_dataset_ids"] = json.dumps( + [ds["dataset_id"] for ds in datasets if not ds["is_confidential"]], cls=DjangoJSONEncoder + ) return JsonResponse({"status": "0", "context": context}) except Exception as e: logger.exception(e) @@ -48,60 +50,62 @@ def get_dataset_for_task(request, context, task_id): def __normalize_run(i, ev_keys, is_admin, user_vms_for_task, task_id, is_ir_task, is_training_dataset=False): i = deepcopy(i) - i['link_to_team'] = link_to_discourse_team(i['vm_id']) + i["link_to_team"] = link_to_discourse_team(i["vm_id"]) eval_run_id = i["run_id"] - for k, v in [('input_run_id', 'run_id')]: + for k, v in [("input_run_id", "run_id")]: i[v] = i[k] del i[k] - if is_admin or i['published'] or is_training_dataset: + if is_admin or i["published"] or is_training_dataset: for j in range(len(ev_keys)): try: - i[ev_keys[j]] = i['measures'][j] - except: + i[ev_keys[j]] = i["measures"][j] + except Exception: i[ev_keys[j]] = None - for j in ['measures']: + for j in ["measures"]: del i[j] - i['selectable'] = False - i['owned_by_user'] = is_admin or i['vm_id'] in user_vms_for_task + i["selectable"] = False + i["owned_by_user"] = is_admin or i["vm_id"] in user_vms_for_task - if not i['blinded'] and (i['owned_by_user'] or i['published'] or is_training_dataset): - i[ - 'link_results_download'] = f'/task/{task_id}/user/{i["vm_id"]}/dataset/{i["dataset_id"]}/download/{eval_run_id}.zip' - i[ - 'link_run_download'] = f'/task/{task_id}/user/{i["vm_id"]}/dataset/{i["dataset_id"]}/download/{i["run_id"]}.zip' + if not i["blinded"] and (i["owned_by_user"] or i["published"] or is_training_dataset): + i["link_results_download"] = ( + f'/task/{task_id}/user/{i["vm_id"]}/dataset/{i["dataset_id"]}/download/{eval_run_id}.zip' + ) + i["link_run_download"] = ( + f'/task/{task_id}/user/{i["vm_id"]}/dataset/{i["dataset_id"]}/download/{i["run_id"]}.zip' + ) if is_ir_task: - i['link_serp'] = f'/serp/{task_id}/user/{i["vm_id"]}/dataset/{i["dataset_id"]}/10/{i["run_id"]}' - i['selectable'] = True + i["link_serp"] = f'/serp/{task_id}/user/{i["vm_id"]}/dataset/{i["dataset_id"]}/10/{i["run_id"]}' + i["selectable"] = True return i def __inject_user_vms_for_task(request, 
context, task_id): _add_user_vms_to_context(request, context, task_id, include_docker_details=False) - return context['user_vms_for_task'] if 'user_vms_for_task' in context else [] + return context["user_vms_for_task"] if "user_vms_for_task" in context else [] @add_context def get_configuration_of_evaluation(request, context, task_id, dataset_id): dataset = model.get_dataset(dataset_id) - context['dataset'] = { - "display_name": dataset['display_name'], - "evaluator_id": dataset['evaluator_id'], - "dataset_id": dataset['dataset_id'], - "evaluator_git_runner_image": dataset['evaluator_git_runner_image'], - "evaluator_git_runner_command": dataset['evaluator_git_runner_command'], + context["dataset"] = { + "display_name": dataset["display_name"], + "evaluator_id": dataset["evaluator_id"], + "dataset_id": dataset["dataset_id"], + "evaluator_git_runner_image": dataset["evaluator_git_runner_image"], + "evaluator_git_runner_command": dataset["evaluator_git_runner_command"], } - return JsonResponse({'status': 0, "context": context}) + return JsonResponse({"status": 0, "context": context}) @add_context -@check_resources_exist('json') +@check_resources_exist("json") def get_evaluations_by_dataset(request, context, task_id, dataset_id): - """ Return all evaluation results for all submission to a dataset + """Return all evaluation results for all submission to a dataset The frontend calls this to build the leaderboard in the task page when a task is selected from the dropdown @@ -117,9 +121,9 @@ def get_evaluations_by_dataset(request, context, task_id, dataset_id): } """ task = model.get_task(task_id, False) - is_ir_task = 'is_ir_task' in task and task['is_ir_task'] + is_ir_task = "is_ir_task" in task and task["is_ir_task"] is_admin = context["role"] == "admin" - show_only_unreviewed = request.GET.get('show_only_unreviewed', 'false').lower() == 'true' + show_only_unreviewed = request.GET.get("show_only_unreviewed", "false").lower() == "true" print(show_only_unreviewed) ev_keys, evaluations = model.get_evaluations_with_keys_by_dataset(dataset_id, is_admin, show_only_unreviewed) user_vms_for_task = __inject_user_vms_for_task(request, context, task_id) @@ -127,15 +131,18 @@ def get_evaluations_by_dataset(request, context, task_id, dataset_id): context["task_id"] = task_id context["dataset_id"] = dataset_id context["ev_keys"] = ev_keys - context["evaluations"] = sorted(evaluations, key=lambda r: r['run_id']) - headers = [{'title': 'Team', 'key': 'vm_id'}, {'title': 'Approach', 'key': 'input_software_name'}, - {'title': 'Run', 'key': 'run_id'}] - evaluation_headers = [{'title': k, 'key': k} for k in ev_keys] - - context["table_headers"] = headers + evaluation_headers + [{'title': '', 'key': 'actions', 'sortable': False}] + context["evaluations"] = sorted(evaluations, key=lambda r: r["run_id"]) + headers = [ + {"title": "Team", "key": "vm_id"}, + {"title": "Approach", "key": "input_software_name"}, + {"title": "Run", "key": "run_id"}, + ] + evaluation_headers = [{"title": k, "key": k} for k in ev_keys] + + context["table_headers"] = headers + evaluation_headers + [{"title": "", "key": "actions", "sortable": False}] context["table_headers_small_layout"] = [headers[1]] + evaluation_headers[:1] - context["table_sort_by"] = [{'key': ev_keys[0], 'order': 'desc'}] if ev_keys else [] + context["table_sort_by"] = [{"key": ev_keys[0], "order": "desc"}] if ev_keys else [] runs = [] for i in evaluations: @@ -143,54 +150,59 @@ def get_evaluations_by_dataset(request, context, task_id, dataset_id): context["runs"] = runs 
- return JsonResponse({'status': 0, "context": context}) + return JsonResponse({"status": 0, "context": context}) @add_context @check_permissions def get_evaluations_by_vm(request, context, task_id, vm_id): task = model.get_task(task_id, False) - is_ir_task = 'is_ir_task' in task and task['is_ir_task'] + is_ir_task = "is_ir_task" in task and task["is_ir_task"] is_admin = context["role"] == "admin" user_vms_for_task = __inject_user_vms_for_task(request, context, task_id) - docker_software_id = request.GET.get('docker_software_id', '') - upload_id = request.GET.get('upload_id', '') + docker_software_id = request.GET.get("docker_software_id", "") + upload_id = request.GET.get("upload_id", "") if not docker_software_id and not upload_id or (docker_software_id and upload_id): - return JsonResponse({'status': 1, "message": 'Please pass either a docker_software_id or a upload_id. Got: ' + - f'upload_id = "{upload_id}" docker_software_id = "{docker_software_id}".'}) + return JsonResponse( + { + "status": 1, + "message": "Please pass either a docker_software_id or a upload_id. Got: " + + f'upload_id = "{upload_id}" docker_software_id = "{docker_software_id}".', + } + ) ev_keys, evaluations = model.get_runs_for_vm(vm_id, docker_software_id, upload_id) context["task_id"] = task_id context["ev_keys"] = ev_keys - headers = [{'title': 'Dataset', 'key': 'dataset_id'}, {'title': 'Run', 'key': 'run_id'}] + headers = [{"title": "Dataset", "key": "dataset_id"}, {"title": "Run", "key": "run_id"}] - context["table_sort_by"] = [{'key': 'run_id', 'order': 'desc'}] + context["table_sort_by"] = [{"key": "run_id", "order": "desc"}] runs = [] covered_evaluation_headers = set() for i in evaluations: - if 'dataset_id' not in i or not i['dataset_id']: + if "dataset_id" not in i or not i["dataset_id"]: continue - dataset_id = i['dataset_id'] - is_training_dataset = dataset_id.endswith('-training') + dataset_id = i["dataset_id"] + is_training_dataset = dataset_id.endswith("-training") i = __normalize_run(i, ev_keys, is_admin, user_vms_for_task, task_id, is_ir_task, is_training_dataset) - i['dataset_id'] = dataset_id + i["dataset_id"] = dataset_id for k in ev_keys: if k in i: covered_evaluation_headers.add(k) runs += [i] context["runs"] = runs - evaluation_headers = [{'title': k, 'key': k} for k in ev_keys if k in covered_evaluation_headers] + evaluation_headers = [{"title": k, "key": k} for k in ev_keys if k in covered_evaluation_headers] - context["table_headers"] = headers + evaluation_headers + [{'title': '', 'key': 'actions', 'sortable': False}] + context["table_headers"] = headers + evaluation_headers + [{"title": "", "key": "actions", "sortable": False}] context["table_headers_small_layout"] = headers - return JsonResponse({'status': 0, "context": context}) + return JsonResponse({"status": 0, "context": context}) @add_context @@ -198,19 +210,19 @@ def get_evaluations_by_vm(request, context, task_id, vm_id): def get_evaluation(request, context, run_id, vm_id): run = model.get_run(None, None, run_id) review = model.get_run_review(None, None, run_id) - - if not run['is_evaluation'] or not vm_id: + + if not run["is_evaluation"] or not vm_id: # We need the vm_id to get the check working, otherwise we have no direct link to the vm. 
- return JsonResponse({'status': 1, "message": f"Run {run_id} is not an evaluation run."}) + return JsonResponse({"status": 1, "message": f"Run {run_id} is not an evaluation run."}) - dataset = model.get_dataset(run['dataset']) + dataset = model.get_dataset(run["dataset"]) - if context['role'] != 'admin' and review["blinded"] and dataset['is_confidential']: - return JsonResponse({'status': 1, "message": f"Run {run_id} is not unblinded."}) + if context["role"] != "admin" and review["blinded"] and dataset["is_confidential"]: + return JsonResponse({"status": 1, "message": f"Run {run_id} is not unblinded."}) context["evaluation"] = model.get_evaluation(run_id) - return JsonResponse({'status': 0, "context": context}) + return JsonResponse({"status": 0, "context": context}) @check_resources_exist("json") @@ -222,15 +234,15 @@ def get_submissions_by_dataset(request, context, task_id, dataset_id): context["dataset_id"] = dataset_id context["vms"] = vms - return JsonResponse({'status': 1, "context": context}) + return JsonResponse({"status": 1, "context": context}) @check_permissions @check_resources_exist("json") @add_context def get_evaluations_of_run(request, context, vm_id, run_id): - context['evaluations'] = model.get_evaluations_of_run(vm_id, run_id) - return JsonResponse({'status': 0, "context": context}) + context["evaluations"] = model.get_evaluations_of_run(vm_id, run_id) + return JsonResponse({"status": 0, "context": context}) @check_permissions @@ -238,17 +250,17 @@ def get_evaluations_of_run(request, context, vm_id, run_id): @add_context def get_ova_list(request, context): context["ova_list"] = model.get_ova_list() - return JsonResponse({'status': 0, "context": context}) + return JsonResponse({"status": 0, "context": context}) @add_context def runs(request, context, task_id, dataset_id, vm_id, software_id): runs = model.runs(task_id, dataset_id, vm_id, software_id) - context["runs"] = list(set([i['run_id'] for i in runs])) + context["runs"] = list(set([i["run_id"] for i in runs])) if len(runs) > 0: context["job_id"] = runs[0] - return JsonResponse({'status': 0, "context": context}) + return JsonResponse({"status": 0, "context": context}) @check_permissions @@ -256,7 +268,7 @@ def runs(request, context, task_id, dataset_id, vm_id, software_id): @add_context def get_host_list(request, context): context["host_list"] = model.get_host_list() - return JsonResponse({'status': 0, "context": context}) + return JsonResponse({"status": 0, "context": context}) @check_permissions @@ -264,20 +276,21 @@ def get_host_list(request, context): @add_context def get_organizer_list(request, context): organizer_list = model.get_organizer_list() - is_admin = context and 'role' in context and context['role'] == 'admin' + is_admin = context and "role" in context and context["role"] == "admin" orga_groups_of_user = set(auth.get_organizer_ids(request)) - - context["organizer_list"] = [i for i in organizer_list if is_admin or ('organizer_id' in i and i['organizer_id'] in orga_groups_of_user)] - - - return JsonResponse({'status': 0, "context": context}) + + context["organizer_list"] = [ + i for i in organizer_list if is_admin or ("organizer_id" in i and i["organizer_id"] in orga_groups_of_user) + ] + + return JsonResponse({"status": 0, "context": context}) @check_resources_exist("json") @add_context def get_task_list(request, context): context["task_list"] = model.get_tasks() - return JsonResponse({'status': 0, "context": context}) + return JsonResponse({"status": 0, "context": context}) 
@check_resources_exist("json") @@ -285,7 +298,7 @@ def get_task_list(request, context): def get_registration_formular(request, context, task_id): context["remaining_team_names"] = model.remaining_team_names(task_id) - return JsonResponse({'status': 0, "context": context}) + return JsonResponse({"status": 0, "context": context}) @check_resources_exist("json") @@ -296,13 +309,13 @@ def get_task(request, context, task_id): # TODO: remove this when vuetify frontend is active context["remaining_team_names"] = [] context["datasets"] = model.get_datasets_by_task(task_id, return_only_names=True) - context["datasets"] = sorted(context["datasets"], key=lambda i: i['display_name']) + context["datasets"] = sorted(context["datasets"], key=lambda i: i["display_name"]) for d in context["datasets"]: - if not d['display_name']: - d['display_name'] = d['dataset_id'] + if not d["display_name"]: + d["display_name"] = d["dataset_id"] _add_user_vms_to_context(request, context, task_id, include_docker_details=False) - return JsonResponse({'status': 0, "context": context}) + return JsonResponse({"status": 0, "context": context}) @check_resources_exist("json") @@ -311,19 +324,21 @@ def get_dataset(request, context, dataset_id): context["dataset"] = model.get_dataset(dataset_id) context["evaluator"] = model.get_evaluator(dataset_id) - return JsonResponse({'status': 0, "context": context}) + return JsonResponse({"status": 0, "context": context}) @check_resources_exist("json") @add_context def get_organizer(request, context, organizer_id): org = model.get_organizer(organizer_id) - return JsonResponse({'status': 0, "context": org}) + return JsonResponse({"status": 0, "context": org}) @add_context def get_role(request, context): - return JsonResponse({'status': 0, 'role': context['role'], 'organizer_teams': auth.get_organizer_ids(request), 'context': context}) + return JsonResponse( + {"status": 0, "role": context["role"], "organizer_teams": auth.get_organizer_ids(request), "context": context} + ) @check_resources_exist("json") @@ -332,7 +347,8 @@ def update_docker_images(request, context, task_id, user_id): docker = model.load_docker_data(task_id, user_id, cache, force_cache_refresh=True) context["docker"] = docker - return JsonResponse({'status': 0, "context": context}) + return JsonResponse({"status": 0, "context": context}) + @check_resources_exist("json") @add_context @@ -343,48 +359,58 @@ def get_user(request, context, task_id, user_id): context["user_id"] = user_id context["vm"] = vm context["docker"] = docker - + # is_default indicates whether the user has a docker-only team, i.e., no virtual machine. # This is the case if the user-vm ends with default or if no host or admin name is configured. 
- context["is_default"] = user_id.endswith("default") or not vm['host'] or not vm['admin_name'] + context["is_default"] = user_id.endswith("default") or not vm["host"] or not vm["admin_name"] _add_user_vms_to_context(request, context, task_id) - return JsonResponse({'status': 0, "context": context}) + return JsonResponse({"status": 0, "context": context}) @check_resources_exist("json") @add_context def get_running_software(request, context, task_id, user_id, force_cache_refresh): - context['running_software'] = [] - + context["running_software"] = [] + evaluators_for_task = model.get_evaluators_for_task(task_id, cache) - repositories = set([i['git_repository_id'] for i in evaluators_for_task if i['is_git_runner'] and i['git_repository_id']]) + repositories = set( + [i["git_repository_id"] for i in evaluators_for_task if i["is_git_runner"] and i["git_repository_id"]] + ) git_runner = model.get_git_integration(task_id=task_id) for git_repository_id in sorted(list(repositories)): - context['running_software'] += list(git_runner.yield_all_running_pipelines(int(git_repository_id), user_id, cache, force_cache_refresh=eval(force_cache_refresh))) - context['running_software_last_refresh'] = model.load_refresh_timestamp_for_cache_key(cache, 'all-running-pipelines-repo-' + str(git_repository_id)) - context['running_software_next_refresh'] = str(context['running_software_last_refresh'] + datetime.timedelta(seconds=15)) - context['running_software_last_refresh'] = str(context['running_software_last_refresh']) - - for software in context['running_software']: - if 'pipeline' in software: - del software['pipeline'] - - return JsonResponse({'status': 0, "context": context}) + context["running_software"] += list( + git_runner.yield_all_running_pipelines( + int(git_repository_id), user_id, cache, force_cache_refresh=eval(force_cache_refresh) + ) + ) + context["running_software_last_refresh"] = model.load_refresh_timestamp_for_cache_key( + cache, "all-running-pipelines-repo-" + str(git_repository_id) + ) + context["running_software_next_refresh"] = str( + context["running_software_last_refresh"] + datetime.timedelta(seconds=15) + ) + context["running_software_last_refresh"] = str(context["running_software_last_refresh"]) + + for software in context["running_software"]: + if "pipeline" in software: + del software["pipeline"] + + return JsonResponse({"status": 0, "context": context}) @add_context def public_submissions(request, context, task_id): - context['public_submissions'] = model.model.get_public_docker_softwares(task_id) + context["public_submissions"] = model.model.get_public_docker_softwares(task_id) - return JsonResponse({'status': 0, "context": context}) + return JsonResponse({"status": 0, "context": context}) def public_submission_or_none(task_id, user_id, display_name): for i in model.model.get_public_docker_softwares(task_id, return_only_names=False, return_details=True): - if i['display_name'] == display_name and i['vm_id'] == user_id: + if i["display_name"] == display_name and i["vm_id"] == user_id: return i return None @@ -393,10 +419,10 @@ def public_submission_or_none(task_id, user_id, display_name): def public_submission(request, context, task_id, user_id, display_name): ret = public_submission_or_none(task_id, user_id, display_name) if ret: - context['submission'] = ret - return JsonResponse({'status': 0, "context": context}) + context["submission"] = ret + return JsonResponse({"status": 0, "context": context}) - return JsonResponse({'status': 1, "messge": f"Software 
'{task_id}/{user_id}/{display_name}' does not exist."}) + return JsonResponse({"status": 1, "messge": f"Software '{task_id}/{user_id}/{display_name}' does not exist."}) @check_permissions @@ -408,16 +434,18 @@ def get_review(request, context, dataset_id, vm_id, run_id): context["review"] = model.get_run_review(dataset_id, vm_id, run_id) context["runtime"] = get_run_runtime(dataset_id, vm_id, run_id) context["files"] = get_run_file_list(dataset_id, vm_id, run_id) - if context['role'] == 'admin': + if context["role"] == "admin": context["files"]["file_list"][0] = "output/" context["stdout"] = get_stdout(dataset_id, vm_id, run_id) context["stderr"] = get_stderr(dataset_id, vm_id, run_id) context["tira_log"] = get_tira_log(dataset_id, vm_id, run_id) - elif (context['role'] == auth.ROLE_PARTICIPANT) and ((not context['dataset'].get('is_confidential', True)) or not context["review"]['blinded']): + elif (context["role"] == auth.ROLE_PARTICIPANT) and ( + (not context["dataset"].get("is_confidential", True)) or not context["review"]["blinded"] + ): context["files"]["file_list"][0] = "output/" context["stdout"] = get_stdout(dataset_id, vm_id, run_id) context["stderr"] = get_stderr(dataset_id, vm_id, run_id) - context["review"]['blinded'] = False + context["review"]["blinded"] = False context["tira_log"] = "hidden" else: context["files"]["file_list"] = [] @@ -425,59 +453,65 @@ def get_review(request, context, dataset_id, vm_id, run_id): context["stderr"] = "hidden" context["tira_log"] = "hidden" - return JsonResponse({'status': 0, "context": context}) + return JsonResponse({"status": 0, "context": context}) @add_context def add_registration(request, context, task_id, vm_id): - """ get the registration of a user on a task. If there is none """ + """get the registration of a user on a task. 
If there is none""" try: data = json.loads(request.body) - data['group'] = slugify(data['group']) - data['initial_owner'] = context['user_id'] - data['task_id'] = task_id + data["group"] = slugify(data["group"]) + data["initial_owner"] = context["user_id"] + data["task_id"] = task_id model.add_registration(data) - auth.create_docker_group(data['group'], data['initial_owner']) + auth.create_docker_group(data["group"], data["initial_owner"]) auth.notify_organizers_of_new_participants(data, task_id) - context['user_is_registered'] = True - context['vm_id'] = data['group'] - context['user_vms_for_task'] = [data['group']] + context["user_is_registered"] = True + context["vm_id"] = data["group"] + context["user_vms_for_task"] = [data["group"]] - return JsonResponse({'status': 0, "context": context}) + return JsonResponse({"status": 0, "context": context}) except Exception as e: logger.warning(e) logger.exception(e) - return JsonResponse({'status': 0, "message": f"Encountered an exception: {e}"}, status=HTTPStatus.INTERNAL_SERVER_ERROR) + return JsonResponse( + {"status": 0, "message": f"Encountered an exception: {e}"}, status=HTTPStatus.INTERNAL_SERVER_ERROR + ) def expand_links(component): - links = [*component.get('links', [])] - ir_datasets_id = component.get('ir_datasets_id', None) + links = [*component.get("links", [])] + ir_datasets_id = component.get("ir_datasets_id", None) if ir_datasets_id: - if '/' in ir_datasets_id: - base = ir_datasets_id.split('/')[0] - fragment = f'#{ir_datasets_id}' + if "/" in ir_datasets_id: + base = ir_datasets_id.split("/")[0] + fragment = f"#{ir_datasets_id}" else: base = ir_datasets_id - fragment = '' + fragment = "" - links.append({ - 'display_name': 'ir_datasets', - 'href': f'https://ir-datasets.com/{base}.html{fragment}', - 'target': '_blank', - }) + links.append( + { + "display_name": "ir_datasets", + "href": f"https://ir-datasets.com/{base}.html{fragment}", + "target": "_blank", + } + ) - tirex_submission_id = component.get('tirex_submission_id', None) + tirex_submission_id = component.get("tirex_submission_id", None) if tirex_submission_id: - links.append({ - 'display_name': 'Submission in TIREx', - 'href': f'/submissions/{tirex_submission_id}', - }) + links.append( + { + "display_name": "Submission in TIREx", + "href": f"/submissions/{tirex_submission_id}", + } + ) if links: - component['links'] = links + component["links"] = links return component @@ -485,13 +519,13 @@ def expand_links(component): def flatten_components(components): flattened_components = [] for identifier, data in components.items(): - component = {'identifier': identifier, **data} + component = {"identifier": identifier, **data} - if 'components' in component: - component['components'] = flatten_components(data['components']) + if "components" in component: + component["components"] = flatten_components(data["components"]) - if 'tirex_submission_id' in data: - component['tirex_submission_id'] = data['tirex_submission_id'] + if "tirex_submission_id" in data: + component["tirex_submission_id"] = data["tirex_submission_id"] flattened_components.append(expand_links(component)) @@ -500,23 +534,24 @@ def flatten_components(components): @add_context def tirex_components(request, context): - context['tirex_components'] = flatten_components(settings.TIREX_COMPONENTS) - return JsonResponse({'status': 0, 'context': context}) + context["tirex_components"] = flatten_components(settings.TIREX_COMPONENTS) + return JsonResponse({"status": 0, "context": context}) -def 
flatten_tirex_components_to_id(obj, t=None): + +def flatten_tirex_components_to_id(obj: Union[dict[str, Any], Any], t=None): ret = {} - if type(obj) != dict: + if not isinstance(obj, dict): return ret - if 'tirex_submission_id' in obj: - assert obj['tirex_submission_id'] not in ret - obj['type'] = t - ret[obj['tirex_submission_id']] = obj + if "tirex_submission_id" in obj: + assert obj["tirex_submission_id"] not in ret + obj["type"] = t + ret[obj["tirex_submission_id"]] = obj for k, v in obj.items(): for i, j in flatten_tirex_components_to_id(v, t if t else k).items(): - ret[i] = j + ret[i] = j return ret @@ -525,31 +560,33 @@ def flatten_tirex_components_to_id(obj, t=None): def get_snippet_to_run_components(request): - component_key = request.GET.get('component') + component_key = request.GET.get("component") if component_key not in TIREX_ID_TO_COMPONENT: - return JsonResponse({'status': 1, 'message': f'Component "{component_key}" not found.'}) + return JsonResponse({"status": 1, "message": f'Component "{component_key}" not found.'}) component = TIREX_ID_TO_COMPONENT[component_key] - component_type = component['type'] - dataset_initialization = textwrap.dedent(''' + component_type = component["type"] + dataset_initialization = textwrap.dedent( + """ # You can replace Robust04 with other datasets dataset = pt.get_dataset("irds:disks45/nocr/trec-robust-2004") - ''').strip() - snippet = '' + """ + ).strip() + snippet = "" - if component_type == 'dataset': - dataset_initialization = '' - ir_datasets_id = component.get('ir_datasets_id') + if component_type == "dataset": + dataset_initialization = "" + ir_datasets_id = component.get("ir_datasets_id") if ir_datasets_id: - snippet = f''' + snippet = f""" dataset = pt.get_dataset('irds:{ir_datasets_id}') indexer = pt.IterDictIndexer('./index') indexref = indexer.index(dataset.get_corpus_iter()) - ''' + """ else: - snippet = f''' + snippet = f""" def get_corpus_iter(): # Iterate over the {component['display_name']} corpus corpus = ... 
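For orientation, flatten_tirex_components_to_id above walks the nested component tree from settings.TIREX_COMPONENTS and indexes every entry that carries a tirex_submission_id by that id, recording the top-level category it was found under as its type. A minimal self-contained sketch of that behaviour; the component tree and identifiers below are invented for illustration and are not the real configuration:

def flatten_tirex_components_to_id(obj, t=None):
    # Mirror of the helper above: collect every nested dict that carries a
    # 'tirex_submission_id', keyed by that id, and remember the first dict key
    # on the path (the top-level category) as its 'type'.
    ret = {}
    if not isinstance(obj, dict):
        return ret
    if "tirex_submission_id" in obj:
        obj["type"] = t
        ret[obj["tirex_submission_id"]] = obj
    for k, v in obj.items():
        ret.update(flatten_tirex_components_to_id(v, t if t else k))
    return ret

# Hypothetical component tree, invented for illustration only.
components = {
    "retrieval": {
        "bm25": {"display_name": "BM25", "tirex_submission_id": "ir-benchmarks/bm25"},
    },
    "query_processing": {
        "rm3": {"display_name": "RM3", "tirex_submission_id": "ir-benchmarks/rm3"},
    },
}

flat = flatten_tirex_components_to_id(components)
assert flat["ir-benchmarks/bm25"]["type"] == "retrieval"
assert flat["ir-benchmarks/rm3"]["type"] == "query_processing"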
@@ -558,54 +595,54 @@ def get_corpus_iter(): indexer = pt.IterDictIndexer('./index') indexref = indexer.index(get_corpus_iter()) - ''' - elif component_type == 'document_processing': - tirex_submission_id = component.get('tirex_submission_id') + """ + elif component_type == "document_processing": + tirex_submission_id = component.get("tirex_submission_id") if tirex_submission_id: - snippet = f''' + snippet = f""" transformed_docs = tira.pt.transform_documents('{tirex_submission_id}', dataset) - ''' - elif component_type == 'query_processing': - tirex_submission_id = component.get('tirex_submission_id') + """ + elif component_type == "query_processing": + tirex_submission_id = component.get("tirex_submission_id") if tirex_submission_id: - snippet = f''' + snippet = f""" topics = dataset.get_topics(variant='title') transformed_queries = tira.pt.transform_queries('{tirex_submission_id}', topics) - ''' - elif component_type in ('retrieval', 'reranking'): - tirex_submission_id = component.get('tirex_submission_id') + """ + elif component_type in ("retrieval", "reranking"): + tirex_submission_id = component.get("tirex_submission_id") if tirex_submission_id: - snippet = f''' + snippet = f""" run = tira.pt.from_retriever_submission('{tirex_submission_id}', dataset=dataset_id) - ''' - elif component_type == 'dataset': + """ + elif component_type == "dataset": pass else: - JsonResponse({'status': 1, 'message': f'Component type "{component_type}" does not exist...'}) + JsonResponse({"status": 1, "message": f'Component type "{component_type}" does not exist...'}) if snippet: snippet = textwrap.dedent(snippet).strip() if dataset_initialization: - snippet = dataset_initialization + '\n' + snippet + snippet = dataset_initialization + "\n" + snippet - return JsonResponse({'status': 0, 'context': {'snippet': snippet}}) + return JsonResponse({"status": 0, "context": {"snippet": snippet}}) @add_context def reranking_datasets(request, context, task_id): - context['re_ranking_datasets'] = model.get_all_reranking_datasets_for_task(task_id) - return JsonResponse({'status': 0, 'context': context}) + context["re_ranking_datasets"] = model.get_all_reranking_datasets_for_task(task_id) + return JsonResponse({"status": 0, "context": context}) @add_context @check_permissions def submissions_of_user(request, context, vm_id): try: - context['submissions_of_user'] = model.submissions_of_user(vm_id) - return JsonResponse({'status': 0, 'context': context}) - except: - return JsonResponse({'status': 1}) + context["submissions_of_user"] = model.submissions_of_user(vm_id) + return JsonResponse({"status": 0, "context": context}) + except Exception: + return JsonResponse({"status": 1}) @add_context @@ -613,29 +650,31 @@ def submissions_of_user(request, context, vm_id): def import_submission(request, context, task_id, vm_id, submission_type, s_id): try: model.import_submission(task_id, vm_id, submission_type, s_id) - return JsonResponse({'status': 0, 'context': context}) - except: - return JsonResponse({'status': 1}) + return JsonResponse({"status": 0, "context": context}) + except Exception: + return JsonResponse({"status": 1}) @add_context @check_permissions -@check_resources_exist('json') +@check_resources_exist("json") def submissions_for_task(request, context, task_id, user_id, submission_type): context["datasets"] = model.get_datasets_by_task(task_id, return_only_names=True) cloned_submissions = model.cloned_submissions_of_user(user_id, task_id) if submission_type == "upload": context["all_uploadgroups"] = 
model.get_uploads(task_id, user_id) - context["all_uploadgroups"] += [i for i in cloned_submissions if i['type'] == 'upload'] + context["all_uploadgroups"] += [i for i in cloned_submissions if i["type"] == "upload"] elif submission_type == "docker": context["docker"] = {"docker_softwares": model.get_docker_softwares(task_id, user_id)} - context["docker"]['docker_softwares'] += [i for i in cloned_submissions if i['type'] == 'docker'] + context["docker"]["docker_softwares"] += [i for i in cloned_submissions if i["type"] == "docker"] context["resources"] = settings.GIT_CI_AVAILABLE_RESOURCES elif submission_type == "vm": - context["message"] = "This option is not active for this shared task. " \ - "Please contact the organizers to enable submissions via virtual machines." + context["message"] = ( + "This option is not active for this shared task. " + "Please contact the organizers to enable submissions via virtual machines." + ) - return JsonResponse({'status': 0, "context": context}) + return JsonResponse({"status": 0, "context": context}) @check_permissions @@ -644,10 +683,23 @@ def submissions_for_task(request, context, task_id, user_id, submission_type): def export_registrations(request, context, task_id): ret = StringIO() - fieldnames = ['team_name', 'initial_owner', 'team_members', 'registered_on_task', 'name', 'email', 'affiliation', - 'country', 'employment', 'participates_for', 'instructor_name', 'instructor_email', 'questions', - 'created', 'last_modified' - ] + fieldnames = [ + "team_name", + "initial_owner", + "team_members", + "registered_on_task", + "name", + "email", + "affiliation", + "country", + "employment", + "participates_for", + "instructor_name", + "instructor_email", + "questions", + "created", + "last_modified", + ] writer = csv.DictWriter(ret, fieldnames=fieldnames) diff --git a/application/src/tira_app/endpoints/diffir_api.py b/application/src/tira_app/endpoints/diffir_api.py new file mode 100644 index 000000000..164af8a45 --- /dev/null +++ b/application/src/tira_app/endpoints/diffir_api.py @@ -0,0 +1,114 @@ +import json +import logging +from os.path import abspath +from pathlib import Path + +from django.conf import settings +from django.http import HttpResponse, JsonResponse + +from .. import tira_model as model +from ..checks import check_permissions +from ..views import add_context + +logger = logging.getLogger("tira") + + +def doc_file_for_run(vm_id, dataset_id, task_id, run_id): + checked_paths = [] + for evaluation in model.get_evaluations_of_run(vm_id, run_id): + for f in [ + ".data-top-10-for-rendering.jsonl.gz", + ".data-top-10-for-rendering.jsonl", + ".data-top-10-for-rendering.json.gz", + ".data-top-10-for-rendering.json", + ]: + p = Path(settings.TIRA_ROOT) / "data" / "runs" / dataset_id / vm_id / evaluation / "output" / f + checked_paths += [str(p)] + if p.is_file(): + return p + raise ValueError(f"Could not find .data-top-10-for-rendering.jsonl. 
Searched in {checked_paths}.") + + +def load_irds_metadata_of_task(task, dataset): + dataset_type = "training-datasets" if dataset.endswith("-training") else "test-datasets" + + metadata_file = Path(settings.TIRA_ROOT) / "data" / "datasets" / dataset_type / task / dataset / "metadata.json" + + if not metadata_file.is_file(): + raise ValueError(f"Configuration error: The expected file {metadata_file} does not exist.") + + return json.load(open(metadata_file, "r")) + + +def __normalize_ids(a, b): + return "---".join(sorted([a, b])) + + +@add_context +@check_permissions +def diffir(request, context, task_id, run_id_1, run_id_2, topk): + if request.method == "GET": + try: + run_1 = model.get_run(dataset_id=None, vm_id=None, run_id=run_id_1) + run_2 = model.get_run(dataset_id=None, vm_id=None, run_id=run_id_2) + + if run_1["dataset"] != run_2["dataset"]: + raise ValueError( + f'Run {run_id_1} has dataset {run_1["dataset"]} while run {run_id_2} has dataset ' + + f'{run_2["dataset"]}. Expected both to be identical' + ) + + diffir_file = ( + Path(settings.TIRA_ROOT) + / "state" + / "serp" + / "version-0.0.1" + / "runs" + / run_1["dataset"] + / __normalize_ids(run_1["vm"], run_2["vm"]) + / __normalize_ids(run_id_1, run_id_2) + / "diffir.html" + ) + diffir_dir = (diffir_file / "..").resolve() + + if diffir_file.is_file(): + return HttpResponse(open(diffir_file).read()) + + run_dir = Path(settings.TIRA_ROOT) / "data" / "runs" / run_1["dataset"] + run_1_file = run_dir / run_1["vm"] / run_id_1 / "output" / "run.txt" + run_2_file = run_dir / run_2["vm"] / run_id_2 / "output" / "run.txt" + + if not run_1_file.is_file(): + raise ValueError(f"Error: The expected file {run_1_file} does not exist.") + + if not run_2_file.is_file(): + raise ValueError(f"Error: The expected file {run_2_file} does not exist.") + + doc_files = [ + doc_file_for_run(run_1["vm"], run_1["dataset"], task_id, run_id_1), + doc_file_for_run(run_2["vm"], run_1["dataset"], task_id, run_id_2), + ] + + for doc_file in doc_files: + if not doc_file or not doc_file.is_file(): + raise ValueError(f"Error: expected two evaluations, but got only one in {doc_files}") + + from diffir.run import diff_from_local_data + + _, ret = diff_from_local_data( + [abspath(run_1_file), abspath(run_2_file)], + [str(i) for i in doc_files], + cli=False, + web=True, + print_html=False, + topk=topk, + ) + + diffir_dir.mkdir(parents=True, exist_ok=True) + with open(diffir_file, "w") as f: + f.write(ret) + + return HttpResponse(ret) + except Exception as e: + logger.exception(e) + return JsonResponse({"status": "0", "message": f"Encountered an exception: {e}"}) diff --git a/application/src/tira_app/endpoints/misc.py b/application/src/tira_app/endpoints/misc.py new file mode 100644 index 000000000..dbd32dc64 --- /dev/null +++ b/application/src/tira_app/endpoints/misc.py @@ -0,0 +1,41 @@ +""" +This file contains miscellaneous and **unversioned** endpoints (e.g., the /health or /info). +""" + +from django.urls import path +from rest_framework import status +from rest_framework.decorators import api_view +from rest_framework.request import Request +from rest_framework.response import Response +from tira import __version__ as tira_version + +rest_api_version = "v1.0.0-draft" + + +@api_view(["GET"]) +def health_endpoint(request: Request) -> Response: + """ + The /health endpoint returns 2xx on success (currently 204 because we don't respond with any content). It can be + used to check if the REST-API is served. 
+ """ + return Response(status=status.HTTP_204_NO_CONTENT) + + +@api_view(["GET"]) +def info_endpoint(request: Request) -> Response: + """ + The /info endpoint contains general information about the running server (e.g., the version of TIRA that is + running). Do not add any sensitive information to this endpoint as it is **public**! + """ + return Response( + { + "version": tira_version, + "restApiVersion": rest_api_version, + } + ) + + +endpoints = [ + path("health", health_endpoint), + path("info", info_endpoint), +] diff --git a/application/src/tira/endpoints/organizer_api.py b/application/src/tira_app/endpoints/organizer_api.py similarity index 59% rename from application/src/tira/endpoints/organizer_api.py rename to application/src/tira_app/endpoints/organizer_api.py index 700dfea1c..7b3bc07d5 100644 --- a/application/src/tira/endpoints/organizer_api.py +++ b/application/src/tira_app/endpoints/organizer_api.py @@ -1,13 +1,11 @@ import logging -from tira.checks import check_permissions, check_resources_exist, check_conditional_permissions -from tira.forms import * from django.http import JsonResponse -from django.conf import settings -import tira.tira_model as model +from .. import tira_model as model +from ..checks import check_conditional_permissions, check_permissions, check_resources_exist -include_navigation = True if settings.DEPLOYMENT == "legacy" else False +include_navigation = False logger = logging.getLogger("tira") logger.info("ajax_routes: Logger active") @@ -18,41 +16,41 @@ @check_permissions -@check_resources_exist('json') +@check_resources_exist("json") def publish(request, vm_id, dataset_id, run_id, value): - value = (True if value == 'true' else False) - if request.method == 'GET': + value = True if value == "true" else False + if request.method == "GET": status = model.update_review(dataset_id, vm_id, run_id, published=value) if status: - context = {"status": "0", "published": value, "message": f"Published is now: {value}"} + context = {"status": "0", "published": value, "message": f"Published is now: {value}"} else: - context = {"status": "1", "published": (not value), "message": f"Published is now: {value}"} + context = {"status": "1", "published": (not value), "message": f"Published is now: {value}"} return JsonResponse(context) @check_conditional_permissions(restricted=True) -@check_resources_exist('json') +@check_resources_exist("json") def blind(request, vm_id, dataset_id, run_id, value): - value = (False if value == 'false' else True) + value = False if value == "false" else True - if request.method == 'GET': + if request.method == "GET": status = model.update_review(dataset_id, vm_id, run_id, blinded=value) if status: - context = {"status": "0", "blinded": value, "message": f"Blinded is now: {value}"} + context = {"status": "0", "blinded": value, "message": f"Blinded is now: {value}"} else: - context = {"status": "1", "blinded": (not value), "message": f"Blinded is now: {value}"} + context = {"status": "1", "blinded": (not value), "message": f"Blinded is now: {value}"} return JsonResponse(context) @check_permissions -@check_resources_exist('json') +@check_resources_exist("json") def get_count_of_missing_reviews(request, task_id): context = {"count_of_missing_reviews": model.get_count_of_missing_reviews(task_id)} return JsonResponse({"status": 0, "context": context}) @check_permissions -@check_resources_exist('json') +@check_resources_exist("json") def get_count_of_team_submissions(request, task_id): context = {"count_of_team_submissions": 
model.get_count_of_team_submissions(task_id)} return JsonResponse({"status": 0, "context": context}) diff --git a/application/src/tira_app/endpoints/serp_api.py b/application/src/tira_app/endpoints/serp_api.py new file mode 100644 index 000000000..06360a717 --- /dev/null +++ b/application/src/tira_app/endpoints/serp_api.py @@ -0,0 +1,41 @@ +import logging +from pathlib import Path + +from django.conf import settings +from django.http import HttpResponse, JsonResponse + +from ..checks import check_permissions +from ..endpoints.diffir_api import doc_file_for_run +from ..views import add_context + +logger = logging.getLogger("tira") + + +@add_context +@check_permissions +def serp(request, context, vm_id, dataset_id, task_id, run_id, topk): + # podman --storage-opt mount_program=/usr/bin/fuse-overlayfs run -v /mnt/ceph:/mnt/ceph:ro -ti webis/tira-application:0.0.45-diffir diffir --dataset cranfield /mnt/ceph/tira/data/runs/cranfield-20230107-training/tira-ir-starter/2023-02-13-12-40-07/output/run.txt + + if request.method == "GET": + try: + run_file = Path(settings.TIRA_ROOT) / "data" / "runs" / dataset_id / vm_id / run_id / "output" / "run.txt" + + if not run_file.is_file(): + raise ValueError(f"Error: The expected file {run_file} does not exist.") + + try: + from diffir.run import diff_from_local_data + except Exception as e: + logger.exception(e) + raise ValueError("Could not load dependency diffir") + + doc_file = doc_file_for_run(vm_id, dataset_id, task_id, run_id) + if doc_file and doc_file.is_file(): + _, rendered_serp = diff_from_local_data( + [str(run_file.resolve())], [str(doc_file)], cli=False, web=True, print_html=False, topk=topk + ) + + return HttpResponse(rendered_serp) + except Exception as e: + logger.exception(e) + return JsonResponse({"status": "1", "message": f"Encountered an exception: {e}"}) diff --git a/application/src/tira_app/endpoints/stdout_beautifier.py b/application/src/tira_app/endpoints/stdout_beautifier.py new file mode 100755 index 000000000..e7576153a --- /dev/null +++ b/application/src/tira_app/endpoints/stdout_beautifier.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 +import logging +import os +import re +from subprocess import PIPE, run + +from bs4 import BeautifulSoup + +ansi_color_code_regex = re.compile("\\[(\\d)+(;)*(\\d)*m") +aha_exec = os.path.abspath(__file__).replace("stdout_beautifier.py", "aha") + +logger = logging.getLogger("tira") + + +def is_start_of_ansi_code(txt: str, pos: int) -> bool: + if txt[pos] != "[": + return False + + matches = ansi_color_code_regex.search(txt[pos : pos + 8]) + + return matches is not None and matches.span()[0] == 0 + + +def beautify_ansi_text(txt: str) -> str: + # try: + ret = "" + for i in range(len(txt)): + if is_start_of_ansi_code(txt, i): + ret += "\033[" + else: + ret += txt[i] + p = run([aha_exec], stdout=PIPE, input=ret, encoding="utf8") + ret = p.stdout + if p.returncode != 0: + raise ValueError(f"Returncode invalid: {p.returncode}. Got {ret}.") + + soup = BeautifulSoup(ret, "html.parser") + return "\n\n".join([str(i) for i in soup.select("pre")]) + + +if __name__ == "__main__": + print( + beautify_ansi_text( + """ [[92mo[0m] The file local-copy-of-input-run/run.jsonl is in JSONL format. + [[92mo[0m] The file training-datasets-truth/clickbait-spoiling/task-2-spoiler-generation-validation-20220924-training/validation.jsonl is in JSONL format. + [[92mo[0m] Spoiler generations have correct format. Found 800 Here I try to escape + [[92mo[0m] Spoiler generations have correct format. 
Found 800 +""" + ) + ) + + print( + beautify_ansi_text( + """[0;32m/opt/conda/lib/python3.7/site-packages/requests/adapters.py in [0;36msend[0;34m(self, request, stream, timeout, verify, cert, proxies) +[1;32m 517 [0;32mraise SSLError[0;34m(e[0;34m, request[0;34m=request[0;34m)[0;34m[0;34m +[1;32m 518 [0;34m +[0;32m--> 519[0;31m [0;32mraise ConnectionError[0;34m(e[0;34m, request[0;34m=request[0;34m)[0;34m[0;34m +[1;32m 520 [0;34m +[1;32m 521 [0;32mexcept ClosedPoolError [0;32mas e[0;34m:[0;34m[0;34m""" + ) + ) diff --git a/application/src/tira_app/endpoints/v1/__init__.py b/application/src/tira_app/endpoints/v1/__init__.py new file mode 100644 index 000000000..03673625b --- /dev/null +++ b/application/src/tira_app/endpoints/v1/__init__.py @@ -0,0 +1,17 @@ +from django.urls import include, path + +from ._datasets import endpoints as dataset_endpoints +from ._evaluations import endpoints as evaluation_endpoints +from ._organizers import endpoints as organizer_endpoints +from ._runs import endpoints as run_endpoints +from ._tasks import endpoints as task_endpoints +from ._user import endpoints as user_endpoints + +endpoints = [ + path("datasets/", include(dataset_endpoints)), + path("evaluations/", include(evaluation_endpoints)), + path("organizers/", include(organizer_endpoints)), + path("runs/", include(run_endpoints)), + path("tasks/", include(task_endpoints)), + path("user/", include(user_endpoints)), +] diff --git a/application/src/tira_app/endpoints/v1/_datasets.py b/application/src/tira_app/endpoints/v1/_datasets.py new file mode 100644 index 000000000..2d2fb8d50 --- /dev/null +++ b/application/src/tira_app/endpoints/v1/_datasets.py @@ -0,0 +1,43 @@ +from django.urls import path +from rest_framework import pagination +from rest_framework.permissions import IsAdminUser +from rest_framework.serializers import CharField, ModelSerializer +from rest_framework_json_api.views import ModelViewSet + +from ... import model as modeldb +from ._tasks import TaskSerializer + + +class DatasetSerializer(ModelSerializer): + id = CharField(source="dataset_id") + default_task = TaskSerializer() + + class Meta: + model = modeldb.Dataset + fields = [ + "id", + "default_task", + "display_name", + "evaluator", + "is_confidential", + "is_deprecated", + "data_server", + "released", + "default_upload_name", + "created", + "last_modified", + ] + + +class _DatasetView(ModelViewSet): + queryset = modeldb.Dataset.objects.all() + serializer_class = DatasetSerializer + pagination_class = pagination.CursorPagination + lookup_field = "dataset_id" + permission_classes = [IsAdminUser] # TODO: set to something sensible + + +endpoints = [ + path("", _DatasetView.as_view({"get": "list"})), + path("/", _DatasetView.as_view({"get": "retrieve", "delete": "destroy"})), +] diff --git a/application/src/tira_app/endpoints/v1/_evaluations.py b/application/src/tira_app/endpoints/v1/_evaluations.py new file mode 100644 index 000000000..08b280a73 --- /dev/null +++ b/application/src/tira_app/endpoints/v1/_evaluations.py @@ -0,0 +1,25 @@ +from django.urls import path +from rest_framework import pagination +from rest_framework.permissions import IsAdminUser +from rest_framework.serializers import ModelSerializer +from rest_framework_json_api.views import ModelViewSet + +from ... 
import model as modeldb + + +class EvaluationSerializer(ModelSerializer): + class Meta: + model = modeldb.Evaluation + fields = ["measure_key", "measure_value", "evaluator", "run"] + + +class _EvaluationView(ModelViewSet): + queryset = modeldb.Evaluation.objects.all() + serializer_class = EvaluationSerializer + pagination_class = pagination.CursorPagination + permission_classes = [IsAdminUser] # TODO: set to something sensible + + +endpoints = [ + path("", _EvaluationView.as_view({"get": "list"})), +] diff --git a/application/src/tira_app/endpoints/v1/_organizers.py b/application/src/tira_app/endpoints/v1/_organizers.py new file mode 100644 index 000000000..fdc100350 --- /dev/null +++ b/application/src/tira_app/endpoints/v1/_organizers.py @@ -0,0 +1,49 @@ +from django.urls import path +from rest_framework import pagination +from rest_framework.permissions import IsAdminUser +from rest_framework.serializers import CharField, ModelSerializer +from rest_framework_json_api.views import ModelViewSet + +from ... import model as modeldb + + +class OrganizerSerializer(ModelSerializer): + id = CharField(source="organizer_id") + website = CharField(source="web") + + class Meta: + model = modeldb.Organizer + fields = ["id", "name", "years", "website"] + + +# TODO: creating an organizer should behave like: admin_add_organizer +# TODO: editing an organizer should behave like: admin_edit_organizer + + +class _OrganizerView(ModelViewSet): + queryset = modeldb.Organizer.objects.all() + serializer_class = OrganizerSerializer + pagination_class = pagination.CursorPagination + permission_classes = [IsAdminUser] # TODO: set to something sensible + lookup_field = "organizer_id" + + filterset_fields = { + "name": ( + "exact", + "contains", + ), + "web": ( + "exact", + "contains", + ), + "years": ( + "exact", + "contains", + ), + } + + +endpoints = [ + path("", _OrganizerView.as_view({"get": "list", "post": "create"})), + path("/", _OrganizerView.as_view({"get": "retrieve", "delete": "destroy"})), +] diff --git a/application/src/tira_app/endpoints/v1/_runs.py b/application/src/tira_app/endpoints/v1/_runs.py new file mode 100644 index 000000000..cea1958c9 --- /dev/null +++ b/application/src/tira_app/endpoints/v1/_runs.py @@ -0,0 +1,62 @@ +from django.urls import path +from rest_framework import pagination +from rest_framework.generics import RetrieveAPIView +from rest_framework.permissions import IsAdminUser +from rest_framework.serializers import CharField, ModelSerializer, Serializer +from rest_framework_json_api.views import ModelViewSet + +from ... 
import model as modeldb + + +class _RunSerializer(Serializer): + id = CharField(source="run_id") + + class Meta: + model = modeldb.Run + fields = ["id", "downloadable", "deleted"] + + +class _ReviewSerializer(ModelSerializer): + run_id = CharField(source="run") + + class Meta: + model = modeldb.Review + fields = [ + "run_id", + "reviewer_id", + "review_date", + "no_errors", + "missing_output", + "extraneous_output", + "invalid_output", + "has_error_output", + "other_errors", + "comment", + "has_errors", + "has_warnings", + "has_no_errors", + "published", + "blinded", + ] + + +class _RunView(ModelViewSet): + queryset = modeldb.Run.objects.all() + serializer_class = _RunSerializer + pagination_class = pagination.CursorPagination + lookup_field = "run_id" + permission_classes = [IsAdminUser] # TODO: set to something sensible + + +class _ReviewDetailView(RetrieveAPIView): + queryset = modeldb.Review + serializer_class = _ReviewSerializer + lookup_field = "run" + permission_classes = [IsAdminUser] # TODO: set to something sensible + + +endpoints = [ + path("", _RunView.as_view({"get": "list"})), + path("/", _RunView.as_view({"get": "retrieve", "delete": "destroy"})), + path("/review", _ReviewDetailView.as_view()), +] diff --git a/application/src/tira_app/endpoints/v1/_tasks.py b/application/src/tira_app/endpoints/v1/_tasks.py new file mode 100644 index 000000000..5cfad5916 --- /dev/null +++ b/application/src/tira_app/endpoints/v1/_tasks.py @@ -0,0 +1,74 @@ +from django.urls import path +from rest_framework import pagination +from rest_framework.permissions import IsAdminUser +from rest_framework.serializers import CharField, ModelSerializer +from rest_framework_json_api.views import ModelViewSet + +from ... import model as modeldb +from ._evaluations import EvaluationSerializer +from ._organizers import OrganizerSerializer + + +class DatasetNameOnlySerializer(ModelSerializer): + id = CharField(source="dataset_id") + + class Meta: + model = modeldb.Dataset + fields = ["id", "display_name"] + + +class TaskSerializer(ModelSerializer): + id = CharField(source="task_id") + name = CharField(source="task_name") + description = CharField(source="task_description") + organizer = OrganizerSerializer() + website = CharField(source="web") + datasets = DatasetNameOnlySerializer(source="dataset_set", many=True, required=False, read_only=True) + + class Meta: + model = modeldb.Task + fields = "__all__" + + +class RegistrationSerializer(ModelSerializer): + + class Meta: + model = modeldb.Registration + fields = "__all__" + + +class _TaskView(ModelViewSet): + queryset = modeldb.Task.objects.all() + serializer_class = TaskSerializer + pagination_class = pagination.CursorPagination + permission_classes = [IsAdminUser] # TODO: set to something sensible + lookup_field = "task_id" + + +class _EvaluationView(ModelViewSet): + serializer_class = EvaluationSerializer + pagination_class = pagination.CursorPagination + permission_classes = [IsAdminUser] # TODO: set to something sensible + lookup_field = "task_id" + + def get_queryset(self): + return modeldb.Evaluation.objects.filter(run__task=self.kwargs[self.lookup_field]) + + +class _RegistrationView(ModelViewSet): + serializer_class = RegistrationSerializer + pagination_class = pagination.CursorPagination + permission_classes = [IsAdminUser] # TODO: set to something sensible + lookup_field = "task_id" + + def get_queryset(self): + return modeldb.Registration.objects.filter(registered_on_task=self.kwargs[self.lookup_field]) + + +endpoints = [ + path("", 
_TaskView.as_view({"get": "list", "post": "create"})), + path("/", _TaskView.as_view({"get": "retrieve", "delete": "destroy"})), + path("/evaluations", _EvaluationView.as_view({"get": "list"})), + path("/registrations", _RegistrationView.as_view({"get": "list", "post": "create"})), + # path("/submissions", _SubmissionView.as_view({'get': 'list', 'post': 'create'})), +] diff --git a/application/src/tira_app/endpoints/v1/_user.py b/application/src/tira_app/endpoints/v1/_user.py new file mode 100644 index 000000000..7c74362f5 --- /dev/null +++ b/application/src/tira_app/endpoints/v1/_user.py @@ -0,0 +1,14 @@ +from django.urls import path +from rest_framework.decorators import api_view +from rest_framework.request import Request +from rest_framework.response import Response + + +@api_view(["GET"]) +def user_endpoint(request: Request) -> Response: + return Response({"username": request.user.username, "groups": request.user.groups}) + + +endpoints = [ + path("", user_endpoint), +] diff --git a/application/src/tira_app/endpoints/vm_api.py b/application/src/tira_app/endpoints/vm_api.py new file mode 100644 index 000000000..d614436e5 --- /dev/null +++ b/application/src/tira_app/endpoints/vm_api.py @@ -0,0 +1,1138 @@ +import json +import logging +from functools import wraps +from http import HTTPStatus + +from discourse_client_in_disraptor.discourse_api_client import get_disraptor_user +from django.conf import settings +from django.core.cache import cache +from django.db.utils import IntegrityError +from django.http import HttpResponseNotAllowed, JsonResponse +from django.views.decorators.csrf import csrf_exempt +from grpc import RpcError, StatusCode +from markdown import markdown + +from .. import tira_model as model +from ..authentication import auth +from ..checks import check_conditional_permissions, check_permissions, check_resources_exist +from ..grpc_client import GrpcClient +from ..model import EvaluationLog, TransitionLog +from ..util import get_tira_id, link_to_discourse_team, reroute_host +from ..views import add_context + +include_navigation = False + +logger = logging.getLogger("tira") +logger.info("ajax_routes: Logger active") + + +def host_call(func): + """This is a decorator for methods that connect to a host. It handles all exceptions that can occur + in the grpc communication. It also adds a reply consistent with the return status of the grpc call.""" + + @wraps(func) + def func_wrapper(request, *args, **kwargs): + try: + response = func(request, *args, **kwargs) + except RpcError as e: + ex_message = "FAILED" + try: + logger.exception(f"{request.get_full_path()}: connection failed with {e}") + if e.code() == StatusCode.UNAVAILABLE: # .code() is implemented by the _channel._InteractiveRpcError + logger.exception(f"Connection Unavailable: {e.debug_error_string()}") + ex_message = ( # This happens if the GRPC Server is not running + "The requested host is unavailable. If you think this is a mistake, please contact " + "your task organizer." + ) + if e.code() == StatusCode.INVALID_ARGUMENT: + logger.exception(f"Invalid Argument: {e.debug_error_string()}") + ex_message = f"Response returned with an invalid argument: {e.debug_error_string()}" # + except Exception as e2: # There is a RpcError but not an Interactive one. 
This should not happen + logger.exception(f"{request.get_full_path()}: Unexpected Exception occurred: {e2}") + ex_message = f"An unexpected exception occurred: {e2}" + return JsonResponse({"status": "2", "message": ex_message}, status=HTTPStatus.INTERNAL_SERVER_ERROR) + + except Exception as e: + logger.exception(f"{request.get_full_path()}: Server Error: {e}") + return JsonResponse( + {"status": "1", "message": f"An unexpected exception occurred: {e}"}, + status=HTTPStatus.INTERNAL_SERVER_ERROR, + ) + + if response.status == 0: + return JsonResponse({"status": 0, "message": response.transactionId}, status=HTTPStatus.ACCEPTED) + if response.status == 2: + return JsonResponse( + {"status": 2, "message": f"Virtual machine not found on host: {response.message}"}, + status=HTTPStatus.NOT_FOUND, + ) + if response.status == 3: + return JsonResponse( + {"status": 1, "message": f"Virtual machine is in the wrong state for your request: {response.message}"}, + status=HTTPStatus.BAD_REQUEST, + ) + if response.status == 4: + return JsonResponse( + {"status": 1, "message": f"VM is archived: {response.message}"}, status=HTTPStatus.NOT_FOUND + ) + if response.status == 5: + return JsonResponse( + {"status": 2, "message": f"VM is not accessible: {response.message}"}, status=HTTPStatus.NOT_FOUND + ) + if response.status == 6: + return JsonResponse( + {"status": 1, "message": f"Requested input run was not found: {response.message}"}, + status=HTTPStatus.NOT_FOUND, + ) + if response.status == 7: + return JsonResponse( + {"status": 1, "message": f"Evaluation failed due to malformed run output: {response.message}"}, + status=HTTPStatus.BAD_REQUEST, + ) + if response.status == 8: + return JsonResponse( + {"status": 1, "message": f"Input malformed: {response.message}"}, status=HTTPStatus.BAD_REQUEST + ) + if response.status == 9: + return JsonResponse( + {"status": 2, "message": f"Host ist busy: {response.message}"}, status=HTTPStatus.SERVICE_UNAVAILABLE + ) + + return JsonResponse( + {"status": 2, "message": f"{response.transactionId} was rejected by the host: {response.message}"}, + status=HTTPStatus.INTERNAL_SERVER_ERROR, + ) + + return func_wrapper + + +# --------------------------------------------------------------------- +# VM actions +# --------------------------------------------------------------------- + + +@check_permissions +@check_resources_exist("json") +def vm_state(request, vm_id): + try: + state = TransitionLog.objects.get_or_create(vm_id=vm_id, defaults={"vm_state": 0})[0].vm_state + except IntegrityError as e: + logger.warning(f"failed to read state for vm {vm_id} with {e}") + state = 0 + return JsonResponse({"status": 0, "state": state}) + + +@check_permissions +@check_resources_exist("json") +def vm_running_evaluations(request, vm_id): + results = EvaluationLog.objects.filter(vm_id=vm_id) + return JsonResponse({"status": 0, "running_evaluations": True if results else False}) + + +@check_permissions +@check_resources_exist("json") +def get_running_evaluations(request, vm_id): + results = EvaluationLog.objects.filter(vm_id=vm_id) + return JsonResponse( + { + "status": 0, + "running_evaluations": [ + {"vm_id": r.vm_id, "run_id": r.run_id, "running_on": r.running_on, "last_update": r.last_update} + for r in results + ], + } + ) + + +@add_context +@check_permissions +def docker_software_details(request, context, vm_id, docker_software_id): + context["docker_software_details"] = model.get_docker_software(int(docker_software_id)) + + if "mount_hf_model" in context["docker_software_details"] and 
context["docker_software_details"]["mount_hf_model"]: + mount_hf_model = [] + for i in context["docker_software_details"]["mount_hf_model"].split(): + mount_hf_model += [{"href": f"https://huggingface.co/{i}", "display_name": i}] + + context["docker_software_details"]["mount_hf_model_display"] = mount_hf_model + + return JsonResponse({"status": 0, "context": context}) + + +@check_permissions +def huggingface_model_mounts(request, vm_id, hf_model): + from ..huggingface_hub_integration import huggingface_model_mounts, snapshot_download_hf_model + + context = {"hf_model_available": False, "hf_model_for_vm": vm_id} + + try: + context["hf_model_available"] = huggingface_model_mounts([hf_model.replace("--", "/")]) is not None + except Exception: + pass + + if not context["hf_model_available"]: + try: + snapshot_download_hf_model(hf_model) + context["hf_model_available"] = True + except Exception as e: + logger.warning(e) + return JsonResponse({"status": "1", "message": str(e)}) + + return JsonResponse({"status": 0, "context": context}) + + +@add_context +@check_permissions +def upload_group_details(request, context, task_id, vm_id, upload_id): + try: + context["upload_group_details"] = model.get_upload(task_id, vm_id, upload_id) + except Exception as e: + return JsonResponse({"status": "1", "message": f"An unexpected exception occurred: {e}"}) + + return JsonResponse({"status": 0, "context": context}) + + +@check_conditional_permissions(restricted=True) +@host_call +def vm_create(request, hostname, vm_id, ova_file): + uid = auth.get_user_id(request) + host = reroute_host(hostname) + return GrpcClient(host).vm_create(vm_id=vm_id, ova_file=ova_file, user_id=uid, hostname=host) + + +@check_permissions +@check_resources_exist("json") +@host_call +def vm_start(request, vm_id): + vm = model.get_vm(vm_id) + # NOTE vm_id is different from vm.vmName (latter one includes the 01-tira-ubuntu-... + return GrpcClient(reroute_host(vm["host"])).vm_start(vm_id=vm_id) + + +@check_permissions +@check_resources_exist("json") +@host_call +def vm_shutdown(request, vm_id): + vm = model.get_vm(vm_id) + return GrpcClient(reroute_host(vm["host"])).vm_shutdown(vm_id=vm_id) + + +@check_permissions +@check_resources_exist("json") +@host_call +def vm_stop(request, vm_id): + vm = model.get_vm(vm_id) + return GrpcClient(reroute_host(vm["host"])).vm_stop(vm_id=vm_id) + + +@check_permissions +@check_resources_exist("json") +def vm_info(request, vm_id): + vm = model.get_vm(vm_id) + host = reroute_host(vm["host"]) + if not host: + logger.exception(f"/grpc/{vm_id}/vm-info: connection to {host} failed, because host is empty") + return JsonResponse({"status": "Rejected", "message": "SERVER_ERROR"}, status=HTTPStatus.INTERNAL_SERVER_ERROR) + try: + grpc_client = GrpcClient(host) + response_vm_info = grpc_client.vm_info(vm_id=vm_id) + _ = TransitionLog.objects.update_or_create(vm_id=vm_id, defaults={"vm_state": response_vm_info.state}) + del grpc_client + except RpcError as e: + ex_message = "FAILED" + try: + if e.code() == StatusCode.UNAVAILABLE: # .code() is implemented by the _channel._InteractiveRpcError + logger.exception(f"/grpc/{vm_id}/vm-info: connection to {host} failed with {e}") + ex_message = "Host Unavailable" # This happens if the GRPC Server is not running + if e.code() == StatusCode.INVALID_ARGUMENT: # .code() is implemented by the _channel._InteractiveRpcError + ex_message = "VM is archived" # If there is no VM with the requested name on the host. 
+ _ = TransitionLog.objects.update_or_create(vm_id=vm_id, defaults={"vm_state": 8}) + except Exception as e2: # There is a RpcError but not an Interactive one. This should not happen + logger.exception(f"/grpc/{vm_id}/vm-info: Unexpected Exception occurred: {e2}") + return JsonResponse({"status": 1, "message": ex_message}, status=HTTPStatus.INTERNAL_SERVER_ERROR) + except Exception as e: + logger.exception(f"/grpc/{vm_id}/vm-info: connection to {host} failed with {e}") + return JsonResponse({"status": 1, "message": "SERVER_ERROR"}, status=HTTPStatus.INTERNAL_SERVER_ERROR) + + return JsonResponse( + { + "status": 0, + "context": { + "guestOs": response_vm_info.guestOs, + "memorySize": response_vm_info.memorySize, + "numberOfCpus": response_vm_info.numberOfCpus, + "sshPort": response_vm_info.sshPort, + "rdpPort": response_vm_info.rdpPort, + "host": response_vm_info.host, + "sshPortStatus": response_vm_info.sshPortStatus, + "rdpPortStatus": response_vm_info.rdpPortStatus, + "state": response_vm_info.state, + }, + } + ) + + +# --------------------------------------------------------------------- +# Software actions +# --------------------------------------------------------------------- +@check_permissions +@check_resources_exist("json") +def software_add(request, task_id, vm_id): + if request.method == "GET": + if not task_id or task_id is None or task_id == "None": + return JsonResponse({"status": 1, "message": "Please specify the associated task_id."}) + + software = model.add_software(task_id, vm_id) + if not software: + return JsonResponse( + {"status": 1, "message": "Failed to create a new Software."}, status=HTTPStatus.INTERNAL_SERVER_ERROR + ) + + context = { + "task": task_id, + "vm_id": vm_id, + "software": { + "id": software["id"], + "command": software["command"], + "working_dir": software["working_directory"], + "dataset": software["dataset"], + "creation_date": software["creation_date"], + "last_edit": software["last_edit"], + }, + } + return JsonResponse({"status": 0, "message": "ok", "context": context}) + else: + return JsonResponse({"status": 1, "message": "POST is not allowed here."}) + + +@check_permissions +@check_resources_exist("json") +def software_save(request, task_id, vm_id, software_id): + if request.method == "POST": + data = json.loads(request.body) + new_dataset = data.get("input_dataset") + if not model.dataset_exists(new_dataset): + return JsonResponse({"status": 1, "message": f"Cannot save, the dataset {new_dataset} does not exist."}) + + software = model.update_software( + task_id, + vm_id, + software_id, + data.get("command"), + data.get("working_dir"), + data.get("input_dataset"), + data.get("input_run"), + ) + + message = "failed to save software for an unknown reason" + try: + if software: + return JsonResponse( + {"status": 0, "message": f"Saved {software_id}", "last_edit": software.lastEditDate}, + status=HTTPStatus.ACCEPTED, + ) + except Exception as e: + message = str(e) + + return JsonResponse({"status": 1, "message": message}, status=HTTPStatus.BAD_REQUEST) + return JsonResponse({"status": 1, "message": "GET is not implemented for save software"}) + + +@check_permissions +@check_resources_exist("json") +def software_delete(request, task_id, vm_id, software_id): + delete_ok = model.delete_software(task_id, vm_id, software_id) + + if delete_ok: + return JsonResponse({"status": 0}, status=HTTPStatus.ACCEPTED) + else: + return JsonResponse( + { + "status": 1, + "message": "Cannot delete software, because it has a valid evaluation assigned (or it does not
exist.)", + }, + status=HTTPStatus.INTERNAL_SERVER_ERROR, + ) + + +@check_permissions +@check_resources_exist("json") +@host_call +def run_execute(request, task_id, vm_id, software_id): + vm = model.get_vm(vm_id) + software = model.get_software(task_id, vm_id, software_id=software_id) + if not model.dataset_exists(software["dataset"]): + return JsonResponse({"status": 1, "message": f'The dataset {software["dataset"]} does not exist'}) + host = reroute_host(vm["host"]) + future_run_id = get_tira_id() + grpc_client = GrpcClient(host) + response = grpc_client.run_execute( + vm_id=vm_id, + dataset_id=software["dataset"], + run_id=future_run_id, + input_run_vm_id="", + input_run_dataset_id="", + input_run_run_id=software["run"], + optional_parameters="", + task_id=task_id, + software_id=software_id, + ) + del grpc_client + return response + + +@host_call +def _master_vm_eval_call(vm_id, dataset_id, run_id, evaluator): + """Called when the evaluation is done via master vm. + This method calls the grpc client""" + host = reroute_host(evaluator["host"]) + grpc_client = GrpcClient(host) + response = grpc_client.run_eval( + vm_id=evaluator["vm_id"], + dataset_id=dataset_id, + run_id=get_tira_id(), + input_run_vm_id=vm_id, + input_run_dataset_id=dataset_id, + input_run_run_id=run_id, + optional_parameters="", + ) + del grpc_client + return response + + +def _git_runner_vm_eval_call(vm_id, dataset_id, run_id, evaluator): + """called when the evaluation is done via git runner. + This method calls the git utilities in git_runner.py to start the git CI + """ + try: + transaction_id = model.get_git_integration(dataset_id=dataset_id).run_evaluate_with_git_workflow( + evaluator["task_id"], + dataset_id, + vm_id, + run_id, + evaluator["git_runner_image"], + evaluator["git_runner_command"], + evaluator["git_repository_id"], + evaluator["evaluator_id"], + ) + except Exception as e: + return JsonResponse({"status": 1, "message": str(e)}, status=HTTPStatus.INTERNAL_SERVER_ERROR) + + return JsonResponse({"status": 0, "message": transaction_id}, status=HTTPStatus.ACCEPTED) + + +@check_conditional_permissions(private_run_ok=True) +@check_resources_exist("json") +def run_eval(request, vm_id, dataset_id, run_id): + """Get the evaluator for dataset_id from the model. + Then, send a GRPC-call to the host running the evaluator with the run data. + Then, log vm_id and run_id to the evaluation log as ongoing. 
+ """ + # check if evaluation already exists + existing_evaluations = EvaluationLog.objects.filter(run_id=run_id) + if existing_evaluations and len(existing_evaluations) > 5: + return JsonResponse( + {"status": "1", "message": "An evaluation is already in progress."}, status=HTTPStatus.PRECONDITION_FAILED + ) + + evaluator = model.get_evaluator(dataset_id) + if "is_git_runner" in evaluator and evaluator["is_git_runner"]: + ret = _git_runner_vm_eval_call(vm_id, dataset_id, run_id, evaluator) + git_runner = model.get_git_integration(dataset_id=dataset_id) + git_runner.all_running_pipelines_for_repository(evaluator["git_repository_id"], cache, force_cache_refresh=True) + + return ret + + return _master_vm_eval_call(vm_id, dataset_id, run_id, evaluator) + + +@check_conditional_permissions(private_run_ok=True) +def run_delete(request, dataset_id, vm_id, run_id): + delete_ok = model.delete_run(dataset_id, vm_id, run_id) + if delete_ok: + return JsonResponse({"status": 0}, status=HTTPStatus.ACCEPTED) + return JsonResponse( + {"status": 1, "message": f"Can not delete run {run_id} since it is used as an input run."}, + status=HTTPStatus.ACCEPTED, + ) + + +@check_permissions +@check_resources_exist("json") +@host_call +def run_abort(request, vm_id): + """ """ + vm = model.get_vm(vm_id) + host = reroute_host(vm["host"]) + + grpc_client = GrpcClient(host) + response = grpc_client.run_abort(vm_id=vm_id) + del grpc_client + return response + + +@csrf_exempt +@check_permissions +@check_resources_exist("json") +def upload(request, task_id, vm_id, dataset_id, upload_id): + if request.method == "POST": + if not dataset_id or dataset_id is None or dataset_id == "None": + return JsonResponse({"status": 1, "message": "Please specify the associated dataset."}) + + uploaded_file = request.FILES["file"] + new_run = model.add_uploaded_run(task_id, vm_id, dataset_id, upload_id, uploaded_file) + if model.git_pipeline_is_enabled_for_task(task_id, cache): + run_eval(request=request, vm_id=vm_id, dataset_id=dataset_id, run_id=new_run["run"]["run_id"]) + + return JsonResponse({"status": 0, "message": "ok", "new_run": new_run, "started_evaluation": True}) + return JsonResponse({"status": 0, "message": "ok", "new_run": new_run, "started_evaluation": False}) + else: + return JsonResponse({"status": 1, "message": "GET is not allowed here."}) + + +@check_permissions +@check_resources_exist("json") +def delete_upload(request, task_id, vm_id, upload_id): + try: + model.delete_upload(task_id, vm_id, upload_id) + return JsonResponse({"status": 0, "message": "ok"}) + except Exception as e: + logger.warning("Failed to delete upload: " + str(e)) + logger.exception(e) + return JsonResponse({"status": 0, "message": "Failed" + str(e)}) + + +@check_permissions +@check_resources_exist("json") +def add_upload(request, task_id, vm_id): + if request.method == "GET": + if not task_id or task_id is None or task_id == "None": + return JsonResponse({"status": 1, "message": "Please specify the associated task_id."}) + rename_to = request.GET.get("rename_to", None) + rename_to = None if not rename_to or not rename_to.strip() else rename_to + + upload = model.add_upload(task_id, vm_id, rename_to) + if not upload: + return JsonResponse( + {"status": 1, "message": "Failed to create a new Upload."}, status=HTTPStatus.INTERNAL_SERVER_ERROR + ) + + context = {"task": task_id, "vm_id": vm_id, "upload": upload} + return JsonResponse({"status": 0, "message": "ok", "context": context}) + else: + return JsonResponse({"status": 1, "message": "POST is not 
allowed here."}) + + +@csrf_exempt +@check_permissions +@check_resources_exist("json") +def docker_software_add(request, task_id, vm_id): + if request.method == "POST": + if not task_id or task_id is None or task_id == "None": + return JsonResponse({"status": 1, "message": "Please specify the associated task_id."}) + + data = json.loads(request.body) + if not data.get("image"): + return JsonResponse({"status": 1, "message": "Please specify the associated docker image."}) + + if not data.get("command"): + return JsonResponse({"status": 1, "message": "Please specify the associated docker command."}) + + submission_git_repo = None + build_environment = None + if data.get("code_repository_id"): + submission_git_repo = model.model.get_submission_git_repo_or_none( + data.get("code_repository_id"), vm_id, True + ) + + if not submission_git_repo: + return JsonResponse( + {"status": 1, "message": f"The code repository '{data.get('code_repository_id'):}' does not exist."} + ) + + if not data.get("build_environment"): + return JsonResponse( + {"status": 1, "message": "Please specify the build_environment for linking the code."} + ) + + build_environment = json.dumps(data.get("build_environment")) + + new_docker_software = model.add_docker_software( + task_id, + vm_id, + data.get("image"), + data.get("command"), + data.get("inputJob", None), + submission_git_repo, + build_environment, + ) + + if data.get("mount_hf_model"): + try: + from ..huggingface_hub_integration import huggingface_model_mounts + + mounts = huggingface_model_mounts(data.get("mount_hf_model")) + model.add_docker_software_mounts(new_docker_software, mounts) + + except Exception as e: + return JsonResponse({"status": 1, "message": str(e)}) + + return JsonResponse({"status": 0, "message": "ok", "context": new_docker_software}) + else: + return JsonResponse({"status": 1, "message": "GET is not allowed here."}) + + +@check_permissions +@check_resources_exist("json") +def docker_software_save(request, task_id, vm_id, docker_software_id): + if request.method == "POST": + try: + data = json.loads(request.body) + model.update_docker_software_metadata( + docker_software_id, + data.get("display_name"), + data.get("description"), + data.get("paper_link"), + data.get("ir_re_ranker", False), + data.get("ir_re_ranking_input", False), + ) + return JsonResponse({"status": 0, "message": "Software edited successfully"}) + except Exception as e: + return JsonResponse({"status": 1, "message": f"Error while editing software: {e}"}) + return JsonResponse({"status": 1, "message": "GET is not implemented for edit software"}) + + +@check_permissions +def add_software_submission_git_repository(request, task_id, vm_id): + if request.method != "POST": + return JsonResponse({"status": 1, "message": "GET is not implemented for edit upload"}) + + try: + data = json.loads(request.body) + external_owner = data["external_owner"] + private = not data.get("allow_public_repo", False) + disraptor_user = get_disraptor_user(request, allow_unauthenticated_user=False) + + if not disraptor_user or not isinstance(disraptor_user, str): + return JsonResponse({"status": 1, "message": "Please authenticate."}) + + if not model.github_user_exists(external_owner): + return JsonResponse( + {"status": 1, "message": f"The user '{external_owner}' does not exist on Github, maybe a typo?"} + ) + + software_submission_git_repo = model.get_submission_git_repo( + vm_id, task_id, disraptor_user, external_owner, private + ) + + return JsonResponse({"status": 0, "context": 
software_submission_git_repo}) + except Exception as e: + logger.exception(e) + logger.warning("Error while adding your git repository: " + str(e)) + return JsonResponse({"status": 1, "message": f"Error while adding your git repository: {e}"}) + + +@check_permissions +def get_token(request, vm_id): + disraptor_user = get_disraptor_user(request, allow_unauthenticated_user=False) + + if not disraptor_user or not isinstance(disraptor_user, str): + return JsonResponse({"status": 1, "message": "Please authenticate."}) + + try: + return JsonResponse( + {"status": 0, "context": {"token": model.get_discourse_token_for_user(vm_id, disraptor_user)}} + ) + except Exception: + return JsonResponse( + {"status": 1, "message": "Could not extract the discourse/disraptor user, please authenticate."} + ) + + +@check_permissions +def get_software_submission_git_repository(request, task_id, vm_id): + try: + if task_id not in settings.CODE_SUBMISSION_REFERENCE_REPOSITORIES or not model.load_docker_data( + task_id, vm_id, cache, force_cache_refresh=False + ): + return JsonResponse({"status": 0, "context": {"disabled": True}}) + + return JsonResponse({"status": 0, "context": model.get_submission_git_repo(vm_id, task_id)}) + except Exception as e: + logger.exception(e) + logger.warning("Error while getting your git repository: " + str(e)) + return JsonResponse({"status": 1, "message": f"Error while getting your git repository: {e}"}) + + +@check_permissions +@check_resources_exist("json") +def upload_save(request, task_id, vm_id, upload_id): + if request.method == "POST": + try: + data = json.loads(request.body) + model.update_upload_metadata( + task_id, vm_id, upload_id, data.get("display_name"), data.get("description"), data.get("paper_link") + ) + return JsonResponse({"status": 0, "message": "Software edited successfully"}) + except Exception as e: + logger.exception(e) + logger.warning("Error while editing upload: " + str(e)) + return JsonResponse({"status": 1, "message": f"Error while editing upload: {e}"}) + return JsonResponse({"status": 1, "message": "GET is not implemented for edit upload"}) + + +@check_permissions +@check_resources_exist("json") +def docker_software_delete(request, task_id, vm_id, docker_software_id): + delete_ok = model.delete_docker_software(task_id, vm_id, docker_software_id) + + if delete_ok: + return JsonResponse({"status": 0}, status=HTTPStatus.ACCEPTED) + else: + return JsonResponse( + { + "status": 1, + "message": ( + "Cannot delete docker software, because it has a valid evaluation assigned (or it does not exist.)" + ), + }, + status=HTTPStatus.INTERNAL_SERVER_ERROR, + ) + + +def __normalize_command(cmd, evaluator): + to_normalize = { + "inputRun": "/tira-data/input-run", + "outputDir": "/tira-data/output", + "inputDataset": "/tira-data/input", + } + + if "inputRun" in cmd and evaluator: + to_normalize["outputDir"] = "/tira-data/eval_output" + to_normalize["inputDataset"] = "/tira-data/input_truth" + + for k, v in to_normalize.items(): + cmd = cmd.replace("$" + k, v).replace("${" + k + "}", v) + + return cmd + + +def construct_verbosity_output(image, command, approach, task, dataset): + command = __normalize_command(command, "") + return { + "tira_run_export": f"tira-run --export-dataset {task}/{dataset} --output-directory tira-dataset", + "cli_command": "tira-run \\\n --input-directory tira-dataset \\\n --output-directory tira-output \\\n " + + "--approach '" + + approach + + "'", + "python_command": f'tira.run("{approach}", "tira-dataset")', + "docker_command": ( + "docker 
run --rm -ti \\\n -v ${PWD}/tira-dataset:/tira-data/input:ro \\\n -v " + "${PWD}/tira-output:/tira-data/output:rw -\\\n -entrypoint sh " + ) + + f"\\\n t{image} \\\n -c '{command}'", + "image": image, + "command": command, + } + + +def __rendered_references(task_id, vm_id, run): + task = model.get_task(task_id) + bib_references = { + "run": "@Comment {No bib entry specified for the run, please contact the team/organizers for clarification.}", + "task": "@Comment {No bib entry specified for the task, please contact the organizers for clarification.}", + "dataset": ( + "@Comment {No bib entry specified for the dataset, please contact the organizers for clarification.}" + ), + } + markdown_references = {"run": None, "task": None, "dataset": None} + + if run["dataset"] == "antique-test-20230107-training": + markdown_references["dataset"] = ( + "[ANTIQUE](https://ir.webis.de/anthology/2020.ecir_conference-20202.21/) " + + "is a non-factoid quesiton answering dataset based on the questions and " + + "answers of Yahoo! Webscope L6." + ) + bib_references[ + "dataset" + ] = """@inproceedings{Hashemi2020Antique, + title = {ANTIQUE: A Non-Factoid Question Answering Benchmark}, + author = {Helia Hashemi and Mohammad Aliannejadi and Hamed Zamani and Bruce Croft}, + booktitle = {ECIR}, + year = {2020} +}""" + + if task_id == "ir-benchmarks": + markdown_references["task"] = ( + "[TIRA](https://webis.de/publications?q=TIRA#froebe_2023b) " + + "respectively [TIREx](https://webis.de/publications#froebe_2023e) " + + "is used to enable reprodicible and blinded experiments." + ) + bib_references[ + "task" + ] = """@InProceedings{froebe:2023b, + address = {Berlin Heidelberg New York}, + author = {Maik Fr{\"o}be and Matti Wiegmann and Nikolay Kolyada and Bastian Grahm and Theresa Elstner and Frank Loebe and Matthias Hagen and Benno Stein and Martin Potthast}, + booktitle = {Advances in Information Retrieval. 45th European Conference on {IR} Research ({ECIR} 2023)}, + doi = {10.1007/978-3-031-28241-6_20}, + editor = {Jaap Kamps and Lorraine Goeuriot and Fabio Crestani and Maria Maistro and Hideo Joho and Brian Davis and Cathal Gurrin and Udo Kruschwitz and Annalina Caputo}, + month = apr, + pages = {236--241}, + publisher = {Springer}, + series = {Lecture Notes in Computer Science}, + site = {Dublin, Irland}, + title = {{Continuous Integration for Reproducible Shared Tasks with TIRA.io}}, + todo = {pages, code}, + url = {https://doi.org/10.1007/978-3-031-28241-6_20}, + year = 2023 +} + +@InProceedings{froebe:2023e, + author = {Maik Fr{\"o}be and {Jan Heinrich} Reimer and Sean MacAvaney and Niklas Deckers and Simon Reich and Janek Bevendorff and Benno Stein and Matthias Hagen and Martin Potthast}, + booktitle = {46th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR 2023)}, + month = jul, + publisher = {ACM}, + site = {Taipei, Taiwan}, + title = {{The Information Retrieval Experiment Platform}}, + todo = {annote, doi, editor, pages, url, videourl}, + year = 2023 +}""" + + if run["software"] == "MonoT5 3b (tira-ir-starter-gygaggle)": + markdown_references["run"] = ( + "The implementation of [MonoT5](https://arxiv.org/abs/2101.05667) in" + " [PyGaggle](https://ir.webis.de/anthology/2021.sigirconf_conference-2021.304/)." 
+ ) + bib_references[ + "run" + ] = """@article{DBLP:journals/corr/abs-2101-05667, + author = {Ronak Pradeep and Rodrigo Frassetto Nogueira and Jimmy Lin}, + title = {The Expando-Mono-Duo Design Pattern for Text Ranking with Pretrained Sequence-to-Sequence Models}, + journal = {CoRR}, + volume = {abs/2101.05667}, + year = {2021}, + url = {https://arxiv.org/abs/2101.05667}, + eprinttype = {arXiv}, + eprint = {2101.05667}, + timestamp = {Mon, 20 Mar 2023 15:35:34 +0100}, + biburl = {https://dblp.org/rec/journals/corr/abs-2101-05667.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@inproceedings{lin-2021-pyserini, + author = {Jimmy Lin and Xueguang Ma and Sheng{-}Chieh Lin and Jheng{-}Hong Yang and Ronak Pradeep and Rodrigo Frassetto Nogueira}, + editor = {Fernando Diaz and Chirag Shah and Torsten Suel and Pablo Castells and Rosie Jones and Tetsuya Sakai}, + title = {Pyserini: {A} Python Toolkit for Reproducible Information Retrieval Research with Sparse and Dense Representations}, + booktitle = {{SIGIR} '21: The 44th International {ACM} {SIGIR} Conference on Research and Development in Information Retrieval, Virtual Event, Canada, July 11-15, 2021}, + pages = {2356--2362}, + publisher = {{ACM}}, + year = {2021}, + url = {https://doi.org/10.1145/3404835.3463238}, + doi = {10.1145/3404835.3463238}, + timestamp = {Mon, 20 Mar 2023 15:35:34 +0100}, + biburl = {https://dblp.org/rec/conf/sigir/LinMLYPN21.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +}""" + + if run["software"] == "DLH (tira-ir-starter-pyterrier)": + markdown_references["run"] = ( + "The implementation of [DLH](https://ir.webis.de/anthology/2006.ecir_conference-2006.3/) in" + " [PyTerrier](https://ir.webis.de/anthology/2021.cikm_conference-2021.533/)." + ) + bib_references[ + "run" + ] = """@inproceedings{amati-2006-frequentist, + author = {Giambattista Amati}, + editor = {Mounia Lalmas and Andy MacFarlane and Stefan M. R{\"{u}}ger and Anastasios Tombros and Theodora Tsikrika and Alexei Yavlinsky}, + title = {Frequentist and Bayesian Approach to Information Retrieval}, + booktitle = {Advances in Information Retrieval, 28th European Conference on {IR} Research, {ECIR} 2006, London, UK, April 10-12, 2006, Proceedings}, + series = {Lecture Notes in Computer Science}, + volume = {3936}, + pages = {13--24}, + publisher = {Springer}, + year = {2006}, + url = {https://doi.org/10.1007/11735106\\_3}, + doi = {10.1007/11735106\\_3}, + timestamp = {Tue, 14 May 2019 10:00:37 +0200}, + biburl = {https://dblp.org/rec/conf/ecir/Amati06.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@inproceedings{macdonald-2021-pyterrier, + author = {Craig Macdonald and Nicola Tonellotto and Sean MacAvaney and Iadh Ounis}, + editor = {Gianluca Demartini and Guido Zuccon and J. 
Shane Culpepper and Zi Huang and Hanghang Tong}, + title = {PyTerrier: Declarative Experimentation in Python from {BM25} to Dense + Retrieval}, + booktitle = {{CIKM} '21: The 30th {ACM} International Conference on Information and Knowledge Management, Virtual Event, Queensland, Australia, November 1 - 5, 2021}, + pages = {4526--4533}, + publisher = {{ACM}}, + year = {2021}, + url = {https://doi.org/10.1145/3459637.3482013}, + doi = {10.1145/3459637.3482013}, + timestamp = {Tue, 02 Nov 2021 12:01:17 +0100}, + biburl = {https://dblp.org/rec/conf/cikm/MacdonaldTMO21.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +}""" + + print(run) + ret_bib = "" + ret_markdown = ["Please cite the approach / resources if you use them. Potential candidates are:"] + missing_references = [] + for t in ["run", "dataset", "task"]: + ret_bib += bib_references[t] + "\n\n" + if markdown_references[t]: + ret_markdown += [markdown_references[t]] + else: + missing_references += [t] + + if missing_references: + ret_markdown += [ + "There are missing references for " + + ", ".join(missing_references) + + ". " + + "Please contact the organizers " + + f'[{task["organizer"]}](https://www.tira.io/g/tira_org_{task["organizer_id"]}) or the team ' + + f"[{vm_id}]({link_to_discourse_team(vm_id)}) for clarification." + ] + + return ret_bib.strip(), markdown("
    ".join(ret_markdown).strip()) + + +@check_permissions +@check_resources_exist("json") +def run_details(request, task_id, vm_id, run_id): + run = model.get_run(dataset_id=None, vm_id=vm_id, run_id=run_id) + software, docker_software, run_upload = None, None, None + vm_id_from_run = None + + repro_details = {"tira-run-export": None, "tira-run-cli": None, "tira-run-python": None, "docker": None} + + if "software_id" in run and run["software_id"]: + software = model.get_software(software) + vm_id_from_run = software["vm"] + elif "docker_software_id" in run and run["docker_software_id"]: + docker_software = model.get_docker_software(run["docker_software_id"]) + print(docker_software) + vm_id_from_run = docker_software["vm_id"] + + if docker_software["public_image_name"]: + repro_details = construct_verbosity_output( + docker_software["public_image_name"], + docker_software["command"], + task_id + "/" + vm_id + "/" + docker_software["display_name"], + task_id, + run["dataset"], + ) + + elif "upload_id" in run and run["upload_id"]: + from .. import model as modeldb + + run_upload = modeldb.Upload.objects.filter(vm__vm_id=vm_id, id=run["upload_id"]).get() + vm_id_from_run = run_upload.vm.vm_id + + if not vm_id_from_run or vm_id != vm_id_from_run: + return HttpResponseNotAllowed("Access forbidden.") + + ret = { + "description": "No description is available.", + "previous_stage": None, + "cli_command": None, + "docker_command": None, + "python_command": None, + } + + ret["references_bibtex"], ret["references_markdown"] = __rendered_references(task_id, vm_id, run) + + for k, v in repro_details.items(): + ret[k] = v + + return JsonResponse({"status": 0, "context": ret}) + + +@check_permissions +@check_resources_exist("json") +def software_details(request, task_id, vm_id, software_name): + docker_software = model.get_docker_software_by_name(software_name, vm_id, task_id) + + if not docker_software: + return JsonResponse({"status": 0, "message": f'Could not find a software with name "{software_name}"'}) + + repro_details = { + "tira-run-export": None, + "tira-run-cli": None, + "tira-run-python": None, + "docker": None, + "image": None, + "command": None, + } + if docker_software["public_image_name"]: + repro_details = construct_verbosity_output( + docker_software["public_image_name"], + docker_software["command"], + task_id + "/" + vm_id + "/" + docker_software["display_name"], + task_id, + "", + ) + + ret = { + "description": "No description is available.", + "previous_stage": None, + "cli_command": "TBD cli.", + "docker_command": "TBD docker.", + "python_command": "TBD python.", + } + + for k, v in repro_details.items(): + ret[k] = v + + return JsonResponse({"status": 0, "context": ret}) + + +@check_permissions +@check_resources_exist("json") +def run_execute_docker_software( + request, task_id, vm_id, dataset_id, docker_software_id, docker_resources, rerank_dataset=None +): + if not task_id or task_id is None or task_id == "None": + return JsonResponse({"status": 1, "message": "Please specify the associated task_id."}) + + if not vm_id or vm_id is None or vm_id == "None": + return JsonResponse({"status": 1, "message": "Please specify the associated vm_id."}) + + if not docker_software_id or docker_software_id is None or docker_software_id == "None": + return JsonResponse({"status": 1, "message": "Please specify the associated docker_software_id."}) + + docker_software = model.get_docker_software(docker_software_id) + + if not docker_software: + return JsonResponse({"status": 1, "message": f"There 
is no docker image with id {docker_software_id}"}) + + input_run = None + if ( + "ir_re_ranker" in docker_software + and docker_software.get("ir_re_ranker", False) + and rerank_dataset + and rerank_dataset.lower() != "none" + ): + reranking_datasets = model.get_all_reranking_datasets() + + if rerank_dataset not in reranking_datasets: + background_process = None + try: + background_process = model.create_re_rank_output_on_dataset( + task_id, vm_id, software_id=None, docker_software_id=docker_software_id, dataset_id=dataset_id + ) + except Exception as e: + logger.warning(e) + + visit_job_message = "Failed to start job." + + # TODO: what is a new URL for that? + if background_process: + visit_job_message = ( + f"Please visit https://tira.io/background_jobs/{task_id}/{background_process} " + + " to view the progress of the job that creates the rerank output." + ) + + return JsonResponse( + { + "status": 1, + "message": ( + f"The execution of your software depends on the reranking dataset {rerank_dataset}" + f", but {rerank_dataset} was never executed on the dataset {dataset_id}. " + "Please execute first the software on the specified dataset so that you can re-rank it. " + f"{visit_job_message}" + ), + } + ) + + input_run = reranking_datasets[rerank_dataset] + input_run["replace_original_dataset"] = True + + if dataset_id != input_run["dataset_id"]: + return JsonResponse( + { + "status": 1, + "message": "There seems to be a configuration error:" + + f" The reranking dataset {input_run['dataset_id']} is not" + + f" the specified dataset {dataset_id}.", + } + ) + + assert dataset_id == input_run["dataset_id"] + + if not dataset_id or dataset_id is None or dataset_id == "None": + return JsonResponse({"status": 1, "message": "Please specify the associated dataset_id."}) + + evaluator = model.get_evaluator(dataset_id) + + if ( + not evaluator + or "is_git_runner" not in evaluator + or not evaluator["is_git_runner"] + or "git_runner_image" not in evaluator + or not evaluator["git_runner_image"] + or "git_runner_command" not in evaluator + or not evaluator["git_runner_command"] + or "git_repository_id" not in evaluator + or not evaluator["git_repository_id"] + ): + return JsonResponse( + {"status": 1, "message": "The dataset is misconfigured. Docker-execute only available for git-evaluators"} + ) + + input_runs, errors = model.get_ordered_input_runs_of_software(docker_software, task_id, dataset_id, vm_id) + + if errors: + return JsonResponse({"status": 1, "message": errors[0]}) + + git_runner = model.get_git_integration(task_id=task_id) + git_runner.run_docker_software_with_git_workflow( + task_id, + dataset_id, + vm_id, + get_tira_id(), + evaluator["git_runner_image"], + evaluator["git_runner_command"], + evaluator["git_repository_id"], + evaluator["evaluator_id"], + docker_software["tira_image_name"], + docker_software["command"], + "docker-software-" + docker_software_id, + docker_resources, + input_run if input_run else input_runs, + docker_software.get("mount_hf_model", None), + docker_software.get("tira_image_workdir", None), + ) + + running_pipelines = git_runner.all_running_pipelines_for_repository( + evaluator["git_repository_id"], cache, force_cache_refresh=True + ) + print( + "Refreshed Cache for repo " + + str(evaluator["git_repository_id"]) + + " with " + + str(len(running_pipelines)) + + " jobs." 
+ ) + + return JsonResponse({"status": 0}, status=HTTPStatus.ACCEPTED) + + +@check_permissions +def stop_docker_software(request, task_id, user_id, run_id): + if not request.method == "GET": + return JsonResponse({"status": 1, "message": "Only GET is allowed here"}) + else: + datasets = model.get_datasets_by_task(task_id) + git_runner = model.get_git_integration(task_id=task_id) + + if not git_runner: + return JsonResponse({"status": 1, "message": f"No git integration found for task {task_id}"}) + + for dataset in datasets: + git_runner.stop_job_and_clean_up( + model.get_evaluator(dataset["dataset_id"])["git_repository_id"], user_id, run_id, cache + ) + + return JsonResponse({"status": 0, "message": "Run successfully stopped"}) diff --git a/application/src/tira_app/git_runner.py b/application/src/tira_app/git_runner.py new file mode 100644 index 000000000..df8207aee --- /dev/null +++ b/application/src/tira_app/git_runner.py @@ -0,0 +1,73 @@ +import logging + +from django.conf import settings + +logger = logging.getLogger("tira") + + +def all_git_runners(): + from .tira_model import model + + ret = [] + for git_integration in model.all_git_integrations(return_dict=True): + try: + ret += [get_git_runner(git_integration)] + except Exception: + print(f"Could not load git integration: {git_integration}. Skip") + logger.warn(f"Could not load git integration: {git_integration}. Skip") + + return ret + + +def check_that_git_integration_is_valid(namespace_url, private_token): + from . import model as modeldb + from .tira_model import model + + git_integration = {"namespace_url": namespace_url, "private_token": private_token} + + try: + git_integration = modeldb.GitIntegration.objects.get(namespace_url=namespace_url) + git_integration = model._git_integration_to_dict(git_integration) + git_integration["private_token"] = private_token + except Exception: + pass + + try: + git_runner = get_git_runner(git_integration) + + if not git_runner: + return (False, "Invalid Parameters.") + + all_user_repositories = git_runner.all_user_repositories() + if all_user_repositories is not None and len(all_user_repositories) >= 0: + return (True, "The git credentials are valid (tested by counting repositories).") + else: + return (False, "The git credentials are not valid (tested by counting repositories).") + except Exception as e: + return (False, f"The Git credentials are not valid: {e}") + + +def get_git_runner(git_integration): + from .git_runner_integration import GithubRunner, GitLabRunner + + if not git_integration or "namespace_url" not in git_integration: + return None + + if "github.com" in git_integration["namespace_url"]: + return GithubRunner(git_integration["private_token"]) + else: + return GitLabRunner( + git_integration["private_token"], + git_integration["host"], + git_integration["user_name"], + git_integration["user_password"], + git_integration["gitlab_repository_namespace_id"], + git_integration["image_registry_prefix"], + git_integration["user_repository_branch"], + ) + + +def get_git_runner_for_software_integration(): + from .git_runner_integration import GithubRunner + + return GithubRunner(settings.GITHUB_TOKEN) diff --git a/application/src/tira_app/git_runner_integration.py b/application/src/tira_app/git_runner_integration.py new file mode 100644 index 000000000..213cdca21 --- /dev/null +++ b/application/src/tira_app/git_runner_integration.py @@ -0,0 +1,1506 @@ +import json +import logging +import os +import shutil +import stat +import string +import tempfile +from copy import deepcopy 
+from datetime import datetime as dt +from glob import glob +from itertools import chain +from pathlib import Path + +import gitlab +import markdown +import requests +from django.conf import settings +from django.template.loader import render_to_string +from git import Repo +from github import Github +from slugify import slugify +from tqdm import tqdm + +from .grpc_client import new_transaction +from .model import EvaluationLog, TransactionLog + +logger = logging.getLogger("tira") + + +def normalize_file(file_content, tira_user_name, task_id): + default_datasets = { + "webpage-classification": "webpage-classification/tiny-sample-20231023-training", + "ir-lab-jena-leipzig-wise-2023": "workshop-on-open-web-search/retrieval-20231027-training", + "ir-lab-jena-leipzig-sose-2023": "workshop-on-open-web-search/retrieval-20231027-training", + "workshop-on-open-web-search": "workshop-on-open-web-search/retrieval-20231027-training", + "ir-benchmarks": "workshop-on-open-web-search/retrieval-20231027-training", + } + + return ( + file_content.replace("TIRA_USER_FOR_AUTOMATIC_REPLACEMENT", tira_user_name) + .replace("TIRA_TASK_ID_FOR_AUTOMATIC_REPLACEMENT", task_id) + .replace("TIRA_DATASET_FOR_AUTOMATIC_REPLACEMENT", default_datasets.get(task_id, "")) + ) + + +def convert_size(size_bytes): + import math + + if size_bytes == 0: + return "0B" + size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") + i = int(math.floor(math.log(size_bytes, 1024))) + p = math.pow(1024, i) + s = round(size_bytes / p, 2) + + return f"{s} {size_name[i]}" + + +def write_to_file(file_name, content): + open(file_name, "w").write(content) + + +class GitRunner: + def create_task_repository(self, task_id): + """ + Create the repository with the name "task_id" in the organization. + An organization has task repositories (execute and evaluate submissions) + and multiple user repositories (hosts docker images). + Does nothing, if the repository already exists. 
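+ Initializes the new repository with a README.md, the CI template (see template_ci_file_name), and an executable ./tira helper script, and returns the id of the new or already existing repository.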
+ + Parameters + ---------- + task_id: str + Name of the task repository + """ + logger.info(f"Creating task repository for task {task_id} ...") + repo = self.existing_repository(task_id) + if repo: + return int(repo.id) + + project = self._create_task_repository_on_gitHoster(task_id) + + with tempfile.TemporaryDirectory() as tmp_dir: + repo = Repo.init(tmp_dir) + write_to_file(str(tmp_dir) + "/" + self.template_ci_file_name(), self.template_ci()) + write_to_file(str(tmp_dir) + "/README.md", self.template_readme(task_id)) + write_to_file(str(tmp_dir) + "/tira", self.template_tira_cmd_script(project)) + os.chmod(str(tmp_dir) + "/tira", os.stat(str(tmp_dir) + "/tira").st_mode | stat.S_IEXEC) + + repo.create_remote("origin", self.repo_url(project.id)) + self.ensure_branch_is_main(repo) + repo.index.add(["README.md", self.template_ci_file_name(), "tira"]) + repo.index.commit("Initial commit") + repo.remote().push(self.user_repository_branch, o="ci.skip") + + logger.info(f"Created task repository for task {task_id} with new id {project.id}") + return project.id + + def template_ci_file_name(self): + raise ValueError("ToDo: Implement.") + + def _create_task_repository_on_gitHoster(self, task_id): + raise ValueError("ToDo: Implement.") + + def repo_url(self, repo_id): + raise ValueError("ToDo: Implement.") + + def ensure_branch_is_main(self, repo): + try: + # for some git versions we need to manually switch, may fail if the branch is already correct + repo.git.checkout("-b", self.user_repository_branch) + except Exception: + pass + + def clone_repository_and_create_new_branch(self, repo_url, branch_name, directory): + repo = Repo.clone_from(repo_url, directory, branch="main") + repo.head.reference = repo.create_head(branch_name) + + return repo + + def dict_to_key_value_file(self, d): + return "\n".join([(k + "=" + str(v)).strip() for (k, v) in d.items()]) + + def write_metadata_for_ci_job_to_repository( + self, + tmp_dir, + task_id, + transaction_id, + dataset_id, + vm_id, + run_id, + identifier, + git_runner_image, + git_runner_command, + evaluator_id, + user_image_to_execute, + user_command_to_execute, + tira_software_id, + resources, + input_run, + mount_hf_model, + workdir_in_user_image, + ): + job_dir = Path(tmp_dir) / dataset_id / vm_id / run_id + job_dir.mkdir(parents=True, exist_ok=True) + + metadata = { + # The pipeline executed first a pseudo software so the following three values are + # only dummy values so that the software runs successful. 
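+ # (The whole metadata dict below is serialized by dict_to_key_value_file into one KEY=value line per entry + # and written to <dataset_id>/<vm_id>/<run_id>/job-to-execute.txt, which the CI pipeline reads; a line could + # for example read TIRA_RUN_ID=2023-11-06-10-15-30, where the value is purely illustrative.)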
+ "TIRA_IMAGE_TO_EXECUTE": user_image_to_execute, + "TIRA_VM_ID": vm_id, + "TIRA_COMMAND_TO_EXECUTE": user_command_to_execute, + "TIRA_SOFTWARE_ID": tira_software_id, + "TIRA_DATASET_ID": dataset_id, + "TIRA_RUN_ID": run_id, + "TIRA_CPU_COUNT": str(settings.GIT_CI_AVAILABLE_RESOURCES[resources]["cores"]), + "TIRA_MEMORY_IN_GIBIBYTE": str(settings.GIT_CI_AVAILABLE_RESOURCES[resources]["ram"]), + "TIRA_GPU": str(settings.GIT_CI_AVAILABLE_RESOURCES[resources]["gpu"]), + "TIRA_DATA": str(settings.GIT_CI_AVAILABLE_RESOURCES[resources]["data"]), + "TIRA_DATASET_TYPE": "training" if "training" in dataset_id else "test", + # The actual important stuff for the evaluator: + "TIRA_TASK_ID": task_id, + "TIRA_EVALUATOR_TRANSACTION_ID": transaction_id, + "TIRA_GIT_ID": identifier, + "TIRA_EVALUATION_IMAGE_TO_EXECUTE": git_runner_image, + "TIRA_EVALUATION_COMMAND_TO_EXECUTE": git_runner_command, + "TIRA_EVALUATION_SOFTWARE_ID": evaluator_id, + } + + if mount_hf_model and isinstance(mount_hf_model, str) and len(mount_hf_model.strip()) > 0: + metadata["TIRA_MOUNT_HF_MODEL"] = mount_hf_model.strip() + + if workdir_in_user_image and isinstance(workdir_in_user_image, str) and len(workdir_in_user_image.strip()) > 0: + metadata["TIRA_WORKDIR"] = workdir_in_user_image.strip() + + if input_run and not isinstance(input_run, list): + metadata["TIRA_INPUT_RUN_DATASET_ID"] = input_run["dataset_id"] + metadata["TIRA_INPUT_RUN_VM_ID"] = input_run["vm_id"] + metadata["TIRA_INPUT_RUN_RUN_ID"] = input_run["run_id"] + if input_run.get("replace_original_dataset", False): + metadata["TIRA_INPUT_RUN_REPLACES_ORIGINAL_DATASET"] = "true" + elif input_run and isinstance(input_run, list) and len(input_run) > 0: + metadata["TIRA_INPUT_RUN_DATASET_IDS"] = json.dumps([i["dataset_id"] for i in input_run]) + metadata["TIRA_INPUT_RUN_VM_IDS"] = json.dumps([i["vm_id"] for i in input_run]) + metadata["TIRA_INPUT_RUN_RUN_IDS"] = json.dumps([i["run_id"] for i in input_run]) + + open(job_dir / "job-to-execute.txt", "w").write(self.dict_to_key_value_file(metadata)) + + def create_user_repository(self, user_name): + """ + Create the repository for user with the name "user_name" in the organization. + An organization has task repositories (execute and evaluate submissions) + and multiple user repositories (hosts docker images). + Creates an authentication token, that allows the user to upload images to this repository. + Does nothing, if the repository already exists. + + Parameters + ---------- + user_name: str + Name of the user. 
The created repository has the name tira-user-${user_name} + """ + repo = "tira-user-" + user_name + existing_repo = self.existing_repository(repo) + if existing_repo: + return existing_repo.id + + project = self._create_task_repository_on_gitHoster(repo) + + token = self._create_access_token_gitHoster(project, repo) + + self.initialize_user_repository(project.id, repo, token.token) + + return project.id + + def initialize_user_repository(self, git_repository_id, repo_name, token): + project_readme = render_to_string( + "tira/git_user_repository_readme.md", + context={ + "user_name": repo_name.replace("tira-user-", ""), + "repo_name": repo_name, + "token": token, + "image_prefix": self.image_registry_prefix + "/" + repo_name + "/", + }, + ) + + with tempfile.TemporaryDirectory() as tmp_dir: + repo = Repo.init(tmp_dir) + write_to_file(str(tmp_dir) + "/README.md", project_readme) + + repo.create_remote("origin", self.repo_url(git_repository_id)) + self.ensure_branch_is_main(repo) + repo.index.add(["README.md"]) + repo.index.commit("Initial commit") + repo.remote().push(self.user_repository_branch) + + def docker_images_in_user_repository(self, user_name, cache=None, force_cache_refresh=False): + """TODO Dane + List all docker images uploaded by the user with the name "user_name" to his user repository + + Parameters + ---------- + user_name: str + Name of the user. + + Return + ---------- + images: Iterable[str] + The images uploaded by the user. + """ + cache_key = "docker-images-in-user-repository-tira-user-" + user_name + if cache: + ret = cache.get(cache_key) + if ret is not None and not force_cache_refresh: + return ret + + ret = [] + repo = self.existing_repository("tira-user-" + user_name) + if not repo: + self.create_user_repository(user_name) + return ret + + covered_images = set() + for registry_repository in repo.repositories.list(): + for registry in registry_repository.manager.list(): + for image in registry.tags.list(get_all=True): + if image.location in covered_images: + continue + covered_images.add(image.location) + image_manifest = self.get_manifest_of_docker_image_image_repository( + image.location.split(":")[0], image.location.split(":")[1], cache, force_cache_refresh + ) + + ret += [ + { + "image": image.location, + "architecture": image_manifest["architecture"], + "created": image_manifest["created"].split(".")[0], + "size": image_manifest["size"], + "raw_size": image_manifest["raw_size"], + "digest": image_manifest["digest"], + } + ] + + ret = sorted(list(ret), key=lambda i: i["image"]) + + if cache: + logger.info(f"Cache refreshed for key {cache_key} ...") + cache.set(cache_key, ret) + + return ret + + def help_on_uploading_docker_image(self, user_name, cache=None, force_cache_refresh=False): + """TODO + Each user repository has a readme.md , that contains instructions on + how to upload images to the repository. + This method extracts those instructions from the readme and returns them. + + Parameters + ---------- + user_name: str + Name of the user. + + Return + ---------- + help: [str] + The personalized instructions on how to upload images + to be shown in the webinterface. 
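+ The instructions are rendered to HTML via markdown and cached per user.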
+ """ + cache_key = "help-on-uploading-docker-image-tira-user-" + user_name + if cache: + ret = cache.get(cache_key) + if ret is not None and not force_cache_refresh: + return ret + + repo = self.existing_repository("tira-user-" + user_name) + if not repo: + self.create_user_repository(user_name) + return self.help_on_uploading_docker_image(user_name, cache) + + # Hacky at the moment + ret = repo.files.get("README.md", ref="main").decode().decode("UTF-8").split("## Create an docker image")[1] + ret = "## Create an docker image\n\n" + ret + + ret = markdown.markdown(ret) + + if cache: + logger.info(f"Cache refreshed for key {cache_key} ...") + cache.set(cache_key, ret) + + return ret + + def add_new_tag_to_docker_image_repository(self, repository_name, existing_tag, new_tag): + """TODO Niklas + The repository with the name "repository_name" contains an docker image + with the tag "existing_tag". + This method adds the tag "new_tag" to the image with the tag "existing_tag". + + Parameters + ---------- + repository_name: str + Name of the repository with an docker image with the tag "existing_tag". + + existing_tag: str + Tag of the docker image. + + new_tag: str + The to be added tag of the docker image. + """ + raise ValueError("ToDo: Implement.") + + def extract_configuration_of_finished_job(self, git_repository_id, dataset_id, vm_id, run_id): + with tempfile.TemporaryDirectory() as tmp_dir: + self.clone_repository_and_create_new_branch(self.repo_url(git_repository_id), "dummy-br", tmp_dir) + f = glob(tmp_dir + "/" + dataset_id + "/" + vm_id + "/" + run_id + "/job-executed-on-*.txt") + + if len(f) != 1: + return None + + return open(f[0]).read() + + def all_user_repositories(self): + """ + Lists all user repositories in the organization. + + Return + ---------- + user_repositories: Iterable[str] + List of all user repositories in the organization. + """ + raise ValueError("ToDo: Implement.") + + def run_and_evaluate_user_software( + self, + task_id, + dataset_id, + user_name, + run_id, + user_software_id, + user_docker_image, + user_command, + git_repository_id, + evaluator_id, + evaluator_software_id, + evaluator_docker_image, + evaluator_command, + ): + """TODO + Execute the specified software (docker image and a command) + on a dataset and evaluate the output. + + Erzeugt neue Datei und commited diese als Trigger für Workflow/CI. + + Parameters + ---------- + task_id: str + Name of the task repository. + + dataset_id: str + Dataset on which the software is to be executed. + + user_name: str + Name of the user. The repository of the user has the name tira-user-${user_name}. + + run_id: str + Identifier of the resulting run. + + user_software_id: str + ID of the to be executed software. + (identifies docker images and command) + + user_docker_image: str + The to be execued docker image. + + user_command: str + The to be executed command in "user_docker_image". + + git_repository_id: str + Identifier of the task repository + (gitlab: int; github: ???) + + evaluator_id: str + Identifier of the resulting evaluation. + + evaluator_software_id: str + ID of the to be executed evaluation software. + (identifies the evaluation docker images and evaluation command) + + + evaluator_docker_image: str + The to be execued docker image used for evaluation. + + evaluator_command: str + The to be executed evaluation command in "evaluation_docker_image". + + Return + ---------- + transaction_id: str + ID of the running transaction. 
+ """ + raise ValueError("ToDo: Implement.") + + def stop_job_and_clean_up(self, git_repository_id, user_name, run_id): + """ + All runs that are currently running, pending, or failed + life in a dedicated branch. + Every successfully (without errors/failures and with evaluation) + executed software is merged into the main branch. + This method stops a potentially running pipeline identified by the run_id + of the user "user_id" and deletes the branch. + + Parameters + ---------- + git_repository_id: str + Identifier of the task repository. + (gitlab: int; github: int) + + user_name: str + Name of the user. The repository of the user has the name "tira-user-${user_name}". + + run_id: str + Identifier of the to be stopped run. + + Return + ---------- + - + """ + raise ValueError("ToDo: Implement.") + + def yield_all_running_pipelines(self, git_repository_id): + """TODO + Yield all pipelines/workflows that are currently running, pending, or failed. + + + Parameters + ---------- + git_repository_id: str + Identifier of the task repository. + (gitlab: int; github: int) + + Return + ---------- + jobs: Iteratable[dict] + all pipelines/workflows that are currently running, pending, or failed. + Each entry has the following fields: + 'run_id', + 'execution', + 'stdOutput', + 'started_at', + 'pipeline_name', + 'job_config', + 'pipeline' + """ + raise ValueError("ToDo: Implement.") + + def archive_software(self, working_directory, software_definition, download_images, persist_images, upload_images): + from .util import docker_image_details, run_cmd + + image = ( + software_definition["TIRA_EVALUATION_IMAGE_TO_EXECUTE"] + if "TIRA_EVALUATION_IMAGE_TO_EXECUTE" in software_definition + else software_definition["TIRA_IMAGE_TO_EXECUTE"] + ) + dockerhub_image = ( + software_definition["TIRA_IMAGE_TO_EXECUTE_IN_DOCKERHUB"] + if "TIRA_IMAGE_TO_EXECUTE_IN_DOCKERHUB" in software_definition + else None + ) + + if download_images: + print(f"Run docker pull {image}.") + run_cmd(["podman", "pull", image]) + + description = docker_image_details(image) + + Path(working_directory + "/docker-softwares").mkdir(parents=True, exist_ok=True) + image_name = working_directory + "/docker-softwares/" + description["image_id"] + ".tar" + + if persist_images and not os.path.isfile(image_name): + print(f"Run image save {image} -o {image_name}.") + run_cmd(["podman", "image", "save", image, "-o", image_name]) + + if upload_images and dockerhub_image: + run_cmd(["podman", "tag", image, dockerhub_image]) + print(f"Run image push {dockerhub_image}.") + run_cmd(["podman", "push", dockerhub_image]) + + description["local_image"] = image_name + software_definition["image_details"] = description + + return software_definition + + def archive_all_softwares(self, working_directory, download_images=True, persist_images=True, upload_images=True): + existing_software = [json.loads(i) for i in open(working_directory + "/.tira/submitted-software.jsonl", "r")] + existing_evaluators = [json.loads(i) for i in open(working_directory + "/.tira/evaluators.jsonl", "r")] + + software, evaluators = [], [] + + for s in tqdm(existing_software, "Software"): + software += [ + json.dumps(self.archive_software(working_directory, s, download_images, persist_images, upload_images)) + ] + + for e in tqdm(existing_evaluators, "Evaluators"): + evaluators += [ + json.dumps(self.archive_software(working_directory, e, download_images, persist_images, upload_images)) + ] + + open((Path(working_directory) / ".tira" / "submitted-software.jsonl").absolute(), 
"w").write( + "\n".join(software) + ) + open((Path(working_directory) / ".tira" / "evaluators.jsonl").absolute(), "w").write("\n".join(evaluators)) + + def archive_repository( + self, + repo_name, + working_directory, + copy_runs=True, + download_images=True, + persist_images=True, + upload_images=True, + persist_datasets=True, + ): + from django.template.loader import render_to_string + + from .tira_model import get_dataset, get_docker_software, get_docker_softwares_with_runs + + softwares = set() + evaluations = set() + datasets = {} + + if not os.path.isdir(working_directory + "/.git"): + repo = self.existing_repository(repo_name) + print(f"Clone repository {repo.name}. Working in {working_directory}") + repo = Repo.clone_from(self.repo_url(repo.id), working_directory, branch="main") + else: + print(f"Use existing repo in {working_directory}.") + self.archive_all_softwares(working_directory, download_images, persist_images, upload_images) + return + + Path(working_directory + "/docker-softwares").mkdir(parents=True, exist_ok=True) + + print("Exporting docker images...") + downloaded_images = set() + for job_file in tqdm( + sorted(list(glob(working_directory + "/*/*/*/job-executed-on*.txt"))), "Export Docker Images" + ): + job = [i.split("=", 1) for i in open(job_file, "r")] + job = {k.strip(): v.strip() for k, v in job} + image = job["TIRA_IMAGE_TO_EXECUTE"].strip() + + if self.image_registry_prefix.lower() not in image.lower(): + continue + + datasets[job["TIRA_DATASET_ID"]] = get_dataset(job["TIRA_DATASET_ID"]) + + try: + software_metadata = get_docker_software(int(job["TIRA_SOFTWARE_ID"].replace("docker-software-", ""))) + if copy_runs: + runs = get_docker_softwares_with_runs(job["TIRA_TASK_ID"], job["TIRA_VM_ID"]) + except Exception: + continue + + if copy_runs: + + runs = [ + i + for i in runs + if int(i["docker_software_id"]) == (int(job["TIRA_SOFTWARE_ID"].replace("docker-software-", ""))) + ] + runs = list(chain(*[i["runs"] for i in runs])) + runs = [ + i for i in runs if (i["input_run_id"] == job["TIRA_RUN_ID"] or i["run_id"] == job["TIRA_RUN_ID"]) + ] + + for run in runs: + result_out_dir = Path(job_file.split("/job-executed-on")[0]) / ( + "evaluation" if run["is_evaluation"] else "run" + ) + result_out_dir.mkdir(parents=True, exist_ok=True) + shutil.copytree( + Path(settings.TIRA_ROOT) + / "data" + / "runs" + / job["TIRA_DATASET_ID"] + / job["TIRA_VM_ID"] + / run["run_id"], + result_out_dir / run["run_id"], + ) + + image_name = (slugify(image) + ".tar").replace("/", "-") + + dockerhub_image = ( + f'docker.io/webis/{job["TIRA_TASK_ID"]}-submissions:' + + (image_name.split("-tira-user-")[1]).replace(".tar", "").strip() + ) + + downloaded_images.add(image) + softwares.add( + json.dumps( + { + "TIRA_IMAGE_TO_EXECUTE": image, + "TIRA_IMAGE_TO_EXECUTE_IN_DOCKERHUB": dockerhub_image, + "TIRA_VM_ID": job["TIRA_VM_ID"], + "TIRA_COMMAND_TO_EXECUTE": job["TIRA_COMMAND_TO_EXECUTE"], + "TIRA_TASK_ID": job["TIRA_TASK_ID"], + "TIRA_SOFTWARE_ID": job["TIRA_SOFTWARE_ID"], + "TIRA_SOFTWARE_NAME": software_metadata["display_name"], + "TIRA_IDS_OF_PREVIOUS_STAGES": ( + [] + if "input_docker_software_id" not in software_metadata + or not software_metadata["input_docker_software_id"] + else [software_metadata["input_docker_software_id"]] + ), + } + ) + ) + + evaluations.add( + json.dumps( + { + "TIRA_DATASET_ID": job["TIRA_DATASET_ID"].strip(), + "TIRA_EVALUATION_IMAGE_TO_EXECUTE": job["TIRA_EVALUATION_IMAGE_TO_EXECUTE"].strip(), + "TIRA_EVALUATION_COMMAND_TO_EXECUTE": 
job["TIRA_EVALUATION_COMMAND_TO_EXECUTE"].strip(), + } + ) + ) + + (Path(working_directory) / ".tira").mkdir(parents=True, exist_ok=True) + open((Path(working_directory) / ".tira" / "submitted-software.jsonl").absolute(), "w").write( + "\n".join(softwares) + ) + open((Path(working_directory) / ".tira" / "evaluators.jsonl").absolute(), "w").write("\n".join(evaluations)) + open((Path(working_directory) / "tira.py").absolute(), "w").write( + render_to_string("tira/tira_git_cmd.py", context={}) + ) + open((Path(working_directory) / "requirements.txt").absolute(), "w").write("docker==5.0.3\npandas\njupyterlab") + open((Path(working_directory) / "Makefile").absolute(), "w").write( + render_to_string("tira/tira_git_makefile", context={}) + ) + open((Path(working_directory) / "Tutorial.ipynb").absolute(), "w").write( + render_to_string("tira/tira_git_tutorial.ipynb", context={}) + ) + # open((Path(working_directory) / 'README.md').absolute(), 'a+').write(render_to_string('tira/tira_git_cmd.py', context={})) + + if persist_datasets: + logger.info("Archive datasets") + for dataset_name, dataset_definition in tqdm(datasets.items(), "Archive Datasets"): + if "is_confidential" in dataset_definition and not dataset_definition["is_confidential"]: + for i in ["training-datasets", "training-datasets-truth"]: + shutil.copytree( + Path(settings.TIRA_ROOT) / "data" / "datasets" / i / job["TIRA_TASK_ID"] / dataset_name, + Path(working_directory) / dataset_name / i, + ) + + self.archive_all_softwares(working_directory, download_images, persist_images, upload_images) + # logger.info(f'Archive repository into {repo_name}.zip') + # shutil.make_archive(repo_name, 'zip', working_directory) + logger.info(f"The repository is archived into {working_directory}") + + +class GitLabRunner(GitRunner): + + def __init__( + self, + private_token, + host, + user_name, + user_password, + gitlab_repository_namespace_id, + image_registry_prefix, + user_repository_branch, + ): + self.git_token = private_token + self.user_name = user_name + self.host = host + self.user_password = user_password + self.namespace_id = int(gitlab_repository_namespace_id) + self.image_registry_prefix = image_registry_prefix + self.user_repository_branch = user_repository_branch + self.gitHoster_client = gitlab.Gitlab("https://" + host, private_token=self.git_token) + # self.gitHoster_client = gitlab.Gitlab('https://' + host, private_token=json.load(open('/home/maik/.tira/.tira-settings.json'))['access_token']) + + def template_ci(self): + """ + returns the CI-Pipeline template file as string + """ + return render_to_string("tira/git_task_repository_gitlab_ci.yml", context={}) + + def template_ci_file_name(self): + return ".gitlab-ci.yml" + + def template_readme(self, task_id): + """ + returns the readme template file for Gitlab as string + """ + return render_to_string("tira/git_task_repository_readme.md", context={"task_name": task_id}) + + def template_tira_cmd_script(self, project): + return render_to_string("tira/tira_git_cmd.sh", context={"project_id": project.id, "ci_server_host": self.host}) + + def repo_url(self, git_repository_id): + project = self.gitHoster_client.projects.get(git_repository_id) + + return project.http_url_to_repo.replace(self.host, self.user_name + ":" + self.git_token + "@" + self.host) + + def get_manifest_of_docker_image_image_repository(self, repository_name, tag, cache, force_cache_refresh): + """ + Background for the implementation: + https://dille.name/blog/2018/09/20/how-to-tag-docker-images-without-pulling-them/ + 
https://gitlab.com/gitlab-org/gitlab/-/issues/23156 + """ + registry_host = self.image_registry_prefix.split("/")[0] + repository_name = repository_name.split(registry_host + "/")[-1] + + cache_key = f"docker-manifest-for-repo-{repository_name}-{tag}" + if cache: + ret = cache.get(cache_key) + if ret is not None: + return ret + + try: + token = requests.get( + f"https://{self.host}:{self.git_token}@git.webis.de/jwt/auth?client_id=docker&offline_token=true&service=container_registry&scope=repository:{repository_name}:push,pull,blob,upload" + ) + + if not token.ok: + raise ValueError(token.content.decode("UTF-8")) + + token = json.loads(token.content.decode("UTF-8"))["token"] + headers = { + "Accept": "application/vnd.docker.distribution.manifest.v2+json", + "Content-Type": "application/vnd.docker.distribution.manifest.v2+json", + "Authorization": "Bearer " + token, + } + manifest = requests.get(f"https://{registry_host}/v2/{repository_name}/manifests/{tag}", headers=headers) + + if not manifest.ok: + raise ValueError("-->" + manifest.content.decode("UTF-8")) + + image_metadata = json.loads(manifest.content.decode("UTF-8")) + raw_size = image_metadata["config"]["size"] + sum([i["size"] for i in image_metadata["layers"]]) + size = convert_size(raw_size) + + image_config = requests.get( + f'https://{registry_host}/v2/{repository_name}/blobs/{image_metadata["config"]["digest"]}', + headers=headers, + ) + + if not image_config.ok: + raise ValueError("-->" + image_config.content.decode("UTF-8")) + + image_config = json.loads(image_config.content.decode("UTF-8")) + + ret = { + "architecture": image_config["architecture"], + "created": image_config["created"], + "size": size, + "raw_size": raw_size, + "digest": image_metadata["config"]["digest"].split(":")[-1][:12], + } + except Exception as e: + logger.warn("Exception during loading of metadata for docker image", exc_info=e) + ret = { + "architecture": "Loading...", + "created": "Loading...", + "size": "Loading...", + "digest": "Loading...", + "raw_size": "Loading...", + } + + if cache: + logger.info(f"Cache refreshed for key {cache_key} ...") + cache.set(cache_key, ret) + + return ret + + def run_evaluate_with_git_workflow( + self, task_id, dataset_id, vm_id, run_id, git_runner_image, git_runner_command, git_repository_id, evaluator_id + ): + msg = f"start run_eval with git: {task_id} - {dataset_id} - {vm_id} - {run_id}" + logger.info(msg) + transaction_id = self.start_git_workflow( + task_id, + dataset_id, + vm_id, + run_id, + git_runner_image, + git_runner_command, + git_repository_id, + evaluator_id, + "ubuntu:18.04", + "echo 'No software to execute. 
Only evaluation'", + "-1", + list(settings.GIT_CI_AVAILABLE_RESOURCES.keys())[0], + None, + None, + None, + ) + + t = TransactionLog.objects.get(transaction_id=transaction_id) + _ = EvaluationLog.objects.update_or_create(vm_id=vm_id, run_id=run_id, running_on=vm_id, transaction=t) + + return transaction_id + + def run_docker_software_with_git_workflow( + self, + task_id, + dataset_id, + vm_id, + run_id, + git_runner_image, + git_runner_command, + git_repository_id, + evaluator_id, + user_image_to_execute, + user_command_to_execute, + tira_software_id, + resources, + input_run, + mount_hf_model, + workdir_in_user_image, + ): + msg = f"start run_docker_image with git: {task_id} - {dataset_id} - {vm_id} - {run_id}" + logger.info(msg) + transaction_id = self.start_git_workflow( + task_id, + dataset_id, + vm_id, + run_id, + git_runner_image, + git_runner_command, + git_repository_id, + evaluator_id, + user_image_to_execute, + user_command_to_execute, + tira_software_id, + resources, + input_run, + mount_hf_model, + workdir_in_user_image, + ) + + # TODO: add transaction to log + + return transaction_id + + def start_git_workflow( + self, + task_id, + dataset_id, + vm_id, + run_id, + git_runner_image, + git_runner_command, + git_repository_id, + evaluator_id, + user_image_to_execute, + user_command_to_execute, + tira_software_id, + resources, + input_run, + mount_hf_model, + workdir_in_user_image, + ): + msg = f"start git-workflow with git: {task_id} - {dataset_id} - {vm_id} - {run_id}" + transaction_id = new_transaction(msg, in_grpc=False) + logger.info(msg) + + identifier = f"eval---{dataset_id}---{vm_id}---{run_id}---started-{str(dt.now().strftime('%Y-%m-%d-%H-%M-%S'))}" + + with tempfile.TemporaryDirectory() as tmp_dir: + repo = self.clone_repository_and_create_new_branch(self.repo_url(git_repository_id), identifier, tmp_dir) + + self.write_metadata_for_ci_job_to_repository( + tmp_dir, + task_id, + transaction_id, + dataset_id, + vm_id, + run_id, + identifier, + git_runner_image, + git_runner_command, + evaluator_id, + user_image_to_execute, + user_command_to_execute, + tira_software_id, + resources, + input_run, + mount_hf_model, + workdir_in_user_image, + ) + + self.commit_and_push(repo, dataset_id, vm_id, run_id, identifier, git_repository_id, resources) + + t = TransactionLog.objects.get(transaction_id=transaction_id) + _ = EvaluationLog.objects.update_or_create(vm_id=vm_id, run_id=run_id, running_on=vm_id, transaction=t) + + return transaction_id + + def commit_and_push(self, repo, dataset_id, vm_id, run_id, identifier, git_repository_id, resources): + repo.index.add([str(Path(dataset_id) / vm_id / run_id / "job-to-execute.txt")]) + repo.index.commit("Evaluate software: " + identifier) + gpu_resources = str(settings.GIT_CI_AVAILABLE_RESOURCES[resources]["gpu"]).strip() + data_resources = str(settings.GIT_CI_AVAILABLE_RESOURCES[resources]["data"]).strip() + + if gpu_resources == "0" and data_resources == "no": + repo.remote().push(identifier) + else: + repo.remote().push(identifier, **{"o": "ci.skip"}) + + gl_project = self.gitHoster_client.projects.get(int(git_repository_id)) + gl_project.pipelines.create( + { + "ref": identifier, + "variables": [ + {"key": "TIRA_GPU", "value": gpu_resources}, + {"key": "TIRA_DATA", "value": data_resources}, + ], + } + ) + + def add_new_tag_to_docker_image_repository(self, repository_name, old_tag, new_tag): + """ + Background for the implementation: + https://dille.name/blog/2018/09/20/how-to-tag-docker-images-without-pulling-them/ + 
https://gitlab.com/gitlab-org/gitlab/-/issues/23156 + """ + original_repository_name = repository_name + registry_host = self.image_registry_prefix.split("/")[0] + repository_name = repository_name.split(registry_host + "/")[-1] + + token = requests.get( + f"https://{self.host}:{self.git_token}@git.webis.de/jwt/auth?client_id=docker&offline_token=true&service=container_registry&scope=repository:{repository_name}:push,pull" + ) + + if not token.ok: + raise ValueError(token.content.decode("UTF-8")) + + token = json.loads(token.content.decode("UTF-8"))["token"] + headers = { + "Accept": "application/vnd.docker.distribution.manifest.v2+json", + "Content-Type": "application/vnd.docker.distribution.manifest.v2+json", + "Authorization": "Bearer " + token, + } + + manifest = requests.get(f"https://{registry_host}/v2/{repository_name}/manifests/{old_tag}", headers=headers) + + if not manifest.ok: + raise ValueError("-->" + manifest.content.decode("UTF-8")) + manifest = manifest.content.decode("UTF-8") + + manifest = requests.put( + f"https://{registry_host}/v2/{repository_name}/manifests/{new_tag}", headers=headers, data=manifest + ) + + if not manifest.ok: + raise ValueError(manifest.content.decode("UTF-8")) + + return original_repository_name + ":" + new_tag + + def all_user_repositories(self): + """ + Lists all user repositories in the organization. + + Return + ---------- + user_repositories: Iterable[str] + List of all user repositories in the organization. + """ + + ret = [] + for potential_existing_projects in self.gitHoster_client.projects.list(search="tira-user-", get_all=True): + if ( + "tira-user-" in potential_existing_projects.name + and int(potential_existing_projects.namespace["id"]) == self.namespace_id + ): + ret += [potential_existing_projects.name] + return set(ret) + + def existing_repository(self, repo): + for potential_existing_projects in self.gitHoster_client.projects.list(search=repo): + if ( + potential_existing_projects.name == repo + and int(potential_existing_projects.namespace["id"]) == self.namespace_id + ): + return potential_existing_projects + + def clean_task_repository(self, task_id): + project = self.existing_repository(task_id) + for pipeline in project.pipelines.list(get_all=True): + print("Delete Pipeline: " + str(pipeline.id)) + if pipeline.status not in {"skipped", "canceled", "failed", "success"}: + print("Skip running pipeline " + str(pipeline.id)) + continue + pipeline.delete() + + def _create_task_repository_on_gitHoster(self, task_id): + project = self.existing_repository(task_id) + if project: + print(f'Repository found "{task_id}".') + return project + + project = self.gitHoster_client.projects.create( + {"name": task_id, "namespace_id": str(self.namespace_id), "default_branch": self.user_repository_branch} + ) + return project + + def _create_access_token_gitHoster(self, project, repo): + return project.access_tokens.create( + { + "name": repo, + "scopes": ["read_registry", "write_registry"], + "access_level": 30, + "expires_at": "2024-10-08", + } + ) + + def stop_job_and_clean_up(self, git_repository_id, user_name, run_id, cache=None): + """ + All runs that are currently running, pending, or failed + life in a dedicated branch. + Every successfully (without errors/failures and with evaluation) + executed software is merged into the main branch. + This method stops a potentially running pipeline identified by the run_id + of the user "user_id" and deletes the branch. 
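+        Branch names follow the identifier created in start_git_workflow, for example
+        "eval---<dataset_id>---<vm_id>---<run_id>---started-<YYYY-mm-dd-HH-MM-SS>",
+        so a run is matched by checking that both "---<user_name>---" and
+        "---<run_id>---" occur in the branch name.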
+ + Parameters + ---------- + git_repository_id: str + Identifier of the task repository. + (gitlab: int; github: int) + + user_name: str + Name of the user. The repository of the user has the name "tira-user-${user_name}". + + run_id: str + Identifier of the to be stopped run. + + Return + ---------- + - + """ + gl = self.gitHoster_client + gl_project = gl.projects.get(int(git_repository_id)) + + for pipeline in self.yield_all_running_pipelines(git_repository_id, user_name, cache, True): + if run_id == pipeline["run_id"]: + branch = pipeline["branch"] if "branch" in pipeline else pipeline["pipeline"].ref + if ("---" + user_name + "---") not in branch: + continue + if ("---" + run_id + "---") not in branch: + continue + + if "pipeline" in pipeline: + pipeline["pipeline"].cancel() + gl_project.branches.delete(branch) + + def yield_all_running_pipelines(self, git_repository_id, user_id, cache=None, force_cache_refresh=False): + for pipeline in self.all_running_pipelines_for_repository(git_repository_id, cache, force_cache_refresh): + pipeline = deepcopy(pipeline) + + if ("---" + user_id + "---") not in pipeline["pipeline_name"]: + continue + + if ("-training---" + user_id + "---") not in pipeline["pipeline_name"]: + pipeline["stdOutput"] = "Output for runs on the test-data is hidden." + + yield pipeline + + def all_running_pipelines_for_repository(self, git_repository_id, cache=None, force_cache_refresh=False): + cache_key = "all-running-pipelines-repo-" + str(git_repository_id) + if cache: + try: + ret = cache.get(cache_key) + if ret is not None and not force_cache_refresh: + logger.debug("get ret from cache", ret) + return ret + except Exception: + logger.exception(f"Could not find cache module {cache_key}.") + + ret = [] + gl = self.gitHoster_client + gl_project = gl.projects.get(int(git_repository_id)) + already_covered_run_ids = set() + for status in ["scheduled", "running", "pending", "created", "waiting_for_resource", "preparing"]: + for pipeline in gl_project.pipelines.list(status=status): + user_software_job = None + evaluation_job = None + for job in pipeline.jobs.list(): + if "run-user-software" == job.name: + user_software_job = job + if "evaluate-software-result" == job.name: + evaluation_job = job + logger.debug(f"TODO: pass evaluation jobs in different structure to UI: {evaluation_job}") + + p = (pipeline.ref + "---started-").split("---started-")[0] + + execution = {"scheduling": "running", "execution": "pending", "evaluation": "pending"} + if user_software_job.status == "running": + execution = {"scheduling": "done", "execution": "running", "evaluation": "pending"} + elif user_software_job.status != "created": + execution = {"scheduling": "done", "execution": "done", "evaluation": "running"} + + stdout = "Output for runs on the test-data is hidden." 
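+                # Runs on training datasets (identified by "-training---" in the pipeline name)
+                # expose their job trace below; for test datasets the output stays hidden.
+                # Illustrative example of the naming scheme (placeholder values): for
+                # ref = "eval---dataset-a-training---user-b---run-c---started-2024-01-01-12-00-00",
+                # p is "eval---dataset-a-training---user-b---run-c" and run_id becomes "run-c".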
+ if "-training---" in p: + try: + stdout = "" + user_software_job = gl_project.jobs.get(user_software_job.id) + stdout = self.clean_job_output(user_software_job.trace().decode("UTF-8")) + except Exception: + # Job is not started or similar + pass + + run_id = p.split("---")[-1] + + already_covered_run_ids.add(run_id) + job_config = self.extract_job_configuration(gl_project, pipeline.ref) + if job_config: + ret += [ + { + "run_id": run_id, + "execution": execution, + "stdOutput": stdout, + "started_at": p.split("---")[-1], + "pipeline_name": p, + "job_config": job_config, + "pipeline": pipeline, + } + ] + + ret += self.__all_failed_pipelines_for_repository(gl_project, already_covered_run_ids) + + if cache: + logger.info(f"Cache refreshed for key {cache_key} ...") + cache.set(cache_key, ret) + + return ret + + def clean_job_output(self, ret): + ret = "".join(filter(lambda x: x in string.printable, ret.strip())) + if '$ eval "${TIRA_COMMAND_TO_EXECUTE}"[0;m' in ret: + return self.clean_job_suffix(ret.split('$ eval "${TIRA_COMMAND_TO_EXECUTE}"[0;m')[1]) + elif '$ eval "${TIRA_EVALUATION_COMMAND_TO_EXECUTE}"[0;m' in ret: + return self.clean_job_suffix(ret.split('$ eval "${TIRA_EVALUATION_COMMAND_TO_EXECUTE}"[0;m')[1]) + else: + # Job not jet started. + return "" + + def clean_job_suffix(self, ret): + if "[32;1m$ env|grep 'TIRA' > task.env" in ret: + ret = ret.split("[32;1m$ env|grep 'TIRA' > task.env")[0] + if "section_end:" in ret: + ret = ret.split("section_end:")[0] + + return ret.strip() + + def extract_job_configuration(self, gl_project, branch): + ret = {} + + if not branch or branch.strip().lower() == "main": + return None + + try: + for commit in gl_project.commits.list(ref_name=branch, page=0, per_page=3): + if len(ret) > 0: + break + + if branch in commit.title and "Merge" not in commit.title: + for diff_entry in commit.diff(): + if len(ret) > 0: + break + + if diff_entry["old_path"] == diff_entry["new_path"] and diff_entry["new_path"].endswith( + "/job-to-execute.txt" + ): + diff_entry = diff_entry["diff"].replace("\n+", "\n").split("\n") + ret = { + i.split("=")[0].strip(): i.split("=")[1].strip() + for i in diff_entry + if len(i.split("=")) == 2 + } + except Exception as e: + logger.warn(f'Could not extract job configuration on "{branch}".', exc_info=e) + pass + + if ( + "TIRA_COMMAND_TO_EXECUTE" in ret + and "'No software to execute. 
Only evaluation'" in ret["TIRA_COMMAND_TO_EXECUTE"] + and ("TIRA_SOFTWARE_ID" not in ret or "-1" == ret["TIRA_SOFTWARE_ID"]) + ): + software_from_db = {"display_name": "Evaluate Run", "image": "evaluator", "command": "evaluator"} + else: + try: + from .tira_model import model + + software_from_db = model.get_docker_software(int(ret["TIRA_SOFTWARE_ID"].split("docker-software-")[-1])) + except Exception as e: + logger.warn(f'Could not extract the software from the database for "{json.dumps(ret)}": {str(e)}') + software_from_db = {} + + return { + "software_name": software_from_db.get("display_name", "Loading..."), + "image": software_from_db.get("user_image_name", "Loading..."), + "command": software_from_db.get("command", "Loading..."), + "cores": str(ret.get("TIRA_CPU_COUNT", "Loading...")) + " CPU Cores", + "ram": str(ret.get("TIRA_MEMORY_IN_GIBIBYTE", "Loading...")) + "GB of RAM", + "gpu": str(ret.get("TIRA_GPU", "Loading...")) + " GPUs", + "data": str(ret.get("TIRA_DATA", "Loading...")) + " Mounts", + "dataset_type": ret.get("TIRA_DATASET_TYPE", "Loading..."), + "dataset": ret.get("TIRA_DATASET_ID", "Loading..."), + "software_id": ret.get("TIRA_SOFTWARE_ID", "Loading..."), + "task_id": ret.get("TIRA_TASK_ID", "Loading..."), + } + + def __all_failed_pipelines_for_repository(self, gl_project, already_covered_run_ids): + ret = [] + + for branch in gl_project.branches.list(): + branch = branch.name + p = (branch + "---started-").split("---started-")[0] + run_id = p.split("---")[-1] + + if run_id in already_covered_run_ids: + continue + + job_config = self.extract_job_configuration(gl_project, branch) + if not job_config: + continue + + ret += [ + { + "run_id": run_id, + "execution": {"scheduling": "failed", "execution": "failed", "evaluation": "failed"}, + "pipeline_name": p, + "stdOutput": ( + "Job did not run. (Maybe it is still submitted to the cluster or failed to start. It might take" + " up to 5 minutes to submit a Job to the cluster.)" + ), + "started_at": p.split("---")[-1], + "branch": branch, + "job_config": job_config, + } + ] + + return ret + + +class GithubRunner(GitRunner): + + def __init__(self, github_token): + self.git_token = github_token + self.gitHoster_client = Github(self.git_token) + + def _convert_repository_id_to_repository_name(self, repository_id): + for repo in self.gitHoster_client.get_user().get_repos(): + if repo.id == repository_id: + return repo.name + + def template_ci(self): + """ + returns the Workflow template file as string + """ + # TODO: create workflow template file at tira/application/src/tira/templates/tira/git_task_repository_github_workflow.yml + return render_to_string("tira/git_task_repository_github_workflow.yml", context={}) + + def template_readme(self, task_id): + """ + returns the readme template file for Github as string + """ + # TODO: create readme template file for Github at tira/application/src/tira/templates/tira/git_task_repository_github_workflow.yml + return render_to_string("tira/git_task_repository_github_readme.md", context={"task_name": task_id}) + + def template_tira_cmd_script(self, project_id): + return render_to_string( + "tira/tira_git_cmd.sh", context={"project_id": project_id, "ci_server_host": "https://github.com"} + ) + + def add_new_tag_to_docker_image_repository(self, repository_name, old_tag, new_tag): + for repo in self.gitHoster_client.get_user().get_repos(): + if repo.name == repository_name: + tags = repo.tags + if new_tag not in tags: + repo.create_tag(new_tag) + repo.git.push(tags=True) # Brauchen wir das? 
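+                    # ("Brauchen wir das?" = "Do we need this?", i.e. it is unclear whether this push is required.)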
+ else: + logger.info(f"Tag: {new_tag} already exists with the same name") + + def all_user_repositories(self): + """ + Lists all user repositories in the organization "user_name". + + Return + ---------- + user_repositories: Iterable[str] + List of all user repositories in the organization. + """ + + ret = [] + for repo in self.gitHoster_client.get_user().get_repos(): + ret.append(repo.name) + + return set(ret) + + def stop_job_and_clean_up(self, git_repository_id, user_name, run_id): + """ + All runs that are currently running, pending, or failed + life in a dedicated branch. + Every successfully (without errors/failures and with evaluation) + executed software is merged into the main branch. + This method stops a potentially running pipeline identified by the run_id + of the user "user_id" and deletes the branch. + + Parameters + ---------- + git_repository_id: str + Identifier of the task repository. + (gitlab: int; github: int) + + user_name: str + Name of the user. The repository of the user has the name "tira-user-${user_name}". + + run_id: str + Identifier of the to be stopped run. + + Return + ---------- + - + """ + repository_name = self._convert_repository_id_to_repository_name(git_repository_id) + + # cancel worflow run + run = self.gitHoster_client.get_user().get_repo(repository_name).get_workflow_run(run_id) + run.cancel() + + # delete branch + branch_name = run.head_branch + self.gitHoster_client.get_user().get_repo(repository_name).get_git_ref(f"heads/{branch_name}").delete + + def _create_task_repository_on_gitHoster(self, task_id): + # create new repository and rename the default branch + project = self.gitHoster_client.get_user().create_repo(name=task_id) + for branch in project.get_branches(): + project.rename_branch(branch=branch, new_name=self.user_repository_branch) + return project + + def _create_access_token_gitHoster(self, project, repo): + raise ValueError("ToDo: Implement this.") + + def yield_all_running_pipelines(self, git_repository_id): + """ + Yield all pipelines/workflows that are currently running, pending, or failed. + + + Parameters + ---------- + git_repository_id: str + Identifier of the task repository. + (gitlab: int; github: int) + + Return + ---------- + jobs: Iteratable[dict] + all pipelines/workflows that are currently running, pending, or failed. + Each entry has the following fields: + 'run_id', + 'execution', + 'stdOutput', + 'started_at', + 'pipeline_name', + 'job_config', + 'pipeline' + """ + # https://docs.github.com/en/rest/actions/workflow-jobs?apiVersion=2022-11-28#get-a-job-for-a-workflow-run + pass + + def git_user_exists(self, user_name): + try: + return self.gitHoster_client.get_user(user_name) is not None + except Exception: + return False + + def get_git_runner_for_software_integration( + self, + reference_repository_name, + user_repository_name, + user_repository_namespace, + github_user, + tira_user_name, + dockerhub_token, + dockerhub_user, + tira_client_token, + repository_search_prefix, + tira_task_id, + tira_code_repository_id, + tira_client_user, + private, + ): + user = self.gitHoster_client.get_user() + try: + user_repo = user.get_repo(f"{user_repository_namespace}/{user_repository_name}") + if user_repo: + return user_repo + except Exception: + # repository does not exist. 
+ pass + + return self.create_software_submission_repository_for_user( + reference_repository_name, + user_repository_name, + user_repository_namespace, + github_user, + tira_user_name, + dockerhub_token, + dockerhub_user, + tira_client_token, + repository_search_prefix, + tira_task_id, + tira_code_repository_id, + tira_client_user, + private, + ) + + def create_software_submission_repository_for_user( + self, + reference_repository_name, + user_repository_name, + user_repository_namespace, + github_user, + tira_user_name, + dockerhub_token, + dockerhub_user, + tira_client_token, + repository_search_prefix, + tira_task_id, + tira_code_repository_id, + tira_client_user, + private, + ): + reference_repo = self.gitHoster_client.get_repo(reference_repository_name) + + org = self.gitHoster_client.get_organization(user_repository_namespace) + repo = org.create_repo( + user_repository_name, + f"The repository of user {tira_user_name} for code submissions in TIRA.", + private=private, + ) + repo.add_to_collaborators(github_user, "admin") + + repo.create_secret("TIRA_DOCKER_REGISTRY_TOKEN", dockerhub_token) + repo.create_secret("TIRA_DOCKER_REGISTRY_USER", dockerhub_user) + repo.create_secret("TIRA_CLIENT_TOKEN", tira_client_token) + repo.create_secret("TIRA_CLIENT_USER", tira_client_user) + repo.create_secret("TIRA_CODE_REPOSITORY_ID", tira_code_repository_id) + + contents = reference_repo.get_contents(repository_search_prefix) + while contents: + file_content = contents.pop(0) + if file_content.type == "dir": + contents.extend(reference_repo.get_contents(file_content.path)) + else: + decoded_content = file_content.decoded_content.decode() + decoded_content = normalize_file(decoded_content, tira_user_name, tira_task_id) + repo.create_file(file_content.path, "Initial Commit.", decoded_content) + + return repo diff --git a/application/src/tira_app/grpc/grpc_server.py b/application/src/tira_app/grpc/grpc_server.py new file mode 100644 index 000000000..7b97a2fc9 --- /dev/null +++ b/application/src/tira_app/grpc/grpc_server.py @@ -0,0 +1,202 @@ +import logging +from concurrent import futures + +import django +import grpc +from django.conf import settings + +from .. import tira_model as model +from ..model import EvaluationLog, TransactionLog, TransitionLog +from ..proto import tira_host_pb2, tira_host_pb2_grpc + +grpc_port = settings.APPLICATION_GRPC_PORT + +logger = logging.getLogger("tira") + + +class TiraApplicationService(tira_host_pb2_grpc.TiraApplicationService): + def set_state(self, request, context): + """TODO error handling""" + django.db.connection.close() + logger.debug(f" Application Server received vm-state {request.state} for {request.vmId}") + print( + f"Application Server received vm-state {request.state} for {request.vmId}. 
Transaction:" + f" {request.transaction.transactionId}" + ) + try: + TransactionLog.objects.filter(transaction_id=request.transaction.transactionId).update( + last_status=request.transaction.status, + last_message=f"TiraApplicationService:set_state:{request.transaction.message}", + ) + + t = TransactionLog.objects.get(transaction_id=request.transaction.transactionId) + + _ = TransitionLog.objects.update_or_create( + vm_id=request.vmId, defaults={"vm_state": request.state, "transaction": t} + ) + except Exception as e: + logger.warning(e) + return tira_host_pb2.Transaction( + status=tira_host_pb2.Status.FAILED, + message=f"TiraApplicationService:set_state:FAILED with {e}", + transactionId=request.transaction.transactionId, + ) + + return tira_host_pb2.Transaction( + status=tira_host_pb2.Status.SUCCESS, + message="TiraApplicationService:set_state:SUCCESS", + transactionId=request.transaction.transactionId, + ) + + def complete_transaction(self, request, context): + """Marks a transaction as completed if the + This is basically the final stage of a a TIRA message exchange. + """ + django.db.connection.close() + logger.debug(f" Application Server received complete_transaction for {request.transactionId}") + print(f" Application Server received complete_transaction for {request.transactionId}") + + try: + _ = TransactionLog.objects.filter(transaction_id=request.transactionId).update( + completed=True, + last_status=str(request.status), + last_message=f"TiraApplicationService:complete_transaction:{request.message}", + ) + + except Exception as e: + logger.warning(e) + return tira_host_pb2.Transaction( + status=tira_host_pb2.Status.FAILED, + message=f"TiraApplicationService:complete_transaction:FAILED with {e}", + transactionId=request.transactionId, + ) + + return tira_host_pb2.Transaction( + status=tira_host_pb2.Status.SUCCESS, + message="TiraApplicationService:complete_transaction:SUCCESS", + transactionId=request.transactionId, + ) + + def confirm_vm_create(self, request, context): + """This gets called if a vm was successfully created. Right now it just says 'yes' when called. + See tira_host.proto for request specification. + """ + django.db.connection.close() + logger.debug( + " Application Server received vm-create confirmation with \n" + f"{request.vmID}, {request.userName}, {request.initialUserPw}, {request.ip}, {request.sshPort}, " + f"{request.rdpPort}" + ) + + _ = TransactionLog.objects.filter(transaction_id=request.transaction.transactionId).update( + completed=False, last_status=str(request.transaction.status), last_message=request.transaction.message + ) + + if request.transaction.status == tira_host_pb2.Status.SUCCESS: + model.add_vm( + request.vmId, + request.userName, + request.initialUserPw, + request.ip, + request.host, + request.sshPort, + request.rdpPort, + ) + + else: + logger.error( + "Application received confirm_vm_create with status Failed:\n" + f"{request.vmID}, {request.userName}, {request.initialUserPw}, {request.ip}, " + f"{request.sshPort}, {request.rdpPort}" + ) + + return tira_host_pb2.Transaction( + status=tira_host_pb2.Status.SUCCESS, + message="Application accepted vm create confirmation", + transactionId=request.transaction.transactionId, + ) + + def confirm_vm_delete(self, request, context): + """This gets called if a run_eval finishes and receives the EvaluationResults. + Right now it just says 'yes' when called. See tira_host.proto for request specification. + TODO this should remove the deleted vm from the model. 
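+        Note: this is the confirmation handler for vm_delete, not run_eval.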
+ """ + django.db.connection.close() + print(f" Application Server received vm_delete confirmation with: \n{request.vmId.vmId} measures.") + + return tira_host_pb2.Transaction( + status=tira_host_pb2.Status.SUCCESS, + message="Application accepted vm delete confirmation", + transactionId=request.transaction.transactionId, + ) + + def confirm_run_eval(self, request, context): + """This gets called if a run_eval finishes and receives the EvaluationResults. + We use this to load a new evaluation run into the database. + See tira_host.proto for request specification. + """ + django.db.connection.close() + logger.debug( + f" Application Server received run-eval confirmation with: \n{request.runId.runId} - {request.runId.vmId} -" + f" {request.transaction.transactionId} and {len(request.measures)} measures." + ) + print( + f" Application Server received run-eval confirmation with: \n{request.runId.runId} - {request.runId.vmId} -" + f" {request.transaction.transactionId} and {len(request.measures)} measures." + ) + + result = model.add_run(request.runId.datasetId, request.runId.vmId, request.runId.runId) + + EvaluationLog.objects.filter(vm_id=request.runId.vmId, run_id=request.runId.runId).delete() + EvaluationLog.objects.filter(transaction__transaction_id=request.transaction.transactionId).delete() + + _ = TransactionLog.objects.filter(transaction_id=request.transaction.transactionId).update( + completed=False, last_status=str(request.transaction.status), last_message=request.transaction.message + ) + + return tira_host_pb2.Transaction( + status=tira_host_pb2.Status.SUCCESS, + message=( + f"Application accepted evaluation confirmation with request.runId.datasetId={request.runId.datasetId}," + f" request.runId.vmId={request.runId.vmId}, request.runId.runId={request.runId.runId}. Result {result}." + ), + transactionId=request.transaction.transactionId, + ) + + def confirm_run_execute(self, request, context): + """This gets called if a run_execute finishes. We use this to load the new run in the database. + See tira_host.proto for request specification. + """ + django.db.connection.close() + logger.debug(f" Application Server received run-eval confirmation with: \n{request.runId.runId}.") + + result = model.add_run(request.runId.datasetId, request.runId.vmId, request.runId.runId) + EvaluationLog.objects.filter(vm_id=request.runId.vmId, run_id=request.runId.runId).delete() + _ = TransactionLog.objects.filter(transaction_id=request.transaction.transactionId).update( + completed=False, last_status=str(request.transaction.status), last_message=request.transaction.message + ) + + return tira_host_pb2.Transaction( + status=tira_host_pb2.Status.SUCCESS, + message=( + "Application accepted run execute confirmation with:" + f" request.runId.datasetId={request.runId.datasetId}, request.runId.vmId={request.runId.vmId}," + f" request.runId.runId={request.runId.runId}. Result {result}." 
+ ), + transactionId=request.transaction.transactionId, + ) + + def heartbeat(self, request, context): + """ """ + pass + + +def serve(): + server = grpc.server(futures.ThreadPoolExecutor(max_workers=50)) + tira_host_pb2_grpc.add_TiraApplicationServiceServicer_to_server(TiraApplicationService(), server) + listen_addr = f"[::]:{grpc_port}" + server.add_insecure_port(listen_addr) + server.start() + print("Starting tira-application server on %s", listen_addr) + logger.info("Starting tira-application server on %s", listen_addr) + server.wait_for_termination() diff --git a/application/src/tira/grpc/test_grpc_host_client.py b/application/src/tira_app/grpc/test_grpc_host_client.py similarity index 53% rename from application/src/tira/grpc/test_grpc_host_client.py rename to application/src/tira_app/grpc/test_grpc_host_client.py index 00bb6c97a..74e117ea2 100644 --- a/application/src/tira/grpc/test_grpc_host_client.py +++ b/application/src/tira_app/grpc/test_grpc_host_client.py @@ -1,10 +1,11 @@ import grpc -from tira.proto import tira_host_pb2, tira_host_pb2_grpc + +from ..proto import tira_host_pb2, tira_host_pb2_grpc class TestGrpcHostClient: def __init__(self, transaction_id): - self.channel = grpc.insecure_channel('localhost:50052') + self.channel = grpc.insecure_channel("localhost:50052") self.stub = tira_host_pb2_grpc.TiraApplicationServiceStub(self.channel) self.transaction_id = transaction_id @@ -12,70 +13,86 @@ def __del__(self): self.channel.close() def set_state(self, vm_id, vm_state, transaction_id): - """ Wait for :param slp: seconds, then call the set_state method of the applications server, - this means, we tell the application that the vm now changed it's state to vm_state """ + """Wait for :param slp: seconds, then call the set_state method of the applications server, + this means, we tell the application that the vm now changed it's state to vm_state""" if self.transaction_id == transaction_id: response = self.stub.set_state( - tira_host_pb2.VmState(transaction=tira_host_pb2.Transaction(status=tira_host_pb2.Status.SUCCESS, - transactionId=transaction_id, - message=f"Set state to {vm_state}"), - state=vm_state, vmId=vm_id)) + tira_host_pb2.VmState( + transaction=tira_host_pb2.Transaction( + status=tira_host_pb2.Status.SUCCESS, + transactionId=transaction_id, + message=f"Set state to {vm_state}", + ), + state=vm_state, + vmId=vm_id, + ) + ) print(f"host-client: set_state response was: {response}") return response print("'set_state' rejected due to transaction id mismatch") # TODO transactionId -- implement create feature def confirm_vm_create(self, vm_id, user_name, user_pw, ip, host, ssh, rdp, transaction_id): - """ Call the set_state method of the applications server, - this means, we tell the application that the vm now changed it's state to vm_state """ + """Call the set_state method of the applications server, + this means, we tell the application that the vm now changed it's state to vm_state""" if self.transaction_id == transaction_id: response = self.stub.confirm_vm_create( - tira_host_pb2.VmDetails(transaction=tira_host_pb2.Transaction(status=tira_host_pb2.Status.SUCCESS, - transactionId=transaction_id, - message=f"Created VM"), - vmId=vm_id, userName=user_name, initialUserPw=user_pw, - ip=ip, host=host, sshPort=ssh, rdpPort=rdp)) + tira_host_pb2.VmDetails( + transaction=tira_host_pb2.Transaction( + status=tira_host_pb2.Status.SUCCESS, transactionId=transaction_id, message="Created VM" + ), + vmId=vm_id, + userName=user_name, + initialUserPw=user_pw, + ip=ip, + host=host, 
+ sshPort=ssh, + rdpPort=rdp, + ) + ) print(f"host-client: confirm_vm_create response was: {response}") return response print("'confirm_vm_create' rejected due to transaction id mismatch") # TODO transactionId -- implement delete feature def confirm_vm_delete(self, vm_id, user_name, user_pw, ip, ssh, rdp, transaction_id): - """ Call the set_state method of the applications server, - this means, we tell the application that the vm now changed it's state to vm_state """ + """Call the set_state method of the applications server, + this means, we tell the application that the vm now changed it's state to vm_state""" if self.transaction_id == transaction_id: - response = self.stub.confirm_vm_delete( - tira_host_pb2.VmId(vmId=vm_id)) + response = self.stub.confirm_vm_delete(tira_host_pb2.VmId(vmId=vm_id)) print(f"host-client: confirm_vm_delete response was: {response}") return response print("'confirm_vm_delete' rejected due to transaction id mismatch") def confirm_run_eval(self, vm_id, dataset_id, run_id, transaction_id): - """ Call the confirm_run_eval method of the applications server """ + """Call the confirm_run_eval method of the applications server""" if self.transaction_id == transaction_id: - measure = tira_host_pb2.EvaluationResults.Measure(key='demo-measure', value='1') + measure = tira_host_pb2.EvaluationResults.Measure(key="demo-measure", value="1") result = tira_host_pb2.EvaluationResults( - transaction=tira_host_pb2.Transaction(status=tira_host_pb2.Status.SUCCESS, transactionId=transaction_id, - message="completed evaluation"), - runId=tira_host_pb2.RunId(vmId=vm_id, datasetId=dataset_id, runId=run_id)) + transaction=tira_host_pb2.Transaction( + status=tira_host_pb2.Status.SUCCESS, transactionId=transaction_id, message="completed evaluation" + ), + runId=tira_host_pb2.RunId(vmId=vm_id, datasetId=dataset_id, runId=run_id), + ) result.measures.append(measure) response = self.stub.confirm_run_eval(result) print(f"host-client: confirm_run_eval response was: {response}") if response.status == tira_host_pb2.Status.SUCCESS: - self.complete_transaction(transaction_id, message='confirmation: completed evaluation') + self.complete_transaction(transaction_id, message="confirmation: completed evaluation") else: self.confirm_run_eval(vm_id, dataset_id, run_id, transaction_id) return response print("'confirm_run_eval' rejected due to transaction id mismatch") def complete_transaction(self, transaction_id, message): - """ Confirm that a Transaction has completed. 
- """ + """Confirm that a Transaction has completed.""" if self.transaction_id == transaction_id: response = self.stub.complete_transaction( - tira_host_pb2.Transaction(status=tira_host_pb2.Status.SUCCESS, transactionId=transaction_id, - message=message)) + tira_host_pb2.Transaction( + status=tira_host_pb2.Status.SUCCESS, transactionId=transaction_id, message=message + ) + ) print(f"host-client: complete_transaction response was: {response}") return response print("'complete_transaction' rejected due to transaction id mismatch") diff --git a/application/src/tira/grpc/test_grpc_host_server.py b/application/src/tira_app/grpc/test_grpc_host_server.py similarity index 70% rename from application/src/tira/grpc/test_grpc_host_server.py rename to application/src/tira_app/grpc/test_grpc_host_server.py index 7f14f193d..bd5a60766 100644 --- a/application/src/tira/grpc/test_grpc_host_server.py +++ b/application/src/tira_app/grpc/test_grpc_host_server.py @@ -1,18 +1,18 @@ #!/usr/bin/env python from concurrent import futures -import grpc -import sys +from functools import wraps +from threading import Thread from time import sleep +from typing import Set from uuid import uuid4 -from threading import Thread -from functools import wraps +import grpc -from tira.proto import tira_host_pb2, tira_host_pb2_grpc -from tira.grpc.test_grpc_host_client import TestGrpcHostClient +from tira_app.grpc.test_grpc_host_client import TestGrpcHostClient +from tira_app.proto import tira_host_pb2, tira_host_pb2_grpc -VIRTUAL_MACHINES = {} +VIRTUAL_MACHINES: dict[str, "DummyVirtualMachine"] = {} class DummyVirtualMachine(object): @@ -68,13 +68,16 @@ def get_info(self): return response def auto_transaction(msg): - """ automatically terminate transactions if a method completes """ + """automatically terminate transactions if a method completes""" + def attribute_decorator(func): @wraps(func) def func_wrapper(self, transaction_id, *args, complete_transaction=False): # safely reset the transaction_ids if self.transaction_id is not None or self.transaction_id == transaction_id: - TestGrpcHostClient(self.transaction_id).complete_transaction(self.transaction_id, f"transaction superseded by {transaction_id}") + TestGrpcHostClient(self.transaction_id).complete_transaction( + self.transaction_id, f"transaction superseded by {transaction_id}" + ) self.transaction_id = None self.transaction_id = transaction_id @@ -85,15 +88,27 @@ def func_wrapper(self, transaction_id, *args, complete_transaction=False): TestGrpcHostClient(transaction_id).complete_transaction(transaction_id, msg) self.transaction_id = None else: - TestGrpcHostClient(transaction_id).complete_transaction(transaction_id, f"transaction superseded by {self.transaction_id}") + TestGrpcHostClient(transaction_id).complete_transaction( + transaction_id, f"transaction superseded by {self.transaction_id}" + ) + return func_wrapper + return attribute_decorator def create(self, transaction_id): test_host_client = TestGrpcHostClient(transaction_id) self.state = 2 - test_host_client.confirm_vm_create(self.vm_id, self.user_name, 'dummy_pw', '127.0.0.1', self.host, - self.ssh_port, self.rdp_port, self.transaction_id) + test_host_client.confirm_vm_create( + self.vm_id, + self.user_name, + "dummy_pw", + "127.0.0.1", + self.host, + self.ssh_port, + self.rdp_port, + self.transaction_id, + ) self.start(transaction_id) self.transaction_id = None # TODO check if we need to complete transaction here @@ -213,12 +228,11 @@ def run_abort(self, transaction_id): def get_or_create_vm(vm_id): - """ this 
is a hack for the dummy server. In reality, the vm need to be created first ;) """ + """this is a hack for the dummy server. In reality, the vm need to be created first ;)""" vm = VIRTUAL_MACHINES.get(vm_id, None) if vm is None: # here we cheat - vm = DummyVirtualMachine(vm_id, 'tira', 'ubuntu', - '16000', '2', '1234', '5678', 'localhost') + vm = DummyVirtualMachine(vm_id, "tira", "ubuntu", "16000", "2", "1234", "5678", "localhost") vm.state = 2 VIRTUAL_MACHINES[vm_id] = vm @@ -227,13 +241,15 @@ def get_or_create_vm(vm_id): class TiraHostService(tira_host_pb2_grpc.TiraHostService): - def check_state(state, ignore_ongoing=False): - """ A decorator that checks the STATE precondition for all calls to TiraHostService that thae a VmId message - We check: - - is the vm in the correct state for the requested transistion - - is there already a transaction ongoing - The decorator then calls the callback (or not) and sends the appropriate reply - """ + @staticmethod + def check_state(state: Set[int], ignore_ongoing: bool = False): + """A decorator that checks the STATE precondition for all calls to TiraHostService that thae a VmId message + We check: + - is the vm in the correct state for the requested transistion + - is there already a transaction ongoing + The decorator then calls the callback (or not) and sends the appropriate reply + """ + def state_check_decorator(func): @wraps(func) def func_wrapper(self, request, *args, **kwargs): @@ -248,26 +264,29 @@ def func_wrapper(self, request, *args, **kwargs): return tira_host_pb2.Transaction( status=tira_host_pb2.Status.FAILED, transactionId=request.transaction.transactionId, - message=f"{request.vmId}: required state {state} but was in state {vm.state}" + message=f"{request.vmId}: required state {state} but was in state {vm.state}", ) - if vm.transaction_id is not None and \ - vm.transaction_id != request.transaction.transactionId and \ - not ignore_ongoing: + if ( + vm.transaction_id is not None + and vm.transaction_id != request.transaction.transactionId + and not ignore_ongoing + ): return tira_host_pb2.Transaction( status=tira_host_pb2.Status.FAILED, transactionId=request.transaction.transactionId, - message=f"Rejected. {vm_id} already accepted a different transaction" + message=f"Rejected. {vm_id} already accepted a different transaction", ) else: func(self, request, *args, **kwargs) return tira_host_pb2.Transaction( status=tira_host_pb2.Status.SUCCESS, transactionId=request.transaction.transactionId, - message=f"Accepted transaction." 
+ message="Accepted transaction.", ) return func_wrapper + return state_check_decorator def vm_info(self, request, context): @@ -278,16 +297,21 @@ def vm_info(self, request, context): def vm_create(self, request, context): # TODO transactions - print(f"received vm-create for {request.ovaFile} - {request.vmId} - {request.userName} " - f"- {request.ip} - {request.host}") + print( + f"received vm-create for {request.ovaFile} - {request.vmId} - {request.userName} " + f"- {request.ip} - {request.host}" + ) if request.vmId in VIRTUAL_MACHINES.keys(): - return tira_host_pb2.Transaction(status=tira_host_pb2.Status.FAILED, - transactionId=request.transaction.transactionId, - message="ID already exists") + return tira_host_pb2.Transaction( + status=tira_host_pb2.Status.FAILED, + transactionId=request.transaction.transactionId, + message="ID already exists", + ) - new_vm = DummyVirtualMachine(request.vmId, request.userName, 'ubuntu', - '16000', '2', '1234', '5678', request.host) + new_vm = DummyVirtualMachine( + request.vmId, request.userName, "ubuntu", "16000", "2", "1234", "5678", request.host + ) VIRTUAL_MACHINES[request.vmId] = new_vm @@ -295,9 +319,10 @@ def vm_create(self, request, context): t.start() return tira_host_pb2.Transaction( - status=tira_host_pb2.Status.SUCCESS, - transactionId=request.transaction.transactionId, - message="received vm_create request") + status=tira_host_pb2.Status.SUCCESS, + transactionId=request.transaction.transactionId, + message="received vm_create request", + ) @check_state({2}) def vm_delete(self, request, context): @@ -314,7 +339,7 @@ def vm_shutdown(self, request, context): print(f"received vm-shutdown for {request.vmId}") vm = get_or_create_vm(request.vmId) - t = Thread(target=vm.shutdown, args=(request.transaction.transactionId, ), kwargs={"complete_transaction": True}) + t = Thread(target=vm.shutdown, args=(request.transaction.transactionId,), kwargs={"complete_transaction": True}) t.start() @check_state({2}) @@ -322,7 +347,7 @@ def vm_start(self, request, context): print(f"received vm-start for {request.vmId}") vm = get_or_create_vm(request.vmId) - t = Thread(target=vm.start, args=(request.transaction.transactionId, ), kwargs={"complete_transaction": True}) + t = Thread(target=vm.start, args=(request.transaction.transactionId,), kwargs={"complete_transaction": True}) t.start() @check_state({3, 4}, ignore_ongoing=True) @@ -330,7 +355,7 @@ def vm_stop(self, request, context): print(f"received vm-stop for {request.vmId}") vm = get_or_create_vm(request.vmId) - t = Thread(target=vm.stop, args=(request.transaction.transactionId, ), kwargs={"complete_transaction": True}) + t = Thread(target=vm.stop, args=(request.transaction.transactionId,), kwargs={"complete_transaction": True}) t.start() @check_state({2}) @@ -338,7 +363,7 @@ def vm_sandbox(self, request, context): print(f"received vm-sandbox for {request.vmId}") vm = get_or_create_vm(request.vmId) - t = Thread(target=vm.sandbox, args=(request.transaction.transactionId, ), kwargs={"complete_transaction": True}) + t = Thread(target=vm.sandbox, args=(request.transaction.transactionId,), kwargs={"complete_transaction": True}) t.start() @check_state({7}) @@ -346,48 +371,60 @@ def vm_unsandbox(self, request, context): print(f"received vm-unsandbox for {request.vmId}") vm = get_or_create_vm(request.vmId) - t = Thread(target=vm.unsandbox, args=(request.transaction.transactionId, ), kwargs={"complete_transaction": True}) + t = Thread( + target=vm.unsandbox, args=(request.transaction.transactionId,), 
kwargs={"complete_transaction": True} + ) t.start() @check_state({1, 2}) def run_execute(self, request, context): - """ Here we pretend to do all actions involved in running and executing the software: - - shutdown, sandbox, execute, unsandbox, power_on - But we sleep instead. Afterwards, we notify the application that the transaction was complete. - """ - print(f"received run-execute for {request.runId.runId} - {request.runId.datasetId} - {request.runId.vmId} - " - f"{request.inputRunId.runId} - {request.inputRunId.datasetId} - {request.inputRunId.vmId}") + """Here we pretend to do all actions involved in running and executing the software: + - shutdown, sandbox, execute, unsandbox, power_on + But we sleep instead. Afterwards, we notify the application that the transaction was complete. + """ + print( + f"received run-execute for {request.runId.runId} - {request.runId.datasetId} - {request.runId.vmId} - " + f"{request.inputRunId.runId} - {request.inputRunId.datasetId} - {request.inputRunId.vmId}" + ) vm = get_or_create_vm(request.runId.vmId) - t = Thread(target=vm.run_execute, args=(request.transaction.transactionId, ), - kwargs={"complete_transaction": True}) + t = Thread( + target=vm.run_execute, args=(request.transaction.transactionId,), kwargs={"complete_transaction": True} + ) t.start() # @check_state({1}) # Uncommented for the dummy code. Here we cheat and pretend the master is running already def run_eval(self, request, context): - print(f"received run-eval for {request.runId.runId} - {request.runId.datasetId} - {request.runId.vmId} - " - f"{request.inputRunId.runId} - {request.inputRunId.datasetId} - {request.inputRunId.vmId}") + print( + f"received run-eval for {request.runId.runId} - {request.runId.datasetId} - {request.runId.vmId} - " + f"{request.inputRunId.runId} - {request.inputRunId.datasetId} - {request.inputRunId.vmId}" + ) vm = get_or_create_vm(request.runId.vmId) # eval is executed on the master vm vm.state = 1 try: - t = Thread(target=vm.run_eval, args=(request.transaction.transactionId, - request.inputRunId.vmId, - request.inputRunId.datasetId, - request.inputRunId.runId), - kwargs={"complete_transaction": True}) + t = Thread( + target=vm.run_eval, + args=( + request.transaction.transactionId, + request.inputRunId.vmId, + request.inputRunId.datasetId, + request.inputRunId.runId, + ), + kwargs={"complete_transaction": True}, + ) t.start() return tira_host_pb2.Transaction( status=tira_host_pb2.Status.SUCCESS, transactionId=request.transaction.transactionId, - message=f"TiraHostService:run_eval:{request.inputRunId.runId} on {request.runId.vmId}:ACCEPTED" + message=f"TiraHostService:run_eval:{request.inputRunId.runId} on {request.runId.vmId}:ACCEPTED", ) except Exception as e: return tira_host_pb2.Transaction( status=tira_host_pb2.Status.FAILED, transactionId=request.transaction.transactionId, - message=f"TiraHostService:run_eval:{request.vmId}:FAILED:{e}" + message=f"TiraHostService:run_eval:{request.vmId}:FAILED:{e}", ) @check_state({4, 5, 6, 7}, ignore_ongoing=True) @@ -395,45 +432,50 @@ def run_abort(self, request, context): print(f"received run-abort for {request.vmId}") vm = get_or_create_vm(request.vmId) - t = Thread(target=vm.run_abort, args=(request.transaction.transactionId, ), - kwargs={"complete_transaction": True}) + t = Thread( + target=vm.run_abort, args=(request.transaction.transactionId,), kwargs={"complete_transaction": True} + ) t.start() # TODO implement def vm_list(self, context): - print(f"received vm-list") - return 
tira_host_pb2.VmList(transaction=tira_host_pb2.Transaction( - status=tira_host_pb2.Status.FAILED, - message="vm-list: not implemented", - transactionId=str(uuid4()) - )) + print("received vm-list") + return tira_host_pb2.VmList( + transaction=tira_host_pb2.Transaction( + status=tira_host_pb2.Status.FAILED, message="vm-list: not implemented", transactionId=str(uuid4()) + ) + ) def vm_backup(self, request, context): print(f"received vm-backup for {request.vmId}") - return tira_host_pb2.VmList(transaction=tira_host_pb2.Transaction( - status=tira_host_pb2.Status.FAILED, - message="vm-backup: not implemented", - transactionId=request.transaction.transactionId - )) + return tira_host_pb2.VmList( + transaction=tira_host_pb2.Transaction( + status=tira_host_pb2.Status.FAILED, + message="vm-backup: not implemented", + transactionId=request.transaction.transactionId, + ) + ) def vm_snapshot(self, request, context): print(f"received vm-snapshot for {request.vmId}") - return tira_host_pb2.VmList(transaction=tira_host_pb2.Transaction( - status=tira_host_pb2.Status.FAILED, - message="vm-snapshot: not implemented", - transactionId=request.transaction.transactionId - )) + return tira_host_pb2.VmList( + transaction=tira_host_pb2.Transaction( + status=tira_host_pb2.Status.FAILED, + message="vm-snapshot: not implemented", + transactionId=request.transaction.transactionId, + ) + ) def serve(port): server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) tira_host_pb2_grpc.add_TiraHostServiceServicer_to_server(TiraHostService(), server) - listen_addr = f'[::]:{port}' + listen_addr = f"[::]:{port}" server.add_insecure_port(listen_addr) server.start() print("Starting host server on %s", listen_addr) server.wait_for_termination() -if __name__ == '__main__': +if __name__ == "__main__": serve("50051") diff --git a/application/src/tira/grpc_client.py b/application/src/tira_app/grpc_client.py similarity index 66% rename from application/src/tira/grpc_client.py rename to application/src/tira_app/grpc_client.py index 9ba5d895b..6a7449050 100644 --- a/application/src/tira/grpc_client.py +++ b/application/src/tira_app/grpc_client.py @@ -1,24 +1,24 @@ #!/usr/bin/env python """ - GrpcClient to make gRPC calls to the dockerized host running a VM. +GrpcClient to make gRPC calls to the dockerized host running a VM. """ import logging +from functools import wraps +from uuid import uuid4 -from django.conf import settings import grpc +from django.conf import settings from google.protobuf.empty_pb2 import Empty -from tira.model import TransactionLog, EvaluationLog -from uuid import uuid4 -from functools import wraps +from .model import EvaluationLog, TransactionLog from .proto import tira_host_pb2, tira_host_pb2_grpc logger = logging.getLogger("tira") grpc_port = settings.HOST_GRPC_PORT -def new_transaction(message, in_grpc=True): - """ A convenience method to create a new transaction with a :@param message:, save it to the database, +def new_transaction(message: str, in_grpc=True): + """A convenience method to create a new transaction with a :@param message:, save it to the database, and wrap it in a protobuf Transaction to be returned. 
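+    Illustrative usage (the vm name is a placeholder):
+
+        transaction_id = new_transaction("initialized run-eval of my-vm", in_grpc=False)  # returns the plain id
+        grpc_transaction = new_transaction("initialized vm create of my-vm")  # wrapped in a tira_host_pb2.Transaction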
""" transaction_id = str(uuid4()) @@ -29,21 +29,23 @@ def new_transaction(message, in_grpc=True): return transaction_id -def auto_transaction(msg): - """ when we gat a Transaction message as response and it fails, automatically terminate the Transaction - in the TransactionLog """ +def auto_transaction(msg: str): + """when we gat a Transaction message as response and it fails, automatically terminate the Transaction + in the TransactionLog""" + def attribute_decorator(func): @wraps(func) def func_wrapper(self, *args, **kwargs): grpc_transaction = new_transaction(f"initialized {msg} of {kwargs['vm_id']}") - message_suffix = '-'.join([a for a in args if isinstance(a, str)]) + message_suffix = "-".join([a for a in args if isinstance(a, str)]) response = func(self, *args, transaction=grpc_transaction, **kwargs) if response.status == 1: _ = TransactionLog.objects.filter(transaction_id=response.transactionId).update( completed=True, last_status=str(response.status), - last_message=f"{response.message}: {message_suffix}") + last_message=f"{response.message}: {message_suffix}", + ) return response return func_wrapper @@ -52,24 +54,27 @@ def func_wrapper(self, *args, **kwargs): class GrpcClient: - """ Main class for the Application's GRPC client. This client makes calls to a server running on a host specified - by it's hostname """ + """Main class for the Application's GRPC client. This client makes calls to a server running on a host specified + by it's hostname""" + def __init__(self, hostname): - """ A channel is opened at init time and closed on deletion. Try not to store these objects for long. """ + """A channel is opened at init time and closed on deletion. Try not to store these objects for long.""" self.hostname = hostname - self.channel = grpc.insecure_channel(hostname + ':' + str(grpc_port)) + self.channel = grpc.insecure_channel(hostname + ":" + str(grpc_port)) self.stub = tira_host_pb2_grpc.TiraHostServiceStub(self.channel) def __del__(self): self.channel.close() def vm_create(self, vm_id, ova_file, user_id, hostname): - """ TODO test and comment """ + """TODO test and comment""" grpc_transaction = new_transaction(f"initialized vm create of {vm_id}") response = self.stub.vm_create( - tira_host_pb2.VmCreate(transaction=grpc_transaction, - vmId=vm_id, userId=user_id, ovaFile=ova_file, host=hostname)) + tira_host_pb2.VmCreate( + transaction=grpc_transaction, vmId=vm_id, userId=user_id, ovaFile=ova_file, host=hostname + ) + ) logger.debug("Application received vm-create response: " + str(response.message)) return response @@ -103,9 +108,20 @@ def vm_list(self): return response @auto_transaction("run-execute") - def run_execute(self, vm_id, dataset_id, run_id, input_run_vm_id, input_run_dataset_id, input_run_run_id, - optional_parameters, task_id, software_id, transaction): - """ Initiates a run: the execution of a software to produce output. + def run_execute( + self, + vm_id, + dataset_id, + run_id, + input_run_vm_id, + input_run_dataset_id, + input_run_run_id, + optional_parameters, + task_id, + software_id, + transaction, + ): + """Initiates a run: the execution of a software to produce output. 
:param software_id: :param task_id: :param vm_id: ID of the vm to run the command below @@ -120,19 +136,36 @@ def run_execute(self, vm_id, dataset_id, run_id, input_run_vm_id, input_run_data logger.info("Application starts a run-execute") grpc_run_id = tira_host_pb2.RunId(vmId=vm_id, datasetId=dataset_id, runId=run_id) - grpc_input_run_id = tira_host_pb2.RunId(vmId=input_run_vm_id, datasetId=input_run_dataset_id, - runId=str(input_run_run_id)) - - response = self.stub.run_execute(tira_host_pb2.RunDetails(transaction=transaction, - runId=grpc_run_id, inputRunId=grpc_input_run_id, - taskId=task_id, softwareId=software_id)) + grpc_input_run_id = tira_host_pb2.RunId( + vmId=input_run_vm_id, datasetId=input_run_dataset_id, runId=str(input_run_run_id) + ) + + response = self.stub.run_execute( + tira_host_pb2.RunDetails( + transaction=transaction, + runId=grpc_run_id, + inputRunId=grpc_input_run_id, + taskId=task_id, + softwareId=software_id, + ) + ) logger.debug("Application received run-execute response: " + str(response.message)) return response @auto_transaction("run-eval") - def run_eval(self, vm_id, dataset_id, run_id, input_run_vm_id, input_run_dataset_id, input_run_run_id, optional_parameters, transaction): - """ Initiates the evaluation of a prior run. + def run_eval( + self, + vm_id, + dataset_id, + run_id, + input_run_vm_id, + input_run_dataset_id, + input_run_run_id, + optional_parameters, + transaction, + ): + """Initiates the evaluation of a prior run. :param vm_id: ID of the vm that can run the evaluation :param dataset_id: ID of the dataset :param run_id: ID of the evaluation @@ -144,22 +177,27 @@ def run_eval(self, vm_id, dataset_id, run_id, input_run_vm_id, input_run_dataset :param optional_parameters: Other parameters the evaluator might expect """ grpc_run_id = tira_host_pb2.RunId(vmId=vm_id, datasetId=dataset_id, runId=run_id) - grpc_input_run_id = tira_host_pb2.RunId(vmId=input_run_vm_id, datasetId=input_run_dataset_id, - runId=str(input_run_run_id)) - response = self.stub.run_eval(tira_host_pb2.RunDetails(transaction=transaction, - runId=grpc_run_id, inputRunId=grpc_input_run_id, - optionalParameters=optional_parameters)) + grpc_input_run_id = tira_host_pb2.RunId( + vmId=input_run_vm_id, datasetId=input_run_dataset_id, runId=str(input_run_run_id) + ) + response = self.stub.run_eval( + tira_host_pb2.RunDetails( + transaction=transaction, + runId=grpc_run_id, + inputRunId=grpc_input_run_id, + optionalParameters=optional_parameters, + ) + ) if response.status == 0: t = TransactionLog.objects.get(transaction_id=transaction.transactionId) - _ = EvaluationLog.objects.update_or_create(vm_id=vm_id, run_id=run_id, running_on=vm_id, - transaction=t) + _ = EvaluationLog.objects.update_or_create(vm_id=vm_id, run_id=run_id, running_on=vm_id, transaction=t) logger.debug("Application received run-eval response: " + str(response.message)) return response @auto_transaction("run-abort") def run_abort(self, vm_id, transaction): - """ Abort a currently ongoing run.""" + """Abort a currently ongoing run.""" response = self.stub.run_abort(tira_host_pb2.VmId(transaction=transaction, vmId=vm_id)) logger.debug("Application received run-abort response: " + str(response.message)) return response diff --git a/application/src/tira_app/huggingface_hub_integration.py b/application/src/tira_app/huggingface_hub_integration.py new file mode 100644 index 000000000..4c92a1115 --- /dev/null +++ b/application/src/tira_app/huggingface_hub_integration.py @@ -0,0 +1,38 @@ +from typing import Iterable, Optional + 
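+# Helpers to resolve Hugging Face models that are already present in the local
+# Hugging Face cache so that they can be mounted into software executions.
+# Illustrative usage (the model name is a placeholder):
+#
+#   snapshot_download_hf_model("openai-community--gpt2")          # "--" is mapped to "/" before downloading
+#   mounts = huggingface_model_mounts(["openai-community/gpt2"])  # raises if the model is not in the local cache
+#   # -> {"MOUNT_HF_MODEL": "openai-community/gpt2", "HF_HOME": ..., "HF_CACHE_SCAN": [...]}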
+import tira.io_utils as tira_cli_io_utils +from huggingface_hub import HFCacheInfo, scan_cache_dir, snapshot_download +from huggingface_hub.constants import HF_HOME + +HF_CACHE: Optional[HFCacheInfo] = None + + +def _hf_repos() -> dict[str, str]: + global HF_CACHE + if HF_CACHE is None: + HF_CACHE = scan_cache_dir() + return {i.repo_id: str(i) for i in HF_CACHE.repos} + + +def huggingface_model_mounts(models: Iterable[str]): + if not models: + return [] + + mounts = tira_cli_io_utils.huggingface_model_mounts(models) + repos = _hf_repos() + print(mounts) + print(repos) + print(models) + + ret = [] + for model in models: + if model in repos: + ret.append(repos[model]) + else: + raise Exception(f"Model {model} is not available in the Huggingface cache") + + return {"MOUNT_HF_MODEL": " ".join(models), "HF_HOME": HF_HOME, "HF_CACHE_SCAN": ret} + + +def snapshot_download_hf_model(model: str): + snapshot_download(repo_id=model.replace("--", "/")) diff --git a/application/src/tira/ir_datasets_loader.py b/application/src/tira_app/ir_datasets_loader.py similarity index 56% rename from application/src/tira/ir_datasets_loader.py rename to application/src/tira_app/ir_datasets_loader.py index 926d6f106..067e2b479 100644 --- a/application/src/tira/ir_datasets_loader.py +++ b/application/src/tira_app/ir_datasets_loader.py @@ -1,69 +1,98 @@ -import sys -import json import copy +import gzip +import json +import os +from base64 import b64encode from pathlib import Path -from typing import Iterable +from typing import Any, Iterable, Optional + +import pandas as pd from bs4 import BeautifulSoup from tqdm import tqdm -import pandas as pd -import os -import gzip -from base64 import b64encode def run_irds_command(task_id, dataset_id, image, command, output_dir, truth_command, truth_output_dir): - from tira.tira_model import model - from tira.util import run_cmd_as_documented_background_process + from .tira_model import model + from .util import run_cmd_as_documented_background_process + irds_root = model.custom_irds_datasets_path / task_id / dataset_id - command = command.replace('$outputDir', '/output-tira-tmp/') - truth_command = truth_command.replace('$outputDir', '/output-tira-tmp/') + command = command.replace("$outputDir", "/output-tira-tmp/") + truth_command = truth_command.replace("$outputDir", "/output-tira-tmp/") Path(output_dir).mkdir(parents=True, exist_ok=True) Path(truth_output_dir).mkdir(parents=True, exist_ok=True) Path(irds_root).mkdir(parents=True, exist_ok=True) command = [ - ['sudo', 'podman', '--storage-opt', 'mount_program=/usr/bin/fuse-overlayfs', 'run', - '-v', f'{irds_root}:/root/.ir_datasets', '-v', f'{output_dir}:/output-tira-tmp/', - '--entrypoint', 'sh', image, '-c', command], - ['sudo', 'podman', '--storage-opt', 'mount_program=/usr/bin/fuse-overlayfs', 'run', - '-v', f'{irds_root}:/root/.ir_datasets', '-v', f'{truth_output_dir}:/output-tira-tmp/', - '--entrypoint', 'sh', image, '-c', truth_command] + [ + "sudo", + "podman", + "--storage-opt", + "mount_program=/usr/bin/fuse-overlayfs", + "run", + "-v", + f"{irds_root}:/root/.ir_datasets", + "-v", + f"{output_dir}:/output-tira-tmp/", + "--entrypoint", + "sh", + image, + "-c", + command, + ], + [ + "sudo", + "podman", + "--storage-opt", + "mount_program=/usr/bin/fuse-overlayfs", + "run", + "-v", + f"{irds_root}:/root/.ir_datasets", + "-v", + f"{truth_output_dir}:/output-tira-tmp/", + "--entrypoint", + "sh", + image, + "-c", + truth_command, + ], ] - descriptions = ['### Import Dataset (Without Ground Truth ###', '### Import Ground 
Truth ###'] + descriptions = ["### Import Dataset (Without Ground Truth ###", "### Import Ground Truth ###"] # For debug purposes - #command = ['sh', '-c', 'ecsho "1"; sleep 2s; echo "2"; sleep 2s; echo "3"; sleep 2s; echo "4";' + + # command = ['sh', '-c', 'ecsho "1"; sleep 2s; echo "2"; sleep 2s; echo "3"; sleep 2s; echo "4";' + # 'echo "5"; sleep 2s; echo "6"; sleep 2s; echo "7"; sleep 2s; echo "8";' + # 'echo "9"; sleep 2s; echo "10"; sleep 2s; echo "11"; sleep 2s; echo "12";' + # 'echo "13"; sleep 2s; echo "14"; sleep 2s; echo "15"; sleep 2s; echo "16"' # ] - #command = [command, command] + # command = [command, command] - return run_cmd_as_documented_background_process(cmd=command, vm_id=None, task_id=task_id, title=f'Import Dataset {dataset_id}', descriptions=descriptions) + return run_cmd_as_documented_background_process( + cmd=command, vm_id=None, task_id=task_id, title=f"Import Dataset {dataset_id}", descriptions=descriptions + ) class IrDatasetsLoader(object): - """ Base class for loading datasets in a standardized format""" + """Base class for loading datasets in a standardized format""" def load_irds(self, ir_datasets_id): from tira.third_party_integrations import ir_datasets + try: return ir_datasets.load(ir_datasets_id) - except: - raise ValueError(f'Could not load the dataset {ir_datasets_id}. Does it exist?') - + except Exception: + raise ValueError(f"Could not load the dataset {ir_datasets_id}. Does it exist?") def yield_docs(self, dataset, include_original, skip_duplicate_ids, allowlist_path_ids): already_covered_ids = set() allowed_ids = set() if allowlist_path_ids: - with open(allowlist_path_ids, 'r') as inp_file: + with open(allowlist_path_ids, "r") as inp_file: for i in inp_file: allowed_ids.add(i.strip()) - print('I use a allow list of size ', len(allowed_ids)) + print("I use a allow list of size ", len(allowed_ids)) - for doc in tqdm(dataset.docs_iter(), 'Load Documents'): + for doc in tqdm(dataset.docs_iter(), "Load Documents"): if skip_duplicate_ids and doc.doc_id in already_covered_ids: continue if allowlist_path_ids and str(doc.doc_id) not in allowed_ids: @@ -72,52 +101,75 @@ def yield_docs(self, dataset, include_original, skip_duplicate_ids, allowlist_pa yield self.map_doc(doc, include_original) if skip_duplicate_ids: already_covered_ids.add(doc.doc_id) - - - def load_dataset_for_fullrank(self, ir_datasets_id: str, output_dataset_path: Path, output_dataset_truth_path: Path, include_original=True, skip_documents=False, skip_qrels=False, skip_duplicate_ids=True, allowlist_path_ids: Path = None) -> None: - """ Loads a dataset through the ir_datasets package by the given ir_datasets ID. + def load_dataset_for_fullrank( + self, + ir_datasets_id: str, + output_dataset_path: Path, + output_dataset_truth_path: Path, + include_original=True, + skip_documents=False, + skip_qrels=False, + skip_duplicate_ids=True, + allowlist_path_ids: Optional[Path] = None, + ) -> None: + """Loads a dataset through the ir_datasets package by the given ir_datasets ID. Maps documents, queries, qrels to a standardized format in preparation for full-rank operations with PyTerrier. 
- + @param ir_datasets_id: the dataset ID as of ir_datasets @param output_dataset_path: the path to the directory where the output files will be stored @param output_dataset_truth_path: the path to the directory where the output files will be stored - @param include_original {False}: flag which signals if the original data of documents and queries should be included + @param include_original {False}: flag which signals if the original data of documents and queries should be included @param skip_duplicate_ids: Should this pipeline skip duplicate ids? @param allowlist_path_ids: skip ids not in the allowlist (e.g., for filtering the subcategories of the ClueWebs) """ dataset = self.load_irds(ir_datasets_id) if not skip_documents and output_dataset_path: - self.write_lines_to_file(self.yield_docs(dataset, include_original, skip_duplicate_ids, allowlist_path_ids), output_dataset_path/"documents.jsonl") - + self.write_lines_to_file( + self.yield_docs(dataset, include_original, skip_duplicate_ids, allowlist_path_ids), + output_dataset_path / "documents.jsonl", + ) + queries_mapped_jsonl = [self.map_query_as_jsonl(query, include_original) for query in dataset.queries_iter()] queries_mapped_xml = [self.map_query_as_xml(query, include_original) for query in dataset.queries_iter()] - + if not skip_qrels: try: qrels_mapped = [self.map_qrel(qrel) for qrel in dataset.qrels_iter()] - except: - print('WARNING: I could not load qrels and will skip writing the file "qrels.txt". This is expected if your dataset has no qrels yet. If you have qrels, please debug this problem locally on your machine.') + except Exception: + print( + 'WARNING: I could not load qrels and will skip writing the file "qrels.txt". This is expected if' + " your dataset has no qrels yet. If you have qrels, please debug this problem locally on your" + " machine." + ) qrels_mapped = [] if len(qrels_mapped) > 0: - self.write_lines_to_file(qrels_mapped, output_dataset_truth_path/"qrels.txt") + self.write_lines_to_file(qrels_mapped, output_dataset_truth_path / "qrels.txt") if output_dataset_path: - self.write_lines_to_file(queries_mapped_jsonl, output_dataset_path/"queries.jsonl") - self.write_lines_to_file([json.dumps({"ir_datasets_id": ir_datasets_id})], output_dataset_path/"metadata.json") - self.write_lines_to_xml_file(ir_datasets_id, queries_mapped_xml, output_dataset_path/"queries.xml") + self.write_lines_to_file(queries_mapped_jsonl, output_dataset_path / "queries.jsonl") + self.write_lines_to_file( + [json.dumps({"ir_datasets_id": ir_datasets_id})], output_dataset_path / "metadata.json" + ) + self.write_lines_to_xml_file(ir_datasets_id, queries_mapped_xml, output_dataset_path / "queries.xml") if output_dataset_truth_path: - self.write_lines_to_file(queries_mapped_jsonl, output_dataset_truth_path/"queries.jsonl") - self.write_lines_to_xml_file(ir_datasets_id, queries_mapped_xml, output_dataset_truth_path/"queries.xml") - - - def load_dataset_for_rerank(self, ir_datasets_id: str, output_dataset_path: Path, output_dataset_truth_path: Path, include_original: bool, run_file: Path) -> None: - """ Loads a dataset through ir_datasets package by the given ir_datasets ID. 
+ self.write_lines_to_file(queries_mapped_jsonl, output_dataset_truth_path / "queries.jsonl") + self.write_lines_to_xml_file(ir_datasets_id, queries_mapped_xml, output_dataset_truth_path / "queries.xml") + + def load_dataset_for_rerank( + self, + ir_datasets_id: str, + output_dataset_path: Path, + output_dataset_truth_path: Path, + include_original: bool, + run_file: Path, + ) -> None: + """Loads a dataset through ir_datasets package by the given ir_datasets ID. Maps qrels and TREC-run-formatted data by a given file to a format fitted for re-rank operations with PyTerrier. - + @param ir_datasets_id: the dataset ID as of ir_datasets @param output_dataset_path: the path to the directory where the output files will be stored @param output_dataset_truth_path: the path to the directory where the output files will be stored @@ -125,39 +177,36 @@ def load_dataset_for_rerank(self, ir_datasets_id: str, output_dataset_path: Path @param run_file: the path to a file with data in TREC-run format """ dataset = self.load_irds(ir_datasets_id) - queries = {str(i.query_id):i for i in dataset.queries_iter()} - + queries = {str(i.query_id): i for i in dataset.queries_iter()} + run = self.load_run_file(run_file) - print('Get Documents') - docs = self.get_docs_by_ids(dataset, list(set([str(i['docno']) for i in run]))) - print('Produce rerank data.') - rerank = tqdm((self.construct_rerank_row(docs, queries, i, include_original) for i in run), 'Produce Rerank File.') - print('Write rerank data.') - self.write_lines_to_file(rerank, output_dataset_path/"rerank.jsonl.gz") - print('Done rerank data was written.') + print("Get Documents") + docs = self.get_docs_by_ids(dataset, set(str(i["docno"]) for i in run)) + print("Produce rerank data.") + rerank = tqdm( + (self.construct_rerank_row(docs, queries, i, include_original) for i in run), "Produce Rerank File." 
+ ) + print("Write rerank data.") + self.write_lines_to_file(rerank, output_dataset_path / "rerank.jsonl.gz") + print("Done rerank data was written.") if output_dataset_truth_path: - print('Write qrels data.') + print("Write qrels data.") qrels_mapped = (self.map_qrel(qrel) for qrel in dataset.qrels_iter()) - self.write_lines_to_file(qrels_mapped, output_dataset_truth_path/"qrels.txt") - + self.write_lines_to_file(qrels_mapped, output_dataset_truth_path / "qrels.txt") def map_doc(self, doc: tuple, include_original=True) -> str: - """ Maps a document of any dataset (loaded through ir_datasets) to a standarized format + """Maps a document of any dataset (loaded through ir_datasets) to a standarized format stores full document data too, if flag 'include_original' is set @param doc: the document as a namedtuple @param include_original: flag which signals if the original document data should be stored too - :return ret: the mapped document + :return ret: the mapped document """ - ret = { - "docno": doc.doc_id, - "text": doc.default_text() - } + ret = {"docno": doc.doc_id, "text": doc.default_text()} if include_original: ret["original_document"] = self.make_serializable(doc._asdict()) return json.dumps(ret) - def map_query_as_jsonl(self, query: tuple, include_original=True) -> str: ret = { "qid": query.query_id, @@ -167,73 +216,72 @@ def map_query_as_jsonl(self, query: tuple, include_original=True) -> str: ret["original_query"] = query._asdict() return json.dumps(ret) - def map_query_as_xml(self, query: tuple, include_original=False) -> str: soup = BeautifulSoup() - soup.append(soup.new_tag('topic', attrs={ 'number': query.query_id })) - soup.topic.append(soup.new_tag('query')) + soup.append(soup.new_tag("topic", attrs={"number": query.query_id})) + soup.topic.append(soup.new_tag("query")) soup.query.append(soup.new_string(query.default_text())) if include_original: - soup.topic.append(soup.new_tag('original_query')) + soup.topic.append(soup.new_tag("original_query")) for key, value in query._asdict().items(): soup.original_query.append(soup.new_tag(str(key))) tag = soup.original_query.find(key) tag.append(soup.new_string(str(value))) return soup - def map_qrel(self, qrel: tuple) -> str: return f"{qrel.query_id} {qrel.iteration} {qrel.doc_id} {qrel.relevance}" - def load_run_file(self, run_file: Path) -> list: - if not os.path.abspath(run_file).endswith('run.txt'): - run_file = run_file / 'run.txt' + if not os.path.abspath(run_file).endswith("run.txt"): + run_file = run_file / "run.txt" - run = pd.read_csv(os.path.abspath(run_file), sep='\\s+', names=["qid", "Q0", "docno", "rank", "score", "system"]) + run = pd.read_csv( + os.path.abspath(run_file), sep="\\s+", names=["qid", "Q0", "docno", "rank", "score", "system"] + ) run = run.copy().sort_values(["qid", "score", "docno"], ascending=[True, False, False]).reset_index() run = run.groupby("qid")[["qid", "Q0", "docno", "rank", "score", "system"]].head(1000) # Make sure that rank position starts by 1 run["rank"] = 1 run["rank"] = run.groupby("qid")["rank"].cumsum() - - return [i.to_dict() for _, i in run[['qid', 'Q0', 'docno', 'rank', 'score', 'system']].iterrows()] - - def get_docs_by_ids(self, dataset, doc_ids: list) -> dict: + return [i.to_dict() for _, i in run[["qid", "Q0", "docno", "rank", "score", "system"]].iterrows()] + + def get_docs_by_ids(self, dataset, doc_ids: set[str]) -> dict[Any, Any]: docstore = dataset.docs_store() try: ret = {} doc_ids = set(doc_ids) - for doc in tqdm(docstore.get_many_iter(doc_ids), total=len(doc_ids), 
desc='Get Docs'): + for doc in tqdm(docstore.get_many_iter(doc_ids), total=len(doc_ids), desc="Get Docs"): ret[doc.doc_id] = doc return ret - except: + except Exception: ret = {} doc_ids = set(doc_ids) - for doc_id in tqdm(doc_ids, total=len(doc_ids), desc='Get Docs'): + for doc_id in tqdm(doc_ids, total=len(doc_ids), desc="Get Docs"): doc = docstore.get(doc_id) ret[doc.doc_id] = doc return ret - def make_serializable(self, o: dict): for k in o.keys(): value = o[k] - if value and value.__class__ and str(value.__class__.__name__) == 'bytes': - o[k] = b64encode(value).decode('ascii') - + if value and value.__class__ and str(value.__class__.__name__) == "bytes": + o[k] = b64encode(value).decode("ascii") + return o - def construct_rerank_row(self, docs: dict, queries: dict, rerank_line: dict, include_original) -> str: + def construct_rerank_row( + self, docs: dict, queries: dict, rerank_line: dict[str, Any], include_original + ) -> Optional[str]: query = queries[str(rerank_line["qid"])] doc = docs.get(str(rerank_line["docno"]), None) - + if not doc: return None - + ret = { "qid": query.query_id, "query": query.default_text(), @@ -242,39 +290,37 @@ def construct_rerank_row(self, docs: dict, queries: dict, rerank_line: dict, inc "text": doc.default_text(), "original_document": {}, "rank": rerank_line["rank"], - "score": rerank_line["score"] + "score": rerank_line["score"], } - + if include_original: - ret["original_document"] = self.make_serializable(doc._asdict()), + ret["original_document"] = (self.make_serializable(doc._asdict()),) return json.dumps(ret) - def write_lines_to_file(self, lines: Iterable[str], path: Path) -> None: - if(path.exists()): + if path.exists(): raise RuntimeError(f"File already exists: {path}") path.parent.mkdir(parents=True, exist_ok=True) - - if os.path.abspath(path).endswith('.gz'): - with gzip.open(os.path.abspath(path), 'wb') as file: + + if os.path.abspath(path).endswith(".gz"): + with gzip.open(os.path.abspath(path), "wb") as file: for line in lines: if not line: continue - file.write((line + '\n').encode('utf-8')) + file.write((line + "\n").encode("utf-8")) else: - with path.open('wt') as file: - file.writelines('%s\n' % line for line in lines if line) - + with path.open("wt") as file: + file.writelines("%s\n" % line for line in lines if line) def write_lines_to_xml_file(self, ir_datasets_id: str, lines: Iterable[str], path: Path) -> None: - if(path.exists()): + if path.exists(): raise RuntimeError(f"File already exists: {path}") path.parent.mkdir(parents=True, exist_ok=True) soup = BeautifulSoup() - soup.append(soup.new_tag('topics', attrs={ 'ir-datasets-id': ir_datasets_id })) - root = soup.find('topics') + soup.append(soup.new_tag("topics", attrs={"ir-datasets-id": ir_datasets_id})) + root = soup.find("topics") for line in lines: root.append(copy.deepcopy(line)) - with path.open('wt') as file: + with path.open("wt") as file: file.write(soup.prettify()) diff --git a/application/src/tira_app/management/commands/archive_runs_to_zenodo.py b/application/src/tira_app/management/commands/archive_runs_to_zenodo.py new file mode 100644 index 000000000..f02ef92f7 --- /dev/null +++ b/application/src/tira_app/management/commands/archive_runs_to_zenodo.py @@ -0,0 +1,197 @@ +import json +import shutil + +from django.core.management.base import BaseCommand +from tqdm import tqdm + +from ...endpoints.data_api import model +from ...views import zip_run, zip_runs + + +def md5(filename): + import hashlib + + return hashlib.md5(open(filename, "rb").read()).hexdigest() + + 
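Editor's note on the md5 helper directly above: it reads each run archive into memory with a single open(...).read(), which is fine for small zips but can be costly for the larger archives this command produces. A minimal sketch of a chunked variant (illustrative only, not part of the patch; the helper name is hypothetical) that keeps memory usage constant:

import hashlib

def md5_chunked(filename: str, chunk_size: int = 1 << 20) -> str:
    # Hash the file in 1 MiB chunks so large run archives are never fully loaded into memory.
    digest = hashlib.md5()
    with open(filename, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
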
+class Command(BaseCommand): + help = "Dump software outputs for Zenodo" + + def handle(self, *args, **options): + dataset_groups = { + "trec-recent": [ + "msmarco-passage-trec-dl-2019-judged-20230107-training", + "msmarco-passage-trec-dl-2020-judged-20230107-training", + "trec-tip-of-the-tongue-dev-20230607-training", + ], + "tiny-test-collections": [ + "antique-test-20230107-training", + "vaswani-20230107-training", + "cranfield-20230107-training", + "nfcorpus-test-20230107-training", + ], + "trec-medical": [ + "medline-2004-trec-genomics-2004-20230107-training", + "medline-2017-trec-pm-2017-20230211-training", + "cord19-fulltext-trec-covid-20230107-training", + "medline-2017-trec-pm-2018-20230211-training", + "medline-2004-trec-genomics-2005-20230107-training", + ], + "clef-labs": [ + "argsme-touche-2020-task-1-20230209-training", + "argsme-touche-2021-task-1-20230209-training", + "longeval-short-july-20230513-training", + "longeval-heldout-20230513-training", + "longeval-long-september-20230513-training", + "longeval-train-20230513-training", + ], + "clueweb": [ + "clueweb09-en-trec-web-2009-20230107-training", + "clueweb09-en-trec-web-2010-20230107-training", + "clueweb09-en-trec-web-2011-20230107-training", + "clueweb09-en-trec-web-2012-20230107-training", + "clueweb12-touche-2020-task-2-20230209-training", + "clueweb12-touche-2021-task-2-20230209-training", + "clueweb12-trec-misinfo-2019-20240214-training", + "clueweb12-trec-web-2013-20230107-training", + "clueweb12-trec-web-2014-20230107-training", + "gov-trec-web-2002-20230209-training", + "gov-trec-web-2003-20230209-training", + "gov-trec-web-2004-20230209-training", + "gov2-trec-tb-2004-20230209-training", + "gov2-trec-tb-2005-20230209-training", + "gov2-trec-tb-2006-20230209-training", + ], + "trec-core": [ + "wapo-v2-trec-core-2018-20230107-training", + "disks45-nocr-trec8-20230209-training", + "disks45-nocr-trec7-20230209-training", + "disks45-nocr-trec-robust-2004-20230209-training", + ], + "ir-lab": ["anthology-20240411-training", "ir-acl-anthology-20240504-training"], + } + + # we publish document processors only for fully public datasets, query processors can be published on all groups + fully_public_datasets = ( + dataset_groups["trec-recent"] + + dataset_groups["tiny-test-collections"] + + dataset_groups["trec-medical"] + + dataset_groups["clef-labs"] + + dataset_groups["ir-lab"] + ) + + systems = { + "ir-benchmarks": { + "tira-ir-starter": {"Index (tira-ir-starter-pyterrier)": "pyterrier-indexes"}, + "seanmacavaney": { + "DocT5Query": "doc-t5-query", + "corpus-graph": "corpus-graph", + }, + "ows": {"pyterrier-anceindex": "pyterrier-anceindex"}, + "ir-lab-sose-2024": { + "tira-ir-starter": { + "Index (tira-ir-starter-pyterrier)": "ir-lab-sose-2024", + "Index (pyterrier-stanford-lemmatizer)": "ir-lab-sose-2024", + }, + "seanmacavaney": { + "DocT5Query": "ir-lab-sose-2024", + "corpus-graph": "ir-lab-sose-2024", + }, + "ows": {"pyterrier-anceindex": "ir-lab-sose-2024"}, + "naverlabseurope": {"Splade (Index)": "ir-lab-sose-2024"}, + }, + } + } + + aggregated_systems = { + "ir-benchmarks": { + "qpptk": { + "all-predictors": "qpptk-all-predictors", + }, + "salamander": { + "classify-comparative-queries": "qpptk-all-predictors", + }, + "ows": { + "query-segmentation-hyb-a": "qpptk-all-predictors", + }, + "dossier": {"pre-retrieval-query-intent": "qpptk-all-predictors"}, + "tu-dresden-03": { + "qe-gpt3.5-sq-zs": "qpptk-all-predictors", + "qe-llama-sq-zs": "qpptk-all-predictors", + "qe-llama-sq-fs": "qpptk-all-predictors", + 
"qe-llama-cot": "qpptk-all-predictors", + "qe-flan-ul2-sq-zs": "qpptk-all-predictors", + "qe-flan-ul2-sq-fs": "qpptk-all-predictors", + "qe-flan-ul2-cot": "qpptk-all-predictors", + }, + # pre-retrieval query intent, post-retrieval query intent + # splade + # comparative questions + # entity linking + }, + "workshop-on-open-web-search": { + "tu-dresden-03": { + "qe-gpt3.5-cot": "qpptk-all-predictors", + "qe-gpt3.5-sq-fs": "qpptk-all-predictors", + }, + "marcel-gohsen": { + "query-interpretation": "qpptk-all-predictors", + "entity-linking": "qpptk-all-predictors", + }, + }, + } + + ret = {} + + # for task_id in systems.keys(): + for task_id in []: + ret[task_id] = {} + for user_id in systems[task_id].keys(): + ret[task_id][user_id] = {} + for display_name in systems[task_id][user_id].keys(): + ret[task_id][user_id][display_name] = {} + output_dir = systems[task_id][user_id][display_name] + for i in tqdm(fully_public_datasets): + run_id = model.runs(task_id, i, user_id, display_name)[0]["run_id"] + target_file = f"{output_dir}/{run_id}.zip" + + zip_file = zip_run(i, user_id, run_id) + shutil.copyfile(zip_file, target_file) + ret[task_id][user_id][display_name][i] = {"run_id": run_id, "md5": md5(target_file)} + + print(json.dumps(ret)) + + ret = {} + + for task_id in aggregated_systems.keys(): + ret[task_id] = {} + for user_id in aggregated_systems[task_id].keys(): + ret[task_id][user_id] = {} + for display_name, output_dir in aggregated_systems[task_id][user_id].items(): + ret[task_id][user_id][display_name] = {} + + for dataset_group, datasets in tqdm(dataset_groups.items(), display_name): + run_ids = {} + file_name = f"{user_id}-{display_name}-{dataset_group}" + target_file = f"{output_dir}/{file_name}.zip" + + for dataset in datasets: + runs_on_dataset = model.runs(task_id, dataset, user_id, display_name) + if len(runs_on_dataset) > 0: + run_ids[dataset] = runs_on_dataset[0] + else: + print(f"skip dataset {dataset} for {display_name}") + + if len(run_ids) == 0: + print(f"Skip group {dataset_group} for {display_name}.") + continue + + zip_file = zip_runs(user_id, [(k, v) for k, v in run_ids.items()], file_name) + shutil.copyfile(zip_file, target_file) + ret[task_id][user_id][display_name][dataset_group] = { + "dataset_group": dataset_group, + "md5": md5(target_file), + "run_ids": run_ids, + } + + print(json.dumps(ret)) diff --git a/application/src/tira_app/management/commands/cache_daemon.py b/application/src/tira_app/management/commands/cache_daemon.py new file mode 100644 index 000000000..c4f386bf2 --- /dev/null +++ b/application/src/tira_app/management/commands/cache_daemon.py @@ -0,0 +1,149 @@ +import datetime +import logging +import time + +from django.core.cache import cache +from django.core.management import call_command +from django.core.management.base import BaseCommand + +from ... import tira_model as model + +logger = logging.getLogger("cache_daemon") +from ...git_runner import all_git_runners +from ...tira_model import get_all_reranking_datasets, get_git_integration + + +class Command(BaseCommand): + help = "cache daemon" + + def keep_running_softwares_fresh(self, sleep_time): + while True: + time.sleep(int(sleep_time)) + print( + f"{datetime.datetime.now()}: Start loop to keep the running softwares fresh (slept for {int(sleep_time)} seconds)..." 
+ ) + for task in model.get_tasks(): + if task is None: + continue + if model.git_pipeline_is_enabled_for_task(task["task_id"], cache): + if "featured" not in task or not task["featured"]: + print(f'Skip inactive task {task["task_id"]}') + continue + + evaluators_for_task = model.get_evaluators_for_task(task["task_id"], cache) + repositories = set( + [ + i["git_repository_id"] + for i in evaluators_for_task + if i["is_git_runner"] and i["git_repository_id"] + ] + ) + + for git_repository_id in repositories: + try: + print(task["task_id"] + "--->" + str(git_repository_id)) + git_integration = get_git_integration(task_id=task["task_id"]) + running_pipelines = git_integration.all_running_pipelines_for_repository( + git_repository_id, cache, force_cache_refresh=True + ) + print( + "Refreshed Cache (" + + str(datetime.datetime.now()) + + "): " + + task["task_id"] + + " on repo " + + str(git_repository_id) + + " has " + + str(len(running_pipelines)) + + " jobs." + ) + except Exception as e: + print(f"Exception during refreshing the repository {git_repository_id}: e") + logger.warn(f"Exception during refreshing the repository {git_repository_id}", exc_info=e) + continue + + time.sleep(0.1) + + def refresh_user_images_in_repo(self, git_runner, sleep_time): + users_of_active_tasks = set() + for task in model.get_tasks(): + if task is None: + continue + if "featured" in task and task["featured"] and "allowed_task_teams" in task and task["allowed_task_teams"]: + users_of_active_tasks |= set( + [i.strip() for i in task["allowed_task_teams"].split("\n") if i and i.strip()] + ) + + print( + str(datetime.datetime.now()) + + ": Start loop to keep the user images fresh (sleeped " + + str(int(sleep_time)) + + f" seconds) for {users_of_active_tasks} ...", + flush=True, + ) + + for user in users_of_active_tasks: + try: + images = git_runner.docker_images_in_user_repository(user, cache, force_cache_refresh=True) + print( + "Refreshed Cache (" + + str(datetime.datetime.now()) + + "): " + + user + + " has " + + str(len(images)) + + " images.", + flush=True, + ) + except Exception as e: + print(f"Exception during refreshing image repository {user}: {e}", flush=True) + continue + time.sleep(0.1) + + def keep_user_images_fresh(self, sleep_time): + while True: + time.sleep(int(sleep_time)) + print( + str(datetime.datetime.now()) + + ": Start loop over all git runners to keep user images fresh (sleeped " + + str(int(sleep_time)) + + " seconds) ...", + flush=True, + ) + for git_runner in all_git_runners(): + try: + self.refresh_user_images_in_repo(git_runner, sleep_time) + except Exception as e: + print(f"Exception in keep_user_images_fresh: {e}", flush=True) + continue + + def keep_reranking_datasets_fresh(self, sleep_time): + while True: + time.sleep(int(sleep_time)) + print( + str(datetime.datetime.now()) + + ": Start keep_reranking_datasets_fresh (sleeped " + + str(int(sleep_time)) + + " seconds) ..." 
+ ) + try: + get_all_reranking_datasets(True) + except Exception as e: + print(f"Exception in keep_reranking_datasets_fresh: {e}") + + def handle(self, *args, **options): + call_command("createcachetable") + + if "keep_running_softwares_fresh" in options and options["keep_running_softwares_fresh"]: + self.keep_running_softwares_fresh(options["keep_running_softwares_fresh"]) + + if "keep_user_images_fresh" in options and options["keep_user_images_fresh"]: + self.keep_user_images_fresh(options["keep_user_images_fresh"]) + + if "keep_reranking_datasets_fresh" in options and options["keep_reranking_datasets_fresh"]: + self.keep_reranking_datasets_fresh(options["keep_reranking_datasets_fresh"]) + + def add_arguments(self, parser): + parser.add_argument("--keep_running_softwares_fresh", default=None, type=str) + parser.add_argument("--keep_reranking_datasets_fresh", default=None, type=str) + parser.add_argument("--keep_user_images_fresh", default=None, type=str) diff --git a/application/src/tira_app/management/commands/dump_tira.py b/application/src/tira_app/management/commands/dump_tira.py new file mode 100644 index 000000000..5e824b2fb --- /dev/null +++ b/application/src/tira_app/management/commands/dump_tira.py @@ -0,0 +1,14 @@ +from django.apps import apps +from django.core.management import call_command +from django.core.management.base import BaseCommand + + +class Command(BaseCommand): + help = "dump all of tira" + + def handle(self, *args, **options): + tira_config = apps.get_app_config("tira") + models = [f"tira.{i}" for i in tira_config.models] + + cmd = ["dumpdata"] + models + ["--indent", "2"] + call_command(*cmd) diff --git a/application/src/tira_app/management/commands/git_runner_cli.py b/application/src/tira_app/management/commands/git_runner_cli.py new file mode 100644 index 000000000..831afe867 --- /dev/null +++ b/application/src/tira_app/management/commands/git_runner_cli.py @@ -0,0 +1,309 @@ +import json +import logging + +from django.core.cache import cache +from django.core.management.base import BaseCommand +from slugify import slugify +from tqdm import tqdm + +from ...tira_model import ( + add_input_run_id_to_all_rerank_runs, + create_re_rank_output_on_dataset, + get_git_integration, + load_refresh_timestamp_for_cache_key, +) +from ...util import get_tira_id + +logger = logging.getLogger("tira") + + +class Command(BaseCommand): + """Run git_runner via cli. + Later this will become a fully fledged cli tool that we use as wrapper in the repository. 
+ At the moment, we just execute some predefined commands + """ + + def run_command_create_user_repository(self, options, git_runner): + print(f'Create a user repository for {options["create_user_repository"]}.') + repo_id = git_runner.create_user_repository(options["create_user_repository"]) + print(f"The new repository has the id ${repo_id}") + print( + git_runner.add_new_tag_to_docker_image_repository( + "registry.webis.de/code-research/tira/tira-user-del-maik-user-repo/my-software", + "0.0.3", + "0.0.1-tira-docker-software-id-name-x", + ) + ) + print("Images: " + str(git_runner.docker_images_in_user_repository(options["create_user_repository"]))) + + def run_command_create_task_repository(self, options, git_runner): + print(f'Create a task-repository for {options["create_task_repository"]}.') + repo_id = git_runner.create_task_repository(options["create_task_repository"]) + print(f"The new task-repository has the id ${repo_id}") + + def run_command_running_jobs(self, options, git_runner): + if "user_id" not in options or not options["user_id"]: + raise ValueError("Please pass --user_id as argument.") + + print(list(git_runner.yield_all_running_pipelines(options["running_jobs"], options["user_id"], cache, True))) + + print(load_refresh_timestamp_for_cache_key(cache, "all-running-pipelines-repo-" + options["running_jobs"])) + + def run_command_stop_job_and_clean_up(self, options, git_runner): + if "user_id" not in options or not options["user_id"]: + raise ValueError("Please pass --user_id as argument.") + + if "run_id" not in options or not options["run_id"]: + raise ValueError("Please pass --run_id as argument.") + + git_runner.stop_job_and_clean_up(options["stop_job_and_clean_up"], options["user_id"], options["run_id"]) + + def archive_repository_add_images_from_git_repo(self, options): + from ... import model as modeldb + + with open(options["archive_repository_add_images_from_git_repo"], "r") as f: + for line in tqdm(f): + data = json.loads(line) + if "docker-software-" not in data["TIRA_SOFTWARE_ID"]: + print("Skip") + continue + + docker_software_id = int(data["TIRA_SOFTWARE_ID"].split("docker-software-")[1]) + software = modeldb.DockerSoftware.objects.get(docker_software_id=docker_software_id) + if ( + data["TIRA_COMMAND_TO_EXECUTE"] != software.command + or not data["TIRA_IMAGE_TO_EXECUTE"].startswith(software.user_image_name) + or data["TIRA_IMAGE_TO_EXECUTE"] != software.tira_image_name + ): + print("Skip") + continue + + software.public_image_name = data["TIRA_IMAGE_TO_EXECUTE_IN_DOCKERHUB"] + software.public_image_size = max(data["image_details"]["size"], data["image_details"]["virtual_size"]) + software.save() + + def archive_docker_software(self, approach, git_runner): + from ... 
import model as modeldb + from ...util import docker_image_details + + task_id, vm_id, name = approach.split("/") + software = modeldb.DockerSoftware.objects.filter( + vm__vm_id=vm_id, task__task_id=task_id, display_name=name, deleted=False + ) + + if len(software) != 1: + raise ValueError(f"Found {software} but expected a single entry.") + + software = software[0] + if software.public_image_name and software.public_image_size: + print(f'Software "{approach}" is already public.') + return + + print(software) + image_name = (slugify(software.tira_image_name)).replace("/", "-") + dockerhub_image = f"docker.io/webis/{task_id}-submissions:" + image_name.split("-tira-user-")[1].strip() + + software_definition = { + "TIRA_IMAGE_TO_EXECUTE": software.tira_image_name, + "TIRA_IMAGE_TO_EXECUTE_IN_DOCKERHUB": dockerhub_image, + } + git_runner.archive_software( + "/tmp/", software_definition, download_images=True, persist_images=False, upload_images=True + ) + image_metadata = docker_image_details(software.tira_image_name) + + print(image_metadata) + print(image_name) + print(dockerhub_image) + software.public_image_name = dockerhub_image + software.public_image_size = image_metadata["size"] + software.save() + + def handle(self, *args, **options): + if "organization" not in options or not options["organization"]: + raise ValueError("Please pass --organization") + + git_runner = get_git_integration(options["organization"], None) + print(f"Use {git_runner}.") + + if "archive_repository" in options and options["archive_repository"]: + git_runner.archive_repository( + repo_name=options["archive_repository"], + working_directory="./" + options["archive_repository"], + download_images=options["archive_repository_download_images"].lower() == "true", + persist_images=options["archive_repository_persist_images"].lower() == "true", + upload_images=options["archive_repository_upload_images"].lower() == "true", + persist_datasets=options["archive_repository_persist_datasets"].lower() == "true", + copy_runs=options["archive_repository_copy_runs"].lower() == "true", + ) + + if "create_task_repository" in options and options["create_task_repository"]: + self.run_command_create_task_repository(options, git_runner) + + if "create_user_repository" in options and options["create_user_repository"]: + self.run_command_create_user_repository(options, git_runner) + + if "running_jobs" in options and options["running_jobs"]: + self.run_command_running_jobs(options, git_runner) + + if "stop_job_and_clean_up" in options and options["stop_job_and_clean_up"]: + self.run_command_stop_job_and_clean_up(options, git_runner) + + if ( + "archive_repository_add_images_from_git_repo" in options + and options["archive_repository_add_images_from_git_repo"] + ): + self.archive_repository_add_images_from_git_repo(options) + + if "archive_docker_software" in options and options["archive_docker_software"]: + self.archive_docker_software(options["archive_docker_software"], git_runner) + + if "run_image" in options and options["run_image"]: + git_runner.start_git_workflow( + task_id="clickbait-spoiling", + dataset_id="task-1-type-classification-validation-20220924-training", + vm_id="princess-knight", + run_id=get_tira_id(), + git_runner_image="webis/pan-clickbait-spoiling-evaluator:0.0.10", + git_runner_command="bash -c '/clickbait-spoiling-eval.py --task 2 --ground_truth_spoiler $inputDataset --input_run $inputRun --output_prototext ${outputDir}/evaluation.prototext'", + git_repository_id=2761, + 
evaluator_id="task-2-spoiler-generation-validation-20220924-training-evaluator", + user_image_to_execute="registry.webis.de/code-research/tira/tira-user-princess-knight/naive-baseline-task2:0.0.1-tira-docker-software-id-genteel-upstream", + user_command_to_execute=( + "/naive-baseline-task-2.py --input $inputDataset/input.jsonl --output $outputDir/run.jsonl" + ), + tira_software_id="17", + resources="small-resources-gpu", + ) + + if "clean_repository" in options and options["clean_repository"]: + # raise ValueError('ToDo: please insert the git authentication token with the name "tira-automation-bot-gitlab-admin-token" (maiks keepass) to git_runner.py method get_git_runner' + git_runner.clean_task_repository(options["clean_repository"]) + + if "docker_images_in_user_repository" in options and options["docker_images_in_user_repository"]: + print(git_runner.docker_images_in_user_repository(options["docker_images_in_user_repository"])) + + if "rerank" in options and options["rerank"]: + docker_software_id = 244 # "BM25 (tira-ir-starter-pyterrier)" + # Execute once in k8s: ./manage.py git_runner_cli --organization webis --rerank true + # Copy File + # Comment out dataset id + # configure ir-dataset: add ir_datasets image, ir_datasets re-ranking command, ir_datasets resources + # For new datasets: INSERT INTO tira_dockersoftware (`command`, `display_name`, `user_image_name`, `tira_image_name`, `deleted`, `task_id`, `vm_id`, `description`, `paper_link`, `ir_re_ranker`, `ir_re_ranking_input`) VALUES ('tbd', 'Anserini MS-MARCO Dev', 'tbd', 'tbd', 0, 'reneuir-2024', 'froebe', 'tbd', '', 0, 1); + # db statement: SELECT * FROM tira_run WHERE run_id LIKE '%rerank-%'; + # re-run with update re-ranking jobs + datasets = [ + "cranfield-20230107-training", + "antique-test-20230107-training", + "vaswani-20230107-training", + "msmarco-passage-trec-dl-2019-judged-20230107-training", + "medline-2004-trec-genomics-2004-20230107-training", + "wapo-v2-trec-core-2018-20230107-training", + "cord19-fulltext-trec-covid-20230107-training", + "disks45-nocr-trec7-20230209-training", + "disks45-nocr-trec8-20230209-training", + "disks45-nocr-trec-robust-2004-20230209-training", + "nfcorpus-test-20230107-training", + "argsme-touche-2020-task-1-20230209-training", + "argsme-touche-2021-task-1-20230209-training", + "msmarco-passage-trec-dl-2020-judged-20230107-training", + "medline-2004-trec-genomics-2005-20230107-training", + "gov-trec-web-2002-20230209-training", + "gov-trec-web-2003-20230209-training", + "gov-trec-web-2004-20230209-training", + "gov2-trec-tb-2006-20230209-training", + "gov2-trec-tb-2004-20230209-training", + "gov2-trec-tb-2005-20230209-training", + "medline-2017-trec-pm-2017-20230211-training", + "medline-2017-trec-pm-2018-20230211-training", + "clueweb12-trec-misinfo-2019-20240214-training", + "longeval-heldout-20230513-training", + "longeval-long-september-20230513-training", + "longeval-short-july-20230513-training", + "longeval-train-20230513-training", + "trec-tip-of-the-tongue-dev-20230607-training", + "longeval-2023-06-20240418-training", + "longeval-2023-08-20240418-training", + "ir-acl-anthology-topics-leipzig-20240423-test", + "ir-acl-anthology-topics-leipzig-20240423-test", + "ir-acl-anthology-topics-augsburg-20240525_0-test", + "ir-acl-anthology-20240504-training", + "ir-acl-anthology-topics-koeln-20240614-test", + "ms-marco-100-queries-20240629-training", # /mnt/ceph/tira/data/runs/ms-marco-100-queries-20240629-training/froebe/2024-06-30-22-13-09-rerank-2024-06-30-22-23-08 + 
"ms-marco-1000-queries-20240629-training", # /mnt/ceph/tira/data/runs/ms-marco-1000-queries-20240629-training/froebe/2024-06-30-22-14-54-rerank-2024-06-30-23-07-44 + "ms-marco-all-dev-queries-20240629-training", # /mnt/ceph/tira/data/runs/dl-top-10-docs-20240701-training/tira-ir-starter/2024-07-01-15-45-55-rerank-2024-07-02-10-40-56 + "dl-top-10-docs-20240701-training", # /mnt/ceph/tira/data/runs/dl-top-10-docs-20240701-training/froebe/2024-07-01-15-45-55-rerank-2024-07-02-10-40-56 + "dl-top-100-docs-20240701-training", # /mnt/ceph/tira/data/runs/dl-top-100-docs-20240701-training/tira-ir-starter/2024-07-01-15-46-44-rerank-2024-07-02-10-49-03 + "dl-top-1000-docs-20240701-training", # /mnt/ceph/tira/data/runs/dl-top-1000-docs-20240701-training/froebe/2024-07-01-15-47-04-rerank-2024-07-02-10-53-30 + ] + for dataset in datasets: + print(dataset) + tmp = create_re_rank_output_on_dataset( + task_id="ir-benchmarks", + vm_id="tira-ir-starter", + software_id=None, + docker_software_id=docker_software_id, + dataset_id=dataset, + ) + if tmp: + print(f'/mnt/ceph/tira/data/runs/{tmp["dataset_id"]}/{tmp["vm_id"]}/{tmp["run_id"]}/') + + docker_software_id = 242 # "ChatNoir" + datasets = [ + "clueweb09-en-trec-web-2009-20230107-training", + "clueweb09-en-trec-web-2010-20230107-training", + "clueweb09-en-trec-web-2011-20230107-training", + "clueweb09-en-trec-web-2012-20230107-training", + "clueweb12-trec-web-2013-20230107-training", + "clueweb12-trec-web-2014-20230107-training", + "clueweb12-touche-2020-task-2-20230209-training", + "clueweb12-touche-2021-task-2-20230209-training", + ] + for dataset in datasets: + print(dataset) + tmp = create_re_rank_output_on_dataset( + task_id="ir-benchmarks", + vm_id="tira-ir-starter", + software_id=None, + docker_software_id=docker_software_id, + dataset_id=dataset, + ) + if tmp: + print(f'/mnt/ceph/tira/data/runs/{tmp["dataset_id"]}/{tmp["vm_id"]}/{tmp["run_id"]}/') + + print( + git_runner.extract_configuration_of_finished_job( + 2979, + dataset_id="clinicaltrials-2017-trec-pm-2017-20230107-training", + vm_id="tira-ir-starter", + run_id="2023-01-12-15-02-11", + ) + ) + + print("\n\nReranking Datasets:\n\n") + + # for i in get_all_reranking_datasets(True).items(): + # print(i) + + add_input_run_id_to_all_rerank_runs() + + def add_arguments(self, parser): + parser.add_argument("--create_task_repository", default=None, type=str) + parser.add_argument("--create_user_repository", default=None, type=str) + parser.add_argument("--clean_repository", default=None, type=str) + parser.add_argument("--run_image", default=None, type=str) + parser.add_argument("--archive_repository", default=None, type=str) + parser.add_argument("--archive_repository_download_images", default="false", type=str) + parser.add_argument("--archive_repository_persist_images", default="false", type=str) + parser.add_argument("--archive_repository_upload_images", default="false", type=str) + parser.add_argument("--archive_repository_add_images_from_git_repo", default=None, type=str) + parser.add_argument("--archive_docker_software", default=None, type=str) + parser.add_argument("--archive_repository_persist_datasets", default="false", type=str) + parser.add_argument("--archive_repository_copy_runs", default="false", type=str) + parser.add_argument("--running_jobs", default=None, type=str) + parser.add_argument("--stop_job_and_clean_up", default=None, type=str) + parser.add_argument("--user_id", default=None, type=str) + parser.add_argument("--run_id", default=None, type=str) + 
parser.add_argument("--docker_images_in_user_repository", default=None, type=str) + parser.add_argument("--organization", default=None, type=str) + parser.add_argument("--rerank", default=None, type=str) diff --git a/application/src/tira/management/commands/grpc_mock_host.py b/application/src/tira_app/management/commands/grpc_mock_host.py similarity index 80% rename from application/src/tira/management/commands/grpc_mock_host.py rename to application/src/tira_app/management/commands/grpc_mock_host.py index 5fac9335f..f25a61766 100644 --- a/application/src/tira/management/commands/grpc_mock_host.py +++ b/application/src/tira_app/management/commands/grpc_mock_host.py @@ -1,13 +1,14 @@ -from django.conf import settings -from concurrent import futures -import grpc import logging +import time +from concurrent import futures from contextlib import contextmanager + +import grpc +from django.conf import settings from django.core.management.base import BaseCommand -import time -from tira.proto import tira_host_pb2_grpc -from tira.grpc.test_grpc_host_server import TiraHostService +from ...grpc.test_grpc_host_server import TiraHostService +from ...proto import tira_host_pb2_grpc grpc_host_port = settings.HOST_GRPC_PORT @@ -26,15 +27,15 @@ def serve_forever(host_addr): class Command(BaseCommand): - help = 'api server' + help = "api server" def handle(self, *args, **options): - host_addr = f'[::]:{grpc_host_port}' + host_addr = f"[::]:{grpc_host_port}" with serve_forever(host_addr): logger.info(f"Starting mock host server on {host_addr}") self.stdout.write(self.style.SUCCESS(f"Starting tira mock host server on {host_addr}")) try: while True: - time.sleep(60*60*24) + time.sleep(60 * 60 * 24) except KeyboardInterrupt: - pass \ No newline at end of file + pass diff --git a/application/src/tira/management/commands/grpc_server.py b/application/src/tira_app/management/commands/grpc_server.py similarity index 74% rename from application/src/tira/management/commands/grpc_server.py rename to application/src/tira_app/management/commands/grpc_server.py index dd993af62..19f59ecdf 100644 --- a/application/src/tira/management/commands/grpc_server.py +++ b/application/src/tira_app/management/commands/grpc_server.py @@ -1,17 +1,17 @@ -from django.conf import settings -from concurrent import futures -import grpc import logging import time +from concurrent import futures from contextlib import contextmanager -from django.core.management.base import BaseCommand, CommandError -from django.core.management import call_command -from tira.proto import tira_host_pb2_grpc -from tira.grpc.grpc_server import TiraApplicationService +import grpc +from django.conf import settings +from django.core.management.base import BaseCommand + +from ...grpc.grpc_server import TiraApplicationService +from ...proto import tira_host_pb2_grpc grpc_port = settings.APPLICATION_GRPC_PORT -listen_addr = f'[::]:{grpc_port}' +listen_addr = f"[::]:{grpc_port}" logger = logging.getLogger("grpc_server") @@ -27,7 +27,7 @@ def serve_forever(): class Command(BaseCommand): - help = 'api server' + help = "api server" def handle(self, *args, **options): with serve_forever(): @@ -35,6 +35,6 @@ def handle(self, *args, **options): self.stdout.write(self.style.SUCCESS(f"Starting tira-application server on {listen_addr}")) try: while True: - time.sleep(60*60*24) + time.sleep(60 * 60 * 24) except KeyboardInterrupt: pass diff --git a/application/src/tira/management/commands/index_model.py b/application/src/tira_app/management/commands/index_model.py similarity index 
63% rename from application/src/tira/management/commands/index_model.py rename to application/src/tira_app/management/commands/index_model.py index 4e6f30221..e4ddddd18 100644 --- a/application/src/tira/management/commands/index_model.py +++ b/application/src/tira_app/management/commands/index_model.py @@ -1,7 +1,8 @@ -from django.conf import settings import logging -from django.core.management.base import BaseCommand + +from django.conf import settings from django.core.management import call_command +from django.core.management.base import BaseCommand grpc_app_port = settings.APPLICATION_GRPC_PORT @@ -9,11 +10,12 @@ class Command(BaseCommand): - help = 'api server' + help = "api server" def handle(self, *args, **options): - call_command('makemigrations') - call_command('makemigrations', 'tira') - call_command('migrate') - from tira.data.HybridDatabase import HybridDatabase + call_command("makemigrations") + call_command("makemigrations", "tira") + call_command("migrate") + from ...data.HybridDatabase import HybridDatabase + HybridDatabase().create_model() diff --git a/application/src/tira_app/management/commands/ir_datasets_loader_cli.py b/application/src/tira_app/management/commands/ir_datasets_loader_cli.py new file mode 100644 index 000000000..968904bab --- /dev/null +++ b/application/src/tira_app/management/commands/ir_datasets_loader_cli.py @@ -0,0 +1,124 @@ +import json +import logging +from pathlib import Path + +from django.core.management.base import BaseCommand + +from ...ir_datasets_loader import IrDatasetsLoader + +logger = logging.getLogger("tira") + + +class Command(BaseCommand): + """Run ir_datasets_loader via cli. + Loads a dataset by a given ir_datasets ID and maps the data to standardized formats + in preparation to full-rank or re-rank operations with PyTerrier + + @param --ir_dataset_id: required, string: the dataset ID as used by ir_datasets + @param --output_dataset_path: optional, string: the path to the directory where the output will be stored + @param --output_dataset_truth_path: optional, string: the path to the directory where the output will be stored + @param --include_original {True}: optional, boolean: flag to signal, if the original data should be included + @param --rerank: optional, string: if used, mapping will be in preparation for re-ranking operations and a path to + file with TREC-run formatted data is required + """ + + def import_dataset_for_fullrank( + self, + ir_datasets_id: str, + output_dataset_path: Path, + output_dataset_truth_path: Path, + include_original: bool, + skip_documents: bool, + skip_qrels: bool, + skip_duplicate_ids: bool, + allowlist_path_ids: bool, + ): + print( + "Task: Full-Rank -> create files: \n documents.jsonl \n queries.jsonl \n qrels.txt \n at" + f" {output_dataset_path}/" + ) + datasets_loader = IrDatasetsLoader() + datasets_loader.load_dataset_for_fullrank( + ir_datasets_id, + output_dataset_path, + output_dataset_truth_path, + include_original, + skip_documents=skip_documents, + skip_qrels=skip_qrels, + skip_duplicate_ids=skip_duplicate_ids, + allowlist_path_ids=allowlist_path_ids, + ) + + def import_dataset_for_rerank( + self, + ir_datasets_id: str, + output_dataset_path: Path, + output_dataset_truth_path: Path, + include_original: bool, + run_file: Path, + skip_qrels: bool, + ): + print(f"Task: Re-Rank -> create files: \n rerank.jsonl \n qrels.txt \n at {output_dataset_path}/") + datasets_loader = IrDatasetsLoader() + datasets_loader.load_dataset_for_rerank( + ir_datasets_id, output_dataset_path, 
output_dataset_truth_path, include_original, run_file + ) + + def contains_all_required_args(self, options): + if "input_dataset_directory" in options and options["input_dataset_directory"]: + metadata = json.load(open(options["input_dataset_directory"] + "/metadata.json")) + options["ir_datasets_id"] = metadata["ir_datasets_id"] + options["include_original"] = metadata.get("include_original", "true") + + return "ir_datasets_id" in options and options["ir_datasets_id"] + + def handle(self, *args, **options): + if not self.contains_all_required_args(options): + raise ValueError("Could not handle options" + str(options)) + return + + truth_path = ( + Path(options["output_dataset_truth_path"]) + if "output_dataset_truth_path" in options and options["output_dataset_truth_path"] + else None + ) + output_path = ( + Path(options["output_dataset_path"]) + if "output_dataset_path" in options and options["output_dataset_path"] + else None + ) + + skip_qrels = options["skip_qrels"] or str(options["output_dataset_truth_path"]).strip() == "/tmp" + + if options["rerank"]: + self.import_dataset_for_rerank( + options["ir_datasets_id"], + output_path, + truth_path, + options["include_original"].lower() == "true", + options["rerank"], + skip_qrels=skip_qrels, + ) + else: + self.import_dataset_for_fullrank( + options["ir_datasets_id"], + output_path, + truth_path, + options["include_original"].lower() == "true", + skip_documents=options["skip_documents"], + skip_qrels=skip_qrels, + skip_duplicate_ids=options["skip_duplicate_ids"], + allowlist_path_ids=options["allowlist_path_ids"], + ) + + def add_arguments(self, parser): + parser.add_argument("--ir_datasets_id", default=None, type=str) + parser.add_argument("--output_dataset_path", default=None, type=Path) + parser.add_argument("--output_dataset_truth_path", default="/tmp", type=Path) + parser.add_argument("--include_original", default="True", type=str) + parser.add_argument("--skip_documents", default=False, type=bool) + parser.add_argument("--skip_qrels", default=False, type=bool) + parser.add_argument("--input_dataset_directory", default=None, type=str) + parser.add_argument("--skip_duplicate_ids", default=True, type=bool) + parser.add_argument("--rerank", default=None, type=Path) + parser.add_argument("--allowlist_path_ids", default=None, type=Path, required=False) diff --git a/application/src/tira/management/commands/irds_cli.sh b/application/src/tira_app/management/commands/irds_cli.sh similarity index 100% rename from application/src/tira/management/commands/irds_cli.sh rename to application/src/tira_app/management/commands/irds_cli.sh diff --git a/application/src/tira_app/management/commands/playground.py b/application/src/tira_app/management/commands/playground.py new file mode 100644 index 000000000..9be832c3e --- /dev/null +++ b/application/src/tira_app/management/commands/playground.py @@ -0,0 +1,36 @@ +from django.core.management.base import BaseCommand + +# TODO: can I be removed? 
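Editor's note on the ir_datasets_loader_cli command above: it takes its options either directly (--ir_datasets_id, --output_dataset_path, --output_dataset_truth_path, ...) or via --input_dataset_directory pointing to a folder with a metadata.json. A minimal sketch of a programmatic full-rank invocation through Django's call_command, assuming a configured Django settings module; the dataset id and output paths are illustrative placeholders:

from django.core.management import call_command

# Full-rank import: writes documents.jsonl, queries.jsonl, queries.xml and, if available, qrels.txt.
call_command(
    "ir_datasets_loader_cli",
    ir_datasets_id="vaswani",
    output_dataset_path="/tmp/vaswani-import/inputs",
    output_dataset_truth_path="/tmp/vaswani-import/truth",
    include_original="false",
)

Options that are not passed keep their argparse defaults (e.g. --rerank stays None, so the full-rank branch is taken); passing --rerank with a TREC run file instead would switch to the re-rank branch documented in the command's docstring.
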
+ + +class Command(BaseCommand): + """Runs some playground command.""" + + def handle(self, *args, **options): + from ...git_runner import all_git_runners + + g = all_git_runners() + assert len(g) == 1 + for ( + i + ) in ( + [] + ): # ['ul-nostalgic-turing', 'ul-trusting-neumann', 'ul-dreamy-zuse', 'ul-lucid-lovelace', 'ul-dazzling-euclid', 'ul-kangaroo-query-crew', 'ul-graceful-galileo', 'ul-suspicious-shannon', 'ul-the-golden-retrievers', 'ul-confident-torvalds']: + g[0].create_user_repository(i) + + # class tmp(): + # body= '{"group": "ir-lab-sose-2023-armafira", "team": "a", "username": "mf2", "email": "del-me", "affiliation": "mf2", "country": "c", "employment": "e", "participation": "p", "instructorName": "i", "instructorEmail": "i", "questions": ""}' + # session = {} + + # print(tmp().body) + # + # request = tmp() + # context = {'user_id': 'mf2'} + # print(add_registration(request, context, 'ir-lab-jena-leipzig-sose-2023', 'del-me-maik')) + + # from ...ir_datasets_loader import run_irds_command + # run_irds_command('tmp-test-maik', 'pssda', 'webis/tira-ir-datasets-starter:0.0.45-pangram', '/irds_cli.sh --skip_qrels true --ir_datasets_id pangrams --output_dataset_path $outputDir', '/tmp/sda-1/1/') + # run_irds_command('tmp-test-maik', 'pssda', 'webis/tira-ir-datasets-starter:0.0.45-pangram', '/irds_cli.sh --skip_documents true --ir_datasets_id pangrams --output_dataset_truth_path $outputDir', '/tmp/sda-1/2/') + + def add_arguments(self, parser): + pass diff --git a/application/src/tira/management/commands/run_develop.py b/application/src/tira_app/management/commands/run_develop.py similarity index 53% rename from application/src/tira/management/commands/run_develop.py rename to application/src/tira_app/management/commands/run_develop.py index 7f6333e24..7e4bee582 100644 --- a/application/src/tira/management/commands/run_develop.py +++ b/application/src/tira_app/management/commands/run_develop.py @@ -1,14 +1,8 @@ -from django.conf import settings -from concurrent import futures -import grpc import logging -import time -from contextlib import contextmanager -from django.core.management.base import BaseCommand, CommandError -from django.core.management import call_command -from tira.proto import tira_host_pb2_grpc -from tira.grpc.grpc_server import TiraApplicationService +from django.conf import settings +from django.core.management import call_command +from django.core.management.base import BaseCommand grpc_app_port = settings.APPLICATION_GRPC_PORT @@ -16,10 +10,10 @@ class Command(BaseCommand): - help = 'api server' + help = "api server" def handle(self, *args, **options): - app_addr = f'[::]:{grpc_app_port}' + app_addr = f"[::]:{grpc_app_port}" logger.info(f"Starting tira-application server on {app_addr}") self.stdout.write(self.style.SUCCESS(f"Starting tira-application server on {app_addr}")) - call_command('runserver', "8080") + call_command("runserver", "0.0.0.0:8080") diff --git a/application/src/tira/management/commands/run_mockup.py b/application/src/tira_app/management/commands/run_mockup.py similarity index 63% rename from application/src/tira/management/commands/run_mockup.py rename to application/src/tira_app/management/commands/run_mockup.py index bed67ebb3..774bf499a 100644 --- a/application/src/tira/management/commands/run_mockup.py +++ b/application/src/tira_app/management/commands/run_mockup.py @@ -1,15 +1,15 @@ -from django.conf import settings -from concurrent import futures -import grpc import logging -import time +from concurrent import futures from contextlib 
import contextmanager -from django.core.management.base import BaseCommand, CommandError + +import grpc +from django.conf import settings from django.core.management import call_command +from django.core.management.base import BaseCommand -from tira.proto import tira_host_pb2_grpc -from tira.grpc.grpc_server import TiraApplicationService -from tira.grpc.test_grpc_host_server import TiraHostService +from ...grpc.grpc_server import TiraApplicationService +from ...grpc.test_grpc_host_server import TiraHostService +from ...proto import tira_host_pb2_grpc grpc_app_port = settings.APPLICATION_GRPC_PORT grpc_host_port = settings.HOST_GRPC_PORT @@ -35,14 +35,18 @@ def serve_forever(app_addr, host_addr): class Command(BaseCommand): - help = 'api server' + help = "api server" def handle(self, *args, **options): - call_command('makemigrations') - call_command('migrate') - app_addr = f'[::]:{grpc_app_port}' - host_addr = f'[::]:{grpc_host_port}' + call_command("makemigrations") + call_command("migrate") + app_addr = f"[::]:{grpc_app_port}" + host_addr = f"[::]:{grpc_host_port}" with serve_forever(app_addr, host_addr): logger.info(f"Starting tira-application server on {app_addr} and mock host server on {host_addr}") - self.stdout.write(self.style.SUCCESS(f"Starting tira-application server on {app_addr} and mock host server on {host_addr}")) - call_command('runserver', "8080") + self.stdout.write( + self.style.SUCCESS( + f"Starting tira-application server on {app_addr} and mock host server on {host_addr}" + ) + ) + call_command("runserver", "0.0.0.0:8080") diff --git a/application/src/tira_app/management/commands/run_to_evaluations.py b/application/src/tira_app/management/commands/run_to_evaluations.py new file mode 100644 index 000000000..fd64a0cca --- /dev/null +++ b/application/src/tira_app/management/commands/run_to_evaluations.py @@ -0,0 +1,25 @@ +import json + +from django.core.management.base import BaseCommand + +from ... 
import tira_model as model + + +class Command(BaseCommand): + help = "export run to evaluations" + + def handle(self, *args, **options): + ret = {} + for dataset in model.get_datasets_by_task("ir-benchmarks", return_only_names=True): + mapping = {} + submissions = model.get_vms_with_reviews(dataset["dataset_id"]) + for submission in submissions: + for run in submission["runs"]: + if "is_evaluation" not in run or not run["is_evaluation"]: + continue + if run["input_run_id"] not in mapping: + mapping[run["input_run_id"]] = [] + mapping[run["input_run_id"]] += [run["run_id"]] + ret[dataset["dataset_id"]] = mapping + with open("run-to-evaluations.json", "w") as f: + f.write(json.dumps(ret)) diff --git a/application/src/tira/model.py b/application/src/tira_app/model.py similarity index 89% rename from application/src/tira/model.py rename to application/src/tira_app/model.py index 8fbdee261..fdc2154cf 100644 --- a/application/src/tira/model.py +++ b/application/src/tira_app/model.py @@ -1,15 +1,7 @@ -from google.protobuf.text_format import Parse -from google.protobuf.json_format import MessageToDict -from pathlib import Path import logging -from django.conf import settings -from django.db import models -from django.core.exceptions import ValidationError -import socket -from datetime import datetime -from tira.proto import TiraClientWebMessages_pb2 as modelpb -from tira.proto import tira_host_pb2 as model_host +from django.core.exceptions import ValidationError +from django.db import models logger = logging.getLogger("tira") # Transition is powering_on (3), powering_off (4), sandboxing (5), unsandboxing (6), executing (7) @@ -20,7 +12,7 @@ def _validate_transition_state(value): if value not in transition_states: - raise ValidationError('%(value)s is not a transition state', params={'value': value}) + raise ValidationError("%(value)s is not a transition state", params={"value": value}) class TransactionLog(models.Model): @@ -52,31 +44,32 @@ class Meta: class GitIntegration(models.Model): namespace_url = models.CharField(max_length=280, primary_key=True) - host = models.CharField(max_length=100, default='') - private_token = models.CharField(max_length=100, default='') - user_name = models.CharField(max_length=100, default='') - user_password = models.CharField(max_length=100, default='') + host = models.CharField(max_length=100, default="") + private_token = models.CharField(max_length=100, default="") + user_name = models.CharField(max_length=100, default="") + user_password = models.CharField(max_length=100, default="") gitlab_repository_namespace_id = models.IntegerField(default=None, null=True) - image_registry_prefix = models.CharField(max_length=150, default='') - user_repository_branch = models.CharField(max_length=100, default='main') + image_registry_prefix = models.CharField(max_length=150, default="") + user_repository_branch = models.CharField(max_length=100, default="main") class Organizer(models.Model): organizer_id = models.CharField(max_length=280, primary_key=True) - name = models.CharField(max_length=100, default='tira') - years = models.CharField(max_length=30, default='2022') - web = models.CharField(max_length=300, default='https://www.tira.io') + name = models.CharField(max_length=100, default="tira") + years = models.CharField(max_length=30, default="2022") + web = models.CharField(max_length=300, default="https://www.tira.io") git_integrations = models.ManyToManyField(GitIntegration, default=None) class VirtualMachine(models.Model): - """ This is the equivalent of a 'user' 
object (for legacy reasons). + """This is the equivalent of a 'user' object (for legacy reasons). Typically, only the vm_id is set. The vm_id is the equivalent of the user name and ends in '-default' if there is no virtual machine assigned to this user. """ + vm_id = models.CharField(max_length=280, primary_key=True) - user_password = models.CharField(max_length=280, default='tira') - roles = models.CharField(max_length=100, default='guest') + user_password = models.CharField(max_length=280, default="tira") + roles = models.CharField(max_length=100, default="guest") host = models.CharField(max_length=100, default=None, null=True) admin_name = models.CharField(max_length=100, default=None, null=True) admin_pw = models.CharField(max_length=280, default=None, null=True) @@ -92,24 +85,29 @@ class Task(models.Model): task_description = models.TextField(default="") vm = models.ForeignKey(VirtualMachine, on_delete=models.SET_NULL, null=True) organizer = models.ForeignKey(Organizer, on_delete=models.SET_NULL, null=True) - web = models.CharField(max_length=150, default='') + web = models.CharField(max_length=150, default="") featured = models.BooleanField(default=False) require_registration = models.BooleanField(default=False) -# Set to true = users can not submit without a group + # Set to true = users can not submit without a group require_groups = models.BooleanField(default=False) -# True = users can not create their own groups, they must join the given set + # True = users can not create their own groups, they must join the given set restrict_groups = models.BooleanField(default=False) max_std_out_chars_on_test_data = models.IntegerField(default=0) max_std_err_chars_on_test_data = models.IntegerField(default=0) max_file_list_chars_on_test_data = models.IntegerField(default=0) command_placeholder = models.TextField(default="mySoftware -c $inputDataset -r $inputRun -o $outputDir") - command_description = models.TextField(default="Available variables: $inputDataset, $inputRun, $outputDir, $dataServer, and $token.") + command_description = models.TextField( + default=( + "Available variables: $inputDataset, $inputRun, $outputDir," + " $dataServer, and $token." + ) + ) allowed_task_teams = models.TextField(default="") dataset_label = models.CharField(max_length=280, default="Input dataset") max_std_out_chars_on_test_data_eval = models.IntegerField(default=0) max_std_err_chars_on_test_data_eval = models.IntegerField(default=0) max_file_list_chars_on_test_data_eval = models.IntegerField(default=0) - is_ir_task = models.BooleanField(default=False) + is_ir_task = models.BooleanField(default=False) irds_re_ranking_image = models.CharField(max_length=150, default="") irds_re_ranking_command = models.CharField(max_length=150, default="") irds_re_ranking_resource = models.CharField(max_length=150, default="") @@ -180,13 +178,14 @@ class Software(models.Model): deleted = models.BooleanField(default=False) class Meta: - unique_together = (("software_id", "vm", 'task'),) + unique_together = (("software_id", "vm", "task"),) class Upload(models.Model): """ - The dataset is only associated for compatibility with Software. It's probably always none. 
""" + vm = models.ForeignKey(VirtualMachine, on_delete=models.CASCADE) task = models.ForeignKey(Task, on_delete=models.SET_NULL, null=True) dataset = models.ForeignKey(Dataset, on_delete=models.SET_NULL, null=True) @@ -220,14 +219,18 @@ class DockerSoftware(models.Model): class DockerSoftwareHasAdditionalInput(models.Model): position = models.AutoField(primary_key=True) - docker_software = models.ForeignKey(DockerSoftware, on_delete=models.CASCADE, related_name='+') - input_docker_software = models.ForeignKey(DockerSoftware, on_delete=models.CASCADE, default=None, null=True, related_name='+') + docker_software = models.ForeignKey(DockerSoftware, on_delete=models.CASCADE, related_name="+") + input_docker_software = models.ForeignKey( + DockerSoftware, on_delete=models.CASCADE, default=None, null=True, related_name="+" + ) input_upload = models.ForeignKey(Upload, on_delete=models.RESTRICT, default=None, null=True) + class DiscourseTokenForUser(models.Model): vm_id = models.OneToOneField(VirtualMachine, on_delete=models.CASCADE, primary_key=True) token = models.CharField(max_length=250) + class SoftwareSubmissionGitRepository(models.Model): repository_url = models.CharField(max_length=500, primary_key=True) vm = models.ForeignKey(VirtualMachine, on_delete=models.CASCADE) @@ -253,6 +256,7 @@ class SoftwareClone(models.Model): """ - This allows to import/export existing software to other tasks. """ + vm = models.ForeignKey(VirtualMachine, on_delete=models.CASCADE) task = models.ForeignKey(Task, on_delete=models.SET_NULL, null=True) docker_software = models.ForeignKey(DockerSoftware, on_delete=models.CASCADE, default=None, null=True) @@ -263,6 +267,7 @@ class HuggingFaceModelsOfSoftware(models.Model): """ - The Huggingface models to mount into some software. 
""" + docker_software = models.ForeignKey(DockerSoftware, on_delete=models.CASCADE, default=None, null=True) hf_home = models.CharField(max_length=250, default="") mount_hf_model = models.TextField(default="") @@ -276,7 +281,7 @@ class Run(models.Model): upload = models.ForeignKey(Upload, on_delete=models.CASCADE, null=True) docker_software = models.ForeignKey(DockerSoftware, on_delete=models.CASCADE, null=True) input_dataset = models.ForeignKey(Dataset, on_delete=models.SET_NULL, null=True) - input_run = models.ForeignKey('self', on_delete=models.CASCADE, null=True) + input_run = models.ForeignKey("self", on_delete=models.CASCADE, null=True) task = models.ForeignKey(Task, on_delete=models.SET_NULL, null=True) downloadable = models.BooleanField(default=False) deleted = models.BooleanField(default=False) diff --git a/application/src/tira_app/permissions.py b/application/src/tira_app/permissions.py new file mode 100644 index 000000000..f4f85a553 --- /dev/null +++ b/application/src/tira_app/permissions.py @@ -0,0 +1,20 @@ +from django.http import HttpRequest +from rest_framework.permissions import SAFE_METHODS, BasePermission + + +class ReadOnly(BasePermission): + def has_permission(self, request: HttpRequest, view): + return request.method in SAFE_METHODS + + +class IsOrganizer(BasePermission): + + def has_permission(self, request, view): + return True + + def has_object_permission(self, request: HttpRequest, view, obj) -> bool: + print(request) + print(view) + print(obj) + # TODO: implement + return False diff --git a/application/src/tira/proto/TiraClientWebMessages_pb2.py b/application/src/tira_app/proto/TiraClientWebMessages_pb2.py similarity index 100% rename from application/src/tira/proto/TiraClientWebMessages_pb2.py rename to application/src/tira_app/proto/TiraClientWebMessages_pb2.py diff --git a/application/src/tira/proto/TiraClientWebMessages_pb2_grpc.py b/application/src/tira_app/proto/TiraClientWebMessages_pb2_grpc.py similarity index 100% rename from application/src/tira/proto/TiraClientWebMessages_pb2_grpc.py rename to application/src/tira_app/proto/TiraClientWebMessages_pb2_grpc.py diff --git a/application/src/tira/proto/TiraHostMessages_pb2.py b/application/src/tira_app/proto/TiraHostMessages_pb2.py similarity index 100% rename from application/src/tira/proto/TiraHostMessages_pb2.py rename to application/src/tira_app/proto/TiraHostMessages_pb2.py diff --git a/application/src/tira/proto/TiraHostMessages_pb2_grpc.py b/application/src/tira_app/proto/TiraHostMessages_pb2_grpc.py similarity index 100% rename from application/src/tira/proto/TiraHostMessages_pb2_grpc.py rename to application/src/tira_app/proto/TiraHostMessages_pb2_grpc.py diff --git a/application/src/tira/proto/__init__.py b/application/src/tira_app/proto/__init__.py similarity index 100% rename from application/src/tira/proto/__init__.py rename to application/src/tira_app/proto/__init__.py diff --git a/application/src/tira/proto/tira_host_pb2.py b/application/src/tira_app/proto/tira_host_pb2.py similarity index 100% rename from application/src/tira/proto/tira_host_pb2.py rename to application/src/tira_app/proto/tira_host_pb2.py diff --git a/application/src/tira/proto/tira_host_pb2_grpc.py b/application/src/tira_app/proto/tira_host_pb2_grpc.py similarity index 100% rename from application/src/tira/proto/tira_host_pb2_grpc.py rename to application/src/tira_app/proto/tira_host_pb2_grpc.py diff --git a/application/src/tira/proto/tira_messages_pb2.py b/application/src/tira_app/proto/tira_messages_pb2.py similarity index 
100% rename from application/src/tira/proto/tira_messages_pb2.py rename to application/src/tira_app/proto/tira_messages_pb2.py diff --git a/application/src/tira/proto/tira_to_web_pb2.py b/application/src/tira_app/proto/tira_to_web_pb2.py similarity index 100% rename from application/src/tira/proto/tira_to_web_pb2.py rename to application/src/tira_app/proto/tira_to_web_pb2.py diff --git a/application/src/tira/proto/tira_to_web_pb2_grpc.py b/application/src/tira_app/proto/tira_to_web_pb2_grpc.py similarity index 100% rename from application/src/tira/proto/tira_to_web_pb2_grpc.py rename to application/src/tira_app/proto/tira_to_web_pb2_grpc.py diff --git a/application/src/tirex-components.yml b/application/src/tira_app/res/tirex-components.yml similarity index 100% rename from application/src/tirex-components.yml rename to application/src/tira_app/res/tirex-components.yml diff --git a/application/src/tira/templates/tira/git-repo-template/Dockerfile b/application/src/tira_app/templates/tira/git-repo-template/Dockerfile similarity index 100% rename from application/src/tira/templates/tira/git-repo-template/Dockerfile rename to application/src/tira_app/templates/tira/git-repo-template/Dockerfile diff --git a/application/src/tira/templates/tira/git-repo-template/README.md b/application/src/tira_app/templates/tira/git-repo-template/README.md similarity index 100% rename from application/src/tira/templates/tira/git-repo-template/README.md rename to application/src/tira_app/templates/tira/git-repo-template/README.md diff --git a/application/src/tira/templates/tira/git-repo-template/github-action.yml b/application/src/tira_app/templates/tira/git-repo-template/github-action.yml similarity index 100% rename from application/src/tira/templates/tira/git-repo-template/github-action.yml rename to application/src/tira_app/templates/tira/git-repo-template/github-action.yml diff --git a/application/src/tira_app/templates/tira/git-repo-template/script.py b/application/src/tira_app/templates/tira/git-repo-template/script.py new file mode 100644 index 000000000..6ccd4fac9 --- /dev/null +++ b/application/src/tira_app/templates/tira/git-repo-template/script.py @@ -0,0 +1,21 @@ +import argparse + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(prog="script") + parser.add_argument("-i", "--input", required=True, help="the input to the script.") + parser.add_argument("-o", "--output", required=True, help="the output of the script.") + + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + + print( + f"This is a demo, I ignore the passed input {args.input} and write some content into the output file" + f" {args.output}." + ) + with open(args.output + "/predictions.jsonl", "w") as f: + f.write("hello world") + print(f'Done. 
I wrote "hello world" to {args.output}/predictions.jsonl.') diff --git a/application/src/tira/templates/tira/git_task_repository_gitlab_ci.yml b/application/src/tira_app/templates/tira/git_task_repository_gitlab_ci.yml similarity index 100% rename from application/src/tira/templates/tira/git_task_repository_gitlab_ci.yml rename to application/src/tira_app/templates/tira/git_task_repository_gitlab_ci.yml diff --git a/application/src/tira/templates/tira/git_task_repository_readme.md b/application/src/tira_app/templates/tira/git_task_repository_readme.md similarity index 100% rename from application/src/tira/templates/tira/git_task_repository_readme.md rename to application/src/tira_app/templates/tira/git_task_repository_readme.md diff --git a/application/src/tira/templates/tira/git_user_repository_readme.md b/application/src/tira_app/templates/tira/git_user_repository_readme.md similarity index 100% rename from application/src/tira/templates/tira/git_user_repository_readme.md rename to application/src/tira_app/templates/tira/git_user_repository_readme.md diff --git a/application/src/tira_app/templates/tira/tira_git_cmd.py b/application/src/tira_app/templates/tira/tira_git_cmd.py new file mode 100644 index 000000000..20f0b9d28 --- /dev/null +++ b/application/src/tira_app/templates/tira/tira_git_cmd.py @@ -0,0 +1,268 @@ +import json +import os +import shutil +import sys +import tempfile +from glob import glob +from pathlib import Path + +import docker +import pandas as pd +from packaging import version + + +def all_softwares(): + ret = [] + for software_id, software_definition in ___load_softwares().items(): + ret += [ + { + "approach": software_id, + "team": software_definition["TIRA_VM_ID"], + "image": software_definition["TIRA_IMAGE_TO_EXECUTE"], + "command": software_definition["TIRA_COMMAND_TO_EXECUTE"], + } + ] + + return pd.DataFrame(ret) + + +def all_datasets(): + ret = [] + for i in glob("*/training-datasets/"): + cnt = 0 + for j in glob(i + "*"): + cnt += len(list(open(j))) + + ret += [{"dataset": i.split("/training-datasets/")[0], "records": cnt}] + + return pd.DataFrame(ret).sort_values("dataset") + + +def ___load_softwares(): + softwares = [json.loads(i) for i in open(".tira/submitted-software.jsonl")] + + return {i["TIRA_TASK_ID"] + "/" + i["TIRA_VM_ID"] + "/" + i["TIRA_SOFTWARE_NAME"]: i for i in softwares} + + +def load_data(approach): + ret = [] + + for i in glob(approach + "*/training-datasets-truth/*.json*"): + ret += [pd.read_json(i, orient="records", lines=True)] + + return pd.concat(ret) + + +def __num(s): + try: + return int(s) + except ValueError: + try: + return float(s) + except ValueError: + return s + + +def __load_evaluators(): + evaluators = [json.loads(i) for i in open(".tira/evaluators.jsonl")] + ret = {i["TIRA_DATASET_ID"]: i for i in evaluators} + + for evaluator in evaluators: + dataset_id = evaluator["TIRA_DATASET_ID"] + current_version = version.parse(ret[dataset_id]["TIRA_EVALUATION_IMAGE_TO_EXECUTE"].split(":")[-1]) + available_version = version.parse(evaluator["TIRA_EVALUATION_IMAGE_TO_EXECUTE"].split(":")[-1]) + + if available_version > current_version: + ret[dataset_id] = evaluator + + return ret + + +def __load_job_data(job_file): + job = [i.split("=") for i in open(job_file, "r")] + return {k.strip(): v.strip() for k, v in job} + + +def all_evaluated_appraoches(): + id_to_software_name = { + int(i["TIRA_SOFTWARE_ID"].split("docker-software-")[1]): i["TIRA_SOFTWARE_NAME"] + for i in ___load_softwares().values() + } + ret = [] + for evaluation in 
glob("*/*/*/evaluation"): + job_dir = glob(evaluation + "/../job-executed-on*.txt") + if len(job_dir) != 1: + raise ValueError("Can not handle multiple job definitions: ", job_dir) + + job_definition = __load_job_data(job_dir[0]) + job_identifier = ( + job_definition["TIRA_TASK_ID"] + + "/" + + job_definition["TIRA_VM_ID"] + + "/" + + id_to_software_name[int(job_definition["TIRA_SOFTWARE_ID"].split("docker-software-")[1])] + ) + + for eval_run in glob(f"{evaluation}/*/output/"): + + try: + i = {"approach": job_identifier, "dataset": job_definition["TIRA_DATASET_ID"]} + i.update(__load_output(eval_run, evaluation=True)) + ret += [i] + except Exception: + pass + + return pd.DataFrame(ret) + + +def all_evaluators(): + ret = [] + for i in __load_evaluators().values(): + ret += [ + { + "dataset": i["TIRA_DATASET_ID"], + "image": i["TIRA_EVALUATION_IMAGE_TO_EXECUTE"], + "command": i["TIRA_EVALUATION_COMMAND_TO_EXECUTE"], + } + ] + + return pd.DataFrame(ret) + + +def __extract_image_and_command(identifier, evaluator=False): + softwares = ___load_softwares() if not evaluator else __load_evaluators() + + if identifier in softwares and not evaluator: + return softwares[identifier]["TIRA_IMAGE_TO_EXECUTE"], softwares[identifier]["TIRA_COMMAND_TO_EXECUTE"] + if evaluator: + for k, v in softwares.items(): + if k.startswith(identifier): + return ( + v["TIRA_DATASET_ID"], + v["TIRA_EVALUATION_IMAGE_TO_EXECUTE"], + v["TIRA_EVALUATION_COMMAND_TO_EXECUTE"], + ) + + raise ValueError( + f'There is no {("evaluator" if evaluator else "software")} identified by "{identifier}". Choices are:' + f" {sorted(list(softwares))}" + ) + + +def __load_output(directory, evaluation=False, verbose=False): + files = glob(str(directory) + "/*") + + if evaluation: + files = [i for i in files if i.endswith(".prototext")] + + if len(files) != 1: + raise ValueError("Expected exactly one output file. 
Got: ", files) + + files = files[0] + + if verbose: + print(f"Read file from {files}") + + if evaluation: + ret = {} + for i in [i for i in open(files, "r").read().split("measure") if "key:" in i and "value:" in i]: + key = i.split("key:")[1].split("value")[0].split('"')[1] + value = i.split("key:")[1].split("value")[1].split('"')[1] + + ret[key.strip()] = __num(value.strip()) + + return ret + else: + return pd.read_json(files, lines=True, orient="records") + + +def __normalize_command(cmd): + to_normalize = { + "inputRun": "/tira-data/output", + "outputDir": "/tira-data/output", + "inputDataset": "/tira-data/input", + } + + if "inputRun" in cmd: + to_normalize["outputDir"] = "/tira-data/eval_output" + to_normalize["inputDataset"] = "/tira-data/input_truth" + + for k, v in to_normalize.items(): + cmd = cmd.replace("$" + k, v).replace("${" + k + "}", v) + + return cmd + + +def persist_dataset(data, verbose): + tmp_dir = Path(tempfile.TemporaryDirectory().name) + input_dir = tmp_dir / "input" + output_dir = tmp_dir / "output" + eval_output_dir = tmp_dir / "eval_output" + + os.makedirs(str(output_dir.absolute()), exist_ok=True) + os.makedirs(str(eval_output_dir.absolute()), exist_ok=True) + + if isinstance(data, pd.DataFrame): + if verbose: + print(f"Write {len(data)} records to {input_dir}/input.jsonl") + os.makedirs(str(input_dir.absolute()), exist_ok=True) + data.to_json(input_dir / "input.jsonl", lines=True, orient="records") + shutil.copytree(input_dir, tmp_dir / "input_truth") + else: + shutil.copytree(Path(data) / "training-datasets", input_dir) + shutil.copytree(Path(data) / "training-datasets-truth", tmp_dir / "input_truth") + + return tmp_dir + + +def run(identifier=None, image=None, command=None, data=None, evaluate=False, verbose=False): + if image is None or command is None: + image, command = __extract_image_and_command(identifier) + try: + environ = os.environ.copy() + if sys.platform == "linux" and os.path.exists(os.path.expanduser("~/.docker/desktop/docker.sock")): + environ["DOCKER_HOST"] = "unix:///" + os.path.expanduser("~/.docker/desktop/docker.sock") + client = docker.from_env(environment=environ) + + assert len(client.images.list()) >= 0 + assert len(client.containers.list()) >= 0 + except Exception as e: + raise ValueError("It seems like docker is not installed?", e) + + data_dir = persist_dataset(data, verbose) + command = __normalize_command(command) + + if verbose: + print(f"Run software with: docker run --rm -ti -v INPUT_DIR:/tira-data --entrypoint sh {image} {command}") + + client.containers.run( + image, + entrypoint="sh", + command=f'-c "{command}"', + volumes={str(data_dir): {"bind": "/tira-data/", "mode": "rw"}}, + ) + + if evaluate: + if type(evaluate) is not str: + evaluate = data + evaluate, image, command = __extract_image_and_command(evaluate, evaluator=True) + command = __normalize_command(command) + if verbose: + print( + f"Evaluate software with: docker run --rm -ti -v INPUT_DIR:/tira-data --entrypoint sh {image} {command}" + ) + + client.containers.run( + image, + entrypoint="sh", + command=f'-c "{command}"', + volumes={str(data_dir): {"bind": "/tira-data/", "mode": "rw"}}, + ) + + if evaluate: + approach_name = identifier if identifier else f'"{command}"@{image}' + eval_results = {"approach": approach_name, "evaluate": evaluate} + eval_results.update(__load_output(Path(data_dir) / "eval_output", evaluation=True, verbose=verbose)) + return __load_output(Path(data_dir) / "output", verbose=verbose), pd.DataFrame([eval_results]) + else: + return 
__load_output(Path(data_dir) / "output", verbose=verbose) diff --git a/application/src/tira/templates/tira/tira_git_cmd.sh b/application/src/tira_app/templates/tira/tira_git_cmd.sh similarity index 100% rename from application/src/tira/templates/tira/tira_git_cmd.sh rename to application/src/tira_app/templates/tira/tira_git_cmd.sh diff --git a/application/src/tira/templates/tira/tira_git_makefile b/application/src/tira_app/templates/tira/tira_git_makefile similarity index 100% rename from application/src/tira/templates/tira/tira_git_makefile rename to application/src/tira_app/templates/tira/tira_git_makefile diff --git a/application/src/tira/templates/tira/tira_git_tutorial.ipynb b/application/src/tira_app/templates/tira/tira_git_tutorial.ipynb similarity index 100% rename from application/src/tira/templates/tira/tira_git_tutorial.ipynb rename to application/src/tira_app/templates/tira/tira_git_tutorial.ipynb diff --git a/application/src/tira/tira_data.py b/application/src/tira_app/tira_data.py similarity index 62% rename from application/src/tira/tira_data.py rename to application/src/tira_app/tira_data.py index 2c5ed19bc..e0bcf9b0d 100644 --- a/application/src/tira/tira_data.py +++ b/application/src/tira_app/tira_data.py @@ -1,7 +1,9 @@ -from pathlib import Path import logging +from pathlib import Path + from django.conf import settings -from tira.endpoints.stdout_beautifier import beautify_ansi_text + +from .endpoints.stdout_beautifier import beautify_ansi_text logger = logging.getLogger("tira") @@ -10,38 +12,37 @@ def get_run_runtime(dataset_id, vm_id, run_id): - """ loads a runtime file (runtime.txt) and parses the string to return time, runtime_info""" - run_dir = (RUNS_DIR_PATH / dataset_id / vm_id / run_id) - context = {"time": '0', "cpu": '0', "pagefaults": '0', "swaps": '0', 'error': ''} + """loads a runtime file (runtime.txt) and parses the string to return time, runtime_info""" + run_dir = RUNS_DIR_PATH / dataset_id / vm_id / run_id + context = {"time": "0", "cpu": "0", "pagefaults": "0", "swaps": "0", "error": ""} if not (run_dir / "runtime.txt").exists(): return context - runtime = open(run_dir / "runtime.txt", 'r').read() + runtime = open(run_dir / "runtime.txt", "r").read() try: - context['time'] = runtime.split(" ")[2].strip("elapsed") - context['cpu'] = runtime.split(" ")[3] - context['pagefaults'] = runtime.split(" ")[6].strip("pagefaults").strip("(").strip(")") - context['swaps'] = runtime.split(" ")[7].strip("swaps") + context["time"] = runtime.split(" ")[2].strip("elapsed") + context["cpu"] = runtime.split(" ")[3] + context["pagefaults"] = runtime.split(" ")[6].strip("pagefaults").strip("(").strip(")") + context["swaps"] = runtime.split(" ")[7].strip("swaps") except IndexError as e: logger.exception(f"IndexError while parsing the runtime file {run_dir}/runtime.txt: {e}") - context['error'] = "IndexError while parsing the runtime file {run_dir}/runtime.txt" + context["error"] = "IndexError while parsing the runtime file {run_dir}/runtime.txt" return context def get_run_file_list(dataset_id, vm_id, run_id): - """ load the 2 files that describe the outputof a run: + """load the 2 files that describe the outputof a run: - file-list.txt (ascii-view of the files) and - size.txt (has line count, file count, subdir count) returns a dict with the variables: size, lines, fines, dirs, file_list """ - run_dir = (RUNS_DIR_PATH / dataset_id / vm_id / run_id) + run_dir = RUNS_DIR_PATH / dataset_id / vm_id / run_id try: size = open(run_dir / "size.txt").read().split("\n") except 
Exception as e: - logger.error(f"Failed to read output size.txt of: {dataset_id} -- {vm_id} -- {run_id}\n" - f"with error: {e}") + logger.error(f"Failed to read output size.txt of: {dataset_id} -- {vm_id} -- {run_id}\nwith error: {e}") size = ["No output could be found for this run or output was corrupted", None, None, None, None] if not (run_dir / "file-list.txt").exists(): @@ -57,32 +58,32 @@ def get_run_file_list(dataset_id, vm_id, run_id): def get_stdout(dataset_id, vm_id, run_id): # TODO: Don't open whole file but only read the n last lines to not have a full xGB file in memory output_lines = 100 - run_dir = (RUNS_DIR_PATH / dataset_id / vm_id / run_id) + run_dir = RUNS_DIR_PATH / dataset_id / vm_id / run_id if not (run_dir / "stdout.txt").exists(): return "No Stdout recorded" - with open(run_dir / "stdout.txt", 'r') as stdout_file: + with open(run_dir / "stdout.txt", "r") as stdout_file: stdout = stdout_file.readlines() stdout_len = len(stdout) - stdout = ''.join([f"[{max(stdout_len - output_lines, 0)} more lines]\n"] + stdout[-output_lines:]) + stdout = "".join([f"[{max(stdout_len - output_lines, 0)} more lines]\n"] + stdout[-output_lines:]) if not stdout: return "No Stdout recorded" return beautify_ansi_text(stdout) def get_stderr(dataset_id, vm_id, run_id): - run_dir = (RUNS_DIR_PATH / dataset_id / vm_id / run_id) + run_dir = RUNS_DIR_PATH / dataset_id / vm_id / run_id if not (run_dir / "stderr.txt").exists(): return "No Stderr recorded" - stderr = open(run_dir / "stderr.txt", 'r').read() + stderr = open(run_dir / "stderr.txt", "r").read() if not stderr: return "No Stderr recorded" return beautify_ansi_text(stderr) def get_tira_log(dataset_id, vm_id, run_id): -# TODO: read log once it has a fixed position -# log_path = -# with open(log_path, 'r') as log: -# l = log.read() -# return l + # TODO: read log once it has a fixed position + # log_path = + # with open(log_path, 'r') as log: + # l = log.read() + # return l return "foo" diff --git a/application/src/tira/tira_model.py b/application/src/tira_app/tira_model.py similarity index 52% rename from application/src/tira/tira_model.py rename to application/src/tira_app/tira_model.py index aeaff946c..bf96f9242 100644 --- a/application/src/tira/tira_model.py +++ b/application/src/tira_app/tira_model.py @@ -1,54 +1,59 @@ """ p.stat().st_mtime - change time """ -from pathlib import Path + +import datetime import logging -from tira.data.HybridDatabase import HybridDatabase -from django.core.cache import cache -from tira.git_runner import get_git_runner, get_git_runner_for_software_integration +import tempfile +from distutils.dir_util import copy_tree +from pathlib import Path +from typing import Any, Optional + import randomname +from discourse_client_in_disraptor import DiscourseApiClient from django.conf import settings +from django.core.cache import BaseCache, cache from django.db import connections, router -import datetime -from tira.util import get_tira_id, run_cmd_as_documented_background_process, register_run -import tempfile -from distutils.dir_util import copy_tree from slugify import slugify -from discourse_client_in_disraptor import DiscourseApiClient + +from .data.HybridDatabase import HybridDatabase +from .git_runner import get_git_runner, get_git_runner_for_software_integration +from .util import get_tira_id, register_run logger = logging.getLogger("tira") model = HybridDatabase() + # reloading and reindexing def build_model(): - """ reconstruct the caches and the database. 
""" + """reconstruct the caches and the database.""" model.build_model() def reload_vms(): - """ reload VM and user data from the export format of the model """ + """reload VM and user data from the export format of the model""" model.reload_vms() def reload_datasets(): - """ reload dataset data from the export format of the model """ + """reload dataset data from the export format of the model""" model.reload_datasets() def reload_tasks(): - """ reload task data from the export format of the model """ + """reload task data from the export format of the model""" model.reload_tasks() def reload_runs(vm_id): - """ reload run data for a VM from the export format of the model """ + """reload run data for a VM from the export format of the model""" model.reload_runs(vm_id) # get methods are the public interface. def get_vm(vm_id: str, create_if_none=False): - """ Returns a vm as dictionary with: + """Returns a vm as dictionary with: {"vm_id", "user_password", "roles", "host", "admin_name", "admin_pw", "ip", "ssh", "rdp", "archived"} @@ -67,32 +72,32 @@ def get_run(dataset_id: str, vm_id: str, run_id: str, return_deleted: bool = Fal def get_task(task_id: str, include_dataset_stats=False) -> dict: - """ Get a dict with the task data as follows: + """Get a dict with the task data as follows: {"task_id", "task_name", "task_description", "organizer", "web", "year", "dataset_count", "software_count", "max_std_out_chars_on_test_data", "max_std_err_chars_on_test_data", "max_file_list_chars_on_test_data", "command_placeholder", "command_description", "dataset_label", "max_std_out_chars_on_test_data_eval", "max_std_err_chars_on_test_data_eval", "max_file_list_chars_on_test_data_eval"} - """ + """ return model.get_task(task_id, include_dataset_stats) -def get_dataset(dataset_id: str) -> dict: - """ Return a Dataset as dict with the keys: +def get_dataset(dataset_id: str) -> dict[str, Any]: + """Return a Dataset as dict with the keys: - {"display_name", "evaluator_id", "dataset_id", "is_confidential", "is_deprecated", "year", - "task".task_id, 'organizer', "software_count"} - """ + {"display_name", "evaluator_id", "dataset_id", "is_confidential", "is_deprecated", "year", + "task".task_id, 'organizer', "software_count"} + """ return model.get_dataset(dataset_id) def get_datasets() -> dict: - """ Get a dict of dataset_id: dataset_json_descriptor """ + """Get a dict of dataset_id: dataset_json_descriptor""" return model.get_datasets() -def get_datasets_by_task(task_id: str, include_deprecated=False, return_only_names=False) -> list: - """ return the list of datasets associated with this task_id +def get_datasets_by_task(task_id: str, include_deprecated=False, return_only_names=False) -> list[dict[str, Any]]: + """return the list of datasets associated with this task_id @param task_id: id string of the task the dataset belongs to @param include_deprecated: Default False. If True, also returns datasets marked as deprecated. 
@return: a list of json-formatted datasets, as returned by get_dataset @@ -107,7 +112,9 @@ def load_refresh_timestamp_for_cache_key(cache, key): quote_name = connection.ops.quote_name with connection.cursor() as cursor: - cursor.execute("SELECT %s FROM %s WHERE %s = '%s'" % ( + cursor.execute( + "SELECT %s FROM %s WHERE %s = '%s'" + % ( quote_name("expires"), quote_name(cache._table), quote_name("cache_key"), @@ -117,8 +124,8 @@ def load_refresh_timestamp_for_cache_key(cache, key): ret = cursor.fetchall() if len(ret) > 0: - return ret[0][0] - datetime.timedelta(seconds=settings.CACHES['default']['TIMEOUT']) - except: + return ret[0][0] - datetime.timedelta(seconds=settings.CACHES["default"]["TIMEOUT"]) + except Exception: pass return datetime.datetime.now() @@ -128,25 +135,31 @@ def discourse_api_client(): return DiscourseApiClient(url=settings.DISCOURSE_API_URL, api_key=settings.DISRAPTOR_API_KEY) -def tira_run_command(image, command, task_id): +def tira_run_command(image: str, command: str, task_id: str): input_dataset = reference_dataset(task_id) - return f'tira-run \\\n --input-dataset {input_dataset} \\\n --image {image} \\\n --command \'{command}\'' + return f"tira-run \\\n --input-dataset {input_dataset} \\\n --image {image} \\\n --command '{command}'" -def reference_dataset(task_id): + +def reference_dataset(task_id: str): if task_id in settings.REFERENCE_DATASETS: return settings.REFERENCE_DATASETS[task_id] else: available_datasets = get_datasets_by_task(task_id) - available_datasets = [i['dataset_id'] for i in available_datasets if i['dataset_id'].endswith('-training') and not i['is_confidential'] and not i['is_deprecated']] + available_datasets = [ + i["dataset_id"] + for i in available_datasets + if i["dataset_id"].endswith("-training") and not i["is_confidential"] and not i["is_deprecated"] + ] if len(available_datasets) > 0: - return f'{task_id}/{available_datasets[0]}' + return f"{task_id}/{available_datasets[0]}" else: - return f'{task_id}/ADD-DATASET-ID-HERE' + return f"{task_id}/ADD-DATASET-ID-HERE" + def tira_docker_registry_token(docker_software_help): - ret = docker_software_help.split('docker login -u ')[1].split(' -p') - return ret[0].strip(), ret[1].split(' ')[0].strip() + ret = docker_software_help.split("docker login -u ")[1].split(" -p") + return ret[0].strip(), ret[1].split(" ")[0].strip() def load_docker_data(task_id, vm_id, cache, force_cache_refresh): @@ -162,14 +175,19 @@ def load_docker_data(task_id, vm_id, cache, force_cache_refresh): """ if not git_pipeline_is_enabled_for_task(task_id, cache, force_cache_refresh): return False - + git_runner = get_git_integration(task_id=task_id) - docker_images = [i for i in git_runner.docker_images_in_user_repository(vm_id, cache, force_cache_refresh) if '-tira-docker-software-id-' not in i['image']] - last_refresh = load_refresh_timestamp_for_cache_key(cache, 'docker-images-in-user-repository-tira-user-' + vm_id) + docker_images = [ + i + for i in git_runner.docker_images_in_user_repository(vm_id, cache, force_cache_refresh) + if "-tira-docker-software-id-" not in i["image"] + ] + last_refresh = load_refresh_timestamp_for_cache_key(cache, "docker-images-in-user-repository-tira-user-" + vm_id) docker_software_help = git_runner.help_on_uploading_docker_image(vm_id, cache, force_cache_refresh) public_docker_softwares = model.get_public_docker_softwares(task_id) - docker_login = 'docker login' + docker_software_help.split('docker login')[1].split('')[0] + # removed for the moment as tira-cli uses the above already. 
+ # docker_login = "docker login" + docker_software_help.split("docker login")[1].split("")[0] return { "docker_images": docker_images, @@ -179,15 +197,19 @@ def load_docker_data(task_id, vm_id, cache, force_cache_refresh): "docker_registry_user": tira_docker_registry_token(docker_software_help)[0], "docker_registry_token": tira_docker_registry_token(docker_software_help)[1], "public_docker_softwares": public_docker_softwares, - "task_is_an_information_retrieval_task": True if get_task(task_id, False).get('is_ir_task', False) else False, - "docker_images_next_refresh": str(None if last_refresh is None else (last_refresh + datetime.timedelta(seconds=60))), - "tira_initial_run_example": '# This example shows how to execute the baseline on a small example dataset.\n' + - '# Please adjust the --image and --command parameters accordingly.\n' + - tira_run_command('YOUR-IMAGE', 'YOUR-COMMAND', task_id), - "tira_final_run_example": #'# The configuration of your software is final, please do a final test:\n' + - #docker_login + '\n' + - '# Please append "--push true" to your previous tira-run command to upload your software.\n# I.e., the --image and --command parameters are as before.\n' + - tira_run_command('YOUR-IMAGE', 'YOUR-COMMAND', task_id) + ' \\\n --push true', + "task_is_an_information_retrieval_task": True if get_task(task_id, False).get("is_ir_task", False) else False, + "docker_images_next_refresh": str( + None if last_refresh is None else (last_refresh + datetime.timedelta(seconds=60)) + ), + "tira_initial_run_example": "# This example shows how to execute the baseline on a small example dataset.\n" + + "# Please adjust the --image and --command parameters accordingly.\n" + + tira_run_command("YOUR-IMAGE", "YOUR-COMMAND", task_id), + "tira_final_run_example": # '# The configuration of your software is final, please do a final test:\n' + + # docker_login + '\n' + + '# Please append "--push true" to your previous tira-run command to upload your software.\n# I.e., the --image' + " and --command parameters are as before.\n" + + tira_run_command("YOUR-IMAGE", "YOUR-COMMAND", task_id) + + " \\\n --push true", } @@ -201,31 +223,40 @@ def get_discourse_token_for_user(vm_id, disraptor_user): if ret: return ret - disraptor_description = disraptor_user + '-repo-' + vm_id + disraptor_description = disraptor_user + "-repo-" + vm_id discourse_api_key = discourse_api_client().generate_api_key(disraptor_user, disraptor_description) model.create_discourse_token_for_user(vm_id, discourse_api_key) return model.get_discourse_token_for_user(vm_id) + def get_submission_git_repo(vm_id, task_id, disraptor_user=None, external_owner=None, private=True): - user_repository_name = slugify(task_id) + '-' + slugify(vm_id) - repository_url = settings.CODE_SUBMISSION_REPOSITORY_NAMESPACE + '/' + user_repository_name + user_repository_name = slugify(task_id) + "-" + slugify(vm_id) + repository_url = settings.CODE_SUBMISSION_REPOSITORY_NAMESPACE + "/" + user_repository_name ret = model.get_submission_git_repo_or_none(repository_url, vm_id) - if ret and 'repo_url' in ret or (not disraptor_user and not external_owner): + if ret and "repo_url" in ret or (not disraptor_user and not external_owner): return ret docker_data = load_docker_data(task_id, vm_id, cache, force_cache_refresh=False) - docker_registry_user = docker_data['docker_registry_user'] - docker_registry_token = docker_data['docker_registry_token'] + docker_registry_user = docker_data["docker_registry_user"] + docker_registry_token = 
docker_data["docker_registry_token"] reference_repository = settings.CODE_SUBMISSION_REFERENCE_REPOSITORIES[task_id] - disraptor_description = disraptor_user + '-repo-' + task_id + '-' + vm_id + disraptor_description = disraptor_user + "-repo-" + task_id + "-" + vm_id discourse_api_key = discourse_api_client().generate_api_key(disraptor_user, disraptor_description) - model.create_submission_git_repo(repository_url, vm_id, docker_registry_user, docker_registry_token, - discourse_api_key, reference_repository, external_owner, - disraptor_user, disraptor_description) + model.create_submission_git_repo( + repository_url, + vm_id, + docker_registry_user, + docker_registry_token, + discourse_api_key, + reference_repository, + external_owner, + disraptor_user, + disraptor_description, + ) ret = model.get_submission_git_repo_or_none(repository_url, vm_id, return_object=True) g = get_git_runner_for_software_integration() @@ -237,12 +268,12 @@ def get_submission_git_repo(vm_id, task_id, disraptor_user=None, external_owner= dockerhub_token=docker_registry_token, dockerhub_user=docker_registry_user, tira_client_token=discourse_api_key, - repository_search_prefix='', + repository_search_prefix="", tira_user_name=vm_id, tira_task_id=task_id, tira_code_repository_id=repository_url, tira_client_user=disraptor_user, - private=private + private=private, ) ret.confirmed = True @@ -251,31 +282,31 @@ def get_submission_git_repo(vm_id, task_id, disraptor_user=None, external_owner= return model.get_submission_git_repo_or_none(repository_url, vm_id) -def git_pipeline_is_enabled_for_task(task_id, cache, force_cache_refresh=False): +def git_pipeline_is_enabled_for_task(task_id: str, cache: BaseCache, force_cache_refresh: bool = False): evaluators_for_task = get_evaluators_for_task(task_id, cache, force_cache_refresh) - git_runners_for_task = [i['is_git_runner'] for i in evaluators_for_task] + git_runners_for_task = [i["is_git_runner"] for i in evaluators_for_task] # We enable the docker part only if all evaluators use the docker variant. 
return len(git_runners_for_task) > 0 and all(i for i in git_runners_for_task) -def get_evaluators_for_task(task_id, cache, force_cache_refresh=False): - cache_key = 'get-evaluators-for-task-' + str(task_id) - ret = cache.get(cache_key) +def get_evaluators_for_task(task_id: str, cache: BaseCache, force_cache_refresh: bool = False): + cache_key = "get-evaluators-for-task-" + str(task_id) + ret = cache.get(cache_key) if ret is not None and not force_cache_refresh: return ret - + datasets = get_datasets_by_task(task_id) - + try: - ret = [get_evaluator(i['dataset_id']) for i in datasets] - except: + ret = [get_evaluator(i["dataset_id"]) for i in datasets] + except Exception: ret = [] logger.info(f"Cache refreshed for key {cache_key} ...") cache.set(cache_key, ret) - - return ret + + return ret def run_is_public_and_unblinded(run_id: str) -> bool: @@ -284,7 +315,7 @@ def run_is_public_and_unblinded(run_id: str) -> bool: """ try: return model.run_is_public_and_unblinded(run_id) - except: + except Exception: pass return False @@ -293,9 +324,9 @@ def run_is_public_and_unblinded(run_id: str) -> bool: def get_docker_software(docker_software_id: int) -> dict: """ Return the docker software as dict with keys: - + {'docker_software_id', 'display_name', 'user_image_name', 'command', 'tira_image_name', 'task_id', vm_id'} - """ + """ return model.get_docker_software(docker_software_id) @@ -311,37 +342,54 @@ def get_docker_software_by_name(name, vm_id, task_id) -> dict: def __formatted_error_message_for_missing_input_run(docker_software, input_run): - if 'input_docker_software_id' in docker_software and docker_software['input_docker_software_id']: - return f"The execution of your software depends on the execution of {docker_software['input_docker_software']}" + \ - f", but {docker_software['input_docker_software']} was never executed on this dataset. " + \ - f"Please execute first {docker_software['input_docker_software']} on your specified dataset. Found the input {input_run}." + if "input_docker_software_id" in docker_software and docker_software["input_docker_software_id"]: + return ( + f"The execution of your software depends on the execution of {docker_software['input_docker_software']}" + + f", but {docker_software['input_docker_software']} was never executed on this dataset. " + + f"Please execute first {docker_software['input_docker_software']} on your specified dataset. Found the" + f" input {input_run}." + ) else: - return f"The execution of your software depends on the upload of a manual run for the group of {docker_software['input_docker_software']}" + \ - f", but {docker_software['input_docker_software']} was not uploaded for this dataset. " + \ - f"Please upload first {docker_software['input_docker_software']} on your specified dataset. Found the input {input_run}." + return ( + "The execution of your software depends on the upload of a manual run for the group of" + f" {docker_software['input_docker_software']}" + + f", but {docker_software['input_docker_software']} was not uploaded for this dataset. " + + f"Please upload first {docker_software['input_docker_software']} on your specified dataset. Found the" + f" input {input_run}." 
+ ) def get_ordered_input_runs_of_software(docker_software, task_id, dataset_id, vm_id): input_runs, missing_input_runs = [], [] - if ('input_docker_software_id' in docker_software and docker_software['input_docker_software_id']) or ('input_upload_id' in docker_software and docker_software['input_upload_id']): - dsid = int(docker_software['input_docker_software_id']) if 'input_docker_software_id' in docker_software and docker_software['input_docker_software_id'] else None - uid = int(docker_software['input_upload_id']) if 'input_upload_id' in docker_software and docker_software['input_upload_id'] else None + if ("input_docker_software_id" in docker_software and docker_software["input_docker_software_id"]) or ( + "input_upload_id" in docker_software and docker_software["input_upload_id"] + ): + dsid = ( + int(docker_software["input_docker_software_id"]) + if "input_docker_software_id" in docker_software and docker_software["input_docker_software_id"] + else None + ) + uid = ( + int(docker_software["input_upload_id"]) + if "input_upload_id" in docker_software and docker_software["input_upload_id"] + else None + ) input_run = latest_output_of_software_on_dataset(task_id, None, None, dsid, dataset_id, uid) - if not input_run or not input_run.get('dataset_id', None) or not input_run.get('run_id', None): + if not input_run or not input_run.get("dataset_id", None) or not input_run.get("run_id", None): missing_input_runs += [__formatted_error_message_for_missing_input_run(docker_software, input_run)] else: - input_run['vm_id'] = model.get_run(run_id=input_run['run_id'], vm_id=None, dataset_id=None)['vm'] + input_run["vm_id"] = model.get_run(run_id=input_run["run_id"], vm_id=None, dataset_id=None)["vm"] input_runs += [input_run] - for (dsid, uid) in model.get_ordered_additional_input_runs_of_software(docker_software): + for dsid, uid in model.get_ordered_additional_input_runs_of_software(docker_software): input_run = latest_output_of_software_on_dataset(task_id, None, None, dsid, dataset_id, uid) - if not input_run or not input_run.get('dataset_id', None) or not input_run.get('run_id', None): + if not input_run or not input_run.get("dataset_id", None) or not input_run.get("run_id", None): missing_input_runs += [__formatted_error_message_for_missing_input_run(docker_software, input_run)] else: - input_run['vm_id'] = model.get_run(run_id=input_run['run_id'], vm_id=None, dataset_id=None)['vm'] + input_run["vm_id"] = model.get_run(run_id=input_run["run_id"], vm_id=None, dataset_id=None)["vm"] input_runs += [input_run] if not input_runs or len(input_runs) < 1: @@ -374,7 +422,7 @@ def get_organizer_list() -> list: def get_vm_list(): - """ load the vm-info file which stores all active vms as such: + """load the vm-info file which stores all active vms as such: \t[\t]\n ... 
@@ -384,7 +432,7 @@ def get_vm_list(): def get_vms_by_dataset(dataset_id: str) -> list: - """ return a list of vm_id's that have runs on this dataset """ + """return a list of vm_id's that have runs on this dataset""" return model.get_vms_by_dataset(dataset_id) @@ -393,13 +441,13 @@ def get_vm_runs_by_dataset(dataset_id: str, vm_id: str, return_deleted: bool = F def get_vm_runs_by_task(task_id: str, vm_id: str, return_deleted: bool = False) -> list: - """ returns a list of all the runs of a user over all datasets in json (as returned by _load_user_runs) """ + """returns a list of all the runs of a user over all datasets in json (as returned by _load_user_runs)""" return model.get_vm_runs_by_task(task_id, vm_id, return_deleted) def get_vms_with_reviews(dataset_id: str) -> list: - """ Get a list of all vms of a given dataset. VM's are given as a dict: - ``{vm_id: str, "runs": list of runs, unreviewed_count: int, blinded_count: int, published_count: int}`` + """Get a list of all vms of a given dataset. VM's are given as a dict: + ``{vm_id: str, "runs": list of runs, unreviewed_count: int, blinded_count: int, published_count: int}`` """ return model.get_vms_with_reviews(dataset_id) @@ -408,8 +456,8 @@ def get_evaluations_of_run(vm_id, run_id): return model.get_evaluations_of_run(vm_id, run_id) -def get_evaluator(dataset_id, task_id=None): - """ returns a dict containing the evaluator parameters: +def get_evaluator(dataset_id: str, task_id: Optional[str] = None) -> dict[str, Any]: + """returns a dict containing the evaluator parameters: vm_id: id of the master vm running the evaluator host: ip or hostname of the host @@ -420,14 +468,14 @@ def get_evaluator(dataset_id, task_id=None): def get_vm_evaluations_by_dataset(dataset_id, vm_id, only_public_results=True): - """ Return a dict of run_id: evaluation_results for the given vm on the given dataset + """Return a dict of run_id: evaluation_results for the given vm on the given dataset @param only_public_results: only return the measures for published datasets. """ return model.get_vm_evaluations_by_dataset(dataset_id, vm_id, only_public_results) def get_evaluations_with_keys_by_dataset(dataset_id, include_unpublished=False, show_only_unreviewed=False): - """ Get all evaluations and evaluation measures for all vms on the given dataset. + """Get all evaluations and evaluation measures for all vms on the given dataset. @param dataset_id: the dataset_id as used in tira_model @param include_unpublished: If True, the review status (published, blinded) is included in the evaluations. 
@@ -435,7 +483,9 @@ def get_evaluations_with_keys_by_dataset(dataset_id, include_unpublished=False, :returns: a tuple (ev_keys, evaluation), where ev-keys is a list of keys of the evaluation measure and evaluation a list of evaluations and each evaluation is a dict with {vm_id: str, run_id: str, measures: list} """ - return model.get_evaluations_with_keys_by_dataset(dataset_id, include_unpublished, show_only_unreviewed=show_only_unreviewed) + return model.get_evaluations_with_keys_by_dataset( + dataset_id, include_unpublished, show_only_unreviewed=show_only_unreviewed + ) def get_job_details(task_id, vm_id, job_id): @@ -443,7 +493,7 @@ def get_job_details(task_id, vm_id, job_id): def get_evaluation(run_id: str): - """ Get the evaluation of this run + """Get the evaluation of this run @param run_id: the id of the run @return: a dict with {measure_key: measure_value} @@ -454,6 +504,7 @@ def get_evaluation(run_id: str): def get_count_of_missing_reviews(task_id): return model.get_count_of_missing_reviews(task_id) + def get_count_of_team_submissions(task_id): return model.get_count_of_team_submissions(task_id) @@ -515,10 +566,10 @@ def get_docker_softwares(task_id, vm_id): def get_run_review(dataset_id: str, vm_id: str, run_id: str) -> dict: - """ Returns a review as dict with the following keys: + """Returns a review as dict with the following keys: - {"reviewer", "noErrors", "missingOutput", "extraneousOutput", "invalidOutput", "hasErrorOutput", - "otherErrors", "comment", "hasErrors", "hasWarnings", "hasNoErrors", "published", "blinded"} + {"reviewer", "noErrors", "missingOutput", "extraneousOutput", "invalidOutput", "hasErrorOutput", + "otherErrors", "comment", "hasErrors", "hasWarnings", "hasNoErrors", "published", "blinded"} """ return model.get_run_review(dataset_id, vm_id, run_id) @@ -529,61 +580,76 @@ def get_vm_reviews_by_dataset(dataset_id: str, vm_id: str) -> dict: def get_software(task_id, vm_id, software_id): - """ Returns the software of a vm on a task in json """ + """Returns the software of a vm on a task in json""" return model.get_software(task_id, vm_id, software_id) def get_software_by_task(task_id, vm_id): - """ Returns the software of a vm on a task in json """ + """Returns the software of a vm on a task in json""" return model.get_software_by_task(task_id, vm_id) -def get_users_vms(): - """ Return the users list. 
""" - return model.get_users_vms() - - -def add_upload(task_id, vm_id, rename_to: str = None): - """" Add empty new upload""" +def add_upload(task_id, vm_id, rename_to: Optional[str] = None): + """ " Add empty new upload""" return model.add_upload(task_id, vm_id, rename_to) def delete_upload(task_id, vm_id, upload_id): return model.delete_upload(task_id, vm_id, upload_id) + def update_upload_metadata(task_id, vm_id, upload_id, display_name, description, paper_link): return model.update_upload_metadata(task_id, vm_id, upload_id, display_name, description, paper_link) def add_uploaded_run(task_id, vm_id, dataset_id, upload_id, uploaded_file): - """ Add the uploaded file as a new result and return it """ + """Add the uploaded file as a new result and return it""" return model.add_uploaded_run(task_id, vm_id, dataset_id, upload_id, uploaded_file) -def update_docker_software_metadata(docker_software_id, display_name, description, paper_link, ir_re_ranker, ir_re_ranking_input): - return model.update_docker_software_metadata(docker_software_id, display_name, description, paper_link, ir_re_ranker, ir_re_ranking_input) +def update_docker_software_metadata( + docker_software_id, display_name, description, paper_link, ir_re_ranker, ir_re_ranking_input +): + return model.update_docker_software_metadata( + docker_software_id, display_name, description, paper_link, ir_re_ranker, ir_re_ranking_input + ) + def add_docker_software_mounts(docker_software, mounts): model.add_docker_software_mounts(docker_software, mounts) -def add_docker_software(task_id, vm_id, image, command, software_inputs=None, submission_git_repo=None, build_environment=None): - """ Add the docker software to the user of the vm and return it """ - image, old_tag = image.split(':') - new_tag = old_tag + '-tira-docker-software-id-' + randomname.get_name().lower() - - tira_image_name = get_git_integration(task_id=task_id).add_new_tag_to_docker_image_repository(image, old_tag, new_tag) +def add_docker_software( + task_id, vm_id, image, command, software_inputs=None, submission_git_repo=None, build_environment=None +): + """Add the docker software to the user of the vm and return it""" + + image, old_tag = image.split(":") + new_tag = old_tag + "-tira-docker-software-id-" + randomname.get_name().lower() + + tira_image_name = get_git_integration(task_id=task_id).add_new_tag_to_docker_image_repository( + image, old_tag, new_tag + ) input_docker_job, input_upload = {}, {} if software_inputs: for software_num, software_input in zip(range(len(software_inputs)), software_inputs): - if type(software_input) != int and 'upload' in software_input: - input_upload[software_num] = software_input.split('-')[-1] + if not isinstance(software_input, int) and "upload" in software_input: + input_upload[software_num] = software_input.split("-")[-1] else: input_docker_job[software_num] = software_input - return model.add_docker_software(task_id, vm_id, image + ':' + old_tag, command, tira_image_name, input_docker_job, - input_upload, submission_git_repo, build_environment) + return model.add_docker_software( + task_id, + vm_id, + image + ":" + old_tag, + command, + tira_image_name, + input_docker_job, + input_upload, + submission_git_repo, + build_environment, + ) def add_registration(data): @@ -592,16 +658,20 @@ def add_registration(data): def all_allowed_task_teams(task_id): task = get_task(task_id) - return set([i.strip() for i in task['allowed_task_teams'].split() if i.strip()]) + return set([i.strip() for i in task["allowed_task_teams"].split() if 
i.strip()]) def user_is_registered(task_id, request): - from tira.authentication import auth - task = get_task(task_id) + from .authentication import auth + allowed_task_teams = all_allowed_task_teams(task_id) user_vm_ids = [i.strip() for i in auth.get_vm_ids(request) if i.strip()] - return user_vm_ids is not None and len(user_vm_ids) > 0 and (len(allowed_task_teams) == 0 or any([i in allowed_task_teams for i in user_vm_ids])) + return ( + user_vm_ids is not None + and len(user_vm_ids) > 0 + and (len(allowed_task_teams) == 0 or any([i in allowed_task_teams for i in user_vm_ids])) + ) def remaining_team_names(task_id): @@ -614,100 +684,249 @@ def remaining_team_names(task_id): # add methods to add new data to the model # ------------------------------------------------------------ + def add_vm(vm_id: str, user_name: str, initial_user_password: str, ip: str, host: str, ssh: str, rdp: str): - """ Add a new task to the database. + """Add a new task to the database. This will not overwrite existing files and instead do nothing and return false """ return model.add_vm(vm_id, user_name, initial_user_password, ip, host, ssh, rdp) -def create_task(task_id: str, task_name: str, task_description: str, featured: bool, master_vm_id: str, - organizer: str, website: str, require_registration: bool, require_groups: bool, restrict_groups: bool, - help_command: str = None, help_text: str = None, allowed_task_teams: str = None): - """ Add a new task to the database. - CAUTION: This function does not do any sanity checks and will OVERWRITE existing tasks - :returns: The new task as json as returned by get_task - """ - return model.create_task(task_id, task_name, task_description, featured, master_vm_id, organizer, website, - require_registration, require_groups, restrict_groups, help_command, help_text, allowed_task_teams) +def create_task( + task_id: str, + task_name: str, + task_description: str, + featured: bool, + master_vm_id: str, + organizer: str, + website: str, + require_registration: bool, + require_groups: bool, + restrict_groups: bool, + help_command: Optional[str] = None, + help_text: Optional[str] = None, + allowed_task_teams: Optional[str] = None, +): + """Add a new task to the database. + CAUTION: This function does not do any sanity checks and will OVERWRITE existing tasks + :returns: The new task as json as returned by get_task + """ + return model.create_task( + task_id, + task_name, + task_description, + featured, + master_vm_id, + organizer, + website, + require_registration, + require_groups, + restrict_groups, + help_command, + help_text, + allowed_task_teams, + ) -def add_dataset(task_id: str, dataset_id: str, dataset_type: str, dataset_name: str, upload_name: str, irds_docker_image: str=None, irds_import_command: str=None, irds_import_truth_command: str=None) -> list: - """ returns a list of paths of newly created datasets as string. 
- """ - return model.add_dataset(task_id, dataset_id, dataset_type, dataset_name, upload_name, irds_docker_image= irds_docker_image, irds_import_command=irds_import_command, irds_import_truth_command=irds_import_truth_command) +def add_dataset( + task_id: str, + dataset_id: str, + dataset_type: str, + dataset_name: str, + upload_name: str, + irds_docker_image: Optional[str] = None, + irds_import_command: Optional[str] = None, + irds_import_truth_command: Optional[str] = None, +) -> list: + """returns a list of paths of newly created datasets as string.""" + return model.add_dataset( + task_id, + dataset_id, + dataset_type, + dataset_name, + upload_name, + irds_docker_image=irds_docker_image, + irds_import_command=irds_import_command, + irds_import_truth_command=irds_import_truth_command, + ) def add_software(task_id: str, vm_id: str): return model.add_software(task_id, vm_id) -def add_evaluator(vm_id: str, task_id: str, dataset_id: str, command: str, working_directory: str, measures, - is_git_runner: bool = False, git_runner_image: str = None, git_runner_command: str = None, - git_repository_id: str = None): - ret = model.add_evaluator(vm_id, task_id, dataset_id, command, working_directory, measures, is_git_runner, - git_runner_image, git_runner_command, git_repository_id) +def add_evaluator( + vm_id: str, + task_id: str, + dataset_id: str, + command: str, + working_directory: str, + measures, + is_git_runner: Optional[bool] = False, + git_runner_image: Optional[str] = None, + git_runner_command: Optional[str] = None, + git_repository_id: Optional[str] = None, +): + ret = model.add_evaluator( + vm_id, + task_id, + dataset_id, + command, + working_directory, + measures, + is_git_runner, + git_runner_image, + git_runner_command, + git_repository_id, + ) from django.core.cache import cache + get_evaluators_for_task(task_id=task_id, cache=cache, force_cache_refresh=True) return ret def add_run(dataset_id, vm_id, run_id): - """ Add a new run to the model. Currently, this initiates the caching on the application side of things. """ + """Add a new run to the model. Currently, this initiates the caching on the application side of things.""" return model.add_run(dataset_id, vm_id, run_id) -def update_review(dataset_id, vm_id, run_id, - reviewer_id: str = None, review_date: str = None, has_errors: bool = None, - has_no_errors: bool = None, no_errors: bool = None, missing_output: bool = None, - extraneous_output: bool = None, invalid_output: bool = None, has_error_output: bool = None, - other_errors: bool = None, comment: str = None, published: bool = None, blinded: bool = None, - has_warnings: bool = False): - """ updates the review specified by dataset_id, vm_id, and run_id with the values given in the parameters. 
- Required Parameters are also required in the function """ - return model.update_review(dataset_id, vm_id, run_id, reviewer_id, review_date, has_errors, has_no_errors, - no_errors, missing_output, extraneous_output, invalid_output, has_error_output, - other_errors, comment, published, blinded, has_warnings) +def update_review( + dataset_id, + vm_id, + run_id, + reviewer_id: Optional[str] = None, + review_date: Optional[str] = None, + has_errors: Optional[bool] = None, + has_no_errors: Optional[bool] = None, + no_errors: Optional[bool] = None, + missing_output: Optional[bool] = None, + extraneous_output: Optional[bool] = None, + invalid_output: Optional[bool] = None, + has_error_output: Optional[bool] = None, + other_errors: Optional[bool] = None, + comment: Optional[str] = None, + published: Optional[bool] = None, + blinded: Optional[bool] = None, + has_warnings: bool = False, +): + """updates the review specified by dataset_id, vm_id, and run_id with the values given in the parameters. + Required Parameters are also required in the function""" + return model.update_review( + dataset_id, + vm_id, + run_id, + reviewer_id, + review_date, + has_errors, + has_no_errors, + no_errors, + missing_output, + extraneous_output, + invalid_output, + has_error_output, + other_errors, + comment, + published, + blinded, + has_warnings, + ) -def update_run(dataset_id, vm_id, run_id, deleted: bool = None): - """ updates the run specified by dataset_id, vm_id, and run_id with the values given in the parameters. - Required Parameters are also required in the function """ +def update_run(dataset_id, vm_id, run_id, deleted: Optional[bool] = None): + """updates the run specified by dataset_id, vm_id, and run_id with the values given in the parameters. + Required Parameters are also required in the function""" return model.update_run(dataset_id, vm_id, run_id, deleted) -def update_software(task_id, vm_id, software_id, command: str = None, working_directory: str = None, - dataset: str = None, run: str = None, deleted: bool = False): - return model.update_software(task_id, vm_id, software_id, command, working_directory, dataset, - run, deleted) - - -def edit_task(task_id: str, task_name: str, task_description: str, featured: bool, master_vm_id: str, organizer: str, website: str, - require_registration: str, require_groups: str, restrict_groups: str, - help_command: str = None, help_text: str = None, allowed_task_teams=None, is_ir_task: bool = False, - irds_re_ranking_image: str = '', irds_re_ranking_command: str = '', - irds_re_ranking_resource: str = ''): - """ Update the task's data """ +def update_software( + task_id, + vm_id, + software_id, + command: Optional[str] = None, + working_directory: Optional[str] = None, + dataset: Optional[str] = None, + run: Optional[str] = None, + deleted: bool = False, +): + return model.update_software(task_id, vm_id, software_id, command, working_directory, dataset, run, deleted) + + +def edit_task( + task_id: str, + task_name: str, + task_description: str, + featured: bool, + master_vm_id: str, + organizer: str, + website: str, + require_registration: str, + require_groups: str, + restrict_groups: str, + help_command: Optional[str] = None, + help_text: Optional[str] = None, + allowed_task_teams: Optional[str] = None, + is_ir_task: bool = False, + irds_re_ranking_image: str = "", + irds_re_ranking_command: str = "", + irds_re_ranking_resource: str = "", +): + """Update the task's data""" if allowed_task_teams: - allowed_task_teams = '\n'.join([slugify(i) for i in 
allowed_task_teams.split('\n')]) - - return model.edit_task(task_id, task_name, task_description, featured, master_vm_id, organizer, website, - require_registration, require_groups, restrict_groups, help_command, help_text, - allowed_task_teams, is_ir_task, irds_re_ranking_image, irds_re_ranking_command, - irds_re_ranking_resource) + allowed_task_teams = "\n".join([slugify(i) for i in allowed_task_teams.split("\n")]) + + return model.edit_task( + task_id, + task_name, + task_description, + featured, + master_vm_id, + organizer, + website, + require_registration, + require_groups, + restrict_groups, + help_command, + help_text, + allowed_task_teams, + is_ir_task, + irds_re_ranking_image, + irds_re_ranking_command, + irds_re_ranking_resource, + ) -def edit_dataset(task_id: str, dataset_id: str, dataset_name: str, command: str, - working_directory: str, measures: str, upload_name: str, is_confidential: bool = False, - is_git_runner: bool = False, git_runner_image: str = None, git_runner_command: str = None, - git_repository_id: str = None): - """ Update the datasets's data """ - return model.edit_dataset(task_id, dataset_id, dataset_name, command, working_directory, - measures, upload_name, is_confidential, is_git_runner, git_runner_image, - git_runner_command, git_repository_id) +def edit_dataset( + task_id: str, + dataset_id: str, + dataset_name: str, + command: str, + working_directory: str, + measures: str, + upload_name: str, + is_confidential: bool = False, + is_git_runner: bool = False, + git_runner_image: Optional[str] = None, + git_runner_command: Optional[str] = None, + git_repository_id: Optional[str] = None, +): + """Update the datasets's data""" + return model.edit_dataset( + task_id, + dataset_id, + dataset_name, + command, + working_directory, + measures, + upload_name, + is_confidential, + is_git_runner, + git_runner_image, + git_runner_command, + git_repository_id, + ) def delete_docker_software(task_id, vm_id, docker_software_id): @@ -718,8 +937,8 @@ def delete_docker_software(task_id, vm_id, docker_software_id): def delete_software(task_id, vm_id, software_id): - """ Set the Software's deleted flag to true and prune it from the cache. - TODO add option to truly delete the software. """ + """Set the Software's deleted flag to true and prune it from the cache. + TODO add option to truly delete the software.""" return model.delete_software(task_id, vm_id, software_id) @@ -728,7 +947,7 @@ def delete_run(dataset_id, vm_id, run_id): def delete_task(task_id: str): - """ Delete a task from the model """ + """Delete a task from the model""" return model.delete_task(task_id) @@ -751,29 +970,30 @@ def all_git_integrations(self): def get_git_integration(organizer_id=None, task_id=None, dataset_id=None, return_metadata_only=False): from django.core.cache import cache - cache_key = f'tira-model-docker-get_git_integration-{organizer_id}-{task_id}-{dataset_id}' - ret = cache.get(cache_key) + + cache_key = f"tira-model-docker-get_git_integration-{organizer_id}-{task_id}-{dataset_id}" + ret = cache.get(cache_key) if ret is not None: return ret if return_metadata_only else get_git_runner(ret) - + if not organizer_id and not task_id and not dataset_id: - raise ValueError(f'Organizer Id or task_id must be passed. But both are none') + raise ValueError("Organizer Id or task_id must be passed. 
But both are none") if dataset_id and not organizer_id and not task_id: - task_id = model.get_dataset(dataset_id)['task'] + task_id = model.get_dataset(dataset_id)["task"] if task_id and not organizer_id: - organizer_id = model.get_task(task_id, include_dataset_stats=False)['organizer_id'] + organizer_id = model.get_task(task_id, include_dataset_stats=False)["organizer_id"] if not organizer_id: - raise ValueError(f'Organizer Id can not be None. Got {organizer_id}') - + raise ValueError(f"Organizer Id can not be None. Got {organizer_id}") + organizer = model.get_organizer(organizer_id) - namespace_url = organizer['gitUrlToNamespace'] - - ret = model.get_git_integration(namespace_url, '', return_dict=True, create_if_not_exists=False) + namespace_url = organizer["gitUrlToNamespace"] + + ret = model.get_git_integration(namespace_url, "", return_dict=True, create_if_not_exists=False) cache.set(cache_key, ret) - + return ret if return_metadata_only else get_git_runner(ret) @@ -806,22 +1026,30 @@ def software_exists(task_id: str, vm_id: str, software_id: str) -> bool: return model.software_exists(task_id, vm_id, software_id) -def latest_output_of_software_on_dataset(task_id: str, vm_id: str, software_id: str, docker_software_id: int, dataset_id: str, upload_id: int): +def latest_output_of_software_on_dataset( + task_id: str, + vm_id: str, + software_id: Optional[str], + docker_software_id: Optional[int], + dataset_id: str, + upload_id: Optional[int], +): run_ids = model.all_matching_run_ids(vm_id, dataset_id, task_id, software_id, docker_software_id, upload_id) if run_ids and len(run_ids) > 0: - return { - 'task_id': task_id, - 'vm_id': vm_id, - 'dataset_id': dataset_id, - 'run_id': run_ids[0] - } + return {"task_id": task_id, "vm_id": vm_id, "dataset_id": dataset_id, "run_id": run_ids[0]} else: return None -def create_re_rank_output_on_dataset(task_id: str, vm_id: str, software_id: str, docker_software_id: int, - dataset_id: str, return_none_if_not_exists=False): +def create_re_rank_output_on_dataset( + task_id: str, + vm_id: str, + software_id: str, + docker_software_id: int, + dataset_id: str, + return_none_if_not_exists=False, +): task = get_task(task_id, False) is_ir_task = task.get("is_ir_task", False) @@ -831,7 +1059,9 @@ def create_re_rank_output_on_dataset(task_id: str, vm_id: str, software_id: str, if not is_ir_task or not irds_re_ranking_image or not irds_re_ranking_command or not irds_re_ranking_resource: return None - docker_irds_software_id = str(int(model.get_irds_docker_software_id(task_id, vm_id, software_id, docker_software_id).docker_software_id)) + docker_irds_software_id = str( + int(model.get_irds_docker_software_id(task_id, vm_id, software_id, docker_software_id).docker_software_id) + ) reranked_job = latest_output_of_software_on_dataset(task_id, vm_id, None, docker_irds_software_id, dataset_id, None) if reranked_job: @@ -842,78 +1072,103 @@ def create_re_rank_output_on_dataset(task_id: str, vm_id: str, software_id: str, evaluator = model.get_evaluator(dataset_id) - if not evaluator or 'is_git_runner' not in evaluator or not evaluator[ - 'is_git_runner'] or 'git_runner_image' not in evaluator or not evaluator[ - 'git_runner_image'] or 'git_runner_command' not in evaluator or not evaluator[ - 'git_runner_command'] or 'git_repository_id' not in evaluator or not evaluator['git_repository_id']: + if ( + not evaluator + or "is_git_runner" not in evaluator + or not evaluator["is_git_runner"] + or "git_runner_image" not in evaluator + or not evaluator["git_runner_image"] + or 
"git_runner_command" not in evaluator + or not evaluator["git_runner_command"] + or "git_repository_id" not in evaluator + or not evaluator["git_repository_id"] + ): return ValueError("The dataset is misconfigured. Docker-execute only available for git-evaluators") input_run = latest_output_of_software_on_dataset(task_id, vm_id, software_id, docker_software_id, dataset_id, None) - path_to_run = Path(settings.TIRA_ROOT) / "data" / "runs" / dataset_id / vm_id / input_run['run_id'] / "output" - rerank_run_id = input_run['run_id'] + '-rerank-' + get_tira_id() + path_to_run = Path(settings.TIRA_ROOT) / "data" / "runs" / dataset_id / vm_id / input_run["run_id"] / "output" + rerank_run_id = input_run["run_id"] + "-rerank-" + get_tira_id() rerank_dir = Path(settings.TIRA_ROOT) / "data" / "runs" / dataset_id / vm_id / rerank_run_id - input_run['vm_id'] = vm_id + input_run["vm_id"] = vm_id output_directory = tempfile.TemporaryDirectory() - raw_command = evaluator['git_runner_command'] - raw_command = raw_command.replace('$outputDir', '/tira-output/current-output') - raw_command = raw_command.replace('$inputDataset', '/tira-input/current-input') - - #docker run --rm -ti --entrypoint sh -v ${PWD}:/data -v /mnt/ceph/tira/data/datasets/training-datasets/ir-benchmarks/clueweb12-trec-misinfo-2019-20240214-training/:/irds-data/:ro docker.io/webis/tira-ir-datasets-starter:0.0.56 - #export TIRA_INPUT_DATASET=/irds-data/ /irds_cli.sh --ir_datasets_id ignored --rerank /data/output-run/ --input_dataset_directory /irds-data/ - command = [ - ['sudo', 'podman', '--storage-opt', 'mount_program=/usr/bin/fuse-overlayfs', 'run', - '-v', f'{output_directory}:/tira-output/current-output', '-v', f'{path_to_run}:/tira-input/current-input:ro', - '--entrypoint', 'sh', evaluator['git_runner_image'], '-c', raw_command] - ] - - print('Input run:', path_to_run) - print('Rerank dir:', rerank_dir) + raw_command = evaluator["git_runner_command"] + raw_command = raw_command.replace("$outputDir", "/tira-output/current-output") + raw_command = raw_command.replace("$inputDataset", "/tira-input/current-input") + + # docker run --rm -ti --entrypoint sh -v ${PWD}:/data -v /mnt/ceph/tira/data/datasets/training-datasets/ir-benchmarks/clueweb12-trec-misinfo-2019-20240214-training/:/irds-data/:ro docker.io/webis/tira-ir-datasets-starter:0.0.56 + # export TIRA_INPUT_DATASET=/irds-data/ /irds_cli.sh --ir_datasets_id ignored --rerank /data/output-run/ --input_dataset_directory /irds-data/ + # command = [ + # [ + # "sudo", + # "podman", + # "--storage-opt", + # "mount_program=/usr/bin/fuse-overlayfs", + # "run", + # "-v", + # f"{output_directory}:/tira-output/current-output", + # "-v", + # f"{path_to_run}:/tira-input/current-input:ro", + # "--entrypoint", + # "sh", + # evaluator["git_runner_image"], + # "-c", + # raw_command, + # ] + # ] + + print("Input run:", path_to_run) + print("Rerank dir:", rerank_dir) rerank_dir.mkdir(parents=True, exist_ok=True) - register_run(dataset_id, vm_id, rerank_run_id, evaluator['evaluator_id']) + register_run(dataset_id, vm_id, rerank_run_id, evaluator["evaluator_id"]) def register_reranking(): rerank_dir.mkdir(parents=True, exist_ok=True) copy_tree(output_directory.name, rerank_dir / "output") - register_run(dataset_id, vm_id, rerank_run_id, evaluator['evaluator_id']) + register_run(dataset_id, vm_id, rerank_run_id, evaluator["evaluator_id"]) - #return run_cmd_as_documented_background_process(command, vm_id, task_id, 'Create Re-ranking file.', + # return run_cmd_as_documented_background_process(command, vm_id, 
task_id, 'Create Re-ranking file.', # ['Create rerankings.'], register_reranking) def add_input_run_id_to_all_rerank_runs(): from tqdm import tqdm + dataset_to_run_id = {} - for reranking_software in tqdm(model.get_reranking_docker_softwares(), 'Get input_run_ids'): - for dataset in get_datasets_by_task(reranking_software['task_id']): + for reranking_software in tqdm(model.get_reranking_docker_softwares(), "Get input_run_ids"): + for dataset in get_datasets_by_task(reranking_software["task_id"]): ls = latest_output_of_software_on_dataset( - reranking_software['task_id'], - reranking_software['vm_id'], + reranking_software["task_id"], + reranking_software["vm_id"], + None, + reranking_software["docker_software_id"], + dataset["dataset_id"], None, - reranking_software['docker_software_id'], - dataset['dataset_id'], - None ) - + if ls: - if dataset['dataset_id'] in dataset_to_run_id: - raise ValueError('Ambigious...') - - dataset_to_run_id[dataset['dataset_id']] = ls['run_id'] + if dataset["dataset_id"] in dataset_to_run_id: + raise ValueError("Ambigious...") + + dataset_to_run_id[dataset["dataset_id"]] = ls["run_id"] - for i in tqdm(model.get_all_docker_software_rerankers(), 'Update input ids'): - for run in model.get_runs_for_docker_software(i['docker_software_id']): - if 'input_run' not in run or not run['input_run']: - model.update_input_run_id_for_run(run['run_id'], dataset_to_run_id[run['dataset']]) + for i in tqdm(model.get_all_docker_software_rerankers(), "Update input ids"): + for run in model.get_runs_for_docker_software(i["docker_software_id"]): + if "input_run" not in run or not run["input_run"]: + model.update_input_run_id_for_run(run["run_id"], dataset_to_run_id[run["dataset"]]) def get_all_reranking_datasets_for_task(task_id): - return [{'dataset_id': k, 'display_name': v['display_name'], 'original_dataset_id': v['dataset_id']} for k, v in get_all_reranking_datasets().items() if v and v['task_id'] == task_id] + return [ + {"dataset_id": k, "display_name": v["display_name"], "original_dataset_id": v["dataset_id"]} + for k, v in get_all_reranking_datasets().items() + if v and v["task_id"] == task_id + ] def get_all_reranking_datasets(force_cache_refresh=False): - cache_key = 'get_all_reranking_datasets' + cache_key = "get_all_reranking_datasets" ret = cache.get(cache_key) if ret is not None and not force_cache_refresh: return ret @@ -921,16 +1176,20 @@ def get_all_reranking_datasets(force_cache_refresh=False): ret = {} for reranking_software in model.get_reranking_docker_softwares(): - for dataset in get_datasets_by_task(reranking_software['task_id']): + for dataset in get_datasets_by_task(reranking_software["task_id"]): reranking_input = create_re_rank_output_on_dataset( - task_id=reranking_software['task_id'], vm_id=reranking_software['vm_id'], - software_id=None, docker_software_id=reranking_software['docker_software_id'], - dataset_id=dataset['dataset_id'], return_none_if_not_exists = True) + task_id=reranking_software["task_id"], + vm_id=reranking_software["vm_id"], + software_id=None, + docker_software_id=reranking_software["docker_software_id"], + dataset_id=dataset["dataset_id"], + return_none_if_not_exists=True, + ) if reranking_input: - name = 'docker-id-' + str(reranking_software['docker_software_id']) + '-on-' + dataset['dataset_id'] - name = name.replace(' ', '-').replace('\\s', '-') - reranking_input['display_name'] = reranking_software['display_name'] + ' on ' + dataset['dataset_id'] + name = "docker-id-" + str(reranking_software["docker_software_id"]) + "-on-" + 
dataset["dataset_id"] + name = name.replace(" ", "-").replace("\\s", "-") + reranking_input["display_name"] = reranking_software["display_name"] + " on " + dataset["dataset_id"] ret[name] = reranking_input diff --git a/application/src/tira_app/urls.py b/application/src/tira_app/urls.py new file mode 100644 index 000000000..72cca7c10 --- /dev/null +++ b/application/src/tira_app/urls.py @@ -0,0 +1,252 @@ +from typing import Union + +from django.urls import URLPattern, URLResolver, include, path + +from . import views +from .endpoints import admin_api, data_api, diffir_api, organizer_api, serp_api, vm_api +from .endpoints.misc import endpoints as misc_endpoints +from .endpoints.v1 import endpoints as v1_endpoints + +urlpatterns: list[Union[URLResolver, URLPattern]] = [ + path( + "task//user//dataset//download/.zip", + views.download_rundir, + name="download_rundir", + ), + path( + "data-download/git-repo-template//.zip", + views.download_repo_template, + name="download_repo_template", + ), + path( + "data-download///.zip", + views.download_datadir, + name="download_datadir", + ), + # grpc client endpoints + path("task//vm//add_software/vm", vm_api.software_add, name="software_add"), + path( + "task//vm//add_software/docker", vm_api.docker_software_add, name="docker_software_add" + ), + path("task//vm//add_software/upload", vm_api.add_upload, name="add_upload"), + path( + "task//vm//save_software/docker/", + vm_api.docker_software_save, + name="docker_software_save", + ), + path( + "task//vm//save_software/upload/", + vm_api.upload_save, + name="docker_software_save", + ), + path( + "task//vm//save_software/vm/", + vm_api.software_save, + name="software_save", + ), + path( + "task//vm//delete_software/vm/", + vm_api.software_delete, + name="software_delete", + ), + path( + "task//vm//delete_software/docker/", + vm_api.docker_software_delete, + name="docker_delete", + ), + path("task//vm//run_details/", vm_api.run_details, name="run_details"), + path( + "task//vm//software_details/", + vm_api.software_details, + name="software_details", + ), + path("task//vm//upload//", vm_api.upload, name="upload"), + path("task//vm//upload-delete/", vm_api.delete_upload, name="deleteupload"), + path("grpc//vm_info", vm_api.vm_info, name="vm_info"), + path("grpc//vm_state", vm_api.vm_state, name="vm_state"), + path("grpc//vm_start", vm_api.vm_start, name="vm_start"), + path("grpc//vm_shutdown", vm_api.vm_shutdown, name="vm_shutdown"), + path("grpc//vm_stop", vm_api.vm_stop, name="vm_stop"), + path("grpc//vm_shutdown", vm_api.vm_shutdown, name="vm_shutdown"), + path("grpc//run_abort", vm_api.run_abort, name="run_abort"), + path("grpc//vm_running_evaluations", vm_api.vm_running_evaluations, name="vm_running_evaluations"), + path("grpc//get_running_evaluations", vm_api.get_running_evaluations, name="get_running_evaluations"), + path("grpc///run_execute/vm/", vm_api.run_execute, name="run_execute"), + path( + ( + "grpc///run_execute/docker///" + "/" + ), + vm_api.run_execute_docker_software, + name="run_execute_docker_software", + ), + path("grpc//run_eval//", vm_api.run_eval, name="run_eval"), + path("grpc//run_delete//", vm_api.run_delete, name="run_delete"), + path( + "grpc///stop_docker_software/", + vm_api.stop_docker_software, + name="stop_docker_software", + ), + path("tira-admin/reload/vms", admin_api.admin_reload_vms, name="tira-admin-reload-vms"), + path("tira-admin/reload/datasets", admin_api.admin_reload_datasets, name="tira-admin-reload-datasets"), + path("tira-admin/reload/tasks", 
admin_api.admin_reload_tasks, name="tira-admin-reload-tasks"), + path("tira-admin/reload-data", admin_api.admin_reload_data, name="tira-admin-reload-data"), + path("tira-admin/reload-runs/", admin_api.admin_reload_runs, name="tira-admin-reload-runs"), + path("tira-admin/create-vm", admin_api.admin_create_vm, name="tira-admin-create-vm"), + path("tira-admin/archive-vm", admin_api.admin_archive_vm, name="tira-admin-archive-vm"), + path("tira-admin/modify-vm", admin_api.admin_modify_vm, name="tira-admin-modify-vm"), + path( + "tira-admin/export-participants/.csv", data_api.export_registrations, name="export_registrations" + ), + path("tira-admin//create-task", admin_api.admin_create_task, name="tira-admin-create-task"), + path("tira-admin/edit-task/", admin_api.admin_edit_task, name="tira-admin-edit-task"), + path("tira-admin/delete-task/", admin_api.admin_delete_task, name="tira-admin-delete-task"), + path("tira-admin/add-dataset/", admin_api.admin_add_dataset, name="tira-admin-add-dataset"), + path( + "tira-admin/upload-dataset///", + admin_api.admin_upload_dataset, + name="tira-admin-upload-dataset", + ), + path( + "tira-admin/import-irds-dataset/", + admin_api.admin_import_ir_dataset, + name="tira-admin-import-irds-dataset", + ), + path("tira-admin/edit-dataset/", admin_api.admin_edit_dataset, name="tira-admin-edit-dataset"), + path( + "tira-admin/delete-dataset/", admin_api.admin_delete_dataset, name="tira-admin-delete-dataset" + ), + path("tira-admin/add-organizer/", admin_api.admin_add_organizer, name="tira-admin-add-organizer"), + path( + "tira-admin/edit-organizer/", admin_api.admin_edit_organizer, name="tira-admin-edit-organizer" + ), + path( + "tira-admin/edit-review///", + admin_api.admin_edit_review, + name="tira-admin-edit-review", + ), + path("tira-admin/create-group/", admin_api.admin_create_group, name="tira-admin-create-group"), + path("publish////", organizer_api.publish, name="publish"), + path("blind////", organizer_api.blind, name="blind"), + path( + "api/evaluations//", + data_api.get_evaluations_by_dataset, + name="get_evaluations_by_dataset", + ), + path( + "api/evaluations-of-vm//", data_api.get_evaluations_by_vm, name="get_evaluations_by_vm" + ), + path("api/evaluation//", data_api.get_evaluation, name="get_evaluation"), + path( + "api/submissions//", + data_api.get_submissions_by_dataset, + name="get_submissions_by_dataset", + ), + path( + "api/docker-softwares-details//", + vm_api.docker_software_details, + name="software_details", + ), + path( + "api/huggingface_model_mounts/vm//", + vm_api.huggingface_model_mounts, + name="huggingface_model_mounts", + ), + path( + "api/upload-group-details///", + vm_api.upload_group_details, + name="upload_id", + ), + path("api/evaluations_of_run//", data_api.get_evaluations_of_run, name="evaluations_of_run"), + path( + "api/configuration-of-evaluation//", + data_api.get_configuration_of_evaluation, + name="get_configuration_of_evaluation", + ), + path("api/list-runs////", data_api.runs, name="runs"), + path("api/ova-list", data_api.get_ova_list, name="get_ova_list"), + path("api/host-list", data_api.get_host_list, name="get_host_list"), + path("api/organizer-list", data_api.get_organizer_list, name="get_organizer_list"), + path("api/task-list", data_api.get_task_list, name="get_task_list"), + path("api/task/", data_api.get_task, name="get_task"), + path( + "api/registration_formular/", data_api.get_registration_formular, name="get_registration_formular" + ), + path("api/dataset/", data_api.get_dataset, name="get_dataset"), + 
path("api/datasets_by_task/", data_api.get_dataset_for_task, name="get_dataset_for_task"), + path("api/organizer/", data_api.get_organizer, name="get_organizer"), + path("api/role", data_api.get_role, name="get_role"), + path("api/task//user/", data_api.get_user, name="get_user"), + path( + "api/task//user//refresh-docker-images", + data_api.update_docker_images, + name="get_updated_docker_images", + ), + path( + "api/count-of-team-submissions/", + organizer_api.get_count_of_team_submissions, + name="get_count_of_team_submissions", + ), + path( + "api/count-of-missing-reviews/", + organizer_api.get_count_of_missing_reviews, + name="get_count_of_missing_reviews", + ), + path( + "api/task//user//software/running/", + data_api.get_running_software, + name="get_running_software", + ), + path("api/task//public-submissions", data_api.public_submissions, name="public_submissions"), + path( + "api/task//submission-details//", + data_api.public_submission, + name="public_submission", + ), + path("api/review///", data_api.get_review, name="get_review"), + path( + "api/registration/add_registration//", + data_api.add_registration, + name="add_registration", + ), + path( + "api/submissions-for-task///", + data_api.submissions_for_task, + name="submissions_for_task", + ), + path("api/tirex-components", data_api.tirex_components, name="tirex_components"), + path("api/tirex-snippet", data_api.get_snippet_to_run_components, name="get_snippet_to_run_components"), + path( + "api/snippets-for-tirex-components", + data_api.get_snippet_to_run_components, + name="get_snippet_to_run_components", + ), + path("api/re-ranking-datasets/", data_api.reranking_datasets, name="reranking_datasets"), + path("api/submissions-of-user/", data_api.submissions_of_user, name="submissions_of_user"), + path( + "api/add_software_submission_git_repository//", + vm_api.add_software_submission_git_repository, + name="add_software_submission_git_repository", + ), + path( + "api/get_software_submission_git_repository//", + vm_api.get_software_submission_git_repository, + name="get_software_submission_git_repository", + ), + path("api/token/", vm_api.get_token, name="get_token"), + path( + "api/import-submission////", + data_api.import_submission, + name="import_submission", + ), + path("diffir////", diffir_api.diffir, name="diffir"), + path( + "serp//user//dataset///", + serp_api.serp, + name="serp", + ), + *misc_endpoints, + path("v1/", include(v1_endpoints)), +] + +app_name = "tira" diff --git a/application/src/tira/util.py b/application/src/tira_app/util.py similarity index 64% rename from application/src/tira/util.py rename to application/src/tira_app/util.py index dbe99a5d1..455956842 100644 --- a/application/src/tira/util.py +++ b/application/src/tira_app/util.py @@ -1,11 +1,11 @@ +import logging from datetime import datetime as dt from datetime import timezone -import logging +from pathlib import Path -from .proto import TiraClientWebMessages_pb2 as modelpb from django.conf import settings -from pathlib import Path -from tira import tira_model + +from .proto import TiraClientWebMessages_pb2 as modelpb logger = logging.getLogger("tira") @@ -41,16 +41,16 @@ def extract_year_from_dataset_id(dataset_id: str) -> str: def reroute_host(hostname): - """ If we use a local deployment and use a local (mock) host, we need to change all hostnames to localhost. + """If we use a local deployment and use a local (mock) host, we need to change all hostnames to localhost. Otherwise we may contact the real vm-hosts while developing. 
- """ - return 'localhost' if settings.GRPC_HOST == 'local' else hostname + """ + return "localhost" if settings.GRPC_HOST == "local" else hostname def auto_reviewer(review_path, run_id): - """ Do standard checks for reviews so we do not need to wait for a reviewer to check for: - - failed runs ( - """ + """Do standard checks for reviews so we do not need to wait for a reviewer to check for: + - failed runs ( + """ review_file = review_path / "run-review.bin" review = modelpb.RunReview() @@ -63,7 +63,7 @@ def auto_reviewer(review_path, run_id): raise FileExistsError(f"review file: {review_file} exists but is corrupted with {e}") review.runId = run_id - review.reviewerId = 'tira' + review.reviewerId = "tira" review.reviewDate = str(dt.utcnow()) review.hasWarnings = False review.hasErrors = False @@ -87,7 +87,7 @@ def auto_reviewer(review_path, run_id): except Exception as e: review_path.mkdir(parents=True, exist_ok=True) - review.reviewerId = 'tira' + review.reviewerId = "tira" review.comment = f"Internal Error: {e}. Please contact the support." review.hasErrors = True review.hasNoErrors = False @@ -98,50 +98,61 @@ def auto_reviewer(review_path, run_id): def run_cmd(cmd, ignore_failure=False): import subprocess + exit_code = subprocess.call(cmd) if not ignore_failure and exit_code != 0: - raise ValueError(f'Command {cmd} did exit with return code {exit_code}.') + raise ValueError(f"Command {cmd} did exit with return code {exit_code}.") def link_to_discourse_team(vm_id): - if not vm_id.endswith('-default'): - return 'https://www.tira.io/g/tira_vm_' + vm_id + if not vm_id.endswith("-default"): + return "https://www.tira.io/g/tira_vm_" + vm_id else: - return 'https://www.tira.io/u/' + vm_id.split('-default')[0] + return "https://www.tira.io/u/" + vm_id.split("-default")[0] def register_run(dataset_id, vm_id, run_id, software_id): + # import tira_model has to be done here since it has a side-effect with django and throws + # django.core.exceptions.AppRegistryNotReady: Apps aren't loaded yet. + # if it is imported before django is launched otherwise. + from . import tira_model + path_for_run = Path(settings.TIRA_ROOT) / "data" / "runs" / dataset_id / vm_id / run_id - with open(path_for_run / 'run.prototext', 'w') as f: + with open(path_for_run / "run.prototext", "w") as f: f.write( - f'\nsoftwareId: "{software_id}"\nrunId: "{run_id}"\ninputDataset: "{dataset_id}"\ndownloadable: true\ndeleted: false\n') + f'\nsoftwareId: "{software_id}"\nrunId: "{run_id}"\ninputDataset: "{dataset_id}"\ndownloadable:' + " true\ndeleted: false\n" + ) tira_model.add_run(dataset_id=dataset_id, vm_id=vm_id, run_id=run_id) def __run_cmd_as_documented_background_process(cmds, process_id, descriptions, callback): import datetime - from subprocess import Popen, STDOUT import tempfile + from subprocess import STDOUT, Popen from time import sleep - import tira.model as modeldb + + from . 
import model as modeldb + with tempfile.NamedTemporaryFile() as file: - file.write(''.encode('utf8')) + file.write("".encode("utf8")) for cmd, description in zip(cmds, descriptions): process = Popen(cmd, stdout=file, stderr=STDOUT, stdin=None, close_fds=True, text=True) while process.poll() is None: sleep(4) - stdout = open(file.name, 'rt').read() + stdout = open(file.name, "rt").read() last_contact = datetime.datetime.now() modeldb.BackendProcess.objects.filter(id=process_id).update(stdout=stdout, last_contact=last_contact) exit_code = process.poll() - stdout = open(file.name, 'rt').read() + stdout = open(file.name, "rt").read() last_contact = datetime.datetime.now() - modeldb.BackendProcess.objects.filter(id=process_id).update(stdout=stdout, exit_code=exit_code, - last_contact=last_contact) + modeldb.BackendProcess.objects.filter(id=process_id).update( + stdout=stdout, exit_code=exit_code, last_contact=last_contact + ) if exit_code != 0: return @@ -156,25 +167,40 @@ def run_cmd_as_documented_background_process(cmd, vm_id, task_id, title, descrip """ import json import threading - import tira.model as modeldb - process_id = modeldb.BackendProcess.objects.create(vm_id=vm_id, task_id=task_id, - cmd=json.dumps(cmd), title=title).id + from . import model as modeldb - thread = threading.Thread(target=__run_cmd_as_documented_background_process, name=f'Process-{process_id}', args=(cmd, process_id, descriptions, callback)) + process_id = modeldb.BackendProcess.objects.create( + vm_id=vm_id, task_id=task_id, cmd=json.dumps(cmd), title=title + ).id + + thread = threading.Thread( + target=__run_cmd_as_documented_background_process, + name=f"Process-{process_id}", + args=(cmd, process_id, descriptions, callback), + ) thread.start() return process_id - def docker_image_details(image): import json import subprocess - ret = subprocess.check_output(['podman', 'image', 'inspect', image]) + + ret = subprocess.check_output(["podman", "image", "inspect", image]) ret = json.loads(ret) if len(ret) != 1: - raise ValueError(f'Could not handle {ret}') + raise ValueError(f"Could not handle {ret}") ret = ret[0] - image_id = ret['Id'] if ':' not in ret['Id'] else ret['Id'].split(':')[1] - return {'image_id': image_id, 'size': ret['Size'], 'virtual_size': ret['VirtualSize']} + image_id = ret["Id"] if ":" not in ret["Id"] else ret["Id"].split(":")[1] + return {"image_id": image_id, "size": ret["Size"], "virtual_size": ret["VirtualSize"]} + + +def str2bool(text: str) -> bool: + """ + Extracts the boolean meaning of the given text. A string of the form "yes", "y", "true", "t", and "1" is + considered to express the boolean value True. The string may be in upper case as well (e.g., TRUE or True) and may + be surrounded by whitespaces. Any value that is not considered true, will return false. + """ + return text.strip().lower() in ("yes", "y", "true", "t", "1") diff --git a/application/src/tira_app/views.py b/application/src/tira_app/views.py new file mode 100644 index 000000000..b200081a5 --- /dev/null +++ b/application/src/tira_app/views.py @@ -0,0 +1,221 @@ +import json +import logging +import os +import tempfile +import zipfile +from http import HTTPStatus +from pathlib import Path + +from django.conf import settings +from django.core.cache import cache +from django.core.serializers.json import DjangoJSONEncoder +from django.http import FileResponse, JsonResponse +from django.shortcuts import render +from django.template.loader import render_to_string +from django.utils.safestring import mark_safe + +from . 
import tira_model as model +from .authentication import auth +from .checks import check_conditional_permissions, check_permissions, check_resources_exist + +logger = logging.getLogger("tira") +logger.info("Views: Logger active") + + +def add_context(func): + def func_wrapper(request, *args, **kwargs): + uid = auth.get_user_id(request) + vm_id = None + + if args and "vm_id" in args: + vm_id = args["vm_id"] + elif kwargs and "vm_id" in kwargs: + vm_id = kwargs["vm_id"] + + context = { + "include_navigation": False, + "user_id": uid, + "role": auth.get_role(request, user_id=uid, vm_id=vm_id), + "organizer_teams": mark_safe(json.dumps(auth.get_organizer_ids(request))), + } + return func( + request, + context, + *args, + **kwargs, + ) + + return func_wrapper + + +def _add_task_to_context(context, task_id, dataset_id): + datasets = model.get_datasets_by_task(task_id) + + context["datasets"] = json.dumps({ds["dataset_id"]: ds for ds in datasets}, cls=DjangoJSONEncoder) + context["selected_dataset_id"] = dataset_id + context["test_dataset_ids"] = json.dumps( + [ds["dataset_id"] for ds in datasets if ds["is_confidential"]], cls=DjangoJSONEncoder + ) + context["training_dataset_ids"] = json.dumps( + [ds["dataset_id"] for ds in datasets if not ds["is_confidential"]], cls=DjangoJSONEncoder + ) + task = model.get_task(task_id) + context["task_id"] = task["task_id"] + context["task_name"] = json.dumps(task["task_name"], cls=DjangoJSONEncoder) + context["organizer"] = json.dumps(task["organizer"], cls=DjangoJSONEncoder) + context["task_description"] = json.dumps(task["task_description"], cls=DjangoJSONEncoder) + context["web"] = json.dumps(task["web"], cls=DjangoJSONEncoder) + + +def _add_user_vms_to_context(request, context, task_id, include_docker_details=True): + if context["role"] != auth.ROLE_GUEST: + allowed_vms_for_task = model.all_allowed_task_teams(task_id) + vm_id = auth.get_vm_id(request, context["user_id"]) + vm_ids = [] + + if allowed_vms_for_task is None or vm_id in allowed_vms_for_task: + context["vm_id"] = vm_id + + if getattr(auth, "get_vm_ids", None): + vm_ids = [ + i + for i in auth.get_vm_ids(request, context["user_id"]) + if allowed_vms_for_task is None or i in allowed_vms_for_task + ] + + context["user_vms_for_task"] = vm_ids + + docker = ["Your account has no docker registry. Please contact an organizer."] + + if include_docker_details and len(vm_ids) > 0: + docker = model.load_docker_data(task_id, vm_ids[0], cache, force_cache_refresh=False) + + if not docker: + docker = ["Docker is not enabled for this task."] + else: + docker = docker["docker_software_help"].split("\n") + docker = [i for i in docker if "docker login" in i or "docker push" in i or "docker build -t" in i] + docker = [ + i.replace("/my-software:0.0.1", "/") + .replace("", "") + .replace("", "") + .replace("

    ", "") + .replace("

    ", "") + for i in docker + ] + docker = [ + ( + i + if "docker build -t" not in i + else "docker tag " + i.split("docker build -t")[-1].split(" -f ")[0].strip() + ) + for i in docker + ] + + context["docker_documentation"] = docker + + +def zip_run(dataset_id, vm_id, run_id): + """Zip the given run and hand it out for download. Deletes the zip on the server again.""" + path_to_be_zipped = Path(settings.TIRA_ROOT) / "data" / "runs" / dataset_id / vm_id / run_id + zipped = Path(f"{path_to_be_zipped.stem}.zip") + + with zipfile.ZipFile(zipped, "w", zipfile.ZIP_DEFLATED) as zipf: + for f in path_to_be_zipped.rglob("*"): + zipf.write(f, arcname=f.relative_to(path_to_be_zipped.parent)) + + return zipped + + +def zip_runs(vm_id, dataset_ids_and_run_ids, name): + """Zip the given run and hand it out for download. Deletes the zip on the server again.""" + + zipped = Path(f"{name}.zip") + + with zipfile.ZipFile(zipped, "w", zipfile.ZIP_DEFLATED) as zipf: + for dataset_id, run_id in dataset_ids_and_run_ids: + path_to_be_zipped = Path(settings.TIRA_ROOT) / "data" / "runs" / dataset_id / vm_id / run_id + for f in path_to_be_zipped.rglob("*"): + zipf.write(f, arcname=f.relative_to(path_to_be_zipped.parent)) + + return zipped + + +@check_conditional_permissions(public_data_ok=True) +@check_resources_exist("json") +def download_rundir(request, task_id, dataset_id, vm_id, run_id): + """Zip the given run and hand it out for download. Deletes the zip on the server again.""" + zipped = zip_run(dataset_id, vm_id, run_id) + + if zipped.exists(): + response = FileResponse(open(zipped, "rb"), as_attachment=True, filename=f"{run_id}-{zipped.stem}.zip") + os.remove(zipped) + return response + else: + return JsonResponse( + {"status": 1, "reason": f"File does not exist: {zipped}"}, status=HTTPStatus.INTERNAL_SERVER_ERROR + ) + + +@check_conditional_permissions(public_data_ok=True) +@check_resources_exist("json") +def download_input_rundir(request, task_id, dataset_id, vm_id, run_id): + return download_rundir(request, task_id, dataset_id, vm_id, run_id) + + +def download_repo_template(request, task_id, vm_id): + with tempfile.TemporaryDirectory() as tmpdirname: + directory = Path(tmpdirname) / f"git-repo-template-{task_id}" + os.makedirs(directory, exist_ok=True) + os.makedirs(directory / ".github" / "workflows", exist_ok=True) + context = { + "task_id": task_id, + "image": f"registry.webis.de/code-research/tira/tira-user-{vm_id}/github-action-submission:0.0.1", + "input_dataset": model.reference_dataset(task_id), + } + + with (directory / "README.md").open("w") as readme, (directory / "script.py").open("w") as script, ( + directory / "requirements.txt" + ).open("w") as requirements, (directory / "Dockerfile").open("w") as dockerfile, ( + directory / ".github" / "workflows" / "upload-software-to-tira.yml" + ).open( + "w" + ) as ci: + readme.write(render_to_string("tira/git-repo-template/README.md", context=context)) + dockerfile.write(render_to_string("tira/git-repo-template/Dockerfile", context=context)) + requirements.write("argparse") + script.write(render_to_string("tira/git-repo-template/script.py", context=context)) + ci.write(render_to_string("tira/git-repo-template/github-action.yml", context=context)) + + zipped = Path(tmpdirname) / f"{task_id}.zip" + with zipfile.ZipFile(zipped, "w") as zipf: + for f in directory.rglob("*"): + zipf.write(f, arcname=f.relative_to(directory)) + + return FileResponse(open(zipped, "rb"), as_attachment=True, filename=f"git-repo-template-{task_id}.zip") + + 
+@check_permissions +def download_datadir(request, dataset_type, input_type, dataset_id): + input_type = input_type.lower().replace("input", "") + input_type = "" if len(input_type) < 2 else input_type + task_id = model.get_dataset(dataset_id)["task"] + + path = model.model.data_path / f"{dataset_type}-datasets{input_type}" / task_id / dataset_id + + if not path.exists(): + return JsonResponse( + {"status": 1, "reason": f"File does not exist: {path}"}, status=HTTPStatus.INTERNAL_SERVER_ERROR + ) + + zipped = Path(f"{path.stem}.zip") + with zipfile.ZipFile(zipped, "w") as zipf: + for f in path.rglob("*"): + zipf.write(f, arcname=f.relative_to(path.parent)) + + if zipped.exists(): + response = FileResponse( + open(zipped, "rb"), as_attachment=True, filename=f"{dataset_id}-{dataset_type}{input_type}.zip" + ) + os.remove(zipped) + return response diff --git a/application/test/_utils/mixins.py b/application/test/_utils/mixins.py new file mode 100644 index 000000000..e60304b56 --- /dev/null +++ b/application/test/_utils/mixins.py @@ -0,0 +1,33 @@ +from abc import ABC, abstractmethod +from typing import Optional +from unittest.util import _common_shorten_repr + + +class StrAssertMixins(ABC): + """A mixin class for adding further string related assertions to a test case. + + The inheriting class must implement ``fail`` and ``_formatMessage`` methods, which behave similar to + `unittest.TestCase`. The most straight forward way is to use the mixin together with a `unittest.TestCase`: + + .. code:: python + + from unittest import TestCase + + class MyTest(TestCase, StrAssertMixins): + + def testcase(self): + self.assertStartsWith("foobar", "foo") # Success + self.assertStartsWith("foobar", "bar") # Fail + """ + + @abstractmethod + def _formatMessage(self, msg: Optional[str], standardMsg: str) -> str: ... + + @abstractmethod + def fail(self, msg: Optional[str] = None) -> None: ... 
+ + def assertStartsWith(self, string: str, prefix: str, msg: Optional[str] = None) -> None: + if not string.startswith(prefix): + standardMsg = "not %s.startswith(%s)" % _common_shorten_repr(string, prefix) + msg = self._formatMessage(msg, standardMsg) + self.fail(msg) diff --git a/application/test/api_access_matrix.py b/application/test/api_access_matrix.py index d90c3fd8e..9f85c0dee 100644 --- a/application/test/api_access_matrix.py +++ b/application/test/api_access_matrix.py @@ -1,129 +1,20 @@ -from utils_for_testing import route_to_test, software_public, software_non_public from datetime import datetime -#Used for some tests +from utils_for_testing import route_to_test, software_non_public, software_public + +# Used for some tests now = datetime.now().strftime("%Y%m%d") -ADMIN = 'tira_reviewer' -GUEST = '' -PARTICIPANT = 'tira_vm_PARTICIPANT-FOR-TEST-1' -ORGANIZER = 'tira_org_EXAMPLE-ORGANIZER' -ORGANIZER_WRONG_TASK = 'tira_org_ORGANIZER-FOR-OTHER-TASK' +ADMIN = "tira_reviewer" +GUEST = "" +PARTICIPANT = "tira_vm_PARTICIPANT-FOR-TEST-1" +ORGANIZER = "tira_org_EXAMPLE-ORGANIZER" +ORGANIZER_WRONG_TASK = "tira_org_ORGANIZER-FOR-OTHER-TASK" API_ACCESS_MATRIX = [ route_to_test( - url_pattern='', - params=None, - group_to_expected_status_code={ - ADMIN: 200, - GUEST: 200, - PARTICIPANT: 200, - ORGANIZER: 200, - ORGANIZER_WRONG_TASK: 200, - }, - ), - route_to_test( - url_pattern='task', - params=None, - group_to_expected_status_code={ - ADMIN: 200, - GUEST: 200, - PARTICIPANT: 200, - ORGANIZER: 200, - ORGANIZER_WRONG_TASK: 200, - }, - ), - route_to_test( - url_pattern=r'^frontend-vuetify/.*', - params=None, - group_to_expected_status_code={ - ADMIN: 200, - GUEST: 200, - PARTICIPANT: 200, - ORGANIZER: 200, - ORGANIZER_WRONG_TASK: 200, - }, - ), - route_to_test( - url_pattern=r'^tirex/.*', - params=None, - group_to_expected_status_code={ - ADMIN: 200, - GUEST: 200, - PARTICIPANT: 200, - ORGANIZER: 200, - ORGANIZER_WRONG_TASK: 200, - }, - ), - route_to_test( - url_pattern=r'^submit/.*', - params=None, - group_to_expected_status_code={ - ADMIN: 200, - GUEST: 200, - PARTICIPANT: 200, - ORGANIZER: 200, - ORGANIZER_WRONG_TASK: 200, - }, - ), - route_to_test( - url_pattern=r'^task-overview/.*', - params=None, - group_to_expected_status_code={ - ADMIN: 200, - GUEST: 200, - PARTICIPANT: 200, - ORGANIZER: 200, - ORGANIZER_WRONG_TASK: 200, - }, - ), - route_to_test( - url_pattern='task/', - params=None, - group_to_expected_status_code={ - ADMIN: 200, - GUEST: 200, - PARTICIPANT: 200, - ORGANIZER: 200, - ORGANIZER_WRONG_TASK: 200, - }, - ), - route_to_test( - url_pattern='task//', - params=None, - group_to_expected_status_code={ - ADMIN: 200, - GUEST: 200, - PARTICIPANT: 200, - ORGANIZER: 200, - ORGANIZER_WRONG_TASK: 200, - }, - ), - route_to_test( - url_pattern='task//', - params=None, - group_to_expected_status_code={ - ADMIN: 200, - GUEST: 200, - PARTICIPANT: 200, - ORGANIZER: 200, - ORGANIZER_WRONG_TASK: 200, - }, - ), - route_to_test( - url_pattern='tasks', - params=None, - group_to_expected_status_code={ - ADMIN: 200, - GUEST: 200, - PARTICIPANT: 200, - ORGANIZER: 200, - ORGANIZER_WRONG_TASK: 200, - }, - ), - route_to_test( - url_pattern='api/tirex-components', + url_pattern="api/tirex-components", params=None, group_to_expected_status_code={ ADMIN: 200, @@ -134,7 +25,7 @@ }, ), route_to_test( - url_pattern='api/tirex-snippet', + url_pattern="api/tirex-snippet", params=None, group_to_expected_status_code={ ADMIN: 200, @@ -145,7 +36,7 @@ }, ), route_to_test( - 
url_pattern='api/snippets-for-tirex-components', + url_pattern="api/snippets-for-tirex-components", params=None, group_to_expected_status_code={ ADMIN: 200, @@ -156,8 +47,8 @@ }, ), route_to_test( - url_pattern='api/list-runs////', - params={'task_id': '1', 'dataset_id': 1, 'vm_id': '1', 'software_id': '1'}, + url_pattern="api/list-runs////", + params={"task_id": "1", "dataset_id": 1, "vm_id": "1", "software_id": "1"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 200, @@ -167,8 +58,8 @@ }, ), route_to_test( - url_pattern='api/re-ranking-datasets/', - params={'task_id': '1'}, + url_pattern="api/re-ranking-datasets/", + params={"task_id": "1"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 200, @@ -178,8 +69,8 @@ }, ), route_to_test( - url_pattern='api/task//public-submissions', - params={'task_id': '1'}, + url_pattern="api/task//public-submissions", + params={"task_id": "1"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 200, @@ -189,8 +80,8 @@ }, ), route_to_test( - url_pattern='api/task//submission-details//', - params={'task_id': '1', 'user_id': '2', 'display_name': '3'}, + url_pattern="api/task//submission-details//", + params={"task_id": "1", "user_id": "2", "display_name": "3"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 200, @@ -200,8 +91,8 @@ }, ), route_to_test( - url_pattern='api/submissions-of-user/', - params={'vm_id': 'does-not-exist'}, + url_pattern="api/submissions-of-user/", + params={"vm_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -211,8 +102,8 @@ }, ), route_to_test( - url_pattern='api/import-submission////', - params={'vm_id': 'does-not-exist', 'task_id': 'does-not-exist', 'submission_type': '1', 's_id': '1'}, + url_pattern="api/import-submission////", + params={"vm_id": "does-not-exist", "task_id": "does-not-exist", "submission_type": "1", "s_id": "1"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -222,8 +113,13 @@ }, ), route_to_test( - url_pattern='task//user//dataset//download/.zip', - params={'task_id': 'shared-task-1', 'dataset_id': f'dataset-1-{now}-training', 'vm_id': 'example_participant', 'run_id': 'run-1-example_participant'}, + url_pattern="task//user//dataset//download/.zip", + params={ + "task_id": "shared-task-1", + "dataset_id": f"dataset-1-{now}-training", + "vm_id": "example_participant", + "run_id": "run-1-example_participant", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -232,10 +128,9 @@ ORGANIZER_WRONG_TASK: 302, }, ), - route_to_test( - url_pattern='data-download/git-repo-template//.zip', - params={'task_id': f'does-not-exist', 'vm_id': 'does-not-exist'}, + url_pattern="data-download/git-repo-template//.zip", + params={"task_id": "does-not-exist", "vm_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 200, @@ -245,8 +140,8 @@ }, ), route_to_test( - url_pattern='data-download///.zip', - params={'dataset_type': 'training', 'dataset_id': f'dataset-1-{now}-training', 'input_type': 'input-'}, + url_pattern="data-download///.zip", + params={"dataset_type": "training", "dataset_id": f"dataset-1-{now}-training", "input_type": "input-"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 200, @@ -255,10 +150,13 @@ ORGANIZER_WRONG_TASK: 200, }, ), - route_to_test( - url_pattern='data-download///.zip', - params={'dataset_type': 'training', 'dataset_id': f'dataset-not-published-{now}-training', 'input_type': 'input-'}, + url_pattern="data-download///.zip", + params={ + "dataset_type": "training", + "dataset_id": 
f"dataset-not-published-{now}-training", + "input_type": "input-", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -268,8 +166,8 @@ }, ), route_to_test( - url_pattern='data-download///.zip', - params={'dataset_type': 'training', 'dataset_id': f'dataset-2-{now}-test', 'input_type': 'input-'}, + url_pattern="data-download///.zip", + params={"dataset_type": "training", "dataset_id": f"dataset-2-{now}-test", "input_type": "input-"}, group_to_expected_status_code={ ADMIN: 500, GUEST: 405, @@ -279,8 +177,12 @@ }, ), route_to_test( - url_pattern='data-download///.zip', - params={'dataset_type': 'training', 'dataset_id': f'dataset-of-organizer-{now}-training', 'input_type': 'input-'}, + url_pattern="data-download///.zip", + params={ + "dataset_type": "training", + "dataset_id": f"dataset-of-organizer-{now}-training", + "input_type": "input-", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -290,8 +192,8 @@ }, ), route_to_test( - url_pattern='tira-admin/export-participants/.csv', - params={'task_id': 'task-of-organizer-1'}, + url_pattern="tira-admin/export-participants/.csv", + params={"task_id": "task-of-organizer-1"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -301,8 +203,8 @@ }, ), route_to_test( - url_pattern='api/count-of-team-submissions/', - params={'task_id': 'task-of-organizer-1'}, + url_pattern="api/count-of-team-submissions/", + params={"task_id": "task-of-organizer-1"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -312,8 +214,8 @@ }, ), route_to_test( - url_pattern='api/token/', - params={'vm_id': PARTICIPANT.split('_')[-1]}, + url_pattern="api/token/", + params={"vm_id": PARTICIPANT.split("_")[-1]}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -323,8 +225,8 @@ }, ), route_to_test( - url_pattern='api/add_software_submission_git_repository//', - params={'task_id': 'task-of-organizer-1', 'vm_id': PARTICIPANT.split('_')[-1]}, + url_pattern="api/add_software_submission_git_repository//", + params={"task_id": "task-of-organizer-1", "vm_id": PARTICIPANT.split("_")[-1]}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -334,8 +236,8 @@ }, ), route_to_test( - url_pattern='api/add_software_submission_git_repository//', - params={'task_id': 'task-of-organizer-1', 'vm_id': 'does-not-exist'}, + url_pattern="api/add_software_submission_git_repository//", + params={"task_id": "task-of-organizer-1", "vm_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -345,8 +247,8 @@ }, ), route_to_test( - url_pattern='api/add_software_submission_git_repository//', - params={'task_id': 'does-not-exist', 'vm_id': 'does-not-exist'}, + url_pattern="api/add_software_submission_git_repository//", + params={"task_id": "does-not-exist", "vm_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -356,8 +258,8 @@ }, ), route_to_test( - url_pattern='api/get_software_submission_git_repository//', - params={'task_id': 'task-of-organizer-1', 'vm_id': PARTICIPANT.split('_')[-1]}, + url_pattern="api/get_software_submission_git_repository//", + params={"task_id": "task-of-organizer-1", "vm_id": PARTICIPANT.split("_")[-1]}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -367,8 +269,8 @@ }, ), route_to_test( - url_pattern='api/get_software_submission_git_repository//', - params={'task_id': 'task-of-organizer-1', 'vm_id': 'does-not-exist'}, + url_pattern="api/get_software_submission_git_repository//", + params={"task_id": "task-of-organizer-1", "vm_id": "does-not-exist"}, 
group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -378,8 +280,8 @@ }, ), route_to_test( - url_pattern='api/get_software_submission_git_repository//', - params={'task_id': 'does-not-exist', 'vm_id': 'does-not-exist'}, + url_pattern="api/get_software_submission_git_repository//", + params={"task_id": "does-not-exist", "vm_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -388,11 +290,9 @@ ORGANIZER_WRONG_TASK: 302, }, ), - - route_to_test( - url_pattern='tira-admin/export-participants/.csv', - params={'task_id': 'shared-task-1'}, + url_pattern="tira-admin/export-participants/.csv", + params={"task_id": "shared-task-1"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -402,8 +302,8 @@ }, ), route_to_test( - url_pattern='api/configuration-of-evaluation//', - params={'task_id': 'shared-task-1', 'dataset_id': f'dataset-1-{now}-training'}, + url_pattern="api/configuration-of-evaluation//", + params={"task_id": "shared-task-1", "dataset_id": f"dataset-1-{now}-training"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 200, @@ -413,8 +313,8 @@ }, ), route_to_test( - url_pattern='diffir////', - params={'task_id': 'shared-task-1', 'topk': 10, 'run_id_1': '1', 'run_id_2': '2'}, + url_pattern="diffir////", + params={"task_id": "shared-task-1", "topk": 10, "run_id_1": "1", "run_id_2": "2"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -424,8 +324,13 @@ }, ), route_to_test( - url_pattern='task//user//dataset//download/.zip', - params={'task_id': 'shared-task-1', 'dataset_id': f'dataset-1-{now}-training', 'vm_id': PARTICIPANT.split('_')[-1], 'run_id': 'run-1-example_participant'}, + url_pattern="task//user//dataset//download/.zip", + params={ + "task_id": "shared-task-1", + "dataset_id": f"dataset-1-{now}-training", + "vm_id": PARTICIPANT.split("_")[-1], + "run_id": "run-1-example_participant", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -435,8 +340,8 @@ }, ), route_to_test( - url_pattern='api/huggingface_model_mounts/vm//', - params={'hf_model': 'does-not-exist', 'vm_id': PARTICIPANT.split('_')[-1]}, + url_pattern="api/huggingface_model_mounts/vm//", + params={"hf_model": "does-not-exist", "vm_id": PARTICIPANT.split("_")[-1]}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -446,8 +351,8 @@ }, ), route_to_test( - url_pattern='api/huggingface_model_mounts/vm//', - params={'hf_model': 'does-not-exist', 'vm_id': 'does-not-exist'}, + url_pattern="api/huggingface_model_mounts/vm//", + params={"hf_model": "does-not-exist", "vm_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -457,8 +362,13 @@ }, ), route_to_test( - url_pattern='task//user//dataset//download/.zip', - params={'task_id': 'shared-task-1', 'dataset_id': f'dataset-2-{now}-test', 'vm_id': 'example_participant', 'run_id': 'run-1-example_participant'}, + url_pattern="task//user//dataset//download/.zip", + params={ + "task_id": "shared-task-1", + "dataset_id": f"dataset-2-{now}-test", + "vm_id": "example_participant", + "run_id": "run-1-example_participant", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -468,8 +378,13 @@ }, ), route_to_test( - url_pattern='task//user//dataset//download/.zip', - params={'task_id': 'shared-task-1', 'dataset_id': f'dataset-2-{now}-test', 'vm_id': PARTICIPANT.split('_')[-1], 'run_id': 'run-1-example_participant'}, + url_pattern="task//user//dataset//download/.zip", + params={ + "task_id": "shared-task-1", + "dataset_id": f"dataset-2-{now}-test", + "vm_id": 
PARTICIPANT.split("_")[-1], + "run_id": "run-1-example_participant", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -479,8 +394,14 @@ }, ), route_to_test( - url_pattern='serp//user//dataset///', - params={'task_id': 'shared-task-1', 'topk': 10, 'dataset_id': f'dataset-1-{now}-training', 'vm_id': PARTICIPANT.split('_')[-1], 'run_id': 'run-1-example_participant'}, + url_pattern="serp//user//dataset///", + params={ + "task_id": "shared-task-1", + "topk": 10, + "dataset_id": f"dataset-1-{now}-training", + "vm_id": PARTICIPANT.split("_")[-1], + "run_id": "run-1-example_participant", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -490,9 +411,14 @@ }, ), route_to_test( - url_pattern='serp//user//dataset///', - params={'task_id': 'shared-task-1', 'topk': 10, 'dataset_id': f'dataset-1-{now}-training', - 'vm_id': 'participant-1', 'run_id': 'run-1-participant-1'}, + url_pattern="serp//user//dataset///", + params={ + "task_id": "shared-task-1", + "topk": 10, + "dataset_id": f"dataset-1-{now}-training", + "vm_id": "participant-1", + "run_id": "run-1-participant-1", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 200, @@ -502,20 +428,14 @@ }, ), route_to_test( - url_pattern='serp//user//dataset///', - params={'task_id': 'shared-task-1', 'dataset_id': f'dataset-2-{now}-test', 'topk': 10, 'vm_id': 'example_participant', 'run_id': 'run-1-example_participant'}, - group_to_expected_status_code={ - ADMIN: 200, - GUEST: 405, - PARTICIPANT: 405, - ORGANIZER: 405, - ORGANIZER_WRONG_TASK: 405, + url_pattern="serp//user//dataset///", + params={ + "task_id": "shared-task-1", + "dataset_id": f"dataset-2-{now}-test", + "topk": 10, + "vm_id": "example_participant", + "run_id": "run-1-example_participant", }, - ), - route_to_test( - url_pattern='serp//user//dataset///', - params={'task_id': 'shared-task-1', 'dataset_id': f'dataset-2-{now}-test', 'topk': 10, 'vm_id': PARTICIPANT.split('_')[-1], - 'run_id': 'run-1-example_participant'}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -525,19 +445,14 @@ }, ), route_to_test( - url_pattern='api/count-of-missing-reviews/', - params={'task_id': 'shared-task-1'}, - group_to_expected_status_code={ - ADMIN: 200, - GUEST: 405, - PARTICIPANT: 405, - ORGANIZER: 405, - ORGANIZER_WRONG_TASK: 405, + url_pattern="serp//user//dataset///", + params={ + "task_id": "shared-task-1", + "dataset_id": f"dataset-2-{now}-test", + "topk": 10, + "vm_id": PARTICIPANT.split("_")[-1], + "run_id": "run-1-example_participant", }, - ), - route_to_test( - url_pattern='background_jobs//', - params={'task_id': 'does-not-exist', 'job_id': -1}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -547,41 +462,19 @@ }, ), route_to_test( - url_pattern='background_jobs//', - params={'task_id': 'task-of-organizer-1', 'job_id': -1}, + url_pattern="api/count-of-missing-reviews/", + params={"task_id": "shared-task-1"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, PARTICIPANT: 405, - ORGANIZER: 200, + ORGANIZER: 405, ORGANIZER_WRONG_TASK: 405, }, ), route_to_test( - url_pattern='login', - params={}, - group_to_expected_status_code={ - ADMIN: 200, - GUEST: 200, - PARTICIPANT: 200, - ORGANIZER: 200, - ORGANIZER_WRONG_TASK: 200, - }, - ), - route_to_test( - url_pattern='logout', - params={}, - group_to_expected_status_code={ - ADMIN: 302, - GUEST: 302, - PARTICIPANT: 302, - ORGANIZER: 302, - ORGANIZER_WRONG_TASK: 302, - }, - ), - route_to_test( - url_pattern='task//vm//add_software/vm', - params={'task_id': 'shared-task-1', 'vm_id': 
'example_participant'}, + url_pattern="task//vm//add_software/vm", + params={"task_id": "shared-task-1", "vm_id": "example_participant"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -591,8 +484,8 @@ }, ), route_to_test( - url_pattern='task//vm//add_software/vm', - params={'task_id': 'shared-task-1', 'vm_id': PARTICIPANT.split('_')[-1]}, + url_pattern="task//vm//add_software/vm", + params={"task_id": "shared-task-1", "vm_id": PARTICIPANT.split("_")[-1]}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -602,8 +495,8 @@ }, ), route_to_test( - url_pattern='task//vm//add_software/docker', - params={'task_id': 'shared-task-1', 'vm_id': 'example_participant'}, + url_pattern="task//vm//add_software/docker", + params={"task_id": "shared-task-1", "vm_id": "example_participant"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -613,8 +506,8 @@ }, ), route_to_test( - url_pattern='task//vm//add_software/docker', - params={'task_id': 'task-of-organizer-1', 'vm_id': 'example_participant'}, + url_pattern="task//vm//add_software/docker", + params={"task_id": "task-of-organizer-1", "vm_id": "example_participant"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -624,8 +517,8 @@ }, ), route_to_test( - url_pattern='task//vm//add_software/docker', - params={'task_id': 'shared-task-1', 'vm_id': PARTICIPANT.split('_')[-1]}, + url_pattern="task//vm//add_software/docker", + params={"task_id": "shared-task-1", "vm_id": PARTICIPANT.split("_")[-1]}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -635,8 +528,8 @@ }, ), route_to_test( - url_pattern='task//vm//software_details/', - params={'task_id': 'shared-task-1', 'vm_id': 'example_participant', 'software_name': 'does-not-exist'}, + url_pattern="task//vm//software_details/", + params={"task_id": "shared-task-1", "vm_id": "example_participant", "software_name": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -646,8 +539,8 @@ }, ), route_to_test( - url_pattern='task//vm//software_details/', - params={'task_id': 'shared-task-1', 'vm_id': 'example_participant', 'software_name': software_non_public}, + url_pattern="task//vm//software_details/", + params={"task_id": "shared-task-1", "vm_id": "example_participant", "software_name": software_non_public}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -657,8 +550,8 @@ }, ), route_to_test( - url_pattern='task//vm//software_details/', - params={'task_id': 'shared-task-1', 'vm_id': 'PARTICIPANT-FOR-TEST-1', 'software_name': software_public}, + url_pattern="task//vm//software_details/", + params={"task_id": "shared-task-1", "vm_id": "PARTICIPANT-FOR-TEST-1", "software_name": software_public}, group_to_expected_status_code={ ADMIN: 200, GUEST: 200, @@ -668,8 +561,8 @@ }, ), route_to_test( - url_pattern='task//vm//software_details/', - params={'task_id': 'shared-task-1', 'vm_id': 'PARTICIPANT-FOR-TEST-1', 'software_name': software_non_public}, + url_pattern="task//vm//software_details/", + params={"task_id": "shared-task-1", "vm_id": "PARTICIPANT-FOR-TEST-1", "software_name": software_non_public}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -679,8 +572,8 @@ }, ), route_to_test( - url_pattern='api/upload-group-details///', - params={'task_id': 'shared-task-1', 'vm_id': 'example_participant', 'upload_id': '10'}, + url_pattern="api/upload-group-details///", + params={"task_id": "shared-task-1", "vm_id": "example_participant", "upload_id": "10"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -690,8 +583,8 @@ 
}, ), route_to_test( - url_pattern='task//vm//add_software/upload', - params={'task_id': 'shared-task-1', 'vm_id': 'example_participant'}, + url_pattern="task//vm//add_software/upload", + params={"task_id": "shared-task-1", "vm_id": "example_participant"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -701,8 +594,8 @@ }, ), route_to_test( - url_pattern='task//vm//add_software/upload', - params={'task_id': 'shared-task-1', 'vm_id': PARTICIPANT.split('_')[-1]}, + url_pattern="task//vm//add_software/upload", + params={"task_id": "shared-task-1", "vm_id": PARTICIPANT.split("_")[-1]}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -712,8 +605,8 @@ }, ), route_to_test( - url_pattern='task//vm//upload-delete/', - params={'task_id': 'shared-task-1', 'vm_id': 'does-not-exist', 'upload_id': -1}, + url_pattern="task//vm//upload-delete/", + params={"task_id": "shared-task-1", "vm_id": "does-not-exist", "upload_id": -1}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -723,8 +616,8 @@ }, ), route_to_test( - url_pattern='task//vm//upload-delete/', - params={'task_id': 'shared-task-1', 'vm_id': PARTICIPANT.split('_')[-1], 'upload_id': -1}, + url_pattern="task//vm//upload-delete/", + params={"task_id": "shared-task-1", "vm_id": PARTICIPANT.split("_")[-1], "upload_id": -1}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -734,8 +627,8 @@ }, ), route_to_test( - url_pattern='task//vm//save_software/upload/', - params={'task_id': 'shared-task-1', 'vm_id': 'does-not-exist', 'upload_id': -1}, + url_pattern="task//vm//save_software/upload/", + params={"task_id": "shared-task-1", "vm_id": "does-not-exist", "upload_id": -1}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -745,8 +638,8 @@ }, ), route_to_test( - url_pattern='task//vm//save_software/upload/', - params={'task_id': 'shared-task-1', 'vm_id': PARTICIPANT.split('_')[-1], 'upload_id': -1}, + url_pattern="task//vm//save_software/upload/", + params={"task_id": "shared-task-1", "vm_id": PARTICIPANT.split("_")[-1], "upload_id": -1}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -756,8 +649,8 @@ }, ), route_to_test( - url_pattern='task//vm//save_software/docker/', - params={'task_id': 'shared-task-1', 'vm_id': 'example_participant', 'docker_software_id': 0}, + url_pattern="task//vm//save_software/docker/", + params={"task_id": "shared-task-1", "vm_id": "example_participant", "docker_software_id": 0}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -767,8 +660,8 @@ }, ), route_to_test( - url_pattern='task//vm//save_software/docker/', - params={'task_id': 'shared-task-1', 'vm_id': PARTICIPANT.split('_')[-1], 'docker_software_id': 0}, + url_pattern="task//vm//save_software/docker/", + params={"task_id": "shared-task-1", "vm_id": PARTICIPANT.split("_")[-1], "docker_software_id": 0}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -778,8 +671,8 @@ }, ), route_to_test( - url_pattern='task//vm//save_software/vm/', - params={'task_id': 'shared-task-1', 'vm_id': 'example_participant', 'software_id': 0}, + url_pattern="task//vm//save_software/vm/", + params={"task_id": "shared-task-1", "vm_id": "example_participant", "software_id": 0}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -789,8 +682,8 @@ }, ), route_to_test( - url_pattern='task//vm//save_software/vm/', - params={'task_id': 'shared-task-1', 'vm_id': PARTICIPANT.split('_')[-1], 'software_id': 0}, + url_pattern="task//vm//save_software/vm/", + params={"task_id": "shared-task-1", "vm_id": 
PARTICIPANT.split("_")[-1], "software_id": 0}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -800,8 +693,8 @@ }, ), route_to_test( - url_pattern='task//vm//save_software/vm/', - params={'task_id': 'task-of-organizer-1', 'vm_id': 'example_participant', 'software_id': 0}, + url_pattern="task//vm//save_software/vm/", + params={"task_id": "task-of-organizer-1", "vm_id": "example_participant", "software_id": 0}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -811,8 +704,8 @@ }, ), route_to_test( - url_pattern='task//vm//delete_software/vm/', - params={'task_id': 'shared-task-1', 'vm_id': 'example_participant', 'software_id': 0}, + url_pattern="task//vm//delete_software/vm/", + params={"task_id": "shared-task-1", "vm_id": "example_participant", "software_id": 0}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -822,8 +715,8 @@ }, ), route_to_test( - url_pattern='task//vm//delete_software/vm/', - params={'task_id': 'shared-task-1', 'vm_id': PARTICIPANT.split('_')[-1], 'software_id': 0}, + url_pattern="task//vm//delete_software/vm/", + params={"task_id": "shared-task-1", "vm_id": PARTICIPANT.split("_")[-1], "software_id": 0}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -832,11 +725,14 @@ ORGANIZER_WRONG_TASK: 302, }, ), - - route_to_test( - url_pattern='task//vm//delete_software/docker/', - params={'task_id': 'shared-task-1', 'vm_id': 'example_participant', 'software_id': 0}, + url_pattern="task//vm//delete_software/docker/", + params={ + "task_id": "shared-task-1", + "vm_id": "example_participant", + "software_id": 0, + "docker_software_id": "", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -846,8 +742,13 @@ }, ), route_to_test( - url_pattern='task//vm//delete_software/docker/', - params={'task_id': 'shared-task-1', 'vm_id': PARTICIPANT.split('_')[-1], 'software_id': 0}, + url_pattern="task//vm//delete_software/docker/", + params={ + "task_id": "shared-task-1", + "vm_id": PARTICIPANT.split("_")[-1], + "software_id": 0, + "docker_software_id": "", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -857,8 +758,13 @@ }, ), route_to_test( - url_pattern='task//vm//delete_software/docker/', - params={'task_id': 'task-of-organizer-1', 'vm_id': 'example_participant', 'software_id': 0}, + url_pattern="task//vm//delete_software/docker/", + params={ + "task_id": "task-of-organizer-1", + "vm_id": "example_participant", + "software_id": 0, + "docker_software_id": "", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -868,8 +774,8 @@ }, ), route_to_test( - url_pattern='task//vm//run_details/', - params={'task_id': 'shared-task-1', 'vm_id': 'example_participant', 'run_id': 'run-1-example_participant'}, + url_pattern="task//vm//run_details/", + params={"task_id": "shared-task-1", "vm_id": "example_participant", "run_id": "run-1-example_participant"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -879,8 +785,8 @@ }, ), route_to_test( - url_pattern='task//vm//run_details/', - params={'task_id': 'shared-task-1', 'vm_id': 'participant-1', 'run_id': 'run-9-participant-1'}, + url_pattern="task//vm//run_details/", + params={"task_id": "shared-task-1", "vm_id": "participant-1", "run_id": "run-9-participant-1"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 200, @@ -890,8 +796,8 @@ }, ), route_to_test( - url_pattern='task//vm//software_details/', - params={'task_id': 'shared-task-1', 'vm_id': 'example_participant', 'software_name': 'does-not-exist'}, + url_pattern="task//vm//software_details/", + 
params={"task_id": "shared-task-1", "vm_id": "example_participant", "software_name": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -901,8 +807,8 @@ }, ), route_to_test( - url_pattern='task//vm//upload//', - params={'task_id': 'shared-task-1', 'vm_id': 'example_participant', 'dataset_id': 0, 'upload_id': -1}, + url_pattern="task//vm//upload//", + params={"task_id": "shared-task-1", "vm_id": "example_participant", "dataset_id": 0, "upload_id": -1}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -912,8 +818,8 @@ }, ), route_to_test( - url_pattern='task//vm//upload//', - params={'task_id': 'shared-task-1', 'vm_id': PARTICIPANT.split('_')[-1], 'dataset_id': 0, 'upload_id': -1}, + url_pattern="task//vm//upload//", + params={"task_id": "shared-task-1", "vm_id": PARTICIPANT.split("_")[-1], "dataset_id": 0, "upload_id": -1}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -923,8 +829,8 @@ }, ), route_to_test( - url_pattern='grpc//vm_info', - params={'vm_id': 'does-not-exist'}, + url_pattern="grpc//vm_info", + params={"vm_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -934,7 +840,7 @@ }, ), # Skip for the moment, takes too long. Maybe mock later? - #route_to_test( + # route_to_test( # url_pattern='grpc//vm_info', # params={'vm_id': PARTICIPANT.split('_')[-1]}, # group_to_expected_status_code={ @@ -943,10 +849,10 @@ # PARTICIPANT: 200, # ORGANIZER: 302, # }, - #), + # ), route_to_test( - url_pattern='grpc//vm_state', - params={'vm_id': 'does-not-exist'}, + url_pattern="grpc//vm_state", + params={"vm_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -956,8 +862,8 @@ }, ), route_to_test( - url_pattern='grpc//vm_state', - params={'vm_id': PARTICIPANT.split('_')[-1]}, + url_pattern="grpc//vm_state", + params={"vm_id": PARTICIPANT.split("_")[-1]}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -967,8 +873,8 @@ }, ), route_to_test( - url_pattern='grpc//vm_start', - params={'vm_id': 'does-not-exist'}, + url_pattern="grpc//vm_start", + params={"vm_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -978,7 +884,7 @@ }, ), # Skip for the moment, takes too long. Maybe mock later? - #route_to_test( + # route_to_test( # url_pattern='grpc//vm_start', # params={'vm_id': PARTICIPANT.split('_')[-1]}, # group_to_expected_status_code={ @@ -987,10 +893,10 @@ # PARTICIPANT: 200, # ORGANIZER: 302, # }, - #), + # ), route_to_test( - url_pattern='grpc//vm_shutdown', - params={'vm_id': 'does-not-exist'}, + url_pattern="grpc//vm_shutdown", + params={"vm_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1000,7 +906,7 @@ }, ), # Skip for the moment, takes too long. Maybe mock later? - #route_to_test( + # route_to_test( # url_pattern='grpc//vm_shutdown', # params={'vm_id': PARTICIPANT.split('_')[-1]}, # group_to_expected_status_code={ @@ -1009,10 +915,10 @@ # PARTICIPANT: 200, # ORGANIZER: 302, # }, - #), + # ), route_to_test( - url_pattern='grpc//vm_stop', - params={'vm_id': 'does-not-exist'}, + url_pattern="grpc//vm_stop", + params={"vm_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1022,7 +928,7 @@ }, ), # Skip for the moment, takes too long. Maybe mock later? 
- #route_to_test( + # route_to_test( # url_pattern='grpc//vm_stop', # params={'vm_id': PARTICIPANT.split('_')[-1]}, # group_to_expected_status_code={ @@ -1031,10 +937,10 @@ # PARTICIPANT: 200, # ORGANIZER: 302, # }, - #), + # ), route_to_test( - url_pattern='grpc//run_abort', - params={'vm_id': 'does-not-exist'}, + url_pattern="grpc//run_abort", + params={"vm_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1044,7 +950,7 @@ }, ), # Skip for the moment, takes too long. Maybe mock later? - #route_to_test( + # route_to_test( # url_pattern='grpc//run_abort', # params={'vm_id': PARTICIPANT.split('_')[-1]}, # group_to_expected_status_code={ @@ -1053,10 +959,10 @@ # PARTICIPANT: 200, # ORGANIZER: 302, # }, - #), + # ), route_to_test( - url_pattern='api/evaluations_of_run//', - params={'vm_id': PARTICIPANT.split('_')[-1], 'run_id': 'run-1-example_participant'}, + url_pattern="api/evaluations_of_run//", + params={"vm_id": PARTICIPANT.split("_")[-1], "run_id": "run-1-example_participant"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, # TODO Make consistent with "api/evaluations//" @@ -1066,8 +972,8 @@ }, ), route_to_test( - url_pattern='api/evaluations_of_run//', - params={'vm_id': 'does-not-exist', 'run_id': 'does-not-exist'}, + url_pattern="api/evaluations_of_run//", + params={"vm_id": "does-not-exist", "run_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, # TODO Make consistent with "api/evaluations//" @@ -1077,8 +983,8 @@ }, ), route_to_test( - url_pattern='grpc//vm_running_evaluations', - params={'vm_id': 'does-not-exist'}, + url_pattern="grpc//vm_running_evaluations", + params={"vm_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1088,7 +994,7 @@ }, ), # Skip for the moment, takes too long. Maybe mock later? 
- #route_to_test( + # route_to_test( # url_pattern='grpc//vm_running_evaluations', # params={'vm_id': PARTICIPANT.split('_')[-1]}, # group_to_expected_status_code={ @@ -1097,10 +1003,10 @@ # PARTICIPANT: 200, # ORGANIZER: 302, # }, - #), + # ), route_to_test( - url_pattern='grpc//get_running_evaluations', - params={'vm_id': 'does-not-exist'}, + url_pattern="grpc//get_running_evaluations", + params={"vm_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1110,8 +1016,8 @@ }, ), route_to_test( - url_pattern='grpc//get_running_evaluations', - params={'vm_id': PARTICIPANT.split('_')[-1]}, + url_pattern="grpc//get_running_evaluations", + params={"vm_id": PARTICIPANT.split("_")[-1]}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1121,8 +1027,8 @@ }, ), route_to_test( - url_pattern='grpc///run_execute/vm/', - params={'task_id': 'shared-task-1', 'vm_id': 'does-not-exist', 'software_id': 'does-not-exist'}, + url_pattern="grpc///run_execute/vm/", + params={"task_id": "shared-task-1", "vm_id": "does-not-exist", "software_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1132,7 +1038,7 @@ }, ), # TODO Add later - #route_to_test( + # route_to_test( # url_pattern='grpc///run_execute/vm/', # params={'task_id': 'shared-task-1', 'vm_id': PARTICIPANT.split('_')[-1], 'software_id': f'software-of-{PARTICIPANT.split("_")[-1]}'}, # group_to_expected_status_code={ @@ -1141,10 +1047,17 @@ # PARTICIPANT: 200, # ORGANIZER: 302, # }, - #), + # ), route_to_test( - url_pattern='grpc///run_execute/docker////', - params={'task_id': 'shared-task-1', 'vm_id': 'does-not-exist', 'dataset_id': 'does-not-exist', 'docker_software_id': 'does-not-exist', 'rerank_dataset': 'none'}, + url_pattern="grpc///run_execute/docker////", + params={ + "task_id": "shared-task-1", + "vm_id": "does-not-exist", + "dataset_id": "does-not-exist", + "docker_software_id": "does-not-exist", + "rerank_dataset": "none", + "docker_resources": "", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1154,8 +1067,15 @@ }, ), route_to_test( - url_pattern='grpc///run_execute/docker////', - params={'task_id': 'shared-task-1', 'vm_id': PARTICIPANT.split('_')[-1], 'dataset_id': 'does-not-exist', 'docker_software_id': 'does-not-exist', 'rerank_dataset': 'none'}, + url_pattern="grpc///run_execute/docker////", + params={ + "task_id": "shared-task-1", + "vm_id": PARTICIPANT.split("_")[-1], + "dataset_id": "does-not-exist", + "docker_software_id": "does-not-exist", + "rerank_dataset": "none", + "docker_resources": "", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1165,8 +1085,12 @@ }, ), route_to_test( - url_pattern='grpc//run_eval//', - params={'vm_id': 'does-not-exist', 'dataset_id': f'dataset-1-{now}-training', 'run_id': 'run-1-example_participant'}, + url_pattern="grpc//run_eval//", + params={ + "vm_id": "does-not-exist", + "dataset_id": f"dataset-1-{now}-training", + "run_id": "run-1-example_participant", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1176,7 +1100,7 @@ }, ), # TODO Add later - #route_to_test( + # route_to_test( # url_pattern='grpc//run_eval//', # params={'vm_id': PARTICIPANT.split('_')[-1], 'dataset_id': f'dataset-1-{now}-training', 'run_id': 'run-1-example_participant'}, # group_to_expected_status_code={ @@ -1185,11 +1109,10 @@ # PARTICIPANT: 200, # ORGANIZER: 302, # }, - #), - + # ), route_to_test( - url_pattern='api/submissions-for-task///', - params={'user_id': 'does-not-exist', 'task_id': f'does-not-exist', 
'submission_type': 'does-not-matter'}, + url_pattern="api/submissions-for-task///", + params={"user_id": "does-not-exist", "task_id": "does-not-exist", "submission_type": "does-not-matter"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1199,9 +1122,12 @@ }, ), route_to_test( - url_pattern='api/submissions-for-task///', - params={'user_id': PARTICIPANT.split('_')[-1], 'task_id': f'shared-task-1', - 'submission_type': 'does-not-matter'}, + url_pattern="api/submissions-for-task///", + params={ + "user_id": PARTICIPANT.split("_")[-1], + "task_id": "shared-task-1", + "submission_type": "does-not-matter", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1211,8 +1137,8 @@ }, ), route_to_test( - url_pattern='api/docker-softwares-details//', - params={'vm_id': PARTICIPANT.split('_')[-1], 'docker_software_id': f'1'}, + url_pattern="api/docker-softwares-details//", + params={"vm_id": PARTICIPANT.split("_")[-1], "docker_software_id": "1"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1222,8 +1148,11 @@ }, ), route_to_test( - url_pattern='api/docker-softwares-details//', - params={'vm_id': 'does-not-exist', 'docker_software_id': f'1', }, + url_pattern="api/docker-softwares-details//", + params={ + "vm_id": "does-not-exist", + "docker_software_id": "1", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1233,19 +1162,23 @@ }, ), route_to_test( - url_pattern='grpc///stop_docker_software/', - params={'user_id': 'example_participant', 'task_id': f'shared-task-1', 'run_id': 'run-1-example_participant'}, + url_pattern="grpc///stop_docker_software/", + params={"user_id": "example_participant", "task_id": "shared-task-1", "run_id": "run-1-example_participant"}, group_to_expected_status_code={ ADMIN: 200, - GUEST: 302, # Was error + GUEST: 302, # Was error PARTICIPANT: 302, ORGANIZER: 302, ORGANIZER_WRONG_TASK: 302, }, ), route_to_test( - url_pattern='grpc///stop_docker_software/', - params={'user_id': PARTICIPANT.split('_')[-1], 'task_id': f'shared-task-1', 'run_id': 'run-1-example_participant'}, + url_pattern="grpc///stop_docker_software/", + params={ + "user_id": PARTICIPANT.split("_")[-1], + "task_id": "shared-task-1", + "run_id": "run-1-example_participant", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1255,7 +1188,7 @@ }, ), route_to_test( - url_pattern='tira-admin/reload/vms', + url_pattern="tira-admin/reload/vms", params={}, group_to_expected_status_code={ ADMIN: 200, @@ -1266,7 +1199,7 @@ }, ), route_to_test( - url_pattern='tira-admin/reload/datasets', + url_pattern="tira-admin/reload/datasets", params={}, group_to_expected_status_code={ ADMIN: 200, @@ -1277,7 +1210,7 @@ }, ), route_to_test( - url_pattern='tira-admin/reload/tasks', + url_pattern="tira-admin/reload/tasks", params={}, group_to_expected_status_code={ ADMIN: 200, @@ -1288,7 +1221,7 @@ }, ), route_to_test( - url_pattern='tira-admin/create-vm', + url_pattern="tira-admin/create-vm", params={}, group_to_expected_status_code={ ADMIN: 200, @@ -1299,7 +1232,7 @@ }, ), route_to_test( - url_pattern='tira-admin/modify-vm', + url_pattern="tira-admin/modify-vm", params={}, group_to_expected_status_code={ ADMIN: 200, @@ -1310,8 +1243,8 @@ }, ), route_to_test( - url_pattern='tira-admin/edit-task/', - params={'task_id': 'shared-task-1'}, + url_pattern="tira-admin/edit-task/", + params={"task_id": "shared-task-1"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1321,8 +1254,8 @@ }, ), route_to_test( - url_pattern='tira-admin/edit-task/', - params={'task_id': 
'task-of-organizer-1'}, + url_pattern="tira-admin/edit-task/", + params={"task_id": "task-of-organizer-1"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1332,8 +1265,8 @@ }, ), route_to_test( - url_pattern='tira-admin/delete-task/', - params={'task_id': 'task-does-not-exist'}, + url_pattern="tira-admin/delete-task/", + params={"task_id": "task-does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1343,8 +1276,8 @@ }, ), route_to_test( - url_pattern='tira-admin/add-dataset/', - params={'task_id': 'task-does-not-exist'}, + url_pattern="tira-admin/add-dataset/", + params={"task_id": "task-does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1354,8 +1287,8 @@ }, ), route_to_test( - url_pattern='tira-admin/add-dataset/', - params={'task_id': 'shared-task-1'}, + url_pattern="tira-admin/add-dataset/", + params={"task_id": "shared-task-1"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1365,8 +1298,8 @@ }, ), route_to_test( - url_pattern='tira-admin/add-dataset/', - params={'task_id': 'task-of-organizer-1'}, + url_pattern="tira-admin/add-dataset/", + params={"task_id": "task-of-organizer-1"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1376,8 +1309,8 @@ }, ), route_to_test( - url_pattern='tira-admin/import-irds-dataset/', - params={'task_id': 'task-does-not-exist'}, + url_pattern="tira-admin/import-irds-dataset/", + params={"task_id": "task-does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1387,8 +1320,8 @@ }, ), route_to_test( - url_pattern='tira-admin/import-irds-dataset/', - params={'task_id': 'task-of-organizer-1'}, + url_pattern="tira-admin/import-irds-dataset/", + params={"task_id": "task-of-organizer-1"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1398,8 +1331,8 @@ }, ), route_to_test( - url_pattern='tira-admin/upload-dataset///', - params={'task_id': 'task-does-not-exist', 'dataset_id': 'does-not-exist', 'dataset_type': 'participant-input'}, + url_pattern="tira-admin/upload-dataset///", + params={"task_id": "task-does-not-exist", "dataset_id": "does-not-exist", "dataset_type": "participant-input"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1409,8 +1342,8 @@ }, ), route_to_test( - url_pattern='tira-admin/upload-dataset///', - params={'task_id': 'task-of-organizer-1', 'dataset_id': 'does-not-exist', 'dataset_type': 'participant-input'}, + url_pattern="tira-admin/upload-dataset///", + params={"task_id": "task-of-organizer-1", "dataset_id": "does-not-exist", "dataset_type": "participant-input"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1420,8 +1353,8 @@ }, ), route_to_test( - url_pattern='tira-admin/edit-dataset/', - params={'dataset_id': 'does-not-exist'}, + url_pattern="tira-admin/edit-dataset/", + params={"dataset_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1431,8 +1364,8 @@ }, ), route_to_test( - url_pattern='tira-admin/edit-dataset/', - params={'dataset_id': f'dataset-of-organizer-{now}-training'}, + url_pattern="tira-admin/edit-dataset/", + params={"dataset_id": f"dataset-of-organizer-{now}-training"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1442,8 +1375,8 @@ }, ), route_to_test( - url_pattern='tira-admin/edit-dataset/', - params={'dataset_id': f'dataset-1-{now}-training'}, + url_pattern="tira-admin/edit-dataset/", + params={"dataset_id": f"dataset-1-{now}-training"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1453,8 +1386,8 @@ }, ), 
route_to_test( - url_pattern='tira-admin/delete-dataset/', - params={'dataset_id': 'does-not-exist'}, + url_pattern="tira-admin/delete-dataset/", + params={"dataset_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1464,30 +1397,30 @@ }, ), route_to_test( - url_pattern='tira-admin/add-organizer/', - params={'organizer_id': 'organizer-2'}, + url_pattern="tira-admin/add-organizer/", + params={"organizer_id": "organizer-2"}, group_to_expected_status_code={ ADMIN: 200, - GUEST: 405, # We expect 405 for existing organizer 'organizer-2' - PARTICIPANT: 405, # We expect 405 for existing organizer 'organizer-2' - ORGANIZER: 405, # We expect 405 for existing organizer 'organizer-2' - ORGANIZER_WRONG_TASK: 405, # We expect 405 for existing 'organizer-2' + GUEST: 405, # We expect 405 for existing organizer 'organizer-2' + PARTICIPANT: 405, # We expect 405 for existing organizer 'organizer-2' + ORGANIZER: 405, # We expect 405 for existing organizer 'organizer-2' + ORGANIZER_WRONG_TASK: 405, # We expect 405 for existing 'organizer-2' }, ), route_to_test( - url_pattern='tira-admin/add-organizer/', - params={'organizer_id': 'organizer-id-does-not-exist'}, + url_pattern="tira-admin/add-organizer/", + params={"organizer_id": "organizer-id-does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 200, - PARTICIPANT: 200, # We expect 200 for non-existing organizer. - ORGANIZER: 200, # We expect 200 for non-existing organizer. - ORGANIZER_WRONG_TASK: 200, # We expect 200 for non-existing organizer. + PARTICIPANT: 200, # We expect 200 for non-existing organizer. + ORGANIZER: 200, # We expect 200 for non-existing organizer. + ORGANIZER_WRONG_TASK: 200, # We expect 200 for non-existing organizer. }, ), route_to_test( - url_pattern='tira-admin/edit-organizer/', - params={'organizer_id': 'organizer-id-does-not-exist'}, + url_pattern="tira-admin/edit-organizer/", + params={"organizer_id": "organizer-id-does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1498,8 +1431,8 @@ }, ), route_to_test( - url_pattern='tira-admin/edit-organizer/', - params={'organizer_id': 'organizer-2'}, + url_pattern="tira-admin/edit-organizer/", + params={"organizer_id": "organizer-2"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1509,8 +1442,8 @@ }, ), route_to_test( - url_pattern='tira-admin/edit-organizer/', - params={'organizer_id': 'EXAMPLE-ORGANIZER'}, + url_pattern="tira-admin/edit-organizer/", + params={"organizer_id": "EXAMPLE-ORGANIZER"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1520,8 +1453,12 @@ }, ), route_to_test( - url_pattern='tira-admin/edit-review///', - params={'dataset_id': 'dataset-does-not-exist', 'vm_id': 'vm-id-does-not-exist', 'run_id': 'run-id-does-not-exist'}, + url_pattern="tira-admin/edit-review///", + params={ + "dataset_id": "dataset-does-not-exist", + "vm_id": "vm-id-does-not-exist", + "run_id": "run-id-does-not-exist", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1531,9 +1468,12 @@ }, ), route_to_test( - url_pattern='tira-admin/edit-review///', - params={'dataset_id': f'dataset-of-organizer-{now}-training', 'vm_id': 'vm-id-does-not-exist', - 'run_id': 'run-of-organizer'}, + url_pattern="tira-admin/edit-review///", + params={ + "dataset_id": f"dataset-of-organizer-{now}-training", + "vm_id": "vm-id-does-not-exist", + "run_id": "run-of-organizer", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1543,9 +1483,12 @@ }, ), route_to_test( - 
url_pattern='tira-admin/edit-review///', - params={'dataset_id': f'dataset-of-organizer-{now}-training', 'vm_id': 'vm-id-does-not-exist', - 'run_id': 'run-1-example_participant'}, + url_pattern="tira-admin/edit-review///", + params={ + "dataset_id": f"dataset-of-organizer-{now}-training", + "vm_id": "vm-id-does-not-exist", + "run_id": "run-1-example_participant", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1555,9 +1498,12 @@ }, ), route_to_test( - url_pattern='tira-admin/edit-review///', - params={'dataset_id': f'dataset-1-{now}-training', 'vm_id': 'vm-id-does-not-exist', - 'run_id': 'run-of-organizer'}, + url_pattern="tira-admin/edit-review///", + params={ + "dataset_id": f"dataset-1-{now}-training", + "vm_id": "vm-id-does-not-exist", + "run_id": "run-of-organizer", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1567,8 +1513,8 @@ }, ), route_to_test( - url_pattern='tira-admin/create-group/', - params={'vm_id': 'vm-id-does-not-exist'}, + url_pattern="tira-admin/create-group/", + params={"vm_id": "vm-id-does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1578,19 +1524,29 @@ }, ), route_to_test( - url_pattern='publish////', - params={'dataset_id': 'dataset-does-not-exist', 'vm_id': 'vm-id-does-not-exist', 'run_id': 'run-id-does-not-exist', 'value': 'does-not-exist'}, + url_pattern="publish////", + params={ + "dataset_id": "dataset-does-not-exist", + "vm_id": "vm-id-does-not-exist", + "run_id": "run-id-does-not-exist", + "value": "does-not-exist", + }, group_to_expected_status_code={ ADMIN: 200, - GUEST: 302, # TODO: Make consistent. - PARTICIPANT: 302, # TODO: Make consistent. - ORGANIZER: 302, # TODO: Make consistent. - ORGANIZER_WRONG_TASK: 302, # TODO: Make consistent. + GUEST: 302, # TODO: Make consistent. + PARTICIPANT: 302, # TODO: Make consistent. + ORGANIZER: 302, # TODO: Make consistent. + ORGANIZER_WRONG_TASK: 302, # TODO: Make consistent. 
}, ), route_to_test( - url_pattern='blind////', - params={'dataset_id': 'dataset-does-not-exist', 'vm_id': 'vm-id-does-not-exist', 'run_id': 'run-id-does-not-exist', 'value': 'does-not-exist'}, + url_pattern="blind////", + params={ + "dataset_id": "dataset-does-not-exist", + "vm_id": "vm-id-does-not-exist", + "run_id": "run-id-does-not-exist", + "value": "does-not-exist", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1600,19 +1556,19 @@ }, ), route_to_test( - url_pattern='api/evaluations//', - params={'task_id': 'task-does-not-exist', 'dataset_id': 'dataset-id-does-not-exist'}, + url_pattern="api/evaluations//", + params={"task_id": "task-does-not-exist", "dataset_id": "dataset-id-does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, - GUEST: 200, # TODO Make consistent with "api/evaluation//" + GUEST: 200, # TODO Make consistent with "api/evaluation//" PARTICIPANT: 200, ORGANIZER: 200, - ORGANIZER_WRONG_TASK: 200, # TODO Make consistent with "api/evaluation//" + ORGANIZER_WRONG_TASK: 200, # TODO Make consistent with "api/evaluation//" }, ), route_to_test( - url_pattern='api/evaluations-of-vm//', - params={'task_id': 'task-of-organizer-1', 'vm_id': 'does-not-exist'}, + url_pattern="api/evaluations-of-vm//", + params={"task_id": "task-of-organizer-1", "vm_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1622,8 +1578,8 @@ }, ), route_to_test( - url_pattern='api/evaluations-of-vm//', - params={'task_id': 'task-of-organizer-1', 'vm_id': PARTICIPANT.split('_')[-1]}, + url_pattern="api/evaluations-of-vm//", + params={"task_id": "task-of-organizer-1", "vm_id": PARTICIPANT.split("_")[-1]}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1632,10 +1588,9 @@ ORGANIZER_WRONG_TASK: 302, }, ), - route_to_test( - url_pattern='api/evaluation//', - params={'vm_id': 'example-participant', 'run_id': 'run-1-example_participant'}, + url_pattern="api/evaluation//", + params={"vm_id": "example-participant", "run_id": "run-1-example_participant"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, # TODO Make consistent with "api/evaluations//" @@ -1645,21 +1600,21 @@ }, ), route_to_test( - url_pattern='api/evaluation//', - params={'vm_id': PARTICIPANT.split('_')[-1], 'run_id': 'run-1-example_participant'}, + url_pattern="api/evaluation//", + params={"vm_id": PARTICIPANT.split("_")[-1], "run_id": "run-1-example_participant"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, # TODO Make consistent with "api/evaluations//" PARTICIPANT: 200, ORGANIZER: 302, - ORGANIZER_WRONG_TASK: 302, # TODO Make consistent with "api/evaluations//" + ORGANIZER_WRONG_TASK: 302, # TODO Make consistent with "api/evaluations//" }, ), route_to_test( - url_pattern='api/submissions//', - params={'task_id': 'task-id-does-not-exist', 'dataset_id': 'dataset-id-does-not-exist'}, + url_pattern="api/submissions//", + params={"task_id": "task-id-does-not-exist", "dataset_id": "dataset-id-does-not-exist"}, group_to_expected_status_code={ - ADMIN: 200, # TODO: Add more fine-grained tests, as admin gets different response + ADMIN: 200, # TODO: Add more fine-grained tests, as admin gets different response GUEST: 200, PARTICIPANT: 200, ORGANIZER: 200, @@ -1667,7 +1622,7 @@ }, ), route_to_test( - url_pattern='api/ova-list', + url_pattern="api/ova-list", params={}, group_to_expected_status_code={ ADMIN: 200, @@ -1678,7 +1633,7 @@ }, ), route_to_test( - url_pattern='api/host-list', + url_pattern="api/host-list", params={}, group_to_expected_status_code={ ADMIN: 200, @@ 
-1689,7 +1644,7 @@ }, ), route_to_test( - url_pattern='api/organizer-list', + url_pattern="api/organizer-list", params={}, group_to_expected_status_code={ ADMIN: 200, @@ -1700,7 +1655,7 @@ }, ), route_to_test( - url_pattern='api/task-list', + url_pattern="api/task-list", params={}, group_to_expected_status_code={ ADMIN: 200, @@ -1711,8 +1666,8 @@ }, ), route_to_test( - url_pattern='api/task/', - params={'task_id': 'task-id-does-not-exist'}, + url_pattern="api/task/", + params={"task_id": "task-id-does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 200, @@ -1722,8 +1677,8 @@ }, ), route_to_test( - url_pattern='api/registration_formular/', - params={'task_id': 'task-id-does-not-exist'}, + url_pattern="api/registration_formular/", + params={"task_id": "task-id-does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 200, @@ -1733,8 +1688,8 @@ }, ), route_to_test( - url_pattern='api/dataset/', - params={'dataset_id': 'dataset-id-does-not-exist'}, + url_pattern="api/dataset/", + params={"dataset_id": "dataset-id-does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 200, @@ -1744,8 +1699,8 @@ }, ), route_to_test( - url_pattern='api/datasets_by_task/', - params={'task_id': 'task-id-does-not-exist'}, + url_pattern="api/datasets_by_task/", + params={"task_id": "task-id-does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 200, @@ -1755,8 +1710,8 @@ }, ), route_to_test( - url_pattern='api/organizer/', - params={'organizer_id': 'organizer-id-id-does-not-exist'}, + url_pattern="api/organizer/", + params={"organizer_id": "organizer-id-id-does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 200, @@ -1766,7 +1721,7 @@ }, ), route_to_test( - url_pattern='api/role', + url_pattern="api/role", params={}, group_to_expected_status_code={ ADMIN: 200, @@ -1777,74 +1732,86 @@ }, ), route_to_test( - url_pattern='api/task//user/', - params={'task_id': 'task-id-does-not-exist', 'user_id': 'user-id-does-not-exist'}, + url_pattern="api/task//user/", + params={"task_id": "task-id-does-not-exist", "user_id": "user-id-does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, - GUEST: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. - PARTICIPANT: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. - ORGANIZER: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. - ORGANIZER_WRONG_TASK: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + GUEST: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + PARTICIPANT: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + ORGANIZER: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + ORGANIZER_WRONG_TASK: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. }, ), route_to_test( - url_pattern='api/task//user/', - params={'task_id': 'task-id-does-not-exist', 'user_id': PARTICIPANT.split('_')[-1]}, + url_pattern="api/task//user/", + params={"task_id": "task-id-does-not-exist", "user_id": PARTICIPANT.split("_")[-1]}, group_to_expected_status_code={ ADMIN: 200, - GUEST: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + GUEST: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. PARTICIPANT: 200, - ORGANIZER: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. 
- ORGANIZER_WRONG_TASK: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + ORGANIZER: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + ORGANIZER_WRONG_TASK: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. }, ), route_to_test( - url_pattern='api/task//user//refresh-docker-images', - params={'task_id': 'task-id-does-not-exist', 'user_id': 'user-id-does-not-exist'}, + url_pattern="api/task//user//refresh-docker-images", + params={"task_id": "task-id-does-not-exist", "user_id": "user-id-does-not-exist"}, group_to_expected_status_code={ ADMIN: 200, - GUEST: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. - PARTICIPANT: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. - ORGANIZER: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. - ORGANIZER_WRONG_TASK: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + GUEST: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + PARTICIPANT: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + ORGANIZER: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + ORGANIZER_WRONG_TASK: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. }, ), route_to_test( - url_pattern='api/task//user//refresh-docker-images', - params={'task_id': 'task-id-does-not-exist', 'user_id': PARTICIPANT.split('_')[-1]}, + url_pattern="api/task//user//refresh-docker-images", + params={"task_id": "task-id-does-not-exist", "user_id": PARTICIPANT.split("_")[-1]}, group_to_expected_status_code={ ADMIN: 200, - GUEST: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + GUEST: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. PARTICIPANT: 200, - ORGANIZER: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. - ORGANIZER_WRONG_TASK: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + ORGANIZER: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + ORGANIZER_WRONG_TASK: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. }, ), route_to_test( - url_pattern='api/task//user//software/running/', - params={'task_id': 'task-id-does-not-exist', 'user_id': 'user-id-does-not-exist', 'force_cache_refresh': 'ignore'}, + url_pattern="api/task//user//software/running/", + params={ + "task_id": "task-id-does-not-exist", + "user_id": "user-id-does-not-exist", + "force_cache_refresh": "ignore", + }, group_to_expected_status_code={ ADMIN: 200, - GUEST: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. - PARTICIPANT: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. - ORGANIZER: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. - ORGANIZER_WRONG_TASK: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + GUEST: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + PARTICIPANT: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + ORGANIZER: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. 
+ ORGANIZER_WRONG_TASK: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. }, ), route_to_test( - url_pattern='api/task//user//software/running/', - params={'task_id': 'task-id-does-not-exist', 'user_id': PARTICIPANT.split('_')[-1], 'force_cache_refresh': 'ignore'}, + url_pattern="api/task//user//software/running/", + params={ + "task_id": "task-id-does-not-exist", + "user_id": PARTICIPANT.split("_")[-1], + "force_cache_refresh": "ignore", + }, group_to_expected_status_code={ ADMIN: 200, - GUEST: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + GUEST: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. PARTICIPANT: 200, - ORGANIZER: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. - ORGANIZER_WRONG_TASK: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + ORGANIZER: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. + ORGANIZER_WRONG_TASK: 200, # TODO: This seems to be wrong, but I am not sure, I would expect a 405 here. }, ), route_to_test( - url_pattern='api/review///', - params={'dataset_id': 'dataset-id-does-not-exist', 'vm_id': 'example_participant', 'run_id': 'run-1-example_participant'}, + url_pattern="api/review///", + params={ + "dataset_id": "dataset-id-does-not-exist", + "vm_id": "example_participant", + "run_id": "run-1-example_participant", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -1854,8 +1821,12 @@ }, ), route_to_test( - url_pattern='api/review///', - params={'dataset_id': f'dataset-1-{now}-training', 'vm_id': 'example_participant', 'run_id': 'run-1-example_participant'}, + url_pattern="api/review///", + params={ + "dataset_id": f"dataset-1-{now}-training", + "vm_id": "example_participant", + "run_id": "run-1-example_participant", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1865,8 +1836,12 @@ }, ), route_to_test( - url_pattern='api/review///', - params={'dataset_id': f'dataset-1-{now}-training', 'vm_id': PARTICIPANT.split('_')[-1], 'run_id': 'run-1-example_participant'}, + url_pattern="api/review///", + params={ + "dataset_id": f"dataset-1-{now}-training", + "vm_id": PARTICIPANT.split("_")[-1], + "run_id": "run-1-example_participant", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, @@ -1876,34 +1851,40 @@ }, ), route_to_test( - url_pattern='api/review///', - params={'dataset_id': f'dataset-of-organizer-{now}-training', 'vm_id': 'does-not-exist', 'run_id': 'run-of-organizer'}, + url_pattern="api/review///", + params={ + "dataset_id": f"dataset-of-organizer-{now}-training", + "vm_id": "does-not-exist", + "run_id": "run-of-organizer", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, # TODO: Is this inconsistent with api/review/// above? - PARTICIPANT: 302, # TODO: Is this inconsistent with api/review/// above? + PARTICIPANT: 302, # TODO: Is this inconsistent with api/review/// above? ORGANIZER: 200, - ORGANIZER_WRONG_TASK: 302, # TODO: Is this inconsistent with api/review/// above? + ORGANIZER_WRONG_TASK: 302, # TODO: Is this inconsistent with api/review/// above? 
}, ), route_to_test( - url_pattern='api/review///', - params={'dataset_id': f'dataset-of-organizer-{now}-training', 'vm_id': 'does-not-exist', - 'run_id': 'run-1-example_participant'}, + url_pattern="api/review///", + params={ + "dataset_id": f"dataset-of-organizer-{now}-training", + "vm_id": "does-not-exist", + "run_id": "run-1-example_participant", + }, group_to_expected_status_code={ ADMIN: 200, GUEST: 302, # TODO: Is this inconsistent with api/review/// above? PARTICIPANT: 302, # TODO: Is this inconsistent with api/review/// above? - ORGANIZER: 302, # TODO: Is this inconsistent with api/review/// above? + ORGANIZER: 302, # TODO: Is this inconsistent with api/review/// above? ORGANIZER_WRONG_TASK: 302, # TODO: Is this inconsistent with api/review/// above? }, ), - # TODO: The following methods return 50X at the moment, we should improve the setup so that it returns 200. But for the moment 50X is enough to separate authenticated from unauthenticated. route_to_test( - url_pattern='tira-admin/reload-data', + url_pattern="tira-admin/reload-data", params={}, group_to_expected_status_code={ ADMIN: 500, @@ -1912,11 +1893,11 @@ ORGANIZER: 405, ORGANIZER_WRONG_TASK: 405, }, - hide_stdout=True + hide_stdout=True, ), route_to_test( - url_pattern='tira-admin/reload-runs/', - params={'vm_id': 'does-not-exist'}, + url_pattern="tira-admin/reload-runs/", + params={"vm_id": "does-not-exist"}, group_to_expected_status_code={ ADMIN: 500, GUEST: 405, @@ -1924,10 +1905,10 @@ ORGANIZER: 405, ORGANIZER_WRONG_TASK: 405, }, - hide_stdout=True + hide_stdout=True, ), route_to_test( - url_pattern='tira-admin/archive-vm', + url_pattern="tira-admin/archive-vm", params={}, group_to_expected_status_code={ ADMIN: 501, @@ -1938,8 +1919,8 @@ }, ), route_to_test( - url_pattern='tira-admin//create-task', - params={'organizer_id': 'organizer-id-does-not-exist'}, + url_pattern="tira-admin//create-task", + params={"organizer_id": "organizer-id-does-not-exist"}, group_to_expected_status_code={ ADMIN: 501, GUEST: 405, @@ -1949,8 +1930,8 @@ }, ), route_to_test( - url_pattern='tira-admin//create-task', - params={'organizer_id': ORGANIZER.split('_')[-1]}, + url_pattern="tira-admin//create-task", + params={"organizer_id": ORGANIZER.split("_")[-1]}, group_to_expected_status_code={ ADMIN: 501, GUEST: 405, @@ -1960,44 +1941,51 @@ }, ), route_to_test( - url_pattern='api/registration/add_registration//', - params={'task_id': 'shared-task-1', 'vm_id': 'example_participant'}, + url_pattern="api/registration/add_registration//", + params={"task_id": "shared-task-1", "vm_id": "example_participant"}, group_to_expected_status_code={ ADMIN: 500, - GUEST: 500, # TODO: Would we expect an 404 here? - PARTICIPANT: 500, # TODO: Would we expect an 404 here? - ORGANIZER: 500, # TODO: Would we expect an 404 here? - ORGANIZER_WRONG_TASK: 500, # TODO: Would we expect an 404 here? + GUEST: 500, # TODO: Would we expect a 404 here? + PARTICIPANT: 500, # TODO: Would we expect a 404 here? + ORGANIZER: 500, # TODO: Would we expect a 404 here? + ORGANIZER_WRONG_TASK: 500, # TODO: Would we expect a 404 here?
}, body='{"group": "X"}', ), - # Some commands that delete stuff must be executed as last route_to_test( - url_pattern='grpc//run_delete//', - params={'vm_id': 'does-not-exist', 'dataset_id': f'dataset-1-{now}-training', 'run_id': 'run-1-example_participant'}, + url_pattern="grpc//run_delete//", + params={ + "vm_id": "does-not-exist", + "dataset_id": f"dataset-1-{now}-training", + "run_id": "run-1-example_participant", + }, group_to_expected_status_code={ ADMIN: 202, GUEST: 302, PARTICIPANT: 302, ORGANIZER: 302, ORGANIZER_WRONG_TASK: 302, - } + }, ), route_to_test( - url_pattern='grpc//run_delete//', - params={'vm_id': PARTICIPANT.split('_')[-1], 'dataset_id': f'dataset-1-{now}-training', 'run_id': 'run-1-example_participant'}, + url_pattern="grpc//run_delete//", + params={ + "vm_id": PARTICIPANT.split("_")[-1], + "dataset_id": f"dataset-1-{now}-training", + "run_id": "run-1-example_participant", + }, group_to_expected_status_code={ ADMIN: 202, GUEST: 302, PARTICIPANT: 202, ORGANIZER: 302, ORGANIZER_WRONG_TASK: 302, - } + }, ), route_to_test( - url_pattern='tira-admin/delete-task/', - params={'task_id': 'task-of-organizer-1'}, + url_pattern="tira-admin/delete-task/", + params={"task_id": "task-of-organizer-1"}, group_to_expected_status_code={ ADMIN: 200, GUEST: 405, @@ -2007,8 +1995,8 @@ }, ), route_to_test( - url_pattern='tira-admin/delete-dataset/', - params={'dataset_id': f'dataset-of-organizer-{now}-training'}, + url_pattern="tira-admin/delete-dataset/", + params={"dataset_id": f"dataset-of-organizer-{now}-training"}, group_to_expected_status_code={ GUEST: 405, PARTICIPANT: 405, @@ -2016,17 +2004,279 @@ ORGANIZER: 200, }, ), + route_to_test( + url_pattern="health", + params={}, + group_to_expected_status_code={ + GUEST: 204, + PARTICIPANT: 204, + ORGANIZER_WRONG_TASK: 204, + ORGANIZER: 204, + ADMIN: 204, + }, + ), + route_to_test( + url_pattern="info", + params={}, + group_to_expected_status_code={ + GUEST: 200, + PARTICIPANT: 200, + ORGANIZER_WRONG_TASK: 200, + ORGANIZER: 200, + ADMIN: 200, + }, + ), + # The following v1/ endpoints should be restricted to only allow admin-access for now + route_to_test( + url_pattern="v1/datasets/", + params={}, + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + ADMIN: 200, + }, + ), + route_to_test( + url_pattern="v1/datasets//", + params={"dataset_id": "i-do-not-exist"}, + method="GET", + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + ADMIN: 404, + }, + ), + route_to_test( + url_pattern="v1/datasets//", + params={"dataset_id": "i-do-not-exist"}, + method="DELETE", + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + ADMIN: 404, + }, + ), + route_to_test( + url_pattern="v1/evaluations/", + params={}, + method="GET", + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + ADMIN: 200, + }, + ), + route_to_test( + url_pattern="v1/organizers/", + params={}, + method="GET", + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + ADMIN: 200, + }, + ), + route_to_test( + url_pattern="v1/organizers/", + params={}, + method="POST", + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + # ADMIN: 200, # TODO: replace with correct code once the POST is properly implemented + }, + ), + 
route_to_test( + url_pattern="v1/organizers//", + params={"organizer_id": "does-not-exist"}, + method="GET", + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + ADMIN: 404, + }, + ), + route_to_test( + url_pattern="v1/organizers//", + params={"organizer_id": "does-not-exist"}, + method="DELETE", + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + ADMIN: 404, + }, + ), + route_to_test( + url_pattern="v1/runs/", + params={}, + method="GET", + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + ADMIN: 200, + }, + ), + route_to_test( + url_pattern="v1/runs//", + params={"run_id": "does-not-exist"}, + method="GET", + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + ADMIN: 404, + }, + ), + route_to_test( + url_pattern="v1/runs//", + params={"run_id": "does-not-exist"}, + method="DELETE", + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + ADMIN: 404, + }, + ), + route_to_test( + url_pattern="v1/runs//review", + params={"run": "does-not-exist"}, + method="GET", + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + ADMIN: 404, + }, + ), + route_to_test( + url_pattern="v1/tasks/", + params={}, + method="GET", + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + ADMIN: 200, + }, + ), + route_to_test( + url_pattern="v1/tasks/", + params={}, + method="POST", + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + # ADMIN: 200, # TODO: replace with correct code once the POST is properly implemented + }, + ), + route_to_test( + url_pattern="v1/tasks//", + params={"task_id": "does-not-exist"}, + method="GET", + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + ADMIN: 404, + }, + ), + route_to_test( + url_pattern="v1/tasks//", + params={"task_id": "does-not-exist"}, + method="DELETE", + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + ADMIN: 404, + }, + ), + route_to_test( + url_pattern="v1/tasks//evaluations", + params={"task_id": "does-not-exist"}, + method="GET", + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + # ADMIN: 404, # FIXME: this does not currently work + ADMIN: 200, + }, + ), + route_to_test( + url_pattern="v1/tasks//registrations", + params={"task_id": "does-not-exist"}, + method="GET", + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + # ADMIN: 404, # FIXME: this does not currently work + ADMIN: 200, + }, + ), + route_to_test( + url_pattern="v1/tasks//registrations", + params={"task_id": "does-not-exist"}, + method="POST", + group_to_expected_status_code={ + GUEST: 403, + PARTICIPANT: 403, + ORGANIZER_WRONG_TASK: 403, + ORGANIZER: 403, + # ADMIN: 404, # TODO: these should give 404 for non-existent tasks. 
That is not currently the case + }, + ), + route_to_test( + url_pattern="v1/user/", + params={}, + group_to_expected_status_code={ + GUEST: 200, + PARTICIPANT: 200, + ORGANIZER_WRONG_TASK: 200, + ORGANIZER: 200, + ADMIN: 200, + }, + ), ] -def access_matrix_for_user(user): +def access_matrix_for_user(user: str) -> list[tuple]: ret = [] for i in API_ACCESS_MATRIX: if user not in i[2]: continue - params = i[2][user]['params'] - expected_status_code = i[2][user]['expected_status_code'] - + params = i[2][user]["params"] + expected_status_code = i[2][user]["expected_status_code"] + ret += [(i[0], i[1], params, expected_status_code, i[3])] return ret - diff --git a/application/test/auth_tests/test_is_admin_for_task.py b/application/test/auth_tests/test_is_admin_for_task.py index e34606ff8..88b5a9d35 100644 --- a/application/test/auth_tests/test_is_admin_for_task.py +++ b/application/test/auth_tests/test_is_admin_for_task.py @@ -1,17 +1,17 @@ from django.test import TestCase -from api_access_matrix import ADMIN -from tira.authentication import auth from utils_for_testing import mock_request, set_up_tira_environment -submit_url_task_1 = 'submit/task-of-organizer-1' -overview_url_task_1 = 'task-overview/task-of-organizer-1' +from tira_app.authentication import auth -submit_url_task_2 = 'submit/task-does-not-exist' -overview_url_task_2 = 'task-overview/task-does-not-exist' +submit_url_task_1 = "submit/task-of-organizer-1" +overview_url_task_1 = "task-overview/task-of-organizer-1" -no_org = '' -wrong_org = 'wrong_org,tira_org_pan' -org_1 = 'tira_org_EXAMPLE-ORGANIZER' +submit_url_task_2 = "submit/task-does-not-exist" +overview_url_task_2 = "task-overview/task-does-not-exist" + +no_org = "" +wrong_org = "wrong_org,tira_org_pan" +org_1 = "tira_org_EXAMPLE-ORGANIZER" class TestIsAdminForTask(TestCase): @@ -21,7 +21,7 @@ def setUpClass(cls): def test_wrong_org_on_wrong_url(self): # Arrange - request = mock_request(wrong_org, 'wrong-url') + request = mock_request(wrong_org, "wrong-url") # Act actual = auth.is_admin_for_task(request) @@ -71,7 +71,7 @@ def test_wrong_org_on_existing_task_for_overview_url_2(self): def test_no_org_on_wrong_url(self): # Arrange - request = mock_request(no_org, 'wrong-url') + request = mock_request(no_org, "wrong-url") # Act actual = auth.is_admin_for_task(request) @@ -121,7 +121,7 @@ def test_no_org_on_existing_task_for_overview_url_2(self): def test_org_1_on_wrong_url(self): # Arrange - request = mock_request(org_1, 'wrong-url') + request = mock_request(org_1, "wrong-url") # Act actual = auth.is_admin_for_task(request) @@ -172,4 +172,3 @@ def test_org_1_on_existing_task_for_overview_url_2(self): @classmethod def tearDownClass(cls): pass - diff --git a/application/test/diffir_tests/test_diffir_endpoint.py b/application/test/diffir_tests/test_diffir_endpoint.py index 1103a2aff..90d5ebff8 100644 --- a/application/test/diffir_tests/test_diffir_endpoint.py +++ b/application/test/diffir_tests/test_diffir_endpoint.py @@ -1,37 +1,71 @@ -from django.test import TestCase +from _utils.mixins import StrAssertMixins from api_access_matrix import ADMIN -from utils_for_testing import dataset_1, dataset_2, dataset_meta, method_for_url_pattern, mock_request, set_up_tira_environment +from django.test import TestCase +from utils_for_testing import method_for_url_pattern, mock_request, set_up_tira_environment -url = 'serp//user//dataset///' +url = "serp//user//dataset///" diffir = method_for_url_pattern(url) -class TestDiffirEndpoint(TestCase): +class TestDiffirEndpoint(TestCase, 
StrAssertMixins): @classmethod def setUpClass(cls): set_up_tira_environment() def test_diffir_with_json(self): # Arrange - request = mock_request(ADMIN, url) + request = mock_request( + ADMIN, + url, + params={ + "task_id": "", # "t1", + "vm_id": "", # "example_participant", + "dataset_id": "", # "dataset-1", + "topk": "", # 10, + "run_id": "", # "run-3-example_participant", + }, + ) # Act - actual = diffir(request, vm_id='example_participant', dataset_id='dataset-1', task_id='t1', run_id='run-3-example_participant', topk=10) + actual = diffir( + request, + vm_id="example_participant", + dataset_id="dataset-1", + task_id="t1", + run_id="run-3-example_participant", + topk=10, + ) # Assert - self.assertTrue(actual.content.decode('utf-8').startswith('')) + self.assertStartsWith(actual.content.decode("utf-8"), "") def test_diffir_with_json_gz(self): # Arrange - request = mock_request(ADMIN, url) + request = mock_request( + ADMIN, + url, + params={ + "task_id": "", # "t1", + "vm_id": "", # "example_participant", + "dataset_id": "", # "dataset-1", + "topk": "", # 10, + "run_id": "", # "run-5-example_participant", + }, + ) # Act - actual = diffir(request, vm_id='example_participant', dataset_id='dataset-1', task_id='t1', run_id='run-5-example_participant', topk=10) + actual = diffir( + request, + vm_id="example_participant", + dataset_id="dataset-1", + task_id="t1", + run_id="run-5-example_participant", + topk=10, + ) # Assert - self.assertTrue(actual.content.decode('utf-8').startswith('')) + self.assertStartsWith(actual.content.decode("utf-8"), "") @classmethod def tearDownClass(cls): pass - diff --git a/application/test/evaluation_api_integration_tests/test_evaluation_results_for_single_dataset.py b/application/test/evaluation_api_integration_tests/test_evaluation_results_for_single_dataset.py index 2ecfbfacb..3958a3375 100644 --- a/application/test/evaluation_api_integration_tests/test_evaluation_results_for_single_dataset.py +++ b/application/test/evaluation_api_integration_tests/test_evaluation_results_for_single_dataset.py @@ -1,8 +1,15 @@ +from api_access_matrix import ADMIN, GUEST from django.test import TestCase -from api_access_matrix import GUEST, ADMIN -from utils_for_testing import dataset_1, dataset_2, dataset_meta, method_for_url_pattern, mock_request, set_up_tira_environment - -url = 'api/evaluations//' +from utils_for_testing import ( + dataset_1, + dataset_2, + dataset_meta, + method_for_url_pattern, + mock_request, + set_up_tira_environment, +) + +url = "api/evaluations//" evaluations_function = method_for_url_pattern(url) @@ -13,92 +20,93 @@ def setUpClass(cls): def test_for_non_existing_task_and_dataset(self): # Arrange - request = mock_request(GUEST, url) + request = mock_request(GUEST, url, params={"task_id": "", "dataset_id": ""}) # Act - actual = evaluations_function(request, task_id='does-not-exist', dataset_id='does-not-exist') + actual = evaluations_function(request, task_id="does-not-exist", dataset_id="does-not-exist") # Assert - self.verify_as_json(actual, 'non_existing_task_and_dataset.json') + self.verify_as_json(actual, "non_existing_task_and_dataset.json") def test_for_existing_task_and_dataset_with_few_evaluations(self): # Arrange - request = mock_request(GUEST, url) + request = mock_request(GUEST, url, params={"task_id": "", "dataset_id": ""}) # Act - actual = evaluations_function(request, task_id='shared-task-1', dataset_id=dataset_1) + actual = evaluations_function(request, task_id="shared-task-1", dataset_id=dataset_1) # Assert - self.verify_as_json(actual, 
'existing_task_and_dataset_with_few_evaluations.json') + self.verify_as_json(actual, "existing_task_and_dataset_with_few_evaluations.json") def test_for_existing_task_and_dataset_with_few_evaluations_including_blinded(self): # Arrange - request = mock_request(ADMIN, url) + request = mock_request(ADMIN, url, params={"task_id": "", "dataset_id": ""}) # Act - actual = evaluations_function(request, task_id='shared-task-1', dataset_id=dataset_1) + actual = evaluations_function(request, task_id="shared-task-1", dataset_id=dataset_1) # Assert - self.verify_as_json(actual, 'test_for_existing_task_and_dataset_with_few_evaluations_including_blinded.json') + self.verify_as_json(actual, "test_for_existing_task_and_dataset_with_few_evaluations_including_blinded.json") def test_for_existing_task_and_meta_dataset_with_few_evaluations(self): # Arrange - request = mock_request(GUEST, url) + request = mock_request(GUEST, url, params={"task_id": "", "dataset_id": ""}) # Act - actual = evaluations_function(request, task_id='shared-task-1', dataset_id=dataset_meta) + actual = evaluations_function(request, task_id="shared-task-1", dataset_id=dataset_meta) # Assert - self.verify_as_json(actual, 'test_for_existing_task_and_meta_dataset_with_few_evaluations.json') + self.verify_as_json(actual, "test_for_existing_task_and_meta_dataset_with_few_evaluations.json") def test_for_existing_task_and_dataset_with_little_evaluations(self): # Arrange - request = mock_request(GUEST, url) + request = mock_request(GUEST, url, params={"task_id": "", "dataset_id": ""}) # Act - actual = evaluations_function(request, task_id='shared-task-1', dataset_id=dataset_2) + actual = evaluations_function(request, task_id="shared-task-1", dataset_id=dataset_2) # Assert - self.verify_as_json(actual, 'existing_task_and_dataset_with_little_evaluations.json') + self.verify_as_json(actual, "existing_task_and_dataset_with_little_evaluations.json") def test_for_existing_task_and_dataset_with_little_evaluations_including_blinded(self): # Arrange - request = mock_request(ADMIN, url) + request = mock_request(ADMIN, url, params={"task_id": "", "dataset_id": ""}) # Act - actual = evaluations_function(request, task_id='shared-task-1', dataset_id=dataset_2) + actual = evaluations_function(request, task_id="shared-task-1", dataset_id=dataset_2) # Assert - self.verify_as_json(actual, 'test_for_existing_task_and_dataset_with_little_evaluations_including_blinded.json') + self.verify_as_json(actual, "test_for_existing_task_and_dataset_with_little_evaluations_including_blinded.json") def verify_as_json(self, actual, test_name): + import json + from approvaltests import verify_as_json from approvaltests.core.options import Options from approvaltests.namer.cli_namer import CliNamer - import json + content = json.loads(actual.content) - if 'context' in content and 'dataset_id' in content['context']: - content['context']['dataset_id'] = content['context']['dataset_id'].split('-20')[0] + if "context" in content and "dataset_id" in content["context"]: + content["context"]["dataset_id"] = content["context"]["dataset_id"].split("-20")[0] - if 'context' in content and 'evaluations' in content['context']: - for i in content['context']['evaluations']: - if 'dataset_id' in i: - i['dataset_id'] = i['dataset_id'].split('-20')[0] + if "context" in content and "evaluations" in content["context"]: + for i in content["context"]["evaluations"]: + if "dataset_id" in i: + i["dataset_id"] = i["dataset_id"].split("-20")[0] - if 'context' in content and 'runs' in content['context']: - 
for i in content['context']['runs']: - if 'dataset_id' in i: - i['dataset_id'] = i['dataset_id'].split('-20')[0] + if "context" in content and "runs" in content["context"]: + for i in content["context"]["runs"]: + if "dataset_id" in i: + i["dataset_id"] = i["dataset_id"].split("-20")[0] - for t in ['link_results_download', 'link_run_download']: + for t in ["link_results_download", "link_run_download"]: if t in i: - i[t] = i[t].split('/dataset/')[0] + '/dataset/