diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 37527351..58184d21 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -7,6 +7,15 @@ on:
branches: ["main"]
workflow_dispatch:
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}
+ cancel-in-progress: true
+
+env:
+ STABLE_PYTHON_VERSION: '3.11'
+ PYTHONUNBUFFERED: "1"
+ FORCE_COLOR: "1"
+
jobs:
tests:
name: "Python ${{ matrix.python-version }}"
@@ -21,42 +30,37 @@ jobs:
steps:
- uses: "actions/checkout@v4"
- - uses: "actions/setup-python@v4"
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: "actions/setup-python@v4"
with:
python-version: ${{ matrix.python-version }}
- - name: "Install dependencies"
- run: |
- set -xe
- python -VV
- python -m site
- python -m pip install --upgrade pip setuptools wheel coverage[toml] virtualenv tox tox-gh-actions
- - name: "Run tox targets for ${{ matrix.python-version }}"
- run: python -m tox
+ - name: "Upgrade pip"
+ run: python -m pip install --upgrade pip
- - name: "Generate coverage XML"
- if: "contains(env.USING_COVERAGE, matrix.python-version)"
- run: python -m coverage xml
+ - name: "Install hatch"
+ run: pip install hatch
- - name: "Upload coverage to Codecov"
- uses: codecov/codecov-action@v3
+ - name: "Run tests for ${{ matrix.python-version }}"
+ run: hatch run test
- others:
- runs-on: ubuntu-20.04
+ docs:
+ name: Documentation
+ runs-on: "ubuntu-latest"
strategy:
fail-fast: false
- matrix:
- toxenv: ["manifest", "docs", "binder"]
- env:
- TOXENV: ${{ matrix.toxenv }}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
with:
python-version: 3.11
- - name: "Install dependencies"
- run: |
- set -xe
- python -m pip install virtualenv tox
- - name: "Run tox targets for ${{ matrix.toxenv }}"
- run: python -m tox
+
+ - name: "Upgrade pip"
+ run: python -m pip install --upgrade pip
+
+ - name: "Install hatch"
+ run: pip install hatch
+
+ - name: "Build docs"
+ run: hatch run docs:ci-build
diff --git a/.gitignore b/.gitignore
index 5eeb0b76..24eef423 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,7 +37,6 @@ pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
-.tox/
.coverage
.coverage.*
.cache
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2f858cc0..ccef9eae 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,11 +1,10 @@
default_language_version:
- python: python3.8
+ python: python3.10
ci:
- autofix_prs: true
- autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
autoupdate_schedule: quarterly
- # submodules: true
+ autofix_commit_msg: 🎨 [pre-commit.ci] Auto format from pre-commit.com hooks
+ autoupdate_commit_msg: ⬆ [pre-commit.ci] pre-commit autoupdate
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
@@ -21,6 +20,14 @@ repos:
- id: check-docstring-first
- id: detect-private-key
+ - repo: https://github.com/charliermarsh/ruff-pre-commit
+ rev: v0.1.2
+ hooks:
+ - id: ruff
+ args:
+ - --fix
+ - id: ruff-format
+
- repo: https://github.com/asottile/pyupgrade
rev: v3.15.0
hooks:
@@ -28,24 +35,6 @@ repos:
args: [--py37-plus]
name: Upgrade code
- #- repo: https://github.com/myint/docformatter
- # rev: v1.5.0
- # hooks:
- # - id: docformatter
- # args: [--in-place, --wrap-summaries=120, --wrap-descriptions=120]
-
- #- repo: https://github.com/PyCQA/isort
- # rev: 5.10.1
- # hooks:
- # - id: isort
-
- - repo: https://github.com/psf/black
- rev: 23.11.0
- hooks:
- - id: black
- name: Black code
- args: ["-S"]
-
- repo: https://github.com/executablebooks/mdformat
rev: 0.7.17
hooks:
@@ -58,12 +47,3 @@ repos:
rev: v1.5.0
hooks:
- id: yesqa
-
- - repo: https://github.com/pycqa/flake8
- rev: 6.1.0
- hooks:
- - id: flake8
- arge:
- - "--count"
- - "--show-source"
- - "--statistics"
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index f369aa73..00000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1,40 +0,0 @@
-recursive-include papermill *.py
-recursive-include papermill *.ipynb
-recursive-include papermill *.json
-recursive-include papermill *.yaml
-recursive-include papermill *.yml
-recursive-include papermill *.keep
-recursive-include papermill *.txt
-
-include setup.py
-include requirements.txt
-include tox_py_installer.sh
-recursive-include requirements *.txt
-include docs/requirements.txt
-include tox.ini
-include pytest.ini
-include README.md
-include LICENSE
-include MANIFEST.in
-include *.md
-include *.toml
-
-include .bumpversion.cfg
-
-# Documentation
-prune docs
-
-exclude .pre-commit-config.yaml
-# exclude sample notebooks for binder
-prune binder
-# Scripts
-graft scripts
-# Test env
-prune .tox
-# Exclude notebooks checkpoints generated by testing
-recursive-exclude papermill/.ipynb_checkpoints *.ipynb
-recursive-exclude papermill/tests/notebooks/.ipynb_checkpoints *.ipynb
-
-# Build files
-exclude .github
-exclude .readthedocs.yaml
diff --git a/docs/conf.py b/docs/conf.py
index 00ddfde6..50adcd0f 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -80,7 +80,7 @@
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'UPDATE.md']
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = "sphinx"
+pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
@@ -90,14 +90,14 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-html_theme = "furo"
+html_theme = 'furo'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
- "sidebar_hide_name": True,
+ 'sidebar_hide_name': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
@@ -105,7 +105,7 @@
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
-html_logo = "_static/images/papermill.png"
+html_logo = '_static/images/papermill.png'
# -- Options for HTMLHelp output ------------------------------------------
@@ -132,9 +132,7 @@
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
-latex_documents = [
- (master_doc, 'papermill.tex', 'papermill Documentation', 'nteract team', 'manual')
-]
+latex_documents = [(master_doc, 'papermill.tex', 'papermill Documentation', 'nteract team', 'manual')]
# -- Options for manual page output ---------------------------------------
diff --git a/docs/requirements.txt b/docs/requirements.txt
deleted file mode 100644
index 7950ab0e..00000000
--- a/docs/requirements.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-# Pin packages for RTD builds
-# https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html#pinning-dependencies
-#-r ../requirements.txt
-Sphinx>=7.2.6
-furo>=2023.9.10
-myst-parser>=2.0.0
-moto>=4.2.8
-sphinx-copybutton>=0.5.2
-nbformat
-entrypoints
diff --git a/papermill/__init__.py b/papermill/__init__.py
index af32a9d3..e3b98fb6 100644
--- a/papermill/__init__.py
+++ b/papermill/__init__.py
@@ -1,5 +1,4 @@
-from .version import version as __version__
-
from .exceptions import PapermillException, PapermillExecutionError
from .execute import execute_notebook
from .inspection import inspect_notebook
+from .version import version as __version__
diff --git a/papermill/abs.py b/papermill/abs.py
index d40db9d2..0378d45f 100644
--- a/papermill/abs.py
+++ b/papermill/abs.py
@@ -1,9 +1,9 @@
"""Utilities for working with Azure blob storage"""
-import re
import io
+import re
-from azure.storage.blob import BlobServiceClient
from azure.identity import EnvironmentCredential
+from azure.storage.blob import BlobServiceClient
class AzureBlobStore:
@@ -20,7 +20,7 @@ class AzureBlobStore:
def _blob_service_client(self, account_name, sas_token=None):
blob_service_client = BlobServiceClient(
- account_url=f"{account_name}.blob.core.windows.net",
+ account_url=f'{account_name}.blob.core.windows.net',
credential=sas_token or EnvironmentCredential(),
)
@@ -32,15 +32,15 @@ def _split_url(self, url):
see: https://docs.microsoft.com/en-us/azure/storage/common/storage-dotnet-shared-access-signature-part-1 # noqa: E501
abs://myaccount.blob.core.windows.net/sascontainer/sasblob.txt?sastoken
"""
- match = re.match(r"abs://(.*)\.blob\.core\.windows\.net\/(.*?)\/([^\?]*)\??(.*)$", url)
+ match = re.match(r'abs://(.*)\.blob\.core\.windows\.net\/(.*?)\/([^\?]*)\??(.*)$', url)
if not match:
raise Exception(f"Invalid azure blob url '{url}'")
else:
params = {
- "account": match.group(1),
- "container": match.group(2),
- "blob": match.group(3),
- "sas_token": match.group(4),
+ 'account': match.group(1),
+ 'container': match.group(2),
+ 'blob': match.group(3),
+ 'sas_token': match.group(4),
}
return params
@@ -48,22 +48,22 @@ def read(self, url):
"""Read storage at a given url"""
params = self._split_url(url)
output_stream = io.BytesIO()
- blob_service_client = self._blob_service_client(params["account"], params["sas_token"])
+ blob_service_client = self._blob_service_client(params['account'], params['sas_token'])
blob_client = blob_service_client.get_blob_client(params['container'], params['blob'])
blob_client.download_blob().readinto(output_stream)
output_stream.seek(0)
- return [line.decode("utf-8") for line in output_stream]
+ return [line.decode('utf-8') for line in output_stream]
def listdir(self, url):
"""Returns a list of the files under the specified path"""
params = self._split_url(url)
- blob_service_client = self._blob_service_client(params["account"], params["sas_token"])
- container_client = blob_service_client.get_container_client(params["container"])
- return list(container_client.list_blobs(params["blob"]))
+ blob_service_client = self._blob_service_client(params['account'], params['sas_token'])
+ container_client = blob_service_client.get_container_client(params['container'])
+ return list(container_client.list_blobs(params['blob']))
def write(self, buf, url):
"""Write buffer to storage at a given url"""
params = self._split_url(url)
- blob_service_client = self._blob_service_client(params["account"], params["sas_token"])
+ blob_service_client = self._blob_service_client(params['account'], params['sas_token'])
blob_client = blob_service_client.get_blob_client(params['container'], params['blob'])
blob_client.upload_blob(data=buf, overwrite=True)
diff --git a/papermill/adl.py b/papermill/adl.py
index 01efd44d..4ad0f62a 100644
--- a/papermill/adl.py
+++ b/papermill/adl.py
@@ -39,12 +39,7 @@ def listdir(self, url):
"""Returns a list of the files under the specified path"""
(store_name, path) = self._split_url(url)
adapter = self._create_adapter(store_name)
- return [
- "adl://{store_name}.azuredatalakestore.net/{path_to_child}".format(
- store_name=store_name, path_to_child=path_to_child
- )
- for path_to_child in adapter.ls(path)
- ]
+ return [f'adl://{store_name}.azuredatalakestore.net/{path_to_child}' for path_to_child in adapter.ls(path)]
def read(self, url):
"""Read storage at a given url"""
diff --git a/papermill/cli.py b/papermill/cli.py
index ac0de74f..e80867df 100755
--- a/papermill/cli.py
+++ b/papermill/cli.py
@@ -1,23 +1,21 @@
"""Main `papermill` interface."""
+import base64
+import logging
import os
+import platform
import sys
-from stat import S_ISFIFO
-import nbclient
import traceback
-
-import base64
-import logging
+from stat import S_ISFIFO
import click
-
+import nbclient
import yaml
-import platform
+from . import __version__ as papermill_version
from .execute import execute_notebook
-from .iorw import read_yaml_file, NoDatesSafeLoader
from .inspection import display_notebook_help
-from . import __version__ as papermill_version
+from .iorw import NoDatesSafeLoader, read_yaml_file
click.disable_unicode_literals_warning = True
@@ -28,18 +26,14 @@
def print_papermill_version(ctx, param, value):
if not value:
return
- print(
- "{version} from {path} ({pyver})".format(
- version=papermill_version, path=__file__, pyver=platform.python_version()
- )
- )
+ print(f'{papermill_version} from {__file__} ({platform.python_version()})')
ctx.exit()
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.pass_context
@click.argument('notebook_path', required=not INPUT_PIPED)
-@click.argument('output_path', default="")
+@click.argument('output_path', default='')
@click.option(
'--help-notebook',
is_flag=True,
@@ -47,39 +41,56 @@ def print_papermill_version(ctx, param, value):
help='Display parameters information for the given notebook path.',
)
@click.option(
- '--parameters', '-p', nargs=2, multiple=True, help='Parameters to pass to the parameters cell.'
+ '--parameters',
+ '-p',
+ nargs=2,
+ multiple=True,
+ help='Parameters to pass to the parameters cell.',
)
@click.option(
- '--parameters_raw', '-r', nargs=2, multiple=True, help='Parameters to be read as raw string.'
+ '--parameters_raw',
+ '-r',
+ nargs=2,
+ multiple=True,
+ help='Parameters to be read as raw string.',
)
@click.option(
- '--parameters_file', '-f', multiple=True, help='Path to YAML file containing parameters.'
+ '--parameters_file',
+ '-f',
+ multiple=True,
+ help='Path to YAML file containing parameters.',
)
@click.option(
- '--parameters_yaml', '-y', multiple=True, help='YAML string to be used as parameters.'
+ '--parameters_yaml',
+ '-y',
+ multiple=True,
+ help='YAML string to be used as parameters.',
)
@click.option(
- '--parameters_base64', '-b', multiple=True, help='Base64 encoded YAML string as parameters.'
+ '--parameters_base64',
+ '-b',
+ multiple=True,
+ help='Base64 encoded YAML string as parameters.',
)
@click.option(
'--inject-input-path',
is_flag=True,
default=False,
- help="Insert the path of the input notebook as PAPERMILL_INPUT_PATH as a notebook parameter.",
+ help='Insert the path of the input notebook as PAPERMILL_INPUT_PATH as a notebook parameter.',
)
@click.option(
'--inject-output-path',
is_flag=True,
default=False,
- help="Insert the path of the output notebook as PAPERMILL_OUTPUT_PATH as a notebook parameter.",
+ help='Insert the path of the output notebook as PAPERMILL_OUTPUT_PATH as a notebook parameter.',
)
@click.option(
'--inject-paths',
is_flag=True,
default=False,
help=(
- "Insert the paths of input/output notebooks as PAPERMILL_INPUT_PATH/PAPERMILL_OUTPUT_PATH"
- " as notebook parameters."
+ 'Insert the paths of input/output notebooks as PAPERMILL_INPUT_PATH/PAPERMILL_OUTPUT_PATH'
+ ' as notebook parameters.'
),
)
@click.option('--engine', help='The execution engine name to use in evaluating the notebook.')
@@ -97,7 +108,7 @@ def print_papermill_version(ctx, param, value):
@click.option(
'--prepare-only/--prepare-execute',
default=False,
- help="Flag for outputting the notebook without execution, but with parameters applied.",
+ help='Flag for outputting the notebook without execution, but with parameters applied.',
)
@click.option(
'--kernel',
@@ -111,22 +122,24 @@ def print_papermill_version(ctx, param, value):
)
@click.option('--cwd', default=None, help='Working directory to run notebook in.')
@click.option(
- '--progress-bar/--no-progress-bar', default=None, help="Flag for turning on the progress bar."
+ '--progress-bar/--no-progress-bar',
+ default=None,
+ help='Flag for turning on the progress bar.',
)
@click.option(
'--log-output/--no-log-output',
default=False,
- help="Flag for writing notebook output to the configured logger.",
+ help='Flag for writing notebook output to the configured logger.',
)
@click.option(
'--stdout-file',
type=click.File(mode='w', encoding='utf-8'),
- help="File to write notebook stdout output to.",
+ help='File to write notebook stdout output to.',
)
@click.option(
'--stderr-file',
type=click.File(mode='w', encoding='utf-8'),
- help="File to write notebook stderr output to.",
+ help='File to write notebook stderr output to.',
)
@click.option(
'--log-level',
@@ -139,14 +152,14 @@ def print_papermill_version(ctx, param, value):
'--start_timeout', # Backwards compatible naming
type=int,
default=60,
- help="Time in seconds to wait for kernel to start.",
+ help='Time in seconds to wait for kernel to start.',
)
@click.option(
'--execution-timeout',
type=int,
- help="Time in seconds to wait for each cell before failing execution (default: forever)",
+ help='Time in seconds to wait for each cell before failing execution (default: forever)',
)
-@click.option('--report-mode/--no-report-mode', default=False, help="Flag for hiding input.")
+@click.option('--report-mode/--no-report-mode', default=False, help='Flag for hiding input.')
@click.option(
'--version',
is_flag=True,
@@ -227,7 +240,7 @@ def papermill(
elif progress_bar is None:
progress_bar = not log_output
- logging.basicConfig(level=log_level, format="%(message)s")
+ logging.basicConfig(level=log_level, format='%(message)s')
# Read in Parameters
parameters_final = {}
@@ -276,11 +289,11 @@ def papermill(
def _resolve_type(value):
- if value == "True":
+ if value == 'True':
return True
- elif value == "False":
+ elif value == 'False':
return False
- elif value == "None":
+ elif value == 'None':
return None
elif _is_int(value):
return int(value)
diff --git a/papermill/clientwrap.py b/papermill/clientwrap.py
index cde6f906..f4d4a8b2 100644
--- a/papermill/clientwrap.py
+++ b/papermill/clientwrap.py
@@ -1,5 +1,5 @@
-import sys
import asyncio
+import sys
from nbclient import NotebookClient
from nbclient.exceptions import CellExecutionError
@@ -41,7 +41,7 @@ def execute(self, **kwargs):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
with self.setup_kernel(**kwargs):
- self.log.info("Executing notebook with kernel: %s" % self.kernel_name)
+ self.log.info('Executing notebook with kernel: %s' % self.kernel_name)
self.papermill_execute_cells()
info_msg = self.wait_for_reply(self.kc.kernel_info())
self.nb.metadata['language_info'] = info_msg['content']['language_info']
@@ -84,23 +84,23 @@ def log_output_message(self, output):
:param output: nbformat.notebooknode.NotebookNode
:return:
"""
- if output.output_type == "stream":
- content = "".join(output.text)
- if output.name == "stdout":
+ if output.output_type == 'stream':
+ content = ''.join(output.text)
+ if output.name == 'stdout':
if self.log_output:
self.log.info(content)
if self.stdout_file:
self.stdout_file.write(content)
self.stdout_file.flush()
- elif output.name == "stderr":
+ elif output.name == 'stderr':
if self.log_output:
# In case users want to redirect stderr differently, pipe to warning
self.log.warning(content)
if self.stderr_file:
self.stderr_file.write(content)
self.stderr_file.flush()
- elif self.log_output and ("data" in output and "text/plain" in output.data):
- self.log.info("".join(output.data['text/plain']))
+ elif self.log_output and ('data' in output and 'text/plain' in output.data):
+ self.log.info(''.join(output.data['text/plain']))
def process_message(self, *arg, **kwargs):
output = super().process_message(*arg, **kwargs)
diff --git a/papermill/engines.py b/papermill/engines.py
index 3723b70e..3e87f52b 100644
--- a/papermill/engines.py
+++ b/papermill/engines.py
@@ -1,16 +1,16 @@
"""Engines to perform different roles"""
-import sys
import datetime
-import dateutil
-
+import sys
from functools import wraps
+
+import dateutil
import entrypoints
-from .log import logger
-from .exceptions import PapermillException
from .clientwrap import PapermillNotebookClient
+from .exceptions import PapermillException
from .iorw import write_ipynb
-from .utils import merge_kwargs, remove_args, nb_kernel_name, nb_language
+from .log import logger
+from .utils import merge_kwargs, nb_kernel_name, nb_language, remove_args
class PapermillEngines:
@@ -33,7 +33,7 @@ def register_entry_points(self):
Load handlers provided by other packages
"""
- for entrypoint in entrypoints.get_group_all("papermill.engine"):
+ for entrypoint in entrypoints.get_group_all('papermill.engine'):
self.register(entrypoint.name, entrypoint.load())
def get_engine(self, name=None):
@@ -90,13 +90,18 @@ class NotebookExecutionManager:
shared manner.
"""
- PENDING = "pending"
- RUNNING = "running"
- COMPLETED = "completed"
- FAILED = "failed"
+ PENDING = 'pending'
+ RUNNING = 'running'
+ COMPLETED = 'completed'
+ FAILED = 'failed'
def __init__(
- self, nb, output_path=None, log_output=False, progress_bar=True, autosave_cell_every=30
+ self,
+ nb,
+ output_path=None,
+ log_output=False,
+ progress_bar=True,
+ autosave_cell_every=30,
):
self.nb = nb
self.output_path = output_path
@@ -111,7 +116,7 @@ def __init__(
# lazy import due to implict slow ipython import
from tqdm.auto import tqdm
- self.pbar = tqdm(total=len(self.nb.cells), unit="cell", desc="Executing")
+ self.pbar = tqdm(total=len(self.nb.cells), unit='cell', desc='Executing')
def now(self):
"""Helper to return current UTC time"""
@@ -162,7 +167,7 @@ def autosave_cell(self):
# Autosave is taking too long, so exponentially back off.
self.autosave_cell_every *= 2
logger.warning(
- "Autosave too slow: {:.2f} sec, over {}% limit. Backing off to {} sec".format(
+ 'Autosave too slow: {:.2f} sec, over {}% limit. Backing off to {} sec'.format(
save_elapsed, self.max_autosave_pct, self.autosave_cell_every
)
)
@@ -187,7 +192,7 @@ def notebook_start(self, **kwargs):
for cell in self.nb.cells:
# Reset the cell execution counts.
- if cell.get("cell_type") == "code":
+ if cell.get('cell_type') == 'code':
cell.execution_count = None
# Clear out the papermill metadata for each cell.
@@ -198,7 +203,7 @@ def notebook_start(self, **kwargs):
duration=None,
status=self.PENDING, # pending, running, completed
)
- if cell.get("cell_type") == "code":
+ if cell.get('cell_type') == 'code':
cell.outputs = []
self.save()
@@ -216,13 +221,13 @@ def cell_start(self, cell, cell_index=None, **kwargs):
logger.info(f'Executing Cell {ceel_num:-<40}')
cell.metadata.papermill['start_time'] = self.now().isoformat()
- cell.metadata.papermill["status"] = self.RUNNING
+ cell.metadata.papermill['status'] = self.RUNNING
cell.metadata.papermill['exception'] = False
# injects optional description of the current cell directly in the tqdm
cell_description = self.get_cell_description(cell)
if cell_description is not None and hasattr(self, 'pbar') and self.pbar:
- self.pbar.set_description(f"Executing {cell_description}")
+ self.pbar.set_description(f'Executing {cell_description}')
self.save()
@@ -278,9 +283,7 @@ def notebook_complete(self, **kwargs):
self.end_time = self.now()
self.nb.metadata.papermill['end_time'] = self.end_time.isoformat()
if self.nb.metadata.papermill.get('start_time'):
- self.nb.metadata.papermill['duration'] = (
- self.end_time - self.start_time
- ).total_seconds()
+ self.nb.metadata.papermill['duration'] = (self.end_time - self.start_time).total_seconds()
# Cleanup cell statuses in case callbacks were never called
for cell in self.nb.cells:
@@ -295,12 +298,12 @@ def notebook_complete(self, **kwargs):
# Force a final sync
self.save()
- def get_cell_description(self, cell, escape_str="papermill_description="):
+ def get_cell_description(self, cell, escape_str='papermill_description='):
"""Fetches cell description if present"""
if cell is None:
return None
- cell_code = cell["source"]
+ cell_code = cell['source']
if cell_code is None or escape_str not in cell_code:
return None
diff --git a/papermill/exceptions.py b/papermill/exceptions.py
index d27ce2bf..f78f95f7 100644
--- a/papermill/exceptions.py
+++ b/papermill/exceptions.py
@@ -33,10 +33,10 @@ def __str__(self):
# when called with str(). In order to maintain compatability with previous versions which
# passed only the message to the superclass constructor, __str__ method is implemented to
# provide the same result as was produced in the past.
- message = "\n" + 75 * "-" + "\n"
+ message = '\n' + 75 * '-' + '\n'
message += 'Exception encountered at "In [%s]":\n' % str(self.exec_count)
- message += "\n".join(self.traceback)
- message += "\n"
+ message += '\n'.join(self.traceback)
+ message += '\n'
return message
@@ -59,10 +59,8 @@ class PapermillParameterOverwriteWarning(PapermillWarning):
def missing_dependency_generator(package, dep):
def missing_dep():
raise PapermillOptionalDependencyException(
- "The {package} optional dependency is missing. "
- "Please run pip install papermill[{dep}] to install this dependency".format(
- package=package, dep=dep
- )
+ f'The {package} optional dependency is missing. '
+ f'Please run pip install papermill[{dep}] to install this dependency'
)
return missing_dep
@@ -71,9 +69,9 @@ def missing_dep():
def missing_environment_variable_generator(package, env_key):
def missing_dep():
raise PapermillOptionalDependencyException(
- "The {package} optional dependency is present, but the environment "
- "variable {env_key} is not set. Please set this variable as "
- "required by {package} on your platform.".format(package=package, env_key=env_key)
+ f'The {package} optional dependency is present, but the environment '
+ f'variable {env_key} is not set. Please set this variable as '
+ f'required by {package} on your platform.'
)
return missing_dep
diff --git a/papermill/execute.py b/papermill/execute.py
index 03cef9e8..1b683918 100644
--- a/papermill/execute.py
+++ b/papermill/execute.py
@@ -1,13 +1,18 @@
-import nbformat
from pathlib import Path
-from .log import logger
-from .exceptions import PapermillExecutionError
-from .iorw import get_pretty_path, local_file_io_cwd, load_notebook_node, write_ipynb
+import nbformat
+
from .engines import papermill_engines
-from .utils import chdir
-from .parameterize import add_builtin_parameters, parameterize_notebook, parameterize_path
+from .exceptions import PapermillExecutionError
from .inspection import _infer_parameters
+from .iorw import get_pretty_path, load_notebook_node, local_file_io_cwd, write_ipynb
+from .log import logger
+from .parameterize import (
+ add_builtin_parameters,
+ parameterize_notebook,
+ parameterize_path,
+)
+from .utils import chdir
def execute_notebook(
@@ -79,11 +84,11 @@ def execute_notebook(
input_path = parameterize_path(input_path, path_parameters)
output_path = parameterize_path(output_path, path_parameters)
- logger.info("Input Notebook: %s" % get_pretty_path(input_path))
- logger.info("Output Notebook: %s" % get_pretty_path(output_path))
+ logger.info('Input Notebook: %s' % get_pretty_path(input_path))
+ logger.info('Output Notebook: %s' % get_pretty_path(output_path))
with local_file_io_cwd():
if cwd is not None:
- logger.info(f"Working directory: {get_pretty_path(cwd)}")
+ logger.info(f'Working directory: {get_pretty_path(cwd)}')
nb = load_notebook_node(input_path)
@@ -93,7 +98,7 @@ def execute_notebook(
parameter_predefined = {p.name for p in parameter_predefined}
for p in parameters:
if p not in parameter_predefined:
- logger.warning(f"Passed unknown parameter: {p}")
+ logger.warning(f'Passed unknown parameter: {p}')
nb = parameterize_notebook(
nb,
parameters,
@@ -109,9 +114,7 @@ def execute_notebook(
if not prepare_only:
# Dropdown to the engine to fetch the kernel name from the notebook document
- kernel_name = papermill_engines.nb_kernel_name(
- engine_name=engine_name, nb=nb, name=kernel_name
- )
+ kernel_name = papermill_engines.nb_kernel_name(engine_name=engine_name, nb=nb, name=kernel_name)
# Execute the Notebook in `cwd` if it is set
with chdir(cwd):
nb = papermill_engines.execute_notebook_with_engine(
@@ -165,15 +168,13 @@ def prepare_notebook_metadata(nb, input_path, output_path, report_mode=False):
return nb
-ERROR_MARKER_TAG = "papermill-error-cell-tag"
+ERROR_MARKER_TAG = 'papermill-error-cell-tag'
-ERROR_STYLE = (
- 'style="color:red; font-family:Helvetica Neue, Helvetica, Arial, sans-serif; font-size:2em;"'
-)
+ERROR_STYLE = 'style="color:red; font-family:Helvetica Neue, Helvetica, Arial, sans-serif; font-size:2em;"'
ERROR_MESSAGE_TEMPLATE = (
'<span id="papermill-error-cell" ' + ERROR_STYLE + '>'
- "An Exception was encountered at 'In [%s]'."
+ 'An Exception was encountered at \'In [%s]\'.'
'</span>'
)
@@ -185,7 +186,7 @@ def prepare_notebook_metadata(nb, input_path, output_path, report_mode=False):
def remove_error_markers(nb):
- nb.cells = [cell for cell in nb.cells if ERROR_MARKER_TAG not in cell.metadata.get("tags", [])]
+ nb.cells = [cell for cell in nb.cells if ERROR_MARKER_TAG not in cell.metadata.get('tags', [])]
return nb
@@ -201,12 +202,12 @@ def raise_for_execution_errors(nb, output_path):
"""
error = None
for index, cell in enumerate(nb.cells):
- if cell.get("outputs") is None:
+ if cell.get('outputs') is None:
continue
for output in cell.outputs:
- if output.output_type == "error":
- if output.ename == "SystemExit" and (output.evalue == "" or output.evalue == "0"):
+ if output.output_type == 'error':
+ if output.ename == 'SystemExit' and (output.evalue == '' or output.evalue == '0'):
continue
error = PapermillExecutionError(
cell_index=index,
diff --git a/papermill/inspection.py b/papermill/inspection.py
index 6800921d..db5a6136 100644
--- a/papermill/inspection.py
+++ b/papermill/inspection.py
@@ -1,18 +1,24 @@
"""Deduce parameters of a notebook from the parameters cell."""
-import click
from pathlib import Path
+import click
+
from .iorw import get_pretty_path, load_notebook_node, local_file_io_cwd
from .log import logger
from .parameterize import add_builtin_parameters, parameterize_path
from .translators import papermill_translators
-from .utils import any_tagged_cell, find_first_tagged_cell_index, nb_kernel_name, nb_language
+from .utils import (
+ any_tagged_cell,
+ find_first_tagged_cell_index,
+ nb_kernel_name,
+ nb_language,
+)
def _open_notebook(notebook_path, parameters):
path_parameters = add_builtin_parameters(parameters)
input_path = parameterize_path(notebook_path, path_parameters)
- logger.info("Input Notebook: %s" % get_pretty_path(input_path))
+ logger.info('Input Notebook: %s' % get_pretty_path(input_path))
with local_file_io_cwd():
return load_notebook_node(input_path)
@@ -33,7 +39,7 @@ def _infer_parameters(nb, name=None, language=None):
"""
params = []
- parameter_cell_idx = find_first_tagged_cell_index(nb, "parameters")
+ parameter_cell_idx = find_first_tagged_cell_index(nb, 'parameters')
if parameter_cell_idx < 0:
return params
parameter_cell = nb.cells[parameter_cell_idx]
@@ -45,11 +51,7 @@ def _infer_parameters(nb, name=None, language=None):
try:
params = translator.inspect(parameter_cell)
except NotImplementedError:
- logger.warning(
- "Translator for '{}' language does not support parameter introspection.".format(
- language
- )
- )
+ logger.warning(f"Translator for '{language}' language does not support parameter introspection.")
return params
@@ -69,7 +71,7 @@ def display_notebook_help(ctx, notebook_path, parameters):
pretty_path = get_pretty_path(notebook_path)
click.echo(f"\nParameters inferred for notebook '{pretty_path}':")
- if not any_tagged_cell(nb, "parameters"):
+ if not any_tagged_cell(nb, 'parameters'):
click.echo("\n No cell tagged 'parameters'")
return 1
@@ -77,23 +79,22 @@ def display_notebook_help(ctx, notebook_path, parameters):
if params:
for param in params:
p = param._asdict()
- type_repr = p["inferred_type_name"]
- if type_repr == "None":
- type_repr = "Unknown type"
+ type_repr = p['inferred_type_name']
+ if type_repr == 'None':
+ type_repr = 'Unknown type'
- definition = " {}: {} (default {})".format(p["name"], type_repr, p["default"])
+ definition = ' {}: {} (default {})'.format(p['name'], type_repr, p['default'])
if len(definition) > 30:
- if len(p["help"]):
- param_help = "".join((definition, "\n", 34 * " ", p["help"]))
+ if len(p['help']):
+ param_help = ''.join((definition, '\n', 34 * ' ', p['help']))
else:
param_help = definition
else:
- param_help = "{:<34}{}".format(definition, p["help"])
+ param_help = '{:<34}{}'.format(definition, p['help'])
click.echo(param_help)
else:
click.echo(
- "\n Can't infer anything about this notebook's parameters. "
- "It may not have any parameter defined."
+ "\n Can't infer anything about this notebook's parameters. " 'It may not have any parameter defined.'
)
return 0
diff --git a/papermill/iorw.py b/papermill/iorw.py
index 4a2d3285..ecca680f 100644
--- a/papermill/iorw.py
+++ b/papermill/iorw.py
@@ -1,16 +1,20 @@
+import fnmatch
+import json
import os
import sys
-import json
-import yaml
-import fnmatch
-import nbformat
-import requests
import warnings
-import entrypoints
-
from contextlib import contextmanager
-from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential
+import entrypoints
+import nbformat
+import requests
+import yaml
+from tenacity import (
+ retry,
+ retry_if_exception_type,
+ stop_after_attempt,
+ wait_exponential,
+)
from . import __version__
from .exceptions import (
@@ -25,37 +29,37 @@
try:
from .s3 import S3
except ImportError:
- S3 = missing_dependency_generator("boto3", "s3")
+ S3 = missing_dependency_generator('boto3', 's3')
try:
from .adl import ADL
except ImportError:
- ADL = missing_dependency_generator("azure.datalake.store", "azure")
+ ADL = missing_dependency_generator('azure.datalake.store', 'azure')
except KeyError as exc:
- if exc.args[0] == "APPDATA":
- ADL = missing_environment_variable_generator("azure.datalake.store", "APPDATA")
+ if exc.args[0] == 'APPDATA':
+ ADL = missing_environment_variable_generator('azure.datalake.store', 'APPDATA')
else:
raise
try:
from .abs import AzureBlobStore
except ImportError:
- AzureBlobStore = missing_dependency_generator("azure.storage.blob", "azure")
+ AzureBlobStore = missing_dependency_generator('azure.storage.blob', 'azure')
try:
from gcsfs import GCSFileSystem
except ImportError:
- GCSFileSystem = missing_dependency_generator("gcsfs", "gcs")
+ GCSFileSystem = missing_dependency_generator('gcsfs', 'gcs')
try:
- from pyarrow.fs import HadoopFileSystem, FileSelector
+ from pyarrow.fs import FileSelector, HadoopFileSystem
except ImportError:
- HadoopFileSystem = missing_dependency_generator("pyarrow", "hdfs")
+ HadoopFileSystem = missing_dependency_generator('pyarrow', 'hdfs')
try:
from github import Github
except ImportError:
- Github = missing_dependency_generator("pygithub", "github")
+ Github = missing_dependency_generator('pygithub', 'github')
def fallback_gs_is_retriable(e):
@@ -83,11 +87,11 @@ def fallback_gs_is_retriable(e):
class PapermillIO:
- '''
+ """
The holder which houses any io system registered with the system.
This object is used in a singleton manner to save and load particular
named Handler objects for reference externally.
- '''
+ """
def __init__(self):
self.reset()
@@ -117,11 +121,11 @@ def register(self, scheme, handler):
def register_entry_points(self):
# Load handlers provided by other packages
- for entrypoint in entrypoints.get_group_all("papermill.io"):
+ for entrypoint in entrypoints.get_group_all('papermill.io'):
self.register(entrypoint.name, entrypoint.load())
def get_handler(self, path, extensions=None):
- '''Get I/O Handler based on a notebook path
+ """Get I/O Handler based on a notebook path
Parameters
----------
@@ -138,7 +142,7 @@ def get_handler(self, path, extensions=None):
Returns
-------
I/O Handler
- '''
+ """
if path is None:
return NoIOHandler()
@@ -147,14 +151,9 @@ def get_handler(self, path, extensions=None):
if extensions:
if not fnmatch.fnmatch(os.path.basename(path).split('?')[0], '*.*'):
- warnings.warn(
- "the file is not specified with any extension : " + os.path.basename(path)
- )
- elif not any(
- fnmatch.fnmatch(os.path.basename(path).split('?')[0], '*' + ext)
- for ext in extensions
- ):
- warnings.warn(f"The specified file ({path}) does not end in one of {extensions}")
+ warnings.warn('the file is not specified with any extension : ' + os.path.basename(path))
+ elif not any(fnmatch.fnmatch(os.path.basename(path).split('?')[0], '*' + ext) for ext in extensions):
+ warnings.warn(f'The specified file ({path}) does not end in one of {extensions}')
local_handler = None
for scheme, handler in self._handlers:
@@ -165,7 +164,7 @@ def get_handler(self, path, extensions=None):
return handler
if local_handler is None:
- raise PapermillException(f"Could not find a registered schema handler for: {path}")
+ raise PapermillException(f'Could not find a registered schema handler for: {path}')
return local_handler
@@ -196,7 +195,7 @@ def __init__(self):
def read(self, path):
try:
with chdir(self._cwd):
- with open(path, encoding="utf-8") as f:
+ with open(path, encoding='utf-8') as f:
return f.read()
except OSError as e:
try:
@@ -217,14 +216,14 @@ def write(self, buf, path):
dirname = os.path.dirname(path)
if dirname and not os.path.exists(dirname):
raise FileNotFoundError(f"output folder {dirname} doesn't exist.")
- with open(path, 'w', encoding="utf-8") as f:
+ with open(path, 'w', encoding='utf-8') as f:
f.write(buf)
def pretty_path(self, path):
return path
def cwd(self, new_path):
- '''Sets the cwd during reads and writes'''
+ """Sets the cwd during reads and writes"""
old_cwd = self._cwd
self._cwd = new_path
return old_cwd
@@ -233,7 +232,7 @@ def cwd(self, new_path):
class S3Handler:
@classmethod
def read(cls, path):
- return "\n".join(S3().read(path))
+ return '\n'.join(S3().read(path))
@classmethod
def listdir(cls, path):
@@ -259,7 +258,7 @@ def _get_client(self):
def read(self, path):
lines = self._get_client().read(path)
- return "\n".join(lines)
+ return '\n'.join(lines)
def listdir(self, path):
return self._get_client().listdir(path)
@@ -282,7 +281,7 @@ def _get_client(self):
def read(self, path):
lines = self._get_client().read(path)
- return "\n".join(lines)
+ return '\n'.join(lines)
def listdir(self, path):
return self._get_client().listdir(path)
@@ -321,7 +320,9 @@ def write(self, buf, path):
retry=retry_if_exception_type(PapermillRateLimitException),
stop=stop_after_attempt(self.RATE_LIMIT_RETRIES),
wait=wait_exponential(
- multiplier=self.RETRY_MULTIPLIER, min=self.RETRY_DELAY, max=self.RETRY_MAX_DELAY
+ multiplier=self.RETRY_MULTIPLIER,
+ min=self.RETRY_DELAY,
+ max=self.RETRY_MAX_DELAY,
),
reraise=True,
)
@@ -333,7 +334,7 @@ def retry_write():
try:
message = e.message
except AttributeError:
- message = f"Generic exception {type(e)} raised"
+ message = f'Generic exception {type(e)} raised'
if gs_is_retriable(e):
raise PapermillRateLimitException(message)
# Reraise the original exception without retries
@@ -351,7 +352,7 @@ def __init__(self):
def _get_client(self):
if self._client is None:
- self._client = HadoopFileSystem(host="default")
+ self._client = HadoopFileSystem(host='default')
return self._client
def read(self, path):
@@ -403,7 +404,7 @@ def pretty_path(self, path):
class StreamHandler:
- '''Handler for Stdin/Stdout streams'''
+ """Handler for Stdin/Stdout streams"""
def read(self, path):
return sys.stdin.read()
@@ -424,7 +425,7 @@ def pretty_path(self, path):
class NotebookNodeHandler:
- '''Handler for input_path of nbformat.NotebookNode object'''
+ """Handler for input_path of nbformat.NotebookNode object"""
def read(self, path):
return nbformat.writes(path)
@@ -440,7 +441,7 @@ def pretty_path(self, path):
class NoIOHandler:
- '''Handler for output_path of None - intended to not write anything'''
+ """Handler for output_path of None - intended to not write anything"""
def read(self, path):
raise PapermillException('read is not supported by NoIOHandler')
@@ -466,17 +467,17 @@ class NoDatesSafeLoader(yaml.SafeLoader):
# Instantiate a PapermillIO instance and register Handlers.
papermill_io = PapermillIO()
-papermill_io.register("local", LocalHandler())
-papermill_io.register("s3://", S3Handler)
-papermill_io.register("adl://", ADLHandler())
-papermill_io.register("abs://", ABSHandler())
-papermill_io.register("http://", HttpHandler)
-papermill_io.register("https://", HttpHandler)
-papermill_io.register("gs://", GCSHandler())
-papermill_io.register("hdfs://", HDFSHandler())
-papermill_io.register("http://github.com/", GithubHandler())
-papermill_io.register("https://github.com/", GithubHandler())
-papermill_io.register("-", StreamHandler())
+papermill_io.register('local', LocalHandler())
+papermill_io.register('s3://', S3Handler)
+papermill_io.register('adl://', ADLHandler())
+papermill_io.register('abs://', ABSHandler())
+papermill_io.register('http://', HttpHandler)
+papermill_io.register('https://', HttpHandler)
+papermill_io.register('gs://', GCSHandler())
+papermill_io.register('hdfs://', HDFSHandler())
+papermill_io.register('http://github.com/', GithubHandler())
+papermill_io.register('https://github.com/', GithubHandler())
+papermill_io.register('-', StreamHandler())
papermill_io.register_entry_points()
@@ -539,14 +540,14 @@ def get_pretty_path(path):
@contextmanager
def local_file_io_cwd(path=None):
try:
- local_handler = papermill_io.get_handler("local")
+ local_handler = papermill_io.get_handler('local')
except PapermillException:
- logger.warning("No local file handler detected")
+ logger.warning('No local file handler detected')
else:
try:
old_cwd = local_handler.cwd(path or os.getcwd())
except AttributeError:
- logger.warning("Local file handler does not support cwd assignment")
+ logger.warning('Local file handler does not support cwd assignment')
else:
try:
yield
diff --git a/papermill/parameterize.py b/papermill/parameterize.py
index 4e09dfd6..a210f26e 100644
--- a/papermill/parameterize.py
+++ b/papermill/parameterize.py
@@ -1,15 +1,15 @@
+from datetime import datetime
+from uuid import uuid4
+
import nbformat
from .engines import papermill_engines
-from .log import logger
from .exceptions import PapermillMissingParameterException
from .iorw import read_yaml_file
+from .log import logger
from .translators import translate_parameters
from .utils import find_first_tagged_cell_index
-from uuid import uuid4
-from datetime import datetime
-
def add_builtin_parameters(parameters):
"""Add built-in parameters to a dictionary of parameters
@@ -20,10 +20,10 @@ def add_builtin_parameters(parameters):
Dictionary of parameters provided by the user
"""
with_builtin_parameters = {
- "pm": {
- "run_uuid": str(uuid4()),
- "current_datetime_local": datetime.now(),
- "current_datetime_utc": datetime.utcnow(),
+ 'pm': {
+ 'run_uuid': str(uuid4()),
+ 'current_datetime_local': datetime.now(),
+ 'current_datetime_utc': datetime.utcnow(),
}
}
@@ -53,7 +53,7 @@ def parameterize_path(path, parameters):
try:
return path.format(**parameters)
except KeyError as key_error:
- raise PapermillMissingParameterException(f"Missing parameter {key_error}")
+ raise PapermillMissingParameterException(f'Missing parameter {key_error}')
def parameterize_notebook(
diff --git a/papermill/s3.py b/papermill/s3.py
index a170d491..06ac9aff 100644
--- a/papermill/s3.py
+++ b/papermill/s3.py
@@ -1,8 +1,7 @@
"""Utilities for working with S3."""
-import os
-
import logging
+import os
import threading
import zlib
@@ -11,7 +10,6 @@
from .exceptions import AwsError
from .utils import retry
-
logger = logging.getLogger('papermill.s3')
@@ -34,9 +32,7 @@ def __init__(self, name, service=None):
def list(self, prefix='', delimiter=None):
"""Limits a list of Bucket's objects based on prefix and delimiter."""
- return self.service._list(
- bucket=self.name, prefix=prefix, delimiter=delimiter, objects=True
- )
+ return self.service._list(bucket=self.name, prefix=prefix, delimiter=delimiter, objects=True)
class Prefix:
@@ -213,7 +209,8 @@ def sort(item):
for page in page_iterator:
locations = sorted(
- [i for i in page.get('Contents', []) + page.get('CommonPrefixes', [])], key=sort
+ [i for i in page.get('Contents', []) + page.get('CommonPrefixes', [])],
+ key=sort,
)
for item in locations:
@@ -234,7 +231,14 @@ def sort(item):
prefix = item['Key'] if 'Key' in item else item['Prefix']
yield f's3://{bucket}/{prefix}'
- def _put(self, source, dest, num_callbacks=10, policy='bucket-owner-full-control', **kwargs):
+ def _put(
+ self,
+ source,
+ dest,
+ num_callbacks=10,
+ policy='bucket-owner-full-control',
+ **kwargs,
+ ):
key = self._get_key(dest)
obj = self.s3.Object(key.bucket.name, key.name)
@@ -247,7 +251,12 @@ def _put(self, source, dest, num_callbacks=10, policy='bucket-owner-full-control
return key
def _put_string(
- self, source, dest, num_callbacks=10, policy='bucket-owner-full-control', **kwargs
+ self,
+ source,
+ dest,
+ num_callbacks=10,
+ policy='bucket-owner-full-control',
+ **kwargs,
):
key = self._get_key(dest)
obj = self.s3.Object(key.bucket.name, key.name)
@@ -309,7 +318,7 @@ def cat(
if size == 0:
break
- r = obj.get(Range=f"bytes={bytes_read}-")
+ r = obj.get(Range=f'bytes={bytes_read}-')
try:
while bytes_read < size:
@@ -339,7 +348,7 @@ def cat(
bytes_read += len(bytes)
except zlib.error:
- logger.error("Error while decompressing [%s]", key.name)
+ logger.error('Error while decompressing [%s]', key.name)
raise
except UnicodeDecodeError:
raise
@@ -375,8 +384,8 @@ def cp_string(self, source, dest, **kwargs):
the s3 location
"""
- assert isinstance(source, str), "source must be a string"
- assert self._is_s3(dest), "Destination must be s3 location"
+ assert isinstance(source, str), 'source must be a string'
+ assert self._is_s3(dest), 'Destination must be s3 location'
return self._put_string(source, dest, **kwargs)
@@ -399,7 +408,7 @@ def list(self, name, iterator=False, **kwargs):
if True return iterator rather than converting to list object
"""
- assert self._is_s3(name), "name must be in form s3://bucket/key"
+ assert self._is_s3(name), 'name must be in form s3://bucket/key'
it = self._list(bucket=self._bucket_name(name), prefix=self._key_name(name), **kwargs)
return iter(it) if iterator else list(it)
@@ -423,10 +432,10 @@ def listdir(self, name, **kwargs):
files or prefixes that are encountered
"""
- assert self._is_s3(name), "name must be in form s3://bucket/prefix/"
+ assert self._is_s3(name), 'name must be in form s3://bucket/prefix/'
if not name.endswith('/'):
- name += "/"
+ name += '/'
return self.list(name, delimiter='/', **kwargs)
def read(self, source, compressed=False, encoding='UTF-8'):
diff --git a/papermill/tests/__init__.py b/papermill/tests/__init__.py
index b1bfa363..6ef2067e 100644
--- a/papermill/tests/__init__.py
+++ b/papermill/tests/__init__.py
@@ -1,8 +1,6 @@
import os
-
from io import StringIO
-
kernel_name = 'python3'
diff --git a/papermill/tests/test_abs.py b/papermill/tests/test_abs.py
index 797fd138..580828b9 100644
--- a/papermill/tests/test_abs.py
+++ b/papermill/tests/test_abs.py
@@ -1,14 +1,15 @@
import os
import unittest
-
from unittest.mock import Mock, patch
+
from azure.identity import EnvironmentCredential
+
from ..abs import AzureBlobStore
class MockBytesIO:
def __init__(self):
- self.list = [b"hello", b"world!"]
+ self.list = [b'hello', b'world!']
def __getitem__(self, index):
return self.list[index]
@@ -23,7 +24,7 @@ class ABSTest(unittest.TestCase):
"""
def setUp(self):
- self.list_blobs = Mock(return_value=["foo", "bar", "baz"])
+ self.list_blobs = Mock(return_value=['foo', 'bar', 'baz'])
self.upload_blob = Mock()
self.download_blob = Mock()
self._container_client = Mock(list_blobs=self.list_blobs)
@@ -34,88 +35,75 @@ def setUp(self):
)
self.abs = AzureBlobStore()
self.abs._blob_service_client = Mock(return_value=self._blob_service_client)
- os.environ["AZURE_TENANT_ID"] = "mytenantid"
- os.environ["AZURE_CLIENT_ID"] = "myclientid"
- os.environ["AZURE_CLIENT_SECRET"] = "myclientsecret"
+ os.environ['AZURE_TENANT_ID'] = 'mytenantid'
+ os.environ['AZURE_CLIENT_ID'] = 'myclientid'
+ os.environ['AZURE_CLIENT_SECRET'] = 'myclientsecret'
def test_split_url_raises_exception_on_invalid_url(self):
with self.assertRaises(Exception) as context:
- AzureBlobStore._split_url("this_is_not_a_valid_url")
- self.assertTrue(
- "Invalid azure blob url 'this_is_not_a_valid_url'" in str(context.exception)
- )
+ AzureBlobStore._split_url('this_is_not_a_valid_url')
+ self.assertTrue("Invalid azure blob url 'this_is_not_a_valid_url'" in str(context.exception))
def test_split_url_splits_valid_url(self):
- params = AzureBlobStore._split_url(
- "abs://myaccount.blob.core.windows.net/sascontainer/sasblob.txt?sastoken"
- )
- self.assertEqual(params["account"], "myaccount")
- self.assertEqual(params["container"], "sascontainer")
- self.assertEqual(params["blob"], "sasblob.txt")
- self.assertEqual(params["sas_token"], "sastoken")
+ params = AzureBlobStore._split_url('abs://myaccount.blob.core.windows.net/sascontainer/sasblob.txt?sastoken')
+ self.assertEqual(params['account'], 'myaccount')
+ self.assertEqual(params['container'], 'sascontainer')
+ self.assertEqual(params['blob'], 'sasblob.txt')
+ self.assertEqual(params['sas_token'], 'sastoken')
def test_split_url_splits_valid_url_no_sas(self):
- params = AzureBlobStore._split_url(
- "abs://myaccount.blob.core.windows.net/container/blob.txt"
- )
- self.assertEqual(params["account"], "myaccount")
- self.assertEqual(params["container"], "container")
- self.assertEqual(params["blob"], "blob.txt")
- self.assertEqual(params["sas_token"], "")
+ params = AzureBlobStore._split_url('abs://myaccount.blob.core.windows.net/container/blob.txt')
+ self.assertEqual(params['account'], 'myaccount')
+ self.assertEqual(params['container'], 'container')
+ self.assertEqual(params['blob'], 'blob.txt')
+ self.assertEqual(params['sas_token'], '')
def test_split_url_splits_valid_url_with_prefix(self):
params = AzureBlobStore._split_url(
- "abs://myaccount.blob.core.windows.net/sascontainer/A/B/sasblob.txt?sastoken"
+ 'abs://myaccount.blob.core.windows.net/sascontainer/A/B/sasblob.txt?sastoken'
)
- self.assertEqual(params["account"], "myaccount")
- self.assertEqual(params["container"], "sascontainer")
- self.assertEqual(params["blob"], "A/B/sasblob.txt")
- self.assertEqual(params["sas_token"], "sastoken")
+ self.assertEqual(params['account'], 'myaccount')
+ self.assertEqual(params['container'], 'sascontainer')
+ self.assertEqual(params['blob'], 'A/B/sasblob.txt')
+ self.assertEqual(params['sas_token'], 'sastoken')
def test_listdir_calls(self):
self.assertEqual(
- self.abs.listdir(
- "abs://myaccount.blob.core.windows.net/sascontainer/sasblob.txt?sastoken"
- ),
- ["foo", "bar", "baz"],
+ self.abs.listdir('abs://myaccount.blob.core.windows.net/sascontainer/sasblob.txt?sastoken'),
+ ['foo', 'bar', 'baz'],
)
- self._blob_service_client.get_container_client.assert_called_once_with("sascontainer")
- self.list_blobs.assert_called_once_with("sasblob.txt")
+ self._blob_service_client.get_container_client.assert_called_once_with('sascontainer')
+ self.list_blobs.assert_called_once_with('sasblob.txt')
- @patch("papermill.abs.io.BytesIO", side_effect=MockBytesIO)
+ @patch('papermill.abs.io.BytesIO', side_effect=MockBytesIO)
def test_reads_file(self, mockBytesIO):
self.assertEqual(
- self.abs.read(
- "abs://myaccount.blob.core.windows.net/sascontainer/sasblob.txt?sastoken"
- ),
- ["hello", "world!"],
- )
- self._blob_service_client.get_blob_client.assert_called_once_with(
- "sascontainer", "sasblob.txt"
+ self.abs.read('abs://myaccount.blob.core.windows.net/sascontainer/sasblob.txt?sastoken'),
+ ['hello', 'world!'],
)
+ self._blob_service_client.get_blob_client.assert_called_once_with('sascontainer', 'sasblob.txt')
self.download_blob.assert_called_once_with()
def test_write_file(self):
self.abs.write(
- "hello world", "abs://myaccount.blob.core.windows.net/sascontainer/sasblob.txt?sastoken"
- )
- self._blob_service_client.get_blob_client.assert_called_once_with(
- "sascontainer", "sasblob.txt"
+ 'hello world',
+ 'abs://myaccount.blob.core.windows.net/sascontainer/sasblob.txt?sastoken',
)
- self.upload_blob.assert_called_once_with(data="hello world", overwrite=True)
+ self._blob_service_client.get_blob_client.assert_called_once_with('sascontainer', 'sasblob.txt')
+ self.upload_blob.assert_called_once_with(data='hello world', overwrite=True)
def test_blob_service_client(self):
abs = AzureBlobStore()
- blob = abs._blob_service_client(account_name="myaccount", sas_token="sastoken")
- self.assertEqual(blob.account_name, "myaccount")
+ blob = abs._blob_service_client(account_name='myaccount', sas_token='sastoken')
+ self.assertEqual(blob.account_name, 'myaccount')
# Credentials gets funky with v12.0.0, so I comment this out
# self.assertEqual(blob.credential, "sastoken")
def test_blob_service_client_environment_credentials(self):
abs = AzureBlobStore()
- blob = abs._blob_service_client(account_name="myaccount", sas_token="")
- self.assertEqual(blob.account_name, "myaccount")
+ blob = abs._blob_service_client(account_name='myaccount', sas_token='')
+ self.assertEqual(blob.account_name, 'myaccount')
self.assertIsInstance(blob.credential, EnvironmentCredential)
- self.assertEqual(blob.credential._credential._tenant_id, "mytenantid")
- self.assertEqual(blob.credential._credential._client_id, "myclientid")
- self.assertEqual(blob.credential._credential._client_credential, "myclientsecret")
+ self.assertEqual(blob.credential._credential._tenant_id, 'mytenantid')
+ self.assertEqual(blob.credential._credential._client_id, 'myclientid')
+ self.assertEqual(blob.credential._credential._client_credential, 'myclientsecret')
diff --git a/papermill/tests/test_adl.py b/papermill/tests/test_adl.py
index 6195c9f4..952c7a19 100644
--- a/papermill/tests/test_adl.py
+++ b/papermill/tests/test_adl.py
@@ -1,8 +1,9 @@
import unittest
+from unittest.mock import MagicMock, Mock, patch
-from unittest.mock import Mock, MagicMock, patch
-
-from ..adl import ADL, core as adl_core, lib as adl_lib
+from ..adl import ADL
+from ..adl import core as adl_core
+from ..adl import lib as adl_lib
class ADLTest(unittest.TestCase):
@@ -12,10 +13,14 @@ class ADLTest(unittest.TestCase):
def setUp(self):
self.ls = Mock(
- return_value=["path/to/directory/foo", "path/to/directory/bar", "path/to/directory/baz"]
+ return_value=[
+ 'path/to/directory/foo',
+ 'path/to/directory/bar',
+ 'path/to/directory/baz',
+ ]
)
self.fakeFile = MagicMock()
- self.fakeFile.__iter__.return_value = [b"a", b"b", b"c"]
+ self.fakeFile.__iter__.return_value = [b'a', b'b', b'c']
self.fakeFile.__enter__.return_value = self.fakeFile
self.open = Mock(return_value=self.fakeFile)
self.fakeAdapter = Mock(open=self.open, ls=self.ls)
@@ -24,40 +29,41 @@ def setUp(self):
def test_split_url_raises_exception_on_invalid_url(self):
with self.assertRaises(Exception) as context:
- ADL._split_url("this_is_not_a_valid_url")
+ ADL._split_url('this_is_not_a_valid_url')
self.assertTrue("Invalid ADL url 'this_is_not_a_valid_url'" in str(context.exception))
def test_split_url_splits_valid_url(self):
- (store_name, path) = ADL._split_url("adl://foo.azuredatalakestore.net/bar/baz")
- self.assertEqual(store_name, "foo")
- self.assertEqual(path, "bar/baz")
+ (store_name, path) = ADL._split_url('adl://foo.azuredatalakestore.net/bar/baz')
+ self.assertEqual(store_name, 'foo')
+ self.assertEqual(path, 'bar/baz')
def test_listdir_calls_ls_on_adl_adapter(self):
self.assertEqual(
- self.adl.listdir("adl://foo_store.azuredatalakestore.net/path/to/directory"),
+ self.adl.listdir('adl://foo_store.azuredatalakestore.net/path/to/directory'),
[
- "adl://foo_store.azuredatalakestore.net/path/to/directory/foo",
- "adl://foo_store.azuredatalakestore.net/path/to/directory/bar",
- "adl://foo_store.azuredatalakestore.net/path/to/directory/baz",
+ 'adl://foo_store.azuredatalakestore.net/path/to/directory/foo',
+ 'adl://foo_store.azuredatalakestore.net/path/to/directory/bar',
+ 'adl://foo_store.azuredatalakestore.net/path/to/directory/baz',
],
)
- self.ls.assert_called_once_with("path/to/directory")
+ self.ls.assert_called_once_with('path/to/directory')
def test_read_opens_and_reads_file(self):
self.assertEqual(
- self.adl.read("adl://foo_store.azuredatalakestore.net/path/to/file"), ["a", "b", "c"]
+ self.adl.read('adl://foo_store.azuredatalakestore.net/path/to/file'),
+ ['a', 'b', 'c'],
)
self.fakeFile.__iter__.assert_called_once_with()
def test_write_opens_file_and_writes_to_it(self):
- self.adl.write("hello world", "adl://foo_store.azuredatalakestore.net/path/to/file")
- self.fakeFile.write.assert_called_once_with(b"hello world")
+ self.adl.write('hello world', 'adl://foo_store.azuredatalakestore.net/path/to/file')
+ self.fakeFile.write.assert_called_once_with(b'hello world')
- @patch.object(adl_lib, 'auth', return_value="my_token")
- @patch.object(adl_core, 'AzureDLFileSystem', return_value="my_adapter")
+ @patch.object(adl_lib, 'auth', return_value='my_token')
+ @patch.object(adl_core, 'AzureDLFileSystem', return_value='my_adapter')
def test_create_adapter(self, azure_dl_filesystem_mock, auth_mock):
sut = ADL()
- actual = sut._create_adapter("my_store_name")
- assert actual == "my_adapter"
+ actual = sut._create_adapter('my_store_name')
+ assert actual == 'my_adapter'
auth_mock.assert_called_once_with()
- azure_dl_filesystem_mock.assert_called_once_with("my_token", store_name="my_store_name")
+ azure_dl_filesystem_mock.assert_called_once_with('my_token', store_name='my_store_name')
diff --git a/papermill/tests/test_autosave.py b/papermill/tests/test_autosave.py
index 46032300..74ae06e8 100644
--- a/papermill/tests/test_autosave.py
+++ b/papermill/tests/test_autosave.py
@@ -1,15 +1,15 @@
-import nbformat
import os
import tempfile
import time
import unittest
from unittest.mock import patch
-from . import get_notebook_path
+import nbformat
from .. import engines
from ..engines import NotebookExecutionManager
from ..execute import execute_notebook
+from . import get_notebook_path
class TestMidCellAutosave(unittest.TestCase):
@@ -19,9 +19,7 @@ def setUp(self):
self.nb = nbformat.read(self.notebook_path, as_version=4)
def test_autosave_not_too_fast(self):
- nb_man = NotebookExecutionManager(
- self.nb, output_path='test.ipynb', autosave_cell_every=0.5
- )
+ nb_man = NotebookExecutionManager(self.nb, output_path='test.ipynb', autosave_cell_every=0.5)
with patch.object(engines, 'write_ipynb') as write_mock:
write_mock.reset_mock()
assert write_mock.call_count == 0 # check that the mock is sane
diff --git a/papermill/tests/test_cli.py b/papermill/tests/test_cli.py
index 71443985..ad6ddbed 100755
--- a/papermill/tests/test_cli.py
+++ b/papermill/tests/test_cli.py
@@ -2,35 +2,34 @@
""" Test the command line interface """
import os
-from pathlib import Path
-import sys
import subprocess
+import sys
import tempfile
-import uuid
-import nbclient
-
-import nbformat
import unittest
+import uuid
+from pathlib import Path
from unittest.mock import patch
+import nbclient
+import nbformat
import pytest
from click.testing import CliRunner
-from . import get_notebook_path, kernel_name
from .. import cli
-from ..cli import papermill, _is_int, _is_float, _resolve_type
+from ..cli import _is_float, _is_int, _resolve_type, papermill
+from . import get_notebook_path, kernel_name
@pytest.mark.parametrize(
- "test_input,expected",
+ 'test_input,expected',
[
- ("True", True),
- ("False", False),
- ("None", None),
- ("12.51", 12.51),
- ("10", 10),
- ("hello world", "hello world"),
- ("š", "š"),
+ ('True', True),
+ ('False', False),
+ ('None', None),
+ ('12.51', 12.51),
+ ('10', 10),
+ ('hello world', 'hello world'),
+ ('š', 'š'),
],
)
def test_resolve_type(test_input, expected):
@@ -38,17 +37,17 @@ def test_resolve_type(test_input, expected):
@pytest.mark.parametrize(
- "value,expected",
+ 'value,expected',
[
(13.71, True),
- ("False", False),
- ("None", False),
+ ('False', False),
+ ('None', False),
(-8.2, True),
(10, True),
- ("10", True),
- ("12.31", True),
- ("hello world", False),
- ("š", False),
+ ('10', True),
+ ('12.31', True),
+ ('hello world', False),
+ ('š', False),
],
)
def test_is_float(value, expected):
@@ -56,17 +55,17 @@ def test_is_float(value, expected):
@pytest.mark.parametrize(
- "value,expected",
+ 'value,expected',
[
(13.71, True),
- ("False", False),
- ("None", False),
+ ('False', False),
+ ('None', False),
(-8.2, True),
- ("-23.2", False),
+ ('-23.2', False),
(10, True),
- ("13", True),
- ("hello world", False),
- ("š", False),
+ ('13', True),
+ ('hello world', False),
+ ('š', False),
],
)
def test_is_int(value, expected):
@@ -100,12 +99,8 @@ def setUp(self):
self.default_execute_kwargs['input_path'],
self.default_execute_kwargs['output_path'],
]
- self.sample_yaml_file = os.path.join(
- os.path.dirname(__file__), 'parameters', 'example.yaml'
- )
- self.sample_json_file = os.path.join(
- os.path.dirname(__file__), 'parameters', 'example.json'
- )
+ self.sample_yaml_file = os.path.join(os.path.dirname(__file__), 'parameters', 'example.yaml')
+ self.sample_json_file = os.path.join(os.path.dirname(__file__), 'parameters', 'example.json')
def augment_execute_kwargs(self, **new_kwargs):
kwargs = self.default_execute_kwargs.copy()
@@ -115,24 +110,27 @@ def augment_execute_kwargs(self, **new_kwargs):
@patch(cli.__name__ + '.execute_notebook')
def test_parameters(self, execute_patch):
self.runner.invoke(
- papermill, self.default_args + ['-p', 'foo', 'bar', '--parameters', 'baz', '42']
- )
- execute_patch.assert_called_with(
- **self.augment_execute_kwargs(parameters={'foo': 'bar', 'baz': 42})
+ papermill,
+ self.default_args + ['-p', 'foo', 'bar', '--parameters', 'baz', '42'],
)
+ execute_patch.assert_called_with(**self.augment_execute_kwargs(parameters={'foo': 'bar', 'baz': 42}))
@patch(cli.__name__ + '.execute_notebook')
def test_parameters_raw(self, execute_patch):
self.runner.invoke(
- papermill, self.default_args + ['-r', 'foo', 'bar', '--parameters_raw', 'baz', '42']
- )
- execute_patch.assert_called_with(
- **self.augment_execute_kwargs(parameters={'foo': 'bar', 'baz': '42'})
+ papermill,
+ self.default_args + ['-r', 'foo', 'bar', '--parameters_raw', 'baz', '42'],
)
+ execute_patch.assert_called_with(**self.augment_execute_kwargs(parameters={'foo': 'bar', 'baz': '42'}))
@patch(cli.__name__ + '.execute_notebook')
def test_parameters_file(self, execute_patch):
- extra_args = ['-f', self.sample_yaml_file, '--parameters_file', self.sample_json_file]
+ extra_args = [
+ '-f',
+ self.sample_yaml_file,
+ '--parameters_file',
+ self.sample_json_file,
+ ]
self.runner.invoke(papermill, self.default_args + extra_args)
execute_patch.assert_called_with(
**self.augment_execute_kwargs(
@@ -152,16 +150,12 @@ def test_parameters_yaml(self, execute_patch):
papermill,
self.default_args + ['-y', '{"foo": "bar"}', '--parameters_yaml', '{"foo2": ["baz"]}'],
)
- execute_patch.assert_called_with(
- **self.augment_execute_kwargs(parameters={'foo': 'bar', 'foo2': ['baz']})
- )
+ execute_patch.assert_called_with(**self.augment_execute_kwargs(parameters={'foo': 'bar', 'foo2': ['baz']}))
@patch(cli.__name__ + '.execute_notebook')
def test_parameters_yaml_date(self, execute_patch):
self.runner.invoke(papermill, self.default_args + ['-y', 'a_date: 2019-01-01'])
- execute_patch.assert_called_with(
- **self.augment_execute_kwargs(parameters={'a_date': '2019-01-01'})
- )
+ execute_patch.assert_called_with(**self.augment_execute_kwargs(parameters={'a_date': '2019-01-01'}))
@patch(cli.__name__ + '.execute_notebook')
def test_parameters_empty(self, execute_patch):
@@ -202,7 +196,8 @@ def test_parameters_yaml_override(self, execute_patch):
)
@patch(
- cli.__name__ + '.execute_notebook', side_effect=nbclient.exceptions.DeadKernelError("Fake")
+ cli.__name__ + '.execute_notebook',
+ side_effect=nbclient.exceptions.DeadKernelError('Fake'),
)
def test_parameters_dead_kernel(self, execute_patch):
result = self.runner.invoke(
@@ -220,18 +215,15 @@ def test_parameters_base64(self, execute_patch):
'eydmb28nOiAxfQ==',
]
self.runner.invoke(papermill, self.default_args + extra_args)
- execute_patch.assert_called_with(
- **self.augment_execute_kwargs(parameters={'foo': 1, 'bar': 2})
- )
+ execute_patch.assert_called_with(**self.augment_execute_kwargs(parameters={'foo': 1, 'bar': 2}))
@patch(cli.__name__ + '.execute_notebook')
def test_parameters_base64_date(self, execute_patch):
self.runner.invoke(
- papermill, self.default_args + ['--parameters_base64', 'YV9kYXRlOiAyMDE5LTAxLTAx']
- )
- execute_patch.assert_called_with(
- **self.augment_execute_kwargs(parameters={'a_date': '2019-01-01'})
+ papermill,
+ self.default_args + ['--parameters_base64', 'YV9kYXRlOiAyMDE5LTAxLTAx'],
)
+ execute_patch.assert_called_with(**self.augment_execute_kwargs(parameters={'a_date': '2019-01-01'}))
@patch(cli.__name__ + '.execute_notebook')
def test_inject_input_path(self, execute_patch):
@@ -262,9 +254,7 @@ def test_inject_paths(self, execute_patch):
@patch(cli.__name__ + '.execute_notebook')
def test_engine(self, execute_patch):
self.runner.invoke(papermill, self.default_args + ['--engine', 'engine-that-could'])
- execute_patch.assert_called_with(
- **self.augment_execute_kwargs(engine_name='engine-that-could')
- )
+ execute_patch.assert_called_with(**self.augment_execute_kwargs(engine_name='engine-that-could'))
@patch(cli.__name__ + '.execute_notebook')
def test_prepare_only(self, execute_patch):
@@ -309,9 +299,7 @@ def test_log_output(self, execute_patch):
@patch(cli.__name__ + '.execute_notebook')
def test_log_output_plus_progress(self, execute_patch):
self.runner.invoke(papermill, self.default_args + ['--log-output', '--progress-bar'])
- execute_patch.assert_called_with(
- **self.augment_execute_kwargs(log_output=True, progress_bar=True)
- )
+ execute_patch.assert_called_with(**self.augment_execute_kwargs(log_output=True, progress_bar=True))
@patch(cli.__name__ + '.execute_notebook')
def test_no_log_output(self, execute_patch):
@@ -403,7 +391,7 @@ def test_many_args(self, execute_patch):
'bar': 'value',
'baz': 'replace',
'yaml_foo': {'yaml_bar': 'yaml_baz'},
- "base64_foo": "base64_bar",
+ 'base64_foo': 'base64_bar',
'a_date': '2019-01-01',
},
engine_name='engine-that-could',
@@ -441,16 +429,20 @@ def papermill_version():
@pytest.fixture()
def notebook():
- metadata = {'kernelspec': {'name': 'python3', 'language': 'python', 'display_name': 'python3'}}
+ metadata = {
+ 'kernelspec': {
+ 'name': 'python3',
+ 'language': 'python',
+ 'display_name': 'python3',
+ }
+ }
return nbformat.v4.new_notebook(
metadata=metadata,
cells=[nbformat.v4.new_markdown_cell('This is a notebook with kernel: python3')],
)
-require_papermill_installed = pytest.mark.skipif(
- not papermill_version(), reason='papermill is not installed'
-)
+require_papermill_installed = pytest.mark.skipif(not papermill_version(), reason='papermill is not installed')
@require_papermill_installed
diff --git a/papermill/tests/test_clientwrap.py b/papermill/tests/test_clientwrap.py
index a8bfff27..32309cf6 100644
--- a/papermill/tests/test_clientwrap.py
+++ b/papermill/tests/test_clientwrap.py
@@ -1,13 +1,12 @@
-import nbformat
import unittest
-
from unittest.mock import call, patch
-from . import get_notebook_path
+import nbformat
-from ..log import logger
-from ..engines import NotebookExecutionManager
from ..clientwrap import PapermillNotebookClient
+from ..engines import NotebookExecutionManager
+from ..log import logger
+from . import get_notebook_path
class TestPapermillClientWrapper(unittest.TestCase):
@@ -18,23 +17,23 @@ def setUp(self):
def test_logging_stderr_msg(self):
with patch.object(logger, 'warning') as warning_mock:
- for output in self.nb.cells[0].get("outputs", []):
+ for output in self.nb.cells[0].get('outputs', []):
self.client.log_output_message(output)
- warning_mock.assert_called_once_with("INFO:test:test text\n")
+ warning_mock.assert_called_once_with('INFO:test:test text\n')
def test_logging_stdout_msg(self):
with patch.object(logger, 'info') as info_mock:
- for output in self.nb.cells[1].get("outputs", []):
+ for output in self.nb.cells[1].get('outputs', []):
self.client.log_output_message(output)
- info_mock.assert_called_once_with("hello world\n")
+ info_mock.assert_called_once_with('hello world\n')
def test_logging_data_msg(self):
with patch.object(logger, 'info') as info_mock:
- for output in self.nb.cells[2].get("outputs", []):
+ for output in self.nb.cells[2].get('outputs', []):
self.client.log_output_message(output)
info_mock.assert_has_calls(
[
- call(""),
- call(""),
+ call(''),
+ call(''),
]
)
diff --git a/papermill/tests/test_engines.py b/papermill/tests/test_engines.py
index fa132781..b750a01e 100644
--- a/papermill/tests/test_engines.py
+++ b/papermill/tests/test_engines.py
@@ -1,17 +1,16 @@
import copy
-import dateutil
import unittest
-
from abc import ABCMeta
-from unittest.mock import Mock, patch, call
-from nbformat.notebooknode import NotebookNode
+from unittest.mock import Mock, call, patch
-from . import get_notebook_path
+import dateutil
+from nbformat.notebooknode import NotebookNode
from .. import engines, exceptions
-from ..log import logger
+from ..engines import Engine, NBClientEngine, NotebookExecutionManager
from ..iorw import load_notebook_node
-from ..engines import NotebookExecutionManager, Engine, NBClientEngine
+from ..log import logger
+from . import get_notebook_path
def AnyMock(cls):
@@ -170,7 +169,8 @@ def test_cell_complete_after_cell_start(self):
self.assertEqual(cell.metadata.papermill['end_time'], fixed_now.isoformat())
self.assertEqual(
- cell.metadata.papermill['duration'], (fixed_now - start_time).total_seconds()
+ cell.metadata.papermill['duration'],
+ (fixed_now - start_time).total_seconds(),
)
self.assertFalse(cell.metadata.papermill['exception'])
self.assertEqual(cell.metadata.papermill['status'], NotebookExecutionManager.COMPLETED)
@@ -219,7 +219,8 @@ def test_cell_complete_after_cell_exception(self):
self.assertEqual(cell.metadata.papermill['end_time'], fixed_now.isoformat())
self.assertEqual(
- cell.metadata.papermill['duration'], (fixed_now - start_time).total_seconds()
+ cell.metadata.papermill['duration'],
+ (fixed_now - start_time).total_seconds(),
)
self.assertTrue(cell.metadata.papermill['exception'])
self.assertEqual(cell.metadata.papermill['status'], NotebookExecutionManager.FAILED)
@@ -253,7 +254,8 @@ def test_notebook_complete(self):
self.assertEqual(nb_man.nb.metadata.papermill['end_time'], fixed_now.isoformat())
self.assertEqual(
- nb_man.nb.metadata.papermill['duration'], (fixed_now - start_time).total_seconds()
+ nb_man.nb.metadata.papermill['duration'],
+ (fixed_now - start_time).total_seconds(),
)
self.assertFalse(nb_man.nb.metadata.papermill['exception'])
@@ -281,10 +283,12 @@ def test_notebook_complete_cell_status_with_failed(self):
nb_man.cell_exception(nb_man.nb.cells[1])
nb_man.notebook_complete()
self.assertEqual(
- nb_man.nb.cells[0].metadata.papermill['status'], NotebookExecutionManager.COMPLETED
+ nb_man.nb.cells[0].metadata.papermill['status'],
+ NotebookExecutionManager.COMPLETED,
)
self.assertEqual(
- nb_man.nb.cells[1].metadata.papermill['status'], NotebookExecutionManager.FAILED
+ nb_man.nb.cells[1].metadata.papermill['status'],
+ NotebookExecutionManager.FAILED,
)
for cell in nb_man.nb.cells[2:]:
self.assertEqual(cell.metadata.papermill['status'], NotebookExecutionManager.PENDING)
@@ -297,10 +301,10 @@ def setUp(self):
self.nb = load_notebook_node(self.notebook_path)
def test_wrap_and_execute_notebook(self):
- '''
+ """
Mocks each wrapped call and proves the correct inputs get applied to
the correct underlying calls for execute_notebook.
- '''
+ """
with patch.object(Engine, 'execute_managed_notebook') as exec_mock:
with patch.object(engines, 'NotebookExecutionManager') as wrap_mock:
Engine.execute_notebook(
@@ -320,9 +324,7 @@ def test_wrap_and_execute_notebook(self):
autosave_cell_every=30,
)
wrap_mock.return_value.notebook_start.assert_called_once()
- exec_mock.assert_called_once_with(
- wrap_mock.return_value, 'python', log_output=True, bar='baz'
- )
+ exec_mock.assert_called_once_with(wrap_mock.return_value, 'python', log_output=True, bar='baz')
wrap_mock.return_value.notebook_complete.assert_called_once()
wrap_mock.return_value.cleanup_pbar.assert_called_once()
@@ -335,9 +337,7 @@ def execute_managed_notebook(cls, nb_man, kernel_name, **kwargs):
nb_man.cell_complete(cell)
with patch.object(NotebookExecutionManager, 'save') as save_mock:
- nb = CellCallbackEngine.execute_notebook(
- copy.deepcopy(self.nb), 'python', output_path='foo.ipynb'
- )
+ nb = CellCallbackEngine.execute_notebook(copy.deepcopy(self.nb), 'python', output_path='foo.ipynb')
self.assertEqual(nb, AnyMock(NotebookNode))
self.assertNotEqual(self.nb, nb)
@@ -355,7 +355,8 @@ def execute_managed_notebook(cls, nb_man, kernel_name, **kwargs):
self.assertEqual(cell.metadata.papermill['duration'], AnyMock(float))
self.assertFalse(cell.metadata.papermill['exception'])
self.assertEqual(
- cell.metadata.papermill['status'], NotebookExecutionManager.COMPLETED
+ cell.metadata.papermill['status'],
+ NotebookExecutionManager.COMPLETED,
)
def test_no_cell_callback_execute(self):
@@ -366,9 +367,7 @@ def execute_managed_notebook(cls, nb_man, kernel_name, **kwargs):
with patch.object(NotebookExecutionManager, 'save') as save_mock:
with patch.object(NotebookExecutionManager, 'complete_pbar') as pbar_comp_mock:
- nb = NoCellCallbackEngine.execute_notebook(
- copy.deepcopy(self.nb), 'python', output_path='foo.ipynb'
- )
+ nb = NoCellCallbackEngine.execute_notebook(copy.deepcopy(self.nb), 'python', output_path='foo.ipynb')
self.assertEqual(nb, AnyMock(NotebookNode))
self.assertNotEqual(self.nb, nb)
@@ -387,7 +386,8 @@ def execute_managed_notebook(cls, nb_man, kernel_name, **kwargs):
self.assertIsNone(cell.metadata.papermill['duration'])
self.assertIsNone(cell.metadata.papermill['exception'])
self.assertEqual(
- cell.metadata.papermill['status'], NotebookExecutionManager.COMPLETED
+ cell.metadata.papermill['status'],
+ NotebookExecutionManager.COMPLETED,
)
@@ -435,7 +435,11 @@ def test_nb_convert_engine(self):
def test_nb_convert_engine_execute(self):
with patch.object(NotebookExecutionManager, 'save') as save_mock:
nb = NBClientEngine.execute_notebook(
- self.nb, 'python', output_path='foo.ipynb', progress_bar=False, log_output=True
+ self.nb,
+ 'python',
+ output_path='foo.ipynb',
+ progress_bar=False,
+ log_output=True,
)
self.assertEqual(save_mock.call_count, 8)
self.assertEqual(nb, AnyMock(NotebookNode))
@@ -451,7 +455,8 @@ def test_nb_convert_engine_execute(self):
self.assertEqual(cell.metadata.papermill['duration'], AnyMock(float))
self.assertFalse(cell.metadata.papermill['exception'])
self.assertEqual(
- cell.metadata.papermill['status'], NotebookExecutionManager.COMPLETED
+ cell.metadata.papermill['status'],
+ NotebookExecutionManager.COMPLETED,
)
def test_nb_convert_log_outputs(self):
@@ -498,30 +503,31 @@ def setUp(self):
def test_registration(self):
mock_engine = Mock()
- self.papermill_engines.register("mock_engine", mock_engine)
- self.assertIn("mock_engine", self.papermill_engines._engines)
- self.assertIs(mock_engine, self.papermill_engines._engines["mock_engine"])
+ self.papermill_engines.register('mock_engine', mock_engine)
+ self.assertIn('mock_engine', self.papermill_engines._engines)
+ self.assertIs(mock_engine, self.papermill_engines._engines['mock_engine'])
def test_getting(self):
mock_engine = Mock()
- self.papermill_engines.register("mock_engine", mock_engine)
+ self.papermill_engines.register('mock_engine', mock_engine)
# test retrieving an engine works
- retrieved_engine = self.papermill_engines.get_engine("mock_engine")
+ retrieved_engine = self.papermill_engines.get_engine('mock_engine')
self.assertIs(mock_engine, retrieved_engine)
# test you can't retrieve a non-registered engine
self.assertRaises(
- exceptions.PapermillException, self.papermill_engines.get_engine, "non-existent"
+ exceptions.PapermillException,
+ self.papermill_engines.get_engine,
+ 'non-existent',
)
def test_registering_entry_points(self):
fake_entrypoint = Mock(load=Mock())
- fake_entrypoint.name = "fake-engine"
+ fake_entrypoint.name = 'fake-engine'
- with patch(
- "entrypoints.get_group_all", return_value=[fake_entrypoint]
- ) as mock_get_group_all:
+ with patch('entrypoints.get_group_all', return_value=[fake_entrypoint]) as mock_get_group_all:
self.papermill_engines.register_entry_points()
- mock_get_group_all.assert_called_once_with("papermill.engine")
+ mock_get_group_all.assert_called_once_with('papermill.engine')
self.assertEqual(
- self.papermill_engines.get_engine("fake-engine"), fake_entrypoint.load.return_value
+ self.papermill_engines.get_engine('fake-engine'),
+ fake_entrypoint.load.return_value,
)
diff --git a/papermill/tests/test_exceptions.py b/papermill/tests/test_exceptions.py
index 0a7e7a8d..191767fb 100644
--- a/papermill/tests/test_exceptions.py
+++ b/papermill/tests/test_exceptions.py
@@ -10,27 +10,31 @@
@pytest.fixture
def temp_file():
"""NamedTemporaryFile must be set in wb mode, closed without delete, opened with open(file, "rb"),
- then manually deleted. Otherwise, file fails to be read due to permission error on Windows."""
- with tempfile.NamedTemporaryFile(mode="wb", delete=False) as f:
+ then manually deleted. Otherwise, file fails to be read due to permission error on Windows.
+ """
+ with tempfile.NamedTemporaryFile(mode='wb', delete=False) as f:
yield f
os.unlink(f.name)
@pytest.mark.parametrize(
- "exc,args",
+ 'exc,args',
[
(
exceptions.PapermillExecutionError,
- (1, 2, "TestSource", "Exception", Exception(), ["Traceback", "Message"]),
+ (1, 2, 'TestSource', 'Exception', Exception(), ['Traceback', 'Message']),
),
- (exceptions.PapermillMissingParameterException, ("PapermillMissingParameterException",)),
- (exceptions.AwsError, ("AwsError",)),
- (exceptions.FileExistsError, ("FileExistsError",)),
- (exceptions.PapermillException, ("PapermillException",)),
- (exceptions.PapermillRateLimitException, ("PapermillRateLimitException",)),
+ (
+ exceptions.PapermillMissingParameterException,
+ ('PapermillMissingParameterException',),
+ ),
+ (exceptions.AwsError, ('AwsError',)),
+ (exceptions.FileExistsError, ('FileExistsError',)),
+ (exceptions.PapermillException, ('PapermillException',)),
+ (exceptions.PapermillRateLimitException, ('PapermillRateLimitException',)),
(
exceptions.PapermillOptionalDependencyException,
- ("PapermillOptionalDependencyException",),
+ ('PapermillOptionalDependencyException',),
),
],
)
@@ -41,7 +45,7 @@ def test_exceptions_are_unpickleable(temp_file, exc, args):
temp_file.close() # close to re-open for reading
# Read the Pickled File
- with open(temp_file.name, "rb") as read_file:
+ with open(temp_file.name, 'rb') as read_file:
read_file.seek(0)
data = read_file.read()
pickled_err = pickle.loads(data)
diff --git a/papermill/tests/test_execute.py b/papermill/tests/test_execute.py
index f589cbba..6396de35 100644
--- a/papermill/tests/test_execute.py
+++ b/papermill/tests/test_execute.py
@@ -3,20 +3,19 @@
import tempfile
import unittest
from copy import deepcopy
-from unittest.mock import patch, ANY
-
from functools import partial
from pathlib import Path
+from unittest.mock import ANY, patch
import nbformat
from nbformat import validate
from .. import engines, translators
-from ..log import logger
+from ..exceptions import PapermillExecutionError
+from ..execute import execute_notebook
from ..iorw import load_notebook_node
+from ..log import logger
from ..utils import chdir
-from ..execute import execute_notebook
-from ..exceptions import PapermillExecutionError
from . import get_notebook_path, kernel_name
execute_notebook = partial(execute_notebook, kernel_name=kernel_name)
@@ -68,7 +67,8 @@ def test_cell_insertion(self):
execute_notebook(self.notebook_path, self.nb_test_executed_fname, {'msg': 'Hello'})
test_nb = load_notebook_node(self.nb_test_executed_fname)
self.assertListEqual(
- test_nb.cells[1].get('source').split('\n'), ['# Parameters', 'msg = "Hello"', '']
+ test_nb.cells[1].get('source').split('\n'),
+ ['# Parameters', 'msg = "Hello"', ''],
)
self.assertEqual(test_nb.metadata.papermill.parameters, {'msg': 'Hello'})
@@ -78,7 +78,8 @@ def test_no_tags(self):
execute_notebook(get_notebook_path(notebook_name), nb_test_executed_fname, {'msg': 'Hello'})
test_nb = load_notebook_node(nb_test_executed_fname)
self.assertListEqual(
- test_nb.cells[0].get('source').split('\n'), ['# Parameters', 'msg = "Hello"', '']
+ test_nb.cells[0].get('source').split('\n'),
+ ['# Parameters', 'msg = "Hello"', ''],
)
self.assertEqual(test_nb.metadata.papermill.parameters, {'msg': 'Hello'})
@@ -86,14 +87,13 @@ def test_quoted_params(self):
execute_notebook(self.notebook_path, self.nb_test_executed_fname, {'msg': '"Hello"'})
test_nb = load_notebook_node(self.nb_test_executed_fname)
self.assertListEqual(
- test_nb.cells[1].get('source').split('\n'), ['# Parameters', r'msg = "\"Hello\""', '']
+ test_nb.cells[1].get('source').split('\n'),
+ ['# Parameters', r'msg = "\"Hello\""', ''],
)
self.assertEqual(test_nb.metadata.papermill.parameters, {'msg': '"Hello"'})
def test_backslash_params(self):
- execute_notebook(
- self.notebook_path, self.nb_test_executed_fname, {'foo': r'do\ not\ crash'}
- )
+ execute_notebook(self.notebook_path, self.nb_test_executed_fname, {'foo': r'do\ not\ crash'})
test_nb = load_notebook_node(self.nb_test_executed_fname)
self.assertListEqual(
test_nb.cells[1].get('source').split('\n'),
@@ -126,7 +126,7 @@ def test_prepare_only(self):
# Should not raise as we don't execute the notebook at all
execute_notebook(path, result_path, {'foo': r'do\ not\ crash'}, prepare_only=True)
nb = load_notebook_node(result_path)
- self.assertEqual(nb.cells[0].cell_type, "code")
+ self.assertEqual(nb.cells[0].cell_type, 'code')
self.assertEqual(
nb.cells[0].get('source').split('\n'),
['# Parameters', r'foo = "do\\ not\\ crash"', ''],
@@ -147,28 +147,29 @@ def test(self):
# validating the removal logic (the markers are simulating an error in the first code cell
# that has since been fixed)
original_nb = load_notebook_node(path)
- self.assertEqual(original_nb.cells[0].metadata["tags"], ["papermill-error-cell-tag"])
- self.assertIn("In [1]", original_nb.cells[0].source)
- self.assertEqual(original_nb.cells[2].metadata["tags"], ["papermill-error-cell-tag"])
+ self.assertEqual(original_nb.cells[0].metadata['tags'], ['papermill-error-cell-tag'])
+ self.assertIn('In [1]', original_nb.cells[0].source)
+ self.assertEqual(original_nb.cells[2].metadata['tags'], ['papermill-error-cell-tag'])
result_path = os.path.join(self.test_dir, 'broken1.ipynb')
with self.assertRaises(PapermillExecutionError):
execute_notebook(path, result_path)
nb = load_notebook_node(result_path)
- self.assertEqual(nb.cells[0].cell_type, "markdown")
+ self.assertEqual(nb.cells[0].cell_type, 'markdown')
self.assertRegex(
- nb.cells[0].source, r'^$'
+ nb.cells[0].source,
+ r'^$',
)
- self.assertEqual(nb.cells[0].metadata["tags"], ["papermill-error-cell-tag"])
+ self.assertEqual(nb.cells[0].metadata['tags'], ['papermill-error-cell-tag'])
- self.assertEqual(nb.cells[1].cell_type, "markdown")
+ self.assertEqual(nb.cells[1].cell_type, 'markdown')
self.assertEqual(nb.cells[2].execution_count, 1)
- self.assertEqual(nb.cells[3].cell_type, "markdown")
- self.assertEqual(nb.cells[4].cell_type, "markdown")
+ self.assertEqual(nb.cells[3].cell_type, 'markdown')
+ self.assertEqual(nb.cells[4].cell_type, 'markdown')
- self.assertEqual(nb.cells[5].cell_type, "markdown")
+ self.assertEqual(nb.cells[5].cell_type, 'markdown')
self.assertRegex(nb.cells[5].source, '')
- self.assertEqual(nb.cells[5].metadata["tags"], ["papermill-error-cell-tag"])
+ self.assertEqual(nb.cells[5].metadata['tags'], ['papermill-error-cell-tag'])
self.assertEqual(nb.cells[6].execution_count, 2)
self.assertEqual(nb.cells[6].outputs[0].output_type, 'error')
@@ -176,7 +177,8 @@ def test(self):
# double check the removal (the new cells above should be the only two tagged ones)
self.assertEqual(
- sum("papermill-error-cell-tag" in cell.metadata.get("tags", []) for cell in nb.cells), 2
+ sum('papermill-error-cell-tag' in cell.metadata.get('tags', []) for cell in nb.cells),
+ 2,
)
@@ -193,13 +195,14 @@ def test(self):
with self.assertRaises(PapermillExecutionError):
execute_notebook(path, result_path)
nb = load_notebook_node(result_path)
- self.assertEqual(nb.cells[0].cell_type, "markdown")
+ self.assertEqual(nb.cells[0].cell_type, 'markdown')
self.assertRegex(
- nb.cells[0].source, r'^.*In \[2\].*$'
+ nb.cells[0].source,
+ r'^.*In \[2\].*$',
)
self.assertEqual(nb.cells[1].execution_count, 1)
- self.assertEqual(nb.cells[2].cell_type, "markdown")
+ self.assertEqual(nb.cells[2].cell_type, 'markdown')
self.assertRegex(nb.cells[2].source, '')
self.assertEqual(nb.cells[3].execution_count, 2)
self.assertEqual(nb.cells[3].outputs[0].output_type, 'display_data')
@@ -219,9 +222,7 @@ def tearDown(self):
shutil.rmtree(self.test_dir)
def test_report_mode(self):
- nb = execute_notebook(
- self.notebook_path, self.nb_test_executed_fname, {'a': 0}, report_mode=True
- )
+ nb = execute_notebook(self.notebook_path, self.nb_test_executed_fname, {'a': 0}, report_mode=True)
for cell in nb.cells:
if cell.cell_type == 'code':
self.assertEqual(cell.metadata.get('jupyter', {}).get('source_hidden'), True)
@@ -262,21 +263,17 @@ def test_local_save_ignores_cwd_assignment(self):
with chdir(self.base_test_dir):
# Both paths are relative
execute_notebook(
- self.simple_notebook_name, self.nb_test_executed_fname, cwd=self.test_dir
+ self.simple_notebook_name,
+ self.nb_test_executed_fname,
+ cwd=self.test_dir,
)
- self.assertTrue(
- os.path.isfile(os.path.join(self.base_test_dir, self.nb_test_executed_fname))
- )
+ self.assertTrue(os.path.isfile(os.path.join(self.base_test_dir, self.nb_test_executed_fname)))
def test_execution_respects_cwd_assignment(self):
with chdir(self.base_test_dir):
# Both paths are relative
- execute_notebook(
- self.check_notebook_name, self.nb_test_executed_fname, cwd=self.test_dir
- )
- self.assertTrue(
- os.path.isfile(os.path.join(self.base_test_dir, self.nb_test_executed_fname))
- )
+ execute_notebook(self.check_notebook_name, self.nb_test_executed_fname, cwd=self.test_dir)
+ self.assertTrue(os.path.isfile(os.path.join(self.base_test_dir, self.nb_test_executed_fname)))
def test_pathlib_paths(self):
# Copy of test_execution_respects_cwd_assignment but with `Path`s
@@ -301,7 +298,7 @@ def test_sys_exit(self):
result_path = os.path.join(self.test_dir, f'output_{notebook_name}')
execute_notebook(get_notebook_path(notebook_name), result_path)
nb = load_notebook_node(result_path)
- self.assertEqual(nb.cells[0].cell_type, "code")
+ self.assertEqual(nb.cells[0].cell_type, 'code')
self.assertEqual(nb.cells[0].execution_count, 1)
self.assertEqual(nb.cells[1].execution_count, 2)
self.assertEqual(nb.cells[1].outputs[0].output_type, 'error')
@@ -314,7 +311,7 @@ def test_sys_exit0(self):
result_path = os.path.join(self.test_dir, f'output_{notebook_name}')
execute_notebook(get_notebook_path(notebook_name), result_path)
nb = load_notebook_node(result_path)
- self.assertEqual(nb.cells[0].cell_type, "code")
+ self.assertEqual(nb.cells[0].cell_type, 'code')
self.assertEqual(nb.cells[0].execution_count, 1)
self.assertEqual(nb.cells[1].execution_count, 2)
self.assertEqual(nb.cells[1].outputs[0].output_type, 'error')
@@ -328,13 +325,14 @@ def test_sys_exit1(self):
with self.assertRaises(PapermillExecutionError):
execute_notebook(get_notebook_path(notebook_name), result_path)
nb = load_notebook_node(result_path)
- self.assertEqual(nb.cells[0].cell_type, "markdown")
+ self.assertEqual(nb.cells[0].cell_type, 'markdown')
self.assertRegex(
- nb.cells[0].source, r'^$'
+ nb.cells[0].source,
+ r'^$',
)
self.assertEqual(nb.cells[1].execution_count, 1)
- self.assertEqual(nb.cells[2].cell_type, "markdown")
+ self.assertEqual(nb.cells[2].cell_type, 'markdown')
self.assertRegex(nb.cells[2].source, '')
self.assertEqual(nb.cells[3].execution_count, 2)
self.assertEqual(nb.cells[3].outputs[0].output_type, 'error')
@@ -346,7 +344,7 @@ def test_system_exit(self):
result_path = os.path.join(self.test_dir, f'output_{notebook_name}')
execute_notebook(get_notebook_path(notebook_name), result_path)
nb = load_notebook_node(result_path)
- self.assertEqual(nb.cells[0].cell_type, "code")
+ self.assertEqual(nb.cells[0].cell_type, 'code')
self.assertEqual(nb.cells[0].execution_count, 1)
self.assertEqual(nb.cells[1].execution_count, 2)
self.assertEqual(nb.cells[1].outputs[0].output_type, 'error')
@@ -393,25 +391,21 @@ def execute_managed_notebook(cls, nb_man, kernel_name, **kwargs):
@classmethod
def nb_kernel_name(cls, nb, name=None):
- return "my_custom_kernel"
+ return 'my_custom_kernel'
@classmethod
def nb_language(cls, nb, language=None):
- return "my_custom_language"
+ return 'my_custom_language'
def setUp(self):
self.test_dir = tempfile.mkdtemp()
self.notebook_path = get_notebook_path('simple_execute.ipynb')
- self.nb_test_executed_fname = os.path.join(
- self.test_dir, 'output_{}'.format('simple_execute.ipynb')
- )
+ self.nb_test_executed_fname = os.path.join(self.test_dir, 'output_{}'.format('simple_execute.ipynb'))
self._orig_papermill_engines = deepcopy(engines.papermill_engines)
self._orig_translators = deepcopy(translators.papermill_translators)
- engines.papermill_engines.register("custom_engine", self.CustomEngine)
- translators.papermill_translators.register(
- "my_custom_language", translators.PythonTranslator()
- )
+ engines.papermill_engines.register('custom_engine', self.CustomEngine)
+ translators.papermill_translators.register('my_custom_language', translators.PythonTranslator())
def tearDown(self):
shutil.rmtree(self.test_dir)
@@ -419,9 +413,14 @@ def tearDown(self):
translators.papermill_translators = self._orig_translators
@patch.object(
- CustomEngine, "execute_managed_notebook", wraps=CustomEngine.execute_managed_notebook
+ CustomEngine,
+ 'execute_managed_notebook',
+ wraps=CustomEngine.execute_managed_notebook,
+ )
+ @patch(
+ 'papermill.parameterize.translate_parameters',
+ wraps=translators.translate_parameters,
)
- @patch("papermill.parameterize.translate_parameters", wraps=translators.translate_parameters)
def test_custom_kernel_name_and_language(self, translate_parameters, execute_managed_notebook):
"""Tests execute against engine with custom implementations to fetch
kernel name and language from the notebook object
@@ -429,12 +428,13 @@ def test_custom_kernel_name_and_language(self, translate_parameters, execute_man
execute_notebook(
self.notebook_path,
self.nb_test_executed_fname,
- engine_name="custom_engine",
- parameters={"msg": "fake msg"},
+ engine_name='custom_engine',
+ parameters={'msg': 'fake msg'},
)
- self.assertEqual(execute_managed_notebook.call_args[0], (ANY, "my_custom_kernel"))
+ self.assertEqual(execute_managed_notebook.call_args[0], (ANY, 'my_custom_kernel'))
self.assertEqual(
- translate_parameters.call_args[0], (ANY, 'my_custom_language', {"msg": "fake msg"}, ANY)
+ translate_parameters.call_args[0],
+ (ANY, 'my_custom_language', {'msg': 'fake msg'}, ANY),
)
diff --git a/papermill/tests/test_gcs.py b/papermill/tests/test_gcs.py
index 4b50d922..61de47b5 100644
--- a/papermill/tests/test_gcs.py
+++ b/papermill/tests/test_gcs.py
@@ -92,9 +92,7 @@ def test_gcs_listdir(self, mock_gcs_filesystem):
@patch(
'papermill.iorw.GCSFileSystem',
- side_effect=mock_gcs_fs_wrapper(
- GCSRateLimitException({"message": "test", "code": 429}), 10
- ),
+ side_effect=mock_gcs_fs_wrapper(GCSRateLimitException({'message': 'test', 'code': 429}), 10),
)
def test_gcs_handle_exception(self, mock_gcs_filesystem):
with patch.object(GCSHandler, 'RETRY_DELAY', 0):
@@ -105,47 +103,48 @@ def test_gcs_handle_exception(self, mock_gcs_filesystem):
@patch(
'papermill.iorw.GCSFileSystem',
- side_effect=mock_gcs_fs_wrapper(GCSRateLimitException({"message": "test", "code": 429}), 1),
+ side_effect=mock_gcs_fs_wrapper(GCSRateLimitException({'message': 'test', 'code': 429}), 1),
)
def test_gcs_retry(self, mock_gcs_filesystem):
with patch.object(GCSHandler, 'RETRY_DELAY', 0):
with patch.object(GCSHandler, 'RETRY_MULTIPLIER', 0):
with patch.object(GCSHandler, 'RETRY_MAX_DELAY', 0):
self.assertEqual(
- self.gcs_handler.write('raise_limit_exception', 'gs://bucket/test.ipynb'), 2
+ self.gcs_handler.write('raise_limit_exception', 'gs://bucket/test.ipynb'),
+ 2,
)
@patch(
'papermill.iorw.GCSFileSystem',
- side_effect=mock_gcs_fs_wrapper(GCSHttpError({"message": "test", "code": 429}), 1),
+ side_effect=mock_gcs_fs_wrapper(GCSHttpError({'message': 'test', 'code': 429}), 1),
)
def test_gcs_retry_older_exception(self, mock_gcs_filesystem):
with patch.object(GCSHandler, 'RETRY_DELAY', 0):
with patch.object(GCSHandler, 'RETRY_MULTIPLIER', 0):
with patch.object(GCSHandler, 'RETRY_MAX_DELAY', 0):
self.assertEqual(
- self.gcs_handler.write('raise_limit_exception', 'gs://bucket/test.ipynb'), 2
+ self.gcs_handler.write('raise_limit_exception', 'gs://bucket/test.ipynb'),
+ 2,
)
@patch('papermill.iorw.gs_is_retriable', side_effect=fallback_gs_is_retriable)
@patch(
'papermill.iorw.GCSFileSystem',
- side_effect=mock_gcs_fs_wrapper(
- GCSRateLimitException({"message": "test", "code": None}), 1
- ),
+ side_effect=mock_gcs_fs_wrapper(GCSRateLimitException({'message': 'test', 'code': None}), 1),
)
def test_gcs_fallback_retry_unknown_failure_code(self, mock_gcs_filesystem, mock_gcs_retriable):
with patch.object(GCSHandler, 'RETRY_DELAY', 0):
with patch.object(GCSHandler, 'RETRY_MULTIPLIER', 0):
with patch.object(GCSHandler, 'RETRY_MAX_DELAY', 0):
self.assertEqual(
- self.gcs_handler.write('raise_limit_exception', 'gs://bucket/test.ipynb'), 2
+ self.gcs_handler.write('raise_limit_exception', 'gs://bucket/test.ipynb'),
+ 2,
)
@patch('papermill.iorw.gs_is_retriable', return_value=False)
@patch(
'papermill.iorw.GCSFileSystem',
- side_effect=mock_gcs_fs_wrapper(GCSRateLimitException({"message": "test", "code": 500}), 1),
+ side_effect=mock_gcs_fs_wrapper(GCSRateLimitException({'message': 'test', 'code': 500}), 1),
)
def test_gcs_invalid_code(self, mock_gcs_filesystem, mock_gcs_retriable):
with self.assertRaises(GCSRateLimitException):
@@ -154,7 +153,7 @@ def test_gcs_invalid_code(self, mock_gcs_filesystem, mock_gcs_retriable):
@patch('papermill.iorw.gs_is_retriable', side_effect=fallback_gs_is_retriable)
@patch(
'papermill.iorw.GCSFileSystem',
- side_effect=mock_gcs_fs_wrapper(GCSRateLimitException({"message": "test", "code": 500}), 1),
+ side_effect=mock_gcs_fs_wrapper(GCSRateLimitException({'message': 'test', 'code': 500}), 1),
)
def test_fallback_gcs_invalid_code(self, mock_gcs_filesystem, mock_gcs_retriable):
with self.assertRaises(GCSRateLimitException):
@@ -162,7 +161,7 @@ def test_fallback_gcs_invalid_code(self, mock_gcs_filesystem, mock_gcs_retriable
@patch(
'papermill.iorw.GCSFileSystem',
- side_effect=mock_gcs_fs_wrapper(ValueError("not-a-retry"), 1),
+ side_effect=mock_gcs_fs_wrapper(ValueError('not-a-retry'), 1),
)
def test_gcs_unretryable(self, mock_gcs_filesystem):
with self.assertRaises(ValueError):
diff --git a/papermill/tests/test_hdfs.py b/papermill/tests/test_hdfs.py
index 23c96feb..e8c49dd2 100644
--- a/papermill/tests/test_hdfs.py
+++ b/papermill/tests/test_hdfs.py
@@ -40,7 +40,7 @@ def __init__(self, path):
self.path = path
-@pytest.mark.skip(reason="No valid dep package for python 3.12 yet")
+@pytest.mark.skip(reason='No valid dep package for python 3.12 yet')
@patch('papermill.iorw.HadoopFileSystem', side_effect=MockHadoopFileSystem())
class HDFSTest(unittest.TestCase):
def setUp(self):
@@ -49,7 +49,8 @@ def setUp(self):
def test_hdfs_listdir(self, mock_hdfs_filesystem):
client = self.hdfs_handler._get_client()
self.assertEqual(
- self.hdfs_handler.listdir("hdfs:///Projects/"), ['test1.ipynb', 'test2.ipynb']
+ self.hdfs_handler.listdir('hdfs:///Projects/'),
+ ['test1.ipynb', 'test2.ipynb'],
)
# Check if client is the same after calling
self.assertIs(client, self.hdfs_handler._get_client())
@@ -57,11 +58,12 @@ def test_hdfs_listdir(self, mock_hdfs_filesystem):
def test_hdfs_read(self, mock_hdfs_filesystem):
client = self.hdfs_handler._get_client()
self.assertEqual(
- self.hdfs_handler.read("hdfs:///Projects/test1.ipynb"), b'Content of notebook'
+ self.hdfs_handler.read('hdfs:///Projects/test1.ipynb'),
+ b'Content of notebook',
)
self.assertIs(client, self.hdfs_handler._get_client())
def test_hdfs_write(self, mock_hdfs_filesystem):
client = self.hdfs_handler._get_client()
- self.assertEqual(self.hdfs_handler.write("hdfs:///Projects/test1.ipynb", b'New content'), 1)
+ self.assertEqual(self.hdfs_handler.write('hdfs:///Projects/test1.ipynb', b'New content'), 1)
self.assertIs(client, self.hdfs_handler._get_client())
diff --git a/papermill/tests/test_inspect.py b/papermill/tests/test_inspect.py
index 22160225..6d787e2d 100644
--- a/papermill/tests/test_inspect.py
+++ b/papermill/tests/test_inspect.py
@@ -3,11 +3,9 @@
import pytest
from click import Context
-
from papermill.inspection import display_notebook_help, inspect_notebook
-
-NOTEBOOKS_PATH = Path(__file__).parent / "notebooks"
+NOTEBOOKS_PATH = Path(__file__).parent / 'notebooks'
def _get_fullpath(name):
@@ -17,38 +15,55 @@ def _get_fullpath(name):
@pytest.fixture
def click_context():
mock = MagicMock(spec=Context, command=MagicMock())
- mock.command.get_usage.return_value = "Dummy usage"
+ mock.command.get_usage.return_value = 'Dummy usage'
return mock
@pytest.mark.parametrize(
- "name, expected",
+ 'name, expected',
[
- (_get_fullpath("no_parameters.ipynb"), {}),
+ (_get_fullpath('no_parameters.ipynb'), {}),
(
- _get_fullpath("simple_execute.ipynb"),
- {"msg": {"name": "msg", "inferred_type_name": "None", "default": "None", "help": ""}},
+ _get_fullpath('simple_execute.ipynb'),
+ {
+ 'msg': {
+ 'name': 'msg',
+ 'inferred_type_name': 'None',
+ 'default': 'None',
+ 'help': '',
+ }
+ },
),
(
- _get_fullpath("complex_parameters.ipynb"),
+ _get_fullpath('complex_parameters.ipynb'),
{
- "msg": {"name": "msg", "inferred_type_name": "None", "default": "None", "help": ""},
- "a": {
- "name": "a",
- "inferred_type_name": "float",
- "default": "2.25",
- "help": "Variable a",
+ 'msg': {
+ 'name': 'msg',
+ 'inferred_type_name': 'None',
+ 'default': 'None',
+ 'help': '',
+ },
+ 'a': {
+ 'name': 'a',
+ 'inferred_type_name': 'float',
+ 'default': '2.25',
+ 'help': 'Variable a',
+ },
+ 'b': {
+ 'name': 'b',
+ 'inferred_type_name': 'List[str]',
+ 'default': "['Hello','World']",
+ 'help': 'Nice list',
},
- "b": {
- "name": "b",
- "inferred_type_name": "List[str]",
- "default": "['Hello','World']",
- "help": "Nice list",
+ 'c': {
+ 'name': 'c',
+ 'inferred_type_name': 'NoneType',
+ 'default': 'None',
+ 'help': '',
},
- "c": {"name": "c", "inferred_type_name": "NoneType", "default": "None", "help": ""},
},
),
- (_get_fullpath("notimplemented_translator.ipynb"), {}),
+ (_get_fullpath('notimplemented_translator.ipynb'), {}),
],
)
def test_inspect_notebook(name, expected):
@@ -56,44 +71,51 @@ def test_inspect_notebook(name, expected):
def test_str_path():
- expected = {"msg": {"name": "msg", "inferred_type_name": "None", "default": "None", "help": ""}}
- assert inspect_notebook(str(_get_fullpath("simple_execute.ipynb"))) == expected
+ expected = {
+ 'msg': {
+ 'name': 'msg',
+ 'inferred_type_name': 'None',
+ 'default': 'None',
+ 'help': '',
+ }
+ }
+ assert inspect_notebook(str(_get_fullpath('simple_execute.ipynb'))) == expected
@pytest.mark.parametrize(
- "name, expected",
+ 'name, expected',
[
(
- _get_fullpath("no_parameters.ipynb"),
+ _get_fullpath('no_parameters.ipynb'),
[
- "Dummy usage",
+ 'Dummy usage',
"\nParameters inferred for notebook '{name}':",
"\n No cell tagged 'parameters'",
],
),
(
- _get_fullpath("simple_execute.ipynb"),
+ _get_fullpath('simple_execute.ipynb'),
[
- "Dummy usage",
+ 'Dummy usage',
"\nParameters inferred for notebook '{name}':",
- " msg: Unknown type (default None)",
+ ' msg: Unknown type (default None)',
],
),
(
- _get_fullpath("complex_parameters.ipynb"),
+ _get_fullpath('complex_parameters.ipynb'),
[
- "Dummy usage",
+ 'Dummy usage',
"\nParameters inferred for notebook '{name}':",
- " msg: Unknown type (default None)",
- " a: float (default 2.25) Variable a",
+ ' msg: Unknown type (default None)',
+ ' a: float (default 2.25) Variable a',
" b: List[str] (default ['Hello','World'])\n Nice list",
- " c: NoneType (default None) ",
+ ' c: NoneType (default None) ',
],
),
(
- _get_fullpath("notimplemented_translator.ipynb"),
+ _get_fullpath('notimplemented_translator.ipynb'),
[
- "Dummy usage",
+ 'Dummy usage',
"\nParameters inferred for notebook '{name}':",
"\n Can't infer anything about this notebook's parameters. It may not have any parameter defined.", # noqa
],
@@ -101,7 +123,7 @@ def test_str_path():
],
)
def test_display_notebook_help(click_context, name, expected):
- with patch("papermill.inspection.click.echo") as echo:
+ with patch('papermill.inspection.click.echo') as echo:
display_notebook_help(click_context, str(name), None)
assert echo.call_count == len(expected)
diff --git a/papermill/tests/test_iorw.py b/papermill/tests/test_iorw.py
index 88417764..cb1eab75 100644
--- a/papermill/tests/test_iorw.py
+++ b/papermill/tests/test_iorw.py
@@ -1,28 +1,28 @@
+import io
import json
-import unittest
import os
-import io
+import unittest
+from tempfile import TemporaryDirectory
+from unittest.mock import Mock, patch
+
import nbformat
import pytest
-
from requests.exceptions import ConnectionError
-from tempfile import TemporaryDirectory
-from unittest.mock import Mock, patch
from .. import iorw
+from ..exceptions import PapermillException
from ..iorw import (
+ ADLHandler,
HttpHandler,
LocalHandler,
NoIOHandler,
- ADLHandler,
NotebookNodeHandler,
- StreamHandler,
PapermillIO,
- read_yaml_file,
- papermill_io,
+ StreamHandler,
local_file_io_cwd,
+ papermill_io,
+ read_yaml_file,
)
-from ..exceptions import PapermillException
from . import get_notebook_path
FIXTURE_PATH = os.path.join(os.path.dirname(__file__), 'fixtures')
@@ -38,16 +38,16 @@ def __init__(self, ver):
self.ver = ver
def read(self, path):
- return f"contents from {path} for version {self.ver}"
+ return f'contents from {path} for version {self.ver}'
def listdir(self, path):
- return ["fake", "contents"]
+ return ['fake', 'contents']
def write(self, buf, path):
- return f"wrote {buf}"
+ return f'wrote {buf}'
def pretty_path(self, path):
- return f"{path}/pretty/{self.ver}"
+ return f'{path}/pretty/{self.ver}'
class FakeByteHandler:
def __init__(self, ver):
@@ -59,13 +59,13 @@ def read(self, path):
return f.read()
def listdir(self, path):
- return ["fake", "contents"]
+ return ['fake', 'contents']
def write(self, buf, path):
- return f"wrote {buf}"
+ return f'wrote {buf}'
def pretty_path(self, path):
- return f"{path}/pretty/{self.ver}"
+ return f'{path}/pretty/{self.ver}'
def setUp(self):
self.papermill_io = PapermillIO()
@@ -73,8 +73,8 @@ def setUp(self):
self.fake1 = self.FakeHandler(1)
self.fake2 = self.FakeHandler(2)
self.fake_byte1 = self.FakeByteHandler(1)
- self.papermill_io.register("fake", self.fake1)
- self.papermill_io_bytes.register("notebooks", self.fake_byte1)
+ self.papermill_io.register('fake', self.fake1)
+ self.papermill_io_bytes.register('notebooks', self.fake_byte1)
self.old_papermill_io = iorw.papermill_io
iorw.papermill_io = self.papermill_io
@@ -83,14 +83,14 @@ def tearDown(self):
iorw.papermill_io = self.old_papermill_io
def test_get_handler(self):
- self.assertEqual(self.papermill_io.get_handler("fake"), self.fake1)
+ self.assertEqual(self.papermill_io.get_handler('fake'), self.fake1)
def test_get_local_handler(self):
with self.assertRaises(PapermillException):
- self.papermill_io.get_handler("dne")
+ self.papermill_io.get_handler('dne')
- self.papermill_io.register("local", self.fake2)
- self.assertEqual(self.papermill_io.get_handler("dne"), self.fake2)
+ self.papermill_io.register('local', self.fake2)
+ self.assertEqual(self.papermill_io.get_handler('dne'), self.fake2)
def test_get_no_io_handler(self):
self.assertIsInstance(self.papermill_io.get_handler(None), NoIOHandler)
@@ -101,91 +101,85 @@ def test_get_notebook_node_handler(self):
def test_entrypoint_register(self):
fake_entrypoint = Mock(load=Mock())
- fake_entrypoint.name = "fake-from-entry-point://"
+ fake_entrypoint.name = 'fake-from-entry-point://'
- with patch(
- "entrypoints.get_group_all", return_value=[fake_entrypoint]
- ) as mock_get_group_all:
+ with patch('entrypoints.get_group_all', return_value=[fake_entrypoint]) as mock_get_group_all:
self.papermill_io.register_entry_points()
- mock_get_group_all.assert_called_once_with("papermill.io")
- fake_ = self.papermill_io.get_handler("fake-from-entry-point://")
+ mock_get_group_all.assert_called_once_with('papermill.io')
+ fake_ = self.papermill_io.get_handler('fake-from-entry-point://')
assert fake_ == fake_entrypoint.load.return_value
def test_register_ordering(self):
# Should match fake1 with fake2 path
- self.assertEqual(self.papermill_io.get_handler("fake2/path"), self.fake1)
+ self.assertEqual(self.papermill_io.get_handler('fake2/path'), self.fake1)
self.papermill_io.reset()
- self.papermill_io.register("fake", self.fake1)
- self.papermill_io.register("fake2", self.fake2)
+ self.papermill_io.register('fake', self.fake1)
+ self.papermill_io.register('fake2', self.fake2)
# Should match fake1 with fake1 path, and NOT fake2 path/match
- self.assertEqual(self.papermill_io.get_handler("fake/path"), self.fake1)
+ self.assertEqual(self.papermill_io.get_handler('fake/path'), self.fake1)
# Should match fake2 with fake2 path
- self.assertEqual(self.papermill_io.get_handler("fake2/path"), self.fake2)
+ self.assertEqual(self.papermill_io.get_handler('fake2/path'), self.fake2)
def test_read(self):
- self.assertEqual(
- self.papermill_io.read("fake/path"), "contents from fake/path for version 1"
- )
+ self.assertEqual(self.papermill_io.read('fake/path'), 'contents from fake/path for version 1')
def test_read_bytes(self):
- self.assertIsNotNone(
- self.papermill_io_bytes.read("notebooks/gcs/gcs_in/gcs-simple_notebook.ipynb")
- )
+ self.assertIsNotNone(self.papermill_io_bytes.read('notebooks/gcs/gcs_in/gcs-simple_notebook.ipynb'))
def test_read_with_no_file_extension(self):
with pytest.warns(UserWarning):
- self.papermill_io.read("fake/path")
+ self.papermill_io.read('fake/path')
def test_read_with_invalid_file_extension(self):
with pytest.warns(UserWarning):
- self.papermill_io.read("fake/path/fakeinputpath.ipynb1")
+ self.papermill_io.read('fake/path/fakeinputpath.ipynb1')
def test_read_with_valid_file_extension(self):
with pytest.warns(None) as warns:
- self.papermill_io.read("fake/path/fakeinputpath.ipynb")
+ self.papermill_io.read('fake/path/fakeinputpath.ipynb')
self.assertEqual(len(warns), 0)
def test_read_yaml_with_no_file_extension(self):
with pytest.warns(UserWarning):
- read_yaml_file("fake/path")
+ read_yaml_file('fake/path')
def test_read_yaml_with_invalid_file_extension(self):
with pytest.warns(UserWarning):
- read_yaml_file("fake/path/fakeinputpath.ipynb")
+ read_yaml_file('fake/path/fakeinputpath.ipynb')
def test_read_stdin(self):
file_content = 'Τὴ γλῶσσα μοῦ ἔδωσαν ἑλληνικὴ'
with patch('sys.stdin', io.StringIO(file_content)):
- self.assertEqual(self.old_papermill_io.read("-"), file_content)
+ self.assertEqual(self.old_papermill_io.read('-'), file_content)
def test_listdir(self):
- self.assertEqual(self.papermill_io.listdir("fake/path"), ["fake", "contents"])
+ self.assertEqual(self.papermill_io.listdir('fake/path'), ['fake', 'contents'])
def test_write(self):
- self.assertEqual(self.papermill_io.write("buffer", "fake/path"), "wrote buffer")
+ self.assertEqual(self.papermill_io.write('buffer', 'fake/path'), 'wrote buffer')
def test_write_with_no_file_extension(self):
with pytest.warns(UserWarning):
- self.papermill_io.write("buffer", "fake/path")
+ self.papermill_io.write('buffer', 'fake/path')
def test_write_with_path_of_none(self):
self.assertIsNone(self.papermill_io.write('buffer', None))
def test_write_with_invalid_file_extension(self):
with pytest.warns(UserWarning):
- self.papermill_io.write("buffer", "fake/path/fakeoutputpath.ipynb1")
+ self.papermill_io.write('buffer', 'fake/path/fakeoutputpath.ipynb1')
def test_write_stdout(self):
file_content = 'Τὴ γλῶσσα μοῦ ἔδωσαν ἑλληνικὴ'
out = io.BytesIO()
with patch('sys.stdout', out):
- self.old_papermill_io.write(file_content, "-")
+ self.old_papermill_io.write(file_content, '-')
self.assertEqual(out.getvalue(), file_content.encode('utf-8'))
def test_pretty_path(self):
- self.assertEqual(self.papermill_io.pretty_path("fake/path"), "fake/path/pretty/1")
+ self.assertEqual(self.papermill_io.pretty_path('fake/path'), 'fake/path/pretty/1')
class TestLocalHandler(unittest.TestCase):
@@ -205,12 +199,12 @@ def test_write_utf8(self):
def test_write_no_directory_exists(self):
with self.assertRaises(FileNotFoundError):
- LocalHandler().write("buffer", "fake/path/fakenb.ipynb")
+ LocalHandler().write('buffer', 'fake/path/fakenb.ipynb')
def test_write_local_directory(self):
with patch.object(io, 'open'):
# Shouldn't raise with missing directory
- LocalHandler().write("buffer", "local.ipynb")
+ LocalHandler().write('buffer', 'local.ipynb')
def test_write_passed_cwd(self):
with TemporaryDirectory() as temp_dir:
@@ -231,7 +225,7 @@ def test_local_file_io_cwd(self):
try:
local_handler = LocalHandler()
papermill_io.reset()
- papermill_io.register("local", local_handler)
+ papermill_io.register('local', local_handler)
with local_file_io_cwd(temp_dir):
local_handler.write('ā', 'paper.txt')
@@ -253,7 +247,7 @@ def test_invalid_string(self):
# a string from which we can't extract a notebook is assumed to
# be a file and an IOError will be raised
with self.assertRaises(IOError):
- LocalHandler().read("a random string")
+ LocalHandler().read('a random string')
class TestNoIOHandler(unittest.TestCase):
@@ -281,20 +275,20 @@ class TestADLHandler(unittest.TestCase):
def setUp(self):
self.handler = ADLHandler()
self.handler._client = Mock(
- read=Mock(return_value=["foo", "bar", "baz"]),
- listdir=Mock(return_value=["foo", "bar", "baz"]),
+ read=Mock(return_value=['foo', 'bar', 'baz']),
+ listdir=Mock(return_value=['foo', 'bar', 'baz']),
write=Mock(),
)
def test_read(self):
- self.assertEqual(self.handler.read("some_path"), "foo\nbar\nbaz")
+ self.assertEqual(self.handler.read('some_path'), 'foo\nbar\nbaz')
def test_listdir(self):
- self.assertEqual(self.handler.listdir("some_path"), ["foo", "bar", "baz"])
+ self.assertEqual(self.handler.listdir('some_path'), ['foo', 'bar', 'baz'])
def test_write(self):
- self.handler.write("foo", "bar")
- self.handler._client.write.assert_called_once_with("foo", "bar")
+ self.handler.write('foo', 'bar')
+ self.handler._client.write.assert_called_once_with('foo', 'bar')
class TestHttpHandler(unittest.TestCase):
diff --git a/papermill/tests/test_parameterize.py b/papermill/tests/test_parameterize.py
index 5a929df6..fbd12ff0 100644
--- a/papermill/tests/test_parameterize.py
+++ b/papermill/tests/test_parameterize.py
@@ -1,21 +1,23 @@
import unittest
+from datetime import datetime
-from ..iorw import load_notebook_node
from ..exceptions import PapermillMissingParameterException
-from ..parameterize import parameterize_notebook, parameterize_path, add_builtin_parameters
+from ..iorw import load_notebook_node
+from ..parameterize import (
+ add_builtin_parameters,
+ parameterize_notebook,
+ parameterize_path,
+)
from . import get_notebook_path
-from datetime import datetime
class TestNotebookParametrizing(unittest.TestCase):
def count_nb_injected_parameter_cells(self, nb):
- return len(
- [c for c in nb.cells if 'injected-parameters' in c.get('metadata', {}).get('tags', [])]
- )
+ return len([c for c in nb.cells if 'injected-parameters' in c.get('metadata', {}).get('tags', [])])
def test_no_tag_copying(self):
# Test that injected cell does not copy other tags
- test_nb = load_notebook_node(get_notebook_path("simple_execute.ipynb"))
+ test_nb = load_notebook_node(get_notebook_path('simple_execute.ipynb'))
test_nb.cells[0]['metadata']['tags'].append('some tag')
test_nb = parameterize_notebook(test_nb, {'msg': 'Hello'})
@@ -31,7 +33,7 @@ def test_no_tag_copying(self):
self.assertEqual(self.count_nb_injected_parameter_cells(test_nb), 1)
def test_injected_parameters_tag(self):
- test_nb = load_notebook_node(get_notebook_path("simple_execute.ipynb"))
+ test_nb = load_notebook_node(get_notebook_path('simple_execute.ipynb'))
test_nb = parameterize_notebook(test_nb, {'msg': 'Hello'})
@@ -44,7 +46,7 @@ def test_injected_parameters_tag(self):
self.assertEqual(self.count_nb_injected_parameter_cells(test_nb), 1)
def test_repeated_run_injected_parameters_tag(self):
- test_nb = load_notebook_node(get_notebook_path("simple_execute.ipynb"))
+ test_nb = load_notebook_node(get_notebook_path('simple_execute.ipynb'))
self.assertEqual(self.count_nb_injected_parameter_cells(test_nb), 0)
test_nb = parameterize_notebook(test_nb, {'msg': 'Hello'})
@@ -54,7 +56,7 @@ def test_repeated_run_injected_parameters_tag(self):
self.assertEqual(self.count_nb_injected_parameter_cells(test_nb), 1)
def test_no_parameter_tag(self):
- test_nb = load_notebook_node(get_notebook_path("simple_execute.ipynb"))
+ test_nb = load_notebook_node(get_notebook_path('simple_execute.ipynb'))
test_nb.cells[0]['metadata']['tags'] = []
test_nb = parameterize_notebook(test_nb, {'msg': 'Hello'})
@@ -65,7 +67,7 @@ def test_no_parameter_tag(self):
self.assertEqual(self.count_nb_injected_parameter_cells(test_nb), 1)
def test_repeated_run_no_parameters_tag(self):
- test_nb = load_notebook_node(get_notebook_path("simple_execute.ipynb"))
+ test_nb = load_notebook_node(get_notebook_path('simple_execute.ipynb'))
test_nb.cells[0]['metadata']['tags'] = []
self.assertEqual(self.count_nb_injected_parameter_cells(test_nb), 0)
@@ -76,10 +78,8 @@ def test_repeated_run_no_parameters_tag(self):
self.assertEqual(self.count_nb_injected_parameter_cells(test_nb), 1)
def test_custom_comment(self):
- test_nb = load_notebook_node(get_notebook_path("simple_execute.ipynb"))
- test_nb = parameterize_notebook(
- test_nb, {'msg': 'Hello'}, comment='This is a custom comment'
- )
+ test_nb = load_notebook_node(get_notebook_path('simple_execute.ipynb'))
+ test_nb = parameterize_notebook(test_nb, {'msg': 'Hello'}, comment='This is a custom comment')
cell_one = test_nb.cells[1]
first_line = cell_one['source'].split('\n')[0]
@@ -88,83 +88,79 @@ def test_custom_comment(self):
class TestBuiltinParameters(unittest.TestCase):
def test_add_builtin_parameters_keeps_provided_parameters(self):
- with_builtin_parameters = add_builtin_parameters({"foo": "bar"})
- self.assertEqual(with_builtin_parameters["foo"], "bar")
+ with_builtin_parameters = add_builtin_parameters({'foo': 'bar'})
+ self.assertEqual(with_builtin_parameters['foo'], 'bar')
def test_add_builtin_parameters_adds_dict_of_builtins(self):
- with_builtin_parameters = add_builtin_parameters({"foo": "bar"})
- self.assertIn("pm", with_builtin_parameters)
- self.assertIsInstance(with_builtin_parameters["pm"], type({}))
+ with_builtin_parameters = add_builtin_parameters({'foo': 'bar'})
+ self.assertIn('pm', with_builtin_parameters)
+ self.assertIsInstance(with_builtin_parameters['pm'], type({}))
def test_add_builtin_parameters_allows_to_override_builtin(self):
- with_builtin_parameters = add_builtin_parameters({"pm": "foo"})
- self.assertEqual(with_builtin_parameters["pm"], "foo")
+ with_builtin_parameters = add_builtin_parameters({'pm': 'foo'})
+ self.assertEqual(with_builtin_parameters['pm'], 'foo')
def test_builtin_parameters_include_run_uuid(self):
- with_builtin_parameters = add_builtin_parameters({"foo": "bar"})
- self.assertIn("run_uuid", with_builtin_parameters["pm"])
+ with_builtin_parameters = add_builtin_parameters({'foo': 'bar'})
+ self.assertIn('run_uuid', with_builtin_parameters['pm'])
def test_builtin_parameters_include_current_datetime_local(self):
- with_builtin_parameters = add_builtin_parameters({"foo": "bar"})
- self.assertIn("current_datetime_local", with_builtin_parameters["pm"])
- self.assertIsInstance(with_builtin_parameters["pm"]["current_datetime_local"], datetime)
+ with_builtin_parameters = add_builtin_parameters({'foo': 'bar'})
+ self.assertIn('current_datetime_local', with_builtin_parameters['pm'])
+ self.assertIsInstance(with_builtin_parameters['pm']['current_datetime_local'], datetime)
def test_builtin_parameters_include_current_datetime_utc(self):
- with_builtin_parameters = add_builtin_parameters({"foo": "bar"})
- self.assertIn("current_datetime_utc", with_builtin_parameters["pm"])
- self.assertIsInstance(with_builtin_parameters["pm"]["current_datetime_utc"], datetime)
+ with_builtin_parameters = add_builtin_parameters({'foo': 'bar'})
+ self.assertIn('current_datetime_utc', with_builtin_parameters['pm'])
+ self.assertIsInstance(with_builtin_parameters['pm']['current_datetime_utc'], datetime)
class TestPathParameterizing(unittest.TestCase):
def test_plain_text_path_with_empty_parameters_object(self):
- self.assertEqual(parameterize_path("foo/bar", {}), "foo/bar")
+ self.assertEqual(parameterize_path('foo/bar', {}), 'foo/bar')
def test_plain_text_path_with_none_parameters(self):
- self.assertEqual(parameterize_path("foo/bar", None), "foo/bar")
+ self.assertEqual(parameterize_path('foo/bar', None), 'foo/bar')
def test_plain_text_path_with_unused_parameters(self):
- self.assertEqual(parameterize_path("foo/bar", {"baz": "quux"}), "foo/bar")
+ self.assertEqual(parameterize_path('foo/bar', {'baz': 'quux'}), 'foo/bar')
def test_path_with_single_parameter(self):
- self.assertEqual(parameterize_path("foo/bar/{baz}", {"baz": "quux"}), "foo/bar/quux")
+ self.assertEqual(parameterize_path('foo/bar/{baz}', {'baz': 'quux'}), 'foo/bar/quux')
def test_path_with_boolean_parameter(self):
- self.assertEqual(parameterize_path("foo/bar/{baz}", {"baz": False}), "foo/bar/False")
+ self.assertEqual(parameterize_path('foo/bar/{baz}', {'baz': False}), 'foo/bar/False')
def test_path_with_dict_parameter(self):
- self.assertEqual(
- parameterize_path("foo/{bar[baz]}/", {"bar": {"baz": "quux"}}), "foo/quux/"
- )
+ self.assertEqual(parameterize_path('foo/{bar[baz]}/', {'bar': {'baz': 'quux'}}), 'foo/quux/')
def test_path_with_list_parameter(self):
- self.assertEqual(parameterize_path("foo/{bar[0]}/", {"bar": [1, 2, 3]}), "foo/1/")
- self.assertEqual(parameterize_path("foo/{bar[2]}/", {"bar": [1, 2, 3]}), "foo/3/")
+ self.assertEqual(parameterize_path('foo/{bar[0]}/', {'bar': [1, 2, 3]}), 'foo/1/')
+ self.assertEqual(parameterize_path('foo/{bar[2]}/', {'bar': [1, 2, 3]}), 'foo/3/')
def test_path_with_none_parameter(self):
- self.assertEqual(parameterize_path("foo/bar/{baz}", {"baz": None}), "foo/bar/None")
+ self.assertEqual(parameterize_path('foo/bar/{baz}', {'baz': None}), 'foo/bar/None')
def test_path_with_numeric_parameter(self):
- self.assertEqual(parameterize_path("foo/bar/{baz}", {"baz": 42}), "foo/bar/42")
+ self.assertEqual(parameterize_path('foo/bar/{baz}', {'baz': 42}), 'foo/bar/42')
def test_path_with_numeric_format_string(self):
- self.assertEqual(parameterize_path("foo/bar/{baz:03d}", {"baz": 42}), "foo/bar/042")
+ self.assertEqual(parameterize_path('foo/bar/{baz:03d}', {'baz': 42}), 'foo/bar/042')
def test_path_with_float_format_string(self):
- self.assertEqual(parameterize_path("foo/bar/{baz:.03f}", {"baz": 0.3}), "foo/bar/0.300")
+ self.assertEqual(parameterize_path('foo/bar/{baz:.03f}', {'baz': 0.3}), 'foo/bar/0.300')
def test_path_with_multiple_parameter(self):
- self.assertEqual(
- parameterize_path("{foo}/{baz}", {"foo": "bar", "baz": "quux"}), "bar/quux"
- )
+ self.assertEqual(parameterize_path('{foo}/{baz}', {'foo': 'bar', 'baz': 'quux'}), 'bar/quux')
def test_parameterized_path_with_undefined_parameter(self):
with self.assertRaises(PapermillMissingParameterException) as context:
- parameterize_path("{foo}", {})
+ parameterize_path('{foo}', {})
self.assertEqual(str(context.exception), "Missing parameter 'foo'")
def test_parameterized_path_with_none_parameters(self):
with self.assertRaises(PapermillMissingParameterException) as context:
- parameterize_path("{foo}", None)
+ parameterize_path('{foo}', None)
self.assertEqual(str(context.exception), "Missing parameter 'foo'")
def test_path_of_none_returns_none(self):
@@ -172,6 +168,6 @@ def test_path_of_none_returns_none(self):
self.assertIsNone(parameterize_path(path=None, parameters=None))
def test_path_of_notebook_node_returns_input(self):
- test_nb = load_notebook_node(get_notebook_path("simple_execute.ipynb"))
+ test_nb = load_notebook_node(get_notebook_path('simple_execute.ipynb'))
result_nb = parameterize_path(test_nb, parameters=None)
self.assertIs(result_nb, test_nb)
diff --git a/papermill/tests/test_s3.py b/papermill/tests/test_s3.py
index 20455c11..de86f5b6 100644
--- a/papermill/tests/test_s3.py
+++ b/papermill/tests/test_s3.py
@@ -1,13 +1,13 @@
# The following tests are purposely limited to the interface exposed by iorw.py
import os.path
-import pytest
+
import boto3
import moto
-
+import pytest
from moto import mock_s3
-from ..s3 import Bucket, Prefix, Key, S3
+from ..s3 import S3, Bucket, Key, Prefix
@pytest.fixture
@@ -121,8 +121,8 @@ def test_key_init():
def test_key_repr():
- k = Key("foo", "bar")
- assert repr(k) == "s3://foo/bar"
+ k = Key('foo', 'bar')
+ assert repr(k) == 's3://foo/bar'
def test_key_defaults():
@@ -156,20 +156,21 @@ def test_s3_defaults():
with open(os.path.join(local_dir, test_file_path)) as f:
test_nb_content = f.read()
-no_empty_lines = lambda s: "\n".join([l for l in s.split('\n') if len(l) > 0])
+no_empty_lines = lambda s: '\n'.join([l for l in s.split('\n') if len(l) > 0])
test_clean_nb_content = no_empty_lines(test_nb_content)
-read_from_gen = lambda g: "\n".join(g)
+read_from_gen = lambda g: '\n'.join(g)
-@pytest.fixture(scope="function")
+@pytest.fixture(scope='function')
def s3_client():
mock_s3 = moto.mock_s3()
mock_s3.start()
client = boto3.client('s3')
client.create_bucket(
- Bucket=test_bucket_name, CreateBucketConfiguration={'LocationConstraint': 'us-west-2'}
+ Bucket=test_bucket_name,
+ CreateBucketConfiguration={'LocationConstraint': 'us-west-2'},
)
client.put_object(Bucket=test_bucket_name, Key=test_file_path, Body=test_nb_content)
client.put_object(Bucket=test_bucket_name, Key=test_empty_file_path, Body='')
@@ -184,19 +185,19 @@ def s3_client():
def test_s3_read(s3_client):
- s3_path = f"s3://{test_bucket_name}/{test_file_path}"
+ s3_path = f's3://{test_bucket_name}/{test_file_path}'
data = read_from_gen(s3_client.read(s3_path))
assert data == test_clean_nb_content
def test_s3_read_empty(s3_client):
- s3_path = f"s3://{test_bucket_name}/{test_empty_file_path}"
+ s3_path = f's3://{test_bucket_name}/{test_empty_file_path}'
data = read_from_gen(s3_client.read(s3_path))
assert data == ''
def test_s3_write(s3_client):
- s3_path = f"s3://{test_bucket_name}/{test_file_path}.txt"
+ s3_path = f's3://{test_bucket_name}/{test_file_path}.txt'
s3_client.cp_string(test_string, s3_path)
data = read_from_gen(s3_client.read(s3_path))
@@ -204,7 +205,7 @@ def test_s3_write(s3_client):
def test_s3_overwrite(s3_client):
- s3_path = f"s3://{test_bucket_name}/{test_file_path}"
+ s3_path = f's3://{test_bucket_name}/{test_file_path}'
s3_client.cp_string(test_string, s3_path)
data = read_from_gen(s3_client.read(s3_path))
@@ -213,8 +214,8 @@ def test_s3_overwrite(s3_client):
def test_s3_listdir(s3_client):
dir_name = os.path.dirname(test_file_path)
- s3_dir = f"s3://{test_bucket_name}/{dir_name}"
- s3_path = f"s3://{test_bucket_name}/{test_file_path}"
+ s3_dir = f's3://{test_bucket_name}/{dir_name}'
+ s3_path = f's3://{test_bucket_name}/{test_file_path}'
dir_listings = s3_client.listdir(s3_dir)
assert len(dir_listings) == 2
assert s3_path in dir_listings
diff --git a/papermill/tests/test_translators.py b/papermill/tests/test_translators.py
index fd433d6f..ab49475d 100644
--- a/papermill/tests/test_translators.py
+++ b/papermill/tests/test_translators.py
@@ -1,8 +1,7 @@
-import pytest
-
-from unittest.mock import Mock
from collections import OrderedDict
+from unittest.mock import Mock
+import pytest
from nbformat.v4 import new_code_cell
from .. import translators
@@ -11,19 +10,19 @@
@pytest.mark.parametrize(
- "test_input,expected",
+ 'test_input,expected',
[
- ("foo", '"foo"'),
+ ('foo', '"foo"'),
('{"foo": "bar"}', '"{\\"foo\\": \\"bar\\"}"'),
- ({"foo": "bar"}, '{"foo": "bar"}'),
- ({"foo": '"bar"'}, '{"foo": "\\"bar\\""}'),
- ({"foo": ["bar"]}, '{"foo": ["bar"]}'),
- ({"foo": {"bar": "baz"}}, '{"foo": {"bar": "baz"}}'),
- ({"foo": {"bar": '"baz"'}}, '{"foo": {"bar": "\\"baz\\""}}'),
- (["foo"], '["foo"]'),
- (["foo", '"bar"'], '["foo", "\\"bar\\""]'),
- ([{"foo": "bar"}], '[{"foo": "bar"}]'),
- ([{"foo": '"bar"'}], '[{"foo": "\\"bar\\""}]'),
+ ({'foo': 'bar'}, '{"foo": "bar"}'),
+ ({'foo': '"bar"'}, '{"foo": "\\"bar\\""}'),
+ ({'foo': ['bar']}, '{"foo": ["bar"]}'),
+ ({'foo': {'bar': 'baz'}}, '{"foo": {"bar": "baz"}}'),
+ ({'foo': {'bar': '"baz"'}}, '{"foo": {"bar": "\\"baz\\""}}'),
+ (['foo'], '["foo"]'),
+ (['foo', '"bar"'], '["foo", "\\"bar\\""]'),
+ ([{'foo': 'bar'}], '[{"foo": "bar"}]'),
+ ([{'foo': '"bar"'}], '[{"foo": "\\"bar\\""}]'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
@@ -41,14 +40,14 @@ def test_translate_type_python(test_input, expected):
@pytest.mark.parametrize(
- "parameters,expected",
+ 'parameters,expected',
[
- ({"foo": "bar"}, '# Parameters\nfoo = "bar"\n'),
- ({"foo": True}, '# Parameters\nfoo = True\n'),
- ({"foo": 5}, '# Parameters\nfoo = 5\n'),
- ({"foo": 1.1}, '# Parameters\nfoo = 1.1\n'),
- ({"foo": ['bar', 'baz']}, '# Parameters\nfoo = ["bar", "baz"]\n'),
- ({"foo": {'bar': 'baz'}}, '# Parameters\nfoo = {"bar": "baz"}\n'),
+ ({'foo': 'bar'}, '# Parameters\nfoo = "bar"\n'),
+ ({'foo': True}, '# Parameters\nfoo = True\n'),
+ ({'foo': 5}, '# Parameters\nfoo = 5\n'),
+ ({'foo': 1.1}, '# Parameters\nfoo = 1.1\n'),
+ ({'foo': ['bar', 'baz']}, '# Parameters\nfoo = ["bar", "baz"]\n'),
+ ({'foo': {'bar': 'baz'}}, '# Parameters\nfoo = {"bar": "baz"}\n'),
(
OrderedDict([['foo', 'bar'], ['baz', ['buz']]]),
'# Parameters\nfoo = "bar"\nbaz = ["buz"]\n',
@@ -60,42 +59,63 @@ def test_translate_codify_python(parameters, expected):
@pytest.mark.parametrize(
- "test_input,expected", [("", '#'), ("foo", '# foo'), ("['best effort']", "# ['best effort']")]
+ 'test_input,expected',
+ [('', '#'), ('foo', '# foo'), ("['best effort']", "# ['best effort']")],
)
def test_translate_comment_python(test_input, expected):
assert translators.PythonTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
- "test_input,expected",
+ 'test_input,expected',
[
- ("a = 2", [Parameter("a", "None", "2", "")]),
- ("a: int = 2", [Parameter("a", "int", "2", "")]),
- ("a = 2 # type:int", [Parameter("a", "int", "2", "")]),
- ("a = False # Nice variable a", [Parameter("a", "None", "False", "Nice variable a")]),
+ ('a = 2', [Parameter('a', 'None', '2', '')]),
+ ('a: int = 2', [Parameter('a', 'int', '2', '')]),
+ ('a = 2 # type:int', [Parameter('a', 'int', '2', '')]),
+ (
+ 'a = False # Nice variable a',
+ [Parameter('a', 'None', 'False', 'Nice variable a')],
+ ),
(
- "a: float = 2.258 # type: int Nice variable a",
- [Parameter("a", "float", "2.258", "Nice variable a")],
+ 'a: float = 2.258 # type: int Nice variable a',
+ [Parameter('a', 'float', '2.258', 'Nice variable a')],
),
(
"a = 'this is a string' # type: int Nice variable a",
- [Parameter("a", "int", "'this is a string'", "Nice variable a")],
+ [Parameter('a', 'int', "'this is a string'", 'Nice variable a')],
),
(
"a: List[str] = ['this', 'is', 'a', 'string', 'list'] # Nice variable a",
[
Parameter(
- "a", "List[str]", "['this', 'is', 'a', 'string', 'list']", "Nice variable a"
+ 'a',
+ 'List[str]',
+ "['this', 'is', 'a', 'string', 'list']",
+ 'Nice variable a',
)
],
),
(
"a: List[str] = [\n 'this', # First\n 'is',\n 'a',\n 'string',\n 'list' # Last\n] # Nice variable a", # noqa
- [Parameter("a", "List[str]", "['this','is','a','string','list']", "Nice variable a")],
+ [
+ Parameter(
+ 'a',
+ 'List[str]',
+ "['this','is','a','string','list']",
+ 'Nice variable a',
+ )
+ ],
),
(
"a: List[str] = [\n 'this',\n 'is',\n 'a',\n 'string',\n 'list'\n] # Nice variable a", # noqa
- [Parameter("a", "List[str]", "['this','is','a','string','list']", "Nice variable a")],
+ [
+ Parameter(
+ 'a',
+ 'List[str]',
+ "['this','is','a','string','list']",
+ 'Nice variable a',
+ )
+ ],
),
(
"""a: List[str] = [
@@ -110,8 +130,13 @@ def test_translate_comment_python(test_input, expected):
b: float = -2.3432 # My b variable
""",
[
- Parameter("a", "List[str]", "['this','is','a','string','list']", "Nice variable a"),
- Parameter("b", "float", "-2.3432", "My b variable"),
+ Parameter(
+ 'a',
+ 'List[str]',
+ "['this','is','a','string','list']",
+ 'Nice variable a',
+ ),
+ Parameter('b', 'float', '-2.3432', 'My b variable'),
],
),
],
@@ -122,19 +147,19 @@ def test_inspect_python(test_input, expected):
@pytest.mark.parametrize(
- "test_input,expected",
+ 'test_input,expected',
[
- ("foo", '"foo"'),
+ ('foo', '"foo"'),
('{"foo": "bar"}', '"{\\"foo\\": \\"bar\\"}"'),
- ({"foo": "bar"}, 'list("foo" = "bar")'),
- ({"foo": '"bar"'}, 'list("foo" = "\\"bar\\"")'),
- ({"foo": ["bar"]}, 'list("foo" = list("bar"))'),
- ({"foo": {"bar": "baz"}}, 'list("foo" = list("bar" = "baz"))'),
- ({"foo": {"bar": '"baz"'}}, 'list("foo" = list("bar" = "\\"baz\\""))'),
- (["foo"], 'list("foo")'),
- (["foo", '"bar"'], 'list("foo", "\\"bar\\"")'),
- ([{"foo": "bar"}], 'list(list("foo" = "bar"))'),
- ([{"foo": '"bar"'}], 'list(list("foo" = "\\"bar\\""))'),
+ ({'foo': 'bar'}, 'list("foo" = "bar")'),
+ ({'foo': '"bar"'}, 'list("foo" = "\\"bar\\"")'),
+ ({'foo': ['bar']}, 'list("foo" = list("bar"))'),
+ ({'foo': {'bar': 'baz'}}, 'list("foo" = list("bar" = "baz"))'),
+ ({'foo': {'bar': '"baz"'}}, 'list("foo" = list("bar" = "\\"baz\\""))'),
+ (['foo'], 'list("foo")'),
+ (['foo', '"bar"'], 'list("foo", "\\"bar\\"")'),
+ ([{'foo': 'bar'}], 'list(list("foo" = "bar"))'),
+ ([{'foo': '"bar"'}], 'list(list("foo" = "\\"bar\\""))'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
@@ -149,27 +174,28 @@ def test_translate_type_r(test_input, expected):
@pytest.mark.parametrize(
- "test_input,expected", [("", '#'), ("foo", '# foo'), ("['best effort']", "# ['best effort']")]
+ 'test_input,expected',
+ [('', '#'), ('foo', '# foo'), ("['best effort']", "# ['best effort']")],
)
def test_translate_comment_r(test_input, expected):
assert translators.RTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
- "parameters,expected",
+ 'parameters,expected',
[
- ({"foo": "bar"}, '# Parameters\nfoo = "bar"\n'),
- ({"foo": True}, '# Parameters\nfoo = TRUE\n'),
- ({"foo": 5}, '# Parameters\nfoo = 5\n'),
- ({"foo": 1.1}, '# Parameters\nfoo = 1.1\n'),
- ({"foo": ['bar', 'baz']}, '# Parameters\nfoo = list("bar", "baz")\n'),
- ({"foo": {'bar': 'baz'}}, '# Parameters\nfoo = list("bar" = "baz")\n'),
+ ({'foo': 'bar'}, '# Parameters\nfoo = "bar"\n'),
+ ({'foo': True}, '# Parameters\nfoo = TRUE\n'),
+ ({'foo': 5}, '# Parameters\nfoo = 5\n'),
+ ({'foo': 1.1}, '# Parameters\nfoo = 1.1\n'),
+ ({'foo': ['bar', 'baz']}, '# Parameters\nfoo = list("bar", "baz")\n'),
+ ({'foo': {'bar': 'baz'}}, '# Parameters\nfoo = list("bar" = "baz")\n'),
(
OrderedDict([['foo', 'bar'], ['baz', ['buz']]]),
'# Parameters\nfoo = "bar"\nbaz = list("buz")\n',
),
# Underscores remove
- ({"___foo": 5}, '# Parameters\nfoo = 5\n'),
+ ({'___foo': 5}, '# Parameters\nfoo = 5\n'),
],
)
def test_translate_codify_r(parameters, expected):
@@ -177,19 +203,19 @@ def test_translate_codify_r(parameters, expected):
@pytest.mark.parametrize(
- "test_input,expected",
+ 'test_input,expected',
[
- ("foo", '"foo"'),
+ ('foo', '"foo"'),
('{"foo": "bar"}', '"{\\"foo\\": \\"bar\\"}"'),
- ({"foo": "bar"}, 'Map("foo" -> "bar")'),
- ({"foo": '"bar"'}, 'Map("foo" -> "\\"bar\\"")'),
- ({"foo": ["bar"]}, 'Map("foo" -> Seq("bar"))'),
- ({"foo": {"bar": "baz"}}, 'Map("foo" -> Map("bar" -> "baz"))'),
- ({"foo": {"bar": '"baz"'}}, 'Map("foo" -> Map("bar" -> "\\"baz\\""))'),
- (["foo"], 'Seq("foo")'),
- (["foo", '"bar"'], 'Seq("foo", "\\"bar\\"")'),
- ([{"foo": "bar"}], 'Seq(Map("foo" -> "bar"))'),
- ([{"foo": '"bar"'}], 'Seq(Map("foo" -> "\\"bar\\""))'),
+ ({'foo': 'bar'}, 'Map("foo" -> "bar")'),
+ ({'foo': '"bar"'}, 'Map("foo" -> "\\"bar\\"")'),
+ ({'foo': ['bar']}, 'Map("foo" -> Seq("bar"))'),
+ ({'foo': {'bar': 'baz'}}, 'Map("foo" -> Map("bar" -> "baz"))'),
+ ({'foo': {'bar': '"baz"'}}, 'Map("foo" -> Map("bar" -> "\\"baz\\""))'),
+ (['foo'], 'Seq("foo")'),
+ (['foo', '"bar"'], 'Seq("foo", "\\"bar\\"")'),
+ ([{'foo': 'bar'}], 'Seq(Map("foo" -> "bar"))'),
+ ([{'foo': '"bar"'}], 'Seq(Map("foo" -> "\\"bar\\""))'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
@@ -206,19 +232,19 @@ def test_translate_type_scala(test_input, expected):
@pytest.mark.parametrize(
- "test_input,expected",
- [("", '//'), ("foo", '// foo'), ("['best effort']", "// ['best effort']")],
+ 'test_input,expected',
+ [('', '//'), ('foo', '// foo'), ("['best effort']", "// ['best effort']")],
)
def test_translate_comment_scala(test_input, expected):
assert translators.ScalaTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
- "input_name,input_value,expected",
+ 'input_name,input_value,expected',
[
- ("foo", '""', 'val foo = ""'),
- ("foo", '"bar"', 'val foo = "bar"'),
- ("foo", 'Map("foo" -> "bar")', 'val foo = Map("foo" -> "bar")'),
+ ('foo', '""', 'val foo = ""'),
+ ('foo', '"bar"', 'val foo = "bar"'),
+ ('foo', 'Map("foo" -> "bar")', 'val foo = Map("foo" -> "bar")'),
],
)
def test_translate_assign_scala(input_name, input_value, expected):
@@ -226,14 +252,14 @@ def test_translate_assign_scala(input_name, input_value, expected):
@pytest.mark.parametrize(
- "parameters,expected",
+ 'parameters,expected',
[
- ({"foo": "bar"}, '// Parameters\nval foo = "bar"\n'),
- ({"foo": True}, '// Parameters\nval foo = true\n'),
- ({"foo": 5}, '// Parameters\nval foo = 5\n'),
- ({"foo": 1.1}, '// Parameters\nval foo = 1.1\n'),
- ({"foo": ['bar', 'baz']}, '// Parameters\nval foo = Seq("bar", "baz")\n'),
- ({"foo": {'bar': 'baz'}}, '// Parameters\nval foo = Map("bar" -> "baz")\n'),
+ ({'foo': 'bar'}, '// Parameters\nval foo = "bar"\n'),
+ ({'foo': True}, '// Parameters\nval foo = true\n'),
+ ({'foo': 5}, '// Parameters\nval foo = 5\n'),
+ ({'foo': 1.1}, '// Parameters\nval foo = 1.1\n'),
+ ({'foo': ['bar', 'baz']}, '// Parameters\nval foo = Seq("bar", "baz")\n'),
+ ({'foo': {'bar': 'baz'}}, '// Parameters\nval foo = Map("bar" -> "baz")\n'),
(
OrderedDict([['foo', 'bar'], ['baz', ['buz']]]),
'// Parameters\nval foo = "bar"\nval baz = Seq("buz")\n',
@@ -246,15 +272,18 @@ def test_translate_codify_scala(parameters, expected):
# C# section
@pytest.mark.parametrize(
- "test_input,expected",
+ 'test_input,expected',
[
- ("foo", '"foo"'),
+ ('foo', '"foo"'),
('{"foo": "bar"}', '"{\\"foo\\": \\"bar\\"}"'),
- ({"foo": "bar"}, 'new Dictionary{ { "foo" , "bar" } }'),
- ({"foo": '"bar"'}, 'new Dictionary{ { "foo" , "\\"bar\\"" } }'),
- (["foo"], 'new [] { "foo" }'),
- (["foo", '"bar"'], 'new [] { "foo", "\\"bar\\"" }'),
- ([{"foo": "bar"}], 'new [] { new Dictionary{ { "foo" , "bar" } } }'),
+ ({'foo': 'bar'}, 'new Dictionary{ { "foo" , "bar" } }'),
+ ({'foo': '"bar"'}, 'new Dictionary{ { "foo" , "\\"bar\\"" } }'),
+ (['foo'], 'new [] { "foo" }'),
+ (['foo', '"bar"'], 'new [] { "foo", "\\"bar\\"" }'),
+ (
+ [{'foo': 'bar'}],
+ 'new [] { new Dictionary{ { "foo" , "bar" } } }',
+ ),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
@@ -270,31 +299,34 @@ def test_translate_type_csharp(test_input, expected):
@pytest.mark.parametrize(
- "test_input,expected",
- [("", '//'), ("foo", '// foo'), ("['best effort']", "// ['best effort']")],
+ 'test_input,expected',
+ [('', '//'), ('foo', '// foo'), ("['best effort']", "// ['best effort']")],
)
def test_translate_comment_csharp(test_input, expected):
assert translators.CSharpTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
- "input_name,input_value,expected",
- [("foo", '""', 'var foo = "";'), ("foo", '"bar"', 'var foo = "bar";')],
+ 'input_name,input_value,expected',
+ [('foo', '""', 'var foo = "";'), ('foo', '"bar"', 'var foo = "bar";')],
)
def test_translate_assign_csharp(input_name, input_value, expected):
assert translators.CSharpTranslator.assign(input_name, input_value) == expected
@pytest.mark.parametrize(
- "parameters,expected",
+ 'parameters,expected',
[
- ({"foo": "bar"}, '// Parameters\nvar foo = "bar";\n'),
- ({"foo": True}, '// Parameters\nvar foo = true;\n'),
- ({"foo": 5}, '// Parameters\nvar foo = 5;\n'),
- ({"foo": 1.1}, '// Parameters\nvar foo = 1.1;\n'),
- ({"foo": ['bar', 'baz']}, '// Parameters\nvar foo = new [] { "bar", "baz" };\n'),
+ ({'foo': 'bar'}, '// Parameters\nvar foo = "bar";\n'),
+ ({'foo': True}, '// Parameters\nvar foo = true;\n'),
+ ({'foo': 5}, '// Parameters\nvar foo = 5;\n'),
+ ({'foo': 1.1}, '// Parameters\nvar foo = 1.1;\n'),
(
- {"foo": {'bar': 'baz'}},
+ {'foo': ['bar', 'baz']},
+ '// Parameters\nvar foo = new [] { "bar", "baz" };\n',
+ ),
+ (
+ {'foo': {'bar': 'baz'}},
'// Parameters\nvar foo = new Dictionary{ { "bar" , "baz" } };\n',
),
],
@@ -305,26 +337,26 @@ def test_translate_codify_csharp(parameters, expected):
# Powershell section
@pytest.mark.parametrize(
- "test_input,expected",
+ 'test_input,expected',
[
- ("foo", '"foo"'),
+ ('foo', '"foo"'),
('{"foo": "bar"}', '"{`"foo`": `"bar`"}"'),
- ({"foo": "bar"}, '@{"foo" = "bar"}'),
- ({"foo": '"bar"'}, '@{"foo" = "`"bar`""}'),
- ({"foo": ["bar"]}, '@{"foo" = @("bar")}'),
- ({"foo": {"bar": "baz"}}, '@{"foo" = @{"bar" = "baz"}}'),
- ({"foo": {"bar": '"baz"'}}, '@{"foo" = @{"bar" = "`"baz`""}}'),
- (["foo"], '@("foo")'),
- (["foo", '"bar"'], '@("foo", "`"bar`"")'),
- ([{"foo": "bar"}], '@(@{"foo" = "bar"})'),
- ([{"foo": '"bar"'}], '@(@{"foo" = "`"bar`""})'),
+ ({'foo': 'bar'}, '@{"foo" = "bar"}'),
+ ({'foo': '"bar"'}, '@{"foo" = "`"bar`""}'),
+ ({'foo': ['bar']}, '@{"foo" = @("bar")}'),
+ ({'foo': {'bar': 'baz'}}, '@{"foo" = @{"bar" = "baz"}}'),
+ ({'foo': {'bar': '"baz"'}}, '@{"foo" = @{"bar" = "`"baz`""}}'),
+ (['foo'], '@("foo")'),
+ (['foo', '"bar"'], '@("foo", "`"bar`"")'),
+ ([{'foo': 'bar'}], '@(@{"foo" = "bar"})'),
+ ([{'foo': '"bar"'}], '@(@{"foo" = "`"bar`""})'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
(-5432.1, '-5432.1'),
- (float('nan'), "[double]::NaN"),
- (float('-inf'), "[double]::NegativeInfinity"),
- (float('inf'), "[double]::PositiveInfinity"),
+ (float('nan'), '[double]::NaN'),
+ (float('-inf'), '[double]::NegativeInfinity'),
+ (float('inf'), '[double]::PositiveInfinity'),
(True, '$True'),
(False, '$False'),
(None, '$Null'),
@@ -335,14 +367,14 @@ def test_translate_type_powershell(test_input, expected):
@pytest.mark.parametrize(
- "parameters,expected",
+ 'parameters,expected',
[
- ({"foo": "bar"}, '# Parameters\n$foo = "bar"\n'),
- ({"foo": True}, '# Parameters\n$foo = $True\n'),
- ({"foo": 5}, '# Parameters\n$foo = 5\n'),
- ({"foo": 1.1}, '# Parameters\n$foo = 1.1\n'),
- ({"foo": ['bar', 'baz']}, '# Parameters\n$foo = @("bar", "baz")\n'),
- ({"foo": {'bar': 'baz'}}, '# Parameters\n$foo = @{"bar" = "baz"}\n'),
+ ({'foo': 'bar'}, '# Parameters\n$foo = "bar"\n'),
+ ({'foo': True}, '# Parameters\n$foo = $True\n'),
+ ({'foo': 5}, '# Parameters\n$foo = 5\n'),
+ ({'foo': 1.1}, '# Parameters\n$foo = 1.1\n'),
+ ({'foo': ['bar', 'baz']}, '# Parameters\n$foo = @("bar", "baz")\n'),
+ ({'foo': {'bar': 'baz'}}, '# Parameters\n$foo = @{"bar" = "baz"}\n'),
(
OrderedDict([['foo', 'bar'], ['baz', ['buz']]]),
'# Parameters\n$foo = "bar"\n$baz = @("buz")\n',
@@ -354,15 +386,16 @@ def test_translate_codify_powershell(parameters, expected):
@pytest.mark.parametrize(
- "input_name,input_value,expected",
- [("foo", '""', '$foo = ""'), ("foo", '"bar"', '$foo = "bar"')],
+ 'input_name,input_value,expected',
+ [('foo', '""', '$foo = ""'), ('foo', '"bar"', '$foo = "bar"')],
)
def test_translate_assign_powershell(input_name, input_value, expected):
assert translators.PowershellTranslator.assign(input_name, input_value) == expected
@pytest.mark.parametrize(
- "test_input,expected", [("", '#'), ("foo", '# foo'), ("['best effort']", "# ['best effort']")]
+ 'test_input,expected',
+ [('', '#'), ('foo', '# foo'), ("['best effort']", "# ['best effort']")],
)
def test_translate_comment_powershell(test_input, expected):
assert translators.PowershellTranslator.comment(test_input) == expected
@@ -370,15 +403,15 @@ def test_translate_comment_powershell(test_input, expected):
# F# section
@pytest.mark.parametrize(
- "test_input,expected",
+ 'test_input,expected',
[
- ("foo", '"foo"'),
+ ('foo', '"foo"'),
('{"foo": "bar"}', '"{\\"foo\\": \\"bar\\"}"'),
- ({"foo": "bar"}, '[ ("foo", "bar" :> IComparable) ] |> Map.ofList'),
- ({"foo": '"bar"'}, '[ ("foo", "\\"bar\\"" :> IComparable) ] |> Map.ofList'),
- (["foo"], '[ "foo" ]'),
- (["foo", '"bar"'], '[ "foo"; "\\"bar\\"" ]'),
- ([{"foo": "bar"}], '[ [ ("foo", "bar" :> IComparable) ] |> Map.ofList ]'),
+ ({'foo': 'bar'}, '[ ("foo", "bar" :> IComparable) ] |> Map.ofList'),
+ ({'foo': '"bar"'}, '[ ("foo", "\\"bar\\"" :> IComparable) ] |> Map.ofList'),
+ (['foo'], '[ "foo" ]'),
+ (['foo', '"bar"'], '[ "foo"; "\\"bar\\"" ]'),
+ ([{'foo': 'bar'}], '[ [ ("foo", "bar" :> IComparable) ] |> Map.ofList ]'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
@@ -394,31 +427,35 @@ def test_translate_type_fsharp(test_input, expected):
@pytest.mark.parametrize(
- "test_input,expected",
- [("", '(* *)'), ("foo", '(* foo *)'), ("['best effort']", "(* ['best effort'] *)")],
+ 'test_input,expected',
+ [
+ ('', '(* *)'),
+ ('foo', '(* foo *)'),
+ ("['best effort']", "(* ['best effort'] *)"),
+ ],
)
def test_translate_comment_fsharp(test_input, expected):
assert translators.FSharpTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
- "input_name,input_value,expected",
- [("foo", '""', 'let foo = ""'), ("foo", '"bar"', 'let foo = "bar"')],
+ 'input_name,input_value,expected',
+ [('foo', '""', 'let foo = ""'), ('foo', '"bar"', 'let foo = "bar"')],
)
def test_translate_assign_fsharp(input_name, input_value, expected):
assert translators.FSharpTranslator.assign(input_name, input_value) == expected
@pytest.mark.parametrize(
- "parameters,expected",
+ 'parameters,expected',
[
- ({"foo": "bar"}, '(* Parameters *)\nlet foo = "bar"\n'),
- ({"foo": True}, '(* Parameters *)\nlet foo = true\n'),
- ({"foo": 5}, '(* Parameters *)\nlet foo = 5\n'),
- ({"foo": 1.1}, '(* Parameters *)\nlet foo = 1.1\n'),
- ({"foo": ['bar', 'baz']}, '(* Parameters *)\nlet foo = [ "bar"; "baz" ]\n'),
+ ({'foo': 'bar'}, '(* Parameters *)\nlet foo = "bar"\n'),
+ ({'foo': True}, '(* Parameters *)\nlet foo = true\n'),
+ ({'foo': 5}, '(* Parameters *)\nlet foo = 5\n'),
+ ({'foo': 1.1}, '(* Parameters *)\nlet foo = 1.1\n'),
+ ({'foo': ['bar', 'baz']}, '(* Parameters *)\nlet foo = [ "bar"; "baz" ]\n'),
(
- {"foo": {'bar': 'baz'}},
+ {'foo': {'bar': 'baz'}},
'(* Parameters *)\nlet foo = [ ("bar", "baz" :> IComparable) ] |> Map.ofList\n',
),
],
@@ -428,19 +465,19 @@ def test_translate_codify_fsharp(parameters, expected):
@pytest.mark.parametrize(
- "test_input,expected",
+ 'test_input,expected',
[
- ("foo", '"foo"'),
+ ('foo', '"foo"'),
('{"foo": "bar"}', '"{\\"foo\\": \\"bar\\"}"'),
- ({"foo": "bar"}, 'Dict("foo" => "bar")'),
- ({"foo": '"bar"'}, 'Dict("foo" => "\\"bar\\"")'),
- ({"foo": ["bar"]}, 'Dict("foo" => ["bar"])'),
- ({"foo": {"bar": "baz"}}, 'Dict("foo" => Dict("bar" => "baz"))'),
- ({"foo": {"bar": '"baz"'}}, 'Dict("foo" => Dict("bar" => "\\"baz\\""))'),
- (["foo"], '["foo"]'),
- (["foo", '"bar"'], '["foo", "\\"bar\\""]'),
- ([{"foo": "bar"}], '[Dict("foo" => "bar")]'),
- ([{"foo": '"bar"'}], '[Dict("foo" => "\\"bar\\"")]'),
+ ({'foo': 'bar'}, 'Dict("foo" => "bar")'),
+ ({'foo': '"bar"'}, 'Dict("foo" => "\\"bar\\"")'),
+ ({'foo': ['bar']}, 'Dict("foo" => ["bar"])'),
+ ({'foo': {'bar': 'baz'}}, 'Dict("foo" => Dict("bar" => "baz"))'),
+ ({'foo': {'bar': '"baz"'}}, 'Dict("foo" => Dict("bar" => "\\"baz\\""))'),
+ (['foo'], '["foo"]'),
+ (['foo', '"bar"'], '["foo", "\\"bar\\""]'),
+ ([{'foo': 'bar'}], '[Dict("foo" => "bar")]'),
+ ([{'foo': '"bar"'}], '[Dict("foo" => "\\"bar\\"")]'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
@@ -455,14 +492,14 @@ def test_translate_type_julia(test_input, expected):
@pytest.mark.parametrize(
- "parameters,expected",
+ 'parameters,expected',
[
- ({"foo": "bar"}, '# Parameters\nfoo = "bar"\n'),
- ({"foo": True}, '# Parameters\nfoo = true\n'),
- ({"foo": 5}, '# Parameters\nfoo = 5\n'),
- ({"foo": 1.1}, '# Parameters\nfoo = 1.1\n'),
- ({"foo": ['bar', 'baz']}, '# Parameters\nfoo = ["bar", "baz"]\n'),
- ({"foo": {'bar': 'baz'}}, '# Parameters\nfoo = Dict("bar" => "baz")\n'),
+ ({'foo': 'bar'}, '# Parameters\nfoo = "bar"\n'),
+ ({'foo': True}, '# Parameters\nfoo = true\n'),
+ ({'foo': 5}, '# Parameters\nfoo = 5\n'),
+ ({'foo': 1.1}, '# Parameters\nfoo = 1.1\n'),
+ ({'foo': ['bar', 'baz']}, '# Parameters\nfoo = ["bar", "baz"]\n'),
+ ({'foo': {'bar': 'baz'}}, '# Parameters\nfoo = Dict("bar" => "baz")\n'),
(
OrderedDict([['foo', 'bar'], ['baz', ['buz']]]),
'# Parameters\nfoo = "bar"\nbaz = ["buz"]\n',
@@ -474,36 +511,37 @@ def test_translate_codify_julia(parameters, expected):
@pytest.mark.parametrize(
- "test_input,expected", [("", '#'), ("foo", '# foo'), ('["best effort"]', '# ["best effort"]')]
+ 'test_input,expected',
+ [('', '#'), ('foo', '# foo'), ('["best effort"]', '# ["best effort"]')],
)
def test_translate_comment_julia(test_input, expected):
assert translators.JuliaTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
- "test_input,expected",
+ 'test_input,expected',
[
- ("foo", '"foo"'),
+ ('foo', '"foo"'),
('{"foo": "bar"}', '"{""foo"": ""bar""}"'),
- ({1: "foo"}, 'containers.Map({\'1\'}, {"foo"})'),
- ({1.0: "foo"}, 'containers.Map({\'1.0\'}, {"foo"})'),
- ({None: "foo"}, 'containers.Map({\'None\'}, {"foo"})'),
- ({True: "foo"}, 'containers.Map({\'True\'}, {"foo"})'),
- ({"foo": "bar"}, 'containers.Map({\'foo\'}, {"bar"})'),
- ({"foo": '"bar"'}, 'containers.Map({\'foo\'}, {"""bar"""})'),
- ({"foo": ["bar"]}, 'containers.Map({\'foo\'}, {{"bar"}})'),
+ ({1: 'foo'}, 'containers.Map({\'1\'}, {"foo"})'),
+ ({1.0: 'foo'}, 'containers.Map({\'1.0\'}, {"foo"})'),
+ ({None: 'foo'}, 'containers.Map({\'None\'}, {"foo"})'),
+ ({True: 'foo'}, 'containers.Map({\'True\'}, {"foo"})'),
+ ({'foo': 'bar'}, 'containers.Map({\'foo\'}, {"bar"})'),
+ ({'foo': '"bar"'}, 'containers.Map({\'foo\'}, {"""bar"""})'),
+ ({'foo': ['bar']}, 'containers.Map({\'foo\'}, {{"bar"}})'),
(
- {"foo": {"bar": "baz"}},
- 'containers.Map({\'foo\'}, {containers.Map({\'bar\'}, {"baz"})})',
+ {'foo': {'bar': 'baz'}},
+ "containers.Map({'foo'}, {containers.Map({'bar'}, {\"baz\"})})",
),
(
- {"foo": {"bar": '"baz"'}},
+ {'foo': {'bar': '"baz"'}},
'containers.Map({\'foo\'}, {containers.Map({\'bar\'}, {"""baz"""})})',
),
- (["foo"], '{"foo"}'),
- (["foo", '"bar"'], '{"foo", """bar"""}'),
- ([{"foo": "bar"}], '{containers.Map({\'foo\'}, {"bar"})}'),
- ([{"foo": '"bar"'}], '{containers.Map({\'foo\'}, {"""bar"""})}'),
+ (['foo'], '{"foo"}'),
+ (['foo', '"bar"'], '{"foo", """bar"""}'),
+ ([{'foo': 'bar'}], '{containers.Map({\'foo\'}, {"bar"})}'),
+ ([{'foo': '"bar"'}], '{containers.Map({\'foo\'}, {"""bar"""})}'),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
@@ -518,14 +556,17 @@ def test_translate_type_matlab(test_input, expected):
@pytest.mark.parametrize(
- "parameters,expected",
+ 'parameters,expected',
[
- ({"foo": "bar"}, '% Parameters\nfoo = "bar";\n'),
- ({"foo": True}, '% Parameters\nfoo = true;\n'),
- ({"foo": 5}, '% Parameters\nfoo = 5;\n'),
- ({"foo": 1.1}, '% Parameters\nfoo = 1.1;\n'),
- ({"foo": ['bar', 'baz']}, '% Parameters\nfoo = {"bar", "baz"};\n'),
- ({"foo": {'bar': 'baz'}}, '% Parameters\nfoo = containers.Map({\'bar\'}, {"baz"});\n'),
+ ({'foo': 'bar'}, '% Parameters\nfoo = "bar";\n'),
+ ({'foo': True}, '% Parameters\nfoo = true;\n'),
+ ({'foo': 5}, '% Parameters\nfoo = 5;\n'),
+ ({'foo': 1.1}, '% Parameters\nfoo = 1.1;\n'),
+ ({'foo': ['bar', 'baz']}, '% Parameters\nfoo = {"bar", "baz"};\n'),
+ (
+ {'foo': {'bar': 'baz'}},
+ '% Parameters\nfoo = containers.Map({\'bar\'}, {"baz"});\n',
+ ),
(
OrderedDict([['foo', 'bar'], ['baz', ['buz']]]),
'% Parameters\nfoo = "bar";\nbaz = {"buz"};\n',
@@ -537,7 +578,8 @@ def test_translate_codify_matlab(parameters, expected):
@pytest.mark.parametrize(
- "test_input,expected", [("", '%'), ("foo", '% foo'), ("['best effort']", "% ['best effort']")]
+ 'test_input,expected',
+ [('', '%'), ('foo', '% foo'), ("['best effort']", "% ['best effort']")],
)
def test_translate_comment_matlab(test_input, expected):
assert translators.MatlabTranslator.comment(test_input) == expected
@@ -546,34 +588,32 @@ def test_translate_comment_matlab(test_input, expected):
def test_find_translator_with_exact_kernel_name():
my_new_kernel_translator = Mock()
my_new_language_translator = Mock()
- translators.papermill_translators.register("my_new_kernel", my_new_kernel_translator)
- translators.papermill_translators.register("my_new_language", my_new_language_translator)
+ translators.papermill_translators.register('my_new_kernel', my_new_kernel_translator)
+ translators.papermill_translators.register('my_new_language', my_new_language_translator)
assert (
- translators.papermill_translators.find_translator("my_new_kernel", "my_new_language")
+ translators.papermill_translators.find_translator('my_new_kernel', 'my_new_language')
is my_new_kernel_translator
)
def test_find_translator_with_exact_language():
my_new_language_translator = Mock()
- translators.papermill_translators.register("my_new_language", my_new_language_translator)
+ translators.papermill_translators.register('my_new_language', my_new_language_translator)
assert (
- translators.papermill_translators.find_translator("unregistered_kernel", "my_new_language")
+ translators.papermill_translators.find_translator('unregistered_kernel', 'my_new_language')
is my_new_language_translator
)
def test_find_translator_with_no_such_kernel_or_language():
with pytest.raises(PapermillException):
- translators.papermill_translators.find_translator(
- "unregistered_kernel", "unregistered_language"
- )
+ translators.papermill_translators.find_translator('unregistered_kernel', 'unregistered_language')
def test_translate_uses_str_representation_of_unknown_types():
class FooClass:
def __str__(self):
- return "foo"
+ return 'foo'
obj = FooClass()
assert translators.Translator.translate(obj) == '"foo"'
@@ -584,7 +624,7 @@ class MyNewTranslator(translators.Translator):
pass
with pytest.raises(NotImplementedError):
- MyNewTranslator.translate_dict({"foo": "bar"})
+ MyNewTranslator.translate_dict({'foo': 'bar'})
def test_translator_must_implement_translate_list():
@@ -592,7 +632,7 @@ class MyNewTranslator(translators.Translator):
pass
with pytest.raises(NotImplementedError):
- MyNewTranslator.translate_list(["foo", "bar"])
+ MyNewTranslator.translate_list(['foo', 'bar'])
def test_translator_must_implement_comment():
@@ -600,17 +640,17 @@ class MyNewTranslator(translators.Translator):
pass
with pytest.raises(NotImplementedError):
- MyNewTranslator.comment("foo")
+ MyNewTranslator.comment('foo')
# Bash/sh section
@pytest.mark.parametrize(
- "test_input,expected",
+ 'test_input,expected',
[
- ("foo", "foo"),
- ("foo space", "'foo space'"),
+ ('foo', 'foo'),
+ ('foo space', "'foo space'"),
("foo's apostrophe", "'foo'\"'\"'s apostrophe'"),
- ("shell ( is ) ", "'shell ( is ) '"),
+ ('shell ( is ) ', "'shell ( is ) '"),
(12345, '12345'),
(-54321, '-54321'),
(1.2345, '1.2345'),
@@ -625,20 +665,21 @@ def test_translate_type_sh(test_input, expected):
@pytest.mark.parametrize(
- "test_input,expected", [("", '#'), ("foo", '# foo'), ("['best effort']", "# ['best effort']")]
+ 'test_input,expected',
+ [('', '#'), ('foo', '# foo'), ("['best effort']", "# ['best effort']")],
)
def test_translate_comment_sh(test_input, expected):
assert translators.BashTranslator.comment(test_input) == expected
@pytest.mark.parametrize(
- "parameters,expected",
+ 'parameters,expected',
[
- ({"foo": "bar"}, '# Parameters\nfoo=bar\n'),
- ({"foo": "shell ( is ) "}, "# Parameters\nfoo='shell ( is ) '\n"),
- ({"foo": True}, '# Parameters\nfoo=true\n'),
- ({"foo": 5}, '# Parameters\nfoo=5\n'),
- ({"foo": 1.1}, '# Parameters\nfoo=1.1\n'),
+ ({'foo': 'bar'}, '# Parameters\nfoo=bar\n'),
+ ({'foo': 'shell ( is ) '}, "# Parameters\nfoo='shell ( is ) '\n"),
+ ({'foo': True}, '# Parameters\nfoo=true\n'),
+ ({'foo': 5}, '# Parameters\nfoo=5\n'),
+ ({'foo': 1.1}, '# Parameters\nfoo=1.1\n'),
(
OrderedDict([['foo', 'bar'], ['baz', '$dumb(shell)']]),
"# Parameters\nfoo=bar\nbaz='$dumb(shell)'\n",
diff --git a/papermill/tests/test_utils.py b/papermill/tests/test_utils.py
index 7b6f8d59..4d058fb2 100644
--- a/papermill/tests/test_utils.py
+++ b/papermill/tests/test_utils.py
@@ -1,54 +1,53 @@
-import pytest
import warnings
-
-from unittest.mock import Mock, call
-from tempfile import TemporaryDirectory
from pathlib import Path
+from tempfile import TemporaryDirectory
+from unittest.mock import Mock, call
-from nbformat.v4 import new_notebook, new_code_cell
+import pytest
+from nbformat.v4 import new_code_cell, new_notebook
+from ..exceptions import PapermillParameterOverwriteWarning
from ..utils import (
any_tagged_cell,
- retry,
chdir,
merge_kwargs,
remove_args,
+ retry,
)
-from ..exceptions import PapermillParameterOverwriteWarning
def test_no_tagged_cell():
nb = new_notebook(
- cells=[new_code_cell('a = 2', metadata={"tags": []})],
+ cells=[new_code_cell('a = 2', metadata={'tags': []})],
)
- assert not any_tagged_cell(nb, "parameters")
+ assert not any_tagged_cell(nb, 'parameters')
def test_tagged_cell():
nb = new_notebook(
- cells=[new_code_cell('a = 2', metadata={"tags": ["parameters"]})],
+ cells=[new_code_cell('a = 2', metadata={'tags': ['parameters']})],
)
- assert any_tagged_cell(nb, "parameters")
+ assert any_tagged_cell(nb, 'parameters')
def test_merge_kwargs():
with warnings.catch_warnings(record=True) as wrn:
- assert merge_kwargs({"a": 1, "b": 2}, a=3) == {"a": 3, "b": 2}
+ assert merge_kwargs({'a': 1, 'b': 2}, a=3) == {'a': 3, 'b': 2}
assert len(wrn) == 1
assert issubclass(wrn[0].category, PapermillParameterOverwriteWarning)
assert wrn[0].message.__str__() == "Callee will overwrite caller's argument(s): a=3"
def test_remove_args():
- assert remove_args(["a"], a=1, b=2, c=3) == {"c": 3, "b": 2}
+ assert remove_args(['a'], a=1, b=2, c=3) == {'c': 3, 'b': 2}
def test_retry():
- m = Mock(side_effect=RuntimeError(), __name__="m", __module__="test_s3", __doc__="m")
+ m = Mock(side_effect=RuntimeError(), __name__='m', __module__='test_s3', __doc__='m')
wrapped_m = retry(3)(m)
with pytest.raises(RuntimeError):
- wrapped_m("foo")
- m.assert_has_calls([call("foo"), call("foo"), call("foo")])
+ wrapped_m('foo')
+ m.assert_has_calls([call('foo'), call('foo'), call('foo')])
def test_chdir():
diff --git a/papermill/translators.py b/papermill/translators.py
index 58bac647..0086f84f 100644
--- a/papermill/translators.py
+++ b/papermill/translators.py
@@ -6,16 +6,15 @@
from .exceptions import PapermillException
from .models import Parameter
-
logger = logging.getLogger(__name__)
class PapermillTranslators:
- '''
+ """
The holder which houses any translator registered with the system.
This object is used in a singleton manner to save and load particular
named Translator objects for reference externally.
- '''
+ """
def __init__(self):
self._translators = {}
@@ -29,9 +28,7 @@ def find_translator(self, kernel_name, language):
elif language in self._translators:
return self._translators[language]
raise PapermillException(
- "No parameter translator functions specified for kernel '{}' or language '{}'".format(
- kernel_name, language
- )
+ f"No parameter translator functions specified for kernel '{kernel_name}' or language '{language}'"
)
@@ -166,7 +163,7 @@ def translate_bool(cls, val):
@classmethod
def translate_dict(cls, val):
- escaped = ', '.join([f"{cls.translate_str(k)}: {cls.translate(v)}" for k, v in val.items()])
+ escaped = ', '.join([f'{cls.translate_str(k)}: {cls.translate(v)}' for k, v in val.items()])
return f'{{{escaped}}}'
@classmethod
@@ -190,7 +187,7 @@ def codify(cls, parameters, comment='Parameters'):
except ImportError:
logger.debug("Black is not installed, parameters won't be formatted")
except AttributeError as aerr:
- logger.warning(f"Black encountered an error, skipping formatting ({aerr})")
+ logger.warning(f'Black encountered an error, skipping formatting ({aerr})')
return content
@classmethod
@@ -223,10 +220,10 @@ def flatten_accumulator(accumulator):
Returns:
Flatten definition
"""
- flat_string = ""
+ flat_string = ''
for line in accumulator[:-1]:
- if "#" in line:
- comment_pos = line.index("#")
+ if '#' in line:
+ comment_pos = line.index('#')
flat_string += line[:comment_pos].strip()
else:
flat_string += line.strip()
@@ -245,7 +242,7 @@ def flatten_accumulator(accumulator):
if len(line.strip()) == 0 or line.strip().startswith('#'):
continue # Skip blank and comment
- nequal = line.count("=")
+ nequal = line.count('=')
if nequal > 0:
grouped_variable.append(flatten_accumulator(accumulator))
accumulator = []
@@ -263,16 +260,16 @@ def flatten_accumulator(accumulator):
match = re.match(cls.PARAMETER_PATTERN, definition)
if match is not None:
attr = match.groupdict()
- if attr["target"] is None: # Fail to get variable name
+ if attr['target'] is None: # Fail to get variable name
continue
- type_name = str(attr["annotation"] or attr["type_comment"] or None)
+ type_name = str(attr['annotation'] or attr['type_comment'] or None)
params.append(
Parameter(
- name=attr["target"].strip(),
+ name=attr['target'].strip(),
inferred_type_name=type_name.strip(),
- default=str(attr["value"]).strip(),
- help=str(attr["help"] or "").strip(),
+ default=str(attr['value']).strip(),
+ help=str(attr['help'] or '').strip(),
)
)
@@ -290,9 +287,7 @@ def translate_bool(cls, val):
@classmethod
def translate_dict(cls, val):
- escaped = ', '.join(
- [f'{cls.translate_str(k)} = {cls.translate(v)}' for k, v in val.items()]
- )
+ escaped = ', '.join([f'{cls.translate_str(k)} = {cls.translate(v)}' for k, v in val.items()])
return f'list({escaped})'
@classmethod
@@ -307,7 +302,7 @@ def comment(cls, cmt_str):
@classmethod
def assign(cls, name, str_val):
# Leading '_' aren't legal R variable names -- so we drop them when injecting
- while name.startswith("_"):
+ while name.startswith('_'):
name = name[1:]
return f'{name} = {str_val}'
@@ -316,14 +311,12 @@ class ScalaTranslator(Translator):
@classmethod
def translate_int(cls, val):
strval = cls.translate_raw_str(val)
- return strval + "L" if (val > 2147483647 or val < -2147483648) else strval
+ return strval + 'L' if (val > 2147483647 or val < -2147483648) else strval
@classmethod
def translate_dict(cls, val):
"""Translate dicts to scala Maps"""
- escaped = ', '.join(
- [f"{cls.translate_str(k)} -> {cls.translate(v)}" for k, v in val.items()]
- )
+ escaped = ', '.join([f'{cls.translate_str(k)} -> {cls.translate(v)}' for k, v in val.items()])
return f'Map({escaped})'
@classmethod
@@ -348,9 +341,7 @@ def translate_none(cls, val):
@classmethod
def translate_dict(cls, val):
- escaped = ', '.join(
- [f"{cls.translate_str(k)} => {cls.translate(v)}" for k, v in val.items()]
- )
+ escaped = ', '.join([f'{cls.translate_str(k)} => {cls.translate(v)}' for k, v in val.items()])
return f'Dict({escaped})'
@classmethod
@@ -379,8 +370,8 @@ def __translate_char_array(str_val):
if isinstance(str_val, str):
str_val = str_val.encode('unicode_escape')
str_val = str_val.decode('utf-8')
- str_val = str_val.replace('\'', '\'\'')
- return f'\'{str_val}\''
+ str_val = str_val.replace("'", "''")
+ return f"'{str_val}'"
@classmethod
def translate_none(cls, val):
@@ -388,8 +379,8 @@ def translate_none(cls, val):
@classmethod
def translate_dict(cls, val):
- keys = ', '.join([f"{cls.__translate_char_array(k)}" for k, v in val.items()])
- vals = ', '.join([f"{cls.translate(v)}" for k, v in val.items()])
+ keys = ', '.join([f'{cls.__translate_char_array(k)}' for k, v in val.items()])
+ vals = ', '.join([f'{cls.translate(v)}' for k, v in val.items()])
return f'containers.Map({{{keys}}}, {{{vals}}})'
@classmethod
@@ -413,7 +404,7 @@ class CSharpTranslator(Translator):
@classmethod
def translate_none(cls, val):
# Can't figure out how to do this as nullable
- raise NotImplementedError("Option type not implemented for C#.")
+ raise NotImplementedError('Option type not implemented for C#.')
@classmethod
def translate_bool(cls, val):
@@ -422,15 +413,13 @@ def translate_bool(cls, val):
@classmethod
def translate_int(cls, val):
strval = cls.translate_raw_str(val)
- return strval + "L" if (val > 2147483647 or val < -2147483648) else strval
+ return strval + 'L' if (val > 2147483647 or val < -2147483648) else strval
@classmethod
def translate_dict(cls, val):
"""Translate dicts to nontyped dictionary"""
- kvps = ', '.join(
- [f"{{ {cls.translate_str(k)} , {cls.translate(v)} }}" for k, v in val.items()]
- )
+ kvps = ', '.join([f'{{ {cls.translate_str(k)} , {cls.translate(v)} }}' for k, v in val.items()])
return f'new Dictionary{{ {kvps} }}'
@classmethod
@@ -460,13 +449,11 @@ def translate_bool(cls, val):
@classmethod
def translate_int(cls, val):
strval = cls.translate_raw_str(val)
- return strval + "L" if (val > 2147483647 or val < -2147483648) else strval
+ return strval + 'L' if (val > 2147483647 or val < -2147483648) else strval
@classmethod
def translate_dict(cls, val):
- tuples = '; '.join(
- [f"({cls.translate_str(k)}, {cls.translate(v)} :> IComparable)" for k, v in val.items()]
- )
+ tuples = '; '.join([f'({cls.translate_str(k)}, {cls.translate(v)} :> IComparable)' for k, v in val.items()])
return f'[ {tuples} ] |> Map.ofList'
@classmethod
@@ -498,11 +485,11 @@ def translate_float(cls, val):
if math.isfinite(val):
return cls.translate_raw_str(val)
elif math.isnan(val):
- return "[double]::NaN"
+ return '[double]::NaN'
elif val < 0:
- return "[double]::NegativeInfinity"
+ return '[double]::NegativeInfinity'
else:
- return "[double]::PositiveInfinity"
+ return '[double]::PositiveInfinity'
@classmethod
def translate_none(cls, val):
@@ -514,7 +501,7 @@ def translate_bool(cls, val):
@classmethod
def translate_dict(cls, val):
- kvps = '\n '.join([f"{cls.translate_str(k)} = {cls.translate(v)}" for k, v in val.items()])
+ kvps = '\n '.join([f'{cls.translate_str(k)} = {cls.translate(v)}' for k, v in val.items()])
return f'@{{{kvps}}}'
@classmethod
@@ -560,18 +547,18 @@ def assign(cls, name, str_val):
# Instantiate a PapermillTranslators instance and register translators.
papermill_translators = PapermillTranslators()
-papermill_translators.register("python", PythonTranslator)
-papermill_translators.register("R", RTranslator)
-papermill_translators.register("scala", ScalaTranslator)
-papermill_translators.register("julia", JuliaTranslator)
-papermill_translators.register("matlab", MatlabTranslator)
-papermill_translators.register(".net-csharp", CSharpTranslator)
-papermill_translators.register(".net-fsharp", FSharpTranslator)
-papermill_translators.register(".net-powershell", PowershellTranslator)
-papermill_translators.register("pysparkkernel", PythonTranslator)
-papermill_translators.register("sparkkernel", ScalaTranslator)
-papermill_translators.register("sparkrkernel", RTranslator)
-papermill_translators.register("bash", BashTranslator)
+papermill_translators.register('python', PythonTranslator)
+papermill_translators.register('R', RTranslator)
+papermill_translators.register('scala', ScalaTranslator)
+papermill_translators.register('julia', JuliaTranslator)
+papermill_translators.register('matlab', MatlabTranslator)
+papermill_translators.register('.net-csharp', CSharpTranslator)
+papermill_translators.register('.net-fsharp', FSharpTranslator)
+papermill_translators.register('.net-powershell', PowershellTranslator)
+papermill_translators.register('pysparkkernel', PythonTranslator)
+papermill_translators.register('sparkkernel', ScalaTranslator)
+papermill_translators.register('sparkrkernel', RTranslator)
+papermill_translators.register('bash', BashTranslator)
def translate_parameters(kernel_name, language, parameters, comment='Parameters'):
diff --git a/papermill/utils.py b/papermill/utils.py
index a12ee308..e69b710a 100644
--- a/papermill/utils.py
+++ b/papermill/utils.py
@@ -1,7 +1,6 @@
-import os
import logging
+import os
import warnings
-
from contextlib import contextmanager
from functools import wraps
@@ -50,7 +49,7 @@ def nb_kernel_name(nb, name=None):
"""
name = name or nb.metadata.get('kernelspec', {}).get('name')
if not name:
- raise ValueError("No kernel name found in notebook and no override provided.")
+ raise ValueError('No kernel name found in notebook and no override provided.')
return name
@@ -79,7 +78,7 @@ def nb_language(nb, language=None):
# v3 language path for old notebooks that didn't convert cleanly
language = language or nb.metadata.get('kernelspec', {}).get('language')
if not language:
- raise ValueError("No language found in notebook and no override provided.")
+ raise ValueError('No language found in notebook and no override provided.')
return language
diff --git a/pyproject.toml b/pyproject.toml
index 79dfbcbf..e5911bfd 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,30 +1,190 @@
-# Example configuration for Black.
-
-# NOTE: you have to use single-quoted strings in TOML for regular expressions.
-# It's the equivalent of r-strings in Python. Multiline strings are treated as
-# verbose regular expressions by Black. Use [ ] to denote a significant space
-# character.
-
-[tool.black]
-line-length = 100
-target-version = ['py312']
-include = '\.pyi?$'
-exclude = '''
-/(
- \.git
- | \.hg
- | \.mypy_cache
- | \.tox
- | \.venv
- | _build
- | buck-out
- | build
- | dist
-
- # The following are specific to Black, you probably don't want those.
- | blib2to3
- | tests/data
- | profiling
-)/
-'''
-skip-string-normalization = true
+[build-system]
+requires = ["hatchling", "hatch-vcs"]
+build-backend = "hatchling.build"
+
+
+[project]
+name = "papermill"
+description = "Parameterize and run Jupyter and nteract Notebooks"
+readme = "README.md"
+license = "BSD-3-Clause"
+requires-python = ">=3.8"
+authors = [
+ { name = "nteract contributors", email = "nteract@googlegroups.com" },
+]
+keywords = [
+ "jupyter",
+ "mapreduce",
+ "notebook",
+ "nteract",
+ "pipeline",
+]
+classifiers = [
+ "Intended Audience :: Developers",
+ "Intended Audience :: Science/Research",
+ "Intended Audience :: System Administrators",
+ "License :: OSI Approved :: BSD License",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+]
+dependencies = [
+ "aiohttp==3.9.0b0;python_version==\"3.12\"",
+ "click",
+ "entrypoints",
+ "nbclient >= 0.2.0",
+ "nbformat >= 5.1.2",
+ "pyyaml",
+ "requests",
+ "tenacity >= 5.0.2",
+ "tqdm >= 4.32.2",
+]
+dynamic = ["version"]
+
+[project.scripts]
+papermill = "papermill.__main__:papermill"
+
+[project.urls]
+Documentation = "https://papermill.readthedocs.io"
+Funding = "https://nteract.io"
+Homepage = "https://github.com/nteract/papermill"
+Source = "https://github.com/nteract/papermill/"
+Tracker = "https://github.com/nteract/papermill/issues"
+
+[tool.hatch.version]
+source = "vcs"
+
+[tool.hatch.build.hooks.vcs]
+version-file = "papermill/version.py"
+
+[tool.hatch.build.targets.sdist]
+include = [
+ "/papermill",
+]
+
+[[tool.hatch.envs.default.matrix]]
+python = ["3.8", "3.9", "3.10", "3.11", "3.12"]
+
+[tool.hatch.envs.default]
+dependencies = [
+ "attrs>=17.4.0",
+ "azure-datalake-store >= 0.0.30",
+ "azure-identity>=1.3.1",
+ "azure-storage-blob >= 12.1.0",
+ "black >= 23.11.0",
+ "boto3",
+ "boto3",
+ "botocore",
+ "bumpversion",
+ "check-manifest",
+ "codecov",
+ "coverage",
+ "flake8",
+ "gcsfs>=0.2.0",
+ "google_compute_engine",
+ "ipython>=5.0",
+ "ipywidgets",
+ "moto",
+ "notebook",
+ "pip>=18.1",
+ "pre-commit",
+ "pytest-cov>=2.6.1",
+ "pytest-env>=0.6.2",
+ "pytest-mock>=1.10",
+ "pytest>=4.1",
+ "recommonmark",
+ "requests >= 2.21.0",
+ "requests >= 2.21.0",
+ "setuptools>=38.6.0",
+ "tox",
+ "twine>=1.11.0",
+ "wheel>=0.31.0",
+]
+
+[tool.hatch.envs.default.scripts]
+cov = "pytest --cov-report=term-missing --cov-config=pyproject.toml --cov=hatch_init --cov=tests papermill/tests"
+no-cov = "cov --no-cov"
+test = "pytest papermill/tests"
+
+[tool.hatch.envs.docs]
+dependencies = [
+ "sphinx-copybutton>=0.5.2",
+ "sphinx-autobuild",
+ "Sphinx>=7.2.6",
+ "furo>=2023.9.10",
+ "myst-parser>=2.0.0",
+ "moto>=4.2.8",
+ "nbformat",
+ "entrypoints"
+]
+
+[tool.hatch.envs.docs.scripts]
+build = "sphinx-build docs docs_out --color -W -bhtml"
+clean = "rm -rf docs_out"
+serve = "sphinx-autobuild docs docs_out/index.html"
+ci-build = "sphinx-build docs docs_out --color -W -bhtml"
+
+[tool.pytest.ini_options]
+minversion = "6.0"
+env = [
+ "AWS_SECRET_ACCESS_KEY=foobar_secret",
+ "AWS_ACCESS_KEY_ID=foobar_key"
+]
+filterwarnings = [
+ "ignore:.*imp module is deprecated.","*:DeprecationWarning"
+]
+
+[tool.ruff]
+line-length = 120
+indent-width = 4
+select = [
+ "E", # pycodestyle errors
+ "W", # pycodestyle warnings
+ "F", # pyflakes
+ "I", # isort
+ "C", # flake8-comprehensions
+ "B", # flake8-bugbear
+ "UP", # pyupgrade
+]
+ignore = [
+ "E501", # line too long, handled by black
+ "B008", # do not perform function calls in argument defaults
+ "C901", # too complex
+ "W191", # indentation contains tabs
+ # Extra space in brackets
+ "E20",
+ # Multiple spaces around ","
+ "E231","E241",
+ # Comments
+ "E26",
+ # Import formatting
+ "E4",
+ # Comparing types instead of isinstance
+ "E721",
+ # Assigning lambda expression
+ "E731",
+ # Do not use variables named 'l', 'O', or 'I'
+ "E741",
+ "B028"
+]
+target-version = "py310"
+
+[tool.ruff.per-file-ignores]
+"__init__.py" = ["F401"]
+"papermill/cli.py" = ["C408"]
+"papermill/engines.py" = ["C408"]
+"papermill/iorw.py" = ["B018", "B006","UP038","B904","C408","B028"]
+"papermill/parameterize.py" = ["B904"]
+"papermill/s3.py" = ["C416","UP038","B007","B028"]
+"papermill/tests/test_cli.py" = ["C408"]
+"papermill/tests/test_engines.py" = ["B024"]
+"papermill/tests/test_inspect.py" = ["B905"]
+"papermill/utils.py" = ["C419","B028","B007"]
+
+[tool.ruff.isort]
+known-third-party = ["papermill"]
+
+[tool.ruff.format]
+quote-style = "single"
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 2c5efc2b..00000000
--- a/requirements.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-click
-pyyaml
-nbformat >= 5.1.2
-nbclient >= 0.2.0
-tqdm >= 4.32.2
-requests
-entrypoints
-tenacity >= 5.0.2
-aiohttp==3.9.0b0;python_version=="3.12"
diff --git a/requirements/azure.txt b/requirements/azure.txt
deleted file mode 100644
index e6aebca9..00000000
--- a/requirements/azure.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-azure-datalake-store >= 0.0.30
-azure-storage-blob >= 12.1.0
-requests >= 2.21.0
-azure-identity>=1.3.1
diff --git a/requirements/dev.txt b/requirements/dev.txt
deleted file mode 100644
index dca04012..00000000
--- a/requirements/dev.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-boto3
-botocore
-codecov
-coverage
-google_compute_engine # Need this because boto has issues with dynamic package loading during tests if other google components are there
-ipython>=5.0
-ipywidgets
-notebook
-moto
-pytest>=4.1
-pytest-cov>=2.6.1
-pytest-mock>=1.10
-pytest-env>=0.6.2
-requests >= 2.21.0
-check-manifest
-attrs>=17.4.0
-pre-commit
-flake8
-tox
-bumpversion
-recommonmark
-pip>=18.1
-wheel>=0.31.0
-setuptools>=38.6.0
-twine>=1.11.0
diff --git a/requirements/gcs.txt b/requirements/gcs.txt
deleted file mode 100644
index 8db67662..00000000
--- a/requirements/gcs.txt
+++ /dev/null
@@ -1 +0,0 @@
-gcsfs>=0.2.0
diff --git a/requirements/github.txt b/requirements/github.txt
deleted file mode 100644
index 75bfade9..00000000
--- a/requirements/github.txt
+++ /dev/null
@@ -1 +0,0 @@
-PyGithub >= 1.55
diff --git a/requirements/hdfs.txt b/requirements/hdfs.txt
deleted file mode 100644
index fa262689..00000000
--- a/requirements/hdfs.txt
+++ /dev/null
@@ -1 +0,0 @@
-pyarrow >= 2.0
diff --git a/requirements/s3.txt b/requirements/s3.txt
deleted file mode 100644
index 30ddf823..00000000
--- a/requirements/s3.txt
+++ /dev/null
@@ -1 +0,0 @@
-boto3
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 0b0c6c37..00000000
--- a/setup.cfg
+++ /dev/null
@@ -1,47 +0,0 @@
-
-[flake8]
-# References:
-# https://flake8.readthedocs.io/en/latest/user/configuration.html
-# https://flake8.readthedocs.io/en/latest/user/error-codes.html
-
-# Note: there cannot be spaces after comma's here
-exclude = __init__.py
-ignore =
- # Extra space in brackets
- E20,
- # Multiple spaces around ","
- E231,E241,
- # Comments
- E26,
- # Import formatting
- E4,
- # Comparing types instead of isinstance
- E721,
- # Assigning lambda expression
- E731,
- # Do not use variables named ālā, āOā, or āIā
- E741
-max-line-length = 120
-max-complexity = 23
-
-[bdist_wheel]
-universal=0
-
-[coverage:run]
-branch = False
-omit =
- papermill/tests/*
- papermill/version.py
-
-[coverage:report]
-exclude_lines =
- if self\.debug:
- pragma: no cover
- raise AssertionError
- raise NotImplementedError
- if __name__ == .__main__.:
-ignore_errors = True
-omit = papermill/tests/*,papermill/version.py
-
-[tool:pytest]
-filterwarnings = always
diff --git a/setup.py b/setup.py
deleted file mode 100644
index c67c3703..00000000
--- a/setup.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-""""
-setup.py
-
-See:
-https://packaging.python.org/tutorials/packaging-projects/
-https://packaging.python.org/en/latest/distributing.html
-https://github.com/pypa/sampleproject
-
-"""
-import os
-from setuptools import setup
-
-
-local_path = os.path.dirname(__file__)
-# Fix for tox which manipulates execution pathing
-if not local_path:
- local_path = '.'
-here = os.path.abspath(local_path)
-
-
-def version():
- with open(here + '/papermill/version.py') as ver:
- for line in ver.readlines():
- if line.startswith('version ='):
- return line.split(' = ')[-1].strip()[1:-1]
- raise ValueError('No version found in papermill/version.py')
-
-
-def read(fname):
- with open(fname) as fhandle:
- return fhandle.read()
-
-
-def read_reqs(fname, folder=None):
- path_dir = os.path.join(here, folder) if folder else here
- req_path = os.path.join(path_dir, fname)
- return [req.strip() for req in read(req_path).splitlines() if req.strip()]
-
-
-s3_reqs = read_reqs('s3.txt', folder='requirements')
-azure_reqs = read_reqs('azure.txt', folder='requirements')
-gcs_reqs = read_reqs('gcs.txt', folder='requirements')
-hdfs_reqs = read_reqs('hdfs.txt', folder='requirements')
-github_reqs = read_reqs('github.txt', folder='requirements')
-docs_only_reqs = read_reqs('requirements.txt', folder='docs')
-black_reqs = ['black >= 19.3b0']
-all_reqs = s3_reqs + azure_reqs + gcs_reqs + hdfs_reqs + github_reqs + black_reqs
-docs_reqs = all_reqs + docs_only_reqs
-# Temporarily remove hdfs_reqs from dev deps until the pyarrow package is available for Python 3.12
-dev_reqs = (
- read_reqs('dev.txt', folder='requirements') + s3_reqs + azure_reqs + gcs_reqs + black_reqs
-) # all_reqs
-extras_require = {
- "test": dev_reqs,
- "dev": dev_reqs,
- "all": all_reqs,
- "s3": s3_reqs,
- "azure": azure_reqs,
- "gcs": gcs_reqs,
- "hdfs": hdfs_reqs,
- "github": github_reqs,
- "black": black_reqs,
- "docs": docs_reqs,
-}
-
-# Get the long description from the README file
-with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
- long_description = f.read()
-
-setup(
- name='papermill',
- version=version(),
- description='Parameterize and run Jupyter and nteract Notebooks',
- author='nteract contributors',
- author_email='nteract@googlegroups.com',
- license='BSD',
- # Note that this is a string of words separated by whitespace, not a list.
- keywords='jupyter mapreduce nteract pipeline notebook',
- long_description=long_description,
- long_description_content_type='text/markdown',
- url='https://github.com/nteract/papermill',
- packages=['papermill'],
- python_requires='>=3.8',
- install_requires=read_reqs('requirements.txt'),
- extras_require=extras_require,
- entry_points={'console_scripts': ['papermill = papermill.__main__:papermill']},
- project_urls={
- 'Documentation': 'https://papermill.readthedocs.io',
- 'Funding': 'https://nteract.io',
- 'Source': 'https://github.com/nteract/papermill/',
- 'Tracker': 'https://github.com/nteract/papermill/issues',
- },
- classifiers=[
- 'Intended Audience :: Developers',
- 'Intended Audience :: System Administrators',
- 'Intended Audience :: Science/Research',
- 'License :: OSI Approved :: BSD License',
- 'Programming Language :: Python',
- 'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.8',
- 'Programming Language :: Python :: 3.9',
- 'Programming Language :: Python :: 3.10',
- 'Programming Language :: Python :: 3.11',
- 'Programming Language :: Python :: 3.12',
- ],
-)
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index 17d1c0ba..00000000
--- a/tox.ini
+++ /dev/null
@@ -1,67 +0,0 @@
-[tox]
-skipsdist = true
-envlist = py{38,39,310,311,312}, dist, manifest, docs, binder
-
-[gh-actions]
-python =
- 3.8: py38
- 3.9: py39
- 3.10: py310
- 3.11: py311, docs
- 3.12: py312, dist
-
-# Manifest
-[testenv:manifest]
-skip_install = true
-deps = check-manifest
-commands = check-manifest
-ignore =
- .readthedocs.yaml
-
-# Docs
-[testenv:docs]
-description = invoke sphinx-build to build the HTML docs
-deps =
- .[docs]
-extras = docs
-commands =
- sphinx-build -d "{toxworkdir}/docs_doctree" docs "{toxworkdir}/docs_out" --color -W -bhtml {posargs}
- python -c 'import pathlib; print("documentation available under file://\{0\}".format(pathlib.Path(r"{toxworkdir}") / "docs_out" / "index.html"))'
-
-# Binder
-[testenv:binder]
-description = ensure /binder/*ipynb are runnable
-deps =
- -r binder/requirements.txt
-commands = python -c "import glob; import papermill as pm; [pm.execute_notebook(input, '{toxworkdir}/out.ipynb', parameters=\{'binder_dir':'binder'\}) for input in glob.glob('binder/**/*.ipynb')]"
-
-# Distro
-[testenv:dist]
-skip_install = true
-commands =
- python setup.py sdist --dist-dir={distdir} bdist_wheel --dist-dir={distdir}
- /bin/bash -c 'python -m pip install -U --force-reinstall {distdir}/papermill*.whl'
-
-[testenv]
-# disable Python's hash randomization for tests that stringify dicts, etc
-setenv =
- PYTHONHASHSEED = 0
- AWS_ACCESS_KEY_ID=foobar_key
- AWS_SECRET_ACCESS_KEY=foobar_secret
-passenv = *
-basepython =
- py38: python3.8
- py39: python3.9
- py310: python3.10
- py311: python3.11
- py312: python3.12
- manifest: python3.11
- dist: python3.12
- docs: python3.11
- binder: python3.11
-deps = .[dev]
-# Have to use /bin/bash or the `*` will cause that argument to get quoted by the tox command line...
-allowlist_externals = /bin/bash
-# Python 3.12 breaks default pip/setuptools versions ... force an upgrade of these before anything else
-install_command = /bin/bash ./tox_py_installer.sh {opts} {packages}
-commands = pytest -v --maxfail=2 --cov=papermill -W always {posargs}
diff --git a/tox_py_installer.sh b/tox_py_installer.sh
deleted file mode 100755
index 4bcf405d..00000000
--- a/tox_py_installer.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-python -m ensurepip --upgrade
-python -m pip install --upgrade setuptools
-# python -m pip install {opts} {packages}
-python -m pip install $1 $2