diff --git a/antarest/core/exceptions.py b/antarest/core/exceptions.py index 89555f3e48..3ad12ae655 100644 --- a/antarest/core/exceptions.py +++ b/antarest/core/exceptions.py @@ -11,8 +11,8 @@ # This file is part of the Antares project. import re -import typing as t from http import HTTPStatus +from typing import Optional, Sequence from fastapi.exceptions import HTTPException from typing_extensions import override @@ -337,7 +337,7 @@ def __init__(self) -> None: class StudyDeletionNotAllowed(HTTPException): - def __init__(self, uuid: str, message: t.Optional[str] = None) -> None: + def __init__(self, uuid: str, message: Optional[str] = None) -> None: msg = f"Study {uuid} (not managed) is not allowed to be deleted" if message: msg += f"\n{message}" @@ -384,7 +384,7 @@ class ReferencedObjectDeletionNotAllowed(HTTPException): other objects: areas, links or thermal clusters. """ - def __init__(self, object_id: str, binding_ids: t.Sequence[str], *, object_type: str) -> None: + def __init__(self, object_id: str, binding_ids: Sequence[str], *, object_type: str) -> None: """ Initialize the exception. diff --git a/antarest/core/filesystem_blueprint.py b/antarest/core/filesystem_blueprint.py index 10145d744e..b9450b5fd9 100644 --- a/antarest/core/filesystem_blueprint.py +++ b/antarest/core/filesystem_blueprint.py @@ -18,8 +18,8 @@ import os import shutil import stat -import typing as t from pathlib import Path +from typing import Iterator, Mapping, Sequence, Tuple import typing_extensions as te from fastapi import APIRouter, Depends, HTTPException @@ -58,7 +58,7 @@ class FilesystemDTO( """ name: FilesystemName - mount_dirs: t.Mapping[str, Path] = Field(description="Full path of the mount points in Antares Web Server") + mount_dirs: Mapping[str, Path] = Field(description="Full path of the mount points in Antares Web Server") class MountPointDTO( @@ -203,7 +203,7 @@ async def from_path(cls, full_path: Path, *, details: bool = False) -> "FileInfo return obj -async def _calc_details(full_path: t.Union[str, Path]) -> t.Tuple[int, int]: +async def _calc_details(full_path: str | Path) -> Tuple[int, int]: """Calculate the number of files and the total size of a directory recursively.""" full_path = Path(full_path) @@ -271,7 +271,7 @@ def create_file_system_blueprint(config: Config) -> APIRouter: # Utility functions # ================= - def _get_mount_dirs(fs: str) -> t.Mapping[str, Path]: + def _get_mount_dirs(fs: str) -> Mapping[str, Path]: try: return filesystems[fs] except KeyError: @@ -299,9 +299,9 @@ def _get_full_path(mount_dir: Path, path: str) -> Path: @bp.get( "", summary="Get filesystems information", - response_model=t.Sequence[FilesystemDTO], + response_model=Sequence[FilesystemDTO], ) - async def list_filesystems() -> t.Sequence[FilesystemDTO]: + async def list_filesystems() -> Sequence[FilesystemDTO]: """ Get the list of filesystems and their mount points. @@ -316,9 +316,9 @@ async def list_filesystems() -> t.Sequence[FilesystemDTO]: @bp.get( "/{fs}", summary="Get information of a filesystem", - response_model=t.Sequence[MountPointDTO], + response_model=Sequence[MountPointDTO], ) - async def list_mount_points(fs: FilesystemName) -> t.Sequence[MountPointDTO]: + async def list_mount_points(fs: FilesystemName) -> Sequence[MountPointDTO]: """ Get the path and the disk usage of the mount points in a filesystem. 
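Every hunk in this patch applies the same mechanical rule: drop the `import typing as t` alias in favour of direct `from typing import ...` names, and rewrite `t.Union[X, Y]` annotations in the PEP 604 form `X | Y`, while `Optional`, `List`, `Dict` and friends keep their typing spellings. PEP 604 unions work at runtime on Python 3.10+, and the `enum.StrEnum` usage elsewhere in this patch implies 3.11. A minimal before/after sketch of the rule, using a hypothetical `total_size` helper that is not part of antarest:

# Before: every typing name goes through the module alias.
#
#   import typing as t
#
#   def total_size(paths: t.Sequence[t.Union[str, Path]]) -> t.Optional[int]: ...
#
# After: direct imports plus the PEP 604 union syntax.
from pathlib import Path
from typing import Optional, Sequence


def total_size(paths: Sequence[str | Path]) -> Optional[int]:
    """Return the summed size of the paths that exist, or None if none do."""
    existing = [p for p in map(Path, paths) if p.exists()]
    return sum(p.stat().st_size for p in existing) if existing else None
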
@@ -373,14 +373,14 @@ async def get_mount_point(fs: FilesystemName, mount: MountPointName) -> MountPoi @bp.get( "/{fs}/{mount}/ls", summary="List files in a mount point", - response_model=t.Sequence[FileInfoDTO], + response_model=Sequence[FileInfoDTO], ) async def list_files( fs: FilesystemName, mount: MountPointName, path: str = "", details: bool = False, - ) -> t.Sequence[FileInfoDTO]: + ) -> Sequence[FileInfoDTO]: """ List files and directories in a mount point. @@ -532,7 +532,7 @@ async def download_file( elif full_path.is_file(): - def iter_file() -> t.Iterator[bytes]: + def iter_file() -> Iterator[bytes]: with full_path.open(mode="rb") as file: yield from file diff --git a/antarest/core/filetransfer/main.py b/antarest/core/filetransfer/main.py index 797e3652cc..63210b6192 100644 --- a/antarest/core/filetransfer/main.py +++ b/antarest/core/filetransfer/main.py @@ -12,8 +12,6 @@ from typing import Optional -from fastapi import APIRouter, FastAPI - from antarest.core.application import AppBuildContext from antarest.core.config import Config from antarest.core.filetransfer.repository import FileDownloadRepository diff --git a/antarest/core/maintenance/main.py b/antarest/core/maintenance/main.py index a962d1b013..92fe373304 100644 --- a/antarest/core/maintenance/main.py +++ b/antarest/core/maintenance/main.py @@ -12,8 +12,6 @@ from typing import Optional -from fastapi import APIRouter, FastAPI - from antarest.core.application import AppBuildContext from antarest.core.config import Config from antarest.core.interfaces.cache import ICache diff --git a/antarest/core/permissions.py b/antarest/core/permissions.py index 9bf073e809..578269dc00 100644 --- a/antarest/core/permissions.py +++ b/antarest/core/permissions.py @@ -11,7 +11,7 @@ # This file is part of the Antares project. import logging -import typing as t +from typing import Dict, Sequence from antarest.core.jwt import JWTUser from antarest.core.model import PermissionInfo, PublicMode, StudyPermissionType @@ -20,7 +20,7 @@ logger = logging.getLogger(__name__) -permission_matrix: t.Dict[str, t.Dict[str, t.Sequence[t.Union[RoleType, PublicMode]]]] = { +permission_matrix: Dict[str, Dict[str, Sequence[RoleType | PublicMode]]] = { StudyPermissionType.READ.value: { "roles": [ RoleType.ADMIN, diff --git a/antarest/core/requests.py b/antarest/core/requests.py index 78a8c4ae6c..2702a6c78d 100644 --- a/antarest/core/requests.py +++ b/antarest/core/requests.py @@ -10,10 +10,9 @@ # # This file is part of the Antares project. 
-import typing as t from collections import OrderedDict from dataclasses import dataclass -from typing import Any, Generator, Tuple +from typing import Any, Generator, Mapping, MutableMapping, Optional, Tuple from fastapi import HTTPException from markupsafe import escape @@ -32,19 +31,19 @@ } -class CaseInsensitiveDict(t.MutableMapping[str, t.Any]): # copy of the requests class to avoid importing the package +class CaseInsensitiveDict(MutableMapping[str, Any]): # copy of the requests class to avoid importing the package def __init__(self, data=None, **kwargs) -> None: # type: ignore - self._store: OrderedDict[str, t.Any] = OrderedDict() + self._store: OrderedDict[str, Any] = OrderedDict() if data is None: data = {} self.update(data, **kwargs) @override - def __setitem__(self, key: str, value: t.Any) -> None: + def __setitem__(self, key: str, value: Any) -> None: self._store[key.lower()] = (key, value) @override - def __getitem__(self, key: str) -> t.Any: + def __getitem__(self, key: str) -> Any: return self._store[key.lower()][1] @override @@ -52,7 +51,7 @@ def __delitem__(self, key: str) -> None: del self._store[key.lower()] @override - def __iter__(self) -> t.Any: + def __iter__(self) -> Any: return (casedkey for casedkey, mappedvalue in self._store.values()) @override @@ -63,8 +62,8 @@ def lower_items(self) -> Generator[Tuple[Any, Any], Any, None]: return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items()) @override - def __eq__(self, other: t.Any) -> bool: - if isinstance(other, t.Mapping): + def __eq__(self, other: Any) -> bool: + if isinstance(other, Mapping): other = CaseInsensitiveDict(other) else: return NotImplemented @@ -84,7 +83,7 @@ class RequestParameters: DTO object to handle data inside request to send to service """ - user: t.Optional[JWTUser] = None + user: Optional[JWTUser] = None def get_user_id(self) -> str: return str(escape(str(self.user.id))) if self.user else "Unknown" diff --git a/antarest/core/serialization/__init__.py b/antarest/core/serialization/__init__.py index 5591290ce8..6bc9304822 100644 --- a/antarest/core/serialization/__init__.py +++ b/antarest/core/serialization/__init__.py @@ -9,12 +9,12 @@ # SPDX-License-Identifier: MPL-2.0 # # This file is part of the Antares project. -import typing as t +from typing import Any, Dict, Optional import pydantic -ADAPTER: pydantic.TypeAdapter[t.Any] = pydantic.TypeAdapter( - type=t.Any, config=pydantic.config.ConfigDict(ser_json_inf_nan="constants") +ADAPTER: pydantic.TypeAdapter[Any] = pydantic.TypeAdapter( + type=Any, config=pydantic.config.ConfigDict(ser_json_inf_nan="constants") ) # ser_json_inf_nan="constants" means infinity and NaN values will be serialized as `Infinity` and `NaN`. @@ -22,15 +22,15 @@ # Since pydantic v2 is written in RUST it's way faster. 
-def from_json(data: t.Union[str, bytes, bytearray]) -> t.Dict[str, t.Any]: +def from_json(data: str | bytes | bytearray) -> Dict[str, Any]: return ADAPTER.validate_json(data) # type: ignore -def to_json(data: t.Any, indent: t.Optional[int] = None) -> bytes: +def to_json(data: Any, indent: Optional[int] = None) -> bytes: return ADAPTER.dump_json(data, indent=indent) -def to_json_string(data: t.Any, indent: t.Optional[int] = None) -> str: +def to_json_string(data: Any, indent: Optional[int] = None) -> str: return to_json(data, indent=indent).decode("utf-8") diff --git a/antarest/core/swagger.py b/antarest/core/swagger.py index 6856b5d2fa..30c767428f 100644 --- a/antarest/core/swagger.py +++ b/antarest/core/swagger.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. -import typing as t +from typing import Dict, List, Tuple from fastapi import FastAPI from fastapi.openapi.models import Example @@ -22,7 +22,7 @@ attachment = "User-defined file attachment
" # noinspection SpellCheckingInspection -urls: t.List[t.Tuple[str, str]] = [ +urls: List[Tuple[str, str]] = [ ("layers/layers", ""), ("settings/generaldata", ""), ("output/{sim}/about-the-study/parameters", sim), @@ -42,7 +42,7 @@ ] -def get_path_examples() -> t.Dict[str, Example]: +def get_path_examples() -> Dict[str, Example]: return {url: {"value": url, "description": des} for url, des in urls} diff --git a/antarest/core/tasks/main.py b/antarest/core/tasks/main.py index 92b0929413..41ad8b35cd 100644 --- a/antarest/core/tasks/main.py +++ b/antarest/core/tasks/main.py @@ -12,8 +12,6 @@ from typing import Optional -from fastapi import APIRouter, FastAPI - from antarest.core.application import AppBuildContext from antarest.core.config import Config from antarest.core.interfaces.eventbus import DummyEventBusService, IEventBus diff --git a/antarest/core/tasks/model.py b/antarest/core/tasks/model.py index ce4ab5631a..74af26e75c 100644 --- a/antarest/core/tasks/model.py +++ b/antarest/core/tasks/model.py @@ -10,10 +10,10 @@ # # This file is part of the Antares project. -import typing as t import uuid from datetime import datetime from enum import Enum, StrEnum +from typing import TYPE_CHECKING, Any, List, Mapping, Optional from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer, Sequence, String # type: ignore from sqlalchemy.engine.base import Engine # type: ignore @@ -23,7 +23,7 @@ from antarest.core.persistence import Base from antarest.core.serialization import AntaresBaseModel -if t.TYPE_CHECKING: +if TYPE_CHECKING: # avoid circular import from antarest.login.model import Identity from antarest.study.model import Study @@ -62,7 +62,7 @@ class TaskResult(AntaresBaseModel, extra="forbid"): success: bool message: str # Can be used to store json serialized result - return_value: t.Optional[str] = None + return_value: Optional[str] = None class TaskLogDTO(AntaresBaseModel, extra="forbid"): @@ -80,32 +80,32 @@ class TaskEventPayload(AntaresBaseModel, extra="forbid"): id: str message: str type: TaskType - study_id: t.Optional[str] = None + study_id: Optional[str] = None class TaskDTO(AntaresBaseModel, extra="forbid"): id: str name: str - owner: t.Optional[int] = None + owner: Optional[int] = None status: TaskStatus creation_date_utc: str - completion_date_utc: t.Optional[str] = None - result: t.Optional[TaskResult] = None - logs: t.Optional[t.List[TaskLogDTO]] = None - type: t.Optional[str] = None - ref_id: t.Optional[str] = None - progress: t.Optional[int] = None + completion_date_utc: Optional[str] = None + result: Optional[TaskResult] = None + logs: Optional[List[TaskLogDTO]] = None + type: Optional[str] = None + ref_id: Optional[str] = None + progress: Optional[int] = None class TaskListFilter(AntaresBaseModel, extra="forbid"): - status: t.List[TaskStatus] = [] - name: t.Optional[str] = None - type: t.List[TaskType] = [] - ref_id: t.Optional[str] = None - from_creation_date_utc: t.Optional[float] = None - to_creation_date_utc: t.Optional[float] = None - from_completion_date_utc: t.Optional[float] = None - to_completion_date_utc: t.Optional[float] = None + status: List[TaskStatus] = [] + name: Optional[str] = None + type: List[TaskType] = [] + ref_id: Optional[str] = None + from_creation_date_utc: Optional[float] = None + to_creation_date_utc: Optional[float] = None + from_completion_date_utc: Optional[float] = None + to_completion_date_utc: Optional[float] = None class TaskJobLog(Base): # type: ignore @@ -124,7 +124,7 @@ class TaskJobLog(Base): # type: ignore job: "TaskJob" = 
relationship("TaskJob", back_populates="logs", uselist=False) @override - def __eq__(self, other: t.Any) -> bool: + def __eq__(self, other: Any) -> bool: if not isinstance(other, TaskJobLog): return False return bool(other.id == self.id and other.message == self.message and other.task_id == self.task_id) @@ -144,12 +144,12 @@ class TaskJob(Base): # type: ignore name: str = Column(String(), nullable=False, index=True) status: int = Column(Integer(), default=lambda: TaskStatus.PENDING.value, index=True) creation_date: datetime = Column(DateTime, default=datetime.utcnow, index=True) - completion_date: t.Optional[datetime] = Column(DateTime, nullable=True, default=None) - result_msg: t.Optional[str] = Column(String(), nullable=True, default=None) - result: t.Optional[str] = Column(String(), nullable=True, default=None) - result_status: t.Optional[bool] = Column(Boolean(), nullable=True, default=None) - type: t.Optional[str] = Column(String(), nullable=True, default=None, index=True) - progress: t.Optional[int] = Column(Integer(), nullable=True, default=None) + completion_date: Optional[datetime] = Column(DateTime, nullable=True, default=None) + result_msg: Optional[str] = Column(String(), nullable=True, default=None) + result: Optional[str] = Column(String(), nullable=True, default=None) + result_status: Optional[bool] = Column(Boolean(), nullable=True, default=None) + type: Optional[str] = Column(String(), nullable=True, default=None, index=True) + progress: Optional[int] = Column(Integer(), nullable=True, default=None) owner_id: int = Column( Integer(), ForeignKey("identities.id", name="fk_taskjob_identity_id", ondelete="SET NULL"), @@ -157,7 +157,7 @@ class TaskJob(Base): # type: ignore default=None, index=True, ) - ref_id: t.Optional[str] = Column( + ref_id: Optional[str] = Column( String(), ForeignKey("study.id", name="fk_taskjob_study_id", ondelete="CASCADE"), nullable=True, @@ -167,7 +167,7 @@ class TaskJob(Base): # type: ignore # Define a one-to-many relationship between `TaskJob` and `TaskJobLog`. # If the TaskJob is deleted, all attached logs must also be deleted in cascade. - logs: t.List["TaskJobLog"] = relationship("TaskJobLog", back_populates="job", cascade="all, delete, delete-orphan") + logs: List["TaskJobLog"] = relationship("TaskJobLog", back_populates="job", cascade="all, delete, delete-orphan") # Define a many-to-one relationship between `TaskJob` and `Identity`. # If the Identity is deleted, all attached TaskJob must be preserved. @@ -202,7 +202,7 @@ def to_dto(self, with_logs: bool = False) -> TaskDTO: ) @override - def __eq__(self, other: t.Any) -> bool: + def __eq__(self, other: Any) -> bool: if not isinstance(other, TaskJob): return False return bool( @@ -232,7 +232,7 @@ def __repr__(self) -> str: ) -def cancel_orphan_tasks(engine: Engine, session_args: t.Mapping[str, bool]) -> None: +def cancel_orphan_tasks(engine: Engine, session_args: Mapping[str, bool]) -> None: """ Cancel all tasks that are currently running or pending. diff --git a/antarest/core/tasks/repository.py b/antarest/core/tasks/repository.py index 87572bb570..dc53d56580 100644 --- a/antarest/core/tasks/repository.py +++ b/antarest/core/tasks/repository.py @@ -11,9 +11,9 @@ # This file is part of the Antares project. 
import datetime -import typing as t from http import HTTPStatus from operator import and_ +from typing import List, Optional from fastapi import HTTPException from sqlalchemy.orm import Session # type: ignore @@ -44,7 +44,7 @@ def save(self, task: TaskJob) -> TaskJob: session.commit() return task - def get(self, id: str) -> t.Optional[TaskJob]: + def get(self, id: str) -> Optional[TaskJob]: session = self.session task: TaskJob = session.get(TaskJob, id) if task is not None: @@ -57,7 +57,7 @@ def get_or_raise(self, id: str) -> TaskJob: raise HTTPException(HTTPStatus.NOT_FOUND, f"Task {id} not found") return task - def list(self, filter: TaskListFilter, user: t.Optional[int] = None) -> t.List[TaskJob]: + def list(self, filter: TaskListFilter, user: Optional[int] = None) -> List[TaskJob]: q = self.session.query(TaskJob) if user: q = q.filter(TaskJob.owner_id == user) @@ -85,7 +85,7 @@ def list(self, filter: TaskListFilter, user: t.Optional[int] = None) -> t.List[T if filter.type: _types = [task_type.value for task_type in filter.type] q = q.filter(TaskJob.type.in_(_types)) # type: ignore - tasks: t.List[TaskJob] = q.all() + tasks: List[TaskJob] = q.all() return tasks def delete(self, tid: str) -> None: diff --git a/antarest/core/tasks/service.py b/antarest/core/tasks/service.py index 4ada902c58..fbbc4ec15f 100644 --- a/antarest/core/tasks/service.py +++ b/antarest/core/tasks/service.py @@ -13,10 +13,10 @@ import datetime import logging import time -import typing as t from abc import ABC, abstractmethod from concurrent.futures import Future, ThreadPoolExecutor from http import HTTPStatus +from typing import Awaitable, Callable, Dict, List, Optional from fastapi import HTTPException from sqlalchemy.orm import Session # type: ignore @@ -60,7 +60,7 @@ def notify_progress(self, progress: int) -> None: raise NotImplementedError() -Task = t.Callable[[ITaskNotifier], TaskResult] +Task = Callable[[ITaskNotifier], TaskResult] class ITaskService(ABC): @@ -69,22 +69,22 @@ def add_worker_task( self, task_type: TaskType, task_queue: str, - task_args: t.Dict[str, t.Union[int, float, bool, str]], - name: t.Optional[str], - ref_id: t.Optional[str], + task_args: Dict[str, int | float | bool | str], + name: Optional[str], + ref_id: Optional[str], request_params: RequestParameters, - ) -> t.Optional[str]: + ) -> Optional[str]: raise NotImplementedError() @abstractmethod def add_task( self, action: Task, - name: t.Optional[str], - task_type: t.Optional[TaskType], - ref_id: t.Optional[str], - progress: t.Optional[int], - custom_event_messages: t.Optional[CustomTaskEventMessages], + name: Optional[str], + task_type: Optional[TaskType], + ref_id: Optional[str], + progress: Optional[int], + custom_event_messages: Optional[CustomTaskEventMessages], request_params: RequestParameters, ) -> str: raise NotImplementedError() @@ -99,7 +99,7 @@ def status_task( raise NotImplementedError() @abstractmethod - def list_tasks(self, task_filter: TaskListFilter, request_params: RequestParameters) -> t.List[TaskDTO]: + def list_tasks(self, task_filter: TaskListFilter, request_params: RequestParameters) -> List[TaskDTO]: raise NotImplementedError() @abstractmethod @@ -169,7 +169,7 @@ def __init__( self.config = config self.repo = repository self.event_bus = event_bus - self.tasks: t.Dict[str, Future[None]] = {} + self.tasks: Dict[str, Future[None]] = {} self.threadpool = ThreadPoolExecutor(max_workers=config.tasks.max_workers, thread_name_prefix="taskjob_") self.event_bus.add_listener(self.create_task_event_callback(), 
[EventType.TASK_CANCEL_REQUEST]) self.remote_workers = config.tasks.remote_workers @@ -178,13 +178,13 @@ def _create_worker_task( self, task_id: str, task_type: str, - task_args: t.Dict[str, t.Union[int, float, bool, str]], + task_args: Dict[str, int | float | bool | str], ) -> Task: - task_result_wrapper: t.List[TaskResult] = [] + task_result_wrapper: List[TaskResult] = [] def _create_awaiter( - res_wrapper: t.List[TaskResult], - ) -> t.Callable[[Event], t.Awaitable[None]]: + res_wrapper: List[TaskResult], + ) -> Callable[[Event], Awaitable[None]]: async def _await_task_end(event: Event) -> None: task_event = WorkerTaskResult.model_validate(event.payload) if task_event.task_id == task_id: @@ -227,11 +227,11 @@ def add_worker_task( self, task_type: TaskType, task_queue: str, - task_args: t.Dict[str, t.Union[int, float, bool, str]], - name: t.Optional[str], - ref_id: t.Optional[str], + task_args: Dict[str, int | float | bool | str], + name: Optional[str], + ref_id: Optional[str], request_params: RequestParameters, - ) -> t.Optional[str]: + ) -> Optional[str]: if not self.check_remote_worker_for_queue(task_queue): logger.warning(f"Failed to find configured remote worker for task queue {task_queue}") return None @@ -249,11 +249,11 @@ def add_worker_task( def add_task( self, action: Task, - name: t.Optional[str], - task_type: t.Optional[TaskType], - ref_id: t.Optional[str], - progress: t.Optional[int], - custom_event_messages: t.Optional[CustomTaskEventMessages], + name: Optional[str], + task_type: Optional[TaskType], + ref_id: Optional[str], + progress: Optional[int], + custom_event_messages: Optional[CustomTaskEventMessages], request_params: RequestParameters, ) -> str: task = self._create_task(name, task_type, ref_id, progress, request_params) @@ -262,10 +262,10 @@ def add_task( def _create_task( self, - name: t.Optional[str], - task_type: t.Optional[TaskType], - ref_id: t.Optional[str], - progress: t.Optional[int], + name: Optional[str], + task_type: Optional[TaskType], + ref_id: Optional[str], + progress: Optional[int], request_params: RequestParameters, ) -> TaskJob: if not request_params.user: @@ -285,7 +285,7 @@ def _launch_task( self, action: Task, task: TaskJob, - custom_event_messages: t.Optional[CustomTaskEventMessages], + custom_event_messages: Optional[CustomTaskEventMessages], request_params: RequestParameters, ) -> None: if not request_params.user: @@ -308,7 +308,7 @@ def _launch_task( future = self.threadpool.submit(self._run_task, action, task.id, request_params.user, custom_event_messages) self.tasks[task.id] = future - def create_task_event_callback(self) -> t.Callable[[Event], t.Awaitable[None]]: + def create_task_event_callback(self) -> Callable[[Event], Awaitable[None]]: async def task_event_callback(event: Event) -> None: self._cancel_task(str(event.payload), dispatch=False) @@ -355,10 +355,10 @@ def status_task( ) @override - def list_tasks(self, task_filter: TaskListFilter, request_params: RequestParameters) -> t.List[TaskDTO]: + def list_tasks(self, task_filter: TaskListFilter, request_params: RequestParameters) -> List[TaskDTO]: return [task.to_dto() for task in self.list_db_tasks(task_filter, request_params)] - def list_db_tasks(self, task_filter: TaskListFilter, request_params: RequestParameters) -> t.List[TaskJob]: + def list_db_tasks(self, task_filter: TaskListFilter, request_params: RequestParameters) -> List[TaskJob]: if not request_params.user: raise MustBeAuthenticatedError() user = None if request_params.user.is_site_admin() else 
request_params.user.impersonator @@ -402,7 +402,7 @@ def _run_task( callback: Task, task_id: str, jwt_user: JWTUser, - custom_event_messages: t.Optional[CustomTaskEventMessages] = None, + custom_event_messages: Optional[CustomTaskEventMessages] = None, ) -> None: # We need to catch all exceptions so that the calling thread is guaranteed # to not die @@ -515,7 +515,7 @@ def _run_task( exc_info=inner_exc, ) - def get_task_progress(self, task_id: str, params: RequestParameters) -> t.Optional[int]: + def get_task_progress(self, task_id: str, params: RequestParameters) -> Optional[int]: task = self.repo.get_or_raise(task_id) user = params.user if user and (user.is_site_admin() or user.is_admin_token() or task.owner_id == user.impersonator): diff --git a/antarest/core/tasks/web.py b/antarest/core/tasks/web.py index 2638347417..e6db500d5f 100644 --- a/antarest/core/tasks/web.py +++ b/antarest/core/tasks/web.py @@ -13,7 +13,7 @@ import concurrent.futures import http import logging -import typing as t +from typing import Any, Optional from fastapi import APIRouter, Depends, HTTPException @@ -47,7 +47,7 @@ def create_tasks_api(service: TaskJobService, config: Config) -> APIRouter: def list_tasks( filter: TaskListFilter, current_user: JWTUser = Depends(auth.get_current_user), - ) -> t.Any: + ) -> Any: request_params = RequestParameters(user=current_user) return service.list_tasks(filter, request_params) @@ -99,7 +99,7 @@ def get_task( def cancel_task( task_id: str, current_user: JWTUser = Depends(auth.get_current_user), - ) -> t.Any: + ) -> Any: request_params = RequestParameters(user=current_user) return service.cancel_task(task_id, request_params, dispatch=True) @@ -107,9 +107,9 @@ def cancel_task( "/tasks/{task_id}/progress", tags=[APITag.tasks], summary="Retrieve task progress from task id", - response_model=t.Optional[int], + response_model=Optional[int], ) - def get_progress(task_id: str, current_user: JWTUser = Depends(auth.get_current_user)) -> t.Optional[int]: + def get_progress(task_id: str, current_user: JWTUser = Depends(auth.get_current_user)) -> Optional[int]: sanitized_task_id = sanitize_uuid(task_id) logger.info( f"Fetching task progress of task {sanitized_task_id}", diff --git a/antarest/core/utils/archives.py b/antarest/core/utils/archives.py index f1c734c83c..e0b196969d 100644 --- a/antarest/core/utils/archives.py +++ b/antarest/core/utils/archives.py @@ -13,10 +13,10 @@ import os import shutil import tempfile -import typing as t import zipfile from enum import StrEnum from pathlib import Path +from typing import Any, BinaryIO, Callable, List, Optional, Tuple import py7zr @@ -38,7 +38,7 @@ def archive_dir( src_dir_path: Path, target_archive_path: Path, remove_source_dir: bool = False, - archive_format: t.Optional[ArchiveFormat] = None, + archive_format: Optional[ArchiveFormat] = None, ) -> None: if archive_format is not None and target_archive_path.suffix != archive_format: raise ShouldNotHappenException( @@ -74,7 +74,7 @@ def is_zip(path: Path) -> bool: def read_in_zip( zip_path: Path, inside_zip_path: Path, - read: t.Callable[[t.Optional[Path]], None], + read: Callable[[Optional[Path]], None], ) -> None: tmp_dir = None try: @@ -88,7 +88,7 @@ def read_in_zip( tmp_dir.cleanup() -def extract_archive(stream: t.BinaryIO, target_dir: Path) -> None: +def extract_archive(stream: BinaryIO, target_dir: Path) -> None: """ Extract a ZIP archive to a given destination. 
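Only the signature of `extract_archive` changes here (`t.BinaryIO` becomes `BinaryIO`); the dispatch between ZIP and 7z archives (the module imports both `zipfile` and `py7zr`) is not visible in this hunk. A self-contained miniature of a stream-to-directory extraction for the ZIP case, with the helper name, file names, and target path invented for illustration:

import io
import zipfile
from pathlib import Path
from typing import BinaryIO


def extract_zip_stream(stream: BinaryIO, target_dir: Path) -> None:
    # Same shape as extract_archive(stream, target_dir) above, ZIP case only.
    target_dir.mkdir(parents=True, exist_ok=True)
    with zipfile.ZipFile(stream) as zf:
        zf.extractall(target_dir)


# Build an in-memory archive so the sketch runs without fixture files.
buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    zf.writestr("study/readme.txt", "demo")
buf.seek(0)
extract_zip_stream(buf, Path("extract_demo"))
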
@@ -122,7 +122,7 @@ def extract_archive(stream: t.BinaryIO, target_dir: Path) -> None: raise BadArchiveContent -def extract_file_to_tmp_dir(archive_path: Path, inside_archive_path: Path) -> t.Tuple[Path, t.Any]: +def extract_file_to_tmp_dir(archive_path: Path, inside_archive_path: Path) -> Tuple[Path, Any]: str_inside_archive_path = str(inside_archive_path).replace("\\", "/") tmp_dir = tempfile.TemporaryDirectory() try: @@ -184,7 +184,7 @@ def read_file_from_archive(archive_path: Path, posix_path: str) -> str: return read_original_file_in_archive(archive_path, posix_path).decode("utf-8") -def extract_lines_from_archive(root: Path, posix_path: str) -> t.List[str]: +def extract_lines_from_archive(root: Path, posix_path: str) -> List[str]: """ Extract text lines from various types of files. diff --git a/antarest/core/utils/utils.py b/antarest/core/utils/utils.py index 90e920e815..7544196f7f 100644 --- a/antarest/core/utils/utils.py +++ b/antarest/core/utils/utils.py @@ -16,8 +16,8 @@ import logging import re import time -import typing as t from pathlib import Path +from typing import Any, Callable, List, Optional, TypeVar from fastapi import HTTPException from typing_extensions import override @@ -39,7 +39,7 @@ def __hash__(self) -> int: return hash(tuple(sorted(self.__dict__.items()))) @override - def __eq__(self, other: t.Any) -> bool: + def __eq__(self, other: Any) -> bool: return isinstance(other, type(self)) and self.__dict__ == other.__dict__ @override @@ -65,7 +65,7 @@ def sanitize_string(string: str) -> str: return str(glob.escape(string)) -def get_default_config_path() -> t.Optional[Path]: +def get_default_config_path() -> Optional[Path]: config = Path("config.yaml") if config.exists(): return config @@ -94,17 +94,17 @@ def __init__(self) -> None: def reset_current(self) -> None: self.current_time = time.time() - def log_elapsed(self, logger_: t.Callable[[float], None], since_start: bool = False) -> None: + def log_elapsed(self, logger_: Callable[[float], None], since_start: bool = False) -> None: logger_(time.time() - (self.start_time if since_start else self.current_time)) self.current_time = time.time() -T = t.TypeVar("T") +T = TypeVar("T") -def retry(func: t.Callable[[], T], attempts: int = 10, interval: float = 0.5) -> T: +def retry(func: Callable[[], T], attempts: int = 10, interval: float = 0.5) -> T: attempt = 0 - caught_exception: t.Optional[Exception] = None + caught_exception: Optional[Exception] = None while attempt < attempts: try: attempt += 1 @@ -116,12 +116,12 @@ def retry(func: t.Callable[[], T], attempts: int = 10, interval: float = 0.5) -> raise caught_exception or ShouldNotHappenException() -def assert_this(b: t.Any) -> None: +def assert_this(b: Any) -> None: if not b: raise AssertionError -def concat_files(files: t.List[Path], target: Path) -> None: +def concat_files(files: List[Path], target: Path) -> None: with open(target, "w") as fh: for item in files: with open(item, "r") as infile: @@ -129,7 +129,7 @@ def concat_files(files: t.List[Path], target: Path) -> None: fh.write(line) -def concat_files_to_str(files: t.List[Path]) -> str: +def concat_files_to_str(files: List[Path]) -> str: concat_str = "" for item in files: with open(item, "r") as infile: @@ -139,9 +139,9 @@ def concat_files_to_str(files: t.List[Path]) -> str: def suppress_exception( - callback: t.Callable[[], T], - logger_: t.Callable[[Exception], None], -) -> t.Optional[T]: + callback: Callable[[], T], + logger_: Callable[[Exception], None], +) -> Optional[T]: try: return callback() except 
Exception as e: diff --git a/antarest/eventbus/main.py b/antarest/eventbus/main.py index e1268dc374..6107d770a6 100644 --- a/antarest/eventbus/main.py +++ b/antarest/eventbus/main.py @@ -12,7 +12,6 @@ from typing import Optional -from fastapi import APIRouter, FastAPI from redis import Redis from antarest.core.application import AppBuildContext diff --git a/antarest/eventbus/service.py b/antarest/eventbus/service.py index f414712743..f5f5b38a29 100644 --- a/antarest/eventbus/service.py +++ b/antarest/eventbus/service.py @@ -14,7 +14,6 @@ import logging import random import threading -import time import uuid from typing import Awaitable, Callable, Dict, List, Optional diff --git a/antarest/fastapi_jwt_auth/auth_jwt.py b/antarest/fastapi_jwt_auth/auth_jwt.py index 19fa3173b4..55f226c427 100644 --- a/antarest/fastapi_jwt_auth/auth_jwt.py +++ b/antarest/fastapi_jwt_auth/auth_jwt.py @@ -525,12 +525,15 @@ def _verify_and_get_jwt_in_cookies( if not isinstance(request, (Request, WebSocket)): raise TypeError("request must be an instance of 'Request' or 'WebSocket'") + cookie = None + cookie_key = None + if type_token == "access": cookie_key = self._access_cookie_key cookie = request.cookies.get(cookie_key) if not isinstance(request, WebSocket): csrf_token = request.headers.get(self._access_csrf_header_name) - if type_token == "refresh": + elif type_token == "refresh": cookie_key = self._refresh_cookie_key cookie = request.cookies.get(cookie_key) if not isinstance(request, WebSocket): diff --git a/antarest/fastapi_jwt_auth/config.py b/antarest/fastapi_jwt_auth/config.py index f04ace9cd7..6ba7ab258a 100644 --- a/antarest/fastapi_jwt_auth/config.py +++ b/antarest/fastapi_jwt_auth/config.py @@ -1,12 +1,11 @@ -import typing as t from datetime import timedelta -from typing import List, Optional, Sequence, Union +from typing import Any, Dict, List, Optional, Sequence, Set, Union from pydantic import BaseModel, StrictBool, StrictInt, StrictStr, ValidationError, field_validator, model_validator class LoadConfig(BaseModel): - authjwt_token_location: Optional[t.Set[StrictStr]] = {"headers"} + authjwt_token_location: Optional[Set[StrictStr]] = {"headers"} authjwt_secret_key: Optional[StrictStr] = None authjwt_public_key: Optional[StrictStr] = None authjwt_private_key: Optional[StrictStr] = None @@ -17,7 +16,7 @@ class LoadConfig(BaseModel): authjwt_decode_issuer: Optional[StrictStr] = None authjwt_decode_audience: Optional[Union[StrictStr, Sequence[StrictStr]]] = None authjwt_denylist_enabled: Optional[StrictBool] = False - authjwt_denylist_token_checks: Optional[t.Set[StrictStr]] = {"access", "refresh"} + authjwt_denylist_token_checks: Optional[Set[StrictStr]] = {"access", "refresh"} authjwt_header_name: Optional[StrictStr] = "Authorization" authjwt_header_type: Optional[StrictStr] = "Bearer" authjwt_access_token_expires: Optional[Union[StrictBool, StrictInt, timedelta]] = timedelta(minutes=15) @@ -39,7 +38,7 @@ class LoadConfig(BaseModel): authjwt_refresh_csrf_cookie_path: Optional[StrictStr] = "/" authjwt_access_csrf_header_name: Optional[StrictStr] = "X-CSRF-Token" authjwt_refresh_csrf_header_name: Optional[StrictStr] = "X-CSRF-Token" - authjwt_csrf_methods: Optional[t.Set[StrictStr]] = {"POST", "PUT", "PATCH", "DELETE"} + authjwt_csrf_methods: Optional[Set[StrictStr]] = {"POST", "PUT", "PATCH", "DELETE"} @field_validator("authjwt_access_token_expires") def validate_access_token_expires( @@ -64,7 +63,7 @@ def validate_cookie_samesite(cls, v: Optional[StrictStr]) -> Optional[StrictStr] return v 
@model_validator(mode="before") - def check_type_validity(cls, values: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]: + def check_type_validity(cls, values: Dict[str, Any]) -> Dict[str, Any]: for _ in values.get("authjwt_csrf_methods", []): if _.upper() not in ["POST", "PUT", "PATCH", "DELETE"]: raise ValidationError( diff --git a/antarest/front.py b/antarest/front.py index 98f5ab83d8..b8772a82e1 100644 --- a/antarest/front.py +++ b/antarest/front.py @@ -20,9 +20,8 @@ what are the API and websocket prefixes """ -import re from pathlib import Path -from typing import Any, List, Optional, Sequence +from typing import Any, List, Optional from fastapi import FastAPI from starlette.middleware.base import BaseHTTPMiddleware, DispatchFunction, RequestResponseEndpoint diff --git a/antarest/launcher/adapters/log_parser.py b/antarest/launcher/adapters/log_parser.py index 0583340917..889b2205ab 100644 --- a/antarest/launcher/adapters/log_parser.py +++ b/antarest/launcher/adapters/log_parser.py @@ -12,22 +12,22 @@ import functools import re -import typing as t +from typing import Callable, Iterable, Match, Optional, cast from antarest.core.serialization import AntaresBaseModel -_SearchFunc = t.Callable[[str], t.Optional[t.Match[str]]] +_SearchFunc = Callable[[str], Optional[Match[str]]] _compile = functools.partial(re.compile, flags=re.IGNORECASE | re.VERBOSE) # Search for the line indicating the loading of areas (first line of data loading). -_loading_areas = t.cast( +_loading_areas = cast( _SearchFunc, _compile(r"Loading \s+ the \s+ list \s+ of \s+ areas").search, ) # Search for the total number of Monté-Carlo (MC) years. -_total_mc_years = t.cast( +_total_mc_years = cast( _SearchFunc, _compile( r""" @@ -39,19 +39,19 @@ ) # Search for the line indicating the export of annual results of a Monté-Carlo year. -_annual_results = t.cast( +_annual_results = cast( _SearchFunc, _compile(r"Exporting \s+ the \s+ annual \s+ results").search, ) # Search for the line indicating the export of survey results. -_survey_results = t.cast( +_survey_results = cast( _SearchFunc, _compile(r"Exporting \s+ the \s+ survey \s+ results").search, ) # Search for the line indicating the solver is quitting gracefully or an error -_quitting = t.cast( +_quitting = cast( _SearchFunc, _compile( r""" @@ -100,7 +100,7 @@ def _update_progress(self, line: str) -> bool: return True return False - def parse_log_lines(self, lines: t.Iterable[str]) -> bool: + def parse_log_lines(self, lines: Iterable[str]) -> bool: """ Parses a sequence of log lines and updates the progress accordingly. 
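The `cast`/`_compile` idiom in log_parser.py survives the import change untouched: `functools.partial` pins `re.IGNORECASE | re.VERBOSE` onto every pattern, and `cast` narrows the bound `.search` method to the `_SearchFunc` alias. A runnable miniature of the same idiom, on a made-up marker line rather than the real solver log patterns:

import functools
import re
from typing import Callable, Match, Optional, cast

_SearchFunc = Callable[[str], Optional[Match[str]]]
_compile = functools.partial(re.compile, flags=re.IGNORECASE | re.VERBOSE)

# re.VERBOSE ignores the literal spaces inside the pattern, so \s+ does the
# actual matching, exactly as in the patterns above.
_solver_done = cast(_SearchFunc, _compile(r"Solver \s+ finished").search)

assert _solver_done("solver   FINISHED") is not None
assert _solver_done("solver crashed") is None
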
diff --git a/antarest/launcher/adapters/slurm_launcher/slurm_launcher.py b/antarest/launcher/adapters/slurm_launcher/slurm_launcher.py index 79e8b94fb9..6b63a70a9a 100644 --- a/antarest/launcher/adapters/slurm_launcher/slurm_launcher.py +++ b/antarest/launcher/adapters/slurm_launcher/slurm_launcher.py @@ -19,8 +19,8 @@ import threading import time import traceback -import typing as t from pathlib import Path +from typing import Awaitable, Callable, Dict, List, Optional, cast from antares.study.version import SolverVersion from antareslauncher.data_repo.data_repo_tinydb import DataRepoTinydb @@ -75,8 +75,8 @@ def __init__(self, launcher_args: argparse.Namespace): super().__init__() # known arguments - self.other_options: t.Optional[str] = None - self.xpansion_mode: t.Optional[str] = None + self.other_options: Optional[str] = None + self.xpansion_mode: Optional[str] = None self.time_limit: int = 0 self.n_cpu: int = 0 self.post_processing: bool = False @@ -157,8 +157,8 @@ def __init__( self.check_state: bool = True self.event_bus = event_bus self.event_bus.add_listener(self._create_event_listener(), [EventType.STUDY_JOB_CANCEL_REQUEST]) - self.thread: t.Optional[threading.Thread] = None - self.job_list: t.List[str] = [] + self.thread: Optional[threading.Thread] = None + self.job_list: List[str] = [] self._check_config() self.antares_launcher_lock = threading.Lock() @@ -238,7 +238,7 @@ def stop(self) -> None: self.thread = None logger.info("slurm_launcher loop stopped") - def _init_launcher_arguments(self, local_workspace: t.Optional[Path] = None) -> argparse.Namespace: + def _init_launcher_arguments(self, local_workspace: Optional[Path] = None) -> argparse.Namespace: main_options_parameters = ParserParameters( default_wait_time=self.slurm_config.default_wait_time, default_time_limit=self.slurm_config.time_limit.default * 3600, @@ -255,7 +255,7 @@ def _init_launcher_arguments(self, local_workspace: t.Optional[Path] = None) -> parser.add_basic_arguments() parser.add_advanced_arguments() - arguments = t.cast(argparse.Namespace, parser.parse_args([])) + arguments = cast(argparse.Namespace, parser.parse_args([])) arguments.wait_mode = False arguments.check_queue = False arguments.json_ssh_config = None @@ -267,7 +267,7 @@ def _init_launcher_arguments(self, local_workspace: t.Optional[Path] = None) -> return arguments - def _init_launcher_parameters(self, local_workspace: t.Optional[Path] = None) -> MainParameters: + def _init_launcher_parameters(self, local_workspace: Optional[Path] = None) -> MainParameters: return MainParameters( json_dir=local_workspace or self.slurm_config.local_workspace, default_json_db_name=self.slurm_config.default_json_db_name, @@ -296,13 +296,13 @@ def _delete_workspace_file(self, study_path: Path) -> None: def _import_study_output( self, job_id: str, - xpansion_mode: t.Optional[str] = None, - log_dir: t.Optional[str] = None, - ) -> t.Optional[str]: + xpansion_mode: Optional[str] = None, + log_dir: Optional[str] = None, + ) -> Optional[str]: if xpansion_mode: self._import_xpansion_result(job_id, xpansion_mode) - launcher_logs: t.Dict[str, t.List[Path]] = {} + launcher_logs: Dict[str, List[Path]] = {} if log_dir is not None: launcher_logs = { log_name: log_path @@ -456,17 +456,17 @@ def _handle_success(self, study: StudyDTO) -> None: self.callbacks.update_status(study.name, JobStatus.SUCCESS, None, output_id) @staticmethod - def _get_log_path(study: StudyDTO, log_type: LogType = LogType.STDOUT) -> t.Optional[Path]: + def _get_log_path(study: StudyDTO, log_type: LogType = 
LogType.STDOUT) -> Optional[Path]: log_dir = Path(study.job_log_dir) return SlurmLauncher._get_log_path_from_log_dir(log_dir, log_type) @staticmethod - def _find_log_dir(base_log_dir: Path, job_id: str) -> t.Optional[Path]: + def _find_log_dir(base_log_dir: Path, job_id: str) -> Optional[Path]: pattern = f"{job_id}*" return next(iter(base_log_dir.glob(pattern)), None) @staticmethod - def _get_log_path_from_log_dir(log_dir: Path, log_type: LogType = LogType.STDOUT) -> t.Optional[Path]: + def _get_log_path_from_log_dir(log_dir: Path, log_type: LogType = LogType.STDOUT) -> Optional[Path]: pattern = { LogType.STDOUT: "antares-out-*", LogType.STDERR: "antares-err-*", @@ -601,8 +601,8 @@ def run_study( thread.start() @override - def get_log(self, job_id: str, log_type: LogType) -> t.Optional[str]: - log_path: t.Optional[Path] = None + def get_log(self, job_id: str, log_type: LogType) -> Optional[str]: + log_path: Optional[Path] = None for study in self.data_repo_tinydb.get_list_of_studies(): if study.name == job_id: log_path = SlurmLauncher._get_log_path(study, log_type) @@ -612,7 +612,7 @@ def get_log(self, job_id: str, log_type: LogType) -> t.Optional[str]: log_path = SlurmLauncher._get_log_path_from_log_dir(log_dir, log_type) return log_path.read_text() if log_path else None - def _create_event_listener(self) -> t.Callable[[Event], t.Awaitable[None]]: + def _create_event_listener(self) -> Callable[[Event], Awaitable[None]]: async def _listen_to_kill_job(event: Event) -> None: self.kill_job(event.payload, dispatch=False) diff --git a/antarest/launcher/main.py b/antarest/launcher/main.py index 7f794b9a9c..076253a6ba 100644 --- a/antarest/launcher/main.py +++ b/antarest/launcher/main.py @@ -12,8 +12,6 @@ from typing import Optional -from fastapi import APIRouter, FastAPI - from antarest.core.application import AppBuildContext from antarest.core.config import Config from antarest.core.filetransfer.service import FileTransferManager diff --git a/antarest/launcher/model.py b/antarest/launcher/model.py index df5c9cb903..47c37bee3c 100644 --- a/antarest/launcher/model.py +++ b/antarest/launcher/model.py @@ -11,8 +11,8 @@ # This file is part of the Antares project. import enum -import typing as t from datetime import datetime +from typing import Any, Dict, List, MutableMapping, Optional from pydantic import Field from sqlalchemy import Column, DateTime, Enum, ForeignKey, Integer, Sequence, String # type: ignore @@ -26,7 +26,7 @@ class XpansionParametersDTO(AntaresBaseModel): - output_id: t.Optional[str] = None + output_id: Optional[str] = None sensitivity_mode: bool = False enabled: bool = True @@ -35,21 +35,21 @@ class LauncherParametersDTO(AntaresBaseModel): # Warning ! 
This class must be retro-compatible (that's the reason for the weird bool/XpansionParametersDTO union) # The reason is that it's stored in json format in database and deserialized using the latest class version # If compatibility is to be broken, an (alembic) data migration script should be added - adequacy_patch: t.Optional[t.Dict[str, t.Any]] = None - nb_cpu: t.Optional[int] = None + adequacy_patch: Optional[Dict[str, Any]] = None + nb_cpu: Optional[int] = None post_processing: bool = False time_limit: int = 240 * 3600 # Default value set to 240 hours (in seconds) - xpansion: t.Union[XpansionParametersDTO, bool, None] = None + xpansion: XpansionParametersDTO | bool | None = None xpansion_r_version: bool = False archive_output: bool = True auto_unzip: bool = True - output_suffix: t.Optional[str] = None - other_options: t.Optional[str] = None + output_suffix: Optional[str] = None + other_options: Optional[str] = None # add extensions field here @classmethod - def from_launcher_params(cls, params: t.Optional[str]) -> "LauncherParametersDTO": + def from_launcher_params(cls, params: Optional[str]) -> "LauncherParametersDTO": """ Convert the launcher parameters from a string to a `LauncherParametersDTO` object. """ @@ -63,7 +63,7 @@ class LogType(enum.StrEnum): STDERR = "STDERR" @staticmethod - def from_filename(filename: str) -> t.Optional["LogType"]: + def from_filename(filename: str) -> Optional["LogType"]: if filename == "antares-err.log": return LogType.STDERR elif filename == "antares-out.log": @@ -113,20 +113,20 @@ class JobResultDTO(AntaresBaseModel): id: str study_id: str - launcher: t.Optional[str] - launcher_params: t.Optional[str] + launcher: Optional[str] + launcher_params: Optional[str] status: JobStatus creation_date: str - completion_date: t.Optional[str] - msg: t.Optional[str] - output_id: t.Optional[str] - exit_code: t.Optional[int] - solver_stats: t.Optional[str] - owner: t.Optional[UserInfo] + completion_date: Optional[str] + msg: Optional[str] + output_id: Optional[str] + exit_code: Optional[int] + solver_stats: Optional[str] + owner: Optional[UserInfo] class Config: @staticmethod - def json_schema_extra(schema: t.MutableMapping[str, t.Any]) -> None: + def json_schema_extra(schema: MutableMapping[str, Any]) -> None: schema["example"] = JobResultDTO( id="b2a9f6a7-7f8f-4f7a-9a8b-1f9b4c5d6e7f", study_id="b2a9f6a7-7f8f-4f7a-9a8b-1f9b4c5d6e7f", @@ -177,21 +177,21 @@ class JobResult(Base): # type: ignore id: str = Column(String(36), primary_key=True) study_id: str = Column(String(36)) - launcher: t.Optional[str] = Column(String) - launcher_params: t.Optional[str] = Column(String, nullable=True) + launcher: Optional[str] = Column(String) + launcher_params: Optional[str] = Column(String, nullable=True) job_status: JobStatus = Column(Enum(JobStatus)) creation_date = Column(DateTime, default=datetime.utcnow) completion_date = Column(DateTime) - msg: t.Optional[str] = Column(String()) - output_id: t.Optional[str] = Column(String()) - exit_code: t.Optional[int] = Column(Integer) - solver_stats: t.Optional[str] = Column(String(), nullable=True) - owner_id: t.Optional[int] = Column(Integer(), ForeignKey(Identity.id, ondelete="SET NULL"), nullable=True) + msg: Optional[str] = Column(String()) + output_id: Optional[str] = Column(String()) + exit_code: Optional[int] = Column(Integer) + solver_stats: Optional[str] = Column(String(), nullable=True) + owner_id: Optional[int] = Column(Integer(), ForeignKey(Identity.id, ondelete="SET NULL"), nullable=True) # Define a many-to-one relationship 
between `JobResult` and `Identity`. # This relationship is required to display the owner of a job result in the UI. # If the owner is deleted, the job result is detached from the owner (but not deleted). - owner: t.Optional[Identity] = relationship(Identity, back_populates="job_results", uselist=False) + owner: Optional[Identity] = relationship(Identity, back_populates="job_results", uselist=False) logs = relationship(JobLog, uselist=True, cascade="all, delete, delete-orphan") @@ -242,7 +242,7 @@ class JobCreationDTO(AntaresBaseModel): class LauncherEnginesDTO(AntaresBaseModel): - engines: t.List[str] + engines: List[str] @camel_case_model diff --git a/antarest/login/model.py b/antarest/login/model.py index cfa7a7a015..79f959ec32 100644 --- a/antarest/login/model.py +++ b/antarest/login/model.py @@ -11,8 +11,8 @@ # This file is part of the Antares project. import contextlib -import typing as t import uuid +from typing import TYPE_CHECKING, List, Mapping, Optional import bcrypt from sqlalchemy import Boolean, Column, Enum, ForeignKey, Integer, Sequence, String # type: ignore @@ -26,7 +26,7 @@ from antarest.core.roles import RoleType from antarest.core.serialization import AntaresBaseModel -if t.TYPE_CHECKING: +if TYPE_CHECKING: # avoid circular import from antarest.core.tasks.model import TaskJob from antarest.launcher.model import JobResult @@ -57,7 +57,7 @@ class BotRoleCreateDTO(AntaresBaseModel): class BotCreateDTO(AntaresBaseModel): name: str - roles: t.List[BotRoleCreateDTO] + roles: List[BotRoleCreateDTO] is_author: bool = True @@ -67,7 +67,7 @@ class UserCreateDTO(AntaresBaseModel): class GroupDTO(AntaresBaseModel): - id: t.Optional[str] = None + id: Optional[str] = None name: str @@ -78,7 +78,7 @@ class RoleCreationDTO(AntaresBaseModel): class RoleDTO(AntaresBaseModel): - group_id: t.Optional[str] + group_id: Optional[str] group_name: str identity_id: int type: RoleType @@ -87,7 +87,7 @@ class RoleDTO(AntaresBaseModel): class IdentityDTO(AntaresBaseModel): id: int name: str - roles: t.List[RoleDTO] + roles: List[RoleDTO] class RoleDetailDTO(AntaresBaseModel): @@ -100,7 +100,7 @@ class BotIdentityDTO(AntaresBaseModel): id: int name: str isAuthor: bool - roles: t.List[RoleDTO] + roles: List[RoleDTO] class BotDTO(UserInfo): @@ -115,7 +115,7 @@ class UserRoleDTO(AntaresBaseModel): class GroupDetailDTO(GroupDTO): - users: t.List[UserRoleDTO] + users: List[UserRoleDTO] class Password: @@ -154,11 +154,11 @@ class Identity(Base): # type: ignore # Define a one-to-many relationship with `JobResult`. # If an identity is deleted, all the associated job results are detached from the identity. - job_results: t.List["JobResult"] = relationship("JobResult", back_populates="owner", cascade="save-update, merge") + job_results: List["JobResult"] = relationship("JobResult", back_populates="owner", cascade="save-update, merge") # Define a one-to-many relationship with `TaskJob`. # If an identity is deleted, all the associated task jobs are detached from the identity. 
- owned_jobs: t.List["TaskJob"] = relationship("TaskJob", back_populates="owner", cascade="save-update, merge") + owned_jobs: List["TaskJob"] = relationship("TaskJob", back_populates="owner", cascade="save-update, merge") def to_dto(self) -> UserInfo: return UserInfo(id=self.id, name=self.name) @@ -323,7 +323,7 @@ class CredentialsDTO(AntaresBaseModel): refresh_token: str -def init_admin_user(engine: Engine, session_args: t.Mapping[str, bool], admin_password: str) -> None: +def init_admin_user(engine: Engine, session_args: Mapping[str, bool], admin_password: str) -> None: """ Create the default admin user, group and role if they do not already exist in the database. diff --git a/antarest/login/utils.py b/antarest/login/utils.py index 459a1ff2ae..2ddedf92cd 100644 --- a/antarest/login/utils.py +++ b/antarest/login/utils.py @@ -11,9 +11,8 @@ # This file is part of the Antares project. import contextlib -import typing as t from contextvars import ContextVar -from typing import Optional +from typing import Iterator, Optional from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint from starlette.requests import Request @@ -25,7 +24,7 @@ from antarest.fastapi_jwt_auth import AuthJWT from antarest.login.auth import Auth -_current_user: ContextVar[t.Optional[JWTUser]] = ContextVar("_current_user", default=None) +_current_user: ContextVar[Optional[JWTUser]] = ContextVar("_current_user", default=None) class CurrentUserMiddleware(BaseHTTPMiddleware): @@ -57,7 +56,7 @@ def get_current_user() -> Optional[JWTUser]: @contextlib.contextmanager -def current_user_context(token: Optional[JWTUser]) -> t.Iterator[JWTUser | None]: +def current_user_context(token: Optional[JWTUser]) -> Iterator[JWTUser | None]: global _current_user _current_user.set(token) diff --git a/antarest/matrixstore/model.py b/antarest/matrixstore/model.py index b117e12e1f..1f6e107788 100644 --- a/antarest/matrixstore/model.py +++ b/antarest/matrixstore/model.py @@ -11,8 +11,8 @@ # This file is part of the Antares project. 
import datetime -import typing as t import uuid +from typing import Any, List from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer, String, Table # type: ignore from sqlalchemy.orm import relationship # type: ignore @@ -48,7 +48,7 @@ def __repr__(self) -> str: # pragma: no cover return f"Matrix(id={self.id}, shape={(self.height, self.width)}, created_at={self.created_at})" @override - def __eq__(self, other: t.Any) -> bool: + def __eq__(self, other: Any) -> bool: if not isinstance(other, Matrix): return False @@ -69,9 +69,9 @@ class MatrixInfoDTO(AntaresBaseModel): class MatrixDataSetDTO(AntaresBaseModel): id: str name: str - matrices: t.List[MatrixInfoDTO] + matrices: List[MatrixInfoDTO] owner: UserInfo - groups: t.List[GroupDTO] + groups: List[GroupDTO] public: bool created_at: str updated_at: str @@ -110,7 +110,7 @@ def __repr__(self) -> str: # pragma: no cover return f"MatrixDataSetRelation(dataset_id={self.dataset_id}, matrix_id={self.matrix_id}, name={self.name})" @override - def __eq__(self, other: t.Any) -> bool: + def __eq__(self, other: Any) -> bool: if not isinstance(other, MatrixDataSetRelation): return False @@ -190,7 +190,7 @@ def __repr__(self) -> str: # pragma: no cover ) @override - def __eq__(self, other: t.Any) -> bool: + def __eq__(self, other: Any) -> bool: if not isinstance(other, MatrixDataSet): return False @@ -219,9 +219,9 @@ def __eq__(self, other: t.Any) -> bool: class MatrixDTO(AntaresBaseModel): width: int height: int - index: t.List[str] - columns: t.List[str] - data: t.List[t.List[MatrixData]] + index: List[str] + columns: List[str] + data: List[List[MatrixData]] created_at: int = 0 id: str = "" @@ -236,12 +236,12 @@ class MatrixContent(AntaresBaseModel): columns: A list of columns indexes or names. """ - data: t.List[t.List[MatrixData]] - index: t.List[t.Union[int, str]] - columns: t.List[t.Union[int, str]] + data: List[List[MatrixData]] + index: List[int | str] + columns: List[int | str] class MatrixDataSetUpdateDTO(AntaresBaseModel): name: str - groups: t.List[str] + groups: List[str] public: bool diff --git a/antarest/matrixstore/repository.py b/antarest/matrixstore/repository.py index 0e02ac61fb..c3ac28232a 100644 --- a/antarest/matrixstore/repository.py +++ b/antarest/matrixstore/repository.py @@ -12,8 +12,8 @@ import hashlib import logging -import typing as t from pathlib import Path +from typing import List, Optional import numpy as np import pandas as pd @@ -34,7 +34,7 @@ class MatrixDataSetRepository: Database connector to manage Matrix metadata entity """ - def __init__(self, session: t.Optional[Session] = None) -> None: + def __init__(self, session: Optional[Session] = None) -> None: self._session = session @property @@ -55,19 +55,19 @@ def save(self, matrix_user_metadata: MatrixDataSet) -> MatrixDataSet: logger.debug(f"Matrix dataset {matrix_user_metadata.id} for user {matrix_user_metadata.owner_id} saved") return matrix_user_metadata - def get(self, id_number: str) -> t.Optional[MatrixDataSet]: + def get(self, id_number: str) -> Optional[MatrixDataSet]: matrix: MatrixDataSet = self.session.query(MatrixDataSet).get(id_number) return matrix - def get_all_datasets(self) -> t.List[MatrixDataSet]: - matrix_datasets: t.List[MatrixDataSet] = self.session.query(MatrixDataSet).all() + def get_all_datasets(self) -> List[MatrixDataSet]: + matrix_datasets: List[MatrixDataSet] = self.session.query(MatrixDataSet).all() return matrix_datasets def query( self, - name: t.Optional[str], - owner: t.Optional[int] = None, - ) -> 
t.List[MatrixDataSet]: + name: Optional[str], + owner: Optional[int] = None, + ) -> List[MatrixDataSet]: """ Query a list of MatrixUserMetadata by searching for each one separately if a set of filter match @@ -83,7 +83,7 @@ def query( query = query.filter(MatrixDataSet.name.ilike(f"%{name}%")) # type: ignore if owner is not None: query = query.filter(MatrixDataSet.owner_id == owner) - datasets: t.List[MatrixDataSet] = query.distinct().all() + datasets: List[MatrixDataSet] = query.distinct().all() return datasets def delete(self, dataset_id: str) -> None: @@ -97,7 +97,7 @@ class MatrixRepository: Database connector to manage Matrix entity. """ - def __init__(self, session: t.Optional[Session] = None) -> None: + def __init__(self, session: Optional[Session] = None) -> None: self._session = session @property @@ -117,7 +117,7 @@ def save(self, matrix: Matrix) -> Matrix: logger.debug(f"Matrix {matrix.id} saved") return matrix - def get(self, matrix_hash: str) -> t.Optional[Matrix]: + def get(self, matrix_hash: str) -> Optional[Matrix]: matrix: Matrix = self.session.query(Matrix).get(matrix_hash) return matrix @@ -162,7 +162,7 @@ def get(self, matrix_hash: str) -> MatrixContent: Returns: The matrix content or `None` if the file is not found. """ - storage_format: t.Optional[InternalMatrixFormat] = None + storage_format: Optional[InternalMatrixFormat] = None for internal_format in InternalMatrixFormat: matrix_path = self.bucket_dir.joinpath(f"{matrix_hash}.{internal_format}") if matrix_path.exists(): @@ -193,7 +193,7 @@ def exists(self, matrix_hash: str) -> bool: return True return False - def save(self, content: t.Union[t.List[t.List[MatrixData]], npt.NDArray[np.float64]]) -> str: + def save(self, content: List[List[MatrixData]] | npt.NDArray[np.float64]) -> str: """ The matrix content will be saved in the repository given format, where each row represents a line in the file and the values are separated by tabs. The file will be saved diff --git a/antarest/matrixstore/service.py b/antarest/matrixstore/service.py index ca69f40fa2..3484ff921a 100644 --- a/antarest/matrixstore/service.py +++ b/antarest/matrixstore/service.py @@ -14,11 +14,11 @@ import io import logging import tempfile -import typing as t import zipfile from abc import ABC, abstractmethod from datetime import datetime from pathlib import Path +from typing import List, Optional, Sequence, Tuple import numpy as np import py7zr @@ -72,11 +72,11 @@ def __init__(self, matrix_content_repository: MatrixContentRepository) -> None: self.matrix_content_repository = matrix_content_repository @abstractmethod - def create(self, data: t.Union[t.List[t.List[MatrixData]], npt.NDArray[np.float64]]) -> str: + def create(self, data: List[List[MatrixData]] | npt.NDArray[np.float64]) -> str: raise NotImplementedError() @abstractmethod - def get(self, matrix_id: str) -> t.Optional[MatrixDTO]: + def get(self, matrix_id: str) -> Optional[MatrixDTO]: raise NotImplementedError() @abstractmethod @@ -87,7 +87,7 @@ def exists(self, matrix_id: str) -> bool: def delete(self, matrix_id: str) -> None: raise NotImplementedError() - def get_matrix_id(self, matrix: t.Union[t.List[t.List[float]], str]) -> str: + def get_matrix_id(self, matrix: List[List[float]] | str) -> str: """ Get the matrix ID from a matrix or a matrix link. 
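A few hunks above, `MatrixContentRepository.get` probes `bucket_dir` for one candidate file per `InternalMatrixFormat` member and keeps the first that exists. The enum's actual members are not shown in this diff, so the suffixes in this runnable sketch of the probing loop are placeholders:

from pathlib import Path
from typing import Optional, Sequence


def find_matrix_file(bucket_dir: Path, matrix_hash: str,
                     suffixes: Sequence[str] = ("tsv", "hdf")) -> Optional[Path]:
    # Mirror of the probing loop in MatrixContentRepository.get: the first
    # existing "<hash>.<format>" file wins; None means the matrix is missing.
    for suffix in suffixes:
        candidate = bucket_dir / f"{matrix_hash}.{suffix}"
        if candidate.exists():
            return candidate
    return None


assert find_matrix_file(Path("/nonexistent"), "deadbeef") is None
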
@@ -114,7 +114,7 @@ def __init__(self, matrix_content_repository: MatrixContentRepository): super().__init__(matrix_content_repository=matrix_content_repository) @override - def create(self, data: t.Union[t.List[t.List[MatrixData]], npt.NDArray[np.float64]]) -> str: + def create(self, data: List[List[MatrixData]] | npt.NDArray[np.float64]) -> str: return self.matrix_content_repository.save(data) @override @@ -158,7 +158,7 @@ def __init__( self.config = config @staticmethod - def _from_dto(dto: MatrixDTO) -> t.Tuple[Matrix, MatrixContent]: + def _from_dto(dto: MatrixDTO) -> Tuple[Matrix, MatrixContent]: matrix = Matrix( id=dto.id, width=dto.width, @@ -171,7 +171,7 @@ def _from_dto(dto: MatrixDTO) -> t.Tuple[Matrix, MatrixContent]: return matrix, content @override - def create(self, data: t.Union[t.List[t.List[MatrixData]], npt.NDArray[np.float64]]) -> str: + def create(self, data: List[List[MatrixData]] | npt.NDArray[np.float64]) -> str: """ Creates a new matrix object with the specified data. @@ -208,7 +208,7 @@ def create(self, data: t.Union[t.List[t.List[MatrixData]], npt.NDArray[np.float6 self.repo.save(matrix) return matrix_id - def create_by_importation(self, file: UploadFile, is_json: bool = False) -> t.List[MatrixInfoDTO]: + def create_by_importation(self, file: UploadFile, is_json: bool = False) -> List[MatrixInfoDTO]: """ Imports a matrix from a TSV or JSON file or a collection of matrices from a ZIP file. @@ -232,7 +232,7 @@ def create_by_importation(self, file: UploadFile, is_json: bool = False) -> t.Li if file.content_type == "application/zip": with contextlib.closing(f): buffer = io.BytesIO(f.read()) - matrix_info: t.List[MatrixInfoDTO] = [] + matrix_info: List[MatrixInfoDTO] = [] if file.filename.endswith("zip"): with zipfile.ZipFile(buffer) as zf: for info in zf.infolist(): @@ -278,7 +278,7 @@ def get_dataset( self, id: str, params: RequestParameters, - ) -> t.Optional[MatrixDataSet]: + ) -> Optional[MatrixDataSet]: if not params.user: raise UserHasNotPermissionError() dataset = self.repo_dataset.get(id) @@ -291,7 +291,7 @@ def get_dataset( def create_dataset( self, dataset_info: MatrixDataSetUpdateDTO, - matrices: t.List[MatrixInfoDTO], + matrices: List[MatrixInfoDTO], params: RequestParameters, ) -> MatrixDataSet: if not params.user: @@ -337,10 +337,10 @@ def update_dataset( def list( self, - dataset_name: t.Optional[str], + dataset_name: Optional[str], filter_own: bool, params: RequestParameters, - ) -> t.List[MatrixDataSetDTO]: + ) -> List[MatrixDataSetDTO]: """ List matrix user metadata @@ -379,7 +379,7 @@ def delete_dataset(self, id: str, params: RequestParameters) -> str: return id @override - def get(self, matrix_id: str) -> t.Optional[MatrixDTO]: + def get(self, matrix_id: str) -> Optional[MatrixDTO]: """ Get a matrix object from the database and the matrix content repository. 
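One general remark that applies to every `t.Union[...]` → `X | Y` change in this diff: the `|` spelling is evaluated at runtime when Pydantic and FastAPI inspect these annotations, so it assumes Python 3.10+; a `from __future__ import annotations` alone would not be enough where Pydantic must resolve the annotation at runtime. A self-contained check:

from typing import Optional, Union

# PEP 604 unions are real runtime objects on Python >= 3.10 and compare
# equal to their typing.Union spelling, so the annotation change is
# behavior-preserving for Pydantic/FastAPI model building.
assert int | str == Union[int, str]
assert int | None == Optional[int]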
@@ -458,7 +458,7 @@ def check_access_permission( raise UserHasNotPermissionError() return access - def create_matrix_files(self, matrix_ids: t.Sequence[str], export_path: Path) -> str: + def create_matrix_files(self, matrix_ids: Sequence[str], export_path: Path) -> str: with tempfile.TemporaryDirectory(dir=self.config.storage.tmp_dir) as tmpdir: stopwatch = StopWatch() for mid in matrix_ids: @@ -505,7 +505,7 @@ def download_dataset( def download_matrix_list( self, - matrix_list: t.Sequence[str], + matrix_list: Sequence[str], dataset_name: str, params: RequestParameters, ) -> FileDownloadTaskDTO: diff --git a/antarest/service_creator.py b/antarest/service_creator.py index dcf94b3748..44670ffcd2 100644 --- a/antarest/service_creator.py +++ b/antarest/service_creator.py @@ -11,9 +11,9 @@ # This file is part of the Antares project. import logging -import typing as t from enum import StrEnum from pathlib import Path +from typing import Any, Dict, Mapping, Optional, Tuple import redis from ratelimit import RateLimitMiddleware # type: ignore @@ -54,7 +54,7 @@ logger = logging.getLogger(__name__) -SESSION_ARGS: t.Mapping[str, bool] = { +SESSION_ARGS: Mapping[str, bool] = { "autocommit": False, "expire_on_commit": False, "autoflush": False, @@ -82,7 +82,7 @@ def init_db_engine( ) -> Engine: if auto_upgrade_db: upgrade_db(config_file) - connect_args: t.Dict[str, t.Any] = {} + connect_args: Dict[str, Any] = {} if config.db.db_url.startswith("sqlite"): connect_args["check_same_thread"] = False else: @@ -119,7 +119,7 @@ def new_redis_instance(config: RedisConfig) -> redis.Redis: # type: ignore return redis_client # type: ignore -def create_event_bus(app_ctxt: t.Optional[AppBuildContext], config: Config) -> t.Tuple[IEventBus, t.Optional[redis.Redis]]: # type: ignore +def create_event_bus(app_ctxt: Optional[AppBuildContext], config: Config) -> Tuple[IEventBus, Optional[redis.Redis]]: # type: ignore redis_client = new_redis_instance(config.redis) if config.redis is not None else None return ( build_eventbus(app_ctxt, config, True, redis_client), @@ -128,8 +128,8 @@ def create_event_bus(app_ctxt: t.Optional[AppBuildContext], config: Config) -> t def create_core_services( - app_ctxt: t.Optional[AppBuildContext], config: Config -) -> t.Tuple[ICache, IEventBus, ITaskService, FileTransferManager, LoginService, MatrixService, StudyService]: + app_ctxt: Optional[AppBuildContext], config: Config +) -> Tuple[ICache, IEventBus, ITaskService, FileTransferManager, LoginService, MatrixService, StudyService]: event_bus, redis_client = create_event_bus(app_ctxt, config) cache = build_cache(config=config, redis_client=redis_client) filetransfer_service = build_filetransfer_service(app_ctxt, event_bus, config) @@ -166,8 +166,8 @@ def create_core_services( def create_watcher( config: Config, - app_ctxt: t.Optional[AppBuildContext], - study_service: t.Optional[StudyService] = None, + app_ctxt: Optional[AppBuildContext], + study_service: Optional[StudyService] = None, ) -> Watcher: if study_service: watcher = Watcher( @@ -189,7 +189,7 @@ def create_watcher( return watcher -def create_explorer(config: Config, app_ctxt: t.Optional[AppBuildContext]) -> t.Any: +def create_explorer(config: Config, app_ctxt: Optional[AppBuildContext]) -> Any: explorer = Explorer(config=config) if app_ctxt: app_ctxt.api_root.include_router(create_explorer_routes(config=config, explorer=explorer)) @@ -199,9 +199,9 @@ def create_explorer(config: Config, app_ctxt: t.Optional[AppBuildContext]) -> t. 
def create_matrix_gc( config: Config, - app_ctxt: t.Optional[AppBuildContext], - study_service: t.Optional[StudyService] = None, - matrix_service: t.Optional[MatrixService] = None, + app_ctxt: Optional[AppBuildContext], + study_service: Optional[StudyService] = None, + matrix_service: Optional[MatrixService] = None, ) -> MatrixGarbageCollector: if study_service and matrix_service: return MatrixGarbageCollector( @@ -222,17 +222,15 @@ def create_archive_worker( config: Config, workspace: str, local_root: Path = Path("/"), - event_bus: t.Optional[IEventBus] = None, + event_bus: Optional[IEventBus] = None, ) -> AbstractWorker: if not event_bus: event_bus, _ = create_event_bus(None, config) return ArchiveWorker(event_bus, workspace, local_root, config) -def create_services( - config: Config, app_ctxt: t.Optional[AppBuildContext], create_all: bool = False -) -> t.Dict[str, t.Any]: - services: t.Dict[str, t.Any] = {} +def create_services(config: Config, app_ctxt: Optional[AppBuildContext], create_all: bool = False) -> Dict[str, Any]: + services: Dict[str, Any] = {} ( cache, diff --git a/antarest/study/business/adequacy_patch_management.py b/antarest/study/business/adequacy_patch_management.py index 3b309e0b01..9ad3bd1c62 100644 --- a/antarest/study/business/adequacy_patch_management.py +++ b/antarest/study/business/adequacy_patch_management.py @@ -117,7 +117,7 @@ def get_value(field_info: FieldInfo) -> Any: return parent.get(target_name, field_info["default_value"]) if is_in_version else None - return AdequacyPatchFormFields.construct(**{name: get_value(info) for name, info in FIELDS_INFO.items()}) + return AdequacyPatchFormFields.model_construct(**{name: get_value(info) for name, info in FIELDS_INFO.items()}) def set_field_values(self, study: Study, field_values: AdequacyPatchFormFields) -> None: """ diff --git a/antarest/study/business/advanced_parameters_management.py b/antarest/study/business/advanced_parameters_management.py index 31a8d880e5..0b5c9dc809 100644 --- a/antarest/study/business/advanced_parameters_management.py +++ b/antarest/study/business/advanced_parameters_management.py @@ -240,7 +240,7 @@ def get_value(field_info: FieldInfo) -> Any: parent = seeds return parent.get(target_name, field_info["default_value"]) - return AdvancedParamsFormFields.construct(**{name: get_value(info) for name, info in FIELDS_INFO.items()}) + return AdvancedParamsFormFields.model_construct(**{name: get_value(info) for name, info in FIELDS_INFO.items()}) def set_field_values(self, study: Study, field_values: AdvancedParamsFormFields) -> None: """ diff --git a/antarest/study/business/aggregator_management.py b/antarest/study/business/aggregator_management.py index 6ee055bf2f..1b7e7f8481 100644 --- a/antarest/study/business/aggregator_management.py +++ b/antarest/study/business/aggregator_management.py @@ -11,9 +11,9 @@ # This file is part of the Antares project. 
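The `.construct(...)` → `.model_construct(...)` edits just above follow the Pydantic v2 renaming; both skip validation, which is the point when assembling form fields from data the application already trusts. A small, hypothetical illustration:

from typing import Optional
from pydantic import BaseModel

class FormFields(BaseModel):  # invented stand-in for the form models
    horizon: Optional[int] = None

# model_construct builds the instance without running validation or
# coercion, so it is fast but trusts its input completely.
fields = FormFields.model_construct(horizon="not-an-int")
print(fields.horizon)  # -> not-an-int (stored as-is)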
import logging -import typing as t from enum import StrEnum from pathlib import Path +from typing import Any, Dict, List, MutableSequence, Optional, Sequence import numpy as np import pandas as pd @@ -98,7 +98,7 @@ def _checks_estimated_size(nb_files: int, df_bytes_size: int, nb_files_checked: raise FileTooLargeError(estimated_df_size, maximum_size) -def _columns_ordering(df_cols: t.List[str], column_name: str, is_details: bool, mc_root: MCRoot) -> t.Sequence[str]: +def _columns_ordering(df_cols: List[str], column_name: str, is_details: bool, mc_root: MCRoot) -> Sequence[str]: # original columns org_cols = df_cols.copy() if is_details: @@ -116,12 +116,12 @@ def _columns_ordering(df_cols: t.List[str], column_name: str, is_details: bool, return new_column_order -def _infer_column_from_regex(cols: t.Sequence[str], col_regex: str) -> t.Optional[str]: +def _infer_column_from_regex(cols: Sequence[str], col_regex: str) -> Optional[str]: stripped_lower_col_regex = col_regex.lower().strip() return next((c for c in cols if stripped_lower_col_regex in c.lower().strip()), None) -def _infer_time_id(df: pd.DataFrame, is_details: bool) -> t.List[int]: +def _infer_time_id(df: pd.DataFrame, is_details: bool) -> List[int]: if is_details: return df[TIME_ID_COL].tolist() else: @@ -129,11 +129,11 @@ def _infer_time_id(df: pd.DataFrame, is_details: bool) -> t.List[int]: def _filtered_files_listing( - folders_to_check: t.List[Path], + folders_to_check: List[Path], query_file: str, frequency: str, -) -> t.Dict[str, t.MutableSequence[str]]: - filtered_files: t.Dict[str, t.MutableSequence[str]] = {} +) -> Dict[str, MutableSequence[str]]: + filtered_files: Dict[str, MutableSequence[str]] = {} for folder_path in folders_to_check: for file in folder_path.iterdir(): if file.stem == f"{query_file}-{frequency}": @@ -146,11 +146,11 @@ def __init__( self, study_path: Path, output_id: str, - query_file: t.Union[MCIndAreasQueryFile, MCAllAreasQueryFile, MCIndLinksQueryFile, MCAllLinksQueryFile], + query_file: MCIndAreasQueryFile | MCAllAreasQueryFile | MCIndLinksQueryFile | MCAllLinksQueryFile, frequency: MatrixFrequency, - ids_to_consider: t.Sequence[str], - columns_names: t.Sequence[str], - mc_years: t.Optional[t.Sequence[int]] = None, + ids_to_consider: Sequence[str], + columns_names: Sequence[str], + mc_years: Optional[Sequence[int]] = None, ): self.study_path = study_path self.output_id = output_id @@ -206,7 +206,7 @@ def _parse_output_file(self, file_path: Path, normalize_column_name: bool = True df.columns = pd.Index(new_cols) return df - def _filter_ids(self, folder_path: Path) -> t.List[str]: + def _filter_ids(self, folder_path: Path) -> List[str]: if self.output_type == "areas": # Areas names filtering areas_ids = sorted([d.name for d in folder_path.iterdir()]) @@ -220,7 +220,7 @@ def _filter_ids(self, folder_path: Path) -> t.List[str]: return [link for link in links_ids if link in self.ids_to_consider] return links_ids - def _gather_all_files_to_consider(self) -> t.Sequence[Path]: + def _gather_all_files_to_consider(self) -> Sequence[Path]: if self.mc_root == MCRoot.MC_IND: # Monte Carlo years filtering all_mc_years = [d.name for d in self.mc_ind_path.iterdir()] @@ -312,7 +312,7 @@ def _process_df(self, file_path: Path, is_details: bool) -> pd.DataFrame: # using a dictionary to build the new data frame with the base columns (NO2, production etc.) 
# and the cluster id and time id - new_obj: t.Dict[str, t.Any] = {k: [] for k in [CLUSTER_ID_COL, TIME_ID_COL] + actual_cols} + new_obj: Dict[str, Any] = {k: [] for k in [CLUSTER_ID_COL, TIME_ID_COL] + actual_cols} # loop over the cluster id to extract the values of the actual columns for cluster_id, dummy_component in cluster_dummy_product_cols: @@ -356,7 +356,7 @@ def _process_df(self, file_path: Path, is_details: bool) -> pd.DataFrame: # just extract the data frame from the file by merging the column components return self._parse_output_file(file_path) - def _build_dataframe(self, files: t.Sequence[Path]) -> pd.DataFrame: + def _build_dataframe(self, files: Sequence[Path]) -> pd.DataFrame: if self.mc_root not in [MCRoot.MC_IND, MCRoot.MC_ALL]: raise MCRootNotHandled(f"Unknown Monte Carlo root: {self.mc_root}") is_details = self.query_file in [ diff --git a/antarest/study/business/all_optional_meta.py b/antarest/study/business/all_optional_meta.py index 5d271f0772..9e15ecae6a 100644 --- a/antarest/study/business/all_optional_meta.py +++ b/antarest/study/business/all_optional_meta.py @@ -11,16 +11,16 @@ # This file is part of the Antares project. import copy -import typing as t +from typing import Optional, Type, TypeVar from pydantic import BaseModel, create_model from antarest.core.utils.string import to_camel_case -ModelClass = t.TypeVar("ModelClass", bound=BaseModel) +ModelClass = TypeVar("ModelClass", bound=BaseModel) -def all_optional_model(model: t.Type[ModelClass]) -> t.Type[ModelClass]: +def all_optional_model(model: Type[ModelClass]) -> Type[ModelClass]: """ This decorator can be used to make all fields of a pydantic model optional. @@ -34,13 +34,13 @@ def all_optional_model(model: t.Type[ModelClass]) -> t.Type[ModelClass]: for field_name, field_info in model.model_fields.items(): new = copy.deepcopy(field_info) new.default = None - new.annotation = t.Optional[field_info.annotation] # type: ignore + new.annotation = Optional[field_info.annotation] # type: ignore kwargs[field_name] = (new.annotation, new) return create_model(f"Partial{model.__name__}", __base__=model, __module__=model.__module__, **kwargs) # type: ignore -def camel_case_model(model: t.Type[BaseModel]) -> t.Type[BaseModel]: +def camel_case_model(model: Type[BaseModel]) -> Type[BaseModel]: """ This decorator can be used to modify a model to use camel case aliases. 
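To make the decorator's effect concrete, here is a short sketch of `all_optional_model` applied to an invented two-field model:

from pydantic import BaseModel
from antarest.study.business.all_optional_meta import all_optional_model

class LinkProps(BaseModel):  # invented model, for illustration only
    hurdles_cost: bool
    loop_flow: bool

PartialLinkProps = all_optional_model(LinkProps)

# Every field becomes Optional with a None default, so a PATCH-style
# payload may omit any subset of fields.
patch = PartialLinkProps(loop_flow=True)
assert patch.hurdles_cost is None and patch.loop_flow is True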
diff --git a/antarest/study/business/allocation_management.py b/antarest/study/business/allocation_management.py index e1901b0b75..0b8577a854 100644 --- a/antarest/study/business/allocation_management.py +++ b/antarest/study/business/allocation_management.py @@ -210,7 +210,7 @@ def set_allocation_form_fields( updated_allocations = self.get_allocation_data(study, area_id) - return AllocationFormFields.construct( + return AllocationFormFields.model_construct( allocation=[ AllocationField.construct(area_id=area, coefficient=value) for area, value in updated_allocations.items() @@ -250,4 +250,4 @@ def get_allocation_matrix(self, study: Study, all_areas: List[AreaInfoDTO]) -> A col_idx = columns.index(prod_area) array[row_idx][col_idx] = coefficient - return AllocationMatrix.construct(index=rows, columns=columns, data=array.tolist()) + return AllocationMatrix.model_construct(index=rows, columns=columns, data=array.tolist()) diff --git a/antarest/study/business/area_management.py b/antarest/study/business/area_management.py index ef93e4f908..21447352d2 100644 --- a/antarest/study/business/area_management.py +++ b/antarest/study/business/area_management.py @@ -12,7 +12,7 @@ import logging import re -import typing as t +from typing import Any, Dict, List, Mapping, Optional, Sequence from antarest.core.exceptions import ConfigFileNotFound, DuplicateAreaName, LayerNotAllowedToBeDeleted, LayerNotFound from antarest.core.model import JSON @@ -50,7 +50,7 @@ _THERMAL_AREAS_PATH = "input/thermal/areas" -def _get_ui_info_map(file_study: FileStudy, area_ids: t.Sequence[str]) -> t.Dict[str, t.Any]: +def _get_ui_info_map(file_study: FileStudy, area_ids: Sequence[str]) -> Dict[str, Any]: """ Get the UI information (a JSON object) for each selected Area. @@ -85,7 +85,7 @@ def _get_ui_info_map(file_study: FileStudy, area_ids: t.Sequence[str]) -> t.Dict return ui_info_map -def _get_area_layers(area_uis: t.Dict[str, t.Any], area: str) -> t.List[str]: +def _get_area_layers(area_uis: Dict[str, Any], area: str) -> List[str]: if area in area_uis and "ui" in area_uis[area] and "layers" in area_uis[area]["ui"]: return re.split(r"\s+", (str(area_uis[area]["ui"]["layers"]) or "")) return [] @@ -118,7 +118,7 @@ def __init__( self.patch_service = PatchService(repository=repository) # noinspection SpellCheckingInspection - def get_all_area_props(self, study: RawStudy) -> t.Mapping[str, AreaOutput]: + def get_all_area_props(self, study: RawStudy) -> Mapping[str, AreaOutput]: """ Retrieves all areas of a study. @@ -166,8 +166,8 @@ def get_all_area_props(self, study: RawStudy) -> t.Mapping[str, AreaOutput]: # noinspection SpellCheckingInspection def update_areas_props( - self, study: RawStudy, update_areas_by_ids: t.Mapping[str, AreaOutput] - ) -> t.Mapping[str, AreaOutput]: + self, study: RawStudy, update_areas_by_ids: Mapping[str, AreaOutput] + ) -> Mapping[str, AreaOutput]: """ Update the properties of areas. @@ -241,7 +241,7 @@ def update_areas_props( def get_table_schema() -> JSON: return AreaOutput.model_json_schema() - def get_all_areas(self, study: RawStudy, area_type: t.Optional[AreaType] = None) -> t.List[AreaInfoDTO]: + def get_all_areas(self, study: RawStudy, area_type: Optional[AreaType] = None) -> List[AreaInfoDTO]: """ Retrieves all areas and districts of a raw study based on the area type. 
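For the area-type filter above: passing `None` returns areas and districts together, while an explicit `AreaType` narrows the result. A hedged usage sketch (`manager` stands for an instance of the enclosing manager class, `study` for a RawStudy; both are assumed to exist):

everything = manager.get_all_areas(study)                    # areas + districts
areas_only = manager.get_all_areas(study, AreaType.AREA)     # areas only
districts = manager.get_all_areas(study, AreaType.DISTRICT)  # districts only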
@@ -255,9 +255,9 @@ def get_all_areas(self, study: RawStudy, area_type: t.Optional[AreaType] = None) storage_service = self.storage_service.get_storage(study) file_study = storage_service.get_raw(study) metadata = self.patch_service.get(study) - areas_metadata: t.Dict[str, PatchArea] = metadata.areas or {} - cfg_areas: t.Dict[str, Area] = file_study.config.areas - result: t.List[AreaInfoDTO] = [] + areas_metadata: Dict[str, PatchArea] = metadata.areas or {} + cfg_areas: Dict[str, Area] = file_study.config.areas + result: List[AreaInfoDTO] = [] if area_type is None or area_type == AreaType.AREA: result.extend( @@ -272,7 +272,7 @@ def get_all_areas(self, study: RawStudy, area_type: t.Optional[AreaType] = None) ) if area_type is None or area_type == AreaType.DISTRICT: - cfg_sets: t.Dict[str, DistrictSet] = file_study.config.sets + cfg_sets: Dict[str, DistrictSet] = file_study.config.sets result.extend( AreaInfoDTO( id=set_id, @@ -286,7 +286,7 @@ def get_all_areas(self, study: RawStudy, area_type: t.Optional[AreaType] = None) return result - def get_all_areas_ui_info(self, study: RawStudy) -> t.Dict[str, t.Any]: + def get_all_areas_ui_info(self, study: RawStudy) -> Dict[str, Any]: """ Retrieve information about all areas' user interface (UI) from the study. @@ -304,7 +304,7 @@ def get_all_areas_ui_info(self, study: RawStudy) -> t.Dict[str, t.Any]: area_ids = list(file_study.config.areas) return _get_ui_info_map(file_study, area_ids) - def get_layers(self, study: RawStudy) -> t.List[LayerInfoDTO]: + def get_layers(self, study: RawStudy) -> List[LayerInfoDTO]: storage_service = self.storage_service.get_storage(study) file_study = storage_service.get_raw(study) area_ids = list(file_study.config.areas) @@ -327,7 +327,7 @@ def get_layers(self, study: RawStudy) -> t.List[LayerInfoDTO]: for layer in layers ] - def update_layer_areas(self, study: RawStudy, layer_id: str, areas: t.List[str]) -> None: + def update_layer_areas(self, study: RawStudy, layer_id: str, areas: List[str]) -> None: logger.info(f"Updating layer {layer_id} with areas {areas}") file_study = self.storage_service.get_storage(study).get_raw(study) layers = file_study.tree.get(["layers", "layers", "layers"]) @@ -344,9 +344,9 @@ def update_layer_areas(self, study: RawStudy, layer_id: str, areas: t.List[str]) ] to_remove_areas = [area for area in existing_areas if area not in areas] to_add_areas = [area for area in areas if area not in existing_areas] - commands: t.List[ICommand] = [] + commands: List[ICommand] = [] - def create_update_commands(area_id: str) -> t.List[ICommand]: + def create_update_commands(area_id: str) -> List[ICommand]: return [ UpdateConfig( target=f"input/areas/{area_id}/ui/layerX", @@ -369,7 +369,7 @@ def create_update_commands(area_id: str) -> t.List[ICommand]: ] for area in to_remove_areas: - area_to_remove_layers: t.List[str] = _get_area_layers(areas_ui, area) + area_to_remove_layers: List[str] = _get_area_layers(areas_ui, area) if layer_id in areas_ui[area]["layerX"]: del areas_ui[area]["layerX"][layer_id] if layer_id in areas_ui[area]["layerY"]: @@ -380,7 +380,7 @@ def create_update_commands(area_id: str) -> t.List[ICommand]: ) commands.extend(create_update_commands(area)) for area in to_add_areas: - area_to_add_layers: t.List[str] = _get_area_layers(areas_ui, area) + area_to_add_layers: List[str] = _get_area_layers(areas_ui, area) if layer_id not in areas_ui[area]["layerX"]: areas_ui[area]["layerX"][layer_id] = areas_ui[area]["ui"]["x"] if layer_id not in areas_ui[area]["layerY"]: @@ -521,7 +521,7 @@ def 
update_thermal_cluster_metadata( self, study: Study, area_id: str, - clusters_metadata: t.Dict[str, PatchCluster], + clusters_metadata: Dict[str, PatchCluster], ) -> AreaInfoDTO: file_study = self.storage_service.get_storage(study).get_raw(study) patch = self.patch_service.get(study) @@ -551,7 +551,7 @@ def delete_area(self, study: Study, area_id: str) -> None: def _update_with_cluster_metadata( area: str, info: ClusterInfoDTO, - cluster_patch: t.Dict[str, PatchCluster], + cluster_patch: Dict[str, PatchCluster], ) -> ClusterInfoDTO: patch = cluster_patch.get(f"{area}.{info.id}", PatchCluster()) info.code_oi = patch.code_oi @@ -559,7 +559,7 @@ def _update_with_cluster_metadata( return info @staticmethod - def _get_clusters(file_study: FileStudy, area: str, metadata_patch: Patch) -> t.List[ClusterInfoDTO]: + def _get_clusters(file_study: FileStudy, area: str, metadata_patch: Patch) -> List[ClusterInfoDTO]: thermal_clusters_data = file_study.tree.get(["input", "thermal", "clusters", area, "list"]) cluster_patch = metadata_patch.thermal_clusters or {} result = [ diff --git a/antarest/study/business/areas/hydro_management.py b/antarest/study/business/areas/hydro_management.py index 396d2f785c..436947af3f 100644 --- a/antarest/study/business/areas/hydro_management.py +++ b/antarest/study/business/areas/hydro_management.py @@ -10,9 +10,9 @@ # # This file is part of the Antares project. -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List -from pydantic import Field, model_validator +from pydantic import Field from antarest.study.business.all_optional_meta import all_optional_model from antarest.study.business.utils import FieldInfo, FormFieldsBaseModel, execute_or_add_commands diff --git a/antarest/study/business/areas/properties_management.py b/antarest/study/business/areas/properties_management.py index 151ff178c3..b0afda1833 100644 --- a/antarest/study/business/areas/properties_management.py +++ b/antarest/study/business/areas/properties_management.py @@ -11,8 +11,8 @@ # This file is part of the Antares project. 
import re -import typing as t from builtins import sorted +from typing import Any, Dict, Iterable, List, Optional, Set, cast from pydantic import model_validator @@ -34,19 +34,19 @@ DEFAULT_FILTER_VALUE = FILTER_OPTIONS -def sort_filter_options(options: t.Iterable[str]) -> t.List[str]: +def sort_filter_options(options: Iterable[str]) -> List[str]: return sorted( options, key=lambda x: FILTER_OPTIONS.index(x), ) -def encode_filter(value: str) -> t.Set[str]: +def encode_filter(value: str) -> Set[str]: stripped = value.strip() return set(re.split(r"\s*,\s*", stripped) if stripped else []) -def decode_filter(encoded_value: t.Set[str], current_filter: t.Optional[str] = None) -> str: +def decode_filter(encoded_value: Set[str], current_filter: Optional[str] = None) -> str: return ", ".join(sort_filter_options(encoded_value)) @@ -57,13 +57,13 @@ class PropertiesFormFields(FormFieldsBaseModel): non_dispatch_power: bool dispatch_hydro_power: bool other_dispatch_power: bool - filter_synthesis: t.Set[str] - filter_by_year: t.Set[str] + filter_synthesis: Set[str] + filter_by_year: Set[str] # version 830 adequacy_patch_mode: AdequacyPatchMode @model_validator(mode="before") - def validation(cls, values: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]: + def validation(cls, values: Dict[str, Any]) -> Dict[str, Any]: filters = { "filter_synthesis": values.get("filter_synthesis"), "filter_by_year": values.get("filter_by_year"), @@ -77,7 +77,7 @@ def validation(cls, values: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]: return values -FIELDS_INFO: t.Dict[str, FieldInfo] = { +FIELDS_INFO: Dict[str, FieldInfo] = { "energy_cost_unsupplied": { "path": THERMAL_PATH.format(field="unserverdenergycost"), "default_value": 0.0, @@ -130,9 +130,9 @@ def get_field_values( file_study = self.storage_service.get_storage(study).get_raw(study) study_ver = file_study.config.version - def get_value(field_info: FieldInfo) -> t.Any: - start_ver = t.cast(int, field_info.get("start_version", 0)) - end_ver = t.cast(int, field_info.get("end_version", study_ver)) + def get_value(field_info: FieldInfo) -> Any: + start_ver = cast(int, field_info.get("start_version", 0)) + end_ver = cast(int, field_info.get("end_version", study_ver)) is_in_version = start_ver <= study_ver <= end_ver if not is_in_version: return None @@ -153,7 +153,7 @@ def set_field_values( area_id: str, field_values: PropertiesFormFields, ) -> None: - commands: t.List[UpdateConfig] = [] + commands: List[UpdateConfig] = [] file_study = self.storage_service.get_storage(study).get_raw(study) context = self.storage_service.variant_study_service.command_factory.command_context diff --git a/antarest/study/business/areas/renewable_management.py b/antarest/study/business/areas/renewable_management.py index e82a13f3db..e3705e23ff 100644 --- a/antarest/study/business/areas/renewable_management.py +++ b/antarest/study/business/areas/renewable_management.py @@ -11,7 +11,7 @@ # This file is part of the Antares project. 
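The filter helpers above convert between the comma-separated INI representation and a set of options; a short behavioral sketch, assuming `FILTER_OPTIONS` lists the frequencies in their usual hourly-to-annual order:

assert encode_filter("hourly, daily") == {"hourly", "daily"}
assert encode_filter("   ") == set()

# decode_filter re-serializes using the canonical FILTER_OPTIONS order.
assert decode_filter({"daily", "hourly"}) == "hourly, daily"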
import collections -import typing as t +from typing import Any, Dict, Mapping, MutableMapping, Optional, Sequence from antares.study.version import StudyVersion from pydantic import field_validator @@ -57,7 +57,7 @@ class Config: populate_by_name = True @staticmethod - def json_schema_extra(schema: t.MutableMapping[str, t.Any]) -> None: + def json_schema_extra(schema: MutableMapping[str, Any]) -> None: schema["example"] = RenewableClusterInput( group="Gas", name="Gas Cluster XY", @@ -75,7 +75,7 @@ class RenewableClusterCreation(RenewableClusterInput): # noinspection Pydantic @field_validator("name", mode="before") - def validate_name(cls, name: t.Optional[str]) -> str: + def validate_name(cls, name: Optional[str]) -> str: """ Validator to check if the name is not empty. """ @@ -97,7 +97,7 @@ class RenewableClusterOutput(RenewableConfig): class Config: @staticmethod - def json_schema_extra(schema: t.MutableMapping[str, t.Any]) -> None: + def json_schema_extra(schema: MutableMapping[str, Any]) -> None: schema["example"] = RenewableClusterOutput( id="Gas cluster YZ", group="Gas", @@ -112,7 +112,7 @@ def json_schema_extra(schema: t.MutableMapping[str, t.Any]) -> None: def create_renewable_output( study_version: str, cluster_id: str, - config: t.Mapping[str, t.Any], + config: Mapping[str, Any], ) -> "RenewableClusterOutput": obj = create_renewable_config(study_version=StudyVersion.parse(study_version), **config, id=cluster_id) kwargs = obj.model_dump(by_alias=False) @@ -136,7 +136,7 @@ def _get_file_study(self, study: Study) -> FileStudy: """ return self.storage_service.get_storage(study).get_raw(study) - def get_clusters(self, study: Study, area_id: str) -> t.Sequence[RenewableClusterOutput]: + def get_clusters(self, study: Study, area_id: str) -> Sequence[RenewableClusterOutput]: """ Fetches all clusters related to a specific area in a study. @@ -159,7 +159,7 @@ def get_clusters(self, study: Study, area_id: str) -> t.Sequence[RenewableCluste def get_all_renewables_props( self, study: Study, - ) -> t.Mapping[str, t.Mapping[str, RenewableClusterOutput]]: + ) -> Mapping[str, Mapping[str, RenewableClusterOutput]]: """ Retrieve all renewable clusters from all areas within a study. @@ -183,7 +183,7 @@ def get_all_renewables_props( except KeyError: raise RenewableClusterConfigNotFound(path) - renewables_by_areas: t.MutableMapping[str, t.MutableMapping[str, RenewableClusterOutput]] + renewables_by_areas: MutableMapping[str, MutableMapping[str, RenewableClusterOutput]] renewables_by_areas = collections.defaultdict(dict) for area_id, cluster_obj in clusters.items(): for cluster_id, cluster in cluster_obj.items(): @@ -292,7 +292,7 @@ def update_cluster( new_data = new_config.model_dump(mode="json", by_alias=True, exclude={"id"}) # create the dict containing the new values using aliases - data: t.Dict[str, t.Any] = {} + data: Dict[str, Any] = {} for field_name, field in new_config.model_fields.items(): if field_name in new_values: name = field.alias if field.alias else field_name @@ -311,7 +311,7 @@ def update_cluster( values = new_config.model_dump(by_alias=False) return RenewableClusterOutput(**values, id=cluster_id) - def delete_clusters(self, study: Study, area_id: str, cluster_ids: t.Sequence[str]) -> None: + def delete_clusters(self, study: Study, area_id: str, cluster_ids: Sequence[str]) -> None: """ Deletes multiple clusters from an area in the study. 
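The alias loop in `update_cluster` above deserves a gloss: it builds a partial payload keyed by field aliases, so that only caller-supplied values reach the update command. In miniature, with an invented model:

from pydantic import BaseModel, Field

class ClusterProps(BaseModel):  # invented stand-in for the cluster config
    unit_count: int = Field(1, alias="unitCount")
    enabled: bool = True

new_values = {"unit_count": 4}  # only the fields the caller actually set
new_data = ClusterProps(unitCount=4).model_dump(by_alias=True)

data = {}
for field_name, field in ClusterProps.model_fields.items():
    if field_name in new_values:
        name = field.alias if field.alias else field_name
        data[name] = new_data[name]

assert data == {"unitCount": 4}  # `enabled` stays out of the update payload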
@@ -391,8 +391,8 @@ def duplicate_cluster( def update_renewables_props( self, study: Study, - update_renewables_by_areas: t.Mapping[str, t.Mapping[str, RenewableClusterInput]], - ) -> t.Mapping[str, t.Mapping[str, RenewableClusterOutput]]: + update_renewables_by_areas: Mapping[str, Mapping[str, RenewableClusterInput]], + ) -> Mapping[str, Mapping[str, RenewableClusterOutput]]: old_renewables_by_areas = self.get_all_renewables_props(study) new_renewables_by_areas = {area_id: dict(clusters) for area_id, clusters in old_renewables_by_areas.items()} @@ -427,4 +427,4 @@ def update_renewables_props( @staticmethod def get_table_schema() -> JSON: - return RenewableClusterOutput.schema() + return RenewableClusterOutput.model_json_schema() diff --git a/antarest/study/business/areas/st_storage_management.py b/antarest/study/business/areas/st_storage_management.py index ef89e345d8..e31d79c1f1 100644 --- a/antarest/study/business/areas/st_storage_management.py +++ b/antarest/study/business/areas/st_storage_management.py @@ -12,7 +12,7 @@ import collections import operator -import typing as t +from typing import Any, Dict, List, Mapping, MutableMapping, Optional, Sequence import numpy as np from antares.study.version import StudyVersion @@ -58,7 +58,7 @@ class STStorageInput(STStorage880Properties): class Config: @staticmethod - def json_schema_extra(schema: t.MutableMapping[str, t.Any]) -> None: + def json_schema_extra(schema: MutableMapping[str, Any]) -> None: schema["example"] = STStorageInput( name="Siemens Battery", group=STStorageGroup.BATTERY, @@ -78,7 +78,7 @@ class STStorageCreation(STStorageInput): # noinspection Pydantic @field_validator("name", mode="before") - def validate_name(cls, name: t.Optional[str]) -> str: + def validate_name(cls, name: Optional[str]) -> str: """ Validator to check if the name is not empty. """ @@ -101,7 +101,7 @@ class STStorageOutput(STStorage880Config): class Config: @staticmethod - def json_schema_extra(schema: t.MutableMapping[str, t.Any]) -> None: + def json_schema_extra(schema: MutableMapping[str, Any]) -> None: schema["example"] = STStorageOutput( id="siemens_battery", name="Siemens Battery", @@ -135,12 +135,12 @@ class STStorageMatrix(AntaresBaseModel): class Config: extra = "forbid" - data: t.List[t.List[float]] - index: t.List[int] - columns: t.List[int] + data: List[List[float]] + index: List[int] + columns: List[int] @field_validator("data") - def validate_time_series(cls, data: t.List[t.List[float]]) -> t.List[t.List[float]]: + def validate_time_series(cls, data: List[List[float]]) -> List[List[float]]: """ Validator to check the integrity of the time series data. @@ -230,7 +230,7 @@ def validate_rule_curve(self) -> "STStorageMatrices": _ALL_STORAGE_PATH = "input/st-storage/clusters" -def _get_values_by_ids(file_study: FileStudy, area_id: str) -> t.Mapping[str, t.Mapping[str, t.Any]]: +def _get_values_by_ids(file_study: FileStudy, area_id: str) -> Mapping[str, Mapping[str, Any]]: path = _STORAGE_LIST_PATH.format(area_id=area_id, storage_id="")[:-1] try: return CaseInsensitiveDict(file_study.tree.get(path.split("/"), depth=3)) @@ -243,7 +243,7 @@ def _get_values_by_ids(file_study: FileStudy, area_id: str) -> t.Mapping[str, t. 
def create_storage_output( study_version: StudyVersion, cluster_id: str, - config: t.Mapping[str, t.Any], + config: Mapping[str, Any], ) -> "STStorageOutput": obj = create_st_storage_config(study_version=study_version, **config, id=cluster_id) kwargs = obj.model_dump(mode="json", by_alias=False) @@ -315,7 +315,7 @@ def get_storages( self, study: Study, area_id: str, - ) -> t.Sequence[STStorageOutput]: + ) -> Sequence[STStorageOutput]: """ Get the list of short-term storage configurations for the given `study` and `area_id`. @@ -345,7 +345,7 @@ def get_storages( def get_all_storages_props( self, study: Study, - ) -> t.Mapping[str, t.Mapping[str, STStorageOutput]]: + ) -> Mapping[str, Mapping[str, STStorageOutput]]: """ Retrieve all short-term storages from all areas within a study. @@ -370,7 +370,7 @@ def get_all_storages_props( raise STStorageConfigNotFound(path) from None study_version = StudyVersion.parse(study.version) - storages_by_areas: t.MutableMapping[str, t.MutableMapping[str, STStorageOutput]] + storages_by_areas: MutableMapping[str, MutableMapping[str, STStorageOutput]] storages_by_areas = collections.defaultdict(dict) for area_id, cluster_obj in storages.items(): for cluster_id, cluster in cluster_obj.items(): @@ -381,8 +381,8 @@ def get_all_storages_props( def update_storages_props( self, study: Study, - update_storages_by_areas: t.Mapping[str, t.Mapping[str, STStorageInput]], - ) -> t.Mapping[str, t.Mapping[str, STStorageOutput]]: + update_storages_by_areas: Mapping[str, Mapping[str, STStorageInput]], + ) -> Mapping[str, Mapping[str, STStorageOutput]]: old_storages_by_areas = self.get_all_storages_props(study) new_storages_by_areas = {area_id: dict(clusters) for area_id, clusters in old_storages_by_areas.items()} @@ -482,7 +482,7 @@ def update_storage( new_data = new_config.model_dump(mode="json", by_alias=True, exclude={"id"}) # create the dict containing the new values using aliases - data: t.Dict[str, t.Any] = {} + data: Dict[str, Any] = {} for field_name, field in new_config.model_fields.items(): if field_name in new_values: name = field.alias if field.alias else field_name @@ -506,7 +506,7 @@ def delete_storages( self, study: Study, area_id: str, - storage_ids: t.Sequence[str], + storage_ids: Sequence[str], ) -> None: """ Delete short-term storage configurations from the given study and area_id. 
@@ -584,7 +584,7 @@ def duplicate_cluster(self, study: Study, area_id: str, source_id: str, new_clus ] # Prepare and execute commands - commands: t.List[t.Union[CreateSTStorage, ReplaceMatrix]] = [create_cluster_cmd] + commands: List[CreateSTStorage | ReplaceMatrix] = [create_cluster_cmd] storage_service = self.storage_service.get_storage(study) command_context = self.storage_service.variant_study_service.command_factory.command_context for source_path, new_path in zip(source_paths, new_paths): @@ -626,7 +626,7 @@ def _get_matrix_obj( area_id: str, storage_id: str, ts_name: STStorageTimeSeries, - ) -> t.MutableMapping[str, t.Any]: + ) -> MutableMapping[str, Any]: file_study = self._get_file_study(study) path = _STORAGE_SERIES_PATH.format(area_id=area_id, storage_id=storage_id, ts_name=ts_name) try: @@ -661,7 +661,7 @@ def _save_matrix_obj( area_id: str, storage_id: str, ts_name: STStorageTimeSeries, - matrix_data: t.List[t.List[float]], + matrix_data: List[List[float]], ) -> None: file_study = self._get_file_study(study) command_context = self.storage_service.variant_study_service.command_factory.command_context @@ -718,4 +718,4 @@ def validate_matrix(matrix_type: STStorageTimeSeries) -> STStorageMatrix: @staticmethod def get_table_schema() -> JSON: - return STStorageOutput.schema() + return STStorageOutput.model_json_schema() diff --git a/antarest/study/business/areas/thermal_management.py b/antarest/study/business/areas/thermal_management.py index 90c2420881..40cc6bf2f6 100644 --- a/antarest/study/business/areas/thermal_management.py +++ b/antarest/study/business/areas/thermal_management.py @@ -11,8 +11,8 @@ # This file is part of the Antares project. import collections -import typing as t from pathlib import Path +from typing import Any, Dict, List, Mapping, MutableMapping, MutableSequence, Optional, Sequence from antares.study.version import StudyVersion from pydantic import field_validator @@ -63,7 +63,7 @@ class ThermalClusterInput(Thermal870Properties): class Config: @staticmethod - def json_schema_extra(schema: t.MutableMapping[str, t.Any]) -> None: + def json_schema_extra(schema: MutableMapping[str, Any]) -> None: schema["example"] = ThermalClusterInput( group="Gas", name="Gas Cluster XY", @@ -83,7 +83,7 @@ class ThermalClusterCreation(ThermalClusterInput): # noinspection Pydantic @field_validator("name", mode="before") - def validate_name(cls, name: t.Optional[str]) -> str: + def validate_name(cls, name: Optional[str]) -> str: """ Validator to check if the name is not empty. """ @@ -105,7 +105,7 @@ class ThermalClusterOutput(Thermal870Config): class Config: @staticmethod - def json_schema_extra(schema: t.MutableMapping[str, t.Any]) -> None: + def json_schema_extra(schema: MutableMapping[str, Any]) -> None: schema["example"] = ThermalClusterOutput( id="Gas cluster YZ", group="Gas", @@ -121,7 +121,7 @@ def json_schema_extra(schema: t.MutableMapping[str, t.Any]) -> None: def create_thermal_output( study_version: StudyVersion, cluster_id: str, - config: t.Mapping[str, t.Any], + config: Mapping[str, Any], ) -> "ThermalClusterOutput": obj = create_thermal_config(study_version=study_version, **config, id=cluster_id) kwargs = obj.model_dump(mode="json", by_alias=False) @@ -180,7 +180,7 @@ def get_clusters( self, study: Study, area_id: str, - ) -> t.Sequence[ThermalClusterOutput]: + ) -> Sequence[ThermalClusterOutput]: """ Retrieve all thermal clusters from a specified area within a study. 
@@ -207,7 +207,7 @@ def get_clusters( def get_all_thermals_props( self, study: Study, - ) -> t.Mapping[str, t.Mapping[str, ThermalClusterOutput]]: + ) -> Mapping[str, Mapping[str, ThermalClusterOutput]]: """ Retrieve all thermal clusters from all areas within a study. @@ -232,7 +232,7 @@ def get_all_thermals_props( raise ThermalClusterConfigNotFound(path) from None study_version = StudyVersion.parse(study.version) - thermals_by_areas: t.MutableMapping[str, t.MutableMapping[str, ThermalClusterOutput]] + thermals_by_areas: MutableMapping[str, MutableMapping[str, ThermalClusterOutput]] thermals_by_areas = collections.defaultdict(dict) for area_id, cluster_obj in clusters.items(): for cluster_id, cluster in cluster_obj.items(): @@ -243,8 +243,8 @@ def get_all_thermals_props( def update_thermals_props( self, study: Study, - update_thermals_by_areas: t.Mapping[str, t.Mapping[str, ThermalClusterInput]], - ) -> t.Mapping[str, t.Mapping[str, ThermalClusterOutput]]: + update_thermals_by_areas: Mapping[str, Mapping[str, ThermalClusterInput]], + ) -> Mapping[str, Mapping[str, ThermalClusterOutput]]: old_thermals_by_areas = self.get_all_thermals_props(study) new_thermals_by_areas = {area_id: dict(clusters) for area_id, clusters in old_thermals_by_areas.items()} @@ -282,7 +282,7 @@ def update_thermals_props( @staticmethod def get_table_schema() -> JSON: - return ThermalClusterOutput.schema() + return ThermalClusterOutput.model_json_schema() def create_cluster(self, study: Study, area_id: str, cluster_data: ThermalClusterCreation) -> ThermalClusterOutput: """ @@ -365,7 +365,7 @@ def update_cluster( new_data = new_config.model_dump(mode="json", by_alias=True, exclude={"id"}) # create the dict containing the new values using aliases - data: t.Dict[str, t.Any] = {} + data: Dict[str, Any] = {} for field_name, field in new_config.model_fields.items(): if field_name in new_values: name = field.alias if field.alias else field_name @@ -384,7 +384,7 @@ def update_cluster( values = {**new_config.model_dump(mode="json", by_alias=False), "id": cluster_id} return ThermalClusterOutput.model_validate(values) - def delete_clusters(self, study: Study, area_id: str, cluster_ids: t.Sequence[str]) -> None: + def delete_clusters(self, study: Study, area_id: str, cluster_ids: Sequence[str]) -> None: """ Delete the clusters with the given IDs in the given area of the given study. 
@@ -464,7 +464,7 @@ def duplicate_cluster( new_paths.append(f"input/thermal/series/{area_id}/{lower_new_id}/fuelCost") # Prepare and execute commands - commands: t.List[t.Union[CreateCluster, ReplaceMatrix]] = [create_cluster_cmd] + commands: List[CreateCluster | ReplaceMatrix] = [create_cluster_cmd] storage_service = self.storage_service.get_storage(study) command_context = self.storage_service.variant_study_service.command_factory.command_context for source_path, new_path in zip(source_paths, new_paths): @@ -486,7 +486,7 @@ def validate_series(self, study: Study, area_id: str, cluster_id: str) -> bool: series_path.append(thermal_cluster_path / "CO2Cost") series_path.append(thermal_cluster_path / "fuelCost") - ts_widths: t.MutableMapping[int, t.MutableSequence[str]] = {} + ts_widths: MutableMapping[int, MutableSequence[str]] = {} for ts_path in series_path: matrix = self.storage_service.get_storage(study).get(study, ts_path.as_posix()) matrix_data = matrix["data"] diff --git a/antarest/study/business/binding_constraint_management.py b/antarest/study/business/binding_constraint_management.py index fd3970b9b1..3eaef07b17 100644 --- a/antarest/study/business/binding_constraint_management.py +++ b/antarest/study/business/binding_constraint_management.py @@ -13,7 +13,7 @@ import collections import copy import logging -import typing as t +from typing import Any, Dict, List, Mapping, MutableSequence, Optional, Sequence, Tuple import numpy as np from antares.study.version import StudyVersion @@ -142,13 +142,13 @@ class ConstraintTerm(AntaresBaseModel): data: the constraint term data (link or cluster), if any. """ - id: t.Optional[str] = None - weight: t.Optional[float] = None - offset: t.Optional[int] = None - data: t.Optional[t.Union[LinkTerm, ClusterTerm]] = None + id: Optional[str] = None + weight: Optional[float] = None + offset: Optional[int] = None + data: Optional[LinkTerm | ClusterTerm] = None @field_validator("id") - def id_to_lower(cls, v: t.Optional[str]) -> t.Optional[str]: + def id_to_lower(cls, v: Optional[str]) -> Optional[str]: """Ensure the ID is lower case.""" if v is None: return None @@ -179,11 +179,11 @@ class ConstraintFilters(AntaresBaseModel, frozen=True, extra="forbid"): """ bc_id: str = "" - enabled: t.Optional[bool] = None - operator: t.Optional[BindingConstraintOperator] = None + enabled: Optional[bool] = None + operator: Optional[BindingConstraintOperator] = None comments: str = "" group: str = "" - time_step: t.Optional[BindingConstraintFrequency] = None + time_step: Optional[BindingConstraintFrequency] = None area_name: str = "" cluster_name: str = "" link_id: str = "" @@ -269,7 +269,7 @@ class ConstraintInput870(OptionalProperties): @camel_case_model class ConstraintInput(BindingConstraintMatrices, ConstraintInput870): - terms: t.MutableSequence[ConstraintTerm] = Field( + terms: MutableSequence[ConstraintTerm] = Field( default_factory=lambda: [], ) @@ -279,7 +279,7 @@ class ConstraintCreation(ConstraintInput): name: str @model_validator(mode="before") - def check_matrices_dimensions(cls, values: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]: + def check_matrices_dimensions(cls, values: Dict[str, Any]) -> Dict[str, Any]: for _key in ["time_step"] + [m.value for m in TermMatrices]: _camel = to_camel_case(_key) values[_key] = values.pop(_camel, values.get(_key)) @@ -332,9 +332,9 @@ def check_matrices_dimensions(cls, values: t.Dict[str, t.Any]) -> t.Dict[str, t. 
class ConstraintOutputBase(BindingConstraintPropertiesBase): id: str name: str - terms: t.MutableSequence[ConstraintTerm] = Field(default_factory=lambda: []) + terms: MutableSequence[ConstraintTerm] = Field(default_factory=lambda: []) # I have to redefine the time_step attribute to give it another alias. - time_step: t.Optional[BindingConstraintFrequency] = Field(DEFAULT_TIMESTEP, alias="timeStep") # type: ignore + time_step: Optional[BindingConstraintFrequency] = Field(DEFAULT_TIMESTEP, alias="timeStep") # type: ignore class ConstraintOutput830(ConstraintOutputBase): @@ -348,7 +348,7 @@ class ConstraintOutput870(ConstraintOutput830): # WARNING: Do not change the order of the following line, it is used to determine # the type of the output constraint in the FastAPI endpoint. -ConstraintOutput = t.Union[ConstraintOutputBase, ConstraintOutput830, ConstraintOutput870] +ConstraintOutput = ConstraintOutputBase | ConstraintOutput830 | ConstraintOutput870 OPERATOR_MATRIX_FILE_MAP = { BindingConstraintOperator.EQUAL: ["{bc_id}_eq"], @@ -359,8 +359,8 @@ class ConstraintOutput870(ConstraintOutput830): def _get_references_by_widths( - file_study: FileStudy, bcs: t.Sequence[ConstraintOutput] -) -> t.Mapping[int, t.Sequence[t.Tuple[str, str]]]: + file_study: FileStudy, bcs: Sequence[ConstraintOutput] +) -> Mapping[int, Sequence[Tuple[str, str]]]: """ Iterates over each BC and its associated matrices. For each matrix, it checks its width according to the expected matrix shapes. @@ -371,7 +371,7 @@ def _get_references_by_widths( but the width should be consistent within a group of binding constraints. """ - references_by_width: t.Dict[int, t.List[t.Tuple[str, str]]] = {} + references_by_width: Dict[int, List[Tuple[str, str]]] = {} _total = len(bcs) for _index, bc in enumerate(bcs): matrices_name = ( @@ -405,8 +405,8 @@ def _generate_replace_matrix_commands( value: ConstraintInput, operator: BindingConstraintOperator, command_context: CommandContext, -) -> t.List[ICommand]: - commands: t.List[ICommand] = [] +) -> List[ICommand]: + commands: List[ICommand] = [] if study_version < STUDY_VERSION_8_7: matrix = { BindingConstraintFrequency.HOURLY.value: default_bc_hourly_86, @@ -439,7 +439,7 @@ def _generate_replace_matrix_commands( return commands -def _validate_binding_constraints(file_study: FileStudy, bcs: t.Sequence[ConstraintOutput]) -> bool: +def _validate_binding_constraints(file_study: FileStudy, bcs: Sequence[ConstraintOutput]) -> bool: """ Validates the binding constraints within a group. """ @@ -447,7 +447,7 @@ def _validate_binding_constraints(file_study: FileStudy, bcs: t.Sequence[Constra if len(references_by_widths) > 1: most_common = collections.Counter(references_by_widths.keys()).most_common() - invalid_constraints: t.Dict[str, str] = {} + invalid_constraints: Dict[str, str] = {} for width, _ in most_common[1:]: references = references_by_widths[width] @@ -479,7 +479,7 @@ def __init__( self.storage_service = storage_service @staticmethod - def parse_and_add_terms(key: str, value: t.Any, adapted_constraint: ConstraintOutput) -> None: + def parse_and_add_terms(key: str, value: Any, adapted_constraint: ConstraintOutput) -> None: """Parse a single term from the constraint dictionary and add it to the adapted_constraint model.""" if "%" in key or "." in key: separator = "%" if "%" in key else "." 
@@ -518,7 +518,7 @@ def parse_and_add_terms(key: str, value: t.Any, adapted_constraint: ConstraintOu ) @staticmethod - def constraint_model_adapter(constraint: t.Mapping[str, t.Any], study_version: StudyVersion) -> ConstraintOutput: + def constraint_model_adapter(constraint: Mapping[str, Any], study_version: StudyVersion) -> ConstraintOutput: """ Adapts a binding constraint configuration to the appropriate model version. @@ -576,7 +576,7 @@ def constraint_model_adapter(constraint: t.Mapping[str, t.Any], study_version: S return adapted_constraint @staticmethod - def terms_to_coeffs(terms: t.Sequence[ConstraintTerm]) -> t.Dict[str, t.List[float]]: + def terms_to_coeffs(terms: Sequence[ConstraintTerm]) -> Dict[str, List[float]]: """ Converts a sequence of terms into a dictionary mapping each term's ID to its coefficients, including the weight and, optionally, the offset. @@ -592,7 +592,7 @@ def terms_to_coeffs(terms: t.Sequence[ConstraintTerm]) -> t.Dict[str, t.List[flo coeffs[term.id].append(term.offset) return coeffs - def check_binding_constraints_exists(self, study: Study, bc_ids: t.List[str]) -> None: + def check_binding_constraints_exists(self, study: Study, bc_ids: List[str]) -> None: storage_service = self.storage_service.get_storage(study) file_study = storage_service.get_raw(study) existing_constraints = file_study.tree.get(["input", "bindingconstraints", "bindingconstraints"]) @@ -622,7 +622,7 @@ def get_binding_constraint(self, study: Study, bc_id: str) -> ConstraintOutput: file_study = storage_service.get_raw(study) config = file_study.tree.get(["input", "bindingconstraints", "bindingconstraints"]) - constraints_by_id: t.Dict[str, ConstraintOutput] = CaseInsensitiveDict() # type: ignore + constraints_by_id: Dict[str, ConstraintOutput] = CaseInsensitiveDict() # type: ignore for constraint in config.values(): constraint_config = self.constraint_model_adapter(constraint, StudyVersion.parse(study.version)) @@ -635,7 +635,7 @@ def get_binding_constraint(self, study: Study, bc_id: str) -> ConstraintOutput: def get_binding_constraints( self, study: Study, filters: ConstraintFilters = ConstraintFilters() - ) -> t.Sequence[ConstraintOutput]: + ) -> Sequence[ConstraintOutput]: """ Retrieves all binding constraints within a given study, optionally filtered by specific criteria. @@ -653,7 +653,7 @@ def get_binding_constraints( filtered_constraints = list(filter(lambda c: filters.match_filters(c), outputs)) return filtered_constraints - def get_grouped_constraints(self, study: Study) -> t.Mapping[str, t.Sequence[ConstraintOutput]]: + def get_grouped_constraints(self, study: Study) -> Mapping[str, Sequence[ConstraintOutput]]: """ Retrieves and groups all binding constraints by their group names within a given study. @@ -682,7 +682,7 @@ def get_grouped_constraints(self, study: Study) -> t.Mapping[str, t.Sequence[Con return grouped_constraints - def get_constraints_by_group(self, study: Study, group_name: str) -> t.Sequence[ConstraintOutput]: + def get_constraints_by_group(self, study: Study, group_name: str) -> Sequence[ConstraintOutput]: """ Retrieve all binding constraints belonging to a specified group within a study. 
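As a worked example of the coefficient mapping documented in `terms_to_coeffs` above (a sketch; the enclosing manager class is elided, and term IDs follow the `area1%area2` link and `area.cluster` cluster convention used by `parse_and_add_terms`):

terms = [
    ConstraintTerm(id="fr%de", weight=2.0, offset=5),
    ConstraintTerm(id="fr.gas_cluster", weight=-1.0),
]
assert terms_to_coeffs(terms) == {
    "fr%de": [2.0, 5],         # weight first, offset appended when set
    "fr.gas_cluster": [-1.0],  # no offset -> weight only
}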
@@ -884,7 +884,7 @@ def update_binding_constraint( study: Study, binding_constraint_id: str, data: ConstraintInput, - existing_constraint: t.Optional[ConstraintOutput] = None, + existing_constraint: Optional[ConstraintOutput] = None, ) -> ConstraintOutput: file_study = self.storage_service.get_storage(study).get_raw(study) existing_constraint = existing_constraint or self.get_binding_constraint(study, binding_constraint_id) @@ -938,8 +938,8 @@ def update_binding_constraint( def update_binding_constraints( self, study: Study, - bcs_by_ids: t.Mapping[str, ConstraintInput], - ) -> t.Mapping[str, ConstraintOutput]: + bcs_by_ids: Mapping[str, ConstraintInput], + ) -> Mapping[str, ConstraintOutput]: """ Updates multiple binding constraints within a study. @@ -1033,7 +1033,7 @@ def remove_binding_constraint(self, study: Study, binding_constraint_id: str) -> ) execute_or_add_commands(study, file_study, [command], self.storage_service) - def remove_multiple_binding_constraints(self, study: Study, binding_constraints_ids: t.List[str]) -> None: + def remove_multiple_binding_constraints(self, study: Study, binding_constraints_ids: List[str]) -> None: """ Removes multiple binding constraints from a study. @@ -1059,7 +1059,7 @@ def remove_multiple_binding_constraints(self, study: Study, binding_constraints_ execute_or_add_commands(study, file_study, [command], self.storage_service) def _update_constraint_with_terms( - self, study: Study, bc: ConstraintOutput, terms: t.Mapping[str, ConstraintTerm] + self, study: Study, bc: ConstraintOutput, terms: Mapping[str, ConstraintTerm] ) -> None: coeffs = { term_id: [term.weight, term.offset] if term.offset else [term.weight] for term_id, term in terms.items() @@ -1079,7 +1079,7 @@ def update_constraint_terms( self, study: Study, binding_constraint_id: str, - constraint_terms: t.Sequence[ConstraintTerm], + constraint_terms: Sequence[ConstraintTerm], update_mode: str = "replace", ) -> None: """ @@ -1115,7 +1115,7 @@ def update_constraint_terms( self._update_constraint_with_terms(study, constraint, existing_terms) def create_constraint_terms( - self, study: Study, binding_constraint_id: str, constraint_terms: t.Sequence[ConstraintTerm] + self, study: Study, binding_constraint_id: str, constraint_terms: Sequence[ConstraintTerm] ) -> None: """ Adds new constraint terms to an existing binding constraint. @@ -1150,12 +1150,12 @@ def remove_constraint_term( @staticmethod def get_table_schema() -> JSON: - return ConstraintOutput870.schema() + return ConstraintOutput870.model_json_schema() def _replace_matrices_according_to_frequency_and_version( - data: ConstraintInput, version: StudyVersion, args: t.Dict[str, t.Any] -) -> t.Dict[str, t.Any]: + data: ConstraintInput, version: StudyVersion, args: Dict[str, Any] +) -> Dict[str, Any]: if version < STUDY_VERSION_8_7: if "values" not in args: matrix = { @@ -1177,7 +1177,7 @@ def _replace_matrices_according_to_frequency_and_version( def check_attributes_coherence( - data: t.Union[ConstraintCreation, ConstraintInput], + data: ConstraintCreation | ConstraintInput, study_version: StudyVersion, operator: BindingConstraintOperator, ) -> None: diff --git a/antarest/study/business/link_management.py b/antarest/study/business/link_management.py index a1a9cb3916..36ad03a795 100644 --- a/antarest/study/business/link_management.py +++ b/antarest/study/business/link_management.py @@ -10,8 +10,7 @@ # # This file is part of the Antares project. 
-import typing as t -from typing import Any +from typing import Any, Dict, List, Mapping, Tuple from antares.study.version import StudyVersion @@ -31,15 +30,15 @@ class LinkManager: def __init__(self, storage_service: StudyStorageService) -> None: self.storage_service = storage_service - def get_all_links(self, study: Study) -> t.List[LinkDTO]: + def get_all_links(self, study: Study) -> List[LinkDTO]: file_study = self.storage_service.get_storage(study).get_raw(study) - result: t.List[LinkDTO] = [] + result: List[LinkDTO] = [] for area_id, area in file_study.config.areas.items(): links_config = file_study.tree.get(["input", "links", area_id, "properties"]) for link in area.links: - link_tree_config: t.Dict[str, t.Any] = links_config[link] + link_tree_config: Dict[str, Any] = links_config[link] link_tree_config.update({"area1": area_id, "area2": link}) link_internal = LinkInternal.model_validate(link_tree_config) @@ -104,8 +103,8 @@ def update_link(self, study: RawStudy, area_from: str, area_to: str, link_update def update_links( self, study: RawStudy, - update_links_by_ids: t.Mapping[t.Tuple[str, str], LinkBaseDTO], - ) -> t.Mapping[t.Tuple[str, str], LinkBaseDTO]: + update_links_by_ids: Mapping[Tuple[str, str], LinkBaseDTO], + ) -> Mapping[Tuple[str, str], LinkBaseDTO]: new_links_by_ids = {} for (area1, area2), update_link_dto in update_links_by_ids.items(): updated_link = self.update_link(study, area1, area2, update_link_dto) diff --git a/antarest/study/business/model/link_model.py b/antarest/study/business/model/link_model.py index 9d6f50e1f5..c5675753ad 100644 --- a/antarest/study/business/model/link_model.py +++ b/antarest/study/business/model/link_model.py @@ -9,7 +9,8 @@ # SPDX-License-Identifier: MPL-2.0 # # This file is part of the Antares project. -import typing as t +from dataclasses import field +from typing import Annotated, List, Optional, Self, Type from antares.study.version import StudyVersion from pydantic import BeforeValidator, ConfigDict, Field, PlainSerializer, model_validator @@ -96,41 +97,40 @@ class FilterOption(EnumIgnoreCase): ANNUAL = "annual" -def validate_filters( - filter_value: t.Union[t.List[FilterOption], str], enum_cls: t.Type[FilterOption] -) -> t.List[FilterOption]: +def validate_filters(filter_value: List[FilterOption] | str, enum_cls: Type[FilterOption]) -> List[FilterOption]: if isinstance(filter_value, str): - if not filter_value.strip(): + filter_value = filter_value.strip() + if not filter_value: return [] - filter_accepted_values = [e for e in enum_cls] + valid_values = {str(e.value) for e in enum_cls} options = filter_value.replace(" ", "").split(",") - invalid_options = [opt for opt in options if opt not in filter_accepted_values] + invalid_options = [opt for opt in options if opt not in valid_values] if invalid_options: raise LinkValidationError( f"Invalid value(s) in filters: {', '.join(invalid_options)}. " - f"Allowed values are: {', '.join(filter_accepted_values)}." + f"Allowed values are: {', '.join(valid_values)}." 
) - options_enum: t.List[FilterOption] = list(dict.fromkeys(enum_cls(opt) for opt in options)) + options_enum: List[FilterOption] = list(dict.fromkeys(enum_cls(opt) for opt in options)) return options_enum return filter_value -def join_with_comma(values: t.List[FilterOption]) -> str: +def join_with_comma(values: List[FilterOption]) -> str: return ", ".join(value.name.lower() for value in values) -comma_separated_enum_list = t.Annotated[ - t.List[FilterOption], +comma_separated_enum_list = Annotated[ + List[FilterOption], BeforeValidator(lambda x: validate_filters(x, FilterOption)), PlainSerializer(lambda x: join_with_comma(x)), ] DEFAULT_COLOR = 112 -FILTER_VALUES: t.List[FilterOption] = [ +FILTER_VALUES: List[FilterOption] = [ FilterOption.HOURLY, FilterOption.DAILY, FilterOption.WEEKLY, @@ -154,8 +154,8 @@ class LinkBaseDTO(AntaresBaseModel): colorg: int = Field(default=DEFAULT_COLOR, ge=0, le=255) link_width: float = 1 link_style: LinkStyle = LinkStyle.PLAIN - filter_synthesis: t.Optional[comma_separated_enum_list] = FILTER_VALUES - filter_year_by_year: t.Optional[comma_separated_enum_list] = FILTER_VALUES + filter_synthesis: Optional[comma_separated_enum_list] = field(default_factory=lambda: FILTER_VALUES) + filter_year_by_year: Optional[comma_separated_enum_list] = field(default_factory=lambda: FILTER_VALUES) class Area(AntaresBaseModel): @@ -163,7 +163,7 @@ class Area(AntaresBaseModel): area2: str @model_validator(mode="after") - def validate_areas(self) -> t.Self: + def validate_areas(self) -> Self: if self.area1 == self.area2: raise LinkValidationError(f"Cannot create a link that goes from and to the same single area: {self.area1}") area_from, area_to = sorted([self.area1, self.area2]) @@ -203,8 +203,8 @@ class LinkInternal(AntaresBaseModel): colorg: int = Field(default=DEFAULT_COLOR, ge=0, le=255) link_width: float = 1 link_style: LinkStyle = LinkStyle.PLAIN - filter_synthesis: t.Optional[comma_separated_enum_list] = FILTER_VALUES - filter_year_by_year: t.Optional[comma_separated_enum_list] = FILTER_VALUES + filter_synthesis: Optional[comma_separated_enum_list] = field(default_factory=lambda: FILTER_VALUES) + filter_year_by_year: Optional[comma_separated_enum_list] = field(default_factory=lambda: FILTER_VALUES) def to_dto(self) -> LinkDTO: data = self.model_dump() diff --git a/antarest/study/business/optimization_management.py b/antarest/study/business/optimization_management.py index bbfa50c767..e6cba79056 100644 --- a/antarest/study/business/optimization_management.py +++ b/antarest/study/business/optimization_management.py @@ -12,7 +12,6 @@ from typing import Any, Dict, List, Union, cast -from antares.study.version import StudyVersion from pydantic.types import StrictBool from antarest.study.business.all_optional_meta import all_optional_model diff --git a/antarest/study/business/scenario_builder_management.py b/antarest/study/business/scenario_builder_management.py index 0ea15dd6d8..513797af06 100644 --- a/antarest/study/business/scenario_builder_management.py +++ b/antarest/study/business/scenario_builder_management.py @@ -11,7 +11,7 @@ # This file is part of the Antares project. 
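Pausing before the scenario-builder hunks: the `link_model.py` rework above is more than a type-hint cleanup. `validate_filters` now compares the raw option strings against a *set of enum values* (previously a list of enum members), which both tightens the membership check and makes it O(1), and the `FILTER_VALUES` defaults move behind a `default_factory` so instances no longer share one default object. The surrounding `Annotated` machinery is a reusable Pydantic v2 pattern; here is a self-contained sketch with a stand-in `Freq` enum:

```python
from enum import Enum
from typing import Annotated, List

from pydantic import BaseModel, BeforeValidator, PlainSerializer


class Freq(str, Enum):  # stand-in for FilterOption
    HOURLY = "hourly"
    DAILY = "daily"
    WEEKLY = "weekly"


def parse_filters(value: List[Freq] | str) -> List[Freq]:
    if isinstance(value, str):
        value = value.strip()
        if not value:
            return []
        options = value.replace(" ", "").split(",")
        valid_values = {e.value for e in Freq}
        invalid = [opt for opt in options if opt not in valid_values]
        if invalid:
            raise ValueError(f"Invalid value(s) in filters: {', '.join(invalid)}")
        # dict.fromkeys de-duplicates while preserving first-seen order
        return list(dict.fromkeys(Freq(opt) for opt in options))
    return value


CommaSeparatedFreqs = Annotated[
    List[Freq],
    BeforeValidator(parse_filters),
    PlainSerializer(lambda xs: ", ".join(x.value for x in xs), return_type=str),
]


class LinkForm(BaseModel):
    filter_synthesis: CommaSeparatedFreqs = []


form = LinkForm(filter_synthesis="hourly, daily, hourly")
assert form.filter_synthesis == [Freq.HOURLY, Freq.DAILY]
assert form.model_dump() == {"filter_synthesis": "hourly, daily"}
```

The `BeforeValidator` accepts either a list or a comma-separated string, while the `PlainSerializer` always round-trips back to the string form the INI files expect.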
import enum -import typing as t +from typing import Any, Dict, Mapping, MutableMapping, cast import typing_extensions as te from typing_extensions import override @@ -31,11 +31,11 @@ _HYDRO_LEVEL_PERCENT = 100 -_Section: te.TypeAlias = t.MutableMapping[str, t.Union[int, float]] -_Sections: te.TypeAlias = t.MutableMapping[str, _Section] +_Section: te.TypeAlias = MutableMapping[str, int | float] +_Sections: te.TypeAlias = MutableMapping[str, _Section] -Ruleset: te.TypeAlias = t.MutableMapping[str, t.Any] -Rulesets: te.TypeAlias = t.MutableMapping[str, Ruleset] +Ruleset: te.TypeAlias = MutableMapping[str, Any] +Rulesets: te.TypeAlias = MutableMapping[str, Ruleset] class ScenarioType(enum.StrEnum): @@ -92,11 +92,11 @@ def _get_ruleset_config( file_study: FileStudy, ruleset_name: str, symbol: str = "", -) -> t.Dict[str, t.Union[int, float]]: +) -> Dict[str, int | float]: try: suffix = f"/{symbol}" if symbol else "" url = f"settings/scenariobuilder/{ruleset_name}{suffix}".split("/") - ruleset_cfg = t.cast(t.Dict[str, t.Union[int, float]], file_study.tree.get(url)) + ruleset_cfg = cast(Dict[str, int | float], file_study.tree.get(url)) except KeyError: ruleset_cfg = {} return ruleset_cfg @@ -106,7 +106,7 @@ def _get_nb_years(file_study: FileStudy) -> int: try: # noinspection SpellCheckingInspection url = "settings/generaldata/general/nbyears".split("/") - nb_years = t.cast(int, file_study.tree.get(url)) + nb_years = cast(int, file_study.tree.get(url)) except KeyError: nb_years = 1 return nb_years @@ -129,7 +129,7 @@ def _get_active_ruleset_name(file_study: FileStudy, default_ruleset: str = "Defa """ try: url = "settings/generaldata/general/active-rules-scenario".split("/") - active_ruleset = t.cast(str, file_study.tree.get(url)) + active_ruleset = cast(str, file_study.tree.get(url)) except KeyError: active_ruleset = default_ruleset else: @@ -166,7 +166,7 @@ def __init__(self, storage_service: StudyStorageService) -> None: self.storage_service = storage_service def get_config(self, study: Study) -> Rulesets: - sections = t.cast(_Sections, self.storage_service.get_storage(study).get(study, "/settings/scenariobuilder")) + sections = cast(_Sections, self.storage_service.get_storage(study).get(study, "/settings/scenariobuilder")) rulesets: Rulesets = {} for ruleset_name, data in sections.items(): @@ -248,13 +248,13 @@ def update_scenario_by_type(self, study: Study, table_form: TableForm, scenario_ return table_form -def _populate_common(section: _Section, symbol: str, data: t.Mapping[str, t.Mapping[str, t.Any]]) -> None: +def _populate_common(section: _Section, symbol: str, data: Mapping[str, Mapping[str, Any]]) -> None: for area, scenario_area in data.items(): for year, value in scenario_area.items(): section[f"{symbol},{area},{year}"] = value -def _populate_hydro_levels(section: _Section, symbol: str, data: t.Mapping[str, t.Mapping[str, t.Any]]) -> None: +def _populate_hydro_levels(section: _Section, symbol: str, data: Mapping[str, Mapping[str, Any]]) -> None: for area, scenario_area in data.items(): for year, value in scenario_area.items(): if isinstance(value, (int, float)) and value != float("nan"): @@ -262,14 +262,14 @@ def _populate_hydro_levels(section: _Section, symbol: str, data: t.Mapping[str, section[f"{symbol},{area},{year}"] = value -def _populate_links(section: _Section, symbol: str, data: t.Mapping[str, t.Mapping[str, t.Any]]) -> None: +def _populate_links(section: _Section, symbol: str, data: Mapping[str, Mapping[str, Any]]) -> None: for link, scenario_link in data.items(): for year, 
value in scenario_link.items(): area1, area2 = link.split(" / ") section[f"{symbol},{area1},{area2},{year}"] = value -def _populate_clusters(section: _Section, symbol: str, data: t.Mapping[str, t.Mapping[str, t.Any]]) -> None: +def _populate_clusters(section: _Section, symbol: str, data: Mapping[str, Mapping[str, Any]]) -> None: for area, scenario_area in data.items(): for cluster, scenario_area_cluster in scenario_area.items(): for year, value in scenario_area_cluster.items(): diff --git a/antarest/study/business/table_mode_management.py b/antarest/study/business/table_mode_management.py index 2ede027bc3..8faa5eebcc 100644 --- a/antarest/study/business/table_mode_management.py +++ b/antarest/study/business/table_mode_management.py @@ -11,7 +11,7 @@ # This file is part of the Antares project. import collections -import typing as t +from typing import Any, Mapping, MutableMapping, Optional, Sequence, cast import numpy as np import pandas as pd @@ -33,8 +33,8 @@ _TableIndex = str # row name _TableColumn = str # column name -_CellValue = t.Any # cell value (str, int, float, bool, enum, etc.) -TableDataDTO = t.Mapping[_TableIndex, t.Mapping[_TableColumn, _CellValue]] +_CellValue = Any # cell value (str, int, float, bool, enum, etc.) +TableDataDTO = Mapping[_TableIndex, Mapping[_TableColumn, _CellValue]] class TableModeType(EnumIgnoreCase): @@ -64,7 +64,7 @@ class TableModeType(EnumIgnoreCase): @classmethod @override - def _missing_(cls, value: object) -> t.Optional["EnumIgnoreCase"]: + def _missing_(cls, value: object) -> Optional["EnumIgnoreCase"]: if isinstance(value, str): # handle aliases of old table types value = value.upper() @@ -144,7 +144,7 @@ def get_table_data( self, study: RawStudy, table_type: TableModeType, - columns: t.Sequence[_TableColumn], + columns: Sequence[_TableColumn], ) -> TableDataDTO: """ Get the table data of the specified type for the given study. 
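The `_populate_*` helpers retyped a few hunks up all share one flattening shape: a nested `{area: {year: value}}` mapping becomes the scenario builder's INI-style `symbol,area,year` keys (links add a second area, clusters add a cluster id). A toy version of the common case:

```python
from typing import Any, Mapping, MutableMapping

Section = MutableMapping[str, int | float]


def populate_common(section: Section, symbol: str, data: Mapping[str, Mapping[str, Any]]) -> None:
    # Flatten {"area": {"year": value}} into "symbol,area,year" keys,
    # the shape the scenario builder's INI sections expect.
    for area, scenario_area in data.items():
        for year, value in scenario_area.items():
            section[f"{symbol},{area},{year}"] = value


section: Section = {}
populate_common(section, "t", {"fr": {"0": 2, "1": 5}, "de": {"0": 3}})
assert section == {"t,fr,0": 2, "t,fr,1": 5, "t,de,0": 3}
```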
@@ -176,7 +176,7 @@ def get_table_data( # Convert NaN to `None` because it is not JSON-serializable df.replace(np.nan, None, inplace=True) - return t.cast(TableDataDTO, df.to_dict(orient="index")) + return cast(TableDataDTO, df.to_dict(orient="index")) def update_table_data( self, @@ -216,7 +216,7 @@ def update_table_data( } return data elif table_type == TableModeType.THERMAL: - thermals_by_areas: t.MutableMapping[str, t.MutableMapping[str, ThermalClusterInput]] + thermals_by_areas: MutableMapping[str, MutableMapping[str, ThermalClusterInput]] thermals_by_areas = collections.defaultdict(dict) for key, values in data.items(): area_id, cluster_id = key.split(" / ") @@ -229,7 +229,7 @@ def update_table_data( } return data elif table_type == TableModeType.RENEWABLE: - renewables_by_areas: t.MutableMapping[str, t.MutableMapping[str, RenewableClusterInput]] + renewables_by_areas: MutableMapping[str, MutableMapping[str, RenewableClusterInput]] renewables_by_areas = collections.defaultdict(dict) for key, values in data.items(): area_id, cluster_id = key.split(" / ") @@ -242,7 +242,7 @@ def update_table_data( } return data elif table_type == TableModeType.ST_STORAGE: - storages_by_areas: t.MutableMapping[str, t.MutableMapping[str, STStorageInput]] + storages_by_areas: MutableMapping[str, MutableMapping[str, STStorageInput]] storages_by_areas = collections.defaultdict(dict) for key, values in data.items(): area_id, cluster_id = key.split(" / ") diff --git a/antarest/study/business/thematic_trimming_field_infos.py b/antarest/study/business/thematic_trimming_field_infos.py index 2f5c902244..8c8ac6d081 100644 --- a/antarest/study/business/thematic_trimming_field_infos.py +++ b/antarest/study/business/thematic_trimming_field_infos.py @@ -14,7 +14,7 @@ List of fields of the Thematic Trimming panel """ -import typing as t +from typing import Any, Mapping from antares.study.version import StudyVersion @@ -136,7 +136,7 @@ class ThematicTrimmingFormFields(FormFieldsBaseModel): _SHORT_TERM_STORAGES = "Short-Term Storages" _SHORT_TERM_STORAGES_GROUP = "Short-Term Storages - Group" -FIELDS_INFO: t.Mapping[str, t.Mapping[str, t.Any]] = { +FIELDS_INFO: Mapping[str, Mapping[str, Any]] = { # fmt: off "ov_cost": {"topic": _GENERAL, "path": "OV. COST", "default_value": True}, "op_cost": {"topic": _GENERAL, "path": "OP. COST", "default_value": True}, @@ -241,7 +241,7 @@ class ThematicTrimmingFormFields(FormFieldsBaseModel): } -def get_fields_info(study_version: StudyVersion) -> t.Mapping[str, t.Mapping[str, t.Any]]: +def get_fields_info(study_version: StudyVersion) -> Mapping[str, Mapping[str, Any]]: return { key: info for key, info in FIELDS_INFO.items() diff --git a/antarest/study/business/thematic_trimming_management.py b/antarest/study/business/thematic_trimming_management.py index 7aef93aedf..83d4afc674 100644 --- a/antarest/study/business/thematic_trimming_management.py +++ b/antarest/study/business/thematic_trimming_management.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. 
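On the thematic-trimming hunks above: `get_fields_info` filters the static `FIELDS_INFO` mapping by study version, but the hunk truncates the comprehension's predicate. A hedged reconstruction of that filtering, assuming the usual `start_version`/`end_version` window semantics (field names and version numbers below are illustrative, not taken from the patch):

```python
from typing import Any, Mapping, Optional


def get_fields_info(
    fields_info: Mapping[str, Mapping[str, Any]], study_version: int
) -> Mapping[str, Mapping[str, Any]]:
    def in_window(info: Mapping[str, Any]) -> bool:
        # Assumed predicate: the diff cuts off the real comprehension body.
        start: Optional[int] = info.get("start_version")
        end: Optional[int] = info.get("end_version")
        return (start is None or study_version >= start) and (end is None or study_version < end)

    return {key: info for key, info in fields_info.items() if in_window(info)}


fields = {
    "ov_cost": {"topic": "General", "default_value": True},
    "sts_by_group": {"topic": "Short-Term Storages", "start_version": 880},
}
assert list(get_fields_info(fields, 860)) == ["ov_cost"]
```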
-import typing as t +from typing import Any, Dict, List, Mapping, cast from antares.study.version import StudyVersion @@ -36,9 +36,9 @@ def get_field_values(self, study: Study) -> ThematicTrimmingFormFields: include_vars = trimming_config.get("select_var +") or [] selected_vars_reset = trimming_config.get("selected_vars_reset", True) - def get_value(field_info: t.Mapping[str, t.Any]) -> bool: + def get_value(field_info: Mapping[str, Any]) -> bool: if selected_vars_reset is None: - return t.cast(bool, field_info["default_value"]) + return cast(bool, field_info["default_value"]) var_name = field_info["path"] return var_name not in exclude_vars if selected_vars_reset else var_name in include_vars @@ -53,12 +53,12 @@ def set_field_values(self, study: Study, field_values: ThematicTrimmingFormField file_study = self.storage_service.get_storage(study).get_raw(study) field_values_dict = field_values.model_dump(mode="json") - keys_by_bool: t.Dict[bool, t.List[t.Any]] = {True: [], False: []} + keys_by_bool: Dict[bool, List[Any]] = {True: [], False: []} fields_info = get_fields_info(StudyVersion.parse(study.version)) for name, info in fields_info.items(): keys_by_bool[field_values_dict[name]].append(info["path"]) - config_data: t.Dict[str, t.Any] + config_data: Dict[str, Any] if len(keys_by_bool[True]) > len(keys_by_bool[False]): config_data = { "selected_vars_reset": True, diff --git a/antarest/study/business/utils.py b/antarest/study/business/utils.py index 628dd4a0c4..ef8d70e93e 100644 --- a/antarest/study/business/utils.py +++ b/antarest/study/business/utils.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. -import typing as t +from typing import Any, Callable, MutableSequence, Optional, Sequence, TypedDict from antares.study.version import StudyVersion @@ -34,15 +34,15 @@ def execute_or_add_commands( study: Study, file_study: FileStudy, - commands: t.Sequence[ICommand], + commands: Sequence[ICommand], storage_service: StudyStorageService, - listener: t.Optional[ICommandListener] = None, + listener: Optional[ICommandListener] = None, ) -> None: # get current user if not in session, otherwise get session user current_user = get_current_user() if isinstance(study, RawStudy): - executed_commands: t.MutableSequence[ICommand] = [] + executed_commands: MutableSequence[ICommand] = [] for command in commands: result = command.apply(file_study, listener) if not result.status: @@ -87,14 +87,14 @@ class FormFieldsBaseModel( """ -class FieldInfo(t.TypedDict, total=False): +class FieldInfo(TypedDict, total=False): path: str - default_value: t.Any - start_version: t.Optional[StudyVersion] - end_version: t.Optional[StudyVersion] + default_value: Any + start_version: Optional[StudyVersion] + end_version: Optional[StudyVersion] # Workaround to replace Pydantic computed values which are ignored by FastAPI. 
# TODO: check @computed_field available in Pydantic v2 to remove it # (value) -> encoded_value - encode: t.Optional[t.Callable[[t.Any], t.Any]] + encode: Optional[Callable[[Any], Any]] # (encoded_value, current_value) -> decoded_value - decode: t.Optional[t.Callable[[t.Any, t.Optional[t.Any]], t.Any]] + decode: Optional[Callable[[Any, Optional[Any]], Any]] diff --git a/antarest/study/business/xpansion_management.py b/antarest/study/business/xpansion_management.py index 7464de5ec5..1c04d5b865 100644 --- a/antarest/study/business/xpansion_management.py +++ b/antarest/study/business/xpansion_management.py @@ -15,8 +15,8 @@ import io import logging import shutil -import typing as t import zipfile +from typing import Any, List, MutableMapping, Optional, Sequence from fastapi import HTTPException, UploadFile from pydantic import Field, ValidationError, field_validator, model_validator @@ -69,11 +69,11 @@ class XpansionSensitivitySettings(AntaresBaseModel): """ epsilon: float = Field(default=0, ge=0, description="Max deviation from optimum (€)") - projection: t.List[str] = Field(default_factory=list, description="List of candidate names to project") + projection: List[str] = Field(default_factory=list, description="List of candidate names to project") capex: bool = Field(default=False, description="Whether to include capex in the sensitivity analysis") @field_validator("projection", mode="before") - def projection_validation(cls, v: t.Optional[t.Sequence[str]]) -> t.Sequence[str]: + def projection_validation(cls, v: Optional[Sequence[str]]) -> Sequence[str]: return [] if v is None else v @@ -148,10 +148,10 @@ class XpansionSettings(AntaresBaseModel, extra="ignore", validate_assignment=Tru timelimit: int = int(1e12) # The sensitivity analysis is optional - sensitivity_config: t.Optional[XpansionSensitivitySettings] = None + sensitivity_config: Optional[XpansionSensitivitySettings] = None @model_validator(mode="before") - def validate_float_values(cls, values: t.MutableMapping[str, t.Any]) -> t.MutableMapping[str, t.Any]: + def validate_float_values(cls, values: MutableMapping[str, Any]) -> MutableMapping[str, Any]: if "relaxed-optimality-gap" in values: values["relaxed_optimality_gap"] = values.pop("relaxed-optimality-gap") @@ -237,20 +237,20 @@ class XpansionCandidateDTO(AntaresBaseModel): name: str link: str annual_cost_per_mw: float = Field(alias="annual-cost-per-mw", ge=0) - unit_size: t.Optional[float] = Field(default=None, alias="unit-size", ge=0) - max_units: t.Optional[int] = Field(default=None, alias="max-units", ge=0) - max_investment: t.Optional[float] = Field(default=None, alias="max-investment", ge=0) - already_installed_capacity: t.Optional[int] = Field(default=None, alias="already-installed-capacity", ge=0) + unit_size: Optional[float] = Field(default=None, alias="unit-size", ge=0) + max_units: Optional[int] = Field(default=None, alias="max-units", ge=0) + max_investment: Optional[float] = Field(default=None, alias="max-investment", ge=0) + already_installed_capacity: Optional[int] = Field(default=None, alias="already-installed-capacity", ge=0) # this is obsolete (replaced by direct/indirect) - link_profile: t.Optional[str] = Field(default=None, alias="link-profile") + link_profile: Optional[str] = Field(default=None, alias="link-profile") # this is obsolete (replaced by direct/indirect) - already_installed_link_profile: t.Optional[str] = Field(default=None, alias="already-installed-link-profile") - direct_link_profile: t.Optional[str] = Field(default=None, 
alias="direct-link-profile") - indirect_link_profile: t.Optional[str] = Field(default=None, alias="indirect-link-profile") - already_installed_direct_link_profile: t.Optional[str] = Field( + already_installed_link_profile: Optional[str] = Field(default=None, alias="already-installed-link-profile") + direct_link_profile: Optional[str] = Field(default=None, alias="direct-link-profile") + indirect_link_profile: Optional[str] = Field(default=None, alias="indirect-link-profile") + already_installed_direct_link_profile: Optional[str] = Field( default=None, alias="already-installed-direct-link-profile" ) - already_installed_indirect_link_profile: t.Optional[str] = Field( + already_installed_indirect_link_profile: Optional[str] = Field( default=None, alias="already-installed-indirect-link-profile" ) @@ -304,7 +304,7 @@ class XpansionManager: def __init__(self, study_storage_service: StudyStorageService): self.study_storage_service = study_storage_service - def create_xpansion_configuration(self, study: Study, zipped_config: t.Optional[UploadFile] = None) -> None: + def create_xpansion_configuration(self, study: Study, zipped_config: Optional[UploadFile] = None) -> None: logger.info(f"Initiating xpansion configuration for study '{study.id}'") file_study = self.study_storage_service.get_storage(study).get_raw(study) try: @@ -501,9 +501,9 @@ def _assert_candidate_name_is_not_already_taken(candidates: JSON, xpansion_candi @staticmethod def _assert_investment_candidate_is_valid( - max_investment: t.Optional[float], - max_units: t.Optional[int], - unit_size: t.Optional[float], + max_investment: Optional[float], + max_units: Optional[int], + unit_size: Optional[float], ) -> None: bool_max_investment = max_investment is None bool_max_units = max_units is None @@ -570,7 +570,7 @@ def get_candidate(self, study: Study, candidate_name: str) -> XpansionCandidateD except StopIteration: raise CandidateNotFoundError(f"The candidate '{candidate_name}' does not exist") - def get_candidates(self, study: Study) -> t.List[XpansionCandidateDTO]: + def get_candidates(self, study: Study) -> List[XpansionCandidateDTO]: logger.info(f"Getting all candidates of study {study.id}") file_study = self.study_storage_service.get_storage(study).get_raw(study) candidates = file_study.tree.get(["user", "expansion", "candidates"]) @@ -619,7 +619,7 @@ def update_xpansion_constraints_settings(self, study: Study, constraints_file_na xpansion_settings = UpdateXpansionSettings.model_validate(args) return self.update_xpansion_settings(study, xpansion_settings) - def _raw_file_dir(self, raw_file_type: XpansionResourceFileType) -> t.List[str]: + def _raw_file_dir(self, raw_file_type: XpansionResourceFileType) -> List[str]: if raw_file_type == XpansionResourceFileType.CONSTRAINTS: return ["user", "expansion", "constraints"] elif raw_file_type == XpansionResourceFileType.CAPACITIES: @@ -631,7 +631,7 @@ def _raw_file_dir(self, raw_file_type: XpansionResourceFileType) -> t.List[str]: def _add_raw_files( self, file_study: FileStudy, - files: t.List[UploadFile], + files: List[UploadFile], raw_file_type: XpansionResourceFileType, ) -> None: keys = self._raw_file_dir(raw_file_type) @@ -666,7 +666,7 @@ def add_resource( self, study: Study, resource_type: XpansionResourceFileType, - files: t.List[UploadFile], + files: List[UploadFile], ) -> None: logger.info(f"Adding xpansion {resource_type} resource file list to study '{study.id}'") file_study = self.study_storage_service.get_storage(study).get_raw(study) @@ -703,12 +703,12 @@ def 
get_resource_content( study: Study, resource_type: XpansionResourceFileType, filename: str, - ) -> t.Union[JSON, bytes]: + ) -> JSON | bytes: logger.info(f"Getting xpansion {resource_type} resource file '{filename}' from study '{study.id}'") file_study = self.study_storage_service.get_storage(study).get_raw(study) return file_study.tree.get(self._raw_file_dir(resource_type) + [filename]) - def list_resources(self, study: Study, resource_type: XpansionResourceFileType) -> t.List[str]: + def list_resources(self, study: Study, resource_type: XpansionResourceFileType) -> List[str]: logger.info(f"Getting all xpansion {resource_type} files from study '{study.id}'") file_study = self.study_storage_service.get_storage(study).get_raw(study) try: diff --git a/antarest/study/common/studystorage.py b/antarest/study/common/studystorage.py index 221857c339..8ec19c3d2c 100644 --- a/antarest/study/common/studystorage.py +++ b/antarest/study/common/studystorage.py @@ -10,9 +10,9 @@ # # This file is part of the Antares project. -import typing as t from abc import ABC, abstractmethod from pathlib import Path +from typing import BinaryIO, Generic, List, Optional, Sequence, TypeVar from antarest.core.exceptions import StudyNotFoundError from antarest.core.model import JSON @@ -22,10 +22,10 @@ from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy from antarest.study.storage.rawstudy.model.filesystem.inode import OriginalFile -T = t.TypeVar("T", bound=Study) +T = TypeVar("T", bound=Study) -class IStudyStorageService(ABC, t.Generic[T]): +class IStudyStorageService(ABC, Generic[T]): @abstractmethod def create(self, metadata: T) -> T: """ @@ -86,7 +86,7 @@ def exists(self, metadata: T) -> bool: """ @abstractmethod - def copy(self, src_meta: T, dest_name: str, groups: t.Sequence[str], with_outputs: bool = False) -> T: + def copy(self, src_meta: T, dest_name: str, groups: Sequence[str], with_outputs: bool = False) -> T: """ Create a new study by copying a reference study. 
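The `studystorage.py` hunks above keep `IStudyStorageService` generic over the concrete study type; only the import style changes (`t.TypeVar`/`t.Generic` become direct imports). For reference, the pattern in isolation, with toy `Study`/`RawStudy` classes standing in for the ORM models:

```python
from abc import ABC, abstractmethod
from typing import Generic, TypeVar


class Study: ...


class RawStudy(Study): ...


T = TypeVar("T", bound=Study)


class IStudyStorageService(ABC, Generic[T]):
    @abstractmethod
    def create(self, metadata: T) -> T:
        """Create a new study from its metadata."""


class RawStudyService(IStudyStorageService[RawStudy]):
    def create(self, metadata: RawStudy) -> RawStudy:
        # Sketch only: the real service also materializes files on disk.
        return metadata


service: IStudyStorageService[RawStudy] = RawStudyService()
assert isinstance(service.create(RawStudy()), RawStudy)
```

Binding the `TypeVar` to `Study` lets each concrete service (raw or variant) get precise types on `create`, `copy`, `get_raw`, and so on without casts at the call sites.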
@@ -116,9 +116,9 @@ def patch_update_study_metadata(self, study: T, metadata: StudyMetadataPatchDTO) def import_output( self, study: T, - output: t.Union[t.BinaryIO, Path], - output_name: t.Optional[str] = None, - ) -> t.Optional[str]: + output: BinaryIO | Path, + output_name: Optional[str] = None, + ) -> Optional[str]: """ Import an output Args: @@ -137,7 +137,7 @@ def get_raw( self, metadata: T, use_cache: bool = True, - output_dir: t.Optional[Path] = None, + output_dir: Optional[Path] = None, ) -> FileStudy: """ Fetch a study raw tree object and its config @@ -150,7 +150,7 @@ def get_raw( """ @abstractmethod - def get_study_sim_result(self, metadata: T) -> t.List[StudySimResultDTO]: + def get_study_sim_result(self, metadata: T) -> List[StudySimResultDTO]: """ Get global result information @@ -252,7 +252,7 @@ def export_study_flat( metadata: T, dst_path: Path, outputs: bool = True, - output_list_filter: t.Optional[t.List[str]] = None, + output_list_filter: Optional[List[str]] = None, denormalize: bool = True, ) -> None: """ @@ -267,7 +267,7 @@ def export_study_flat( """ @abstractmethod - def get_synthesis(self, metadata: T, params: t.Optional[RequestParameters] = None) -> FileStudyTreeConfigDTO: + def get_synthesis(self, metadata: T, params: Optional[RequestParameters] = None) -> FileStudyTreeConfigDTO: """ Return study synthesis Args: diff --git a/antarest/study/main.py b/antarest/study/main.py index b7ddb9715e..f992157597 100644 --- a/antarest/study/main.py +++ b/antarest/study/main.py @@ -12,8 +12,6 @@ from typing import Optional -from fastapi import APIRouter, FastAPI - from antarest.core.application import AppBuildContext from antarest.core.config import Config from antarest.core.filetransfer.service import FileTransferManager diff --git a/antarest/study/model.py b/antarest/study/model.py index 004c836844..71e0c746f0 100644 --- a/antarest/study/model.py +++ b/antarest/study/model.py @@ -13,10 +13,10 @@ import dataclasses import enum import secrets -import typing as t import uuid from datetime import datetime, timedelta from pathlib import Path, PurePath +from typing import TYPE_CHECKING, Annotated, Any, Dict, List, Mapping, Optional, Tuple, cast from antares.study.version import StudyVersion from pydantic import BeforeValidator, ConfigDict, Field, PlainSerializer, computed_field, field_validator @@ -41,7 +41,7 @@ from antarest.login.model import Group, GroupDTO, Identity from antarest.study.css4_colors import COLOR_NAMES -if t.TYPE_CHECKING: +if TYPE_CHECKING: # avoid circular import from antarest.core.tasks.model import TaskJob @@ -67,11 +67,11 @@ STUDY_VERSION_9_1 = StudyVersion.parse("9.1") STUDY_VERSION_9_2 = StudyVersion.parse("9.2") -StudyVersionStr = t.Annotated[StudyVersion, BeforeValidator(StudyVersion.parse), PlainSerializer(str)] -StudyVersionInt = t.Annotated[StudyVersion, BeforeValidator(StudyVersion.parse), PlainSerializer(int)] +StudyVersionStr = Annotated[StudyVersion, BeforeValidator(StudyVersion.parse), PlainSerializer(str)] +StudyVersionInt = Annotated[StudyVersion, BeforeValidator(StudyVersion.parse), PlainSerializer(int)] -STUDY_REFERENCE_TEMPLATES: t.Mapping[StudyVersion, str] = { +STUDY_REFERENCE_TEMPLATES: Mapping[StudyVersion, str] = { STUDY_VERSION_6_0: "empty_study_613.zip", STUDY_VERSION_6_1: "empty_study_613.zip", STUDY_VERSION_6_4: "empty_study_613.zip", @@ -162,11 +162,11 @@ class Tag(Base): # type:ignore label = Column(String(40), primary_key=True, index=True) color: str = Column(String(20), index=True, default=lambda: secrets.choice(COLOR_NAMES)) - 
studies: t.List["Study"] = relationship("Study", secondary=StudyTag.__table__, back_populates="tags") + studies: List["Study"] = relationship("Study", secondary=StudyTag.__table__, back_populates="tags") @override def __str__(self) -> str: # pragma: no cover - return t.cast(str, self.label) + return cast(str, self.label) @override def __repr__(self) -> str: # pragma: no cover @@ -203,7 +203,7 @@ class StudyAdditionalData(Base): # type:ignore patch = Column(String(), index=True, nullable=True) @override - def __eq__(self, other: t.Any) -> bool: + def __eq__(self, other: Any) -> bool: if not super().__eq__(other): return False if not isinstance(other, StudyAdditionalData): @@ -238,7 +238,7 @@ class Study(Base): # type: ignore owner_id = Column(Integer, ForeignKey(Identity.id), nullable=True, index=True) archived = Column(Boolean(), default=False, index=True) - tags: t.List[Tag] = relationship(Tag, secondary=StudyTag.__table__, back_populates="studies") + tags: List[Tag] = relationship(Tag, secondary=StudyTag.__table__, back_populates="studies") owner = relationship(Identity, uselist=False) groups = relationship(Group, secondary=StudyGroup.__table__, cascade="") additional_data = relationship( @@ -249,7 +249,7 @@ class Study(Base): # type: ignore # Define a one-to-many relationship between `Study` and `TaskJob`. # If the Study is deleted, all attached TaskJob must be deleted in cascade. - jobs: t.List["TaskJob"] = relationship("TaskJob", back_populates="study", cascade="all, delete, delete-orphan") + jobs: List["TaskJob"] = relationship("TaskJob", back_populates="study", cascade="all, delete, delete-orphan") __mapper_args__ = {"polymorphic_identity": "study", "polymorphic_on": type} @@ -269,7 +269,7 @@ def __str__(self) -> str: ) @override - def __eq__(self, other: t.Any) -> bool: + def __eq__(self, other: Any) -> bool: if not isinstance(other, Study): return False return bool( @@ -286,18 +286,18 @@ def __eq__(self, other: t.Any) -> bool: and other.archived == self.archived ) - def to_json_summary(self) -> t.Any: + def to_json_summary(self) -> Any: return {"id": self.id, "name": self.name} @validates("folder") # type: ignore - def validate_folder(self, key: str, folder: t.Optional[str]) -> t.Optional[str]: + def validate_folder(self, key: str, folder: Optional[str]) -> Optional[str]: """ We want to store the path in posix format in the database, even on windows. """ return normalize_path(folder) -def normalize_path(path: t.Optional[str]) -> t.Optional[str]: +def normalize_path(path: Optional[str]) -> Optional[str]: """ Turns any path including a windows path (with \ separator) to a posix path (with / separator). 
""" @@ -328,7 +328,7 @@ class RawStudy(Study): } @override - def __eq__(self, other: t.Any) -> bool: + def __eq__(self, other: Any) -> bool: if not super().__eq__(other): return False if not isinstance(other, RawStudy): @@ -349,7 +349,7 @@ class StudyFolder: path: Path workspace: str - groups: t.List[Group] + groups: List[Group] class NonStudyFolderDTO(AntaresBaseModel): @@ -390,21 +390,21 @@ class WorkspaceMetadata(AntaresBaseModel): class PatchStudy(AntaresBaseModel): - scenario: t.Optional[str] = None - doc: t.Optional[str] = None - status: t.Optional[str] = None - comments: t.Optional[str] = None - tags: t.List[str] = [] + scenario: Optional[str] = None + doc: Optional[str] = None + status: Optional[str] = None + comments: Optional[str] = None + tags: List[str] = [] class PatchArea(AntaresBaseModel): - country: t.Optional[str] = None - tags: t.List[str] = [] + country: Optional[str] = None + tags: List[str] = [] class PatchCluster(AntaresBaseModel): - type: t.Optional[str] = None - code_oi: t.Optional[str] = None + type: Optional[str] = None + code_oi: Optional[str] = None class Config: @classmethod @@ -413,18 +413,18 @@ def alias_generator(cls, string: str) -> str: class PatchOutputs(AntaresBaseModel): - reference: t.Optional[str] = None + reference: Optional[str] = None class Patch(AntaresBaseModel): - study: t.Optional[PatchStudy] = None - areas: t.Optional[t.Dict[str, PatchArea]] = None - thermal_clusters: t.Optional[t.Dict[str, PatchCluster]] = None - outputs: t.Optional[PatchOutputs] = None + study: Optional[PatchStudy] = None + areas: Optional[Dict[str, PatchArea]] = None + thermal_clusters: Optional[Dict[str, PatchCluster]] = None + outputs: Optional[PatchOutputs] = None class OwnerInfo(AntaresBaseModel): - id: t.Optional[int] = None + id: Optional[int] = None name: str @@ -436,35 +436,35 @@ class StudyMetadataDTO(AntaresBaseModel): updated: str type: str owner: OwnerInfo - groups: t.List[GroupDTO] + groups: List[GroupDTO] public_mode: PublicMode workspace: str managed: bool archived: bool - horizon: t.Optional[str] = None - scenario: t.Optional[str] = None - status: t.Optional[str] = None - doc: t.Optional[str] = None - folder: t.Optional[str] = None - tags: t.List[str] = [] + horizon: Optional[str] = None + scenario: Optional[str] = None + status: Optional[str] = None + doc: Optional[str] = None + folder: Optional[str] = None + tags: List[str] = [] @field_validator("horizon", mode="before") - def transform_horizon_to_str(cls, val: t.Union[str, int, None]) -> t.Optional[str]: + def transform_horizon_to_str(cls, val: str | int | None) -> Optional[str]: # horizon can be an int. 
return str(val) if val else val # type: ignore class StudyMetadataPatchDTO(AntaresBaseModel): - name: t.Optional[str] = None - author: t.Optional[str] = None - horizon: t.Optional[str] = None - scenario: t.Optional[str] = None - status: t.Optional[str] = None - doc: t.Optional[str] = None - tags: t.List[str] = [] + name: Optional[str] = None + author: Optional[str] = None + horizon: Optional[str] = None + scenario: Optional[str] = None + status: Optional[str] = None + doc: Optional[str] = None + tags: List[str] = [] @field_validator("tags", mode="before") - def _normalize_tags(cls, v: t.List[str]) -> t.List[str]: + def _normalize_tags(cls, v: List[str]) -> List[str]: """Remove leading and trailing whitespaces, and replace consecutive whitespaces by a single one.""" tags = [] for tag in v: @@ -478,14 +478,14 @@ def _normalize_tags(cls, v: t.List[str]) -> t.List[str]: class StudySimSettingsDTO(AntaresBaseModel): - general: t.Dict[str, t.Any] - input: t.Dict[str, t.Any] - output: t.Dict[str, t.Any] - optimization: t.Dict[str, t.Any] - otherPreferences: t.Dict[str, t.Any] - advancedParameters: t.Dict[str, t.Any] - seedsMersenneTwister: t.Dict[str, t.Any] - playlist: t.Optional[t.List[int]] = None + general: Dict[str, Any] + input: Dict[str, Any] + output: Dict[str, Any] + optimization: Dict[str, Any] + otherPreferences: Dict[str, Any] + advancedParameters: Dict[str, Any] + seedsMersenneTwister: Dict[str, Any] + playlist: Optional[List[int]] = None class StudySimResultDTO(AntaresBaseModel): @@ -574,12 +574,12 @@ class StudyDownloadDTO(AntaresBaseModel): """ type: StudyDownloadType - years: t.Optional[t.List[int]] + years: Optional[List[int]] level: StudyDownloadLevelDTO - filterIn: t.Optional[str] - filterOut: t.Optional[str] - filter: t.Optional[t.List[str]] - columns: t.Optional[t.List[str]] + filterIn: Optional[str] + filterOut: Optional[str] + filter: Optional[List[str]] + columns: Optional[List[str]] synthesis: bool = False includeClusters: bool = False @@ -594,31 +594,31 @@ class MatrixIndex(AntaresBaseModel): class TimeSerie(AntaresBaseModel): name: str unit: str - data: t.List[t.Optional[float]] = [] + data: List[Optional[float]] = [] class TimeSeriesData(AntaresBaseModel): type: StudyDownloadType name: str - data: t.Dict[str, t.List[TimeSerie]] = {} + data: Dict[str, List[TimeSerie]] = {} class MatrixAggregationResultDTO(AntaresBaseModel): index: MatrixIndex - data: t.List[TimeSeriesData] - warnings: t.List[str] + data: List[TimeSeriesData] + warnings: List[str] class MatrixAggregationResult(AntaresBaseModel): index: MatrixIndex - data: t.Dict[t.Tuple[StudyDownloadType, str], t.Dict[str, t.List[TimeSerie]]] - warnings: t.List[str] + data: Dict[Tuple[StudyDownloadType, str], Dict[str, List[TimeSerie]]] + warnings: List[str] def to_dto(self) -> MatrixAggregationResultDTO: return MatrixAggregationResultDTO.construct( index=self.index, data=[ - TimeSeriesData.construct( + TimeSeriesData.model_construct( type=key_type, name=key_name, data=self.data[(key_type, key_name)], diff --git a/antarest/study/repository.py b/antarest/study/repository.py index a904124d4d..28f2d85b85 100644 --- a/antarest/study/repository.py +++ b/antarest/study/repository.py @@ -12,7 +12,7 @@ import datetime import enum -import typing as t +from typing import List, Optional, Sequence, Tuple, cast from pydantic import NonNegativeInt from sqlalchemy import and_, func, not_, or_, sql # type: ignore @@ -55,11 +55,11 @@ class AccessPermissions(AntaresBaseModel, frozen=True, extra="forbid"): """ is_admin: bool = False - 
user_id: t.Optional[int] = None - user_groups: t.Sequence[str] = () + user_id: Optional[int] = None + user_groups: Sequence[str] = () @classmethod - def from_params(cls, params: t.Union[RequestParameters, JWTUser]) -> "AccessPermissions": + def from_params(cls, params: RequestParameters | JWTUser) -> "AccessPermissions": """ This function makes it easier to pass on user ids and groups into the repository filtering function by extracting the associated `AccessPermissions` object. @@ -105,15 +105,15 @@ class StudyFilter(AntaresBaseModel, frozen=True, extra="forbid"): """ name: str = "" - managed: t.Optional[bool] = None - archived: t.Optional[bool] = None - variant: t.Optional[bool] = None - versions: t.Sequence[str] = () - users: t.Sequence[int] = () - groups: t.Sequence[str] = () - tags: t.Sequence[str] = () - study_ids: t.Sequence[str] = () - exists: t.Optional[bool] = None + managed: Optional[bool] = None + archived: Optional[bool] = None + variant: Optional[bool] = None + versions: Sequence[str] = () + users: Sequence[int] = () + groups: Sequence[str] = () + tags: Sequence[str] = () + study_ids: Sequence[str] = () + exists: Optional[bool] = None workspace: str = "" folder: str = "" access_permissions: AccessPermissions = AccessPermissions() @@ -146,7 +146,7 @@ class StudyMetadataRepository: Database connector to manage Study entity """ - def __init__(self, cache_service: ICache, session: t.Optional[Session] = None): + def __init__(self, cache_service: ICache, session: Optional[Session] = None): """ Initialize the repository. @@ -191,7 +191,7 @@ def save( def refresh(self, metadata: Study) -> None: self.session.refresh(metadata) - def get(self, study_id: str) -> t.Optional[Study]: + def get(self, study_id: str) -> Optional[Study]: """Get the study by ID or return `None` if not found in database.""" # todo: I think we should use a `entity = with_polymorphic(Study, "*")` # to make sure RawStudy and VariantStudy fields are also fetched. @@ -224,16 +224,16 @@ def one(self, study_id: str) -> Study: ) return study - def get_additional_data(self, study_id: str) -> t.Optional[StudyAdditionalData]: + def get_additional_data(self, study_id: str) -> Optional[StudyAdditionalData]: study: StudyAdditionalData = self.session.query(StudyAdditionalData).get(study_id) return study def get_all( self, study_filter: StudyFilter = StudyFilter(), - sort_by: t.Optional[StudySortBy] = None, + sort_by: Optional[StudySortBy] = None, pagination: StudyPagination = StudyPagination(), - ) -> t.Sequence[Study]: + ) -> Sequence[Study]: """ Retrieve studies based on specified filters, sorting, and pagination. 
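Worth a note on `AccessPermissions` above: it is declared with `frozen=True, extra="forbid"`, so filter inputs are immutable and typo-proof, and the union-typed `from_params` classmethod (body elided in the hunk) normalizes either a `RequestParameters` or a bare `JWTUser` into one object. A small sketch of what those model options buy, using plain `BaseModel` in place of `AntaresBaseModel`:

```python
from typing import Optional, Sequence

from pydantic import BaseModel, ValidationError


class AccessPermissions(BaseModel, frozen=True, extra="forbid"):
    is_admin: bool = False
    user_id: Optional[int] = None
    user_groups: Sequence[str] = ()


perms = AccessPermissions(user_id=42, user_groups=("ops",))
try:
    perms.is_admin = True  # frozen models reject mutation after creation
except ValidationError as exc:
    assert "frozen" in str(exc)
```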
@@ -274,7 +274,7 @@ def get_all( if sort_by is None: q = q.order_by(entity.name.asc()) if study_filter.groups or study_filter.tags: - studies: t.Sequence[Study] = q.all()[offset:end] + studies: Sequence[Study] = q.all()[offset:end] return studies q = q.offset(offset).limit(limit) @@ -382,14 +382,14 @@ def _search_studies( return q - def get_all_raw(self, exists: t.Optional[bool] = None) -> t.Sequence[RawStudy]: + def get_all_raw(self, exists: Optional[bool] = None) -> Sequence[RawStudy]: query = self.session.query(RawStudy) if exists is not None: if exists: query = query.filter(RawStudy.missing.is_(None)) else: query = query.filter(not_(RawStudy.missing.is_(None))) - studies: t.Sequence[RawStudy] = query.all() + studies: Sequence[RawStudy] = query.all() return studies def delete(self, id_: str, *ids: str) -> None: @@ -398,7 +398,7 @@ def delete(self, id_: str, *ids: str) -> None: session.query(Study).filter(Study.id.in_(ids)).delete(synchronize_session=False) session.commit() - def update_tags(self, study: Study, new_tags: t.Sequence[str]) -> None: + def update_tags(self, study: Study, new_tags: Sequence[str]) -> None: """ Updates the tags associated with a given study in the database, replacing existing tags with new ones (case-insensitive). @@ -421,14 +421,14 @@ def update_tags(self, study: Study, new_tags: t.Sequence[str]) -> None: session.query(Tag).filter(~Tag.studies.any()).delete(synchronize_session=False) # type: ignore session.commit() - def list_duplicates(self) -> t.List[t.Tuple[str, str]]: + def list_duplicates(self) -> List[Tuple[str, str]]: """ Get list of duplicates as tuples (id, path). """ session = self.session subquery = session.query(Study.path).group_by(Study.path).having(func.count() > 1).subquery() query = session.query(Study.id, Study.path).filter(Study.path.in_(subquery)) - return t.cast(t.List[t.Tuple[str, str]], query.all()) + return cast(List[Tuple[str, str]], query.all()) def has_children(self, uuid: str) -> bool: """ diff --git a/antarest/study/service.py b/antarest/study/service.py index e50ad57d9c..911071cba7 100644 --- a/antarest/study/service.py +++ b/antarest/study/service.py @@ -18,9 +18,9 @@ import logging import os import time -import typing as t from datetime import datetime, timedelta from pathlib import Path, PurePosixPath +from typing import Any, BinaryIO, Callable, Dict, List, Optional, Sequence, Tuple, Type, cast from uuid import uuid4 import numpy as np @@ -146,7 +146,6 @@ DigestSynthesis, DigestUI, ) -from antarest.study.storage.rawstudy.model.filesystem.root.user.user import User from antarest.study.storage.rawstudy.raw_study_service import RawStudyService from antarest.study.storage.storage_service import StudyStorageService from antarest.study.storage.study_download_utils import StudyDownloader, get_output_variables_information @@ -187,7 +186,7 @@ MAX_MISSING_STUDY_TIMEOUT = 2 # days -def get_disk_usage(path: t.Union[str, Path]) -> int: +def get_disk_usage(path: str | Path) -> int: """Calculate the total disk usage (in bytes) of a study in a compressed file or directory.""" path = Path(path) if is_archive_format(path.suffix.lower()): @@ -219,7 +218,7 @@ def _imports_matrix_from_bytes(data: bytes) -> npt.NDArray[np.float64]: def _get_path_inside_user_folder( - path: str, exception_class: t.Type[t.Union[FolderCreationNotAllowed, ResourceDeletionNotAllowed]] + path: str, exception_class: Type[FolderCreationNotAllowed | ResourceDeletionNotAllowed] ) -> str: """ Retrieves the path inside the `user` folder for a given user path @@ -441,9 +440,9 @@ 
def __init__( ) self.cache_service = cache_service self.config = config - self.on_deletion_callbacks: t.List[t.Callable[[str], None]] = [] + self.on_deletion_callbacks: List[Callable[[str], None]] = [] - def add_on_deletion_callback(self, callback: t.Callable[[str], None]) -> None: + def add_on_deletion_callback(self, callback: Callable[[str], None]) -> None: self.on_deletion_callbacks.append(callback) def _on_study_delete(self, uuid: str) -> None: @@ -504,12 +503,12 @@ def aggregate_output_data( self, uuid: str, output_id: str, - query_file: t.Union[MCIndAreasQueryFile, MCAllAreasQueryFile, MCIndLinksQueryFile, MCAllLinksQueryFile], + query_file: MCIndAreasQueryFile | MCAllAreasQueryFile | MCIndLinksQueryFile | MCAllLinksQueryFile, frequency: MatrixFrequency, - columns_names: t.Sequence[str], - ids_to_consider: t.Sequence[str], + columns_names: Sequence[str], + ids_to_consider: Sequence[str], params: RequestParameters, - mc_years: t.Optional[t.Sequence[int]] = None, + mc_years: Optional[Sequence[int]] = None, ) -> pd.DataFrame: """ Aggregates output data based on several filtering conditions @@ -542,7 +541,7 @@ def get_logs( job_id: str, err_log: bool, params: RequestParameters, - ) -> t.Optional[str]: + ) -> Optional[str]: study = self.get_study(study_id) assert_permission(params.user, study, StudyPermissionType.READ) file_study = self.storage_service.get_storage(study).get_raw(study) @@ -562,7 +561,7 @@ def get_logs( empty_log = False for log_location in log_locations[err_log]: try: - log = t.cast( + log = cast( bytes, file_study.tree.get(log_location, depth=1, formatted=True), ).decode(encoding="utf-8") @@ -600,7 +599,7 @@ def save_logs( ) stopwatch.log_elapsed(lambda d: logger.info(f"Saved logs for job {job_id} in {d}s")) - def get_comments(self, study_id: str, params: RequestParameters) -> t.Union[str, JSON]: + def get_comments(self, study_id: str, params: RequestParameters) -> str | JSON: """ Get the comments of a study. @@ -667,9 +666,9 @@ def edit_comments( def get_studies_information( self, study_filter: StudyFilter, - sort_by: t.Optional[StudySortBy] = None, + sort_by: Optional[StudySortBy] = None, pagination: StudyPagination = StudyPagination(), - ) -> t.Dict[str, StudyMetadataDTO]: + ) -> Dict[str, StudyMetadataDTO]: """ Get information for matching studies of a search query. 
Args: @@ -680,7 +679,7 @@ def get_studies_information( Returns: List of study information """ logger.info("Retrieving matching studies") - studies: t.Dict[str, StudyMetadataDTO] = {} + studies: Dict[str, StudyMetadataDTO] = {} matching_studies = self.repository.get_all( study_filter=study_filter, sort_by=sort_by, @@ -709,7 +708,7 @@ def count_studies( ) return total - def _try_get_studies_information(self, study: Study) -> t.Optional[StudyMetadataDTO]: + def _try_get_studies_information(self, study: Study) -> Optional[StudyMetadataDTO]: try: return self.storage_service.get_storage(study).get_study_information(study) except Exception as e: @@ -827,8 +826,8 @@ def get_study_path(self, uuid: str, params: RequestParameters) -> Path: def create_study( self, study_name: str, - version: t.Optional[str], - group_ids: t.List[str], + version: Optional[str], + group_ids: List[str], params: RequestParameters, ) -> str: """ @@ -908,9 +907,7 @@ def get_study_synthesis(self, study_id: str, params: RequestParameters) -> FileS study_storage_service = self.storage_service.get_storage(study) return study_storage_service.get_synthesis(study, params) - def get_input_matrix_startdate( - self, study_id: str, path: t.Optional[str], params: RequestParameters - ) -> MatrixIndex: + def get_input_matrix_startdate(self, study_id: str, path: Optional[str], params: RequestParameters) -> MatrixIndex: study = self.get_study(study_id) assert_permission(params.user, study, StudyPermissionType.READ) file_study = self.storage_service.get_storage(study).get_raw(study) @@ -927,7 +924,7 @@ def get_input_matrix_startdate( def remove_duplicates(self) -> None: duplicates = self.repository.list_duplicates() - ids: t.List[str] = [] + ids: List[str] = [] # ids with same path duplicates_by_path = collections.defaultdict(list) for study_id, path in duplicates: @@ -938,7 +935,7 @@ def remove_duplicates(self) -> None: self.repository.delete(*ids) def sync_studies_on_disk( - self, folders: t.List[StudyFolder], directory: t.Optional[Path] = None, recursive: bool = True + self, folders: List[StudyFolder], directory: Optional[Path] = None, recursive: bool = True ) -> None: """ Used by watcher to send list of studies present on filesystem. 
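`remove_duplicates` (a few hunks up) pairs with `list_duplicates` from the repository: rows sharing a path are grouped, and all but one per path are deleted. The hunk shows the grouping but not which row survives, so that policy is an assumption here:

```python
import collections
from typing import List, Tuple

# Rows as returned by list_duplicates(): (study_id, path)
duplicates: List[Tuple[str, str]] = [
    ("id-1", "/studies/a"),
    ("id-2", "/studies/a"),
    ("id-3", "/studies/b"),
]

ids: List[str] = []
duplicates_by_path = collections.defaultdict(list)
for study_id, path in duplicates:
    duplicates_by_path[path].append(study_id)
for study_ids in duplicates_by_path.values():
    if len(study_ids) > 1:
        # Assumption: keep one row per path and delete the rest;
        # which one survives is not visible in the hunk.
        ids.extend(study_ids[:-1])

assert ids == ["id-1"]
```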
@@ -1054,7 +1051,7 @@ def copy_study( self, src_uuid: str, dest_study_name: str, - group_ids: t.List[str], + group_ids: List[str], use_task: bool, params: RequestParameters, with_outputs: bool = False, @@ -1194,7 +1191,7 @@ def output_variables_information( study_uuid: str, output_uuid: str, params: RequestParameters, - ) -> t.Dict[str, t.List[str]]: + ) -> Dict[str, List[str]]: """ Returns information about output variables using thematic and geographic trimming information Args: @@ -1268,7 +1265,7 @@ def export_study_flat( uuid: str, params: RequestParameters, dest: Path, - output_list: t.Optional[t.List[str]] = None, + output_list: Optional[List[str]] = None, ) -> None: logger.info(f"Flat exporting study {uuid}") study = self.get_study(uuid) @@ -1365,8 +1362,8 @@ def download_outputs( use_task: bool, filetype: ExportFormat, params: RequestParameters, - tmp_export_file: t.Optional[Path] = None, - ) -> t.Union[Response, FileDownloadTaskDTO, FileResponse]: + tmp_export_file: Optional[Path] = None, + ) -> Response | FileDownloadTaskDTO | FileResponse: """ Download outputs Args: @@ -1463,7 +1460,7 @@ def export_task(_notifier: ITaskNotifier) -> TaskResult: json_response = to_json(matrix.model_dump(mode="json")) return Response(content=json_response, media_type="application/json") - def get_study_sim_result(self, study_id: str, params: RequestParameters) -> t.List[StudySimResultDTO]: + def get_study_sim_result(self, study_id: str, params: RequestParameters) -> List[StudySimResultDTO]: """ Get global result information Args: @@ -1511,8 +1508,8 @@ def set_sim_reference( def import_study( self, - stream: t.BinaryIO, - group_ids: t.List[str], + stream: BinaryIO, + group_ids: List[str], params: RequestParameters, ) -> str: """ @@ -1557,11 +1554,11 @@ def import_study( def import_output( self, uuid: str, - output: t.Union[t.BinaryIO, Path], + output: BinaryIO | Path, params: RequestParameters, - output_name_suffix: t.Optional[str] = None, + output_name_suffix: Optional[str] = None, auto_unzip: bool = True, - ) -> t.Optional[str]: + ) -> Optional[str]: """ Import specific output simulation inside study Args: @@ -1641,7 +1638,7 @@ def _edit_study_using_command( data: SUB_JSON, *, create_missing: bool = False, - ) -> t.List[ICommand]: + ) -> List[ICommand]: """ Replace data on disk with new, using variant commands. 
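Most of the `service.py` hunks are the same mechanical rewrite: `t.Union[A, B]` becomes the PEP 604 form `A | B` in signatures, `t.Optional[X]` becomes a directly imported `Optional[X]`, and `t.cast` becomes `cast`. A compact before/after sketch of the convention the patch converges on (the `get_comments` example mirrors the hunk above; the body is a stub):

```python
# Before: everything funneled through the aliased module
import typing as t


def get_comments_v1(raw: bytes) -> t.Union[str, t.Dict[str, t.Any]]:
    return raw.decode("utf-8")


# After: direct imports, PEP 604 unions in signatures
from typing import Any, Dict


def get_comments_v2(raw: bytes) -> str | Dict[str, Any]:
    return raw.decode("utf-8")
```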
@@ -1657,7 +1654,7 @@ def _edit_study_using_command( study_service = self.storage_service.get_storage(study) file_study = study_service.get_raw(metadata=study) version = file_study.config.version - commands: t.List[ICommand] = [] + commands: List[ICommand] = [] file_relpath = PurePosixPath(url.strip().strip("/")) file_path = study_service.get_study_path(study).joinpath(file_relpath) @@ -1690,9 +1687,7 @@ def _edit_study_using_command( execute_or_add_commands(study, file_study, commands, self.storage_service) return commands # for testing purpose - def apply_commands( - self, uuid: str, commands: t.List[CommandDTO], params: RequestParameters - ) -> t.Optional[t.List[str]]: + def apply_commands(self, uuid: str, commands: List[CommandDTO], params: RequestParameters) -> Optional[List[str]]: study = self.get_study(uuid) if isinstance(study, VariantStudy): return self.storage_service.variant_study_service.append_commands(uuid, commands, params) @@ -1700,7 +1695,7 @@ def apply_commands( file_study = self.storage_service.raw_study_service.get_raw(study) assert_permission(params.user, study, StudyPermissionType.WRITE) self._assert_study_unarchived(study) - parsed_commands: t.List[ICommand] = [] + parsed_commands: List[ICommand] = [] for command in commands: parsed_commands.extend(self.storage_service.variant_study_service.command_factory.to_command(command)) execute_or_add_commands( @@ -1763,7 +1758,7 @@ def edit_study( uuid, params.get_user_id(), ) - return t.cast(JSON, new) + return cast(JSON, new) def change_owner(self, study_id: str, owner_id: int, params: RequestParameters) -> None: """ @@ -1891,7 +1886,7 @@ def set_public_mode(self, study_id: str, mode: PublicMode, params: RequestParame params.get_user_id(), ) - def check_errors(self, uuid: str) -> t.List[str]: + def check_errors(self, uuid: str) -> List[str]: study = self.get_study(uuid) self._assert_study_unarchived(study) return self.storage_service.raw_study_service.check_errors(study) @@ -1899,10 +1894,10 @@ def check_errors(self, uuid: str) -> t.List[str]: def get_all_areas( self, uuid: str, - area_type: t.Optional[AreaType], + area_type: Optional[AreaType], ui: bool, params: RequestParameters, - ) -> t.Union[t.List[AreaInfoDTO], t.Dict[str, t.Any]]: + ) -> List[AreaInfoDTO] | Dict[str, Any]: study = self.get_study(uuid) assert_permission(params.user, study, StudyPermissionType.READ) return ( @@ -1913,7 +1908,7 @@ def get_all_links( self, uuid: str, params: RequestParameters, - ) -> t.List[LinkDTO]: + ) -> List[LinkDTO]: study = self.get_study(uuid) assert_permission(params.user, study, StudyPermissionType.READ) return self.links_manager.get_all_links(study) @@ -2014,7 +2009,7 @@ def update_thermal_cluster_metadata( self, uuid: str, area_id: str, - clusters_metadata: t.Dict[str, PatchCluster], + clusters_metadata: Dict[str, PatchCluster], params: RequestParameters, ) -> AreaInfoDTO: study = self.get_study(uuid) @@ -2189,8 +2184,8 @@ def unarchive_task(notifier: ITaskNotifier) -> TaskResult: def _save_study( self, study: Study, - owner: t.Optional[JWTUser] = None, - group_ids: t.Sequence[str] = (), + owner: Optional[JWTUser] = None, + group_ids: Sequence[str] = (), ) -> None: """ Create or update a study with specified attributes. 
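One detail from `_edit_study_using_command` above: the edited URL is normalized into a study-relative `PurePosixPath` before any command is built, which keeps the path POSIX-style on every platform. In isolation (the URL below is illustrative):

```python
from pathlib import PurePosixPath

url = "/input/areas/list/"
file_relpath = PurePosixPath(url.strip().strip("/"))
assert str(file_relpath) == "input/areas/list"
```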
@@ -2218,7 +2213,7 @@ def _save_study( study.groups.clear() for gid in group_ids: owned_groups = (g for g in owner.groups if g.id == gid) - jwt_group: t.Optional[JWTGroup] = next(owned_groups, None) + jwt_group: Optional[JWTGroup] = next(owned_groups, None) if jwt_group is None or jwt_group.role is None: raise UserHasNotPermissionError(f"Permission denied for group ID: {gid}") study.groups.append(Group(id=jwt_group.id, name=jwt_group.name)) @@ -2276,13 +2271,13 @@ def _analyse_study(self, metadata: Study) -> StudyContentStatus: # noinspection PyUnusedLocal @staticmethod - def get_studies_versions(params: RequestParameters) -> t.List[str]: + def get_studies_versions(params: RequestParameters) -> List[str]: return [f"{v:ddd}" for v in STUDY_REFERENCE_TEMPLATES] def create_xpansion_configuration( self, uuid: str, - zipped_config: t.Optional[UploadFile], + zipped_config: Optional[UploadFile], params: RequestParameters, ) -> None: study = self.get_study(uuid) @@ -2328,7 +2323,7 @@ def get_candidate(self, uuid: str, candidate_name: str, params: RequestParameter assert_permission(params.user, study, StudyPermissionType.READ) return self.xpansion_manager.get_candidate(study, candidate_name) - def get_candidates(self, uuid: str, params: RequestParameters) -> t.List[XpansionCandidateDTO]: + def get_candidates(self, uuid: str, params: RequestParameters) -> List[XpansionCandidateDTO]: study = self.get_study(uuid) assert_permission(params.user, study, StudyPermissionType.READ) return self.xpansion_manager.get_candidates(study) @@ -2366,7 +2361,7 @@ def update_matrix( self, uuid: str, path: str, - matrix_edit_instruction: t.List[MatrixEditInstruction], + matrix_edit_instruction: List[MatrixEditInstruction], params: RequestParameters, ) -> None: """ @@ -2428,7 +2423,7 @@ def archive_outputs(self, study_id: str, params: RequestParameters) -> None: self.archive_output(study_id, output, params) @staticmethod - def _get_output_archive_task_names(study: Study, output_id: str) -> t.Tuple[str, str]: + def _get_output_archive_task_names(study: Study, output_id: str) -> Tuple[str, str]: return ( f"Archive output {study.id}/{output_id}", f"Unarchive output {study.name}/{output_id} ({study.id})", @@ -2440,7 +2435,7 @@ def archive_output( output_id: str, params: RequestParameters, force: bool = False, - ) -> t.Optional[str]: + ) -> Optional[str]: study = self.get_study(study_id) assert_permission(params.user, study, StudyPermissionType.WRITE) self._assert_study_unarchived(study) @@ -2502,7 +2497,7 @@ def unarchive_output( output_id: str, keep_src_zip: bool, params: RequestParameters, - ) -> t.Optional[str]: + ) -> Optional[str]: study = self.get_study(study_id) assert_permission(params.user, study, StudyPermissionType.READ) self._assert_study_unarchived(study) @@ -2546,7 +2541,7 @@ def unarchive_output_task(notifier: ITaskNotifier) -> TaskResult: ) raise e - task_id: t.Optional[str] = None + task_id: Optional[str] = None workspace = getattr(study, "workspace", DEFAULT_WORKSPACE_NAME) if workspace != DEFAULT_WORKSPACE_NAME: dest = Path(study.path) / "output" / output_id @@ -2720,8 +2715,8 @@ def get_matrix_with_index_and_header( study = self.get_study(study_id) if matrix_path.parts in [("input", "hydro", "allocation"), ("input", "hydro", "correlation")]: - all_areas = t.cast( - t.List[AreaInfoDTO], + all_areas = cast( + List[AreaInfoDTO], self.get_all_areas(study_id, area_type=AreaType.AREA, ui=False, params=parameters), ) if matrix_path.parts[-1] == "allocation": @@ -2761,9 +2756,7 @@ def 
get_matrix_with_index_and_header( return df_matrix - def asserts_no_thermal_in_binding_constraints( - self, study: Study, area_id: str, cluster_ids: t.Sequence[str] - ) -> None: + def asserts_no_thermal_in_binding_constraints(self, study: Study, area_id: str, cluster_ids: Sequence[str]) -> None: """ Check that no cluster is referenced in a binding constraint, otherwise raise an HTTP 403 Forbidden error. @@ -2825,9 +2818,9 @@ def create_user_folder(self, study_id: str, path: str, current_user: JWTUser) -> def _alter_user_folder( self, study_id: str, - command_data: t.Union[CreateUserResourceData, RemoveUserResourceData], - command_class: t.Type[t.Union[CreateUserResource, RemoveUserResource]], - exception_class: t.Type[t.Union[FolderCreationNotAllowed, ResourceDeletionNotAllowed]], + command_data: CreateUserResourceData | RemoveUserResourceData, + command_class: Type[CreateUserResource | RemoveUserResource], + exception_class: Type[FolderCreationNotAllowed | ResourceDeletionNotAllowed], current_user: JWTUser, ) -> None: study = self.get_study(study_id) diff --git a/antarest/study/storage/abstract_storage_service.py b/antarest/study/storage/abstract_storage_service.py index 69d9ba6a27..b075097b4c 100644 --- a/antarest/study/storage/abstract_storage_service.py +++ b/antarest/study/storage/abstract_storage_service.py @@ -13,9 +13,9 @@ import logging import shutil import tempfile -import typing as t from abc import ABC from pathlib import Path +from typing import BinaryIO, List, Optional from uuid import uuid4 from typing_extensions import override @@ -102,7 +102,7 @@ def get_study_information( patch_metadata = patch.study or PatchStudy() study_workspace = getattr(study, "workspace", DEFAULT_WORKSPACE_NAME) - folder: t.Optional[str] = None + folder: Optional[str] = None if hasattr(study, "folder"): folder = study.folder @@ -160,7 +160,7 @@ def get( if url == "" and depth == -1: cache_id = f"{CacheConstants.RAW_STUDY}/{metadata.id}" - from_cache: t.Optional[JSON] = None + from_cache: Optional[JSON] = None if use_cache: from_cache = self.cache.get(cache_id) if from_cache is not None: @@ -204,7 +204,7 @@ def get_file( def get_study_sim_result( self, study: T, - ) -> t.List[StudySimResultDTO]: + ) -> List[StudySimResultDTO]: """ Get global result information Args: @@ -213,7 +213,7 @@ def get_study_sim_result( """ study_data = self.get_raw(study) patch_metadata = self.patch_service.get(study) - results: t.List[StudySimResultDTO] = [] + results: List[StudySimResultDTO] = [] if study_data.config.outputs is not None: reference = (patch_metadata.outputs or PatchOutputs()).reference for output in study_data.config.outputs: @@ -254,9 +254,9 @@ def get_study_sim_result( def import_output( self, metadata: T, - output: t.Union[t.BinaryIO, Path], - output_name: t.Optional[str] = None, - ) -> t.Optional[str]: + output: BinaryIO | Path, + output_name: Optional[str] = None, + ) -> Optional[str]: """ Import additional output in an existing study. 
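The `abstract_storage_service.py` hunk above touches a simple read-through cache in `get`: full-tree requests (`url == ""`, `depth == -1`) are looked up under a `RAW_STUDY/<id>` key before the filesystem is walked. A minimal sketch of that pattern with a simplified stand-in for the `ICache` interface; the write-back on a miss is assumed, since the hunk cuts off before it:

```python
from typing import Any, Callable, Dict, Optional


class DictCache:
    """Simplified stand-in for antarest's ICache interface."""

    def __init__(self) -> None:
        self._store: Dict[str, Any] = {}

    def get(self, key: str) -> Optional[Any]:
        return self._store.get(key)

    def put(self, key: str, value: Any) -> None:
        self._store[key] = value


def get_study_tree(cache: DictCache, study_id: str, load: Callable[[str], Any], use_cache: bool = True) -> Any:
    cache_id = f"raw_study/{study_id}"
    from_cache: Optional[Any] = cache.get(cache_id) if use_cache else None
    if from_cache is not None:
        return from_cache
    data = load(study_id)  # the expensive filesystem walk
    cache.put(cache_id, data)  # assumed write-back; the hunk cuts off here
    return data


cache = DictCache()
calls: list[str] = []
tree = get_study_tree(cache, "abc", lambda sid: calls.append(sid) or {"study": sid})
assert get_study_tree(cache, "abc", lambda sid: {"fresh": sid}) == {"study": "abc"}
assert calls == ["abc"]  # the loader ran only once; the second call hit the cache
```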
@@ -274,7 +274,7 @@ def import_output( path_output = Path(metadata.path) / "output" / f"imported_output_{str(uuid4())}" study_id = metadata.id path_output.mkdir(parents=True) - output_full_name: t.Optional[str] + output_full_name: Optional[str] is_zipped = False stopwatch = StopWatch() try: diff --git a/antarest/study/storage/auto_archive_service.py b/antarest/study/storage/auto_archive_service.py index aed3ff1027..20b2bfb9cb 100644 --- a/antarest/study/storage/auto_archive_service.py +++ b/antarest/study/storage/auto_archive_service.py @@ -13,7 +13,7 @@ import datetime import logging import time -import typing as t +from typing import Sequence from typing_extensions import override @@ -47,7 +47,7 @@ def _try_archive_studies(self) -> None: old_date = datetime.datetime.utcnow() - datetime.timedelta(days=self.config.storage.auto_archive_threshold_days) with db(): # in this part full `Read` rights over studies are granted to this function - studies: t.Sequence[Study] = self.study_service.repository.get_all( + studies: Sequence[Study] = self.study_service.repository.get_all( study_filter=StudyFilter(managed=True, access_permissions=AccessPermissions(is_admin=True)) ) # list of study IDs and boolean indicating if it's a raw study (True) or a variant (False) diff --git a/antarest/study/storage/df_download.py b/antarest/study/storage/df_download.py index acb9471802..bd80762bb0 100644 --- a/antarest/study/storage/df_download.py +++ b/antarest/study/storage/df_download.py @@ -11,7 +11,6 @@ # This file is part of the Antares project. import http -import typing as t from pathlib import Path import pandas as pd @@ -71,7 +70,7 @@ def suffix(self) -> str: def export_table( self, df: pd.DataFrame, - export_path: t.Union[str, Path], + export_path: str | Path, *, with_index: bool = True, with_header: bool = True, diff --git a/antarest/study/storage/matrix_profile.py b/antarest/study/storage/matrix_profile.py index c18808c8b3..ca9cb27639 100644 --- a/antarest/study/storage/matrix_profile.py +++ b/antarest/study/storage/matrix_profile.py @@ -12,8 +12,8 @@ import copy import fnmatch -import typing as t from pathlib import Path +from typing import Dict, NamedTuple, Sequence import pandas as pd @@ -21,13 +21,13 @@ from antarest.study.storage.utils import MONTHS -class _MatrixProfile(t.NamedTuple): +class _MatrixProfile(NamedTuple): """ Matrix profile for time series or specific matrices. """ - cols: t.Sequence[str] - rows: t.Sequence[str] + cols: Sequence[str] + rows: Sequence[str] def process_dataframe( self, @@ -61,7 +61,7 @@ def process_dataframe( if with_index and self.rows: df.index = pd.Index(self.rows) - def _process_links_columns(self, matrix_path: str) -> t.Sequence[str]: + def _process_links_columns(self, matrix_path: str) -> Sequence[str]: """Process column names specific to the links matrices.""" path_parts = Path(matrix_path).parts area1_id = path_parts[2] @@ -75,7 +75,7 @@ def _process_links_columns(self, matrix_path: str) -> t.Sequence[str]: return result -_SPECIFIC_MATRICES: t.Dict[str, _MatrixProfile] +_SPECIFIC_MATRICES: Dict[str, _MatrixProfile] """ The dictionary ``_SPECIFIC_MATRICES`` maps file patterns to ``_MatrixProfile`` objects, representing non-time series matrices. diff --git a/antarest/study/storage/patch_service.py b/antarest/study/storage/patch_service.py index f98870e240..5f272c87dd 100644 --- a/antarest/study/storage/patch_service.py +++ b/antarest/study/storage/patch_service.py @@ -10,8 +10,8 @@ # # This file is part of the Antares project. 
-import typing as t from pathlib import Path +from typing import Optional from antarest.core.serialization import from_json from antarest.study.model import Patch, PatchOutputs, RawStudy, StudyAdditionalData @@ -27,10 +27,10 @@ class PatchService: Handle patch file ("patch.json") for a RawStudy or VariantStudy """ - def __init__(self, repository: t.Optional[StudyMetadataRepository] = None): + def __init__(self, repository: Optional[StudyMetadataRepository] = None): self.repository = repository - def get(self, study: t.Union[RawStudy, VariantStudy], get_from_file: bool = False) -> Patch: + def get(self, study: RawStudy | VariantStudy, get_from_file: bool = False) -> Patch: if not get_from_file and study.additional_data is not None: # the `study.additional_data.patch` field is optional if study.additional_data.patch: @@ -52,7 +52,7 @@ def get_from_filestudy(self, file_study: FileStudy) -> Patch: def set_reference_output( self, - study: t.Union[RawStudy, VariantStudy], + study: RawStudy | VariantStudy, output_id: str, status: bool = True, ) -> None: @@ -63,7 +63,7 @@ def set_reference_output( patch.outputs = PatchOutputs(reference=output_id) self.save(study, patch) - def save(self, study: t.Union[RawStudy, VariantStudy], patch: Patch) -> None: + def save(self, study: RawStudy | VariantStudy, patch: Patch) -> None: if self.repository: study.additional_data = study.additional_data or StudyAdditionalData() study.additional_data.patch = patch.model_dump_json() diff --git a/antarest/study/storage/rawstudy/ini_reader.py b/antarest/study/storage/rawstudy/ini_reader.py index e003878e7c..2abd6a1700 100644 --- a/antarest/study/storage/rawstudy/ini_reader.py +++ b/antarest/study/storage/rawstudy/ini_reader.py @@ -12,22 +12,22 @@ import dataclasses import re -import typing as t from abc import ABC, abstractmethod from pathlib import Path +from typing import Any, Dict, Mapping, Optional, Pattern, Sequence, TextIO, cast from typing_extensions import override from antarest.core.model import JSON -def convert_value(value: str) -> t.Union[str, int, float, bool]: +def convert_value(value: str) -> str | int | float | bool: """Convert value to the appropriate type for JSON.""" try: # Infinity values are not supported by JSON, so we use a string instead. mapping = {"true": True, "false": False, "+inf": "+Inf", "-inf": "-Inf", "inf": "+Inf"} - return t.cast(t.Union[str, int, float, bool], mapping[value.lower()]) + return cast(str | int | float | bool, mapping[value.lower()]) except KeyError: try: return int(value) @@ -48,17 +48,17 @@ class IniFilter: option_regex: A compiled regex for matching option names. """ - section_regex: t.Optional[t.Pattern[str]] = None - option_regex: t.Optional[t.Pattern[str]] = None + section_regex: Optional[Pattern[str]] = None + option_regex: Optional[Pattern[str]] = None @classmethod def from_kwargs( cls, section: str = "", option: str = "", - section_regex: t.Optional[t.Union[str, t.Pattern[str]]] = None, - option_regex: t.Optional[t.Union[str, t.Pattern[str]]] = None, - **_unused: t.Any, # ignore unknown options + section_regex: Optional[str | Pattern[str]] = None, + option_regex: Optional[str | Pattern[str]] = None, + **_unused: Any, # ignore unknown options ) -> "IniFilter": """ Create an instance from given filtering parameters. @@ -110,7 +110,7 @@ class IReader(ABC): """ @abstractmethod - def read(self, path: t.Any, **kwargs: t.Any) -> JSON: + def read(self, path: Any, **kwargs: Any) -> JSON: """ Parse `.ini` file to json object. 
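Expected behaviour of `convert_value` from the hunk above; the `int`/`float` fallbacks after the `KeyError` handler are only partially visible in the diff, so the numeric and pass-through cases are inferred:

    from antarest.study.storage.rawstudy.ini_reader import convert_value

    assert convert_value("TRUE") is True    # boolean mapping is case-insensitive
    assert convert_value("+inf") == "+Inf"  # JSON has no infinity, a string stands in
    assert convert_value("42") == 42        # int(value) fallback
    assert convert_value("2.5") == 2.5      # presumably float(value) next
    assert convert_value("abc") == "abc"    # presumably returned unchanged last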
@@ -152,7 +152,7 @@ class IniReader(IReader): This class is not compatible with standard `.ini` readers. """ - def __init__(self, special_keys: t.Sequence[str] = (), section_name: str = "settings") -> None: + def __init__(self, special_keys: Sequence[str] = (), section_name: str = "settings") -> None: super().__init__() # Default section name to use if `.ini` file has no section. @@ -162,7 +162,7 @@ def __init__(self, special_keys: t.Sequence[str] = (), section_name: str = "sett self._section_name = section_name # Dictionary of parsed sections and options - self._curr_sections: t.Dict[str, t.Dict[str, t.Any]] = {} + self._curr_sections: Dict[str, Dict[str, Any]] = {} # Current section name used during paring self._curr_section = "" @@ -180,7 +180,7 @@ def __repr__(self) -> str: # pragma: no cover return f"{cls}(special_keys={special_keys!r}, section_name={section_name!r})" @override - def read(self, path: t.Any, **kwargs: t.Any) -> JSON: + def read(self, path: Any, **kwargs: Any) -> JSON: if isinstance(path, (Path, str)): try: with open(path, mode="r", encoding="utf-8") as f: @@ -201,9 +201,9 @@ def read(self, path: t.Any, **kwargs: t.Any) -> JSON: else: # pragma: no cover raise TypeError(repr(type(path))) - return t.cast(JSON, sections) + return cast(JSON, sections) - def _parse_ini_file(self, ini_file: t.TextIO, **kwargs: t.Any) -> JSON: + def _parse_ini_file(self, ini_file: TextIO, **kwargs: Any) -> JSON: """ Parse `.ini` file to JSON object. @@ -326,7 +326,7 @@ class SimpleKeyValueReader(IniReader): """ @override - def read(self, path: t.Any, **kwargs: t.Any) -> JSON: + def read(self, path: Any, **kwargs: Any) -> JSON: """ Parse `.ini` file which has no section to JSON object. @@ -340,5 +340,5 @@ def read(self, path: t.Any, **kwargs: t.Any) -> JSON: Dictionary of parsed key/value pairs. """ sections = super().read(path) - obj = t.cast(t.Mapping[str, JSON], sections) + obj = cast(Mapping[str, JSON], sections) return obj[self._section_name] diff --git a/antarest/study/storage/rawstudy/ini_writer.py b/antarest/study/storage/rawstudy/ini_writer.py index 9ff81c213e..8e56efa95b 100644 --- a/antarest/study/storage/rawstudy/ini_writer.py +++ b/antarest/study/storage/rawstudy/ini_writer.py @@ -12,8 +12,8 @@ import ast import configparser -import typing as t from pathlib import Path +from typing import List, Optional from typing_extensions import override @@ -21,7 +21,7 @@ class IniConfigParser(configparser.RawConfigParser): - def __init__(self, special_keys: t.Optional[t.List[str]] = None) -> None: + def __init__(self, special_keys: Optional[List[str]] = None) -> None: super().__init__() self.special_keys = special_keys @@ -70,7 +70,7 @@ class IniWriter: Standard INI writer. """ - def __init__(self, special_keys: t.Optional[t.List[str]] = None): + def __init__(self, special_keys: Optional[List[str]] = None): self.special_keys = special_keys def write(self, data: JSON, path: Path) -> None: diff --git a/antarest/study/storage/rawstudy/model/filesystem/bucket_node.py b/antarest/study/storage/rawstudy/model/filesystem/bucket_node.py index a058721d4d..52bb316751 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/bucket_node.py +++ b/antarest/study/storage/rawstudy/model/filesystem/bucket_node.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. 
-import typing as t +from typing import Any, Callable, List, Optional from typing_extensions import override @@ -26,7 +26,7 @@ class RegisteredFile: def __init__( self, key: str, - node: t.Optional[t.Callable[[ContextServer, FileStudyTreeConfig], INode[t.Any, t.Any, t.Any]]], + node: Optional[Callable[[ContextServer, FileStudyTreeConfig], INode[Any, Any, Any]]], filename: str = "", ): self.key = key @@ -43,24 +43,24 @@ def __init__( self, context: ContextServer, config: FileStudyTreeConfig, - registered_files: t.Optional[t.List[RegisteredFile]] = None, - default_file_node: t.Callable[..., INode[t.Any, t.Any, t.Any]] = RawFileNode, + registered_files: Optional[List[RegisteredFile]] = None, + default_file_node: Callable[..., INode[Any, Any, Any]] = RawFileNode, ): super().__init__(context, config) - self.registered_files: t.List[RegisteredFile] = registered_files or [] - self.default_file_node: t.Callable[..., INode[t.Any, t.Any, t.Any]] = default_file_node + self.registered_files: List[RegisteredFile] = registered_files or [] + self.default_file_node: Callable[..., INode[Any, Any, Any]] = default_file_node - def _get_registered_file_by_key(self, key: str) -> t.Optional[RegisteredFile]: + def _get_registered_file_by_key(self, key: str) -> Optional[RegisteredFile]: return next((rf for rf in self.registered_files if rf.key == key), None) - def _get_registered_file_by_filename(self, filename: str) -> t.Optional[RegisteredFile]: + def _get_registered_file_by_filename(self, filename: str) -> Optional[RegisteredFile]: return next((rf for rf in self.registered_files if rf.filename == filename), None) @override def save( self, data: SUB_JSON, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, ) -> None: self._assert_not_in_zipped_file() if not self.config.path.exists(): @@ -118,7 +118,7 @@ def build(self) -> TREE: def check_errors( self, data: JSON, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, raising: bool = False, - ) -> t.List[str]: + ) -> List[str]: return [] diff --git a/antarest/study/storage/rawstudy/model/filesystem/common/area_matrix_list.py b/antarest/study/storage/rawstudy/model/filesystem/common/area_matrix_list.py index 0edb3de396..051fe3530f 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/common/area_matrix_list.py +++ b/antarest/study/storage/rawstudy/model/filesystem/common/area_matrix_list.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. 
-import typing as t +from typing import Any, Callable, Dict, Optional from typing_extensions import override @@ -60,8 +60,8 @@ def __init__( config: FileStudyTreeConfig, *, prefix: str = "", - matrix_class: t.Callable[..., INode[t.Any, t.Any, t.Any]] = InputSeriesMatrix, - additional_matrix_params: t.Optional[t.Dict[str, t.Any]] = None, + matrix_class: Callable[..., INode[Any, Any, Any]] = InputSeriesMatrix, + additional_matrix_params: Optional[Dict[str, Any]] = None, ): super().__init__(context, config) self.prefix = prefix @@ -97,7 +97,7 @@ def __init__( context: ContextServer, config: FileStudyTreeConfig, area: str, - matrix_class: t.Callable[[ContextServer, FileStudyTreeConfig], INode[t.Any, t.Any, t.Any]], + matrix_class: Callable[[ContextServer, FileStudyTreeConfig], INode[Any, Any, Any]], ): super().__init__(context, config) self.area = area @@ -117,7 +117,7 @@ def __init__( self, context: ContextServer, config: FileStudyTreeConfig, - matrix_class: t.Callable[[ContextServer, FileStudyTreeConfig], INode[t.Any, t.Any, t.Any]], + matrix_class: Callable[[ContextServer, FileStudyTreeConfig], INode[Any, Any, Any]], ): super().__init__(context, config) self.matrix_class = matrix_class @@ -137,7 +137,7 @@ def __init__( context: ContextServer, config: FileStudyTreeConfig, area: str, - matrix_class: t.Callable[[ContextServer, FileStudyTreeConfig], INode[t.Any, t.Any, t.Any]], + matrix_class: Callable[[ContextServer, FileStudyTreeConfig], INode[Any, Any, Any]], ): super().__init__(context, config) self.area = area @@ -173,19 +173,19 @@ def __init__( self, context: ContextServer, config: FileStudyTreeConfig, - klass: t.Callable[ + klass: Callable[ [ ContextServer, FileStudyTreeConfig, str, - t.Callable[ + Callable[ [ContextServer, FileStudyTreeConfig], - INode[t.Any, t.Any, t.Any], + INode[Any, Any, Any], ], ], - INode[t.Any, t.Any, t.Any], + INode[Any, Any, Any], ], - matrix_class: t.Callable[[ContextServer, FileStudyTreeConfig], INode[t.Any, t.Any, t.Any]], + matrix_class: Callable[[ContextServer, FileStudyTreeConfig], INode[Any, Any, Any]], ): super().__init__(context, config) self.klass = klass diff --git a/antarest/study/storage/rawstudy/model/filesystem/config/area.py b/antarest/study/storage/rawstudy/model/filesystem/config/area.py index c91b42a00d..9212c48a13 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/config/area.py +++ b/antarest/study/storage/rawstudy/model/filesystem/config/area.py @@ -14,7 +14,7 @@ Object model used to read and update area configuration. 
""" -import typing as t +from typing import Any, Dict, Mapping, MutableMapping, Optional, Set from pydantic import Field, field_validator, model_validator from typing_extensions import override @@ -91,7 +91,7 @@ class FilteringSection(IniProperties): filter_year_by_year: str = Field("", alias="filter-year-by-year") @field_validator("filter_synthesis", "filter_year_by_year", mode="before") - def _validate_filtering(cls, v: t.Any) -> str: + def _validate_filtering(cls, v: Any) -> str: return validate_filtering(v) # noinspection SpellCheckingInspection @@ -179,15 +179,15 @@ class AreaUI(IniProperties): ) @field_validator("color_rgb", mode="before") - def _validate_color_rgb(cls, v: t.Any) -> str: + def _validate_color_rgb(cls, v: Any) -> str: return validate_color_rgb(v) @model_validator(mode="before") - def _validate_colors(cls, values: t.MutableMapping[str, t.Any]) -> t.Mapping[str, t.Any]: + def _validate_colors(cls, values: MutableMapping[str, Any]) -> Mapping[str, Any]: return validate_colors(values) @override - def to_config(self) -> t.Dict[str, t.Any]: + def to_config(self) -> Dict[str, Any]: """ Convert the object to a dictionary for writing to a configuration file: @@ -262,18 +262,18 @@ class UIProperties(IniProperties): default_factory=AreaUI, description="style of the area in the map: coordinates and color", ) - layers: t.Set[int] = Field( + layers: Set[int] = Field( default_factory=lambda: {0}, description="layers where the area is visible", ) - layer_styles: t.Dict[int, AreaUI] = Field( + layer_styles: Dict[int, AreaUI] = Field( default_factory=dict, description="style of the area in each layer", alias="layerStyles", ) @staticmethod - def _set_default_style(values: t.MutableMapping[str, t.Any]) -> t.Mapping[str, t.Any]: + def _set_default_style(values: MutableMapping[str, Any]) -> Mapping[str, Any]: """Defined the default style if missing.""" style = values.get("style", None) if style is None: @@ -285,7 +285,7 @@ def _set_default_style(values: t.MutableMapping[str, t.Any]) -> t.Mapping[str, t return values @staticmethod - def _set_default_layer_styles(values: t.MutableMapping[str, t.Any]) -> t.Mapping[str, t.Any]: + def _set_default_layer_styles(values: MutableMapping[str, Any]) -> Mapping[str, Any]: """Define the default layer styles if missing.""" layer_styles = values.get("layer_styles") if layer_styles is None: @@ -303,7 +303,7 @@ def _set_default_layer_styles(values: t.MutableMapping[str, t.Any]) -> t.Mapping return values @model_validator(mode="before") - def _validate_layers(cls, values: t.MutableMapping[str, t.Any]) -> t.Mapping[str, t.Any]: + def _validate_layers(cls, values: MutableMapping[str, Any]) -> Mapping[str, Any]: cls._set_default_style(values) cls._set_default_layer_styles(values) # Parse the `[ui]` section (if any) @@ -345,7 +345,7 @@ def _validate_layers(cls, values: t.MutableMapping[str, t.Any]) -> t.Mapping[str return values @override - def to_config(self) -> t.Dict[str, t.Dict[str, t.Any]]: + def to_config(self) -> Dict[str, Dict[str, Any]]: """ Convert the object to a dictionary for writing to a configuration file: @@ -373,7 +373,7 @@ def to_config(self) -> t.Dict[str, t.Dict[str, t.Any]]: 'x': 1148, 'y': 144}} """ - obj: t.Dict[str, t.Dict[str, t.Any]] = { + obj: Dict[str, Dict[str, Any]] = { "ui": {}, "layerX": {}, "layerY": {}, @@ -468,7 +468,7 @@ class AreaFolder(IniProperties): default_factory=OptimizationProperties, description="optimization configuration", ) - adequacy_patch: t.Optional[AdequacyPathProperties] = Field( + adequacy_patch: 
Optional[AdequacyPathProperties] = Field( None, description="adequacy patch configuration", ) @@ -533,20 +533,20 @@ class ThermalAreasProperties(IniProperties): 'unserverdenergycost': {'at': 6500.0, 'be': 3500.0, 'de': 1250.0, 'fr': 0.0}} """ - unserverd_energy_cost: t.MutableMapping[str, float] = Field( + unserverd_energy_cost: MutableMapping[str, float] = Field( default_factory=dict, alias="unserverdenergycost", description="unserverd energy cost (€/MWh) of each area", ) - spilled_energy_cost: t.MutableMapping[str, float] = Field( + spilled_energy_cost: MutableMapping[str, float] = Field( default_factory=dict, alias="spilledenergycost", description="spilled energy cost (€/MWh) of each area", ) @field_validator("unserverd_energy_cost", "spilled_energy_cost", mode="before") - def _validate_energy_cost(cls, v: t.Any) -> t.MutableMapping[str, float]: + def _validate_energy_cost(cls, v: Any) -> MutableMapping[str, float]: if isinstance(v, dict): return {str(k): float(v) for k, v in v.items()} raise TypeError(f"Invalid type for energy cost: {type(v)}") diff --git a/antarest/study/storage/rawstudy/model/filesystem/config/binding_constraint.py b/antarest/study/storage/rawstudy/model/filesystem/config/binding_constraint.py index ebf18da753..840b1ed6ce 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/config/binding_constraint.py +++ b/antarest/study/storage/rawstudy/model/filesystem/config/binding_constraint.py @@ -14,7 +14,7 @@ Object model used to read and update binding constraint configuration. """ -import typing as t +from typing import Dict, List from antarest.study.business.enum_ignore_case import EnumIgnoreCase @@ -51,7 +51,7 @@ class BindingConstraintOperator(EnumIgnoreCase): EQUAL = "equal" -OPERATOR_MATRICES_MAP: t.Dict[BindingConstraintOperator, t.List[str]] = { +OPERATOR_MATRICES_MAP: Dict[BindingConstraintOperator, List[str]] = { BindingConstraintOperator.EQUAL: ["eq"], BindingConstraintOperator.GREATER: ["gt"], BindingConstraintOperator.LESS: ["lt"], diff --git a/antarest/study/storage/rawstudy/model/filesystem/config/cluster.py b/antarest/study/storage/rawstudy/model/filesystem/config/cluster.py index f26f2b45e4..b958eec914 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/config/cluster.py +++ b/antarest/study/storage/rawstudy/model/filesystem/config/cluster.py @@ -17,7 +17,7 @@ """ import functools -import typing as t +from typing import Any from pydantic import Field @@ -51,7 +51,7 @@ class ItemProperties( name: str = Field(description="Cluster name", pattern=r"[a-zA-Z0-9_(),& -]+") - def __lt__(self, other: t.Any) -> bool: + def __lt__(self, other: Any) -> bool: """ Compare two clusters by group and name. diff --git a/antarest/study/storage/rawstudy/model/filesystem/config/field_validators.py b/antarest/study/storage/rawstudy/model/filesystem/config/field_validators.py index 5089150c71..83dc031f54 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/config/field_validators.py +++ b/antarest/study/storage/rawstudy/model/filesystem/config/field_validators.py @@ -10,12 +10,12 @@ # # This file is part of the Antares project. -import typing as t +from typing import Any, List, Mapping, MutableMapping _ALL_FILTERING = ["hourly", "daily", "weekly", "monthly", "annual"] -def extract_filtering(v: t.Any) -> t.List[str]: +def extract_filtering(v: Any) -> List[str]: """ Extract filtering values from a comma-separated list of values. 
""" @@ -35,7 +35,7 @@ def extract_filtering(v: t.Any) -> t.List[str]: raise ValueError(f"Invalid value for filtering: {e!s}") from None -def validate_filtering(v: t.Any) -> str: +def validate_filtering(v: Any) -> str: """ Validate the filtering field and convert it to a comma separated string. """ @@ -44,12 +44,12 @@ def validate_filtering(v: t.Any) -> str: # noinspection SpellCheckingInspection -def validate_colors(values: t.MutableMapping[str, t.Any]) -> t.Mapping[str, t.Any]: +def validate_colors(values: MutableMapping[str, Any]) -> Mapping[str, Any]: """ Validate ``color_rgb``, ``color_r``, ``color_g``, ``color_b`` and convert them to ``color_rgb``. """ - def _pop_any(dictionary: t.MutableMapping[str, t.Any], *keys: str) -> t.Any: + def _pop_any(dictionary: MutableMapping[str, Any], *keys: str) -> Any: """Save as `pop` but for multiple keys. Return the first found value.""" return next((dictionary.pop(key, None) for key in keys if key in dictionary), None) @@ -61,7 +61,7 @@ def _pop_any(dictionary: t.MutableMapping[str, t.Any], *keys: str) -> t.Any: return values -def validate_color_rgb(v: t.Any) -> str: +def validate_color_rgb(v: Any) -> str: """ Validate RGB color field and convert it to color code. diff --git a/antarest/study/storage/rawstudy/model/filesystem/config/files.py b/antarest/study/storage/rawstudy/model/filesystem/config/files.py index 52458c9970..947dbd8c1c 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/config/files.py +++ b/antarest/study/storage/rawstudy/model/filesystem/config/files.py @@ -15,22 +15,16 @@ import logging import re import tempfile -import typing as t import zipfile from enum import Enum from pathlib import Path +from typing import Any, Dict, List, Optional, Sequence, Tuple, cast -import py7zr from antares.study.version import StudyVersion from antarest.core.model import JSON from antarest.core.serialization import from_json -from antarest.core.utils.archives import ( - ArchiveFormat, - extract_lines_from_archive, - is_archive_format, - read_file_from_archive, -) +from antarest.core.utils.archives import extract_lines_from_archive, is_archive_format, read_file_from_archive from antarest.study.model import STUDY_VERSION_8_1, STUDY_VERSION_8_6 from antarest.study.storage.rawstudy.ini_reader import IniReader from antarest.study.storage.rawstudy.model.filesystem.config.binding_constraint import ( @@ -76,7 +70,7 @@ def extract_data_from_archive( root: Path, posix_path: str, reader: IniReader, -) -> t.Dict[str, t.Any]: +) -> Dict[str, Any]: """ Extract and process data from various types of files. @@ -97,7 +91,7 @@ def extract_data_from_archive( return {} -def build(study_path: Path, study_id: str, output_path: t.Optional[Path] = None) -> "FileStudyTreeConfig": +def build(study_path: Path, study_id: str, output_path: Optional[Path] = None) -> "FileStudyTreeConfig": """ Extracts data from the filesystem to build a study config. @@ -138,8 +132,8 @@ def _extract_data_from_file( root: Path, inside_root_path: Path, file_type: FileType, - multi_ini_keys: t.Sequence[str] = (), -) -> t.Any: + multi_ini_keys: Sequence[str] = (), +) -> Any: """ Extract and process data from various types of files. 
@@ -196,7 +190,7 @@ def _parse_version(path: Path) -> StudyVersion: return StudyVersion.parse(version) -def _parse_parameters(path: Path) -> t.Tuple[bool, t.List[str], str]: +def _parse_parameters(path: Path) -> Tuple[bool, List[str], str]: general = _extract_data_from_file( root=path, inside_root_path=Path("settings/generaldata.ini"), @@ -204,14 +198,14 @@ def _parse_parameters(path: Path) -> t.Tuple[bool, t.List[str], str]: ) store_new_set: bool = general.get("output", {}).get("storenewset", False) - archive_input_series: t.List[str] = [ + archive_input_series: List[str] = [ e.strip() for e in general.get("output", {}).get("archives", "").strip().split(",") if e.strip() ] enr_modelling: str = general.get("other preferences", {}).get("renewable-generation-modelling", "aggregated") return store_new_set, archive_input_series, enr_modelling -def _parse_bindings(root: Path) -> t.List[BindingConstraintDTO]: +def _parse_bindings(root: Path) -> List[BindingConstraintDTO]: bindings = _extract_data_from_file( root=root, inside_root_path=Path("input/bindingconstraints/bindingconstraints.ini"), @@ -246,7 +240,7 @@ def _parse_bindings(root: Path) -> t.List[BindingConstraintDTO]: return output_list -def _parse_sets(root: Path) -> t.Dict[str, DistrictSet]: +def _parse_sets(root: Path) -> Dict[str, DistrictSet]: obj = _extract_data_from_file( root=root, inside_root_path=Path("input/areas/sets.ini"), @@ -264,7 +258,7 @@ def _parse_sets(root: Path) -> t.Dict[str, DistrictSet]: } -def _parse_areas(root: Path) -> t.Dict[str, Area]: +def _parse_areas(root: Path) -> Dict[str, Area]: areas = _extract_data_from_file( root=root, inside_root_path=Path("input/areas/list.txt"), @@ -274,7 +268,7 @@ def _parse_areas(root: Path) -> t.Dict[str, Area]: return {transform_name_to_id(a): parse_area(root, a) for a in areas} -def parse_outputs(output_path: Path) -> t.Dict[str, Simulation]: +def parse_outputs(output_path: Path) -> Dict[str, Simulation]: if not output_path.is_dir(): return {} sims = {} @@ -381,10 +375,10 @@ def parse_simulation(path: Path, canonical_name: str) -> Simulation: ) -def get_playlist(config: JSON) -> t.Optional[t.Dict[int, float]]: +def get_playlist(config: JSON) -> Optional[Dict[int, float]]: general_config = config.get("general", {}) - nb_years = t.cast(int, general_config.get("nbyears")) - playlist_activated = t.cast(bool, general_config.get("user-playlist", False)) + nb_years = cast(int, general_config.get("nbyears")) + playlist_activated = cast(bool, general_config.get("user-playlist", False)) if not playlist_activated: return None playlist_config = config.get("playlist", {}) @@ -435,13 +429,13 @@ def parse_area(root: Path, area: str) -> "Area": ) -def _parse_thermal(root: Path, area: str) -> t.List[ThermalConfigType]: +def _parse_thermal(root: Path, area: str) -> List[ThermalConfigType]: """ Parse the thermal INI file, return an empty list if missing. """ version = _parse_version(root) relpath = Path(f"input/thermal/clusters/{area}/list.ini") - config_dict: t.Dict[str, t.Any] = _extract_data_from_file( + config_dict: Dict[str, Any] = _extract_data_from_file( root=root, inside_root_path=relpath, file_type=FileType.SIMPLE_INI ) config_list = [] @@ -454,7 +448,7 @@ def _parse_thermal(root: Path, area: str) -> t.List[ThermalConfigType]: return config_list -def _parse_renewables(root: Path, area: str) -> t.List[RenewableConfigType]: +def _parse_renewables(root: Path, area: str) -> List[RenewableConfigType]: """ Parse the renewables INI file, return an empty list if missing. 
""" @@ -467,7 +461,7 @@ def _parse_renewables(root: Path, area: str) -> t.List[RenewableConfigType]: # Since version 8.1 of the solver, we can use "renewable clusters" objects. relpath = Path(f"input/renewables/clusters/{area}/list.ini") - config_dict: t.Dict[str, t.Any] = _extract_data_from_file( + config_dict: Dict[str, Any] = _extract_data_from_file( root=root, inside_root_path=relpath, file_type=FileType.SIMPLE_INI, @@ -482,7 +476,7 @@ def _parse_renewables(root: Path, area: str) -> t.List[RenewableConfigType]: return config_list -def _parse_st_storage(root: Path, area: str) -> t.List[STStorageConfigType]: +def _parse_st_storage(root: Path, area: str) -> List[STStorageConfigType]: """ Parse the short-term storage INI file, return an empty list if missing. """ @@ -493,7 +487,7 @@ def _parse_st_storage(root: Path, area: str) -> t.List[STStorageConfigType]: return [] relpath = Path(f"input/st-storage/clusters/{area}/list.ini") - config_dict: t.Dict[str, t.Any] = _extract_data_from_file( + config_dict: Dict[str, Any] = _extract_data_from_file( root=root, inside_root_path=relpath, file_type=FileType.SIMPLE_INI, @@ -508,7 +502,7 @@ def _parse_st_storage(root: Path, area: str) -> t.List[STStorageConfigType]: return config_list -def _parse_links_filtering(root: Path, area: str) -> t.Dict[str, Link]: +def _parse_links_filtering(root: Path, area: str) -> Dict[str, Link]: properties_ini = _extract_data_from_file( root=root, inside_root_path=Path(f"input/links/{area}/properties.ini"), diff --git a/antarest/study/storage/rawstudy/model/filesystem/config/identifier.py b/antarest/study/storage/rawstudy/model/filesystem/config/identifier.py index e25c5ab9fb..1c1b38fe6d 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/config/identifier.py +++ b/antarest/study/storage/rawstudy/model/filesystem/config/identifier.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. -import typing as t +from typing import Any, Mapping, MutableMapping from pydantic import Field, model_validator @@ -50,7 +50,7 @@ def generate_id(cls, name: str) -> str: return transform_name_to_id(name, lower=False) @model_validator(mode="before") - def validate_id(cls, values: t.MutableMapping[str, t.Any]) -> t.Mapping[str, t.Any]: + def validate_id(cls, values: MutableMapping[str, Any]) -> Mapping[str, Any]: """ Calculate an ID based on the name, if not provided. diff --git a/antarest/study/storage/rawstudy/model/filesystem/config/ini_properties.py b/antarest/study/storage/rawstudy/model/filesystem/config/ini_properties.py index 05451c6870..6127e7a882 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/config/ini_properties.py +++ b/antarest/study/storage/rawstudy/model/filesystem/config/ini_properties.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. -import typing as t +from typing import Any, Dict, Optional, Set from typing_extensions import override @@ -32,7 +32,7 @@ class IniProperties( Base class for configuration sections. """ - def to_config(self) -> t.Dict[str, t.Any]: + def to_config(self) -> Dict[str, Any]: """ Convert the object to a dictionary for writing to a configuration file (`*.ini`). @@ -55,7 +55,7 @@ def to_config(self) -> t.Dict[str, t.Any]: @classmethod @override - def construct(cls, _fields_set: t.Optional[t.Set[str]] = None, **values: t.Any) -> "IniProperties": + def construct(cls, _fields_set: Optional[Set[str]] = None, **values: Any) -> "IniProperties": """ Construct a new model instance from a dict of values, replacing aliases with real field names. 
""" @@ -64,5 +64,4 @@ def construct(cls, _fields_set: t.Optional[t.Set[str]] = None, **values: t.Any) renamed_values = {aliases.get(k, k): v for k, v in values.items()} if _fields_set is not None: _fields_set = {aliases.get(f, f) for f in _fields_set} - # noinspection PyTypeChecker - return super().construct(_fields_set, **renamed_values) + return IniProperties.model_construct(_fields_set, **renamed_values) diff --git a/antarest/study/storage/rawstudy/model/filesystem/config/model.py b/antarest/study/storage/rawstudy/model/filesystem/config/model.py index 388bfaeb5d..5d394f383e 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/config/model.py +++ b/antarest/study/storage/rawstudy/model/filesystem/config/model.py @@ -11,8 +11,8 @@ # This file is part of the Antares project. import re -import typing as t from pathlib import Path +from typing import Any, Dict, List, MutableMapping, Optional, Set from antares.study.version import StudyVersion from pydantic import Field, model_validator @@ -66,11 +66,11 @@ class Link(AntaresBaseModel, extra="ignore"): Ignore extra fields, because we only need `filter-synthesis` and `filter-year-by-year`. """ - filters_synthesis: t.List[str] = Field(default_factory=list) - filters_year: t.List[str] = Field(default_factory=list) + filters_synthesis: List[str] = Field(default_factory=list) + filters_year: List[str] = Field(default_factory=list) @model_validator(mode="before") - def validation(cls, values: t.MutableMapping[str, t.Any]) -> t.MutableMapping[str, t.Any]: + def validation(cls, values: MutableMapping[str, Any]) -> MutableMapping[str, Any]: # note: field names are in kebab-case in the INI file filters_synthesis = values.pop("filter-synthesis", values.pop("filters_synthesis", "")) filters_year = values.pop("filter-year-by-year", values.pop("filters_year", "")) @@ -85,13 +85,13 @@ class Area(AntaresBaseModel, extra="forbid"): """ name: str - links: t.Dict[str, Link] - thermals: t.List[ThermalConfigType] - renewables: t.List[RenewableConfigType] - filters_synthesis: t.List[str] - filters_year: t.List[str] + links: Dict[str, Link] + thermals: List[ThermalConfigType] + renewables: List[RenewableConfigType] + filters_synthesis: List[str] + filters_year: List[str] # since v8.6 - st_storages: t.List[STStorageConfigType] = [] + st_storages: List[STStorageConfigType] = [] class DistrictSet(AntaresBaseModel): @@ -99,15 +99,15 @@ class DistrictSet(AntaresBaseModel): Object linked to /inputs/sets.ini information """ - ALL: t.List[str] = ["hourly", "daily", "weekly", "monthly", "annual"] - name: t.Optional[str] = None + ALL: List[str] = ["hourly", "daily", "weekly", "monthly", "annual"] + name: Optional[str] = None inverted_set: bool = False - areas: t.Optional[t.List[str]] = None + areas: Optional[List[str]] = None output: bool = True - filters_synthesis: t.List[str] = ALL - filters_year: t.List[str] = ALL + filters_synthesis: List[str] = ALL + filters_year: List[str] = ALL - def get_areas(self, all_areas: t.List[str]) -> t.List[str]: + def get_areas(self, all_areas: List[str]) -> List[str]: if self.inverted_set: return list(set(all_areas).difference(set(self.areas or []))) return self.areas or [] @@ -125,7 +125,7 @@ class Simulation(AntaresBaseModel): synthesis: bool by_year: bool error: bool - playlist: t.Optional[t.List[int]] + playlist: Optional[List[int]] archived: bool = False xpansion: str @@ -149,8 +149,8 @@ class BindingConstraintDTO(AntaresBaseModel): """ id: str - areas: t.Set[str] - clusters: t.Set[str] + areas: Set[str] + clusters: Set[str] 
time_step: BindingConstraintFrequency = DEFAULT_TIMESTEP operator: BindingConstraintOperator = DEFAULT_OPERATOR # since v8.7 @@ -168,16 +168,16 @@ def __init__( path: Path, study_id: str, version: StudyVersion, - output_path: t.Optional[Path] = None, - areas: t.Optional[t.Dict[str, Area]] = None, - sets: t.Optional[t.Dict[str, DistrictSet]] = None, - outputs: t.Optional[t.Dict[str, Simulation]] = None, - bindings: t.Optional[t.List[BindingConstraintDTO]] = None, + output_path: Optional[Path] = None, + areas: Optional[Dict[str, Area]] = None, + sets: Optional[Dict[str, DistrictSet]] = None, + outputs: Optional[Dict[str, Simulation]] = None, + bindings: Optional[List[BindingConstraintDTO]] = None, store_new_set: bool = False, - archive_input_series: t.Optional[t.List[str]] = None, + archive_input_series: Optional[List[str]] = None, enr_modelling: str = str(EnrModelling.AGGREGATED), - cache: t.Optional[t.Dict[str, t.List[str]]] = None, - archive_path: t.Optional[Path] = None, + cache: Optional[Dict[str, List[str]]] = None, + archive_path: Optional[Path] = None, ): self.study_path = study_path self.path = path @@ -196,7 +196,7 @@ def __init__( def next_file(self, name: str, is_output: bool = False) -> "FileStudyTreeConfig": if is_output and name in self.outputs and self.outputs[name].archived: - archive_path: t.Optional[Path] = self.path / f"{name}.zip" + archive_path: Optional[Path] = self.path / f"{name}.zip" else: archive_path = self.archive_path @@ -234,36 +234,36 @@ def at_file(self, filepath: Path) -> "FileStudyTreeConfig": cache=self.cache, ) - def area_names(self) -> t.List[str]: + def area_names(self) -> List[str]: return self.cache.get("%areas", list(self.areas)) - def set_names(self, only_output: bool = True) -> t.List[str]: + def set_names(self, only_output: bool = True) -> List[str]: return self.cache.get( f"%districts%{only_output}", [k for k, v in self.sets.items() if v.output or not only_output], ) - def get_thermal_ids(self, area: str) -> t.List[str]: + def get_thermal_ids(self, area: str) -> List[str]: """ Returns a list of thermal cluster IDs for a given area. Note that IDs may not be in lower case (but series IDs are). """ return self.cache.get(f"%thermal%{area}%{area}", [th.id for th in self.areas[area].thermals]) - def get_renewable_ids(self, area: str) -> t.List[str]: + def get_renewable_ids(self, area: str) -> List[str]: """ Returns a list of renewable cluster IDs for a given area. Note that IDs may not be in lower case (but series IDs are). """ return self.cache.get(f"%renewable%{area}", [r.id for r in self.areas[area].renewables]) - def get_st_storage_ids(self, area: str) -> t.List[str]: + def get_st_storage_ids(self, area: str) -> List[str]: return self.cache.get(f"%st-storage%{area}", [s.id for s in self.areas[area].st_storages]) - def get_links(self, area: str) -> t.List[str]: + def get_links(self, area: str) -> List[str]: return self.cache.get(f"%links%{area}", list(self.areas[area].links)) - def get_binding_constraint_groups(self) -> t.List[str]: + def get_binding_constraint_groups(self) -> List[str]: """ Returns the list of binding constraint groups, without duplicates and sorted alphabetically (case-insensitive). 
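All four ID accessors above share the same cache-or-compute shape; restating one of them with a comment on evaluation order (a reading of the existing code, not a behavioural change):

    def get_thermal_ids(self, area: str) -> List[str]:
        # dict.get(key, default) evaluates `default` eagerly, so the list
        # comprehension runs (and is discarded) even on a cache hit.
        return self.cache.get(f"%thermal%{area}%{area}", [th.id for th in self.areas[area].thermals])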
@@ -272,14 +272,14 @@ def get_binding_constraint_groups(self) -> t.List[str]: lower_groups = {bc.group.lower(): bc.group for bc in self.bindings} return self.cache.get("%binding-constraints", [grp for _, grp in sorted(lower_groups.items())]) - def get_filters_synthesis(self, area: str, link: t.Optional[str] = None) -> t.List[str]: + def get_filters_synthesis(self, area: str, link: Optional[str] = None) -> List[str]: if link: return self.areas[area].links[link].filters_synthesis if area in self.sets and self.sets[area].output: return self.sets[area].filters_synthesis return self.areas[area].filters_synthesis - def get_filters_year(self, area: str, link: t.Optional[str] = None) -> t.List[str]: + def get_filters_year(self, area: str, link: Optional[str] = None) -> List[str]: if link: return self.areas[area].links[link].filters_year if area in self.sets and self.sets[area].output: @@ -312,15 +312,15 @@ class FileStudyTreeConfigDTO(AntaresBaseModel): path: Path study_id: str version: StudyVersionInt - output_path: t.Optional[Path] = None - areas: t.Dict[str, Area] = dict() - sets: t.Dict[str, DistrictSet] = dict() - outputs: t.Dict[str, Simulation] = dict() - bindings: t.List[BindingConstraintDTO] = list() + output_path: Optional[Path] = None + areas: Dict[str, Area] = dict() + sets: Dict[str, DistrictSet] = dict() + outputs: Dict[str, Simulation] = dict() + bindings: List[BindingConstraintDTO] = list() store_new_set: bool = False - archive_input_series: t.List[str] = list() + archive_input_series: List[str] = list() enr_modelling: str = str(EnrModelling.AGGREGATED) - archive_path: t.Optional[Path] = None + archive_path: Optional[Path] = None @staticmethod def from_build_config( diff --git a/antarest/study/storage/rawstudy/model/filesystem/config/renewable.py b/antarest/study/storage/rawstudy/model/filesystem/config/renewable.py index 6b8087b3fd..d8d5fc98a6 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/config/renewable.py +++ b/antarest/study/storage/rawstudy/model/filesystem/config/renewable.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. -import typing as t +from typing import Any, Optional, Type, cast from antares.study.version import StudyVersion from pydantic import Field @@ -61,7 +61,7 @@ def __repr__(self) -> str: @classmethod @override - def _missing_(cls, value: object) -> t.Optional["RenewableClusterGroup"]: + def _missing_(cls, value: object) -> Optional["RenewableClusterGroup"]: """ Retrieves the default group or the matched group when an unknown value is encountered. """ @@ -69,10 +69,10 @@ def _missing_(cls, value: object) -> t.Optional["RenewableClusterGroup"]: # Check if any group value matches the input value ignoring case sensitivity. # noinspection PyUnresolvedReferences if any(value.upper() == group.value.upper() for group in cls): - return t.cast(RenewableClusterGroup, super()._missing_(value)) + return cast(RenewableClusterGroup, super()._missing_(value)) # If a group is not found, return the default group ('OTHER1' by default). 
return cls.OTHER1 - return t.cast(t.Optional["RenewableClusterGroup"], super()._missing_(value)) + return cast(Optional["RenewableClusterGroup"], super()._missing_(value)) class RenewableProperties(ClusterProperties): @@ -115,7 +115,7 @@ class RenewableConfig(RenewableProperties, IgnoreCaseIdentifier): RenewableConfigType = RenewableConfig -def get_renewable_config_cls(study_version: StudyVersion) -> t.Type[RenewableConfig]: +def get_renewable_config_cls(study_version: StudyVersion) -> Type[RenewableConfig]: """ Retrieves the renewable configuration class based on the study version. @@ -130,7 +130,7 @@ def get_renewable_config_cls(study_version: StudyVersion) -> t.Type[RenewableCon raise ValueError(f"Unsupported study version {study_version}, required 810 or above.") -def create_renewable_config(study_version: StudyVersion, **kwargs: t.Any) -> RenewableConfigType: +def create_renewable_config(study_version: StudyVersion, **kwargs: Any) -> RenewableConfigType: """ Factory method to create a renewable configuration model. diff --git a/antarest/study/storage/rawstudy/model/filesystem/config/ruleset_matrices.py b/antarest/study/storage/rawstudy/model/filesystem/config/ruleset_matrices.py index 47a7568142..c3084efe3d 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/config/ruleset_matrices.py +++ b/antarest/study/storage/rawstudy/model/filesystem/config/ruleset_matrices.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. -import typing as t +from typing import Any, Dict, Iterable, List, Mapping, MutableMapping, Optional, Tuple, cast import numpy as np import pandas as pd @@ -31,15 +31,15 @@ "hgp": "hydro-generation-power", } -_Value: te.TypeAlias = t.Union[int, float] +_Value: te.TypeAlias = int | float _SimpleScenario: te.TypeAlias = pd.DataFrame -_ClusterScenario: te.TypeAlias = t.MutableMapping[str, pd.DataFrame] -_Scenario: te.TypeAlias = t.Union[_SimpleScenario, _ClusterScenario] -_ScenarioMapping: te.TypeAlias = t.MutableMapping[str, _Scenario] +_ClusterScenario: te.TypeAlias = MutableMapping[str, pd.DataFrame] +_Scenario: te.TypeAlias = _SimpleScenario | _ClusterScenario +_ScenarioMapping: te.TypeAlias = MutableMapping[str, _Scenario] -SimpleTableForm: te.TypeAlias = t.Dict[str, t.Dict[str, t.Union[int, float, str, None]]] -ClusterTableForm: te.TypeAlias = t.Dict[str, SimpleTableForm] -TableForm: te.TypeAlias = t.Union[SimpleTableForm, ClusterTableForm] +SimpleTableForm: te.TypeAlias = Dict[str, Dict[str, int | float | str | None]] +ClusterTableForm: te.TypeAlias = Dict[str, SimpleTableForm] +TableForm: te.TypeAlias = SimpleTableForm | ClusterTableForm _AREA_RELATED_SYMBOLS = "l", "h", "w", "s", "hgp" _BINDING_CONSTRAINTS_RELATED_SYMBOLS = ("bc",) @@ -86,12 +86,12 @@ def __init__( self, *, nb_years: int, - areas: t.Iterable[str], - links: t.Iterable[t.Tuple[str, str]], - thermals: t.Mapping[str, t.Iterable[str]], - renewables: t.Mapping[str, t.Iterable[str]], - groups: t.Iterable[str], - scenario_types: t.Optional[t.Mapping[str, str]] = None, + areas: Iterable[str], + links: Iterable[Tuple[str, str]], + thermals: Mapping[str, Iterable[str]], + renewables: Mapping[str, Iterable[str]], + groups: Iterable[str], + scenario_types: Optional[Mapping[str, str]] = None, ): # List of Monte Carlo years self.columns = [str(i) for i in range(nb_years)] @@ -124,17 +124,17 @@ def __str__(self) -> str: lines.append("") return "\n".join(lines) - def get_area_index(self) -> t.List[str]: + def get_area_index(self) -> List[str]: return [idx_area(area) for area in 
self.areas.values()] - def get_link_index(self) -> t.List[str]: + def get_link_index(self) -> List[str]: return [idx_link(a1, a2) for a1, a2 in self.links.values()] - def get_cluster_index(self, symbol: str, area: str) -> t.List[str]: + def get_cluster_index(self, symbol: str, area: str) -> List[str]: clusters = self.clusters_by_symbols[symbol][area.lower()] return [idx_cluster(area, cluster) for cluster in clusters.values()] - def get_group_index(self) -> t.List[str]: + def get_group_index(self) -> List[str]: return [idx_group(group) for group in self.groups.values()] def _setup(self) -> None: @@ -179,7 +179,7 @@ def sort_scenarios(self) -> None: scenario = {area: df.sort_index(key=lambda x: x.str.lower()) for area, df in scenario.items()} self.scenarios[scenario_type] = scenario - def update_rules(self, rules: t.Mapping[str, _Value]) -> None: + def update_rules(self, rules: Mapping[str, _Value]) -> None: """ Update the scenario matrices with the given rules read from an INI file. @@ -203,31 +203,31 @@ def update_rules(self, rules: t.Mapping[str, _Value]) -> None: year = parts[2] if symbol in _LINK_RELATED_SYMBOLS else parts[1] if symbol in _AREA_RELATED_SYMBOLS: area = self.areas[area_id] - scenario = t.cast(pd.DataFrame, self.scenarios[scenario_type]) + scenario = cast(pd.DataFrame, self.scenarios[scenario_type]) scenario.at[idx_area(area), str(year)] = value elif symbol in _LINK_RELATED_SYMBOLS: area1 = self.areas[area_id] area2 = self.areas[parts[1].lower()] - scenario = t.cast(pd.DataFrame, self.scenarios[scenario_type]) + scenario = cast(pd.DataFrame, self.scenarios[scenario_type]) scenario.at[idx_link(area1, area2), str(year)] = value elif symbol in _HYDRO_LEVEL_RELATED_SYMBOLS: area = self.areas[area_id] - scenario = t.cast(pd.DataFrame, self.scenarios[scenario_type]) + scenario = cast(pd.DataFrame, self.scenarios[scenario_type]) scenario.at[idx_area(area), str(year)] = value * 100 elif symbol in _CLUSTER_RELATED_SYMBOLS: area = self.areas[area_id] clusters = self.clusters_by_symbols[symbol][area_id] cluster = clusters[parts[2].lower()] - scenario = t.cast(pd.DataFrame, self.scenarios[scenario_type][area]) + scenario = cast(pd.DataFrame, self.scenarios[scenario_type][area]) scenario.at[idx_cluster(area, cluster), str(year)] = value elif symbol in _BINDING_CONSTRAINTS_RELATED_SYMBOLS: group = self.groups[area_id] - scenario = t.cast(pd.DataFrame, self.scenarios[scenario_type]) + scenario = cast(pd.DataFrame, self.scenarios[scenario_type]) scenario.at[idx_group(group), str(year)] = value else: raise NotImplementedError(f"Unknown symbol {symbol}") - def get_rules(self, *, allow_nan: bool = False) -> t.Dict[str, _Value]: + def get_rules(self, *, allow_nan: bool = False) -> Dict[str, _Value]: """ Get the rules from the scenario matrices in INI format. @@ -246,14 +246,14 @@ def get_rules(self, *, allow_nan: bool = False) -> t.Dict[str, _Value]: "symbol,group_id,year": value, # binding constraints } """ - rules: t.Dict[str, _Value] = {} + rules: Dict[str, _Value] = {} for symbol, scenario_type in self.scenario_types.items(): scenario = self.scenarios[scenario_type] scenario_rules = self.get_scenario_rules(scenario, symbol, allow_nan=allow_nan) rules.update(scenario_rules) return rules - def get_scenario_rules(self, scenario: _Scenario, symbol: str, *, allow_nan: bool = False) -> t.Dict[str, _Value]: + def get_scenario_rules(self, scenario: _Scenario, symbol: str, *, allow_nan: bool = False) -> Dict[str, _Value]: """ Get the rules for a specific scenario matrix and symbol. 
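Putting `update_rules` together with the symbol constants defined earlier in this module, a hypothetical rules payload (the `t` and `hl` symbols and all IDs are invented for illustration; only `l` and `bc` appear verbatim in the hunks above):

    rules = {
        "l,fr,0": 1,          # load: area "fr", MC year 0 -> TS number 1
        "t,fr,0,nuclear": 2,  # thermal cluster: the year comes before the cluster ID
        "hl,fr,0": 0.75,      # hydro level: stored in the matrix as value * 100
        "bc,group_a,0": 3,    # binding-constraint group
    }
    ruleset.update_rules(rules)  # `ruleset` is an instance of the class defined above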
@@ -266,11 +266,11 @@ def get_scenario_rules(self, scenario: _Scenario, symbol: str, *, allow_nan: boo Dictionary of rules. """ - def to_ts_number(v: t.Any) -> _Value: + def to_ts_number(v: Any) -> _Value: """Convert value to TimeSeries number.""" return np.nan if pd.isna(v) else int(v) - def to_percent(v: t.Any) -> _Value: + def to_percent(v: Any) -> _Value: """Convert value to percentage in range [0, 100].""" return np.nan if pd.isna(v) else float(v) / 100 @@ -315,7 +315,7 @@ def to_percent(v: t.Any) -> _Value: raise NotImplementedError(f"Unknown symbol {symbol}") return scenario_rules - def get_table_form(self, scenario_type: str, *, nan_value: t.Union[str, None] = "") -> TableForm: + def get_table_form(self, scenario_type: str, *, nan_value: str | None = "") -> TableForm: """ Get the scenario matrices in table form for the frontend. @@ -355,18 +355,18 @@ def get_table_form(self, scenario_type: str, *, nan_value: t.Union[str, None] = if isinstance(scenario, pd.DataFrame): simple_scenario: _SimpleScenario = scenario.fillna(nan_value) simple_table_form = simple_scenario.to_dict(orient="index") - return t.cast(SimpleTableForm, simple_table_form) + return cast(SimpleTableForm, simple_table_form) else: cluster_scenario: _ClusterScenario = {area: df.fillna(nan_value) for area, df in scenario.items()} cluster_table_form = {area: df.to_dict(orient="index") for area, df in cluster_scenario.items()} - return t.cast(ClusterTableForm, cluster_table_form) + return cast(ClusterTableForm, cluster_table_form) def set_table_form( self, table_form: TableForm, scenario_type: str, *, - nan_value: t.Union[str, None] = "", + nan_value: str | None = "", ) -> None: """ Set the scenario matrix from table form data, for a specific scenario type. @@ -398,12 +398,12 @@ def update_table_form(self, table_form: TableForm, scenario_type: str, *, nan_va """ scenario = self.scenarios[scenario_type] if isinstance(scenario, pd.DataFrame): - simple_table_form = t.cast(SimpleTableForm, table_form) + simple_table_form = cast(SimpleTableForm, table_form) df = pd.DataFrame.from_dict(simple_table_form, orient="index").replace([None, nan_value], np.nan) scenario.loc[df.index, df.columns] = df else: - cluster_table_form = t.cast(ClusterTableForm, table_form) + cluster_table_form = cast(ClusterTableForm, table_form) for area, simple_table_form in cluster_table_form.items(): - scenario = t.cast(pd.DataFrame, self.scenarios[scenario_type][area]) + scenario = cast(pd.DataFrame, self.scenarios[scenario_type][area]) df = pd.DataFrame(simple_table_form).transpose().replace([None, nan_value], np.nan) scenario.loc[df.index, df.columns] = df diff --git a/antarest/study/storage/rawstudy/model/filesystem/config/st_storage.py b/antarest/study/storage/rawstudy/model/filesystem/config/st_storage.py index 0d21455783..de14d40d5d 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/config/st_storage.py +++ b/antarest/study/storage/rawstudy/model/filesystem/config/st_storage.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. -import typing as t +from typing import Any, Type from antares.study.version import StudyVersion from pydantic import Field @@ -160,10 +160,10 @@ class STStorage880Config(STStorage880Properties, LowerCaseIdentifier): # NOTE: In the following Union, it is important to place the older version first, # because otherwise, creating a short term storage always creates a v8.8 one. 
-STStorageConfigType = t.Union[STStorageConfig, STStorage880Config] +STStorageConfigType = STStorageConfig | STStorage880Config -def get_st_storage_config_cls(study_version: StudyVersion) -> t.Type[STStorageConfigType]: +def get_st_storage_config_cls(study_version: StudyVersion) -> Type[STStorageConfigType]: """ Retrieves the short-term storage configuration class based on the study version. @@ -180,7 +180,7 @@ def get_st_storage_config_cls(study_version: StudyVersion) -> t.Type[STStorageCo raise ValueError(f"Unsupported study version: {study_version}") -def create_st_storage_config(study_version: StudyVersion, **kwargs: t.Any) -> STStorageConfigType: +def create_st_storage_config(study_version: StudyVersion, **kwargs: Any) -> STStorageConfigType: """ Factory method to create a short-term storage configuration model. diff --git a/antarest/study/storage/rawstudy/model/filesystem/config/thermal.py b/antarest/study/storage/rawstudy/model/filesystem/config/thermal.py index 87f20514f4..829877457b 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/config/thermal.py +++ b/antarest/study/storage/rawstudy/model/filesystem/config/thermal.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. -import typing as t +from typing import Any, Optional, Type, cast from antares.study.version import StudyVersion from pydantic import Field @@ -78,7 +78,7 @@ def __repr__(self) -> str: # pragma: no cover @classmethod @override - def _missing_(cls, value: object) -> t.Optional["ThermalClusterGroup"]: + def _missing_(cls, value: object) -> Optional["ThermalClusterGroup"]: """ Retrieves the default group or the matched group when an unknown value is encountered. """ @@ -86,11 +86,11 @@ def _missing_(cls, value: object) -> t.Optional["ThermalClusterGroup"]: # Check if any group value matches the input value ignoring case sensitivity. # noinspection PyUnresolvedReferences if any(value.upper() == group.value.upper() for group in cls): - return t.cast(ThermalClusterGroup, super()._missing_(value)) + return cast(ThermalClusterGroup, super()._missing_(value)) # If a group is not found, return the default group ('OTHER1' by default). # Note that 'OTHER' is an alias for 'OTHER1'. return cls.OTHER1 - return t.cast(t.Optional["ThermalClusterGroup"], super()._missing_(value)) + return cast(Optional["ThermalClusterGroup"], super()._missing_(value)) class ThermalCostGeneration(EnumIgnoreCase): @@ -408,10 +408,10 @@ class Thermal870Config(Thermal870Properties, IgnoreCaseIdentifier): # NOTE: In the following Union, it is important to place the most specific type first, # because the type matching generally occurs sequentially from left to right within the union. -ThermalConfigType = t.Union[Thermal870Config, Thermal860Config, ThermalConfig] +ThermalConfigType = Thermal870Config | Thermal860Config | ThermalConfig -def get_thermal_config_cls(study_version: StudyVersion) -> t.Type[ThermalConfigType]: +def get_thermal_config_cls(study_version: StudyVersion) -> Type[ThermalConfigType]: """ Retrieves the thermal configuration class based on the study version. @@ -429,7 +429,7 @@ def get_thermal_config_cls(study_version: StudyVersion) -> t.Type[ThermalConfigT return ThermalConfig -def create_thermal_config(study_version: StudyVersion, **kwargs: t.Any) -> ThermalConfigType: +def create_thermal_config(study_version: StudyVersion, **kwargs: Any) -> ThermalConfigType: """ Factory method to create a thermal configuration model. 
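Assumed usage of the two factory pairs above; `StudyVersion.parse` is used elsewhere in this patch, while the exact version cut-offs are inferred from the class names:

    from antares.study.version import StudyVersion

    from antarest.study.storage.rawstudy.model.filesystem.config.st_storage import (
        STStorage880Config,
        create_st_storage_config,
    )
    from antarest.study.storage.rawstudy.model.filesystem.config.thermal import (
        Thermal870Config,
        create_thermal_config,
        get_thermal_config_cls,
    )

    assert get_thermal_config_cls(StudyVersion.parse("8.7")) is Thermal870Config
    cluster = create_thermal_config(StudyVersion.parse("8.8"), name="gas")
    assert isinstance(cluster, Thermal870Config)

    # The union lists STStorageConfig before STStorage880Config on purpose:
    # a left-to-right match would otherwise coerce every payload to v8.8.
    storage = create_st_storage_config(StudyVersion.parse("8.8"), name="battery")
    assert isinstance(storage, STStorage880Config)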
diff --git a/antarest/study/storage/rawstudy/model/filesystem/factory.py b/antarest/study/storage/rawstudy/model/filesystem/factory.py index 63bfdc5a31..3a1dfdde28 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/factory.py +++ b/antarest/study/storage/rawstudy/model/filesystem/factory.py @@ -14,8 +14,8 @@ import os.path import tempfile import time -import typing as t from pathlib import Path +from typing import NamedTuple, Optional import filelock @@ -30,7 +30,7 @@ logger = logging.getLogger(__name__) -class FileStudy(t.NamedTuple): +class FileStudy(NamedTuple): """ Antares study stored on the disk. @@ -66,7 +66,7 @@ def create_from_fs( self, path: Path, study_id: str, - output_path: t.Optional[Path] = None, + output_path: Optional[Path] = None, use_cache: bool = True, ) -> FileStudy: """ @@ -96,7 +96,7 @@ def _create_from_fs_unsafe( self, path: Path, study_id: str, - output_path: t.Optional[Path] = None, + output_path: Optional[Path] = None, use_cache: bool = True, ) -> FileStudy: cache_id = f"{CacheConstants.STUDY_FACTORY}/{study_id}" diff --git a/antarest/study/storage/rawstudy/model/filesystem/folder_node.py b/antarest/study/storage/rawstudy/model/filesystem/folder_node.py index cfcebe9019..8daec708d6 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/folder_node.py +++ b/antarest/study/storage/rawstudy/model/filesystem/folder_node.py @@ -11,8 +11,8 @@ # This file is part of the Antares project. import shutil -import typing as t from abc import ABC, abstractmethod +from typing import List, Optional, Tuple from typing_extensions import override @@ -45,7 +45,7 @@ def __init__( self, context: ContextServer, config: FileStudyTreeConfig, - children_glob_exceptions: t.Optional[t.List[str]] = None, + children_glob_exceptions: Optional[List[str]] = None, ) -> None: super().__init__(config) self.context = context @@ -57,11 +57,11 @@ def build(self) -> TREE: def _forward_get( self, - url: t.List[str], + url: List[str], depth: int = -1, formatted: bool = True, get_node: bool = False, - ) -> t.Union[JSON, INode[JSON, SUB_JSON, JSON]]: + ) -> JSON | INode[JSON, SUB_JSON, JSON]: children = self.build() names, sub_url = self.extract_child(children, url) @@ -91,7 +91,7 @@ def _forward_get( def _expand_get( self, depth: int = -1, formatted: bool = True, get_node: bool = False - ) -> t.Union[JSON, INode[JSON, SUB_JSON, JSON]]: + ) -> JSON | INode[JSON, SUB_JSON, JSON]: if get_node: return self @@ -106,11 +106,11 @@ def _expand_get( def _get( self, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, depth: int = -1, formatted: bool = True, get_node: bool = False, - ) -> t.Union[JSON, INode[JSON, SUB_JSON, JSON]]: + ) -> JSON | INode[JSON, SUB_JSON, JSON]: if url and url != [""]: return self._forward_get(url, depth, formatted, get_node) else: @@ -119,7 +119,7 @@ def _get( @override def get( self, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, depth: int = -1, expanded: bool = False, formatted: bool = True, @@ -131,7 +131,7 @@ def get( @override def get_node( self, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, ) -> INode[JSON, SUB_JSON, JSON]: output = self._get(url=url, get_node=True) assert isinstance(output, INode) @@ -141,7 +141,7 @@ def get_node( def save( self, data: SUB_JSON, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, ) -> None: self._assert_not_in_zipped_file() children = self.build() @@ -157,7 +157,7 @@ def save( children[key].save(data[key]) @override - def 
delete(self, url: t.Optional[t.List[str]] = None) -> None: + def delete(self, url: Optional[List[str]] = None) -> None: if url and url != [""]: children = self.build() names, sub_url = self.extract_child(children, url) @@ -170,16 +170,16 @@ def delete(self, url: t.Optional[t.List[str]] = None) -> None: def check_errors( self, data: JSON, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, raising: bool = False, - ) -> t.List[str]: + ) -> List[str]: children = self.build() if url and url != [""]: (name,), sub_url = self.extract_child(children, url) return children[name].check_errors(data, sub_url, raising) else: - errors: t.List[str] = [] + errors: List[str] = [] for key in data: if key not in children: msg = f"key={key} not in {list(children.keys())} for {self.__class__.__name__}" @@ -200,7 +200,7 @@ def denormalize(self) -> None: for child in self.build().values(): child.denormalize() - def extract_child(self, children: TREE, url: t.List[str]) -> t.Tuple[t.List[str], t.List[str]]: + def extract_child(self, children: TREE, url: List[str]) -> Tuple[List[str], List[str]]: names, sub_url = url[0].split(","), url[1:] names = ( list( diff --git a/antarest/study/storage/rawstudy/model/filesystem/ini_file_node.py b/antarest/study/storage/rawstudy/model/filesystem/ini_file_node.py index 960ccc83e4..0dca18383c 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/ini_file_node.py +++ b/antarest/study/storage/rawstudy/model/filesystem/ini_file_node.py @@ -16,9 +16,9 @@ import logging import os import tempfile -import typing as t import zipfile from pathlib import Path +from typing import Any, Callable, Dict, List, Optional, cast import py7zr import pydantic_core @@ -51,7 +51,7 @@ def __init__(self, config: FileStudyTreeConfig, message: str) -> None: super().__init__(f"INI File error '{relpath}': {message}") -def log_warning(f: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]: +def log_warning(f: Callable[..., Any]) -> Callable[..., Any]: """ Decorator to suppress `UserWarning` exceptions by logging them as warnings. 
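For context, the `log_warning` decorator this hunk retypes is small enough to sketch in full. A minimal standalone version under the same contract — suppress a raised `UserWarning` by logging it instead of propagating — where the logger setup and the demo function are mine, not the project's:

    import functools
    import logging
    from typing import Any, Callable

    logging.basicConfig(level=logging.WARNING)
    logger = logging.getLogger(__name__)


    def log_warning(f: Callable[..., Any]) -> Callable[..., Any]:
        """Suppress `UserWarning` raised by `f`, logging it instead of propagating."""

        @functools.wraps(f)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            try:
                return f(*args, **kwargs)
            except UserWarning as w:
                # The warning is demoted to a log record; the caller gets None.
                logger.warning(str(w))

        return wrapper


    @log_warning
    def delete_section(name: str) -> None:
        # Hypothetical demo function, not part of the project.
        raise UserWarning(f"section {name!r} not found, nothing to delete")


    delete_section("general")  # logs the warning instead of raising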
@@ -63,7 +63,7 @@ def log_warning(f: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]: """ @functools.wraps(f) - def wrapper(*args: t.Any, **kwargs: t.Any) -> t.Any: + def wrapper(*args: Any, **kwargs: Any) -> Any: try: return f(*args, **kwargs) except UserWarning as w: @@ -78,9 +78,9 @@ def __init__( self, context: ContextServer, config: FileStudyTreeConfig, - types: t.Optional[t.Dict[str, t.Any]] = None, - reader: t.Optional[IReader] = None, - writer: t.Optional[IniWriter] = None, + types: Optional[Dict[str, Any]] = None, + reader: Optional[IReader] = None, + writer: Optional[IniWriter] = None, ): super().__init__(config) self.context = context @@ -91,11 +91,11 @@ def __init__( def _get( self, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, depth: int = -1, expanded: bool = False, get_node: bool = False, - ) -> t.Union[SUB_JSON, INode[SUB_JSON, SUB_JSON, JSON]]: + ) -> SUB_JSON | INode[SUB_JSON, SUB_JSON, JSON]: if get_node: return self @@ -124,10 +124,10 @@ def _get( data = self.reader.read(self.path, **kwargs) data = self._handle_urls(data, depth, url) - return t.cast(SUB_JSON, data) + return cast(SUB_JSON, data) @staticmethod - def _handle_urls(data: t.Dict[str, t.Any], depth: int, url: t.List[str]) -> t.Dict[str, t.Any]: + def _handle_urls(data: Dict[str, Any], depth: int, url: List[str]) -> Dict[str, Any]: if len(url) == 2: if url[0] in data and url[1] in data[url[0]]: data = data[url[0]][url[1]] @@ -148,7 +148,7 @@ def _handle_urls(data: t.Dict[str, t.Any], depth: int, url: t.List[str]) -> t.Di return data # noinspection PyMethodMayBeStatic - def _get_filtering_kwargs(self, url: t.List[str]) -> t.Dict[str, str]: + def _get_filtering_kwargs(self, url: List[str]) -> Dict[str, str]: """ Extracts the filtering arguments from the URL components. @@ -172,7 +172,7 @@ def _get_filtering_kwargs(self, url: t.List[str]) -> t.Dict[str, str]: @override def get( self, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, depth: int = -1, expanded: bool = False, formatted: bool = True, @@ -184,14 +184,14 @@ def get( @override def get_node( self, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, ) -> INode[SUB_JSON, SUB_JSON, JSON]: output = self._get(url, get_node=True) assert isinstance(output, INode) return output @override - def save(self, data: SUB_JSON, url: t.Optional[t.List[str]] = None) -> None: + def save(self, data: SUB_JSON, url: Optional[List[str]] = None) -> None: self._assert_not_in_zipped_file() url = url or [] with FileLock( @@ -212,12 +212,12 @@ def save(self, data: SUB_JSON, url: t.Optional[t.List[str]] = None) -> None: elif len(url) == 1: info[url[0]] = obj else: - info = t.cast(JSON, obj) + info = cast(JSON, obj) self.writer.write(info, self.path) @log_warning @override - def delete(self, url: t.Optional[t.List[str]] = None) -> None: + def delete(self, url: Optional[List[str]] = None) -> None: """ Deletes the specified section or key from the INI file, or the entire INI file if no URL is provided. 
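The `_handle_urls` helper retyped above drills into the parsed INI mapping by URL: a two-element URL selects a single key inside a section, a one-element URL a whole section. A simplified standalone sketch of that selection logic — the real method also honors `depth` and the error cases elided from the hunk, and the INI data below is illustrative:

    from typing import Any, Dict, List


    def handle_urls(data: Dict[str, Any], url: List[str]) -> Any:
        # url == [section, key] -> one value; url == [section] -> the section.
        if len(url) == 2 and url[0] in data and url[1] in data[url[0]]:
            return data[url[0]][url[1]]
        if len(url) == 1 and url[0] in data:
            return data[url[0]]
        return data  # empty URL: the whole parsed file


    ini = {"general": {"nbyears": 3, "leapyear": False}}
    assert handle_urls(ini, ["general", "nbyears"]) == 3
    assert handle_urls(ini, ["general"]) == {"nbyears": 3, "leapyear": False}
    assert handle_urls(ini, []) is ini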
@@ -283,9 +283,9 @@ def delete(self, url: t.Optional[t.List[str]] = None) -> None:
     def check_errors(
         self,
         data: JSON,
-        url: t.Optional[t.List[str]] = None,
+        url: Optional[List[str]] = None,
         raising: bool = False,
-    ) -> t.List[str]:
+    ) -> List[str]:
         errors = []
         for section, params in self.types.items():
             if section not in data:
@@ -309,9 +309,9 @@ def denormalize(self) -> None:
     def _validate_param(
         self,
         section: str,
-        params: t.Any,
+        params: Any,
         data: JSON,
-        errors: t.List[str],
+        errors: List[str],
         raising: bool,
     ) -> None:
         for param, typing in params.items():
diff --git a/antarest/study/storage/rawstudy/model/filesystem/json_file_node.py b/antarest/study/storage/rawstudy/model/filesystem/json_file_node.py
index 0ff675c650..6f0a19b2b3 100644
--- a/antarest/study/storage/rawstudy/model/filesystem/json_file_node.py
+++ b/antarest/study/storage/rawstudy/model/filesystem/json_file_node.py
@@ -11,8 +11,8 @@
 # This file is part of the Antares project.

 import json
-import typing as t
 from pathlib import Path
+from typing import Any, Dict, Optional, cast

 from typing_extensions import override

@@ -31,8 +31,8 @@ class JsonReader(IReader):
     """

     @override
-    def read(self, path: t.Any, **kwargs: t.Any) -> JSON:
-        content: t.Union[str, bytes]
+    def read(self, path: Any, **kwargs: Any) -> JSON:
+        content: str | bytes

         if isinstance(path, (Path, str)):
             try:
@@ -51,7 +51,7 @@ def read(self, path: t.Any, **kwargs: t.Any) -> JSON:
             raise TypeError(repr(type(path)))

         try:
-            return t.cast(JSON, from_json(content))
+            return cast(JSON, from_json(content))
         except json.JSONDecodeError as exc:
             err_msg = f"Failed to parse JSON file '{path}'"
             raise ValueError(err_msg) from exc
@@ -73,6 +73,6 @@ def __init__(
         self,
         context: ContextServer,
         config: FileStudyTreeConfig,
-        types: t.Optional[t.Dict[str, t.Any]] = None,
+        types: Optional[Dict[str, Any]] = None,
     ) -> None:
         super().__init__(context, config, types, JsonReader(), JsonWriter())
diff --git a/antarest/study/storage/rawstudy/model/filesystem/lazy_node.py b/antarest/study/storage/rawstudy/model/filesystem/lazy_node.py
index 2a9204d71e..ac7d19856e 100644
--- a/antarest/study/storage/rawstudy/model/filesystem/lazy_node.py
+++ b/antarest/study/storage/rawstudy/model/filesystem/lazy_node.py
@@ -9,11 +9,11 @@
 #  SPDX-License-Identifier: MPL-2.0
 #
 # This file is part of the Antares project.
-import typing as t
 from abc import ABC, abstractmethod
 from dataclasses import dataclass
 from datetime import datetime, timedelta
 from pathlib import Path
+from typing import Any, Dict, Generic, List, Optional, Tuple, cast
 from zipfile import ZipFile

 from typing_extensions import override

@@ -25,16 +25,16 @@
 @dataclass
 class SimpleCache:
-    value: t.Any
+    value: Any
     expiration_date: datetime


-class LazyNode(INode, ABC, t.Generic[G, S, V]):  # type: ignore
+class LazyNode(INode, ABC, Generic[G, S, V]):  # type: ignore
     """
     Abstract node implementing lazy loading for its child implementations.
""" - ZIP_FILELIST_CACHE: t.Dict[str, SimpleCache] = {} + ZIP_FILELIST_CACHE: Dict[str, SimpleCache] = {} def __init__( self, @@ -46,7 +46,7 @@ def __init__( def _get_real_file_path( self, - ) -> t.Tuple[Path, t.Any]: + ) -> Tuple[Path, Any]: tmp_dir = None if self.config.archive_path: path, tmp_dir = self._extract_file_to_tmp_dir(self.config.archive_path) @@ -71,12 +71,12 @@ def file_exists(self) -> bool: def _get( self, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, depth: int = -1, expanded: bool = False, formatted: bool = True, get_node: bool = False, - ) -> t.Union[t.Union[str, G], INode[G, S, V]]: + ) -> str | G | INode[G, S, V]: self._assert_url_end(url) if get_node: @@ -87,7 +87,7 @@ def _get( if expanded: return link else: - return t.cast(G, self.context.resolver.resolve(link, formatted)) + return cast(G, self.context.resolver.resolve(link, formatted)) if expanded: return self.get_lazy_content() @@ -97,11 +97,11 @@ def _get( @override def get( self, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, depth: int = -1, expanded: bool = False, formatted: bool = True, - ) -> t.Union[str, G]: + ) -> str | G: output = self._get(url, depth, expanded, formatted, get_node=False) assert not isinstance(output, INode) return output @@ -109,14 +109,14 @@ def get( @override def get_node( self, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, ) -> INode[G, S, V]: output = self._get(url, get_node=True) assert isinstance(output, INode) return output @override - def delete(self, url: t.Optional[t.List[str]] = None) -> None: + def delete(self, url: Optional[List[str]] = None) -> None: self._assert_url_end(url) if self.get_link_path().exists(): self.get_link_path().unlink() @@ -128,7 +128,7 @@ def get_link_path(self) -> Path: return path @override - def save(self, data: t.Union[str, bytes, S], url: t.Optional[t.List[str]] = None) -> None: + def save(self, data: str | bytes | S, url: Optional[List[str]] = None) -> None: self._assert_not_in_zipped_file() self._assert_url_end(url) @@ -138,14 +138,14 @@ def save(self, data: t.Union[str, bytes, S], url: t.Optional[t.List[str]] = None self.config.path.unlink() return None - self.dump(t.cast(S, data), url) + self.dump(cast(S, data), url) if self.get_link_path().exists(): self.get_link_path().unlink() return None def get_lazy_content( self, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, depth: int = -1, expanded: bool = False, ) -> str: @@ -154,7 +154,7 @@ def get_lazy_content( @abstractmethod def load( self, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, depth: int = -1, expanded: bool = False, formatted: bool = True, @@ -174,7 +174,7 @@ def load( raise NotImplementedError() @abstractmethod - def dump(self, data: S, url: t.Optional[t.List[str]] = None) -> None: + def dump(self, data: S, url: Optional[List[str]] = None) -> None: """ Store data on tree. 
diff --git a/antarest/study/storage/rawstudy/model/filesystem/matrix/input_series_matrix.py b/antarest/study/storage/rawstudy/model/filesystem/matrix/input_series_matrix.py index 6ead440945..5bf7c25598 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/matrix/input_series_matrix.py +++ b/antarest/study/storage/rawstudy/model/filesystem/matrix/input_series_matrix.py @@ -12,8 +12,8 @@ import io import logging import shutil -import typing as t from pathlib import Path +from typing import List, Optional, cast import numpy as np import pandas as pd @@ -43,8 +43,8 @@ def __init__( context: ContextServer, config: FileStudyTreeConfig, freq: MatrixFrequency = MatrixFrequency.HOURLY, - nb_columns: t.Optional[int] = None, - default_empty: t.Optional[npt.NDArray[np.float64]] = None, + nb_columns: Optional[int] = None, + default_empty: Optional[npt.NDArray[np.float64]] = None, ): super().__init__(context=context, config=config, freq=freq) self.nb_columns = nb_columns @@ -55,7 +55,7 @@ def __init__( self.default_empty = np.copy(default_empty) self.default_empty.flags.writeable = True - def parse_as_dataframe(self, file_path: t.Optional[Path] = None) -> pd.DataFrame: + def parse_as_dataframe(self, file_path: Optional[Path] = None) -> pd.DataFrame: file_path = file_path or self.config.path try: stopwatch = StopWatch() @@ -63,7 +63,7 @@ def parse_as_dataframe(self, file_path: t.Optional[Path] = None) -> pd.DataFrame if link_path.exists(): link = link_path.read_text() matrix_json = self.context.resolver.resolve(link) - matrix_json = t.cast(JSON, matrix_json) + matrix_json = cast(JSON, matrix_json) matrix: pd.DataFrame = pd.DataFrame(**matrix_json) else: try: @@ -91,10 +91,10 @@ def parse_as_dataframe(self, file_path: t.Optional[Path] = None) -> pd.DataFrame return final_matrix @override - def parse_as_json(self, file_path: t.Optional[Path] = None) -> JSON: + def parse_as_json(self, file_path: Optional[Path] = None) -> JSON: df = self.parse_as_dataframe(file_path) stopwatch = StopWatch() - data = t.cast(JSON, df.to_dict(orient="split")) + data = cast(JSON, df.to_dict(orient="split")) stopwatch.log_elapsed(lambda x: logger.info(f"Matrix to dict in {x}s")) return data @@ -102,9 +102,9 @@ def parse_as_json(self, file_path: t.Optional[Path] = None) -> JSON: def check_errors( self, data: JSON, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, raising: bool = False, - ) -> t.List[str]: + ) -> List[str]: self._assert_url_end(url) errors = [] @@ -152,5 +152,5 @@ def get_file_content(self) -> OriginalFile: return OriginalFile(content=content, suffix=suffix, filename=filename) @override - def get_default_empty_matrix(self) -> t.Optional[npt.NDArray[np.float64]]: + def get_default_empty_matrix(self) -> Optional[npt.NDArray[np.float64]]: return self.default_empty diff --git a/antarest/study/storage/rawstudy/model/filesystem/root/input/areas/list.py b/antarest/study/storage/rawstudy/model/filesystem/root/input/areas/list.py index 2b873991c1..4ed88960a6 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/root/input/areas/list.py +++ b/antarest/study/storage/rawstudy/model/filesystem/root/input/areas/list.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. 
-import typing as t +from typing import List, Optional from typing_extensions import override @@ -22,7 +22,7 @@ AREAS_LIST_RELATIVE_PATH = "input/areas/list.txt" -class InputAreasList(INode[t.List[str], t.List[str], t.List[str]]): +class InputAreasList(INode[List[str], List[str], List[str]]): @override def normalize(self) -> None: pass # no external store in this node @@ -38,21 +38,21 @@ def __init__(self, context: ContextServer, config: FileStudyTreeConfig): @override def get_node( self, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, depth: int = -1, expanded: bool = False, formatted: bool = True, - ) -> INode[t.List[str], t.List[str], t.List[str]]: + ) -> INode[List[str], List[str], List[str]]: return self @override def get( self, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, depth: int = -1, expanded: bool = False, formatted: bool = True, - ) -> t.List[str]: + ) -> List[str]: if self.config.archive_path: lines = extract_lines_from_archive(self.config.archive_path, AREAS_LIST_RELATIVE_PATH) else: @@ -60,22 +60,22 @@ def get( return [l.strip() for l in lines if l.strip()] @override - def save(self, data: t.List[str], url: t.Optional[t.List[str]] = None) -> None: + def save(self, data: List[str], url: Optional[List[str]] = None) -> None: self._assert_not_in_zipped_file() self.config.path.write_text("\n".join(data)) @override - def delete(self, url: t.Optional[t.List[str]] = None) -> None: + def delete(self, url: Optional[List[str]] = None) -> None: if self.config.path.exists(): self.config.path.unlink() @override def check_errors( self, - data: t.List[str], - url: t.Optional[t.List[str]] = None, + data: List[str], + url: Optional[List[str]] = None, raising: bool = False, - ) -> t.List[str]: + ) -> List[str]: errors = [] if any(a not in data for a in [area.name for area in self.config.areas.values()]): errors.append(f"list.txt should have {self.config.area_names()} nodes but given {data}") diff --git a/antarest/study/storage/rawstudy/model/filesystem/root/input/hydro/hydro_ini.py b/antarest/study/storage/rawstudy/model/filesystem/root/input/hydro/hydro_ini.py index 02b5c7d62c..55697312af 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/root/input/hydro/hydro_ini.py +++ b/antarest/study/storage/rawstudy/model/filesystem/root/input/hydro/hydro_ini.py @@ -10,8 +10,6 @@ # # This file is part of the Antares project. -from antares.study.version import StudyVersion - from antarest.study.model import STUDY_VERSION_6_5 from antarest.study.storage.rawstudy.model.filesystem.config.model import FileStudyTreeConfig from antarest.study.storage.rawstudy.model.filesystem.context import ContextServer diff --git a/antarest/study/storage/rawstudy/model/filesystem/root/output/simulation/mode/common/links.py b/antarest/study/storage/rawstudy/model/filesystem/root/output/simulation/mode/common/links.py index 30f06e449c..5457e646f5 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/root/output/simulation/mode/common/links.py +++ b/antarest/study/storage/rawstudy/model/filesystem/root/output/simulation/mode/common/links.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. 
-import typing as t +from typing import Dict, List from typing_extensions import override @@ -29,7 +29,7 @@ def __init__( context: ContextServer, config: FileStudyTreeConfig, area_from: str, - link_names: t.List[str], + link_names: List[str], ): super().__init__(context, config) self.area_from = area_from @@ -58,7 +58,7 @@ def __init__( def build(self) -> TREE: children: TREE = {} links = [d.stem for d in self.config.path.iterdir()] - areas: t.Dict[str, t.List[str]] = {} + areas: Dict[str, List[str]] = {} for link in links: areas.setdefault(link.split(" - ")[0], []).append(link) for area_from, link_names in areas.items(): diff --git a/antarest/study/storage/rawstudy/model/filesystem/root/output/simulation/mode/mcall/digest.py b/antarest/study/storage/rawstudy/model/filesystem/root/output/simulation/mode/mcall/digest.py index 27f8f64f3f..561c67b710 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/root/output/simulation/mode/mcall/digest.py +++ b/antarest/study/storage/rawstudy/model/filesystem/root/output/simulation/mode/mcall/digest.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. -import typing as t +from typing import List, Optional, cast import pandas as pd from pydantic import Field @@ -24,8 +24,8 @@ class DigestMatrixUI(AntaresBaseModel): - columns: t.List[t.Union[str, t.List[str]]] - data: t.List[t.List[str]] + columns: List[str | List[str]] + data: List[List[str]] grouped_columns: bool = Field(alias="groupedColumns") @@ -88,7 +88,7 @@ def __init__(self, context: ContextServer, config: FileStudyTreeConfig): @override def load( self, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, depth: int = -1, expanded: bool = False, formatted: bool = True, @@ -97,7 +97,7 @@ def load( output = df.to_dict(orient="split") del output["index"] - return t.cast(JSON, output) + return cast(JSON, output) def get_ui(self) -> DigestUI: """ diff --git a/antarest/study/storage/rawstudy/model/filesystem/root/output/simulation/mode/mcall/grid.py b/antarest/study/storage/rawstudy/model/filesystem/root/output/simulation/mode/mcall/grid.py index 9e826e747f..df4af73777 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/root/output/simulation/mode/mcall/grid.py +++ b/antarest/study/storage/rawstudy/model/filesystem/root/output/simulation/mode/mcall/grid.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. 
-import typing as t +from typing import List, Optional, cast import pandas as pd from typing_extensions import override @@ -43,7 +43,7 @@ def __init__(self, context: ContextServer, config: FileStudyTreeConfig): @override def get_lazy_content( self, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, depth: int = -1, expanded: bool = False, ) -> str: @@ -52,7 +52,7 @@ def get_lazy_content( @override def load( self, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, depth: int = -1, expanded: bool = False, formatted: bool = True, @@ -62,14 +62,14 @@ def load( df.fillna("", inplace=True) # replace NaN values for the front-end output = df.to_dict(orient="split") del output["index"] - return t.cast(JSON, output) + return cast(JSON, output) @override - def dump(self, data: bytes, url: t.Optional[t.List[str]] = None) -> None: + def dump(self, data: bytes, url: Optional[List[str]] = None) -> None: raise MustNotModifyOutputException(self.config.path.name) @override - def check_errors(self, data: str, url: t.Optional[t.List[str]] = None, raising: bool = False) -> t.List[str]: + def check_errors(self, data: str, url: Optional[List[str]] = None, raising: bool = False) -> List[str]: if not self.config.path.exists(): msg = f"{self.config.path} not exist" if raising: diff --git a/antarest/study/storage/rawstudy/model/filesystem/root/output/simulation/mode/mcall/synthesis.py b/antarest/study/storage/rawstudy/model/filesystem/root/output/simulation/mode/mcall/synthesis.py index 9bdc2076e1..4b58af3c18 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/root/output/simulation/mode/mcall/synthesis.py +++ b/antarest/study/storage/rawstudy/model/filesystem/root/output/simulation/mode/mcall/synthesis.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. 
-import typing as t +from typing import List, Optional, cast import pandas as pd from typing_extensions import override @@ -29,7 +29,7 @@ def __init__(self, context: ContextServer, config: FileStudyTreeConfig): @override def get_lazy_content( self, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, depth: int = -1, expanded: bool = False, ) -> str: @@ -38,7 +38,7 @@ def get_lazy_content( @override def load( self, - url: t.Optional[t.List[str]] = None, + url: Optional[List[str]] = None, depth: int = -1, expanded: bool = False, formatted: bool = True, @@ -48,14 +48,14 @@ def load( df.fillna("", inplace=True) # replace NaN values for the front-end output = df.to_dict(orient="split") del output["index"] - return t.cast(JSON, output) + return cast(JSON, output) @override - def dump(self, data: bytes, url: t.Optional[t.List[str]] = None) -> None: + def dump(self, data: bytes, url: Optional[List[str]] = None) -> None: raise MustNotModifyOutputException(self.config.path.name) @override - def check_errors(self, data: str, url: t.Optional[t.List[str]] = None, raising: bool = False) -> t.List[str]: + def check_errors(self, data: str, url: Optional[List[str]] = None, raising: bool = False) -> List[str]: if not self.config.path.exists(): msg = f"{self.config.path} not exist" if raising: diff --git a/antarest/study/storage/rawstudy/model/filesystem/root/settings/scenariobuilder.py b/antarest/study/storage/rawstudy/model/filesystem/root/settings/scenariobuilder.py index d384433216..d3288e9f6d 100644 --- a/antarest/study/storage/rawstudy/model/filesystem/root/settings/scenariobuilder.py +++ b/antarest/study/storage/rawstudy/model/filesystem/root/settings/scenariobuilder.py @@ -11,7 +11,7 @@ # This file is part of the Antares project. import re -import typing as t +from typing import Dict, List, MutableMapping, Type import typing_extensions as te from typing_extensions import override @@ -29,7 +29,7 @@ _TSNumber: te.TypeAlias = int _HydroLevel: te.TypeAlias = float -_Rules = t.MutableMapping[str, t.Union[t.Type[_TSNumber], t.Type[_HydroLevel]]] +_Rules = MutableMapping[str, Type[_TSNumber] | Type[_HydroLevel]] class ScenarioBuilder(IniFileNode): @@ -126,7 +126,7 @@ def _populate_hydro_generation_power_rules(self, rules: _Rules) -> None: rules[f"hgp,{area_id},0"] = _TSNumber @override - def _get_filtering_kwargs(self, url: t.List[str]) -> t.Dict[str, str]: + def _get_filtering_kwargs(self, url: List[str]) -> Dict[str, str]: # If the URL contains 2 elements, we can filter the options based on the generator type. 
if len(url) == 2: section, symbol = url diff --git a/antarest/study/storage/rawstudy/raw_study_service.py b/antarest/study/storage/rawstudy/raw_study_service.py index 4d2a027965..be68c08346 100644 --- a/antarest/study/storage/rawstudy/raw_study_service.py +++ b/antarest/study/storage/rawstudy/raw_study_service.py @@ -13,10 +13,10 @@ import logging import shutil import time -import typing as t from datetime import datetime from pathlib import Path from threading import Thread +from typing import BinaryIO, List, Optional, Sequence from uuid import uuid4 from antares.study.version import StudyVersion @@ -76,7 +76,7 @@ def __init__( self.cleanup_thread.start() def update_from_raw_meta( - self, metadata: RawStudy, fallback_on_default: t.Optional[bool] = False, study_path: t.Optional[Path] = None + self, metadata: RawStudy, fallback_on_default: Optional[bool] = False, study_path: Optional[Path] = None ) -> None: """ Update metadata from study raw metadata @@ -165,7 +165,7 @@ def get_raw( self, metadata: RawStudy, use_cache: bool = True, - output_dir: t.Optional[Path] = None, + output_dir: Optional[Path] = None, ) -> FileStudy: """ Fetch a study object and its config @@ -181,7 +181,7 @@ def get_raw( return self.study_factory.create_from_fs(study_path, metadata.id, output_dir, use_cache=use_cache) @override - def get_synthesis(self, metadata: RawStudy, params: t.Optional[RequestParameters] = None) -> FileStudyTreeConfigDTO: + def get_synthesis(self, metadata: RawStudy, params: Optional[RequestParameters] = None) -> FileStudyTreeConfigDTO: self._check_study_exists(metadata) study_path = self.get_study_path(metadata) study = self.study_factory.create_from_fs(study_path, metadata.id) @@ -226,7 +226,7 @@ def copy( self, src_meta: RawStudy, dest_name: str, - groups: t.Sequence[str], + groups: Sequence[str], with_outputs: bool = False, ) -> RawStudy: """ @@ -317,7 +317,7 @@ def delete_output(self, metadata: RawStudy, output_name: str) -> None: output_path.unlink(missing_ok=True) remove_from_cache(self.cache, metadata.id) - def import_study(self, metadata: RawStudy, stream: t.BinaryIO) -> Study: + def import_study(self, metadata: RawStudy, stream: BinaryIO) -> Study: """ Import study in the directory of the study. @@ -352,7 +352,7 @@ def export_study_flat( metadata: RawStudy, dst_path: Path, outputs: bool = True, - output_list_filter: t.Optional[t.List[str]] = None, + output_list_filter: Optional[List[str]] = None, denormalize: bool = True, ) -> None: try: @@ -375,7 +375,7 @@ def export_study_flat( def check_errors( self, metadata: RawStudy, - ) -> t.List[str]: + ) -> List[str]: """ Check study antares data integrity Args: diff --git a/antarest/study/storage/rawstudy/watcher.py b/antarest/study/storage/rawstudy/watcher.py index 40a08f47a4..5a654948f5 100644 --- a/antarest/study/storage/rawstudy/watcher.py +++ b/antarest/study/storage/rawstudy/watcher.py @@ -11,7 +11,6 @@ # This file is part of the Antares project. 
import logging -import re import tempfile from html import escape from pathlib import Path diff --git a/antarest/study/storage/utils.py b/antarest/study/storage/utils.py index e8336cc1d1..5774271908 100644 --- a/antarest/study/storage/utils.py +++ b/antarest/study/storage/utils.py @@ -18,9 +18,9 @@ import shutil import tempfile import time -import typing as t from datetime import datetime, timedelta from pathlib import Path +from typing import Callable, List, Optional, Sequence, cast from uuid import uuid4 from zipfile import ZipFile @@ -138,7 +138,7 @@ def is_output_archived(path_output: Path) -> bool: return any((path_output.parent / (path_output.name + suffix)).exists() for suffix in suffixes) -def extract_output_name(path_output: Path, new_suffix_name: t.Optional[str] = None) -> str: +def extract_output_name(path_output: Path, new_suffix_name: Optional[str] = None) -> str: ini_reader = IniReader() archived = is_output_archived(path_output) if archived: @@ -186,7 +186,7 @@ def remove_from_cache(cache: ICache, root_id: str) -> None: def create_new_empty_study(version: StudyVersion, path_study: Path, path_resources: Path) -> None: - version_template: t.Optional[str] = STUDY_REFERENCE_TEMPLATES.get(version, None) + version_template: Optional[str] = STUDY_REFERENCE_TEMPLATES.get(version, None) if version_template is None: msg = f"{version} is not a supported version, supported versions are: {list(STUDY_REFERENCE_TEMPLATES.keys())}" raise UnsupportedStudyVersion(msg) @@ -198,8 +198,8 @@ def create_new_empty_study(version: StudyVersion, path_study: Path, path_resourc def study_matcher( - name: t.Optional[str], workspace: t.Optional[str], folder: t.Optional[str] -) -> t.Callable[[StudyMetadataDTO], bool]: + name: Optional[str], workspace: Optional[str], folder: Optional[str] +) -> Callable[[StudyMetadataDTO], bool]: def study_match(study: StudyMetadataDTO) -> bool: if name and not study.name.startswith(name): return False @@ -213,8 +213,8 @@ def study_match(study: StudyMetadataDTO) -> bool: def assert_permission_on_studies( - user: t.Optional[JWTUser], - studies: t.Sequence[t.Union[Study, StudyMetadataDTO]], + user: Optional[JWTUser], + studies: Sequence[Study | StudyMetadataDTO], permission_type: StudyPermissionType, *, raising: bool = True, @@ -253,8 +253,8 @@ def assert_permission_on_studies( def assert_permission( - user: t.Optional[JWTUser], - study: t.Optional[t.Union[Study, StudyMetadataDTO]], + user: Optional[JWTUser], + study: Optional[Study | StudyMetadataDTO], permission_type: StudyPermissionType, raising: bool = True, ) -> bool: @@ -300,7 +300,7 @@ def assert_permission( def get_start_date( file_study: FileStudy, - output_id: t.Optional[str] = None, + output_id: Optional[str] = None, level: StudyDownloadLevelDTO = StudyDownloadLevelDTO.HOURLY, ) -> MatrixIndex: """ @@ -313,12 +313,12 @@ def get_start_date( """ config = FileStudyHelpers.get_config(file_study, output_id)["general"] - starting_month = t.cast(str, config.get("first-month-in-year")) - starting_day = t.cast(str, config.get("january.1st")) - leapyear = t.cast(bool, config.get("leapyear")) - first_week_day = t.cast(str, config.get("first.weekday")) - start_offset = t.cast(int, config.get("simulation.start")) - end = t.cast(int, config.get("simulation.end")) + starting_month = cast(str, config.get("first-month-in-year")) + starting_day = cast(str, config.get("january.1st")) + leapyear = cast(bool, config.get("leapyear")) + first_week_day = cast(str, config.get("first.weekday")) + start_offset = cast(int, 
config.get("simulation.start")) + end = cast(int, config.get("simulation.end")) starting_month_index = MONTHS[starting_month.title()] starting_day_index = DAY_NAMES.index(starting_day.title()) @@ -334,7 +334,7 @@ def get_start_date( start_date = datetime(target_year, starting_month_index, 1) + start_offset_days def _get_steps( - daily_steps: int, temporality: StudyDownloadLevelDTO, begin_date: datetime, is_output: t.Optional[str] = None + daily_steps: int, temporality: StudyDownloadLevelDTO, begin_date: datetime, is_output: Optional[str] = None ) -> int: temporality_mapping = { StudyDownloadLevelDTO.DAILY: daily_steps, @@ -375,16 +375,16 @@ def export_study_flat( dest: Path, study_factory: StudyFactory, outputs: bool = True, - output_list_filter: t.Optional[t.List[str]] = None, + output_list_filter: Optional[List[str]] = None, denormalize: bool = True, - output_src_path: t.Optional[Path] = None, + output_src_path: Optional[Path] = None, ) -> None: start_time = time.time() output_src_path = output_src_path or study_dir / "output" output_dest_path = dest / "output" - def ignore_outputs(directory: str, _: t.Sequence[str]) -> t.Sequence[str]: + def ignore_outputs(directory: str, _: Sequence[str]) -> Sequence[str]: return ["output"] if str(directory) == str(study_dir) else [] shutil.copytree(src=study_dir, dst=dest, ignore=ignore_outputs) @@ -477,7 +477,7 @@ def is_ts_gen_tmp_dir(path: Path) -> bool: return path.name.startswith(TS_GEN_PREFIX) and "".join(path.suffixes[-2:]) == TS_GEN_SUFFIX and path.is_dir() -def should_ignore_folder_for_scan(path: Path, filter_in: t.List[str], filter_out: t.List[str]) -> bool: +def should_ignore_folder_for_scan(path: Path, filter_in: List[str], filter_out: List[str]) -> bool: if is_aw_no_scan(path): logger.info(f"No scan directive file found. Will skip further scan of folder {path}") return True @@ -497,11 +497,11 @@ def should_ignore_folder_for_scan(path: Path, filter_in: t.List[str], filter_out ) -def has_non_study_folder(path: Path, filter_in: t.List[str], filter_out: t.List[str]) -> bool: +def has_non_study_folder(path: Path, filter_in: List[str], filter_out: List[str]) -> bool: return any(is_non_study_folder(sub_path, filter_in, filter_out) for sub_path in path.iterdir()) -def is_non_study_folder(path: Path, filter_in: t.List[str], filter_out: t.List[str]) -> bool: +def is_non_study_folder(path: Path, filter_in: List[str], filter_out: List[str]) -> bool: if is_study_folder(path): return False if should_ignore_folder_for_scan(path, filter_in, filter_out): diff --git a/antarest/study/storage/variantstudy/business/utils.py b/antarest/study/storage/variantstudy/business/utils.py index 84fb848056..d94f6fe0c5 100644 --- a/antarest/study/storage/variantstudy/business/utils.py +++ b/antarest/study/storage/variantstudy/business/utils.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. -import typing as t +from typing import Any, Dict, List, Optional, Sequence from antarest.core.model import JSON from antarest.matrixstore.model import MatrixData @@ -22,7 +22,7 @@ from antarest.study.storage.variantstudy.model.model import CommandDTO -def validate_matrix(matrix: t.Union[t.List[t.List[MatrixData]], str], values: t.Dict[str, t.Any]) -> str: +def validate_matrix(matrix: List[List[MatrixData]] | str, values: Dict[str, Any]) -> str: """ Validates the matrix, stores the matrix array in the matrices repository, and returns a reference to the stored array. 
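`validate_matrix` here and the `strip_matrix_protocol` helper in the next hunk are two halves of one convention: a raw matrix is stored once in the matrix repository, and everywhere else it is carried as a prefixed URI string. A standalone sketch of that round trip — the prefix value and the in-memory store are assumptions; the real code delegates storage to the matrix service:

    from typing import Dict, List

    MATRIX_PROTOCOL_PREFIX = "matrix://"  # assumed value of the project constant
    _STORE: Dict[str, List[List[float]]] = {}  # stand-in for the matrix repository


    def validate_matrix(matrix: List[List[float]] | str) -> str:
        """Store a raw matrix and return a reference; pass references through."""
        if isinstance(matrix, str):
            return matrix
        matrix_id = f"m{len(_STORE)}"  # stand-in for the repository-assigned ID
        _STORE[matrix_id] = matrix
        return MATRIX_PROTOCOL_PREFIX + matrix_id


    def strip_matrix_protocol(matrix_uri: str) -> str:
        """Unwrap a stored reference back to the bare matrix ID."""
        if matrix_uri.startswith(MATRIX_PROTOCOL_PREFIX):
            return matrix_uri[len(MATRIX_PROTOCOL_PREFIX) :]
        raise ValueError(f"invalid matrix reference: {matrix_uri}")


    uri = validate_matrix([[0.0, 1.0], [1.0, 0.0]])
    assert strip_matrix_protocol(uri) == "m0"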
@@ -75,7 +75,7 @@ def remove_none_args(command_dto: CommandDTO) -> CommandDTO: return command_dto -def strip_matrix_protocol(matrix_uri: t.Union[t.List[t.List[float]], str, None]) -> str: +def strip_matrix_protocol(matrix_uri: List[List[float]] | str | None) -> str: assert isinstance(matrix_uri, str) if matrix_uri.startswith(MATRIX_PROTOCOL_PREFIX): return matrix_uri[len(MATRIX_PROTOCOL_PREFIX) :] @@ -102,13 +102,13 @@ def decode(alias: str, study: FileStudy) -> str: def transform_command_to_dto( - commands: t.Sequence[ICommand], - ref_commands: t.Optional[t.Sequence[CommandDTO]] = None, + commands: Sequence[ICommand], + ref_commands: Optional[Sequence[CommandDTO]] = None, force_aggregate: bool = False, -) -> t.List[CommandDTO]: +) -> List[CommandDTO]: if len(commands) <= 1: return [command.to_dto() for command in commands] - commands_dto: t.List[CommandDTO] = [] + commands_dto: List[CommandDTO] = [] ref_commands_dto = ref_commands if ref_commands is not None else [command.to_dto() for command in commands] prev_command = commands[0] cur_dto_index = 0 diff --git a/antarest/study/storage/variantstudy/business/utils_binding_constraint.py b/antarest/study/storage/variantstudy/business/utils_binding_constraint.py index 399df46636..ab93f54d46 100644 --- a/antarest/study/storage/variantstudy/business/utils_binding_constraint.py +++ b/antarest/study/storage/variantstudy/business/utils_binding_constraint.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. -import typing as t +from typing import Literal, Mapping, Sequence from antarest.study.storage.rawstudy.model.filesystem.config.binding_constraint import ( BindingConstraintFrequency, @@ -22,7 +22,7 @@ def parse_bindings_coeffs_and_save_into_config( bd_id: str, study_data_config: FileStudyTreeConfig, - coeffs: t.Mapping[str, t.Union[t.Literal["hourly", "daily", "weekly"], t.Sequence[float]]], + coeffs: Mapping[str, Literal["hourly", "daily", "weekly"] | Sequence[float]], operator: BindingConstraintOperator, time_step: BindingConstraintFrequency, group: str, diff --git a/antarest/study/storage/variantstudy/command_factory.py b/antarest/study/storage/variantstudy/command_factory.py index 94df3e3c49..4be8736dab 100644 --- a/antarest/study/storage/variantstudy/command_factory.py +++ b/antarest/study/storage/variantstudy/command_factory.py @@ -11,7 +11,7 @@ # This file is part of the Antares project. import copy -import typing as t +from typing import List, Optional from antares.study.version import StudyVersion @@ -106,7 +106,7 @@ def __init__( ) def _to_single_command( - self, action: str, args: JSON, version: int, study_version: StudyVersion, command_id: t.Optional[str] + self, action: str, args: JSON, version: int, study_version: StudyVersion, command_id: Optional[str] ) -> ICommand: """Convert a single CommandDTO to ICommand.""" if action in COMMAND_MAPPING: @@ -120,7 +120,7 @@ def _to_single_command( ) raise NotImplementedError(action) - def to_command(self, command_dto: CommandDTO) -> t.List[ICommand]: + def to_command(self, command_dto: CommandDTO) -> List[ICommand]: """ Convert a CommandDTO to a list of ICommand. @@ -156,7 +156,7 @@ def to_command(self, command_dto: CommandDTO) -> t.List[ICommand]: ] raise NotImplementedError() - def to_commands(self, cmd_dto_list: t.List[CommandDTO]) -> t.List[ICommand]: + def to_commands(self, cmd_dto_list: List[CommandDTO]) -> List[ICommand]: """ Convert a list of CommandDTO to a list of ICommand. 
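`CommandFactory._to_single_command` above is a registry dispatch: the DTO's `action` string selects a command class from `COMMAND_MAPPING`, which is then instantiated with the DTO's arguments. A bare-bones standalone sketch of that shape — the registry content and the constructor arguments are illustrative, not the project's real mapping:

    from typing import Any, Dict, Type


    class ICommand:
        def __init__(self, **args: Any) -> None:
            self.args = args


    class CreateArea(ICommand):
        pass


    # Stand-in registry; the real COMMAND_MAPPING covers every CommandName action.
    COMMAND_MAPPING: Dict[str, Type[ICommand]] = {"create_area": CreateArea}


    def to_single_command(action: str, args: Dict[str, Any]) -> ICommand:
        if action in COMMAND_MAPPING:
            return COMMAND_MAPPING[action](**args)
        raise NotImplementedError(action)


    cmd = to_single_command("create_area", {"area_name": "FR"})
    assert isinstance(cmd, CreateArea)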
diff --git a/antarest/study/storage/variantstudy/model/command/binding_constraint_utils.py b/antarest/study/storage/variantstudy/model/command/binding_constraint_utils.py index 45d40d5bc5..935ed43278 100644 --- a/antarest/study/storage/variantstudy/model/command/binding_constraint_utils.py +++ b/antarest/study/storage/variantstudy/model/command/binding_constraint_utils.py @@ -9,12 +9,12 @@ # SPDX-License-Identifier: MPL-2.0 # # This file is part of the Antares project. -import typing as t +from typing import Set from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy -def remove_bc_from_scenario_builder(study_data: FileStudy, removed_groups: t.Set[str]) -> None: +def remove_bc_from_scenario_builder(study_data: FileStudy, removed_groups: Set[str]) -> None: """ Update the scenario builder by removing the rows that correspond to the BC groups to remove. diff --git a/antarest/study/storage/variantstudy/model/command/common.py b/antarest/study/storage/variantstudy/model/command/common.py index 7516b84342..2641e47ffb 100644 --- a/antarest/study/storage/variantstudy/model/command/common.py +++ b/antarest/study/storage/variantstudy/model/command/common.py @@ -10,9 +10,9 @@ # # This file is part of the Antares project. -import typing as t from dataclasses import dataclass from enum import Enum +from typing import List from antarest.study.storage.rawstudy.model.filesystem.root.user.user import User @@ -59,5 +59,5 @@ class CommandName(Enum): REMOVE_USER_RESOURCE = "remove_user_resource" -def is_url_writeable(user_node: User, url: t.List[str]) -> bool: +def is_url_writeable(user_node: User, url: List[str]) -> bool: return url[0] not in [file.filename for file in user_node.registered_files] diff --git a/antarest/study/storage/variantstudy/model/command/create_area.py b/antarest/study/storage/variantstudy/model/command/create_area.py index eba7ac8567..c7b0f81497 100644 --- a/antarest/study/storage/variantstudy/model/command/create_area.py +++ b/antarest/study/storage/variantstudy/model/command/create_area.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. -import typing as t +from typing import Any, Dict, List, Optional, Tuple from pydantic import Field from typing_extensions import override @@ -25,7 +25,7 @@ ) from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput, FilteringOptions -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO @@ -77,10 +77,10 @@ class CreateArea(ICommand): # we choose to declare it as an empty dictionary. # fixme: remove this attribute in the next version if it is not used by the "Script R" team, # or if we don't want to support this feature. 
- metadata: t.Dict[str, str] = Field(default_factory=dict, description="Area metadata: country and tag list") + metadata: Dict[str, str] = Field(default_factory=dict, description="Area metadata: country and tag list") @override - def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutput, t.Dict[str, t.Any]]: + def _apply_config(self, study_data: FileStudyTreeConfig) -> Tuple[CommandOutput, Dict[str, Any]]: if self.command_context.generator_matrix_constants is None: raise ValueError() @@ -109,7 +109,7 @@ def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutpu ) @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: config = study_data.config output, data = self._apply_config(config) @@ -301,5 +301,5 @@ def to_dto(self) -> CommandDTO: ) @override - def get_inner_matrices(self) -> t.List[str]: + def get_inner_matrices(self) -> List[str]: return [] diff --git a/antarest/study/storage/variantstudy/model/command/create_binding_constraint.py b/antarest/study/storage/variantstudy/model/command/create_binding_constraint.py index 1a94a256be..d0edfd6d8b 100644 --- a/antarest/study/storage/variantstudy/model/command/create_binding_constraint.py +++ b/antarest/study/storage/variantstudy/model/command/create_binding_constraint.py @@ -10,9 +10,9 @@ # # This file is part of the Antares project. -import typing as t from abc import ABCMeta from enum import Enum +from typing import Any, Dict, List, Optional, Set, Tuple, Type import numpy as np from antares.study.version import StudyVersion @@ -40,11 +40,11 @@ ) from antarest.study.storage.variantstudy.model.command.binding_constraint_utils import remove_bc_from_scenario_builder from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO -MatrixType = t.List[t.List[MatrixData]] +MatrixType = List[List[MatrixData]] EXPECTED_MATRIX_SHAPES = { BindingConstraintFrequency.HOURLY: (8784, 3), @@ -103,7 +103,7 @@ class BindingConstraintPropertiesBase(AntaresBaseModel, extra="forbid", populate comments: str = "" @model_validator(mode="before") - def replace_with_alias(cls, values: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]: + def replace_with_alias(cls, values: Dict[str, Any]) -> Dict[str, Any]: if "type" in values: values["time_step"] = values.pop("type") return values @@ -114,7 +114,7 @@ class BindingConstraintProperties830(BindingConstraintPropertiesBase): filter_synthesis: str = Field("", alias="filter-synthesis") @field_validator("filter_synthesis", "filter_year_by_year", mode="before") - def _validate_filtering(cls, v: t.Any) -> str: + def _validate_filtering(cls, v: Any) -> str: return validate_filtering(v) @@ -122,14 +122,12 @@ class BindingConstraintProperties870(BindingConstraintProperties830): group: str = DEFAULT_GROUP -BindingConstraintProperties = t.Union[ - BindingConstraintPropertiesBase, - BindingConstraintProperties830, - BindingConstraintProperties870, -] +BindingConstraintProperties = ( + BindingConstraintPropertiesBase | 
BindingConstraintProperties830 | BindingConstraintProperties870 +) -def get_binding_constraint_config_cls(study_version: StudyVersion) -> t.Type[BindingConstraintProperties]: +def get_binding_constraint_config_cls(study_version: StudyVersion) -> Type[BindingConstraintProperties]: """ Retrieves the binding constraint configuration class based on the study version. """ @@ -141,7 +139,7 @@ def get_binding_constraint_config_cls(study_version: StudyVersion) -> t.Type[Bin return BindingConstraintPropertiesBase -def create_binding_constraint_config(study_version: StudyVersion, **kwargs: t.Any) -> BindingConstraintProperties: +def create_binding_constraint_config(study_version: StudyVersion, **kwargs: Any) -> BindingConstraintProperties: """ Factory method to create a binding constraint configuration model. @@ -173,27 +171,25 @@ class BindingConstraintMatrices(AntaresBaseModel, extra="forbid", populate_by_na Class used to store the matrices of a binding constraint. """ - values: t.Optional[t.Union[MatrixType, str]] = Field( + values: Optional[MatrixType | str] = Field( default=None, description="2nd member matrix for studies before v8.7", ) - less_term_matrix: t.Optional[t.Union[MatrixType, str]] = Field( + less_term_matrix: Optional[MatrixType | str] = Field( default=None, description="less term matrix for v8.7+ studies", ) - greater_term_matrix: t.Optional[t.Union[MatrixType, str]] = Field( + greater_term_matrix: Optional[MatrixType | str] = Field( default=None, description="greater term matrix for v8.7+ studies", ) - equal_term_matrix: t.Optional[t.Union[MatrixType, str]] = Field( + equal_term_matrix: Optional[MatrixType | str] = Field( default=None, description="equal term matrix for v8.7+ studies", ) @model_validator(mode="before") - def check_matrices( - cls, values: t.Dict[str, t.Optional[t.Union[MatrixType, str]]] - ) -> t.Dict[str, t.Optional[t.Union[MatrixType, str]]]: + def check_matrices(cls, values: Dict[str, Optional[MatrixType | str]]) -> Dict[str, Optional[MatrixType | str]]: values_matrix = values.get("values") or None less_term_matrix = values.get("less_term_matrix") or None greater_term_matrix = values.get("greater_term_matrix") or None @@ -217,7 +213,7 @@ class AbstractBindingConstraintCommand(OptionalProperties, BindingConstraintMatr Abstract class for binding constraint commands. 
""" - coeffs: t.Optional[t.Dict[str, t.List[float]]] = None + coeffs: Optional[Dict[str, List[float]]] = None @override def to_dto(self) -> CommandDTO: @@ -248,7 +244,7 @@ def to_dto(self) -> CommandDTO: ) @override - def get_inner_matrices(self) -> t.List[str]: + def get_inner_matrices(self) -> List[str]: matrix_service = self.command_context.matrix_service return [ matrix_service.get_matrix_id(matrix) @@ -263,11 +259,11 @@ def get_inner_matrices(self) -> t.List[str]: def get_corresponding_matrices( self, - v: t.Optional[t.Union[MatrixType, str]], + v: Optional[MatrixType | str], time_step: BindingConstraintFrequency, version: StudyVersion, create: bool, - ) -> t.Optional[str]: + ) -> Optional[str]: constants: GeneratorMatrixConstants = self.command_context.generator_matrix_constants if v is None: @@ -302,7 +298,7 @@ def validates_and_fills_matrices( self, *, time_step: BindingConstraintFrequency, - specific_matrices: t.Optional[t.List[str]], + specific_matrices: Optional[List[str]], version: StudyVersion, create: bool, ) -> None: @@ -323,11 +319,11 @@ def validates_and_fills_matrices( def apply_binding_constraint( self, study_data: FileStudy, - binding_constraints: t.Dict[str, t.Any], + binding_constraints: Dict[str, Any], new_key: str, bd_id: str, *, - old_groups: t.Optional[t.Set[str]] = None, + old_groups: Optional[Set[str]] = None, ) -> CommandOutput: version = study_data.config.version @@ -417,7 +413,7 @@ class CreateBindingConstraint(AbstractBindingConstraintCommand): name: str @override - def _apply_config(self, study_data_config: FileStudyTreeConfig) -> t.Tuple[CommandOutput, t.Dict[str, t.Any]]: + def _apply_config(self, study_data_config: FileStudyTreeConfig) -> Tuple[CommandOutput, Dict[str, Any]]: bd_id = transform_name_to_id(self.name) group = self.group or DEFAULT_GROUP operator = self.operator or DEFAULT_OPERATOR @@ -433,7 +429,7 @@ def _apply_config(self, study_data_config: FileStudyTreeConfig) -> t.Tuple[Comma return CommandOutput(status=True), {} @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: binding_constraints = study_data.tree.get(["input", "bindingconstraints", "bindingconstraints"]) new_key = str(len(binding_constraints)) bd_id = transform_name_to_id(self.name) diff --git a/antarest/study/storage/variantstudy/model/command/create_cluster.py b/antarest/study/storage/variantstudy/model/command/create_cluster.py index e02cb944fc..82b5fbe840 100644 --- a/antarest/study/storage/variantstudy/model/command/create_cluster.py +++ b/antarest/study/storage/variantstudy/model/command/create_cluster.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. 
-import typing as t +from typing import Any, Dict, List, Optional, Tuple from pydantic import Field, ValidationInfo, field_validator from typing_extensions import override @@ -28,7 +28,7 @@ from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy from antarest.study.storage.variantstudy.business.utils import strip_matrix_protocol, validate_matrix from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO @@ -49,9 +49,9 @@ class CreateCluster(ICommand): area_id: str cluster_name: str - parameters: t.Dict[str, t.Any] - prepro: t.Optional[t.Union[t.List[t.List[MatrixData]], str]] = Field(None, validate_default=True) - modulation: t.Optional[t.Union[t.List[t.List[MatrixData]], str]] = Field(None, validate_default=True) + parameters: Dict[str, Any] + prepro: Optional[List[List[MatrixData]] | str] = Field(None, validate_default=True) + modulation: Optional[List[List[MatrixData]] | str] = Field(None, validate_default=True) @field_validator("cluster_name", mode="before") def validate_cluster_name(cls, val: str) -> str: @@ -63,9 +63,9 @@ def validate_cluster_name(cls, val: str) -> str: @field_validator("prepro", mode="before") def validate_prepro( cls, - v: t.Optional[t.Union[t.List[t.List[MatrixData]], str]], - values: t.Union[t.Dict[str, t.Any], ValidationInfo], - ) -> t.Optional[t.Union[t.List[t.List[MatrixData]], str]]: + v: Optional[List[List[MatrixData]] | str], + values: Dict[str, Any] | ValidationInfo, + ) -> Optional[List[List[MatrixData]] | str]: new_values = values if isinstance(values, dict) else values.data if v is None: v = new_values["command_context"].generator_matrix_constants.get_thermal_prepro_data() @@ -76,9 +76,9 @@ def validate_prepro( @field_validator("modulation", mode="before") def validate_modulation( cls, - v: t.Optional[t.Union[t.List[t.List[MatrixData]], str]], - values: t.Union[t.Dict[str, t.Any], ValidationInfo], - ) -> t.Optional[t.Union[t.List[t.List[MatrixData]], str]]: + v: Optional[List[List[MatrixData]] | str], + values: Dict[str, Any] | ValidationInfo, + ) -> Optional[List[List[MatrixData]] | str]: new_values = values if isinstance(values, dict) else values.data if v is None: v = new_values["command_context"].generator_matrix_constants.get_thermal_prepro_modulation() @@ -88,7 +88,7 @@ def validate_modulation( return validate_matrix(v, new_values) @override - def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutput, t.Dict[str, t.Any]]: + def _apply_config(self, study_data: FileStudyTreeConfig) -> Tuple[CommandOutput, Dict[str, Any]]: # Search the Area in the configuration if self.area_id not in study_data.areas: return ( @@ -123,7 +123,7 @@ def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutpu ) @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: output, data = self._apply_config(study_data.config) if not output.status: return output @@ -176,8 +176,8 @@ def to_dto(self) -> CommandDTO: ) @override - def get_inner_matrices(self) 
-> t.List[str]: - matrices: t.List[str] = [] + def get_inner_matrices(self) -> List[str]: + matrices: List[str] = [] if self.prepro: assert_this(isinstance(self.prepro, str)) matrices.append(strip_matrix_protocol(self.prepro)) diff --git a/antarest/study/storage/variantstudy/model/command/create_district.py b/antarest/study/storage/variantstudy/model/command/create_district.py index ac28b32d92..7307443833 100644 --- a/antarest/study/storage/variantstudy/model/command/create_district.py +++ b/antarest/study/storage/variantstudy/model/command/create_district.py @@ -11,7 +11,7 @@ # This file is part of the Antares project. from enum import Enum -from typing import Any, Dict, List, Optional, Tuple, cast +from typing import Any, Dict, List, Optional, Tuple from pydantic import field_validator from typing_extensions import override @@ -23,7 +23,7 @@ ) from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO diff --git a/antarest/study/storage/variantstudy/model/command/create_link.py b/antarest/study/storage/variantstudy/model/command/create_link.py index c553ab7be7..7f5154986b 100644 --- a/antarest/study/storage/variantstudy/model/command/create_link.py +++ b/antarest/study/storage/variantstudy/model/command/create_link.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. from abc import ABCMeta -from typing import Any, Dict, List, Optional, Tuple, Union, cast +from typing import Any, Dict, List, Optional, Tuple, Union from antares.study.version import StudyVersion from pydantic import ValidationInfo, field_validator, model_validator @@ -25,9 +25,7 @@ from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy from antarest.study.storage.variantstudy.business.utils import strip_matrix_protocol, validate_matrix from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput, FilteringOptions -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand -from antarest.study.storage.variantstudy.model.command.replace_matrix import ReplaceMatrix -from antarest.study.storage.variantstudy.model.command.update_config import UpdateConfig +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO diff --git a/antarest/study/storage/variantstudy/model/command/create_renewables_cluster.py b/antarest/study/storage/variantstudy/model/command/create_renewables_cluster.py index 951060e74c..f4b32c9943 100644 --- a/antarest/study/storage/variantstudy/model/command/create_renewables_cluster.py +++ b/antarest/study/storage/variantstudy/model/command/create_renewables_cluster.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. 
-import typing as t +from typing import Any, Dict, List, Optional, Tuple from pydantic import field_validator from typing_extensions import override @@ -25,7 +25,7 @@ from antarest.study.storage.rawstudy.model.filesystem.config.renewable import create_renewable_config from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO @@ -46,7 +46,7 @@ class CreateRenewablesCluster(ICommand): area_id: str cluster_name: str - parameters: t.Dict[str, t.Any] + parameters: Dict[str, Any] @field_validator("cluster_name") def validate_cluster_name(cls, val: str) -> str: @@ -56,7 +56,7 @@ def validate_cluster_name(cls, val: str) -> str: return val @override - def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutput, t.Dict[str, t.Any]]: + def _apply_config(self, study_data: FileStudyTreeConfig) -> Tuple[CommandOutput, Dict[str, Any]]: if EnrModelling(study_data.enr_modelling) != EnrModelling.CLUSTERS: # Since version 8.1 of the solver, we can use renewable clusters # instead of "Load", "Wind" and "Solar" objects for modelling. @@ -104,7 +104,7 @@ def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutpu ) @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: output, data = self._apply_config(study_data.config) if not output.status: return output @@ -149,5 +149,5 @@ def to_dto(self) -> CommandDTO: ) @override - def get_inner_matrices(self) -> t.List[str]: + def get_inner_matrices(self) -> List[str]: return [] diff --git a/antarest/study/storage/variantstudy/model/command/create_st_storage.py b/antarest/study/storage/variantstudy/model/command/create_st_storage.py index 6bd07f5c50..7d1d8da4e8 100644 --- a/antarest/study/storage/variantstudy/model/command/create_st_storage.py +++ b/antarest/study/storage/variantstudy/model/command/create_st_storage.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. -import typing as t +from typing import Any, Dict, List, Optional, Tuple, cast import numpy as np from pydantic import Field, ValidationInfo, model_validator @@ -25,7 +25,7 @@ from antarest.study.storage.variantstudy.business.matrix_constants_generator import GeneratorMatrixConstants from antarest.study.storage.variantstudy.business.utils import strip_matrix_protocol, validate_matrix from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO @@ -41,7 +41,7 @@ # Minimum required version. 
REQUIRED_VERSION = STUDY_VERSION_8_6 -MatrixType = t.List[t.List[MatrixData]] +MatrixType = List[List[MatrixData]] # noinspection SpellCheckingInspection @@ -61,23 +61,23 @@ class CreateSTStorage(ICommand): area_id: str = Field(description="Area ID", pattern=r"[a-z0-9_(),& -]+") parameters: STStorageConfigType - pmax_injection: t.Optional[t.Union[MatrixType, str]] = Field( + pmax_injection: Optional[MatrixType | str] = Field( default=None, description="Charge capacity (modulation)", ) - pmax_withdrawal: t.Optional[t.Union[MatrixType, str]] = Field( + pmax_withdrawal: Optional[MatrixType | str] = Field( default=None, description="Discharge capacity (modulation)", ) - lower_rule_curve: t.Optional[t.Union[MatrixType, str]] = Field( + lower_rule_curve: Optional[MatrixType | str] = Field( default=None, description="Lower rule curve (coefficient)", ) - upper_rule_curve: t.Optional[t.Union[MatrixType, str]] = Field( + upper_rule_curve: Optional[MatrixType | str] = Field( default=None, description="Upper rule curve (coefficient)", ) - inflows: t.Optional[t.Union[MatrixType, str]] = Field( + inflows: Optional[MatrixType | str] = Field( default=None, description="Inflows (MW)", ) @@ -93,9 +93,7 @@ def storage_name(self) -> str: return self.parameters.name @staticmethod - def validate_field( - v: t.Optional[t.Union[MatrixType, str]], values: t.Dict[str, t.Any], field: str - ) -> t.Optional[t.Union[MatrixType, str]]: + def validate_field(v: Optional[MatrixType | str], values: Dict[str, Any], field: str) -> Optional[MatrixType | str]: """ Validates a matrix array or link, and store the matrix array in the matrix repository. @@ -148,21 +146,21 @@ def validate_field( constrained = set(_MATRIX_NAMES) - {"inflows"} if field in constrained and (np.any(array < 0) or np.any(array > 1)): raise ValueError("Matrix values should be between 0 and 1") - v = t.cast(MatrixType, array.tolist()) + v = cast(MatrixType, array.tolist()) return validate_matrix(v, values) # Invalid datatype # pragma: no cover raise TypeError(repr(v)) @model_validator(mode="before") - def validate_matrices(cls, values: t.Union[t.Dict[str, t.Any], ValidationInfo]) -> t.Dict[str, t.Any]: + def validate_matrices(cls, values: Dict[str, Any] | ValidationInfo) -> Dict[str, Any]: new_values = values if isinstance(values, dict) else values.data for field in _MATRIX_NAMES: new_values[field] = cls.validate_field(new_values.get(field, None), new_values, field) return new_values @override - def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutput, t.Dict[str, t.Any]]: + def _apply_config(self, study_data: FileStudyTreeConfig) -> Tuple[CommandOutput, Dict[str, Any]]: """ Applies configuration changes to the study data: add the short-term storage in the storages list. @@ -218,7 +216,7 @@ def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutpu ) @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: """ Applies the study data to update storage configurations and saves the changes. @@ -272,9 +270,9 @@ def to_dto(self) -> CommandDTO: ) @override - def get_inner_matrices(self) -> t.List[str]: + def get_inner_matrices(self) -> List[str]: """ Retrieves the list of matrix IDs. 
""" - matrices: t.List[str] = [strip_matrix_protocol(getattr(self, attr)) for attr in _MATRIX_NAMES] + matrices: List[str] = [strip_matrix_protocol(getattr(self, attr)) for attr in _MATRIX_NAMES] return matrices diff --git a/antarest/study/storage/variantstudy/model/command/create_user_resource.py b/antarest/study/storage/variantstudy/model/command/create_user_resource.py index 7274df3e13..964d348ebe 100644 --- a/antarest/study/storage/variantstudy/model/command/create_user_resource.py +++ b/antarest/study/storage/variantstudy/model/command/create_user_resource.py @@ -9,8 +9,8 @@ # SPDX-License-Identifier: MPL-2.0 # # This file is part of the Antares project. -import typing as t from enum import StrEnum +from typing import Any, Dict, List, Optional, Tuple, cast from typing_extensions import override @@ -21,7 +21,7 @@ from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy from antarest.study.storage.rawstudy.model.filesystem.root.user.user import User from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput, is_url_writeable -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO @@ -53,14 +53,14 @@ class CreateUserResource(ICommand): data: CreateUserResourceData @override - def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutput, t.Dict[str, t.Any]]: + def _apply_config(self, study_data: FileStudyTreeConfig) -> Tuple[CommandOutput, Dict[str, Any]]: return CommandOutput(status=True, message="ok"), {} @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: url = [item for item in self.data.path.split("/") if item] study_tree = study_data.tree - user_node = t.cast(User, study_tree.get_node(["user"])) + user_node = cast(User, study_tree.get_node(["user"])) if not is_url_writeable(user_node, url): return CommandOutput( status=False, message=f"you are not allowed to create a resource here: {self.data.path}" @@ -87,5 +87,5 @@ def to_dto(self) -> CommandDTO: ) @override - def get_inner_matrices(self) -> t.List[str]: + def get_inner_matrices(self) -> List[str]: return [] diff --git a/antarest/study/storage/variantstudy/model/command/generate_thermal_cluster_timeseries.py b/antarest/study/storage/variantstudy/model/command/generate_thermal_cluster_timeseries.py index 977e5d21b2..eaf2bb73fc 100644 --- a/antarest/study/storage/variantstudy/model/command/generate_thermal_cluster_timeseries.py +++ b/antarest/study/storage/variantstudy/model/command/generate_thermal_cluster_timeseries.py @@ -13,8 +13,8 @@ import logging import shutil import tempfile -import typing as t from pathlib import Path +from typing import Dict, List, Optional import numpy as np import pandas as pd @@ -53,7 +53,7 @@ def _apply_config(self, study_data: FileStudyTreeConfig) -> OutputTuple: return CommandOutput(status=True, message="Nothing to do"), {} @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: study_path 
= study_data.config.study_path with tempfile.TemporaryDirectory(suffix=TS_GEN_SUFFIX, prefix=TS_GEN_PREFIX, dir=study_path.parent) as path: tmp_dir = Path(path) @@ -68,7 +68,7 @@ def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = return CommandOutput(status=True, message="All time series were generated successfully") def _build_timeseries( - self, study_data: FileStudy, tmp_path: Path, listener: t.Optional[ICommandListener] = None + self, study_data: FileStudy, tmp_path: Path, listener: Optional[ICommandListener] = None ) -> None: # 1- Get the seed and nb_years to generate # NB: Default seed in IHM Legacy: 5489, default seed in web: 3005489. @@ -81,7 +81,7 @@ def _build_timeseries( # 3- Do a first loop to know how many operations will be performed total_generations = sum(len(area.thermals) for area in study_data.config.areas.values()) # 4- Loop through areas in alphabetical order - areas: t.Dict[str, Area] = study_data.config.areas + areas: Dict[str, Area] = study_data.config.areas sorted_areas = {k: areas[k] for k in sorted(areas)} generation_performed = 0 for area_id, area in sorted_areas.items(): @@ -149,7 +149,7 @@ def to_dto(self) -> CommandDTO: return CommandDTO(action=self.command_name.value, args={}, study_version=self.study_version) @override - def get_inner_matrices(self) -> t.List[str]: + def get_inner_matrices(self) -> List[str]: # This is used to get used matrices and not remove them inside the garbage collector loop. return [] diff --git a/antarest/study/storage/variantstudy/model/command/icommand.py b/antarest/study/storage/variantstudy/model/command/icommand.py index 018729799b..7e75037491 100644 --- a/antarest/study/storage/variantstudy/model/command/icommand.py +++ b/antarest/study/storage/variantstudy/model/command/icommand.py @@ -11,14 +11,13 @@ # This file is part of the Antares project. import logging -import typing as t import uuid from abc import ABC, abstractmethod +from typing import Any, Dict, List, Optional, Tuple import typing_extensions as te from antarest.core.serialization import AntaresBaseModel -from antarest.core.utils.utils import assert_this from antarest.study.model import StudyVersionStr from antarest.study.storage.rawstudy.model.filesystem.config.model import FileStudyTreeConfig from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy @@ -31,7 +30,7 @@ logger = logging.getLogger(__name__) # note: we ought to use a named tuple here ;-) -OutputTuple: te.TypeAlias = t.Tuple[CommandOutput, t.Dict[str, t.Any]] +OutputTuple: te.TypeAlias = Tuple[CommandOutput, Dict[str, Any]] class ICommand(ABC, AntaresBaseModel, extra="forbid", arbitrary_types_allowed=True): @@ -45,7 +44,7 @@ class ICommand(ABC, AntaresBaseModel, extra="forbid", arbitrary_types_allowed=Tr command_context: The context of the command. """ - command_id: t.Optional[uuid.UUID] = None + command_id: Optional[uuid.UUID] = None command_name: CommandName version: int command_context: CommandContext @@ -78,7 +77,7 @@ def apply_config(self, study_data: FileStudyTreeConfig) -> CommandOutput: return output @abstractmethod - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: """ Applies the study data to update storage configurations and saves the changes. 
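The comment kept above `OutputTuple` ("we ought to use a named tuple here") suggests the natural follow-up; a sketch of what that alias could become, with `CommandOutput` stubbed so the snippet is self-contained:

from typing import Any, Dict, NamedTuple

class CommandOutput(NamedTuple):  # stub standing in for the real model
    status: bool
    message: str = ""

class OutputTuple(NamedTuple):
    """Named equivalent of Tuple[CommandOutput, Dict[str, Any]]."""
    output: CommandOutput
    data: Dict[str, Any]

# It still unpacks like the plain tuple, so existing call sites keep working:
output, data = OutputTuple(CommandOutput(status=True, message="ok"), {})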
@@ -90,7 +89,7 @@ def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = """ raise NotImplementedError() - def apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: """ Applies the study data to update storage configurations and saves the changes. @@ -123,7 +122,7 @@ def to_dto(self) -> CommandDTO: raise NotImplementedError() @abstractmethod - def get_inner_matrices(self) -> t.List[str]: + def get_inner_matrices(self) -> List[str]: """ Retrieves the list of matrix IDs. """ diff --git a/antarest/study/storage/variantstudy/model/command/remove_area.py b/antarest/study/storage/variantstudy/model/command/remove_area.py index 2d52e98cae..2250943400 100644 --- a/antarest/study/storage/variantstudy/model/command/remove_area.py +++ b/antarest/study/storage/variantstudy/model/command/remove_area.py @@ -12,7 +12,7 @@ import contextlib import logging -import typing as t +from typing import Any, Dict, List, Optional, Tuple from typing_extensions import override @@ -31,7 +31,7 @@ remove_area_cluster_from_binding_constraints, ) from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO @@ -67,7 +67,7 @@ def _remove_area_from_sets_in_config(self, study_data_config: FileStudyTreeConfi study_data_config.sets[id_] = set_ @override - def _apply_config(self, study_data_config: FileStudyTreeConfig) -> t.Tuple[CommandOutput, t.Dict[str, t.Any]]: + def _apply_config(self, study_data_config: FileStudyTreeConfig) -> Tuple[CommandOutput, Dict[str, Any]]: del study_data_config.areas[self.id] self._remove_area_from_links_in_config(study_data_config) @@ -227,7 +227,7 @@ def _remove_area_from_scenario_builder(self, study_data: FileStudy) -> None: # noinspection SpellCheckingInspection @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: study_data.tree.delete(["input", "areas", self.id]) study_data.tree.delete(["input", "hydro", "common", "capacity", f"maxpower_{self.id}"]) study_data.tree.delete(["input", "hydro", "common", "capacity", f"reservoir_{self.id}"]) @@ -296,5 +296,5 @@ def to_dto(self) -> CommandDTO: ) @override - def get_inner_matrices(self) -> t.List[str]: + def get_inner_matrices(self) -> List[str]: return [] diff --git a/antarest/study/storage/variantstudy/model/command/remove_binding_constraint.py b/antarest/study/storage/variantstudy/model/command/remove_binding_constraint.py index 6d63fb5828..4a4af0b214 100644 --- a/antarest/study/storage/variantstudy/model/command/remove_binding_constraint.py +++ b/antarest/study/storage/variantstudy/model/command/remove_binding_constraint.py @@ -21,7 +21,7 @@ from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy from antarest.study.storage.variantstudy.model.command.binding_constraint_utils import remove_bc_from_scenario_builder from antarest.study.storage.variantstudy.model.command.common import 
CommandName, CommandOutput -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO diff --git a/antarest/study/storage/variantstudy/model/command/remove_cluster.py b/antarest/study/storage/variantstudy/model/command/remove_cluster.py index 0aa6d915a6..711d5bb0ff 100644 --- a/antarest/study/storage/variantstudy/model/command/remove_cluster.py +++ b/antarest/study/storage/variantstudy/model/command/remove_cluster.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. -import typing as t +from typing import Any, Dict, List, Optional, Tuple from typing_extensions import override @@ -20,7 +20,7 @@ remove_area_cluster_from_binding_constraints, ) from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO @@ -43,7 +43,7 @@ class RemoveCluster(ICommand): cluster_id: str @override - def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutput, t.Dict[str, t.Any]]: + def _apply_config(self, study_data: FileStudyTreeConfig) -> Tuple[CommandOutput, Dict[str, Any]]: """ Applies configuration changes to the study data: remove the thermal clusters from the storages list. @@ -102,7 +102,7 @@ def _remove_cluster_from_scenario_builder(self, study_data: FileStudy) -> None: study_data.tree.save(rulesets, ["settings", "scenariobuilder"]) @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: """ Applies the study data to update thermal cluster configurations and saves the changes: remove corresponding the configuration and remove the attached time series. 
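`_remove_cluster_from_scenario_builder` above rewrites the rulesets before saving them back to `settings/scenariobuilder`; a rough sketch of the pruning it performs, assuming a `t,<area>,<year>,<cluster>` key layout for thermal entries (the layout itself is not shown in this hunk):

from typing import Any, Dict

def prune_cluster(rulesets: Dict[str, Dict[str, Any]], area_id: str, cluster_id: str) -> None:
    # Drop every thermal rule that targets the removed cluster.
    for ruleset in rulesets.values():
        for key in list(ruleset):
            parts = key.split(",")
            if parts[0] == "t" and parts[1] == area_id and parts[-1] == cluster_id:
                del ruleset[key]

rules = {"Default Ruleset": {"t,fr,0,gas": 1, "l,fr,0": 2}}
prune_cluster(rules, "fr", "gas")
assert rules == {"Default Ruleset": {"l,fr,0": 2}}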
@@ -152,7 +152,7 @@ def to_dto(self) -> CommandDTO: ) @override - def get_inner_matrices(self) -> t.List[str]: + def get_inner_matrices(self) -> List[str]: return [] def _remove_cluster_from_binding_constraints(self, study_data: FileStudy) -> None: diff --git a/antarest/study/storage/variantstudy/model/command/remove_district.py b/antarest/study/storage/variantstudy/model/command/remove_district.py index ddf5730712..9d1cdeaec4 100644 --- a/antarest/study/storage/variantstudy/model/command/remove_district.py +++ b/antarest/study/storage/variantstudy/model/command/remove_district.py @@ -17,7 +17,7 @@ from antarest.study.storage.rawstudy.model.filesystem.config.model import FileStudyTreeConfig from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO diff --git a/antarest/study/storage/variantstudy/model/command/remove_link.py b/antarest/study/storage/variantstudy/model/command/remove_link.py index 1357cc9974..93c0eb58f6 100644 --- a/antarest/study/storage/variantstudy/model/command/remove_link.py +++ b/antarest/study/storage/variantstudy/model/command/remove_link.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. -import typing as t +from typing import Any, Dict, List, Optional from pydantic import field_validator, model_validator from typing_extensions import override @@ -19,7 +19,7 @@ from antarest.study.storage.rawstudy.model.filesystem.config.model import FileStudyTreeConfig, transform_name_to_id from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand, OutputTuple +from antarest.study.storage.variantstudy.model.command.icommand import ICommand, OutputTuple from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO @@ -77,7 +77,7 @@ def _check_link_exists(self, study_cfg: FileStudyTreeConfig) -> OutputTuple: """ # Data is empty in case of error - data: t.Dict[str, t.Any] = {} + data: Dict[str, Any] = {} if self.area1 not in study_cfg.areas: message = f"The source area '{self.area1}' does not exist." @@ -128,7 +128,7 @@ def _remove_link_from_scenario_builder(self, study_data: FileStudy) -> None: study_data.tree.save(rulesets, ["settings", "scenariobuilder"]) @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: """ Update the configuration and the study data by removing the link between the source and target areas. 
@@ -163,5 +163,5 @@ def to_dto(self) -> CommandDTO: ) @override - def get_inner_matrices(self) -> t.List[str]: + def get_inner_matrices(self) -> List[str]: return [] diff --git a/antarest/study/storage/variantstudy/model/command/remove_multiple_binding_constraints.py b/antarest/study/storage/variantstudy/model/command/remove_multiple_binding_constraints.py index f8dfacadc5..b106568391 100644 --- a/antarest/study/storage/variantstudy/model/command/remove_multiple_binding_constraints.py +++ b/antarest/study/storage/variantstudy/model/command/remove_multiple_binding_constraints.py @@ -9,7 +9,8 @@ # SPDX-License-Identifier: MPL-2.0 # # This file is part of the Antares project. -import typing as t + +from typing import List, Optional from typing_extensions import override @@ -19,7 +20,7 @@ from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy from antarest.study.storage.variantstudy.model.command.binding_constraint_utils import remove_bc_from_scenario_builder from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand, OutputTuple +from antarest.study.storage.variantstudy.model.command.icommand import ICommand, OutputTuple from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO @@ -33,19 +34,21 @@ class RemoveMultipleBindingConstraints(ICommand): version: int = 1 # Properties of the `REMOVE_MULTIPLE_BINDING_CONSTRAINTS` command: - ids: t.List[str] + ids: List[str] @override def _apply_config(self, study_data: FileStudyTreeConfig) -> OutputTuple: # If at least one bc is missing in the database, we raise an error already_existing_ids = {binding.id for binding in study_data.bindings} missing_bc_ids = [id_ for id_ in self.ids if id_ not in already_existing_ids] + if missing_bc_ids: - return CommandOutput(status=False, message=f"Binding constraint not found: '{missing_bc_ids}'"), {} + return CommandOutput(status=False, message=f"Binding constraints missing: {missing_bc_ids}"), {} + return CommandOutput(status=True), {} @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: command_output, _ = self._apply_config(study_data.config) if not command_output.status: @@ -96,5 +99,5 @@ def to_dto(self) -> CommandDTO: ) @override - def get_inner_matrices(self) -> t.List[str]: + def get_inner_matrices(self) -> List[str]: return [] diff --git a/antarest/study/storage/variantstudy/model/command/remove_renewables_cluster.py b/antarest/study/storage/variantstudy/model/command/remove_renewables_cluster.py index 0741bb47e4..5bee20d694 100644 --- a/antarest/study/storage/variantstudy/model/command/remove_renewables_cluster.py +++ b/antarest/study/storage/variantstudy/model/command/remove_renewables_cluster.py @@ -10,14 +10,14 @@ # # This file is part of the Antares project. 
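The reworked `_apply_config` above diffs the requested IDs against the existing bindings and reports every missing constraint at once; the check itself reduces to a set lookup that preserves request order in the message:

from typing import List

def find_missing(requested: List[str], existing: List[str]) -> List[str]:
    existing_ids = set(existing)
    return [id_ for id_ in requested if id_ not in existing_ids]

missing = find_missing(["bc1", "bc2", "bc3"], ["bc2"])
assert missing == ["bc1", "bc3"]
# -> CommandOutput(status=False, message=f"Binding constraints missing: {missing}")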
-import typing as t +from typing import Any, Dict, List, Optional, Tuple from typing_extensions import override from antarest.study.storage.rawstudy.model.filesystem.config.model import Area, FileStudyTreeConfig from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO @@ -40,7 +40,7 @@ class RemoveRenewablesCluster(ICommand): cluster_id: str @override - def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutput, t.Dict[str, t.Any]]: + def _apply_config(self, study_data: FileStudyTreeConfig) -> Tuple[CommandOutput, Dict[str, Any]]: """ Applies configuration changes to the study data: remove the renewable clusters from the storages list. @@ -97,7 +97,7 @@ def _remove_cluster_from_scenario_builder(self, study_data: FileStudy) -> None: study_data.tree.save(rulesets, ["settings", "scenariobuilder"]) @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: """ Applies the study data to update renewable cluster configurations and saves the changes: remove corresponding the configuration and remove the attached time series. @@ -144,5 +144,5 @@ def to_dto(self) -> CommandDTO: ) @override - def get_inner_matrices(self) -> t.List[str]: + def get_inner_matrices(self) -> List[str]: return [] diff --git a/antarest/study/storage/variantstudy/model/command/remove_st_storage.py b/antarest/study/storage/variantstudy/model/command/remove_st_storage.py index c387e3adde..97c6edf0b2 100644 --- a/antarest/study/storage/variantstudy/model/command/remove_st_storage.py +++ b/antarest/study/storage/variantstudy/model/command/remove_st_storage.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. -import typing as t +from typing import Any, Dict, List, Optional, Tuple from pydantic import Field from typing_extensions import override @@ -19,7 +19,7 @@ from antarest.study.storage.rawstudy.model.filesystem.config.model import Area, FileStudyTreeConfig from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO @@ -45,7 +45,7 @@ class RemoveSTStorage(ICommand): storage_id: str = Field(description="Short term storage ID", pattern=r"[a-z0-9_(),& -]+") @override - def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutput, t.Dict[str, t.Any]]: + def _apply_config(self, study_data: FileStudyTreeConfig) -> Tuple[CommandOutput, Dict[str, Any]]: """ Applies configuration changes to the study data: remove the storage from the storages list. 
@@ -103,7 +103,7 @@ def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutpu ) @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: """ Applies the study data to update storage configurations and saves the changes: remove the storage from the configuration and remove the attached time series. @@ -148,5 +148,5 @@ def to_dto(self) -> CommandDTO: ) @override - def get_inner_matrices(self) -> t.List[str]: + def get_inner_matrices(self) -> List[str]: return [] diff --git a/antarest/study/storage/variantstudy/model/command/remove_user_resource.py b/antarest/study/storage/variantstudy/model/command/remove_user_resource.py index d626c031fe..19ba8dac5d 100644 --- a/antarest/study/storage/variantstudy/model/command/remove_user_resource.py +++ b/antarest/study/storage/variantstudy/model/command/remove_user_resource.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. -import typing as t +from typing import Any, Dict, List, Optional, Tuple, cast from typing_extensions import override @@ -20,7 +20,7 @@ from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy from antarest.study.storage.rawstudy.model.filesystem.root.user.user import User from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput, is_url_writeable -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO @@ -46,14 +46,14 @@ class RemoveUserResource(ICommand): data: RemoveUserResourceData @override - def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutput, t.Dict[str, t.Any]]: + def _apply_config(self, study_data: FileStudyTreeConfig) -> Tuple[CommandOutput, Dict[str, Any]]: return CommandOutput(status=True, message="ok"), {} @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: url = [item for item in self.data.path.split("/") if item] study_tree = study_data.tree - user_node = t.cast(User, study_tree.get_node(["user"])) + user_node = cast(User, study_tree.get_node(["user"])) if not is_url_writeable(user_node, url): return CommandOutput( status=False, message=f"you are not allowed to delete this resource : {self.data.path}" @@ -75,5 +75,5 @@ def to_dto(self) -> CommandDTO: ) @override - def get_inner_matrices(self) -> t.List[str]: + def get_inner_matrices(self) -> List[str]: return [] diff --git a/antarest/study/storage/variantstudy/model/command/replace_matrix.py b/antarest/study/storage/variantstudy/model/command/replace_matrix.py index 1f2a28dd4e..f72c1ee1cf 100644 --- a/antarest/study/storage/variantstudy/model/command/replace_matrix.py +++ b/antarest/study/storage/variantstudy/model/command/replace_matrix.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. 
-import typing as t +from typing import Any, Dict, List, Optional, Tuple from pydantic import Field, ValidationInfo, field_validator from typing_extensions import override @@ -24,7 +24,7 @@ from antarest.study.storage.rawstudy.model.filesystem.matrix.matrix import MatrixNode from antarest.study.storage.variantstudy.business.utils import AliasDecoder, strip_matrix_protocol, validate_matrix from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO @@ -44,14 +44,14 @@ class ReplaceMatrix(ICommand): # ================== target: str - matrix: t.Union[t.List[t.List[MatrixData]], str] = Field(validate_default=True) + matrix: List[List[MatrixData]] | str = Field(validate_default=True) @field_validator("matrix", mode="before") - def matrix_validator(cls, matrix: t.Union[t.List[t.List[MatrixData]], str], values: ValidationInfo) -> str: + def matrix_validator(cls, matrix: List[List[MatrixData]] | str, values: ValidationInfo) -> str: return validate_matrix(matrix, values.data) @override - def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutput, t.Dict[str, t.Any]]: + def _apply_config(self, study_data: FileStudyTreeConfig) -> Tuple[CommandOutput, Dict[str, Any]]: return ( CommandOutput( status=True, @@ -61,7 +61,7 @@ def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutpu ) @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: if self.target[0] == "@": self.target = AliasDecoder.decode(self.target, study_data) @@ -104,6 +104,6 @@ def to_dto(self) -> CommandDTO: ) @override - def get_inner_matrices(self) -> t.List[str]: + def get_inner_matrices(self) -> List[str]: assert_this(isinstance(self.matrix, str)) return [strip_matrix_protocol(self.matrix)] diff --git a/antarest/study/storage/variantstudy/model/command/update_binding_constraint.py b/antarest/study/storage/variantstudy/model/command/update_binding_constraint.py index d55d4721d3..67b5a5b429 100644 --- a/antarest/study/storage/variantstudy/model/command/update_binding_constraint.py +++ b/antarest/study/storage/variantstudy/model/command/update_binding_constraint.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. 
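`ReplaceMatrix.matrix` above is typed with the new-style union `List[List[MatrixData]] | str`: callers may pass inline data or a matrix reference, and the `before` validator normalizes both to a reference string. A self-contained sketch of that shape, with `validate_matrix` stubbed and the `matrix://` prefix assumed:

from typing import List

from pydantic import BaseModel, Field, field_validator

MATRIX_PROTOCOL = "matrix://"  # assumed prefix handled by strip_matrix_protocol

class ReplaceMatrixSketch(BaseModel):
    # Either inline data or an already-registered "matrix://<id>" reference.
    matrix: List[List[float]] | str = Field(validate_default=True)

    @field_validator("matrix", mode="before")
    def matrix_validator(cls, matrix: List[List[float]] | str) -> str:
        # Stand-in for validate_matrix(): store inline data, return its reference.
        if isinstance(matrix, str):
            return matrix
        return MATRIX_PROTOCOL + "fake-id"

    def get_inner_matrices(self) -> List[str]:
        assert isinstance(self.matrix, str)
        return [self.matrix.removeprefix(MATRIX_PROTOCOL)]

print(ReplaceMatrixSketch(matrix=[[0.0, 1.0]]).get_inner_matrices())  # ['fake-id']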
-import typing as t +from typing import Any, Dict, Mapping, Optional, Tuple from typing_extensions import override @@ -31,7 +31,7 @@ TermMatrices, create_binding_constraint_config, ) -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO @@ -115,7 +115,7 @@ class UpdateBindingConstraint(AbstractBindingConstraintCommand): id: str @override - def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutput, t.Dict[str, t.Any]]: + def _apply_config(self, study_data: FileStudyTreeConfig) -> Tuple[CommandOutput, Dict[str, Any]]: index = next(i for i, bc in enumerate(study_data.bindings) if bc.id == self.id) existing_constraint = study_data.bindings[index] areas_set = existing_constraint.areas @@ -143,7 +143,7 @@ def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutpu study_data.bindings[index] = new_constraint return CommandOutput(status=True), {} - def _find_binding_config(self, binding_constraints: t.Mapping[str, JSON]) -> t.Optional[t.Tuple[str, JSON]]: + def _find_binding_config(self, binding_constraints: Mapping[str, JSON]) -> Optional[Tuple[str, JSON]]: """ Find the binding constraint with the given ID in the list of binding constraints, and returns its index and configuration, or `None` if it does not exist. @@ -155,7 +155,7 @@ def _find_binding_config(self, binding_constraints: t.Mapping[str, JSON]) -> t.O return None @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: binding_constraints = study_data.tree.get(["input", "bindingconstraints", "bindingconstraints"]) # When all BC of a given group are removed, the group should be removed from the scenario builder diff --git a/antarest/study/storage/variantstudy/model/command/update_config.py b/antarest/study/storage/variantstudy/model/command/update_config.py index 605c215871..ead39a4f68 100644 --- a/antarest/study/storage/variantstudy/model/command/update_config.py +++ b/antarest/study/storage/variantstudy/model/command/update_config.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. 
-import typing as t +from typing import Any, Dict, Generator, List, Optional, Tuple import typing_extensions as te from typing_extensions import override @@ -20,16 +20,16 @@ from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy from antarest.study.storage.rawstudy.model.filesystem.ini_file_node import IniFileNode from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO _ENR_MODELLING_KEY = "settings/generaldata/other preferences/renewable-generation-modelling" -_Data: te.TypeAlias = t.Union[str, int, float, bool, JSON, None] +_Data: te.TypeAlias = str | int | float | bool | JSON | None -def _iter_dict(data: _Data, root_key: str = "") -> t.Generator[t.Tuple[str, t.Any], None, None]: +def _iter_dict(data: _Data, root_key: str = "") -> Generator[Tuple[str, Any], None, None]: if isinstance(data, dict): for key, value in data.items(): sub_key = f"{root_key}/{key}" if root_key else key @@ -56,7 +56,7 @@ class UpdateConfig(ICommand): data: _Data @override - def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutput, t.Dict[str, t.Any]]: + def _apply_config(self, study_data: FileStudyTreeConfig) -> Tuple[CommandOutput, Dict[str, Any]]: # The renewable-generation-modelling parameter must be reflected in the config if self.target.startswith("settings"): for key, value in _iter_dict(self.data, root_key=self.target): @@ -67,7 +67,7 @@ def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutpu return CommandOutput(status=True, message="ok"), {} @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: url = self.target.split("/") tree_node = study_data.tree.get_node(url) if not isinstance(tree_node, IniFileNode): @@ -93,5 +93,5 @@ def to_dto(self) -> CommandDTO: ) @override - def get_inner_matrices(self) -> t.List[str]: + def get_inner_matrices(self) -> List[str]: return [] diff --git a/antarest/study/storage/variantstudy/model/command/update_district.py b/antarest/study/storage/variantstudy/model/command/update_district.py index d41bbc6f31..224c921da5 100644 --- a/antarest/study/storage/variantstudy/model/command/update_district.py +++ b/antarest/study/storage/variantstudy/model/command/update_district.py @@ -18,7 +18,7 @@ from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput from antarest.study.storage.variantstudy.model.command.create_district import DistrictBaseFilter -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO diff --git a/antarest/study/storage/variantstudy/model/command/update_link.py 
b/antarest/study/storage/variantstudy/model/command/update_link.py index 17d52f91ef..742ff54d4a 100644 --- a/antarest/study/storage/variantstudy/model/command/update_link.py +++ b/antarest/study/storage/variantstudy/model/command/update_link.py @@ -9,7 +9,7 @@ # SPDX-License-Identifier: MPL-2.0 # # This file is part of the Antares project. -import typing as t +from typing import List, Optional from typing_extensions import override @@ -18,7 +18,7 @@ from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput from antarest.study.storage.variantstudy.model.command.create_link import AbstractLinkCommand -from antarest.study.storage.variantstudy.model.command.icommand import ICommand, OutputTuple +from antarest.study.storage.variantstudy.model.command.icommand import OutputTuple from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO @@ -45,7 +45,7 @@ def _apply_config(self, study_data: FileStudyTreeConfig) -> OutputTuple: ) @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: version = study_data.config.version properties = study_data.tree.get(["input", "links", self.area1, "properties", self.area2]) @@ -74,5 +74,5 @@ def to_dto(self) -> CommandDTO: return super().to_dto() @override - def get_inner_matrices(self) -> t.List[str]: + def get_inner_matrices(self) -> List[str]: return super().get_inner_matrices() diff --git a/antarest/study/storage/variantstudy/model/command/update_raw_file.py b/antarest/study/storage/variantstudy/model/command/update_raw_file.py index c085e6753b..d124a728d4 100644 --- a/antarest/study/storage/variantstudy/model/command/update_raw_file.py +++ b/antarest/study/storage/variantstudy/model/command/update_raw_file.py @@ -19,7 +19,7 @@ from antarest.study.storage.rawstudy.model.filesystem.factory import FileStudy from antarest.study.storage.rawstudy.model.filesystem.raw_file_node import RawFileNode from antarest.study.storage.variantstudy.model.command.common import CommandName, CommandOutput -from antarest.study.storage.variantstudy.model.command.icommand import MATCH_SIGNATURE_SEPARATOR, ICommand +from antarest.study.storage.variantstudy.model.command.icommand import ICommand from antarest.study.storage.variantstudy.model.command_listener.command_listener import ICommandListener from antarest.study.storage.variantstudy.model.model import CommandDTO diff --git a/antarest/study/storage/variantstudy/model/command/update_scenario_builder.py b/antarest/study/storage/variantstudy/model/command/update_scenario_builder.py index 871a2f3995..b2c1ad9952 100644 --- a/antarest/study/storage/variantstudy/model/command/update_scenario_builder.py +++ b/antarest/study/storage/variantstudy/model/command/update_scenario_builder.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. 
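`_iter_dict` in update_config.py above flattens a nested mapping into slash-joined keys, which is how a single `UpdateConfig` payload can be matched against paths such as the renewable-generation-modelling key. A self-contained sketch of that traversal (the non-dict branch is assumed, since the hunk only shows the recursive case):

from typing import Any, Generator, Tuple, Union

Data = Union[str, int, float, bool, dict, None]

def iter_dict(data: Data, root_key: str = "") -> Generator[Tuple[str, Any], None, None]:
    if isinstance(data, dict):
        for key, value in data.items():
            sub_key = f"{root_key}/{key}" if root_key else key
            yield from iter_dict(value, sub_key)
    else:
        yield root_key, data  # assumed leaf case

settings = {"other preferences": {"renewable-generation-modelling": "clusters"}}
print(dict(iter_dict(settings, root_key="settings/generaldata")))
# {'settings/generaldata/other preferences/renewable-generation-modelling': 'clusters'}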
-import typing as t +from typing import Any, Dict, List, Mapping, MutableMapping, Optional, Tuple, cast import numpy as np from typing_extensions import override @@ -34,7 +34,7 @@ def _get_active_ruleset(study_data: FileStudy) -> str: """ url = ["settings", "generaldata", "general", "active-rules-scenario"] try: - return t.cast(str, study_data.tree.get(url)) + return cast(str, study_data.tree.get(url)) except KeyError: return "" @@ -53,10 +53,10 @@ class UpdateScenarioBuilder(ICommand): # Command parameters # ================== - data: t.Union[t.Dict[str, t.Any], t.Mapping[str, t.Any], t.MutableMapping[str, t.Any]] + data: Dict[str, Any] | Mapping[str, Any] | MutableMapping[str, Any] @override - def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = None) -> CommandOutput: + def _apply(self, study_data: FileStudy, listener: Optional[ICommandListener] = None) -> CommandOutput: """ Apply the command to the study data. @@ -98,7 +98,7 @@ def _apply(self, study_data: FileStudy, listener: t.Optional[ICommandListener] = return CommandOutput(status=True) @override - def _apply_config(self, study_data: FileStudyTreeConfig) -> t.Tuple[CommandOutput, t.Dict[str, t.Any]]: + def _apply_config(self, study_data: FileStudyTreeConfig) -> Tuple[CommandOutput, Dict[str, Any]]: return CommandOutput(status=True), {} @override @@ -108,5 +108,5 @@ def to_dto(self) -> CommandDTO: ) @override - def get_inner_matrices(self) -> t.List[str]: + def get_inner_matrices(self) -> List[str]: return [] diff --git a/antarest/study/storage/variantstudy/model/dbmodel.py b/antarest/study/storage/variantstudy/model/dbmodel.py index d0d6435732..2c5fc72e05 100644 --- a/antarest/study/storage/variantstudy/model/dbmodel.py +++ b/antarest/study/storage/variantstudy/model/dbmodel.py @@ -11,9 +11,9 @@ # This file is part of the Antares project. import datetime -import typing as t import uuid from pathlib import Path +from typing import Optional from sqlalchemy import Column, DateTime, ForeignKey, Integer, String # type: ignore from sqlalchemy.orm import relationship # type: ignore @@ -38,7 +38,7 @@ class VariantStudySnapshot(Base): # type: ignore primary_key=True, ) created_at: datetime.date = Column(DateTime) - last_executed_command: t.Optional[str] = Column(String(), nullable=True) + last_executed_command: Optional[str] = Column(String(), nullable=True) __mapper_args__ = { "polymorphic_identity": "variant_study_snapshot", @@ -111,7 +111,7 @@ class VariantStudy(Study): ForeignKey("study.id", ondelete="CASCADE"), primary_key=True, ) - generation_task: t.Optional[str] = Column(String(), nullable=True) + generation_task: Optional[str] = Column(String(), nullable=True) __mapper_args__ = { "polymorphic_identity": "variantstudy", diff --git a/antarest/study/storage/variantstudy/model/model.py b/antarest/study/storage/variantstudy/model/model.py index cb1866037f..99f1fee392 100644 --- a/antarest/study/storage/variantstudy/model/model.py +++ b/antarest/study/storage/variantstudy/model/model.py @@ -10,8 +10,8 @@ # # This file is part of the Antares project. import datetime -import typing as t import uuid +from typing import MutableSequence, Optional, Tuple import typing_extensions as te @@ -19,7 +19,7 @@ from antarest.core.serialization import AntaresBaseModel from antarest.study.model import StudyMetadataDTO, StudyVersionStr -LegacyDetailsDTO = t.Tuple[str, bool, str] +LegacyDetailsDTO = Tuple[str, bool, str] """ Legacy details DTO: triplet of name, output status and output message. 
""" @@ -42,7 +42,7 @@ class NewDetailsDTO(te.TypedDict): msg: str -DetailsDTO = t.Union[LegacyDetailsDTO, NewDetailsDTO] +DetailsDTO = LegacyDetailsDTO | NewDetailsDTO class GenerationResultInfoDTO(AntaresBaseModel): @@ -55,7 +55,7 @@ class GenerationResultInfoDTO(AntaresBaseModel): """ success: bool - details: t.MutableSequence[DetailsDTO] + details: MutableSequence[DetailsDTO] class CommandDTOAPI(AntaresBaseModel): @@ -69,12 +69,12 @@ class CommandDTOAPI(AntaresBaseModel): version: The version of the command. """ - id: t.Optional[str] = None + id: Optional[str] = None action: str - args: t.Union[t.MutableSequence[JSON], JSON] + args: MutableSequence[JSON] | JSON version: int = 1 - user_name: t.Optional[str] = None - updated_at: t.Optional[datetime.datetime] = None + user_name: Optional[str] = None + updated_at: Optional[datetime.datetime] = None class CommandDTO(AntaresBaseModel): @@ -91,15 +91,15 @@ class CommandDTO(AntaresBaseModel): updated_at: The time the command was last updated. """ - id: t.Optional[str] = None + id: Optional[str] = None action: str - args: t.Union[t.MutableSequence[JSON], JSON] + args: MutableSequence[JSON] | JSON version: int = 1 study_version: StudyVersionStr - user_id: t.Optional[int] = None - updated_at: t.Optional[datetime.datetime] = None + user_id: Optional[int] = None + updated_at: Optional[datetime.datetime] = None - def to_api(self, user_name: t.Optional[str] = None) -> CommandDTOAPI: + def to_api(self, user_name: Optional[str] = None) -> CommandDTOAPI: data = self.model_dump(mode="json", exclude={"study_version", "user_id"}) data["user_name"] = user_name return CommandDTOAPI.model_validate(data) @@ -131,7 +131,7 @@ class VariantTreeDTO: children: A list of variant children. """ - def __init__(self, node: StudyMetadataDTO, children: t.MutableSequence["VariantTreeDTO"]) -> None: + def __init__(self, node: StudyMetadataDTO, children: MutableSequence["VariantTreeDTO"]) -> None: # We are intentionally not using Pydantic’s `BaseModel` here to prevent potential # `RecursionError` exceptions that can occur with Pydantic versions before v2. self.node = node diff --git a/antarest/study/storage/variantstudy/repository.py b/antarest/study/storage/variantstudy/repository.py index 9c9425630d..56abcef977 100644 --- a/antarest/study/storage/variantstudy/repository.py +++ b/antarest/study/storage/variantstudy/repository.py @@ -10,7 +10,7 @@ # # This file is part of the Antares project. -import typing as t +from typing import List, Optional, Sequence, cast from sqlalchemy.orm import Session, joinedload # type: ignore from typing_extensions import override @@ -27,7 +27,7 @@ class VariantStudyRepository(StudyMetadataRepository): Variant study repository """ - def __init__(self, cache_service: ICache, session: t.Optional[Session] = None): + def __init__(self, cache_service: ICache, session: Optional[Session] = None): """ Initialize the variant study repository. @@ -53,7 +53,7 @@ def session(self) -> Session: # Get the user-defined session return self._session - def get_children(self, parent_id: str) -> t.List[VariantStudy]: + def get_children(self, parent_id: str) -> List[VariantStudy]: """ Get the children of a variant study in chronological order. 
@@ -65,10 +65,10 @@ def get_children(self, parent_id: str) -> t.List[VariantStudy]: """ q = self.session.query(VariantStudy).filter(Study.parent_id == parent_id) q = q.order_by(Study.created_at.desc()) - studies = t.cast(t.List[VariantStudy], q.all()) + studies = cast(List[VariantStudy], q.all()) return studies - def get_ancestor_or_self_ids(self, variant_id: str) -> t.Sequence[str]: + def get_ancestor_or_self_ids(self, variant_id: str) -> Sequence[str]: """ Retrieve the list of ancestor variant identifiers, including the `variant_id`, its parent, and all predecessors of the parent, up to and including the ID @@ -92,17 +92,17 @@ def get_ancestor_or_self_ids(self, variant_id: str) -> t.Sequence[str]: q = self.session.query(recursive_q) return [r[0] for r in q] - def get_all_command_blocks(self) -> t.List[CommandBlock]: + def get_all_command_blocks(self) -> List[CommandBlock]: """ Get all command blocks. Returns: List of `CommandBlock` objects. """ - cmd_blocks: t.List[CommandBlock] = self.session.query(CommandBlock).all() + cmd_blocks: List[CommandBlock] = self.session.query(CommandBlock).all() return cmd_blocks - def find_variants(self, variant_ids: t.Sequence[str]) -> t.Sequence[VariantStudy]: + def find_variants(self, variant_ids: Sequence[str]) -> Sequence[VariantStudy]: """ Find a list of variants by IDs diff --git a/antarest/study/storage/variantstudy/snapshot_generator.py b/antarest/study/storage/variantstudy/snapshot_generator.py index 58d4d62b7b..60e726f2e0 100644 --- a/antarest/study/storage/variantstudy/snapshot_generator.py +++ b/antarest/study/storage/variantstudy/snapshot_generator.py @@ -16,8 +16,8 @@ import datetime import logging import shutil -import typing as t from pathlib import Path +from typing import List, NamedTuple, Optional, Sequence, Tuple from antarest.core.exceptions import VariantGenerationError from antarest.core.interfaces.cache import CacheConstants, ICache @@ -72,7 +72,7 @@ def generate_snapshot( denormalize: bool = True, from_scratch: bool = False, notifier: ITaskNotifier = NoopNotifier(), - listener: t.Optional[ICommandListener] = None, + listener: Optional[ICommandListener] = None, ) -> GenerationResultInfoDTO: # ATTENTION: since we are making changes to disk, a file lock is needed. # The locking is currently done in the `VariantStudyService.generate_task` function @@ -142,7 +142,7 @@ def generate_snapshot( return results - def _retrieve_descendants(self, variant_study_id: str) -> t.Tuple[RawStudy, t.Sequence[VariantStudy]]: + def _retrieve_descendants(self, variant_study_id: str) -> Tuple[RawStudy, Sequence[VariantStudy]]: # Get all ancestors of the current study from bottom to top # The first IDs are variant IDs, the last is the root study ID. 
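`get_ancestor_or_self_ids` above delegates the parent-chain walk to the database through `recursive_q`; one way to build such a recursive CTE with SQLAlchemy, sketched against a hypothetical `study` table with `id`/`parent_id` columns (the real query is not shown in this hunk):

from typing import List

from sqlalchemy import Column, ForeignKey, String, select
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Study(Base):
    __tablename__ = "study"
    id = Column(String, primary_key=True)
    parent_id = Column(String, ForeignKey("study.id"), nullable=True)

def ancestor_or_self_ids(session: Session, variant_id: str) -> List[str]:
    # Anchor: the variant itself; recursive part: join each row to its parent.
    top = select(Study.id, Study.parent_id).where(Study.id == variant_id).cte(recursive=True)
    parents = select(Study.id, Study.parent_id).join(top, Study.id == top.c.parent_id)
    recursive_q = top.union_all(parents)
    return [row[0] for row in session.execute(select(recursive_q.c.id))]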
ancestor_ids = self.repository.get_ancestor_or_self_ids(variant_study_id) @@ -151,7 +151,7 @@ def _retrieve_descendants(self, variant_study_id: str) -> t.Tuple[RawStudy, t.Se root_study = self.repository.one(descendant_ids[0]) return root_study, descendants - def _export_ref_study(self, snapshot_dir: Path, ref_study: t.Union[RawStudy, VariantStudy]) -> None: + def _export_ref_study(self, snapshot_dir: Path, ref_study: RawStudy | VariantStudy) -> None: if isinstance(ref_study, VariantStudy): snapshot_dir.parent.mkdir(parents=True, exist_ok=True) export_study_flat( @@ -175,8 +175,8 @@ def _apply_commands( self, snapshot_dir: Path, variant_study: VariantStudy, - cmd_blocks: t.Sequence[CommandBlock], - listener: t.Optional[ICommandListener] = None, + cmd_blocks: Sequence[CommandBlock], + listener: Optional[ICommandListener] = None, ) -> GenerationResultInfoDTO: commands = [self.command_factory.to_command(cb.to_dto()) for cb in cmd_blocks] generator = VariantCommandGenerator(self.study_factory) @@ -219,19 +219,19 @@ def _update_cache(self, file_study: FileStudy) -> None: ) -class RefStudySearchResult(t.NamedTuple): +class RefStudySearchResult(NamedTuple): """ Result of the search for the reference study. """ - ref_study: t.Union[RawStudy, VariantStudy] - cmd_blocks: t.Sequence[CommandBlock] + ref_study: RawStudy | VariantStudy + cmd_blocks: Sequence[CommandBlock] force_regenerate: bool = False def search_ref_study( - root_study: t.Union[RawStudy, VariantStudy], - descendants: t.Sequence[VariantStudy], + root_study: RawStudy | VariantStudy, + descendants: Sequence[VariantStudy], *, from_scratch: bool = False, ) -> RefStudySearchResult: @@ -251,10 +251,10 @@ def search_ref_study( return RefStudySearchResult(ref_study=root_study, cmd_blocks=[], force_regenerate=True) # The reference study is the root study or a variant study with a valid snapshot - ref_study: t.Union[RawStudy, VariantStudy] + ref_study: RawStudy | VariantStudy # The commands to apply on the reference study to generate the current variant - cmd_blocks: t.List[CommandBlock] + cmd_blocks: List[CommandBlock] if from_scratch: # In the case of a from scratch generation, the root study will be used as the reference study. 
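`search_ref_study` above decides where snapshot generation starts: the root study for a from-scratch build, otherwise the deepest descendant whose snapshot is still usable, with the remaining command blocks replayed on top. A stripped-down sketch of that selection (the snapshot-validity test is an assumption):

from dataclasses import dataclass, field
from typing import List

@dataclass
class StudyStub:
    id: str
    snapshot_ok: bool = False          # stands in for a valid, up-to-date snapshot
    cmd_blocks: List[str] = field(default_factory=list)

def pick_ref(root: StudyStub, descendants: List[StudyStub], *, from_scratch: bool) -> StudyStub:
    if from_scratch:
        return root
    ref = root
    for variant in descendants:        # ordered from the root towards the leaf
        if variant.snapshot_ok:
            ref = variant
        else:
            break                      # everything below must be regenerated
    return ref

chain = [StudyStub("v1", snapshot_ok=True), StudyStub("v2")]
assert pick_ref(StudyStub("root"), chain, from_scratch=False).id == "v1"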
diff --git a/antarest/study/storage/variantstudy/variant_command_generator.py b/antarest/study/storage/variantstudy/variant_command_generator.py
index 42113ead14..d30b77ece0 100644
--- a/antarest/study/storage/variantstudy/variant_command_generator.py
+++ b/antarest/study/storage/variantstudy/variant_command_generator.py
@@ -14,7 +14,7 @@
 import shutil
 import uuid
 from pathlib import Path
-from typing import Any, Callable, List, Optional, Tuple, Union, cast
+from typing import Callable, List, Optional, Tuple, Union, cast
 
 from antarest.core.utils.utils import StopWatch
 from antarest.study.storage.rawstudy.model.filesystem.config.model import FileStudyTreeConfig
diff --git a/antarest/study/storage/variantstudy/variant_study_service.py b/antarest/study/storage/variantstudy/variant_study_service.py
index fc9d447ea0..11444615da 100644
--- a/antarest/study/storage/variantstudy/variant_study_service.py
+++ b/antarest/study/storage/variantstudy/variant_study_service.py
@@ -14,10 +14,10 @@
 import logging
 import re
 import shutil
-import typing as t
 from datetime import datetime, timedelta
 from functools import reduce
 from pathlib import Path
+from typing import Callable, Dict, List, Optional, Sequence, Tuple, cast
 from uuid import uuid4
 
 import humanize
@@ -139,7 +139,7 @@ def get_command(self, study_id: str, command_id: str, params: RequestParameters)
         except ValueError:
             raise CommandNotFoundError(f"Command with id {command_id} not found") from None
 
-    def get_commands(self, study_id: str, params: RequestParameters) -> t.List[CommandDTOAPI]:
+    def get_commands(self, study_id: str, params: RequestParameters) -> List[CommandDTOAPI]:
         """
         Get commands list
         Args:
@@ -149,7 +149,7 @@ def get_commands(self, study_id: str, params: RequestParameters) -> t.List[Comma
         """
         study = self._get_variant_study(study_id, params)
 
-        id_to_name: t.Dict[int, str] = {}
+        id_to_name: Dict[int, str] = {}
         command_list = []
         for command in study.commands:
@@ -160,8 +160,8 @@ def get_commands(self, study_id: str, params: RequestParameters) -> t.List[Comma
         return command_list
 
     def convert_commands(
-        self, study_id: str, api_commands: t.List[CommandDTOAPI], params: RequestParameters
-    ) -> t.List[CommandDTO]:
+        self, study_id: str, api_commands: List[CommandDTOAPI], params: RequestParameters
+    ) -> List[CommandDTO]:
         study = self._get_variant_study(study_id, params, raw_study_accepted=True)
         study_version = StudyVersion.parse(study.version)
         return [
@@ -169,8 +169,8 @@ def convert_commands(
             for command in api_commands
         ]
 
-    def _check_commands_validity(self, study_id: str, commands: t.List[CommandDTO]) -> t.List[ICommand]:
-        command_objects: t.List[ICommand] = []
+    def _check_commands_validity(self, study_id: str, commands: List[CommandDTO]) -> List[ICommand]:
+        command_objects: List[ICommand] = []
         for i, command in enumerate(commands):
             try:
                 command_objects.extend(self.command_factory.to_command(command))
@@ -210,9 +210,9 @@ def append_command(self, study_id: str, command: CommandDTO, params: RequestPara
     def append_commands(
         self,
         study_id: str,
-        commands: t.List[CommandDTO],
+        commands: List[CommandDTO],
         params: RequestParameters,
-    ) -> t.List[str]:
+    ) -> List[str]:
         """
         Add command to list of commands (at the end)
         Args:
@@ -255,7 +255,7 @@ def append_commands(
     def replace_commands(
         self,
         study_id: str,
-        commands: t.List[CommandDTO],
+        commands: List[CommandDTO],
         params: RequestParameters,
     ) -> str:
         """
@@ -383,13 +383,13 @@ def export_commands_matrices(self, study_id: str, params: RequestParameters) ->
                 lambda: reduce(
                     lambda m, c: m + c.get_inner_matrices(),
                     self.command_factory.to_command(command.to_dto()),
-                    t.cast(t.List[str], []),
+                    cast(List[str], []),
                 ),
                 lambda e: logger.warning(f"Failed to parse command {command}", exc_info=e),
             )
             or []
         }
-        return t.cast(MatrixService, self.command_factory.command_context.matrix_service).download_matrix_list(
+        return cast(MatrixService, self.command_factory.command_context.matrix_service).download_matrix_list(
             list(matrices), f"{study.name}_{study.id}_matrices", params
         )
@@ -473,7 +473,7 @@ def get_all_variants_children(
     def walk_children(
         self,
         parent_id: str,
-        fun: t.Callable[[VariantStudy], None],
+        fun: Callable[[VariantStudy], None],
         bottom_first: bool,
     ) -> None:
         study = self._get_variant_study(
@@ -490,13 +490,13 @@ def walk_children(
         if bottom_first:
             fun(study)
 
-    def get_variants_parents(self, study_id: str, params: RequestParameters) -> t.List[StudyMetadataDTO]:
-        output_list: t.List[StudyMetadataDTO] = self._get_variants_parents(study_id, params)
+    def get_variants_parents(self, study_id: str, params: RequestParameters) -> List[StudyMetadataDTO]:
+        output_list: List[StudyMetadataDTO] = self._get_variants_parents(study_id, params)
         if output_list:
             output_list = output_list[1:]
         return output_list
 
-    def get_direct_parent(self, id: str, params: RequestParameters) -> t.Optional[StudyMetadataDTO]:
+    def get_direct_parent(self, id: str, params: RequestParameters) -> Optional[StudyMetadataDTO]:
         study = self._get_variant_study(id, params, raw_study_accepted=True)
         if study.parent_id is not None:
             parent = self._get_variant_study(study.parent_id, params, raw_study_accepted=True)
@@ -511,7 +511,7 @@ def get_direct_parent(self, id: str, params: RequestParameters) -> t.Optional[St
             )
         return None
 
-    def _get_variants_parents(self, id: str, params: RequestParameters) -> t.List[StudyMetadataDTO]:
+    def _get_variants_parents(self, id: str, params: RequestParameters) -> List[StudyMetadataDTO]:
         study = self._get_variant_study(id, params, raw_study_accepted=True)
         metadata = (
             self.get_study_information(
@@ -522,7 +522,7 @@ def _get_variants_parents(self, id: str, params: RequestParameters) -> t.List[St
                 study,
             )
         )
-        output_list: t.List[StudyMetadataDTO] = [metadata]
+        output_list: List[StudyMetadataDTO] = [metadata]
         if study.parent_id is not None:
             output_list.extend(
                 self._get_variants_parents(
@@ -663,7 +663,7 @@ def generate_task(
         metadata: VariantStudy,
         denormalize: bool = False,
         from_scratch: bool = False,
-        listener: t.Optional[ICommandListener] = None,
+        listener: Optional[ICommandListener] = None,
     ) -> str:
         study_id = metadata.id
         with FileLock(str(self.config.storage.tmp_dir / f"study-generation-{study_id}.lock")):
@@ -745,7 +745,7 @@ def generate_study_config(
         self,
         variant_study_id: str,
         params: RequestParameters,
-    ) -> t.Tuple[GenerationResultInfoDTO, FileStudyTreeConfig]:
+    ) -> Tuple[GenerationResultInfoDTO, FileStudyTreeConfig]:
         # Get variant study
         variant_study = self._get_variant_study(variant_study_id, params)
 
@@ -759,8 +759,8 @@ def _generate_study_config(
         self,
         original_study: VariantStudy,
         metadata: VariantStudy,
-        config: t.Optional[FileStudyTreeConfig],
-    ) -> t.Tuple[GenerationResultInfoDTO, FileStudyTreeConfig]:
+        config: Optional[FileStudyTreeConfig],
+    ) -> Tuple[GenerationResultInfoDTO, FileStudyTreeConfig]:
         parent_study = self.repository.get(metadata.parent_id)
         if parent_study is None:
             raise StudyNotFoundError(metadata.parent_id)
@@ -790,9 +790,9 @@ def _get_commands_and_notifier(
         variant_study: VariantStudy,
         notifier: ITaskNotifier,
         from_index: int = 0,
-    ) -> t.Tuple[t.List[t.List[ICommand]], t.Callable[[int, bool, str], None]]:
+    ) -> Tuple[List[List[ICommand]], Callable[[int, bool, str], None]]:
         # Generate
-        commands: t.List[t.List[ICommand]] = self._to_commands(variant_study, from_index)
+        commands: List[List[ICommand]] = self._to_commands(variant_study, from_index)
 
         def notify(command_index: int, command_result: bool, command_message: str) -> None:
             try:
@@ -819,8 +819,8 @@ def notify(command_index: int, command_result: bool, command_message: str) -> No
         return commands, notify
 
-    def _to_commands(self, metadata: VariantStudy, from_index: int = 0) -> t.List[t.List[ICommand]]:
-        commands: t.List[t.List[ICommand]] = [
+    def _to_commands(self, metadata: VariantStudy, from_index: int = 0) -> List[List[ICommand]]:
+        commands: List[List[ICommand]] = [
             self.command_factory.to_command(command_block.to_dto())
             for index, command_block in enumerate(metadata.commands)
             if from_index <= index
@@ -832,7 +832,7 @@ def _generate_config(
         variant_study: VariantStudy,
         config: FileStudyTreeConfig,
         notifier: ITaskNotifier = NoopNotifier(),
-    ) -> t.Tuple[GenerationResultInfoDTO, FileStudyTreeConfig]:
+    ) -> Tuple[GenerationResultInfoDTO, FileStudyTreeConfig]:
         commands, notify = self._get_commands_and_notifier(variant_study=variant_study, notifier=notifier)
         return self.generator.generate_config(commands, config, variant_study, notifier=notify)
@@ -904,7 +904,7 @@ def copy(
         self,
         src_meta: VariantStudy,
         dest_name: str,
-        groups: t.Sequence[str],
+        groups: Sequence[str],
         with_outputs: bool = False,
     ) -> VariantStudy:
         """
@@ -994,7 +994,7 @@ def _safe_generation(self, metadata: VariantStudy, timeout: int = DEFAULT_AWAIT_
     @staticmethod
     def _get_snapshot_last_executed_command_index(
         study: VariantStudy,
-    ) -> t.Optional[int]:
+    ) -> Optional[int]:
         if study.snapshot and study.snapshot.last_executed_command:
             last_executed_command_index = [command.id for command in study.commands].index(
                 study.snapshot.last_executed_command
@@ -1007,7 +1007,7 @@ def get_raw(
         self,
         metadata: VariantStudy,
         use_cache: bool = True,
-        output_dir: t.Optional[Path] = None,
+        output_dir: Optional[Path] = None,
     ) -> FileStudy:
         """
         Fetch a study raw tree object and its config
@@ -1028,7 +1028,7 @@ def get_raw(
         )
 
     @override
-    def get_study_sim_result(self, study: VariantStudy) -> t.List[StudySimResultDTO]:
+    def get_study_sim_result(self, study: VariantStudy) -> List[StudySimResultDTO]:
         """
         Get global result information
         Args:
@@ -1096,7 +1096,7 @@ def export_study_flat(
         metadata: VariantStudy,
         dst_path: Path,
         outputs: bool = True,
-        output_list_filter: t.Optional[t.List[str]] = None,
+        output_list_filter: Optional[List[str]] = None,
         denormalize: bool = True,
     ) -> None:
         self._safe_generation(metadata)
@@ -1118,7 +1118,7 @@ def export_study_flat(
     def get_synthesis(
         self,
         metadata: VariantStudy,
-        params: t.Optional[RequestParameters] = None,
+        params: Optional[RequestParameters] = None,
     ) -> FileStudyTreeConfigDTO:
         """
         Return study synthesis
diff --git a/antarest/study/web/raw_studies_blueprint.py b/antarest/study/web/raw_studies_blueprint.py
index a06eba7515..f4bc6e9fbd 100644
--- a/antarest/study/web/raw_studies_blueprint.py
+++ b/antarest/study/web/raw_studies_blueprint.py
@@ -14,8 +14,8 @@
 import http
 import io
 import logging
-import typing as t
 from pathlib import Path, PurePosixPath
+from typing import Annotated, Any, List, Sequence
 
 from fastapi import APIRouter, Body, Depends, File, HTTPException
 from fastapi.params import Query
@@ -82,10 +82,10 @@
 }
 
 DEFAULT_EXPORT_FORMAT = Query(TableExportFormat.CSV, alias="format", description="Export format", title="Export Format")
-PATH_TYPE = t.Annotated[str, Query(openapi_examples=get_path_examples())]
+PATH_TYPE = Annotated[str, Query(openapi_examples=get_path_examples())]
 
 
-def _split_comma_separated_values(value: str, *, default: t.Sequence[str] = ()) -> t.Sequence[str]:
+def _split_comma_separated_values(value: str, *, default: Sequence[str] = ()) -> Sequence[str]:
     """Split a comma-separated list of values into an ordered set of strings."""
     values = value.split(",") if value else default
     # drop whitespace around values
@@ -121,7 +121,7 @@ def get_study_data(
         depth: int = 3,
         formatted: bool = True,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         """
         Fetches raw data from a study, and returns the data
         in different formats based on the file type, or as a JSON response.
@@ -200,7 +200,7 @@ def get_study_file(
         uuid: str,
         path: PATH_TYPE = "/",
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         """
         Fetches for a file in its original format from a study folder
 
@@ -236,7 +236,7 @@ def get_study_file(
     )
     def delete_file(
         uuid: str,
-        path: t.Annotated[
+        path: Annotated[
            str,
            Query(
                openapi_examples={
@@ -245,7 +245,7 @@ def delete_file(
            ),
        ] = "/",
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         uuid = sanitize_uuid(uuid)
         logger.info(f"Deleting path {path} inside study {uuid}", extra={"user": current_user.id})
         study_service.delete_user_file_or_folder(uuid, path, current_user)
@@ -598,12 +598,12 @@ def replace_study_file(
         "/studies/{uuid}/raw/validate",
         summary="Launch test validation on study",
         tags=[APITag.study_raw_data],
-        response_model=t.List[str],
+        response_model=List[str],
     )
     def validate(
         uuid: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.List[str]:
+    ) -> List[str]:
         """
         Launches test validation on the raw data of a study.
         The validation is done recursively on all the files in the study
diff --git a/antarest/study/web/studies_blueprint.py b/antarest/study/web/studies_blueprint.py
index bb3785f9a9..65b7b3489d 100644
--- a/antarest/study/web/studies_blueprint.py
+++ b/antarest/study/web/studies_blueprint.py
@@ -13,9 +13,9 @@
 import collections
 import io
 import logging
-import typing as t
 from http import HTTPStatus
 from pathlib import Path
+from typing import Any, Dict, List, Optional, Sequence
 
 from fastapi import APIRouter, Depends, File, HTTPException, Query, Request
 from markupsafe import escape
@@ -50,7 +50,7 @@
 QUERY_REGEX = r"^\s*(?:\d+\s*(?:,\s*\d+\s*)*)?$"
 
 
-def _split_comma_separated_values(value: str, *, default: t.Sequence[str] = ()) -> t.Sequence[str]:
+def _split_comma_separated_values(value: str, *, default: Sequence[str] = ()) -> Sequence[str]:
     """Split a comma-separated list of values into an ordered set of strings."""
     values = value.split(",") if value else default
     # drop whitespace around values
@@ -88,15 +88,15 @@ def get_studies(
             ),
             alias="name",
         ),
-        managed: t.Optional[bool] = Query(None, description="Filter studies based on their management status."),
-        archived: t.Optional[bool] = Query(None, description="Filter studies based on their archive status."),
-        variant: t.Optional[bool] = Query(None, description="Filter studies based on their variant status."),
+        managed: Optional[bool] = Query(None, description="Filter studies based on their management status."),
+        archived: Optional[bool] = Query(None, description="Filter studies based on their archive status."),
+        variant: Optional[bool] = Query(None, description="Filter studies based on their variant status."),
         versions: str = Query("", description="Comma-separated list of versions for filtering.", regex=QUERY_REGEX),
         users: str = Query("", description="Comma-separated list of user IDs for filtering.", regex=QUERY_REGEX),
         groups: str = Query("", description="Comma-separated list of group IDs for filtering."),
         tags: str = Query("", description="Comma-separated list of tags for filtering."),
         study_ids: str = Query("", description="Comma-separated list of study IDs for filtering.", alias="studyIds"),
-        exists: t.Optional[bool] = Query(None, description="Filter studies based on their existence on disk."),
+        exists: Optional[bool] = Query(None, description="Filter studies based on their existence on disk."),
         workspace: str = Query("", description="Filter studies based on their workspace."),
         folder: str = Query("", description="Filter studies based on their folder."),
         sort_by: StudySortBy = Query(
@@ -108,7 +108,7 @@ def get_studies(
         page_size: NonNegativeInt = Query(
             0, description="Number of studies per page (0 = no limit).", alias="pageSize"
         ),
-    ) -> t.Dict[str, StudyMetadataDTO]:
+    ) -> Dict[str, StudyMetadataDTO]:
         """
         Get the list of studies matching the specified criteria.
@@ -175,15 +175,15 @@ def get_studies(
     def count_studies(
         current_user: JWTUser = Depends(auth.get_current_user),
         name: str = Query("", description="Case-insensitive: filter studies based on their name.", alias="name"),
-        managed: t.Optional[bool] = Query(None, description="Management status filter."),
-        archived: t.Optional[bool] = Query(None, description="Archive status filter."),
-        variant: t.Optional[bool] = Query(None, description="Variant status filter."),
+        managed: Optional[bool] = Query(None, description="Management status filter."),
+        archived: Optional[bool] = Query(None, description="Archive status filter."),
+        variant: Optional[bool] = Query(None, description="Variant status filter."),
         versions: str = Query("", description="Comma-separated versions filter.", regex=QUERY_REGEX),
         users: str = Query("", description="Comma-separated user IDs filter.", regex=QUERY_REGEX),
         groups: str = Query("", description="Comma-separated group IDs filter."),
         tags: str = Query("", description="Comma-separated tags filter."),
         study_ids: str = Query("", description="Comma-separated study IDs filter.", alias="studyIds"),
-        exists: t.Optional[bool] = Query(None, description="Existence on disk filter."),
+        exists: Optional[bool] = Query(None, description="Existence on disk filter."),
         workspace: str = Query("", description="Workspace filter."),
         folder: str = Query("", description="Study folder filter."),
     ) -> int:
@@ -245,7 +245,7 @@ def count_studies(
     def get_comments(
         uuid: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(f"Get comments of study {uuid}", extra={"user": current_user.id})
         params = RequestParameters(user=current_user)
         study_id = sanitize_uuid(uuid)
@@ -262,7 +262,7 @@ def edit_comments(
         uuid: str,
         data: CommentsDto,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Editing comments for study {uuid}",
             extra={"user": current_user.id},
@@ -409,7 +409,7 @@ def move_study(
         uuid: str,
         folder_dest: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Moving study {uuid} into folder '{folder_dest}'",
             extra={"user": current_user.id},
@@ -429,7 +429,7 @@ def create_study(
         version: str = "",
         groups: str = "",
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(f"Creating new study '{name}'", extra={"user": current_user.id})
         name_sanitized = escape(name)
         group_ids = _split_comma_separated_values(groups)
@@ -449,7 +449,7 @@ def create_study(
     def get_study_synthesis(
         uuid: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         study_id = sanitize_uuid(uuid)
         logger.info(
             f"Return a synthesis for study '{study_id}'",
@@ -468,7 +468,7 @@ def get_study_matrix_index(
         uuid: str,
         path: str = "",
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         study_id = sanitize_uuid(uuid)
         logger.info(
             f"Return the start date for input matrix '{study_id}'",
@@ -485,9 +485,9 @@ def get_study_matrix_index(
     )
     def export_study(
         uuid: str,
-        no_output: t.Optional[bool] = False,
+        no_output: Optional[bool] = False,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(f"Exporting study {uuid}", extra={"user": current_user.id})
         uuid_sanitized = sanitize_uuid(uuid)
@@ -504,7 +504,7 @@ def delete_study(
         uuid: str,
         children: bool = False,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(f"Deleting study {uuid}", extra={"user": current_user.id})
         uuid_sanitized = sanitize_uuid(uuid)
@@ -524,7 +524,7 @@ def import_output(
         uuid: str,
         output: bytes = File(...),
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Importing output for study {uuid}",
             extra={"user": current_user.id},
@@ -546,7 +546,7 @@ def change_owner(
         uuid: str,
         user_id: int,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Changing owner to {user_id} for study {uuid}",
             extra={"user": current_user.id},
@@ -566,7 +566,7 @@ def add_group(
         uuid: str,
         group_id: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Adding group {group_id} to study {uuid}",
             extra={"user": current_user.id},
@@ -587,7 +587,7 @@ def remove_group(
         uuid: str,
         group_id: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Removing group {group_id} to study {uuid}",
             extra={"user": current_user.id},
@@ -609,7 +609,7 @@ def set_public_mode(
         uuid: str,
         mode: PublicMode,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Setting public mode to {mode} for study {uuid}",
             extra={"user": current_user.id},
@@ -624,11 +624,11 @@ def set_public_mode(
         "/studies/_versions",
         tags=[APITag.study_management],
         summary="Show available study versions",
-        response_model=t.List[str],
+        response_model=List[str],
     )
     def get_study_versions(
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         params = RequestParameters(user=current_user)
         logger.info("Fetching version list")
         return StudyService.get_studies_versions(params=params)
@@ -642,7 +642,7 @@ def get_study_versions(
     def get_study_metadata(
         uuid: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(f"Fetching study {uuid} metadata", extra={"user": current_user.id})
         params = RequestParameters(user=current_user)
         study_metadata = study_service.get_study_information(uuid, params)
@@ -658,7 +658,7 @@ def update_study_metadata(
         uuid: str,
         study_metadata_patch: StudyMetadataPatchDTO,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Updating metadata for study {uuid}",
             extra={"user": current_user.id},
@@ -676,7 +676,7 @@ def output_variables_information(
         study_id: str,
         output_id: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         study_id = sanitize_uuid(study_id)
         output_id = sanitize_string(output_id)
         logger.info(f"Fetching whole output of the simulation {output_id} for study {study_id}")
@@ -696,7 +696,7 @@ def output_export(
         study_id: str,
         output_id: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         study_id = sanitize_uuid(study_id)
         output_id = sanitize_string(output_id)
         logger.info(f"Fetching whole output of the simulation {output_id} for study {study_id}")
@@ -720,7 +720,7 @@ def output_download(
         use_task: bool = False,
         tmp_export_file: Path = Depends(ftm.request_tmp_file),
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         study_id = sanitize_uuid(study_id)
         output_id = sanitize_string(output_id)
         logger.info(
@@ -774,7 +774,7 @@ def archive_output(
         study_id: str,
         output_id: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         study_id = sanitize_uuid(study_id)
         output_id = sanitize_string(output_id)
         logger.info(
@@ -799,7 +799,7 @@ def unarchive_output(
         study_id: str,
         output_id: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         study_id = sanitize_uuid(study_id)
         output_id = sanitize_string(output_id)
         logger.info(
@@ -840,12 +840,12 @@ def get_digest_file(
         "/studies/{study_id}/outputs",
         summary="Get global information about a study simulation result",
         tags=[APITag.study_outputs],
-        response_model=t.List[StudySimResultDTO],
+        response_model=List[StudySimResultDTO],
     )
     def sim_result(
         study_id: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Fetching output list for study {study_id}",
             extra={"user": current_user.id},
@@ -865,7 +865,7 @@ def set_sim_reference(
         output_id: str,
         status: bool = True,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Setting output {output_id} as reference simulation for study {study_id}",
             extra={"user": current_user.id},
@@ -884,7 +884,7 @@ def set_sim_reference(
     def archive_study(
         study_id: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(f"Archiving study {study_id}", extra={"user": current_user.id})
         study_id = sanitize_uuid(study_id)
         params = RequestParameters(user=current_user)
@@ -898,7 +898,7 @@ def archive_study(
     def unarchive_study(
         study_id: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(f"Unarchiving study {study_id}", extra={"user": current_user.id})
         study_id = sanitize_uuid(study_id)
         params = RequestParameters(user=current_user)
diff --git a/antarest/study/web/study_data_blueprint.py b/antarest/study/web/study_data_blueprint.py
index 35019731fb..befee9efcd 100644
--- a/antarest/study/web/study_data_blueprint.py
+++ b/antarest/study/web/study_data_blueprint.py
@@ -12,8 +12,8 @@
 import enum
 import logging
-import typing as t
 from http import HTTPStatus
+from typing import Any, Dict, List, Mapping, Optional, Sequence, cast
 
 import typing_extensions as te
 from fastapi import APIRouter, Body, Depends, Query
@@ -91,7 +91,7 @@ class BCKeyValueType(te.TypedDict):
     """Deprecated type for binding constraint key-value pair (used for update)"""
 
     key: str
-    value: t.Union[str, int, float, bool]
+    value: str | int | float | bool
 
 
 class ClusterType(enum.StrEnum):
@@ -127,14 +127,14 @@ def create_study_data_routes(study_service: StudyService, config: Config) -> API
         "/studies/{uuid}/areas",
         tags=[APITag.study_data],
         summary="Get all areas basic info",
-        response_model=t.Union[t.List[AreaInfoDTO], t.Dict[str, t.Any]],
+        response_model=List[AreaInfoDTO] | Dict[str, Any],
     )
     def get_areas(
         uuid: str,
         type: AreaType = Query(None),
         ui: bool = False,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Union[t.List[AreaInfoDTO], t.Dict[str, t.Any]]:
+    ) -> List[AreaInfoDTO] | Dict[str, Any]:
         logger.info(
             f"Fetching area list (type={type}) for study {uuid}",
             extra={"user": current_user.id},
@@ -147,12 +147,12 @@ def get_areas(
         "/studies/{uuid}/links",
         tags=[APITag.study_data],
         summary="Get all links",
-        response_model=t.List[LinkDTO],
+        response_model=List[LinkDTO],
     )
     def get_links(
         uuid: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.List[LinkDTO]:
+    ) -> List[LinkDTO]:
         logger.info(
             f"Fetching link list for study {uuid}",
             extra={"user": current_user.id},
@@ -171,7 +171,7 @@ def create_area(
         uuid: str,
         area_creation_info: AreaCreationDTO,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Creating new area for study {uuid}",
             extra={"user": current_user.id},
@@ -209,7 +209,7 @@ def update_link(
         area_to: str,
         link_update_dto: LinkBaseDTO,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Updating link {area_from} -> {area_to} for study {uuid}",
             extra={"user": current_user.id},
@@ -229,7 +229,7 @@ def update_area_ui(
         area_ui: UpdateAreaUi,
         layer: str = "0",
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Updating area ui {area_id} for study {uuid}",
             extra={"user": current_user.id},
@@ -246,9 +246,9 @@ def update_area_ui(
     def update_area_info(
         uuid: str,
         area_id: str,
-        area_patch_dto: t.Union[PatchArea, t.Dict[str, PatchCluster]],
+        area_patch_dto: PatchArea | Dict[str, PatchCluster],
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Updating area {area_id} for study {uuid}",
             extra={"user": current_user.id},
@@ -274,7 +274,7 @@ def delete_area(
         uuid: str,
         area_id: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Removing area {area_id} in study {uuid}",
             extra={"user": current_user.id},
@@ -296,7 +296,7 @@ def delete_link(
         area_from: str,
         area_to: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Removing link {area_from}%{area_to} in study {uuid}",
             extra={"user": current_user.id},
@@ -311,12 +311,12 @@ def delete_link(
         "/studies/{uuid}/layers",
         tags=[APITag.study_data],
         summary="Get all layers info",
-        response_model=t.List[LayerInfoDTO],
+        response_model=List[LayerInfoDTO],
     )
     def get_layers(
         uuid: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.List[LayerInfoDTO]:
+    ) -> List[LayerInfoDTO]:
         logger.info(
             f"Fetching layer list for study {uuid}",
             extra={"user": current_user.id},
@@ -353,7 +353,7 @@ def update_layer(
         uuid: str,
         layer_id: str,
         name: str = "",
-        areas: t.Optional[t.List[str]] = None,
+        areas: Optional[List[str]] = None,
         current_user: JWTUser = Depends(auth.get_current_user),
     ) -> None:
         logger.info(
@@ -391,12 +391,12 @@ def remove_layer(
         "/studies/{uuid}/districts",
         tags=[APITag.study_data],
         summary="Get the list of districts defined in this study",
-        response_model=t.List[DistrictInfoDTO],
+        response_model=List[DistrictInfoDTO],
     )
     def get_districts(
         uuid: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.List[DistrictInfoDTO]:
+    ) -> List[DistrictInfoDTO]:
         logger.info(
             f"Fetching districts list for study {uuid}",
             extra={"user": current_user.id},
@@ -492,7 +492,7 @@ def set_hydro_form_values(
         area_id: str,
         data: ManagementOptionsFormFields,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             msg=f"Updating Hydro management config for area {area_id} of study {uuid}",
             extra={"user": current_user.id},
@@ -551,9 +551,9 @@ def update_inflow_structure(
     def edit_matrix(
         uuid: str,
         path: str,
-        matrix_edit_instructions: t.List[MatrixEditInstruction] = Body(...),
+        matrix_edit_instructions: List[MatrixEditInstruction] = Body(...),
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         # NOTE: This Markdown documentation is reflected in the Swagger API
         """
         Edit a matrix in a study based on the provided edit instructions.
@@ -610,13 +610,13 @@ def set_thematic_trimming(
         path="/studies/{uuid}/config/playlist/form",
         tags=[APITag.study_data],
         summary="Get MC Scenario playlist data for table form",
-        response_model=t.Dict[int, PlaylistColumns],
+        response_model=Dict[int, PlaylistColumns],
         response_model_exclude_none=True,
     )
     def get_playlist(
         uuid: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Dict[int, PlaylistColumns]:
+    ) -> Dict[int, PlaylistColumns]:
         logger.info(
             f"Getting MC Scenario playlist data for study {uuid}",
             extra={"user": current_user.id},
@@ -633,7 +633,7 @@ def get_playlist(
     )
     def set_playlist(
         uuid: str,
-        data: t.Dict[int, PlaylistColumns],
+        data: Dict[int, PlaylistColumns],
         current_user: JWTUser = Depends(auth.get_current_user),
     ) -> None:
         logger.info(
@@ -648,12 +648,12 @@ def set_playlist(
         "/studies/{uuid}/config/playlist",
         tags=[APITag.study_data],
         summary="Get playlist config",
-        response_model=t.Optional[t.Dict[int, float]],
+        response_model=Optional[Dict[int, float]],
     )
     def get_playlist_config(
         uuid: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Optional[t.Dict[int, float]]:
+    ) -> Optional[Dict[int, float]]:
         logger.info(
             f"Fetching playlist config for study {uuid}",
             extra={"user": current_user.id},
@@ -671,10 +671,10 @@ def set_playlist_config(
         uuid: str,
         active: bool = True,
         reverse: bool = False,
-        playlist: t.Optional[t.List[int]] = Body(default=None),
-        weights: t.Optional[t.Dict[int, int]] = Body(default=None),
+        playlist: Optional[List[int]] = Body(default=None),
+        weights: Optional[Dict[int, int]] = Body(default=None),
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Updating playlist config for study {uuid}",
             extra={"user": current_user.id},
@@ -706,13 +706,13 @@ def get_scenario_builder_config(
         path="/studies/{uuid}/config/scenariobuilder/{scenario_type}",
         tags=[APITag.study_data],
         summary="Get MC Scenario builder config",
-        response_model=t.Dict[str, SBTableForm],
+        response_model=Dict[str, SBTableForm],
     )
     def get_scenario_builder_config_by_type(
         uuid: str,
         scenario_type: ScenarioType,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Dict[str, SBTableForm]:
+    ) -> Dict[str, SBTableForm]:
         """
         Retrieve the scenario matrix corresponding to a specified scenario type.
 
@@ -806,14 +806,14 @@ def update_scenario_builder_config(
         path="/studies/{uuid}/config/scenariobuilder/{scenario_type}",
         tags=[APITag.study_data],
         summary="Set MC Scenario builder config",
-        response_model=t.Dict[str, SBTableForm],
+        response_model=Dict[str, SBTableForm],
     )
     def update_scenario_builder_config_by_type(
         uuid: str,
         scenario_type: ScenarioType,
-        data: t.Dict[str, SBTableForm],
+        data: Dict[str, SBTableForm],
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Dict[str, SBTableForm]:
+    ) -> Dict[str, SBTableForm]:
         """
         Update the scenario matrix corresponding to a specified scenario type.
@@ -1098,7 +1098,7 @@ def update_table_mode(
     )
     def update_version(
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         params = RequestParameters(user=current_user)
         study_service.check_and_update_all_study_versions_in_database(params)
@@ -1106,11 +1106,11 @@ def update_version(
         "/studies/{uuid}/bindingconstraints",
         tags=[APITag.study_data],
         summary="Get binding constraint list",
-        response_model=t.List[ConstraintOutput],
+        response_model=List[ConstraintOutput],
     )
     def get_binding_constraint_list(
         uuid: str,
-        enabled: t.Optional[bool] = Query(None, description="Filter results based on enabled status"),
+        enabled: Optional[bool] = Query(None, description="Filter results based on enabled status"),
         operator: BindingConstraintOperator = Query(None, description="Filter results based on operator"),
         comments: str = Query("", description="Filter results based on comments (word match)"),
         group: str = Query("", description="filter binding constraints based on group name (exact match)"),
@@ -1140,7 +1140,7 @@ def get_binding_constraint_list(
             alias="clusterId",
         ),
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Sequence[ConstraintOutput]:
+    ) -> Sequence[ConstraintOutput]:
         logger.info(
             f"Fetching binding constraint list for study {uuid}",
             extra={"user": current_user.id},
@@ -1206,7 +1206,7 @@ def update_binding_constraint(
     def get_grouped_constraints(
         uuid: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Mapping[str, t.Sequence[ConstraintOutput]]:
+    ) -> Mapping[str, Sequence[ConstraintOutput]]:
         """
         Get the list of binding constraint groups for the study.
 
@@ -1266,7 +1266,7 @@ def get_constraints_by_group(
         uuid: str,
         group: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Sequence[ConstraintOutput]:
+    ) -> Sequence[ConstraintOutput]:
         """
         Get the binding constraint group for the study.
@@ -1382,7 +1382,7 @@ def delete_binding_constraint(
         response_model=None,
     )
     def delete_multiple_binding_constraints(
-        uuid: str, binding_constraints_ids: t.List[str], current_user: JWTUser = Depends(auth.get_current_user)
+        uuid: str, binding_constraints_ids: List[str], current_user: JWTUser = Depends(auth.get_current_user)
     ) -> None:
         logger.info(
             f"Deleting the binding constraints {binding_constraints_ids!r} for study {uuid}",
@@ -1429,7 +1429,7 @@ def add_constraint_term(
     def add_constraint_terms(
         uuid: str,
         binding_constraint_id: str,
-        terms: t.Sequence[ConstraintTerm],
+        terms: Sequence[ConstraintTerm],
         current_user: JWTUser = Depends(auth.get_current_user),
     ) -> None:
         """
@@ -1483,7 +1483,7 @@ def update_constraint_term(
     def update_constraint_terms(
         uuid: str,
         binding_constraint_id: str,
-        terms: t.Sequence[ConstraintTerm],
+        terms: Sequence[ConstraintTerm],
         current_user: JWTUser = Depends(auth.get_current_user),
     ) -> None:
         """
@@ -1544,8 +1544,8 @@ def get_allocation_matrix(
         """
         params = RequestParameters(user=current_user)
         study = study_service.check_study_access(uuid, StudyPermissionType.READ, params)
-        all_areas = t.cast(
-            t.List[AreaInfoDTO],  # because `ui=False`
+        all_areas = cast(
+            List[AreaInfoDTO],  # because `ui=False`
             study_service.get_all_areas(uuid, area_type=AreaType.AREA, ui=False, params=params),
         )
         return study_service.allocation_manager.get_allocation_matrix(study, all_areas)
@@ -1572,8 +1572,8 @@ def get_allocation_form_fields(
         """
         params = RequestParameters(user=current_user)
         study = study_service.check_study_access(uuid, StudyPermissionType.READ, params)
-        all_areas = t.cast(
-            t.List[AreaInfoDTO],  # because `ui=False`
+        all_areas = cast(
+            List[AreaInfoDTO],  # because `ui=False`
             study_service.get_all_areas(uuid, area_type=AreaType.AREA, ui=False, params=params),
         )
         return study_service.allocation_manager.get_allocation_form_fields(all_areas, study, area_id)
@@ -1610,8 +1610,8 @@ def set_allocation_form_fields(
         """
         params = RequestParameters(user=current_user)
         study = study_service.check_study_access(uuid, StudyPermissionType.WRITE, params)
-        all_areas = t.cast(
-            t.List[AreaInfoDTO],  # because `ui=False`
+        all_areas = cast(
+            List[AreaInfoDTO],  # because `ui=False`
             study_service.get_all_areas(uuid, area_type=AreaType.AREA, ui=False, params=params),
         )
         return study_service.allocation_manager.set_allocation_form_fields(all_areas, study, area_id, data)
@@ -1624,7 +1624,7 @@ def set_allocation_form_fields(
     )
     def get_correlation_matrix(
         uuid: str,
-        columns: t.Optional[str] = Query(
+        columns: Optional[str] = Query(
             default=None,
             openapi_examples={
                 "all areas": {
@@ -1660,8 +1660,8 @@ def get_correlation_matrix(
         """
         params = RequestParameters(user=current_user)
         study = study_service.check_study_access(uuid, StudyPermissionType.READ, params)
-        all_areas = t.cast(
-            t.List[AreaInfoDTO],  # because `ui=False`
+        all_areas = cast(
+            List[AreaInfoDTO],  # because `ui=False`
             study_service.get_all_areas(uuid, area_type=AreaType.AREA, ui=False, params=params),
         )
         manager = CorrelationManager(study_service.storage_service)
@@ -1708,8 +1708,8 @@ def set_correlation_matrix(
         """
         params = RequestParameters(user=current_user)
         study = study_service.check_study_access(uuid, StudyPermissionType.WRITE, params)
-        all_areas = t.cast(
-            t.List[AreaInfoDTO],  # because `ui=False`
+        all_areas = cast(
+            List[AreaInfoDTO],  # because `ui=False`
             study_service.get_all_areas(uuid, area_type=AreaType.AREA, ui=False, params=params),
         )
         manager = CorrelationManager(study_service.storage_service)
@@ -1737,8 +1737,8 @@ def get_correlation_form_fields(
         """
         params = RequestParameters(user=current_user)
         study = study_service.check_study_access(uuid, StudyPermissionType.READ, params)
-        all_areas = t.cast(
-            t.List[AreaInfoDTO],  # because `ui=False`
+        all_areas = cast(
+            List[AreaInfoDTO],  # because `ui=False`
             study_service.get_all_areas(uuid, area_type=AreaType.AREA, ui=False, params=params),
         )
         manager = CorrelationManager(study_service.storage_service)
@@ -1776,8 +1776,8 @@ def set_correlation_form_fields(
         """
         params = RequestParameters(user=current_user)
         study = study_service.check_study_access(uuid, StudyPermissionType.WRITE, params)
-        all_areas = t.cast(
-            t.List[AreaInfoDTO],  # because `ui=False`
+        all_areas = cast(
+            List[AreaInfoDTO],  # because `ui=False`
             study_service.get_all_areas(uuid, area_type=AreaType.AREA, ui=False, params=params),
         )
         manager = CorrelationManager(study_service.storage_service)
@@ -1896,13 +1896,13 @@ def set_properties_form_values(
         path="/studies/{uuid}/areas/{area_id}/clusters/renewable",
         tags=[APITag.study_data],
         summary="Get all renewable clusters",
-        response_model=t.Sequence[RenewableClusterOutput],
+        response_model=Sequence[RenewableClusterOutput],
     )
     def get_renewable_clusters(
         uuid: str,
         area_id: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Sequence[RenewableClusterOutput]:
+    ) -> Sequence[RenewableClusterOutput]:
         logger.info(
             "Getting renewable clusters for study %s and area %s",
             uuid,
@@ -2028,7 +2028,7 @@ def redirect_update_renewable_cluster(
     def delete_renewable_clusters(
         uuid: str,
         area_id: str,
-        cluster_ids: t.Sequence[str],
+        cluster_ids: Sequence[str],
         current_user: JWTUser = Depends(auth.get_current_user),
     ) -> None:
         """
@@ -2051,13 +2051,13 @@ def delete_renewable_clusters(
         path="/studies/{uuid}/areas/{area_id}/clusters/thermal",
         tags=[APITag.study_data],
         summary="Get thermal clusters for a given area",
-        response_model=t.Sequence[ThermalClusterOutput],
+        response_model=Sequence[ThermalClusterOutput],
     )
     def get_thermal_clusters(
         uuid: str,
         area_id: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Sequence[ThermalClusterOutput]:
+    ) -> Sequence[ThermalClusterOutput]:
         """
         Retrieve the list of thermal clusters for a specified area.
 
@@ -2242,7 +2242,7 @@ def validate_cluster_series(
     def delete_thermal_clusters(
         uuid: str,
         area_id: str,
-        cluster_ids: t.Sequence[str],
+        cluster_ids: Sequence[str],
         current_user: JWTUser = Depends(auth.get_current_user),
     ) -> None:
         """
@@ -2309,13 +2309,13 @@ def get_st_storage(
         path="/studies/{uuid}/areas/{area_id}/storages",
         tags=[APITag.study_data],
         summary="Get the list of short-term storage properties",
-        response_model=t.Sequence[STStorageOutput],
+        response_model=Sequence[STStorageOutput],
     )
     def get_st_storages(
         uuid: str,
         area_id: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Sequence[STStorageOutput]:
+    ) -> Sequence[STStorageOutput]:
         """
         Retrieve the short-term storages by given uuid and area ID of a study.
@@ -2559,7 +2559,7 @@ def update_st_storage(
     def delete_st_storages(
         uuid: str,
         area_id: str,
-        storage_ids: t.Sequence[str],
+        storage_ids: Sequence[str],
         current_user: JWTUser = Depends(auth.get_current_user),
     ) -> None:
         """
@@ -2593,7 +2593,7 @@ def duplicate_cluster(
         source_cluster_id: str,
         new_cluster_name: str = Query(..., alias="newName", title="New Cluster Name"),
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Union[STStorageOutput, ThermalClusterOutput, RenewableClusterOutput]:
+    ) -> STStorageOutput | ThermalClusterOutput | RenewableClusterOutput:
         logger.info(
             f"Duplicates {cluster_type.value} {source_cluster_id} of {area_id} for study {uuid}",
             extra={"user": current_user.id},
@@ -2601,7 +2601,7 @@ def duplicate_cluster(
         params = RequestParameters(user=current_user)
         study = study_service.check_study_access(uuid, StudyPermissionType.WRITE, params)
 
-        manager: t.Union[STStorageManager, RenewableManager, ThermalManager]
+        manager: STStorageManager | RenewableManager | ThermalManager
         if cluster_type == ClusterType.ST_STORAGES:
             manager = STStorageManager(study_service.storage_service)
         elif cluster_type == ClusterType.RENEWABLES:
diff --git a/antarest/study/web/xpansion_studies_blueprint.py b/antarest/study/web/xpansion_studies_blueprint.py
index 0871efd0f0..72bf5fdb2d 100644
--- a/antarest/study/web/xpansion_studies_blueprint.py
+++ b/antarest/study/web/xpansion_studies_blueprint.py
@@ -11,7 +11,7 @@
 # This file is part of the Antares project.
 
 import logging
-import typing as t
+from typing import Any, Optional, Sequence
 
 from fastapi import APIRouter, Depends, File, UploadFile
 from starlette.responses import Response
@@ -52,9 +52,9 @@ def create_xpansion_routes(study_service: StudyService, config: Config) -> APIRo
     )
     def create_xpansion_configuration(
         uuid: str,
-        file: t.Optional[UploadFile] = File(None),
+        file: Optional[UploadFile] = File(None),
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Creating Xpansion Configuration for study {uuid}",
             extra={"user": current_user.id},
@@ -70,7 +70,7 @@ def create_xpansion_configuration(
     def delete_xpansion_configuration(
         uuid: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Deleting Xpansion Configuration for study {uuid}",
             extra={"user": current_user.id},
@@ -167,7 +167,7 @@ def get_candidate(
     def get_candidates(
         uuid: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Sequence[XpansionCandidateDTO]:
+    ) -> Sequence[XpansionCandidateDTO]:
         logger.info("Fetching study list", extra={"user": current_user.id})
         params = RequestParameters(user=current_user)
         return study_service.get_candidates(uuid, params)
@@ -182,7 +182,7 @@ def update_candidate(
         candidate_name: str,
         xpansion_candidate_dto: XpansionCandidateDTO,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Updating xpansion candidate {xpansion_candidate_dto.name} of the study {uuid}",
             extra={"user": current_user.id},
@@ -199,7 +199,7 @@ def delete_candidate(
         uuid: str,
         candidate_name: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Deleting candidate {candidate_name} of the study {uuid}",
             extra={"user": current_user.id},
@@ -217,7 +217,7 @@ def add_resource(
         resource_type: XpansionResourceFileType,
         file: UploadFile = File(...),
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Add xpansion {resource_type} files in the study {uuid}",
             extra={"user": current_user.id},
@@ -239,7 +239,7 @@ def delete_resource(
         resource_type: XpansionResourceFileType,
         filename: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Deleting xpansion {resource_type} file from the study {uuid}",
             extra={"user": current_user.id},
@@ -261,7 +261,7 @@ def get_resource_content(
         resource_type: XpansionResourceFileType,
         filename: str,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Getting xpansion {resource_type} file {filename} from the study {uuid}",
             extra={"user": current_user.id},
@@ -271,9 +271,7 @@ def get_resource_content(
             StudyPermissionType.READ,
             RequestParameters(user=current_user),
         )
-        output: t.Union[JSON, bytes, str] = study_service.xpansion_manager.get_resource_content(
-            study, resource_type, filename
-        )
+        output: JSON | bytes | str = study_service.xpansion_manager.get_resource_content(study, resource_type, filename)
 
         if isinstance(output, bytes):
             try:
@@ -294,7 +292,7 @@ def list_resources(
         uuid: str,
         resource_type: XpansionResourceFileType,
         current_user: JWTUser = Depends(auth.get_current_user),
-    ) -> t.Any:
+    ) -> Any:
         logger.info(
             f"Getting xpansion {resource_type} resources files from the study {uuid}",
             extra={"user": current_user.id},
diff --git a/tests/integration/study_data_blueprint/test_link.py b/tests/integration/study_data_blueprint/test_link.py
index 7ba267c7cd..9585ad674d 100644
--- a/tests/integration/study_data_blueprint/test_link.py
+++ b/tests/integration/study_data_blueprint/test_link.py
@@ -9,6 +9,8 @@
 # SPDX-License-Identifier: MPL-2.0
 #
 # This file is part of the Antares project.
+import re
+
 import pytest
 from starlette.testclient import TestClient
@@ -284,11 +286,14 @@ def test_link_820(self, client: TestClient, user_access_token: str, study_type:
         )
         assert res.status_code == 422, res.json()
-        expected = {
-            "description": "Invalid value(s) in filters: centurial. Allowed values are: hourly, daily, weekly, monthly, annual.",
-            "exception": "LinkValidationError",
-        }
-        assert expected == res.json()
+
+        res_json = res.json()
+        assert res_json["exception"] == "LinkValidationError"
+        match = re.search(r"Allowed values are: (.*)\.", res_json["description"])
+        assert match, f"Unexpected error message format: {res_json['description']}"
+        res_values = sorted(match.group(1).split(", "))
+        expected_values = sorted(["daily", "hourly", "monthly", "weekly", "annual"])
+        assert res_values == expected_values, f"Returned values: {res_values}, expected: {expected_values}"
 
         # Test create link with empty filters
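
Note on the typing migration applied across all the files above: the aliased `import typing as t` is dropped in favour of direct `from typing import ...` imports, and `t.Union[X, Y]` becomes the PEP 604 form `X | Y`. A minimal before/after sketch of the pattern; the function names are illustrative and not part of the patch, and the `X | Y` syntax assumes Python >= 3.10 wherever the annotation is evaluated at runtime (FastAPI's `response_model=` does evaluate it):

    # Before: aliased typing module, verbose generics.
    import typing as t

    def names_before(items: t.Sequence[t.Union[int, str]]) -> t.List[str]:
        return [str(item) for item in items]

    # After: direct imports; "int | str" needs Python >= 3.10 at runtime,
    # or `from __future__ import annotations` when used only in annotations.
    from typing import List, Sequence

    def names_after(items: Sequence[int | str]) -> List[str]:
        return [str(item) for item in items]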
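
Note on the `cast(List[str], [])` initializer kept in `export_commands_matrices`: `typing.cast` performs no conversion at runtime; it only pins the type of the empty accumulator so the type checker can infer the result of the `reduce`. A standalone sketch of the same idiom, with made-up data:

    from functools import reduce
    from typing import List, cast

    groups = [["m1", "m2"], [], ["m3"]]
    # cast() is a runtime no-op: [] stays an empty list, but the checker
    # now types the accumulator (and the whole reduce) as List[str].
    flat = reduce(lambda acc, group: acc + group, groups, cast(List[str], []))
    assert flat == ["m1", "m2", "m3"]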
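
Note on the change to `test_link_820` in tests/integration/study_data_blueprint/test_link.py: asserting the whole error payload against one hard-coded string breaks as soon as the server lists the allowed frequencies in a different order. The rewritten test extracts the list with a regex and compares it order-insensitively. The same pattern in isolation, using a stand-in message rather than a real API response:

    import re

    message = "Invalid value(s) in filters: centurial. Allowed values are: hourly, daily, weekly, monthly, annual."
    match = re.search(r"Allowed values are: (.*)\.", message)
    assert match is not None, f"Unexpected message format: {message}"
    # sorted() makes the comparison independent of the server's ordering.
    assert sorted(match.group(1).split(", ")) == sorted(["annual", "daily", "hourly", "monthly", "weekly"])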