From dbc1b4201810b96e15fe188af841224e3633f21e Mon Sep 17 00:00:00 2001 From: Maciej Bartkowiak Date: Fri, 20 Oct 2023 11:52:05 +0100 Subject: [PATCH 1/2] Remove all the residual instances of netCDF in the code --- .../Src/DistributedComputing/MasterSlave.py | 605 ------------------ MDANSE/Src/DistributedComputing/Slave.py | 49 -- .../Src/DistributedComputing/TaskManager.py | 605 ------------------ MDANSE/Src/DistributedComputing/__init__.py | 14 - .../NetCDFInputFileConfigurator.py | 112 ---- MDANSE/Src/Framework/Formats/NetCDFFormat.py | 89 --- .../Framework/InputData/NetCDFInputData.py | 63 -- .../Src/Framework/Jobs/AngularCorrelation.py | 2 +- MDANSE/Src/Framework/Jobs/AreaPerMolecule.py | 2 +- MDANSE/Src/Framework/Jobs/Converter.py | 1 - .../Src/Framework/Jobs/CoordinationNumber.py | 2 +- .../Jobs/CurrentCorrelationFunction.py | 16 +- MDANSE/Src/Framework/Jobs/Density.py | 2 +- MDANSE/Src/Framework/Jobs/DensityOfStates.py | 2 +- MDANSE/Src/Framework/Jobs/DensityProfile.py | 2 +- .../Jobs/DipoleAutoCorrelationFunction.py | 2 +- .../Src/Framework/Jobs/DistanceHistogram.py | 2 +- .../Jobs/DynamicCoherentStructureFactor.py | 2 +- .../Jobs/DynamicIncoherentStructureFactor.py | 2 +- MDANSE/Src/Framework/Jobs/Eccentricity.py | 2 +- .../Jobs/ElasticIncoherentStructureFactor.py | 2 +- ...aussianDynamicIncoherentStructureFactor.py | 2 +- .../Jobs/GeneralAutoCorrelationFunction.py | 2 +- .../Framework/Jobs/McStasVirtualInstrument.py | 6 +- .../Framework/Jobs/MeanSquareDisplacement.py | 2 +- MDANSE/Src/Framework/Jobs/MolecularTrace.py | 2 +- .../NeutronDynamicTotalStructureFactor.py | 2 +- MDANSE/Src/Framework/Jobs/OrderParameter.py | 2 +- .../Jobs/PositionAutoCorrelationFunction.py | 2 +- MDANSE/Src/Framework/Jobs/RadiusOfGyration.py | 2 +- .../Framework/Jobs/RootMeanSquareDeviation.py | 2 +- .../Jobs/RootMeanSquareFluctuation.py | 2 +- .../Jobs/SolventAccessibleSurface.py | 2 +- .../Framework/Jobs/StaticStructureFactor.py | 2 +- .../StructureFactorFromScatteringFunction.py | 14 +- MDANSE/Src/Framework/Jobs/Temperature.py | 2 +- .../Jobs/VelocityAutoCorrelationFunction.py | 2 +- MDANSE/Src/Framework/Jobs/Voronoi.py | 2 +- .../Jobs/XRayStaticStructureFactor.py | 2 +- MDANSE/requirements.txt | 1 - 40 files changed, 46 insertions(+), 1585 deletions(-) delete mode 100644 MDANSE/Src/DistributedComputing/MasterSlave.py delete mode 100644 MDANSE/Src/DistributedComputing/Slave.py delete mode 100644 MDANSE/Src/DistributedComputing/TaskManager.py delete mode 100644 MDANSE/Src/DistributedComputing/__init__.py delete mode 100644 MDANSE/Src/Framework/Configurators/NetCDFInputFileConfigurator.py delete mode 100644 MDANSE/Src/Framework/Formats/NetCDFFormat.py delete mode 100644 MDANSE/Src/Framework/InputData/NetCDFInputData.py diff --git a/MDANSE/Src/DistributedComputing/MasterSlave.py b/MDANSE/Src/DistributedComputing/MasterSlave.py deleted file mode 100644 index 4c1101346e..0000000000 --- a/MDANSE/Src/DistributedComputing/MasterSlave.py +++ /dev/null @@ -1,605 +0,0 @@ -# ************************************************************************** -# -# MDANSE: Molecular Dynamics Analysis for Neutron Scattering Experiments -# -# @file Src/DistributedComputing/MasterSlave.py -# @brief Implements module/class/test MasterSlave -# -# @homepage https://www.isis.stfc.ac.uk/Pages/MDANSEproject.aspx -# @license GNU General Public License v3 or higher (see LICENSE) -# @copyright Institut Laue Langevin 2013-now -# @copyright ISIS Neutron and Muon Source, STFC, UKRI 2021-now -# @authors Scientific Computing Group at ILL 
(see AUTHORS) -# -# ************************************************************************** - -""" -Distributed computing using a master-slave model - -The classes in this module provide a simple way to parallelize independent -computations in a program. The communication is handled by the Pyro package, -which must be installed before this module can be used. -Pyro can be obtained from http://pyro.sourceforge.net/. -By default, the Pyro name server is used to initialize communication. Please -read the Pyro documentation for learning how to use the name server. - -The principle of the master-slave model is that there is a single master -process that defines computational tasks and any number of slave processes -that execute these tasks. The master defines task requests and then waits -for the results to come in. The slaves wait for a task request, execute it, -return the result, and wait for the next task. There can be any number of -slave processes, which can be started and terminated independently, the -only condition being that no slave process can be started before its master -process. This setup makes it possible to perform a lengthy computation using -a variable number of processors. - -Communication between the master and the slave processes passes through -a TaskManager object that is created automatically as part of the master -process. The task manager stores and hands out task requests and results. -The task manager also keeps track of the slave processes. When a slave process -disappears (because it was killed or because of a hardware failure), the -task manager re-schedules its active task(s) to another slave process. This -makes the master-slave system very fault tolerant. - -Each task manager has a label that makes it possible to distinguish between -several master-slave groups running at the same time. It is by the label -that slave processes identify the master process for which they work. - -The script "task_manager" prints statistics about a currently active task -manager; it takes the label as an argument. It shows the number of currently -active processes (master plus slaves), the number of waiting and running -tasks, and the number of results waiting to be picked up. - -The script Examples/master_slave_demo.py illustrates the use of the -master-slave setup in a simple script. Both master and slave processes -are defined in the same script. The scripts Examples/master.py and -Examples/slave.py show a master-slave setup using two distinct scripts. -This is more flexible because task requests and result retrievals -can be made from anywhere in the master code. - -:author: Konrad Hinsen -""" - -import copy -import time -import threading - -import Pyro.core -import Pyro.errors -import Pyro.naming - -from MDANSE.DistributedComputing.TaskManager import TaskManager, TaskManagerTermination - -from MDANSE import PLATFORM - -debug = False - - -class MasterProcessError(Exception): - pass - - -class MasterProcess(object): - """ - Master process in a master-slave setup - - A master process in a program is implemented by subclassing - this class and overriding the method "run", which calls the methods - "requestTask" and "retrieveResult". The process is then - launched by calling the method "start". - """ - - def __init__(self, label, use_name_server=True, maxTrials=None): - """ - :param label: the label that identifies the task manager - :type label: C{str} - - :param use_name_server: If C{True} (default), the task manager is - registered with the Pyro name server. 
If - C{False}, the name server is not used and - slave processes need to know the host - on which the master process is running. - :type use_name_server: C{bool} - """ - self.label = label - self.task_manager = TaskManager() - self.process_id = self.task_manager.registerProcess(info=getMachineInfo()) - Pyro.core.initServer(banner=False) - self.pyro_ns = None - if use_name_server: - self.pyro_ns = Pyro.naming.NameServerLocator().getNS() - self.manager_thread = threading.Thread(target=self.taskManagerThread) - self.manager_thread.start() - self.global_states = {} - nTrials = 0 - while 1: - try: - self.pyro_daemon.port - except AttributeError: - nTrials += 1 - if debug: - print("Failed to access pyro_daemon. Trial no: %d" % nTrials) - continue - if maxTrials is not None: - if nTrials == maxTrials: - raise MasterProcessError( - "The number of trials to access the pyro daemon has reached its limit. Abort master process creation" - ) - else: - break - - def taskManagerThread(self): - """ - This method represents the code that is executed in a background - thread for remote access to the task manager. - """ - self.pyro_daemon = Pyro.core.Daemon() - if self.pyro_ns is not None: - # Make another name server proxy for this thread - pyro_ns = Pyro.naming.NameServerLocator().getNS() - self.pyro_daemon.useNameServer(pyro_ns) - try: - pyro_ns.createGroup("TaskManager") - except Pyro.errors.NamingError: - pass - self.pyro_daemon.connect(self.task_manager, "TaskManager.%s" % self.label) - try: - self.pyro_daemon.requestLoop() - finally: - self.pyro_daemon.shutdown(True) - if self.pyro_ns is not None: - try: - pyro_ns.unregister("TaskManager.%s" % self.label) - except Pyro.errors.NamingError: - pass - - def requestTask(self, tag, *parameters): - """ - Launches a task request. The task will be executed by a slave - process in a method called 'do\_'+tag that is called with the - parameters given in the task request. Note that the order of - task executions is not defined. - - :param tag: a tag identifying the computational task. It corresponds - to the name of a method in the slave process. - :type tag: C{str} - - :param parameters: the parameters passed to the corresponding method - in the slave process. The only restriction on their - types is that all parameters must be picklable. - - :return: a unique task id - :rtype: C{str} - """ - - return self.task_manager.addTaskRequest(tag, parameters) - - def retrieveResult(self, tag=None): - """ - :param tag: a tag identifying the computational task from which a - return value is requested. If C{None}, results from - any task will be accepted. 
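For illustration, a minimal master process built on the two calls documented above; the SqrtMaster class, the "sqrt" tag and its do_sqrt slave handler are hypothetical, and a running Pyro name server is assumed:

from MDANSE.DistributedComputing.MasterSlave import MasterProcess

class SqrtMaster(MasterProcess):
    def run(self):
        # Submit one task per value; the order of execution is undefined.
        task_ids = [self.requestTask("sqrt", x) for x in (1.0, 4.0, 9.0)]
        # Collect the results as they arrive (not necessarily in order).
        for _ in task_ids:
            task_id, tag, result = self.retrieveResult("sqrt")
            print(task_id, result)

SqrtMaster("sqrt_demo").start()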
- :type tag: C{str} - - :return: a tuple containing three values: the task id to which the - result corresponds, the tag of the computational task, - and the result returned by the slave method that handled - the task - :rtype: C{tuple} - - :raise exception: raises TaskRaisedException: if the slave method raised an exception - """ - try: - if tag is None: - return self.task_manager.getAnyResult() - else: - task_id, result = self.task_manager.getResultWithTag(tag) - return task_id, tag, result - except TaskManagerTermination: - return None, None, None - - def setGlobalState(self, **kw): - state_id = min(list(self.global_states.keys()) + [0]) + 1 - self.global_states[state_id] = list(kw.keys()) - for name, value in kw.items(): - label = "state_%d_%s" % (state_id, name) - if debug: - print("Storing state value ", label) - self.task_manager.storeData(**{label: value}) - return state_id - - def deleteGlobalState(self, state_id): - for name in self.global_states[state_id]: - label = "state_%d_%s" % (state_id, name) - if debug: - print("Deleting state value ", label) - self.task_manager.deleteData(label) - - def start(self): - """ - Starts the master process. - """ - try: - self.run() - finally: - self.shutdown() - - def shutdown(self): - self.task_manager.terminate() - while self.task_manager.numberOfActiveProcesses() > 1: - time.sleep(0.1) - self.pyro_daemon.shutdown() - self.manager_thread.join() - - def run(self): - """ - The main routine of the master process. This method must be - overridden in subclasses. - """ - raise NotImplementedError - - def launchSlaveJobs(self, n=1, port=7766): - """ - Launch n slave jobs on the machine that also runs the master job. - @param n: the number of slave jobs to be launched. - @type n: C{int} - """ - import subprocess, sys - - slave_script = ( - ('label="%s"\nport=%d\n' % (self.label, port)) - + """ -import Pyro.core -import Pyro.errors -import sys -Pyro.core.initClient(banner=False) -while True: - try: - task_manager = \ - Pyro.core.getProxyForURI("PYROLOC://localhost:%d/TaskManager.%s" - % (port,label)) - break - except Pyro.errors.NamingError: - continue -try: - slave_code = task_manager.retrieveData("slave_code") -except KeyError: - print "No slave code available for %s" % label - raise SystemExit -namespace = {} -sys.modules["__main__"].SLAVE_PROCESS_LABEL = label -sys.modules["__main__"].SLAVE_NAMESPACE = namespace -exec slave_code % port in namespace -""" - ) - directory = self.task_manager.retrieveData("cwd") - for _ in range(n): - process = subprocess.Popen( - [sys.executable], stdin=subprocess.PIPE, cwd=directory - ) - process.stdin.write(slave_script) - process.stdin.close() - - -class SlaveProcess(object): - """ - Slave process in a master-slave setup - - A concrete slave process in a program is implemented by subclassing - this class and adding the methods that handle the computational - tasks. Such a method has the name 'do\_' followed by the tag of the - computational task. The process is then launched by calling the - method 'start'. - """ - - def __init__(self, label, master_host=None, watchdog_period=120.0): - """ - :param label: the label that identifies the task manager - :type label: C{str} - - :param master_host: If C{None} (default), the task manager of the - master process is located using the Pyro name - server. If no name server is used, this parameter - must be the hostname of the machine on which the - master process runs, plus the port number if it - is different from the default (7766). 
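The matching slave side, sketched under the same assumptions (the do_sqrt method name encodes the "sqrt" task tag, and "sqrt_demo" is the label shared with the master):

import math

from MDANSE.DistributedComputing.MasterSlave import SlaveProcess

class SqrtSlave(SlaveProcess):
    # Handles task requests tagged "sqrt".
    def do_sqrt(self, x):
        return math.sqrt(x)

# Ping the master every 60 s so that a killed slave can be detected.
SqrtSlave("sqrt_demo", watchdog_period=60.0).start()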
- :type master_host: C{str} or C{NoneType} - - :param watchdog_period: the interval (in seconds) at which the - slave process sends messages to the - manager to signal that it is still alive. - If None, no messages are sent at all. In that - case, the manager cannot recognize if the slave - job has crashed or been killed. - :type watchdog_period: C{int} or C{NoneType} - """ - Pyro.core.initClient(banner=False) - if master_host is None: - self.task_manager = Pyro.core.getProxyForURI( - "PYRONAME://TaskManager.%s" % label - ) - else: - # URI defaults to "PYROLOC://localhost:7766/" - uri = "PYROLOC://%s/TaskManager.%s" % (master_host, label) - self.task_manager = Pyro.core.getProxyForURI(uri) - self.watchdog_period = watchdog_period - self.done = False - self.global_state_cache = {} - # Compile a dictionary of methods that implement tasks - import inspect - - self.task_methods = {} - if debug: - print("Scanning task handler methods...") - for name, value in inspect.getmembers(self, inspect.isroutine): - if name[:3] == "do_": - self.task_methods[name] = value - if debug: - print(" found handler for task ", name[:3]) - if debug: - print(len(self.task_methods), "task handlers found in class") - print("If the slave is defined by a script or module, it is") - print("normal that none have been found!") - - def watchdogThread(self): - """ - This method is run in a separate thread that pings the master process - regularly to signal that it is still alive. - """ - task_manager = copy.copy(self.task_manager) - while True: - task_manager.ping(self.process_id) - if self.done: - break - time.sleep(self.watchdog_period) - - def processParameter(self, parameter): - if isinstance(parameter, GlobalStateValue): - try: - if debug: - print("Returning state value", parameter.label) - return self.global_state_cache[parameter.label] - except KeyError: - if debug: - print("Retrieving state value", parameter.label) - self.global_state_cache[ - parameter.label - ] = self.task_manager.retrieveData(parameter.label) - return self.global_state_cache[parameter.label] - else: - return parameter - - def start(self, namespace=None): - """ - Starts the slave process. - """ - if debug: - print("Starting slave process") - if namespace is None: - namespace = self.task_methods - self.process_id = self.task_manager.registerProcess( - self.watchdog_period, info=getMachineInfo() - ) - if self.watchdog_period is not None: - self.background_thread = threading.Thread(target=self.watchdogThread) - self.background_thread.setDaemon(True) - self.background_thread.start() - # The slave process main loop - while True: - # Should we terminate for whatever reason? 
- if self.terminationTest(): - break - # Get a task - try: - task_id, tag, parameters = self.task_manager.getAnyTask(self.process_id) - if debug: - print("Got task", task_id, "of type", tag) - except TaskManagerTermination: - break - # Find the method to call - try: - method = namespace["do_%s" % tag] - except KeyError: - if debug: - print("No suitable handler was found, returning task.") - self.task_manager.returnTask(task_id) - continue - # Replace GlobalStateValue objects by the associated values - parameters = tuple(self.processParameter(p) for p in parameters) - # Call the method - try: - if debug: - print("Executing task handler...") - result = method(*parameters) - if debug: - print("...done.") - except KeyboardInterrupt: - if debug: - print("Keyboard interrupt") - self.task_manager.returnTask(task_id) - self.task_manager.unregisterProcess(self.process_id) - raise - except Exception as e: - import traceback, io - - if debug: - print("Exception:") - traceback.print_exc() - tb_text = io.StringIO() - traceback.print_exc(None, tb_text) - tb_text = tb_text.getvalue() - self.task_manager.storeException(task_id, e, tb_text) - else: - if debug: - print("Storing result...") - self.task_manager.storeResult(task_id, result) - if debug: - print("...done.") - self.task_manager.unregisterProcess(self.process_id) - self.done = True - - # Subclasses can redefine this method - def terminationTest(self): - return False - - -def getMachineInfo(): - import os - import platform - - _, nodename, _, _, machine, _ = platform.uname() - pid = os.getpid() - return "PID %d on %s (%s)" % (pid, nodename, machine) - - -class GlobalStateValue(object): - def __init__(self, state_id, name): - self.label = "state_%d_%s" % (state_id, name) - - -# -# Job handling utility -# -def runJob(label, master_class, slave_class, watchdog_period=120.0, launch_slaves=0): - """ - Creates an instance of the master_class and runs it. A copy - of the script and the current working directory are stored in the - TaskManager object to enable the task_manager script to launch - slave processes. - - :param label: the label that identifies the task manager - :type label: C{str} - - :param master_class: the class implementing the master process - (a subclass of L{MasterProcess}) - - :param slave_class: the class implementing the slave process - (a subclass of L{SlaveProcess}) - - :param watchdog_period: the interval (in seconds) at which the - slave process sends messages to the - manager to signal that it is still alive. - If None, no messages are sent at all. In that - case, the manager cannot recognize if the slave - job has crashed or been killed. 
- :type watchdog_period: C{int} or C{NoneType} - - :param launch_slaves: the number of slaves jobs to launch - immediately on the same machine that runs - the master process - :type launch_slaves: C{int} - """ - import inspect - import os - import sys - - main_module = sys.modules["__main__"] - try: - slave_label = main_module.SLAVE_PROCESS_LABEL - master = label != slave_label - except AttributeError: - master = True - if master: - filename = inspect.getsourcefile(main_module) - source = open(filename, "r").read() - process = master_class(label) - process.task_manager.storeData(slave_code=source, cwd=os.getcwd()) - if launch_slaves > 0: - process.launchSlaveJobs(launch_slaves) - process.start() - else: - slave_class(label, watchdog_period=watchdog_period).start() - - -# -# Alternate interface for multi-module programs -# -def initializeMasterProcess( - label, slave_script=None, slave_module=None, use_name_server=True -): - """ - Initializes a master process. - - :param label: the label that identifies the task manager - :type label: C{str} - - :param slave_script: the file name of the script that defines - the corresponding slave process - :type slave_script: C{str} - - :param slave_module: the name of the module that defines - the corresponding slave process - :type slave_module: C{str} - - :param use_name_server: If C{True} (default), the task manager is - registered with the Pyro name server. If - C{False}, the name server is not used and - slave processes need to know the host - on which the master process is running. - :type use_name_server: C{bool} - - :returns: a process object on which the methods requestTask() - and retrieveResult() can be called. - :rtype: L{MasterProcess} - """ - import atexit - - process = MasterProcess(label, use_name_server) - atexit.register(process.shutdown) - if slave_script is not None or slave_module is not None: - if slave_script is not None: - source = open(slave_script, "r").read() - else: - source = ( - """ -import MDANSE.DistributedComputing.MasterSlave -from %s import * -""" - % slave_module - ) - if debug: - source += "print('Slave definitions:'\n)" - source += "print(dir()\n)" - source += "MDANSE.DistributedComputing.MasterSlave.debug=True\n" - source += """ -MDANSE.DistributedComputing.MasterSlave.startSlaveProcess() -""" - process.task_manager.storeData(slave_code=source, cwd=PLATFORM.home_directory()) - if debug: - print("Slave source code:") - print(50 * "-") - print(source) - print(50 * "-") - return process - - -def startSlaveProcess(label=None, master_host=None): - """ - Starts a slave process. Must be called at the end of a script - that defines or imports all task handlers. - - :param label: the label that identifies the task manager. May be - omitted if the slave process is started through - the task_manager script. - :type label: C{str} or C{NoneType} - - :param master_host: If C{None} (default), the task manager of the - master process is located using the Pyro name - server. If no name server is used, this parameter - must be the hostname of the machine on which the - master process runs, plus the port number if it - is different from the default (7766). 
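A sketch of the master side of the multi-module setup described above, assuming a hypothetical tasks.py module that defines the do_* handlers:

from MDANSE.DistributedComputing.MasterSlave import initializeMasterProcess

# The slave source is generated from the module name and handed out to
# slave processes through the task manager.
process = initializeMasterProcess("demo", slave_module="tasks")
process.requestTask("sqrt", 2.0)
print(process.retrieveResult("sqrt"))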
- :type master_host: C{str} or C{NoneType} - """ - import sys - - main_module = sys.modules["__main__"] - if label is None: - label = main_module.SLAVE_PROCESS_LABEL - namespace = main_module.SLAVE_NAMESPACE - else: - namespace = main_module.__dict__ - if debug: - print("Initializing slave process", label) - process = SlaveProcess(label, master_host) - process.start(namespace) diff --git a/MDANSE/Src/DistributedComputing/Slave.py b/MDANSE/Src/DistributedComputing/Slave.py deleted file mode 100644 index fe9d061b9e..0000000000 --- a/MDANSE/Src/DistributedComputing/Slave.py +++ /dev/null @@ -1,49 +0,0 @@ -# ************************************************************************** -# -# MDANSE: Molecular Dynamics Analysis for Neutron Scattering Experiments -# -# @file Src/DistributedComputing/Slave.py -# @brief Implements module/class/test Slave -# -# @homepage https://www.isis.stfc.ac.uk/Pages/MDANSEproject.aspx -# @license GNU General Public License v3 or higher (see LICENSE) -# @copyright Institut Laue Langevin 2013-now -# @copyright ISIS Neutron and Muon Source, STFC, UKRI 2021-now -# @authors Scientific Computing Group at ILL (see AUTHORS) -# -# ************************************************************************** - -import os -from MDANSE import PLATFORM - -import Pyro - -Pyro.config.PYRO_STORAGE = PLATFORM.home_directory() -Pyro.config.PYRO_NS_URIFILE = os.path.join(Pyro.config.PYRO_STORAGE, "Pyro_NS_URI") -Pyro.config.PYRO_LOGFILE = os.path.join(Pyro.config.PYRO_STORAGE, "Pyro_log") -Pyro.config.PYRO_USER_LOGFILE = os.path.join(Pyro.config.PYRO_STORAGE, "Pyro_userlog") -Pyro.config.PYROSSL_CERTDIR = os.path.join(Pyro.config.PYRO_STORAGE, "certs") - - -# Define (or import) all the task handlers. -def do_run_step(job, step): - """ - Computes a single step of a distributed job. - - :param job: the distributed job - :type job: any class that implements the run_step method - :param step: the step number - :type step: int - - :return: the return values of the distributed job for this step - :rtype: tuple of the form (step,return values) - """ - - return job.run_step(step) - - -if __name__ == "__builtin__": - from MDANSE.DistributedComputing.MasterSlave import startSlaveProcess - - # Start the slave process - startSlaveProcess(master_host="localhost:%d") diff --git a/MDANSE/Src/DistributedComputing/TaskManager.py b/MDANSE/Src/DistributedComputing/TaskManager.py deleted file mode 100644 index 1b7688e207..0000000000 --- a/MDANSE/Src/DistributedComputing/TaskManager.py +++ /dev/null @@ -1,605 +0,0 @@ -# -# Task manager for distributed computing based on Pyro -# -# Written by Konrad Hinsen -# last revision: 2010-12-22 -# - -import Pyro.core -import threading -import time - -""" -Task manager for distributed computations. The task manager is used -by the module MasterSlave, but can also be used directly for different -distributed computing setups. -""" - -debug = False - - -class TaskManagerTermination(Exception): - - """ - Signals that the task manager has no more tasks to handle. - """ - - pass - - -class TaskRaisedException(Exception): - - """ - Signals that an exception was raised inside a task. Four - attributes provide information about the task and the exception: - "task_id" is the task's id, "tag" is its tag, "exception" - contains the original exception object, and "traceback" - contains a text representation of the stack traceback at the - time of the exception. 
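Illustrative handling of this exception on the master side, where process is a MasterProcess object as in the sketches above:

from MDANSE.DistributedComputing.TaskManager import TaskRaisedException

try:
    task_id, tag, result = process.retrieveResult()
except TaskRaisedException as e:
    # The remote stack trace travels back as plain text.
    print("task %s (tag %r) failed:" % (e.task_id, e.tag))
    print(e.traceback)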
- """ - - def __init__(self, task_id, tag, exception, traceback): - self.task_id = task_id - self.tag = tag - self.exception = exception - self.traceback = traceback - - -class Task(object): - - """ - Describes a task inside the task manager. - """ - - def __init__(self, tag, parameters, task_id): - """ - @param tag: the tag of the computational task - @type tag: C{str} - @param parameters: the parameters of the task - @type parameters: C{tuple} - @param task_id: the task id - @type task_id: C{str} - """ - self.tag = tag - self.parameters = parameters - self.id = task_id - self.requesting_processor = None - self.handling_processor = None - self.request_time = None - self.start_time = None - self.end_time = None - - -class TaskQueue(object): - - """ - A FIFO queue for tasks. This class is thread-safe. - """ - - def __init__(self): - self.tasks = [] - self.tasks_by_tag = {} - self.tasks_by_id = {} - self.task_available = threading.Condition() - self.terminate = False - - def __len__(self): - """ - @returns: the number of tasks in the queue - @rtype: C{int} - """ - return len(self.tasks) - - def terminateWaitingThreads(self): - """ - Makes all threads waiting for a task raise L{TaskManagerTermination}. - """ - self.task_available.acquire() - self.terminate = True - self.task_available.notifyAll() - self.task_available.release() - - def _checkForTermination(self): - if self.terminate: - self.task_available.release() - raise TaskManagerTermination() - - def addTask(self, task, in_front=False): - """ - @param task: the task to be added - @type task: L{Task} - @param in_front: if C{True}, add the task at the beginning of the - queue (this is for re-scheduling tasks that were - rejected or not properly handled). Otherwise, add - the task at the end of the queue. - @type in_front: C{bool} - """ - self.task_available.acquire() - self.tasks.append(task) - tasks = self.tasks_by_tag.setdefault(task.tag, []) - if in_front: - tasks.insert(0, task) - else: - tasks.append(task) - self.tasks_by_id[task.id] = task - self.task_available.notifyAll() - self.task_available.release() - - def firstTask(self): - """ - @returns: the first task in the queue - @rtype: L{Task} - - Removes the first task from the queue and returns it. If the task queue - is empty, the method blocks until a task becomes available. - """ - self.task_available.acquire() - while not self.tasks: - self._checkForTermination() - self.task_available.wait() - task = self.tasks[0] - self._removeTask(task) - self.task_available.release() - return task - - def firstTaskWithTag(self, tag): - """ - @param tag: a task tag - @type tag: C{str} - @returns: the first task in the queue - @rtype: L{Task} - - Removes the first task with the given tag from the queue and returns - it. If no task with the requested tag is available, the method blocks - until a matching task becomes available. - """ - self.task_available.acquire() - while not self.tasks_by_tag.get(tag, None): - self._checkForTermination() - self.task_available.wait() - task = self.tasks_by_tag[tag][0] - self._removeTask(task) - self.task_available.release() - return task - - def taskWithId(self, task_id): - """ - @param task_id: a task id - @type task_id: C{str} - @returns: the task with the given task_id - @rtype: L{Task} - - Removes the task with the given task_id from the queue and returns - it. If the task is not in the queue, the method blocks - until it becomes available. 
- """ - self.task_available.acquire() - while True: - task = self.tasks_by_id.get(task_id, None) - if task is not None: - break - self._checkForTermination() - self.task_available.wait() - self._removeTask(task) - self.task_available.release() - return task - - def _removeTask(self, task): - self.tasks.remove(task) - self.tasks_by_tag[task.tag].remove(task) - del self.tasks_by_id[task.id] - - def taskCount(self): - """ - @returns: a dictionary listing the number of tasks for each tag - @rtype: C{dict} - """ - self.task_available.acquire() - count = {} - for tag, tasks in self.tasks_by_tag.items(): - count[tag] = len(tasks) - self.task_available.release() - return count - - -class TaskManager(Pyro.core.ObjBase): - - """ - Manager for computational tasks. - - A TaskManager accepts task requests and hands them out to other processes. - It stores the results that can then be picked up by the requester. A - TaskManager also keeps track of its compute processes. If a process - disappears, its running tasks are re-scheduled for execution by another - compute process. TaskManangers are thread-safe. - """ - - def __init__(self): - Pyro.core.ObjBase.__init__(self) - self.id_counter = 0 - self.waiting_tasks = TaskQueue() - self.running_tasks = TaskQueue() - self.finished_tasks = TaskQueue() - self.results = {} - self.process_counter = 0 - self.active_processes = [] - self.process_info = [] - self.tasks_by_process = [] - self.data = {} - self.lock = threading.RLock() - self.watchdog = None - - def registerProcess(self, watchdog_period=None, info=None): - """ - @param watchdog_period: the period at which the registering process - promises to ping the task manager to signal - that is still alive. If C{None}, no pings - are expected. - @type watchdog_period: C{int} or C{NoneType} - @param info: an information string telling something about the - machine running the process - @type info: C{str} - @returns: a unique process id - @rtype: C{int} - - Registers a process with the task manager. All processes must call - this method before making any other task manager calls. - """ - self.lock.acquire() - process_id = self.process_counter - self.process_counter += 1 - self.active_processes.append(process_id) - self.process_info.append(info) - self.tasks_by_process.append([]) - self.lock.release() - if watchdog_period is not None: - if self.watchdog is None: - self.watchdog = Watchdog(self) - self.watchdog.registerProcess(process_id, watchdog_period) - return process_id - - def unregisterProcess(self, process_id): - """ - @param process_id: the id of the process - @type process_id: C{int} - - Removes the process from the task manager's process list. All - processes should unregister when they are no longer available - for accepting tasks. The task manager will also unregister processes - itself if they do not ping the task manager at the promised frequency. - """ - if debug: - print("Unregistering process", process_id) - self.lock.acquire() - position = self.active_processes.index(process_id) - for t in self.tasks_by_process[position]: - self.returnTask(t.id) - assert len(self.tasks_by_process[position]) == 0 - del self.tasks_by_process[position] - del self.active_processes[position] - del self.process_info[position] - self.lock.release() - if self.watchdog is not None: - self.watchdog.unregisterProcess(process_id) - - def ping(self, process_id): - """ - @param process_id: the id of the process - @type process_id: C{int} - - Tells the task manager that a process is still alive. 
- """ - if self.watchdog is not None: - self.watchdog.ping(process_id) - - def numberOfActiveProcesses(self): - """ - @returns: the number of active processes - @rtype: C{int} - """ - return len(self.active_processes) - - def activeProcessInfo(self, pid): - """ - @param pid: the number of an active process - @type pid: C{int} - @returns: information about the active process number pid - @rtype: C{str} - """ - return self.process_info[pid] - - def numberOfTasks(self): - """ - @returns: a tuple of dictionaries containing the number of waiting - tasks, the number of running tasks, and the number of results - waiting to be retrieved. Each dictionary contains the - count for each tag. - @rtype: C{tuple} - """ - self.lock.acquire() - waiting = self.waiting_tasks.taskCount() - running = self.running_tasks.taskCount() - finished = self.finished_tasks.taskCount() - self.lock.release() - return waiting, running, finished - - def addTaskRequest(self, tag, parameters, process_id=None): - """ - @param tag: the tag of the task being requested - @type tag: C{str} - @param parameters: the parameters to be passed to the task - @param process_id: the id of the requesting process (optional) - @type process_id: C{int} - @returns: the task id - @rtype: C{str} - """ - self.lock.acquire() - task_id = tag + "_" + str(self.id_counter) - self.id_counter += 1 - self.lock.release() - new_task = Task(tag, parameters, task_id) - if process_id: - new_task.requesting_processor = process_id - new_task.request_time = time.time() - self.waiting_tasks.addTask(new_task) - if debug: - print("Task request %s: %s(%s)" % (task_id, tag, str(parameters))) - return task_id - - def getTaskWithTag(self, tag, process_id=None): - """ - @param tag: a task tag - @type tag: C{str} - @param process_id: the id of the retrieving process (optional) - @type process_id: C{int} - @returns: the task id and the parameters - @rtype: C{tuple} - - Returns a waiting task with the given tag. The task is removed from - the list of waiting tasks and added to the list of running tasks. - """ - task = self.waiting_tasks.firstTaskWithTag(tag) - self._checkoutTask(task, process_id) - return task.id, task.parameters - - def getAnyTask(self, process_id=None): - """ - @param process_id: the id of the retrieving process (optional) - @type process_id: C{int} - @returns: the task id, the task tag, and the parameters - @rtype: C{tuple} - - Returns a waiting task of arbitrary tag. The task is removed from - the list of waiting tasks and added to the list of running tasks. - """ - task = self.waiting_tasks.firstTask() - self._checkoutTask(task, process_id) - return task.id, task.tag, task.parameters - - def _checkoutTask(self, task, process_id): - task.handling_processor = process_id - task.start_time = time.time() - self.running_tasks.addTask(task) - if process_id is not None: - self.lock.acquire() - position = self.active_processes.index(process_id) - self.tasks_by_process[position].append(task) - self.lock.release() - if debug: - print("Handing out task %s to process %s" % (task.id, str(process_id))) - - def storeResult(self, task_id, result): - """ - @param task_id: the id of the task for which the result is provided - @type task_id: C{str} - @param result: the result of the task - - Stores the result associated with the task. The task is removed from - the list of running tasks and added to the list of finished tasks. 
- """ - if debug: - print("Task %s yielded result %s" % (task_id, result)) - self.lock.acquire() - self.results[task_id] = result - self.lock.release() - task = self.running_tasks.taskWithId(task_id) - task.end_time = time.time() - task.completed = True - self.finished_tasks.addTask(task) - self._removeTask(task) - - def storeException(self, task_id, exception, traceback): - """ - @param task_id: the id of the task for which the result is provided - @type task_id: C{str} - @param exception: the exception raised by the task - @param traceback: a text version of the stack traceback at the time - of the exception - @type traceback: C{str} - - Stores the exception associated with the task. The task is removed from - the list of running tasks and added to the list of finished tasks. - When the result is retrieved by another process, L{TaskRaisedException} - is raised. - """ - if debug: - print("Task %s raised exception %s" % (task_id, exception)) - self.lock.acquire() - self.results[task_id] = (exception, traceback) - self.lock.release() - task = self.running_tasks.taskWithId(task_id) - task.end_time = time.time() - task.completed = False - self.finished_tasks.addTask(task) - self._removeTask(task) - - def returnTask(self, task_id): - """ - @param task_id: the id of the task for which the result is provided - @type task_id: C{str} - - Removes a task from the list of running tasks and put its back at the - beginning of the list of waiting tasks. This method should be called - by a process that has obtained a task but cannot handle it. - """ - if debug: - print("Task %s returned" % task_id) - task = self.running_tasks.taskWithId(task_id) - self._removeTask(task) - task.start_time = None - task.handling_processor = None - self.waiting_tasks.addTask(task, in_front=True) - - def _removeTask(self, task): - if task.handling_processor is not None: - self.lock.acquire() - try: - position = self.active_processes.index(task.handling_processor) - self.tasks_by_process[position].remove(task) - except ValueError: - pass - self.lock.release() - - def getAnyResult(self): - """ - @returns: the task id, the task tag, and the result of the task - @rtype: C{tuple} - - Returns the result of an arbitrary finished task. The task is removed - from the list of finished tasks. - """ - task = self.finished_tasks.firstTask() - result = self.results[task.id] - del self.results[task.id] - if task.completed: - return task.id, task.tag, result - else: - raise TaskRaisedException(task.id, task.tag, result[0], result[1]) - - def getResultWithTag(self, tag): - """ - @param tag: a task tag - @returns: the task id and the result of the task - @rtype: C{tuple} - - Returns the result of a finished task that has the given tag. The - task is removed from the list of finished tasks. - """ - task = self.finished_tasks.firstTaskWithTag(tag) - result = self.results[task.id] - del self.results[task.id] - if debug: - print("Handed out result of %s" % task.id) - if task.completed: - return task.id, result - else: - raise TaskRaisedException(task.id, task.tag, result[0], result[1]) - - def storeData(self, **kw): - """ - @param kw: a keyword list of data items to be stored - @type kw: C{dict} - - This routine permits processes to exchange arbitrary data items - through the task manager. 
- """ - self.lock.acquire() - for label, data in kw.items(): - self.data[label] = data - self.lock.release() - - def retrieveData(self, label): - """ - @param label: the label of the data item to be retrieved - @type label: C{str} - """ - self.lock.acquire() - data = self.data[label] - self.lock.release() - return data - - def deleteData(self, label): - """ - @param label: the label of the data item to be deleted - @type label: C{str} - """ - self.lock.acquire() - del self.data[label] - self.lock.release() - - def terminate(self): - """ - Signals that no more tasks or results will be requested. All waiting - threads will be terminated by raising L{TaskManagerTermination}. - """ - if debug: - print("Terminating") - self.waiting_tasks.terminateWaitingThreads() - self.running_tasks.terminateWaitingThreads() - self.finished_tasks.terminateWaitingThreads() - - -class Watchdog(object): - - """ - A background process that watches compute tasks and unregisters those - that do not ping the task manager at the promised interval. - """ - - def __init__(self, task_manager): - self.task_manager = task_manager - self.ping_period = {} - self.last_ping = {} - self.done = False - self.lock = threading.RLock() - self.background_thread = threading.Thread(target=self.watchdogThread) - self.background_thread.setDaemon(True) - self.thread_started = False - - def registerProcess(self, process_id, ping_period): - self.lock.acquire() - self.ping_period[process_id] = ping_period - self.last_ping[process_id] = time.time() - if not self.thread_started: - self.background_thread.start() - self.thread_started = True - self.lock.release() - - def unregisterProcess(self, process_id): - self.lock.acquire() - try: - del self.ping_period[process_id] - del self.last_ping[process_id] - except KeyError: - # KeyError happens when processes without watchdog are unregistered - pass - self.lock.release() - - def ping(self, process_id): - self.lock.acquire() - self.last_ping[process_id] = time.time() - self.lock.release() - - def terminate(self, blocking=False): - self.done = True - if blocking: - self.background_thread.join() - - def watchdogThread(self): - while True: - now = time.time() - dead_processes = [] - min_delay = min(self.ping_period.values() + [60.0]) - self.lock.acquire() - for process_id in self.ping_period.keys(): - delay = now - self.last_ping[process_id] - if delay > 4 * self.ping_period[process_id]: - dead_processes.append(process_id) - self.lock.release() - for process_id in dead_processes: - if debug: - print("Process %d died" % process_id) - self.task_manager.unregisterProcess(process_id) - if self.done: - break - time.sleep(min_delay) diff --git a/MDANSE/Src/DistributedComputing/__init__.py b/MDANSE/Src/DistributedComputing/__init__.py deleted file mode 100644 index 7add38e56d..0000000000 --- a/MDANSE/Src/DistributedComputing/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# ************************************************************************** -# -# MDANSE: Molecular Dynamics Analysis for Neutron Scattering Experiments -# -# @file Src/DistributedComputing/__init__.py -# @brief Implements module/class/test __init__ -# -# @homepage https://www.isis.stfc.ac.uk/Pages/MDANSEproject.aspx -# @license GNU General Public License v3 or higher (see LICENSE) -# @copyright Institut Laue Langevin 2013-now -# @copyright ISIS Neutron and Muon Source, STFC, UKRI 2021-now -# @authors Scientific Computing Group at ILL (see AUTHORS) -# -# ************************************************************************** diff --git 
a/MDANSE/Src/Framework/Configurators/NetCDFInputFileConfigurator.py b/MDANSE/Src/Framework/Configurators/NetCDFInputFileConfigurator.py deleted file mode 100644 index afa97fc584..0000000000 --- a/MDANSE/Src/Framework/Configurators/NetCDFInputFileConfigurator.py +++ /dev/null @@ -1,112 +0,0 @@ -# ************************************************************************** -# -# MDANSE: Molecular Dynamics Analysis for Neutron Scattering Experiments -# -# @file Src/Framework/Configurators/NetCDFInputFileConfigurator.py -# @brief Implements module/class/test NetCDFInputFileConfigurator -# -# @homepage https://www.isis.stfc.ac.uk/Pages/MDANSEproject.aspx -# @license GNU General Public License v3 or higher (see LICENSE) -# @copyright Institut Laue Langevin 2013-now -# @copyright ISIS Neutron and Muon Source, STFC, UKRI 2021-now -# @authors Scientific Computing Group at ILL (see AUTHORS) -# -# ************************************************************************** - -import netCDF4 - -from MDANSE import REGISTRY -from MDANSE.Framework.Configurators.IConfigurator import ConfiguratorError -from MDANSE.Framework.Configurators.InputFileConfigurator import InputFileConfigurator -from MDANSE.IO.NetCDF import find_numeric_variables - - -class NetCDFInputFileConfigurator(InputFileConfigurator): - """ - This configurator allows to input a NetCDF file as input file. - - NetCDF is a set of software libraries and self-describing, machine-independent data formats that - support the creation, access, and sharing of array-oriented scientific data. - - For more information, please consult the NetCDF website: http://www.unidata.ucar.edu/software/netcdf/ - """ - - _default = "" - - def __init__(self, name, variables=None, **kwargs): - """ - Initializes the configurator. - - :param name: the name of the configurator as it will appear in the configuration. - :type name: str - :param variables: the list of NetCDF variables that must be present in the input NetCDF file or None if there is no compulsory variable. - :type variables: list of str or None - """ - - # The base class constructor. - InputFileConfigurator.__init__(self, name, **kwargs) - - self._variables = variables if variables is not None else [] - - def configure(self, value): - """ - Configure a NetCDF file. - - :param configuration: the current configuration. - :type configuration: a MDANSE.Framework.Configurable.Configurable object - :param value: the path for the NetCDF file. - :type value: str - """ - - InputFileConfigurator.configure(self, value) - - try: - self["instance"] = netCDF4.Dataset(self["value"], "r") - - except IOError: - raise ConfiguratorError( - "can not open %r NetCDF file for reading" % self["value"] - ) - - for v in self._variables: - try: - self[v] = self["instance"].variables[v] - except KeyError: - raise ConfiguratorError( - "the variable %r was not found in %r NetCDF file" - % (v, self["value"]) - ) - - @property - def variables(self): - """ - Returns the list of NetCDF variables that must be present in the NetCDF file. - - :return: the list of NetCDF variables that must be present in the NetCDF file. - :rtype: list of str - """ - - return self._variables - - def get_information(self): - """ - Returns some basic informations about the contents of the NetCDF file. - - :return: the informations about the contents of the NetCDF file. 
- :rtype: str - """ - - info = ["NetCDF input file: %r" % self["value"]] - - if "instance" in self: - info.append("Contains the following variables:") - variables = [] - find_numeric_variables(variables, self["instance"]) - - for v in variables: - info.append("\t-{}".format(v)) - - return "\n".join(info) - - -REGISTRY["netcdf_input_file"] = NetCDFInputFileConfigurator diff --git a/MDANSE/Src/Framework/Formats/NetCDFFormat.py b/MDANSE/Src/Framework/Formats/NetCDFFormat.py deleted file mode 100644 index 0774944ab7..0000000000 --- a/MDANSE/Src/Framework/Formats/NetCDFFormat.py +++ /dev/null @@ -1,89 +0,0 @@ -# ************************************************************************** -# -# MDANSE: Molecular Dynamics Analysis for Neutron Scattering Experiments -# -# @file Src/Framework/Formats/NetCDFFormat.py -# @brief Implements module/class/test NetCDFFormat -# -# @homepage https://www.isis.stfc.ac.uk/Pages/MDANSEproject.aspx -# @license GNU General Public License v3 or higher (see LICENSE) -# @copyright Institut Laue Langevin 2013-now -# @copyright ISIS Neutron and Muon Source, STFC, UKRI 2021-now -# @authors Scientific Computing Group at ILL (see AUTHORS) -# -# ************************************************************************** - -import os - -import numpy as np - -import netCDF4 - -from MDANSE import REGISTRY -from MDANSE.Framework.Formats.IFormat import IFormat - - -class NetCDFFormat(IFormat): - """ - This class handles the writing of output variables in NetCDF file format. - """ - - extension = ".nc" - - extensions = [".nc", ".cdf", ".netcdf"] - - @classmethod - def write(cls, filename, data, header=""): - """ - Write a set of output variables into a NetCDF file. - - :param filename: the path to the output NetCDF file. - :type filename: str - :param data: the data to be written out. - :type data: dict of Framework.OutputVariables.IOutputVariable - :param header: the header to add to the output file. - :type header: str - """ - - filename = os.path.splitext(filename)[0] - - filename = "%s%s" % (filename, cls.extensions[0]) - - # The NetCDF output file is opened for writing. - outputFile = netCDF4.Dataset(filename, "w") - - if header: - # This is to avoid any segmentation fault when writing the NetCDF header field - header = str(header) - - outputFile.header = header - - # Loop over the OutputVariable instances to write. - - for var in list(data.values()): - varName = str(var.varname).strip().replace("/", "|") - - # The NetCDF dimensions are created for all the dimensions of the OutputVariable instance. - dimensions = [] - for i, v in enumerate(var.shape): - name = str("%s_%d" % (varName, i)) - dimensions.append(name) - outputFile.createDimension(name, int(v)) - - # A NetCDF variable instance is created for the running OutputVariable instance. - NETCDFVAR = outputFile.createVariable( - varName, np.dtype(var.dtype).char, tuple(dimensions) - ) - - # The array stored in the OutputVariable instance is written to the NetCDF file. - NETCDFVAR[:] = var - - # All the attributes stored in the OutputVariable instance are written to the NetCDF file. - for k, v in list(vars(var).items()): - setattr(NETCDFVAR, str(k), str(v)) - - # The NetCDF file is closed. 
- outputFile.close() - - -REGISTRY["netcdf"] = NetCDFFormat diff --git a/MDANSE/Src/Framework/InputData/NetCDFInputData.py b/MDANSE/Src/Framework/InputData/NetCDFInputData.py deleted file mode 100644 index b5d7bdbf70..0000000000 --- a/MDANSE/Src/Framework/InputData/NetCDFInputData.py +++ /dev/null @@ -1,63 +0,0 @@ -# ************************************************************************** -# -# MDANSE: Molecular Dynamics Analysis for Neutron Scattering Experiments -# -# @file Src/Framework/InputData/NetCDFInputData.py -# @brief Implements module/class/test NetCDFInputData -# -# @homepage https://www.isis.stfc.ac.uk/Pages/MDANSEproject.aspx -# @license GNU General Public License v3 or higher (see LICENSE) -# @copyright Institut Laue Langevin 2013-now -# @copyright ISIS Neutron and Muon Source, STFC, UKRI 2021-now -# @authors Scientific Computing Group at ILL (see AUTHORS) -# -# ************************************************************************** - -import collections - -import netCDF4 - -from MDANSE import REGISTRY -from MDANSE.Framework.InputData.IInputData import InputDataError -from MDANSE.Framework.InputData.InputFileData import InputFileData -from MDANSE.IO.IOUtils import load_variables -from MDANSE.IO.NetCDF import find_numeric_variables - - -class NetCDFInputData(InputFileData): - extension = "nc" - - def info(self): - val = ["Variables found in NetCDF file:"] - - for k, v in list(self._netcdf.variables.items()): - val.append("\t - %s: %s" % (k, v.shape)) - - val = "\n".join(val) - - return val - - def load(self): - try: - self._netcdf = netCDF4.Dataset(self._name, "r") - - except IOError: - raise InputDataError( - "The data stored in %r filename could not be loaded properly." - % self._name - ) - - else: - variables = find_numeric_variables(collections.OrderedDict(), self._netcdf) - - self._data = load_variables(variables) - - def close(self): - self._netcdf.close() - - @property - def netcdf(self): - return self._netcdf - - -REGISTRY["netcdf_data"] = NetCDFInputData diff --git a/MDANSE/Src/Framework/Jobs/AngularCorrelation.py b/MDANSE/Src/Framework/Jobs/AngularCorrelation.py index 543793441f..c784ba3fe0 100644 --- a/MDANSE/Src/Framework/Jobs/AngularCorrelation.py +++ b/MDANSE/Src/Framework/Jobs/AngularCorrelation.py @@ -69,7 +69,7 @@ class AngularCorrelation(IJob): "boolean", {"label": "output contribution per axis", "default": False}, ) - settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]}) + settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]}) settings["running_mode"] = ("running_mode", {}) def initialize(self): diff --git a/MDANSE/Src/Framework/Jobs/AreaPerMolecule.py b/MDANSE/Src/Framework/Jobs/AreaPerMolecule.py index 3465a4a539..1a2c324655 100644 --- a/MDANSE/Src/Framework/Jobs/AreaPerMolecule.py +++ b/MDANSE/Src/Framework/Jobs/AreaPerMolecule.py @@ -65,7 +65,7 @@ class AreaPerMolecule(IJob): }, ) settings["name"] = ("string", {"label": "molecule name", "default": "DMPC"}) - settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]}) + settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]}) settings["running_mode"] = ("running_mode", {}) def initialize(self): diff --git a/MDANSE/Src/Framework/Jobs/Converter.py b/MDANSE/Src/Framework/Jobs/Converter.py index a3df1bdbba..c2c04e54cd 100644 --- a/MDANSE/Src/Framework/Jobs/Converter.py +++ b/MDANSE/Src/Framework/Jobs/Converter.py @@ -15,7 +15,6 @@ from abc import ABCMeta, abstractmethod, abstractclassmethod -import netCDF4 
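For comparison with the deleted NetCDFFormat.write above, a rough h5py equivalent of the same variable-plus-attributes layout (a sketch, not MDANSE's actual HDF writer; the function name, attributes and dtypes are illustrative). h5py datasets carry their shape directly, so the per-dimension createDimension bookkeeping disappears:

import h5py
import numpy as np

def write_hdf(filename, data, header=""):
    # data maps variable names to array-like objects, as in the old writer.
    with h5py.File(filename, "w") as f:
        if header:
            f.attrs["header"] = str(header)
        for name, array in data.items():
            # "/" is the HDF5 group separator, so replace it as the
            # old writer did for NetCDF variable names.
            f.create_dataset(name.replace("/", "|"), data=np.asarray(array))

write_hdf("demo.h5", {"time": np.arange(10.0)})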
import h5py from MDANSE.Framework.Jobs.IJob import IJob diff --git a/MDANSE/Src/Framework/Jobs/CoordinationNumber.py b/MDANSE/Src/Framework/Jobs/CoordinationNumber.py index fbfde2c8c0..54ac3aaf21 100644 --- a/MDANSE/Src/Framework/Jobs/CoordinationNumber.py +++ b/MDANSE/Src/Framework/Jobs/CoordinationNumber.py @@ -56,7 +56,7 @@ class CoordinationNumber(DistanceHistogram): } }, ) - settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]}) + settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]}) settings["running_mode"] = ("running_mode", {}) def finalize(self): diff --git a/MDANSE/Src/Framework/Jobs/CurrentCorrelationFunction.py b/MDANSE/Src/Framework/Jobs/CurrentCorrelationFunction.py index 20b2bb5f01..d09c57f8be 100644 --- a/MDANSE/Src/Framework/Jobs/CurrentCorrelationFunction.py +++ b/MDANSE/Src/Framework/Jobs/CurrentCorrelationFunction.py @@ -21,7 +21,7 @@ import numpy as np -import netCDF4 +import h5py from MDANSE import REGISTRY from MDANSE.Framework.Jobs.IJob import IJob @@ -118,7 +118,7 @@ class CurrentCorrelationFunction(IJob): }, }, ) - settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]}) + settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]}) settings["running_mode"] = ("running_mode", {}) def initialize(self): @@ -279,8 +279,8 @@ def initialize(self): if not hasattr(self, "_name"): self._name = "_".join([self._type, IJob.define_unique_name()]) - with netCDF4.Dataset( - os.path.join(gettempdir(), "mdanse_" + self.name + ".nc"), "w" + with h5py.File( + os.path.join(gettempdir(), "mdanse_" + self.name + ".h5"), "w" ) as velocities: velocities.createDimension("particles", nAtoms + 1) velocities.createDimension("time", nFrames) @@ -320,10 +320,10 @@ def initialize(self): dt=self.configuration["frames"]["time_step"], ) velocities["velocities"][idx, :, :] = vels - self._netcdf = netCDF4.Dataset( - os.path.join(tempdir, "mdanse_" + self.name + ".nc"), "r" + self._data_file = h5py.File( + os.path.join(tempdir, "mdanse_" + self.name + ".h5"), "r" ) - self._velocities = self._netcdf["velocities"] + self._velocities = self._data_file["velocities"] def run_step(self, index): """ @@ -489,7 +489,7 @@ def finalize(self): Finalizes the calculations (e.g. averaging the total term, output files creations ...) 
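One caveat in the hunk above: h5py.File objects have no createDimension or createVariable methods (both belong to the netCDF4 API), so the unchanged context lines following the new h5py.File(...) call would raise AttributeError at run time. An h5py-idiomatic allocation of the temporary velocity buffer would look roughly like this (a sketch; nAtoms, nFrames and the trailing axis length of 3 stand in for the job's real sizes):

import os
from tempfile import gettempdir

import h5py

nAtoms, nFrames = 100, 50  # illustrative stand-ins
path = os.path.join(gettempdir(), "mdanse_demo.h5")
with h5py.File(path, "w") as velocities:
    # A single (particles, time, axis) dataset replaces the named
    # NetCDF dimensions plus variable.
    velocities.create_dataset(
        "velocities", shape=(nAtoms + 1, nFrames, 3), dtype="f8"
    )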
""" try: - self._netcdf.close() + self._data_file.close() except (AttributeError, RuntimeError): pass diff --git a/MDANSE/Src/Framework/Jobs/Density.py b/MDANSE/Src/Framework/Jobs/Density.py index c91ba1129c..fb7fe86456 100644 --- a/MDANSE/Src/Framework/Jobs/Density.py +++ b/MDANSE/Src/Framework/Jobs/Density.py @@ -45,7 +45,7 @@ class Density(IJob): settings = collections.OrderedDict() settings["trajectory"] = ("hdf_trajectory", {}) settings["frames"] = ("frames", {"dependencies": {"trajectory": "trajectory"}}) - settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]}) + settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]}) settings["running_mode"] = ("running_mode", {}) def initialize(self): diff --git a/MDANSE/Src/Framework/Jobs/DensityOfStates.py b/MDANSE/Src/Framework/Jobs/DensityOfStates.py index 2df90ecb4d..5957294d69 100644 --- a/MDANSE/Src/Framework/Jobs/DensityOfStates.py +++ b/MDANSE/Src/Framework/Jobs/DensityOfStates.py @@ -67,7 +67,7 @@ class DensityOfStates(IJob): "weights", {"dependencies": {"atom_selection": "atom_selection"}}, ) - settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]}) + settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]}) settings["running_mode"] = ("running_mode", {}) def initialize(self): diff --git a/MDANSE/Src/Framework/Jobs/DensityProfile.py b/MDANSE/Src/Framework/Jobs/DensityProfile.py index 4432867acb..4159978e88 100644 --- a/MDANSE/Src/Framework/Jobs/DensityProfile.py +++ b/MDANSE/Src/Framework/Jobs/DensityProfile.py @@ -65,7 +65,7 @@ class DensityProfile(IJob): "weights", {"dependencies": {"atom_selection": "atom_selection"}}, ) - settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]}) + settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]}) settings["running_mode"] = ("running_mode", {}) def initialize(self): diff --git a/MDANSE/Src/Framework/Jobs/DipoleAutoCorrelationFunction.py b/MDANSE/Src/Framework/Jobs/DipoleAutoCorrelationFunction.py index 2c4c9c6376..4d3b14c7c8 100644 --- a/MDANSE/Src/Framework/Jobs/DipoleAutoCorrelationFunction.py +++ b/MDANSE/Src/Framework/Jobs/DipoleAutoCorrelationFunction.py @@ -48,7 +48,7 @@ class DipoleAutoCorrelationFunction(IJob): "default": {0: 0.5, 1: 1.2, 2: -0.2}, }, ) - settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]}) + settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]}) settings["running_mode"] = ("running_mode", {}) def initialize(self): diff --git a/MDANSE/Src/Framework/Jobs/DistanceHistogram.py b/MDANSE/Src/Framework/Jobs/DistanceHistogram.py index ca94fda16f..ed25ed52ce 100644 --- a/MDANSE/Src/Framework/Jobs/DistanceHistogram.py +++ b/MDANSE/Src/Framework/Jobs/DistanceHistogram.py @@ -60,7 +60,7 @@ class DistanceHistogram(IJob): "weights", {"dependencies": {"atom_selection": "atom_selection"}}, ) - settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]}) + settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]}) settings["running_mode"] = ("running_mode", {}) def initialize(self): diff --git a/MDANSE/Src/Framework/Jobs/DynamicCoherentStructureFactor.py b/MDANSE/Src/Framework/Jobs/DynamicCoherentStructureFactor.py index 85eba3a392..c2dc1f4a31 100644 --- a/MDANSE/Src/Framework/Jobs/DynamicCoherentStructureFactor.py +++ b/MDANSE/Src/Framework/Jobs/DynamicCoherentStructureFactor.py @@ -80,7 +80,7 @@ class DynamicCoherentStructureFactor(IJob): }, }, ) - 
diff --git a/MDANSE/Src/Framework/Jobs/Density.py b/MDANSE/Src/Framework/Jobs/Density.py
index c91ba1129c..fb7fe86456 100644
--- a/MDANSE/Src/Framework/Jobs/Density.py
+++ b/MDANSE/Src/Framework/Jobs/Density.py
@@ -45,7 +45,7 @@ class Density(IJob):
     settings = collections.OrderedDict()
     settings["trajectory"] = ("hdf_trajectory", {})
     settings["frames"] = ("frames", {"dependencies": {"trajectory": "trajectory"}})
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/DensityOfStates.py b/MDANSE/Src/Framework/Jobs/DensityOfStates.py
index 2df90ecb4d..5957294d69 100644
--- a/MDANSE/Src/Framework/Jobs/DensityOfStates.py
+++ b/MDANSE/Src/Framework/Jobs/DensityOfStates.py
@@ -67,7 +67,7 @@ class DensityOfStates(IJob):
         "weights",
         {"dependencies": {"atom_selection": "atom_selection"}},
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/DensityProfile.py b/MDANSE/Src/Framework/Jobs/DensityProfile.py
index 4432867acb..4159978e88 100644
--- a/MDANSE/Src/Framework/Jobs/DensityProfile.py
+++ b/MDANSE/Src/Framework/Jobs/DensityProfile.py
@@ -65,7 +65,7 @@ class DensityProfile(IJob):
         "weights",
         {"dependencies": {"atom_selection": "atom_selection"}},
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/DipoleAutoCorrelationFunction.py b/MDANSE/Src/Framework/Jobs/DipoleAutoCorrelationFunction.py
index 2c4c9c6376..4d3b14c7c8 100644
--- a/MDANSE/Src/Framework/Jobs/DipoleAutoCorrelationFunction.py
+++ b/MDANSE/Src/Framework/Jobs/DipoleAutoCorrelationFunction.py
@@ -48,7 +48,7 @@ class DipoleAutoCorrelationFunction(IJob):
             "default": {0: 0.5, 1: 1.2, 2: -0.2},
         },
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/DistanceHistogram.py b/MDANSE/Src/Framework/Jobs/DistanceHistogram.py
index ca94fda16f..ed25ed52ce 100644
--- a/MDANSE/Src/Framework/Jobs/DistanceHistogram.py
+++ b/MDANSE/Src/Framework/Jobs/DistanceHistogram.py
@@ -60,7 +60,7 @@ class DistanceHistogram(IJob):
         "weights",
         {"dependencies": {"atom_selection": "atom_selection"}},
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/DynamicCoherentStructureFactor.py b/MDANSE/Src/Framework/Jobs/DynamicCoherentStructureFactor.py
index 85eba3a392..c2dc1f4a31 100644
--- a/MDANSE/Src/Framework/Jobs/DynamicCoherentStructureFactor.py
+++ b/MDANSE/Src/Framework/Jobs/DynamicCoherentStructureFactor.py
@@ -80,7 +80,7 @@ class DynamicCoherentStructureFactor(IJob):
             },
         },
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/DynamicIncoherentStructureFactor.py b/MDANSE/Src/Framework/Jobs/DynamicIncoherentStructureFactor.py
index 738ee8a1a9..0fed80247f 100644
--- a/MDANSE/Src/Framework/Jobs/DynamicIncoherentStructureFactor.py
+++ b/MDANSE/Src/Framework/Jobs/DynamicIncoherentStructureFactor.py
@@ -85,7 +85,7 @@ class DynamicIncoherentStructureFactor(IJob):
             },
         },
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/Eccentricity.py b/MDANSE/Src/Framework/Jobs/Eccentricity.py
index d2672451ce..cbb8f59ecf 100644
--- a/MDANSE/Src/Framework/Jobs/Eccentricity.py
+++ b/MDANSE/Src/Framework/Jobs/Eccentricity.py
@@ -93,7 +93,7 @@ class Eccentricity(IJob):
         "weights",
         {"dependencies": {"atom_selection": "atom_selection"}},
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
 
     def initialize(self):
         """
diff --git a/MDANSE/Src/Framework/Jobs/ElasticIncoherentStructureFactor.py b/MDANSE/Src/Framework/Jobs/ElasticIncoherentStructureFactor.py
index 431724e961..7199f512aa 100644
--- a/MDANSE/Src/Framework/Jobs/ElasticIncoherentStructureFactor.py
+++ b/MDANSE/Src/Framework/Jobs/ElasticIncoherentStructureFactor.py
@@ -81,7 +81,7 @@ class ElasticIncoherentStructureFactor(IJob):
             "dependencies": {"atom_selection": "atom_selection"},
         },
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/GaussianDynamicIncoherentStructureFactor.py b/MDANSE/Src/Framework/Jobs/GaussianDynamicIncoherentStructureFactor.py
index d8939d97b3..da8fd5976d 100644
--- a/MDANSE/Src/Framework/Jobs/GaussianDynamicIncoherentStructureFactor.py
+++ b/MDANSE/Src/Framework/Jobs/GaussianDynamicIncoherentStructureFactor.py
@@ -81,7 +81,7 @@ class GaussianDynamicIncoherentStructureFactor(IJob):
             "dependencies": {"atom_selection": "atom_selection"},
         },
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/GeneralAutoCorrelationFunction.py b/MDANSE/Src/Framework/Jobs/GeneralAutoCorrelationFunction.py
index cc5e081f19..e74916f3a1 100644
--- a/MDANSE/Src/Framework/Jobs/GeneralAutoCorrelationFunction.py
+++ b/MDANSE/Src/Framework/Jobs/GeneralAutoCorrelationFunction.py
@@ -60,7 +60,7 @@ class GeneralAutoCorrelationFunction(IJob):
         "weights",
         {"dependencies": {"atom_selection": "atom_selection"}},
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/McStasVirtualInstrument.py b/MDANSE/Src/Framework/Jobs/McStasVirtualInstrument.py
index a258a97ca4..a2dbdd03d6 100644
--- a/MDANSE/Src/Framework/Jobs/McStasVirtualInstrument.py
+++ b/MDANSE/Src/Framework/Jobs/McStasVirtualInstrument.py
@@ -73,7 +73,7 @@ class McStasVirtualInstrument(IJob):
     )
     settings["frames"] = ("frames", {"dependencies": {"trajectory": "trajectory"}})
     settings["sample_coh"] = (
-        "netcdf_input_file",
+        "hdf_input_file",
         {
             "widget": "input_file",
             "label": "MDANSE Coherent Structure Factor",
@@ -82,7 +82,7 @@ class McStasVirtualInstrument(IJob):
         },
     )
     settings["sample_inc"] = (
-        "netcdf_input_file",
+        "hdf_input_file",
         {
             "widget": "input_file",
             "label": "MDANSE Incoherent Structure Factor",
@@ -116,7 +116,7 @@ class McStasVirtualInstrument(IJob):
             "exclude": ["sample_coh", "sample_inc"],
         },
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
 
     def initialize(self):
         """
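
With sample_coh and sample_inc now declared as hdf_input_file, structure factors previously written to netCDF would need converting to HDF5 before they can be fed to the virtual instrument. A one-off conversion could look like the sketch below (a hypothetical helper, not part of MDANSE; it copies only the variable values, not attributes or dimension metadata, and uses the netCDF4 package that this patch removes from MDANSE's own requirements).

    import h5py
    import netCDF4
    import numpy as np

    def netcdf_to_hdf5(nc_path, h5_path):
        # Copy every variable of the old netCDF results file into an HDF5
        # dataset of the same name; np.asarray drops any netCDF fill mask.
        with netCDF4.Dataset(nc_path, "r") as src, h5py.File(h5_path, "w") as dst:
            for name, var in src.variables.items():
                dst.create_dataset(name, data=np.asarray(var[:]))

    netcdf_to_hdf5("disf_prot.nc", "disf_prot.h5")
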
diff --git a/MDANSE/Src/Framework/Jobs/MeanSquareDisplacement.py b/MDANSE/Src/Framework/Jobs/MeanSquareDisplacement.py
index dfa8e0bc5c..7ebc1ec24d 100644
--- a/MDANSE/Src/Framework/Jobs/MeanSquareDisplacement.py
+++ b/MDANSE/Src/Framework/Jobs/MeanSquareDisplacement.py
@@ -92,7 +92,7 @@ class MeanSquareDisplacement(IJob):
     )
     settings["output_files"] = (
         "output_files",
-        {"formats": ["hdf", "netcdf", "ascii", "svg"]},
+        {"formats": ["hdf", "ascii", "svg"]},
     )
     settings["running_mode"] = ("running_mode", {})
 
diff --git a/MDANSE/Src/Framework/Jobs/MolecularTrace.py b/MDANSE/Src/Framework/Jobs/MolecularTrace.py
index 0061e37b7a..ec7f8d1967 100644
--- a/MDANSE/Src/Framework/Jobs/MolecularTrace.py
+++ b/MDANSE/Src/Framework/Jobs/MolecularTrace.py
@@ -54,7 +54,7 @@ class MolecularTrace(IJob):
         {"dependencies": {"trajectory": "trajectory"}},
     )
     settings["spatial_resolution"] = ("float", {"mini": 0.01, "default": 0.1})
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/NeutronDynamicTotalStructureFactor.py b/MDANSE/Src/Framework/Jobs/NeutronDynamicTotalStructureFactor.py
index be0e101cee..5c5b0a1534 100644
--- a/MDANSE/Src/Framework/Jobs/NeutronDynamicTotalStructureFactor.py
+++ b/MDANSE/Src/Framework/Jobs/NeutronDynamicTotalStructureFactor.py
@@ -67,7 +67,7 @@ class NeutronDynamicTotalStructureFactor(IJob):
             }
         },
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/OrderParameter.py b/MDANSE/Src/Framework/Jobs/OrderParameter.py
index 9909b15edd..c16fc8d3a1 100644
--- a/MDANSE/Src/Framework/Jobs/OrderParameter.py
+++ b/MDANSE/Src/Framework/Jobs/OrderParameter.py
@@ -89,7 +89,7 @@ class OrderParameter(IJob):
         "boolean",
         {"label": "output contribution per axis", "default": False},
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/PositionAutoCorrelationFunction.py b/MDANSE/Src/Framework/Jobs/PositionAutoCorrelationFunction.py
index face3c9c4d..6aa678c54e 100644
--- a/MDANSE/Src/Framework/Jobs/PositionAutoCorrelationFunction.py
+++ b/MDANSE/Src/Framework/Jobs/PositionAutoCorrelationFunction.py
@@ -68,7 +68,7 @@ class PositionAutoCorrelationFunction(IJob):
         "weights",
         {"dependencies": {"atom_selection": "atom_selection"}},
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/RadiusOfGyration.py b/MDANSE/Src/Framework/Jobs/RadiusOfGyration.py
index f40a1d9b04..9f3f9b896e 100644
--- a/MDANSE/Src/Framework/Jobs/RadiusOfGyration.py
+++ b/MDANSE/Src/Framework/Jobs/RadiusOfGyration.py
@@ -49,7 +49,7 @@ class RadiusOfGyration(IJob):
         "weights",
         {"dependencies": {"atom_selection": "atom_selection"}},
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/RootMeanSquareDeviation.py b/MDANSE/Src/Framework/Jobs/RootMeanSquareDeviation.py
index 100434a71e..19e9dab1dc 100644
--- a/MDANSE/Src/Framework/Jobs/RootMeanSquareDeviation.py
+++ b/MDANSE/Src/Framework/Jobs/RootMeanSquareDeviation.py
@@ -72,7 +72,7 @@ class RootMeanSquareDeviation(IJob):
         "weights",
         {"dependencies": {"atom_selection": "atom_selection"}},
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/RootMeanSquareFluctuation.py b/MDANSE/Src/Framework/Jobs/RootMeanSquareFluctuation.py
index 2a0308c3aa..7f05136213 100644
--- a/MDANSE/Src/Framework/Jobs/RootMeanSquareFluctuation.py
+++ b/MDANSE/Src/Framework/Jobs/RootMeanSquareFluctuation.py
@@ -54,7 +54,7 @@ class RootMeanSquareFluctuation(IJob):
             }
         },
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/SolventAccessibleSurface.py b/MDANSE/Src/Framework/Jobs/SolventAccessibleSurface.py
index 2c3d582983..a9b9262967 100644
--- a/MDANSE/Src/Framework/Jobs/SolventAccessibleSurface.py
+++ b/MDANSE/Src/Framework/Jobs/SolventAccessibleSurface.py
@@ -66,7 +66,7 @@ class SolventAccessibleSurface(IJob):
     )
     settings["n_sphere_points"] = ("integer", {"mini": 1, "default": 1000})
     settings["probe_radius"] = ("float", {"mini": 0.0, "default": 0.14})
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/StaticStructureFactor.py b/MDANSE/Src/Framework/Jobs/StaticStructureFactor.py
index c44bdabb68..1b8dd63995 100644
--- a/MDANSE/Src/Framework/Jobs/StaticStructureFactor.py
+++ b/MDANSE/Src/Framework/Jobs/StaticStructureFactor.py
@@ -70,7 +70,7 @@ class StaticStructureFactor(DistanceHistogram):
             },
         },
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def finalize(self):
diff --git a/MDANSE/Src/Framework/Jobs/StructureFactorFromScatteringFunction.py b/MDANSE/Src/Framework/Jobs/StructureFactorFromScatteringFunction.py
index bac0af1589..06a48f13fe 100644
--- a/MDANSE/Src/Framework/Jobs/StructureFactorFromScatteringFunction.py
+++ b/MDANSE/Src/Framework/Jobs/StructureFactorFromScatteringFunction.py
@@ -33,11 +33,11 @@ class StructureFactorFromScatteringFunction(IJob):
         "Scattering",
     )
 
-    ancestor = ["netcdf_data"]
+    ancestor = ["hdf_data"]
 
     settings = collections.OrderedDict()
-    settings["netcdf_input_file"] = (
-        "netcdf_input_file",
+    settings["hdf_input_file"] = (
+        "hdf_input_file",
         {
             "variables": ["time", "f(q,t)_total"],
             "default": os.path.join("..", "..", "..", "Data", "NetCDF", "disf_prot.nc"),
@@ -45,9 +45,9 @@ class StructureFactorFromScatteringFunction(IJob):
     )
     settings["instrument_resolution"] = (
         "instrument_resolution",
-        {"dependencies": {"frames": "netcdf_input_file"}},
+        {"dependencies": {"frames": "hdf_input_file"}},
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
 
     def initialize(self):
         """
@@ -57,7 +57,7 @@ def initialize(self):
         # The number of steps is set to 1 as everything is performed in the finalize method
         self.numberOfSteps = 1
 
-        inputFile = self.configuration["netcdf_input_file"]["instance"]
+        inputFile = self.configuration["hdf_input_file"]["instance"]
 
         resolution = self.configuration["instrument_resolution"]
 
@@ -132,7 +132,7 @@ def finalize(self):
             self._info,
         )
 
-        self.configuration["netcdf_input_file"]["instance"].close()
+        self.configuration["hdf_input_file"]["instance"].close()
 
 
 REGISTRY["sffsf"] = StructureFactorFromScatteringFunction
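
The renamed hdf_input_file configurator still requests the "time" and "f(q,t)_total" variables, which now come out of an HDF5 file. Assuming they are stored as top-level datasets (the file name below is hypothetical), they can be read back with h5py like this:

    import h5py

    with h5py.File("disf_prot.h5", "r") as results:
        time = results["time"][:]           # 1D array of frame times
        fqt = results["f(q,t)_total"][:]    # f(q,t), one row per q value
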
diff --git a/MDANSE/Src/Framework/Jobs/Temperature.py b/MDANSE/Src/Framework/Jobs/Temperature.py
index 7c313a5be6..129936aab3 100644
--- a/MDANSE/Src/Framework/Jobs/Temperature.py
+++ b/MDANSE/Src/Framework/Jobs/Temperature.py
@@ -57,7 +57,7 @@ class Temperature(IJob):
             "default": 1,
         },
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
 
     def initialize(self):
         """
diff --git a/MDANSE/Src/Framework/Jobs/VelocityAutoCorrelationFunction.py b/MDANSE/Src/Framework/Jobs/VelocityAutoCorrelationFunction.py
index 9a501c9e22..bd4dee275c 100644
--- a/MDANSE/Src/Framework/Jobs/VelocityAutoCorrelationFunction.py
+++ b/MDANSE/Src/Framework/Jobs/VelocityAutoCorrelationFunction.py
@@ -85,7 +85,7 @@ class VelocityAutoCorrelationFunction(IJob):
         "weights",
         {"dependencies": {"atom_selection": "atom_selection"}},
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def initialize(self):
diff --git a/MDANSE/Src/Framework/Jobs/Voronoi.py b/MDANSE/Src/Framework/Jobs/Voronoi.py
index 94454dc951..b7bf326e9b 100644
--- a/MDANSE/Src/Framework/Jobs/Voronoi.py
+++ b/MDANSE/Src/Framework/Jobs/Voronoi.py
@@ -67,7 +67,7 @@ class Voronoi(IJob):
         {"label": "apply periodic_boundary_condition", "default": True},
     )
     settings["pbc_border_size"] = ("float", {"mini": 0.0, "default": 0.0})
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
 
     def initialize(self):
         self.numberOfSteps = self.configuration["frames"]["number"]
diff --git a/MDANSE/Src/Framework/Jobs/XRayStaticStructureFactor.py b/MDANSE/Src/Framework/Jobs/XRayStaticStructureFactor.py
index c5d19ea177..67cdb0dce8 100644
--- a/MDANSE/Src/Framework/Jobs/XRayStaticStructureFactor.py
+++ b/MDANSE/Src/Framework/Jobs/XRayStaticStructureFactor.py
@@ -84,7 +84,7 @@ class XRayStaticStructureFactor(DistanceHistogram):
             }
         },
     )
-    settings["output_files"] = ("output_files", {"formats": ["hdf", "netcdf", "ascii"]})
+    settings["output_files"] = ("output_files", {"formats": ["hdf", "ascii"]})
     settings["running_mode"] = ("running_mode", {})
 
     def finalize(self):
diff --git a/MDANSE/requirements.txt b/MDANSE/requirements.txt
index 21170c4b4f..620869d7ae 100644
--- a/MDANSE/requirements.txt
+++ b/MDANSE/requirements.txt
@@ -1,7 +1,6 @@
 numpy
 matplotlib
 h5py
-netCDF4
 sphinx
 icecream
 cython

From 6dfaa9821bd90a8b8cdfceb106b5d433df4904c3 Mon Sep 17 00:00:00 2001
From: Maciej Bartkowiak
Date: Fri, 20 Oct 2023 11:58:48 +0100
Subject: [PATCH 2/2] Update the input parameters in unit tests

---
 MDANSE/Tests/UnitTests/Analysis/test_dynamics.py     | 12 ++++++------
 .../Tests/UnitTests/Analysis/test_thermodynamics.py  |  2 +-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/MDANSE/Tests/UnitTests/Analysis/test_dynamics.py b/MDANSE/Tests/UnitTests/Analysis/test_dynamics.py
index 2c714eb937..3a4f743094 100644
--- a/MDANSE/Tests/UnitTests/Analysis/test_dynamics.py
+++ b/MDANSE/Tests/UnitTests/Analysis/test_dynamics.py
@@ -27,12 +27,12 @@ def trajectory():
     trajectory = REGISTRY["input_data"]["hdf_trajectory"](short_traj)
     yield trajectory
 
-@pytest.mark.parametrize('interp_order, normalise',[("1st order", True),
-                                                    ("2nd order", True),
-                                                    ("3rd order", True),
-                                                    ("1st order", False),
-                                                    ("2nd order", False),
-                                                    ("3rd order", False),])
+@pytest.mark.parametrize('interp_order, normalise',[(1, True),
+                                                    (2, True),
+                                                    (3, True),
+                                                    (1, False),
+                                                    (2, False),
+                                                    (3, False),])
 def test_vacf(trajectory, interp_order, normalise):
     temp_name = tempfile.mktemp()
     parameters = {}
diff --git a/MDANSE/Tests/UnitTests/Analysis/test_thermodynamics.py b/MDANSE/Tests/UnitTests/Analysis/test_thermodynamics.py
index 5dcb2eaf33..c52ac0629d 100644
--- a/MDANSE/Tests/UnitTests/Analysis/test_thermodynamics.py
+++ b/MDANSE/Tests/UnitTests/Analysis/test_thermodynamics.py
@@ -28,7 +28,7 @@ def trajectory():
     trajectory = REGISTRY["input_data"]["hdf_trajectory"](short_traj)
     yield trajectory
 
-@pytest.mark.parametrize('interp_order',["1st order","2nd order","3rd order"])
+@pytest.mark.parametrize('interp_order',[1,2,3])
 def test_temperature(trajectory, interp_order):
     temp_name = tempfile.mktemp()
     parameters = {}