diff --git a/packages/adi_py/adi_py/__init__.py b/packages/adi_py/adi_py/__init__.py
index fa932dd..2c14b5f 100644
--- a/packages/adi_py/adi_py/__init__.py
+++ b/packages/adi_py/adi_py/__init__.py
@@ -14,6 +14,8 @@
 
 from .process import Process
 
+from .utils import DatastreamIdentifier
+
 from .xarray_accessors import ADIDatasetAccessor, ADIDataArrayAccessor
 
 from .exception import SkipProcessingIntervalException
diff --git a/packages/adi_py/adi_py/constants.py b/packages/adi_py/adi_py/constants.py
index 7676065..8a4a9dd 100644
--- a/packages/adi_py/adi_py/constants.py
+++ b/packages/adi_py/adi_py/constants.py
@@ -26,10 +26,11 @@ class SpecialXrAttributes:
     SOURCE_VAR_NAME = '__source_var_name'
     COORDINATE_SYSTEM = '__coordsys_name'
     OUTPUT_TARGETS = '__output_targets'
-    DATASTREAM_NAME = '__datastream_name'
+    DATASTREAM_DSID = '__datastream_dsid'
     DATASET_TYPE = '__dataset_type'
     OBS_INDEX = '__obs_index'
 
+
 class ADIAtts:
     MISSING_VALUE = 'missing_value'
     LONG_NAME = 'long_name'
@@ -64,4 +65,3 @@ class ADIDatasetType(Enum):
     RETRIEVED = 1
     TRANSFORMED = 2
     OUTPUT = 3
-
diff --git a/packages/adi_py/adi_py/logger.py b/packages/adi_py/adi_py/logger.py
index c9d1937..831123b 100644
--- a/packages/adi_py/adi_py/logger.py
+++ b/packages/adi_py/adi_py/logger.py
@@ -25,7 +25,7 @@ def warning(message):
 
     @staticmethod
     def error(message):
-        dsproc.error(message)
+        dsproc.error(message, None)
 
     @staticmethod
     def exception(message):
diff --git a/packages/adi_py/adi_py/process.py b/packages/adi_py/adi_py/process.py
index 2969a34..d767b6c 100644
--- a/packages/adi_py/adi_py/process.py
+++ b/packages/adi_py/adi_py/process.py
@@ -8,19 +8,19 @@
     only need to override the hook methods that are required for your
     process.
 -----------------------------------------------------------------------"""
-import warnings
-
 import numpy as np
 import os
 import re
 import sys
+import warnings
 import xarray as xr
 from time import gmtime, strftime
-from typing import Any, Callable, List, Union
+from typing import Any, Callable, List, Union, Optional
 
 from .constants import ADIDatasetType, ADIAtts, SpecialXrAttributes, SplitMode, TransformAttributes
 from .logger import ADILogger
-from .utils import get_dataset_id, get_xr_dataset, sync_xr_dataset, get_datastream_files, adi_hook_exception_handler
+from .utils import (
+    DatastreamIdentifier,
+    adi_hook_exception_handler,
+    get_datastream_files,
+    get_datastream_id,
+    get_xr_datasets,
+    sync_xr_dataset,
+)
 
 try:
     import dsproc3 as dsproc
@@ -159,59 +159,295 @@ def facility(self) -> str:
         return dsproc.get_facility()
 
     @staticmethod
-    def get_retrieved_dataset(input_datastream_name: str) -> Union[xr.Dataset, List[xr.Dataset]]:
+    def get_dsid(datastream_name: str, site: Optional[str] = None, facility: Optional[str] = None,
+                 dataset_type: Optional[ADIDatasetType] = None) -> Optional[int]:
+        """-----------------------------------------------------------------------
+        Gets the corresponding datastream id (dsid) for the given datastream (input or output).
+
+        Args:
+            datastream_name (str):  The name of the datastream to find
+
+            site (str):
+                Optional parameter used only to find some input datasets (RETRIEVED or TRANSFORMED).
+                Site is only required if the retrieval rules in the PCM specify two different
+                rules for the same datastream that differ by site.
+
+            facility (str):
+                Optional parameter used only to find some input datasets (RETRIEVED or TRANSFORMED).
+                Facility is only required if the retrieval rules in the PCM specify two different
+                rules for the same datastream that differ by facility.
+
+            dataset_type (ADIDatasetType):
+                The type of dataset associated with the datastream (RETRIEVED, TRANSFORMED, OUTPUT)
+
+        Returns:
+            Optional[int]: The datastream id (dsid) or None if not found
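+
+        Example (an illustrative sketch; "met.b1" is a placeholder name):
+
+            dsid = self.get_dsid('met.b1', dataset_type=ADIDatasetType.RETRIEVED)
+            if dsid is not None:
+                ds = self.get_retrieved_dataset_by_dsid(dsid)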
+        -----------------------------------------------------------------------"""
+        return get_datastream_id(datastream_name, site=site, facility=facility, dataset_type=dataset_type)
+
+    @staticmethod
+    def get_retrieved_dataset(
+        input_datastream_name: str,
+        site: Optional[str] = None,
+        facility: Optional[str] = None,
+    ) -> Optional[xr.Dataset]:
+        """-----------------------------------------------------------------------
+        Get an ADI retrieved dataset converted to an xr.Dataset.
+
+        Note: This method will return at most a single xr.Dataset. If you expect
+        multiple datasets, or would like to handle cases where multiple dataset files
+        may be retrieved, please use the `Process.get_retrieved_datasets()` function.
+
+        Args:
+            input_datastream_name (str):
+                The name of one of the process' input datastreams as specified in the PCM.
+
+            site (str):
+                Optional parameter used only to find some input datasets (RETRIEVED or TRANSFORMED).
+                Site is only required if the retrieval rules in the PCM specify two different
+                rules for the same datastream that differ by site.
+
+            facility (str):
+                Optional parameter used only to find some input datasets (RETRIEVED or TRANSFORMED).
+                Facility is only required if the retrieval rules in the PCM specify two different
+                rules for the same datastream that differ by facility.
+
+        Returns:
+            xr.Dataset | None: Returns a single xr.Dataset, or None if no retrieved datasets
+                exist for the specified datastream / site / facility.
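+
+        Example (an illustrative sketch; "met.b1" is a placeholder name):
+
+            ds = self.get_retrieved_dataset('met.b1')
+            if ds is None:
+                raise SkipProcessingIntervalException('No met.b1 data found')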
+        -----------------------------------------------------------------------"""
+        datasets = Process.get_retrieved_datasets(
+            input_datastream_name=input_datastream_name,
+            site=site,
+            facility=facility,
+        )
+        if not datasets:
+            return None
+        if len(datasets) > 1:
+            raise Exception(f'Datastream "{input_datastream_name}" contains more than one observation (i.e., file)'
+                            f' of data.  Please use the get_retrieved_datasets() method to get the full list of Xarray'
+                            f' datasets (one for each file).')
+        return datasets[0]
+
+    @staticmethod
+    def get_retrieved_datasets(
+        input_datastream_name: str,
+        site: Optional[str] = None,
+        facility: Optional[str] = None,
+    ) -> List[xr.Dataset]:
+        """-----------------------------------------------------------------------
+        Get the ADI retrieved datasets converted to a list of xarray Datasets.
+
+        Args:
+            input_datastream_name (str):
+                The name of one of the process' input datastreams as specified in the PCM.
+
+            site (str):
+                Optional parameter used only to find some input datasets (RETRIEVED or TRANSFORMED).
+                Site is only required if the retrieval rules in the PCM specify two different
+                rules for the same datastream that differ by site.
+
+            facility (str):
+                Optional parameter used only to find some input datasets (RETRIEVED or TRANSFORMED).
+                Facility is only required if the retrieval rules in the PCM specify two different
+                rules for the same datastream that differ by facility.
+
+        Returns:
+            List[xr.Dataset]: Returns a list of xr.Datasets. If no retrieved datasets
+                exist for the specified datastream / site / facility / coord system
+                then the list will be empty.
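+
+        Example (an illustrative sketch; "met.b1" and "sgp" are placeholders):
+
+            for ds in self.get_retrieved_datasets('met.b1', site='sgp'):
+                ADILogger.debug(f'retrieved obs with {ds.sizes["time"]} samples')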
+        -----------------------------------------------------------------------"""
+        return get_xr_datasets(
+            ADIDatasetType.RETRIEVED,
+            datastream_name=input_datastream_name,
+            site=site,
+            facility=facility,
+        )
+
+    @staticmethod
+    def get_retrieved_dataset_by_dsid(dsid: int) -> Optional[xr.Dataset]:
+        """Same as get_retrieved_dataset(), but identifies the datastream by its dsid (see get_dsid())."""
+        datasets = get_xr_datasets(ADIDatasetType.RETRIEVED, dsid=dsid)
+        if not datasets:
+            return None
+        if len(datasets) > 1:
+            raise Exception(f'Datastream with dsid "{dsid}" contains more than one observation (i.e., file) of'
+                            f' data.  Please use the get_retrieved_datasets_by_dsid() method to get the full list'
+                            f' of Xarray datasets (one for each file).')
+        return datasets[0]
+
+    @staticmethod
+    def get_retrieved_datasets_by_dsid(dsid: int) -> List[xr.Dataset]:
+        """Same as get_retrieved_datasets(), but identifies the datastream by its dsid (see get_dsid())."""
+        return get_xr_datasets(ADIDatasetType.RETRIEVED, dsid=dsid)
+
+    @staticmethod
+    def get_transformed_dataset(
+        input_datastream_name: str,
+        coordinate_system_name: str,
+        site: Optional[str] = None,
+        facility: Optional[str] = None,
+    ) -> Optional[xr.Dataset]:
         """-----------------------------------------------------------------------
-        Get an ADI retrieved dataset converted to an xarray.Dataset.
+        Get an ADI transformed dataset converted to an xr.Dataset.
+
+        Note: This method will return at most a single xr.Dataset. If you expect
+        multiple datasets, or would like to handle cases where multiple dataset files
+        may be retrieved, please use the `Process.get_transformed_datasets()` function.
 
         Args:
             input_datastream_name (str):
                 The name of one of the process' input datastreams as specified in the PCM.
 
+            coordinate_system_name (str):
+                A coordinate system specified in the PCM or None if no coordinate system was
+                specified.
+
+            site (str):
+                Optional parameter used only to find some input datasets (RETRIEVED or TRANSFORMED).
+                Site is only required if the retrieval rules in the PCM specify two different
+                rules for the same datastream that differ by site.
+
+            facility (str):
+                Optional parameter used only to find some input datasets (RETRIEVED or TRANSFORMED).
+                Facility is only required if the retrieval rules in the PCM specify two different
+                rules for the same datastream that differ by facility.
+
         Returns:
-            Union[xr.Dataset,  List[xr.Dataset]]:  Most of the time, return a single xr.Dataset.
-            If the process is using file-based processing or if there are multiple
-            files for the same datastream and a dimensionality conflict prevented the files
-            from being merged, then return a List[XArray.Dataset], one for each file.
+            xr.Dataset | None: Returns a single xr.Dataset, or None if no transformed
+                datasets exist for the specified datastream / site / facility / coord system.
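+
+        Example (an illustrative sketch; "met.b1" and "half_min_grid" are placeholders):
+
+            ds = self.get_transformed_dataset('met.b1', 'half_min_grid')
+            if ds is not None:
+                self.sync_datasets(ds)  # push any changes back to ADI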
         -----------------------------------------------------------------------"""
-        return get_xr_dataset(ADIDatasetType.RETRIEVED, input_datastream_name)
+        datasets = Process.get_transformed_datasets(
+            input_datastream_name=input_datastream_name,
+            coordinate_system_name=coordinate_system_name,
+            site=site,
+            facility=facility,
+        )
+        if not datasets:
+            return None
+        if len(datasets) > 1:
+            raise Exception(f'Datastream "{input_datastream_name}" contains more than one observation (i.e., file)'
+                            f' of data.  Please use the get_transformed_datasets() method to get the full list of Xarray'
+                            f' datasets (one for each file).')
+        return datasets[0]
 
     @staticmethod
-    def get_transformed_dataset(input_datastream_name: str,
-                                coordinate_system_name: str) -> Union[xr.Dataset, List[xr.Dataset]]:
+    def get_transformed_datasets(
+        input_datastream_name: str,
+        coordinate_system_name: str,
+        site: Optional[str] = None,
+        facility: Optional[str] = None,
+    ) -> List[xr.Dataset]:
         """-----------------------------------------------------------------------
-        Get an ADI transformed dataset converted to an xarray.Dataset.
+        Get the ADI transformed datasets converted to a list of xarray Datasets.
 
         Args:
-            input_datastream_name (str):  The name of one of the process' input
-                datastreams as specified in the PCM.
+            input_datastream_name (str):
+                The name of one of the process' input datastreams as specified in the PCM.
+
+            coordinate_system_name (str):
+                A coordinate system specified in the PCM or None if no coordinate system was
+                specified.
+
+            site (str):
+                Optional parameter used only to find some input datasets (RETRIEVED or TRANSFORMED).
+                Site is only required if the retrieval rules in the PCM specify two different
+                rules for the same datastream that differ by site.
 
-            coordsys_name (str):  A coordinate system specified in the PCM or None
-                if no coordinate system was specified.
+            facility (str):
+                Optional parameter used only to find some input datasets (RETRIEVED or TRANSFORMED).
+                Facility is only required if the retrieval rules in the PCM specify two different
+                rules for the same datastream that differ by facility.
 
         Returns:
-            Union[xr.Dataset,  List[xr.Dataset]]:  Most of the time, return a single xr.Dataset.
-            If the process is using file-based processing or if there are multiple
-            files for the same datastream and a dimensionality conflict prevented the files
-            from being merged, then return a List[XArray.Dataset], one for each file.
+            List[xr.Dataset]: Returns a list of xr.Datasets. If no transformed datasets
+                exist for the specified datastream / site / facility / coord system
+                then the list will be empty.
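+
+        Example (an illustrative sketch; the names are placeholders):
+
+            datasets = self.get_transformed_datasets('met.b1', 'half_min_grid')
+            for obs_index, ds in enumerate(datasets):
+                ADILogger.debug(f'obs {obs_index} has {len(ds.data_vars)} variables')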
         -----------------------------------------------------------------------"""
-        return get_xr_dataset(ADIDatasetType.TRANSFORMED, input_datastream_name, coordinate_system_name)
+        return get_xr_datasets(
+            ADIDatasetType.TRANSFORMED,
+            datastream_name=input_datastream_name,
+            coordsys_name=coordinate_system_name,
+            site=site,
+            facility=facility,
+        )
 
     @staticmethod
-    def get_output_dataset(output_datastream_name: str) -> Union[xr.Dataset, List[xr.Dataset]]:
+    def get_transformed_dataset_by_dsid(dsid: int, coordinate_system_name: str) -> Optional[xr.Dataset]:
+        """Same as get_transformed_dataset(), but identifies the datastream by its dsid (see get_dsid())."""
+        datasets = get_xr_datasets(ADIDatasetType.TRANSFORMED, dsid=dsid, coordsys_name=coordinate_system_name)
+        if not datasets:
+            return None
+        if len(datasets) > 1:
+            raise Exception(f'Datastream with dsid "{dsid}" contains more than one observation (i.e., file) of'
+                            f' data.  Please use the get_transformed_datasets_by_dsid() method to get the full'
+                            f' list of Xarray datasets (one for each file).')
+        return datasets[0]
+
+    @staticmethod
+    def get_transformed_datasets_by_dsid(dsid: int, coordinate_system_name: str) -> List[xr.Dataset]:
+        """Same as get_transformed_datasets(), but identifies the datastream by its dsid (see get_dsid())."""
+        return get_xr_datasets(ADIDatasetType.TRANSFORMED, dsid=dsid, coordsys_name=coordinate_system_name)
+
+    @staticmethod
+    def get_output_dataset(output_datastream_name: str) -> Optional[xr.Dataset]:
         """-----------------------------------------------------------------------
-        Get an ADI output dataset converted to an xarray.Dataset.
+        Get an ADI output dataset converted to an xr.Dataset.
+
+        Note: This method will return at most a single xr.Dataset. If you expect
+        multiple datasets, or would like to handle cases where multiple dataset files
+        may be produced, please use the `Process.get_output_datasets()` function.
+
+        Args:
+            output_datastream_name (str):
+                The name of one of the process' output datastreams as specified in the PCM.
 
+        Returns:
+            xr.Dataset | None: Returns a single xr.Dataset, or None if no output
+                datasets exist for the specified datastream.
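+
+        Example (an illustrative sketch; "mymet.c1" is a placeholder name):
+
+            ds = self.get_output_dataset('mymet.c1')
+            if ds is not None:
+                ds.attrs['comment'] = 'an example comment'
+                self.sync_datasets(ds)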
+        -----------------------------------------------------------------------"""
+        datasets = Process.get_output_datasets(output_datastream_name)
+        if not datasets:
+            return None
+        if len(datasets) > 1:
+            raise Exception(f'Datastream "{output_datastream_name}" contains more than one observation (i.e., file)'
+                            f' of data.  Please use the get_output_datasets() method to get the full list of Xarray'
+                            f' datasets (one for each file).')
+        return datasets[0]
+
+    @staticmethod
+    def get_output_datasets(output_datastream_name: str) -> List[xr.Dataset]:
+        """-----------------------------------------------------------------------
+        Get the ADI output datasets converted to a list of xarray Datasets.
+
         Args:
             output_datastream_name (str):
                 The name of one of the process' output datastreams as specified in the PCM.
 
         Returns:
-            Union[xr.Dataset,  List[xr.Dataset]]:  Most of the time, return a single xr.Dataset.
-            If the process is using file-based processing or if there are multiple
-            files for the same datastream and a dimensionality conflict prevented the files
-            from being merged, then return a List[XArray.Dataset], one for each file.
+            List[xr.Dataset]: Returns a list of xr.Datasets. If no output datasets
+                exist for the specified datastream then the list will be empty.
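+
+        Example (an illustrative sketch; "mymet.c1" is a placeholder name):
+
+            datasets = self.get_output_datasets('mymet.c1')
+            self.sync_datasets(*datasets)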
         -----------------------------------------------------------------------"""
-        return get_xr_dataset(ADIDatasetType.OUTPUT, output_datastream_name)
+        return get_xr_datasets(ADIDatasetType.OUTPUT, datastream_name=output_datastream_name)
+
+    @staticmethod
+    def get_output_dataset_by_dsid(dsid: int) -> Optional[xr.Dataset]:
+        """Same as get_output_dataset(), but identifies the datastream by its dsid (see get_dsid())."""
+        datasets = get_xr_datasets(ADIDatasetType.OUTPUT, dsid=dsid)
+        if not datasets:
+            return None
+        if len(datasets) > 1:
+            raise Exception(f'Datastream with dsid "{dsid}" contains more than one observation (i.e., file) of'
+                            f' data.  Please use the get_output_datasets_by_dsid() method to get the full list'
+                            f' of Xarray datasets (one for each file).')
+        return datasets[0]
+
+    @staticmethod
+    def get_output_datasets_by_dsid(dsid: int) -> List[xr.Dataset]:
+        """Same as get_output_datasets(), but identifies the datastream by its dsid (see get_dsid())."""
+        return get_xr_datasets(ADIDatasetType.OUTPUT, dsid=dsid)
 
     @staticmethod
     def sync_datasets(*args: xr.Dataset):
@@ -246,12 +482,75 @@ def sync_datasets(*args: xr.Dataset):
             if xr_dataset is not None:
                 sync_xr_dataset(xr_dataset)
 
+    @staticmethod
+    def set_datastream_flags(dsid: int, flags: int):
+        """-----------------------------------------------------------------------
+        Apply a set of ADI control flags to a datastream as identified by the
+        dsid.  Multiple flags can be combined using a bitwise OR (e.g.,
+        dsproc.DS_STANDARD_QC | dsproc.DS_FILTER_NANS). The allowed flags are
+        identified below:
+
+        - dsproc.DS_STANDARD_QC     = Apply standard QC before storing a dataset.
+
+        - dsproc.DS_FILTER_NANS     = Replace NaN and Inf values with missing values
+                                      before storing a dataset.
+
+        - dsproc.DS_OVERLAP_CHECK   = Check for overlap with previously processed data.
+                                      This flag will be ignored and the overlap check
+                                      will be skipped if reprocessing mode or
+                                      asynchronous processing mode is enabled.
+
+        - dsproc.DS_PRESERVE_OBS    = Preserve distinct observations when retrieving
+                                      data. Only observations that start within the
+                                      current processing interval will be read in.
+
+        - dsproc.DS_DISABLE_MERGE   = Do not merge multiple observations in retrieved
+                                      data. Only data for the current processing interval
+                                      will be read in.
+
+        - dsproc.DS_SKIP_TRANSFORM  = Skip the transformation logic for all variables
+                                      in this datastream.
+
+        - dsproc.DS_ROLLUP_TRANS_QC = Consolidate the transformation QC bits for all
+                                      variables when mapped to the output datasets.
+
+        - dsproc.DS_SCAN_MODE       = Enable scan mode for datastreams that are not
+                                      expected to be continuous. This prevents warning
+                                      messages from being generated when data is not
+                                      found within a processing interval. Instead, a
+                                      message will be written to the log file indicating
+                                      that the processing interval was skipped.
+
+        - dsproc.DS_OBS_LOOP        = Loop over observations instead of time intervals.
+                                      This also sets the DS_PRESERVE_OBS flag.
+
+        - dsproc.DS_FILTER_VERSIONED_FILES = Check for files with .v# version extensions
+                                             and filter out lower versioned files. Files
+                                             without a version extension take precedence.
+
+        Call self.get_dsid() to obtain the dsid value for a specific datastream.
+        If the flags value is < 0, then the following default flags will be set:
+        - dsproc.DS_STANDARD_QC             'b' level datastreams
+        - dsproc.DS_FILTER_NANS             'a' and 'b' level datastreams
+        - dsproc.DS_OVERLAP_CHECK           all output datastreams
+        - dsproc.DS_FILTER_VERSIONED_FILES  input datastreams that are not level '0'
+
+        Args:
+            dsid (int):  Datastream ID
+            flags (int): Flags to set
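+
+        Example (an illustrative sketch; "mymet.c1" is a placeholder name):
+
+            dsid = self.get_dsid('mymet.c1', dataset_type=ADIDatasetType.OUTPUT)
+            if dsid is not None:
+                self.set_datastream_flags(dsid, dsproc.DS_STANDARD_QC | dsproc.DS_FILTER_NANS)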
+        -----------------------------------------------------------------------"""
+        dsproc.set_datastream_flags(dsid, flags)
+
     @staticmethod
     def get_datastream_files(datastream_name: str, begin_date: int, end_date: int) -> List[str]:
         """-----------------------------------------------------------------------
         See :func:`.utils.get_datastream_files`
         -----------------------------------------------------------------------"""
-        return get_datastream_files(datastream_name, begin_date, end_date)
+        dsid = get_datastream_id(datastream_name)
+        return get_datastream_files(dsid, begin_date, end_date)
 
     @staticmethod
     def get_nsamples(xr_var: xr.DataArray) -> int:
@@ -628,13 +927,13 @@ def add_if_set(att_name, att_value):
         return xr_var
 
     @staticmethod
-    def find_retrieved_variable(retrieved_variable_name) -> str:
+    def find_retrieved_variable(retrieved_variable_name) -> Optional[DatastreamIdentifier]:
         """-----------------------------------------------------------------------
-        Find the input datastream name where the given retrieved variable came
+        Find the input datastream where the given retrieved variable came
         from.  We may need this if there are complex retrieval rules and the
         given variable may be retrieved from different datastreams depending
         upon the site/facility where this process runs.  We need to get the
-        datastream name so we can load the correct xarray dataset if we need
+        DatastreamIdentifier so we can load the correct xarray dataset if we need
         to modify the data values.
 
         Args:
@@ -642,18 +941,22 @@ def find_retrieved_variable(retrieved_variable_name) -> str:
                 find
 
         Returns:
-            The name of the datastream that contains this retrieved variable's
-            data or None if the retrieved variable was not found in any datastreams.
+            A DatastreamIdentifier containing all the information needed to look
+            up the given dataset or None if the retrieved variable was not found.
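+
+        Example (an illustrative sketch; "temp" is a placeholder variable name, and
+        the DatastreamIdentifier fields are assumed to be accessible as attributes):
+
+            dsinfo = self.find_retrieved_variable('temp')
+            if dsinfo is not None:
+                ds = self.get_retrieved_dataset_by_dsid(dsinfo.dsid)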
         -----------------------------------------------------------------------"""
-        datastream_name = None
+
         adi_var = dsproc.get_retrieved_var(retrieved_variable_name, 0)
         if adi_var is not None:
             dsid = dsproc.get_source_ds_id(adi_var)
             level = dsproc.datastream_class_level(dsid)
             cls = dsproc.datastream_class_name(dsid)
             datastream_name = f"{cls}.{level}"
+            site = dsproc.datastream_site(dsid)
+            fac = dsproc.datastream_facility(dsid)
+
+            return DatastreamIdentifier(datastream_name=datastream_name, site=site, facility=fac, dsid=dsid)
 
-        return datastream_name
+        return None
 
     @staticmethod
     def add_qc_variable(dataset: xr.Dataset, variable_name: str):
@@ -863,7 +1166,7 @@ def assign_output_datastream_to_variable(variable: xr.DataArray,
             variable_name_in_datastream = variable.name
         output_targets = variable.attrs.get(SpecialXrAttributes.OUPUT_TARGETS)
         output_targets = {} if output_targets is None else output_targets
-        dsid = get_dataset_id(output_datastream_name)
+        dsid = get_datastream_id(output_datastream_name)
         output_targets[dsid] = variable_name_in_datastream
 
         variable.attrs[SpecialXrAttributes.OUPUT_TARGETS] = output_targets
@@ -911,7 +1214,7 @@ def set_datastream_split_mode(output_datastream_name: str, split_mode: SplitMode
             split_interval (int): Depends on the split_mode selected
 
         -----------------------------------------------------------------------"""
-        dsid = get_dataset_id(output_datastream_name)
+        dsid = get_datastream_id(output_datastream_name)
         dsproc.set_datastream_split_mode(dsid, split_mode.value, split_start, split_interval)
 
     @staticmethod
@@ -933,7 +1236,7 @@ def set_retriever_time_offsets(input_datastream_name: str, begin_offset: int, en
                 ends
 
         -----------------------------------------------------------------------"""
-        dsid = get_dataset_id(input_datastream_name)
+        dsid = get_datastream_id(input_datastream_name)
         dsproc.set_retriever_time_offsets(dsid, begin_offset, end_offset)
 
     @staticmethod
@@ -964,7 +1267,7 @@ def shift_output_interval(output_datastream_name: str, hours: int):
             hours (int): Number of hours to shift
 
         -----------------------------------------------------------------------"""
-        dsid = get_dataset_id(output_datastream_name)
+        dsid = get_datastream_id(output_datastream_name)
         dsproc.set_datastream_split_tz_offset(dsid, hours)
 
     def _internal_init_process_hook(self):
diff --git a/packages/adi_py/adi_py/utils.py b/packages/adi_py/adi_py/utils.py
index b0b77c2..206c26d 100644
--- a/packages/adi_py/adi_py/utils.py
+++ b/packages/adi_py/adi_py/utils.py
@@ -1,801 +1,929 @@
-"""---------------------------------------------------------------------------
-This module provides a variety of functions that are used by the Process
-class to serialize/deserialize between ADI and XArray.  Most of these utility
-functions are used within Process class methods and are generally not intended
-to be called directly by developers when implementing a specific Process subclass.
----------------------------------------------------------------------------"""
-import itertools
-import os
-from typing import Any, Callable, Dict, List, Optional, Union
-
-import numpy as np
-import pandas as pd
-import xarray as xr
-
-import cds3
-import dsproc3 as dsproc
-from .constants import SpecialXrAttributes, ADIAtts, ADIDatasetType
-from .exception import SkipProcessingIntervalException
-from .logger import ADILogger
-
-
-def is_empty_function(func: Callable) -> bool:
-    """-----------------------------------------------------------------------
-    Evaluates a given function to see if the code contains anything more than
-    doctrings and 'pass'.  If not, it is considered an 'empty' function.
-
-    Args:
-        func (Callable):
-
-    Returns:
-        bool: True if the function is empty, otherwise False.
-
-    -----------------------------------------------------------------------"""
-
-    def empty_func():
-        pass
-
-    def empty_func_with_doc():
-        """Empty function with docstring."""
-        pass
-
-    return func.__code__.co_code == empty_func.__code__.co_code or \
-           func.__code__.co_code == empty_func_with_doc.__code__.co_code
-
-
-def adi_hook_exception_handler(hook_func: Callable,
-                               pre_hook_func: Callable = None,
-                               post_hook_func: Callable = None) -> Callable:
-    """-----------------------------------------------------------------------
-    Python function decorator used to consistently handle exceptions in hooks
-    so that they return the proper integer value to ADI core.  Also used to
-    ensure that consistent logging and debug dumps happen for hook methods.
-
-    Args:
-        hook_func (Callable): The original hook function implemented by
-            the developer.
-
-        pre_hook_func (Callable): An optional function to be invoked right
-            before the hook function (i.e., to do debug dumps)
-
-        post_hook_func (Callable): An optional function to be invoked right
-            after the hook function (i.e., to do debug dumps
-
-    Returns:
-        Callable: Decorator function that wraps the original hook function to
-        provide built-in, ADI-compliant logging and exception handling.
-    -----------------------------------------------------------------------"""
-    hook_name = hook_func.__name__
-
-    def wrapper_function(*args, **kwargs):
-        ret_val = 1
-
-        # Only run the hook if it is not empty!!
-        if not is_empty_function(hook_func):
-            try:
-                if pre_hook_func is not None:
-                    pre_hook_func()
-
-                ADILogger.debug(f"**************** Starting {hook_name} ****************\n")
-                hook_func(*args, **kwargs)
-
-                if post_hook_func is not None:
-                    post_hook_func()
-
-            except SkipProcessingIntervalException:
-                ret_val = 0
-
-            except Exception as e:
-                # Any other exception we treat as a fatal error
-                # TODO: should we catch other exceptions and then set specific statuses??
-                # e.g., dsproc.set_status("Required Variable(s) Not Found In Retrieved Data")
-                # e.g., dsproc.set_status("Required Variable(s) Not Found In Transformed Data")
-                ret_val = -1
-
-                # Make sure to log the stack trace to the log
-                ADILogger.exception(f"Hook {hook_name} failed.")
-
-                # If we are in debug mode, then raise the exception back so that it can be better
-                # utilized by debuggers.
-                mode = os.environ.get('ADI_PY_MODE', 'production').lower()
-                if mode is 'development':
-                    raise e
-
-            finally:
-                ADILogger.debug(f"**************** Finished {hook_name} ****************\n")
-
-        return ret_val
-
-    return wrapper_function
-
-
-def get_dataset_id(datastream_name: str) -> Optional[int]:
-    """-----------------------------------------------------------------------
-    Gets the corresponding dataset id for the given datastream (input or output)
-
-    Args:
-        datastream_name (str):  The name of the datastream to find
-
-    Returns:
-        Optional[int]: The dataset id or None if not found
-    -----------------------------------------------------------------------"""
-
-    def find_datastream_dsid(dsids):
-        for id in dsids:
-            level = dsproc.datastream_class_level(id)
-            cls = dsproc.datastream_class_name(id)
-            if datastream_name == f"{cls}.{level}":
-                return int(id)
-
-        return None
-
-    # First see if this is an input datastream
-    dsid = find_datastream_dsid(dsproc.get_input_datastream_ids())
-
-    if dsid is None:
-        dsid = find_datastream_dsid(dsproc.get_output_datastream_ids())
-
-    return dsid
-
-
-def add_vartag_attributes(xr_var_attrs: Dict, adi_var: cds3.Var):
-    """-----------------------------------------------------------------------
-    For the given ADI Variable, extract the source_ds_name and source_var_name
-    from the ADI var tags and add them to the attributes Dict for to be used
-    for the Xarray variable.
-
-    Note:
-        Currently we are not including the coordinate system and output
-        targets as part of the XArray variable's attributes since these
-        are unlikely to be changed.  If a user creates a new variable, then
-        they should call the corresponding Process methods
-        assign_coordinate_system_to_variable or assign_output_datastream_to_variable
-        to add the new variable to the designated coordinate system or
-        output datastream, respectively.
-
-    Args:
-        xr_var_attrs (Dict): A Dictionary of attributes to be assigned to the
-            XArray variable.
-
-        adi_var (cds3.Var): The original ADI variable object.
-
-    -----------------------------------------------------------------------"""
-    source_ds_name = dsproc.get_source_ds_name(adi_var)
-    if source_ds_name:
-        xr_var_attrs[SpecialXrAttributes.SOURCE_DS_NAME] = source_ds_name
-
-    source_var_name = dsproc.get_source_var_name(adi_var)
-    if source_var_name:
-        xr_var_attrs[SpecialXrAttributes.SOURCE_VAR_NAME] = source_var_name
-
-    # TODO: I decided it's confusing to have the output dataset and coord sys
-    # copied automatically, so user has to explicitly set this if they create
-    # a new variable.
-    # Add the output targets in case the user creates a new variable and it
-    # does not have output datastream defined
-    # output_targets = dsproc.get_var_output_targets(adi_var)
-    # if output_targets:
-    #     xr_output_targets = {}
-    #     for output_target in output_targets:
-    #         dsid = output_target.ds_id
-    #         ds_var_name = output_target.var_name
-    #         xr_output_targets[dsid] = ds_var_name
-    #
-    #     xr_var_attrs[SpecialXrAttributes.OUTPUT_TARGETS] = xr_output_targets
-
-
-def get_empty_ndarray_for_var(adi_var: cds3.Var, attrs: Dict = None) -> np.ndarray:
-    """-----------------------------------------------------------------------
-    For the given ADI variable object, initialize an empty numpy ndarray data
-    array with the correct shape and data type.  All values will be filled
-    with the appropriate fill value.  The rules for selecting a fill value
-    are as follows:
-
-        - If this is a qc variable, 0 will be used
-        - Else if a missing_value attribute is available, missing_value will be used
-        - Else if a _FillValue attribute is available, _FillValue will be used
-        - Else numpy.NaN will be used
-
-    Args:
-        adi_var (cds3.Var): The ADI variable object
-        attrs (Dict):  A Dictionary of attributes that will be assigned to the
-            variable when it is converted to XArray.  If not provided, it
-            will be created from the ADI variable's attrs.
-
-    Returns:
-        np.ndarray: An empty ndarray of the same shape as the variable.
-    -----------------------------------------------------------------------"""
-
-    if attrs is None:
-        adi_atts: List[cds3.Att] = adi_var.get_atts()
-        attrs = {att.get_name(): dsproc.get_att_value(adi_var, att.get_name(), att.get_type()) for att in adi_atts}
-
-    # Create a data array for this variable with empty values.
-    fill_value = None
-
-    # Figure out the fill value to use based upon the var's metadata
-    missing_values = dsproc.get_var_missing_values(adi_var)
-
-    if adi_var.get_name().startswith('qc_'):
-        # If this is a qc_ var, then we will use 0 for the fill value
-        fill_value = 0
-
-    elif missing_values and len(missing_values) > 0:
-        fill_value = missing_values[0]
-
-    else:
-        _fill_value = attrs.get(ADIAtts.FILL_VALUE)
-        if _fill_value:
-            fill_value = _fill_value
-        else:
-            fill_value = np.NaN
-
-    # Get the np dtype for the variable
-    dtype = dsproc.cds_type_to_dtype_obj(adi_var.get_type())
-
-    # Get the shape of the data
-    shape = []
-    for dim in adi_var.get_dims():
-        shape.append(dim.get_length())
-
-    # Create a np.ndarray from the shape using np.full()
-    # https://numpy.org/doc/stable/reference/generated/numpy.full.html
-    data = np.full(shape, fill_value, dtype=dtype)
-    return data
-
-
-def get_time_data_as_datetime64(time_var: cds3.Var) -> np.ndarray:
-    """-----------------------------------------------------------------------
-    Get the time values from dsproc as seconds since 1970, then convert those
-    values to datetime64 with microsecond precision.
-
-    Args:
-        time_var (cds3.Var): An ADI time variable object
-
-    Returns:
-        np.ndarray: An ndarray of the same shape as the variable with time
-        values converted to the np.datetime64 data type with microsecond
-        precision.
-    -----------------------------------------------------------------------"""
-    microsecond_times = np.asarray(dsproc.get_sample_timevals(time_var, 0)) * 1000000
-    datetime64_times =  np.array(pd.to_datetime(microsecond_times, unit='us'), np.datetime64)
-    return datetime64_times
-
-
-def get_adi_var_as_dict(adi_var: cds3.Var) -> Dict:
-    """-----------------------------------------------------------------------
-    Convert the given adi variable to a dictionary that can be used to create
-    an xarray dataarray.
-
-    Args:
-        adi_var (cds3.Var): An ADI variable object
-
-    Returns:
-        Dict: A Dictionary representation of the variable that can be used
-        in the XArray.DataArray constructor to initialize a corresponding
-        XArray variable.
-    -----------------------------------------------------------------------"""
-    # Get the variable dimensions
-    dims = [dim.get_name() for dim in adi_var.get_dims()]
-
-    # Get all the variable attributes
-    adi_atts: List[cds3.Att] = adi_var.get_atts()
-    attrs = {att.get_name(): dsproc.get_att_value(adi_var, att.get_name(), att.get_type()) for att in adi_atts}
-
-    # Now add special attributes for the variable tags
-    add_vartag_attributes(attrs, adi_var)
-
-    # If the variable is 'time' then we will convert values to datetime64 data types with microsecond precision
-    if adi_var.get_name() == 'time':
-        data = get_time_data_as_datetime64(adi_var)
-
-    else:
-        # This method uses np.PyArray_SimpleNewFromData to convert adi data array to np.ndarray
-        # =====> It will return None if the variable has no data
-        data = adi_var.get_datap()
-
-        if data is None:
-            # We need to initialize the DataArray with empty values.
-            data = get_empty_ndarray_for_var(adi_var, attrs)
-
-    return {
-        'dims': dims,
-        'attrs': attrs,
-        'data': data
-    }
-
-
-def to_xarray(adi_dataset: cds3.Group) -> xr.Dataset:
-    """-----------------------------------------------------------------------
-    Convert the specified CDS.Group into an XArray dataset.
-    Attributes will be copied, but the DataArrays for each variable
-    will be backed by an np.ndarray that links directly to the C
-    ADI data via np.PyArray_SimpleNewFromData
-
-    Args:
-        adi_dataset (cds3.Group): An ADI dataset object.
-
-    Returns:
-        xr.Dataset: The corresponding XArray dataset object.
-    -----------------------------------------------------------------------"""
-    # Get Global attrs
-    adi_atts: List[cds3.Att] = adi_dataset.get_atts()
-    attrs = {att.get_name(): dsproc.get_att_value(adi_dataset, att.get_name(), att.get_type()) for att in adi_atts}
-
-    # Get dims
-    adi_dims = adi_dataset.get_dims()
-    dims = {dim.get_name(): dim.get_length() for dim in adi_dims}
-
-    # Find the coordinate variable names
-    coord_var_names = []
-    for dim in adi_dims:
-        adi_var = dim.get_var()
-        if adi_var is not None:
-            coord_var_names.append(adi_var.get_name())
-
-    # Get coordinate & data variables
-    coords = {}
-    data_vars = {}
-    for adi_var in adi_dataset.get_vars():
-        var_name = adi_var.get_name()
-        var_as_dict = get_adi_var_as_dict(adi_var)
-        if var_name in coord_var_names:
-            coords[var_name] = var_as_dict
-        else:
-            data_vars[var_name] = var_as_dict
-
-    # Create a dictionary from the values
-    data_dict = {
-        'attrs': attrs,
-        'dims': dims,
-        'coords': coords,
-        'data_vars': data_vars
-    }
-
-    dataset = xr.Dataset.from_dict(data_dict)
-    return dataset
-
-
-def get_cds_type(value: Any) -> int:
-    """-----------------------------------------------------------------------
-    For a given Python data value, convert the data type into the corresponding
-    ADI CDS data type.
-
-    Args:
-        value (Any): Can be a single value, a List of values, or a numpy.ndarray
-            of values.
-
-    Returns:
-        int: The corresponding CDS data type
-    -----------------------------------------------------------------------"""
-    val = value
-
-    # Convert value to a numpy array so we can use dsproc method which
-    # only works if value is a numpy ndarray
-    if type(value) == list:
-        val = np.array(value)
-
-    elif type(value) != np.ndarray:
-        # We need to wrap value in a list so np constructor doesn't get confused
-        # if value is numeric
-        val = np.array([value])
-
-    if val.dtype.type == np.str_:
-        # This comparison is always failing from within the cython, because
-        # in the cython, dtype.type = 85 instead of np.str_.
-        # So I'm adding it here instead.  This checks for any string type.
-        cds_type = cds3.CHAR
-
-    else:
-        cds_type = dsproc.dtype_to_cds_type(val.dtype)
-
-    return cds_type
-
-
-def _sync_attrs(xr_atts_dict: Dict, adi_obj: cds3.Object):
-    """-----------------------------------------------------------------------
-    Sync Xarray attributes back to an ADI object (dataset or variable) by
-    checking if the following changes were made:
-
-        - Attribute values changed
-        - Attributes were added
-        - Attributes were deleted
-        - An attribute type changed
-
-    Args:
-        xr_atts (Dict):
-            Dictionary of Xarray attributes, where the keys are
-            attribute names, and values are attribute values
-
-        adi_obj (cds3.Object):
-            ADI dataset or variable
-    -----------------------------------------------------------------------"""
-    # Get lists of attribute names for comparison between two lists
-    adi_atts = {att.get_name() for att in adi_obj.get_atts()}
-    xr_atts = []
-
-    for att_name in xr_atts_dict:
-        if att_name.startswith('__'):
-            # special attributes start with '__' and are handled separately
-            continue
-        xr_atts.append(att_name)
-
-    # First remove deleted atts
-    deleted_atts = [att_name for att_name in adi_atts if att_name not in xr_atts]
-    for att_name in deleted_atts:
-        adi_att = dsproc.get_att(adi_obj, att_name)
-        status = cds3.Att.delete(adi_att)
-        if status < 1:
-            raise Exception(f'Could not delete attribute {att_name}')
-
-    # Then add new atts
-    added_atts = [att_name for att_name in xr_atts if att_name not in adi_atts]
-    for att_name in added_atts:
-        att_value = xr_atts_dict.get(att_name)
-        cds_type = get_cds_type(att_value)
-        status = dsproc.set_att(adi_obj, 1, att_name, cds_type, att_value)
-        if status < 1:
-            raise Exception(f'Could not create attribute {att_name}')
-
-    # Next change the value for other atts if the value changed
-    other_atts = [att_name for att_name in xr_atts if att_name not in added_atts]
-    for att_name in other_atts:
-        att_value = xr_atts_dict.get(att_name)
-
-        # For now, if the att is already defined in adi, we assume that the user will not
-        # change the type, just the value.
-        cds_type = dsproc.get_att(adi_obj, att_name).get_type()
-        existing_value = dsproc.get_att_value(adi_obj, att_name, cds_type)
-
-        if not np.array_equal(att_value, existing_value):
-            status = dsproc.set_att(adi_obj, 1, att_name, cds_type, att_value)
-            if status < 1:
-                raise Exception(f'Could not update attribute {att_name}')
-
-
-def _sync_dims(xr_dims: Dict, adi_dataset: cds3.Group):
-    """-----------------------------------------------------------------------
-    Sync Xarray dimensions back to ADI dataset by checking if:
-        - Any dimensions were deleted and attempts to delete them from the dataset
-        - Any dimensions were added and attempts to add them to the dataset
-        - Any dimension length (e.g. size) and attempts to change the length
-
-    Args:
-        xr_dims (Dict):   
-            Dictionary of Xarray dimiension, where the keys are
-            dimension names, and values are dimension size
-
-        adi_dataset (cds3.Group):
-            ADI dataset
-    -----------------------------------------------------------------------"""
-
-    # key is dimension name, value is cds3.Dim object
-    adi_dims = {dim.get_name(): dim for dim in adi_dataset.get_dims()}
-
-    # Check if dimension needs to be deleted from ADI dataset
-    deleted_dims = [dim_name for dim_name in adi_dims if dim_name not in xr_dims]
-
-    # Delete appropriate dimensions
-    for dim_name in deleted_dims:
-
-        adi_dim = adi_dims[dim_name]
-
-        # This function will also delete all variables that use the specified
-        # dimension.
-        status = cds3.Dim.delete(adi_dim)
-
-        if status == 0:
-            raise Exception(f'Could not delete dimension {dim_name}')
-
-    # Check if dimension needs to be added to ADI Dataset
-    added_dims = [dim_name for dim_name in xr_dims if dim_name not in adi_dims]
-
-    # Add appropriate dimensions (assume dimension is not unlimited)
-    is_unlimited = 0
-    for dim_name in added_dims:
-
-        dim_size = xr_dims[dim_name]
-        dim_obj = adi_dataset.define_dim(dim_name, dim_size, is_unlimited)
-        
-        if dim_obj is None:
-            raise Exception(f'Could not define dimension {dim_name}')
-
-    # Check if existing dimension size changed and set new value, if appropriate
-    existing_dims = [dim_name for dim_name in adi_dims if dim_name in xr_dims]
-
-    for dim_name in existing_dims:
-        adi_dim_size = adi_dims[dim_name].get_length()
-        xr_dim_size = xr_dims[dim_name]
-
-        if adi_dim_size != xr_dim_size:
-            status = dsproc.set_dim_length(adi_dataset, dim_name, xr_dim_size)
-
-            if status == 0:
-                raise Exception(f'Could not change dimension length of {dim_name}')
-
-
-def _add_variable_to_adi(xr_var: xr.DataArray, adi_dataset: cds3.Group):
-    """-----------------------------------------------------------------------
-    Add a new variable specified by an xarray DataArray to the given ADI
-    dataset.
-    -----------------------------------------------------------------------"""
-    # First create the variable
-    cds_type = get_cds_type(xr_var.data)
-    dim_names = xr_dims = list(xr_var.dims)
-    adi_var = dsproc.define_var(adi_dataset, xr_var.name, cds_type, dim_names)
-
-    # Now assign attributes
-    _sync_attrs(xr_var.attrs, adi_var)
-
-    # Now set the data
-    _set_adi_variable_data(xr_var, adi_var)
-
-    # Finally, need to check SpecialXrAttributes.COORDINATE_SYSTEM and SpecialXrAttributes.OUTPUT_TARGETS
-    coord_sys_name = xr_var.attrs.get(SpecialXrAttributes.COORDINATE_SYSTEM, None)
-    dsproc.set_var_coordsys_name(adi_var, coord_sys_name)
-
-    output_targets = xr_var.attrs.get(SpecialXrAttributes.OUTPUT_TARGETS, None)
-    if output_targets is not None:
-        for dsid, datastream_variable_name in output_targets:
-            dsproc.add_var_output_target(adi_var, dsid, datastream_variable_name)
-
-
-def _set_time_variable_data_if_needed(xr_var: xr.DataArray, adi_var: cds3.Var):
-    """-----------------------------------------------------------------------
-    Check to see if the time values have changed, and if so, then push back to
-    ADI.  We can't rely on the data pointer for time, because the times are
-    converted into datetime64 objects for xarray.
-
-    TODO: if this becomes a performance issue to do this comparison, then we
-    can add a parameter to the sync dataset method so that the user can
-    explicitly declare whether syncing the time variable is needed or not
-    -----------------------------------------------------------------------"""
-    # astype will produce nanosecond precision, so we have to convert to seconds
-    timevals = xr_var.data.astype('float') / 1000000000
-
-    # We have to truncate to 6 decimal places so it matches ADI
-    timevals = np.around(timevals, 6)
-
-    # Compare with the original values to see if there have been any changes
-    adi_times = dsproc.get_sample_timevals(adi_var, 0)
-
-    if np.array_equal(timevals, adi_times) is False:
-
-        # Wipe out any existing data
-        adi_var.delete_data()
-
-        # Set the timevals in seconds in ADI
-        sample_count = xr_var.sizes[xr_var.dims[0]]
-        dsproc.set_sample_timevals(adi_var, 0, sample_count, timevals)
-
-
-def _set_adi_variable_data(xr_var: xr.DataArray, adi_var: cds3.Var):
-    """-----------------------------------------------------------------------
-    For the given Xarray DataArray, copy the data values back to ADI.  This
-    method will only be called if the deveoper has replaced the original
-    DataArray of values with a new array by means of a Python operation.
-    In this case, the data values will no longer be mapped directly to the
-    ADI data structure, so they will have to be manually copied over.
-    -----------------------------------------------------------------------"""
-    missing_value = None
-    missing_values = xr_var.attrs.get(ADIAtts.MISSING_VALUE)
-    if missing_values is not None and len(missing_values) > 0:
-        missing_value = missing_values[0]
-
-    sample_start = 0
-
-    # Get the length of the first dimension
-    sample_count = 1
-    if len(xr_var.dims) > 0:
-        sample_count = xr_var.sizes[xr_var.dims[0]]
-
-    # Wipe out any existing data
-    adi_var.delete_data()
-
-    # Store the new values
-    status = dsproc.set_var_data(adi_var, sample_start, sample_count, missing_value, xr_var.data)
-
-    if status is None:
-        raise Exception(f'Could not set data for variable {adi_var.get_name()}')
-
-
-def _sync_vars(xr_dataset: xr.Dataset, adi_dataset: cds3.Group):
-    """-----------------------------------------------------------------------
-    Sync Xarray variables back to ADI dataset by checking if:
-        - a variable's attributes were changed
-        - a variable’s dimensions changed
-        - a variable was added or deleted
-        - a variable's data array was replaced
-
-    Args:
-        xr_dataset (xr.Dataset):
-            The xarray dataset to sync
-
-        adi_dataset (csd3.Group):
-            The ADI group where changes will be applied
-
-    -----------------------------------------------------------------------"""
-    adi_vars = {var.get_name() for var in adi_dataset.get_vars()}
-    xr_vars = {var_name for var_name in xr_dataset.variables}
-
-    # First remove deleted vars from the dataset
-    deleted_vars = [var_name for var_name in adi_vars if var_name not in xr_vars]
-    for var_name in deleted_vars:
-        adi_var = dsproc.get_var(adi_dataset, var_name)
-        dsproc.delete_var(adi_var)
-
-    # Then add new vars to the dataset
-    added_vars = [var_name for var_name in xr_vars if var_name not in adi_vars]
-    for var_name in added_vars:
-        xr_var: xr.DataArray = xr_dataset.get(var_name)
-        _add_variable_to_adi(xr_var, adi_dataset)
-
-    # Now sync up the remaining variables if they have been changed
-    other_vars = [var_name for var_name in xr_vars if var_name not in added_vars]
-    for var_name in other_vars:
-        xr_var: xr.DataArray = xr_dataset.get(var_name)
-        adi_var: cds3.Var = dsproc.get_var(adi_dataset, var_name)
-
-        # Check if dims have changed
-        adi_dims: List[str] = adi_var.get_dim_names()
-        xr_dims = list(xr_var.dims)
-        if adi_dims != xr_dims:
-            raise Exception('Changing dimensions on an existing variable is not supported by ADI!')
-
-        # sync attributes
-        _sync_attrs(xr_var.attrs, adi_var)
-
-        # sync data
-        # If the data pointer has changed or does not exist, we need to wipe out any previous data and then
-        # create new data for the adi variable
-        adi_data = adi_var.get_datap()
-        adi_pointer = adi_data.__array_interface__['data'][0] if adi_data is not None else None
-        xr_pointer = xr_var.data.__array_interface__['data'][0]
-
-        # If the pointers don't match, then we also compare all the values in each data array,
-        # and only if values are different do we sync the values back to ADI.  Note:  we have
-        # to do this because in the xarray dataset.from_dict() method, xarray always changes
-        # the arrays of coordinate variables, so the pointers will never match.
-        if adi_pointer != xr_pointer:
-            if var_name == 'time':
-                _set_time_variable_data_if_needed(xr_var, adi_var)
-            elif adi_data is None or (adi_data != xr_var.data).any():
-                _set_adi_variable_data(xr_var, adi_var)
-
-        # TODO: I don't think we need to change SpecialXrAttributes.COORDINATE_SYSTEM on an existing
-        # variable, but if there is a use case for it, we should add it here
-
-        # TODO: I don't think we need to change SpecialXrAttributes.OUTPUT_TARGETS on an existing
-        # variable, but if there is a use case for it, we should add it here
-
-
-def sync_xarray(xr_dataset: xr.Dataset, adi_dataset: cds3.Group):
-    """-----------------------------------------------------------------------
-    Carefully inspect the xr.Dataset and synchronize any changes back to the
-    given ADI dataset.
-
-    Args:
-        xr_dataset (xr.Dataset): The XArray dataset to sync
-
-        adi_dataset (csd3.Group): The ADI dataset where changes will be applied
-
-    -----------------------------------------------------------------------"""
-
-    # Sync global attributes
-    _sync_attrs(xr_dataset.attrs, adi_dataset)
-
-    # Sync dimensions
-    _sync_dims(xr_dataset.sizes, adi_dataset)
-
-    # Sync variables
-    _sync_vars(xr_dataset, adi_dataset)
-
-
-def get_xr_dataset(dataset_type: ADIDatasetType, datastream_name: str, coordsys_name: str = None) -> Union[xr.Dataset,  List[xr.Dataset]]:
-    """-----------------------------------------------------------------------
-    Get an ADI dataset converted to an xarray.Dataset.
-
-    Args:
-        dataset_type (ADIDatasetType):
-            The type of the dataset to convert (RETRIEVED, TRANSFORMED, OUTPUT)
-
-        datastream_name (str):
-            The name of one of the process' datastreams as specified in the PCM.
-
-        coordsys_name (str):
-            Optional parameter used only to find TRANSFORMED datasets.  Must be a coordinate
-            system specified in the PCM or None if no coordinate system was specified.
-
-    Returns:
-        Union[xr.Dataset,  List[xr.Dataset]]:  Most of the time, return a single xr.Dataset.
-        If the process is using file-based processing or if there are multiple
-        files for the same datastream and a dimensionality conflict prevented the files
-        from being merged, then return a List[XArray.Dataset], one for each file.
-    -----------------------------------------------------------------------"""
-    datasets = []
-
-    dsid = get_dataset_id(datastream_name)
-    if dsid is not None:
-        for i in itertools.count(start=0):
-
-            if dataset_type is ADIDatasetType.RETRIEVED:
-                adi_dataset = dsproc.get_retrieved_dataset(dsid, i)
-            elif dataset_type is ADIDatasetType.TRANSFORMED:
-                adi_dataset = dsproc.get_transformed_dataset(coordsys_name, dsid, i)
-            else:
-                adi_dataset = dsproc.get_output_dataset(dsid, i)
-
-            if not adi_dataset:
-                break
-
-            xr_dataset: xr.Dataset = to_xarray(adi_dataset)
-
-            # Add special metadata
-            xr_dataset.attrs[SpecialXrAttributes.DATASET_TYPE] = dataset_type
-            xr_dataset.attrs[SpecialXrAttributes.DATASTREAM_NAME] = datastream_name
-            xr_dataset.attrs[SpecialXrAttributes.OBS_INDEX] = i
-            if coordsys_name is not None:
-                xr_dataset.attrs[SpecialXrAttributes.COORDINATE_SYSTEM] = coordsys_name
-
-            datasets.append(xr_dataset)
-
-    if len(datasets) == 0:
-        return None
-    elif len(datasets) == 1:
-        return datasets[0]
-    else:
-        return datasets
-
-
-def sync_xr_dataset(xr_dataset: xr.Dataset):
-    """-----------------------------------------------------------------------
-    Sync the contents of the given XArray.Dataset with the corresponding ADI
-    data structure.
-
-    Args:
-        xr_dataset (xr.Dataset):  The xr.Dataset(s) to sync.
-
-    -----------------------------------------------------------------------"""
-    datastream_name = xr_dataset.attrs[SpecialXrAttributes.DATASTREAM_NAME]
-    dataset_type = xr_dataset.attrs[SpecialXrAttributes.DATASET_TYPE]
-    obs_index = xr_dataset.attrs[SpecialXrAttributes.OBS_INDEX]
-    coordsys_name = xr_dataset.attrs.get(SpecialXrAttributes.COORDINATE_SYSTEM)
-    dsid = get_dataset_id(datastream_name)
-
-    if dataset_type is ADIDatasetType.RETRIEVED:
-        adi_dataset = dsproc.get_retrieved_dataset(dsid, obs_index)
-    elif dataset_type is ADIDatasetType.TRANSFORMED:
-        adi_dataset = dsproc.get_transformed_dataset(coordsys_name, dsid, obs_index)
-    else:
-        adi_dataset = dsproc.get_output_dataset(dsid, obs_index)
-
-    sync_xarray(xr_dataset, adi_dataset)
-
-
-def get_datastream_files(datastream_name: str, begin_date: int, end_date: int) -> List[str]:
-    """-----------------------------------------------------------------------
-    Return the full path to each data file found for the given datastream name
-    and time range.
-
-    Args:
-        datastream_name (str): the datastream name (e.g., "met.b1")
-
-        begin_date (int): the begin timestamp of the current processing interval
-            (seconds since 1970)
-
-        end_date (int): the end timestamp of the current processing interval
-            (seconds since 1970)
-
-    Returns:
-        List[str]: A list of file paths that match the datastream query.
-    -----------------------------------------------------------------------"""
-    dsid = get_dataset_id(datastream_name)
-    datastream_path = dsproc.datastream_path(dsid)
-    files = dsproc.find_datastream_files(dsid, begin_date, end_date)
-    file_paths = []
-    for file in files:
-        file_path = f"{datastream_path}/{file}"
-        file_paths.append(file_path)
-
-    return file_paths
-
+"""---------------------------------------------------------------------------
+This module provides a variety of functions that are used by the Process
+class to serialize/deserialize between ADI and XArray.  Most of these utility
+functions are used within Process class methods and are generally not intended
+to be called directly by developers when implementing a specific Process subclass.
+---------------------------------------------------------------------------"""
+import itertools
+import os
+from typing import Any, Callable, Dict, List, Optional, Union, NamedTuple
+
+import numpy as np
+import pandas as pd
+import xarray as xr
+
+import cds3
+import dsproc3 as dsproc
+from .constants import SpecialXrAttributes, ADIAtts, ADIDatasetType
+from .exception import SkipProcessingIntervalException
+from .logger import ADILogger
+
+
+class DatastreamIdentifier(NamedTuple):
+    """-----------------------------------------------------------------------
+    NamedTuple class that holds various information used to identify a specific
+    ADI dataset.
+    -----------------------------------------------------------------------"""
+    datastream_name: str
+    site: str
+    facility: str
+    dsid: int
+
+
+def is_empty_function(func: Callable) -> bool:
+    """-----------------------------------------------------------------------
+    Evaluates a given function to see if the code contains anything more than
+    docstrings and 'pass'.  If not, it is considered an 'empty' function.
+
+    Args:
+        func (Callable): The function to evaluate.
+
+    Returns:
+        bool: True if the function is empty, otherwise False.
+
+    -----------------------------------------------------------------------"""
+
+    def empty_func():
+        pass
+
+    def empty_func_with_doc():
+        """Empty function with docstring."""
+        pass
+
+    return func.__code__.co_code == empty_func.__code__.co_code or \
+           func.__code__.co_code == empty_func_with_doc.__code__.co_code
+
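+# Example (illustrative only, not part of the ADI API): a hook containing
+# nothing but a docstring and ``pass`` compiles to the same bytecode as the
+# reference functions above, so it is detected as empty and can be skipped:
+#
+#     def unused_hook():
+#         """This hook is not implemented."""
+#         pass
+#
+#     is_empty_function(unused_hook)            # -> True
+#     is_empty_function(lambda: print('hi'))    # -> False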
+
+def adi_hook_exception_handler(hook_func: Callable,
+                               pre_hook_func: Callable = None,
+                               post_hook_func: Callable = None) -> Callable:
+    """-----------------------------------------------------------------------
+    Python function decorator used to consistently handle exceptions in hooks
+    so that they return the proper integer value to ADI core.  Also used to
+    ensure that consistent logging and debug dumps happen for hook methods.
+
+    Args:
+        hook_func (Callable): The original hook function implemented by
+            the developer.
+
+        pre_hook_func (Callable): An optional function to be invoked right
+            before the hook function (e.g., to do debug dumps).
+
+        post_hook_func (Callable): An optional function to be invoked right
+            after the hook function (e.g., to do debug dumps).
+
+    Returns:
+        Callable: Decorator function that wraps the original hook function to
+        provide built-in, ADI-compliant logging and exception handling.
+    -----------------------------------------------------------------------"""
+    hook_name = hook_func.__name__
+
+    def wrapper_function(*args, **kwargs):
+        ret_val = 1
+
+        # Only run the hook if it is not empty!!
+        if not is_empty_function(hook_func):
+            try:
+                if pre_hook_func is not None:
+                    pre_hook_func()
+
+                ADILogger.debug(f"**************** Starting {hook_name} ****************\n")
+                hook_func(*args, **kwargs)
+
+                if post_hook_func is not None:
+                    post_hook_func()
+
+            except SkipProcessingIntervalException:
+                ret_val = 0
+
+            except Exception as e:
+                # Any other exception we treat as a fatal error
+                # TODO: should we catch other exceptions and then set specific statuses??
+                # e.g., dsproc.set_status("Required Variable(s) Not Found In Retrieved Data")
+                # e.g., dsproc.set_status("Required Variable(s) Not Found In Transformed Data")
+                ret_val = -1
+
+                # Make sure to log the stack trace to the log
+                ADILogger.exception(f"Hook {hook_name} failed.")
+
+                # If we are in debug mode, then raise the exception back so that it can be better
+                # utilized by debuggers.
+                mode = os.environ.get('ADI_PY_MODE', 'production').lower()
+                if mode == 'development':
+                    raise e
+
+            finally:
+                ADILogger.debug(f"**************** Finished {hook_name} ****************\n")
+
+        return ret_val
+
+    return wrapper_function
+
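+# Example (a minimal sketch, with a hypothetical hook name): wrapping a hook
+# converts exceptions into the integer statuses ADI core expects
+# (1 = success, 0 = skip processing interval, -1 = fatal error):
+#
+#     def pre_transform_hook(begin_date, end_date):
+#         ...
+#
+#     wrapped = adi_hook_exception_handler(pre_transform_hook)
+#     status = wrapped(begin_date, end_date)    # -> 1, 0, or -1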
+
+def get_datastream_id(datastream_name: str, site: str = None, facility: str = None,
+                      dataset_type: ADIDatasetType = None) -> Optional[int]:
+    """-----------------------------------------------------------------------
+    Gets the corresponding dataset id for the given datastream (input or output)
+
+    Args:
+        datastream_name (str):  The name of the datastream to find
+
+        site (str):
+            Optional parameter used only to find some input datasets (RETRIEVED or TRANSFORMED).
+            Site is only required if the retrieval rules in the PCM specify two different
+            rules for the same datastream that differ by site.
+
+        facility (str):
+            Optional parameter used only to find some input datasets (RETRIEVED or TRANSFORMED).
+            Facility is only required if the retrieval rules in the PCM specify two different
+            rules for the same datastream that differ by facility.
+
+        dataset_type (ADIDatasetType):
+            The type of the dataset to convert (RETRIEVED, TRANSFORMED, OUTPUT)
+
+    Returns:
+        Optional[int]: The dataset id or None if not found
+    -----------------------------------------------------------------------"""
+
+    def find_datastream_dsid(dsids):
+
+        for ds_id in dsids:
+            level = dsproc.datastream_class_level(ds_id)
+            cls = dsproc.datastream_class_name(ds_id)
+
+            if datastream_name == f"{cls}.{level}":
+                match = True
+
+                if site:
+                    datastream_site = dsproc.datastream_site(ds_id)
+                    if datastream_site != site.lower():
+                        match = False
+
+                if facility:
+                    datastream_fac = dsproc.datastream_facility(ds_id)
+                    if datastream_fac != facility.upper():
+                        match = False
+
+                if match:
+                    return int(ds_id)
+
+        return None
+
+    if dataset_type == ADIDatasetType.RETRIEVED or dataset_type == ADIDatasetType.TRANSFORMED:
+        dsid = find_datastream_dsid(dsproc.get_input_datastream_ids())
+
+    elif dataset_type == ADIDatasetType.OUTPUT:
+        dsid = find_datastream_dsid(dsproc.get_output_datastream_ids())
+
+    else:
+        dsid = find_datastream_dsid(dsproc.get_input_datastream_ids())
+
+        if dsid is None:
+            dsid = find_datastream_dsid(dsproc.get_output_datastream_ids())
+
+    return dsid
+
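+# Example (hypothetical datastream name): look up a dsid, disambiguating by
+# site/facility only when the PCM retrieval rules differ by site or facility:
+#
+#     dsid = get_datastream_id('met.b1', dataset_type=ADIDatasetType.RETRIEVED)
+#     dsid = get_datastream_id('met.b1', site='sgp', facility='C1',
+#                              dataset_type=ADIDatasetType.RETRIEVED)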
+
+def add_vartag_attributes(xr_var_attrs: Dict, adi_var: cds3.Var):
+    """-----------------------------------------------------------------------
+    For the given ADI Variable, extract the source_ds_name and source_var_name
+    from the ADI var tags and add them to the attributes Dict to be used
+    for the XArray variable.
+
+    Note:
+        Currently we are not including the coordinate system and output
+        targets as part of the XArray variable's attributes since these
+        are unlikely to be changed.  If a user creates a new variable, then
+        they should call the corresponding Process methods
+        assign_coordinate_system_to_variable or assign_output_datastream_to_variable
+        to add the new variable to the designated coordinate system or
+        output datastream, respectively.
+
+    Args:
+        xr_var_attrs (Dict): A Dictionary of attributes to be assigned to the
+            XArray variable.
+
+        adi_var (cds3.Var): The original ADI variable object.
+
+    -----------------------------------------------------------------------"""
+    source_ds_name = dsproc.get_source_ds_name(adi_var)
+    if source_ds_name:
+        xr_var_attrs[SpecialXrAttributes.SOURCE_DS_NAME] = source_ds_name
+
+    source_var_name = dsproc.get_source_var_name(adi_var)
+    if source_var_name:
+        xr_var_attrs[SpecialXrAttributes.SOURCE_VAR_NAME] = source_var_name
+
+    # TODO: I decided it's confusing to have the output dataset and coord sys
+    # copied automatically, so user has to explicitly set this if they create
+    # a new variable.
+    # Add the output targets in case the user creates a new variable and it
+    # does not have output datastream defined
+    # output_targets = dsproc.get_var_output_targets(adi_var)
+    # if output_targets:
+    #     xr_output_targets = {}
+    #     for output_target in output_targets:
+    #         dsid = output_target.ds_id
+    #         ds_var_name = output_target.var_name
+    #         xr_output_targets[dsid] = ds_var_name
+    #
+    #     xr_var_attrs[SpecialXrAttributes.OUTPUT_TARGETS] = xr_output_targets
+
+
+def get_empty_ndarray_for_var(adi_var: cds3.Var, attrs: Dict = None) -> np.ndarray:
+    """-----------------------------------------------------------------------
+    For the given ADI variable object, initialize an empty numpy ndarray data
+    array with the correct shape and data type.  All values will be filled
+    with the appropriate fill value.  The rules for selecting a fill value
+    are as follows:
+
+        - If this is a qc variable, the missing value bit flag will be used.
+          If there is no missing value bit, then the failed transformation bit
+          flag will be used.  If there is no failed transformation bit, then
+          _FillValue will be used.  If there is no _FillValue, then the netcdf
+          default fill value for the integer data type will be used.
+        - Else if a missing_value attribute is available, missing_value will be used
+        - Else if a _FillValue attribute is available, _FillValue will be used
+        - Else use the netcdf default fill value for the variable's data type
+
+    Args:
+        adi_var (cds3.Var): The ADI variable object
+        attrs (Dict):  A Dictionary of attributes that will be assigned to the
+            variable when it is converted to XArray.  If not provided, it
+            will be created from the ADI variable's attrs.
+
+    Returns:
+        np.ndarray: An empty ndarray of the same shape as the variable.
+    -----------------------------------------------------------------------"""
+
+    if attrs is None:
+        adi_atts: List[cds3.Att] = adi_var.get_atts()
+        attrs = {att.get_name(): dsproc.get_att_value(adi_var, att.get_name(), att.get_type()) for att in adi_atts}
+
+    # Create a data array for this variable with empty values.
+    fill_value = None
+
+    # Look up the _FillValue attribute
+    _fill_value = attrs.get(ADIAtts.FILL_VALUE)
+
+    # Figure out the fill value to use based upon the var's metadata
+    missing_values = dsproc.get_var_missing_values(adi_var)
+
+    if adi_var.get_name().startswith('qc_'):
+        # If this is a qc_ var, then first call dsproc_get_missing_value_bit_flag to see if there is a missing value
+        # bit set.
+        # dsproc.get_missing_value_bit_flag() will find a match if the bit description matches the value used in the
+        # current DOD ingest template as well as common permutations.
+        bit_descriptions = dsproc.get_qc_bit_descriptions(adi_var)
+        missing_flag = dsproc.get_missing_value_bit_flag(bit_descriptions)
+        if missing_flag > 0:
+            fill_value = missing_flag
+
+        else:
+            # If not, then we check to see if the bit 1 description contains missing value descriptions found in other
+            # DOD templates
+            if len(bit_descriptions) > 0 and 'Transformation could not finish' in bit_descriptions[0]:
+                # "Transformation could not finish" is found in VAP DOD template
+                fill_value = 1
+
+            elif len(bit_descriptions) > 0 and 'Data value not available' in bit_descriptions[0]:
+                # "Datat value not available" was used in old Ingest DOD template
+                fill_value = 1
+
+            elif _fill_value:
+                # If there is no missing value bit or transformation failed bit, then we will use the fill value attr
+                # if it exists.
+                fill_value = _fill_value
+
+            else:
+                # Get the default NetCDF4 fill value for this data type
+                fill_value = adi_var.get_default_fill_value()
+
+    elif missing_values and len(missing_values) > 0:
+        fill_value = missing_values[0]
+
+    elif _fill_value:
+        fill_value = _fill_value
+
+    else:
+        # Get the default NetCDF4 fill value for this data type
+        fill_value = adi_var.get_default_fill_value()
+
+    # Get the np dtype for the variable
+    dtype = dsproc.cds_type_to_dtype_obj(adi_var.get_type())
+
+    # Get the shape of the data
+    shape = []
+    for dim in adi_var.get_dims():
+        shape.append(dim.get_length())
+
+    # Create a np.ndarray from the shape using np.full()
+    # https://numpy.org/doc/stable/reference/generated/numpy.full.html
+    data = np.full(shape, fill_value, dtype=dtype)
+    return data
+
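+# Example (illustrative): pre-fill a variable that has no data yet; the result
+# matches the variable's shape and dtype and is filled per the rules above:
+#
+#     data = get_empty_ndarray_for_var(adi_var)
+#     assert data.shape == tuple(dim.get_length() for dim in adi_var.get_dims())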
+
+def get_time_data_as_datetime64(time_var: cds3.Var) -> np.ndarray:
+    """-----------------------------------------------------------------------
+    Get the time values from dsproc as seconds since 1970, then convert those
+    values to datetime64 with microsecond precision.
+
+    Args:
+        time_var (cds3.Var): An ADI time variable object
+
+    Returns:
+        np.ndarray: An ndarray of the same shape as the variable with time
+        values converted to the np.datetime64 data type with microsecond
+        precision.
+    -----------------------------------------------------------------------"""
+    microsecond_times = np.asarray(dsproc.get_sample_timevals(time_var, 0)) * 1000000
+    datetime64_times = np.array(pd.to_datetime(microsecond_times, unit='us'), np.datetime64)
+    return datetime64_times
+
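+# Example (illustrative values): a sample time of 1.5 seconds since 1970 is
+# scaled to 1500000 microseconds and converted, yielding
+# np.datetime64('1970-01-01T00:00:01.500000'):
+#
+#     times = get_time_data_as_datetime64(time_var)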
+
+def get_adi_var_as_dict(adi_var: cds3.Var) -> Dict:
+    """-----------------------------------------------------------------------
+    Convert the given adi variable to a dictionary that can be used to create
+    an xarray dataarray.
+
+    Args:
+        adi_var (cds3.Var): An ADI variable object
+
+    Returns:
+        Dict: A Dictionary representation of the variable that can be used
+        in the XArray.DataArray constructor to initialize a corresponding
+        XArray variable.
+    -----------------------------------------------------------------------"""
+    # Get the variable's dimensions
+    dims = [dim.get_name() for dim in adi_var.get_dims()]
+
+    # Get the variable's attributes
+    adi_atts: List[cds3.Att] = adi_var.get_atts()
+    attrs = {att.get_name(): dsproc.get_att_value(adi_var, att.get_name(), att.get_type()) for att in adi_atts}
+
+    # Now add special attributes for the variable tags
+    add_vartag_attributes(attrs, adi_var)
+
+    # If the variable is 'time' then we will convert values to datetime64 data types with microsecond precision
+    if adi_var.get_name() == 'time':
+        data = get_time_data_as_datetime64(adi_var)
+
+    else:
+        # This method uses np.PyArray_SimpleNewFromData to convert adi data array to np.ndarray
+        # =====> It will return None if the variable has no data
+        data = adi_var.get_datap()
+
+        if data is None:
+            # We need to initialize the DataArray with empty values.
+            data = get_empty_ndarray_for_var(adi_var, attrs)
+
+    return {
+        'dims': dims,
+        'attrs': attrs,
+        'data': data
+    }
+
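+# Example (illustrative structure): the returned dict can be passed to
+# xr.DataArray.from_dict(), e.g.:
+#
+#     d = get_adi_var_as_dict(adi_var)
+#     # d == {'dims': [...], 'attrs': {...}, 'data': <np.ndarray>}
+#     da = xr.DataArray.from_dict(d)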
+
+def get_dataset_dims(adi_dataset: cds3.Group) -> List[cds3.Dim]:
+    # Loop through parent groups to pull dims that may be associated with a parent coord system group
+    adi_dims = []
+    group: cds3.Group = adi_dataset
+    while group:
+        group_dims = group.get_dims()
+        for dim in group_dims:
+            adi_dims.append(dim)
+        group = group.get_parent()
+
+    return adi_dims
+
+
+def get_dataset_vars(adi_dataset: cds3.Group) -> List[cds3.Var]:
+    # Loop through parent groups to pull vars that may be associated with a parent coord system group
+    adi_vars = []
+    group: cds3.Group = adi_dataset
+    while group:
+        group_vars = group.get_vars()
+        for var in group_vars:
+            adi_vars.append(var)
+
+        group = group.get_parent()
+    return adi_vars
+
+
+def to_xarray(adi_dataset: cds3.Group) -> xr.Dataset:
+    """-----------------------------------------------------------------------
+    Convert the specified CDS.Group into an XArray dataset.
+    Attributes will be copied, but the DataArrays for each variable
+    will be backed by an np.ndarray that links directly to the C
+    ADI data via np.PyArray_SimpleNewFromData
+
+    Args:
+        adi_dataset (cds3.Group): An ADI dataset object.
+
+    Returns:
+        xr.Dataset: The corresponding XArray dataset object.
+    -----------------------------------------------------------------------"""
+    # Get Global attrs
+    adi_atts: List[cds3.Att] = adi_dataset.get_atts()
+    attrs = {att.get_name(): dsproc.get_att_value(adi_dataset, att.get_name(), att.get_type()) for att in adi_atts}
+
+    # Loop through parent groups to pull dims, coordinate variables, and bounds vars that may be
+    # associated with parent coord system group
+    adi_dims = get_dataset_dims(adi_dataset)
+    adi_vars = get_dataset_vars(adi_dataset)
+
+    # Convert adi dims to dictionary
+    dims = {dim.get_name(): dim.get_length() for dim in adi_dims}
+
+    # Find the coordinate variable names
+    coord_var_names = []
+    for dim in adi_dims:
+        adi_var = dim.get_var()
+        if adi_var is not None:
+            coord_var_names.append(adi_var.get_name())
+
+    # Get coordinate & data variables
+    coords = {}
+    data_vars = {}
+    for adi_var in adi_vars:
+        var_name = adi_var.get_name()
+        var_as_dict = get_adi_var_as_dict(adi_var)
+        if var_name in coord_var_names:
+            coords[var_name] = var_as_dict
+        else:
+            data_vars[var_name] = var_as_dict
+
+    # Create a dictionary from the values
+    data_dict = {
+        'attrs': attrs,
+        'dims': dims,
+        'coords': coords,
+        'data_vars': data_vars
+    }
+
+    dataset = xr.Dataset.from_dict(data_dict)
+    return dataset
+
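+# Example (a minimal sketch): convert an ADI dataset and inspect it with
+# ordinary xarray tooling; variable data stays backed by the ADI memory:
+#
+#     ds = to_xarray(adi_dataset)
+#     print(ds.sizes, list(ds.data_vars))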
+
+def get_cds_type(value: Any) -> int:
+    """-----------------------------------------------------------------------
+    For a given Python data value, convert the data type into the corresponding
+    ADI CDS data type.
+
+    Args:
+        value (Any): Can be a single value, a List of values, or a numpy.ndarray
+            of values.
+
+    Returns:
+        int: The corresponding CDS data type
+    -----------------------------------------------------------------------"""
+    val = value
+
+    # Convert value to a numpy array so we can use dsproc method which
+    # only works if value is a numpy ndarray
+    if isinstance(value, list):
+        val = np.array(value)
+
+    elif not isinstance(value, np.ndarray):
+        # We need to wrap value in a list so np constructor doesn't get confused
+        # if value is numeric
+        val = np.array([value])
+
+    if val.dtype.type == np.str_:
+        # This comparison always fails from within the Cython code, because
+        # there dtype.type is 85 instead of np.str_, so we handle any string
+        # type here instead.
+        cds_type = cds3.CHAR
+
+    else:
+        cds_type = dsproc.dtype_to_cds_type(val.dtype)
+
+    return cds_type
+
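+# Example (illustrative; exact integer types depend on the platform default):
+#
+#     get_cds_type(1.5)                        # -> cds3.DOUBLE
+#     get_cds_type('flag')                     # -> cds3.CHAR
+#     get_cds_type(np.array([1], np.int32))    # -> cds3.INT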
+
+def _sync_attrs(xr_atts_dict: Dict, adi_obj: cds3.Object):
+    """-----------------------------------------------------------------------
+    Sync Xarray attributes back to an ADI object (dataset or variable) by
+    checking if the following changes were made:
+
+        - Attribute values changed
+        - Attributes were added
+        - Attributes were deleted
+        - An attribute type changed
+
+    Args:
+        xr_atts_dict (Dict):
+            Dictionary of Xarray attributes, where the keys are
+            attribute names and the values are attribute values
+
+        adi_obj (cds3.Object):
+            ADI dataset or variable
+    -----------------------------------------------------------------------"""
+    # Get lists of attribute names for comparison between two lists
+    adi_atts = {att.get_name() for att in adi_obj.get_atts()}
+    xr_atts = []
+
+    for att_name in xr_atts_dict:
+        if att_name.startswith('__'):
+            # special attributes start with '__' and are handled separately
+            continue
+        xr_atts.append(att_name)
+
+    # First remove deleted atts
+    deleted_atts = [att_name for att_name in adi_atts if att_name not in xr_atts]
+    for att_name in deleted_atts:
+        adi_att = dsproc.get_att(adi_obj, att_name)
+        status = cds3.Att.delete(adi_att)
+        if status < 1:
+            raise Exception(f'Could not delete attribute {att_name}')
+
+    # Then add new atts
+    added_atts = [att_name for att_name in xr_atts if att_name not in adi_atts]
+    for att_name in added_atts:
+        att_value = xr_atts_dict.get(att_name)
+        cds_type = get_cds_type(att_value)
+        status = dsproc.set_att(adi_obj, 1, att_name, cds_type, att_value)
+        if status < 1:
+            raise Exception(f'Could not create attribute {att_name}')
+
+    # Next change the value for other atts if the value changed
+    other_atts = [att_name for att_name in xr_atts if att_name not in added_atts]
+    for att_name in other_atts:
+        att_value = xr_atts_dict.get(att_name)
+
+        # For now, if the att is already defined in adi, we assume that the user will not
+        # change the type, just the value.
+        cds_type = dsproc.get_att(adi_obj, att_name).get_type()
+        existing_value = dsproc.get_att_value(adi_obj, att_name, cds_type)
+
+        if not np.array_equal(att_value, existing_value):
+            status = dsproc.set_att(adi_obj, 1, att_name, cds_type, att_value)
+            if status < 1:
+                raise Exception(f'Could not update attribute {att_name}')
+
+
+def _sync_dims(xr_dims: Dict, adi_dataset: cds3.Group):
+    """-----------------------------------------------------------------------
+    Sync Xarray dimensions back to the ADI dataset by checking if:
+        - Any dimensions were deleted, and if so, attempting to delete them from the dataset
+        - Any dimensions were added, and if so, attempting to add them to the dataset
+        - Any dimension length (i.e., size) changed, and if so, attempting to change the length
+
+    Args:
+        xr_dims (Dict):
+            Dictionary of Xarray dimensions, where the keys are
+            dimension names and the values are dimension sizes
+
+        adi_dataset (cds3.Group):
+            ADI dataset
+    -----------------------------------------------------------------------"""
+
+    # key is dimension name, value is cds3.Dim object
+    adi_dim_objs: List[cds3.Dim] = get_dataset_dims(adi_dataset)
+    adi_dims = {dim.get_name(): dim for dim in adi_dim_objs}
+
+    # Check if dimension needs to be deleted from ADI dataset
+    deleted_dims = [dim_name for dim_name in adi_dims if dim_name not in xr_dims]
+
+    # Delete appropriate dimensions
+    for dim_name in deleted_dims:
+
+        adi_dim = adi_dims[dim_name]
+
+        # This function will also delete all variables that use the specified
+        # dimension.
+        status = cds3.Dim.delete(adi_dim)
+
+        if status == 0:
+            raise Exception(f'Could not delete dimension {dim_name}')
+
+    # Check if dimension needs to be added to ADI Dataset
+    added_dims = [dim_name for dim_name in xr_dims if dim_name not in adi_dims]
+
+    # Add appropriate dimensions (assume dimension is not unlimited)
+    is_unlimited = 0
+    for dim_name in added_dims:
+
+        dim_size = xr_dims[dim_name]
+        dim_obj = adi_dataset.define_dim(dim_name, dim_size, is_unlimited)
+        
+        if dim_obj is None:
+            raise Exception(f'Could not define dimension {dim_name}')
+
+    # Check if existing dimension size changed and set new value, if appropriate
+    existing_dims = [dim_name for dim_name in adi_dims if dim_name in xr_dims]
+
+    for dim_name in existing_dims:
+        adi_dim_size = adi_dims[dim_name].get_length()
+        xr_dim_size = xr_dims[dim_name]
+
+        if adi_dim_size != xr_dim_size:
+            status = dsproc.set_dim_length(adi_dataset, dim_name, xr_dim_size)
+
+            if status == 0:
+                raise Exception(f'Could not change dimension length of {dim_name}')
+
+
+def _add_variable_to_adi(xr_var: xr.DataArray, adi_dataset: cds3.Group):
+    """-----------------------------------------------------------------------
+    Add a new variable specified by an xarray DataArray to the given ADI
+    dataset.
+    -----------------------------------------------------------------------"""
+    # First create the variable
+    cds_type = get_cds_type(xr_var.data)
+    dim_names = list(xr_var.dims)
+    adi_var = dsproc.define_var(adi_dataset, xr_var.name, cds_type, dim_names)
+
+    # Now assign attributes
+    _sync_attrs(xr_var.attrs, adi_var)
+
+    # Now set the data
+    _set_adi_variable_data(xr_var, adi_var)
+
+    # Finally, need to check SpecialXrAttributes.COORDINATE_SYSTEM and SpecialXrAttributes.OUTPUT_TARGETS
+    coord_sys_name = xr_var.attrs.get(SpecialXrAttributes.COORDINATE_SYSTEM, None)
+    dsproc.set_var_coordsys_name(adi_var, coord_sys_name)
+
+    output_targets = xr_var.attrs.get(SpecialXrAttributes.OUTPUT_TARGETS, None)
+    if output_targets is not None:
+        for dsid, datastream_variable_name in output_targets:
+            dsproc.add_var_output_target(adi_var, dsid, datastream_variable_name)
+
+
+def _set_time_variable_data_if_needed(xr_var: xr.DataArray, adi_var: cds3.Var):
+    """-----------------------------------------------------------------------
+    Check to see if the time values have changed, and if so, then push back to
+    ADI.  We can't rely on the data pointer for time, because the times are
+    converted into datetime64 objects for xarray.
+
+    TODO: if this becomes a performance issue to do this comparison, then we
+    can add a parameter to the sync dataset method so that the user can
+    explicitly declare whether syncing the time variable is needed or not
+    -----------------------------------------------------------------------"""
+    # astype will produce nanosecond precision, so we have to convert to seconds
+    timevals = xr_var.data.astype('float') / 1000000000
+
+    # We have to truncate to 6 decimal places so it matches ADI
+    timevals = np.around(timevals, 6)
+
+    # Compare with the original values to see if there have been any changes
+    adi_times = dsproc.get_sample_timevals(adi_var, 0)
+
+    if not np.array_equal(timevals, adi_times):
+
+        # Wipe out any existing data
+        adi_var.delete_data()
+
+        # Set the timevals in seconds in ADI
+        dsproc.set_sample_timevals(adi_var, 0, timevals)
+
+
+def _set_adi_variable_data(xr_var: xr.DataArray, adi_var: cds3.Var):
+    """-----------------------------------------------------------------------
+    For the given Xarray DataArray, copy the data values back to ADI.  This
+    method will only be called if the developer has replaced the original
+    DataArray of values with a new array by means of a Python operation.
+    In this case, the data values will no longer be mapped directly to the
+    ADI data structure, so they will have to be manually copied over.
+    -----------------------------------------------------------------------"""
+    missing_value = None
+    missing_values = xr_var.attrs.get(ADIAtts.MISSING_VALUE)
+    if missing_values is not None and len(missing_values) > 0:
+        missing_value = missing_values[0]
+
+    sample_start = 0
+
+    # Get the length of the first dimension
+    sample_count = 1
+    if len(xr_var.dims) > 0:
+        sample_count = xr_var.sizes[xr_var.dims[0]]
+
+    # Wipe out any existing data
+    adi_var.delete_data()
+
+    # Store the new values
+    status = dsproc.set_var_data(adi_var, sample_start, sample_count, missing_value, xr_var.data)
+
+    if status is None:
+        raise Exception(f'Could not set data for variable {adi_var.get_name()}')
+
+
+def _sync_vars(xr_dataset: xr.Dataset, adi_dataset: cds3.Group):
+    """-----------------------------------------------------------------------
+    Sync Xarray variables back to ADI dataset by checking if:
+        - a variable's attributes were changed
+        - a variable's dimensions changed
+        - a variable was added or deleted
+        - a variable's data array was replaced
+
+    Args:
+        xr_dataset (xr.Dataset):
+            The xarray dataset to sync
+
+        adi_dataset (cds3.Group):
+            The ADI group where changes will be applied
+
+    -----------------------------------------------------------------------"""
+    adi_var_objs = get_dataset_vars(adi_dataset)
+    adi_vars = {var.get_name() for var in adi_var_objs}
+    xr_vars = {var_name for var_name in xr_dataset.variables}
+
+    # First remove deleted vars from the dataset
+    deleted_vars = [var_name for var_name in adi_vars if var_name not in xr_vars]
+    for var_name in deleted_vars:
+        adi_var = dsproc.get_var(adi_dataset, var_name)
+        dsproc.delete_var(adi_var)
+
+    # Then add new vars to the dataset
+    added_vars = [var_name for var_name in xr_vars if var_name not in adi_vars]
+    for var_name in added_vars:
+        xr_var: xr.DataArray = xr_dataset.get(var_name)
+        _add_variable_to_adi(xr_var, adi_dataset)
+
+    # Now sync up the remaining variables if they have been changed
+    other_vars = [var_name for var_name in xr_vars if var_name not in added_vars]
+    for var_name in other_vars:
+        xr_var: xr.DataArray = xr_dataset.get(var_name)
+        adi_var: cds3.Var = dsproc.get_var(adi_dataset, var_name)
+
+        if adi_var is None:
+            # adi_var may be None only for coordinate variables of transformed datasets, since the coordinate vars
+            # are stored on the parent Group in adi.  Users should never be changing the coordinate variables on
+            # transformed datasets, so we are skipping these.
+            ADILogger.info(f'Not syncing coordinate variable {var_name} in transformed dataset {adi_dataset.get_name()} because it comes from the parent coordinate system dataset.')
+            continue
+
+        # Check if dims have changed
+        adi_dims: List[str] = adi_var.get_dim_names()
+        xr_dims = list(xr_var.dims)
+        if adi_dims != xr_dims:
+            raise Exception('Changing dimensions on an existing variable is not supported by ADI!')
+
+        # sync attributes
+        _sync_attrs(xr_var.attrs, adi_var)
+
+        # sync data
+        # If the data pointer has changed or does not exist, we need to wipe out any previous data and then
+        # create new data for the adi variable
+        adi_data = adi_var.get_datap()
+        adi_pointer = adi_data.__array_interface__['data'][0] if adi_data is not None else None
+        xr_pointer = xr_var.data.__array_interface__['data'][0]
+
+        # If the pointers don't match, then we also compare all the values in each data array,
+        # and only if values are different do we sync the values back to ADI.  Note:  we have
+        # to do this because in the xarray dataset.from_dict() method, xarray always changes
+        # the arrays of coordinate variables, so the pointers will never match.
+        if adi_pointer != xr_pointer:
+            if var_name == 'time':
+                _set_time_variable_data_if_needed(xr_var, adi_var)
+            elif adi_data is None or (adi_data != xr_var.data).any():
+                _set_adi_variable_data(xr_var, adi_var)
+
+        # TODO: I don't think we need to change SpecialXrAttributes.COORDINATE_SYSTEM on an existing
+        # variable, but if there is a use case for it, we should add it here
+
+        # TODO: I don't think we need to change SpecialXrAttributes.OUTPUT_TARGETS on an existing
+        # variable, but if there is a use case for it, we should add it here
+
+
+def sync_xarray(xr_dataset: xr.Dataset, adi_dataset: cds3.Group):
+    """-----------------------------------------------------------------------
+    Carefully inspect the xr.Dataset and synchronize any changes back to the
+    given ADI dataset.
+
+    Args:
+        xr_dataset (xr.Dataset): The XArray dataset to sync
+
+        adi_dataset (cds3.Group): The ADI dataset where changes will be applied
+
+    -----------------------------------------------------------------------"""
+
+    # Sync global attributes
+    _sync_attrs(xr_dataset.attrs, adi_dataset)
+
+    # Sync dimensions
+    _sync_dims(xr_dataset.sizes, adi_dataset)
+
+    # Sync variables
+    _sync_vars(xr_dataset, adi_dataset)
+
+
+def get_xr_datasets(
+    dataset_type: ADIDatasetType,
+    dsid: Optional[int] = None,
+    datastream_name: Optional[str] = None,
+    site: Optional[str] = None,
+    facility: Optional[str] = None,
+    coordsys_name: Optional[str] = None
+) -> List[xr.Dataset]:
+    """-----------------------------------------------------------------------
+    Get ADI datasets converted to a list of xarray.Dataset objects.
+
+    Args:
+        dataset_type (ADIDatasetType):
+            The type of the dataset to convert (RETRIEVED, TRANSFORMED, OUTPUT)
+
+        dsid (int):
+            If the dsid is known, you can use it to look up the adi dataset.  If it is not known,
+            then use datastream_name, and optionally site/facility to identify the dataset.
+
+        datastream_name (str):
+            The name of one of the process' datastreams as specified in the PCM.
+
+        coordsys_name (str):
+            Optional parameter used only to find TRANSFORMED datasets.  Must be a coordinate
+            system specified in the PCM or None if no coordinate system was specified.
+
+        site (str):
+            Optional parameter used only to find some input datasets (RETRIEVED or TRANSFORMED).
+            Site is only required if the retrieval rules in the PCM specify two different
+            rules for the same datastream that differ by site.
+
+        facility (str):
+            Optional parameter used only to find some input datasets (RETRIEVED or TRANSFORMED).
+            Facility is only required if the retrieval rules in the PCM specify two different
+            rules for the same datastream that differ by facility.
+
+    Returns:
+        List[xr.Dataset]: Returns a list of xr.Datasets, one for each file. If there are
+            no files / datasets for the specified datastream / site / facility / coord system
+            then the list will be empty.
+    -----------------------------------------------------------------------"""
+    datasets: List[xr.Dataset] = []
+    if dsid is None:
+        dsid = get_datastream_id(datastream_name, site=site, facility=facility, dataset_type=dataset_type)
+
+    if dsid is not None:
+        for i in itertools.count(start=0):
+
+            if dataset_type is ADIDatasetType.RETRIEVED:
+                adi_dataset = dsproc.get_retrieved_dataset(dsid, i)
+            elif dataset_type is ADIDatasetType.TRANSFORMED:
+                adi_dataset = dsproc.get_transformed_dataset(coordsys_name, dsid, i)
+            else:
+                adi_dataset = dsproc.get_output_dataset(dsid, i)
+
+            if not adi_dataset:
+                break
+
+            xr_dataset: xr.Dataset = to_xarray(adi_dataset)
+
+            # Add special metadata
+            xr_dataset.attrs[SpecialXrAttributes.DATASET_TYPE] = dataset_type
+            xr_dataset.attrs[SpecialXrAttributes.DATASTREAM_DSID] = dsid
+            xr_dataset.attrs[SpecialXrAttributes.OBS_INDEX] = i
+            if coordsys_name is not None:
+                xr_dataset.attrs[SpecialXrAttributes.COORDINATE_SYSTEM] = coordsys_name
+
+            datasets.append(xr_dataset)
+
+    return datasets
+
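+# Example (hypothetical datastream name): fetch every observation of a
+# retrieved datastream as xarray Datasets:
+#
+#     datasets = get_xr_datasets(ADIDatasetType.RETRIEVED, datastream_name='met.b1')
+#     for ds in datasets:
+#         print(ds.attrs[SpecialXrAttributes.OBS_INDEX], ds.sizes)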
+
+def sync_xr_dataset(xr_dataset: xr.Dataset):
+    """-----------------------------------------------------------------------
+    Sync the contents of the given XArray.Dataset with the corresponding ADI
+    data structure.
+
+    Args:
+        xr_dataset (xr.Dataset):  The xr.Dataset to sync.
+
+    -----------------------------------------------------------------------"""
+    dataset_type = xr_dataset.attrs[SpecialXrAttributes.DATASET_TYPE]
+    obs_index = xr_dataset.attrs[SpecialXrAttributes.OBS_INDEX]
+    coordsys_name = xr_dataset.attrs.get(SpecialXrAttributes.COORDINATE_SYSTEM)
+    dsid = int(xr_dataset.attrs[SpecialXrAttributes.DATASTREAM_DSID])
+
+    if dataset_type is ADIDatasetType.RETRIEVED:
+        adi_dataset = dsproc.get_retrieved_dataset(dsid, obs_index)
+    elif dataset_type is ADIDatasetType.TRANSFORMED:
+        adi_dataset = dsproc.get_transformed_dataset(coordsys_name, dsid, obs_index)
+    else:
+        adi_dataset = dsproc.get_output_dataset(dsid, obs_index)
+
+    sync_xarray(xr_dataset, adi_dataset)
+
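+# Example (a minimal sketch with hypothetical names): the special attributes
+# added by get_xr_datasets() are what allow the dataset to be matched back to
+# its ADI counterpart:
+#
+#     ds = get_xr_datasets(ADIDatasetType.OUTPUT, datastream_name='met.b1')[0]
+#     ds['temperature'].attrs[ADIAtts.LONG_NAME] = 'Ambient air temperature'
+#     sync_xr_dataset(ds)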
+
+def get_datastream_files(dsid: int, begin_date: int, end_date: int) -> List[str]:
+    """-----------------------------------------------------------------------
+    Return the full path to each data file found for the given datastream
+    and time range.
+
+    Args:
+        dsid (int): the datastream id (call get_dsid() to retrieve)
+
+        begin_date (int): the begin timestamp of the current processing interval
+            (seconds since 1970)
+
+        end_date (int): the end timestamp of the current processing interval
+            (seconds since 1970)
+
+    Returns:
+        List[str]: A list of file paths that match the datastream query.
+    -----------------------------------------------------------------------"""
+    datastream_path = dsproc.datastream_path(dsid)
+    files = dsproc.find_datastream_files(dsid, begin_date, end_date)
+    file_paths = []
+    for file in files:
+        file_path = f"{datastream_path}/{file}"
+        file_paths.append(file_path)
+
+    return file_paths
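+
+# Example (a minimal sketch with hypothetical names): list the files for the
+# current processing interval:
+#
+#     dsid = get_datastream_id('met.b1', dataset_type=ADIDatasetType.RETRIEVED)
+#     files = get_datastream_files(dsid, begin_date, end_date)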
diff --git a/packages/adi_py/cds3/ccds3.pxd b/packages/adi_py/cds3/ccds3.pxd
index 2a79b9a..68dd058 100644
--- a/packages/adi_py/cds3/ccds3.pxd
+++ b/packages/adi_py/cds3/ccds3.pxd
@@ -729,6 +729,12 @@ cdef extern from "cds3.h" nogil:
     # Library Version
     char *cds_lib_version()
 
+    # New method to parse transform params as text string and apply to object
+    int cds_parse_transform_params(
+            CDSGroup *group,
+            char *string,
+            const char *path)
+
 #******************************************************************************
 # DEPRECATED
 #******************************************************************************
diff --git a/packages/adi_py/cds3/ccds3_enums.pxd b/packages/adi_py/cds3/ccds3_enums.pxd
index c831ee4..70c9044 100644
--- a/packages/adi_py/cds3/ccds3_enums.pxd
+++ b/packages/adi_py/cds3/ccds3_enums.pxd
@@ -34,6 +34,7 @@ cdef extern from "cds3.h" nogil:
     signed char CDS_FILL_BYTE
     short       CDS_FILL_SHORT
     int         CDS_FILL_INT
+    long long   CDS_FILL_INT64
     float       CDS_FILL_FLOAT
     double      CDS_FILL_DOUBLE
 
@@ -46,6 +47,8 @@ cdef extern from "cds3.h" nogil:
     short       CDS_MIN_SHORT
     int         CDS_MAX_INT
     int         CDS_MIN_INT
+    long long   CDS_MAX_INT64
+    long long   CDS_MIN_INT64
     float       CDS_MAX_FLOAT
     float       CDS_MIN_FLOAT
     double      CDS_MAX_DOUBLE
@@ -74,6 +77,7 @@ cdef extern from "cds3.h" nogil:
         CDS_BYTE
         CDS_SHORT
         CDS_INT
+        CDS_INT64
         CDS_FLOAT
         CDS_DOUBLE
 
diff --git a/packages/adi_py/cds3/core.pyx b/packages/adi_py/cds3/core.pyx
index 45e2ed3..7a29863 100644
--- a/packages/adi_py/cds3/core.pyx
+++ b/packages/adi_py/cds3/core.pyx
@@ -76,6 +76,8 @@ cdef inline int cds_type_to_dtype(CDSDataType cds_type) except -1:
         return np.NPY_SHORT
     elif cds_type == CDS_INT:
         return np.NPY_INT
+    elif cds_type == CDS_INT64:
+        return np.NPY_INT64
     elif cds_type == CDS_FLOAT:
         return np.NPY_FLOAT
     elif cds_type == CDS_DOUBLE:
@@ -95,6 +97,8 @@ cdef inline np.dtype cds_type_to_dtype_obj(CDSDataType cds_type):
         return np.dtype(np.int16)
     elif cds_type == CDS_INT:
         return np.dtype(np.int32)
+    elif cds_type == CDS_INT64:
+        return np.dtype(np.int64)
     elif cds_type == CDS_FLOAT:
         return np.dtype(np.float32)
     elif cds_type == CDS_DOUBLE:
@@ -1461,7 +1465,45 @@ cdef class Var(Object):
     def get_alloc_count(self):
         return self.c_ob.alloc_count
 
+    def get_default_fill_value(self):
+        """
+        Get the default fill value used by the NetCDF library.
+        """
+        # Initialize a character array of 16 bytes to store the value
+        cdef char fill_value_arr[16]
+        cdef CDSDataType cds_type = self.c_ob.type
+        cds_get_default_fill_value(cds_type, <void *>fill_value_arr)
+
+        fill_value = None
+
+        if cds_type == CDS_NAT:
+            raise ValueError("CDS_NAT")
+        elif cds_type == CDS_CHAR:
+            fill_value = (<char*>fill_value_arr)[0]
+        elif cds_type == CDS_BYTE:
+            fill_value = (<signed char*>fill_value_arr)[0]
+        elif cds_type == CDS_SHORT:
+            fill_value = (<short*>fill_value_arr)[0]
+        elif cds_type == CDS_INT:
+            fill_value = (<int*>fill_value_arr)[0]
+        elif cds_type == CDS_INT64:
+            fill_value = (<long long*>fill_value_arr)[0]
+        elif cds_type == CDS_FLOAT:
+            fill_value = (<float*>fill_value_arr)[0]
+        elif cds_type == CDS_DOUBLE:
+            fill_value = (<double*>fill_value_arr)[0]
+        else:
+            raise ValueError("Unknown CDSDataType %s" % cds_type)
+
+        return fill_value
+
     def get_default_fill(self):
+        """
+        I don't think this method works - it always returns NULL.  I don't think self.c_ob.default_fill
+        was ever obtained.  Use get_default_fill_value() instead.
+
+        TODO:  DELETE ME
+        """
         cdef void *fill_ptr = self.c_ob.default_fill
         cdef CDSDataType cds_type = self.c_ob.type
         if fill_ptr == NULL:
@@ -1476,6 +1518,8 @@ cdef class Var(Object):
             return (<short*>fill_ptr)[0]
         elif cds_type == CDS_INT:
             return (<int*>fill_ptr)[0]
+        elif cds_type == CDS_INT64:
+            return (<long long*>fill_ptr)[0]
         elif cds_type == CDS_FLOAT:
             return (<float*>fill_ptr)[0]
         elif cds_type == CDS_DOUBLE:
@@ -1830,6 +1874,7 @@ cdef class Var(Object):
         cdef signed char missing_signed_char
         cdef short missing_short
         cdef int missing_int
+        cdef long long missing_long
         cdef float missing_float
         cdef double missing_double
         cdef object missing_py
@@ -1843,6 +1888,8 @@ cdef class Var(Object):
             missing_ptr = &missing_short
         elif cds_type == CDS_INT:
             missing_ptr = &missing_int
+        elif cds_type == CDS_INT64:
+            missing_ptr = &missing_long
         elif cds_type == CDS_FLOAT:
             missing_ptr = &missing_float
         elif cds_type == CDS_DOUBLE:
@@ -1872,6 +1919,8 @@ cdef class Var(Object):
             missing_py = missing_short
         elif cds_type == CDS_INT:
             missing_py = missing_int
+        elif cds_type == CDS_INT64:
+            missing_py = missing_long
         elif cds_type == CDS_FLOAT:
             missing_py = missing_float
         elif cds_type == CDS_DOUBLE:
@@ -1880,6 +1929,14 @@ cdef class Var(Object):
             raise ValueError("Unknown CDSDataType")
         return array,missing_py
 
+    def attach_data(self, unsigned long datap, size_t sample_count):
+        """Point this variable's data at an externally owned buffer (no copy is made)."""
+        self.c_ob.data.vp = <void*> datap
+        self.c_ob.sample_count = sample_count
+
+    def detach_data(self):
+        """Release the reference to an attached buffer without freeing it."""
+        self.c_ob.data.vp = NULL
+        self.c_ob.sample_count = 0
+
     cpdef np.ndarray get_datap(self, size_t sample_start=0):
         """Get an ndarray for the the data in a CDS variable.
         
@@ -2019,6 +2076,88 @@ cdef class VarArray(Object):
         pass
 
 
+def set_front_edge_param(Group group, object dim_name, size_t length, np.ndarray data_nd):
+    """-----------------------------------------------------------------------------------------------------------------
+    Set the front_edge transform parameter.  Use this method in conjunction with a bounds variable.  Front edge
+    represents the first column in the 2-column bounds array for a coordinate variable.  This is used for bin
+    averaging.
+
+    Parameters
+    ----------
+    group : Group
+        If you are setting bounds on an input dataset, then you should pass the obs group.  If you are setting bounds
+        on the output transformed dataset, then you need to pass the coordinate system group.
+    dim_name : str
+        The name of the dimension the bounds belong to.
+    length : int
+        The length of the 1-d data array.
+
+    data_nd : np.ndarray
+        The data array as a numpy ndarray.  Data type MUST be double/float.
+
+    Returns
+    -------
+     -  1 if successful
+     -  0 if an error occurred
+    -----------------------------------------------------------------------------------------------------------------"""
+
+    cdef object b_dim_name = _to_byte_c_string(dim_name)  # convert to C string
+    cdef CDSGroup *cds_group = group.c_ob # Get underlying C pointer
+    cds_set_transform_param(cds_group, b_dim_name, "front_edge", CDS_DOUBLE, length, data_nd.data)
+
+
+def set_back_edge_param(Group group, object dim_name, size_t length, np.ndarray data_nd):
+    """-----------------------------------------------------------------------------------------------------------------
+    Set the back_edge transform parameter (the second column in the 2-column bounds array for a coordinate
+    variable).  See set_front_edge_param for details on the parameters and return value.
+    -----------------------------------------------------------------------------------------------------------------"""
+    cdef object b_dim_name = _to_byte_c_string(dim_name)  # convert to C string
+    cdef CDSGroup *cds_group = group.c_ob  # Get underlying C pointer
+    return cds_set_transform_param(cds_group, b_dim_name, "back_edge", CDS_DOUBLE, length, data_nd.data)
+
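+# Example (a minimal sketch with hypothetical values): supply explicit
+# 300-second bin edges for the 'time' dimension of a coordinate system group;
+# the arrays must be doubles:
+#
+#     starts = np.arange(0.0, 3600.0, 300.0)
+#     set_front_edge_param(coordsys_group, 'time', len(starts), starts)
+#     set_back_edge_param(coordsys_group, 'time', len(starts), starts + 300.0)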
+
+def parse_transform_params(Group group, object string):
+    """-----------------------------------------------------------------------------------------------------------------
+    Parse a text string containing transformation parameters and apply the transform parameters to the provided
+    group object.  Groups should be one of two types:
+
+    1) Coordinate System
+    2) Input Datastream
+
+    The string of parameters has to match the type of group you are using.  If you are setting coordinate system
+    parameters, the string should have this format:
+
+        "
+        range:transform = TRANS_PASSTHROUGH;
+        ceil_backscatter:time:transform = TRANS_PASSTHROUGH;
+        time:width = 300;
+        "
+
+        Coordinate system parameters are either set on a coordinate dimension (e.g., time, range) or they can be
+        overridden for a specific variable and dimension (e.g., ceil_backscatter:time)
+
+    If you are setting input datastream parameters, the string should have this format:
+        "
+        time:range = 600;
+        "
+
+    Note that some transform parameters (like range) are set on input datastreams, and others (like transform type or
+    bin width) are set on coordinate systems.  You have to make sure the right parameters get passed for each type
+    of object.
+
+    Parameters
+    ----------
+    group : cds3.core.Group
+        Pointer to the CDSGroup you will apply the transform parameters to (either a coordinate system or input datastream)
+    string : object
+        text string containing the transformation params
+
+    Returns
+    -------
+     -  1 if successful
+     -  0 if an error occurred
+    -----------------------------------------------------------------------------------------------------------------"""
+
+    cdef object byte_string = _to_byte_c_string(string)      # Convert python string to c string
+    cdef CDSGroup *cds_group = group.c_ob                    # Get underlying C pointer
+    return cds_parse_transform_params(cds_group, byte_string, NULL)
+
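+# Example (a minimal sketch, using the parameter formats shown above):
+#
+#     params = "range:transform = TRANS_PASSTHROUGH;\ntime:width = 300;"
+#     if parse_transform_params(coordsys_group, params) == 0:
+#         raise Exception('Could not parse transform parameters')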
 def print_all(object file_like, Group group, int flags):
     cdef FILE *fp
     cdef int fd
diff --git a/packages/adi_py/cds3/enums.pyx b/packages/adi_py/cds3/enums.pyx
index 97fc585..0296fb9 100644
--- a/packages/adi_py/cds3/enums.pyx
+++ b/packages/adi_py/cds3/enums.pyx
@@ -53,6 +53,7 @@ FILL_CHAR = CDS_FILL_CHAR
 FILL_BYTE = CDS_FILL_BYTE
 FILL_SHORT = CDS_FILL_SHORT
 FILL_INT = CDS_FILL_INT
+FILL_INT64 = CDS_FILL_INT64
 FILL_FLOAT = CDS_FILL_FLOAT
 FILL_DOUBLE = CDS_FILL_DOUBLE
 
@@ -65,6 +66,8 @@ MAX_SHORT = CDS_MAX_SHORT
 MIN_SHORT = CDS_MIN_SHORT
 MAX_INT = CDS_MAX_INT
 MIN_INT = CDS_MIN_INT
+MAX_INT64 = CDS_MAX_INT64
+MIN_INT64 = CDS_MIN_INT64
 MAX_FLOAT = CDS_MAX_FLOAT
 MIN_FLOAT = CDS_MIN_FLOAT
 MAX_DOUBLE = CDS_MAX_DOUBLE
@@ -87,6 +90,7 @@ CHAR = CDS_CHAR
 BYTE = CDS_BYTE
 SHORT = CDS_SHORT
 INT = CDS_INT
+INT64 = CDS_INT64
 FLOAT = CDS_FLOAT
 DOUBLE = CDS_DOUBLE
 
diff --git a/packages/adi_py/conda_environment.yml b/packages/adi_py/conda_environment.yml
new file mode 100644
index 0000000..0ea7956
--- /dev/null
+++ b/packages/adi_py/conda_environment.yml
@@ -0,0 +1,13 @@
+# Default environment for developing ADI Python apps
+
+name: adi_py
+channels:
+  - arm-doe
+  - conda-forge
+dependencies:
+  - python=3.9
+  - adi_py
+  - act-atmos
+  - pip=22.3.1
+  - pip:
+    - numpy==1.23.1
diff --git a/packages/adi_py/dsproc3/cdsproc3.pxd b/packages/adi_py/dsproc3/cdsproc3.pxd
index 2b234d6..ea4f262 100644
--- a/packages/adi_py/dsproc3/cdsproc3.pxd
+++ b/packages/adi_py/dsproc3/cdsproc3.pxd
@@ -64,6 +64,16 @@ cdef extern from "dsproc3.h" nogil:
     char *dsproc_datastream_class_level(int ds_id)
     char *dsproc_datastream_path(int ds_id)
 
+    int  dsproc_getopt(
+               const char *option, 
+               const char **value) 
+      
+    int  dsproc_setopt(
+               const char short_opt, 
+               const char *long_opt, 
+               const char *arg_name, 
+               const char *opt_desc) 
+
     void dsproc_use_nc_extension()
 
     void dsproc_disable_lock_file()
@@ -412,6 +422,11 @@ cdef extern from "dsproc3.h" nogil:
                 int          npatterns,
                 char **patterns,
                 int          ignore_case)
+
+    int     dsproc_set_file_name_time_patterns(
+                int          ds_id,
+                int          npatterns,
+                char **patterns)
     
     void    dsproc_set_datastream_file_extension(
                 int   ds_id,
@@ -550,6 +565,9 @@ cdef extern from "dsproc3_internal.h" nogil:
     char *dsproc_get_type()
     char *dsproc_get_version()
     int dsproc_get_force_mode()
+
+    unsigned int dsproc_get_missing_value_bit_flag(int bit_ndescs, const char **bit_descs)
+    int dsproc_get_qc_bit_descriptions(CDSVar *qc_var, const char ***bit_descs)
     
     time_t      dsproc_get_max_run_time()
     time_t      dsproc_get_start_time()
@@ -744,6 +762,14 @@ cdef extern from "dsproc3_internal.h" nogil:
             char *name,
             char *level)
 
+    int     dsproc_set_coordsys_trans_param(
+                char        *coordsys_name,
+                char        *field_name,
+                char        *param_name,
+                CDSDataType type,
+                size_t      length,
+                void        *value)
+
     int dsproc_fetch_dataset(
             int       ds_id,
             timeval_t *begin_timeval,
diff --git a/packages/adi_py/dsproc3/core.pyx b/packages/adi_py/dsproc3/core.pyx
index d88a4be..7e41357 100644
--- a/packages/adi_py/dsproc3/core.pyx
+++ b/packages/adi_py/dsproc3/core.pyx
@@ -3,12 +3,15 @@
 
 from __future__ import print_function
 
+from typing import List
+
 import sys
 import time
 from math import modf
 
 # Cython modules
 from libc.stdlib cimport malloc,free
+from libc.stdio cimport printf
 from cpython.ref cimport PyObject
 from cpython.unicode cimport PyUnicode_AsEncodedString
 from cpython.pycapsule cimport PyCapsule_GetPointer
@@ -242,6 +245,7 @@ def error(object status, object format, *args):
     string will be used.
 
     """
+    if format is None: format = status
     s=format
     file,line,func=__line()
     if args:
@@ -1279,6 +1283,10 @@ cdef inline void* _alloc_single(CDSDataType cds_type, object initial_value=None)
         retval = malloc(sizeof(int))
         if initial_value is not None:
             (<int*>retval)[0] = initial_value
+    elif cds_type == CDS_INT64:
+        retval = malloc(sizeof(long long))
+        if initial_value is not None:
+            (<long long*>retval)[0] = initial_value
     elif cds_type == CDS_FLOAT:
         retval = malloc(sizeof(float))
         if initial_value is not None:
@@ -1309,6 +1317,9 @@ cdef inline object _convert_single(CDSDataType cds_type, void *value):
     elif cds_type == CDS_INT:
         retval = (<int*>value)[0];
         free(<int*>value)
+    elif cds_type == CDS_INT64:
+        retval = (<long long*>value)[0];
+        free(<long long*>value)
     elif cds_type == CDS_FLOAT:
         retval = (<float*>value)[0];
         free(<float*>value)
@@ -1338,6 +1349,8 @@ cdef inline int cds_type_to_dtype(CDSDataType cds_type) except -1:
         return np.NPY_SHORT
     elif cds_type == CDS_INT:
         return np.NPY_INT
+    elif cds_type == CDS_INT64:
+        return np.NPY_INT64
     elif cds_type == CDS_FLOAT:
         return np.NPY_FLOAT
     elif cds_type == CDS_DOUBLE:
@@ -1358,6 +1371,8 @@ cpdef inline np.dtype cds_type_to_dtype_obj(CDSDataType cds_type):
         return np.dtype(np.int16)
     elif cds_type == CDS_INT:
         return np.dtype(np.int32)
+    elif cds_type == CDS_INT64:
+        return np.dtype(np.int64)
     elif cds_type == CDS_FLOAT:
         return np.dtype(np.float32)
     elif cds_type == CDS_DOUBLE:
@@ -1380,10 +1395,14 @@ cpdef inline int dtype_to_cds_type(np.dtype dtype) except -1:
         return CDS_SHORT
     elif dtype == np.dtype(np.int32):
         return CDS_INT
+    elif dtype == np.dtype(np.int64):
+        return CDS_INT64
     elif dtype == np.dtype(np.float32):
         return CDS_FLOAT
     elif dtype == np.dtype(np.float64):
         return CDS_DOUBLE
+    elif np.issubdtype(dtype, np.datetime64):
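+        # Assumption: datetime64 values are exchanged with ADI as
+        # floating-point seconds, hence the CDS_DOUBLE mapping.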
+        return CDS_DOUBLE
     else:
         raise ValueError("Unknown dtype %s" % dtype)
 
@@ -1695,6 +1714,98 @@ def dataset_name(cds3.core.Group dataset):
         return None
     return _to_python_string( retval )
 
+def getopt(object option):
+    """Get a user defined command line option.
+    
+    Parameters
+    ----------
+    option : object
+       The short or long option.
+
+    Returns
+    -------
+    - the option's value if it was specified on the command line with an argument
+    - 1 if the option was specified on the command line without an argument
+    - None if the option was not specified on the command line
+
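+    Examples
+    --------
+    A minimal sketch, assuming a hypothetical 'max-gap' option was
+    registered with setopt() before dsproc_main() was called:
+
+    ```
+    value = dsproc.getopt("max-gap")
+    if value not in (None, 1):  # option given with an argument
+        max_gap = int(value)
+    ```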
+    """
+
+    cdef object b_option
+    cdef char *strval=NULL
+    cdef int retval
+    cdef object retstr
+
+    b_option = _to_byte_c_string( option )
+
+    retval = dsproc_getopt(b_option, &strval)
+    if retval == 0:
+        return None
+    if strval != NULL:
+        retstr = strval
+        return _to_python_string(retstr)
+    else:
+        return retval
+
+def setopt(
+    object short_opt=None,
+    object long_opt=None,
+    object arg_name=None,
+    object opt_desc=None):
+    """ Set user defined command line option.
+    This function must be called before calling dsproc_main.
+
+    Parameters
+    ----------
+    short_opt : object
+        Short options are single letters and are prefixed by a single 
+        dash on the command line. Multiple short options can be grouped 
+        behind a single dash. 
+        Specify None for this argument if a short option should not be used.
+    long_opt : object 
+        Long options are prefixed by two consecutive dashes on the command line. 
+        Specify None for this argument if a long option should not be used.
+    arg_name : object 
+        A single word description of the option argument to be used in the help message. 
+        Specify None if this option does not take an argument on the command line. 
+    opt_desc : object 
+        A brief description of the option to be used in the help message.
+    
+    Returns
+    -------
+    - 1 if successful
+    - 0 if the option has already been used or an error occurs.
+
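+    Examples
+    --------
+    A minimal sketch registering a hypothetical 'max-gap' option; this
+    must be called before dsproc_main():
+
+    ```
+    dsproc.setopt('g', 'max-gap', 'SECONDS',
+                  'Maximum allowed gap between samples in seconds')
+    ```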
+    """
+    cdef object b_short_opt
+    cdef char c_short_opt
+    cdef object b_long_opt
+    cdef char *c_long_opt
+    cdef object b_arg_name
+    cdef char *c_arg_name
+    cdef object b_opt_desc
+    cdef char *c_opt_desc
+
+    if short_opt is not None:
+       b_short_opt = _to_byte_c_string(short_opt)
+       c_short_opt = b_short_opt[0]  # first byte of the encoded string is the single-character option
+    if long_opt is not None:
+       b_long_opt = _to_byte_c_string(long_opt)
+       c_long_opt = b_long_opt
+    else:
+       c_long_opt = NULL
+    if arg_name is not None:
+       b_arg_name = _to_byte_c_string(arg_name)
+       c_arg_name = b_arg_name
+    else:
+       c_arg_name = NULL
+
+    b_opt_desc = _to_byte_c_string(opt_desc)
+
+    if short_opt is not None:
+        return dsproc_setopt(c_short_opt, c_long_opt, c_arg_name, b_opt_desc)
+    else:
+        return dsproc_setopt('\0', c_long_opt, c_arg_name, b_opt_desc)
+
 def use_nc_extension():
     """ Set the default NetCDF file extension to 
     'nc' for output files. The NetCDF file extension used by
@@ -2410,6 +2521,7 @@ def define_var(
     cdef signed char min_byte, max_byte, missing_byte, fill_byte
     cdef short min_short, max_short, missing_short, fill_short
     cdef int min_int, max_int, missing_int, fill_int
+    cdef long long min_long, max_long, missing_long, fill_long
     cdef float min_float, max_float, missing_float, fill_float
     cdef double min_double, max_double, missing_double, fill_double
     cdef const char **c_dim_names = <const_char**>malloc(len(dim_names) * sizeof(char*))
@@ -2510,6 +2622,19 @@ def define_var(
         if fill_value is not None:
             fill_int = fill_value
             fill_ptr = &fill_int
+    elif cds_type == CDS_INT64:
+        if valid_min is not None:
+            min_long = valid_min
+            min_ptr = &min_long
+        if valid_max is not None:
+            max_long = valid_max
+            max_ptr = &max_long
+        if missing_value is not None:
+            missing_long = missing_value
+            missing_ptr = &missing_long
+        if fill_value is not None:
+            fill_long = fill_value
+            fill_ptr = &fill_long
     elif cds_type == CDS_FLOAT:
         if valid_min is not None:
             min_float = valid_min
@@ -3369,6 +3494,7 @@ def init_var_data_index(
         return None
     return var.get_datap(sample_start)
 
+
 def set_var_data(
             cds3.core.Var var,
             size_t sample_start,
@@ -4154,10 +4280,8 @@ def run_dq_inspector(int ds_id, time_t begin_time,
         Beginning of the time range to search
     end_time : time_t
         End of the time range to search
-
     input_args : object
-        List of command line arguments for dq_inspector. 
-        This list must be terminated with a None value.
+        List of command line arguments for dq_inspector.
     flag : int
         Control flag.  Set to 0 to maintain backward 
         compatibility.
@@ -4168,11 +4292,15 @@ def run_dq_inspector(int ds_id, time_t begin_time,
     - -1  if the process could not be executed
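+
+    The input_args list does not need to be terminated with None; a
+    terminating None is appended automatically when it is missing.
+
+    Example (a minimal sketch that passes no extra dq_inspector arguments):
+
+    ```
+    status = dsproc.run_dq_inspector(ds_id, begin_time, end_time, [], 0)
+    ```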
 
     """
+
+    # Make sure the argument list is terminated with None so that the
+    # C array passed to dsproc_run_dq_inspector is NULL-terminated.
+    if len(input_args) == 0 or input_args[-1] is not None:
+        input_args = input_args + [None]
+
     cdef int return_value
 
     cdef const char **c_input_args = <const_char**>malloc(len(input_args) * sizeof(char*))
 
-    cdef object b_input_args = [None] * len(input_args)
+    cdef object b = None
 
     # String processing varies depending on which Python version is run
     # The PyString_AsString is different with different functionality depending on the Python version. See imports.
@@ -4186,21 +4314,15 @@ def run_dq_inspector(int ds_id, time_t begin_time,
     else:
         # Python Major Version 3
         for i in range(len(input_args)):
-            if i == len(input_args)-1:
-                break
-            else:
-                b_input_args[i] = PyUnicode_AsEncodedString(input_args[i], "UTF-8","strict")
-
-        for i in range(len(input_args)):
-            if i == len(input_args)-1:
+            if i == len(input_args) - 1:
                 c_input_args[i] = NULL
-            else:
-                c_input_args[i] = PyString_AsString(b_input_args[i])
-
-    return_value = dsproc_run_dq_inspector(ds_id, begin_time,
-       end_time, c_input_args, flag)
+                break
+            b = PyUnicode_AsEncodedString(input_args[i], "UTF-8", "strict")
+            c_input_args[i] = PyString_AsString(b)
+    
+    return_value = dsproc_run_dq_inspector(ds_id, begin_time, end_time, c_input_args, flag)
     free(c_input_args)
-    del b_input_args
+    del b
     return return_value
 
 def add_datastream_file_patterns(int ds_id, object patterns, int ignore_case):
@@ -4246,6 +4368,92 @@ def add_datastream_file_patterns(int ds_id, object patterns, int ignore_case):
     del b_patterns
     return return_value
 
+def set_file_name_time_patterns(int ds_id, object patterns):
+    """
+    Set the file name time pattern(s) used to parse the time from a file name.
+
+    The file name time pattern(s) will also be used to sort the list of files in the datastream directory.
+    Alternatively, a file_name_compare function can be specified using dsproc_set_file_name_compare_function(),
+    or a file_name_time function can be specified using dsproc_set_file_name_time_function() (neither is
+    implemented in Python). If more than one is specified, the order of precedence is:
+
+    ```
+    file_name_compare function
+    file name time patterns
+    file_name_time function
+    ```
+
+    The file name time pattern(s) contain a mixture of regex (see regex(7)) and time format codes similar to the 
+    strptime function. The time format codes recognized by this function begin with a % and are followed by one 
+    of the following characters:
+
+    * 'C' century number (year/100) as a 2-digit integer
+    * 'd' day number in the month (1-31).
+    * 'e' day number in the month (1-31).
+    * 'h' hour * 100 + minute (0-2359)
+    * 'H' hour (0-23)
+    * 'j' day number in the year (1-366).
+    * 'm' month number (1-12)
+    * 'M' minute (0-59)
+    * 'n' arbitrary whitespace
+    * 'o' time offset in seconds
+    * 'p' AM or PM
+    * 'q' Mac-Time: seconds since 1904-01-01 00:00:00 +0000 (UTC)
+    * 's' seconds since Epoch, 1970-01-01 00:00:00 +0000 (UTC)
+    * 'S' second (0-60; 60 may occur for leap seconds)
+    * 't' arbitrary whitespace
+    * 'y' year within century (0-99)
+    * 'Y' year with century as a 4-digit integer
+    * '%' a literal "%" character (i.e., "%%")
+
+    An optional 0 character can be used between the % and format code to specify that the number must 
+    be zero padded. For example, '%0d' specifies that the day range is 01 to 31.
+
+    Multiple patterns can be provided and will be checked in the specified order.
+
+    Examples:
+
+    * "%Y%0m%0d\\.%0H%0M%0S\\.[a-z]$" would match *20150923.072316.csv
+    * "%Y-%0m-%0d_%0H:%0M:%0S\\.dat" would match *2015-09-23_07:23:16.dat
+    
+    Parameters
+    ----------
+    ds_id : int
+        Datastream ID
+    patterns : object
+        List of extended regex time patterns
+
+    Returns
+    -------
+    - 1 if successful
+    - 0 if a regex compile error occurred
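+
+    Usage sketch (the datastream id and pattern are illustrative only):
+
+    ```
+    dsproc.set_file_name_time_patterns(ds_id, ["%Y%0m%0d\\.%0H%0M%0S\\.csv$"])
+    ```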
+    """
+    cdef int return_value
+
+    cdef const char **c_patterns = <const_char**>malloc(len(patterns) * sizeof(char*))
+
+    cdef object b_patterns = [None] * len(patterns)
+
+    # String processing varies depending on which Python version is run
+    # The PyString_AsString is different with different functionality depending on the Python version. See imports.
+    if sys.version_info[0] < 3:
+        # Python Major Version 2
+        for i in range(len(patterns)):
+            c_patterns[i] = PyString_AsString(patterns[i])
+    else:
+        # Python Major Version 3
+        for i in range(len(patterns)):
+            b_patterns[i] = PyUnicode_AsEncodedString(patterns[i], "UTF-8","strict")
+
+        for i in range(len(patterns)):
+            c_patterns[i] = PyString_AsString(b_patterns[i])
+
+    return_value = dsproc_set_file_name_time_patterns(ds_id, len(patterns),
+            c_patterns)
+    free(c_patterns)
+    del b_patterns
+    return return_value
+
 def set_datastream_file_extension(int ds_id, object extension):
     """
     Set the datastream file extension.
@@ -4574,6 +4782,65 @@ def set_datastream_split_tz_offset(int ds_id, int split_tz_offset):
     """
     dsproc_set_datastream_split_tz_offset(ds_id, split_tz_offset)
 
+def get_missing_value_bit_flag(object bit_descs) -> int:
+    """
+    Get bit flag for the missing_value check.
+
+    This function will search for a bit description that begins with one of the following strings:
+
+     - "Value is equal to missing_value"
+     - "Value is equal to the missing_value"
+     - "value = missing_value"
+     - "value == missing_value"
+
+     - "Value is equal to missing value"
+     - "Value is equal to the missing value"
+     - "value = missing value"
+     - "value == missing value"
+
+     - "Value is equal to _FillValue"
+     - "Value is equal to the _FillValue"
+     - "value = _FillValue"
+     - "value == _FillValue"
+
+    Note: Use get_qc_bit_descriptions() to get the list of bit descriptions for a QC variable.
+
+    Parameters
+    ----------
+    bit_descs : List[str]
+        List of bit descriptions to search for missing value.
+
+    Returns
+    -------
+    The bit flag (numbered starting from 1), or 0 if no matching bit
+    description was found.
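+
+    Examples
+    --------
+    A minimal sketch (qc_var is assumed to be a QC variable obtained from
+    a retrieved dataset):
+
+    ```
+    descs = dsproc.get_qc_bit_descriptions(qc_var)
+    flag = dsproc.get_missing_value_bit_flag(descs)
+    if flag == 0:
+        pass  # no missing_value bit is documented for this variable
+    ```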
+    """
+
+    # Convert the Python List[str] into a char** array that can be passed to the C function.
+    # The encoded byte strings are collected in a list so they stay alive (and the C
+    # pointers stay valid) until the call returns; the array itself is freed afterwards.
+    ndescs = len(bit_descs)
+    c_descs = <const_char**> malloc(ndescs * sizeof(char *))
+    b_descs = [_to_byte_c_string(desc) for desc in bit_descs]
+    for idx in range(ndescs):
+        c_descs[idx] = b_descs[idx]
+
+    bit_flag = dsproc_get_missing_value_bit_flag(ndescs, c_descs)
+    free(c_descs)
+    return bit_flag
+
+def get_qc_bit_descriptions(cds3.core.Var var):
+    """
+    Return a List[str] containing the bit descriptions for the given QC variable
+    """
+    # Create an empty char** and pass the address in as a parameter
+    cdef const char** c_bit_descs = NULL
+    ndescs = dsproc_get_qc_bit_descriptions(var.c_ob, &c_bit_descs)
+
+    if ndescs < 0:
+        return []
+
+    bit_descs: List[str] = []
+    for idx in range(ndescs):
+        bit_descs.append(_to_python_string(c_bit_descs[idx]))
+    free(c_bit_descs)
+    return bit_descs
+
 def set_retriever_time_offsets(int ds_id, time_t begin_time, time_t end_time):
     """
     Set the time offsets to use when retrieving data.
@@ -5141,6 +5408,7 @@ def bad_file_warning(char *file_name, char *format, *args):
     b_file_name = _to_byte_c_string( file_name )
     b_s  = _to_byte_c_string( s )
     dsproc_bad_file_warning(func, file, line, b_file_name, b_s )
+
 #
 #void dsproc_bad_line_warning(
 #        char *sender,
@@ -5167,6 +5435,75 @@ def bad_file_warning(char *file_name, char *format, *args):
 #        char *name,
 #        char *level)
 
+def set_coordsys_trans_param(
+        object coordsys_name,
+        object field_name,
+        object param_name,
+        CDSDataType cds_type,
+        object value):
+    """Set the value of a coordinate system transformation parameter.
+
+    Parameters
+    ----------
+    coordsys_name :  object
+        The name of the coordinate system
+    field_name : object
+        Name of the field
+    param_name : object
+        Name of the transform parameter
+    cds_type : CDSDataType
+        The CDS data type of the parameter value
+    value : object
+        The parameter value
+
+    Returns
+    -------
+    - 1 if successful
+    - 0 if the attribute does not exist
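+
+    Examples
+    --------
+    A minimal sketch (the coordinate system, field, and parameter names
+    are illustrative only):
+
+    ```
+    dsproc.set_coordsys_trans_param('half_min_grid', 'temperature', 'width',
+                                    dsproc.DOUBLE, 30.0)
+    ```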
+    """
+
+    cdef np.ndarray value_nd
+    cdef object byte_value
+    cdef object b_coordsys_name = _to_byte_c_string( coordsys_name )
+    cdef object b_field_name = _to_byte_c_string( field_name )
+    cdef object b_param_name = _to_byte_c_string( param_name )
+
+    if cds_type == CDS_CHAR:
+        byte_value = _to_byte_c_string(value)
+        length = len(byte_value)
+        if length == 1:
+            value_nd = np.asarray(byte_value[0]) 
+            return dsproc_set_coordsys_trans_param(b_coordsys_name, b_field_name,
+              b_param_name, cds_type, length, value_nd.data)
+        else:
+            value_nd = np.asarray(byte_value)
+            return dsproc_set_coordsys_trans_param(b_coordsys_name, b_field_name,
+               b_param_name, cds_type, length, value_nd.data)
+
+    value_nd = np.asarray(value, cds_type_to_dtype_obj(cds_type))
+    if value_nd.ndim == 0:
+        value_nd = value_nd[None] # add dummy dimension to a scalar value
+    assert value_nd.ndim == 1
+    length = len(value_nd)
+
+    return dsproc_set_coordsys_trans_param(b_coordsys_name, b_field_name,
+            b_param_name, cds_type, length, value_nd.data)
+
+def delete_group(cds3.core.Group dataset):
+    """Delete a dataset.
+    
+    Parameters
+    ----------
+    dataset : cds3.core.Group
+        Pointer to the dataset
+    
+    Returns
+    -------
+    - 1 if successful
+    - 0 if the group or its parent group is locked
+
+    """
+    return cds_delete_group(dataset.c_ob)
+
 def _ingest_main_loop():
     cdef int       ndsid
     cdef int      *dsids
diff --git a/packages/adi_py/setup.py b/packages/adi_py/setup.py
index 67eb09b..44b5e30 100644
--- a/packages/adi_py/setup.py
+++ b/packages/adi_py/setup.py
@@ -37,9 +37,14 @@ def pkgconfig(lib, opt):
 dsproc3_libdirs = pkgconfig("dsproc3", '--libs-only-L')
 dsproc3_libs    = pkgconfig("dsproc3", '--libs-only-l')
 
+trans_incdirs = pkgconfig("trans", '--cflags-only-I')
+trans_libdirs = pkgconfig("trans", '--libs-only-L')
+trans_libs    = pkgconfig("trans", '--libs-only-l')
+
 numpy_incdir = numpy.get_include()
 cds3_incdirs.append(numpy_incdir)
 dsproc3_incdirs.append(numpy_incdir)
+trans_incdirs.append(numpy_incdir)
 
 # Extension Modules
 
@@ -79,12 +84,21 @@ def pkgconfig(lib, opt):
     runtime_library_dirs = dsproc3_libdirs
 )
 
+trans = Extension(
+    name            = 'trans.core',
+    sources         = ['trans/core.pyx'],
+    include_dirs    = trans_incdirs,
+    library_dirs    = trans_libdirs,
+    libraries       = trans_libs,
+    runtime_library_dirs = trans_libdirs
+)
+
 # Setup
 
 setup(
     name        = name,
     version     = version,
-    ext_modules = cythonize([cds3,cds3_enums,dsproc3,dsproc3_enums]),
-    packages    = ['cds3', 'dsproc3', 'adi_py']
+    ext_modules = cythonize([cds3, cds3_enums, dsproc3, dsproc3_enums, trans]),
+    packages    = ['cds3', 'dsproc3', 'trans', 'adi_py']
 )
 
diff --git a/packages/adi_py/trans/__init__.py b/packages/adi_py/trans/__init__.py
new file mode 100644
index 0000000..0ddecb2
--- /dev/null
+++ b/packages/adi_py/trans/__init__.py
@@ -0,0 +1,2 @@
+from trans.core import *
+
diff --git a/packages/adi_py/trans/core.pxd b/packages/adi_py/trans/core.pxd
new file mode 100644
index 0000000..44c6821
--- /dev/null
+++ b/packages/adi_py/trans/core.pxd
@@ -0,0 +1,11 @@
+from cds3.ccds3 cimport *
+from cds3.core cimport *
+
+# Map to corresponding C function so we can call it from our python method
+cdef extern from "trans.h" nogil:
+    int cds_transform_driver(
+            CDSVar *invar,
+            CDSVar *qc_invar,
+            CDSVar *outvar,
+            CDSVar *qc_outvar
+    )
\ No newline at end of file
diff --git a/packages/adi_py/trans/core.pyx b/packages/adi_py/trans/core.pyx
new file mode 100644
index 0000000..48eef70
--- /dev/null
+++ b/packages/adi_py/trans/core.pyx
@@ -0,0 +1,42 @@
+"""---------------------------------------------------------------------------------------------------------------------
+This file adds a Cython binding for the libtrans cds_transform_driver method
+---------------------------------------------------------------------------------------------------------------------"""
+
+cimport cds3.core
+
+
+# Implement our binding method in python
+def transform_driver(cds3.core.Var invar, cds3.core.Var qc_invar, cds3.core.Var outvar, cds3.core.Var qc_outvar):
+    """-----------------------------------------------------------------------------------------------------------------
+    Run the transform engine on an input variable, given input QC and an allocated and dimensioned output variable
+     (and QC) structure.
+
+    Upon successful output, outvar and qc_outvar will contain the transformed data and QC.
+
+     ** Note that all transformation parameters must have already been applied to the input datastreams associated with
+      the invar and/or the coordinate system associated with the outvar.
+
+    Parameters
+    ----------
+    invar : cds3.core.Var
+        pointer to input CDSVar
+    qc_invar : cds3.core.Var
+        pointer to input QC CDSVar
+    outvar : cds3.core.Var
+        pointer to output CDSVar; must have dimensions and data spaces allocated, and the dimensions must have coordinate
+        variables already created and attached (we use this information to build the output grid to transform to)
+    qc_outvar : cds3.core.Var
+        pointer to output QC CDSVar; must be dimensioned and allocated as above for outvar
+
+    Returns
+    -------
+     -  1 if successful
+     -  0 if an error occurred - usually deeper in CDS
+
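+    Example (a minimal sketch; the four variables are assumed to be
+    prepared as described above):
+
+    ```
+    from trans import transform_driver
+
+    status = transform_driver(invar, qc_invar, outvar, qc_outvar)
+    if status == 0:
+        raise RuntimeError('cds_transform_driver failed')
+    ```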
+    """
+    cdef CDSVar *cds_invar = invar.c_ob
+    cdef CDSVar *cds_qc_invar = qc_invar.c_ob
+    cdef CDSVar *cds_outvar = outvar.c_ob
+    cdef CDSVar *cds_qc_outvar = qc_outvar.c_ob
+
+    return cds_transform_driver(cds_invar, cds_qc_invar, cds_outvar, cds_qc_outvar)
\ No newline at end of file