Skip to content

Commit

Permalink
Start splitting some functionality out into different files
Browse files Browse the repository at this point in the history
  • Loading branch information
adamreeve committed Oct 13, 2019
1 parent 278699c commit e8ef6a1
Show file tree
Hide file tree
Showing 10 changed files with 973 additions and 937 deletions.
3 changes: 2 additions & 1 deletion nptdms/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,5 +7,6 @@
from .version import __version_info__, __version__

# Export public objects
from .tdms import TdmsFile, TdmsObject
from .tdms import TdmsFile
from .tdms_segment import TdmsObject
from .writer import TdmsWriter, RootObject, GroupObject, ChannelObject
46 changes: 42 additions & 4 deletions nptdms/common.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,14 @@
from collections import namedtuple
import numpy as np


import itertools
try:
long
except NameError:
# Python 3
long = int
try:
zip_longest = itertools.izip_longest
except AttributeError:
# Python 3
zip_longest = itertools.zip_longest


toc_properties = {
Expand All @@ -17,3 +19,39 @@
'kTocBigEndian': (long(1) << 6),
'kTocNewObjList': (long(1) << 2)
}


def path_components(path):
    """Convert a path into group and channel name components.

    A TDMS object path looks like ``/'group'/'channel'`` where a
    literal single quote inside a name is escaped by doubling it
    (``''``).  The root path ``/`` yields an empty list.  A path that
    does not start each component with ``/'`` raises ``ValueError``;
    an unterminated final component is silently dropped, matching the
    historical behaviour of the generator-based implementation.
    """
    components = []
    position = 0
    length = len(path)
    while position < length:
        if path[position] != '/':
            raise ValueError("Invalid path, expected \"/\"")
        if position + 1 >= length:
            # Trailing "/" with nothing after it: end of the path
            break
        if path[position + 1] != "'":
            raise ValueError("Invalid path, expected \"'\"")
        # Skip the "/'" prefix and collect the component name
        position += 2
        name_chars = []
        terminated = False
        while position < length:
            current = path[position]
            if (current == "'" and position + 1 < length
                    and path[position + 1] == "'"):
                # Doubled quote is an escaped literal quote
                name_chars.append("'")
                position += 2
            elif current == "'":
                # Closing quote ends this component
                terminated = True
                position += 1
                break
            else:
                name_chars.append(current)
                position += 1
        if not terminated:
            # Ran off the end before a closing quote; discard the partial name
            break
        components.append("".join(name_chars))
    return components
76 changes: 76 additions & 0 deletions nptdms/daqmx.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
import numpy as np

from nptdms import types
from nptdms.log import log_manager


log = log_manager.get_logger(__name__)


class DaqMxMetadata(object):
    """ Describes DAQmx data

    Holds the DAQmx-specific portion of a raw data index read from
    a TDMS segment, including the format changing scaler fields and
    the raw data widths vector.
    """

    # NOTE: the original list contained 'scaler_raw_buffer_index' twice,
    # which created a redundant slot descriptor and duplicated the entry
    # in __str__ output; each slot is now listed exactly once.
    __slots__ = [
        'chunk_size',
        'data_type',
        'dimension',
        'raw_data_widths',
        'scale_id',
        'scaler_data_type',
        'scaler_data_type_code',
        'scaler_raw_buffer_index',
        'scaler_raw_byte_offset',
        'scaler_sample_format_bitmap',
        'scaler_vector_length',
    ]

    def __init__(self, f, endianness):
        """
        Read the metadata for a DAQmx raw segment. This is the raw
        DAQmx-specific portion of the raw data index.

        :param f: Open file object positioned at the start of the
            DAQmx metadata.
        :param endianness: Endianness indicator passed through to the
            ``types`` readers.
        """
        self.data_type = types.tds_data_types[0xFFFFFFFF]
        self.dimension = types.Uint32.read(f, endianness)
        # In TDMS format version 2.0, 1 is the only valid value for dimension
        if self.dimension != 1:
            log.warning("Data dimension is not 1")
        self.chunk_size = types.Uint64.read(f, endianness)

        # Initialise scaler fields up front so attribute access (and
        # __str__) is safe even when the scaler vector is empty.
        self.scaler_data_type_code = None
        self.scaler_data_type = None
        self.scaler_raw_buffer_index = None
        self.scaler_raw_byte_offset = None
        self.scaler_sample_format_bitmap = None
        self.scale_id = None

        # size of vector of format changing scalers
        self.scaler_vector_length = types.Uint32.read(f, endianness)
        log.debug("mxDAQ format scaler vector size '%d'",
                  self.scaler_vector_length)
        if self.scaler_vector_length > 1:
            log.error("mxDAQ multiple format changing scalers not implemented")

        for _ in range(self.scaler_vector_length):
            # WARNING: This code overwrites previous values with new
            # values. At this time NI provides no documentation on
            # how to use these scalers and sample TDMS files do not
            # include more than one of these scalers.
            self.scaler_data_type_code = types.Uint32.read(f, endianness)
            self.scaler_data_type = (
                types.tds_data_types[self.scaler_data_type_code])

            # more info for format changing scaler
            self.scaler_raw_buffer_index = types.Uint32.read(f, endianness)
            self.scaler_raw_byte_offset = types.Uint32.read(f, endianness)
            self.scaler_sample_format_bitmap = types.Uint32.read(f, endianness)
            self.scale_id = types.Uint32.read(f, endianness)

        raw_data_widths_length = types.Uint32.read(f, endianness)
        self.raw_data_widths = np.zeros(raw_data_widths_length, dtype=np.int32)
        for cnt in range(raw_data_widths_length):
            self.raw_data_widths[cnt] = types.Uint32.read(f, endianness)

    def __str__(self):
        """ Return string representation of DAQmx metadata
        """
        properties = (
            "%s: %s" % (name, getattr(self, name))
            for name in self.__slots__)

        properties_list = ", ".join(properties)
        return "%s: ('%s')" % (self.__class__.__name__, properties_list)
38 changes: 38 additions & 0 deletions nptdms/log.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
import logging


class LogManager(object):
    """ Keeps track of per-module loggers so that their levels can be
        changed together, all sharing a single console handler
    """

    def __init__(self):
        # Default level; adjustable later via set_level
        self.log_level = logging.WARNING

        self.formatter = logging.Formatter(
            '[%(name)s %(levelname)s] %(message)s')

        # One console handler shared by every managed logger
        handler = logging.StreamHandler()
        handler.setLevel(self.log_level)
        handler.setFormatter(self.formatter)
        self.console_handler = handler

        # Maps module name -> logger, for later level updates
        self.loggers = {}

    def get_logger(self, module_name):
        """ Return a logger for a module
        """
        logger = logging.getLogger(module_name)
        logger.setLevel(self.log_level)
        logger.addHandler(self.console_handler)
        self.loggers[module_name] = logger
        return logger

    def set_level(self, level):
        """ Set the log level for all loggers that have been created
        """
        self.log_level = level
        # Apply the new level to the shared handler and every logger
        for target in [self.console_handler] + list(self.loggers.values()):
            target.setLevel(level)


# Module-level singleton used by the nptdms modules to obtain their loggers
log_manager = LogManager()
7 changes: 4 additions & 3 deletions nptdms/scaling.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
import logging
import numpy as np
import re

from nptdms.utils import OrderedDict
from nptdms.log import log_manager

log = logging.getLogger(__name__)
log.setLevel(logging.WARNING)

log = log_manager.get_logger(__name__)


class LinearScaling(object):
Expand Down
Loading

0 comments on commit e8ef6a1

Please sign in to comment.