Logging tidy ups (#176)
adamreeve authored Apr 1, 2020
1 parent 0f64f1f commit 6ac4a7c
Showing 13 changed files with 110 additions and 33 deletions.
2 changes: 1 addition & 1 deletion nptdms/base_segment.py
@@ -389,7 +389,7 @@ def read_property(f, endianness="<"):
prop_name = types.String.read(f, endianness)
prop_data_type = types.tds_data_types[types.Uint32.read(f, endianness)]
value = prop_data_type.read(f, endianness)
-    log.debug("Property %s: %r", prop_name, value)
+    log.debug("Property '%s' = %r", prop_name, value)
return prop_name, value


8 changes: 3 additions & 5 deletions nptdms/channel_data.py
@@ -76,14 +76,12 @@ def __init__(self, obj, num_values, memmap_dir=None):
obj.data_type.nptype, num_values, memmap_dir)
self.scaler_data = {}
self._data_insert_position = 0
-        log.debug(
-            "Allocated %d sample slots for %s", len(self.data), obj.path)
+        log.debug("Allocated %d sample slots for %s", len(self.data), obj.path)

def append_data(self, new_data):
"""Update the object data with a new array of data"""

-        log.debug(
-            "Adding %d data points to data for %s", len(new_data), self.path)
+        log.debug("Adding %d data points to data for %s", len(new_data), self.path)
start_pos = self._data_insert_position
end_pos = self._data_insert_position + len(new_data)
self.data[start_pos:end_pos] = new_data
@@ -131,7 +129,7 @@ def _new_numpy_array(dtype, num_values, memmap_dir=None):
:param dtype: Numpy data type for array
:param num_values: Capacity required
-    :param mmemap_dir: Optional directory to store memory mmap files
+    :param memmap_dir: Optional directory to store memory mmap files
"""
if memmap_dir:
memmap_file = tempfile.NamedTemporaryFile(
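For context on the docstring fix above, memmap_dir lets channel data live in a temporary memory-mapped file instead of RAM. A minimal sketch of the allocation pattern, written as our own helper rather than quoted from the library:

```python
import tempfile
import numpy as np

def allocate_channel_array(dtype, num_values, memmap_dir=None):
    """Allocate sample slots, optionally backed by a temp file on disk."""
    if memmap_dir:
        # File-backed storage keeps very large channels out of memory
        memmap_file = tempfile.NamedTemporaryFile(dir=memmap_dir)
        return np.memmap(memmap_file.name, dtype=dtype, mode="w+",
                         shape=(num_values,))
    return np.zeros(num_values, dtype=dtype)
```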
18 changes: 10 additions & 8 deletions nptdms/daqmx.py
@@ -69,8 +69,6 @@ def _read_data_chunk(self, file, data_objects, chunk_index):
scaler_size = scaler.data_type.size
byte_columns = tuple(
range(offset, offset + scaler_size))
-                    log.debug("Byte columns for channel %d scaler %d: %s",
-                              i, scaler.scale_id, byte_columns)
# Select columns for this scaler, so that number of values
# will be number of bytes per point * number of data
# points. Then use ravel to flatten the results into a
@@ -115,8 +113,6 @@ def read_raw_data_index(self, f, raw_data_index_header):
except KeyError:
raise KeyError("Unrecognised data type: %s" % data_type_val)

-        log.debug("DAQmx object data type: %r", self.data_type)

daqmx_metadata = DaqMxMetadata(f, self.endianness, raw_data_index_header)
log.debug("DAQmx metadata: %r", daqmx_metadata)

@@ -162,12 +158,11 @@ def __init__(self, f, endianness, scaler_type):
self.dimension = types.Uint32.read(f, endianness)
# In TDMS format version 2.0, 1 is the only valid value for dimension
if self.dimension != 1:
-            log.warning("Data dimension is not 1")
+            raise ValueError("Data dimension is not 1")
self.chunk_size = types.Uint64.read(f, endianness)

# size of vector of format changing scalers
scaler_vector_length = types.Uint32.read(f, endianness)
-        log.debug("mxDAQ format scaler vector size '%d'", scaler_vector_length)
self.scalers = [
DaqMxScaler(f, endianness, scaler_type)
for _ in range(scaler_vector_length)]
@@ -186,7 +181,7 @@ def __repr__(self):
""" Return string representation of DAQmx metadata
"""
properties = (
"%s=%s" % (name, getattr(self, name))
"%s=%s" % (name, _get_attr_repr(self, name))
for name in self.__slots__)

properties_list = ", ".join(properties)
@@ -220,13 +215,20 @@ def __init__(self, open_file, endianness, scaler_type):

def __repr__(self):
properties = (
"%s=%s" % (name, getattr(self, name))
"%s=%s" % (name, _get_attr_repr(self, name))
for name in self.__slots__)

properties_list = ", ".join(properties)
return "%s(%s)" % (self.__class__.__name__, properties_list)


+def _get_attr_repr(obj, attr_name):
+    val = getattr(obj, attr_name)
+    if isinstance(val, type):
+        return val.__name__
+    return repr(val)


# Type codes for DAQmx scalers don't match the normal TDMS type codes:
DAQMX_TYPES = {
0: types.Uint8,
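The new _get_attr_repr helper above makes type-valued attributes print as bare class names in the DAQmx __repr__ output. A self-contained illustration (SampleMeta is invented for the demo):

```python
class SampleMeta:
    data_type = int  # a type-valued attribute, like a TDMS data type class
    scale_id = 0     # an ordinary value

def _get_attr_repr(obj, attr_name):
    val = getattr(obj, attr_name)
    if isinstance(val, type):
        return val.__name__  # "int" rather than "<class 'int'>"
    return repr(val)

meta = SampleMeta()
print(_get_attr_repr(meta, "data_type"))  # int
print(_get_attr_repr(meta, "scale_id"))   # 0
```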
2 changes: 1 addition & 1 deletion nptdms/scaling.py
@@ -46,7 +46,7 @@ def __init__(self, coefficients, input_source):
def from_properties(properties, scale_index):
try:
number_of_coefficients = properties[
-                'NI_Scale[%d]_Polynomial_Coefficients_Size' % (scale_index)]
+                'NI_Scale[%d]_Polynomial_Coefficients_Size' % scale_index]
except KeyError:
number_of_coefficients = 4
try:
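For background, an NI polynomial scale maps each raw value through its coefficients as scaled = c0 + c1·x + c2·x² + …, which is why a four-coefficient default is reasonable here. A minimal sketch of that evaluation (the helper name is ours, not the library's):

```python
import numpy as np

def apply_polynomial_scaling(raw_data, coefficients):
    """Evaluate scaled[i] = sum_k coefficients[k] * raw_data[i] ** k."""
    raw_data = np.asarray(raw_data, dtype=np.float64)
    scaled = np.zeros_like(raw_data)
    for power, coefficient in enumerate(coefficients):
        scaled += coefficient * raw_data ** power
    return scaled

# Offset 1.0 and gain 2.0 with no higher-order terms:
print(apply_polynomial_scaling([0.0, 1.0, 2.0], [1.0, 2.0, 0.0, 0.0]))
# -> [1. 3. 5.]
```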
4 changes: 2 additions & 2 deletions nptdms/tdms.py
@@ -302,7 +302,7 @@ def _read_channel_data(self, channel, offset=0, length=None):
raise RuntimeError(
"Cannot read channel data after the underlying TDMS reader is closed")

with Timer(log, "Allocate space"):
with Timer(log, "Allocate space for channel"):
# Allocate space for data
if length is None:
num_values = len(channel) - offset
@@ -311,7 +311,7 @@
num_values = max(0, num_values)
channel_data = get_data_receiver(channel, num_values, self._memmap_dir)

with Timer(log, "Read data"):
with Timer(log, "Read data for channel"):
# Now actually read all the data
for chunk in self._reader.read_raw_data_for_channel(channel.path, offset, length):
if chunk.data is not None:
6 changes: 3 additions & 3 deletions nptdms/tdms_segment.py
@@ -184,7 +184,7 @@ def _new_segment_object(self, object_path):
return TdmsSegmentObject(object_path, self.endianness)

def _read_data_chunk(self, file, data_objects, chunk_index):
log.debug("Data is contiguous")
log.debug("Reading contiguous data chunk")
object_data = {}
for obj in data_objects:
number_values = self._get_channel_number_values(obj, chunk_index)
@@ -235,7 +235,7 @@ def read_raw_data_index(self, f, raw_data_index_header):
types.Uint32.read(f, self.endianness)]
except KeyError:
raise KeyError("Unrecognised data type")
log.debug("Object data type: %r", self.data_type)
log.debug("Object data type: %s", self.data_type.__name__)

if (self.data_type.size is None and
self.data_type != types.String):
@@ -246,7 +246,7 @@ def read_raw_data_index(self, f, raw_data_index_header):
dimension = types.Uint32.read(f, self.endianness)
# In TDMS version 2.0, 1 is the only valid value for dimension
if dimension != 1:
-            log.warning("Data dimension is not 1")
+            raise ValueError("Data dimension is not 1")

# Read number of values
self.number_values = types.Uint64.read(f, self.endianness)
6 changes: 3 additions & 3 deletions nptdms/tdmsinfo.py
@@ -28,13 +28,13 @@ def main():


def tdmsinfo(file, show_properties=False):
-    tdmsfile = TdmsFile.read_metadata(file)
+    tdms_file = TdmsFile.read_metadata(file)

level = 0
display('/', level)
if show_properties:
-        display_properties(tdmsfile, level + 1)
-    for group in tdmsfile.groups():
+        display_properties(tdms_file, level + 1)
+    for group in tdms_file.groups():
level = 1
display("%s" % group.path, level)
if show_properties:
29 changes: 29 additions & 0 deletions nptdms/test/test_daqmx.py
@@ -2,9 +2,11 @@
"""

from collections import defaultdict
+import logging
import numpy as np

from nptdms import TdmsFile
+from nptdms.log import log_manager
from nptdms.test.util import (
GeneratedFile, hexlify_value, string_hexlify, segment_objects_metadata, hex_properties)

@@ -624,6 +626,33 @@ def test_stream_data_chunks():
np.testing.assert_equal(actual_data, expected_data)


+def test_daqmx_debug_logging(caplog):
+    """ Test loading a DAQmx file with debug logging enabled
+    """
+    scaler_metadata = daqmx_scaler_metadata(0, 3, 0)
+    metadata = segment_objects_metadata(
+        root_metadata(),
+        group_metadata(),
+        daqmx_channel_metadata("Channel1", 4, [2], [scaler_metadata]))
+    data = (
+        "01 00"
+        "02 00"
+        "FF FF"
+        "FE FF"
+    )
+
+    test_file = GeneratedFile()
+    test_file.add_segment(segment_toc(), metadata, data)
+
+    log_manager.set_level(logging.DEBUG)
+    _ = test_file.load()
+
+    assert "Reading metadata for object /'Group'/'Channel1' with index header 0x00001269" in caplog.text
+    assert "scaler_type=4713" in caplog.text
+    assert "scale_id=0" in caplog.text
+    assert "data_type=Int16" in caplog.text


def segment_toc():
return (
"kTocMetaData", "kTocRawData", "kTocNewObjList", "kTocDAQmxRawData")
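Outside the test suite, the log_manager hook exercised by this new test is also how callers enable npTDMS debug output; a minimal sketch (the file path is hypothetical):

```python
import logging
from nptdms import TdmsFile
from nptdms.log import log_manager

logging.basicConfig(level=logging.DEBUG)  # send records to stderr
log_manager.set_level(logging.DEBUG)      # turn on npTDMS debug messages

metadata_only = TdmsFile.read_metadata("example.tdms")  # hypothetical path
```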
14 changes: 14 additions & 0 deletions nptdms/test/test_tdms_file.py
@@ -1,12 +1,14 @@
"""Test reading of example TDMS files"""

from collections import defaultdict
+import logging
import os
import tempfile
from hypothesis import (assume, given, example, settings, strategies)
import numpy as np
import pytest
from nptdms import TdmsFile
+from nptdms.log import log_manager
from nptdms.test.util import (
BytesIoTestFile,
GeneratedFile,
@@ -652,3 +654,15 @@ def test_file_properties():

file_props = tdms_file.properties
assert file_props['num'] == 15


+def test_debug_logging(caplog):
+    """ Test loading a file with debug logging enabled
+    """
+    test_file, expected_data = scenarios.single_segment_with_one_channel().values
+
+    log_manager.set_level(logging.DEBUG)
+    _ = test_file.load()
+
+    assert "Reading metadata for object /'group'/'channel1' with index header 0x00000014" in caplog.text
+    assert "Object data type: Int32" in caplog.text
2 changes: 2 additions & 0 deletions nptdms/test/writer/test_tdms_segment.py
@@ -1,10 +1,12 @@
"""Test TdmsSegment"""

+from datetime import datetime
+import pytest
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
import numpy as np

from nptdms.writer import TdmsSegment, read_properties_dict
from nptdms.types import *
36 changes: 33 additions & 3 deletions nptdms/types.py
@@ -1,10 +1,38 @@
"""Conversions to and from bytes representation of values in TDMS files"""

from datetime import datetime, timedelta
import numpy as np
import struct


+__all__ = [
+    'numpy_data_types',
+    'tds_data_types',
+    'TdmsType',
+    'Bytes',
+    'Void',
+    'Int8',
+    'Int16',
+    'Int32',
+    'Int64',
+    'Uint8',
+    'Uint16',
+    'Uint32',
+    'Uint64',
+    'SingleFloat',
+    'DoubleFloat',
+    'ExtendedFloat',
+    'SingleFloatWithUnit',
+    'DoubleFloatWithUnit',
+    'ExtendedFloatWithUnit',
+    'String',
+    'Boolean',
+    'TimeStamp',
+    'ComplexSingleFloat',
+    'ComplexDoubleFloat',
+    'DaqMxRawData',
+]


_struct_pack = struct.pack
_struct_unpack = struct.unpack

@@ -52,14 +80,16 @@ def __init__(self, value):


class StructType(TdmsType):
+    struct_declaration = None

def __init__(self, value):
self.value = value
self.bytes = _struct_pack('<' + self.struct_declaration, value)

@classmethod
def read(cls, file, endianness="<"):
-        bytes = file.read(cls.size)
-        return _struct_unpack(endianness + cls.struct_declaration, bytes)[0]
+        read_bytes = file.read(cls.size)
+        return _struct_unpack(endianness + cls.struct_declaration, read_bytes)[0]


@tds_data_type(0, None)
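To make the StructType mechanics above concrete, here is a small self-contained round trip modeled on the class in this diff; the "l" format character for Int32 is an assumption based on the struct module, not quoted from the library:

```python
import io
import struct

class StructType:
    """Simplified version of the StructType base class in this diff."""
    struct_declaration = None
    size = None

    def __init__(self, value):
        self.value = value
        self.bytes = struct.pack('<' + self.struct_declaration, value)

    @classmethod
    def read(cls, file, endianness="<"):
        read_bytes = file.read(cls.size)
        return struct.unpack(endianness + cls.struct_declaration, read_bytes)[0]

class Int32(StructType):
    struct_declaration = "l"  # assumed 4-byte signed integer format
    size = 4

packed = Int32(-42).bytes  # little-endian by default
assert Int32.read(io.BytesIO(packed)) == -42
```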
10 changes: 8 additions & 2 deletions nptdms/utils.py
@@ -1,3 +1,4 @@
+import logging
import time

try:
@@ -16,24 +17,29 @@ class Timer(object):
"""

def __init__(self, log, description):
+        self._enabled = log.isEnabledFor(logging.INFO)
self._log = log
self._description = description
self._start_time = None

def __enter__(self):
+        if not self._enabled:
+            return self
try:
self._start_time = time.perf_counter()
except AttributeError:
# Python < 3.3
self._start_time = time.clock()
return self

def __exit__(self, exc_type, exc_val, exc_tb):
+        if not self._enabled:
+            return
try:
end_time = time.perf_counter()
except AttributeError:
# Python < 3.3
end_time = time.clock()

elapsed_time = (end_time - self._start_time) * 1.0e3
-        self._log.info("{0}: Took {1} ms".format(
-            self._description, elapsed_time))
+        self._log.info("{0}: Took {1} ms".format(self._description, elapsed_time))
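With the guard added above, a disabled logger now skips the perf counter calls entirely. Usage stays the same as in tdms.py earlier in this commit; a short sketch, with the logger name chosen for illustration:

```python
import logging
from nptdms.utils import Timer

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("nptdms.tdms")  # illustrative logger name

# On exit the timer logs "Read data for channel: Took <n> ms" at INFO level;
# if INFO were disabled, no timing work would happen at all.
with Timer(log, "Read data for channel"):
    total = sum(range(1_000_000))  # stand-in for real work
```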
6 changes: 1 addition & 5 deletions nptdms/writer.py
@@ -6,14 +6,10 @@
OrderedDict = dict
from datetime import datetime
from io import UnsupportedOperation
-import logging
import numpy as np
from nptdms.common import toc_properties, ObjectPath
from nptdms.types import *

-log = logging.getLogger(__name__)
-log.setLevel(logging.WARNING)


try:
long
@@ -71,7 +67,7 @@ def __enter__(self):
self.open()
return self

-    def __exit__(self, type, value, traceback):
+    def __exit__(self, exc_type, exc_value, exc_traceback):
self.close()


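Since __enter__ and __exit__ only open and close the file, the writer is used as an ordinary context manager. A sketch using npTDMS's documented writer API (the output path and data are illustrative):

```python
import numpy as np
from nptdms import TdmsWriter, ChannelObject

channel = ChannelObject("group", "channel1", np.linspace(0.0, 1.0, 100))

# __exit__ closes the file even if writing raises, now without
# shadowing the built-in names type/value/traceback.
with TdmsWriter("output.tdms") as tdms_writer:  # illustrative path
    tdms_writer.write_segment([channel])
```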
