From 1fe6cb6e7404d946dfb39c36c7708a21083bf918 Mon Sep 17 00:00:00 2001
From: Max Grover
Date: Fri, 27 Oct 2023 07:46:52 -0700
Subject: [PATCH 1/4] FIX: Use the new syntax for setup micromamba (#745)

---
 .github/workflows/python-package-conda.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml
index da39f1a29e..6479a5c5f7 100644
--- a/.github/workflows/python-package-conda.yml
+++ b/.github/workflows/python-package-conda.yml
@@ -43,7 +43,7 @@ jobs:
       - name: Setup Conda Environment
         uses: mamba-org/setup-micromamba@v1
         with:
-          python-version: ${{ matrix.python-version }}
+          create-args: python=${{ matrix.python-version }}
           environment-file: ./continuous_integration/environment_actions.yml
           environment-name: act_env

From cbfe7fd2a7ede27cbfc0ed9f69801ca9ce836939 Mon Sep 17 00:00:00 2001
From: Ken Kehoe
Date: Fri, 27 Oct 2023 11:27:34 -0600
Subject: [PATCH 2/4] Updated DQR tool to use new DQR web-service. (#743)

---
 act/qc/arm.py        | 155 +++++++++++++++++++++++--------------------
 act/tests/test_qc.py |  85 ++++++++++++++----------
 2 files changed, 134 insertions(+), 106 deletions(-)

diff --git a/act/qc/arm.py b/act/qc/arm.py
index 91e760b1d8..0fb84597d4 100644
--- a/act/qc/arm.py
+++ b/act/qc/arm.py
@@ -7,6 +7,7 @@
 import datetime as dt
 import numpy as np
 import requests
+import json
 
 from act.config import DEFAULT_DATASTREAM_NAME
 
@@ -71,7 +72,7 @@ def add_dqr_to_qc(
     Returns
     -------
     ds : xarray.Dataset
-        Xarray dataset containing new quality control variables
+        Xarray dataset containing new or updated quality control variables
 
     Examples
     --------
@@ -99,93 +100,103 @@ def add_dqr_to_qc(
     if cleanup_qc:
         ds.clean.cleanup()
 
-    # In order to properly flag data, get all variables if None. Exclude QC variables.
-    if variable is None:
-        variable = list(set(ds.data_vars) - set(ds.clean.matched_qc_variables))
+    start_date = ds['time'].values[0].astype('datetime64[s]').astype(dt.datetime).strftime('%Y%m%d')
+    end_date = ds['time'].values[-1].astype('datetime64[s]').astype(dt.datetime).strftime('%Y%m%d')
+
+    # Clean up assessment to ensure it is a string with no spaces.
+    if isinstance(assessment, (list, tuple)):
+        assessment = ','.join(assessment)
+
+    # Not strictly needed, but makes the web-service query more robust.
+    assessment = assessment.replace(' ', '')
+    assessment = assessment.lower()
+
+    # Create URL
+    url = 'https://dqr-web-service.svcs.arm.gov/dqr_full'
+    url += f"/{datastream}"
+    url += f"/{start_date}/{end_date}"
+    url += f"/{assessment}"
+
+    # Call web service
+    req = requests.get(url)
+
+    # Check status values and raise error if not successful
+    status = req.status_code
+    if status == 400:
+        raise ValueError('Check parameters')
+    if status == 500:
+        raise ValueError('DQR Webservice Temporarily Down')
+
+    # Convert from string to dictionary
+    docs = json.loads(req.text)
+
+    # If no DQRs are found, the returned dictionary will not have the
+    # datastream as a key. The status will also be 404.
+    try:
+        docs = docs[datastream]
+    except KeyError:
+        return ds
+
+    dqr_results = {}
+    for quality_category in docs:
+        for dqr_number in docs[quality_category]:
+            if exclude is not None and dqr_number in exclude:
+                continue
+
+            if include is not None and dqr_number not in include:
+                continue
+
+            index = np.array([], dtype=np.int32)
+            for time_range in docs[quality_category][dqr_number]['dates']:
+                starttime = np.datetime64(time_range['start_date'])
+                endtime = np.datetime64(time_range['end_date'])
+                ind = np.where((ds['time'].values >= starttime) & (ds['time'].values <= endtime))
+                if ind[0].size > 0:
+                    index = np.append(index, ind[0])
+
+            if index.size > 0:
+                dqr_results[dqr_number] = {
+                    'index': index,
+                    'test_assessment': quality_category.lower().capitalize(),
+                    'test_meaning': f"{dqr_number} : {docs[quality_category][dqr_number]['description']}",
+                    'variables': docs[quality_category][dqr_number]['variables'],
+                }
+
+                if dqr_link:
+                    print(f"{dqr_number} - {quality_category.lower().capitalize()}: "
+                          f"https://adc.arm.gov/ArchiveServices/DQRService?dqrid={dqr_number}")
 
     # Check to ensure variable is list
-    if not isinstance(variable, (list, tuple)):
+    if variable and not isinstance(variable, (list, tuple)):
         variable = [variable]
 
-    # Loop through each variable and call web service for that variable
     loc_vars = ['lat', 'lon', 'alt', 'latitude', 'longitude', 'altitude']
-    for var_name in variable:
-        if skip_location_vars:
-            if var_name in loc_vars:
-                continue
-        # Create URL
-        url = 'http://www.archive.arm.gov/dqrws/ARMDQR?datastream='
-        url += datastream
-        url += '&varname=' + var_name
-        url += ''.join(
-            [
-                '&searchmetric=',
-                assessment,
-                '&dqrfields=dqrid,starttime,endtime,metric,subject',
-            ]
-        )
-
-        # Call web service
-        req = requests.get(url)
-
-        # Check status values and raise error if not successful
-        status = req.status_code
-        if status == 400:
-            raise ValueError('Check parameters')
-        if status == 500:
-            raise ValueError('DQR Webservice Temporarily Down')
-
-        # Get data and run through each dqr
-        dqrs = req.text.splitlines()
-        time = ds['time'].values
-        dqr_results = {}
-        for line in dqrs:
-            line = line.split('|')
-            dqr_no = line[0]
-
-            # Exclude DQRs if in list
-            if exclude is not None and dqr_no in exclude:
-                continue
+    for key, value in dqr_results.items():
+        for var_name in value['variables']:
 
-            # Only include if in include list
-            if include is not None and dqr_no not in include:
+            # Do not process on location variables
+            if skip_location_vars and var_name in loc_vars:
                 continue
 
-            starttime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[1])))
-            endtime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[2])))
-            ind = np.where((time >= starttime) & (time <= endtime))
-
-            if ind[0].size == 0:
+            # Only process provided variable names
+            if variable is not None and var_name not in variable:
                 continue
 
-            if 'time' not in ds[var_name].dims:
-                ind = np.where((ds[var_name].values == ds[var_name].values) | (np.isnan(ds[var_name].values)))
-                if np.size(ind) == 1:
-                    ind = ind[0]
-
-            if dqr_no in dqr_results.keys():
-                dqr_results[dqr_no]['index'] = np.append(dqr_results[dqr_no]['index'], ind)
-            else:
-                dqr_results[dqr_no] = {
-                    'index': ind,
-                    'test_assessment': line[3],
-                    'test_meaning': ': '.join([dqr_no, line[-1]]),
-                }
-                if dqr_link:
-                    print_url = 'https://adc.arm.gov/ArchiveServices/DQRService?dqrid=' + str(dqr_no)
-                    print(dqr_no, '-', line[3], ':', print_url)
 
-    for key, value in dqr_results.items():
-        try:
-            ds.qcfilter.add_test(
-                var_name,
-                index=value['index'],
+            try:
+                ds.qcfilter.add_test(
+                    var_name,
+                    index=np.unique(value['index']),
                     test_meaning=value['test_meaning'],
-                    test_assessment=value['test_assessment'],
-                )
+                    test_assessment=value['test_assessment'])
+
+            except KeyError:  # Variable name not in Dataset
+                continue
+
             except IndexError:
                 print(f"Skipping '{var_name}' DQR application because of IndexError")
+                continue
 
-    if normalize_assessment:
-        ds.clean.normalize_assessment(variables=var_name)
+            if normalize_assessment:
+                ds.clean.normalize_assessment(variables=var_name)
 
     return ds

diff --git a/act/tests/test_qc.py b/act/tests/test_qc.py
index ea6d6ffa60..bfeaaab667 100644
--- a/act/tests/test_qc.py
+++ b/act/tests/test_qc.py
@@ -90,45 +90,61 @@ def test_qc_test_errors():
 def test_arm_qc():
     # Test DQR Webservice using known DQR
     variable = 'wspd_vec_mean'
-    qc_variable = 'qc_' + variable
     ds = read_netcdf(EXAMPLE_METE40)
+    ds_org = copy.deepcopy(ds)
+    qc_variable = ds.qcfilter.check_for_ancillary_qc(variable)
 
-    # DQR webservice does go down, so ensure it
-    # properly runs first before testing
+    # DQR webservice does go down, so ensure it properly runs first before testing
     try:
-        ds = add_dqr_to_qc(ds, variable=variable)
-        ran = True
-        ds.attrs['_datastream'] = ds.attrs['datastream']
-        del ds.attrs['datastream']
-        ds2 = add_dqr_to_qc(ds, variable=variable)
-        ds3 = add_dqr_to_qc(ds)
-        add_dqr_to_qc(ds, variable=variable, exclude=['D190529.4'])
-        add_dqr_to_qc(ds, variable=variable, include=['D400101.1'])
-        with np.testing.assert_raises(ValueError):
-            del ds.attrs['_datastream']
-            add_dqr_to_qc(ds, variable=variable)
+        ds = add_dqr_to_qc(ds)
     except ValueError:
-        ran = False
-
-    if ran:
-        assert qc_variable in ds
-        dqr = [True for d in ds[qc_variable].attrs['flag_meanings'] if 'D190529.4' in d]
-        assert dqr[0] is True
-        assert 'Suspect' not in ds[qc_variable].attrs['flag_assessments']
-        assert 'Incorrect' not in ds[qc_variable].attrs['flag_assessments']
-
-        assert qc_variable in ds2
-        dqr = [True for d in ds2[qc_variable].attrs['flag_meanings'] if 'D190529.4' in d]
-        assert dqr[0] is True
-        assert 'Suspect' not in ds2[qc_variable].attrs['flag_assessments']
-        assert 'Incorrect' not in ds2[qc_variable].attrs['flag_assessments']
-
-        assert qc_variable in ds3
-        dqr = [True for d in ds3[qc_variable].attrs['flag_meanings'] if 'D190529.4' in d]
-        assert dqr[0] is True
-        assert 'Suspect' not in ds3[qc_variable].attrs['flag_assessments']
-        assert 'Incorrect' not in ds3[qc_variable].attrs['flag_assessments']
+        return
+
+    assert 'Suspect' not in ds[qc_variable].attrs['flag_assessments']
+    assert 'Incorrect' not in ds[qc_variable].attrs['flag_assessments']
+    assert 'Bad' in ds[qc_variable].attrs['flag_assessments']
+    assert 'Indeterminate' in ds[qc_variable].attrs['flag_assessments']
+
+    # Check that the default will update all variables in the DQR
+    for var_name in ['wdir_vec_mean', 'wdir_vec_std', 'wspd_arith_mean', 'wspd_vec_mean']:
+        qc_var = ds.qcfilter.check_for_ancillary_qc(var_name)
+        assert ds[qc_var].attrs['flag_meanings'][-1].startswith('D190529.4')
+
+    # Check that the variable keyword works as expected.
+    ds = copy.deepcopy(ds_org)
+    add_dqr_to_qc(ds, variable=variable)
+    qc_var = ds.qcfilter.check_for_ancillary_qc(variable)
+    assert ds[qc_var].attrs['flag_meanings'][-1].startswith('D190529.4')
+    qc_var = ds.qcfilter.check_for_ancillary_qc('wdir_vec_std')
+    assert len(ds[qc_var].attrs['flag_masks']) == 0
+
+    # Check that include and exclude keywords work as expected
+    ds = copy.deepcopy(ds_org)
+    add_dqr_to_qc(ds, variable=variable, exclude=['D190529.4'])
+    assert len(ds[qc_variable].attrs['flag_meanings']) == 4
+    add_dqr_to_qc(ds, variable=variable, include=['D400101.1'])
+    assert len(ds[qc_variable].attrs['flag_meanings']) == 4
+    add_dqr_to_qc(ds, variable=variable, include=['D190529.4'])
+    assert len(ds[qc_variable].attrs['flag_meanings']) == 5
+    add_dqr_to_qc(ds, variable=variable, assessment='Incorrect')
+    assert len(ds[qc_variable].attrs['flag_meanings']) == 5
+
+    # Test additional keywords
+    add_dqr_to_qc(ds, variable=variable, assessment='Suspect', cleanup_qc=False,
+                  dqr_link=True, skip_location_vars=True)
+    assert len(ds[qc_variable].attrs['flag_meanings']) == 6
+
+    # Default is to normalize assessment terms. Check that we can turn that off.
+    add_dqr_to_qc(ds, variable=variable, normalize_assessment=False)
+    assert 'Suspect' in ds[qc_variable].attrs['flag_assessments']
+
+    # Test that an error is raised when no datastream global attributes are set
+    with np.testing.assert_raises(ValueError):
+        ds4 = copy.deepcopy(ds)
+        del ds4.attrs['datastream']
+        del ds4.attrs['_datastream']
+        add_dqr_to_qc(ds4, variable=variable)
 
 
 def test_qcfilter():
@@ -1454,6 +1470,7 @@ def test_scalar_dqr():
 
     if ran:
         assert 'qc_lat' in ds
+        assert np.size(ds['qc_lon'].values) == 1
        assert np.size(ds['qc_lat'].values) == 1
         assert np.size(ds['qc_alt'].values) == 1
         assert np.size(ds['base_time'].values) == 1

From bcb8f62e927b03513b71d3ffa7e1e4e1e9128c7d Mon Sep 17 00:00:00 2001
From: Ken Kehoe
Date: Tue, 7 Nov 2023 11:28:47 -0700
Subject: [PATCH 3/4] Pint update to stop warning (#747)

* Fix how new units are declared with Pint.

* Commenting out return to stop warning

* PEP8

* Rolling back as this causes errors?
---
 act/qc/bsrn_tests.py    | 24 ++++++++++++------------
 act/qc/clean.py         |  2 +-
 act/utils/data_utils.py | 10 +++++-----
 3 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/act/qc/bsrn_tests.py b/act/qc/bsrn_tests.py
index a135a8d31d..431fefdcf3 100644
--- a/act/qc/bsrn_tests.py
+++ b/act/qc/bsrn_tests.py
@@ -433,8 +433,8 @@ def bsrn_comparison_tests(
         with warnings.catch_warnings():
             warnings.filterwarnings('ignore', category=RuntimeWarning)
             if use_dask and isinstance(self._ds[glb_diffuse_SW_dn_name].data, da.Array):
-                sum_sw_down = (self._ds[glb_diffuse_SW_dn_name].data +
-                               self._ds[direct_normal_SW_dn_name].data * np.cos(np.radians(sza)))
+                sum_sw_down = (self._ds[glb_diffuse_SW_dn_name].data +
+                               self._ds[direct_normal_SW_dn_name].data * np.cos(np.radians(sza)))
                 sum_sw_down[sum_sw_down < 50] = np.nan
                 ratio = self._ds[gbl_SW_dn_name].data / sum_sw_down
                 index_a = sza < 75
@@ -445,8 +445,8 @@ def bsrn_comparison_tests(
                 index_4 = da.where((ratio < 0.85) & index_b, True, False)
                 index = (index_1 | index_2 | index_3 | index_4).compute()
             else:
-                sum_sw_down = (self._ds[glb_diffuse_SW_dn_name].values +
-                               self._ds[direct_normal_SW_dn_name].values * np.cos(np.radians(sza)))
+                sum_sw_down = (self._ds[glb_diffuse_SW_dn_name].values +
+                               self._ds[direct_normal_SW_dn_name].values * np.cos(np.radians(sza)))
                 sum_sw_down[sum_sw_down < 50] = np.nan
                 ratio = self._ds[gbl_SW_dn_name].values / sum_sw_down
                 index_a = sza < 75
@@ -505,14 +505,14 @@ def bsrn_comparison_tests(
         with warnings.catch_warnings():
             warnings.filterwarnings('ignore', category=RuntimeWarning)
             if use_dask and isinstance(self._ds[glb_diffuse_SW_dn_name].data, da.Array):
-                sum_sw_down = (self._ds[glb_diffuse_SW_dn_name].data +
-                               self._ds[direct_normal_SW_dn_name].data * np.cos(np.radians(sza)))
+                sum_sw_down = (self._ds[glb_diffuse_SW_dn_name].data +
+                               self._ds[direct_normal_SW_dn_name].data * np.cos(np.radians(sza)))
                 sum_sw_down[sum_sw_down < 50] = np.nan
                 index = da.where(self._ds[glb_SW_up_name].data > sum_sw_down, True, False).compute()
             else:
-                sum_sw_down = (self._ds[glb_diffuse_SW_dn_name].values +
-                               self._ds[direct_normal_SW_dn_name].values * np.cos(np.radians(sza)))
+                sum_sw_down = (self._ds[glb_diffuse_SW_dn_name].values +
+                               self._ds[direct_normal_SW_dn_name].values * np.cos(np.radians(sza)))
                 sum_sw_down[sum_sw_down < 50] = np.nan
                 index = self._ds[glb_SW_up_name].values > sum_sw_down
 
@@ -577,10 +577,10 @@ def bsrn_comparison_tests(
                             f'for {test_options[3]} test.')
 
         if use_dask and isinstance(self._ds[glb_LW_dn_name].data, da.Array):
-            index_1 = da.where(self._ds[glb_LW_dn_name].data >
-                               (self._ds[glb_LW_up_name].data + LWdn_lt_LWup_component), True, False)
-            index_2 = da.where(self._ds[glb_LW_dn_name].data <
-                               (self._ds[glb_LW_up_name].data - LWdn_gt_LWup_component), True, False)
+            index_1 = da.where(self._ds[glb_LW_dn_name].data
+                               > (self._ds[glb_LW_up_name].data + LWdn_lt_LWup_component), True, False)
+            index_2 = da.where(self._ds[glb_LW_dn_name].data
+                               < (self._ds[glb_LW_up_name].data - LWdn_gt_LWup_component), True, False)
             index = (index_1 | index_2).compute()
         else:
             index_1 = self._ds[glb_LW_dn_name].values > (self._ds[glb_LW_up_name].values + LWdn_lt_LWup_component)

diff --git a/act/qc/clean.py b/act/qc/clean.py
index 224198094a..e1a7813e58 100644
--- a/act/qc/clean.py
+++ b/act/qc/clean.py
@@ -703,7 +703,7 @@ def clean_arm_qc(
                     continue
 
                 remove_test = True
-                test_number = int(parse_bit(flag_masks[ii]))
+                test_number = parse_bit(flag_masks[ii])[0]
                 for attr_name in self._ds[qc_var_name].attrs:
                     if test_attribute_limit_name == attr_name:
                         remove_test = False

diff --git a/act/utils/data_utils.py b/act/utils/data_utils.py
index 29005f25fa..66242897f7 100644
--- a/act/utils/data_utils.py
+++ b/act/utils/data_utils.py
@@ -581,8 +581,8 @@ def convert_units(data, in_units, out_units):
     convert_dict = {
         'C': 'degC',
         'F': 'degF',
-        '%': 'percent',  # seems like pint does not like this symbol?
-        '1': 'unitless',  # seems like pint does not like this number?
+        '%': 'percent',  # Pint does not like this symbol with .to('%')
+        '1': 'unitless',  # Pint does not like a bare number
     }
 
     if in_units in convert_dict:
@@ -597,9 +597,9 @@ def convert_units(data, in_units, out_units):
     # Instantiate the registry
     ureg = pint.UnitRegistry(autoconvert_offset_to_baseunit=True)
 
-    # Add missing units
-    ureg.define('percent = 0.01*count = %')
-    ureg.define('unitless = count = 1')
+    # Add missing units and conversions
+    ureg.define('fraction = []')
+    ureg.define('unitless = []')
 
     if not isinstance(data, np.ndarray):
         data = np.array(data)

From 756acb3acdcacdcd11b9503afa1387e4e7132588 Mon Sep 17 00:00:00 2001
From: Zach Sherman
Date: Tue, 7 Nov 2023 15:04:31 -0600
Subject: [PATCH 4/4] MNT: Setup.cfg update (#749)

* MNT: Remove armfiles from excludes and add data directory.

* MNT: Removed unused variable.
---
 act/io/armfiles.py | 54 ++++++++++++++++++++++++++--------------------
 setup.cfg          |  5 ++---
 2 files changed, 33 insertions(+), 26 deletions(-)

diff --git a/act/io/armfiles.py b/act/io/armfiles.py
index 9e591eae09..7f69ff87bc 100644
--- a/act/io/armfiles.py
+++ b/act/io/armfiles.py
@@ -5,27 +5,26 @@
 """
 
 import copy
+import datetime as dt
 import glob
 import json
 import re
-import urllib
-import warnings
-from pathlib import Path, PosixPath
-from netCDF4 import Dataset
-from os import PathLike
 import tarfile
 import tempfile
+import urllib
 import warnings
+from os import PathLike
+from pathlib import Path, PosixPath
 
-from cftime import num2date
 import numpy as np
 import xarray as xr
-import datetime as dt
+from cftime import num2date
+from netCDF4 import Dataset
 
 import act
 import act.utils as utils
 from act.config import DEFAULT_DATASTREAM_NAME
-from act.utils.io_utils import unpack_tar, unpack_gzip, cleanup_files, is_gunzip_file
+from act.utils.io_utils import cleanup_files, is_gunzip_file, unpack_gzip, unpack_tar
 
 
 def read_netcdf(
@@ -108,7 +107,6 @@ def read_netcdf(
     message = 'act.io.armfiles.read_netcdf will be replaced in version 2.0.0 by act.io.arm.read_arm_netcdf()'
     warnings.warn(message, DeprecationWarning, 2)
 
-
     ds = None
     filenames, cleanup_temp_directory = check_if_tar_gz_file(filenames)
 
@@ -137,7 +135,8 @@ def read_netcdf(
     if 'drop_variables' in kwargs.keys():
         drop_variables = kwargs['drop_variables']
         kwargs['drop_variables'] = keep_variables_to_drop_variables(
-            filenames, keep_variables, drop_variables=drop_variables)
+            filenames, keep_variables, drop_variables=drop_variables
+        )
 
     # Create an exception tuple to use with try statements. Doing it this way
     # so we can add the FileNotFoundError if requested. Can add more error
@@ -178,7 +177,9 @@ def read_netcdf(
 
     # If requested use base_time and time_offset to derive time. Assumes that the units
     # of both are in seconds and that the value is number of seconds since epoch.
     if use_base_time:
-        time = num2date(ds['base_time'].values + ds['time_offset'].values, ds['base_time'].attrs['units'])
+        time = num2date(
+            ds['base_time'].values + ds['time_offset'].values, ds['base_time'].attrs['units']
+        )
         time = time.astype('datetime64[ns]')
 
         # Need to use a new Dataset creation to correctly index time for use with
@@ -280,10 +281,7 @@ def read_netcdf(
     return ds
 
 
-def keep_variables_to_drop_variables(
-    filenames,
-    keep_variables,
-    drop_variables=None):
+def keep_variables_to_drop_variables(filenames, keep_variables, drop_variables=None):
     """
     Returns a list of variable names to exclude from reading by passing into
     `Xarray.open_dataset` drop_variables keyword. This can greatly help reduce
@@ -347,7 +345,6 @@ def keep_variables_to_drop_variables(
         # Use netCDF4 library to extract the variable and dimension names.
         rootgrp = Dataset(filename, 'r')
         read_variables = list(rootgrp.variables)
-        dimensions = list(rootgrp.dimensions)
         # Loop over the variables to exclude needed coordinate dimention names.
         dims_to_keep = []
         for var_name in keep_variables:
@@ -400,7 +397,9 @@ def check_arm_standards(ds):
     return the_flag
 
 
-def create_ds_from_arm_dod(proc, set_dims, version='', fill_value=-9999.0, scalar_fill_dim=None, local_file=False):
+def create_ds_from_arm_dod(
+    proc, set_dims, version='', fill_value=-9999.0, scalar_fill_dim=None, local_file=False
+):
 
     """
     Queries the ARM DOD api and builds a dataset based on the ARM DOD and
@@ -631,7 +630,9 @@ def write_netcdf(
                 try:
                     att_values = write_ds[var_name].attrs[attr_name]
                     if isinstance(att_values, (list, tuple)):
-                        att_values = [att_value.replace(' ', join_char) for att_value in att_values]
+                        att_values = [
+                            att_value.replace(' ', join_char) for att_value in att_values
+                        ]
                         write_ds[var_name].attrs[attr_name] = ' '.join(att_values)
 
             except KeyError:
@@ -759,9 +760,16 @@ def write_netcdf(
             pass
         current_time = dt.datetime.now().replace(microsecond=0)
         if 'history' in list(write_ds.attrs.keys()):
-            write_ds.attrs['history'] += ''.join(['\n', str(current_time), ' created by ACT ', str(act.__version__),
-                                                  ' act.io.write.write_netcdf'])
-
+            write_ds.attrs['history'] += ''.join(
+                [
+                    '\n',
+                    str(current_time),
+                    ' created by ACT ',
+                    str(act.__version__),
+                    ' act.io.write.write_netcdf',
+                ]
+            )
+
         if hasattr(write_ds, 'time_bounds') and not write_ds.time.encoding:
             write_ds.time.encoding.update(write_ds.time_bounds.encoding)
 
@@ -830,7 +838,7 @@ def read_mmcr(filenames):
     # read it in with xarray
     multi_ds = []
     for f in filenames:
-        nc = Dataset(f, "a")
+        nc = Dataset(f, 'a')
         # Change heights name to range to read appropriately to xarray
         if 'heights' in nc.dimensions:
             nc.renameDimension('heights', 'range')
@@ -878,7 +886,7 @@ def read_mmcr(filenames):
             data=data,
             coords={time_name: ds['time'].values[idx], range_name: range_data[idy]},
             dims=[time_name, range_name],
-            attrs=attrs
+            attrs=attrs,
         )
         ds[new_var_name] = da
 
diff --git a/setup.cfg b/setup.cfg
index 58b0e8d9d6..09382c0d37 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,9 +1,9 @@
 [flake8]
-exclude = act/io/armfiles.py docs *__init__.py* setup.cfg
+exclude = act/tests/data/ docs *__init__.py* setup.cfg
 ignore = E203,E266,E501,W503,E722,E402,C901,E731,F401
 max-line-length = 100
 max-complexity = 18
-extend-exclude = act/io/armfiles.py docs *__init__.py*
+extend-exclude = docs *__init__.py*
 extend-ignore = E203,E266,E501,W503,E722,E402,C901,E731,F401
 
 [isort]
@@ -18,7 +18,6 @@ line_length=100
 skip=
     docs/source/conf.py
     setup.py
-    act/io/armfiles.py
 
 [tool:pytest]
 addopts = --cov=./ --cov-report=xml --verbose
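
Note on PATCH 2/4: the new DQR web service can be exercised outside of ACT with a few
lines of Python. This is a minimal sketch only -- the endpoint, the YYYYMMDD date
format, the status handling, and the response layout (assessment category -> DQR
number -> dates/description/variables) all come from the add_dqr_to_qc() code above,
while the datastream name, date range, and assessment values below are placeholder
inputs, not a known-good query.

    import json

    import requests

    # Placeholder inputs; substitute a real ARM datastream and date range.
    datastream = 'sgpmetE13.b1'
    start_date = '20190101'           # strftime('%Y%m%d') format
    end_date = '20190201'
    assessment = 'incorrect,suspect'  # lowercase, comma-separated, no spaces

    url = ('https://dqr-web-service.svcs.arm.gov/dqr_full'
           f'/{datastream}/{start_date}/{end_date}/{assessment}')
    req = requests.get(url)

    # Same status handling as act/qc/arm.py.
    if req.status_code == 400:
        raise ValueError('Check parameters')
    if req.status_code == 500:
        raise ValueError('DQR Webservice Temporarily Down')

    # A 404 means no DQRs were found; the datastream key is then missing.
    docs = json.loads(req.text).get(datastream, {})
    for category, dqrs in docs.items():
        for dqr_number, info in dqrs.items():
            print(dqr_number, category, info['description'], info['variables'])
            for dates in info['dates']:
                print('   ', dates['start_date'], '->', dates['end_date'])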
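Note on PATCH 3/4: the Pint change can be sanity-checked in isolation. A minimal
sketch, assuming Pint >= 0.19, where 'percent' ships as a built-in unit -- which is
presumably why redefining it (the old 'percent = 0.01*count = %' line) triggered the
warning this patch removes. The array values are arbitrary; convert_units() remaps
'%' to 'percent' and '1' to 'unitless' before parsing, per the convert_dict above.

    import numpy as np
    import pint

    # Same registry setup as act/utils/data_utils.py after this patch.
    ureg = pint.UnitRegistry(autoconvert_offset_to_baseunit=True)
    ureg.define('fraction = []')   # '[]' declares a new dimensionless unit
    ureg.define('unitless = []')   # replaces the old 'unitless = count = 1'

    data = np.array([0.1, 0.25, 0.5]) * ureg('fraction')
    print(data.to('percent').magnitude)   # -> [10. 25. 50.]
    print(data.to('unitless').magnitude)  # -> [0.1 0.25 0.5]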