Skip to content

Commit

Permalink
trunk: post-proc: added travel time distribution and BUGFIX mhm class
Browse files Browse the repository at this point in the history
  • Loading branch information
Falk Heße committed Jun 1, 2017
1 parent ada11bd commit a776322
Show file tree
Hide file tree
Showing 11 changed files with 1,416 additions and 42 deletions.
106 changes: 64 additions & 42 deletions post-proc/mhm.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,13 +62,13 @@ class MHM(object):

##-- init function ------------------------------------------------------------

def __init__(self, model_path, file_name = 'mhm.nml'):
def __init__(self, model_path, file_name = 'mhm.nml',
rel_path_name = False):

# to do: - option for restart file
# - both implicit and explicit
# - adapt for use with several basins
# - only simple upscaling
# - only absolute paths
# - documentation
# - doctests

Expand Down Expand Up @@ -103,25 +103,37 @@ def __init__(self, model_path, file_name = 'mhm.nml'):
f_id.close()

self.nBasins = int(self.nml['nBasins'])

self.cellsize['L0'] = 100
self.cellsize['L1'] = int(self.nml['resolution_Hydrology(1)'])
self.cellsize['L2'] = 4000

self.cellsize_ratio['L0'] = 1
self.cellsize_ratio['L1'] = float(self.nml['resolution_Hydrology(1)'])/self.cellsize['L0']
self.cellsize_ratio['L2'] = self.cellsize['L2']/self.cellsize['L0']


if self.nml['timeStep_sm_input'] == '-1':
self.t_stepsize = 'daily'
elif self.nml['timeStep_sm_input'] == '-2':
self.t_stepsize = 'monthly'
elif self.nml['timeStep_sm_input'] == '-3':
self.t_stepsize = 'yearly'

# reading data from the 'dem.asc' file for domain information


if rel_path_name:
for k,v in self.nml.items():
if ('dir_' in k) or ('file_' in k):
self.nml[k] = model_path + self.nml[k]

# reading data from the 'dem.asc' file for domain information

f_id = open(self.nml['dir_Morpho(1)'] + 'dem.asc')

for i, line in enumerate(f_id):
if i == 4:
self.cellsize['L0'] = int(line[9:])
self.cellsize['L1'] = int(self.nml['resolution_Hydrology(1)'])
self.cellsize['L2'] = 4000
f_id.close()

self.cellsize_ratio['L0'] = 1
self.cellsize_ratio['L1'] = float(self.nml['resolution_Hydrology(1)'])/self.cellsize['L0']
self.cellsize_ratio['L2'] = self.cellsize['L2']/self.cellsize['L0']

f_id = open(self.nml['dir_Morpho(1)'] + 'dem.asc')

for line_i in range(6):
line = f_id.next().strip()
line = line.split()
Expand Down Expand Up @@ -152,18 +164,18 @@ def __init__(self, model_path, file_name = 'mhm.nml'):
self.mask['L0'][line_i - 6][col_i] = 1
for row_i in range(0, self.nrows['L1']):
for col_i in range(0, self.ncols['L1']):
row_a = row_i*self.cellsize_ratio['L1']
row_e = (row_i + 1)*self.cellsize_ratio['L1']
col_a = col_i*self.cellsize_ratio['L1']
col_e = (col_i + 1)*self.cellsize_ratio['L1']
row_a = int(row_i*self.cellsize_ratio['L1'])
row_e = int((row_i + 1)*self.cellsize_ratio['L1'])
col_a = int(col_i*self.cellsize_ratio['L1'])
col_e = int((col_i + 1)*self.cellsize_ratio['L1'])
if np.mean(self.mask['L0'][row_a:row_e,col_a:col_e]) == 1:
self.mask['L1'][row_i][col_i] = 1
for row_i in range(0, self.nrows['L2']):
for col_i in range(0, self.ncols['L2']):
row_a = row_i*self.cellsize_ratio['L2']
row_e = (row_i + 1)*self.cellsize_ratio['L2']
col_a = col_i*self.cellsize_ratio['L2']
col_e = (col_i + 1)*self.cellsize_ratio['L2']
row_a = int(row_i*self.cellsize_ratio['L2'])
row_e = int((row_i + 1)*self.cellsize_ratio['L2'])
col_a = int(col_i*self.cellsize_ratio['L2'])
col_e = int((col_i + 1)*self.cellsize_ratio['L2'])
if np.mean(self.mask['L0'][row_a:row_e,col_a:col_e]) == 1:
self.mask['L2'][row_i][col_i] = 1
f_id.close()
Expand All @@ -174,13 +186,17 @@ def import_data(self, data_type, *kwargs ):
if data_type == 'states_and_fluxes':
self.import_states_and_fluxes( *kwargs )
elif data_type == 'lat_lon_L0':
data_path = self.nml['dir_LatLon(1)']
self.lon_L0 = self.import_lat_lon(data_path, 'lon_l0')
self.lat_L0 = self.import_lat_lon(data_path, 'lat_l0')
data_path = str(kwargs[0])
self.lon_L0 = self.import_lat_lon(data_path, 'lon')
self.lat_L0 = self.import_lat_lon(data_path, 'lat')
elif data_type == 'lat_lon':
data_path = self.nml['dir_LatLon(1)']
data_path = self.nml['file_LatLon(1)']
self.lon_L1 = self.import_lat_lon(data_path, 'lon')
self.lat_L1 = self.import_lat_lon(data_path, 'lat')
elif data_type == 'lat_lon_L2':
data_path = str(kwargs[0])
self.lon_L2 = self.import_lat_lon(data_path, 'lon')
self.lat_L2 = self.import_lat_lon(data_path, 'lat')
elif data_type == 'landcover':
data_path = self.nml['dir_LCover(1)'] + str(kwargs[0])
setattr(self, data_type, self.import_L0_data(data_path))
Expand Down Expand Up @@ -213,6 +229,7 @@ def import_precipitation(self, f_path):
def import_states_and_fluxes(self, *kwargs):

f_path = self.nml['dir_Out(1)'] + 'mHM_Fluxes_States.nc'
# print(f_path)

if kwargs[0] == 'all':
var_list = [str(i) for i in readnetcdf(f_path, variables=True)]
Expand All @@ -224,8 +241,8 @@ def import_states_and_fluxes(self, *kwargs):
'recharge', 'aET_L01', 'aET_L02', 'aET_L03',
'preEffect']
else:
var_list = kwargs

var_list = kwargs[0]
# print(var_list)
for var_i in range(0, len(var_list)):
var = var_list[var_i]
setattr(self, var, readnetcdf(f_path, var=var))
Expand All @@ -251,14 +268,14 @@ def import_L0_data(self, f_path):

##-- importing restart file ---------------------------------------------------

# def import_restart_file(self, *kwargs):
#
# f_path = self.nml['dir_Out(1)'] + 'mHM_restart_001.nc'
# var_list = [str(i) for i in readnetcdf(f_path, variables=True)]
def import_restart_file(self, *kwargs):

f_path = self.nml['dir_Out(1)'] + 'mHM_restart_001.nc'
var_list = [str(i) for i in readnetcdf(f_path, variables=True)]
# print(var_list)
# for var_i in range(0, len(var_list)):
# var = var_list[var_i]
# setattr(self, var, readnetcdf(f_path, var=var))
for var_i in range(0, len(var_list)):
var = var_list[var_i]
setattr(self, var, readnetcdf(f_path, var=var))

##-- upscaling and downscaling functions --------------------------------------

Expand All @@ -277,11 +294,11 @@ def upscale_L1_data(self, L0_array, flag):
for col_i in range(0, self.ncols['L1']):
if self.mask['L1'][row_i, col_i]:
continue
row_s = row_i*self.cellsize_ratio['L1']
row_e = (row_i + 1)*self.cellsize_ratio['L1']
row_s = int(row_i*self.cellsize_ratio['L1'])
row_e = int((row_i + 1)*self.cellsize_ratio['L1'])
row_c = int((row_s + row_e)/2)
col_s = col_i*self.cellsize_ratio['L1']
col_e = (col_i + 1)*self.cellsize_ratio['L1']
col_s = int(col_i*self.cellsize_ratio['L1'])
col_e = int((col_i + 1)*self.cellsize_ratio['L1'])
col_c = int((col_s + col_e)/2)
num += 1
if flag == 'any':
Expand All @@ -301,14 +318,18 @@ def upscale_L1_data(self, L0_array, flag):
def downscale_data(self, data_type):

pre_L1 = np.zeros((self.nrows['L1'], self.ncols['L1']))
pre = np.mean(self.pre, axis = 0)
ratio = int(self.cellsize_ratio['L2']/self.cellsize_ratio['L1'])
pre = np.mean(self.pre, axis=0)
L2 = self.cellsize_ratio['L2']
# L2 = 10
ratio = int(L2/self.cellsize_ratio['L1'])

for row_i in range(0, self.nrows['L1']):
for col_i in range(0, self.ncols['L1']):
pre_L1[row_i, col_i] = pre[row_i/ratio, col_i/ratio]

self.pre_L1 = np.ma.array( np.mean(self.preEffect, axis = 0), mask = self.mask['L1'])
self.pre_L1 = np.ma.array(pre_L1, mask = self.mask['L1'])
# self.pre_L1 = np.ma.array( np.mean(self.preEffect, axis = 0), mask = self.mask['L1'])


##-- flow direction functions -------------------------------------------------

Expand Down Expand Up @@ -399,11 +420,12 @@ def combine_variables(self, my_list):

f_path = self.nml['dir_Out(1)'] + 'mHM_Fluxes_States.nc'
var_list = [str(i) for i in readnetcdf(f_path, variables=True)]
# print(var_list)
var_dict = {}
for elem in my_list:
var_dict[elem] = [s for s in var_list if elem + '_L0' in s]
tmp = 0
for sub_elem in var_dict[elem]:
tmp += getattr(self, sub_elem )
setattr(self, elem, tmp )


66 changes: 66 additions & 0 deletions post-proc/sas/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
#!/usr/bin/env python
"""
Python Utilities for computing StorAge-Selection (SAS) functions
Get help on each function by typing
>>> import sas
>>> help(sas.function)
License
-------
This file is part of the UFZ Python package.
Not all files in the package are free software. The license is given in the
'License' section of the docstring of each routine.
The package is released under the GNU Lesser General Public License. The
following applies: The SAS Python package is free software: you can
redistribute it and/or modify it under the terms of the GNU Lesser
General Public License as published by the Free Software Foundation,
either version 3 of the License, or (at your option) any later version.
The SAS Python package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with the UFZ makefile project (cf. gpl.txt and lgpl.txt).
If not, see <http://www.gnu.org/licenses/>.
Copyright 2015 Falk Heße
History
-------
Written, FH, Apr 2015
"""
#from __future__ import print_function

# Routines

from sas_base import SAS
from aux_fun import *
from get_p import *
from get_theta import *
from get_U_num import *
from get_validity_range import *
#from plot_fun import *

# Information
__author__ = 'Falk Hesse'
__version__ = '0.1.0'
#__revision__ =
__date__ = 'Date: 01.04.2015'

# Main
#if __name__ == '__main__':
# print('\nSAS Python Package.')
# print("Version {:s} from {:s}.".format(__version__,__date__))
# print('\nThis is the README file. See also the license file LICENSE.\n\n')
# f = open('README','r')
# for line in f: print(line,end='')
# f.close()
Loading

0 comments on commit a776322

Please sign in to comment.