Restart procedure #85

Merged 11 commits on Oct 20, 2020
2 changes: 1 addition & 1 deletion .gitignore
@@ -6,7 +6,7 @@
.pydevproject
.DS_Store
._.*
test_data/stoporad.ip
*.idx

*.lprof
*.prof
10 changes: 9 additions & 1 deletion awsm/framework/CoreConfig.ini
@@ -56,6 +56,10 @@ run_for_nsteps: type = int,
description = number of timesteps to run iSnobal. This is
optional and mainly used in model crash scenarios

netcdf_output_precision:
default = float,
options = [float double],
description = NetCDF variable output precision, either float (32-bit) or double (64-bit)

################################################################################
# Configurations for updating with lidar depths
@@ -170,4 +174,8 @@ variables: type = string list,

mask_isnobal: default = False,
type = bool,
description = Mask snowpack model output.
description = Mask snowpack model output.

restart_date_time: type = Datetime,
description = Restart iPysnobal at this date and time, keeping the output in the same
project and run directory. This will be the first time step that iPysnobal performs.
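
For illustration, a minimal sketch of how the two new options might look in a user configuration. Placing restart_date_time under [ipysnobal] follows the config lookups in framework.py below; putting netcdf_output_precision in the same section is an assumption based on its position near run_for_nsteps, and the date is only a placeholder.

[ipysnobal]
# 32-bit output variables (use 'double' for 64-bit)
netcdf_output_precision: float
# first time step iPysnobal will perform; output stays in the same run directory
restart_date_time: 2020-03-15 09:00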
68 changes: 34 additions & 34 deletions awsm/framework/framework.py
@@ -98,30 +98,12 @@ def __init__(self, config):
self.pysnobal_output_vars = [wrd.lower()
for wrd in self.pysnobal_output_vars]

# options for restarting iSnobal
self.restart_crash = False
if self.config['isnobal restart']['restart_crash']:
self.restart_crash = True
# self.new_init = self.config['isnobal restart']['new_init']
self.depth_thresh = self.config['isnobal restart']['depth_thresh']
self.restart_hr = \
int(self.config['isnobal restart']['wyh_restart_output'])
self.restart_folder = self.config['isnobal restart']['output_folders']

# parameters needed for restart procedure
self.restart_run = False
if self.config['isnobal restart']['restart_crash']:
self.restart_run = True
# find restart hour datetime
reset_offset = pd.to_timedelta(self.restart_hr, unit='h')
# set a new start date for this run
self.tmp_log.append('Restart date is {}'.format(self.start_date))
self.model_restart()

# read in update depth parameters
self.update_depth = False
if 'update depth' in self.config:
self.update_depth = self.config['update depth']['update']
if self.update_depth:
self.update_file = self.config['update depth']['update_file']
self.update_buffer = self.config['update depth']['buffer']
self.flight_numbers = self.config['update depth']['flight_numbers']
@@ -141,9 +123,6 @@ def __init__(self, config):
os.path.join(self.path_output, 'awsm_config_backup.ini')
generate_config(self.ucfg, config_backup_location)

# create log now that directory structure is done
# self.create_log()

self.smrf_connector = SMRFConnector(self)

# if we have a model, initialize it
@@ -238,6 +217,12 @@ def parse_time(self):
self.start_wyhr = int(utils.water_day(self.start_date)[0]*24)
self.end_wyhr = int(utils.water_day(self.end_date)[0]*24)

# if there is a restart time
if self.config['ipysnobal']['restart_date_time'] is not None:
rs_dt = self.config['ipysnobal']['restart_date_time']
rs_dt = pd.to_datetime(rs_dt).tz_localize(tz=self.tzinfo)
self.config['ipysnobal']['restart_date_time'] = rs_dt

def parse_folder_structure(self):
"""Parse the config to get the folder structure

@@ -264,24 +249,14 @@
' than run_smrf_ipysnobal')

def create_log(self):
'''
"""
Now that the directory structure is done, create log file and print out
saved logging statements.
'''
"""

# setup the logging
logfile = None
if self.config['awsm system']['log_to_file']:
# if self.config['isnobal restart']['restart_crash']:
# logfile = \
# os.path.join(self.path_log,
# 'log_restart_{}.out'.format(self.restart_hr))
# elif self.do_forecast:
# logfile = \
# os.path.join(self.path_log,
# 'log_forecast_'
# '{}.out'.format(self.folder_date_stamp))
# else:
logfile = \
os.path.join(self.path_log,
'log_{}.out'.format(self.folder_date_stamp))
@@ -304,6 +279,30 @@ def create_log(self):
for line in self.tmp_err:
self._logger.error(line)

def model_restart(self):
"""Check if AWSM is being restarted, must have certain outputs
like storm days for certrain models
"""

if self.config['ipysnobal']['restart_date_time'] is not None:
self.start_date = self.config['ipysnobal']['restart_date_time']
self.start_date = self.start_date - \
pd.Timedelta(minutes=self.config['time']['time_step'])

# has to have the storm day file, else the albedo will be
# set to fresh snow
if 'storm_days' not in self.config['output']['variables'] and \
self.model_type != 'ipysnobal':
raise FileNotFoundError("""Restarting with smrf_ipysnobal requires the
storm_days variable to be output because it is used to restart the albedo.
Add 'storm_days' to the output variables and rerun
from the beginning of the simulation.""")
else:
self.config['precip']['storm_days_restart'] = os.path.join(
self.path_output,
'storm_days.nc'
)

def run_smrf(self):
"""
Run smrf through the :mod: `awsm.smrf_connector.SMRFConnector`
@@ -430,6 +429,7 @@ def __exit__(self, exc_type, exc_value, traceback):
'AWSM finished in: {}'.format(datetime.now() - self.start_time)
)
self._logger.info('AWSM closed --> %s' % datetime.now())
logging.shutdown()


def run_awsm_daily_ops(config_file):
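
The restart logic above boils down to a small datetime manipulation: parse_time() localizes the configured restart_date_time to the run's time zone, and model_restart() shifts the effective start date back one model time step, so the restart time itself becomes the first step iPysnobal computes (it also requires 'storm_days' among the smrf_ipysnobal output variables so the albedo can be restarted). A standalone sketch of that arithmetic, with placeholder values for the time zone, time step, and restart time:

import pandas as pd

# placeholders standing in for self.tzinfo and self.config['time']['time_step']
tzinfo = 'UTC'
time_step_minutes = 60

# naive config value localized to the run time zone, as in parse_time()
restart_date_time = pd.to_datetime('2020-03-15 09:00').tz_localize(tz=tzinfo)

# the init state is read one step earlier, as in model_restart(), so that
# restart_date_time is the first time step actually computed
start_date = restart_date_time - pd.Timedelta(minutes=time_step_minutes)

print(start_date)  # 2020-03-15 08:00:00+00:00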
71 changes: 14 additions & 57 deletions awsm/models/pysnobal/init_model.py
@@ -54,6 +54,7 @@ def __init__(self, cfg, topo, path_output, start_date):
self._logger = logging.getLogger(__name__)
self.topo = topo
self.start_date = start_date
self.config = cfg

# get parameters from awsm
self.init_file = cfg['ipysnobal']['init_file']
@@ -63,15 +64,18 @@ def __init__(self, cfg, topo, path_output, start_date):
self._logger.info(
'Using {} to build model init state.'.format(self.init_file))

# type of model run
self.model_type = cfg['awsm master']['model_type']
# paths
self.path_output = path_output
# restart parameters
self.restart_crash = cfg['isnobal restart']['restart_crash']
self.restart_hr = cfg['isnobal restart']['wyh_restart_output']
self.depth_thresh = cfg['isnobal restart']['depth_thresh']
self.restart_folder = cfg['isnobal restart']['output_folders']

# when restarting, just reset the start date to grab the right init time step
if self.config['ipysnobal']['restart_date_time'] is not None:
self.start_date = self.start_date - \
pd.Timedelta(minutes=self.config['time']['time_step'])
self.init_type = 'netcdf_out'
self.init_file = os.path.join(self.path_output, 'ipysnobal.nc')
self._logger.info("""Initializing ipysnobal at time {} from """
"""previous output file""".format(
self.start_date))

# dictionary to store init data
self.init = {}
@@ -80,6 +84,7 @@
self.init['mask'] = self.topo.mask
self.init['z_0'] = self.topo.roughness
self.init['elevation'] = self.topo.dem

# read in the init file
self.get_init_file()

@@ -98,11 +103,9 @@
Get the necessary data from the init.
This will check the model type and the init file and act accordingly.
"""
# get crash restart if restart_crash
if self.restart_crash:
self.get_crash_init()

# if we have no init info, make zero init
elif self.init_file is None:
if self.init_file is None:
self.get_zero_init()
# get init depending on file type
elif self.init_type == 'netcdf':
@@ -219,49 +222,3 @@ def get_netcdf_out(self):
self.init['h2o_sat'] = init_data.water_saturation.values

ds.close()

def zero_crash_depths(self, depth_thresh, z_s, rho, T_s_0, T_s_l, T_s, h2o_sat):
"""
Zero snow depth under certain threshold and deal with associated variables.

Args:
depth_thresh: threshold in mm depth to zero
z_s: snow depth (Numpy array)
rho: snow density (Numpy array)
T_s_0: surface layer temperature (Numpy array)
T_s_l: lower layer temperature (Numpy array)
T_s: average snow cover temperature (Numpy array)
h2o_sat: percent liquid h2o saturation (Numpy array)

Returns:
restart_var: dictionary of input variables after correction
"""

# find pixels that need reset
idz = z_s < depth_thresh

# find number of pixels reset
num_pix = len(np.where(idz)[0])
num_pix_tot = z_s.size

self._logger.warning(
'Zeroing depth in pixels lower than {} [m]'.format(depth_thresh))
self._logger.warning(
'Zeroing depth in {} out of {} total pixels'.format(num_pix, num_pix_tot))

z_s[idz] = 0.0
rho[idz] = 0.0
T_s_0[idz] = -75.0
T_s_l[idz] = -75.0
T_s[idz] = -75.0
h2o_sat[idz] = 0.0

restrat_var = {}
restrat_var['z_s'] = z_s
restrat_var['rho'] = rho
restrat_var['T_s_0'] = T_s_0
restrat_var['T_s_l'] = T_s_l
restrat_var['T_s'] = T_s
restrat_var['h2o_sat'] = h2o_sat

return restrat_var
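
When restart_date_time is set, the init class switches init_type to 'netcdf_out' and reads the model state from the previous ipysnobal.nc in the run directory. A rough sketch of that read path, assuming xarray for the dataset handling; apart from water_saturation, which appears in the diff, the variable names below are assumptions about the output schema, and the path and time are placeholders:

import pandas as pd
import xarray as xr

init_file = 'output/ipysnobal.nc'                 # placeholder path
start_date = pd.to_datetime('2020-03-15 08:00')   # one step before the restart

ds = xr.open_dataset(init_file)
init_data = ds.sel(time=start_date)

init = {}
init['z_s'] = init_data.thickness.values              # assumed variable name
init['rho'] = init_data.snow_density.values           # assumed variable name
init['h2o_sat'] = init_data.water_saturation.values   # shown in the diff

ds.close()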