update on NHD to read reservoir_persistence_usgs #737

Closed
wants to merge 1 commit
38 changes: 22 additions & 16 deletions src/troute-network/troute/DataAssimilation.py
@@ -432,22 +432,23 @@ def __init__(self, network, from_files, value_dict, da_run=[]):
reset_index().
set_index(['usgs_gage_id']) # <- TODO use input parameter for this
)

gage_lake_df.usgs_lake_id = gage_lake_df.usgs_lake_id.astype(int)
gage_lake_df.index = gage_lake_df.index.astype(str).str.strip()
# build dataframe that crosswalks gageIDs to segmentIDs
gage_link_df = (
network.link_gage_df['gages'].
reset_index().
set_index(['gages'])
)

gage_link_df.index = gage_link_df.index.astype(str).str.strip()
# build dataframe that crosswalks segmentIDs to lakeIDs
link_lake_df = (
gage_lake_df.
join(gage_link_df, how = 'inner').
reset_index().set_index('link').
drop(['index'], axis = 1)
)

# resample `usgs_df` to 15 minute intervals
usgs_df_15min = (
self._usgs_df.
@@ -462,18 +463,22 @@ def __init__(self, network, from_files, value_dict, da_run=[]):
# containing all of the rows it needs. By using pd.concat here we add in
# the missing rows. But this should be fixed earlier, likely in the
# creation of the gages dictionary...
reservoir_usgs_df = pd.concat(
[
usgs_df_15min.join(link_lake_df, how = 'inner').
reset_index().
set_index('usgs_lake_id').
drop(['index'], axis = 1),
usgs_df_15min.join(network.usgs_lake_gage_crosswalk, how='inner').
drop(['usgs_gage_id'], axis = 1).
rename_axis('usgs_lake_id')
]
)


if not link_lake_df.empty:
reservoir_usgs_df = pd.concat(
[
usgs_df_15min.join(link_lake_df, how = 'inner').
reset_index().
set_index('usgs_lake_id').
drop(['index'], axis = 1),
usgs_df_15min.join(network.usgs_lake_gage_crosswalk, how='inner').
drop(['usgs_gage_id'], axis = 1).
rename_axis('usgs_lake_id')
]
)
else:
reservoir_usgs_df = pd.DataFrame()

# create reservoir hybrid DA initial parameters dataframe
if not reservoir_usgs_df.empty:
reservoir_usgs_param_df = pd.DataFrame(
@@ -532,7 +537,8 @@ def __init__(self, network, from_files, value_dict, da_run=[]):
# an error. Need to think through this more.
if not self._usgs_df.empty:
self._usgs_df = self._usgs_df.loc[:,network.t0:]

self._usgs_df.index = self._usgs_df.index.astype(str).str.strip()

def update_after_compute(self, run_results,):
'''
Function to update data assimilation object after running routing module.
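Two details in the DataAssimilation.py hunks above are easy to miss: the gage-ID indexes on both sides of the crosswalk joins are normalized with .astype(str).str.strip(), and the pd.concat that builds reservoir_usgs_df now runs only when link_lake_df is non-empty. A minimal sketch of why the normalization matters, using made-up gage, lake, and segment IDs (not values from this PR):

import pandas as pd

# Fixed-width strings read from NetCDF often carry trailing padding, so
# '01010000   ' and '01010000' never match in an index-on-index join.
gage_lake_df = pd.DataFrame(
    {'usgs_lake_id': [166]},                               # hypothetical lake ID
    index=pd.Index(['01010000   '], name='usgs_gage_id'),  # padded gage ID
)
gage_link_df = pd.DataFrame(
    {'link': [5791828]},                                   # hypothetical segment ID
    index=pd.Index(['01010000'], name='gages'),
)

print(gage_lake_df.join(gage_link_df, how='inner').empty)  # True: row is dropped

# Normalizing the index the way the diff does restores the match.
gage_lake_df.index = gage_lake_df.index.astype(str).str.strip()
print(gage_lake_df.join(gage_link_df, how='inner').empty)  # False: IDs now align

The empty-frame guard plays the same defensive role: when no gage maps to a lake, reservoir_usgs_df is simply an empty DataFrame, and the `if not reservoir_usgs_df.empty:` branch that follows skips building the hybrid DA parameter frame.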
17 changes: 11 additions & 6 deletions src/troute-network/troute/NHDNetwork.py
@@ -269,16 +269,16 @@ def read_geo_file(self,):
)

if reservoir_da:
usgs_hybrid = reservoir_da.get(
usgs_hybrid = reservoir_da['reservoir_persistence_da'].get(
'reservoir_persistence_usgs',
False
)
usace_hybrid = reservoir_da.get(
usace_hybrid = reservoir_da['reservoir_persistence_da'].get(
'reservoir_persistence_usace',
False
)
param_file = reservoir_da.get(
'gage_lakeID_crosswalk_file',
'reservoir_parameter_file',
None
)
else:
@@ -287,18 +287,20 @@ def read_geo_file(self,):
usgs_hybrid = False

# check if RFC-type reservoirs are set to true
rfc_params = self.waterbody_parameters.get('rfc')

rfc_params = reservoir_da['reservoir_rfc_da']
if rfc_params:
rfc_forecast = rfc_params.get(
'reservoir_rfc_forecasts',
False
)
param_file = rfc_params.get('reservoir_parameter_file',None)
param_file = reservoir_da.get('reservoir_parameter_file',None)
else:
rfc_forecast = False

if (param_file and reservoir_da) or (param_file and rfc_forecast):
self._waterbody_type_specified = True

(
self._waterbody_types_df,
self._usgs_lake_gage_crosswalk,
@@ -315,6 +317,9 @@ def read_geo_file(self,):
reservoir_da.get('crosswalk_usace_lakeID_field', 'usace_lake_id'),
self.waterbody_connections.values(),
)
self._usgs_lake_gage_crosswalk['usgs_gage_id'] = self._usgs_lake_gage_crosswalk['usgs_gage_id'].apply(lambda x: x.decode('utf-8'))
self._usgs_lake_gage_crosswalk.index = self._usgs_lake_gage_crosswalk.index.astype(str).str.strip()

else:
self._waterbody_type_specified = True
self._waterbody_types_df = pd.DataFrame(data = 1, index = self.waterbody_dataframe.index, columns = ['reservoir_type'])
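On the NHDNetwork.py side, the lookups now expect the persistence and RFC flags to live in nested sub-blocks of reservoir_da rather than at its top level, with the parameter file keyed as reservoir_parameter_file. A minimal sketch of the configuration shape these .get() calls assume; the key names come from the diff, while the values and file path are placeholders:

# Illustrative reservoir_da block matching the updated lookups; the
# True/False values and the path are placeholders, not values from the PR.
reservoir_da = {
    'reservoir_persistence_da': {
        'reservoir_persistence_usgs': True,    # enable USGS hybrid DA
        'reservoir_persistence_usace': False,  # USACE hybrid DA disabled
    },
    'reservoir_rfc_da': {
        'reservoir_rfc_forecasts': False,      # RFC forecast reservoirs disabled
    },
    'reservoir_parameter_file': 'reservoir_index.nc',  # placeholder path
}

usgs_hybrid = reservoir_da['reservoir_persistence_da'].get(
    'reservoir_persistence_usgs', False
)                                                       # -> True
param_file = reservoir_da.get('reservoir_parameter_file', None)

Note that the diff indexes reservoir_da['reservoir_persistence_da'] and reservoir_da['reservoir_rfc_da'] directly, so both sub-blocks must be present in the configuration (or the accesses wrapped in .get()) to avoid a KeyError when they are omitted.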
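The final hunk also decodes the gage IDs in usgs_lake_gage_crosswalk, which arrive from the NetCDF parameter file as byte strings, and normalizes the lake-ID index to stripped strings. A small sketch of that normalization with a hypothetical crosswalk row:

import pandas as pd

# Hypothetical crosswalk as read from the parameter file: the gage ID is a
# byte string and the lake-ID index is numeric. Both IDs are made up.
crosswalk = pd.DataFrame(
    {'usgs_gage_id': [b'01646500']},
    index=pd.Index([8777381], name='usgs_lake_id'),
)

# Mirror the diff: decode bytes -> str on the column, then cast and strip
# the index so later joins against usgs_df line up on string keys.
crosswalk['usgs_gage_id'] = crosswalk['usgs_gage_id'].apply(
    lambda x: x.decode('utf-8')
)
crosswalk.index = crosswalk.index.astype(str).str.strip()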