import glob
from datetime import date

import numpy as np
import xarray as xr
from uncertainties import unumpy as unp


def get_mask(dataArray):
    """Return a boolean mask for the data; currently keeps every element."""
    # Placeholder criterion: mark everything as good until a real bad-shot
    # detection is implemented.
    return np.ones(dataArray.shape, dtype=bool)


def remove_bad_shots(dataArray, **kwargs):
    """Overwrite the selected shots with NaN in place.

    The keyword arguments are passed as a label-based selection, e.g.
    ``runs=3``.
    """
    dataArray.loc[dict(kwargs)] = np.nan
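

# Sketch: NaN out a bad shot by its coordinate labels (the 'runs' selection
# here is illustrative):
#
#     remove_bad_shots(dataArray, runs=3)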


def auto_rechunk(dataSet):
    """Rechunk every dimension of `dataSet` using dask's automatic sizing."""
    kwargs = {key: "auto" for key in dataSet.dims}
    return dataSet.chunk(**kwargs)


def copy_chunk(dataSet, dataChunk):
    """Apply the chunk layout of `dataChunk` to the matching dims of `dataSet`."""
    kwargs = {
        key: dataChunk.chunksizes[key]
        for key in dataChunk.chunksizes
        if key in dataSet.dims
    }
    return dataSet.chunk(**kwargs)
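

# Sketch of a chunking workflow: let dask pick chunk sizes on the raw data,
# then mirror that layout onto a derived dataset (names are illustrative):
#
#     rawData = auto_rechunk(rawData)
#     processed = copy_chunk(processed, rawData)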


def get_h5_file_path(folderpath, maxFileNum=None, filename='*.h5'):
    """Return the sorted paths of the HDF5 files in `folderpath`.

    `folderpath` is concatenated with `filename` as a plain string, so it
    must end with a path separator. If `maxFileNum` is given, only the
    first `maxFileNum` paths are returned.
    """
    filepath = np.sort(glob.glob(folderpath + filename))
    if maxFileNum is None:
        return filepath
    return filepath[:maxFileNum]
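

# Sketch (the folder path is illustrative; note the trailing separator,
# which the plain string concatenation above relies on):
#
#     files = get_h5_file_path('/path/to/data/', maxFileNum=10)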


def get_date():
    """Return today's date formatted as 'YYYY/MM/DD'."""
    today = date.today()
    return today.strftime("%Y/%m/%d")


def _combine_uncertainty(value, std):
    # Build an element-wise array of ufloats from nominal values and stds.
    return unp.uarray(value, std)


def combine_uncertainty(value, std, dask='parallelized', **kwargs):
    """Combine `value` and `std` into one uncertainty-carrying array via apply_ufunc."""
    kwargs.update({"dask": dask})
    return xr.apply_ufunc(_combine_uncertainty, value, std, **kwargs)
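

# A minimal usage sketch with in-memory arrays (`values`/`errors` are made-up
# names for illustration). For dask-backed inputs, `dask='parallelized'`
# additionally needs a dtype hint, e.g.
# `combine_uncertainty(values, errors, output_dtypes=[object])`.
#
#     values = xr.DataArray(np.array([1.0, 2.0]), dims='x')
#     errors = xr.DataArray(np.array([0.1, 0.2]), dims='x')
#     withErrors = combine_uncertainty(values, errors)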


def _seperate_uncertainty_single(data):
    # Split one ufloat into its nominal value and standard deviation.
    return data.n, data.s


def _seperate_uncertainty(data):
    # Vectorize the scalar splitter so it maps over whole arrays of ufloats.
    func = np.vectorize(_seperate_uncertainty_single)
    return func(data)


def seperate_uncertainty(data, dask='parallelized', **kwargs):
    """Split an uncertainty-carrying array into (value, std) via apply_ufunc."""
    kwargs.update(
        {
            "dask": dask,
            # Two scalar outputs per element: nominal value and std.
            "output_core_dims": [[], []],
        }
    )
    return xr.apply_ufunc(_seperate_uncertainty, data, **kwargs)
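

# Round-trip sketch (`withErrors` is the hypothetical output of
# combine_uncertainty above):
#
#     value, std = seperate_uncertainty(withErrors)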


def get_scanAxis(dataSet):
    """Return the scan axes as ``[primary, secondary]``, padded with None.

    When 'runs' comes first in a two-axis scan, the order is swapped so the
    physical scan axis stays primary.
    """
    res = dataSet.scanAxis

    if len(res) == 0:
        res = [None, None]
    elif len(res) == 1:
        res = [res[0], None]
    elif len(res) == 2 and res[0] == 'runs':
        res = [res[1], res[0]]

    return res
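

# Sketch: unpack the primary and secondary scan axes of a hypothetical
# dataset whose `scanAxis` attribute lists its scanned dimensions:
#
#     scanAxis, scanAxis2 = get_scanAxis(dataSet)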


def print_scanAxis(dataSet):
    """Print the detected scan axes together with their unique values."""
    scanAxis = dataSet.scanAxis

    scan = {}

    for key in scanAxis:
        scanValue = np.array(dataSet[key])
        # np.unique already returns the sorted unique values, so no further
        # indexing is needed.
        scan[key] = np.unique(scanValue)

    print("The detected scanning axes and values are: \n")
    print(scan)


def calculate_mean(dataSet):
    """Average over repeated runs, or pass the data through unchanged."""
    if 'runs' in dataSet.dims:
        return dataSet.mean(dim='runs')
    else:
        return dataSet


def calculate_std(dataSet):
    """Standard deviation over repeated runs, or None if there are none."""
    if 'runs' in dataSet.dims:
        return dataSet.std(dim='runs')
    else:
        return None
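

# Typical flow for repeated shots (sketch; assumes `dataSet` has a 'runs'
# dimension, so both helpers reduce over it):
#
#     withErrors = combine_uncertainty(calculate_mean(dataSet),
#                                      calculate_std(dataSet))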


def extract_temperature_from_fit():
    # Not yet implemented.
    pass


def extract_condensate_fraction_from_fit():
    # Not yet implemented.
    pass


def swap_xy(dataSet):
    """Swap the 'x' and 'y' dimensions, using '__x' as a temporary name."""
    dataSet = dataSet.rename_dims(dict(x='__x'))
    dataSet = dataSet.rename_dims(dict(y='x'))
    dataSet = dataSet.rename_dims(dict(__x='y'))
    return dataSet