Upload files to "scripts"

Uncommented final version of NNDy
castaneda 2025-03-21 14:03:04 +01:00
parent 35f1d92410
commit 73754efd3b
4 changed files with 275 additions and 0 deletions

89
scripts/NNDy.py Normal file

@@ -0,0 +1,89 @@
import labscript
import numpy as np
import pandas as pd
#Imports for M-LOOP
import mloop.interfaces as mli
import mloop.controllers as mlc
import mloop.visualizations as mlv
#import interface
import NNDy_Interface
if __name__ == '__main__':
    #indicate the name of the sequence to be optimized, to be found in {routine_name}.py
    routine_name = 'TestSetup'
    #indicate the name of the DA file to be run by lyse, {cost_model}.py
    cost_model = 'TestDA'

    #HALTING CONDITIONS
    #indicate the maximum number of runs
    #max_num_runs = 10
    #or instead indicate max_num_runs_without_better_params
    max_num_runs_without_better_params = 50
    #indicate the target cost
    #target_cost = 0

    #FIXED GLOBAL VARIABLES
    #indicate the values of the global variables that won't be optimized
    globalPar = {'T_wlm': 60,
                 'buffer_time': 10}
    #not necessary if they're already set in runmanager

    #INPUT PARAMETERS
    #indicate the initial values of the global variables to be optimized, in the following called input parameters,
    #as a list
    inputPar = [1e6, 2.5, 2]
    num_params = len(inputPar)
    inputPar_names = ['delta_freq', 'carrier_amp', 'wait_AWG']
    #indicate the range of the input parameters as two lists of length num_params
    min_boundary = [-20e6, 0.01, 0]
    max_boundary = [20e6, 4.5, 10]

    hyperpar = {
        'globalPar': globalPar,
        'inputPar_names': inputPar_names
    }

    interface = NNDy_Interface.NNDy_Interface(routine_name, cost_model, hyperpar)

    controller = mlc.create_controller(interface,
                                       controller_type='neural_net',
                                       #HALTING CONDITIONS
                                       #max_num_runs = max_num_runs,
                                       max_num_runs_without_better_params=max_num_runs_without_better_params,
                                       #target_cost = target_cost,
                                       #INPUT PARAMETERS
                                       num_params=num_params,
                                       #M-LOOP handles the variables as arrays rather than dictionaries, so the parameters are passed as lists together with their names
                                       min_boundary=min_boundary, max_boundary=max_boundary,
                                       first_params=inputPar,
                                       param_names=inputPar_names,
                                       #other settings
                                       #allowed variation from the current best parameters found, as a fraction
                                       trust_region=0.5,
                                       #the output parameters over which the cost is computed are noisy quantities
                                       cost_has_noise=True,
                                       #if False, M-LOOP waits for each experiment to complete, so that every new optimization iteration trains on an enlarged training set
                                       no_delay=False)
    #for other possible settings of the optimizer see the documentation: https://m-loop.readthedocs.io/en/latest/tutorials.html

    #to run M-LOOP and find the optimal parameters, just use the controller method optimize
    controller.optimize()

    #the results of the optimization are saved to files and can also be accessed as attributes of the controller
    #print('Best parameters found:')
    #print(controller.best_params)

    #you can also run the default set of visualizations for the controller with one command
    mlv.show_all_default_visualizations(controller)
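
#For reference, a minimal sketch (an assumption, since TestDA.py is not shown here) of what the DA module
#{cost_model}.py is expected to provide: NNDy_Interface imports a function named cost from it and reads
#the keys 'cost' and 'bad' from its return value.
#
#def cost(hdf_output_file):
#    #open the hdf shot file and compute the figure of merit from the measured quantities
#    cost_value = ...  #experiment-specific analysis goes here
#    return {'cost': cost_value, 'bad': False}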

90
scripts/NNDy_Interface.py Normal file

@@ -0,0 +1,90 @@
#imports for runmanager - labscript
from runmanager_remote import run_experiment
import time
import numpy as np
#Imports for M-LOOP
import mloop.interfaces as mli
#importlib allows importing the cost function defined in {cost_model}.py
import importlib

_module_cache = {}  #cache the imported cost function so the DA module is only imported once per routine
#Declare your custom class that inherits from the Interface class
class NNDy_Interface(mli.Interface):
    def __init__(self, routine_name, cost_model, hyperpars):
        #the constructor of the parent class, Interface, must be called
        super(NNDy_Interface, self).__init__()
        #attributes of the interface can be added here
        self.exp_global_par = hyperpars['globalPar']
        self.input_names = hyperpars['inputPar_names']
        self.routine = routine_name
        self.cost_model = cost_model

    def cost(self, parameters):
        module_name = self.cost_model
        if module_name not in _module_cache:
            try:
                module = importlib.import_module(module_name)
                cost_func = getattr(module, 'cost')
                #analysis_func = getattr(module, 'analysis')
                _module_cache[module_name] = {
                    "cost_func": cost_func,
                    # "analysis_func": analysis_func
                }
            except (ModuleNotFoundError, AttributeError) as e:
                raise ImportError(f'Failed to load cost function from "{module_name}.py": {e}')
        cost_func = _module_cache[module_name]["cost_func"]
        return cost_func(parameters)

    #the method that runs the experiment for a given set of parameters and returns a cost
    def get_next_cost_dict(self, In_params_raw):
        #the parameters come in a dictionary, provided as a numpy array
        In_params = In_params_raw['params']
        #print(In_params)
        #the optimization parameters to be sent back to labscript are converted back into a dictionary
        if len(In_params) != len(self.input_names):
            raise Exception('number of optimized parameters and names do not match')
        In_params_dict = {}
        for par, name in zip(In_params, self.input_names):
            In_params_dict.update({name: par})
        #merge with the fixed global variables
        global_group = In_params_dict | self.exp_global_par

        #here the experiment is run for the given set of parameters:
        #run_experiment runs the routine specified by routine_name with the global variables set to the new parameters given by the optimizer;
        #this means that the experiment parameters - In_params - are chosen among the global variables of this labscript routine and are passed as a dictionary
        #print('running the experiment')
        results = {
            'cost': np.inf,
            'bad': False
        }
        try:
            hdf_output_file = run_experiment(self.routine, global_var=global_group)
            results = self.cost(hdf_output_file)
            #self.analysis(hdf_output_file)
        except Exception as e:
            print(f"Exception '{e}', considered as a bad run!")
            results['bad'] = True
        #print('cost is computed')
        uncer = 0
        time.sleep(0.001)
        #the cost, uncertainty and bad flag must all be returned as a dictionary
        cost_dict = {'cost': results['cost'], 'uncer': uncer, 'bad': results['bad']}
        return cost_dict
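
#Worked example of the conversion performed in get_next_cost_dict, with the values used in NNDy.py:
#In_params = [1e6, 2.5, 2] and input_names = ['delta_freq', 'carrier_amp', 'wait_AWG']
#give In_params_dict = {'delta_freq': 1e6, 'carrier_amp': 2.5, 'wait_AWG': 2};
#merged with globalPar = {'T_wlm': 60, 'buffer_time': 10}, this yields the full set of globals passed to run_experiment:
#{'delta_freq': 1e6, 'carrier_amp': 2.5, 'wait_AWG': 2, 'T_wlm': 60, 'buffer_time': 10}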


@@ -0,0 +1,34 @@
#imports for runmanager - labscript
from runmanager_remote import run_experiment
from lyse import *
import pandas as pd
import TestDA
import xarray as xr
if __name__ == '__main__':
    routine_name = 'TestSetup'
    TestVar = {'T_wlm': 1,
               'delta_freq': 0,
               'wait_AWG': 5,
               'buffer_time': 5,
               'carrier_amp': 2
               }

    hdf_output_file = run_experiment(routine_name, global_var=TestVar)
    print('run finished')

    #df = pd.DataFrame(lyse_run.get_result('wlm', 'wlm_df'), columns=['timestamp [s]', 'wavelength [nm]', 'frequency [THz]'])
    #print(df)
    #results = TestDA.analysis(hdf_output_file)
    cost = TestDA.cost(hdf_output_file)
    #print('I am back')
    print(cost)


@@ -0,0 +1,62 @@
from labscript.labscript import labscript_init
from runmanager import remote
from datetime import datetime
import time
#from lyse import Run as lyse_run
#from lyse import data as lyse_data
from os import listdir
#from labscript_utils import import_or_reload
def run_experiment(routine_name, global_var):
    #import_or_reload('labscriptlib.NNDy_TestSetup.connection_table')
    hdf_path = f"C:\\Users\\DyLab\\PrepLab\\Experiments\\NNDy_TestSetup\\{routine_name}"
    #this way the routine is loaded based on past executions, with the possibility of only changing the global variables;
    #there could be issues if the script is changed before running M-LOOP, so without further insight on this
    #it's recommended to always run the sequence from the runmanager GUI before starting M-LOOP
    labscript_init(hdf_path,
                   new=True)

    runmanager_client = remote.Client()
    runmanager_client.set_view_shots(False)
    runmanager_client.set_globals(global_var)
    print(f'globals: \n {runmanager_client.get_globals()}')
    #print(f'number of shots: \n {runmanager_client.n_shots()}')
    hdf_path = runmanager_client.get_shot_output_folder()
    runmanager_client.engage()
    #print('engaged')

    #waiting is necessary because engage() doesn't block execution;
    #change measurement_time accordingly
    measurement_time = global_var['T_wlm'] + global_var['wait_AWG'] + global_var['buffer_time']
    print(f'now waiting for {measurement_time} s')
    time.sleep(measurement_time)
    print('waiting time is over')

    try:
        hdf_files = listdir(hdf_path)
        if len(hdf_files) == 0:
            raise FileNotFoundError('no shot file found')
        if len(hdf_files) > 1:
            raise RuntimeError('more than one shot file found')
        hdf_file = hdf_path + "/" + hdf_files[0]
    except (FileNotFoundError, RuntimeError) as e:
        raise RuntimeError(f'An error occurred while importing from the hdf output folder: {e}')

    #run = lyse_run(hdf_file, no_write=True)
    #print(hdf_file)
    return hdf_file
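
#A possible refinement, sketched here as an assumption (the helper name _wait_for_shot is hypothetical and it is
#not used by run_experiment above): instead of sleeping for a fixed measurement_time, poll the shot output folder
#until a shot file appears or a timeout elapses.
def _wait_for_shot(shot_folder, timeout, poll_interval=1.0):
    #poll the folder returned by get_shot_output_folder() until it contains at least one file
    start = time.time()
    while time.time() - start < timeout:
        shots = listdir(shot_folder)
        if shots:
            return shot_folder + "/" + shots[0]
        time.sleep(poll_interval)
    raise RuntimeError(f'no shot file appeared in {shot_folder} within {timeout} s')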