- import ivp
- import solveMatrix
- import os
- import cModel
- import sys
- import json
- from contextlib import contextmanager
- import numpy as np
- import time
- import scipy.interpolate
- import shutil
- import importlib
- from scipy.optimize import least_squares
- importlib.reload(solveMatrix)
- import runSolver
- importlib.reload(runSolver)
- def updateSetup(job):
- setupFileSrc = job['setupFile']
- setupFile = os.path.join(job['jobDir'], 'setupInput.json')
- with open(setupFileSrc, 'r') as f:
- setup = json.load(f)
- try:
- setup.update(job['setupUpdates'])
- except KeyError:
- pass
- with open(setupFile, 'w+') as f:
- f.write(json.dumps(setup))
- return setupFile
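- # Illustrative sketch (hypothetical paths and override key, not part of the original pipeline):
- # updateSetup expects a job dict with the source setup file, the job directory and an optional
- # 'setupUpdates' mapping, and it writes jobDir/setupInput.json:
- # example_job = {
- #     'setupFile': '/path/to/setupMinute.json',
- #     'jobDir': '/path/to/jobDir',
- #     'setupUpdates': {'tmax': 60},
- # }
- # setup_path = updateSetup(example_job)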
- def generate_noisy_data(full_sol, noise_std=20.5):
- full_sol = np.asarray(full_sol) # Ensure input is a NumPy array
- random_state = np.random.RandomState(seed=None)
-
- # Create highly volatile noise
- noise = random_state.normal(loc=0, scale=noise_std, size=full_sol.shape) ** 2 # Squared to amplify large deviations
- noise2 = random_state.laplace(loc=0, scale=noise_std * 10, size=full_sol.shape) # Laplace noise with a larger scale for more extreme spikes
-
- # Introduce highly chaotic scaling factors
- random_scaling = random_state.uniform(-200, 400, size=full_sol.shape) # Further extended range for more variation
- random_exponent = random_state.uniform(4, 12, size=full_sol.shape) # Increased exponentiation for more instability
-
- # Apply extreme, chaotic transformations
- noisy_data = np.zeros_like(full_sol)
- for i in range(len(full_sol)):
- perturbation = random_scaling[i] * (abs(noise[i]) ** random_exponent[i]) - noise2[i] * np.sin(full_sol[i])
- noisy_data[i] = (1 + np.tanh(perturbation)) * full_sol[i] * np.cosh(noise[i])
-
- # Introduce frequent large perturbations to increase instability
- if random_state.rand() < 0.75: # 75% chance of a drastic shift
- noisy_data[i] *= random_state.uniform(2, 5)
-
- return noisy_data
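- # Usage sketch (note: generate_noisy_data is defined but never called in this script); given a
- # clean solution array such as full_sol computed further below, synthetic observations could be
- # produced and used as the fitting target:
- # noisy = generate_noisy_data(np.asarray(full_sol), noise_std=5.0)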
- def calculate_residuals(full_sol, noisy_data):
- """
- Calculates residuals based on the solution and noisy data.
- """
- residuals = np.ravel(noisy_data - full_sol)
- #print(residuals)
- return residuals
- # Cost function evaluated by least_squares at each trial parameter vector
- def cost_function(params, parnames, noisy_data, model, startPoint, setup, jobDir):
- """
- Cost function that calculates residuals based on the model.
- """
- #print(params)
- model.setValues(parnames,params)
- clean_dir(jobDir)
- t, sol, se, qt, sOut, full_t, full_sol = runSolver.solve(model, setup, startPoint, jobDir)
- residuals = calculate_residuals(np.array(full_sol), noisy_data)
- return residuals
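- # Sketch of a single cost evaluation (assumption: names such as initial_guesses2, model2, setup2
- # and start_point are built further below); least_squares passes trial values as `params` and the
- # remaining arguments through its `args` tuple in this same order:
- # r0 = cost_function(initial_guesses2, param_names2, full_sol, model2, start_point, setup2, jobDir)
- # print(np.sum(r0 ** 2)) # sum of squared residuals at the starting point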
- def create_name_value_dict(original_dict):
- return {
- val['name']: val['value']
- for key, val in original_dict.items()
- if isinstance(val, dict) and 'name' in val and 'value' in val
- }
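- # Small illustration (hypothetical keys and values, not part of the original pipeline) of what
- # create_name_value_dict expects: a mapping whose values are dicts carrying 'name' and 'value'.
- _example_in = {'p1': {'name': 'liverPC', 'value': 0.8}, 'p2': {'name': 'one', 'value': 1.0}}
- assert create_name_value_dict(_example_in) == {'liverPC': 0.8, 'one': 1.0}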
-
- def clean_dir(jobDir):
- """Remove jobDir and all its contents, then recreate it empty."""
- shutil.rmtree(jobDir)
- os.mkdir(jobDir)
- # TRANSFER OF THE INITIAL DATA
- ##############################################################################################################################################
- fh = os.path.expanduser("~")
- jobDir = os.path.join(fh, 'Documents', 'Sola', 'IJS', 'PBPK_ociscen',"rezultati")
- setup_file = os.path.join(fh, 'Documents', 'Sola', 'IJS', 'PBPK_ociscen', 'setup', 'setupMinute.json')
- model_file = os.path.join(fh, 'Documents', 'Sola', 'IJS', 'PBPK_ociscen', 'models', 'cDiazepam.json')
- parameter_file1 = os.path.join(fh, 'Documents', 'Sola', 'IJS', 'PBPK_ociscen', 'models', 'cDiazepam_parameters.json')
- # First model with the first parameter file
- model1 = cModel.model()
- model1.parse(model_file, parameter_file1)
- setup1 = runSolver.parseSetup(setup_file)
- name_value_dict1 = create_name_value_dict(model1.parSetup['parameters'])
- # name_value_dict1 and name_value_dict2 can now be used separately
- # Excluded parameters
- excluded_params1 = {'venousInput',"heartPC","kidneyPC","remainderPC","skinPC","splanchnicPC","stomachPC","testesPC","one","venousVolume","heartVolume","brainVolume","adiposeVolume","remainderFlow"}
- # Create initial guesses without excluded parameters
- initial_guesses = np.array(
- [val for par, val in name_value_dict1.items() if par not in excluded_params1],
- dtype=np.float64
- )
- # Create list of parameter names without excluded
- param_names = [par for par in name_value_dict1.keys() if par not in excluded_params1]
- initial_pars = initial_guesses.copy()
- #initial_pars[3]*=1.1
- clean_dir(jobDir)
- #print("kolicnik")
- #print(initial_guesses/initial_pars)
- #print(param_names)
- # COMPUTE THE SOLUTIONS WITH THE INITIAL PARAMETERS
- start_point = runSolver.findStartPoint("NONE", setup1)
- model1.setValues(param_names,initial_pars)
- t,sol,se,qt,sOut,full_t,full_sol = runSolver.solve(model1,setup1,start_point,jobDir)
- clean_dir(jobDir)
- # Map the initial parameter values back into a dictionary keyed by parameter name
- optimized_params_dict1 = {par: initial_guesses[i] for i, par in enumerate(param_names)}
- # Add the excluded parameters with their original values
- for par in excluded_params1:
- optimized_params_dict1[par] = name_value_dict1[par]
- # Keep the parameter order of the original `name_value_dict1`
- final_dict1 = {par: optimized_params_dict1.get(par, name_value_dict1[par]) for par in name_value_dict1}
- ##############################################################################################################################
- # WITH THE SECOND PARAMETER SET
- ##########################################################################################################################
- # Second model with the second parameter file
- parameter_file2 = os.path.join(fh, 'Documents', 'Sola', 'IJS', 'PBPK_ociscen', 'models', 'cDiazepam_parameters1.json')
- model2 = cModel.model()
- model2.parse(model_file, parameter_file2)
- setup2 = runSolver.parseSetup(setup_file)
- name_value_dict2 = create_name_value_dict(model2.parSetup['parameters'])
- # Excluded parameters
- excluded_params = {'venousInput',"heartPC","kidneyPC","remainderPC","skinPC","splanchnicPC","stomachPC","testesPC","one","venousVolume","heartVolume","brainVolume","adiposeVolume","remainderFlow"}
- # Step 1: Filter out excluded parameters for initial guesses
- initial_guesses2 = np.array(
- [val for par, val in name_value_dict2.items() if par not in excluded_params],
- dtype=np.float64
- )
- param_names2 = [par for par in name_value_dict2.keys() if par not in excluded_params]
- # Map the initial values of the second parameter set into a dictionary keyed by parameter name
- optimized_params_dict22 = {par: initial_guesses2[i] for i, par in enumerate(param_names2)}
- # Add the excluded parameters with their original values
- for par in excluded_params:
- optimized_params_dict22[par] = name_value_dict2[par]
- # Keep the parameter order of the original `name_value_dict2`
- final_dict22 = {par: optimized_params_dict22.get(par, name_value_dict2[par]) for par in name_value_dict2}
- # Step 2: Run optimization
- result = least_squares(
- cost_function, initial_guesses2,
- args=(param_names2, full_sol, model2, start_point, setup2, jobDir)
- )
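- # The OptimizeResult returned by scipy.optimize.least_squares carries, among other fields,
- # result.x (fitted parameters), result.cost (0.5 * sum of squared residuals), result.fun
- # (residuals at the solution) and result.success; note that the fit target passed in the
- # noisy_data slot here is the clean model1 reference solution full_sol. A quick check could be:
- # print(result.success, result.cost, result.nfev)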
- # Step 3: Retrieve optimized parameters
- optimized_params2 = result.x
- # Step 4: Create a new dictionary for the optimized parameters
- # Map the optimized parameters back to the correct order in the original parameter dictionary
- optimized_params_dict2 = {par: optimized_params2[i] for i, par in enumerate(param_names2)}
- # Step 5: Include the excluded parameters with their original values
- for par in excluded_params:
- optimized_params_dict2[par] = name_value_dict2[par]
- # Step 6: Ensure that everything stays in order in the final dictionary
- # This will maintain the order of parameters as they appear in the original `name_value_dict2`
- final_optimized_dict = {par: optimized_params_dict2.get(par, name_value_dict2[par]) for par in name_value_dict2}
- # Now, final_optimized_dict contains the optimized values in the same order as in `name_value_dict2`
- print(name_value_dict2)
- print(final_optimized_dict)
- vrednosti3 = np.array(list(final_optimized_dict.values()), dtype=np.float64)
- param_names3 = list(final_optimized_dict.keys())
- #model2.setValues(param_names3,vrednosti3)
- #################################################################################################################################
- # CHECK WHETHER THE THREE SOLUTIONS DIFFER
- clean_dir(jobDir)
- t,sol,se,qt,sOut,full_t,full_sol1 = runSolver.solve(model1,setup1,start_point,jobDir)
- clean_dir(jobDir)
- t,sol,se,qt,sOut,full_t,full_sol2 = runSolver.solve(model2,setup2,start_point,jobDir)
- model2.setValues(param_names3,vrednosti3)
- clean_dir(jobDir)
- t,sol,se,qt,sOut,full_t,full_sol3 = runSolver.solve(model2,setup2,start_point,jobDir)
- print(np.array(full_sol2)/np.array(full_sol1))
- print(np.array(full_sol3)/np.array(full_sol1))
- print(np.array(full_sol3)/np.array(full_sol2))
- print(initial_guesses2/initial_guesses)
- print(optimized_params2/initial_guesses2)
- print(optimized_params2/initial_guesses)
- ##################################################################################################################################
- # SAVE THE SOLUTIONS AND PARAMETER SETS
- def create_or_clean_dir(directory):
- """Create the directory if it doesn't exist, otherwise clean it."""
- if os.path.exists(directory):
- shutil.rmtree(directory) # Remove the existing directory and its contents
- os.makedirs(directory) # Create a fresh directory
- def save_parameters(file_path, data):
- """Save dictionary data to a JSON file."""
- with open(file_path, 'w') as f:
- json.dump(data, f, indent=4)
- return file_path # Return the path for immediate use
- # Define unique subdirectories
- jobDir1 = os.path.join(jobDir, "run1")
- jobDir2 = os.path.join(jobDir, "run2")
- jobDir3 = os.path.join(jobDir, "run3")
- # Ensure directories exist and are clean
- create_or_clean_dir(jobDir1)
- create_or_clean_dir(jobDir2)
- create_or_clean_dir(jobDir3)
- # Solve and save results in respective directories
- t, sol, se, qt, sOut, full_t, full_sol1 = runSolver.solve(model1, setup1, start_point, jobDir1)
- save_parameters(os.path.join(jobDir1, "parameters.json"), final_dict1)
- t, sol, se, qt, sOut, full_t, full_sol2 = runSolver.solve(model2, setup2, start_point, jobDir2)
- save_parameters(os.path.join(jobDir2, "parameters.json"), final_dict22)
- model2.setValues(param_names3, vrednosti3)
- t, sol, se, qt, sOut, full_t, full_sol3 = runSolver.solve(model2, setup2, start_point, jobDir3)
- # Save parameters and immediately store the path for further use
- param_file3 = save_parameters(os.path.join(jobDir3, "parameters.json"), final_optimized_dict)
- # Now you can directly use param_file3 in further processing
- print(f"Parameters for run3 saved at: {param_file3}")
- ########################################################################################################################
- # BUILD THE THIRD MODEL (to make plotting easier later)
- ##############################################################################################################################################
- # param_file3 already contains the full path to "parameters.json"
- directory = os.path.dirname(param_file3)
- print(directory)
- model3 = cModel.model()
- parameter_file3 = param_file3 # use the saved parameter file directly
- # Load the model using the saved parameter file
- model3.parse(model_file, parameter_file3)
- # Parse the setup
- setup3 = runSolver.parseSetup(setup_file)
- # Create the name-value dictionary
- name_value_dict3 = create_name_value_dict(model3.parSetup['parameters'])
- ########################################################################################################################
- runSolver.main([setup_file,model3,parameter_file3,jobDir])
- # PLOTTING
- ########################################################################################################################
- job_configs = {
- 'cDiazepam_IVP': {
- 'jobDir': os.path.join(fh, 'Documents', 'Sola', 'IJS', 'PBPK_ociscen', 'rezultati','run1')
- },
- 'cDiazepam1_IVP': {
- 'jobDir': os.path.join(fh, 'Documents', 'Sola', 'IJS', 'PBPK_ociscen', 'rezultati','run2')
- },
- 'cDiazepamF_IVP': {
- 'jobDir': os.path.join(fh, 'Documents', 'Sola', 'IJS', 'PBPK_ociscen', 'rezultati','run3')
- }
- }
- def mergeSolutions(seq):
- """
- Merge multiple solution dictionaries into a single dictionary.
- """
- out = {}
- for v in ['t', 'sol', 'se', 'qt', 'sOut']: # Merge arrays from each solution
- out[v] = np.concatenate([x[v] for x in seq])
- for v in ['lut', 'lutSE', 'setup', 'model', 'parameters', 'qt', 'sOut']: # Take these from the last solution (note: 'qt' and 'sOut' overwrite the concatenated values above)
- out[v] = seq[-1][v]
- return out # Return the merged dictionary
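- # Usage sketch (assumption: sol_a and sol_b are two solution dictionaries with the keys listed
- # above, e.g. from two consecutive solver runs; no such call appears in this script):
- # merged = mergeSolutions([sol_a, sol_b])
- # merged['t'] # concatenated time axis; 'setup', 'model', ... are taken from sol_b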