from typing import Callable

import numpy as np
from scipy.optimize import bisect, root_scalar

from charged_shells import expansion, mapping, parameters, potentials

Expansion = expansion.Expansion
Array = np.ndarray
ModelParams = parameters.ModelParams


@mapping.map_over_expansion
def charge_patch_size(ex: Expansion, phi: float = 0, theta0: Array | float = 0, theta1: Array | float = np.pi / 2):
    """Find the polar angle at which the charge distribution of the expansion crosses zero."""
    return bisect(lambda theta: ex.charge_value(theta, phi), theta0, theta1)
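
# Illustrative usage sketch (hypothetical setup): assuming `ex` is an Expansion whose charge
# density changes sign between the pole and the equator, the patch size is the location of
# the zero crossing along theta, mapped over any leading axes of the expansion:
#
#     theta_c = charge_patch_size(ex)  # in radians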


def potential_patch_size(ex: Expansion, params: ModelParams,
                         phi: float = 0, theta0: Array | float = 0, theta1: Array | float = np.pi / 2,
                         match_expansion_axis_to_params: int | None = None):
    """Find the polar angle at which the potential generated by the expansion crosses zero."""
    # This is more complicated to map over the leading axes of the expansion, as the potential also
    # depends on model parameters, some of which, such as kappaR, are also parameters of the expansion
    # in the first place. When mapping, we must therefore provide the expansion axis that should match
    # the collection of parameters in params.

    @mapping.map_over_expansion
    def potential_zero(exp: Expansion, prms: ModelParams):
        return bisect(lambda theta: potentials.charged_shell_potential(theta, phi, 1, exp, prms), theta0, theta1)

    return mapping.parameter_map_single_expansion(potential_zero, match_expansion_axis_to_params)(ex, params)
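
# Illustrative usage sketch (hypothetical setup): if `ex` has a leading axis of length N built
# from N different kappaR values and `params` holds the matching collection of N kappaR values,
# the two are aligned by passing the index of that axis:
#
#     theta_c = potential_patch_size(ex, params, match_expansion_axis_to_params=0)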


def inverse_potential_patch_size(target_patch_size: float,
                                 ex_generator: Callable[[float], Expansion],
                                 x0: float,
                                 params: ModelParams, **ps_kwargs):
    """Find the value of an expansion parameter for which the potential patch size hits the target."""

    def patch_size_dif(x):
        ex = ex_generator(x)
        return potential_patch_size(ex, params, **ps_kwargs) - target_patch_size

    root_result = root_scalar(patch_size_dif, x0=x0)
    if not root_result.converged:
        raise ValueError('No convergence. Patches of the desired size might not be achievable in the given model. '
                         'Alternatively, a common mistake is passing the target patch size in degrees instead of radians.')
    return root_result.root
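
# Illustrative usage sketch: `make_expansion` stands for a hypothetical user-supplied factory
# that maps a single scalar parameter (e.g. a multipole magnitude) to an Expansion; the root
# search for the target patch size (here pi/4) starts from the initial guess x0:
#
#     value = inverse_potential_patch_size(np.pi / 4, make_expansion, x0=1.0, params=params)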