# patch_size.py
  1. import numpy as np
  2. from scipy.optimize import bisect, root_scalar
  3. from charged_shells import expansion, mapping, parameters, potentials
  4. from typing import Callable
  5. Expansion = expansion.Expansion
  6. Array = np.ndarray
  7. ModelParams = parameters.ModelParams
  8. @mapping.map_over_expansion
  9. def charge_patch_size(ex: Expansion, phi: float = 0, theta0: Array | float = 0, theta1: Array | float = np.pi / 2):
  10. return bisect(lambda theta: ex.charge_value(theta, phi), theta0, theta1)
  11. def potential_patch_size(ex: Expansion, params: ModelParams,
  12. phi: float = 0, theta0: Array | float = 0, theta1: Array | float = np.pi / 2,
  13. match_expansion_axis_to_params: int = None,
  14. skip_nozero_cases: bool = False):
  15. # this is more complicate to map over leading axes of the expansion as potential also depends on model parameters,
  16. # with some, such as kappaR, also being the parameters of the expansion in the first place. When mapping,
  17. # we must therefore provide the expansion axis that should match the collection of parameters in params.
  18. @mapping.map_over_expansion
  19. def potential_zero(exp: Expansion, prms: ModelParams):
  20. try:
  21. return bisect(lambda theta: potentials.charged_shell_potential(theta, phi, 1, exp, prms), theta0, theta1)
  22. except ValueError:
  23. if skip_nozero_cases:
  24. return 0.
  25. else:
  26. raise ValueError("Potential has no zero on the given interval.")
  27. return mapping.parameter_map_single_expansion(potential_zero, match_expansion_axis_to_params)(ex, params)
  28. def inverse_potential_patch_size(target_patch_size: float,
  29. ex_generator: Callable[[float], Expansion],
  30. x0: float,
  31. params: ModelParams, **ps_kwargs):
  32. def patch_size_dif(x):
  33. ex = ex_generator(x)
  34. return potential_patch_size(ex, params, **ps_kwargs) - target_patch_size
  35. root_result = root_scalar(patch_size_dif, x0=x0)
  36. if not root_result.converged:
  37. raise ValueError('No convergence. Patches of desired size might not be achievable in the given model. '
  38. 'Conversely, a common mistake might be target patch size input in degrees.')
  39. return root_result.root