import numpy as np
import scipy
from pydantic.v1 import Field, NonNegativeFloat, PositiveFloat, PositiveInt, validator
-from rich.progress import Progress

from ..constants import fp_eps
from ..exceptions import ValidationError
-from ..log import get_logging_console, log
+from ..log import Progress, get_logging_console, log
from .base import Tidy3dBaseModel, cached_property, skip_if_fields_missing
from .types import ArrayComplex1D, ArrayComplex2D, ArrayFloat1D, ArrayFloat2D

@@ -823,7 +822,6 @@ def fit(
        The dispersive medium parameters have the form (resp_inf, poles, residues)
        and are in the original unscaled units.
    """
-
    if max_num_poles < min_num_poles:
        raise ValidationError(
            "Dispersion fitter cannot have 'max_num_poles' less than 'min_num_poles'."
@@ -864,86 +862,82 @@ def make_configs():

    with Progress(console=get_logging_console()) as progress:
        task = progress.add_task(
-            f"Fitting to weighted RMS of {tolerance_rms}...",
+            description=f"Fitting to weighted RMS of {tolerance_rms}...",
            total=len(configs),
            visible=init_model.show_progress,
        )

-        while not progress.finished:
-            # try different initial pole configurations
-            for num_poles, relaxed, smooth, logspacing, optimize_eps_inf in configs:
-                model = init_model.updated_copy(
-                    num_poles=num_poles,
-                    relaxed=relaxed,
-                    smooth=smooth,
-                    logspacing=logspacing,
-                    optimize_eps_inf=optimize_eps_inf,
+        # try different initial pole configurations
+        for num_poles, relaxed, smooth, logspacing, optimize_eps_inf in configs:
+            model = init_model.updated_copy(
+                num_poles=num_poles,
+                relaxed=relaxed,
+                smooth=smooth,
+                logspacing=logspacing,
+                optimize_eps_inf=optimize_eps_inf,
+            )
+            model = _fit_fixed_parameters((min_num_poles, max_num_poles), model)
+
+            if model.rms_error < best_model.rms_error:
+                log.debug(
+                    f"Fitter: possible improved fit with "
+                    f"rms_error={model.rms_error:.3g} found using "
+                    f"relaxed={model.relaxed}, "
+                    f"smooth={model.smooth}, "
+                    f"logspacing={model.logspacing}, "
+                    f"optimize_eps_inf={model.optimize_eps_inf}, "
+                    f"loss_in_bounds={model.loss_in_bounds}, "
+                    f"passivity_optimized={model.passivity_optimized}, "
+                    f"sellmeier_passivity={model.sellmeier_passivity}."
                )
-                model = _fit_fixed_parameters((min_num_poles, max_num_poles), model)
-
-                if model.rms_error < best_model.rms_error:
-                    log.debug(
-                        f"Fitter: possible improved fit with "
-                        f"rms_error={model.rms_error:.3g} found using "
-                        f"relaxed={model.relaxed}, "
-                        f"smooth={model.smooth}, "
-                        f"logspacing={model.logspacing}, "
-                        f"optimize_eps_inf={model.optimize_eps_inf}, "
-                        f"loss_in_bounds={model.loss_in_bounds}, "
-                        f"passivity_optimized={model.passivity_optimized}, "
-                        f"sellmeier_passivity={model.sellmeier_passivity}."
-                    )
-                    if model.loss_in_bounds and model.sellmeier_passivity:
-                        best_model = model
-                    else:
-                        if (
-                            not warned_about_passivity_num_iters
-                            and model.passivity_num_iters_too_small
-                        ):
-                            warned_about_passivity_num_iters = True
-                            log.warning(
-                                "Did not finish enforcing passivity in dispersion fitter. "
-                                "If the fit is not good enough, consider increasing "
-                                "'AdvancedFastFitterParam.passivity_num_iters'."
-                            )
-                        if (
-                            not warned_about_slsqp_constraint_scale
-                            and model.slsqp_constraint_scale_too_small
-                        ):
-                            warned_about_slsqp_constraint_scale = True
-                            log.warning(
-                                "SLSQP constraint scale may be too small. "
-                                "If the fit is not good enough, consider increasing "
-                                "'AdvancedFastFitterParam.slsqp_constraint_scale'."
-                            )
+                if model.loss_in_bounds and model.sellmeier_passivity:
+                    best_model = model
+                else:
+                    if not warned_about_passivity_num_iters and model.passivity_num_iters_too_small:
+                        warned_about_passivity_num_iters = True
+                        log.warning(
+                            "Did not finish enforcing passivity in dispersion fitter. "
+                            "If the fit is not good enough, consider increasing "
+                            "'AdvancedFastFitterParam.passivity_num_iters'."
+                        )
+                    if (
+                        not warned_about_slsqp_constraint_scale
+                        and model.slsqp_constraint_scale_too_small
+                    ):
+                        warned_about_slsqp_constraint_scale = True
+                        log.warning(
+                            "SLSQP constraint scale may be too small. "
+                            "If the fit is not good enough, consider increasing "
+                            "'AdvancedFastFitterParam.slsqp_constraint_scale'."
+                        )
+            progress.update(
+                task,
+                advance=1,
+                description=f"Best weighted RMS error so far: {best_model.rms_error:.3g}",
+                refresh=True,
+            )
+
+            # if below tolerance, return
+            if best_model.rms_error < tolerance_rms:
                progress.update(
                    task,
-                    advance=1,
-                    description=f"Best weighted RMS error so far: {best_model.rms_error:.3g}",
+                    completed=len(configs),
+                    description=f"Best weighted RMS error: {best_model.rms_error:.3g}",
                    refresh=True,
                )
-
-                # if below tolerance, return
-                if best_model.rms_error < tolerance_rms:
-                    progress.update(
-                        task,
-                        completed=len(configs),
-                        description=f"Best weighted RMS error: {best_model.rms_error:.3g}",
-                        refresh=True,
-                    )
+                log.info(
+                    "Found optimal fit with weighted RMS error %.3g",
+                    best_model.rms_error,
+                )
+                if best_model.show_unweighted_rms:
                    log.info(
-                        "Found optimal fit with weighted RMS error %.3g",
-                        best_model.rms_error,
-                    )
-                    if best_model.show_unweighted_rms:
-                        log.info(
-                            "Unweighted RMS error %.3g",
-                            best_model.unweighted_rms_error,
-                        )
-                    return (
-                        best_model.pole_residue,
-                        best_model.rms_error,
+                        "Unweighted RMS error %.3g",
+                        best_model.unweighted_rms_error,
                    )
+                return (
+                    best_model.pole_residue,
+                    best_model.rms_error,
+                )

        # if exited loop, did not reach tolerance (warn)
        progress.update(
@@ -967,3 +961,57 @@ def make_configs():
        best_model.pole_residue,
        best_model.rms_error,
    )
+
+
+def constant_loss_tangent_model(
+    eps_real: float,
+    loss_tangent: float,
+    frequency_range: Tuple[float, float],
+    max_num_poles: PositiveInt = DEFAULT_MAX_POLES,
+    number_sampling_frequency: PositiveInt = 10,
+    tolerance_rms: NonNegativeFloat = DEFAULT_TOLERANCE_RMS,
+    scale_factor: float = 1,
+) -> Tuple[Tuple[float, ArrayComplex1D, ArrayComplex1D], float]:
+    """Fit a constant loss tangent material model.
+
+    Parameters
+    ----------
+    eps_real : float
+        Real part of permittivity.
+    loss_tangent : float
+        Loss tangent.
+    frequency_range : Tuple[float, float]
+        Frequency range for the material to exhibit constant loss tangent response.
+    max_num_poles : PositiveInt, optional
+        Maximum number of poles in the model.
+    number_sampling_frequency : PositiveInt, optional
+        Number of sampling frequencies to compute RMS error for fitting.
+    tolerance_rms : float, optional
+        Weighted RMS error below which the fit is successful and the result is returned.
+    scale_factor : PositiveFloat, optional
+        Factor to rescale frequency by before fitting.
+
+    Returns
+    -------
+    Tuple[Tuple[float, ArrayComplex1D, ArrayComplex1D], float]
+        Best fitting result: (dispersive medium parameters, weighted RMS error).
+        The dispersive medium parameters have the form (resp_inf, poles, residues)
+        and are in the original unscaled units.
+    """
+    if number_sampling_frequency < 2:
+        frequencies = np.array([np.mean(frequency_range)])
+    else:
+        frequencies = np.linspace(frequency_range[0], frequency_range[1], number_sampling_frequency)
+    eps_real_array = np.ones_like(frequencies) * eps_real
+    loss_tangent_array = np.ones_like(frequencies) * loss_tangent
+
+    omega_data = frequencies * 2 * np.pi
+    eps_complex = eps_real_array * (1 + 1j * loss_tangent_array)
+
+    return fit(
+        omega_data=omega_data,
+        resp_data=eps_complex,
+        max_num_poles=max_num_poles,
+        tolerance_rms=tolerance_rms,
+        scale_factor=scale_factor,
+    )
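
For reference, a minimal usage sketch of the new constant_loss_tangent_model helper (not part of the diff itself). The import path is an assumption based on the relative imports in this file, and the material values are illustrative; the returned tuple follows the (resp_inf, poles, residues) convention stated in the docstring above.

# Hypothetical usage sketch; adjust the import to wherever this module lives.
from tidy3d.components.dispersion_fitter import constant_loss_tangent_model

# Fit a lossy dielectric with eps_real = 2.2 and loss tangent 0.004 over 1-10 GHz.
(resp_inf, poles, residues), rms_error = constant_loss_tangent_model(
    eps_real=2.2,
    loss_tangent=0.004,
    frequency_range=(1e9, 10e9),
    max_num_poles=5,
    number_sampling_frequency=20,
)

# Parameters come back in unscaled units: resp_inf is a float, while poles and
# residues are complex arrays of equal length (no more than max_num_poles entries).
print(f"weighted RMS error: {rms_error:.3g}, number of poles: {len(poles)}")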