@@ -500,7 +500,8 @@ pipeline_plan = PipelinePlan()
end
- if args["apply_continuum_normalization"]==true && args["continuum_normalization_individually"]==true
+ if args["apply_continuum_normalization"] && args["continuum_normalization_individually"]
+ println("Applying continuum normalization to each spectrum individually.")
local anchors, continuum, f_filtered
if args["anchors_filename"] != nothing
@assert isfile(args["anchors_filename"]) && filesize(args["anchors_filename"])>0
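Context for the simplified condition above (not part of the patch): Julia's `if` only accepts an actual `Bool`, so comparing a boolean flag with `== true` is redundant once the parsed arguments really are booleans (e.g. flags declared with ArgParse's `action = :store_true`). A minimal sketch with a hypothetical `args` dictionary:

```julia
# Minimal sketch (hypothetical args dict); assumes both flags are parsed as Bool.
args = Dict("apply_continuum_normalization" => true,
            "continuum_normalization_individually" => true)

# For Bool values, `x` and `x == true` behave identically, so the comparison can be dropped.
if args["apply_continuum_normalization"] && args["continuum_normalization_individually"]
    println("Applying continuum normalization to each spectrum individually.")
end
```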
@@ -525,19 +526,24 @@ pipeline_plan = PipelinePlan()
mean_clean_flux_continuum_normalized .+= f_norm # .*weight
mean_clean_var_continuum_normalized .+= var_norm # .*weight
global mean_clean_flux_continuum_normalized_weight_sum += weight
- end
+ end
spec.flux .= f_norm
spec.var .= var_norm

end
push!(all_spectra,spec)
end
GC.gc()
+ mean_lambda ./= mean_clean_flux_weight_sum
+ mean_clean_flux ./= mean_clean_flux_weight_sum
+ mean_clean_var ./= mean_clean_flux_weight_sum
+ mean_clean_flux_sed_normalized ./= mean_clean_flux_sed_normalized_weight_sum
+ mean_clean_var_sed_normalized ./= mean_clean_flux_sed_normalized_weight_sum
dont_need_to!(pipeline_plan,:read_spectra);


- if args["apply_continuum_normalization"]==true && !(args["continuum_normalization_individually"] == true)
- println("# Computing continuum normalization from mean spectra.")
+ if args["apply_continuum_normalization"] && !args["continuum_normalization_individually"]
+ println("Applying continuum normalization based on mean of clean spectra.")
local anchors, continuum, f_filtered
if args["anchors_filename"] != nothing
@assert isfile(args["anchors_filename"]) && filesize(args["anchors_filename"])>0
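The lines added after `GC.gc()` finalize the running weighted means right after the read loop, so the mean clean spectrum is already divided by its total weight before the continuum fit in the next hunk uses it. A minimal sketch of that accumulate-then-normalize pattern, with hypothetical data and names:

```julia
# Minimal sketch of the accumulate-then-normalize pattern (hypothetical data/names).
spectra = [rand(100) .+ 10 for _ in 1:5]   # stand-in for the cleaned flux vectors
weights = ones(length(spectra))            # per-spectrum weights (all 1, as in the script)

mean_clean_flux = zeros(100)
mean_clean_flux_weight_sum = 0.0
for (flux, w) in zip(spectra, weights)
    mean_clean_flux .+= flux .* w                   # running weighted sum (in-place broadcast)
    global mean_clean_flux_weight_sum += w          # scalar accumulator, global as in the script
end
mean_clean_flux ./= mean_clean_flux_weight_sum      # divide once, after the loop, before any fit uses it
```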
@@ -549,15 +555,20 @@ pipeline_plan = PipelinePlan()
(anchors, continuum, f_filtered) = Continuum.calc_continuum(spec.λ, mean_clean_flux_sed_normalized, mean_clean_var_sed_normalized; fwhm = args["fwhm_continuum"]*1000, ν = args["nu_continuum"],
stretch_factor = args["stretch_factor"], merging_threshold = args["merging_threshold"], smoothing_half_width = args["smoothing_half_width"], min_R_factor = args["min_rollingpin_r"],
orders_to_use = orders_to_use_for_continuum, verbose = false)
- save(args["anchors_filename_output"], Dict("anchors" => anchors))
+ if !isnothing(args["anchors_filename_output"])
+ println("# Storing anchors used for continuum model in ", args["anchors_filename_output"], ".")
+ save(args["anchors_filename_output"], Dict("anchors" => anchors))
+ end
else
(anchors, continuum, f_filtered) = Continuum.calc_continuum(spec.λ, mean_clean_flux, mean_clean_var; fwhm = args["fwhm_continuum"]*1000, ν = args["nu_continuum"],
stretch_factor = args["stretch_factor"], merging_threshold = args["merging_threshold"], smoothing_half_width = args["smoothing_half_width"], min_R_factor = args["min_rollingpin_r"],
orders_to_use = orders_to_use_for_continuum, verbose = false)
- save(args["anchors_filename_output"], Dict("anchors" => anchors))
- end
- println("# Stored anchors used for continuum model.")
- end
+ if !isnothing(args["anchors_filename_output"])
+ println("# Storing anchors used for continuum model in ", args["anchors_filename_output"], ".")
+ save(args["anchors_filename_output"], Dict("anchors" => anchors))
+ end
+ end # @isdefined sed
+ end # args["anchors_filename"]
normalization_anchors_list = anchors

weight = 1
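Both branches now write the continuum anchors only when an output path was actually supplied, instead of unconditionally calling `save`. A minimal sketch of that guard, assuming a JLD2 output file (the script already uses `jldopen` for its other outputs; the helper name here is hypothetical):

```julia
using JLD2

# Hypothetical helper: write continuum anchors only when an output path was supplied.
function maybe_save_anchors(path::Union{Nothing,AbstractString}, anchors::AbstractVector)
    isnothing(path) && return nothing            # no output requested: silently skip
    println("# Storing anchors used for continuum model in ", path, ".")
    jldopen(path, "w") do f
        f["anchors"] = anchors                   # same payload the patch saves via save(...)
    end
    return path
end

maybe_save_anchors(nothing, [5000.0, 5010.5])        # no-op
maybe_save_anchors("anchors.jld2", [5000.0, 5010.5]) # writes the file
```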
@@ -573,13 +584,8 @@ pipeline_plan = PipelinePlan()
end
end
end
- mean_lambda ./= mean_clean_flux_weight_sum
- mean_clean_flux ./= mean_clean_flux_weight_sum
- mean_clean_var ./= mean_clean_flux_weight_sum
- mean_clean_flux_sed_normalized ./= mean_clean_flux_sed_normalized_weight_sum
- mean_clean_var_sed_normalized ./= mean_clean_flux_sed_normalized_weight_sum
- mean_clean_flux_continuum_normalized ./= mean_clean_flux_continuum_normalized_weight_sum
- mean_clean_var_continuum_normalized ./= mean_clean_flux_continuum_normalized_weight_sum
+ mean_clean_flux_continuum_normalized ./= mean_clean_flux_continuum_normalized_weight_sum
+ mean_clean_var_continuum_normalized ./= mean_clean_flux_continuum_normalized_weight_sum

order_list_timeseries = extract_orders(all_spectra, pipeline_plan, orders_to_use=orders_to_use, remove_bad_chunks=false, recalc=true)
@@ -600,7 +606,6 @@ line_width = line_width_50_default
@assert all(map(k->k ∈ names(line_list_espresso), ["lambda","weight","order"]))
dont_need_to!(pipeline_plan,:clean_line_list_tellurics)
else
- println("# Can't find ", line_list_filename, ". Trying ESPRESSO line list.")
# orders_to_use = good_orders
# order_list_timeseries = extract_orders(all_spectra,pipeline_plan, orders_to_use=orders_to_use, recalc=true )
touch(line_list_filename)
@@ -609,6 +614,7 @@ line_width = line_width_50_default
else
line_list_input_filename = joinpath(pkgdir(EchelleCCFs),"data","masks","espresso+neid_mask_97_to_108.mas")
end
+ println("# Recreating line list weights from ", line_list_input_filename)
line_list_espresso = prepare_line_list(line_list_input_filename, all_spectra, pipeline_plan, v_center_to_avoid_tellurics=ccf_mid_velocity,
Δv_to_avoid_tellurics = 2*max_bc+range_no_mask_change*line_width_50_default+max_mask_scale_factor*default_ccf_mask_v_width(NEID2D()), orders_to_use=#=orders_to_use=#56:108, recalc=true, verbose=true)
if args["recompute_line_weights"] && !isnothing(args["line_list_output_filename"])
@@ -651,7 +657,7 @@ if verbose println(now()) end
println("# Saving results to ", daily_ccf_filename, ".")
stop_processing_time = now()
jldopen(daily_ccf_filename, "w") do f
- f["v_grid"] = v_grid_order_ccfs
+ f["v_grid"] = collect(v_grid_order_ccfs)
f["order_ccfs"] = order_ccfs
f["order_ccf_vars"] = order_ccf_vars
f["Δfwhm"] = Δfwhm
@@ -690,7 +696,7 @@ for (i,row) in enumerate(eachrow(df_files_use))
#ccf_filename = joinpath(neid_data_path,target_subdir,"output","ccfs", m[1] * "_ccfs=default.jld2")
ccf_filename = joinpath(neid_data_path,target_subdir,"ccfs", m[1] * "_ccfs=default.jld2")
jldopen(ccf_filename, "w") do f
- f["v_grid"] = v_grid_order_ccfs
+ f["v_grid"] = collect(v_grid_order_ccfs)
f["order_ccfs"] = order_ccfs[:,:,i]
f["order_ccf_vars"] = order_ccf_vars[:,:,i]
f["orders_to_use"] = orders_to_use
@@ -807,7 +813,7 @@ msf = lsf_width/default_ccf_mask_v_width(NEID2D()); fwtf = 0.5 # using LSF widt
mask_type=:gaussian, Δfwhm=Δfwhm,
mask_scale_factor=msf, range_no_mask_change=5*line_width_50, ccf_mid_velocity=ccf_mid_velocity, v_step=100, #155,
v_max=max(5*line_width_50,2*max_bc), allow_nans=true, calc_ccf_var=true, recalc=true)
- outputs["v_grid"] = v_grid
+ outputs["v_grid"] = collect(v_grid)
outputs["ccfs_espresso"] = ccfs_espresso
outputs["ccf_vars_espresso"] = ccf_vars_espresso