From 8984df9ea52c519123d856a6cecf9b9b4fe53d87 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Thu, 9 Oct 2025 13:52:41 -0700 Subject: [PATCH 01/21] Fix a couple things I stumbled on in the contribution guide --- docs/source/develop/contribution_guide.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/source/develop/contribution_guide.md b/docs/source/develop/contribution_guide.md index 7c8fcbb..b24e963 100644 --- a/docs/source/develop/contribution_guide.md +++ b/docs/source/develop/contribution_guide.md @@ -124,7 +124,7 @@ The very first question you should ask yourself before making a new function is, Take the `histogram` function for example, are you wondering why we did a histogram function? -After all, `IDL's` histogram function can be done using a combination of `np.histogram` and the reverse indices can technically be done using many indexing functions available in NumPy as well. Well it turns out the NumPy functions like `np.histogram` suffer from a fatal flaw, they're awfully slow with large arrays, and this isn't necessarily their fault either, NumPy (rightly for their particular case) prioritised compatibility over raw speed. Other than speed, a function needed to be made to create the reverse indices part of `IDL's` histogram anyway as there is no `SciPy` or `NumPy` equivalent. As such it was decided to rewrite the histogram from scratch with speed as the priority given that `histogram` in `PyFHD` get's called hundreds, maybe hundreds of times. +After all, `IDL's` histogram function can be done using a combination of `np.histogram` and the reverse indices can technically be done using many indexing functions available in NumPy as well. Well it turns out the NumPy functions like `np.histogram` suffer from a fatal flaw, they're awfully slow with large arrays, and this isn't necessarily their fault either, NumPy (rightly for their particular case) prioritised compatibility over raw speed. Other than speed, a function needed to be made to create the reverse indices part of `IDL's` histogram anyway as there is no `SciPy` or `NumPy` equivalent. As such it was decided to rewrite the histogram from scratch with speed as the priority given that `histogram` in `PyFHD` get's called hundreds, maybe thousands of times. To summarise, only make a new function if it hasn't been made before, or the ones that do exist do not meet your requirements. The requirements initally should not include anything in regards to the speed of the function unless you know before hand that the function is going to be called a lot or in a loop. @@ -570,6 +570,9 @@ In order to run these tests you need to do the following: Put the zip file in the following directory inside the repository (the directory given is relative to the root of the repository): `PyFHD/resources/test_data` 2. Unzip the zip file in the previously mentioned `PyFHD/resources/test_data` directory. +After unzipping, there will be a new folder named `pyfhd_test_data`. Move all +the contents out of that folder up into the `PyFHD/resources/test_data` folder +and delete the now empty `pyfhd_test_data` folder. 3. 
In a terminal, at the root of the repository, run the following command: From 5b9747f83a61a9541e255c10a74716c49c25622a Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Thu, 9 Oct 2025 13:52:51 -0700 Subject: [PATCH 02/21] remove defaults from the conda env --- environment.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/environment.yml b/environment.yml index b79ef8b..cbb7f45 100644 --- a/environment.yml +++ b/environment.yml @@ -1,7 +1,6 @@ name: pyfhd channels: - conda-forge - - defaults dependencies: - python - astropy From cf3d8f495edb598d60dde9451cc90d5b187ade23 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Tue, 21 Oct 2025 10:33:55 -0700 Subject: [PATCH 03/21] fix path handling and a numpy bug --- PyFHD/beam_setup/beam.py | 2 +- PyFHD/calibration/calibration_utils.py | 2 +- PyFHD/pyfhd_tools/pyfhd_setup.py | 24 ++++++++++++++++-------- 3 files changed, 18 insertions(+), 10 deletions(-) diff --git a/PyFHD/beam_setup/beam.py b/PyFHD/beam_setup/beam.py index 26c4dc1..a7ef0fb 100644 --- a/PyFHD/beam_setup/beam.py +++ b/PyFHD/beam_setup/beam.py @@ -320,6 +320,6 @@ def create_psf(obs: dict, pyfhd_config: dict, logger: Logger) -> dict | File: sys.exit(1) raise ValueError( f"Unknown beam file type {pyfhd_config['beam_file_path'].suffix}. " - "Please use a .sav, .h5, .hdf5" + "Please use a .sav, .h5, .hdf5 " "If you meant for PyFHD to do the beam forming, please set the beam_file_path to None (~ in YAML)." ) diff --git a/PyFHD/calibration/calibration_utils.py b/PyFHD/calibration/calibration_utils.py index cdc5c42..7c3980f 100644 --- a/PyFHD/calibration/calibration_utils.py +++ b/PyFHD/calibration/calibration_utils.py @@ -49,7 +49,7 @@ def vis_extract_autocorr( if autocorr_i.size > 0: auto_tile_i = obs["baseline_info"]["tile_a"][autocorr_i] - 1 # As auto_tile_i is used for indexing we need to make it an integer array - auto_tile_i = auto_tile_i.astype(np.integer) + auto_tile_i = auto_tile_i.astype(int) auto_tile_i_single = np.unique(auto_tile_i) # expect it as a list of 2D arrays, so there might be trouble if not pyfhd_config["cal_time_average"]: diff --git a/PyFHD/pyfhd_tools/pyfhd_setup.py b/PyFHD/pyfhd_tools/pyfhd_setup.py index 21e1f7a..c65fd8f 100644 --- a/PyFHD/pyfhd_tools/pyfhd_setup.py +++ b/PyFHD/pyfhd_tools/pyfhd_setup.py @@ -961,7 +961,7 @@ def _check_file_exists(config: dict, key: str) -> int: """ if config[key]: # If it doesn't exist, add error message - if not Path(config[key]).exists(): + if not Path(config[key]).expanduser().resolve().exists(): logging.error( "{} has been enabled with a path that doesn't exist, check the path.".format( key @@ -1142,6 +1142,9 @@ def pyfhd_logger(pyfhd_config: dict) -> Tuple[logging.Logger, Path]: if pyfhd_config["get_sample_data"]: output_dir = Path(pyfhd_config["output_path"]) else: + pyfhd_config["output_path"] = ( + Path(pyfhd_config["output_path"]).expanduser().resolve() + ) output_dir = Path(pyfhd_config["output_path"], dir_name) if Path.is_dir(output_dir): output_dir_exists = True @@ -1217,6 +1220,7 @@ def pyfhd_setup(options: argparse.Namespace) -> Tuple[dict, logging.Logger]: pyfhd_config["output_dir"] = output_dir pyfhd_config["top_level_dir"] = str(output_dir).split("/")[-1] # Check input_path exists and obs_id uvfits and metafits files exist (Error) + pyfhd_config["input_path"] = Path(pyfhd_config["input_path"]).expanduser().resolve() if not pyfhd_config["input_path"].exists(): logger.error( "{} doesn't exist, please check your input path".format(options.input_path) @@ -1272,14 +1276,15 @@ def pyfhd_setup(options: 
argparse.Namespace) -> Tuple[dict, logging.Logger]: pyfhd_config["interpolate_kernel"] = False # If the user has set a beam file, check it exists (Error) - if ( - pyfhd_config["beam_file_path"] is not None - and not Path(pyfhd_config["beam_file_path"]).exists() - ): - logger.error( - f"Beam file {pyfhd_config['beam_file_path']} does not exist, please check your input path" + if pyfhd_config["beam_file_path"] is not None: + pyfhd_config["beam_file_path"] = ( + Path(pyfhd_config["beam_file_path"]).expanduser().resolve() ) - errors += 1 + if not Path(pyfhd_config["beam_file_path"]).exists(): + logger.error( + f"Beam file {pyfhd_config['beam_file_path']} does not exist, please check your input path" + ) + errors += 1 if pyfhd_config["beam_file_path"] is None: logger.info("No beam file was set, PyFHD will calculate the beam.") @@ -1384,6 +1389,9 @@ def pyfhd_setup(options: argparse.Namespace) -> Tuple[dict, logging.Logger]: # if importing model visiblities from a uvfits file, check that file # exists if pyfhd_config["model_file_path"]: + pyfhd_config["model_file_path"] = ( + Path(pyfhd_config["model_file_path"]).expanduser().resolve() + ) errors += _check_file_exists(pyfhd_config, "model_file_path") if pyfhd_config["model_file_path"] == "sav": From 3c651097d2e2c7743e397ca2cc87562680ff2780 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Tue, 21 Oct 2025 19:49:19 -0700 Subject: [PATCH 04/21] fix errors in uvfits reader --- PyFHD/data_setup/uvfits.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/PyFHD/data_setup/uvfits.py b/PyFHD/data_setup/uvfits.py index 29aef60..0a8b577 100644 --- a/PyFHD/data_setup/uvfits.py +++ b/PyFHD/data_setup/uvfits.py @@ -88,17 +88,31 @@ def extract_header( pyfhd_header["frequency_array"] = ( np.arange(pyfhd_header["n_freq"]) - freq_ref_i ) * pyfhd_header["freq_res"] + pyfhd_header["freq_ref"] + # the following is guaranteed from the uvfits memo, logic stolen from pyuvdata + if params_header["naxis"] == 7: + ra_axis = 6 + dec_axis = 7 + else: + ra_axis = 5 + dec_axis = 6 + # obsra/obsdec/ra/dec are not standard uvfits keywords try: pyfhd_header["obsra"] = params_header["obsra"] except KeyError: logger.warning("OBSRA not found in UVFITS file") - pyfhd_header["obsra"] = params_header["ra"] + if "ra" in params_header: + pyfhd_header["obsra"] = params_header["ra"] + else: + pyfhd_header["obsra"] = params_header[f"CRVAL{ra_axis}"] try: pyfhd_header["obsdec"] = params_header["obsdec"] except KeyError: logger.warning("OBSDEC not found in UVFITS file") - pyfhd_header["obsdec"] = params_header["dec"] + if "dec" in params_header: + pyfhd_header["obsdec"] = params_header["dec"] + else: + pyfhd_header["obsdec"] = params_header[f"CRVAL{dec_axis}"] # Put in locations of instrument from FITS file or from Astropy site data # If you want to see the list of current site names using EarthLocation.get_site_names() # If you want to use PyFHD with HERA in the future @@ -113,6 +127,8 @@ def extract_header( # Can also do MWA or Murchison Widefield Array location = EarthLocation("mwa") + # These are all non-standard uvfits keywords. This information is stored in + # the antenna table. See pyuvdata for the right way to do this. 
try: pyfhd_header["lon"] = params_header["lon"] except KeyError: From b31ae2a15fd65b39a632721eab0cde229bc3d644 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Tue, 21 Oct 2025 19:50:04 -0700 Subject: [PATCH 05/21] fix checkpointing to work properly --- PyFHD/pyfhd.py | 67 ++++++++++++++++++++++++++++++++++---------------- 1 file changed, 46 insertions(+), 21 deletions(-) diff --git a/PyFHD/pyfhd.py b/PyFHD/pyfhd.py index a6ea170..0e43fa9 100644 --- a/PyFHD/pyfhd.py +++ b/PyFHD/pyfhd.py @@ -144,6 +144,45 @@ def main(): ) pyfhd_config["checkpoint_dir"].mkdir(exist_ok=True) + obs_checkpoint_file = Path( + pyfhd_config["checkpoint_dir"], + f"{checkpoint_name}_obs_checkpoint.h5", + ) + if ( + pyfhd_config["obs_checkpoint"] is not None + and not obs_checkpoint_file.exists() + ): + logger.warning( + "obs_checkpoint is set but obs checkpoint file does not exist. Recalculating obs." + ) + pyfhd_config["obs_checkpoint"] = None + + cal_checkpoint_file = Path( + pyfhd_config["checkpoint_dir"], + f"{checkpoint_name}_calibrate_checkpoint.h5", + ) + if ( + pyfhd_config["calibrate_checkpoint"] is not None + and not cal_checkpoint_file.exists() + ): + logger.warning( + "calibrate_checkpoint is set but cal checkpoint file does not exist. Recalculating cal." + ) + pyfhd_config["calibrate_checkpoint"] = None + + grid_checkpoint_file = Path( + pyfhd_config["checkpoint_dir"], + f"{checkpoint_name}_gridding_checkpoint.h5", + ) + if ( + pyfhd_config["gridding_checkpoint"] is not None + and not grid_checkpoint_file.exists() + ): + logger.warning( + "gridding_checkpoint is set but grid checkpoint file does not exist. Recalculating grid." + ) + pyfhd_config["gridding_checkpoint"] = None + if ( pyfhd_config["obs_checkpoint"] is None and pyfhd_config["calibrate_checkpoint"] is None @@ -217,10 +256,7 @@ def main(): "vis_weights": vis_weights, } save( - Path( - pyfhd_config["checkpoint_dir"], - f"{checkpoint_name}_obs_checkpoint.h5", - ), + obs_checkpoint_file, checkpoint, "obs_checkpoint", logger=logger, @@ -231,11 +267,8 @@ def main(): ) else: # Load the checkpoint and initialize the required variables - if ( - pyfhd_config["obs_checkpoint"] - and Path(pyfhd_config["obs_checkpoint"]).exists() - ): - obs_checkpoint = load(pyfhd_config["obs_checkpoint"], logger=logger) + if pyfhd_config["obs_checkpoint"]: + obs_checkpoint = load(obs_checkpoint_file, logger=logger) obs = obs_checkpoint["obs"] params = obs_checkpoint["params"] vis_arr = obs_checkpoint["vis_arr"] @@ -251,7 +284,7 @@ def main(): pyfhd_config["calibrate_checkpoint"] is not None and Path(pyfhd_config["calibrate_checkpoint"]).exists() ): - cal_checkpoint = load(pyfhd_config["calibrate_checkpoint"], logger=logger) + cal_checkpoint = load(cal_checkpoint_file, logger=logger) obs = cal_checkpoint["obs"] params = cal_checkpoint["params"] vis_arr = cal_checkpoint["vis_arr"] @@ -394,10 +427,7 @@ def main(): "cal": cal, } save( - Path( - pyfhd_config["checkpoint_dir"], - f"{checkpoint_name}_calibrate_checkpoint.h5", - ), + cal_checkpoint_file, checkpoint, "calibrate_checkpoint", logger=logger, @@ -557,10 +587,7 @@ def main(): if vis_model_arr is not None: checkpoint["model_uv"] = model_uv save( - Path( - pyfhd_config["checkpoint_dir"], - f"{checkpoint_name}_gridding_checkpoint.h5", - ), + grid_checkpoint_file, checkpoint, "gridding_checkpoint", logger=logger, @@ -573,9 +600,7 @@ def main(): _print_time_diff(grid_start, grid_end, "Visibilities gridded", logger) else: if pyfhd_config["gridding_checkpoint"]: - grid_checkpoint = load( - 
pyfhd_config["gridding_checkpoint"], logger=logger - ) + grid_checkpoint = load(grid_checkpoint_file, logger=logger) image_uv = grid_checkpoint["image_uv"] weights_uv = grid_checkpoint["weights_uv"] variance_uv = grid_checkpoint["variance_uv"] From 5682be6309a0cfe08f092706b172148cf6c39b3e Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Tue, 21 Oct 2025 19:50:42 -0700 Subject: [PATCH 06/21] fix vis_model_transfer to handle standard FHD folder structure --- PyFHD/source_modeling/vis_model_transfer.py | 22 +++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/PyFHD/source_modeling/vis_model_transfer.py b/PyFHD/source_modeling/vis_model_transfer.py index e338e2e..397cabc 100644 --- a/PyFHD/source_modeling/vis_model_transfer.py +++ b/PyFHD/source_modeling/vis_model_transfer.py @@ -105,18 +105,36 @@ def import_vis_model_from_sav( params_model : dict The parameters for said model used for flagging """ + fhd_subdirs = { + "params": "metadata", + "vis": "vis_data", + } + try: path = Path( pyfhd_config["model_file_path"], f"{pyfhd_config['obs_id']}_params.sav" ) + if not path.exists(): + path = Path( + pyfhd_config["model_file_path"], + fhd_subdirs["params"], + f"{pyfhd_config['obs_id']}_params.sav", + ) params_model = readsav(path) params_model = recarray_to_dict(params_model.params) # Read in the first polarization from pol_names pol_i = 0 + vis_path_parts = [pyfhd_config["model_file_path"]] path = Path( - pyfhd_config["model_file_path"], + *vis_path_parts, f"{pyfhd_config['obs_id']}_vis_model_{obs['pol_names'][pol_i]}.sav", ) + if not path.exists(): + vis_path_parts = [pyfhd_config["model_file_path"], fhd_subdirs["vis"]] + path = Path( + *vis_path_parts, + f"{pyfhd_config['obs_id']}_vis_model_{obs['pol_names'][pol_i]}.sav", + ) curr_vis_model = readsav(path) if isinstance(curr_vis_model, dict): curr_vis_model = curr_vis_model["vis_model_ptr"] @@ -132,7 +150,7 @@ def import_vis_model_from_sav( vis_model_arr[pol_i] = curr_vis_model for pol_i in range(1, obs["n_pol"]): path = Path( - pyfhd_config["model_file_path"], + *vis_path_parts, f"{pyfhd_config['obs_id']}_vis_model_{obs['pol_names'][pol_i]}.sav", ) curr_vis_model = readsav(path) From b1aebe01fe0df2daede03831ea9a26c7b9e6f903 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Tue, 21 Oct 2025 19:52:29 -0700 Subject: [PATCH 07/21] fix misspelled calibration checkpoint option in yamls --- PyFHD/resources/1088285600_example/1088285600_example.yaml | 2 +- PyFHD/resources/config/pyfhd.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/PyFHD/resources/1088285600_example/1088285600_example.yaml b/PyFHD/resources/1088285600_example/1088285600_example.yaml index 3566c78..9e61ef2 100644 --- a/PyFHD/resources/1088285600_example/1088285600_example.yaml +++ b/PyFHD/resources/1088285600_example/1088285600_example.yaml @@ -18,7 +18,7 @@ deproject-w-term : ~ # Checkpointing save-checkpoints: true obs-checkpoint: ~ -calibration-checkpoint: ~ +calibrate-checkpoint: ~ gridding-checkpoint: ~ # Instrument diff --git a/PyFHD/resources/config/pyfhd.yaml b/PyFHD/resources/config/pyfhd.yaml index da4e376..4de6191 100644 --- a/PyFHD/resources/config/pyfhd.yaml +++ b/PyFHD/resources/config/pyfhd.yaml @@ -18,7 +18,7 @@ deproject-w-term : ~ # Checkpointing save-checkpoints: true obs-checkpoint: ~ -calibration-checkpoint: ~ +calibrate-checkpoint: ~ gridding-checkpoint: ~ # Instrument From c39fe3f99e159a21855065ccd70b4b5201def02f Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Tue, 21 Oct 2025 20:10:29 -0700 
Subject: [PATCH 08/21] fix shape error in vis_baseline_hist caused by flagged data --- PyFHD/calibration/calibration_utils.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/PyFHD/calibration/calibration_utils.py b/PyFHD/calibration/calibration_utils.py index 7c3980f..82a90fc 100644 --- a/PyFHD/calibration/calibration_utils.py +++ b/PyFHD/calibration/calibration_utils.py @@ -1557,6 +1557,10 @@ def vis_baseline_hist( wh_noflag = np.where(np.abs(model_vals) > 0)[0] if wh_noflag.size > 0: inds = inds[wh_noflag] + # inds changed so we need to update model_vals + # otherwise it has a different shape than vis_cal_use below + # causing errors + model_vals = (vis_model_arr[pol_i]).flatten()[inds] else: continue # if Keyword_Set(calibration_visibilities_subtract) THEN BEGIN From 925f6cdd05e1aa48b7c38a19cb4b5fe6e509013d Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Wed, 22 Oct 2025 10:00:58 -0700 Subject: [PATCH 09/21] Add more prominent explanation of passing None via yamls. --- docs/source/tutorial/tutorial.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/source/tutorial/tutorial.rst b/docs/source/tutorial/tutorial.rst index b6e1d3b..e57f540 100644 --- a/docs/source/tutorial/tutorial.rst +++ b/docs/source/tutorial/tutorial.rst @@ -507,6 +507,9 @@ likely you'll need to adjust the default configuration file to suit your needs. `pyfhd.yaml `_ + Note that to provide a ``None`` input in the yaml you must use ``~`` or ``null`` + as those are translated to ``None`` when yamls are read into python. + Some files can be discovered automatically through the ``input-path`` option of ``PyFHD`` so read through the usage help text to work out how you wish to configure your input. ``PyFHD`` is rather flexible on how you do your input as many of the files you may require can be in completely separate directories. From ac4c51a3e4bf06ad0d88d1afb18c1a66dacbe51a Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Thu, 23 Oct 2025 14:56:41 -0700 Subject: [PATCH 10/21] fix more checkpointing errors --- PyFHD/pyfhd.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/PyFHD/pyfhd.py b/PyFHD/pyfhd.py index 0e43fa9..6cefd5c 100644 --- a/PyFHD/pyfhd.py +++ b/PyFHD/pyfhd.py @@ -282,7 +282,7 @@ def main(): # to get the observation metadata and visibility parameters if ( pyfhd_config["calibrate_checkpoint"] is not None - and Path(pyfhd_config["calibrate_checkpoint"]).exists() + and cal_checkpoint_file.exists() ): cal_checkpoint = load(cal_checkpoint_file, logger=logger) obs = cal_checkpoint["obs"] @@ -305,7 +305,10 @@ def main(): ) # Check if the calibrate checkpoint has been used, if not run the calibration steps - if pyfhd_config["calibrate_checkpoint"] is None: + if ( + pyfhd_config["calibrate_checkpoint"] is None + or not cal_checkpoint_file.exists() + ): if pyfhd_config["deproject_w_term"] is not None: w_term_start = time.time() vis_arr = simple_deproject_w_term( @@ -501,7 +504,8 @@ def main(): if ( pyfhd_config["recalculate_grid"] - and pyfhd_config["gridding_checkpoint"] is None + or pyfhd_config["gridding_checkpoint"] is None + or not grid_checkpoint_file.exists() ): grid_start = time.time() image_uv = np.empty( @@ -565,6 +569,8 @@ def main(): if vis_model_arr is not None: model_uv = crosspol_reformat(model_uv) if pyfhd_config["gridding_plots"]: + # TODO: move this after the checkpointing so an error in plotting + # doesn't require rerunning gridding. 
logger.info( f"Plotting the continuum gridding outputs into {pyfhd_config['output_dir']/'plots'/'gridding'}" ) From e5934c7562d6d6b6c46baed3014848b853ee039b Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Thu, 23 Oct 2025 14:57:27 -0700 Subject: [PATCH 11/21] fix keyerror caused by not checking for key existence --- PyFHD/pyfhd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/PyFHD/pyfhd.py b/PyFHD/pyfhd.py index 6cefd5c..1bace64 100644 --- a/PyFHD/pyfhd.py +++ b/PyFHD/pyfhd.py @@ -494,7 +494,7 @@ def main(): pyfhd_successful = True sys.exit(0) - if ( + if "image_info" not in psf or ( psf["image_info"]["image_power_beam_arr"] is not None and psf["image_info"]["image_power_beam_arr"].shape == 1 ): From 7e4f250936889aad3f9276865699cd36a8405b81 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Thu, 23 Oct 2025 14:57:40 -0700 Subject: [PATCH 12/21] fix a bug in image plotting --- PyFHD/plotting/image.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/PyFHD/plotting/image.py b/PyFHD/plotting/image.py index 6236dcf..2de8402 100644 --- a/PyFHD/plotting/image.py +++ b/PyFHD/plotting/image.py @@ -176,7 +176,7 @@ def quick_image( count_missing = len(wh_missing[0]) if count_missing > 0: image[wh_missing] = np.nan - missing_color = 0 + missing_color = 0 else: count_missing = 0 wh_missing = None From 60ea3c1b98d39cadbebdf85e6437ef3840aa80c8 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Thu, 23 Oct 2025 16:55:33 -0700 Subject: [PATCH 13/21] fix a keyerror and numpy indexing errors in beam_image --- PyFHD/beam_setup/beam_utils.py | 12 +++++++----- docs/source/tutorial/tutorial.rst | 2 +- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/PyFHD/beam_setup/beam_utils.py b/PyFHD/beam_setup/beam_utils.py index f2c46a3..3b58bdb 100644 --- a/PyFHD/beam_setup/beam_utils.py +++ b/PyFHD/beam_setup/beam_utils.py @@ -156,7 +156,7 @@ def beam_image( """ psf_dim = psf["dim"] - freq_norm = psf["fnorm"] + freq_norm = psf["freq_norm"] pix_horizon = psf["pix_horizon"] group_id = psf["id"][pol_i, 0, :] if "beam_gaussian_params" in psf: @@ -170,10 +170,12 @@ def beam_image( freq_norm = freq_norm[:] pix_horizon = pix_horizon[0] dimension = elements = obs["dimension"] - xl = dimension / 2 - psf_dim / 2 + 1 - xh = dimension / 2 - psf_dim / 2 + psf_dim - yl = elements / 2 - psf_dim / 2 + 1 - yh = elements / 2 - psf_dim / 2 + psf_dim + # these should all be integers b/c dimensions are usually even numbers. + # but they have to be cast to ints to be used in slicing. 
+ xl = int(dimension / 2 - psf_dim / 2 + 1) + xh = int(dimension / 2 - psf_dim / 2 + psf_dim) + yl = int(elements / 2 - psf_dim / 2 + 1) + yh = int(elements / 2 - psf_dim / 2 + psf_dim) group_n, _, ri_id = histogram(group_id, min=0) gi_use = np.nonzero(group_n) diff --git a/docs/source/tutorial/tutorial.rst b/docs/source/tutorial/tutorial.rst index e57f540..4779a16 100644 --- a/docs/source/tutorial/tutorial.rst +++ b/docs/source/tutorial/tutorial.rst @@ -1391,7 +1391,7 @@ are not complete (for example the model doesn't also create the params file), bu dim = H5D_READ(dim) fbin_i = H5D_OPEN(file_id, "fbin_i") fbin_i = H5D_READ(fbin_i) - fnorm = H5D_OPEN(file_id, "fnorm") + fnorm = H5D_OPEN(file_id, "freq_norm") fnorm = H5D_READ(fnorm) freq = H5D_OPEN(file_id, "freq") freq = H5D_READ(freq) From 4788b582d609b89f26f68cbc000c04837e9c4909 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Wed, 29 Oct 2025 12:33:31 -0700 Subject: [PATCH 14/21] fix defaulting of baseline_threshold in dirty_image_generate --- PyFHD/gridding/gridding_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/PyFHD/gridding/gridding_utils.py b/PyFHD/gridding/gridding_utils.py index 21bac94..0771ce7 100644 --- a/PyFHD/gridding/gridding_utils.py +++ b/PyFHD/gridding/gridding_utils.py @@ -300,7 +300,7 @@ def dirty_image_generate( logger: Logger, uniform_filter_uv: NDArray[np.float64] | None = None, mask: NDArray[np.integer] | None = None, - baseline_threshold: int | float = 0, + baseline_threshold: int | float | None = None, normalization: float | NDArray[np.float64] | None = None, resize: int | None = None, width_smooth: int | float | None = None, @@ -330,7 +330,7 @@ def dirty_image_generate( mask : NDArray[np.integer] | None, optional A 2D {u,v} mask to apply before image creation, by default None baseline_threshold : int | float, optional - The maximum baseline length to include in units of pixels, by default 0 + The maximum baseline length to include in units of pixels, default None normalization : float | NDArray[np.float64] | None, optional A value by which to normalize the image by, by default None resize : int | None, optional From e024c229127c95e8c57623c7e195993b69e8a594 Mon Sep 17 00:00:00 2001 From: Nichole Barry Date: Wed, 15 Oct 2025 13:02:17 +1100 Subject: [PATCH 15/21] beam hot fixes --- PyFHD/beam_setup/antenna.py | 70 ++++++++++++++++++++++++---------- PyFHD/beam_setup/beam.py | 56 +++++++++++++++------------ PyFHD/beam_setup/beam_utils.py | 29 ++++++++------ PyFHD/beam_setup/mwa.py | 6 ++- 4 files changed, 102 insertions(+), 59 deletions(-) diff --git a/PyFHD/beam_setup/antenna.py b/PyFHD/beam_setup/antenna.py index 3e0f2e6..a5dec64 100644 --- a/PyFHD/beam_setup/antenna.py +++ b/PyFHD/beam_setup/antenna.py @@ -117,7 +117,7 @@ def init_beam(obs: dict, pyfhd_config: dict, logger: Logger) -> dict: "beam_model_version": pyfhd_config["beam_model_version"], "freq": freq_center, "nfreq_bin": nfreq_bin, - "n_ant_elements": 0, + "n_ant_elements": n_dipoles, # Anything that was pointer arrays in IDL will be None until assigned in Python "jones": None, "coupling": coupling, @@ -204,11 +204,17 @@ def init_beam(obs: dict, pyfhd_config: dict, logger: Logger) -> dict: location.height.value, jdate_use, ) - zenith_angle_arr = np.full([psf["image_dim"], psf["image_dim"]], 90) - zenith_angle_arr[valid_i] = 90 - alt_arr.value - # Initialize the azimuth angle array in degrees - azimuth_arr = np.zeros([psf["image_dim"], psf["image_dim"]]) - azimuth_arr[valid_i] = az_arr.value + + # Only keep the pixels 
that are above the horizon to save memory + # Convert to radians for pyuvdata functions + zenith_angle_arr = np.deg2rad(90 - alt_arr) + azimuth_arr = np.deg2rad(az_arr) + + # Store pixel indices above the horizon + antenna["pix_use"] = np.ravel_multi_index( + valid_i, (psf["image_dim"], psf["image_dim"]) + ) + # Save some memory by deleting the unused arrays del ra_arr, dec_arr, alt_arr, az_arr @@ -229,12 +235,14 @@ def init_beam(obs: dict, pyfhd_config: dict, logger: Logger) -> dict: # Do an analytic beam as a placeholder beam = ShortDipoleBeam() + print(zenith_angle_arr.shape) + # Get the jones matrix for the antenna antenna["jones"] = general_jones_matrix( beam, - za_array=zenith_angle_arr, - az_array=azimuth_arr, - freq_array=frequency_array, + za_array=zenith_angle_arr.flatten(), + az_array=azimuth_arr.flatten(), + freq_array=freq_center, telescope_location=location, ) @@ -388,6 +396,29 @@ def general_jones_matrix( else: interpol_fn = None + print(noe_az_array.size, za_array.size, freq_array.size) + + # chunk_size = 10000 # or whatever fits in memory + # n_pix = noe_az_array.size + # responses = [] + # for start in range(0, n_pix, chunk_size): + # end = min(start + chunk_size, n_pix) + # resp = beam.compute_response( + # az_array=az_array[start:end], + # za_array=za_array[start:end], + # freq_array=freq_array, + # interpolation_function=interpol_fn, + # spline_opts=spline_opts, + # check_azza_domain=check_azza_domain, + # ) + # responses.append(resp) + # # Concatenate results if needed + # full_response = np.concatenate(responses, axis=-1) + + # return full_response + + print(za_array) + return beam.compute_response( az_array=noe_az_array, za_array=za_array, @@ -442,13 +473,9 @@ def general_antenna_response( ) # Calculate projections only at locations of non-zero pixels - proj_east_use = np.sin(za_arr[antenna["pix_use"]]) * np.sin( - az_arr[antenna["pix_use"]] - ) - proj_north_use = np.sin(za_arr[antenna["pix_use"]]) * np.cos( - az_arr[antenna["pix_use"]] - ) - proj_z_use = np.cos(za_arr[antenna["pix_use"]]) + proj_east_use = np.sin(za_arr) * np.sin(az_arr) + proj_north_use = np.sin(za_arr) * np.cos(az_arr) + proj_z_use = np.cos(za_arr) # FHD assumes you might be dealing with more than one antenna, hence the groupings it used. # PyFHD currently only supports one antenna, so we can ignore the groupings. 
@@ -466,15 +493,16 @@ def general_antenna_response( 1j * 2 * np.pi - * antenna["delays"] + * antenna["delays"][pol_i, :] * antenna["freq"][freq_i] * antenna["gain"][pol_i, freq_i] ) # TODO: Check if it's actually outer, although it does look like voltage_delay is likely 1D - measured_current = np.outer( - voltage_delay, antenna["coupling"][pol_i, freq_i] - ) - zenith_norm = np.outer( + # measured_current = np.outer( + # voltage_delay, antenna["coupling"][pol_i, freq_i] + # ) + measured_current = np.dot(voltage_delay, antenna["coupling"][pol_i, freq_i]) + zenith_norm = np.dot( np.ones(antenna["n_ant_elements"]), antenna["coupling"][pol_i, freq_i], ) diff --git a/PyFHD/beam_setup/beam.py b/PyFHD/beam_setup/beam.py index a7ef0fb..afafb4a 100644 --- a/PyFHD/beam_setup/beam.py +++ b/PyFHD/beam_setup/beam.py @@ -68,7 +68,7 @@ def create_psf(obs: dict, pyfhd_config: dict, logger: Logger) -> dict | File: dtype=np.complex128, ) xvals_i, yvals_i = np.meshgrid( - np.arange(psf["resolution"]), np.arange(psf["resolution"]), indexing="ij" + np.arange(psf["dim"]), np.arange(psf["dim"]), indexing="ij" ) xvals_i *= psf["resolution"] yvals_i *= psf["resolution"] @@ -116,8 +116,8 @@ def create_psf(obs: dict, pyfhd_config: dict, logger: Logger) -> dict | File: freq_center = antenna["freq"][0] primary_beam_area = np.zeros([obs["n_pol"], obs["n_freq"]], dtype=np.float64) primary_beam_sq_area = np.zeros([obs["n_pol"], obs["n_freq"]], dtype=np.float64) - ant_a_list = obs["baseline_info"]["tile_A"][0 : obs["n_baselines"]] - ant_b_list = obs["baseline_info"]["tile_B"][0 : obs["n_baselines"]] + ant_a_list = obs["baseline_info"]["tile_a"][0 : obs["n_baselines"]] + ant_b_list = obs["baseline_info"]["tile_b"][0 : obs["n_baselines"]] baseline_mod = np.max( [ 2 @@ -149,9 +149,12 @@ def create_psf(obs: dict, pyfhd_config: dict, logger: Logger) -> dict | File: hgroup2, _, gri2 = histogram(group2, min=0) # Histogram matrix between all separate groups of different beams group_matrix = np.outer(hgroup2, hgroup1) + # TODO: actually put in group loop and functionality + for freq_i in range(n_freq_bin): beam_int = 0 beam_int_2 = 0 + n_grp_use = 0 baseline_group_n = group_matrix[0, 0] # Get antenna indices which use this group's unique beam (probably all of them...) 
ant_1_arr = gri1[gri1[0] : gri1[1]] @@ -159,18 +162,19 @@ def create_psf(obs: dict, pyfhd_config: dict, logger: Logger) -> dict | File: # Get the number of baselines in each group ant_1_n = hgroup1[0] ant_2_n = hgroup2[0] - bi_use = np.flatten( - rebin(ant_1_arr + 1, (ant_1_n, ant_2_n)) * baseline_mod - + rebin(ant_2_arr.T + 1, (ant_2_n, ant_1_n)) - ) - bi_use2 = np.flatten( - rebin(ant_1_arr + 1, (ant_1_n, ant_2_n)) - + rebin(ant_2_arr.T + 1, (ant_2_n, ant_1_n)) * baseline_mod - ) - bi_use = np.concatenate([bi_use, bi_use2]) + bi_use = ( + rebin(ant_1_arr + 1, (ant_1_n, ant_2_n)).astype(int) * baseline_mod + + rebin(ant_2_arr.T + 1, (ant_2_n, ant_1_n)).astype(int) + ).flatten() + bi_use2 = ( + rebin(ant_1_arr + 1, (ant_1_n, ant_2_n)).astype(int) + + rebin(ant_2_arr.T + 1, (ant_2_n, ant_1_n)).astype(int) + * baseline_mod + ).flatten() + bi_use = np.concatenate([bi_use, bi_use2]).astype(int) if np.max(bi_use) > bi_max: bi_use = bi_use[np.where(bi_use <= bi_max)] - bi_use_i = np.nonzero(bi_hist0[bi_use]) + bi_use_i = np.nonzero(bi_hist0[bi_use])[0] if bi_use_i.size > 0: bi_use = bi_use[bi_use_i] baseline_group_n = bi_use.size @@ -201,34 +205,38 @@ def create_psf(obs: dict, pyfhd_config: dict, logger: Logger) -> dict | File: ) n_grp_use += baseline_group_n psf_single = np.zeros( - [psf["resolution"] + 1, psf["resolution"] + 1], - psf["dim"] ** 2, + (psf["resolution"] + 1, psf["resolution"] + 1, psf["dim"] ** 2), dtype=np.complex128, ) for i in range(psf["resolution"]): for j in range(psf["resolution"]): - psf_single[psf["resolution"] - i, psf["resolution"] - j] = ( - psf_base_superres[xvals_i + i, yvals_i + j] - ) + psf_single[ + psf["resolution"] - i - 1, psf["resolution"] - j - 1 + ] = psf_base_superres[xvals_i + i, yvals_i + j] # TODO: check the rolling (shifting) and potential reshaping done here (should already be in) for i in range(psf["resolution"]): - psf_single[psf["resolution"] - i, psf["resolution"]] = np.roll( - psf_base_superres[xvals_i + i, yvals_i + psf["resolution"]], + psf_single[psf["resolution"] - i - 1, psf["resolution"]] = np.roll( + psf_base_superres[ + xvals_i + i, yvals_i + psf["resolution"] - 1 + ].reshape(psf["dim"], psf["dim"]), 1, 0, ).flatten() for j in range(psf["resolution"]): - psf_single[psf["resolution"], psf["resolution"] - j] = np.roll( - psf_base_superres[xvals_i + psf["resolution"], yvals_i + j], + psf_single[psf["resolution"], psf["resolution"] - j - 1] = np.roll( + psf_base_superres[ + xvals_i + psf["resolution"] - 1, yvals_i + j + ].reshape(psf["dim"], psf["dim"]), 1, 1, ).flatten() psf_single[psf["resolution"], psf["resolution"]] = np.roll( np.roll( psf_base_superres[ - xvals_i + psf["resolution"], yvals_i + psf["resolution"] - ], + xvals_i + psf["resolution"] - 1, + yvals_i + psf["resolution"] - 1, + ].reshape(psf["dim"], psf["dim"]), 1, 1, ), diff --git a/PyFHD/beam_setup/beam_utils.py b/PyFHD/beam_setup/beam_utils.py index 3b58bdb..538e694 100644 --- a/PyFHD/beam_setup/beam_utils.py +++ b/PyFHD/beam_setup/beam_utils.py @@ -352,13 +352,15 @@ def beam_image_hyperresolved( + np.abs(antenna["jones"][1, ant_pol_2]) ** 2 ) # Amplitude of the baseline response is the product of the "two" antenna responses - power_zenith_beam = np.sqrt(amp_1 * amp_2) + power_zenith_beam = np.sqrt(amp_1 * amp_2).flatten() # Create one full-scale array - image_power_beam = np.zeros([psf["dim"], psf["dim"]], dtype=np.complex128) + image_power_beam = np.zeros( + [psf["image_dim"], psf["image_dim"]], dtype=np.complex128 + ) # Co-opt the array to calculate the power at zenith - 
image_power_beam[antenna["pix_use"]] = power_zenith_beam + image_power_beam.flat[antenna["pix_use"]] = power_zenith_beam # TODO: Work out the interpolation of the zenith power, it uses cubic interpolation # But the IDL Interpolate function in IDL uses an interpolation paramter of -0.5, where # scipy, numpy with their B-Splines seem to use a parameter of 0 by default with no way @@ -370,10 +372,16 @@ def beam_image_hyperresolved( # Initial trying out of using pyuvdata, not close at all. This is interpolating # the zenith power using the x and y pixel coordinates, to use pyuvdata likely need to do # pixel to ra/dec then to za/az - power_zenith = np.interp(zen_int_x, zen_int_y, image_power_beam) + + # Use order=1 for bilinear interpolation (matches IDL parameter -0.5) + # mode='nearest' handles out-of-bounds by using nearest edge values + # Interpolate real part because power is defined as real-valued + power_zenith = map_coordinates( + image_power_beam.real, [[zen_int_x], [zen_int_y]], order=1, mode="nearest" + )[0] # Normalize the image power beam to the zenith - image_power_beam[antenna["pix_use"]] = ( + image_power_beam.flat[antenna["pix_use"]] = ( power_zenith_beam * beam_ant_1 * beam_ant_2 ) / power_zenith @@ -439,9 +447,8 @@ def beam_power( zen_int_x, zen_int_y, psf, - pyfhd_config, ) - if pyfhd_config["kernel_window"]: + if pyfhd_config.get("kernel_window", False): image_power_beam *= antenna["pix_window"] psf_base_single = np.fft.fftshift(np.fft.ifftn(np.fft.fftshift(image_power_beam))) # TODO: Same cubic problem as in beam_image_hyperresolved here @@ -452,19 +459,17 @@ def beam_power( ) # Build a mask to create a well-defined finite beam - uv_mask_superres = np.zeros( - [[psf_base_superres.shape[0], psf_base_superres.shape[1]]], dtype=np.float64 - ) + uv_mask_superres = np.zeros(psf_base_superres.shape, dtype=np.float64) psf_mask_threshold_use = ( np.max(np.abs(psf_base_superres)) / pyfhd_config["beam_mask_threshold"] ) beam_i = region_grow( np.abs(psf_base_superres), - psf["superres_dim"] * (1 + psf["superres_dim"]) / 2, + int(psf["superres_dim"] * (1 + psf["superres_dim"]) / 2), low=psf_mask_threshold_use, high=np.max(np.abs(psf_base_superres)), ) - uv_mask_superres[beam_i] = 1 + uv_mask_superres.flat[beam_i] = 1 # FFT normalization correction in case this changes the total number of pixels psf_base_superres *= psf["intermediate_res"] ** 2 diff --git a/PyFHD/beam_setup/mwa.py b/PyFHD/beam_setup/mwa.py index 15911f1..bc290e5 100644 --- a/PyFHD/beam_setup/mwa.py +++ b/PyFHD/beam_setup/mwa.py @@ -57,10 +57,12 @@ def dipole_mutual_coupling( for di1 in range(n_dipole): for di2 in range(n_dipole): z_mat_interp[:, pol_i, di1, di2] = np.interp( - freq_arr_z_mat, freq_arr, z_mat_arr[:, pol_i, di1, di2] + freq_arr, freq_arr_z_mat, z_mat_arr[:, pol_i, di1, di2] ) - zlna_arr = np.interp(z_lna_dict["z"], z_lna_dict["frequency"], freq_arr) + zlna_real = np.interp(freq_arr, z_lna_dict["frequency"], z_lna_dict["z"].real) + zlna_imag = np.interp(freq_arr, z_lna_dict["frequency"], z_lna_dict["z"].imag) + zlna_arr = zlna_real + 1j * zlna_imag for fi in range(freq_arr.size): z_lna = zlna_arr[fi] * np.identity(n_dipole) From a884ad73f08a76e28d09eb899f6711f7b4191eac Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Tue, 21 Oct 2025 20:16:10 -0700 Subject: [PATCH 16/21] overhaul uvbeam handling --- PyFHD/beam_setup/antenna.py | 165 +++--------------- PyFHD/beam_setup/beam.py | 4 +- PyFHD/beam_setup/beam_utils.py | 54 +++--- PyFHD/beam_setup/mwa.py | 82 --------- PyFHD/pyfhd_tools/pyfhd_setup.py | 17 
-- .../1088285600_example.yaml | 1 - PyFHD/resources/config/pyfhd.yaml | 1 - docs/source/tutorial/tutorial.rst | 16 +- 8 files changed, 54 insertions(+), 286 deletions(-) delete mode 100644 PyFHD/beam_setup/mwa.py diff --git a/PyFHD/beam_setup/antenna.py b/PyFHD/beam_setup/antenna.py index a5dec64..dfa11b5 100644 --- a/PyFHD/beam_setup/antenna.py +++ b/PyFHD/beam_setup/antenna.py @@ -6,10 +6,8 @@ from astropy.coordinates import SkyCoord, EarthLocation from astropy import units from scipy.interpolate import interp1d -from PyFHD.beam_setup.mwa import dipole_mutual_coupling from PyFHD.pyfhd_tools.unit_conv import pixel_to_radec, radec_to_altaz from pyuvdata import ShortDipoleBeam, BeamInterface, UVBeam -from pyuvdata.telescopes import known_telescope_location from pyuvdata.analytic_beam import AnalyticBeam from typing import Literal from astropy.time import Time @@ -91,18 +89,10 @@ def init_beam(obs: dict, pyfhd_config: dict, logger: Logger) -> dict: coords = np.array([xc_arr, yc_arr, zc_arr]) # Get the delays delays = obs["delays"] * 4.35e-10 - if pyfhd_config["dipole_mutual_coupling_factor"]: - coupling = dipole_mutual_coupling(freq_center) - else: - coupling = np.tile( - np.identity(n_dipoles), (n_ant_pol, freq_center.size, 1, 1) - ) - else: n_dipoles = 1 coords = np.zeros((3, n_dipoles)) delays = np.zeros(n_dipoles) - coupling = np.tile(np.identity(n_dipoles), (n_ant_pol, freq_center.size, 1, 1)) # Create basic antenna dictionary antenna = { @@ -120,11 +110,11 @@ def init_beam(obs: dict, pyfhd_config: dict, logger: Logger) -> dict: "n_ant_elements": n_dipoles, # Anything that was pointer arrays in IDL will be None until assigned in Python "jones": None, - "coupling": coupling, "gain": np.ones([n_ant_pol, freq_center.size, n_dipoles], dtype=np.float64), "coords": coords, "delays": delays, - "response": None, + "iresponse": None, + "projection": None, # PyFHD supports one instrument at a time, so we setup the group so they're all in the same group. "group_id": np.zeros([n_ant_pol, obs["n_tile"]], dtype=np.int8), "pix_window": None, @@ -238,21 +228,17 @@ def init_beam(obs: dict, pyfhd_config: dict, logger: Logger) -> dict: print(zenith_angle_arr.shape) # Get the jones matrix for the antenna - antenna["jones"] = general_jones_matrix( + # shape is: (number of vector directions (usually 2), number of feeds (usually 2), + # number of frequencies, number of directions on the sky) + antenna["iresponse"], antenna["projection"] = general_jones_matrix( beam, za_array=zenith_angle_arr.flatten(), az_array=azimuth_arr.flatten(), freq_array=freq_center, telescope_location=location, ) - - # Get the antenna response - antenna["response"] = general_antenna_response( - obs, - antenna, - za_arr=zenith_angle_arr, - az_arr=azimuth_arr, - ) + # remove the initial shallow dimension in iresponse + antenna["iresponse"] = antenna["iresponse"][0] return antenna, psf, beam @@ -284,7 +270,7 @@ def general_jones_matrix( Parameters ---------- beam_obj : UVBeam or AnalyticBeam or BeamInterface - A pyuvdata beam, can be a UVBeam, and AnalyticBeam subclass, or a + A pyuvdata beam, can be a UVBeam, an AnalyticBeam subclass, or a BeamInterface object. alt_array : np.ndarray[float] Array of altitudes (also called elevations) in radians. Must be a 1D array. @@ -352,9 +338,6 @@ def general_jones_matrix( f"It was {az_convention}." 
) - # FHD requires an Efield beam, so set it here to be explicit - beam = BeamInterface(beam_obj, beam_type="efield") - if ra_dec_in: if ra_array.shape != dec_array.shape: raise ValueError("ra_array and dec_array must have the same shape") @@ -390,130 +373,28 @@ def general_jones_matrix( where_neg_az = np.nonzero(noe_az_array < 0) noe_az_array[where_neg_az] = noe_az_array[where_neg_az] + np.pi * 2.0 - # use the faster interpolation method if appropriate - if beam._isuvbeam and beam.beam.pixel_coordinate_system == "az_za": - interpol_fn = "az_za_map_coordinates" + if isinstance(beam_obj, UVBeam): + f_obj, k_obj = beam_obj.decompose_feed_iresponse_projection() + f_beam = BeamInterface(f_obj) + k_beam = BeamInterface(k_obj) else: - interpol_fn = None - - print(noe_az_array.size, za_array.size, freq_array.size) - - # chunk_size = 10000 # or whatever fits in memory - # n_pix = noe_az_array.size - # responses = [] - # for start in range(0, n_pix, chunk_size): - # end = min(start + chunk_size, n_pix) - # resp = beam.compute_response( - # az_array=az_array[start:end], - # za_array=za_array[start:end], - # freq_array=freq_array, - # interpolation_function=interpol_fn, - # spline_opts=spline_opts, - # check_azza_domain=check_azza_domain, - # ) - # responses.append(resp) - # # Concatenate results if needed - # full_response = np.concatenate(responses, axis=-1) - - # return full_response - - print(za_array) - - return beam.compute_response( + f_beam = BeamInterface(beam_obj, beam_type="feed_iresponse") + k_beam = BeamInterface(beam_obj, beam_type="feed_projection") + + f_vals = f_beam.compute_response( az_array=noe_az_array, za_array=za_array, freq_array=freq_array, - interpolation_function=interpol_fn, spline_opts=spline_opts, check_azza_domain=check_azza_domain, ) - -def general_antenna_response( - obs: dict, - antenna: dict, - za_arr: NDArray[np.floating], - az_arr: NDArray[np.floating], -) -> NDArray[np.complexfloating]: - """ - Calculate the response of a set of antennas for a given observation and antenna configuration, - including the electrical delays and coupling. - - Parameters - ---------- - obs : dict - Observation metadata dictionary - antenna : dict - Antenna metadata dictionary - za_arr : NDArray[np.floating] - Zenith angle array in radians - az_arr : NDArray[np.floating] - Azimuth angle array in radians - - Returns - ------- - response - The unpolarised response of a set of antennas - """ - light_speed = c.value - - """ - Given that in FHD the antenna response is a pointer array of shape (antenna["n_pol", obs["n_tile"]) - where each pointer is an array of pointers of shape (antenna["n_freq_bin"]). Each pointer in the array - of shape (antenna["n_freq_bin"]) points to a complex array of shape (antenna["pix_use"].size,). - - Furthermore, when the antenna response is calculated, it looks like this is done on a per frequency bin - basis and each tile will point to the same antenna response for that frequency bin. This means we can ignore - the tile dimension and just calculate the antenna response for each frequency bin and polarization to save - memory in Python. 
- """ - response = np.zeros( - [antenna["n_pol"], antenna["nfreq_bin"], antenna["pix_use"].size], - dtype=np.complex128, + k_vals = k_beam.compute_response( + az_array=noe_az_array, + za_array=za_array, + freq_array=freq_array, + spline_opts=spline_opts, + check_azza_domain=check_azza_domain, ) - # Calculate projections only at locations of non-zero pixels - proj_east_use = np.sin(za_arr) * np.sin(az_arr) - proj_north_use = np.sin(za_arr) * np.cos(az_arr) - proj_z_use = np.cos(za_arr) - - # FHD assumes you might be dealing with more than one antenna, hence the groupings it used. - # PyFHD currently only supports one antenna, so we can ignore the groupings. - for pol_i in range(antenna["n_pol"]): - # Phase of each dipole for the source (relative to the beamformer settings) - D_d = ( - np.outer(antenna["coords"][0], proj_east_use) - + np.outer(antenna["coords"][1], proj_north_use) - + np.outer(antenna["coords"][2], proj_z_use) - ) - - for freq_i in range(antenna["nfreq_bin"]): - Kconv = 2 * np.pi * antenna["freq"][freq_i] / light_speed - voltage_delay = np.exp( - 1j - * 2 - * np.pi - * antenna["delays"][pol_i, :] - * antenna["freq"][freq_i] - * antenna["gain"][pol_i, freq_i] - ) - # TODO: Check if it's actually outer, although it does look like voltage_delay is likely 1D - # measured_current = np.outer( - # voltage_delay, antenna["coupling"][pol_i, freq_i] - # ) - measured_current = np.dot(voltage_delay, antenna["coupling"][pol_i, freq_i]) - zenith_norm = np.dot( - np.ones(antenna["n_ant_elements"]), - antenna["coupling"][pol_i, freq_i], - ) - measured_current /= zenith_norm - - # TODO: This loop can probably be vectorized - for ii in range(antenna["n_ant_elements"]): - # TODO: check the way D_d needs to be indexed - antenna_gain_arr = np.exp(-1j * Kconv * D_d[ii, :]) - response[pol_i, freq_i] += ( - antenna_gain_arr * measured_current[ii] / antenna["n_ant_elements"] - ) - - return response + return f_vals, k_vals diff --git a/PyFHD/beam_setup/beam.py b/PyFHD/beam_setup/beam.py index afafb4a..689e10a 100644 --- a/PyFHD/beam_setup/beam.py +++ b/PyFHD/beam_setup/beam.py @@ -47,10 +47,8 @@ def create_psf(obs: dict, pyfhd_config: dict, logger: Logger) -> dict | File: """ if pyfhd_config["beam_file_path"] is None: - # Form the beam from scratch using pyuvdata for the Jones Matrix - # and translations from FHD for the antenna response. logger.info( - "PyFHD will do the beam forming from scratch using pyuvdata and the antenna response from FHD." + "PyFHD is using pyuvdata to set up the beam. " "Please note, gaussian decomp for MWA is not implemented yet." ) antenna, psf, beam = init_beam(obs, pyfhd_config, logger) diff --git a/PyFHD/beam_setup/beam_utils.py b/PyFHD/beam_setup/beam_utils.py index 538e694..fead2f8 100644 --- a/PyFHD/beam_setup/beam_utils.py +++ b/PyFHD/beam_setup/beam_utils.py @@ -303,10 +303,14 @@ def beam_image_hyperresolved( psf: dict, ) -> NDArray[np.complexfloating]: """ - Build the hyperresolved image-space beam power for a station/tile. Currently, this - function assumes that the amplitude of the jones matrix response between two antennas, - multiplied by the station/tile response, is the image beam power. This calculation - may be suject to change in the future. + Build the hyperresolved image-space beam power for a station/tile. This is + computed as the product of the F-matrices for the two antennas. 
The Jones + matrices are decomposed into F and K, where F gives the complex sensitivity + of each instrumental polarization to unpolarized emission (so it only has an + instrumental polarization index, the phase is related to time delays which + can vary spatially) and K is the projection from celestial polarization + vector components (orthogonal on the sky) to instrumental polarization vector + components (often non-orthogonal on the sky) Parameters ---------- @@ -332,35 +336,21 @@ def beam_image_hyperresolved( NDArray[np.complexfloating] An estimation of the image-space beam power, normalized to the zenith power. """ - # FHD was designed to account for multiple antennas but in most cases only one was ever used - # So we will just use the first antenna twice as I PyFHD does not support multiple antennas at this time, - # If you want to use multiple antennas, please open an issue on the PyFHD GitHub repository or do the translation and/or - # adjustments yourself. - beam_ant_1 = antenna["response"][ant_pol_1, freq_i] - beam_ant_2 = np.conjugate(antenna["response"][ant_pol_2, freq_i]) - - # Amplitude of the response from "ant1" (again FHD takes more than one antenna) - # is Sqrt(|J1[0,pol1]|^2 + |J1[1,pol1]|^2) - amp_1 = ( - np.abs(antenna["jones"][0, ant_pol_1]) ** 2 - + np.abs(antenna["jones"][1, ant_pol_1]) ** 2 - ) - # Amplitude of the response from "ant2" (again FHD takes more than one antenna) - # is Sqrt(|J2[0,pol2]|^2 + |J2[1,pol2]|^2) - amp_2 = ( - np.abs(antenna["jones"][0, ant_pol_2]) ** 2 - + np.abs(antenna["jones"][1, ant_pol_2]) ** 2 - ) - # Amplitude of the baseline response is the product of the "two" antenna responses - power_zenith_beam = np.sqrt(amp_1 * amp_2).flatten() - # Create one full-scale array image_power_beam = np.zeros( [psf["image_dim"], psf["image_dim"]], dtype=np.complex128 ) - # Co-opt the array to calculate the power at zenith - image_power_beam.flat[antenna["pix_use"]] = power_zenith_beam + # FHD was designed to account for multiple antennas but in most cases only one was ever used + # So we will just use the first antenna twice as I PyFHD does not support multiple antennas at this time, + # If you want to use multiple antennas, please open an issue on the PyFHD GitHub repository or do the translation and/or + # adjustments yourself. + # baseline response (power beam) is product of the "two" antenna responses + image_power_beam.flat[antenna["pix_use"]] = ( + antenna["iresponse"][ant_pol_1, freq_i] + * np.conjugate(antenna["iresponse"][ant_pol_2, freq_i]) + ).flatten() + # TODO: Work out the interpolation of the zenith power, it uses cubic interpolation # But the IDL Interpolate function in IDL uses an interpolation paramter of -0.5, where # scipy, numpy with their B-Splines seem to use a parameter of 0 by default with no way @@ -375,15 +365,15 @@ def beam_image_hyperresolved( # Use order=1 for bilinear interpolation (matches IDL parameter -0.5) # mode='nearest' handles out-of-bounds by using nearest edge values - # Interpolate real part because power is defined as real-valued + # Interpolate the abs to get the abs power at zenith, which I think is what we want here. 
power_zenith = map_coordinates( - image_power_beam.real, [[zen_int_x], [zen_int_y]], order=1, mode="nearest" + np.abs(image_power_beam), [[zen_int_x], [zen_int_y]], order=1, mode="nearest" )[0] # Normalize the image power beam to the zenith image_power_beam.flat[antenna["pix_use"]] = ( - power_zenith_beam * beam_ant_1 * beam_ant_2 - ) / power_zenith + image_power_beam.flat[antenna["pix_use"]] / power_zenith + ) return image_power_beam diff --git a/PyFHD/beam_setup/mwa.py b/PyFHD/beam_setup/mwa.py deleted file mode 100644 index bc290e5..0000000 --- a/PyFHD/beam_setup/mwa.py +++ /dev/null @@ -1,82 +0,0 @@ -import importlib_resources -import numpy as np -from numpy.typing import NDArray -from astropy.io import fits -from PyFHD.io.pyfhd_io import recarray_to_dict -from scipy.io import readsav - - -def dipole_mutual_coupling( - freq_arr: NDArray[np.floating], -) -> NDArray[np.complexfloating]: - """ - Calculate the mutual coupling for a dipole antenna at the given frequencies. - - Parameters - ---------- - freq_arr : NDArray[np.floating] - Array of frequencies in Hz. - - Returns - ------- - NDArray[np.complexfloating] - Array of mutual coupling values for the dipole antenna. - """ - # Placeholder implementation, replace with actual mutual coupling calculation - z_matrix_file = importlib_resources.files( - "PyFHD.resources.instrument_config" - ).joinpath("mwa_ZMatrix.fits") - z_lna_file = importlib_resources.files( - "PyFHD.resources.instrument_config" - ).joinpath("mwa_LNA_impedance.sav") - n_dipole = 16 - n_ant_pol = 2 - - # Read the Z matrix and LNA impedance from the files - z_matrix = fits.open(z_matrix_file) - z_mat_arr = np.zeros( - (len(z_matrix), n_ant_pol, n_dipole, n_dipole), dtype=np.complex128 - ) - freq_arr_z_mat = np.zeros(len(z_matrix), dtype=np.float64) - for ext_i in range(len(z_matrix)): - z_mat = z_matrix[ext_i].data - z_mat = z_mat[0] * (np.cos(z_mat[1]) + 1j * np.sin(z_mat[1])) - freq_arr_z_mat[ext_i] = z_matrix[ext_i].header["FREQ"] - z_mat_arr[ext_i, 0, :, :] = z_mat[n_dipole:, n_dipole:] - z_mat_arr[ext_i, 1, :, :] = z_mat[:n_dipole, :n_dipole] - z_matrix.close() - - z_lna_dict = recarray_to_dict(readsav(z_lna_file)["lna_impedance"]) - z_mat_return = np.zeros( - (n_ant_pol, freq_arr.size, n_dipole, n_dipole), dtype=np.complex128 - ) - z_mat_interp = np.zeros( - (freq_arr.size, n_ant_pol, n_dipole, n_dipole), dtype=np.complex128 - ) - for pol_i in range(n_ant_pol): - for di1 in range(n_dipole): - for di2 in range(n_dipole): - z_mat_interp[:, pol_i, di1, di2] = np.interp( - freq_arr, freq_arr_z_mat, z_mat_arr[:, pol_i, di1, di2] - ) - - zlna_real = np.interp(freq_arr, z_lna_dict["frequency"], z_lna_dict["z"].real) - zlna_imag = np.interp(freq_arr, z_lna_dict["frequency"], z_lna_dict["z"].imag) - zlna_arr = zlna_real + 1j * zlna_imag - - for fi in range(freq_arr.size): - z_lna = zlna_arr[fi] * np.identity(n_dipole) - z_inv_x = np.linalg.inv(z_lna + z_mat_interp[fi, 0]) - z_inv_y = np.linalg.inv(z_lna + z_mat_interp[fi, 1]) - - # normalize to a zenith pointing, where voltage=Exp(icomp*2.*!Pi*Delay*frequency) and delay=0 so voltage=1. 
- - norm_test_x = n_dipole / np.abs(np.sum(z_inv_x)) - norm_test_y = n_dipole / np.abs(np.sum(z_inv_y)) - z_inv_x *= norm_test_x - z_inv_y *= norm_test_y - - z_mat_return[0, fi] = z_inv_x - z_mat_return[1, fi] = z_inv_y - - return z_mat_return diff --git a/PyFHD/pyfhd_tools/pyfhd_setup.py b/PyFHD/pyfhd_tools/pyfhd_setup.py index c65fd8f..5e301d7 100644 --- a/PyFHD/pyfhd_tools/pyfhd_setup.py +++ b/PyFHD/pyfhd_tools/pyfhd_setup.py @@ -566,12 +566,6 @@ def pyfhd_parser(): action=OrderedBooleanOptionalAction, help="Set to true if the beams were made with corrective phases given the baseline location, which then enables the gridding to be done per baseline", ) - beam.add_argument( - "--dipole-mutual-coupling-factor", - default=False, - action=OrderedBooleanOptionalAction, - help="Allows a modification to the beam as a result of mutual coupling between dipoles calculated in mwa_dipole_mutual_coupling (See Sutinjo 2015 for more details).", - ) beam.add_argument( "--beam-offset-time", type=float, @@ -1289,17 +1283,6 @@ def pyfhd_setup(options: argparse.Namespace) -> Tuple[dict, logging.Logger]: if pyfhd_config["beam_file_path"] is None: logger.info("No beam file was set, PyFHD will calculate the beam.") - if ( - pyfhd_config["instrument"] == "mwa" - and pyfhd_config["beam_file_path"] is None - and not pyfhd_config["dipole_mutual_coupling_factor"] - ): - logger.warning( - "Since the instrument is MWA and we're calculating the beam, it's recommended to set the dipole mutual coupling factor to True." - ) - pyfhd_config["dipole_mutual_coupling_factor"] = True - warnings += 1 - # cal_bp_transfer when enabled should point to a file with a saved bandpass (Error) errors += _check_file_exists(pyfhd_config, "cal_bp_transfer") diff --git a/PyFHD/resources/1088285600_example/1088285600_example.yaml b/PyFHD/resources/1088285600_example/1088285600_example.yaml index 9e61ef2..cbef306 100644 --- a/PyFHD/resources/1088285600_example/1088285600_example.yaml +++ b/PyFHD/resources/1088285600_example/1088285600_example.yaml @@ -31,7 +31,6 @@ lazy-load-beam: false recalculate-beam : true beam-clip-floor : true interpolate-kernel : true -dipole-mutual-coupling-factor : true beam-nfreq-avg : 16 psf-dim: 54 psf-resolution : 100 diff --git a/PyFHD/resources/config/pyfhd.yaml b/PyFHD/resources/config/pyfhd.yaml index 4de6191..3b7dc8b 100644 --- a/PyFHD/resources/config/pyfhd.yaml +++ b/PyFHD/resources/config/pyfhd.yaml @@ -31,7 +31,6 @@ lazy-load-beam: false recalculate-beam : true beam-clip-floor : true interpolate-kernel : true -dipole-mutual-coupling-factor : true beam-nfreq-avg : 16 psf-dim: 54 psf-resolution : 100 diff --git a/docs/source/tutorial/tutorial.rst b/docs/source/tutorial/tutorial.rst index 4779a16..6f2df1d 100644 --- a/docs/source/tutorial/tutorial.rst +++ b/docs/source/tutorial/tutorial.rst @@ -871,7 +871,6 @@ the beam is inside the ``beams`` directory (not that we need it for this run, as recalculate-beam : true beam-clip-floor : true interpolate-kernel : true - dipole-mutual-coupling-factor : true beam-nfreq-avg : 16 psf-dim: 54 psf-resolution : 100 @@ -1691,13 +1690,14 @@ The most important options are the ``save-healpix-fits`` and the ``snapshot-heal Beam Setup ++++++++++ -The beam setup in ``PyFHD`` has been translated from `FHD`_ and is a combination of using `pyuvdata`_ and translation from `FHD`_, it is by no means tested and is definitely a work in progress. 
-More specifically, the ``beam_setup`` uses `pyuvdata`_ to create the ``Jones`` matrix for the beam, and then ``FHD`` translation is used to create the main response and the representation of the beam
-in UV space. For the moment, PyFHD only supports using one beam per observation and does not currently support different beams for different antennas. Furthermore, mode advanced features like gaussian decomp and
-many of the debugging options are not implemented, as such there are plenty of opportunities to add to the ``beam_setup``, both in small and large pieces of code.
-
-You can see test out the beam_setup by setting the ``beam-file-path`` to ``None`` (~ in the yaml configuration file) and setting the ``recalculate-beam`` option to ``True``. You'll likely run into
-memory limitations with your machine during testing. The ``beam_setup`` branch has been purposely left there ready for you to directly contribute code to it.
+The beam setup in ``PyFHD`` uses the ``UVBeam`` object from `pyuvdata`_ to get the ``Jones`` matrix and its decomposition for the beam, and then ``FHD`` translation is used to create the representation of the beam
+in UV space. For the moment, PyFHD only supports using one beam per observation and does not currently support different beams for different antennas. Furthermore, more advanced features like gaussian decomp and
+many of the debugging options are not implemented, so there are plenty of opportunities to add to the ``beam_setup``, both in small and large pieces of code.
+
+You can test out the beam_setup by saving the MWA beam file (``mwa_full_embedded_element_pattern.h5``, downloaded from ``http://ws.mwatelescope.org/static/mwa_full_embedded_element_pattern.h5``)
+to the ``PyFHD/PyFHD/resources/instrument_config/`` folder and setting the ``beam-file-path`` to ``~`` in the yaml configuration file (which translates to ``None`` in Python) and setting the
+``recalculate-beam`` option to ``True``. You may run into memory limitations with your machine during testing; you can decrease memory use by setting ``beam-nfreq-avg`` to larger values
+(as high as the number of frequencies in your input file).
 
 Deconvolution
 ++++++++++++++
 

From a2703abb1c8fdde734bb8bf25e32d6cfb367aa65 Mon Sep 17 00:00:00 2001
From: Bryna Hazelton
Date: Tue, 28 Oct 2025 14:06:46 -0700
Subject: [PATCH 17/21] only read in the beam for the relevant freq range

---
 PyFHD/beam_setup/antenna.py | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/PyFHD/beam_setup/antenna.py b/PyFHD/beam_setup/antenna.py
index dfa11b5..506dc59 100644
--- a/PyFHD/beam_setup/antenna.py
+++ b/PyFHD/beam_setup/antenna.py
@@ -219,14 +219,21 @@ def init_beam(obs: dict, pyfhd_config: dict, logger: Logger) -> dict:
                 "Please download it from http://ws.mwatelescope.org/static/mwa_full_embedded_element_pattern.h5 into the."
                 f"directory {mwa_beam_file.parent}"
             )
-        beam = UVBeam.from_file(mwa_beam_file, delays=obs["delays"])
+        # only read in the range of beam frequencies we need.
add a buffer + # to ensure that we have enough outside our range for interpolation + freq_buffer = 2e6 + freq_range = [ + np.min(obs["baseline_info"]["freq"]) - freq_buffer, + np.max(obs["baseline_info"]["freq"]) + freq_buffer, + ] + beam = UVBeam.from_file( + mwa_beam_file, delays=obs["delays"], freq_range=freq_range + ) # If you wish to add a different insturment, do it by adding a new elif here else: # Do an analytic beam as a placeholder beam = ShortDipoleBeam() - print(zenith_angle_arr.shape) - # Get the jones matrix for the antenna # shape is: (number of vector directions (usually 2), number of feeds (usually 2), # number of frequencies, number of directions on the sky) From df160d87eb78443ec6dbb1cfb6636828d843ab19 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Tue, 28 Oct 2025 14:07:49 -0700 Subject: [PATCH 18/21] fix a major bug that broke the gridding kernel specifically, resulted in a gridding kernel that did not contain the actual beam --- PyFHD/beam_setup/beam.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/PyFHD/beam_setup/beam.py b/PyFHD/beam_setup/beam.py index 689e10a..4d1f4d7 100644 --- a/PyFHD/beam_setup/beam.py +++ b/PyFHD/beam_setup/beam.py @@ -103,12 +103,12 @@ def create_psf(obs: dict, pyfhd_config: dict, logger: Logger) -> dict | File: xvals_uv_superres = ( xvals_uv_superres * res_super - np.floor(psf["dim"] / 2) * psf["intermediate_res"] - + np.floor(psf["dim"] / 2) + + np.floor(psf["image_dim"] / 2) ) yvals_uv_superres = ( yvals_uv_superres * res_super - np.floor(psf["dim"] / 2) * psf["intermediate_res"] - + np.floor(psf["dim"] / 2) + + np.floor(psf["image_dim"] / 2) ) freq_center = antenna["freq"][0] From 2bc3c79a6de57fa955f158503f2aee3c45fa8c7f Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Tue, 28 Oct 2025 14:08:15 -0700 Subject: [PATCH 19/21] fix a bug in computing beam phase that caused NaNs in beams --- PyFHD/beam_setup/beam_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/PyFHD/beam_setup/beam_utils.py b/PyFHD/beam_setup/beam_utils.py index fead2f8..d0e66df 100644 --- a/PyFHD/beam_setup/beam_utils.py +++ b/PyFHD/beam_setup/beam_utils.py @@ -477,7 +477,7 @@ def beam_power( if pyfhd_config["beam_clip_floor"]: i_use = np.where(np.abs(psf_base_superres)) psf_amp = np.abs(psf_base_superres) - psf_phase = np.arctan(psf_base_superres.imag / psf_base_superres.real) + psf_phase = np.angle(psf_base_superres) psf_floor = psf_mask_threshold_use * (psf["intermediate_res"] ** 2) psf_amp[i_use] -= psf_floor psf_base_superres = psf_amp * np.cos(psf_phase) + 1j * psf_amp * np.sin( From 2a684083ae86ce46045c91894d85955281b658ce Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Wed, 29 Oct 2025 12:31:33 -0700 Subject: [PATCH 20/21] fix fft direction in beam_image function --- PyFHD/beam_setup/beam_utils.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/PyFHD/beam_setup/beam_utils.py b/PyFHD/beam_setup/beam_utils.py index d0e66df..bac1830 100644 --- a/PyFHD/beam_setup/beam_utils.py +++ b/PyFHD/beam_setup/beam_utils.py @@ -240,7 +240,7 @@ def beam_image( beam_base_uv1 = np.zeros([dimension, elements], np.complex128) beam_base_uv1[xl : xh + 1, yl : yh + 1] = beam_single beam_base_single = np.fft.fftshift( - np.fft.ifftn(np.fft.fftshift(beam_base_uv1)) + np.fft.fftn(np.fft.fftshift(beam_base_uv1)) ) beam_base += ( nf_bin * (beam_base_single * np.conjugate(beam_base_single)).real @@ -284,10 +284,9 @@ def beam_image( if beam_gaussian_params is None: beam_base_uv1 = np.zeros([dimension, elements], 
dtype=np.complex128) beam_base_uv1[xl : xh + 1, yl : yh + 1] = beam_base_uv - beam_base = np.fft.fftshift(np.fft.ifftn(np.fft.fftshift(beam_base_uv1))) + beam_base = np.fft.fftshift(np.fft.fftn(np.fft.fftshift(beam_base_uv1))) else: beam_base = beam_base_uv - beam_base /= n_bin_use return beam_base.real From b6b1f70fb6dde4a02e7a1b2c6aaf3d0fcb1371c9 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Wed, 29 Oct 2025 12:32:32 -0700 Subject: [PATCH 21/21] fix computation of beam squared area --- PyFHD/beam_setup/beam.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/PyFHD/beam_setup/beam.py b/PyFHD/beam_setup/beam.py index 4d1f4d7..0af0731 100644 --- a/PyFHD/beam_setup/beam.py +++ b/PyFHD/beam_setup/beam.py @@ -151,7 +151,7 @@ def create_psf(obs: dict, pyfhd_config: dict, logger: Logger) -> dict | File: for freq_i in range(n_freq_bin): beam_int = 0 - beam_int_2 = 0 + beam2_int = 0 n_grp_use = 0 baseline_group_n = group_matrix[0, 0] # Get antenna indices which use this group's unique beam (probably all of them...) @@ -197,9 +197,9 @@ def create_psf(obs: dict, pyfhd_config: dict, logger: Logger) -> dict | File: baseline_group_n + np.sum(psf_base_superres) / psf["resolution"] ** 2 ) - beam_int_2 += ( + beam2_int += ( baseline_group_n - + np.sum(np.abs(psf_base_superres)) / psf["resolution"] ** 2 + + np.sum(np.abs(psf_base_superres) ** 2) / psf["resolution"] ** 2 ) n_grp_use += baseline_group_n psf_single = np.zeros( @@ -245,11 +245,11 @@ def create_psf(obs: dict, pyfhd_config: dict, logger: Logger) -> dict | File: beam_arr[pol_i, freq_i, :, :, :] = psf_single # Calculate the primary beam area and squared area - beam_int_2 *= weight_invert(n_grp_use) / obs["kpix"] ** 2 + beam2_int *= weight_invert(n_grp_use) / obs["kpix"] ** 2 beam_int *= weight_invert(n_grp_use) / obs["kpix"] ** 2 fi_use = np.where(obs["baseline_info"]["fbin_i"] == freq_i) primary_beam_area[pol_i, fi_use] = beam_int - primary_beam_sq_area[pol_i, fi_use] = beam_int_2 + primary_beam_sq_area[pol_i, fi_use] = beam2_int psf["beam_ptr"] = beam_arr obs["primary_beam_area"] = primary_beam_area
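
As a quick illustration of the PATCH 21 change above: the beam squared area has to integrate |B|**2 over the kernel, whereas the old accumulator effectively re-computed the unsquared beam area a second time. The following is a minimal standalone sketch, not PyFHD code; the toy kernel and the resolution value are made up for demonstration:

    import numpy as np

    # Toy super-resolution kernel: a unit peak that tapers away, as a beam does.
    psf_base_superres = np.exp(-np.linspace(-3, 3, 101) ** 2)
    resolution = 10  # stand-in for psf["resolution"]

    old_sq_area = np.sum(np.abs(psf_base_superres)) / resolution**2       # pre-patch: this is just the beam area again
    new_sq_area = np.sum(np.abs(psf_base_superres) ** 2) / resolution**2  # post-patch: genuinely integrates |B|**2

    print(old_sq_area, new_sq_area)  # the corrected value is strictly smaller for any tapering beam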
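
Similarly, the fftshift/fftn idiom fixed in PATCH 20 can be checked in isolation. A minimal sketch (not PyFHD code; the 8x8 uv grid is a made-up example) showing that swapping ifftn for fftn is not merely a normalisation change, since the two transforms also differ in the sign of the exponent:

    import numpy as np

    # A single off-centre uv sample; fftshift(fftn(fftshift(x))) is a centred
    # forward transform of it.
    uv = np.zeros((8, 8), dtype=np.complex128)
    uv[4, 5] = 1.0

    forward = np.fft.fftshift(np.fft.fftn(np.fft.fftshift(uv)))
    inverse = np.fft.fftshift(np.fft.ifftn(np.fft.fftshift(uv)))

    # For this real-valued input the two results are complex conjugates of each
    # other (the phase gradients run in opposite directions) up to a 1/N factor.
    print(np.allclose(inverse, np.conjugate(forward) / uv.size))  # True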
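
Finally, the NaN issue fixed in PATCH 19 is easy to reproduce on its own. A minimal sketch (not PyFHD code; psf_vals is a made-up array) of why np.arctan(imag / real) seeded NaNs into the beams while np.angle does not:

    import numpy as np

    # Zero-valued pixels (common outside the beam footprint) turn imag/real into
    # 0/0, so arctan(imag/real) is NaN there; it also folds opposite quadrants
    # onto the same angle. np.angle avoids both problems.
    psf_vals = np.array([0.0 + 0.0j, 1.0 + 1.0j, -1.0 - 1.0j])

    with np.errstate(invalid="ignore"):
        old_phase = np.arctan(psf_vals.imag / psf_vals.real)
    new_phase = np.angle(psf_vals)

    print(old_phase)  # [   nan  0.7853...  0.7853...]  the NaN propagates into the beam
    print(new_phase)  # [ 0.     0.7853... -2.3561...]  finite and quadrant-correct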