Source code for neuroconv.datainterfaces.ophys.baseimagingextractorinterface

"""Author: Ben Dichter."""

import warnings
from typing import Literal

import numpy as np
from pynwb import NWBFile
from pynwb.device import Device
from pynwb.ophys import ImagingPlane, OnePhotonSeries, TwoPhotonSeries

from ...baseextractorinterface import BaseExtractorInterface
from ...utils import (
    DeepDict,
    dict_deep_update,
    fill_defaults,
    get_base_schema,
    get_schema_from_hdmf_class,
)


class BaseImagingExtractorInterface(BaseExtractorInterface):
    """Parent class for all ImagingExtractorInterfaces."""

    # Search keywords describing the modalities covered by imaging interfaces.
    keywords = (
        "ophys",
        "optical electrophysiology",
        "fluorescence",
        "microscopy",
        "two photon",
        "one photon",
        "voltage imaging",
        "calcium imaging",
    )

    def _initialize_extractor(self, interface_kwargs: dict):
        """
        Initialize and return the extractor instance for imaging interfaces.

        Extends the base implementation by also stripping the
        'photon_series_type' parameter, which belongs to the interface
        rather than to the extractor.

        Parameters
        ----------
        interface_kwargs : dict
            The source data parameters passed to the interface constructor.

        Returns
        -------
        extractor_instance
            An initialized imaging extractor instance.
        """
        kwargs = interface_kwargs.copy()
        # Interface-only keys must not be forwarded to the extractor constructor.
        for interface_only_key in ("verbose", "photon_series_type"):
            kwargs.pop(interface_only_key, None)
        self.extractor_kwargs = kwargs
        extractor_class = self.get_extractor_class()
        return extractor_class(**kwargs)

    def __init__(
        self,
        verbose: bool = False,
        photon_series_type: Literal["OnePhotonSeries", "TwoPhotonSeries"] = "TwoPhotonSeries",
        **source_data,
    ):
        """
        Initialize the imaging interface.

        Parameters
        ----------
        verbose : bool, default: False
            Whether to emit verbose output.
        photon_series_type : {"OnePhotonSeries", "TwoPhotonSeries"}, default: "TwoPhotonSeries"
            The photon series type used for metadata and writing.
        **source_data
            Keyword arguments forwarded to the wrapped imaging extractor.
        """
        from roiextractors import ImagingExtractor

        super().__init__(**source_data)
        self.imaging_extractor: ImagingExtractor = self._extractor_instance
        self.verbose = verbose
        self.photon_series_type = photon_series_type
[docs] def get_metadata_schema( self, ) -> dict: """ Retrieve the metadata schema for the optical physiology (Ophys) data. Returns ------- dict The metadata schema dictionary containing definitions for Device, ImagingPlane, and either OnePhotonSeries or TwoPhotonSeries based on the photon_series_type. """ metadata_schema = super().get_metadata_schema() metadata_schema["required"] = ["Ophys"] # Initiate Ophys metadata metadata_schema["properties"]["Ophys"] = get_base_schema(tag="Ophys") metadata_schema["properties"]["Ophys"]["required"] = ["Device", "ImagingPlane", self.photon_series_type] metadata_schema["properties"]["Ophys"]["properties"] = dict( Device=dict(type="array", minItems=1, items={"$ref": "#/properties/Ophys/definitions/Device"}), ImagingPlane=dict(type="array", minItems=1, items={"$ref": "#/properties/Ophys/definitions/ImagingPlane"}), ) metadata_schema["properties"]["Ophys"]["properties"].update( { self.photon_series_type: dict( type="array", minItems=1, items={"$ref": f"#/properties/Ophys/definitions/{self.photon_series_type}"}, ), } ) # Schema definition for arrays imaging_plane_schema = get_schema_from_hdmf_class(ImagingPlane) imaging_plane_schema["properties"]["optical_channel"].pop("maxItems") metadata_schema["properties"]["Ophys"]["definitions"] = dict( Device=get_schema_from_hdmf_class(Device), ImagingPlane=imaging_plane_schema, ) photon_series = dict( OnePhotonSeries=OnePhotonSeries, TwoPhotonSeries=TwoPhotonSeries, )[self.photon_series_type] metadata_schema["properties"]["Ophys"]["definitions"].update( { self.photon_series_type: get_schema_from_hdmf_class(photon_series), } ) fill_defaults(metadata_schema, self.get_metadata()) return metadata_schema
[docs] def get_metadata( self, ) -> DeepDict: """ Retrieve the metadata for the imaging data. Returns ------- DeepDict Dictionary containing metadata including device information, imaging plane details, and photon series configuration. """ from ...tools.roiextractors import get_nwb_imaging_metadata metadata = super().get_metadata() default_metadata = get_nwb_imaging_metadata(self.imaging_extractor, photon_series_type=self.photon_series_type) metadata = dict_deep_update(default_metadata, metadata) # fix troublesome data types if "TwoPhotonSeries" in metadata["Ophys"]: for two_photon_series in metadata["Ophys"]["TwoPhotonSeries"]: if "dimension" in two_photon_series: two_photon_series["dimension"] = list(two_photon_series["dimension"]) if "rate" in two_photon_series: two_photon_series["rate"] = float(two_photon_series["rate"]) return metadata
[docs] def get_original_timestamps(self) -> np.ndarray: reinitialized_extractor = self._initialize_extractor(self.source_data) return reinitialized_extractor.get_timestamps()
[docs] def get_timestamps(self) -> np.ndarray: return self.imaging_extractor.get_timestamps()
[docs] def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): self.imaging_extractor.set_times(times=aligned_timestamps)
    def add_to_nwbfile(
        self,
        nwbfile: NWBFile,
        metadata: dict | None = None,
        *args,  # TODO: change to * (keyword only) on or after August 2026
        photon_series_type: Literal["TwoPhotonSeries", "OnePhotonSeries"] = "TwoPhotonSeries",
        photon_series_index: int = 0,
        parent_container: Literal["acquisition", "processing/ophys"] = "acquisition",
        stub_test: bool = False,
        always_write_timestamps: bool = False,
        iterator_type: str | None = "v2",
        iterator_options: dict | None = None,
        stub_samples: int = 100,
    ):
        """
        Add imaging data to the NWB file

        Parameters
        ----------
        nwbfile : NWBFile
            The NWB file where the imaging data will be added.
        metadata : dict, optional
            Metadata for the NWBFile, by default None.
        photon_series_type : {"TwoPhotonSeries", "OnePhotonSeries"}, optional
            The type of photon series to be added, by default "TwoPhotonSeries".
        photon_series_index : int, optional
            The index of the photon series in the provided imaging data, by default 0.
        parent_container : {"acquisition", "processing/ophys"}, optional
            Specifies the parent container to which the photon series should be added, either as part of "acquisition" or
            under the "processing/ophys" module, by default "acquisition".
        stub_test : bool, optional
            If True, only writes a small subset of frames for testing purposes, by default False.
        always_write_timestamps : bool, optional
            Whether to always write timestamps, by default False.
        iterator_type : {"v2", None}, default: "v2"
            The type of iterator for chunked data writing.
            'v2': Uses iterative write with control over chunking and progress bars.
            None: Loads all data into memory before writing (not recommended for large datasets).
            Note: 'v1' is deprecated and will be removed on or after March 2026.
        iterator_options : dict, optional
            Options for controlling the iterative write process (buffer size, progress bars).
            See the `pynwb tutorial on iterative write
            <https://pynwb.readthedocs.io/en/stable/tutorials/advanced_io/plot_iterative_write.html#sphx-glr-tutorials-advanced-io-plot-iterative-write-py>`_
            for more information on chunked data writing.
            Note: To configure chunk size and compression, use the backend configuration system via
            ``get_default_backend_configuration()`` and ``configure_backend()`` after calling this method.
            See the backend configuration documentation for details.
        stub_samples : int, default: 100
            The number of samples (frames) to use for testing.
        """
        from ...tools.roiextractors import add_imaging_to_nwbfile

        # TODO: Remove this block in August 2026 or after when positional arguments are no longer supported.
        # Backward-compatibility shim: extra positional arguments (beyond nwbfile
        # and metadata) are mapped onto the keyword-only parameters in the order
        # listed below, with a FutureWarning.
        if args:
            parameter_names = [
                "photon_series_type",
                "photon_series_index",
                "parent_container",
                "stub_test",
                "always_write_timestamps",
                "iterator_type",
                "iterator_options",
                "stub_samples",
            ]
            num_positional_args_before_args = 2  # nwbfile, metadata
            # More positionals than mappable parameters: mimic Python's own TypeError.
            if len(args) > len(parameter_names):
                raise TypeError(
                    f"add_to_nwbfile() takes at most {len(parameter_names) + num_positional_args_before_args} positional arguments but "
                    f"{len(args) + num_positional_args_before_args} were given. "
                    "Note: Positional arguments are deprecated and will be removed in August 2026 or after. Please use keyword arguments."
                )
            # zip stops at the shorter sequence, so only the supplied positionals are mapped.
            positional_values = dict(zip(parameter_names, args))
            passed_as_positional = list(positional_values.keys())
            warnings.warn(
                f"Passing arguments positionally to add_to_nwbfile is deprecated "
                f"and will be removed in August 2026 or after. "
                f"The following arguments were passed positionally: {passed_as_positional}. "
                "Please use keyword arguments instead.",
                FutureWarning,
                stacklevel=2,
            )
            # Positional values override the keyword defaults; parameters not
            # passed positionally keep their current (keyword or default) value.
            photon_series_type = positional_values.get("photon_series_type", photon_series_type)
            photon_series_index = positional_values.get("photon_series_index", photon_series_index)
            parent_container = positional_values.get("parent_container", parent_container)
            stub_test = positional_values.get("stub_test", stub_test)
            always_write_timestamps = positional_values.get("always_write_timestamps", always_write_timestamps)
            iterator_type = positional_values.get("iterator_type", iterator_type)
            iterator_options = positional_values.get("iterator_options", iterator_options)
            stub_samples = positional_values.get("stub_samples", stub_samples)

        if stub_test:
            # Clamp to the available sample count, then write only a leading slice.
            stub_samples = min([stub_samples, self.imaging_extractor.get_num_samples()])
            imaging_extractor = self.imaging_extractor.slice_samples(start_sample=0, end_sample=stub_samples)
        else:
            imaging_extractor = self.imaging_extractor

        # NOTE: truthiness check — an explicitly passed empty dict also falls back to get_metadata().
        metadata = metadata or self.get_metadata()

        add_imaging_to_nwbfile(
            imaging=imaging_extractor,
            nwbfile=nwbfile,
            metadata=metadata,
            photon_series_type=photon_series_type,
            photon_series_index=photon_series_index,
            parent_container=parent_container,
            always_write_timestamps=always_write_timestamps,
            iterator_type=iterator_type,
            iterator_options=iterator_options,
        )