From 7f7cedcbf855fad9d9b0a26148bc666a1cfa9241 Mon Sep 17 00:00:00 2001 From: bbean Date: Sun, 14 Apr 2024 16:33:50 -0600 Subject: [PATCH 01/32] add collaborative_dir setting, fix SpotAnalysis __name__ == "__main__" example, expand SpotAnalysis desired features list --- opencsp/common/lib/cv/SpotAnalysis.py | 36 +++++++++++++-------- opencsp/common/lib/opencsp_path/__init__.py | 8 ++++- 2 files changed, 30 insertions(+), 14 deletions(-) diff --git a/opencsp/common/lib/cv/SpotAnalysis.py b/opencsp/common/lib/cv/SpotAnalysis.py index 701bdb4c5..cb847aed7 100644 --- a/opencsp/common/lib/cv/SpotAnalysis.py +++ b/opencsp/common/lib/cv/SpotAnalysis.py @@ -14,6 +14,8 @@ from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperablesStream import SpotAnalysisOperablesStream from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperableAttributeParser import SpotAnalysisOperableAttributeParser import opencsp.common.lib.render.VideoHandler as vh +from opencsp.common.lib.opencsp_path import opencsp_settings +import opencsp.common.lib.opencsp_path.opencsp_root_path as orp import opencsp.common.lib.tool.file_tools as ft import opencsp.common.lib.tool.image_tools as it import opencsp.common.lib.tool.log_tools as lt @@ -53,7 +55,7 @@ class SpotAnalysis(Iterator[tuple[SpotAnalysisOperable]]): The features necessary to support these use cases include:: - a. Null image subtraction (TODO) + a. NULL image subtraction (TODO) b. Ambient and gradient light subtraction (TODO) c. Lens correction (TODO) d. Intensity per pixel generation (TODO) @@ -78,12 +80,20 @@ class SpotAnalysis(Iterator[tuple[SpotAnalysisOperable]]): w. Screen/camera plane homography (TODO) x. Self-PnP (TODO) y. Annotate power envelopes (TODO) - z. Orthorectify wrt. target - aa. Orthorectify wrt. beam - ab. Spot radius in mrad, at angle Beta relative to centroid - ac. Spot radius in mrad, at a half angle relative to the heliostat location - ad. Beam radius in mrad, at angle Beta relative to centroid - ae. 
Beam radius in mrad, at a half angle relative to the heliostat location + z. Orthorectify wrt. target (TODO) + aa. Orthorectify wrt. beam (TODO) + ab. Spot radius in mrad, at angle Beta relative to centroid (TODO) + ac. Spot radius in mrad, at a half angle relative to the heliostat location (TODO) + ad. Beam radius in mrad, at angle Beta relative to centroid (TODO) + ae. Beam radius in mrad, at a half angle relative to the heliostat location (TODO) + af. Cropping (TODO) + ag. Over and under exposure detection (TODO) + ah. Tagging images as NULL images (TODO) + ai. Filters (gaussian, box, etc) (TODO) + aj. Peak value pixel identification (TODO) + ak. Logorithmic scaling (LogScaleImageProcessor) + al. False color visualization (FalseColorImageProcessor) + am. Over/under exposure visualization (TODO) The inputs to support these features include:: @@ -381,13 +391,13 @@ def __next__(self): lt.logger() - collaborative = collaborative_dir() - # indir = collaborative + "/Experiments/2023-05-12_SpringEquinoxMidSummerSolstice/2_Data/BCS_data/Measure_01/20230512_071442 5W01_off/Raw Images" - indir = ( - collaborative - + "/Experiments/2023-05-12_SpringEquinoxMidSummerSolstice/2_Data/BCS_data/Measure_01/20230512_071638 5W01_000_880_2890/Raw Images" + collaborative_dir = opencsp_settings["opencsp_root_path"]["collaborative_dir"] + experiment_dir = os.path.join( + collaborative_dir, "Experiments", "2023-05-12_SpringEquinoxMidSummerSolstice", "2_Data", "BCS_data" ) - outdir = tmp_dir() + # indir = os.path.join(experiment_dir, "Measure_01", "20230512_071442 5W01_off", "Raw Images") + indir = os.path.join(experiment_dir, "Measure_01", "20230512_071638 5W01_000_880_2890", "Raw Images") + outdir = orp.opencsp_temporary_dir() image_processors = [ PopulationStatisticsImageProcessor(min_pop_size=-1), diff --git a/opencsp/common/lib/opencsp_path/__init__.py b/opencsp/common/lib/opencsp_path/__init__.py index 984d7405c..e17763616 100644 --- a/opencsp/common/lib/opencsp_path/__init__.py 
+++ b/opencsp/common/lib/opencsp_path/__init__.py @@ -8,10 +8,16 @@ import opencsp.common.lib.tool.log_tools as lt _orp_settings_key = "opencsp_root_path" -_orp_settings_default = {"example_data_dir": None, "scratch_dir": None, "scratch_name": "scratch"} +_orp_settings_default = { + "example_data_dir": None, + "scratch_dir": None, + "scratch_name": "scratch", + "collaborative_dir": None, +} """ example_data_dir: The directory containing the opencsp example data, for examples that have very large data inputs. scratch_dir: The directory containing the scratch folder, for use with HPC clusters. scratch_name: The name of the scratch directory. Default to "scratch". +collaborative_dir: A shared directory where experimental data is collected """ _settings_list = [[_orp_settings_key, _orp_settings_default]] From 7f1e53b6693180ab61fdc689b884430b7aed0841 Mon Sep 17 00:00:00 2001 From: bbean Date: Sun, 14 Apr 2024 18:12:33 -0600 Subject: [PATCH 02/32] start #82 spot analysis for peak intensity correction --- contrib/app/SpotAnalysis/PeakFlux.py | 86 ++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 contrib/app/SpotAnalysis/PeakFlux.py diff --git a/contrib/app/SpotAnalysis/PeakFlux.py b/contrib/app/SpotAnalysis/PeakFlux.py new file mode 100644 index 000000000..d40147e82 --- /dev/null +++ b/contrib/app/SpotAnalysis/PeakFlux.py @@ -0,0 +1,86 @@ +import os + + +import opencsp.common.lib.cv.SpotAnalysis as sa +import opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperableAttributeParser as saoap +from opencsp.common.lib.cv.spot_analysis.image_processor import * +import opencsp.common.lib.tool.file_tools as ft +import opencsp.common.lib.tool.log_tools as lt +import opencsp.common.lib.tool.time_date_tools as tdt + + +class PeakFlux: + """ + A class to process images from heliostat sweeps across a target, to find the spot of maximum flux from the + heliostat. 
+ + The input includes:: + + - A series of images with the heliostat on target, and with the target in ambient light conditions. These images + should be clearly labeled with the name of the heliostat under test, and whether the target is under ambient + light or heliostat reflected light. + - The pixel intensity to flux correction mapping. + + The generated output includes:: + + - Over/under exposure warnings + - Per-heliostat heatmap visualizations + - Per-heliostat peak flux identification + """ + + def __init__(self, indir: str, outdir: str, experiment_name: str): + self.indir = indir + self.outdir = outdir + + self.image_processors: list[AbstractSpotAnalysisImagesProcessor] = [ + # TODO + ] + self.spot_analysis = sa.SpotAnalysis( + experiment_name, self.image_processors, save_dir=outdir, save_overwrite=True + ) + + def run(self): + # process all images from indir + for result in self.spot_analysis: + # save the processed image + save_path = self.spot_analysis.save_image( + result, self.outdir, save_ext="png", also_save_supporting_images=False, also_save_attributes_file=True + ) + if save_path is None: + lt.warn( + f"Warning in PeakFlux.run(): failed to save image. " + + "Maybe SpotAnalaysis.save_overwrite is False? ({self.spot_analysis.save_overwrite=})" + ) + else: + lt.info(f"Saved image to {save_path}") + + # Get the attributes of the processed image, to save the results we're most interested in into a single + # condensed csv file. + parser = saoap.SpotAnalysisOperableAttributeParser(result, self.spot_analysis) + + # TODO append these results to the csv file + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser( + prog=__file__.rstrip(".py"), description='Processes images to find the point of peak flux.' 
+ ) + parser.add_argument('indir', type=str, help="Directory with images to be processed.") + parser.add_argument('outdir', type=str, help="Directory for where to put processed images and computed results.") + parser.add_argument('experiment_name', type=str, help="A description of the current data collection.") + args = parser.parse_args() + + # create the output directory + ft.create_directories_if_necessary(args.outdir) + + # create the log file + log_path_name_ext = os.path.join(args.outdir, "PeakFlux_" + tdt.current_date_time_string_forfile() + ".log") + lt.logger(log_path_name_ext) + + # validate the rest of the inputs + if not ft.directory_exists(args.indir): + lt.error_and_raise(FileNotFoundError, f"Error in PeakFlux.py: input directory '{args.indir}' does not exist!") + + PeakFlux(args.indir, args.outdir).run() From 11641f9f96bc5701be139dde61ba90a5fc379dd3 Mon Sep 17 00:00:00 2001 From: bbean Date: Sun, 14 Apr 2024 21:09:25 -0600 Subject: [PATCH 03/32] add CroppingImageProcessor --- contrib/app/SpotAnalysis/PeakFlux.py | 17 ++-- .../image_processor/CroppingImageProcessor.py | 97 +++++++++++++++++++ .../spot_analysis/image_processor/__init__.py | 6 +- .../test/TestCroppingImageProcessor.py | 48 +++++++++ 4 files changed, 160 insertions(+), 8 deletions(-) create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/test/TestCroppingImageProcessor.py diff --git a/contrib/app/SpotAnalysis/PeakFlux.py b/contrib/app/SpotAnalysis/PeakFlux.py index d40147e82..84e9dcdfd 100644 --- a/contrib/app/SpotAnalysis/PeakFlux.py +++ b/contrib/app/SpotAnalysis/PeakFlux.py @@ -1,6 +1,6 @@ +import json import os - import opencsp.common.lib.cv.SpotAnalysis as sa import opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperableAttributeParser as saoap from opencsp.common.lib.cv.spot_analysis.image_processor import * @@ -28,13 +28,17 @@ class PeakFlux: - Per-heliostat peak flux 
identification """ - def __init__(self, indir: str, outdir: str, experiment_name: str): + def __init__(self, indir: str, outdir: str, experiment_name: str, settings_path_name_ext: str): self.indir = indir self.outdir = outdir + self.experiment_name = experiment_name + self.settings_path_name_ext = settings_path_name_ext + + with open(settings_path_name_ext, 'r') as fin: + settings_dict = json.load(fin) + self.crop_box: list[int] = settings_dict['crop_box'] - self.image_processors: list[AbstractSpotAnalysisImagesProcessor] = [ - # TODO - ] + self.image_processors: list[AbstractSpotAnalysisImagesProcessor] = [CroppingImageProcessor(*self.crop_box)] self.spot_analysis = sa.SpotAnalysis( experiment_name, self.image_processors, save_dir=outdir, save_overwrite=True ) @@ -70,6 +74,7 @@ def run(self): parser.add_argument('indir', type=str, help="Directory with images to be processed.") parser.add_argument('outdir', type=str, help="Directory for where to put processed images and computed results.") parser.add_argument('experiment_name', type=str, help="A description of the current data collection.") + parser.add_argument('settings_file', type=str, help="Path to the settings JSON file for this PeakFlux evaluation.") args = parser.parse_args() # create the output directory @@ -83,4 +88,4 @@ def run(self): if not ft.directory_exists(args.indir): lt.error_and_raise(FileNotFoundError, f"Error in PeakFlux.py: input directory '{args.indir}' does not exist!") - PeakFlux(args.indir, args.outdir).run() + PeakFlux(args.indir, args.outdir, args.experiment_name, args.settings_file).run() diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py new file mode 100644 index 000000000..8c88533d3 --- /dev/null +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py @@ -0,0 +1,97 @@ +import dataclasses + +from opencsp.common.lib.cv.CacheableImage import 
CacheableImage +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable +from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( + AbstractSpotAnalysisImagesProcessor, +) +import opencsp.common.lib.opencsp_path.opencsp_root_path as orp +import opencsp.common.lib.tool.file_tools as ft +import opencsp.common.lib.tool.log_tools as lt + + +class CroppingImageProcessor(AbstractSpotAnalysisImagesProcessor): + def __init__(self, x1: int, x2: int, y1: int, y2: int): + """ + Crops all input images to the given shape. If the input image is too small, then an error will be thrown. + + Parameters + ---------- + x1 : int + The left side of the box to crop to (inclusive). + x2 : int + The right side of the box to crop to (exclusive). + y1 : int + The top side of the box to crop to (inclusive). + y2 : int + The bottom side of the box to crop to (exclusive). + """ + super().__init__(self.__class__.__name__) + + # validate the inputs + self.cropped_size_str = f"[left: {x1}, right: {x2}, top: {y1}, bottom: {y2}]" + if x1 < 0 or x2 < 0 or y1 < 0 or y2 < 0: + lt.error_and_raise( + ValueError, + "Error in CroppingImageProcessor(): " + f"all input values {self.cropped_size_str} must be >= 0", + ) + if x1 >= x2 or y1 >= y2: + lt.error_and_raise( + ValueError, + "Error in CroppingImageProcessor(): " + + f"x2 must be > x1, and y2 must be > y1, but {self.cropped_size_str}", + ) + + self.x1 = x1 + self.x2 = x2 + self.y1 = y1 + self.y2 = y2 + + def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAnalysisOperable]: + image = operable.primary_image.nparray + + # check the size of the image + h, w = image.shape[0], image.shape[1] + if w < self.x2 or h < self.y2: + lt.error_and_raise( + ValueError, + "Error in CroppingImageProcessor._execute(): " + + f"given image '{operable.primary_image_source_path}' is smaller than the cropped size {self.cropped_size_str}", + ) + + # create the cropped 
image + cropped = image[self.y1 : self.y2, self.x1 : self.x2] + new_primary = CacheableImage(cropped) + + ret = dataclasses.replace(operable, primary_image=new_primary) + return [ret] + + +if __name__ == "__main__": + expdir = orp.opencsp_scratch_dir() + "/solar_noon/dev/2023-05-12_SpringEquinoxMidSummerSolstice/2_Data/BCS_data/Measure_01" + indir = expdir + "/raw_images" + outdir = expdir + "/cropped_images" + + # ft.create_directories_if_necessary(indir) + # ft.delete_files_in_directory(indir, "*") + + # dirnames = ft.files_in_directory(expdir, files_only=False) + # dirnames = list(filter(lambda s: s not in ["raw_images", "cropped_images"], dirnames)) + # for dirname in dirnames: + # fromdir = expdir + "/" + dirname + "/Raw Images" + # for filename in ft.files_in_directory(fromdir): + # ft.copy_file(fromdir + "/" + filename, indir, filename) + + x1, y1, x2, y2 = 120, 29, 1526, 1158 + x1, y1 = x1+20, y1+20 + x2, y2 = x2-20, y2-20 + + ft.create_directories_if_necessary(outdir) + ft.delete_files_in_directory(outdir, "*") + + processor = CroppingImageProcessor(x1, x2, y1, y2) + for filename in ft.files_in_directory(indir): + img = CacheableImage.from_single_source(indir + "/" + filename) + result = processor.process_image(SpotAnalysisOperable(img))[0] + cropped = result.primary_image.to_image() + cropped.save(outdir + "/" + filename) diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py b/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py index a173135ee..6f05f3ce0 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py @@ -1,9 +1,10 @@ from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( AbstractSpotAnalysisImagesProcessor, ) -from opencsp.common.lib.cv.spot_analysis.image_processor.LogScaleImageProcessor import LogScaleImageProcessor +from 
opencsp.common.lib.cv.spot_analysis.image_processor.CroppingImageProcessor import CroppingImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.EchoImageProcessor import EchoImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.FalseColorImageProcessor import FalseColorImageProcessor +from opencsp.common.lib.cv.spot_analysis.image_processor.LogScaleImageProcessor import LogScaleImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.PopulationStatisticsImageProcessor import ( PopulationStatisticsImageProcessor, ) @@ -11,8 +12,9 @@ # Make these classes available when importing cv.spot_analysis.image_processor.* __all__ = [ 'AbstractSpotAnalysisImagesProcessor', - 'LogScaleImageProcessor', + 'CroppingImageProcessor', 'EchoImageProcessor', 'FalseColorImageProcessor', + 'LogScaleImageProcessor', 'PopulationStatisticsImageProcessor', ] diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestCroppingImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestCroppingImageProcessor.py new file mode 100644 index 000000000..21a9650a9 --- /dev/null +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestCroppingImageProcessor.py @@ -0,0 +1,48 @@ +import numpy as np +import os +import unittest +from opencsp.common.lib.cv.CacheableImage import CacheableImage +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable +from opencsp.common.lib.cv.spot_analysis.image_processor.CroppingImageProcessor import CroppingImageProcessor + +import opencsp.common.lib.tool.file_tools as ft + + +class TestCroppingImageProcessor(unittest.TestCase): + def setUp(self) -> None: + path, _, _ = ft.path_components(__file__) + self.data_dir = os.path.join(path, "data", "input", "CroppingImageProcessor") + self.out_dir = os.path.join(path, "data", "output", "CroppingImageProcessor") + ft.create_directories_if_necessary(self.data_dir) + 
ft.create_directories_if_necessary(self.out_dir) + + def test_valid_crop(self): + tenbyfive = CacheableImage(np.arange(50).reshape((5, 10))) + # [[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], + # [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], + # [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], + # [30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + # [40, 41, 42, 43, 44, 45, 46, 47, 48, 49]] + + processor = CroppingImageProcessor(x1=1, x2=9, y1=2, y2=4) + operable = SpotAnalysisOperable(tenbyfive, "tenbyfive") + result = processor.process_image(operable)[0] + cropped_image = result.primary_image.nparray + + expected = np.array([ + [21, 22, 23, 24, 25, 26, 27, 28], + [31, 32, 33, 34, 35, 36, 37, 38] + ]) + + np.testing.assert_array_equal(cropped_image, expected) + + def test_bad_input_raises_error(self): + tenbyfive = np.arange(50).reshape((5, 10)) + + processor = CroppingImageProcessor(x1=1, x2=90, y1=2, y2=40) + with self.assertRaises(ValueError): + processor.process_image(SpotAnalysisOperable(tenbyfive)) + + +if __name__ == '__main__': + unittest.main() From 89a71e34b64c2959e099def42c137ef14ae83617 Mon Sep 17 00:00:00 2001 From: bbean Date: Tue, 16 Apr 2024 19:24:35 -0600 Subject: [PATCH 04/32] fix error with ImageAttributeParser subclass __init__ initialization --- .../SpotAnalysisOperableAttributeParser.py | 20 ++++++++++++++++--- opencsp/common/lib/file/AttributesManager.py | 2 +- .../common/lib/render/ImageAttributeParser.py | 12 +++++++++++ 3 files changed, 30 insertions(+), 4 deletions(-) diff --git a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperableAttributeParser.py b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperableAttributeParser.py index 5445a8e51..df29438b3 100644 --- a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperableAttributeParser.py +++ b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperableAttributeParser.py @@ -5,17 +5,31 @@ class SpotAnalysisOperableAttributeParser(iap.ImageAttributeParser): + """ + Subclass of ImageAttributeParser that adds the 
following extra attributes to the attributes file: + + - spot_analysis_name (str): The 'name' property of the SpotAnalaysis operation that evaluated on this image. + - image_processor (list[str]): The names of the image processors that were executed against on this image. + """ + def __init__(self, operable: sao.SpotAnalysisOperable = None, spot_analysis=None): + # declare values that will be called in set_defaults(), so that they don't cause an error when set_defaults() is + # called in the parent's __init__ method. + self.spot_analysis: str = None + self.image_processors: list[str] = None + # get the current image source path, and initialize the parent current_image_source = tt.default(lambda: operable.primary_image.source_path, None) super().__init__(current_image_source=current_image_source) - # set values based on inputs + retrieved attributes + # prepare values to be assigned to this instance image_processors: list[asaip.AbstractSpotAnalysisImagesProcessor] = tt.default( lambda: spot_analysis.image_processors, [] ) - self.spot_analysis: str = tt.default(lambda: spot_analysis.name, None) - self.image_processors: list[str] = [processor.name for processor in image_processors] + + # Set values based on inputs + retrieved attributes. + self.spot_analysis = tt.default(lambda: spot_analysis.name, None) + self.image_processors = [processor.name for processor in image_processors] # retrieve any available attributes from the associated attributes file if self._previous_attr != None: diff --git a/opencsp/common/lib/file/AttributesManager.py b/opencsp/common/lib/file/AttributesManager.py index 31f2ead6d..2a500bca0 100644 --- a/opencsp/common/lib/file/AttributesManager.py +++ b/opencsp/common/lib/file/AttributesManager.py @@ -187,7 +187,7 @@ def load(self, attributes_file_path_name_ext: str): f"Error in AttributesManager.load(): attributes file '{attributes_file_path_name_ext}' does not exist!" 
) lt.debug(errstr) - raise FileExistsError(errstr) + raise FileNotFoundError(errstr) with open(attributes_file_path_name_ext, 'r') as fin: str_contents = fin.read() diff --git a/opencsp/common/lib/render/ImageAttributeParser.py b/opencsp/common/lib/render/ImageAttributeParser.py index 2355fdaa1..4289e78c9 100644 --- a/opencsp/common/lib/render/ImageAttributeParser.py +++ b/opencsp/common/lib/render/ImageAttributeParser.py @@ -9,6 +9,18 @@ class ImageAttributeParser(aap.AbstractAttributeParser): + """ + Subclass of AbstractAttributeParser that adds the following extra attributes to the attributes file: + + - current_image_source (str): The most recent filename (or network streamed name) that this image was loaded + from. + - original_image_source (str): The definitive filename (or network streamed name) that this image was loaded + from. This is usually going to be the name of the original file, such as "Nikon_2024-04-16.png". + - date_collected (datetime): Notes from the specific image processors about this image. + - experiment_name (str): The name of the measurement or experiment that this image was collected as a part of. + - notes (str): Extra notes about the image, typically added by the user. + """ + def __init__( self, current_image_source: str = None, From cce3f964ce4973a4e5f3ccf871bad2e683a162fb Mon Sep 17 00:00:00 2001 From: bbean Date: Tue, 16 Apr 2024 19:25:23 -0600 Subject: [PATCH 05/32] add method to retrieve a logger function based on the log level --- opencsp/common/lib/tool/log_tools.py | 30 ++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/opencsp/common/lib/tool/log_tools.py b/opencsp/common/lib/tool/log_tools.py index 5fd1a8eae..fe28de90c 100644 --- a/opencsp/common/lib/tool/log_tools.py +++ b/opencsp/common/lib/tool/log_tools.py @@ -9,6 +9,7 @@ import re import socket import sys +from typing import Callable # Don't import any other opencsp libraries here. 
Log tools _must_ be able to be # imported before any other opencsp code. Instead, if there are other @@ -164,6 +165,28 @@ def _add_stream_handlers(logger_: log.Logger, level: int, formatter: log.Formatt logger_.addHandler(h2) +def get_log_method_for_level(level: int) -> Callable: + """ + Returns one of the log methods (debug, info, warning, error, critical) based on the given level. + + Parameters + ---------- + level : int + One of log.DEBUG, log.INFO, log.WARNING, log.ERROR, or log.CRITICAL + """ + if level == log.DEBUG: + return debug + if level == log.INFO: + return info + if level == log.WARNING: + return warning + if level == log.ERROR: + return error + if level == log.CRITICAL: + return critical + error_and_raise(ValueError, f"Error in log_tools.get_log_method_for_level(): unknown log level {level}") + + def debug(*vargs, **kwargs) -> int: """Output debugging information, both to console and log file. @@ -209,10 +232,6 @@ def info(*vargs, **kwargs) -> int: return 0 -def warn(*vargs, **kwargs): - warning(*vargs, **kwargs) - - def warning(*vargs, **kwargs): """Warning message, both to console and log file. @@ -236,6 +255,9 @@ def warning(*vargs, **kwargs): return 0 +warn = warning + + def error(*vargs, **kwargs) -> int: """Error message, both to console and log file. 
From 5f5a10cf6372a9630f679d8d4e059844d44365db Mon Sep 17 00:00:00 2001 From: bbean Date: Tue, 16 Apr 2024 19:26:29 -0600 Subject: [PATCH 06/32] formatting --- .../image_processor/CroppingImageProcessor.py | 9 ++++++--- .../image_processor/test/TestCroppingImageProcessor.py | 5 +---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py index 8c88533d3..af739b51c 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py @@ -68,7 +68,10 @@ def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAn if __name__ == "__main__": - expdir = orp.opencsp_scratch_dir() + "/solar_noon/dev/2023-05-12_SpringEquinoxMidSummerSolstice/2_Data/BCS_data/Measure_01" + expdir = ( + orp.opencsp_scratch_dir() + + "/solar_noon/dev/2023-05-12_SpringEquinoxMidSummerSolstice/2_Data/BCS_data/Measure_01" + ) indir = expdir + "/raw_images" outdir = expdir + "/cropped_images" @@ -83,8 +86,8 @@ def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAn # ft.copy_file(fromdir + "/" + filename, indir, filename) x1, y1, x2, y2 = 120, 29, 1526, 1158 - x1, y1 = x1+20, y1+20 - x2, y2 = x2-20, y2-20 + x1, y1 = x1 + 20, y1 + 20 + x2, y2 = x2 - 20, y2 - 20 ft.create_directories_if_necessary(outdir) ft.delete_files_in_directory(outdir, "*") diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestCroppingImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestCroppingImageProcessor.py index 21a9650a9..4b0a6cf83 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestCroppingImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestCroppingImageProcessor.py @@ -29,10 +29,7 @@ def test_valid_crop(self): 
result = processor.process_image(operable)[0] cropped_image = result.primary_image.nparray - expected = np.array([ - [21, 22, 23, 24, 25, 26, 27, 28], - [31, 32, 33, 34, 35, 36, 37, 38] - ]) + expected = np.array([[21, 22, 23, 24, 25, 26, 27, 28], [31, 32, 33, 34, 35, 36, 37, 38]]) np.testing.assert_array_equal(cropped_image, expected) From 5fd25591c101926a4dc7099e45595824840c13e9 Mon Sep 17 00:00:00 2001 From: bbean Date: Tue, 16 Apr 2024 19:27:24 -0600 Subject: [PATCH 07/32] add ExposureDetectionImageProcessor, add image_processor_notes to SpotAnalysisOperable --- .../cv/spot_analysis/SpotAnalysisOperable.py | 3 + .../SpotAnalysisOperableAttributeParser.py | 13 +- .../image_processor/CroppingImageProcessor.py | 4 +- .../ExposureDetectionImageProcessor.py | 139 ++++++++++++++++++ 4 files changed, 156 insertions(+), 3 deletions(-) create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/ExposureDetectionImageProcessor.py diff --git a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py index 726948880..c1cef249e 100644 --- a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py +++ b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py @@ -45,6 +45,9 @@ class SpotAnalysisOperable: or with simulations to provide a starting estimate of beam shape. """ population_statistics: SpotAnalysisPopulationStatistics = None """ The population statistics, as populated by PopulationStatisticsImageProcessor. """ + image_processor_notes: list[tuple[str, list[str]]] = field(default_factory=list) + """ Notes from specific image processors. These notes are generally intended for human use, but it is recommended + that they maintain a consistent formatting so that they can also be used programmatically. """ def __post_init__(self): # We use this method to sanitize the inputs to the constructor. 
diff --git a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperableAttributeParser.py b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperableAttributeParser.py index df29438b3..1d6c2cf10 100644 --- a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperableAttributeParser.py +++ b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperableAttributeParser.py @@ -10,6 +10,7 @@ class SpotAnalysisOperableAttributeParser(iap.ImageAttributeParser): - spot_analysis_name (str): The 'name' property of the SpotAnalaysis operation that evaluated on this image. - image_processor (list[str]): The names of the image processors that were executed against on this image. + - image_processor_notes (list[tuple[str, list[str]]]): Notes from the specific image processors about this image. """ def __init__(self, operable: sao.SpotAnalysisOperable = None, spot_analysis=None): @@ -17,6 +18,7 @@ def __init__(self, operable: sao.SpotAnalysisOperable = None, spot_analysis=None # called in the parent's __init__ method. self.spot_analysis: str = None self.image_processors: list[str] = None + self.image_processor_notes: list[tuple[str, list[str]]] = None # get the current image source path, and initialize the parent current_image_source = tt.default(lambda: operable.primary_image.source_path, None) @@ -28,8 +30,11 @@ def __init__(self, operable: sao.SpotAnalysisOperable = None, spot_analysis=None ) # Set values based on inputs + retrieved attributes. + # Note: image_processor_notes is a reference, not a copy, so that as this list is updated by image processors + # those updates will be reflected here. 
self.spot_analysis = tt.default(lambda: spot_analysis.name, None) self.image_processors = [processor.name for processor in image_processors] + self.image_processor_notes = tt.default(lambda: operable.image_processor_notes, None) # retrieve any available attributes from the associated attributes file if self._previous_attr != None: @@ -41,6 +46,7 @@ def attributes_key(self) -> str: def set_defaults(self, other: 'SpotAnalysisOperableAttributeParser'): self.spot_analysis = tt.default(self.spot_analysis, other.spot_analysis) self.image_processors = tt.default(self.image_processors, other.image_processors) + self.image_processor_notes = tt.default(self.image_processor_notes, other.image_processor_notes) super().set_defaults(other) def has_contents(self) -> bool: @@ -51,10 +57,15 @@ def has_contents(self) -> bool: def parse_my_contents(self, file_path_name_ext: str, raw_contents: str, my_contents: any): self.spot_analysis = my_contents['spot_analysis_name'] self.image_processors = my_contents['image_processors'] + self.image_processor_notes = my_contents['image_processor_notes'] super().parse_my_contents(file_path_name_ext, raw_contents, my_contents) def my_contents_to_json(self, file_path_name_ext: str) -> any: - ret = {'spot_analysis_name': self.spot_analysis, 'image_processors': self.image_processors} + ret = { + 'spot_analysis_name': self.spot_analysis, + 'image_processors': self.image_processors, + 'image_processor_notes': self.image_processor_notes, + } ret = {**ret, **super().my_contents_to_json(file_path_name_ext)} return ret diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py index af739b51c..309515d37 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py @@ -73,13 +73,13 @@ def _execute(self, operable: SpotAnalysisOperable, 
is_last: bool) -> list[SpotAn + "/solar_noon/dev/2023-05-12_SpringEquinoxMidSummerSolstice/2_Data/BCS_data/Measure_01" ) indir = expdir + "/raw_images" - outdir = expdir + "/cropped_images" + outdir = expdir + "/processed_images" # ft.create_directories_if_necessary(indir) # ft.delete_files_in_directory(indir, "*") # dirnames = ft.files_in_directory(expdir, files_only=False) - # dirnames = list(filter(lambda s: s not in ["raw_images", "cropped_images"], dirnames)) + # dirnames = list(filter(lambda s: s not in ["raw_images", "processed_images"], dirnames)) # for dirname in dirnames: # fromdir = expdir + "/" + dirname + "/Raw Images" # for filename in ft.files_in_directory(fromdir): diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/ExposureDetectionImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/ExposureDetectionImageProcessor.py new file mode 100644 index 000000000..5cf732441 --- /dev/null +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/ExposureDetectionImageProcessor.py @@ -0,0 +1,139 @@ +import dataclasses + +import numpy as np + +from opencsp.common.lib.cv.CacheableImage import CacheableImage +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable +from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( + AbstractSpotAnalysisImagesProcessor, +) +import opencsp.common.lib.opencsp_path.opencsp_root_path as orp +import opencsp.common.lib.tool.file_tools as ft +import opencsp.common.lib.tool.log_tools as lt + + +class ExposureDetectionImageProcessor(AbstractSpotAnalysisImagesProcessor): + def __init__( + self, + under_exposure_limit=0.99, + under_exposure_threshold: int | float = 0.95, + over_exposure_limit=0.97, + max_pixel_value=255, + log_level=lt.log.WARN, + ): + """ + Detects over and under exposure in images and adds the relevant tag to the image. 
+ + Over or under exposure is determined by the proportion of pixels that are at near the max_pixel_value threshold. + If more pixels than the over exposure limit is at the maximum level, then the image is considered over exposed. If + more pixels than the under exposure limit is below the under_exposure_threshold, then the image is considered under + exposed. + + For color images, the proportion of pixels across all color channels is used. + + Parameters + ---------- + under_exposure_limit : float, optional + Fraction of pixels that should be below the under_exposure_threshold, by default 0.99 + under_exposure_threshold : int | float, optional + If a float, then this is the fraction of the max_pixel_value that is used to determine under exposure. If an + int, then this is the pixel value. For example, 0.95 and 243 will produce the same results when + max_pixel_value is 255. By default 0.95 + over_exposure_limit : float, optional + Fraction of pixels that should be below the maximum value, by default 0.97 + max_pixel_value : int, optional + The maximum possible value of the pixels, by default 255 to match uint8 images + log_level : int, optional + The level to print out warnings at, by default log.WARN + """ + super().__init__(self.__class__.__name__) + + # validate the inputs + val_err = lambda s: lt.error_and_raise(ValueError, "Error in ExposureDetectionImageProcessor: " + s) + if under_exposure_limit < 0 or under_exposure_limit > 1 or over_exposure_limit < 0 or over_exposure_limit > 1: + val_err(f"exposure limits must be between 0 and 1, but {under_exposure_limit=}, {over_exposure_limit=}") + if max_pixel_value < 0: + val_err(f"max_pixel_value should be the maximum possible value from the camera, but is {max_pixel_value=}") + + self.under_exposure_limit = under_exposure_limit + self.under_exposure_threshold = under_exposure_threshold + self.over_exposure_limit = over_exposure_limit + self.max_pixel_value = max_pixel_value + self.log_level = log_level + self.log 
= lt.get_log_method_for_level(self.log_level) + + def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAnalysisOperable]: + image = operable.primary_image.nparray + notes = ( + 'ExposureDetectionImageProcessor', + [ + f'settings: {self.under_exposure_limit=}, {self.under_exposure_threshold=}, {self.over_exposure_limit=}, {self.max_pixel_value=}' + ], + ) + + # check for under exposure + if isinstance(self.under_exposure_threshold, float): + under_exposure_threshold = int(np.ceil(self.max_pixel_value * self.under_exposure_threshold)) + else: # isinstance(self.under_exposure_threshold, int) + under_exposure_threshold: int = self.under_exposure_threshold + num_dark_pixels = np.sum(image < under_exposure_threshold) + proportion_dark_pixels = num_dark_pixels / image.size + if proportion_dark_pixels > self.under_exposure_limit: + self.log( + "Warning in ExposureDetectionImageProcessor._execute(): image is under exposed. " + + f"At most {self.under_exposure_limit*100:0.2f}% of pixels should have values less than {under_exposure_threshold}, " + + f"but instead {proportion_dark_pixels*100:0.2f}% have a value less than that threshold." + ) + notes[1].append( + f"Image is under exposed. {proportion_dark_pixels*100:0.2f}% of pixels are below {under_exposure_threshold}" + ) + + # check for over exposure + over_exposure_threshold = self.max_pixel_value + num_light_pixels = np.sum(image >= over_exposure_threshold) + proportion_light_pixels = num_light_pixels / image.size + if proportion_light_pixels > self.over_exposure_limit: + self.log( + "Warning in ExposureDetectionImageProcessor._execute(): image is over exposed. " + + f"At most {self.over_exposure_limit*100:0.2f}% of pixels should have the value {over_exposure_threshold}, " + + f"but instead {proportion_light_pixels*100:0.2f}% have a value greater than or equal to that threshold." + ) + notes[1].append( + f"Image is over exposed. 
{proportion_light_pixels*100:0.2f}% of pixels are above {over_exposure_threshold}" + ) + + operable.image_processor_notes.append(notes) + return [operable] + + +if __name__ == "__main__": + expdir = ( + orp.opencsp_scratch_dir() + + "/solar_noon/dev/2023-05-12_SpringEquinoxMidSummerSolstice/2_Data/BCS_data/Measure_01" + ) + indir = expdir + "/raw_images" + outdir = expdir + "/processed_images" + lt.logger(outdir + "/log.txt", level=lt.log.INFO) + + x1, y1, x2, y2 = 120, 29, 1526, 1158 + x1, y1 = x1 + 20, y1 + 20 + x2, y2 = x2 - 20, y2 - 20 + + ft.create_directories_if_necessary(outdir) + ft.delete_files_in_directory(outdir, "*") + images_filenames = ft.files_in_directory_by_extension(indir, ["jpg"])["jpg"] + images_path_name_ext = [indir + '/' + filename for filename in images_filenames] + + import opencsp.common.lib.cv.SpotAnalysis as sa + from opencsp.common.lib.cv.spot_analysis.image_processor import * + + image_processors = [ + CroppingImageProcessor(x1, x2, y1, y2), + ExposureDetectionImageProcessor(under_exposure_threshold=120), + ] + + spot_analysis = sa.SpotAnalysis('ExposureDetectionImageProcessor test', image_processors, outdir) + spot_analysis.set_primary_images(images_path_name_ext) + + for operable in spot_analysis: + spot_analysis.save_image(operable) From 56f5498a080bb0da4df57d7bcdc6980e7a647d7c Mon Sep 17 00:00:00 2001 From: bbean Date: Tue, 16 Apr 2024 20:10:50 -0600 Subject: [PATCH 08/32] add TestExposureDetectionImageProcessor --- .../ExposureDetectionImageProcessor.py | 12 +- .../TestExposureDetectionImageProcessor.py | 105 ++++++++++++++++++ 2 files changed, 116 insertions(+), 1 deletion(-) create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/test/TestExposureDetectionImageProcessor.py diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/ExposureDetectionImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/ExposureDetectionImageProcessor.py index 5cf732441..36a9ecde6 100644 --- 
a/opencsp/common/lib/cv/spot_analysis/image_processor/ExposureDetectionImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/ExposureDetectionImageProcessor.py @@ -34,7 +34,7 @@ def __init__( Parameters ---------- under_exposure_limit : float, optional - Fraction of pixels that should be below the under_exposure_threshold, by default 0.99 + Fraction of pixels allowed to be below the under_exposure_threshold, by default 0.99 under_exposure_threshold : int | float, optional If a float, then this is the fraction of the max_pixel_value that is used to determine under exposure. If an int, then this is the pixel value. For example, 0.95 and 243 will produce the same results when @@ -55,13 +55,19 @@ def __init__( if max_pixel_value < 0: val_err(f"max_pixel_value should be the maximum possible value from the camera, but is {max_pixel_value=}") + # register values self.under_exposure_limit = under_exposure_limit self.under_exposure_threshold = under_exposure_threshold self.over_exposure_limit = over_exposure_limit self.max_pixel_value = max_pixel_value self.log_level = log_level + + # internal variables self.log = lt.get_log_method_for_level(self.log_level) + # variables for unit tests + self._raise_on_error = False + def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAnalysisOperable]: image = operable.primary_image.nparray notes = ( @@ -87,6 +93,8 @@ def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAn notes[1].append( f"Image is under exposed. {proportion_dark_pixels*100:0.2f}% of pixels are below {under_exposure_threshold}" ) + if self._raise_on_error: + raise RuntimeError("for unit testing: under exposed image") # check for over exposure over_exposure_threshold = self.max_pixel_value @@ -101,6 +109,8 @@ def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAn notes[1].append( f"Image is over exposed. 
{proportion_light_pixels*100:0.2f}% of pixels are above {over_exposure_threshold}" ) + if self._raise_on_error: + raise RuntimeError("for unit testing: over exposed image") operable.image_processor_notes.append(notes) return [operable] diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestExposureDetectionImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestExposureDetectionImageProcessor.py new file mode 100644 index 000000000..0f74a5594 --- /dev/null +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestExposureDetectionImageProcessor.py @@ -0,0 +1,105 @@ +import numpy as np +import os +import unittest +from opencsp.common.lib.cv.CacheableImage import CacheableImage +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable +from opencsp.common.lib.cv.spot_analysis.image_processor.ExposureDetectionImageProcessor import ( + ExposureDetectionImageProcessor, +) + +import opencsp.common.lib.tool.file_tools as ft + + +class TestExposureDetectionImageProcessor(unittest.TestCase): + def setUp(self) -> None: + path, _, _ = ft.path_components(__file__) + self.data_dir = os.path.join(path, "data", "input", "ExposureDetectionImageProcessor") + self.out_dir = os.path.join(path, "data", "output", "ExposureDetectionImageProcessor") + ft.create_directories_if_necessary(self.data_dir) + ft.create_directories_if_necessary(self.out_dir) + + self.simple_image = np.array([100, 150, 200, 255]) + self.cacheable_simple_image = CacheableImage(self.simple_image, source_path="test_under_exposure") + self.operable = SpotAnalysisOperable(self.cacheable_simple_image) + + def test_under_exposure_limit(self): + # raises + processor = ExposureDetectionImageProcessor(under_exposure_limit=0) + processor._raise_on_error = True + with self.assertRaises(RuntimeError) as ex: + processor.process_image(self.operable) + self.assertIn("under exposed", repr(ex)) + + # raises + processor = 
ExposureDetectionImageProcessor(under_exposure_limit=0.74) + processor._raise_on_error = True + with self.assertRaises(RuntimeError) as ex: + processor.process_image(self.operable) + self.assertIn("under exposed", repr(ex)) + + # passes + processor = ExposureDetectionImageProcessor(under_exposure_limit=0.75) + processor._raise_on_error = True + processor.process_image(self.operable) + + def test_under_exposure_threshold(self): + # passes + processor = ExposureDetectionImageProcessor(under_exposure_threshold=0) + processor._raise_on_error = True + processor.process_image(self.operable) + + # passes + processor = ExposureDetectionImageProcessor(under_exposure_threshold=255) + processor._raise_on_error = True + processor.process_image(self.operable) + + # raises + processor = ExposureDetectionImageProcessor(under_exposure_threshold=256) + processor._raise_on_error = True + with self.assertRaises(RuntimeError) as ex: + processor.process_image(self.operable) + self.assertIn("under exposed", repr(ex)) + + def test_over_exposure_limit(self): + # raises + processor = ExposureDetectionImageProcessor(over_exposure_limit=0) + processor._raise_on_error = True + with self.assertRaises(RuntimeError) as ex: + processor.process_image(self.operable) + self.assertIn("over exposed", repr(ex)) + + # raises + processor = ExposureDetectionImageProcessor(over_exposure_limit=0.24) + processor._raise_on_error = True + with self.assertRaises(RuntimeError) as ex: + processor.process_image(self.operable) + self.assertIn("over exposed", repr(ex)) + + # passes + processor = ExposureDetectionImageProcessor(over_exposure_limit=0.25) + processor._raise_on_error = True + processor.process_image(self.operable) + + def test_max_pixel_value(self): + # raises + processor = ExposureDetectionImageProcessor(max_pixel_value=0) + processor._raise_on_error = True + with self.assertRaises(RuntimeError) as ex: + processor.process_image(self.operable) + self.assertIn("over exposed", repr(ex)) + + # raises + 
processor = ExposureDetectionImageProcessor(max_pixel_value=100) + processor._raise_on_error = True + with self.assertRaises(RuntimeError) as ex: + processor.process_image(self.operable) + self.assertIn("over exposed", repr(ex)) + + # passes + processor = ExposureDetectionImageProcessor(max_pixel_value=101) + processor._raise_on_error = True + processor.process_image(self.operable) + + +if __name__ == '__main__': + unittest.main() From d8fe94b71f5bf293cc5d19076453db7d8be4959b Mon Sep 17 00:00:00 2001 From: bbean Date: Wed, 17 Apr 2024 15:04:34 -0600 Subject: [PATCH 09/32] add AbstractAggregateImageProcessor and associated test --- .../AbstractAggregateImageProcessor.py | 210 ++++++++++++++++++ .../spot_analysis/image_processor/__init__.py | 8 + .../TestAbstractAggregateImageProcessor.py | 143 ++++++++++++ .../AbstractAggregateImageProcessor/a1.np.npy | Bin 0 -> 132 bytes .../AbstractAggregateImageProcessor/a2.np.npy | Bin 0 -> 132 bytes .../AbstractAggregateImageProcessor/a3.np.npy | Bin 0 -> 132 bytes .../AbstractAggregateImageProcessor/b1.np.npy | Bin 0 -> 132 bytes .../AbstractAggregateImageProcessor/b2.np.npy | Bin 0 -> 132 bytes 8 files changed, 361 insertions(+) create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/AbstractAggregateImageProcessor.py create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/test/TestAbstractAggregateImageProcessor.py create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AbstractAggregateImageProcessor/a1.np.npy create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AbstractAggregateImageProcessor/a2.np.npy create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AbstractAggregateImageProcessor/a3.np.npy create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AbstractAggregateImageProcessor/b1.np.npy create mode 100644 
opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AbstractAggregateImageProcessor/b2.np.npy diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractAggregateImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractAggregateImageProcessor.py new file mode 100644 index 000000000..a7bfafad9 --- /dev/null +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractAggregateImageProcessor.py @@ -0,0 +1,210 @@ +from abc import ABC, abstractmethod +import dataclasses +import re +from typing import Callable + +import numpy as np + +from opencsp.common.lib.cv.CacheableImage import CacheableImage +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisImagesStream import ImageType +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable +from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( + AbstractSpotAnalysisImagesProcessor, +) +import opencsp.common.lib.opencsp_path.opencsp_root_path as orp +import opencsp.common.lib.tool.file_tools as ft +import opencsp.common.lib.tool.log_tools as lt + + +class AbstractAggregateImageProcessor(AbstractSpotAnalysisImagesProcessor, ABC): + def __init__( + self, + images_group_assigner: Callable[[SpotAnalysisOperable], int], + group_execution_trigger: Callable[[list[tuple[SpotAnalysisOperable, int]]], int | None] = None, + *vargs, + **kwargs, + ): + """ + Detects and collects images that are part of the same group, so that they can be acted upon all at the same time. + + Each operator is assigned to an image group. Groups are determined by the images_group_assigner function. Any + function with the correct signature can be used, or one of the builtin methods can be assigned. 
The builtin + methods for this include AbstractAggregateImageProcessor.*, where "*" is one of: + + - group_by_brightness: groups are determined by the brightest pixel in the image + - group_by_name: all images with the same name match are included as part of the same group + + When the assigned group number for the current operable is different than for the previous operable, the group + execution is triggered. _execute_aggregate() will be called for the entire group, and afterwards the group's + list will be cleared. The trigger behavior can be changed by providing a value for the group_execution_trigger + parameter. + + Parameters + ---------- + images_group_assigner : Callable[[SpotAnalysisOperable], int] + The function that determines which group a given operable should be assigned to. + group_execution_trigger : Callable[[list[tuple[SpotAnalysisOperable, int]]], int | None], optional + The function that determines when a group of operables is executed on, by default group_trigger_on_change. + """ + super().__init__(self.__class__.__name__) + + # register arguments + self.images_group_assigner = images_group_assigner + self.group_execution_trigger = group_execution_trigger + + # images groups dictionary + self.image_groups: list[tuple[SpotAnalysisOperable, int]] = [] + """The lists of images and group assignments. The images are in the same order they were received.""" + + @staticmethod + def group_by_brightness(intensity_to_group: dict[int, int]) -> Callable[[SpotAnalysisOperable], int]: + """ + Returns a group for the given operable based on the intensity mapping and the brightest pixel in the operable's + primary image. + + Intended use is as the images_group_assigner parameter to this class. 
+ """ + + def group_by_brightness_inner(operable: SpotAnalysisOperable, intensity_to_group: dict[int, int]): + image = operable.primary_image.nparray + + # get the brightest pixel's value + max_pixel_value = np.max(image) + + # choose a group + intensity_thresholds = sorted(list(intensity_to_group.keys())) + assigned_group = intensity_to_group[intensity_thresholds[0]] + for intensity_threshold in intensity_thresholds[1:]: + if max_pixel_value >= intensity_threshold: + assigned_group = intensity_to_group[intensity_threshold] + + return assigned_group + + return lambda operable: group_by_brightness_inner(operable, intensity_to_group) + + @staticmethod + def group_by_name(name_pattern: re.Pattern) -> Callable[[SpotAnalysisOperable], int]: + """ + Returns a group for the given operable based on the groups matches "()" for the given pattern. + + Intended use is as the images_group_assigner parameter to this class. + + Example assignments:: + + pattern = re.compile(r"(foo|bar)") + + foo_operable = SpotAnalaysisOperable(CacheableImage(source_path="hello_foo.png")) + food_operable = SpotAnalaysisOperable(CacheableImage(source_path="hello_food.png")) + bar_operable = SpotAnalaysisOperable(CacheableImage(source_path="hello_bar.png")) + + groups: list[str] = [] + images_group_assigner = group_by_name(pattern, groups) + images_group_assigner(foo_operable) # returns 0, groups=["foo"] + images_group_assigner(food_operable) # returns 0, groups=["foo"] + images_group_assigner(bar_operable) # returns 1, groups=["foo", "bar"] + """ + + def group_by_name_inner(operable: SpotAnalysisOperable, name_pattern: re.Pattern, groups: list[str]) -> int: + names_to_check = [ + operable.primary_image_source_path, + operable.primary_image.source_path, + operable.primary_image.cache_path, + ] + names_to_check = list(filter(lambda name: name is not None, names_to_check)) + if len(names_to_check) == 0: + lt.warning("Warning in AbstractAggregateImageProcessor.group_by_name(): operator has no image 
name") + return 0 + + # match the name_pattern against each of the names_to_check + for name in names_to_check: + m = name_pattern.search(name) + if m is None: + continue + if len(m.groups()) == 0: + lt.debug( + "In AbstractAggregateImageProcessor.group_by_name(): " + + f"no groups found for pattern {name_pattern} when trying to match against name {name}" + ) + continue + + # get the name match + group_str = "".join(m.groups()) + + # return the index of the existing group, or add a new group + if group_str in groups: + return groups.index(group_str) + else: + groups.append(group_str) + return len(groups) - 1 + + # failed to find a match, assign to default group 0 + lt.warning( + "Warning in AbstractAggregateImageProcessor.group_by_name(): " + + f"failed to find a match to {name_pattern} in {names_to_check}" + ) + return 0 + + groups: list[str] = [] + return lambda operable: group_by_name_inner(operable, name_pattern, groups) + + @staticmethod + def group_trigger_on_change() -> Callable[[list[tuple[SpotAnalysisOperable, int]]], int | None]: + """ + Triggers anytime that the group assigned to the current operable is different than for the previous operable. + + Intended use is as the group_execution_trigger parameter to this class. 
+ """ + + def group_trigger_on_change_inner(image_groups: list[tuple[SpotAnalysisOperable, int]]) -> int | None: + if len(image_groups) <= 1: + return None + + current_group = image_groups[-1][1] + previous_group = image_groups[-2][1] + if current_group != previous_group: + return previous_group + else: + return None + + return group_trigger_on_change_inner + + def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAnalysisOperable]: + # assign this operable to a group + operable_group = self.images_group_assigner(operable) + self.image_groups.append((operable, operable_group)) + + # check if we should trigger execution + triggered_groups: list[int] = [] + if is_last: + triggered_groups_set = {operable_and_group[1]: None for operable_and_group in self.image_groups} + triggered_groups = list(triggered_groups_set.keys()) + else: + triggered_group = self.group_execution_trigger(self.image_groups) + if triggered_group is not None: + triggered_groups = [triggered_group] + + # execute the triggered groups + ret: list[SpotAnalysisOperable] = [] + for group in triggered_groups: + + # get the operables to execute on + operables: list[SpotAnalysisOperable] = [] + i = 0 + while i < len(self.image_groups): + operable_and_group = self.image_groups[i] + if operable_and_group[1] == group: + operables.append(operable_and_group[0]) + del self.image_groups[i] + else: + i += 1 + + # collect the results of the execution + ret += self._execute_aggregate(group, operables, is_last) + + return ret + + @abstractmethod + def _execute_aggregate( + self, group: int, operables: list[SpotAnalysisOperable], is_last: bool + ) -> list[SpotAnalysisOperable]: + pass diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py b/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py index 6f05f3ce0..7fee1d939 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py 
@@ -1,8 +1,14 @@ +from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractAggregateImageProcessor import ( + AbstractAggregateImageProcessor, +) from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( AbstractSpotAnalysisImagesProcessor, ) from opencsp.common.lib.cv.spot_analysis.image_processor.CroppingImageProcessor import CroppingImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.EchoImageProcessor import EchoImageProcessor +from opencsp.common.lib.cv.spot_analysis.image_processor.ExposureDetectionImageProcessor import ( + ExposureDetectionImageProcessor, +) from opencsp.common.lib.cv.spot_analysis.image_processor.FalseColorImageProcessor import FalseColorImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.LogScaleImageProcessor import LogScaleImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.PopulationStatisticsImageProcessor import ( @@ -11,9 +17,11 @@ # Make these classes available when importing cv.spot_analysis.image_processor.* __all__ = [ + 'AbstractAggregateImageProcessor', 'AbstractSpotAnalysisImagesProcessor', 'CroppingImageProcessor', 'EchoImageProcessor', + 'ExposureDetectionImageProcessor', 'FalseColorImageProcessor', 'LogScaleImageProcessor', 'PopulationStatisticsImageProcessor', diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestAbstractAggregateImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestAbstractAggregateImageProcessor.py new file mode 100644 index 000000000..571314e40 --- /dev/null +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestAbstractAggregateImageProcessor.py @@ -0,0 +1,143 @@ +import os +import re +from typing import Callable +import unittest + +import numpy as np + +from opencsp.common.lib.cv.CacheableImage import CacheableImage +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable +from 
opencsp.common.lib.cv.spot_analysis.image_processor.AbstractAggregateImageProcessor import ( + AbstractAggregateImageProcessor, +) +import opencsp.common.lib.cv.SpotAnalysis as sa +import opencsp.common.lib.tool.file_tools as ft + + +class TestAbstractAggregateImageProcessor(unittest.TestCase): + def setUp(self) -> None: + path, _, _ = ft.path_components(__file__) + self.data_dir = os.path.join(path, "data", "input", "AbstractAggregateImageProcessor") + self.out_dir = os.path.join(path, "data", "output", "AbstractAggregateImageProcessor") + ft.create_directories_if_necessary(self.data_dir) + ft.create_directories_if_necessary(self.out_dir) + + # # generate the test data + # a = np.array([0]) + # np.save(os.path.join(self.data_dir, "a1.np"), a, allow_pickle=False) + # np.save(os.path.join(self.data_dir, "b1.np"), a, allow_pickle=False) + # np.save(os.path.join(self.data_dir, "b2.np"), a, allow_pickle=False) + # np.save(os.path.join(self.data_dir, "a2.np"), a, allow_pickle=False) + # np.save(os.path.join(self.data_dir, "a3.np"), a, allow_pickle=False) + filenames = ["a1.np", "b1.np", "b2.np", "a2.np", "a3.np"] + self.image_files = [os.path.join(self.data_dir, filename) for filename in filenames] + + def test_group_by_brightness(self): + op0 = SpotAnalysisOperable(CacheableImage(np.array([0])), primary_image_source_path="op0") + op10 = SpotAnalysisOperable(CacheableImage(np.array([10])), primary_image_source_path="op10") + op20 = SpotAnalysisOperable(CacheableImage(np.array([20])), primary_image_source_path="op20") + op30 = SpotAnalysisOperable(CacheableImage(np.array([30])), primary_image_source_path="op30") + op40 = SpotAnalysisOperable(CacheableImage(np.array([40])), primary_image_source_path="op40") + + g = AbstractAggregateImageProcessor.group_by_brightness({10: 3, 20: 2, 30: 1}) + self.assertEqual(g(op0), 3) + self.assertEqual(g(op10), 3) + self.assertEqual(g(op20), 2) + self.assertEqual(g(op30), 1) + self.assertEqual(g(op40), 1) + + def 
test_group_by_name(self): + opa1 = SpotAnalysisOperable(CacheableImage(np.array([0])), primary_image_source_path="testastring") + opb1 = SpotAnalysisOperable(CacheableImage(np.array([0])), primary_image_source_path="testbstring") + opc1 = SpotAnalysisOperable(CacheableImage(np.array([0])), primary_image_source_path="testcstring") + opa2 = SpotAnalysisOperable(CacheableImage(np.array([0])), primary_image_source_path="testastring") + opa3 = SpotAnalysisOperable(CacheableImage(np.array([0])), primary_image_source_path="tesastring") + + # normal operation + g = AbstractAggregateImageProcessor.group_by_name(re.compile("test?(.)string")) + self.assertEqual(g(opa1), 0) + self.assertEqual(g(opb1), 1) + self.assertEqual(g(opc1), 2) + self.assertEqual(g(opa2), 0) + self.assertEqual(g(opa3), 0) + + # doesn't match the pattern, return default (should also print a warning) + opbad = SpotAnalysisOperable(CacheableImage(np.array([0])), primary_image_source_path="b") + self.assertEqual(g(opbad), 0) + + def test_group_trigger_on_change(self): + opa1 = SpotAnalysisOperable(CacheableImage(np.array([0])), primary_image_source_path="testastring") + opb1 = SpotAnalysisOperable(CacheableImage(np.array([0])), primary_image_source_path="testbstring") + opc1 = SpotAnalysisOperable(CacheableImage(np.array([0])), primary_image_source_path="testcstring") + opa2 = SpotAnalysisOperable(CacheableImage(np.array([0])), primary_image_source_path="testastring") + opa3 = SpotAnalysisOperable(CacheableImage(np.array([0])), primary_image_source_path="tesastring") + + t = AbstractAggregateImageProcessor.group_trigger_on_change() + image_groups = [] + self.assertEqual(t(image_groups), None) + image_groups.append((opa1, 0)) + self.assertEqual(t(image_groups), None) + image_groups.append((opb1, 1)) + self.assertEqual(t(image_groups), 0) + image_groups.append((opc1, 2)) + self.assertEqual(t(image_groups), 1) + image_groups.append((opa2, 0)) + self.assertEqual(t(image_groups), 2) + image_groups.append((opa3, 
0)) + self.assertEqual(t(image_groups), None) + + def test_execute_aggregate(self): + assigner = AbstractAggregateImageProcessor.group_by_name(re.compile(r"(.)[0-9].np")) + always_trigger = lambda image_groups: image_groups[0][1] + aggregator = ConcreteAggregateImageProcessor(assigner, always_trigger) + spot_analysis = sa.SpotAnalysis("test_execute_aggregate", [aggregator]) + spot_analysis.set_primary_images(self.image_files) + + expected_group_order = [0, 1, 1, 0, 0] + expected_group_sizes = [1, 1, 1, 1, 1] + expected_is_last = [False, False, False, False, True] + for i, operable in enumerate(spot_analysis): + self.assertEqual(expected_group_order[i], aggregator.prev_executed_group) + self.assertEqual(expected_group_sizes[i], aggregator.prev_group_size) + self.assertEqual(expected_is_last[i], aggregator.prev_is_last) + + def test_execute_aggregate_(self): + assigner = AbstractAggregateImageProcessor.group_by_name(re.compile(r"(.)[0-9].np")) + triggerer = AbstractAggregateImageProcessor.group_trigger_on_change() + aggregator = ConcreteAggregateImageProcessor(assigner, triggerer) + spot_analysis = sa.SpotAnalysis("test_execute_aggregate", [aggregator]) + spot_analysis.set_primary_images(self.image_files) + + expected_group_order = [0, 1, 1, 0, 0] + expected_group_sizes = [1, 2, 2, 2, 2] + expected_is_last = [False, False, False, True, True] + for i, operable in enumerate(spot_analysis): + self.assertEqual(expected_group_order[i], aggregator.prev_executed_group) + self.assertEqual(expected_group_sizes[i], aggregator.prev_group_size) + self.assertEqual(expected_is_last[i], aggregator.prev_is_last) + + +class ConcreteAggregateImageProcessor(AbstractAggregateImageProcessor): + def __init__( + self, + images_group_assigner: Callable[[SpotAnalysisOperable], int], + group_execution_trigger: Callable[[list[tuple[SpotAnalysisOperable, int]]], int | None] = None, + *vargs, + **kwargs + ): + super().__init__(images_group_assigner, group_execution_trigger, *vargs, **kwargs) + 
self.prev_executed_group = None + self.prev_group_size = 0 + self.prev_is_last = False + + def _execute_aggregate( + self, group: int, operables: list[SpotAnalysisOperable], is_last: bool + ) -> list[SpotAnalysisOperable]: + self.prev_executed_group = group + self.prev_group_size = len(operables) + self.prev_is_last = is_last + return operables + + +if __name__ == '__main__': + unittest.main() diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AbstractAggregateImageProcessor/a1.np.npy b/opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AbstractAggregateImageProcessor/a1.np.npy new file mode 100644 index 0000000000000000000000000000000000000000..f061420f4b51476a5c729108095808471dab527f GIT binary patch literal 132 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlWC%^qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= YXCxM+0{I$-I+{8PwF(pfE(Ra~0KohkTL1t6 literal 0 HcmV?d00001 diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AbstractAggregateImageProcessor/a2.np.npy b/opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AbstractAggregateImageProcessor/a2.np.npy new file mode 100644 index 0000000000000000000000000000000000000000..f061420f4b51476a5c729108095808471dab527f GIT binary patch literal 132 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlWC%^qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= YXCxM+0{I$-I+{8PwF(pfE(Ra~0KohkTL1t6 literal 0 HcmV?d00001 diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AbstractAggregateImageProcessor/a3.np.npy b/opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AbstractAggregateImageProcessor/a3.np.npy new file mode 100644 index 0000000000000000000000000000000000000000..f061420f4b51476a5c729108095808471dab527f GIT binary patch literal 132 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlWC%^qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= YXCxM+0{I$-I+{8PwF(pfE(Ra~0KohkTL1t6 literal 0 HcmV?d00001 diff --git 
a/opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AbstractAggregateImageProcessor/b1.np.npy b/opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AbstractAggregateImageProcessor/b1.np.npy new file mode 100644 index 0000000000000000000000000000000000000000..f061420f4b51476a5c729108095808471dab527f GIT binary patch literal 132 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlWC%^qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= YXCxM+0{I$-I+{8PwF(pfE(Ra~0KohkTL1t6 literal 0 HcmV?d00001 diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AbstractAggregateImageProcessor/b2.np.npy b/opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AbstractAggregateImageProcessor/b2.np.npy new file mode 100644 index 0000000000000000000000000000000000000000..f061420f4b51476a5c729108095808471dab527f GIT binary patch literal 132 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlWC%^qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= YXCxM+0{I$-I+{8PwF(pfE(Ra~0KohkTL1t6 literal 0 HcmV?d00001 From 4c140727ab30990db0a24c06f9d971de9c9da597 Mon Sep 17 00:00:00 2001 From: bbean Date: Thu, 18 Apr 2024 13:19:05 -0600 Subject: [PATCH 10/32] add AverageByGroupImageProcessor and associated unit test, fix CacheableImage --- opencsp/common/lib/cv/CacheableImage.py | 4 +- .../AverageByGroupImageProcessor.py | 51 +++++++ .../spot_analysis/image_processor/__init__.py | 4 + .../test/TestAverageByGroupImageProcessor.py | 133 ++++++++++++++++++ .../input/AverageByGroupImageProcessor/a1.npy | Bin 0 -> 132 bytes .../input/AverageByGroupImageProcessor/a2.npy | Bin 0 -> 132 bytes .../input/AverageByGroupImageProcessor/b1.npy | Bin 0 -> 132 bytes .../input/AverageByGroupImageProcessor/b2.npy | Bin 0 -> 132 bytes 8 files changed, 190 insertions(+), 2 deletions(-) create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/AverageByGroupImageProcessor.py create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/test/TestAverageByGroupImageProcessor.py create mode 100644 
opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AverageByGroupImageProcessor/a1.npy create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AverageByGroupImageProcessor/a2.npy create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AverageByGroupImageProcessor/b1.npy create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AverageByGroupImageProcessor/b2.npy diff --git a/opencsp/common/lib/cv/CacheableImage.py b/opencsp/common/lib/cv/CacheableImage.py index 7174a4833..6aaf2322f 100644 --- a/opencsp/common/lib/cv/CacheableImage.py +++ b/opencsp/common/lib/cv/CacheableImage.py @@ -87,9 +87,9 @@ def _load_image(im: str | np.ndarray) -> np.ndarray: return np.array(im) def __load_image(self): - if not self._array is None: + if self._array is not None: return self._load_image(self._array) - elif self.cache_path != None and ft.file_exists(self.cache_path): + elif self.cache_path is not None and ft.file_exists(self.cache_path): self.cached = True return self._load_image(self.cache_path) elif ft.file_exists(self.source_path): diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/AverageByGroupImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/AverageByGroupImageProcessor.py new file mode 100644 index 000000000..b67036b5a --- /dev/null +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/AverageByGroupImageProcessor.py @@ -0,0 +1,51 @@ +import dataclasses +from typing import Callable + +import numpy as np + +from opencsp.common.lib.cv.CacheableImage import CacheableImage +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable +from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractAggregateImageProcessor import ( + AbstractAggregateImageProcessor, +) +import opencsp.common.lib.tool.log_tools as lt + + +class AverageByGroupImageProcessor(AbstractAggregateImageProcessor): + def 
__init__( + self, + images_group_assigner: Callable[[SpotAnalysisOperable], int], + group_execution_trigger: Callable[[list[tuple[SpotAnalysisOperable, int]]], int | None] = None, + *vargs, + **kwargs, + ): + """ + Averages the values from groups of images into a single image. All images must have the same shape. + """ + super().__init__(images_group_assigner, group_execution_trigger, *vargs, **kwargs) + + def _execute_aggregate( + self, group: int, operables: list[SpotAnalysisOperable], is_last: bool + ) -> list[SpotAnalysisOperable]: + # initialize the image to return + averaged_image = np.array(operables[0].primary_image.nparray) + + # build the average image + for operable in operables[1:]: + other_image = operable.primary_image.nparray + if averaged_image.shape != other_image.shape: + lt.error_and_raise( + ValueError, + "Error in AverageByGroupImageProcessor._execute_aggregate(): " + + f"first image in group has a different shape {averaged_image.shape} than another image's shape {other_image.shape}. 
" + + f"First image is '{operables[0].primary_image_source_path}', current image is '{operable.primary_image_source_path}'.", + ) + averaged_image += other_image + averaged_image = averaged_image.astype(np.float_) + averaged_image /= len(operables) + + # build the return operable from the first operable + averaged_cacheable = CacheableImage(averaged_image, source_path=operables[0].primary_image.source_path) + ret = dataclasses.replace(operables[0], primary_image=averaged_cacheable) + + return [ret] diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py b/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py index 7fee1d939..188a263aa 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py @@ -4,6 +4,9 @@ from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( AbstractSpotAnalysisImagesProcessor, ) +from opencsp.common.lib.cv.spot_analysis.image_processor.AverageByGroupImageProcessor import ( + AverageByGroupImageProcessor, +) from opencsp.common.lib.cv.spot_analysis.image_processor.CroppingImageProcessor import CroppingImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.EchoImageProcessor import EchoImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.ExposureDetectionImageProcessor import ( @@ -19,6 +22,7 @@ __all__ = [ 'AbstractAggregateImageProcessor', 'AbstractSpotAnalysisImagesProcessor', + 'AverageByGroupImageProcessor', 'CroppingImageProcessor', 'EchoImageProcessor', 'ExposureDetectionImageProcessor', diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestAverageByGroupImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestAverageByGroupImageProcessor.py new file mode 100644 index 000000000..91d0c3b56 --- /dev/null +++ 
b/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestAverageByGroupImageProcessor.py @@ -0,0 +1,133 @@ +import os +import re +import unittest + +import numpy as np + +from opencsp.common.lib.cv.CacheableImage import CacheableImage +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable +from opencsp.common.lib.cv.spot_analysis.image_processor.AverageByGroupImageProcessor import ( + AverageByGroupImageProcessor, +) +import opencsp.common.lib.tool.file_tools as ft + + +class TestAverageByGroupImageProcessor(unittest.TestCase): + def setUp(self) -> None: + path, _, _ = ft.path_components(__file__) + self.data_dir = os.path.join(path, "data", "input", "AverageByGroupImageProcessor") + self.out_dir = os.path.join(path, "data", "output", "AverageByGroupImageProcessor") + ft.create_directories_if_necessary(self.data_dir) + ft.create_directories_if_necessary(self.out_dir) + + # # generate test data + # np.save(os.path.join(self.data_dir, "a1"), np.array([10]), allow_pickle=False) + # np.save(os.path.join(self.data_dir, "a2"), np.array([20]), allow_pickle=False) + # np.save(os.path.join(self.data_dir, "b1"), np.array([30]), allow_pickle=False) + # np.save(os.path.join(self.data_dir, "b2"), np.array([40]), allow_pickle=False) + self.a1_img = CacheableImage(os.path.join(self.data_dir, "a1.npy")) + self.a2_img = CacheableImage(os.path.join(self.data_dir, "a2.npy")) + self.b1_img = CacheableImage(os.path.join(self.data_dir, "b1.npy")) + self.b2_img = CacheableImage(os.path.join(self.data_dir, "b2.npy")) + self.a1 = SpotAnalysisOperable(self.a1_img, primary_image_source_path="a1.npy") + self.a2 = SpotAnalysisOperable(self.a2_img, primary_image_source_path="a2.npy") + self.b1 = SpotAnalysisOperable(self.b1_img, primary_image_source_path="b1.npy") + self.b2 = SpotAnalysisOperable(self.b2_img, primary_image_source_path="b2.npy") + + self.assigner = AverageByGroupImageProcessor.group_by_name(re.compile(r"^([a-z]).*")) + self.triggerer = 
AverageByGroupImageProcessor.group_trigger_on_change() + self.processor = AverageByGroupImageProcessor(self.assigner, self.triggerer) + + @staticmethod + def always_trigger(image_groups: list[tuple[SpotAnalysisOperable, int]], *vargs) -> int: + return image_groups[0][1] + + def test_single_image(self): + processor_always_trigger = AverageByGroupImageProcessor(self.assigner, self.always_trigger) + + operables = processor_always_trigger.process_image(self.a1) + self.assertEqual(1, len(operables)) + + operable = operables[0] + np.testing.assert_array_equal(operable.primary_image.nparray, self.a1_img.nparray.astype(np.float_)) + + def test_two_images_one_group(self): + operables = self.processor.process_image(self.a1) + self.assertEqual(0, len(operables)) + operables = self.processor.process_image(self.a2) + self.assertEqual(0, len(operables)) + operables = self.processor.process_image(self.b1) # changing names should trigger an execution + self.assertEqual(1, len(operables)) + + operable = operables[0] + np.testing.assert_array_equal(operable.primary_image.nparray, np.array([15], dtype=np.float_)) + + def test_two_images_two_groups(self): + operables = self.processor.process_image(self.a1) + self.assertEqual(0, len(operables)) + operables_a = self.processor.process_image(self.b1) # changing names should trigger an execution + self.assertEqual(1, len(operables_a)) + operables_b = self.processor.process_image(self.a2) # trigger again, to get a second pair of results + self.assertEqual(1, len(operables_b)) + + operable = operables_a[0] + np.testing.assert_array_equal(operable.primary_image.nparray, self.a1_img.nparray.astype(np.float_)) + operable = operables_b[0] + np.testing.assert_array_equal(operable.primary_image.nparray, self.b1_img.nparray.astype(np.float_)) + + def test_three_images_two_groups(self): + operables = self.processor.process_image(self.a1) + self.assertEqual(0, len(operables)) + operables = self.processor.process_image(self.a2) + self.assertEqual(0, 
len(operables)) + operables_a = self.processor.process_image(self.b1) # changing names should trigger an execution + self.assertEqual(1, len(operables_a)) + operables_b = self.processor.process_image(self.a1) # trigger again, to get a second pair of results + self.assertEqual(1, len(operables_b)) + + operable = operables_a[0] + np.testing.assert_array_equal(operable.primary_image.nparray, np.array([15], dtype=np.float_)) + operable = operables_b[0] + np.testing.assert_array_equal(operable.primary_image.nparray, self.b1_img.nparray.astype(np.float_)) + + def test_four_images_two_groups(self): + operables = self.processor.process_image(self.a1) + self.assertEqual(0, len(operables)) + operables = self.processor.process_image(self.a2) + self.assertEqual(0, len(operables)) + operables_a = self.processor.process_image(self.b1) # changing names should trigger an execution + self.assertEqual(1, len(operables_a)) + operables = self.processor.process_image(self.b2) + self.assertEqual(0, len(operables)) + operables_b = self.processor.process_image(self.a1) # trigger again, to get a second pair of results + self.assertEqual(1, len(operables_b)) + + operable = operables_a[0] + np.testing.assert_array_equal(operable.primary_image.nparray, np.array([15], dtype=np.float_)) + operable = operables_b[0] + np.testing.assert_array_equal(operable.primary_image.nparray, np.array([35], dtype=np.float_)) + + def test_four_images_alternating_group(self): + operables = self.processor.process_image(self.a1) + self.assertEqual(0, len(operables)) + operables_a1 = self.processor.process_image(self.b1) + self.assertEqual(1, len(operables_a1)) + operables_b1 = self.processor.process_image(self.a2) + self.assertEqual(1, len(operables_b1)) + operables_a2 = self.processor.process_image(self.b2) + self.assertEqual(1, len(operables_a2)) + operables_b2 = self.processor.process_image(self.a1) # trigger again, to get a second pair of results + self.assertEqual(1, len(operables_b2)) + + operable = 
operables_a1[0] + np.testing.assert_array_equal(operable.primary_image.nparray, self.a1_img.nparray.astype(np.float_)) + operable = operables_a2[0] + np.testing.assert_array_equal(operable.primary_image.nparray, self.a2_img.nparray.astype(np.float_)) + operable = operables_b1[0] + np.testing.assert_array_equal(operable.primary_image.nparray, self.b1_img.nparray.astype(np.float_)) + operable = operables_b2[0] + np.testing.assert_array_equal(operable.primary_image.nparray, self.b2_img.nparray.astype(np.float_)) + + +if __name__ == '__main__': + unittest.main() diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AverageByGroupImageProcessor/a1.npy b/opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AverageByGroupImageProcessor/a1.npy new file mode 100644 index 0000000000000000000000000000000000000000..6ce0aec16ac8506d669a865ce28e0aa33533ff68 GIT binary patch literal 132 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlWC%^qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= ZXCxM+0{I$-I+{8PwF(pfE-nTJ1^~e%8)X0h literal 0 HcmV?d00001 diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AverageByGroupImageProcessor/a2.npy b/opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AverageByGroupImageProcessor/a2.npy new file mode 100644 index 0000000000000000000000000000000000000000..24963b1697c846f1f5773056e27392edd1a65d19 GIT binary patch literal 132 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlWC%^qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= ZXCxM+0{I$-I+{8PwF(pfE)fO>1^~fK8*cyr literal 0 HcmV?d00001 diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AverageByGroupImageProcessor/b1.npy b/opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AverageByGroupImageProcessor/b1.npy new file mode 100644 index 0000000000000000000000000000000000000000..e0bab90ef9597caebd812cc5a2e7cf7c7a1e672d GIT binary patch literal 132 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlWC%^qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= 
ZXCxM+0{I$-I+{8PwF(pfE;$AU1^~fy8+iZ# literal 0 HcmV?d00001 diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AverageByGroupImageProcessor/b2.npy b/opencsp/common/lib/cv/spot_analysis/image_processor/test/data/input/AverageByGroupImageProcessor/b2.npy new file mode 100644 index 0000000000000000000000000000000000000000..b90dd6990372f70deb97ea61cf394bf5907951ea GIT binary patch literal 132 zcmbR27wQ`j$;eQ~P_3SlTAW;@Zl$1ZlWC%^qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= ZXCxM+0{I$-I+{8PwF(pfE)50-1^~gF8-oA< literal 0 HcmV?d00001 From 97d55eba233ffaf00f6e5f85ca8195887406a756 Mon Sep 17 00:00:00 2001 From: bbean Date: Thu, 18 Apr 2024 19:10:42 -0600 Subject: [PATCH 11/32] add write_json and read_json to file_tools.py --- opencsp/common/lib/opencsp_path/__init__.py | 8 +- opencsp/common/lib/tool/file_tools.py | 91 +++++++++++++++++++-- 2 files changed, 86 insertions(+), 13 deletions(-) diff --git a/opencsp/common/lib/opencsp_path/__init__.py b/opencsp/common/lib/opencsp_path/__init__.py index e17763616..73aabf869 100644 --- a/opencsp/common/lib/opencsp_path/__init__.py +++ b/opencsp/common/lib/opencsp_path/__init__.py @@ -47,8 +47,8 @@ def __load_settings_files(): The third key, then, is the setting's name. 
""" import os - import json from opencsp.common.lib.opencsp_path.opencsp_root_path import _opencsp_settings_dirs + import opencsp.common.lib.tool.file_tools as ft ret: dict[str, dict[str, dict[str, any]]] = {} @@ -58,10 +58,8 @@ def __load_settings_files(): # would use file_tools.directory_exists() except that I don't want to depend on any other part of opencsp if os.path.exists(settings_file_name_path_ext) and os.path.isfile(settings_file_name_path_ext): - with open(settings_file_name_path_ext, 'r') as fin: - lines = fin.readlines() - lines = map(lambda l: "" if l.strip().startswith("//") else l, lines) - settings = json.loads("\n".join(lines)) + settings_path, settings_name, settings_ext = ft.path_components(settings_file_name_path_ext) + settings = ft.read_json("global settings", settings_path, settings_name+settings_ext) # verify the types for the loaded settings err_msg_preamble = ( diff --git a/opencsp/common/lib/tool/file_tools.py b/opencsp/common/lib/tool/file_tools.py index fdde7faf8..dafc74370 100755 --- a/opencsp/common/lib/tool/file_tools.py +++ b/opencsp/common/lib/tool/file_tools.py @@ -8,13 +8,10 @@ import csv from datetime import datetime import glob -import random +import json import os import os.path - -# import pickle import shutil -import string import tempfile from typing import Optional @@ -1023,7 +1020,7 @@ def write_text_file( # Write output file. output_body_ext = convert_string_to_file_body(output_file_body) + '.txt' output_dir_body_ext = os.path.join(output_dir, output_body_ext) - if description != None: + if description is not None: print('Saving ' + description + ': ', output_dir_body_ext) with open(output_dir_body_ext, 'w') as output_stream: # Write strings. @@ -1111,7 +1108,7 @@ def to_csv( # Write output file. 
output_body_ext = convert_string_to_file_body(output_file_body) + '.csv' output_dir_body_ext = os.path.join(output_dir, output_body_ext) - if description != None: + if description is not None: lt.info('Saving ' + description + ': ' + output_dir_body_ext) output_stream = open(output_dir_body_ext, 'w') # Write heading lines. @@ -1143,7 +1140,7 @@ def read_csv_file(description, input_path, input_file_name, log_warning=True): return from_csv(description, input_path, input_file_name) -def from_csv(description: str, input_path: str, input_file_name_ext: str): +def from_csv(description: str | None, input_path: str, input_file_name_ext: str): """Reads a csv file and returns the rows, including the header row. Concise example:: @@ -1177,7 +1174,8 @@ def from_csv(description: str, input_path: str, input_file_name_ext: str): # However, this version works well with csv files where row lengths are irregular. # Consruct input file path and name. input_path_file = os.path.join(input_path, input_file_name_ext) - lt.info('Reading ' + description + ': ' + input_path_file + ' ...') + if description is not None: + lt.info('Reading ' + description + ': ' + input_path_file + ' ...') # Read csv file. data_rows: list[list[str]] = [] with open(input_path_file) as csvfile: @@ -1297,6 +1295,83 @@ def read_dict(input_dict_dir_body_ext): return output_dict +def write_json(description: str | None, output_dir: str, output_file_body: str, output_object: any, error_if_dir_not_exist=True): + """ + Like json.dump(output_object, output_file_body) but with a few more safety checks and automatic ".json" extension appending. + + Parameters + ---------- + description : str | None + A human-readable description of what this file is for, to be logged to the command line. If None, then no log is created. + output_dir : str + The destination directory for the file. + output_file_body : str + The destination name for the file. Should not include an extension. For example: "foo" is ok, but "foo.json" is not. 
+ output_object : any + The object to be saved to the given file. + error_if_dir_not_exist : bool, optional + If True, then first check if the given output_dir exists. By default True. + """ + # normalize input + output_name_ext = output_file_body + if not output_file_body.lower().endswith(".json"): + output_name_ext = output_name_ext + ".json" + output_path_name_ext = os.path.join(output_dir, output_name_ext) + + # validate input + if error_if_dir_not_exist: + if not directory_exists(output_dir): + lt.error_and_raise(FileNotFoundError, "Error in file_tools.write_json(): " + + f"the directory {output_dir} does not exist!") + if file_exists(output_path_name_ext): + lt.error_and_raise(FileExistsError, "Error in file_tools.write_json(): " + + f"the file {output_path_name_ext} already exists!") + + # save the file + if description != None: + print('Saving ' + description + ': ', output_path_name_ext) + with open(output_path_name_ext, "w") as fout: + json.dump(output_object, fout) + + +def read_json(description: str | None, input_dir: str, input_file_body_ext: str) -> any: + """ + Like json.loads(file_contents) but with more safety checks, and ignoring any lines starting with "//" as comments. + + Parameters + ---------- + description : str | None + A human-readable description of what this file is for, to be logged to the command line. If None, then doesn't log. + input_dir : str + The source directory where the file exists. + input_file_body_ext : str + The source name+ext of the file. For example "foo.json". + + Returns + ------- + any + The json-parsed contents of the file. + """ + # TODO should we switch to https://pypi.org/project/pyjson5/? I'm not doing that now, because it would mean another + # dependency, and this is good enough for now. 
+ + # normalize input + input_path_name_ext = os.path.join(input_dir, input_file_body_ext) + + # validate input + if not file_exists(input_path_name_ext): + lt.error_and_raise(FileNotFoundError, "Error in file_tools.read_json(): " + + f"the file {input_path_name_ext} does not exist!") + + # read the file + if description is not None: + lt.info('Reading ' + description + ': ' + input_path_name_ext + ' ...') + with open(input_path_name_ext, 'r') as fin: + lines = fin.readlines() + lines = map(lambda l: "" if l.strip().startswith("//") else l, lines) + return json.loads("\n".join(lines)) + + # PICKLE FILES # def write_pickle_file(description, # Explanatory string to include in notification output. None to skip. From 04ba15da031c89b92a6d26fce657165c363c803c Mon Sep 17 00:00:00 2001 From: bbean Date: Thu, 18 Apr 2024 19:11:01 -0600 Subject: [PATCH 12/32] more prototyping of the PeakFlux workflow --- contrib/app/SpotAnalysis/PeakFlux.py | 82 ++++++++++++++++++- .../FalseColorImageProcessor.py | 2 +- 2 files changed, 79 insertions(+), 5 deletions(-) diff --git a/contrib/app/SpotAnalysis/PeakFlux.py b/contrib/app/SpotAnalysis/PeakFlux.py index 84e9dcdfd..812497ced 100644 --- a/contrib/app/SpotAnalysis/PeakFlux.py +++ b/contrib/app/SpotAnalysis/PeakFlux.py @@ -1,7 +1,12 @@ import json import os +import re + +import numpy as np import opencsp.common.lib.cv.SpotAnalysis as sa +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisImagesStream import ImageType +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable import opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperableAttributeParser as saoap from opencsp.common.lib.cv.spot_analysis.image_processor import * import opencsp.common.lib.tool.file_tools as ft @@ -34,11 +39,36 @@ def __init__(self, indir: str, outdir: str, experiment_name: str, settings_path_ self.experiment_name = experiment_name self.settings_path_name_ext = settings_path_name_ext - with open(settings_path_name_ext, 'r') 
as fin: - settings_dict = json.load(fin) + settings_path, settings_name, settings_ext = ft.path_components(self.settings_path_name_ext) + settings_dict = json.load("PeakFlux settings", settings_path, settings_name+settings_ext) self.crop_box: list[int] = settings_dict['crop_box'] - - self.image_processors: list[AbstractSpotAnalysisImagesProcessor] = [CroppingImageProcessor(*self.crop_box)] + self.bcs_pixel: list[int] = settings_dict['bcs_pixel_location'] + self.heliostate_name_pattern = re.compile(settings_dict['heliostat_name_pattern']) + + group_assigner = AverageByGroupImageProcessor.group_by_name(re.compile(r"(_off)?( Raw)")) + group_trigger = AverageByGroupImageProcessor.group_trigger_on_change() + supporting_images_map = { + ImageType.PRIMARY: lambda operable, operables: "off" not in operable.primary_image_source_path, + ImageType.NULL: lambda operable, operables: "off" in operable.primary_image_source_path, + } + # max_pixel_value_locator = AnnotationImageProcessor.AnnotationEngine( + # feature_locator=lambda operable: np.argmax(operable.primary_image.ndarray), + # color='k' + # ) + # bcs_locator = AnnotationImageProcessor.AnnotationEngine( + # feature_locator=lambda operable: self.bcs_pixel, + # color='k' + # ) + + self.image_processors: list[AbstractSpotAnalysisImagesProcessor] = [ + CroppingImageProcessor(*self.crop_box), + AverageByGroupImageProcessor(group_assigner, group_trigger), + EchoImageProcessor(), + # SupportingImagesCollectorImageProcessor(group_assigner, supporting_images_map), + # NullImageSubtractionImageProcessor(), + # FilterImageProcessor(filter="box", diameter=3), + # AnnotationImageProcessor(max_pixel_value_locator, bcs_locator) + ] self.spot_analysis = sa.SpotAnalysis( experiment_name, self.image_processors, save_dir=outdir, save_overwrite=True ) @@ -65,6 +95,50 @@ def run(self): # TODO append these results to the csv file +# class PeakFluxOffsetImageProcessor(AbstractSpotAnalysisImagesProcessor): +# def __init__(self, 
outfile_path_name_ext: str, max_pixel_value_locator: AnnotationImageProcessor.AnnotationEngine, bcs_pixel_location: tuple[int, int], heliostat_name_pattern: re.Pattern): +# super().__init__("PeakFluxOffsetImageProcessor") + +# self.outfile_path_name_ext = outfile_path_name_ext +# self.max_pixel_value_locator = max_pixel_value_locator +# self.bcs_pixel_location = bcs_pixel_location +# self.heliostat_name_pattern = heliostat_name_pattern + +# with open(outfile_path_name_ext, "w") as fout: +# fout.writelines(["Heliostat,Peak Flux Pixel,Pixels Offset"]) + +# def _execute(self, operable: sa.SpotAnalysisOperable, is_last: bool) -> list[SpotAnalysisOperable]: +# # get the heliostat name +# names_to_search = [ +# operable.primary_image_source_path, +# operable.primary_image.source_path, +# operable.primary_image.cache_path +# ] + +# heliostat_name = None +# for name in names_to_search: +# m = self.heliostat_name_pattern.search(name) +# if m is not None: +# if len(m.groups()) > 0: +# heliostat_name = "".join(m.groups()) +# break + +# if heliostat_name is None: +# lt.error("Error in PeakFluxOffsetImageProcessor._execute(): " + +# f"failed to find heliostat name in {names_to_search}") +# return [operable] + +# # get the peak pixel location +# peak_flux_pixel = max_pixel_value_locator.feature_locator(operable.primary_image.nparray)[0] +# pixels_offset = peak_flux_pixel - np.array(self.bcs_pixel_location) + +# # write the results +# peak_flux_pixel_str = f"{peak_flux_pixel[0]} {peak_flux_pixel[1]}" +# pixels_offset_str = f"{pixels_offset[0]} {pixels_offset[1]}" +# with open(self.outfile_path_name_ext, "a") as fout: +# fout.writelines([f"{heliostat_name},{peak_flux_pixel_str},{pixels_offset_str}"]) + + if __name__ == "__main__": import argparse diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/FalseColorImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/FalseColorImageProcessor.py index 19758899d..ecd5b4392 100644 --- 
a/opencsp/common/lib/cv/spot_analysis/image_processor/FalseColorImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/FalseColorImageProcessor.py @@ -19,7 +19,7 @@ def __init__(self, map_type='human', opencv_map=cv2.COLORMAP_JET): ---------- map_type : str, optional This determines the number of visible colors. Options are 'opencv' - (256), 'human' (TODO), 'large' (1530). Large has the most possible + (256), 'human' (1020), 'large' (1530). Large has the most possible colors. Human reduces the number of greens and reds, since those are difficult to discern. Default is 'human'. opencv_map : opencv map type, optional From 4237aa41244dff61537778672e13ee9aa691819d Mon Sep 17 00:00:00 2001 From: bbean Date: Thu, 18 Apr 2024 20:50:52 -0600 Subject: [PATCH 13/32] fix AbstractAggregateImageProcessor and AverageByGroupImageProcessor, add initial_min and initial_max to PopulationStatisticsImageProcessor --- contrib/app/SpotAnalysis/PeakFlux.py | 18 ++++++++--- opencsp/common/lib/cv/SpotAnalysis.py | 4 +-- .../AbstractAggregateImageProcessor.py | 5 +-- .../AverageByGroupImageProcessor.py | 9 ++++-- .../PopulationStatisticsImageProcessor.py | 32 +++++++++++++++++-- 5 files changed, 55 insertions(+), 13 deletions(-) diff --git a/contrib/app/SpotAnalysis/PeakFlux.py b/contrib/app/SpotAnalysis/PeakFlux.py index 812497ced..e92985c24 100644 --- a/contrib/app/SpotAnalysis/PeakFlux.py +++ b/contrib/app/SpotAnalysis/PeakFlux.py @@ -40,7 +40,7 @@ def __init__(self, indir: str, outdir: str, experiment_name: str, settings_path_ self.settings_path_name_ext = settings_path_name_ext settings_path, settings_name, settings_ext = ft.path_components(self.settings_path_name_ext) - settings_dict = json.load("PeakFlux settings", settings_path, settings_name+settings_ext) + settings_dict = ft.read_json("PeakFlux settings", settings_path, settings_name+settings_ext) self.crop_box: list[int] = settings_dict['crop_box'] self.bcs_pixel: list[int] = 
settings_dict['bcs_pixel_location'] self.heliostate_name_pattern = re.compile(settings_dict['heliostat_name_pattern']) @@ -64,6 +64,8 @@ def __init__(self, indir: str, outdir: str, experiment_name: str, settings_path_ CroppingImageProcessor(*self.crop_box), AverageByGroupImageProcessor(group_assigner, group_trigger), EchoImageProcessor(), + PopulationStatisticsImageProcessor(initial_min=0, initial_max=255), + FalseColorImageProcessor(), # SupportingImagesCollectorImageProcessor(group_assigner, supporting_images_map), # NullImageSubtractionImageProcessor(), # FilterImageProcessor(filter="box", diameter=3), @@ -73,8 +75,14 @@ def __init__(self, indir: str, outdir: str, experiment_name: str, settings_path_ experiment_name, self.image_processors, save_dir=outdir, save_overwrite=True ) + filenames = ft.files_in_directory_by_extension(self.indir, [".jpg"])[".jpg"] + source_path_name_exts = [os.path.join(self.indir, filename) for filename in filenames] + self.spot_analysis.set_primary_images(source_path_name_exts) + def run(self): # process all images from indir + i = iter(self.spot_analysis) + next(i) for result in self.spot_analysis: # save the processed image save_path = self.spot_analysis.save_image( @@ -92,8 +100,6 @@ def run(self): # condensed csv file. 
parser = saoap.SpotAnalysisOperableAttributeParser(result, self.spot_analysis) - # TODO append these results to the csv file - # class PeakFluxOffsetImageProcessor(AbstractSpotAnalysisImagesProcessor): # def __init__(self, outfile_path_name_ext: str, max_pixel_value_locator: AnnotationImageProcessor.AnnotationEngine, bcs_pixel_location: tuple[int, int], heliostat_name_pattern: re.Pattern): @@ -119,8 +125,9 @@ def run(self): # for name in names_to_search: # m = self.heliostat_name_pattern.search(name) # if m is not None: -# if len(m.groups()) > 0: -# heliostat_name = "".join(m.groups()) +# groups = list(filter(lambda s: s is not None, m.groups())) +# if len(groups) > 0: +# heliostat_name = "".join(groups) # break # if heliostat_name is None: @@ -153,6 +160,7 @@ def run(self): # create the output directory ft.create_directories_if_necessary(args.outdir) + ft.delete_files_in_directory(args.outdir, "*") # create the log file log_path_name_ext = os.path.join(args.outdir, "PeakFlux_" + tdt.current_date_time_string_forfile() + ".log") diff --git a/opencsp/common/lib/cv/SpotAnalysis.py b/opencsp/common/lib/cv/SpotAnalysis.py index cb847aed7..a76129965 100644 --- a/opencsp/common/lib/cv/SpotAnalysis.py +++ b/opencsp/common/lib/cv/SpotAnalysis.py @@ -249,11 +249,11 @@ def process_next(self): The processed primary image and other associated data. None if done processing. 
""" - if self._results_iter == None: + if self._results_iter is None: self._results_iter = iter(self.image_processors[-1]) # Release memory from the previous result - if self._prev_result != None: + if self._prev_result is not None: self.image_processors[-1].cache_image_to_disk_as_necessary(self._prev_result) self._prev_result = None diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractAggregateImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractAggregateImageProcessor.py index a7bfafad9..96e91d1c5 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractAggregateImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractAggregateImageProcessor.py @@ -120,7 +120,8 @@ def group_by_name_inner(operable: SpotAnalysisOperable, name_pattern: re.Pattern m = name_pattern.search(name) if m is None: continue - if len(m.groups()) == 0: + groups = list(filter(lambda s: s is not None, m.groups())) + if len(groups) == 0: lt.debug( "In AbstractAggregateImageProcessor.group_by_name(): " + f"no groups found for pattern {name_pattern} when trying to match against name {name}" @@ -128,7 +129,7 @@ def group_by_name_inner(operable: SpotAnalysisOperable, name_pattern: re.Pattern continue # get the name match - group_str = "".join(m.groups()) + group_str = "".join(groups) # return the index of the existing group, or add a new group if group_str in groups: diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/AverageByGroupImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/AverageByGroupImageProcessor.py index b67036b5a..1c66d9508 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/AverageByGroupImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/AverageByGroupImageProcessor.py @@ -27,8 +27,8 @@ def __init__( def _execute_aggregate( self, group: int, operables: list[SpotAnalysisOperable], is_last: bool ) -> 
list[SpotAnalysisOperable]: - # initialize the image to return - averaged_image = np.array(operables[0].primary_image.nparray) + # Initialize the image to return. + averaged_image = np.array(operables[0].primary_image.nparray).astype(np.int64) # build the average image for operable in operables[1:]: @@ -43,9 +43,14 @@ def _execute_aggregate( averaged_image += other_image averaged_image = averaged_image.astype(np.float_) averaged_image /= len(operables) + averaged_image = averaged_image.astype(operables[0].primary_image.nparray.dtype) + + # collect the list of images that were averaged + image_names = [operable.primary_image_source_path for operable in operables] # build the return operable from the first operable averaged_cacheable = CacheableImage(averaged_image, source_path=operables[0].primary_image.source_path) ret = dataclasses.replace(operables[0], primary_image=averaged_cacheable) + ret.image_processor_notes.append(("AverageByGroupImageProcessor", f"averaged_images: {image_names}")) return [ret] diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/PopulationStatisticsImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/PopulationStatisticsImageProcessor.py index 296a545d2..10a4d3581 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/PopulationStatisticsImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/PopulationStatisticsImageProcessor.py @@ -20,7 +20,33 @@ class _RollingWindowOperableStats: class PopulationStatisticsImageProcessor(AbstractSpotAnalysisImagesProcessor): - def __init__(self, min_pop_size=1, target_rolling_window_size=1): + def __init__(self, min_pop_size=1, target_rolling_window_size=1, initial_min: int = None, initial_max: int = None): + """ + Generates statistics for groups of images. + + A group of images is held until enough have been seen to generate statistics off of. 
Once the required number of + images has been reached, then images will start being released one at a time with the statistics for the group + up until that point. + + Some use cases for this class could include automatically determining the maximum pixel value during streaming + to select an appropriate bit depth, using the rolling average for exposure calibration, or leveling all images + by subtracting the gloal pixel minimum. + + Parameters + ---------- + min_pop_size : int, optional + The minimum number of images that must be seen before any images (and their statistics) are released to the + next image processor. -1 to wait for all images. By default 1 + target_rolling_window_size : int, optional + Number of images used to determine rolling averages. The first N-1 images are not held back while waiting + for this target. By default 1 + initial_min : int, optional + Initial value used to estimate the population minimum. If None, then the minimum of the first image seen is + used. By default None + initial_max : int, optional + Initial value used to estimage the population maximum. If None, then the maximum of the first image seen is + used. By default None + """ super().__init__(self.__class__.__name__) if min_pop_size > 0: @@ -42,6 +68,8 @@ def __init__(self, min_pop_size=1, target_rolling_window_size=1): self.curr_stats: SpotAnalysisPopulationStatistics = None """ The current statistics, which get updated with each image seen. None if min_pop_size hasn't been met yet. """ + self.initial_min = [initial_min] if initial_min is not None else None + self.initial_max = [initial_max] if initial_max is not None else None self.initial_operables: list[SpotAnalysisOperable] = [] """ The initial operables gathered while waiting for min_pop_size. 
""" self.rolling_window_operables: list[SpotAnalysisOperable] = [] @@ -131,7 +159,7 @@ def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAn pass # We've reached the minimum population size (or the end of the images stream, as indicated by is_last). - self.curr_stats = SpotAnalysisPopulationStatistics() + self.curr_stats = SpotAnalysisPopulationStatistics(minf=self.initial_min, maxf=self.initial_max) for prior_operable in self.initial_operables: self.curr_stats = self._calculate_rolling_window( self.curr_stats, prior_operable, self.rolling_window_operables From a5ee42b3f80837ac2591d1edc74add91f1364aee Mon Sep 17 00:00:00 2001 From: bbean Date: Thu, 18 Apr 2024 20:51:20 -0600 Subject: [PATCH 14/32] fix SpotAnalysisImageAttributeParser by fixing ImageAttributeParser --- opencsp/common/lib/render/ImageAttributeParser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opencsp/common/lib/render/ImageAttributeParser.py b/opencsp/common/lib/render/ImageAttributeParser.py index 4289e78c9..dab712209 100644 --- a/opencsp/common/lib/render/ImageAttributeParser.py +++ b/opencsp/common/lib/render/ImageAttributeParser.py @@ -72,7 +72,7 @@ def __init__( except: pass if self._previous_attr != None: - prev_image_attr: ImageAttributeParser = self._previous_attr.get_parser(ImageAttributeParser) + prev_image_attr: ImageAttributeParser = self._previous_attr.get_parser(self.__class__) # Sanity check: are we trying to overwrite the "original_image_source" value? 
if prev_image_attr != None: From 4a5841f2ebead6b360d2094462529f62bcfcb87a Mon Sep 17 00:00:00 2001 From: bbean Date: Fri, 19 Apr 2024 12:41:24 -0600 Subject: [PATCH 15/32] more debugging options --- .../image_processor/AverageByGroupImageProcessor.py | 1 + .../image_processor/EchoImageProcessor.py | 10 ++++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/AverageByGroupImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/AverageByGroupImageProcessor.py index 1c66d9508..061ca24ac 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/AverageByGroupImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/AverageByGroupImageProcessor.py @@ -28,6 +28,7 @@ def _execute_aggregate( self, group: int, operables: list[SpotAnalysisOperable], is_last: bool ) -> list[SpotAnalysisOperable]: # Initialize the image to return. + lt.debug(f"In AverageByGroupImageProcessor._execute_aggregate(): averaging {len(operables)} images") averaged_image = np.array(operables[0].primary_image.nparray).astype(np.int64) # build the average image diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/EchoImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/EchoImageProcessor.py index c09ca61ba..acb61458e 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/EchoImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/EchoImageProcessor.py @@ -7,9 +7,15 @@ class EchoImageProcessor(AbstractSpotAnalysisImagesProcessor): - def __init__(self): + def __init__(self, log_level=lt.log.INFO, prefix=""): super().__init__(self.__class__.__name__) + self.log_level = log_level + self.prefix = prefix + + self.logger = lt.get_log_method_for_level(self.log_level) + def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAnalysisOperable]: - lt.debug(f"Processing image {operable.primary_image_name_for_logs}") + 
self.logger(f"{self.prefix}Processing image {operable.primary_image_name_for_logs}") + return [operable] From 60093dbc1e7f3293ab3582094df849cfd6e17ed1 Mon Sep 17 00:00:00 2001 From: bbean Date: Fri, 19 Apr 2024 12:43:28 -0600 Subject: [PATCH 16/32] add SupportingImagesCollectorImageProcessor, other small code and formatting fixes --- contrib/app/SpotAnalysis/PeakFlux.py | 10 +- .../spot_analysis/SpotAnalysisImagesStream.py | 7 + ...SupportingImagesCollectorImageProcessor.py | 145 ++++++++++++++++++ .../spot_analysis/image_processor/__init__.py | 4 + opencsp/common/lib/opencsp_path/__init__.py | 2 +- opencsp/common/lib/tool/file_tools.py | 19 ++- 6 files changed, 173 insertions(+), 14 deletions(-) create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/SupportingImagesCollectorImageProcessor.py diff --git a/contrib/app/SpotAnalysis/PeakFlux.py b/contrib/app/SpotAnalysis/PeakFlux.py index e92985c24..e3974217c 100644 --- a/contrib/app/SpotAnalysis/PeakFlux.py +++ b/contrib/app/SpotAnalysis/PeakFlux.py @@ -40,7 +40,7 @@ def __init__(self, indir: str, outdir: str, experiment_name: str, settings_path_ self.settings_path_name_ext = settings_path_name_ext settings_path, settings_name, settings_ext = ft.path_components(self.settings_path_name_ext) - settings_dict = ft.read_json("PeakFlux settings", settings_path, settings_name+settings_ext) + settings_dict = ft.read_json("PeakFlux settings", settings_path, settings_name + settings_ext) self.crop_box: list[int] = settings_dict['crop_box'] self.bcs_pixel: list[int] = settings_dict['bcs_pixel_location'] self.heliostate_name_pattern = re.compile(settings_dict['heliostat_name_pattern']) @@ -64,11 +64,11 @@ def __init__(self, indir: str, outdir: str, experiment_name: str, settings_path_ CroppingImageProcessor(*self.crop_box), AverageByGroupImageProcessor(group_assigner, group_trigger), EchoImageProcessor(), - PopulationStatisticsImageProcessor(initial_min=0, initial_max=255), - FalseColorImageProcessor(), - # 
SupportingImagesCollectorImageProcessor(group_assigner, supporting_images_map), + SupportingImagesCollectorImageProcessor(supporting_images_map), # NullImageSubtractionImageProcessor(), # FilterImageProcessor(filter="box", diameter=3), + PopulationStatisticsImageProcessor(initial_min=0, initial_max=255), + FalseColorImageProcessor(), # AnnotationImageProcessor(max_pixel_value_locator, bcs_locator) ] self.spot_analysis = sa.SpotAnalysis( @@ -81,8 +81,6 @@ def __init__(self, indir: str, outdir: str, experiment_name: str, settings_path_ def run(self): # process all images from indir - i = iter(self.spot_analysis) - next(i) for result in self.spot_analysis: # save the processed image save_path = self.spot_analysis.save_image( diff --git a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisImagesStream.py b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisImagesStream.py index 479ef9d69..2a9751959 100644 --- a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisImagesStream.py +++ b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisImagesStream.py @@ -1,5 +1,6 @@ from collections.abc import Iterator from enum import Enum +import functools from opencsp.common.lib.cv.CacheableImage import CacheableImage from opencsp.common.lib.cv.spot_analysis.ImagesIterable import ImagesIterable @@ -8,6 +9,7 @@ import opencsp.common.lib.tool.typing_tools as tt +@functools.total_ordering class ImageType(Enum): PRIMARY = 1 REFERENCE = 2 @@ -15,6 +17,11 @@ class ImageType(Enum): COMPARISON = 4 BACKGROUND_MASK = 5 + def __lt__(self, other): + if isinstance(other, self.__class__): + return self.value < other.value + raise NotImplementedError + class SpotAnalysisImagesStream(Iterator[dict[ImageType, CacheableImage]]): tt.strict_types diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/SupportingImagesCollectorImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/SupportingImagesCollectorImageProcessor.py new file mode 100644 index 000000000..3d8a4fa65 --- /dev/null +++ 
b/opencsp/common/lib/cv/spot_analysis/image_processor/SupportingImagesCollectorImageProcessor.py @@ -0,0 +1,145 @@ +import dataclasses +from typing import Callable + +from opencsp.common.lib.cv.CacheableImage import CacheableImage +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisImagesStream import ImageType +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable +from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( + AbstractSpotAnalysisImagesProcessor, +) +import opencsp.common.lib.tool.log_tools as lt + + +class NoPrimaryImageException(Exception): + def __init__(self, msg: str): + super().__init__(msg) + + +class SupportingImagesCollectorImageProcessor(AbstractSpotAnalysisImagesProcessor): + def __init__( + self, + supporting_images_map: dict[ + ImageType, Callable[[SpotAnalysisOperable, dict[ImageType, SpotAnalysisOperable]], bool] + ], + ): + """ + Collects primary and supporting images together from a stream of mixed images. + + The basic algorithm is pretty simple: + + 1. catagorize images based on the given supporting_images_map + 2. if the image type isn't already in the internal list, then add it and go back to step 1 + 3. collect all images in the internal list together as a single operable + 4. clear the internal list + 5. start a new internal list with the current image + 6. return the new operable, go back to step 1 + """ + super().__init__(self.__class__.__name__) + + # register inputs + self.supporting_images_map = supporting_images_map + + # list of images to be collected together + self.collection: dict[ImageType, SpotAnalysisOperable] = {} + self.prev_image_types: list[ImageType] = None + + def _update_collection(self) -> SpotAnalysisOperable: + # 3. Turn the current collection into a new operable + # 3.1. Check that there is a primary image + if ImageType.PRIMARY not in self.collection: + raise NoPrimaryImageException("No primary image registerd. 
Failed to update collection.") + primary = self.collection[ImageType.PRIMARY] + + # 3.2. Check that we have as many image types as we expect + image_types = sorted(list(self.collection.keys())) + expected_image_types = sorted(list(self.supporting_images_map.keys())) + if self.prev_image_types is not None: + if image_types != self.prev_image_types: + lt.warning( + "Warning in SupportingImagesCollectorImageProcessor._update_collection(): " + + f"expected to find images with types {self.prev_image_types}, but instead found {image_types}" + ) + if image_types != expected_image_types: + lt.debug( + "In SupportingImagesCollectorImageProcessor._update_collection(): " + + f"expected image types from input map {expected_image_types}, found image types {image_types}" + ) + self.prev_image_types = image_types + + # 3.2. We have a primary, create the new operable + lt.debug( + "In SupportingImagesCollectorImageProcessor._update_collection(): " + + f"collecting images {sorted(list(self.collection.keys()))}" + ) + supporting_images: dict[ImageType, CacheableImage] = {} + for it in self.collection: + if it != ImageType.PRIMARY: + supporting_images[it] = self.collection[it].primary_image + new_operable = dataclasses.replace(primary, supporting_images=supporting_images) + + return new_operable + + def _execute(self, curr_operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAnalysisOperable]: + # 1. 
get the image type + curr_image_type = None + for it in self.supporting_images_map: + if self.supporting_images_map[it](curr_operable, self.collection): + curr_image_type = it + if curr_image_type is None: + lt.error_and_raise( + ValueError, + "Error in SupportingImagesCollectorImageProcessor._execute(): " + + f"unable to determine image type for operable {curr_operable.primary_image_source_path} ({curr_operable})", + ) + + # Handle is_last edge case + if is_last: + # add this operable to the collection, but first check if there's room in the collection + if curr_image_type in self.collection: + lt.warning( + "Warning in SupportingImagesCollectorImageProcessor._update_collection(): " + + "mismatched image types. " + + f"Removing {curr_image_type} '{self.collection[curr_image_type].primary_image_source_path}' and replacing it with '{curr_operable.primary_image_source_path}'" + ) + self.collection[curr_image_type] = curr_operable + + # update the collection + try: + new_operable = self._update_collection() + except NoPrimaryImageException as ex: + lt.warning(repr(ex)) + lt.warning( + "Warning in SupportingImagesCollectorImageProcessor._update_collection(): " + + f"discarding {len(self.collection)} operables that don't have a matching primary image." + ) + + # 6. Return the new operable + return [new_operable] + + # 2. If this image type isn't already in the collection, then add it and continue. + elif curr_image_type not in self.collection: + self.collection[curr_image_type] = curr_operable + return [] + + # Otherwise there is a duplicate. + else: + try: + new_operable = self._update_collection() + except NoPrimaryImageException as ex: + lt.warning(repr(ex)) + lt.warning( + "Warning in SupportingImagesCollectorImageProcessor._update_collection(): " + + "no PRIMARY image is available, so we can't create new a new operable. 
" + + f"Removing {curr_image_type} '{self.collection[curr_image_type].primary_image_source_path}' and replacing it with '{curr_operable.primary_image_source_path}'" + ) + self.collection[curr_image_type] = curr_operable + return [] + + # 4. Clear the collection + self.collection.clear() + + # 5. Start a new collection with the current operable + self.collection[curr_image_type] = curr_operable + + # 6. Return the new operable + return [new_operable] diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py b/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py index 188a263aa..4cf71733a 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py @@ -17,6 +17,9 @@ from opencsp.common.lib.cv.spot_analysis.image_processor.PopulationStatisticsImageProcessor import ( PopulationStatisticsImageProcessor, ) +from opencsp.common.lib.cv.spot_analysis.image_processor.SupportingImagesCollectorImageProcessor import ( + SupportingImagesCollectorImageProcessor, +) # Make these classes available when importing cv.spot_analysis.image_processor.* __all__ = [ @@ -29,4 +32,5 @@ 'FalseColorImageProcessor', 'LogScaleImageProcessor', 'PopulationStatisticsImageProcessor', + 'SupportingImagesCollectorImageProcessor', ] diff --git a/opencsp/common/lib/opencsp_path/__init__.py b/opencsp/common/lib/opencsp_path/__init__.py index 73aabf869..d83bb27a6 100644 --- a/opencsp/common/lib/opencsp_path/__init__.py +++ b/opencsp/common/lib/opencsp_path/__init__.py @@ -59,7 +59,7 @@ def __load_settings_files(): # would use file_tools.directory_exists() except that I don't want to depend on any other part of opencsp if os.path.exists(settings_file_name_path_ext) and os.path.isfile(settings_file_name_path_ext): settings_path, settings_name, settings_ext = ft.path_components(settings_file_name_path_ext) - settings = ft.read_json("global settings", settings_path, settings_name+settings_ext) + 
settings = ft.read_json("global settings", settings_path, settings_name + settings_ext) # verify the types for the loaded settings err_msg_preamble = ( diff --git a/opencsp/common/lib/tool/file_tools.py b/opencsp/common/lib/tool/file_tools.py index dafc74370..430b2e874 100755 --- a/opencsp/common/lib/tool/file_tools.py +++ b/opencsp/common/lib/tool/file_tools.py @@ -1295,7 +1295,9 @@ def read_dict(input_dict_dir_body_ext): return output_dict -def write_json(description: str | None, output_dir: str, output_file_body: str, output_object: any, error_if_dir_not_exist=True): +def write_json( + description: str | None, output_dir: str, output_file_body: str, output_object: any, error_if_dir_not_exist=True +): """ Like json.dump(output_object, output_file_body) but with a few more safety checks and automatic ".json" extension appending. @@ -1321,11 +1323,13 @@ def write_json(description: str | None, output_dir: str, output_file_body: str, # validate input if error_if_dir_not_exist: if not directory_exists(output_dir): - lt.error_and_raise(FileNotFoundError, "Error in file_tools.write_json(): " + - f"the directory {output_dir} does not exist!") + lt.error_and_raise( + FileNotFoundError, "Error in file_tools.write_json(): " + f"the directory {output_dir} does not exist!" + ) if file_exists(output_path_name_ext): - lt.error_and_raise(FileExistsError, "Error in file_tools.write_json(): " + - f"the file {output_path_name_ext} already exists!") + lt.error_and_raise( + FileExistsError, "Error in file_tools.write_json(): " + f"the file {output_path_name_ext} already exists!" 
+ ) # save the file if description != None: @@ -1360,8 +1364,9 @@ def read_json(description: str | None, input_dir: str, input_file_body_ext: str) # validate input if not file_exists(input_path_name_ext): - lt.error_and_raise(FileNotFoundError, "Error in file_tools.read_json(): " + - f"the file {input_path_name_ext} does not exist!") + lt.error_and_raise( + FileNotFoundError, "Error in file_tools.read_json(): " + f"the file {input_path_name_ext} does not exist!" + ) # read the file if description is not None: From ad46e21eaa8e086564fb7da0b0b4bdbf2d26163e Mon Sep 17 00:00:00 2001 From: bbean Date: Fri, 19 Apr 2024 13:54:12 -0600 Subject: [PATCH 17/32] add NullImageSubtractionImageProcessor.py --- contrib/app/SpotAnalysis/PeakFlux.py | 2 +- .../NullImageSubtractionImageProcessor.py | 36 +++++++++++++++++++ .../spot_analysis/image_processor/__init__.py | 2 ++ 3 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/NullImageSubtractionImageProcessor.py diff --git a/contrib/app/SpotAnalysis/PeakFlux.py b/contrib/app/SpotAnalysis/PeakFlux.py index e3974217c..8128d46b3 100644 --- a/contrib/app/SpotAnalysis/PeakFlux.py +++ b/contrib/app/SpotAnalysis/PeakFlux.py @@ -65,7 +65,7 @@ def __init__(self, indir: str, outdir: str, experiment_name: str, settings_path_ AverageByGroupImageProcessor(group_assigner, group_trigger), EchoImageProcessor(), SupportingImagesCollectorImageProcessor(supporting_images_map), - # NullImageSubtractionImageProcessor(), + NullImageSubtractionImageProcessor(), # FilterImageProcessor(filter="box", diameter=3), PopulationStatisticsImageProcessor(initial_min=0, initial_max=255), FalseColorImageProcessor(), diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/NullImageSubtractionImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/NullImageSubtractionImageProcessor.py new file mode 100644 index 000000000..d30e97dcc --- /dev/null +++ 
b/opencsp/common/lib/cv/spot_analysis/image_processor/NullImageSubtractionImageProcessor.py @@ -0,0 +1,36 @@ +import dataclasses + +import numpy as np + +from opencsp.common.lib.cv.CacheableImage import CacheableImage +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisImagesStream import ImageType +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable +from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( + AbstractSpotAnalysisImagesProcessor, +) +import opencsp.common.lib.tool.log_tools as lt + + +class NullImageSubtractionImageProcessor(AbstractSpotAnalysisImagesProcessor): + def __init__(self): + super().__init__(self.__class__.__name__) + + def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAnalysisOperable]: + # validate the input + if (ImageType.NULL not in operable.supporting_images) or (operable.supporting_images[ImageType.NULL] is None): + lt.warning("Warning in NullImageSubtractionImageProcessor._execute(): " + + f"skipping subtraction of null image for {operable.primary_image_source_path}. " + "Given image does not have an associated NULL supporting image.") + return [operable] + + # Get the primary image with the null image subtracted. + # We convert the primary image to type int64 so that we have negative values available. If left as a uint8, then + # subtracting below 0 would cause values to wrap around, instead. 
+ primary_image = operable.primary_image.nparray.astype(np.int64) + null_image = operable.supporting_images[ImageType.NULL].nparray.astype(np.int64) + new_primary_image = np.clip(primary_image - null_image, 0, np.max(primary_image)) + new_primary_image = new_primary_image.astype(operable.primary_image.nparray.dtype) + + # Create and return the updated operable + new_primary_cacheable = CacheableImage(new_primary_image, source_path=operable.primary_image.source_path) + new_operable = dataclasses.replace(operable, primary_image=new_primary_cacheable) + return [new_operable] diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py b/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py index 4cf71733a..0b22d1693 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py @@ -14,6 +14,7 @@ ) from opencsp.common.lib.cv.spot_analysis.image_processor.FalseColorImageProcessor import FalseColorImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.LogScaleImageProcessor import LogScaleImageProcessor +from opencsp.common.lib.cv.spot_analysis.image_processor.NullImageSubtractionImageProcessor import NullImageSubtractionImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.PopulationStatisticsImageProcessor import ( PopulationStatisticsImageProcessor, ) @@ -31,6 +32,7 @@ 'ExposureDetectionImageProcessor', 'FalseColorImageProcessor', 'LogScaleImageProcessor', + 'NullImageSubtractionImageProcessor', 'PopulationStatisticsImageProcessor', 'SupportingImagesCollectorImageProcessor', ] From de9f9788081caa1fb3a26969334bf2321687825d Mon Sep 17 00:00:00 2001 From: bbean Date: Fri, 19 Apr 2024 16:18:41 -0600 Subject: [PATCH 18/32] better comments in CacheableImage --- opencsp/common/lib/cv/CacheableImage.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/opencsp/common/lib/cv/CacheableImage.py 
b/opencsp/common/lib/cv/CacheableImage.py index 6aaf2322f..3993a734a 100644 --- a/opencsp/common/lib/cv/CacheableImage.py +++ b/opencsp/common/lib/cv/CacheableImage.py @@ -1,8 +1,10 @@ -import numpy as np -from PIL import Image import sys from typing import Optional, Union +import numpy as np +import numpy.typing as npt +from PIL import Image + import opencsp.common.lib.tool.file_tools as ft import opencsp.common.lib.tool.image_tools as it import opencsp.common.lib.tool.log_tools as lt @@ -49,8 +51,8 @@ def __sizeof__(self) -> int: return sys.getsizeof(self._array) + sys.getsizeof(self._image) @classmethod - def from_single_source(cls, array_or_path: Union[np.ndarray, str, 'CacheableImage']): - """Generates a CacheableImage from the given numpy or image file.""" + def from_single_source(cls, array_or_path: Union[np.ndarray, str, 'CacheableImage']) -> 'CacheableImage': + """Generates a CacheableImage from the given numpy array, numpy '.npy' file, or image file.""" if isinstance(array_or_path, CacheableImage): return array_or_path elif isinstance(array_or_path, str): @@ -77,7 +79,7 @@ def validate_cache_path(self, cache_path: Optional[str], caller_name: str): ) @staticmethod - def _load_image(im: str | np.ndarray) -> np.ndarray: + def _load_image(im: str | np.ndarray) -> npt.NDArray[np.int_]: if isinstance(im, np.ndarray): return im elif im.lower().endswith(".npy"): @@ -86,7 +88,7 @@ def _load_image(im: str | np.ndarray) -> np.ndarray: im = Image.open(im) return np.array(im) - def __load_image(self): + def __load_image(self) -> npt.NDArray[np.int_] | None: if self._array is not None: return self._load_image(self._array) elif self.cache_path is not None and ft.file_exists(self.cache_path): @@ -101,7 +103,7 @@ def __load_image(self): ) @property - def nparray(self): + def nparray(self) -> npt.NDArray[np.int_] | None: self._image = None if self._array is None: @@ -110,7 +112,7 @@ def nparray(self): return self.__load_image() - def to_image(self): + def to_image(self) -> 
Image.Image: if self._image == None: self._image = it.numpy_to_image(self.nparray) return self._image From 5b0c73d850dc149ebb03f03159f63221fc3a0233 Mon Sep 17 00:00:00 2001 From: bbean Date: Mon, 22 Apr 2024 16:06:37 -0600 Subject: [PATCH 19/32] add ConvolutionImageProcessor --- contrib/app/SpotAnalysis/PeakFlux.py | 2 +- .../ConvolutionImageProcessor.py | 116 ++++++++++++++++++ .../NullImageSubtractionImageProcessor.py | 7 +- .../TestConvolutionImageProcessor.py | 98 +++++++++++++++ .../spot_analysis/image_processor/__init__.py | 6 +- 5 files changed, 225 insertions(+), 4 deletions(-) create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/ConvolutionImageProcessor.py create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/TestConvolutionImageProcessor.py diff --git a/contrib/app/SpotAnalysis/PeakFlux.py b/contrib/app/SpotAnalysis/PeakFlux.py index 8128d46b3..cca9c282d 100644 --- a/contrib/app/SpotAnalysis/PeakFlux.py +++ b/contrib/app/SpotAnalysis/PeakFlux.py @@ -66,7 +66,7 @@ def __init__(self, indir: str, outdir: str, experiment_name: str, settings_path_ EchoImageProcessor(), SupportingImagesCollectorImageProcessor(supporting_images_map), NullImageSubtractionImageProcessor(), - # FilterImageProcessor(filter="box", diameter=3), + ConvolutionImageProcessor(kernel="box", diameter=3), PopulationStatisticsImageProcessor(initial_min=0, initial_max=255), FalseColorImageProcessor(), # AnnotationImageProcessor(max_pixel_value_locator, bcs_locator) diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/ConvolutionImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/ConvolutionImageProcessor.py new file mode 100644 index 000000000..874fe4da0 --- /dev/null +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/ConvolutionImageProcessor.py @@ -0,0 +1,116 @@ +import dataclasses +from typing import Callable + +import numpy as np +import numpy.typing as npt +import scipy.ndimage +import scipy.signal + +from 
opencsp.common.lib.cv.CacheableImage import CacheableImage +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable +from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( + AbstractSpotAnalysisImagesProcessor, +) +import opencsp.common.lib.tool.log_tools as lt + + +class ConvolutionImageProcessor(AbstractSpotAnalysisImagesProcessor): + def __init__(self, kernel="gaussian", diameter=3): + """ + Convolves an image by the given kernel + + Example use cases include reducing the effects of noise, and finding the average value for a larger area. + + Parameters + ---------- + kernel : str, optional + The type of kernel to apply. Options are "gaussian" or "box". By default "gaussian". + diameter : int, optional + The size of the kernel to be applied, by default 3 + """ + super().__init__(self.__class__.__name__) + + # validate inputs + if kernel not in self._kernels: + lt.error_and_raise( + ValueError, + "Error in ConvolutionImageProcessor(): " + + f"the kernel for convolution must be one of {list(self._kernels.keys())}, but is instead '{kernel}'", + ) + if diameter < 1: + lt.error_and_raise( + ValueError, "Error in ConvolutionImageProcessor(): " + f"the diameter must be >= 1, but is {diameter}" + ) + if diameter % 2 == 0: + lt.error_and_raise( + ValueError, "Error in ConvolutionImageProcessor(): " + f"diameter must be odd, but is {diameter}" + ) + + # register parameters + self.kernel_name = kernel + self.diameter = diameter + + # internal values + self.kernel = lambda img: self._kernels[self.kernel_name](img) + self.radius = diameter / 2 + self.iradius = int(self.radius) + + @property + def _kernels(self) -> dict[str, Callable[[np.ndarray], np.ndarray]]: + return {"box": self._box_filter, "gaussian": self._gaussian_filter} + + def _box_filter(self, image: npt.NDArray[np.int_]): + """ + Convolve the image with a simple box filter, where all pixels in a neighborhood are weighted equaly. 
+ + For example, with a diameter of 3, the image will be convolved with the following array:: + + kernel = np.array([[1/9, 1/9, 1/9], + [1/9, 1/9, 1/9], + [1/9, 1/9, 1/9]]) + """ + orig_type = image.dtype + image = image.astype(np.float64) + + # evaluate the filter + mode = 'same' # shape is max(image, kernel) + boundary = 'symm' # edges are reflected, ie image[-1] = image[0], image[-2] = image[1], etc... + kernel = np.ones((self.diameter, self.diameter)) / (self.diameter**2) + ret = scipy.signal.convolve2d(image, kernel, mode, boundary) + + ret = np.round(ret) + ret = ret.astype(orig_type) + return ret + + def _gaussian_filter(self, image: npt.NDArray[np.int_]): + """ + Convolves the image with a gaussian filter with sigma 1. + """ + orig_type = image.dtype + image = image.astype(np.float64) + + # evaluate the filter + ret = scipy.ndimage.gaussian_filter(image, sigma=1, radius=self.iradius) + + ret = np.round(ret) + ret = ret.astype(orig_type) + return ret + + def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAnalysisOperable]: + # validate input + input_image = operable.primary_image.nparray + if (self.diameter > input_image.shape[0]) or (self.diameter > input_image.shape[1]): + lt.error_and_raise( + RuntimeError, + "Error in ConvolutionImageProcessor._box_filter(): " + + "although scipy.signal.convolve2d supports convolutions with a kernel that is larger than " + + "the array being convolved, we don't currently support that use case. 
Consider adding that functionality.", + ) + + # evaluate the kernel + filtered_image = self.kernel(input_image) + + # create the returned operable + cacheable = CacheableImage(filtered_image, source_path=operable.primary_image.source_path) + new_operable = dataclasses.replace(operable, primary_image=cacheable) + return [new_operable] diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/NullImageSubtractionImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/NullImageSubtractionImageProcessor.py index d30e97dcc..025067124 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/NullImageSubtractionImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/NullImageSubtractionImageProcessor.py @@ -18,8 +18,11 @@ def __init__(self): def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAnalysisOperable]: # validate the input if (ImageType.NULL not in operable.supporting_images) or (operable.supporting_images[ImageType.NULL] is None): - lt.warning("Warning in NullImageSubtractionImageProcessor._execute(): " + - f"skipping subtraction of null image for {operable.primary_image_source_path}. " + "Given image does not have an associated NULL supporting image.") + lt.warning( + "Warning in NullImageSubtractionImageProcessor._execute(): " + + f"skipping subtraction of null image for {operable.primary_image_source_path}. " + + "Given image does not have an associated NULL supporting image." + ) return [operable] # Get the primary image with the null image subtracted. 
diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/TestConvolutionImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/TestConvolutionImageProcessor.py new file mode 100644 index 000000000..a640f86cb --- /dev/null +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/TestConvolutionImageProcessor.py @@ -0,0 +1,98 @@ +import os +import unittest + +import numpy as np +import numpy.testing as nptest + +from opencsp.common.lib.cv.CacheableImage import CacheableImage +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable +from opencsp.common.lib.cv.spot_analysis.image_processor.ConvolutionImageProcessor import ConvolutionImageProcessor +import opencsp.common.lib.tool.file_tools as ft + + +class TestConvolutionImageProcessor(unittest.TestCase): + def setUp(self) -> None: + path, _, _ = ft.path_components(__file__) + self.data_dir = os.path.join(path, "data", "input", "ConvolutionImageProcessor") + self.out_dir = os.path.join(path, "data", "output", "ConvolutionImageProcessor") + ft.create_directories_if_necessary(self.data_dir) + ft.create_directories_if_necessary(self.out_dir) + + self.ones = np.ones((5, 5)) + """5x5 array of 1's""" + self.tfive_arr = np.arange(1, 26).reshape((5, 5)) + """5x5 array of values between 1 and 25""" + + def test_validate_initialization(self): + with self.assertRaises(ValueError): + ConvolutionImageProcessor(kernel='not a valid kernel') + with self.assertRaises(ValueError): + ConvolutionImageProcessor(diameter=-3) + with self.assertRaises(ValueError): + ConvolutionImageProcessor(diameter=0) + with self.assertRaises(ValueError): + ConvolutionImageProcessor(diameter=2) + + def test_box(self): + processor = ConvolutionImageProcessor(kernel="box", diameter=3) + # fmt: off + expected = np.array([ + [ 3, 4, 5, 6, 6], + [ 6, 7, 8, 9, 10], + [11, 12, 13, 14, 15], + [16, 17, 18, 19, 20], + [20, 20, 21, 22, 23] + ]) + # fmt: on + + # simply test the filter function + actual = 
processor._box_filter(self.tfive_arr) + np.testing.assert_array_equal(expected, actual) + + # test the processor + cacheable = CacheableImage.from_single_source(self.tfive_arr) + operable = SpotAnalysisOperable(cacheable, primary_image_source_path="test_box") + result = processor.process_image(operable, False)[0] + np.testing.assert_array_equal(expected, result.primary_image.nparray) + + def test_box_large_diameter(self): + processor = ConvolutionImageProcessor(kernel="box", diameter=7) + cacheable = CacheableImage.from_single_source(self.tfive_arr) + operable = SpotAnalysisOperable(cacheable, primary_image_source_path="test_box_large_diameter") + + with self.assertRaises(RuntimeError): + processor.process_image(operable) + + def test_gaussian(self): + processor = ConvolutionImageProcessor(kernel="gaussian", diameter=3) + # fmt: off + expected = np.array([ + [ 3, 3, 4, 5, 6], + [ 6, 7, 8, 9, 10], + [11, 12, 13, 14, 15], + [16, 17, 18, 19, 20], + [20, 21, 22, 23, 23] + ]) + # fmt: on + + # simply test the filter function + actual = processor._gaussian_filter(self.tfive_arr) + np.testing.assert_array_equal(expected, actual) + + # test the processor + cacheable = CacheableImage.from_single_source(self.tfive_arr) + operable = SpotAnalysisOperable(cacheable, primary_image_source_path="test_gaussian") + result = processor.process_image(operable, False)[0] + np.testing.assert_array_equal(expected, result.primary_image.nparray) + + def test_gaussian_large_diameter(self): + processor = ConvolutionImageProcessor(kernel="gaussian", diameter=7) + cacheable = CacheableImage.from_single_source(self.tfive_arr) + operable = SpotAnalysisOperable(cacheable, primary_image_source_path="test_gaussian_large_diameter") + + with self.assertRaises(RuntimeError): + processor.process_image(operable) + + +if __name__ == '__main__': + unittest.main() diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py b/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py 
index 0b22d1693..e0b289fd1 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py @@ -7,6 +7,7 @@ from opencsp.common.lib.cv.spot_analysis.image_processor.AverageByGroupImageProcessor import ( AverageByGroupImageProcessor, ) +from opencsp.common.lib.cv.spot_analysis.image_processor.ConvolutionImageProcessor import ConvolutionImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.CroppingImageProcessor import CroppingImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.EchoImageProcessor import EchoImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.ExposureDetectionImageProcessor import ( @@ -14,7 +15,9 @@ ) from opencsp.common.lib.cv.spot_analysis.image_processor.FalseColorImageProcessor import FalseColorImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.LogScaleImageProcessor import LogScaleImageProcessor -from opencsp.common.lib.cv.spot_analysis.image_processor.NullImageSubtractionImageProcessor import NullImageSubtractionImageProcessor +from opencsp.common.lib.cv.spot_analysis.image_processor.NullImageSubtractionImageProcessor import ( + NullImageSubtractionImageProcessor, +) from opencsp.common.lib.cv.spot_analysis.image_processor.PopulationStatisticsImageProcessor import ( PopulationStatisticsImageProcessor, ) @@ -27,6 +30,7 @@ 'AbstractAggregateImageProcessor', 'AbstractSpotAnalysisImagesProcessor', 'AverageByGroupImageProcessor', + 'ConvolutionImageProcessor', 'CroppingImageProcessor', 'EchoImageProcessor', 'ExposureDetectionImageProcessor', From 5c011b7611afa2915fc785aaf12f0338c8066cb2 Mon Sep 17 00:00:00 2001 From: bbean Date: Wed, 24 Apr 2024 10:23:40 -0600 Subject: [PATCH 20/32] update AbstractFiducial->AbstractFiducials to indicate the plural nature of the Pxy and Vxyz classes --- opencsp/common/lib/cv/AbstractFiducial.py | 57 ------- opencsp/common/lib/cv/AbstractFiducials.py | 152 
++++++++++++++++++ .../cv/spot_analysis/SpotAnalysisOperable.py | 6 +- opencsp/common/lib/geometry/Vxy.py | 21 +++ opencsp/common/lib/geometry/Vxyz.py | 29 +++- 5 files changed, 204 insertions(+), 61 deletions(-) delete mode 100644 opencsp/common/lib/cv/AbstractFiducial.py create mode 100644 opencsp/common/lib/cv/AbstractFiducials.py diff --git a/opencsp/common/lib/cv/AbstractFiducial.py b/opencsp/common/lib/cv/AbstractFiducial.py deleted file mode 100644 index c0b7ab367..000000000 --- a/opencsp/common/lib/cv/AbstractFiducial.py +++ /dev/null @@ -1,57 +0,0 @@ -from abc import ABC -import numpy as np - -import opencsp.common.lib.geometry.Vxy as v2 -import opencsp.common.lib.geometry.Vxyz as v3 -import opencsp.common.lib.geometry.Pxy as p2 -import opencsp.common.lib.geometry.RegionXY as reg - - -class AbstractFiducial(ABC): - """A marker (such as an ArUco board) that is used to orient the camera - relative to observed objects in the scene. Note that each implementing class - must also implement a matching locate_instances() method.""" - - @property - def bounding_box(self) -> reg.RegionXY: - """The X/Y bounding box of this instance, in pixels.""" - - @property - def unit_vector(self) -> v3.Vxyz: - """Returns a vector representing the origin, orientation, and scale of this instance.""" - pass - - @property - def origin(self) -> p2.Pxy: - """The origin point of this instance, in pixels.""" - - @property - def orientation(self) -> v3.Vxyz: - """The orientation of this instance, in radians. This is relative to - the source image, where x is positive to the right, y is positive down, - and z is positive in (away from the camera).""" - - @property - def size(self) -> float: - """The scale of this fiducial, in pixels, relative to its longest axis. - For example, if the fiducial is a square QR-code and is oriented tangent - to the camera, then the scale will be the number of pixels from one - corner to the other.""" # TODO is this a good definition? 
- - @property - def scale(self) -> float: - """The scale of this fiducial, in meters, relative to its longest axis. - This can be used to determine the distance and orientation of the - fiducial relative to the camera.""" - - @classmethod - def locate_instances(self, img: np.ndarray, anticipated_unit_vector: v3.Vxyz = None) -> list["AbstractFiducial"]: - """For the given input image, find and report any regions that strongly match this fiducial type. - - Parameters: - ----------- - - img (ndarray): The image to search for fiducials within. - - anticipated_unit_vector (Vxyz): Where the fiducial is expected to - be, based on some outsize knowledge. If None, then there isn't - enough information to make an informed guess. Default None. - """ diff --git a/opencsp/common/lib/cv/AbstractFiducials.py b/opencsp/common/lib/cv/AbstractFiducials.py new file mode 100644 index 000000000..748e78076 --- /dev/null +++ b/opencsp/common/lib/cv/AbstractFiducials.py @@ -0,0 +1,152 @@ +from abc import ABC, abstractmethod +from typing import Callable + +import matplotlib.axes +import matplotlib.pyplot as plt +import numpy as np + +import opencsp.common.lib.geometry.Pxy as p2 +import opencsp.common.lib.geometry.RegionXY as reg +import opencsp.common.lib.geometry.Vxyz as v3 +import opencsp.common.lib.render.figure_management as fm +import opencsp.common.lib.render_control.RenderControlPointSeq as rcps +import opencsp.common.lib.tool.log_tools as lt + + +class AbstractFiducials(ABC): + def __init__(self, style=None, pixels_to_meters: Callable[[p2.Pxy], v3.Vxyz] = None): + """ + A collection of markers (such as an ArUco board) that is used to orient the camera relative to observed objects + in the scene. It is suggested that each implementing class be paired with a complementary FiducialLocator or + PredictingFiducialLocator class. + + Parameters + ---------- + style : RenderControlPointSeq, optional + How to render this fiducial when using the defaul render_to_plot() method. 
By default rcps.default(). + pixels_to_meters : Callable[[p2.Pxy], v3.Vxyz], optional + Conversion function to get the physical point in space for the given x/y position information. Used in the + default self.scale implementation. Defaults to 1 meter per pixel. + """ + self.style = style if style is not None else rcps.default() + self.pixels_to_meters = pixels_to_meters + + @abstractmethod + def get_bounding_box(self, index=0) -> reg.RegionXY: + """The X/Y bounding box(es) of this instance, in pixels.""" + + @property + @abstractmethod + def origin(self) -> p2.Pxy: + """The origin point(s) of this instance, in pixels.""" + + @property + @abstractmethod + def orientation(self) -> v3.Vxyz: + """The orientation(s) of this instance, in radians. This is relative to + the source image, where x is positive to the right, y is positive down, + and z is positive in (away from the camera).""" + + @property + @abstractmethod + def size(self) -> list[float]: + """The scale(s) of this fiducial, in pixels, relative to its longest axis. + For example, if the fiducial is a square QR-code and is oriented tangent + to the camera, then the scale will be the number of pixels from one + corner to the other.""" # TODO is this a good definition? + + @property + def scale(self) -> list[float]: + """ + The scale(s) of this fiducial, in meters, relative to its longest axis. + This can be used to determine the distance and orientation of the + fiducial relative to the camera. + """ + ret = [] + + for i in range(len(self.origin)): + bb = self.get_bounding_box(i) + left_px, right_px, bottom_px, top_px = bb.loops[0].axis_aligned_bounding_box() + top_left_m = self.pixels_to_meters(p2.Pxy([left_px, top_px])) + bottom_right_m = self.pixels_to_meters(p2.Pxy([right_px, bottom_px])) + scale = (bottom_right_m - top_left_m).magnitude()[0] + ret.append(scale) + + return ret + + def _render(self, axes: matplotlib.axes.Axes): + """ + Called from render(). The parameters are always guaranteed to be set. 
+ """ + axes.scatter( + self.origin.x, + self.origin.y, + linewidth=self.style.linewidth, + marker=self.style.marker, + s=self.style.markersize, + c=self.style.markerfacecolor, + edgecolor=self.style.markeredgecolor, + ) + + def render(self, axes: matplotlib.axes.Axes = None): + """ + Renders this fiducial to the active matplotlib.pyplot plot. + + The default implementation uses plt.scatter(). + + Parameters + ---------- + axes: matplotlib.axes.Axes, optional + The plot to render to. Uses the active plot if None. Default is None. + """ + if axes is None: + axes = plt.gca() + self._render(axes) + + def render_to_image(self, image: np.ndarray) -> np.ndarray: + """ + Renders this fiducial to the a new image on top of the given image. + + The default implementation creates a new matplotlib plot, and then renders to it with self.render_to_plot(). + """ + # Create the figure to plot to + dpi = 300 + width = image.shape[1] + height = image.shape[0] + fig = fm.mpl_pyplot_figure(figsize=(width / dpi, height / dpi), dpi=dpi) + + try: + # A portion of this code is from: + # https://stackoverflow.com/questions/35355930/figure-to-image-as-a-numpy-array + + # Get the axis and canvas + axes = fig.gca() + canvas = fig.canvas + + # Image from plot + axes.axis('off') + fig.tight_layout(pad=0) + + # To remove the huge white borders + axes.margins(0) + + # Prepare the image and the feature points + axes.imshow(image) + self.render(axes) + + # Render + canvas.draw() + + # Convert back to a numpy array + new_image = np.asarray(canvas.buffer_rgba()) + new_image = new_image.astype(image.dtype) + + # Return the updated image + return new_image + + except Exception as ex: + lt.error("Error in AnnotationImageProcessor.render_points(): " + repr(ex)) + raise + + finally: + plt.close(fig) diff --git a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py index c1cef249e..d7fb665da 100644 --- 
a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py +++ b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py @@ -5,7 +5,7 @@ import sys import opencsp.common.lib.csp.LightSource as ls -import opencsp.common.lib.cv.AbstractFiducial as af +import opencsp.common.lib.cv.AbstractFiducials as af from opencsp.common.lib.cv.CacheableImage import CacheableImage from opencsp.common.lib.cv.spot_analysis.SpotAnalysisImagesStream import ImageType from opencsp.common.lib.cv.spot_analysis.SpotAnalysisPopulationStatistics import SpotAnalysisPopulationStatistics @@ -31,9 +31,9 @@ class SpotAnalysisOperable: supporting_images: dict[ImageType, CacheableImage] = field(default_factory=dict) """ The supporting images, if any, that were provided with the associated input primary image. """ - given_fiducials: list[af.AbstractFiducial] = field(default_factory=list) + given_fiducials: list[af.AbstractFiducials] = field(default_factory=list) """ Any fiducials handed to us in the currently processing image. """ - found_fiducials: list[af.AbstractFiducial] = field(default_factory=list) + found_fiducials: list[af.AbstractFiducials] = field(default_factory=list) """ The identified fiducials in the currently processing image. """ camera_intrinsics_characterization: any = ( None # TODO figure out how to specify information here, maybe using common/lib/camera/Camera diff --git a/opencsp/common/lib/geometry/Vxy.py b/opencsp/common/lib/geometry/Vxy.py index 0e572497a..bdffa6d87 100644 --- a/opencsp/common/lib/geometry/Vxy.py +++ b/opencsp/common/lib/geometry/Vxy.py @@ -8,6 +8,24 @@ def __init__(self, data, dtype=float): """ 2D vector class to represent 2D points/vectors. + To represent a single vector:: + + x = 1 + y = 2 + vec = Vxy(np.array([[x], [y])) # same as vec = Vxy([x, y]) + print(vec.x) # [1.] + print(vec.y) # [2.] 
+ + To represent a set of vectors:: + + vec1 = [1, 2] + vec2 = [4, 5] + vec3 = [7, 8] + zipped = list(zip(vec1, vec2, vec3)) + vecs = Vxy(np.array(zipped)) + print(vec.x) # [1. 4. 7.] + print(vec.y) # [2. 5. 8.] + Parameters ---------- data : array-like @@ -31,6 +49,9 @@ def __init__(self, data, dtype=float): @property def data(self): + """ + An array with shape (2, N), where N is the number of 2D vectors in this instance. + """ return self._data @property diff --git a/opencsp/common/lib/geometry/Vxyz.py b/opencsp/common/lib/geometry/Vxyz.py index b9bd8dc07..d55f15e7e 100644 --- a/opencsp/common/lib/geometry/Vxyz.py +++ b/opencsp/common/lib/geometry/Vxyz.py @@ -15,10 +15,32 @@ def __init__(self, data, dtype=float): """ 3D vector class to represent 3D points/vectors. + To represent a single vector:: + + x = 1 + y = 2 + z = 3 + vec = Vxyz(np.array([[x], [y], [z]])) # same as vec = Vxyz([x, y, z]) + print(vec.x) # [1.] + print(vec.y) # [2.] + print(vec.z) # [3.] + + To represent a set of vectors:: + + vec1 = [1, 2, 3] + vec2 = [4, 5, 6] + vec3 = [7, 8, 9] + zipped = list(zip(vec1, vec2, vec3)) + vecs = Vxyz(np.array(zipped)) + print(vec.x) # [1. 4. 7.] + print(vec.y) # [2. 5. 8.] + print(vec.z) # [3. 6. 9.] + Parameters ---------- data : array-like - The 3d point data: 3xN array, length 3 tuple, length 3 list + The 3d point data: 3xN array, length 3 tuple, length 3 list. If a Vxy, then the data will be padded with 0s + for 'z'. dtype : data type, optional Data type. The default is float. 
@@ -30,6 +52,8 @@ def __init__(self, data, dtype=float): raise ValueError('Input data must have 1 or 2 dimensions if ndarray.') elif np.ndim(data) == 2 and data.shape[0] != 3: raise ValueError('First dimension of 2-dimensional data must be length 3 if ndarray.') + elif isinstance(data, Vxy): + data = np.pad(data.data, ((0, 1), (0, 0))) elif len(data) != 3: raise ValueError('Input data must have length 3.') @@ -38,6 +62,9 @@ def __init__(self, data, dtype=float): @property def data(self): + """ + An array with shape (3, N), where N is the number of 3D vectors in this instance. + """ return self._data @property From f7ac1fa651f7d0e60a1bac31793f2a885a38aecb Mon Sep 17 00:00:00 2001 From: bbean Date: Wed, 24 Apr 2024 10:26:39 -0600 Subject: [PATCH 21/32] fix SpotAnalysisOperable --- .../cv/spot_analysis/SpotAnalysisOperable.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py index d7fb665da..eadea1030 100644 --- a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py +++ b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisOperable.py @@ -97,7 +97,15 @@ def __post_init__(self): if requires_update: # use __init__ to update frozen values self.__init__( - primary_image, primary_image_source_path=primary_image_source_path, supporting_images=supporting_images + primary_image, + primary_image_source_path, + supporting_images, + self.given_fiducials, + self.found_fiducials, + self.camera_intrinsics_characterization, + self.light_sources, + self.population_statistics, + self.image_processor_notes, ) def __sizeof__(self) -> int: @@ -118,14 +126,14 @@ def replace_use_default_values( ret = replace(ret, supporting_images=supporting_images) if data != None: - given_fiducials = data.given_fiducials if self.given_fiducials == None else self.given_fiducials - found_fiducials = data.found_fiducials if self.found_fiducials == 
None else self.found_fiducials + given_fiducials = data.given_fiducials if len(self.given_fiducials) == 0 else self.given_fiducials + found_fiducials = data.found_fiducials if len(self.found_fiducials) == 0 else self.found_fiducials camera_intrinsics_characterization = ( data.camera_intrinsics_characterization - if self.camera_intrinsics_characterization == None + if self.camera_intrinsics_characterization is None else self.camera_intrinsics_characterization ) - light_sources = data.light_sources if self.light_sources == None else self.light_sources + light_sources = data.light_sources if len(self.light_sources) == 0 else self.light_sources ret = replace( ret, given_fiducials=given_fiducials, From eb141e5f7a0e3ebd58fbbc923369d88cf4b93f37 Mon Sep 17 00:00:00 2001 From: bbean Date: Wed, 24 Apr 2024 10:31:52 -0600 Subject: [PATCH 22/32] add AnnotationImageProcessor, add PointFiducials --- opencsp/common/lib/cv/AbstractFiducials.py | 4 +- .../common/lib/cv/fiducials/PointFiducials.py | 38 ++++++++++++ .../AnnotationImageProcessor.py | 61 +++++++++++++++++++ .../spot_analysis/image_processor/__init__.py | 2 + 4 files changed, 103 insertions(+), 2 deletions(-) create mode 100644 opencsp/common/lib/cv/fiducials/PointFiducials.py create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/AnnotationImageProcessor.py diff --git a/opencsp/common/lib/cv/AbstractFiducials.py b/opencsp/common/lib/cv/AbstractFiducials.py index 748e78076..c99a6ecb8 100644 --- a/opencsp/common/lib/cv/AbstractFiducials.py +++ b/opencsp/common/lib/cv/AbstractFiducials.py @@ -17,8 +17,8 @@ class AbstractFiducials(ABC): def __init__(self, style=None, pixels_to_meters: Callable[[p2.Pxy], v3.Vxyz] = None): """ A collection of markers (such as an ArUco board) that is used to orient the camera relative to observed objects - in the scene. It is suggested that each implementing class be paired with a complementary FiducialLocator or - PredictingFiducialLocator class. + in the scene. 
It is suggested that each implementing class be paired with a complementary locator method or + SpotAnalysisImageProcessor. Parameters ---------- diff --git a/opencsp/common/lib/cv/fiducials/PointFiducials.py b/opencsp/common/lib/cv/fiducials/PointFiducials.py new file mode 100644 index 000000000..87061c38a --- /dev/null +++ b/opencsp/common/lib/cv/fiducials/PointFiducials.py @@ -0,0 +1,38 @@ +import numpy as np + +from opencsp.common.lib.cv.AbstractFiducials import AbstractFiducials +import opencsp.common.lib.geometry.Vxyz as v3 +import opencsp.common.lib.geometry.Pxy as p2 +import opencsp.common.lib.geometry.RegionXY as reg + + +class PointFiducials(AbstractFiducials): + def __init__(self, style=None, points: p2.Pxy = None): + """ + A collection of pixel locations where points of interest are located in an image. + """ + super().__init__(style) + self.points = points + + def get_bounding_box(self, index=0) -> reg.RegionXY: + # TODO untested + return reg.RegionXY.from_vertices(p2.Pxy((self.points.x[index], self.points.y[index]))) + + @property + def origin(self) -> p2.Pxy: + return self.points + + @property + def orientation(self) -> v3.Vxyz: + # TODO untested + return np.zeros((3, self.points.x.size)) + + @property + def size(self) -> list[float]: + # TODO untested + return [0] * len(self.points) + + @property + def scale(self) -> list[float]: + # TODO untested + return [0] * len(self.points) diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/AnnotationImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/AnnotationImageProcessor.py new file mode 100644 index 000000000..7577b0aeb --- /dev/null +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/AnnotationImageProcessor.py @@ -0,0 +1,61 @@ +import dataclasses +from typing import Callable + +import matplotlib.pyplot as plt +import numpy as np + +from opencsp.common.lib.cv.AbstractFiducials import AbstractFiducials +from opencsp.common.lib.cv.CacheableImage import CacheableImage 
+from opencsp.common.lib.cv.fiducials.PointFiducials import PointFiducials +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable +from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( + AbstractSpotAnalysisImagesProcessor, +) +import opencsp.common.lib.geometry.Pxy as p2 +import opencsp.common.lib.opencsp_path.opencsp_root_path as orp +import opencsp.common.lib.render.figure_management as fm +import opencsp.common.lib.render_control.RenderControlPointSeq as rcps +import opencsp.common.lib.tool.file_tools as ft +import opencsp.common.lib.tool.log_tools as lt + + +class AnnotationImageProcessor(AbstractSpotAnalysisImagesProcessor): + def __init__(self): + super().__init__(self.__class__.__name__) + + def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAnalysisOperable]: + old_image = operable.primary_image.nparray + new_image = np.array(old_image) + + for fiducials in operable.given_fiducials: + new_image = fiducials.render_to_image(new_image) + for fiducials in operable.found_fiducials: + new_image = fiducials.render_to_image(new_image) + + cacheable_image = CacheableImage(new_image, source_path=operable.primary_image.source_path) + ret = dataclasses.replace(operable, primary_image=cacheable_image) + return [ret] + + +if __name__ == "__main__": + import os + + indir = ft.norm_path( + os.path.join( + orp.opencsp_scratch_dir(), + "solar_noon/dev/2023-05-12_SpringEquinoxMidSummerSolstice/2_Data/BCS_data/Measure_01/processed_images", + ) + ) + image_file = ft.norm_path(os.path.join(indir, "20230512_113032.81 5W01_000_880_2890 Raw_Testing_Peak_Flux.png")) + + style = rcps.RenderControlPointSeq(markersize=10) + fiducials = PointFiducials(style, points=p2.Pxy(np.array([[0, 643, 1000], [0, 581, 1000]]))) + operable = SpotAnalysisOperable(CacheableImage(source_path=image_file), given_fiducials=[fiducials]) + + processor = AnnotationImageProcessor() + result = 
processor.process_image(operable)[0] + img = result.primary_image.nparray + + plt.figure() + plt.imshow(img) + plt.show(block=True) diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py b/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py index e0b289fd1..98d2995b4 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py @@ -4,6 +4,7 @@ from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( AbstractSpotAnalysisImagesProcessor, ) +from opencsp.common.lib.cv.spot_analysis.image_processor.AnnotationImageProcessor import AnnotationImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.AverageByGroupImageProcessor import ( AverageByGroupImageProcessor, ) @@ -29,6 +30,7 @@ __all__ = [ 'AbstractAggregateImageProcessor', 'AbstractSpotAnalysisImagesProcessor', + 'AnnotationImageProcessor', 'AverageByGroupImageProcessor', 'ConvolutionImageProcessor', 'CroppingImageProcessor', From 2b7ad94df0bb8fc81aca28db7802c966b14c8c78 Mon Sep 17 00:00:00 2001 From: bbean Date: Wed, 24 Apr 2024 10:42:08 -0600 Subject: [PATCH 23/32] remove unused imports, fix test file location --- .../image_processor/AbstractAggregateImageProcessor.py | 5 ----- .../image_processor/AnnotationImageProcessor.py | 4 ---- .../cv/spot_analysis/image_processor/EchoImageProcessor.py | 1 - .../image_processor/ExposureDetectionImageProcessor.py | 3 --- .../spot_analysis/image_processor/LogScaleImageProcessor.py | 1 - .../image_processor/PopulationStatisticsImageProcessor.py | 2 -- .../{ => test}/TestConvolutionImageProcessor.py | 1 - 7 files changed, 17 deletions(-) rename opencsp/common/lib/cv/spot_analysis/image_processor/{ => test}/TestConvolutionImageProcessor.py (99%) diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractAggregateImageProcessor.py 
b/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractAggregateImageProcessor.py index 96e91d1c5..ae6b2a7c9 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractAggregateImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractAggregateImageProcessor.py @@ -1,18 +1,13 @@ from abc import ABC, abstractmethod -import dataclasses import re from typing import Callable import numpy as np -from opencsp.common.lib.cv.CacheableImage import CacheableImage -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisImagesStream import ImageType from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( AbstractSpotAnalysisImagesProcessor, ) -import opencsp.common.lib.opencsp_path.opencsp_root_path as orp -import opencsp.common.lib.tool.file_tools as ft import opencsp.common.lib.tool.log_tools as lt diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/AnnotationImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/AnnotationImageProcessor.py index 7577b0aeb..23507aad5 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/AnnotationImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/AnnotationImageProcessor.py @@ -1,10 +1,8 @@ import dataclasses -from typing import Callable import matplotlib.pyplot as plt import numpy as np -from opencsp.common.lib.cv.AbstractFiducials import AbstractFiducials from opencsp.common.lib.cv.CacheableImage import CacheableImage from opencsp.common.lib.cv.fiducials.PointFiducials import PointFiducials from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable @@ -13,10 +11,8 @@ ) import opencsp.common.lib.geometry.Pxy as p2 import opencsp.common.lib.opencsp_path.opencsp_root_path as orp -import opencsp.common.lib.render.figure_management as fm import 
opencsp.common.lib.render_control.RenderControlPointSeq as rcps import opencsp.common.lib.tool.file_tools as ft -import opencsp.common.lib.tool.log_tools as lt class AnnotationImageProcessor(AbstractSpotAnalysisImagesProcessor): diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/EchoImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/EchoImageProcessor.py index acb61458e..7eb35c134 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/EchoImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/EchoImageProcessor.py @@ -2,7 +2,6 @@ from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( AbstractSpotAnalysisImagesProcessor, ) -import opencsp.common.lib.tool.file_tools as ft import opencsp.common.lib.tool.log_tools as lt diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/ExposureDetectionImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/ExposureDetectionImageProcessor.py index 36a9ecde6..3a4a1ea09 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/ExposureDetectionImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/ExposureDetectionImageProcessor.py @@ -1,8 +1,5 @@ -import dataclasses - import numpy as np -from opencsp.common.lib.cv.CacheableImage import CacheableImage from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( AbstractSpotAnalysisImagesProcessor, diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/LogScaleImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/LogScaleImageProcessor.py index 8b61d41e3..8b479e9ac 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/LogScaleImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/LogScaleImageProcessor.py @@ -1,7 +1,6 @@ import 
dataclasses import numpy as np -import opencsp.common.lib.tool.image_tools as it from opencsp.common.lib.cv.spot_analysis.image_processor import AbstractSpotAnalysisImagesProcessor from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/PopulationStatisticsImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/PopulationStatisticsImageProcessor.py index 10a4d3581..8ac9905c7 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/PopulationStatisticsImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/PopulationStatisticsImageProcessor.py @@ -1,13 +1,11 @@ import dataclasses import numpy as np -from opencsp.common.lib.cv.CacheableImage import CacheableImage from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable from opencsp.common.lib.cv.spot_analysis.SpotAnalysisPopulationStatistics import SpotAnalysisPopulationStatistics from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( AbstractSpotAnalysisImagesProcessor, ) -import opencsp.common.lib.tool.file_tools as ft import opencsp.common.lib.tool.image_tools as it import opencsp.common.lib.tool.log_tools as lt diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/TestConvolutionImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestConvolutionImageProcessor.py similarity index 99% rename from opencsp/common/lib/cv/spot_analysis/image_processor/TestConvolutionImageProcessor.py rename to opencsp/common/lib/cv/spot_analysis/image_processor/test/TestConvolutionImageProcessor.py index a640f86cb..5c1b0c711 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/TestConvolutionImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestConvolutionImageProcessor.py @@ -2,7 +2,6 @@ import unittest import numpy as np -import 
numpy.testing as nptest from opencsp.common.lib.cv.CacheableImage import CacheableImage from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable From bd484e39922ec64f6504993ba0c0ffc7827760c1 Mon Sep 17 00:00:00 2001 From: bbean Date: Wed, 24 Apr 2024 10:44:47 -0600 Subject: [PATCH 24/32] add BcsFiducial, BcsLocatorImageProcessor, RenderControlBcs --- contrib/app/SpotAnalysis/PeakFlux.py | 11 +- .../common/lib/cv/fiducials/BcsFiducial.py | 77 +++++++++++++ .../BcsLocatorImageProcessor.py | 107 ++++++++++++++++++ .../spot_analysis/image_processor/__init__.py | 2 + .../lib/render_control/RenderControlBcs.py | 63 +++++++++++ 5 files changed, 251 insertions(+), 9 deletions(-) create mode 100644 opencsp/common/lib/cv/fiducials/BcsFiducial.py create mode 100644 opencsp/common/lib/cv/spot_analysis/image_processor/BcsLocatorImageProcessor.py create mode 100644 opencsp/common/lib/render_control/RenderControlBcs.py diff --git a/contrib/app/SpotAnalysis/PeakFlux.py b/contrib/app/SpotAnalysis/PeakFlux.py index cca9c282d..e107b14f3 100644 --- a/contrib/app/SpotAnalysis/PeakFlux.py +++ b/contrib/app/SpotAnalysis/PeakFlux.py @@ -51,14 +51,6 @@ def __init__(self, indir: str, outdir: str, experiment_name: str, settings_path_ ImageType.PRIMARY: lambda operable, operables: "off" not in operable.primary_image_source_path, ImageType.NULL: lambda operable, operables: "off" in operable.primary_image_source_path, } - # max_pixel_value_locator = AnnotationImageProcessor.AnnotationEngine( - # feature_locator=lambda operable: np.argmax(operable.primary_image.ndarray), - # color='k' - # ) - # bcs_locator = AnnotationImageProcessor.AnnotationEngine( - # feature_locator=lambda operable: self.bcs_pixel, - # color='k' - # ) self.image_processors: list[AbstractSpotAnalysisImagesProcessor] = [ CroppingImageProcessor(*self.crop_box), @@ -67,9 +59,10 @@ def __init__(self, indir: str, outdir: str, experiment_name: str, settings_path_ 
SupportingImagesCollectorImageProcessor(supporting_images_map), NullImageSubtractionImageProcessor(), ConvolutionImageProcessor(kernel="box", diameter=3), + BcsLocatorImageProcessor(), PopulationStatisticsImageProcessor(initial_min=0, initial_max=255), FalseColorImageProcessor(), - # AnnotationImageProcessor(max_pixel_value_locator, bcs_locator) + AnnotationImageProcessor(), ] self.spot_analysis = sa.SpotAnalysis( experiment_name, self.image_processors, save_dir=outdir, save_overwrite=True diff --git a/opencsp/common/lib/cv/fiducials/BcsFiducial.py b/opencsp/common/lib/cv/fiducials/BcsFiducial.py new file mode 100644 index 000000000..a43905081 --- /dev/null +++ b/opencsp/common/lib/cv/fiducials/BcsFiducial.py @@ -0,0 +1,77 @@ +import matplotlib.axes +import matplotlib.patches + +from opencsp.common.lib.cv.AbstractFiducials import AbstractFiducials +import opencsp.common.lib.geometry.LoopXY as loop +import opencsp.common.lib.geometry.RegionXY as reg +import opencsp.common.lib.geometry.Pxy as p2 +import opencsp.common.lib.geometry.Vxyz as v3 +import opencsp.common.lib.render_control.RenderControlBcs as rcb + + +class BcsFiducial(AbstractFiducials): + def __init__( + self, origin_px: p2.Pxy, radius_px: float, style: rcb.RenderControlBcs = None, pixels_to_meters: float = 0.1 + ): + """ + Fiducial for indicating where the BCS target is in an image. + + Parameters + ---------- + origin_px : Pxy + The center point of the BCS target, in pixels + radius_px : float + The radius of the BCS target, in pixels + style : RenderControlBcs, optional + The rendering style, by default None + pixels_to_meters : float, optional + A simple conversion method for how many meters a pixel represents, for use in scale(). 
by default 0.1 + """ + super().__init__(style=style) + self.origin_px = origin_px + self.radius_px = radius_px + self.pixels_to_meters = pixels_to_meters + + def get_bounding_box(self, index=0) -> reg.RegionXY: + x1, x2 = self.origin.x[0] - self.radius_px, self.origin.x[0] + self.radius_px + y1, y2 = self.origin.y[0] - self.radius_px, self.origin.y[0] + self.radius_px + return reg.RegionXY(loop.LoopXY.from_rectangle(x1, y1, x2 - x1, y2 - y1)) + + @property + def origin(self) -> p2.Pxy: + return self.origin_px + + @property + def orientation(self) -> v3.Vxyz: + return v3.Vxyz([0, 0, 0]) + + @property + def size(self) -> list[float]: + return [self.radius_px * 2] + + @property + def scale(self) -> list[float]: + return [self.size * self.pixels_to_meters] + + def _render(self, axes: matplotlib.axes.Axes): + if self.style.linestyle is not None: + circ = matplotlib.patches.Circle( + self.origin.data.tolist(), + self.radius_px, + color=self.style.color, + linestyle=self.style.linestyle, + linewidth=self.style.linewidth, + fill=False, + ) + axes.add_patch(circ) + + if self.style.marker is not None: + axes.scatter( + self.origin.x, + self.origin.y, + linewidth=self.style.linewidth, + marker=self.style.marker, + s=self.style.markersize, + c=self.style.markerfacecolor, + edgecolor=self.style.markeredgecolor, + ) diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/BcsLocatorImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/BcsLocatorImageProcessor.py new file mode 100644 index 000000000..c3eec92a9 --- /dev/null +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/BcsLocatorImageProcessor.py @@ -0,0 +1,107 @@ +import copy +import dataclasses + +import cv2 as cv +import matplotlib.pyplot as plt +import numpy as np + +from opencsp.common.lib.cv.CacheableImage import CacheableImage +from opencsp.common.lib.cv.fiducials.BcsFiducial import BcsFiducial +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable 
+from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import (
+    AbstractSpotAnalysisImagesProcessor,
+)
+from opencsp.common.lib.cv.spot_analysis.image_processor.AnnotationImageProcessor import AnnotationImageProcessor
+from opencsp.common.lib.cv.spot_analysis.image_processor.ConvolutionImageProcessor import ConvolutionImageProcessor
+import opencsp.common.lib.geometry.Pxy as p2
+import opencsp.common.lib.opencsp_path.opencsp_root_path as orp
+import opencsp.common.lib.render_control.RenderControlBcs as rcb
+import opencsp.common.lib.render_control.RenderControlPointSeq as rcps
+import opencsp.common.lib.tool.file_tools as ft
+import opencsp.common.lib.tool.log_tools as lt
+
+
+class BcsLocatorImageProcessor(AbstractSpotAnalysisImagesProcessor):
+    def __init__(self, min_radius_px=30, max_radius_px=150):
+        """
+        Locates the BCS by identifying a circle in the image.
+
+        It is recommended that this processor be used after ConvolutionImageProcessor(kernel='gaussian').
+
+        Parameters
+        ----------
+        min_radius_px : int, optional
+            Minimum radius of the BCS circle, in pixels. By default 30
+        max_radius_px : int, optional
+            Maximum radius of the BCS circle, in pixels. 
By default 150
+        """
+        super().__init__(self.__class__.__name__)
+
+        self.min_radius_px = min_radius_px
+        self.max_radius_px = max_radius_px
+
+    def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAnalysisOperable]:
+        image = operable.primary_image.nparray.squeeze()
+        if image.ndim > 2:
+            lt.error_and_raise(
+                RuntimeError,
+                "Error in BcsLocatorImageProcessor._execute(): image must be grayscale (2 dimensions), but "
+                + f"the shape of the image is {image.shape} for '{operable.primary_image_source_path}'",
+            )
+
+        # find all possible matches
+        method = cv.HOUGH_GRADIENT
+        accumulator_pixel_size: float = 1
+        circles: np.ndarray | None = cv.HoughCircles(
+            image,
+            method,
+            accumulator_pixel_size,  # resolution for accumulators
+            minDist=self.min_radius_px,  # distance between circles
+            param1=70,  # upper threshold to Canny edge detector
+            param2=20,  # minimum accumulations count for matching circles
+            minRadius=self.min_radius_px,
+            maxRadius=self.max_radius_px,
+        )
+
+        # opencv returns circles in order from best to worst matches, choose the first circle (best match)
+        circle: BcsFiducial = None
+        if circles is not None:
+            circle_arr = circles[0][0]
+            center = p2.Pxy([circle_arr[0], circle_arr[1]])
+            radius = circle_arr[2]
+            circle = BcsFiducial(center, radius, style=rcb.thin(color='m'))
+
+        # assign to the operable
+        new_found_fiducials = copy.copy(operable.found_fiducials)
+        if circle != None:
+            new_found_fiducials.append(circle)
+        ret = dataclasses.replace(operable, found_fiducials=new_found_fiducials)
+        return [ret]
+
+
+if __name__ == "__main__":
+    import os
+
+    indir = ft.norm_path(
+        os.path.join(
+            orp.opencsp_scratch_dir(),
+            "solar_noon/dev/2023-05-12_SpringEquinoxMidSummerSolstice/2_Data/BCS_data/Measure_01/raw_images",
+        )
+    )
+    image_file = ft.norm_path(os.path.join(indir, "20230512_114854.74 5E09_000_880_2890 Raw.JPG"))
+
+    style = rcps.RenderControlPointSeq(markersize=10)
+    operable = 
SpotAnalysisOperable(CacheableImage(source_path=image_file)) + + processor0 = ConvolutionImageProcessor(kernel='gaussian', diameter=3) + processor1 = BcsLocatorImageProcessor() + processor2 = AnnotationImageProcessor() + + result0 = processor0.process_image(operable)[0] + result1 = processor1.process_image(result0)[0] + result2 = processor2.process_image(result1)[0] + img = result2.primary_image.nparray + + plt.figure() + plt.imshow(img) + plt.show(block=True) diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py b/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py index 98d2995b4..339d1f341 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/__init__.py @@ -8,6 +8,7 @@ from opencsp.common.lib.cv.spot_analysis.image_processor.AverageByGroupImageProcessor import ( AverageByGroupImageProcessor, ) +from opencsp.common.lib.cv.spot_analysis.image_processor.BcsLocatorImageProcessor import BcsLocatorImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.ConvolutionImageProcessor import ConvolutionImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.CroppingImageProcessor import CroppingImageProcessor from opencsp.common.lib.cv.spot_analysis.image_processor.EchoImageProcessor import EchoImageProcessor @@ -32,6 +33,7 @@ 'AbstractSpotAnalysisImagesProcessor', 'AnnotationImageProcessor', 'AverageByGroupImageProcessor', + 'BcsLocatorImageProcessor', 'ConvolutionImageProcessor', 'CroppingImageProcessor', 'EchoImageProcessor', diff --git a/opencsp/common/lib/render_control/RenderControlBcs.py b/opencsp/common/lib/render_control/RenderControlBcs.py new file mode 100644 index 000000000..510a5a3ba --- /dev/null +++ b/opencsp/common/lib/render_control/RenderControlBcs.py @@ -0,0 +1,63 @@ +from opencsp.common.lib.render_control.RenderControlPointSeq import RenderControlPointSeq + + +class RenderControlBcs(RenderControlPointSeq): + 
def __init__( + self, + linestyle: str | None = '-', + linewidth: float = 1, + color: str = 'b', + marker: str | None = '.', + markersize: float = 8, + markeredgecolor: str | None = None, + markeredgewidth: float | None = None, + markerfacecolor: str | None = None, + ): + """ + Render control for the Beam Characterization System target. + + Controls style of the point marker and circle marker of the BCS. + + Parameters + ---------- + linestyle : str, optional + How to draw the line for the circle around the BCS. One of '-', '--', '-.', ':', '' or None (see RenderControlPointSeq for a description). By default '-' + linewidth : int, optional + Width of the line for the circle around the BCS. By default 1 + color : str, optional + Color for the circle around the BCS. One of bgrcmykw (see RenderControlPointSeq for a description). By default 'b' + marker : str, optional + Shape of the center BCS marker. One of .,ov^<>12348sp*hH+xXDd|_ or None. By default '.' + markersize : int, optional + Size of the center BCS marker. By default 8 + markeredgecolor : str, optional + Defaults to color above if not set. By default None + markeredgewidth : float, optional + Defaults to linewidth if not set. By default None + markerfacecolor : str, optional + Defaults to color above if not set. By default None + """ + super().__init__( + linestyle=linestyle, + linewidth=linewidth, + color=color, + marker=marker, + markersize=markersize, + markeredgecolor=markeredgecolor, + markeredgewidth=markeredgewidth, + markerfacecolor=markerfacecolor, + ) + + +# COMMON CASES + + +def default(marker='.', color='b', linewidth=1, markersize=8) -> RenderControlBcs: + """ + What to draw if no particular preference is expressed. 
+ """ + return RenderControlBcs(linewidth=linewidth, color=color, marker=marker, markersize=markersize) + + +def thin(marker='.', color='b', linewidth=0.3, markersize=5) -> RenderControlBcs: + return RenderControlBcs(color=color, marker=marker, linewidth=linewidth, markersize=markersize) From da10f689ed9e6f27c81dc948a96881c40cd5b74b Mon Sep 17 00:00:00 2001 From: bbean Date: Wed, 24 Apr 2024 11:12:10 -0600 Subject: [PATCH 25/32] remove unused code from PeakFlux and CroppingImageProcessor --- contrib/app/SpotAnalysis/PeakFlux.py | 48 ------------------- .../image_processor/CroppingImageProcessor.py | 10 ---- 2 files changed, 58 deletions(-) diff --git a/contrib/app/SpotAnalysis/PeakFlux.py b/contrib/app/SpotAnalysis/PeakFlux.py index e107b14f3..f7aa1fb61 100644 --- a/contrib/app/SpotAnalysis/PeakFlux.py +++ b/contrib/app/SpotAnalysis/PeakFlux.py @@ -1,12 +1,9 @@ -import json import os import re -import numpy as np import opencsp.common.lib.cv.SpotAnalysis as sa from opencsp.common.lib.cv.spot_analysis.SpotAnalysisImagesStream import ImageType -from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable import opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperableAttributeParser as saoap from opencsp.common.lib.cv.spot_analysis.image_processor import * import opencsp.common.lib.tool.file_tools as ft @@ -92,51 +89,6 @@ def run(self): parser = saoap.SpotAnalysisOperableAttributeParser(result, self.spot_analysis) -# class PeakFluxOffsetImageProcessor(AbstractSpotAnalysisImagesProcessor): -# def __init__(self, outfile_path_name_ext: str, max_pixel_value_locator: AnnotationImageProcessor.AnnotationEngine, bcs_pixel_location: tuple[int, int], heliostat_name_pattern: re.Pattern): -# super().__init__("PeakFluxOffsetImageProcessor") - -# self.outfile_path_name_ext = outfile_path_name_ext -# self.max_pixel_value_locator = max_pixel_value_locator -# self.bcs_pixel_location = bcs_pixel_location -# self.heliostat_name_pattern = 
heliostat_name_pattern - -# with open(outfile_path_name_ext, "w") as fout: -# fout.writelines(["Heliostat,Peak Flux Pixel,Pixels Offset"]) - -# def _execute(self, operable: sa.SpotAnalysisOperable, is_last: bool) -> list[SpotAnalysisOperable]: -# # get the heliostat name -# names_to_search = [ -# operable.primary_image_source_path, -# operable.primary_image.source_path, -# operable.primary_image.cache_path -# ] - -# heliostat_name = None -# for name in names_to_search: -# m = self.heliostat_name_pattern.search(name) -# if m is not None: -# groups = list(filter(lambda s: s is not None, m.groups())) -# if len(groups) > 0: -# heliostat_name = "".join(groups) -# break - -# if heliostat_name is None: -# lt.error("Error in PeakFluxOffsetImageProcessor._execute(): " + -# f"failed to find heliostat name in {names_to_search}") -# return [operable] - -# # get the peak pixel location -# peak_flux_pixel = max_pixel_value_locator.feature_locator(operable.primary_image.nparray)[0] -# pixels_offset = peak_flux_pixel - np.array(self.bcs_pixel_location) - -# # write the results -# peak_flux_pixel_str = f"{peak_flux_pixel[0]} {peak_flux_pixel[1]}" -# pixels_offset_str = f"{pixels_offset[0]} {pixels_offset[1]}" -# with open(self.outfile_path_name_ext, "a") as fout: -# fout.writelines([f"{heliostat_name},{peak_flux_pixel_str},{pixels_offset_str}"]) - - if __name__ == "__main__": import argparse diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py index 309515d37..f0dd39fa7 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py @@ -75,16 +75,6 @@ def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAn indir = expdir + "/raw_images" outdir = expdir + "/processed_images" - # ft.create_directories_if_necessary(indir) - # 
ft.delete_files_in_directory(indir, "*") - - # dirnames = ft.files_in_directory(expdir, files_only=False) - # dirnames = list(filter(lambda s: s not in ["raw_images", "processed_images"], dirnames)) - # for dirname in dirnames: - # fromdir = expdir + "/" + dirname + "/Raw Images" - # for filename in ft.files_in_directory(fromdir): - # ft.copy_file(fromdir + "/" + filename, indir, filename) - x1, y1, x2, y2 = 120, 29, 1526, 1158 x1, y1 = x1 + 20, y1 + 20 x2, y2 = x2 - 20, y2 - 20 From 66631a9defa7238f169ffac0d5f7fae26891d5a5 Mon Sep 17 00:00:00 2001 From: bbean Date: Wed, 24 Apr 2024 11:35:34 -0600 Subject: [PATCH 26/32] add addition comments --- .../image_processor/AnnotationImageProcessor.py | 5 +++++ .../image_processor/NullImageSubtractionImageProcessor.py | 7 +++++++ 2 files changed, 12 insertions(+) diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/AnnotationImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/AnnotationImageProcessor.py index 23507aad5..956f967a8 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/AnnotationImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/AnnotationImageProcessor.py @@ -16,6 +16,11 @@ class AnnotationImageProcessor(AbstractSpotAnalysisImagesProcessor): + """ + Draws annotations on top of the input image. The annotations drawn are those in operable.given_fiducials and + operable.found_fiducials. 
+ """ + def __init__(self): super().__init__(self.__class__.__name__) diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/NullImageSubtractionImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/NullImageSubtractionImageProcessor.py index 025067124..5f421e68e 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/NullImageSubtractionImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/NullImageSubtractionImageProcessor.py @@ -12,6 +12,13 @@ class NullImageSubtractionImageProcessor(AbstractSpotAnalysisImagesProcessor): + """ + Subtracts the NULL supporting image from the primary image, if there is an associated NULL image. + + Suggested use is by either assigning supporting images manually, or by using the + SupportingImagesCollectorImageProcessor. + """ + def __init__(self): super().__init__(self.__class__.__name__) From 0a7a5e511e969db1e81d0b6d7082140f39d8c559 Mon Sep 17 00:00:00 2001 From: bbean Date: Fri, 26 Apr 2024 14:20:35 -0600 Subject: [PATCH 27/32] updating docstrings as suggested in PR --- opencsp/common/lib/cv/AbstractFiducials.py | 14 +++++--- opencsp/common/lib/cv/CacheableImage.py | 33 ++++++++++--------- opencsp/common/lib/cv/SpotAnalysis.py | 23 ++++++------- .../lib/cv/spot_analysis/ImagesStream.py | 29 ++++++++-------- .../spot_analysis/SpotAnalysisImagesStream.py | 13 ++++---- .../AbstractAggregateImageProcessor.py | 30 +++++++++-------- .../AverageByGroupImageProcessor.py | 9 +++-- .../BcsLocatorImageProcessor.py | 10 +++--- .../ConvolutionImageProcessor.py | 10 +++--- .../image_processor/CroppingImageProcessor.py | 5 +-- .../image_processor/EchoImageProcessor.py | 4 +++ .../ExposureDetectionImageProcessor.py | 20 ++++++----- .../FalseColorImageProcessor.py | 9 +++-- .../PopulationStatisticsImageProcessor.py | 20 ++++++----- ...SupportingImagesCollectorImageProcessor.py | 30 +++++++++++------ opencsp/common/lib/geometry/Vxy.py | 5 +++ opencsp/common/lib/geometry/Vxyz.py | 6 ++++ 17 
files changed, 162 insertions(+), 108 deletions(-) diff --git a/opencsp/common/lib/cv/AbstractFiducials.py b/opencsp/common/lib/cv/AbstractFiducials.py index c99a6ecb8..ee6e7e8ed 100644 --- a/opencsp/common/lib/cv/AbstractFiducials.py +++ b/opencsp/common/lib/cv/AbstractFiducials.py @@ -14,19 +14,23 @@ class AbstractFiducials(ABC): + """ + A collection of markers (such as an ArUco board) that is used to orient the camera relative to observed objects + in the scene. It is suggested that each implementing class be paired with a complementary locator method or + SpotAnalysisImageProcessor. + """ + def __init__(self, style=None, pixels_to_meters: Callable[[p2.Pxy], v3.Vxyz] = None): """ - A collection of markers (such as an ArUco board) that is used to orient the camera relative to observed objects - in the scene. It is suggested that each implementing class be paired with a complementary locator method or - SpotAnalysisImageProcessor. - Parameters ---------- style : RenderControlPointSeq, optional How to render this fiducial when using the defaul render_to_plot() method. By default rcps.default(). pixels_to_meters : Callable[[p2.Pxy], v3.Vxyz], optional Conversion function to get the physical point in space for the given x/y position information. Used in the - default self.scale implementation. Defaults to 1 meter per pixel. + default self.scale implementation. A good implementation of this function will correct for many factors such + as relative camera position and camera distortion. For extreme accuracy, this will also account for + non-uniformity in the target surface. Defaults to a simple 1 meter per pixel model. 
""" self.style = style if style is not None else rcps.default() self.pixels_to_meters = pixels_to_meters diff --git a/opencsp/common/lib/cv/CacheableImage.py b/opencsp/common/lib/cv/CacheableImage.py index 3993a734a..d8a558b1a 100644 --- a/opencsp/common/lib/cv/CacheableImage.py +++ b/opencsp/common/lib/cv/CacheableImage.py @@ -11,22 +11,25 @@ class CacheableImage: - def __init__(self, array: np.ndarray = None, cache_path: str = None, source_path: str = None): - """An image container that allows for caching an image when the image - data isn't in use, or for retrieval of an image from the cached file - when the data is in use. - - Only one of the inputs (image, cache_path, source_path) are required. - However, if the image doesn't exist as a cache file (.npy) but does - exists as an image file (.png), then both the cache_path and source_path - can be provided. In this case the image will be loaded from the - source_path and when cached will be saved to the cache_path. - - The intended use for this class is to reduce memory usage by caching - images to disk while not in use. Therefore, there is an inherent - priority order for the data that is returned from various methods: - (1) in-memory array, (2) numpy cache file, (3) image source file. + """ + An image container that allows for caching an image when the image + data isn't in use, or for retrieval of an image from the cached file + when the data is in use. + + Only one of the inputs (image, cache_path, source_path) are required. + However, if the image doesn't exist as a cache file (.npy) but does + exists as an image file (.png), then both the cache_path and source_path + can be provided. In this case the image will be loaded from the + source_path and when cached will be saved to the cache_path. + + The intended use for this class is to reduce memory usage by caching + images to disk while not in use. 
Therefore, there is an inherent + priority order for the data that is returned from various methods: + (1) in-memory array, (2) numpy cache file, (3) image source file. + """ + def __init__(self, array: np.ndarray = None, cache_path: str = None, source_path: str = None): + """ Parameters ---------- array: np.ndarray, optional diff --git a/opencsp/common/lib/cv/SpotAnalysis.py b/opencsp/common/lib/cv/SpotAnalysis.py index a76129965..3e4a06c60 100644 --- a/opencsp/common/lib/cv/SpotAnalysis.py +++ b/opencsp/common/lib/cv/SpotAnalysis.py @@ -111,9 +111,18 @@ class SpotAnalysis(Iterator[tuple[SpotAnalysisOperable]]): - multiple primary images, or a primary video (u,x) (TODO) - fiducial definition and location (v) (TODO) - manual 3D point identification from 2D images (w) (TODO) + """ - Parameters - ---------- + def __init__( + self, + name: str, + image_processors: list[asaip.AbstractSpotAnalysisImagesProcessor], + save_dir: str = None, + save_overwrite=False, + ): + """ + Parameters + ---------- name: str The name of this instance. For example, this could be one of the use cases listed above. @@ -126,15 +135,7 @@ class SpotAnalysis(Iterator[tuple[SpotAnalysisOperable]]): save_overwrite: bool If True, then overwrite any existing images in the save_dir with the new output. Defaults to False. - """ - - def __init__( - self, - name: str, - image_processors: list[asaip.AbstractSpotAnalysisImagesProcessor], - save_dir: str = None, - save_overwrite=False, - ): + """ self.name = name """ The name of this instance. For example, this could be one of the use cases listed above. """ diff --git a/opencsp/common/lib/cv/spot_analysis/ImagesStream.py b/opencsp/common/lib/cv/spot_analysis/ImagesStream.py index b681f77ea..9f6c2402f 100644 --- a/opencsp/common/lib/cv/spot_analysis/ImagesStream.py +++ b/opencsp/common/lib/cv/spot_analysis/ImagesStream.py @@ -18,6 +18,21 @@ def __next__(self): class ImagesStream(Iterator[CacheableImage]): + """ + A one-time iterator over a list of images. 
+ + Iterates over the given images. The next() method returns the next item + from the input list. + + Note that calling iter() on this instance DOES NOT restart iteration. + This is to maintain the interface for streamable image sources, such as + webcams or networked cameras, which have no replay ability. + + Note that this does NOT build on the python asyncio stream library for + networking. Such a class might be implemented later and will likely be + called "ImagesStreamOverIP". + """ + def __init__( self, images: ( @@ -27,19 +42,7 @@ def __init__( | Iterator[str | CacheableImage] ), ): - """A one-time iterator over a list of images. - - Iterates over the given images. The next() method returns the next item - from the input list. - - Note that calling iter() on this instance DOES NOT restart iteration. - This is to maintain the interface for streamable image sources, such as - webcams or networked cameras, which have no replay ability. - - Note that this does NOT build on the python asyncio stream library for - networking. Such a class might be implemented later and will likely be - called "ImagesStreamOverIP". - + """ Parameters ---------- images : Callable[[int],CacheableImage] | list[str|CacheableImage] | vh.VideoHandler | Iterator[str|CacheableImage] diff --git a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisImagesStream.py b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisImagesStream.py index 2a9751959..5f1c853fc 100644 --- a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisImagesStream.py +++ b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisImagesStream.py @@ -24,18 +24,19 @@ def __lt__(self, other): class SpotAnalysisImagesStream(Iterator[dict[ImageType, CacheableImage]]): - tt.strict_types + """ + This class combines the image streams for several ImageTypes into + one convenient package. This helps to guarantee that images that are + supposed to be processed together stay together for the entirety of the + SpotAnalysis pipeline. 
+ """ def __init__( self, primary_iterator: ImagesIterable | ImagesStream, other_iterators: dict[ImageType, ImagesIterable | ImagesStream] = None, ): - """This class combines the image streams for several ImageTypes into - one convenient package. This helps to guarantee that images that are - supposed to be processed together stay together for the entirety of the - SpotAnalysis pipeline. - + """ Parameters ---------- primary_iterator : ImagesIterable | ImagesStream diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractAggregateImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractAggregateImageProcessor.py index ae6b2a7c9..cd3a237e8 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractAggregateImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/AbstractAggregateImageProcessor.py @@ -12,6 +12,22 @@ class AbstractAggregateImageProcessor(AbstractSpotAnalysisImagesProcessor, ABC): + """ + Detects and collects images that are part of the same group, so that they can be acted upon all at the same time. + + Each operator is assigned to an image group. Groups are determined by the images_group_assigner function. Any + function with the correct signature can be used, or one of the builtin methods can be assigned. The builtin + methods for this include AbstractAggregateImageProcessor.*, where "*" is one of: + + - group_by_brightness: groups are determined by the brightest pixel in the image + - group_by_name: all images with the same name match are included as part of the same group + + When the assigned group number for the current operator is different than for the previous operator, the group + execution is triggered. _execute_aggregate() will be called for the entire group, and afterwards the group's + list will be cleared. The trigger behavior can be changed by providing a value for the group_execution_trigger + parameter. 
+ """ + def __init__( self, images_group_assigner: Callable[[SpotAnalysisOperable], int], @@ -20,20 +36,6 @@ def __init__( **kwargs, ): """ - Detects and collects images that are part of the same group, so that they can be acted upon all at the same time. - - Each operator is assigned to an image group. Groups are determined by the images_group_assigner function. Any - function with the correct signature can be used, or one of the builtin methods can be assigned. The builtin - methods for this include AbstractAggregateImageProcessor.*, where "*" is one of: - - - group_by_brightness: groups are determined by the brightest pixel in the image - - group_by_name: all images with the same name match are included as part of the same group - - When the assigned group number for the current operator is different than for the previous operator, the group - execution is triggered. _execute_aggregate() will be called for the entire group, and afterwards the group's - list will be cleared. The trigger behavior can be changed by providing a value for the group_execution_trigger - parameter. - Parameters ---------- images_group_assigner : Callable[[SpotAnalysisOperable], int] diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/AverageByGroupImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/AverageByGroupImageProcessor.py index 061ca24ac..54a19b5ff 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/AverageByGroupImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/AverageByGroupImageProcessor.py @@ -12,6 +12,12 @@ class AverageByGroupImageProcessor(AbstractAggregateImageProcessor): + """ + Averages the values from groups of images into a single image. All images must have the same shape. 
+ + See AbstractAggregateImageProcessor for more information + """ + def __init__( self, images_group_assigner: Callable[[SpotAnalysisOperable], int], @@ -19,9 +25,6 @@ def __init__( *vargs, **kwargs, ): - """ - Averages the values from groups of images into a single image. All images must have the same shape. - """ super().__init__(images_group_assigner, group_execution_trigger, *vargs, **kwargs) def _execute_aggregate( diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/BcsLocatorImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/BcsLocatorImageProcessor.py index c3eec92a9..5efa093e4 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/BcsLocatorImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/BcsLocatorImageProcessor.py @@ -22,12 +22,14 @@ class BcsLocatorImageProcessor(AbstractSpotAnalysisImagesProcessor): - def __init__(self, min_radius_px=30, max_radius_px=150): - """ - Locates the BCS by identifying a circle in the image. + """ + Locates the BCS by identifying a circle in the image. - It is recommended this this processor be used after ConvolutionImageProcessor(kernel='gaussian'). + It is recommended this this processor be used after ConvolutionImageProcessor(kernel='gaussian'). 
+ """ + def __init__(self, min_radius_px=30, max_radius_px=150): + """ Parameters ---------- min_radius_px : int, optional diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/ConvolutionImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/ConvolutionImageProcessor.py index 874fe4da0..16c2fe451 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/ConvolutionImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/ConvolutionImageProcessor.py @@ -15,12 +15,14 @@ class ConvolutionImageProcessor(AbstractSpotAnalysisImagesProcessor): - def __init__(self, kernel="gaussian", diameter=3): - """ - Convolves an image by the given kernel + """ + Convolves an image by the given kernel - Example use cases include reducing the effects of noise, and finding the average value for a larger area. + Example use cases include reducing the effects of noise, and finding the average value for a larger area. + """ + def __init__(self, kernel="gaussian", diameter=3): + """ Parameters ---------- kernel : str, optional diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py index f0dd39fa7..23c34b66f 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py @@ -11,10 +11,11 @@ class CroppingImageProcessor(AbstractSpotAnalysisImagesProcessor): + """ + Crops all input images to the given shape. If the input image is too small, then an error will be thrown. + """ def __init__(self, x1: int, x2: int, y1: int, y2: int): """ - Crops all input images to the given shape. If the input image is too small, then an error will be thrown. 
- Parameters ---------- x1 : int diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/EchoImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/EchoImageProcessor.py index 7eb35c134..1be3cb5f7 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/EchoImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/EchoImageProcessor.py @@ -6,6 +6,10 @@ class EchoImageProcessor(AbstractSpotAnalysisImagesProcessor): + """ + Prints the image names to the console as they are encountered. + """ + def __init__(self, log_level=lt.log.INFO, prefix=""): super().__init__(self.__class__.__name__) diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/ExposureDetectionImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/ExposureDetectionImageProcessor.py index 3a4a1ea09..6ce7594ec 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/ExposureDetectionImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/ExposureDetectionImageProcessor.py @@ -10,6 +10,17 @@ class ExposureDetectionImageProcessor(AbstractSpotAnalysisImagesProcessor): + """ + Detects over and under exposure in images and adds the relavent tag to the image. + + Over or under exposure is determined by the proportion of pixels that are at near the max_pixel_value threshold. + If more pixels than the over exposure limit is at the maximum level, then the image is considered over exposed. If + more pixels than the under exposure limit is below the under_exposure_threshold, then the image is considered under + exposed. + + For color images, the proportion of pixels across all color channels is used. + """ + def __init__( self, under_exposure_limit=0.99, @@ -19,15 +30,6 @@ def __init__( log_level=lt.log.WARN, ): """ - Detects over and under exposure in images and adds the relavent tag to the image. 
- - Over or under exposure is determined by the proportion of pixels that are at near the max_pixel_value threshold. - If more pixels than the over exposure limit is at the maximum level, then the image is considered over exposed. If - more pixels than the under exposure limit is below the under_exposure_threshold, then the image is considered under - exposed. - - For color images, the proportion of pixels across all color channels is used. - Parameters ---------- under_exposure_limit : float, optional diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/FalseColorImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/FalseColorImageProcessor.py index ecd5b4392..096bc3efc 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/FalseColorImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/FalseColorImageProcessor.py @@ -11,10 +11,13 @@ class FalseColorImageProcessor(AbstractSpotAnalysisImagesProcessor): - def __init__(self, map_type='human', opencv_map=cv2.COLORMAP_JET): - """Image processor to produce color gradient images from grayscale - images, for better contrast and legibility by humans. + """ + Image processor to produce color gradient images from grayscale + images, for better contrast and legibility by humans. 
+ """ + def __init__(self, map_type='human', opencv_map=cv2.COLORMAP_JET): + """ Parameters ---------- map_type : str, optional diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/PopulationStatisticsImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/PopulationStatisticsImageProcessor.py index 8ac9905c7..9ef1e69f6 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/PopulationStatisticsImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/PopulationStatisticsImageProcessor.py @@ -18,18 +18,20 @@ class _RollingWindowOperableStats: class PopulationStatisticsImageProcessor(AbstractSpotAnalysisImagesProcessor): - def __init__(self, min_pop_size=1, target_rolling_window_size=1, initial_min: int = None, initial_max: int = None): - """ - Generates statistics for groups of images. + """ + Generates statistics for groups of images. - A group of images is held until enough have been seen to generate statistics off of. Once the required number of - images has been reached, then images will start being released one at a time with the statistics for the group - up until that point. + A group of images is held until enough have been seen to generate statistics off of. Once the required number of + images has been reached, then images will start being released one at a time with the statistics for the group + up until that point. - Some use cases for this class could include automatically determining the maximum pixel value during streaming - to select an appropriate bit depth, using the rolling average for exposure calibration, or leveling all images - by subtracting the gloal pixel minimum. + Some use cases for this class could include automatically determining the maximum pixel value during streaming + to select an appropriate bit depth, using the rolling average for exposure calibration, or leveling all images + by subtracting the gloal pixel minimum. 
+ """ + def __init__(self, min_pop_size=1, target_rolling_window_size=1, initial_min: int = None, initial_max: int = None): + """ Parameters ---------- min_pop_size : int, optional diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/SupportingImagesCollectorImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/SupportingImagesCollectorImageProcessor.py index 3d8a4fa65..50117faaf 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/SupportingImagesCollectorImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/SupportingImagesCollectorImageProcessor.py @@ -16,6 +16,19 @@ def __init__(self, msg: str): class SupportingImagesCollectorImageProcessor(AbstractSpotAnalysisImagesProcessor): + """ + Collects primary and supporting images together from a stream of mixed images. + + The basic algorithm is pretty simple: + + 1. catagorize images based on the given supporting_images_map + 2. if the image type isn't already in the internal list, then add it and go back to step 1 + 3. collect all images in the internal list together as a single operable + 4. clear the internal list + 5. start a new internal list with the current image + 6. return the new operable, go back to step 1 + """ + def __init__( self, supporting_images_map: dict[ @@ -23,16 +36,13 @@ def __init__( ], ): """ - Collects primary and supporting images together from a stream of mixed images. - - The basic algorithm is pretty simple: - - 1. catagorize images based on the given supporting_images_map - 2. if the image type isn't already in the internal list, then add it and go back to step 1 - 3. collect all images in the internal list together as a single operable - 4. clear the internal list - 5. start a new internal list with the current image - 6. 
return the new operable, go back to step 1 + Parameters + ---------- + supporting_images_map : dict[ ImageType, Callable[[SpotAnalysisOperable, dict[ImageType, SpotAnalysisOperable]], bool] ] + How to categorize images. If + `supporting_images_map[ImageType.PRIMARY](operable, curr_mapped_images) == True` + then the image will be assigned as a primary image. Otherwise it will be grouped with another primary image + as a supporting image. """ super().__init__(self.__class__.__name__) diff --git a/opencsp/common/lib/geometry/Vxy.py b/opencsp/common/lib/geometry/Vxy.py index bdffa6d87..c43f29a1e 100644 --- a/opencsp/common/lib/geometry/Vxy.py +++ b/opencsp/common/lib/geometry/Vxy.py @@ -26,6 +26,11 @@ def __init__(self, data, dtype=float): print(vec.x) # [1. 4. 7.] print(vec.y) # [2. 5. 8.] + # or this equivalent method + xs = [1, 4 ,7] + ys = [2, 5, 8] + vecs = Vxy((xs, ys)) + Parameters ---------- data : array-like diff --git a/opencsp/common/lib/geometry/Vxyz.py b/opencsp/common/lib/geometry/Vxyz.py index d55f15e7e..87c2afc42 100644 --- a/opencsp/common/lib/geometry/Vxyz.py +++ b/opencsp/common/lib/geometry/Vxyz.py @@ -36,6 +36,12 @@ def __init__(self, data, dtype=float): print(vec.y) # [2. 5. 8.] print(vec.z) # [3. 6. 9.] 
+ # or this equivalent method + xs = [1, 4 ,7] + ys = [2, 5, 8] + zs = [3, 6, 9] + vecs = Vxyz((xs, ys, zs)) + Parameters ---------- data : array-like From 920663b6e78e56d28fb94e661f4b447176169ced Mon Sep 17 00:00:00 2001 From: bbean Date: Fri, 26 Apr 2024 14:33:08 -0600 Subject: [PATCH 28/32] better comments and code order for SupportingImagesCollectorImageProcessor --- ...SupportingImagesCollectorImageProcessor.py | 67 ++++++++++--------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/SupportingImagesCollectorImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/SupportingImagesCollectorImageProcessor.py index 50117faaf..1dc7479e3 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/SupportingImagesCollectorImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/SupportingImagesCollectorImageProcessor.py @@ -76,7 +76,7 @@ def _update_collection(self) -> SpotAnalysisOperable: ) self.prev_image_types = image_types - # 3.2. We have a primary, create the new operable + # 3.3. We have a primary, turn the collection into a new operable lt.debug( "In SupportingImagesCollectorImageProcessor._update_collection(): " + f"collecting images {sorted(list(self.collection.keys()))}" @@ -102,9 +102,38 @@ def _execute(self, curr_operable: SpotAnalysisOperable, is_last: bool) -> list[S + f"unable to determine image type for operable {curr_operable.primary_image_source_path} ({curr_operable})", ) - # Handle is_last edge case - if is_last: - # add this operable to the collection, but first check if there's room in the collection + if not is_last: + # 2. If this image type isn't already in the collection, then add it and go back to step 1. + if curr_image_type not in self.collection: + self.collection[curr_image_type] = curr_operable + return [] + + # Otherwise there is a duplicate. + else: + # 3. 
Collect all images together into a new operable + try: + new_operable = self._update_collection() + except NoPrimaryImageException as ex: + lt.warning(repr(ex)) + lt.warning( + "Warning in SupportingImagesCollectorImageProcessor._update_collection(): " + + "no PRIMARY image is available, so we can't create new a new operable. " + + f"Removing {curr_image_type} '{self.collection[curr_image_type].primary_image_source_path}' and replacing it with '{curr_operable.primary_image_source_path}'" + ) + self.collection[curr_image_type] = curr_operable + return [] + + # 4. Clear the collection + self.collection.clear() + + # 5. Start a new collection with the current operable + self.collection[curr_image_type] = curr_operable + + # 6. Return the new operable + return [new_operable] + + else: # Handle is_last edge case + # 2. add this operable to the collection, but first check if there's room in the collection if curr_image_type in self.collection: lt.warning( "Warning in SupportingImagesCollectorImageProcessor._update_collection(): " @@ -113,7 +142,7 @@ def _execute(self, curr_operable: SpotAnalysisOperable, is_last: bool) -> list[S ) self.collection[curr_image_type] = curr_operable - # update the collection + # 3. Collect all images together into a new operable try: new_operable = self._update_collection() except NoPrimaryImageException as ex: @@ -125,31 +154,3 @@ def _execute(self, curr_operable: SpotAnalysisOperable, is_last: bool) -> list[S # 6. Return the new operable return [new_operable] - - # 2. If this image type isn't already in the collection, then add it and continue. - elif curr_image_type not in self.collection: - self.collection[curr_image_type] = curr_operable - return [] - - # Otherwise there is a duplicate. 
- else: - try: - new_operable = self._update_collection() - except NoPrimaryImageException as ex: - lt.warning(repr(ex)) - lt.warning( - "Warning in SupportingImagesCollectorImageProcessor._update_collection(): " - + "no PRIMARY image is available, so we can't create new a new operable. " - + f"Removing {curr_image_type} '{self.collection[curr_image_type].primary_image_source_path}' and replacing it with '{curr_operable.primary_image_source_path}'" - ) - self.collection[curr_image_type] = curr_operable - return [] - - # 4. Clear the collection - self.collection.clear() - - # 5. Start a new collection with the current operable - self.collection[curr_image_type] = curr_operable - - # 6. Return the new operable - return [new_operable] From a6d74a51f6991d666fb21fe1479d9bb85c91df1e Mon Sep 17 00:00:00 2001 From: bbean Date: Fri, 26 Apr 2024 14:42:51 -0600 Subject: [PATCH 29/32] add descriptions for the various ImageTypes --- .../common/lib/cv/spot_analysis/SpotAnalysisImagesStream.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisImagesStream.py b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisImagesStream.py index 5f1c853fc..ac7f3b2e7 100644 --- a/opencsp/common/lib/cv/spot_analysis/SpotAnalysisImagesStream.py +++ b/opencsp/common/lib/cv/spot_analysis/SpotAnalysisImagesStream.py @@ -12,10 +12,15 @@ @functools.total_ordering class ImageType(Enum): PRIMARY = 1 + """ The image we are trying to analyze. """ REFERENCE = 2 + """ Contains a pattern to be compared or matched with in the PRIMARY image. """ NULL = 3 + """ The same as the PRIMARY image, but without a beam on target. Likely this will be used to subtract out the background. """ COMPARISON = 4 + """ For multi-image comparison, such as for re-alignment to a previous position, motion characterization, or measuring wind effect. 
""" BACKGROUND_MASK = 5 + """ A boolean image that indicates which pixels should be included in a computation (True to include, False to exclude). """ def __lt__(self, other): if isinstance(other, self.__class__): From c75a80b903bc0652760aced203bab94e00556ccd Mon Sep 17 00:00:00 2001 From: bbean Date: Fri, 26 Apr 2024 14:59:14 -0600 Subject: [PATCH 30/32] better AbstractFiducial.orientation description --- opencsp/common/lib/cv/AbstractFiducials.py | 28 ++++++++++++++++--- .../common/lib/cv/fiducials/BcsFiducial.py | 5 ++-- .../common/lib/cv/fiducials/PointFiducials.py | 6 ++-- 3 files changed, 30 insertions(+), 9 deletions(-) diff --git a/opencsp/common/lib/cv/AbstractFiducials.py b/opencsp/common/lib/cv/AbstractFiducials.py index ee6e7e8ed..4c14a9774 100644 --- a/opencsp/common/lib/cv/AbstractFiducials.py +++ b/opencsp/common/lib/cv/AbstractFiducials.py @@ -4,6 +4,7 @@ import matplotlib.axes import matplotlib.pyplot as plt import numpy as np +import scipy.spatial import opencsp.common.lib.geometry.Pxy as p2 import opencsp.common.lib.geometry.RegionXY as reg @@ -46,10 +47,29 @@ def origin(self) -> p2.Pxy: @property @abstractmethod - def orientation(self) -> v3.Vxyz: - """The orientation(s) of this instance, in radians. This is relative to - the source image, where x is positive to the right, y is positive down, - and z is positive in (away from the camera).""" + def orientation(self) -> scipy.spatial.transform.Rotation: + """ + The orientation of the normal vector(s) of this instance. + This is relative to the orthorectified source image, where x is positive + to the right, y is positive down, and z is positive in (away from the + camera). + + This can be used to describe the forward transformation from the + camera's perspective. 
For example, an aruco marker whose origin is in + the center of the image and is facing towards the camera could have the + orientation:: + + Rotation.from_euler('y', np.pi) + + If that same aruco marker was also placed upside down, then it's + orientation could be:: + + Rotation.from_euler( + 'yz', + [ [np.pi, 0], + [0, np.pi] ] + ) + """ @property @abstractmethod diff --git a/opencsp/common/lib/cv/fiducials/BcsFiducial.py b/opencsp/common/lib/cv/fiducials/BcsFiducial.py index a43905081..0ecb25854 100644 --- a/opencsp/common/lib/cv/fiducials/BcsFiducial.py +++ b/opencsp/common/lib/cv/fiducials/BcsFiducial.py @@ -1,5 +1,6 @@ import matplotlib.axes import matplotlib.patches +import scipy.spatial from opencsp.common.lib.cv.AbstractFiducials import AbstractFiducials import opencsp.common.lib.geometry.LoopXY as loop @@ -42,8 +43,8 @@ def origin(self) -> p2.Pxy: return self.origin_px @property - def orientation(self) -> v3.Vxyz: - return v3.Vxyz([0, 0, 0]) + def orientation(self) -> scipy.spatial.transform.Rotation: + raise NotImplementedError("Orientation is not yet implemented for PointFiducials") @property def size(self) -> list[float]: diff --git a/opencsp/common/lib/cv/fiducials/PointFiducials.py b/opencsp/common/lib/cv/fiducials/PointFiducials.py index 87061c38a..e4b378c6f 100644 --- a/opencsp/common/lib/cv/fiducials/PointFiducials.py +++ b/opencsp/common/lib/cv/fiducials/PointFiducials.py @@ -1,4 +1,5 @@ import numpy as np +import scipy.spatial from opencsp.common.lib.cv.AbstractFiducials import AbstractFiducials import opencsp.common.lib.geometry.Vxyz as v3 @@ -23,9 +24,8 @@ def origin(self) -> p2.Pxy: return self.points @property - def orientation(self) -> v3.Vxyz: - # TODO untested - return np.zeros((3, self.points.x.size)) + def orientation(self) -> scipy.spatial.transform.Rotation: + raise NotImplementedError("Orientation is not yet implemented for PointFiducials") @property def size(self) -> list[float]: From c80b85b7630fe3cad2f2f0d6a15a9cf277232010 Mon Sep 
17 00:00:00 2001 From: bbean Date: Fri, 26 Apr 2024 15:18:11 -0600 Subject: [PATCH 31/32] formatting --- .../cv/spot_analysis/image_processor/CroppingImageProcessor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py index 23c34b66f..55b004e7c 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py @@ -14,6 +14,7 @@ class CroppingImageProcessor(AbstractSpotAnalysisImagesProcessor): """ Crops all input images to the given shape. If the input image is too small, then an error will be thrown. """ + def __init__(self, x1: int, x2: int, y1: int, y2: int): """ Parameters From 918a09b0d320be894ca8abe65e8010ea0be6bf31 Mon Sep 17 00:00:00 2001 From: bbean Date: Fri, 24 May 2024 16:08:25 -0600 Subject: [PATCH 32/32] better method names and descriptions, to match the comments on PR #87 --- opencsp/common/lib/cv/AbstractFiducials.py | 15 +++++++++------ opencsp/common/lib/cv/fiducials/BcsFiducial.py | 4 ++-- opencsp/common/lib/cv/fiducials/PointFiducials.py | 2 +- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/opencsp/common/lib/cv/AbstractFiducials.py b/opencsp/common/lib/cv/AbstractFiducials.py index 4c14a9774..b1ee64072 100644 --- a/opencsp/common/lib/cv/AbstractFiducials.py +++ b/opencsp/common/lib/cv/AbstractFiducials.py @@ -47,28 +47,31 @@ def origin(self) -> p2.Pxy: @property @abstractmethod - def orientation(self) -> scipy.spatial.transform.Rotation: + def rotation(self) -> scipy.spatial.transform.Rotation: """ - The orientation of the normal vector(s) of this instance. - This is relative to the orthorectified source image, where x is positive + The pointing of the normal vector(s) of this instance. 
+ This is relative to the camera's reference frame, where x is positive to the right, y is positive down, and z is positive in (away from the camera). This can be used to describe the forward transformation from the camera's perspective. For example, an aruco marker whose origin is in the center of the image and is facing towards the camera could have the - orientation:: + rotation:: Rotation.from_euler('y', np.pi) If that same aruco marker was also placed upside down, then it's - orientation could be:: + rotation could be:: Rotation.from_euler( 'yz', [ [np.pi, 0], [0, np.pi] ] ) + + Not that this just describes rotation, and not the translation. We call + the rotation and translation together the orientation. """ @property @@ -83,7 +86,7 @@ def size(self) -> list[float]: def scale(self) -> list[float]: """ The scale(s) of this fiducial, in meters, relative to its longest axis. - This can be used to determine the distance and orientation of the + This can be used to determine the distance and rotation of the fiducial relative to the camera. 
""" ret = [] diff --git a/opencsp/common/lib/cv/fiducials/BcsFiducial.py b/opencsp/common/lib/cv/fiducials/BcsFiducial.py index 0ecb25854..ce350d97a 100644 --- a/opencsp/common/lib/cv/fiducials/BcsFiducial.py +++ b/opencsp/common/lib/cv/fiducials/BcsFiducial.py @@ -43,8 +43,8 @@ def origin(self) -> p2.Pxy: return self.origin_px @property - def orientation(self) -> scipy.spatial.transform.Rotation: - raise NotImplementedError("Orientation is not yet implemented for PointFiducials") + def rotation(self) -> scipy.spatial.transform.Rotation: + raise NotImplementedError("rotation is not yet implemented for PointFiducials") @property def size(self) -> list[float]: diff --git a/opencsp/common/lib/cv/fiducials/PointFiducials.py b/opencsp/common/lib/cv/fiducials/PointFiducials.py index e4b378c6f..d0d41c6ea 100644 --- a/opencsp/common/lib/cv/fiducials/PointFiducials.py +++ b/opencsp/common/lib/cv/fiducials/PointFiducials.py @@ -24,7 +24,7 @@ def origin(self) -> p2.Pxy: return self.points @property - def orientation(self) -> scipy.spatial.transform.Rotation: + def rotation(self) -> scipy.spatial.transform.Rotation: raise NotImplementedError("Orientation is not yet implemented for PointFiducials") @property