diff --git a/plugins/python-codecs/.vscode/settings.json b/plugins/python-codecs/.vscode/settings.json index 7e36f0cd8f..013506b55b 100644 --- a/plugins/python-codecs/.vscode/settings.json +++ b/plugins/python-codecs/.vscode/settings.json @@ -19,5 +19,9 @@ "scrypted.pythonRemoteRoot": "${config:scrypted.serverRoot}/volume/plugin.zip", "python.analysis.extraPaths": [ "./node_modules/@scrypted/sdk/types/scrypted_python" - ] + ], + "[python]": { + "editor.defaultFormatter": "ms-python.black-formatter" + }, + "python.formatting.provider": "none" } \ No newline at end of file diff --git a/plugins/python-codecs/src/gstreamer.py b/plugins/python-codecs/src/gstreamer.py index d1eae54adb..1c1915ad95 100644 --- a/plugins/python-codecs/src/gstreamer.py +++ b/plugins/python-codecs/src/gstreamer.py @@ -1,140 +1,125 @@ -from gst_generator import createPipelineIterator, Gst -from gstreamer_postprocess import GstreamerPostProcess, AppleMediaPostProcess, VaapiPostProcess -from util import optional_chain -import scrypted_sdk +import platform +from asyncio import Future from typing import Any from urllib.parse import urlparse -import vipsimage -import pilimage -import platform -from generator_common import createVideoFrame, createImageMediaObject -from typing import Tuple -import copy -def getBands(caps): - capsFormat = caps.get_structure(0).get_value('format') +import scrypted_sdk - if capsFormat == 'RGB': - return 3 - elif capsFormat == 'RGBA': - return 4 - elif capsFormat == 'NV12' or capsFormat == 'I420': - return 1 +import pilimage +import vipsimage +from generator_common import createImageMediaObject, createVideoFrame +from gst_generator import Gst, createPipelineIterator +from gstreamer_postprocess import ( + GstreamerFormatPostProcess, + GstreamerPostProcess, + OpenGLPostProcess, + VaapiPostProcess, + getBands, +) +from util import optional_chain - raise Exception(f'unknown pixel format, please report this bug to @koush on Discord {capsFormat}') class GstSession: def __init__(self, gst) -> None: self.gst = gst self.reuse = [] + class GstImage(scrypted_sdk.Image): def __init__(self, gst: GstSession, sample, postProcessPipeline: str): super().__init__() caps = sample.get_caps() - self.width = caps.get_structure(0).get_value('width') - self.height = caps.get_structure(0).get_value('height') + self.width = caps.get_structure(0).get_value("width") + self.height = caps.get_structure(0).get_value("height") self.gst = gst self.sample = sample self.postProcessPipeline = postProcessPipeline + self.cached: Future[scrypted_sdk.Image] = None async def close(self): self.sample = None async def toImage(self, options: scrypted_sdk.ImageOptions = None): - copyOptions: scrypted_sdk.ImageOptions = None - needPostProcess = False - if not self.postProcessPipeline: - copyOptions = copy.deepcopy(options) - if options: - if options.get('crop') or options.get('resize'): - needPostProcess = True - options['crop'] = None - options['resize'] = None - - gstsample = await toGstSample(self.gst, self.sample, options, self.postProcessPipeline) + options = options or {} + options["format"] = "rgb" + + gstsample = await toGstSample( + self.gst, self.sample, options, self.postProcessPipeline + ) caps = gstsample.get_caps() + height = caps.get_structure(0).get_value("height") + width = caps.get_structure(0).get_value("width") capsBands = getBands(caps) - height = caps.get_structure(0).get_value('height') - width = caps.get_structure(0).get_value('width') gst_buffer = gstsample.get_buffer() result, info = 
gst_buffer.map(Gst.MapFlags.READ) if not result: - raise Exception('unable to map gst buffer') + raise Exception("unable to map gst buffer") try: if vipsimage.pyvips: - vips = vipsimage.new_from_memory(bytes(info.data), width, height, capsBands) + vips = vipsimage.new_from_memory( + bytes(info.data), width, height, capsBands + ) image = vipsimage.VipsImage(vips) else: - pil = pilimage.new_from_memory(bytes(info.data), width, height, capsBands) + pil = pilimage.new_from_memory( + bytes(info.data), width, height, capsBands + ) image = pilimage.PILImage(pil) - if needPostProcess: - image = await image.toImage(copyOptions) return await createImageMediaObject(image) finally: gst_buffer.unmap(info) - + async def toBuffer(self, options: scrypted_sdk.ImageOptions = None): - format = options and options.get('format') - if format == 'rgb': + format = options and options.get("format") + if format == "rgb": bands = 3 - elif format == 'rgba': + elif format == "rgba": bands = 4 - elif format == 'gray': + elif format == "gray": bands = 1 - elif format == 'jpg': + elif format == "jpg": bands = 0 else: - raise Exception(f'invalid output format {format}') - - copyOptions: scrypted_sdk.ImageOptions = None - needPostProcess = False - if not self.postProcessPipeline: - copyOptions = copy.deepcopy(options) - if options: - if options.get('crop') or options.get('resize'): - needPostProcess = True - options['crop'] = None - options['resize'] = None - - gstsample = await toGstSample(self.gst, self.sample, options, self.postProcessPipeline) + raise Exception(f"invalid output format {format}") + + gstsample = await toGstSample( + self.gst, self.sample, options, self.postProcessPipeline + ) caps = gstsample.get_caps() - height = caps.get_structure(0).get_value('height') - width = caps.get_structure(0).get_value('width') - capsFormat = caps.get_structure(0).get_value('format') - - if capsFormat == 'RGB': - capsBands = 3 - elif capsFormat == 'RGBA': - capsBands = 4 - elif capsFormat == 'NV12' or capsFormat == 'I420': + height = caps.get_structure(0).get_value("height") + width = caps.get_structure(0).get_value("width") + # toGstSample may return the I420/NV12 image if there + # is no transformation necessary. ie, a low res stream being used + # for motion detection. + if format == 'gray' and self.sample == gstsample: capsBands = 1 else: - raise Exception(f'unknown pixel format, please report this bug to @koush on Discord {capsFormat}') + capsBands = getBands(caps) gst_buffer = gstsample.get_buffer() result, info = gst_buffer.map(Gst.MapFlags.READ) if not result: - raise Exception('unable to map gst buffer') + raise Exception("unable to map gst buffer") try: - # print("~~~~~~~~~SAMPLE", width, height) # pil = pilimage.new_from_memory(info.data, width, height, capsBands) # pil.convert('RGB').save('/server/volume/test.jpg') - # format may have been previously specified and known to caller? - - if not needPostProcess: - if not format: - return bytes(info.data) - - if format == 'gray' and capsBands == 1: + stridePadding = width % 4 + if stridePadding: + if capsBands != 1: + raise Exception( + f"found stride in conversion. this should not be possible. 
{caps.to_string()}" + ) + width += stridePadding + else: + if format == "gray" and capsBands == 1: buffer = bytes(info.data) - return buffer[0:width * height] - + return buffer[0 : width * height] + if bands == capsBands: buffer = bytes(info.data) return buffer @@ -146,39 +131,69 @@ async def toBuffer(self, options: scrypted_sdk.ImageOptions = None): pil = pilimage.new_from_memory(info.data, width, height, capsBands) image = pilimage.PILImage(pil) + crop = None + if stridePadding: + crop = { + "left": 0, + "top": 0, + "width": width - stridePadding, + "height": height, + } + + reformat = None + if bands and bands != capsBands: + reformat = format + + if reformat or crop: + colored = image + image = await image.toImageInternal( + { + "crop": crop, + "format": reformat, + } + ) + await colored.close() + try: - if not self.postProcessPipeline: - return await image.toBuffer(copyOptions) - else: - return await image.toBuffer({ - 'format': options and options.get('format'), - }) + return await image.toBuffer( + { + "format": format, + } + ) finally: await image.close() finally: gst_buffer.unmap(info) -async def createResamplerPipeline(sample, gst: GstSession, options: scrypted_sdk.ImageOptions, postProcessPipeline: str): + +async def createResamplerPipeline( + sample, + gst: GstSession, + options: scrypted_sdk.ImageOptions, + postProcessPipeline: str, +): if not sample: - raise Exception('Video Frame has been invalidated') - + raise Exception("Video Frame has been invalidated") + resize = None if options: - resize = options.get('resize') + resize = options.get("resize") if resize: - resize = (resize.get('width'), resize.get('height')) + resize = (resize.get("width"), resize.get("height")) for check in gst.reuse: if check.resize == resize: gst.reuse.remove(check) return check - if postProcessPipeline == 'VAAPI': + if postProcessPipeline == "VAAPI": pp = VaapiPostProcess() - elif postProcessPipeline == 'OpenGL (GPU memory)': - pp = AppleMediaPostProcess() - elif postProcessPipeline == 'OpenGL (system memory)': - pp = AppleMediaPostProcess() + elif postProcessPipeline == "OpenGL (GPU memory)": + pp = OpenGLPostProcess() + elif postProcessPipeline == "OpenGL (system memory)": + pp = OpenGLPostProcess() + elif postProcessPipeline == None: + pp = GstreamerFormatPostProcess() else: # trap the pipeline before it gets here. videocrop # in the pipeline seems to spam the stdout?? 
@@ -187,65 +202,71 @@ async def createResamplerPipeline(sample, gst: GstSession, options: scrypted_sdk caps = sample.get_caps() - srcCaps = caps.to_string().replace(' ', '') + srcCaps = caps.to_string().replace(" ", "") pipeline = f"appsrc name=appsrc emit-signals=True is-live=True caps={srcCaps}" await pp.create(gst.gst, pipeline) pp.resize = resize return pp -async def toGstSample(gst: GstSession, sample, options: scrypted_sdk.ImageOptions, postProcessPipeline: str) -> GstImage: + +async def toGstSample( + gst: GstSession, + sample, + options: scrypted_sdk.ImageOptions, + postProcessPipeline: str, +) -> GstImage: if not sample: - raise Exception('Video Frame has been invalidated') + raise Exception("Video Frame has been invalidated") if not options: return sample - - crop = options.get('crop') - resize = options.get('resize') - format = options.get('format') + + crop = options.get("crop") + resize = options.get("resize") + format = options.get("format") caps = sample.get_caps() - sampleWidth = caps.get_structure(0).get_value('width') - sampleHeight = caps.get_structure(0).get_value('height') - capsFormat = caps.get_structure(0).get_value('format') + sampleWidth = caps.get_structure(0).get_value("width") + sampleHeight = caps.get_structure(0).get_value("height") + capsFormat = caps.get_structure(0).get_value("format") # normalize format, eliminating it if possible - if format == 'jpg': + if format == "jpg": # get into a format suitable to be be handled by vips/pil - if capsFormat == 'RGB' or capsFormat == 'RGBA': - format = None + if capsFormat == "RGB" or capsFormat == "RGBA": + sinkFormat = None else: - format = 'RGBA' - elif format == 'rgb': - if capsFormat == 'RGB': - format = None + sinkFormat = "RGBA" + elif format == "rgb": + if capsFormat == "RGB": + sinkFormat = None else: - format = 'RGB' - elif format == 'rgba': - if capsFormat == 'RGBA': - format = None + sinkFormat = "RGB" + elif format == "rgba": + if capsFormat == "RGBA": + sinkFormat = None else: - format = 'RGBA' - elif format == 'gray': + sinkFormat = "RGBA" + elif format == "gray": # are there others? does the output format depend on GPU? 
# have only ever seen NV12 - if capsFormat == 'NV12' or capsFormat == 'I420': - format = None + if capsFormat == "NV12" or capsFormat == "I420" or capsFormat == "GRAY8": + sinkFormat = None else: - format = 'NV12' + sinkFormat = "GRAY8" elif format: - raise Exception(f'invalid output format {format}') + raise Exception(f"invalid output format {format}") - if not crop and not resize and not format: + if not crop and not resize and not sinkFormat: return sample pp = await createResamplerPipeline(sample, gst, options, postProcessPipeline) try: - pp.update(caps, (sampleWidth, sampleHeight), options, format) + pp.update(caps, (sampleWidth, sampleHeight), options) - appsrc = pp.gst.get_by_name('appsrc') - srcCaps = caps.to_string().replace(' ', '') - appsrc.set_property('caps', caps.from_string(srcCaps)) + appsrc = pp.gst.get_by_name("appsrc") + srcCaps = caps.to_string().replace(" ", "") + appsrc.set_property("caps", caps.from_string(srcCaps)) appsrc.emit("push-sample", sample) @@ -258,78 +279,101 @@ async def toGstSample(gst: GstSession, sample, options: scrypted_sdk.ImageOption return newSample + async def createGstMediaObject(image: GstImage): - ret = await scrypted_sdk.mediaManager.createMediaObject(image, scrypted_sdk.ScryptedMimeTypes.Image.value, { - 'format': None, - 'width': image.width, - 'height': image.height, - 'toBuffer': lambda options = None: image.toBuffer(options), - 'toImage': lambda options = None: image.toImage(options), - }) + ret = await scrypted_sdk.mediaManager.createMediaObject( + image, + scrypted_sdk.ScryptedMimeTypes.Image.value, + { + "format": None, + "width": image.width, + "height": image.height, + "toBuffer": lambda options=None: image.toBuffer(options), + "toImage": lambda options=None: image.toImage(options), + }, + ) return ret -async def generateVideoFramesGstreamer(mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None, h264Decoder: str = None, postProcessPipeline: str = None) -> scrypted_sdk.VideoFrame: - ffmpegInput: scrypted_sdk.FFmpegInput = await scrypted_sdk.mediaManager.convertMediaObjectToJSON(mediaObject, scrypted_sdk.ScryptedMimeTypes.FFmpegInput.value) - container = ffmpegInput.get('container', None) - pipeline = ffmpegInput.get('url') - videoCodec = optional_chain(ffmpegInput, 'mediaStreamOptions', 'video', 'codec') - if pipeline.startswith('tcp://'): +async def generateVideoFramesGstreamer( + mediaObject: scrypted_sdk.MediaObject, + options: scrypted_sdk.VideoFrameGeneratorOptions = None, + filter: Any = None, + h264Decoder: str = None, + postProcessPipeline: str = None, +) -> scrypted_sdk.VideoFrame: + ffmpegInput: scrypted_sdk.FFmpegInput = ( + await scrypted_sdk.mediaManager.convertMediaObjectToJSON( + mediaObject, scrypted_sdk.ScryptedMimeTypes.FFmpegInput.value + ) + ) + container = ffmpegInput.get("container", None) + pipeline = ffmpegInput.get("url") + videoCodec = optional_chain(ffmpegInput, "mediaStreamOptions", "video", "codec") + + if pipeline.startswith("tcp://"): parsed_url = urlparse(pipeline) - pipeline = 'tcpclientsrc port=%s host=%s' % ( - parsed_url.port, parsed_url.hostname) - if container == 'mpegts': - pipeline += ' ! tsdemux' - elif container == 'sdp': - pipeline += ' ! sdpdemux' + pipeline = "tcpclientsrc port=%s host=%s" % ( + parsed_url.port, + parsed_url.hostname, + ) + if container == "mpegts": + pipeline += " ! tsdemux" + elif container == "sdp": + pipeline += " ! 
sdpdemux" else: - raise Exception('unknown container %s' % container) - elif pipeline.startswith('rtsp'): - pipeline = 'rtspsrc buffer-mode=0 location=%s protocols=tcp latency=0' % pipeline - if videoCodec == 'h264': - pipeline += ' ! rtph264depay ! h264parse' + raise Exception("unknown container %s" % container) + elif pipeline.startswith("rtsp"): + pipeline = ( + "rtspsrc buffer-mode=0 location=%s protocols=tcp latency=0" % pipeline + ) + if videoCodec == "h264": + pipeline += " ! rtph264depay ! h264parse" decoder = None + def setDecoderClearDefault(value: str): nonlocal decoder decoder = value - if decoder == 'Default': + if decoder == "Default": decoder = None setDecoderClearDefault(None) - if videoCodec == 'h264': + if videoCodec == "h264": setDecoderClearDefault(h264Decoder) if not decoder: # hw acceleration is "safe" to use on mac, but not # on other hosts where it may crash. # defaults must be safe. - if platform.system() == 'Darwin': - decoder = 'vtdec_hw' + if platform.system() == "Darwin": + decoder = "vtdec_hw" else: - decoder = 'avdec_h264' + decoder = "avdec_h264" else: # decodebin may pick a hardware accelerated decoder, which isn't ideal # so use a known software decoder for h264 and decodebin for anything else. - decoder = 'decodebin' + decoder = "decodebin" - fps = options and options.get('fps', None) - videorate = '' + fps = options and options.get("fps", None) + videorate = "" if fps: - videorate = f'! videorate max-rate={fps}' - - if postProcessPipeline == 'VAAPI': - pipeline += f' ! {decoder} {videorate} ! queue leaky=downstream max-size-buffers=0' - elif postProcessPipeline == 'OpenGL (GPU memory)': - pipeline += f' ! {decoder} {videorate} ! queue leaky=downstream max-size-buffers=0 ! glupload' - elif postProcessPipeline == 'OpenGL (system memory)': - pipeline += f' ! {decoder} {videorate} ! queue leaky=downstream max-size-buffers=0 ! video/x-raw ! glupload' + videorate = f"! videorate max-rate={fps}" + + if postProcessPipeline == "VAAPI": + pipeline += ( + f" ! {decoder} {videorate} ! queue leaky=downstream max-size-buffers=0" + ) + elif postProcessPipeline == "OpenGL (GPU memory)": + pipeline += f" ! {decoder} {videorate} ! queue leaky=downstream max-size-buffers=0 ! glupload" + elif postProcessPipeline == "OpenGL (system memory)": + pipeline += f" ! {decoder} {videorate} ! queue leaky=downstream max-size-buffers=0 ! video/x-raw ! glupload" else: - pipeline += f' ! {decoder} ! video/x-raw {videorate} ! queue leaky=downstream max-size-buffers=0' + pipeline += f" ! avdec_h264 ! video/x-raw {videorate} ! 
queue leaky=downstream max-size-buffers=0" # disable the gstreamer post process because videocrop spams the log - # postProcessPipeline = 'Default' - postProcessPipeline = None + postProcessPipeline = "Default" + # postProcessPipeline = None print(pipeline) mo: scrypted_sdk.MediaObject = None diff --git a/plugins/python-codecs/src/gstreamer_postprocess.py b/plugins/python-codecs/src/gstreamer_postprocess.py index 8f21e01ed1..77079d60c3 100644 --- a/plugins/python-codecs/src/gstreamer_postprocess.py +++ b/plugins/python-codecs/src/gstreamer_postprocess.py @@ -2,9 +2,58 @@ from typing import Tuple from gst_generator import createPipelineIterator +def getCapsFormat(caps): + return caps.get_structure(0).get_value('format') + +def getBands(caps): + capsFormat = getCapsFormat(caps) + + if capsFormat == 'RGB': + return 3 + elif capsFormat == 'RGBA': + return 4 + elif capsFormat == 'GRAY8': + return 1 + + raise Exception(f'unknown pixel format, please report this bug to @koush on Discord {capsFormat}') + +def toCapsFormat(options: scrypted_sdk.ImageOptions): + format = options.get('format') + + if format == 'jpg': + return 'RGB' + elif format == 'rgb': + return 'RGB' + elif format == 'rgba': + return 'RGBA' + elif format == 'gray': + return 'GRAY8' + elif format: + raise Exception(f'invalid output format {format}') + else: + return None + +class GstreamerFormatPostProcess(): + def __init__(self) -> None: + self.postprocess = ' ! videoconvert ! capsfilter name=capsfilter' + self.resize = None + + async def create(self, gst, pipeline: str): + gst, gen = await createPipelineIterator(pipeline + self.postprocess, gst) + g = gen() + self.gst = gst + self.g = g + self.capsfilter = self.gst.get_by_name('capsfilter') + + def update(self, caps, sampleSize: Tuple[int, int], options: scrypted_sdk.ImageOptions): + sinkCaps = "video/x-raw" + if format: + sinkCaps += f",format={format}" + self.capsfilter.set_property('caps', caps.from_string(sinkCaps)) + class GstreamerPostProcess(): def __init__(self) -> None: - self.postprocess = ' ! videoconvert ! videocrop name=videocrop ! videoscale ! capsfilter name=capsfilter' + self.postprocess = ' ! videocrop name=videocrop ! videoconvertscale ! capsfilter name=scaleCapsFilter' self.resize = None async def create(self, gst, pipeline: str): @@ -13,9 +62,9 @@ async def create(self, gst, pipeline: str): self.gst = gst self.g = g self.videocrop = self.gst.get_by_name('videocrop') - self.capsfilter = self.gst.get_by_name('capsfilter') + self.scaleCapsFilter = self.gst.get_by_name('scaleCapsFilter') - def update(self, caps, sampleSize: Tuple[int, int], options: scrypted_sdk.ImageOptions = None, format: str = None): + def update(self, caps, sampleSize: Tuple[int, int], options: scrypted_sdk.ImageOptions): sampleWidth, sampleHeight = sampleSize crop = options.get('crop') @@ -42,7 +91,7 @@ def update(self, caps, sampleSize: Tuple[int, int], options: scrypted_sdk.ImageO videocrop.set_property('right', right) videocrop.set_property('bottom', bottom) - sinkCaps = "video/x-raw" + scaleCaps = "video/x-raw,pixel-aspect-ratio=(fraction)1/1" if resize: width = resize.get('width') if width: @@ -59,12 +108,24 @@ def update(self, caps, sampleSize: Tuple[int, int], options: scrypted_sdk.ImageO height = int(height) # pipeline += " ! videoscale" - sinkCaps += f",width={width},height={height}" + scaleCaps += f",width={width},height={height}" - if format: - sinkCaps += f",format={format}" + # gstreamer aligns stride to a 4 byte boundary. 
+ # this makes it painful to get data out with RGB, NV12, or I420. + format = toCapsFormat(options) + if format != 'RGBA': + if not format: + format = 'RGBA' + elif format == 'RGB': + format = 'RGBA' + elif format == 'GRAY8': + pass + else: + raise Exception('unexpected target format returned from toCapsFormat') - self.capsfilter.set_property('caps', caps.from_string(sinkCaps)) + scaleCaps += f",format={format}" + + self.scaleCapsFilter.set_property('caps', caps.from_string(scaleCaps)) class VaapiPostProcess(): def __init__(self) -> None: @@ -78,7 +139,7 @@ async def create(self, gst, pipeline: str): self.g = g self.vaapipostproc = self.gst.get_by_name('vaapipostproc') - def update(self, caps, sampleSize: Tuple[int, int], options: scrypted_sdk.ImageOptions = None, format: str = None): + def update(self, caps, sampleSize: Tuple[int, int], options: scrypted_sdk.ImageOptions): sampleWidth, sampleHeight = sampleSize crop = options.get('crop') @@ -110,12 +171,11 @@ def update(self, caps, sampleSize: Tuple[int, int], options: scrypted_sdk.ImageO vaapipostproc.set_property('width', outputWidth) vaapipostproc.set_property('height', outputHeight) - if format: - if format == 'RGB': - format = 'RGBA' + # not sure vaapi supports non-rgba across all hardware... + # (11): rgba - GST_VIDEO_FORMAT_RGBA vaapipostproc.set_property('format', 11) - if False and crop: + if crop: left = int(crop['left']) top = int(crop['top']) width = int(crop['width']) @@ -136,7 +196,7 @@ def update(self, caps, sampleSize: Tuple[int, int], options: scrypted_sdk.ImageO vaapipostproc.set_property('crop-right', right) vaapipostproc.set_property('crop-bottom', bottom) -class AppleMediaPostProcess(): +class OpenGLPostProcess(): def __init__(self) -> None: self.postprocess = ' ! glcolorconvert ! gltransformation name=gltransformation ! glcolorscale ! capsfilter name=glCapsFilter caps="video/x-raw(memory:GLMemory),format=RGBA" ! 
gldownload' self.resize = None @@ -154,7 +214,7 @@ async def create(self, gst, pipeline: str): # self.swCapsFilter = self.gst.get_by_name('swCapsFilter') - def update(self, caps, sampleSize: Tuple[int, int], options: scrypted_sdk.ImageOptions = None, format: str = None): + def update(self, caps, sampleSize: Tuple[int, int], options: scrypted_sdk.ImageOptions): # print(options) sampleWidth, sampleHeight = sampleSize diff --git a/plugins/python-codecs/src/pilimage.py b/plugins/python-codecs/src/pilimage.py index 4d77240e15..bdfe92c217 100644 --- a/plugins/python-codecs/src/pilimage.py +++ b/plugins/python-codecs/src/pilimage.py @@ -24,7 +24,7 @@ async def close(self): pil.close() async def toBuffer(self, options: scrypted_sdk.ImageOptions = None) -> bytearray: - pilImage: PILImage = await self.toPILImage(options) + pilImage: PILImage = await self.toImageInternal(options) if not options or not options.get('format', None): def format(): @@ -60,13 +60,13 @@ def save(): return await to_thread(lambda: save()) - async def toPILImage(self, options: scrypted_sdk.ImageOptions = None): + async def toImageInternal(self, options: scrypted_sdk.ImageOptions = None): return await to_thread(lambda: toPILImage(self, options)) async def toImage(self, options: scrypted_sdk.ImageOptions = None) -> Any: if options and options.get('format', None): raise Exception('format can only be used with toBuffer') - newPILImage = await self.toPILImage(options) + newPILImage = await self.toImageInternal(options) return await createImageMediaObject(newPILImage) def toPILImage(pilImageWrapper: PILImage, options: scrypted_sdk.ImageOptions = None) -> PILImage: diff --git a/plugins/python-codecs/src/vipsimage.py b/plugins/python-codecs/src/vipsimage.py index f6412c80d2..3435ff24a3 100644 --- a/plugins/python-codecs/src/vipsimage.py +++ b/plugins/python-codecs/src/vipsimage.py @@ -24,7 +24,7 @@ async def close(self): vips.invalidate() async def toBuffer(self, options: scrypted_sdk.ImageOptions = None) -> bytearray: - vipsImage: VipsImage = await self.toVipsImage(options) + vipsImage: VipsImage = await self.toImageInternal(options) if not options or not options.get('format', None): def format(): @@ -62,13 +62,13 @@ def format(): return await to_thread(lambda: vipsImage.vipsImage.write_to_buffer('.' + options['format'])) - async def toVipsImage(self, options: scrypted_sdk.ImageOptions = None): + async def toImageInternal(self, options: scrypted_sdk.ImageOptions = None): return await to_thread(lambda: toVipsImage(self, options)) async def toImage(self, options: scrypted_sdk.ImageOptions = None) -> Any: if options and options.get('format', None): raise Exception('format can only be used with toBuffer') - newVipsImage = await self.toVipsImage(options) + newVipsImage = await self.toImageInternal(options) return await createImageMediaObject(newVipsImage) def toVipsImage(vipsImageWrapper: VipsImage, options: scrypted_sdk.ImageOptions = None) -> VipsImage:
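
Note for reviewers: the new stride handling in GstImage.toBuffer loads the GRAY8 frame at the padded width and then crops the extra columns back off through vips/PIL, because GStreamer aligns each row to a 4-byte boundary. The following standalone sketch (not part of the patch; the helper name and the example data are made up for illustration) shows the same idea done directly on the mapped buffer, assuming GRAY8, i.e. one byte per pixel:

def strip_row_padding(data: bytes, width: int, height: int) -> bytes:
    """Remove per-row padding from a GRAY8 buffer whose rows are
    aligned to a 4-byte boundary, returning a tightly packed buffer."""
    stride = (width + 3) & ~3  # round the row length up to a multiple of 4
    if stride == width:
        # rows are already tight; just trim any trailing bytes
        return data[: width * height]
    rows = (data[y * stride : y * stride + width] for y in range(height))
    return b"".join(rows)

if __name__ == "__main__":
    # a 6x2 gray frame padded to an 8-byte stride: two padding bytes per row
    width, height = 6, 2
    padded = bytes(range(6)) + b"\x00\x00" + bytes(range(6, 12)) + b"\x00\x00"
    assert strip_row_padding(padded, width, height) == bytes(range(12))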
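
One spot in gstreamer_postprocess.py worth double-checking: in the new GstreamerFormatPostProcess.update, format is never assigned from options, so the `if format:` test sees Python's builtin format function and the capsfilter would receive a bogus format= field. A minimal sketch of what the method presumably intends, reusing the toCapsFormat helper added in the same file (this is a suggestion, not the author's confirmed fix):

    def update(self, caps, sampleSize: Tuple[int, int], options: scrypted_sdk.ImageOptions):
        # derive the caps format from the requested options instead of
        # falling through to the builtin format() function
        format = toCapsFormat(options)
        sinkCaps = "video/x-raw"
        if format:
            sinkCaps += f",format={format}"
        self.capsfilter.set_property('caps', caps.from_string(sinkCaps))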