From 2a00311f15239336cc092233e8e4952f3ca0725d Mon Sep 17 00:00:00 2001 From: Almar Klein Date: Wed, 28 Apr 2021 12:58:16 +0200 Subject: [PATCH] Make sure that check_struct is called where needed (#161) * Make sure that check_struct is called where needed * consistency * black is the new black * Check for errors in code gen report * implement check_struct * nicer name, without typo * Test that check_struct actually does something * Implement check_struct and apply a few fixes --- codegen/__main__.py | 6 +-- codegen/apipatcher.py | 77 ++++++++++++++++++++++++++++++--- codegen/hparser.py | 2 +- codegen/idlparser.py | 2 +- codegen/rspatcher.py | 6 +-- codegen/tests/test_codegen_z.py | 10 +++++ codegen/utils.py | 2 +- examples/cube_glfw.py | 2 +- examples/triangle_glfw.py | 4 +- tests/test_compute.py | 9 +++- tests/test_rs_basics.py | 2 +- tests/test_rs_render.py | 4 +- wgpu/backends/rs.py | 42 +++++++++++++----- wgpu/base.py | 62 +++++++++++++------------- wgpu/gui/base.py | 4 +- wgpu/utils/_compute.py | 2 +- 16 files changed, 170 insertions(+), 66 deletions(-) diff --git a/codegen/__main__.py b/codegen/__main__.py index 4f6e0b6f..66e4cdb9 100644 --- a/codegen/__main__.py +++ b/codegen/__main__.py @@ -21,7 +21,7 @@ def prepare(): def update_api(): - """ Update the public API and patch the public-facing API of the backends. """ + """Update the public API and patch the public-facing API of the backends.""" print("## Updating API") @@ -50,7 +50,7 @@ def update_api(): def update_rs(): - """ Update and check the rs backend. """ + """Update and check the rs backend.""" print("## Validating rs.py") @@ -68,7 +68,7 @@ def update_rs(): def main(): - """ Codegen entry point. """ + """Codegen entry point.""" with PrintToFile(os.path.join(lib_dir, "resources", "codegen_report.md")): print("# Code generatation report") diff --git a/codegen/apipatcher.py b/codegen/apipatcher.py index df310999..18712caa 100644 --- a/codegen/apipatcher.py +++ b/codegen/apipatcher.py @@ -5,8 +5,8 @@ import os -from .utils import print, lib_dir, blacken, to_snake_case, to_camel_case, Patcher -from .idlparser import get_idl_parser +from codegen.utils import print, lib_dir, blacken, to_snake_case, to_camel_case, Patcher +from codegen.idlparser import get_idl_parser def patch_base_api(code): @@ -42,7 +42,11 @@ def patch_backend_api(code): base_api_code = f.read().decode() # Patch! - for patcher in [CommentRemover(), BackendApiPatcher(base_api_code)]: + for patcher in [ + CommentRemover(), + BackendApiPatcher(base_api_code), + StructValidationChecker(), + ]: patcher.apply(code) code = patcher.dumps() return code @@ -53,7 +57,7 @@ class CommentRemover(Patcher): to prevent accumulating comments. 
""" - triggers = "# IDL:", "# FIXME: unknown api" + triggers = "# IDL:", "# FIXME: unknown api", "# FIXME: missing check_struct" def apply(self, code): self._init(code) @@ -174,7 +178,7 @@ def patch_properties(self, classname, i1, i2): self._apidiffs_from_lines(pre_lines, propname) if self.prop_is_known(classname, propname): if "@apidiff.add" in pre_lines: - print(f"Error: apidiff.add for known {classname}.{propname}") + print(f"ERROR: apidiff.add for known {classname}.{propname}") elif "@apidiff.hide" in pre_lines: pass # continue as normal old_line = self.lines[j1] @@ -207,7 +211,7 @@ def patch_methods(self, classname, i1, i2): self._apidiffs_from_lines(pre_lines, methodname) if self.method_is_known(classname, methodname): if "@apidiff.add" in pre_lines: - print(f"Error: apidiff.add for known {classname}.{methodname}") + print(f"ERROR: apidiff.add for known {classname}.{methodname}") elif "@apidiff.hide" in pre_lines: pass # continue as normal elif "@apidiff.change" in pre_lines: @@ -443,3 +447,64 @@ def get_required_prop_names(self, classname): def get_required_method_names(self, classname): _, methods = self.classes[classname] return list(name for name in methods.keys() if methods[name][1]) + + +class StructValidationChecker(Patcher): + """Checks that all structs are vaildated in the methods that have incoming structs.""" + + def apply(self, code): + self._init(code) + + idl = get_idl_parser() + all_structs = set() + ignore_structs = {"Extent3D"} + + for classname, i1, i2 in self.iter_classes(): + if classname not in idl.classes: + continue + + # For each method ... + for methodname, j1, j2 in self.iter_methods(i1 + 1): + code = "\n".join(self.lines[j1 : j2 + 1]) + # Get signature and cut it up in words + sig_words = code.partition("(")[2].split("):")[0] + for c in "][(),\"'": + sig_words = sig_words.replace(c, " ") + # Collect incoming structs from signature + method_structs = set() + for word in sig_words.split(): + if word.startswith("structs."): + structname = word.partition(".")[2] + method_structs.update(self._get_sub_structs(idl, structname)) + all_structs.update(method_structs) + # Collect structs being checked + checked = set() + for line in code.splitlines(): + line = line.lstrip() + if line.startswith("check_struct("): + name = line.split("(")[1].split(",")[0].strip('"') + checked.add(name) + # Test that a matching check is done + unchecked = method_structs.difference(checked) + unchecked = list(sorted(unchecked.difference(ignore_structs))) + if ( + methodname.endswith("_async") + and f"return self.{methodname[:-7]}" in code + ): + pass + elif unchecked: + msg = f"missing check_struct in {methodname}: {unchecked}" + self.insert_line(j1, f"# FIXME: {msg}") + print(f"ERROR: {msg}") + + # Test that we did find structs. In case our detection fails for + # some reason, this would probably catch that. + assert len(all_structs) > 10 + + def _get_sub_structs(self, idl, structname): + structnames = {structname} + for structfield in idl.structs[structname].values(): + structname2 = structfield.typename[3:] # remove "GPU" + if structname2 in idl.structs: + structnames.update(self._get_sub_structs(idl, structname2)) + return structnames diff --git a/codegen/hparser.py b/codegen/hparser.py index 5e60a436..856e7905 100644 --- a/codegen/hparser.py +++ b/codegen/hparser.py @@ -10,7 +10,7 @@ def get_h_parser(*, allow_cache=True): - """ Get the global HParser object. 
""" + """Get the global HParser object.""" # Singleton pattern global _parser diff --git a/codegen/idlparser.py b/codegen/idlparser.py index 0d41137f..a4f7dff1 100644 --- a/codegen/idlparser.py +++ b/codegen/idlparser.py @@ -16,7 +16,7 @@ def get_idl_parser(*, allow_cache=True): - """ Get the global IdlParser object. """ + """Get the global IdlParser object.""" # Singleton pattern global _parser diff --git a/codegen/rspatcher.py b/codegen/rspatcher.py index 7a1efda5..fbbf644b 100644 --- a/codegen/rspatcher.py +++ b/codegen/rspatcher.py @@ -193,7 +193,7 @@ def apply(self, code): if name not in hp.functions: msg = f"unknown C function {name}" self.insert_line(i, f"{indent}# FIXME: {msg}") - print(f"Error: {msg}") + print(f"ERROR: {msg}") else: detected.add(name) anno = hp.functions[name].replace(name, "f").strip(";") @@ -302,7 +302,7 @@ def _validate_struct(self, hp, i1, i2): if struct_name not in hp.structs: msg = f"unknown C struct {struct_name}" self.insert_line(i1, f"{indent}# FIXME: {msg}") - print(f"Error: {msg}") + print(f"ERROR: {msg}") return else: struct = hp.structs[struct_name] @@ -322,7 +322,7 @@ def _validate_struct(self, hp, i1, i2): if key not in struct: msg = f"unknown C struct field {struct_name}.{key}" self.insert_line(i1 + j, f"{indent}# FIXME: {msg}") - print(f"Error: {msg}") + print(f"ERROR: {msg}") # Insert comments for unused keys more_lines = [] diff --git a/codegen/tests/test_codegen_z.py b/codegen/tests/test_codegen_z.py index 79e00515..02729fbb 100644 --- a/codegen/tests/test_codegen_z.py +++ b/codegen/tests/test_codegen_z.py @@ -71,5 +71,15 @@ def test_that_code_is_up_to_date(): print("Codegen check ok!") +def test_that_codegen_report_has_no_errors(): + filename = os.path.join(lib_dir, "resources", "codegen_report.md") + with open(filename, "rb") as f: + text = f.read().decode() + + # The codegen uses a prefix "ERROR:" for unacceptable things. + # All caps, some function names may contain the name "error". + assert "ERROR" not in text + + if __name__ == "__main__": test_that_code_is_up_to_date() diff --git a/codegen/utils.py b/codegen/utils.py index cf4bd91f..b72a4923 100644 --- a/codegen/utils.py +++ b/codegen/utils.py @@ -55,7 +55,7 @@ def print(*args, **kwargs): class PrintToFile: - """ Context manager to print to file. """ + """Context manager to print to file.""" def __init__(self, f): if isinstance(f, str): diff --git a/examples/cube_glfw.py b/examples/cube_glfw.py index 6c3aedc9..e21e5749 100644 --- a/examples/cube_glfw.py +++ b/examples/cube_glfw.py @@ -389,7 +389,7 @@ def draw_frame(): def simple_event_loop(): - """ A real simple event loop, but it keeps the CPU busy. """ + """A real simple event loop, but it keeps the CPU busy.""" while update_glfw_canvasses(): glfw.poll_events() diff --git a/examples/triangle_glfw.py b/examples/triangle_glfw.py index 436d277a..0a0a862d 100644 --- a/examples/triangle_glfw.py +++ b/examples/triangle_glfw.py @@ -21,13 +21,13 @@ def simple_event_loop(): - """ A real simple event loop, but it keeps the CPU busy. """ + """A real simple event loop, but it keeps the CPU busy.""" while update_glfw_canvasses(): glfw.poll_events() def better_event_loop(max_fps=100): - """ A simple event loop that schedules draws. 
""" + """A simple event loop that schedules draws.""" td = 1 / max_fps while update_glfw_canvasses(): # Determine next time to draw diff --git a/tests/test_compute.py b/tests/test_compute.py index 74c9753b..e4530df6 100644 --- a/tests/test_compute.py +++ b/tests/test_compute.py @@ -172,6 +172,13 @@ def compute_shader( ) bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) + # Create and run the pipeline, fail - test check_struct + with raises(ValueError): + compute_pipeline = device.create_compute_pipeline( + layout=pipeline_layout, + compute={"module": cshader, "entry_point": "main", "foo": 42}, + ) + # Create and run the pipeline compute_pipeline = device.create_compute_pipeline( layout=pipeline_layout, @@ -259,7 +266,7 @@ def compute_shader( compute_with_buffers({0: in1}, {0: c_int32 * 100}, compute_shader, n=-1) with raises(TypeError): # invalid shader - compute_with_buffers({0: in1}, {0: c_int32 * 100}, "not a shader") + compute_with_buffers({0: in1}, {0: c_int32 * 100}, {"not", "a", "shader"}) if __name__ == "__main__": diff --git a/tests/test_rs_basics.py b/tests/test_rs_basics.py index 82a9c1b6..a5d901e9 100644 --- a/tests/test_rs_basics.py +++ b/tests/test_rs_basics.py @@ -144,7 +144,7 @@ def test_shader_module_creation(): with raises(TypeError): device.create_shader_module(code=code4) with raises(TypeError): - device.create_shader_module(code="not a shader") + device.create_shader_module(code={"not", "a", "shader"}) with raises(ValueError): device.create_shader_module(code=b"bytes but no SpirV magic number") diff --git a/tests/test_rs_render.py b/tests/test_rs_render.py index af8e7ce8..d69b2c92 100644 --- a/tests/test_rs_render.py +++ b/tests/test_rs_render.py @@ -511,13 +511,13 @@ def cb(renderpass): format=wgpu.TextureFormat.depth24plus_stencil8, depth_write_enabled=True, depth_compare=wgpu.CompareFunction.less_equal, - front={ + stencil_front={ "compare": wgpu.CompareFunction.equal, "fail_op": wgpu.StencilOperation.keep, "depth_fail_op": wgpu.StencilOperation.keep, "pass_op": wgpu.StencilOperation.keep, }, - back={ + stencil_back={ "compare": wgpu.CompareFunction.equal, "fail_op": wgpu.StencilOperation.keep, "depth_fail_op": wgpu.StencilOperation.keep, diff --git a/wgpu/backends/rs.py b/wgpu/backends/rs.py index dc4b55ea..4f1b1a4e 100644 --- a/wgpu/backends/rs.py +++ b/wgpu/backends/rs.py @@ -164,7 +164,7 @@ def _loadop_and_clear_from_value(value): def to_c_label(label): - """ Get the C representation of a label. """ + """Get the C representation of a label.""" if not label: return _empty_label else: @@ -172,7 +172,7 @@ def to_c_label(label): def feature_flag_to_feature_names(flag): - """ Convert a feature flags into a tuple of names. """ + """Convert a feature flags into a tuple of names.""" features = [] for i in range(32): val = int(2 ** i) @@ -181,6 +181,14 @@ def feature_flag_to_feature_names(flag): return tuple(sorted(features)) +def check_struct(struct_name, d): + """Check that all keys in the given dict exist in the corresponding struct.""" + valid_keys = set(getattr(structs, struct_name)) + invalid_keys = set(d.keys()).difference(valid_keys) + if invalid_keys: + raise ValueError(f"Invalid keys in {struct_name}: {invalid_keys}") + + # %% The API @@ -294,7 +302,7 @@ def request_device_tracing( *, label="", non_guaranteed_features: "list(enums.FeatureName)" = [], - non_guaranteed_limits: "structs.Limits" = {}, + non_guaranteed_limits: "Dict[str, int]" = {}, ): """Write a trace of all commands to a file so it can be reproduced elsewhere. 
The trace is cross-platform! @@ -510,6 +518,7 @@ def create_bind_group_layout( ): c_entries_list = [] for entry in entries: + check_struct("BindGroupLayoutEntry", entry) c_has_dynamic_offset = False c_view_dimension = 0 c_texture_component_type = 0 @@ -517,6 +526,7 @@ def create_bind_group_layout( c_storage_texture_format = 0 if entry.get("buffer"): info = entry["buffer"] + check_struct("BufferBindingLayout", info) type = info["type"] if type == enums.BufferBindingType.uniform: c_type = lib.WGPUBindingType_UniformBuffer @@ -530,6 +540,7 @@ def create_bind_group_layout( min_binding_size = 0 # noqa: not yet supported in wgpy-native elif entry.get("sampler"): info = entry["sampler"] + check_struct("SamplerBindingLayout", info) type = info["type"] if type == enums.SamplerBindingType.filtering: c_type = lib.WGPUBindingType_Sampler @@ -541,6 +552,7 @@ def create_bind_group_layout( raise ValueError(f"Unknown sampler binding type {type}") elif entry.get("texture"): info = entry["texture"] + check_struct("TextureBindingLayout", info) c_type = lib.WGPUBindingType_SampledTexture type = info.get("sample_type", "float") if type == enums.TextureSampleType.float: @@ -558,6 +570,7 @@ def create_bind_group_layout( c_multisampled = info.get("multisampled", False) elif entry.get("storage_texture"): info = entry["storage_texture"] + check_struct("StorageTextureBindingLayout", info) access = info["access"] if access == enums.StorageTextureAccess.read_only: c_type = lib.WGPUBindingType_ReadonlyStorageTexture @@ -618,6 +631,7 @@ def create_bind_group( c_entries_list = [] for entry in entries: + check_struct("BindGroupEntry", entry) # The resource can be a sampler, texture view, or buffer descriptor resource = entry["resource"] if isinstance(resource, GPUSampler): @@ -748,7 +762,7 @@ def create_compute_pipeline( layout: "GPUPipelineLayout" = None, compute: "structs.ProgrammableStage", ): - + check_struct("ProgrammableStage", compute) # H: module: WGPUShaderModuleId/int, entry_point: WGPULabel c_compute_stage = new_struct( "WGPUProgrammableStageDescriptor", @@ -790,9 +804,12 @@ def create_render_pipeline( ): depth_stencil = depth_stencil or {} multisample = multisample or {} + primitive = primitive or {} - # Little helper, remove after june 2021 or so - assert "stencil_front" not in depth_stencil, "stencil_front -> front" + check_struct("VertexState", vertex) + check_struct("DepthStencilState", depth_stencil) + check_struct("MultisampleState", multisample) + check_struct("PrimitiveState", primitive) # H: module: WGPUShaderModuleId/int, entry_point: WGPULabel c_vertex_stage = new_struct( @@ -802,6 +819,7 @@ def create_render_pipeline( ) c_fragment_stage = ffi.NULL if fragment is not None: + check_struct("FragmentState", fragment) # H: module: WGPUShaderModuleId/int, entry_point: WGPULabel c_fragment_stage = new_struct_p( "WGPUProgrammableStageDescriptor *", @@ -862,7 +880,8 @@ def create_render_pipeline( assert ( depth_stencil.get("format", None) is not None ), "depth_stencil needs format" - stencil_front = depth_stencil.get("front", {}) + stencil_front = depth_stencil.get("stencil_front", {}) + check_struct("StencilFaceState", stencil_front) # H: compare: WGPUCompareFunction/int, failOp: WGPUStencilOperation, depthFailOp: WGPUStencilOperation, passOp: WGPUStencilOperation c_stencil_front = new_struct( "WGPUStencilStateFaceDescriptor", @@ -871,7 +890,8 @@ def create_render_pipeline( depthFailOp=stencil_front.get("depth_fail_op", "keep"), passOp=stencil_front.get("pass_op", "keep"), ) - stencil_back = 
depth_stencil.get("back", {}) + stencil_back = depth_stencil.get("stencil_back", {}) + check_struct("StencilFaceState", stencil_front) # H: compare: WGPUCompareFunction/int, failOp: WGPUStencilOperation, depthFailOp: WGPUStencilOperation, passOp: WGPUStencilOperation c_stencil_back = new_struct( "WGPUStencilStateFaceDescriptor", @@ -1260,6 +1280,7 @@ def begin_render_pass( c_color_attachments_list = [] for color_attachment in color_attachments: + check_struct("RenderPassColorAttachment", color_attachment) assert isinstance(color_attachment["view"], GPUTextureView) texture_view_id = color_attachment["view"]._internal c_resolve_target = ( @@ -1305,6 +1326,7 @@ def begin_render_pass( c_depth_stencil_attachment = ffi.NULL if depth_stencil_attachment is not None: + check_struct("RenderPassDepthStencilAttachment", depth_stencil_attachment) c_depth_load_op, c_depth_clear = _loadop_and_clear_from_value( depth_stencil_attachment["depth_load_value"] ) @@ -1628,7 +1650,7 @@ def insert_debug_marker(self, marker_label): class GPUComputePassEncoder( base.GPUComputePassEncoder, GPUProgrammablePassEncoder, GPUObjectBase ): - """""" + """ """ def set_pipeline(self, pipeline): pipeline_id = pipeline._internal @@ -1662,7 +1684,7 @@ def _destroy(self): class GPURenderEncoderBase(base.GPURenderEncoderBase): - """""" + """ """ def set_pipeline(self, pipeline): pipeline_id = pipeline._internal diff --git a/wgpu/base.py b/wgpu/base.py index 1d414737..b43db5cc 100644 --- a/wgpu/base.py +++ b/wgpu/base.py @@ -167,13 +167,13 @@ def name(self): # IDL: [SameObject] readonly attribute GPUSupportedFeatures features; @property def features(self): - """ A tuple of supported feature names. """ + """A tuple of supported feature names.""" return self._features # IDL: [SameObject] readonly attribute GPUAdapterLimits limits; @property def limits(self): - """ A dict with the adapter limits.""" + """A dict with the adapter limits.""" return self._limits # IDL: Promise requestDevice(optional GPUDeviceDescriptor descriptor = {}); @@ -293,19 +293,19 @@ def adapter(self): # IDL: readonly attribute Promise lost; @property def lost(self): - """ Provides information about why the device is lost. """ + """Provides information about why the device is lost.""" raise NotImplementedError() # FIXME: new prop to implement # IDL: attribute EventHandler onuncapturederror; @property def onuncapturederror(self): - """ Method called when an error is capured?""" + """Method called when an error is capured?""" raise NotImplementedError() # IDL: undefined destroy(); def destroy(self): - """ Destroy this device. """ + """Destroy this device.""" return self._destroy() # IDL: GPUBuffer createBuffer(GPUBufferDescriptor descriptor); @@ -569,7 +569,7 @@ async def create_compute_pipeline_async( layout: "GPUPipelineLayout" = None, compute: "structs.ProgrammableStage", ): - """Async version of create_compute_pipeline(). """ + """Async version of create_compute_pipeline().""" raise NotImplementedError() # IDL: GPURenderPipeline createRenderPipeline(GPURenderPipelineDescriptor descriptor); @@ -725,7 +725,7 @@ async def create_render_pipeline_async( multisample: "structs.MultisampleState" = {}, fragment: "structs.FragmentState" = None, ): - """ Async version of create_render_pipeline(). 
""" + """Async version of create_render_pipeline().""" raise NotImplementedError() # IDL: GPUCommandEncoder createCommandEncoder(optional GPUCommandEncoderDescriptor descriptor = {}); @@ -765,7 +765,7 @@ def create_query_set( count: int, pipeline_statistics: "List[enums.PipelineStatisticName]" = [], ): - """ Create a :class:`GPUQuerySet` object. """ + """Create a :class:`GPUQuerySet` object.""" raise NotImplementedError() # IDL: undefined pushErrorScope(GPUErrorFilter filter); @@ -1057,7 +1057,7 @@ async def compilation_info_async(self): class GPUPipelineBase: - """ A mixin class for render and compute pipelines. """ + """A mixin class for render and compute pipelines.""" def __init__(self, label, internal, device, layout): super().__init__(label, internal, device) @@ -1099,7 +1099,7 @@ class GPUCommandBuffer(GPUObjectBase): # IDL: readonly attribute Promise executionTime; @property def execution_time(self): - """ Returns a future that, if measureExecutionTime is true, resolves after the command buffer executes.""" + """Returns a future that, if measureExecutionTime is true, resolves after the command buffer executes.""" raise NotImplementedError() @@ -1223,7 +1223,7 @@ def push_debug_group(self, group_label): # IDL: undefined popDebugGroup(); def pop_debug_group(self): - """Pop a label from the debug group stack. """ + """Pop a label from the debug group stack.""" raise NotImplementedError() # IDL: undefined insertDebugMarker(USVString markerLabel); @@ -1244,7 +1244,7 @@ def finish(self, *, label=""): # FIXME: new method to implement # IDL: undefined writeTimestamp(GPUQuerySet querySet, GPUSize32 queryIndex); def write_timestamp(self, query_set, query_index): - """ TODO """ + """TODO""" raise NotImplementedError() # FIXME: new method to implement @@ -1252,7 +1252,7 @@ def write_timestamp(self, query_set, query_index): def resolve_query_set( self, query_set, first_query, query_count, destination, destination_offset ): - """ TODO """ + """TODO""" raise NotImplementedError() @@ -1353,7 +1353,7 @@ def end_pipeline_statistics_query(self): # FIXME: new method to implement # IDL: undefined writeTimestamp(GPUQuerySet querySet, GPUSize32 queryIndex); def write_timestamp(self, query_set, query_index): - """ TODO """ + """TODO""" raise NotImplementedError() @@ -1524,13 +1524,13 @@ def end_pass(self): # FIXME: new method to implement # IDL: undefined beginOcclusionQuery(GPUSize32 queryIndex); def begin_occlusion_query(self, query_index): - """ TODO """ + """TODO""" raise NotImplementedError() # FIXME: new method to implement # IDL: undefined endOcclusionQuery(); def end_occlusion_query(self): - """ TODO """ + """TODO""" raise NotImplementedError() # IDL: undefined beginPipelineStatisticsQuery(GPUQuerySet querySet, GPUSize32 queryIndex); @@ -1546,7 +1546,7 @@ def end_pipeline_statistics_query(self): # FIXME: new method to implement # IDL: undefined writeTimestamp(GPUQuerySet querySet, GPUSize32 queryIndex); def write_timestamp(self, query_set, query_index): - """ TODO """ + """TODO""" raise NotImplementedError() @@ -1677,7 +1677,7 @@ def read_texture(self, source, data_layout, size): # FIXME: new method to implement # IDL: Promise onSubmittedWorkDone(); def on_submitted_work_done(self): - """ TODO """ + """TODO""" raise NotImplementedError() @@ -1738,7 +1738,7 @@ def __exit__(self, type, value, tb): class GPUDeviceLostInfo: - """ An object that contains information about the device being lost.""" + """An object that contains information about the device being lost.""" def __init__(self, reason, 
message): self._reason = reason @@ -1747,18 +1747,18 @@ def __init__(self, reason, message): # IDL: readonly attribute DOMString message; @property def message(self): - """ The error message specifying the reason for the device being lost. """ + """The error message specifying the reason for the device being lost.""" return self._message # IDL: readonly attribute (GPUDeviceLostReason or undefined) reason; @property def reason(self): - """ The reason (enums.GPUDeviceLostReason) for the device getting lost. Can be None. """ + """The reason (enums.GPUDeviceLostReason) for the device getting lost. Can be None.""" return self._reason class GPUOutOfMemoryError(Exception): - """ An error raised when the GPU is out of memory. """ + """An error raised when the GPU is out of memory.""" # IDL: constructor(); def __init__(self): @@ -1766,12 +1766,12 @@ def __init__(self): class GPUValidationError(Exception): - """ An error raised when the pipeline could not be validated. """ + """An error raised when the pipeline could not be validated.""" # IDL: readonly attribute DOMString message; @property def message(self): - """ The error message specifying the reason for invalidation. """ + """The error message specifying the reason for invalidation.""" return self._message # IDL: constructor(DOMString message); @@ -1789,25 +1789,25 @@ class GPUCompilationMessage: # IDL: readonly attribute DOMString message; @property def message(self): - """ The warning/error message. """ + """The warning/error message.""" raise NotImplementedError() # IDL: readonly attribute GPUCompilationMessageType type; @property def type(self): - """ The type of warning/problem. """ + """The type of warning/problem.""" raise NotImplementedError() # IDL: readonly attribute unsigned long long lineNum; @property def line_num(self): - """ The corresponding line number in the shader source. """ + """The corresponding line number in the shader source.""" raise NotImplementedError() # IDL: readonly attribute unsigned long long linePos; @property def line_pos(self): - """ The position on the line in the shader source. """ + """The position on the line in the shader source.""" raise NotImplementedError() @@ -1818,7 +1818,7 @@ class GPUCompilationInfo: # IDL: readonly attribute FrozenArray messages; @property def messages(self): - """ A list of ``GPUCompilationMessage`` objects. """ + """A list of ``GPUCompilationMessage`` objects.""" raise NotImplementedError() @@ -1828,7 +1828,7 @@ class GPUQuerySet(GPUObjectBase): # IDL: undefined destroy(); def destroy(self): - """ Destroy the queryset.""" + """Destroy the queryset.""" raise NotImplementedError() @@ -1839,7 +1839,7 @@ class GPUUncapturedErrorEvent: # IDL: [SameObject] readonly attribute GPUError error; @property def error(self): - """ The error object.""" + """The error object.""" raise NotImplementedError() # IDL: constructor( DOMString type, GPUUncapturedErrorEventInit gpuUncapturedErrorEventInitDict ); diff --git a/wgpu/gui/base.py b/wgpu/gui/base.py index e0d83ae2..1b715300 100644 --- a/wgpu/gui/base.py +++ b/wgpu/gui/base.py @@ -62,7 +62,7 @@ def configure_swap_chain( format: "enums.TextureFormat" = None, usage: "flags.TextureUsage" = None, ): - """ Obtain a swap-chain object. 
""" + """Obtain a swap-chain object.""" # Let's be nice and allow not-specifying the format format = format or self.get_swap_chain_preferred_format(device.adapter) return super().configure_swap_chain( @@ -70,7 +70,7 @@ def configure_swap_chain( ) def get_swap_chain_preferred_format(self, adapter): - """ Get the preferred swap-chain texture format for this canvas. """ + """Get the preferred swap-chain texture format for this canvas.""" return "bgra8unorm-srgb" # seems to be a good default, can be overridden diff --git a/wgpu/utils/_compute.py b/wgpu/utils/_compute.py index 6e8c7115..20a86a07 100644 --- a/wgpu/utils/_compute.py +++ b/wgpu/utils/_compute.py @@ -152,8 +152,8 @@ def compute_with_buffers(input_arrays, output_arrays, shader, n=None): "visibility": wgpu.ShaderStage.COMPUTE, "buffer": { "type": storage_types[index in output_infos], + "has_dynamic_offset": False, }, - "has_dynamic_offset": False, } )