From 3ed20956e1c77703eaf73bbfe7890ea15d4b17a8 Mon Sep 17 00:00:00 2001 From: Almar Klein Date: Tue, 24 Sep 2024 15:42:22 +0200 Subject: [PATCH 01/18] Refactor generating methods/props that are async --- codegen/README.md | 2 +- codegen/apipatcher.py | 131 +++++++++++++++++++++++++++++++----------- codegen/idlparser.py | 3 +- 3 files changed, 99 insertions(+), 37 deletions(-) diff --git a/codegen/README.md b/codegen/README.md index fdeebb6d..f8cc477c 100644 --- a/codegen/README.md +++ b/codegen/README.md @@ -69,7 +69,7 @@ In some cases we may want to deviate from the WebGPU API, because well ... Pytho Other changes include: * Where in JS the input args are provided via a dict, we use kwargs directly. Nevertheless, some input args have subdicts (and sub-sub-dicts) -* For methods that are async in IDL, we also provide sync methods. The Async method names have an "_async" suffix. +* For methods that are async in JavaScript (i.e return a `Promise`), we provide both an asynchronous and synchronous variant, indicated by an `_async` and `_sync` suffix. 
### Codegen summary diff --git a/codegen/apipatcher.py b/codegen/apipatcher.py index 1c18a099..b52241c8 100644 --- a/codegen/apipatcher.py +++ b/codegen/apipatcher.py @@ -241,7 +241,7 @@ def get_missing_properties(self, classname, seen_props): if propname not in seen_props: lines.append(" # FIXME: new prop to implement") lines.append(" @property") - lines.append(f" def {propname}(self):") + lines.append(self.get_property_def(classname, propname)) lines.append(" raise NotImplementedError()") lines.append("") return lines @@ -266,15 +266,67 @@ def __init__(self): super().__init__() self.idl = get_idl_parser() - def name2idl(self, name): - m = {"__init__": "constructor"} - name = m.get(name, name) - return to_camel_case(name) - - def name2py(self, name): - m = {"constructor": "__init__"} - name = m.get(name, name) - return to_snake_case(name) + def name2idl(self, name_py, attributes_or_functions): + """Map a python propname/methodname to the idl variant. + Take async into account. + """ + if name_py == "__init__": + name_idl = "constructor" + elif name_py.endswith(("_sync", "_async")): + py_is_async = name_py.endswith("_async") + # Select idl name, preferring the matching suffix, because + # in IDL some functions also have both variants! 
+ name_idl_base = to_camel_case(name_py.rsplit("_", 1)[0]) + if py_is_async: + names_idl = [name_idl_base + "Async", name_idl_base] + else: + names_idl = [name_idl_base, name_idl_base + "Async"] + for name_idl in names_idl: + if name_idl in attributes_or_functions: + break + else: + name_idl = name_idl_base + # Guard for case where py-name uses the suffix, but the idl method is not async + if name_idl in attributes_or_functions: + if py_is_async and "Promise" not in attributes_or_functions[name_idl]: + name_idl = name_idl + "_wrong_py_name" + else: + name_idl = to_camel_case(name_py) + # Guard for case where py-name matches lacks the suffix + if name_idl in attributes_or_functions: + if "Promise" in attributes_or_functions[name_idl]: + name_idl = name_idl + "_wrong_py_name" + + return name_idl + + def name2py_names(self, name_idl, attributes_or_functions): + """Map a idl propname/methodname to the python variant. + Take async into account. Returns a list with one or two names; + for async props/methods Python has the sync and the async variant. 
+ """ + idl_line = attributes_or_functions[name_idl] + idl_line_return_part = idl_line.split(name_idl)[0] + + if name_idl == "constructor": + names_py = ["__init__"] + elif "Promise" in idl_line_return_part: + name_py = to_snake_case(name_idl) + if name_py.endswith("_async"): + name_py = name_py[:-6] + idl_has_both_variants = ( + name_idl + "Async" in attributes_or_functions + and name_idl in attributes_or_functions + ) + if idl_has_both_variants: + if name_idl.endswith("Async"): + names_py = [name_py + "_async"] + else: + names_py = [name_py + "_sync"] + else: + names_py = [name_py + "_async", name_py + "_sync"] + else: + names_py = [to_snake_case(name_idl)] + return names_py def class_is_known(self, classname): return classname in self.idl.classes @@ -295,22 +347,28 @@ def get_class_def(self, classname): bases = "" if not bases else f"({', '.join(bases)})" return f"class {classname}{bases}:" + def get_property_def(self, classname, propname): + attributes = self.idl.classes[classname].attributes + name_idl = self.name2idl(propname, attributes) + assert name_idl in attributes + + line = "def " + to_snake_case(propname) + "(self):" + if propname.endswith("_async"): + line = "async " + line + return " " + line + def get_method_def(self, classname, methodname): - # Get the corresponding IDL line functions = self.idl.classes[classname].functions - name_idl = self.name2idl(methodname) - if methodname.endswith("_async") and name_idl not in functions: - name_idl = self.name2idl(methodname.replace("_async", "")) - elif name_idl not in functions and name_idl + "Async" in functions: - name_idl += "Async" - idl_line = functions[name_idl] + name_idl = self.name2idl(methodname, functions) + assert name_idl in functions # Construct preamble preamble = "def " + to_snake_case(methodname) + "(" - if "async" in methodname: + if methodname.endswith("_async"): preamble = "async " + preamble # Get arg names and types + idl_line = functions[name_idl] args = idl_line.split("(", 
1)[1].split(")", 1)[0].split(",") args = [arg.strip() for arg in args if arg.strip()] raw_defaults = [arg.partition("=")[2].strip() for arg in args] @@ -361,28 +419,31 @@ def _arg_from_struct_field(self, field): return result def prop_is_known(self, classname, propname): - propname_idl = self.name2idl(propname) - return propname_idl in self.idl.classes[classname].attributes + attributes = self.idl.classes[classname].attributes + propname_idl = self.name2idl(propname, attributes) + return propname_idl if propname_idl in attributes else None def method_is_known(self, classname, methodname): functions = self.idl.classes[classname].functions - name_idl = self.name2idl(methodname) - if "_async" in methodname and name_idl not in functions: - name_idl = self.name2idl(methodname.replace("_async", "")) - elif name_idl not in functions and name_idl + "Async" in functions: - name_idl += "Async" - return name_idl if name_idl in functions else None + methodname_idl = self.name2idl(methodname, functions) + return methodname_idl if methodname_idl in functions else None def get_class_names(self): return list(self.idl.classes.keys()) def get_required_prop_names(self, classname): - propnames_idl = self.idl.classes[classname].attributes.keys() - return [self.name2py(x) for x in propnames_idl] + attributes = self.idl.classes[classname].attributes + names = [] + for name_idl in attributes.keys(): + names.extend(self.name2py_names(name_idl, attributes)) + return names def get_required_method_names(self, classname): - methodnames_idl = self.idl.classes[classname].functions.keys() - return [self.name2py(x) for x in methodnames_idl] + functions = self.idl.classes[classname].functions + names = [] + for name_idl in functions.keys(): + names.extend(self.name2py_names(name_idl, functions)) + return names class BaseApiPatcher(IdlPatcherMixin, AbstractApiPatcher): @@ -398,14 +459,16 @@ def get_class_comment(self, classname): return None def get_prop_comment(self, classname, propname): - if 
self.prop_is_known(classname, propname): - propname_idl = self.name2idl(propname) - return " # IDL: " + self.idl.classes[classname].attributes[propname_idl] + attributes = self.idl.classes[classname].attributes + name_idl = self.prop_is_known(classname, propname) + if name_idl: + return " # IDL: " + attributes[name_idl] def get_method_comment(self, classname, methodname): + functions = self.idl.classes[classname].functions name_idl = self.method_is_known(classname, methodname) if name_idl: - return " # IDL: " + self.idl.classes[classname].functions[name_idl] + return " # IDL: " + functions[name_idl] class BackendApiPatcher(AbstractApiPatcher): diff --git a/codegen/idlparser.py b/codegen/idlparser.py index 5063c91d..6446a2b6 100644 --- a/codegen/idlparser.py +++ b/codegen/idlparser.py @@ -67,8 +67,7 @@ class IdlParser: * enums: a dict mapping the (Pythonic) enum name to a dict of field-value pairs. * structs: a dict mapping the (Pythonic) struct name to a dict of StructField objects. - * functions: a dict mapping the (normalized) func name to the line defining the - function. + * classes: a dict mapping the (normalized) class name an Interface object. 
""" From f67a973dae89938f7a4c0eb91694a0cd73862ecc Mon Sep 17 00:00:00 2001 From: Almar Klein Date: Wed, 25 Sep 2024 10:39:03 +0200 Subject: [PATCH 02/18] Better logic --- codegen/apipatcher.py | 143 ++++++++++++++++++++++++++---------------- 1 file changed, 90 insertions(+), 53 deletions(-) diff --git a/codegen/apipatcher.py b/codegen/apipatcher.py index b52241c8..0bc19c9e 100644 --- a/codegen/apipatcher.py +++ b/codegen/apipatcher.py @@ -265,68 +265,105 @@ class IdlPatcherMixin: def __init__(self): super().__init__() self.idl = get_idl_parser() + self.detect_async_props_and_methods() + + def detect_async_props_and_methods(self): + + self.async_idl_names = async_idl_names = {} # (sync-name, async-name) + + for classname, interface in self.idl.classes.items(): + for namedict in [interface.attributes, interface.functions]: + for name_idl, idl_line in namedict.items(): + idl_result = idl_line.split(name_idl)[0] + if "Promise" in idl_result: + # We found an async property or method. + name_idl_base = name_idl + if name_idl.endswith("Async"): + name_idl_base = name_idl[:-5] + key = classname, name_idl_base + # Now we determine the kind + if name_idl_base != name_idl and name_idl_base in namedict: + # Has both + async_idl_names[key] = name_idl_base, name_idl + else: + # Only has async + async_idl_names[key] = None, name_idl + + def get_idl_name_variants(self, classname, base_name): + """Returns the names of an idl prop/method for its sync and async variant. + Either can be None. + """ + # Must be a base name, without the suffix + assert not base_name.lower().endswith(("sync", "async")) + + key = classname, base_name + default = base_name, None + return self.async_idl_names.get(key, default) - def name2idl(self, name_py, attributes_or_functions): + def name2idl(self, classname, name_py): """Map a python propname/methodname to the idl variant. Take async into account. 
""" if name_py == "__init__": - name_idl = "constructor" - elif name_py.endswith(("_sync", "_async")): - py_is_async = name_py.endswith("_async") - # Select idl name, preferring the matching suffix, because - # in IDL some functions also have both variants! + return "constructor" + + # Get idl base name + if name_py.endswith(("_sync", "_async")): name_idl_base = to_camel_case(name_py.rsplit("_", 1)[0]) - if py_is_async: - names_idl = [name_idl_base + "Async", name_idl_base] - else: - names_idl = [name_idl_base, name_idl_base + "Async"] - for name_idl in names_idl: - if name_idl in attributes_or_functions: - break - else: - name_idl = name_idl_base - # Guard for case where py-name uses the suffix, but the idl method is not async - if name_idl in attributes_or_functions: - if py_is_async and "Promise" not in attributes_or_functions[name_idl]: - name_idl = name_idl + "_wrong_py_name" else: - name_idl = to_camel_case(name_py) - # Guard for case where py-name matches lacks the suffix - if name_idl in attributes_or_functions: - if "Promise" in attributes_or_functions[name_idl]: - name_idl = name_idl + "_wrong_py_name" + name_idl_base = to_camel_case(name_py) - return name_idl + # Get idl variant names + idl_sync, idl_async = self.get_idl_name_variants(classname, name_idl_base) - def name2py_names(self, name_idl, attributes_or_functions): - """Map a idl propname/methodname to the python variant. 
+ # Triage + if idl_sync and idl_async: + if name_py.endswith("_async"): + return idl_async + elif name_py.endswith("_sync"): + return name_idl_base + "InvalidVariantSync" + else: + return idl_sync + elif idl_async: + if name_py.endswith("_async"): + return idl_async + elif name_py.endswith("_sync"): + return idl_async + else: + return name_idl_base + "InvalidVariant" + else: # idl_sync only + if name_py.endswith("_async"): + return name_idl_base + "InvalidVariantAsync" + elif name_py.endswith("_sync"): + return name_idl_base + "InvalidVariantSync" + else: + return idl_sync + + def name2py_names(self, classname, name_idl): + """Map a idl propname/methodname to the python variants. Take async into account. Returns a list with one or two names; for async props/methods Python has the sync and the async variant. """ - idl_line = attributes_or_functions[name_idl] - idl_line_return_part = idl_line.split(name_idl)[0] if name_idl == "constructor": - names_py = ["__init__"] - elif "Promise" in idl_line_return_part: - name_py = to_snake_case(name_idl) - if name_py.endswith("_async"): - name_py = name_py[:-6] - idl_has_both_variants = ( - name_idl + "Async" in attributes_or_functions - and name_idl in attributes_or_functions - ) - if idl_has_both_variants: - if name_idl.endswith("Async"): - names_py = [name_py + "_async"] - else: - names_py = [name_py + "_sync"] - else: - names_py = [name_py + "_async", name_py + "_sync"] + return ["__init__"] + + # Get idl base name + name_idl_base = name_idl + if name_idl.endswith("Async"): + name_idl_base = name_idl[:-5] + name_py_base = to_snake_case(name_idl_base) + + # Get idl variant names + idl_sync, idl_async = self.get_idl_name_variants(classname, name_idl_base) + + if idl_sync and idl_async: + return [to_snake_case(idl_sync), name_py_base + "_async"] + elif idl_async: + return [name_py_base + "_sync", name_py_base + "_async"] else: - names_py = [to_snake_case(name_idl)] - return names_py + assert idl_sync == name_idl_base + return 
[name_py_base] def class_is_known(self, classname): return classname in self.idl.classes @@ -349,7 +386,7 @@ def get_class_def(self, classname): def get_property_def(self, classname, propname): attributes = self.idl.classes[classname].attributes - name_idl = self.name2idl(propname, attributes) + name_idl = self.name2idl(classname, propname) assert name_idl in attributes line = "def " + to_snake_case(propname) + "(self):" @@ -359,7 +396,7 @@ def get_property_def(self, classname, propname): def get_method_def(self, classname, methodname): functions = self.idl.classes[classname].functions - name_idl = self.name2idl(methodname, functions) + name_idl = self.name2idl(classname, methodname) assert name_idl in functions # Construct preamble @@ -420,12 +457,12 @@ def _arg_from_struct_field(self, field): def prop_is_known(self, classname, propname): attributes = self.idl.classes[classname].attributes - propname_idl = self.name2idl(propname, attributes) + propname_idl = self.name2idl(classname, propname) return propname_idl if propname_idl in attributes else None def method_is_known(self, classname, methodname): functions = self.idl.classes[classname].functions - methodname_idl = self.name2idl(methodname, functions) + methodname_idl = self.name2idl(classname, methodname) return methodname_idl if methodname_idl in functions else None def get_class_names(self): @@ -435,14 +472,14 @@ def get_required_prop_names(self, classname): attributes = self.idl.classes[classname].attributes names = [] for name_idl in attributes.keys(): - names.extend(self.name2py_names(name_idl, attributes)) + names.extend(self.name2py_names(classname, name_idl)) return names def get_required_method_names(self, classname): functions = self.idl.classes[classname].functions names = [] for name_idl in functions.keys(): - names.extend(self.name2py_names(name_idl, functions)) + names.extend(self.name2py_names(classname, name_idl)) return names From 10ffaa662fdc93623dd6b11bde747a2472fac81e Mon Sep 17 00:00:00 
2001 From: Almar Klein Date: Wed, 25 Sep 2024 11:14:35 +0200 Subject: [PATCH 03/18] Codegen + update _classes.py --- wgpu/_classes.py | 144 +++++++++++++++++++++++++++++++++-------------- 1 file changed, 101 insertions(+), 43 deletions(-) diff --git a/wgpu/_classes.py b/wgpu/_classes.py index 3bd6ba23..b7760f8a 100644 --- a/wgpu/_classes.py +++ b/wgpu/_classes.py @@ -81,23 +81,17 @@ class GPU: # IDL: Promise requestAdapter(optional GPURequestAdapterOptions options = {}); @apidiff.change("arguments include canvas") - def request_adapter( + def request_adapter_sync( self, *, power_preference=None, force_fallback_adapter=False, canvas=None ): - """Create a `GPUAdapter`, the object that represents an abstract wgpu - implementation, from which one can request a `GPUDevice`. + """Sync version of `request_adapter_async()`. - Arguments: - power_preference (PowerPreference): "high-performance" or "low-power". - force_fallback_adapter (bool): whether to use a (probably CPU-based) - fallback adapter. - canvas (WgpuCanvasInterface): The canvas that the adapter should - be able to render to. This can typically be left to None. + Provided by wgpu-py, but not compatible with WebGPU. """ # If this method gets called, no backend has been loaded yet, let's do that now! from .backends.auto import gpu # noqa - return gpu.request_adapter( + return gpu.request_adapter_sync( power_preference=power_preference, force_fallback_adapter=force_fallback_adapter, canvas=canvas, @@ -108,15 +102,39 @@ def request_adapter( async def request_adapter_async( self, *, power_preference=None, force_fallback_adapter=False, canvas=None ): - """Async version of `request_adapter()`.""" - return self.request_adapter( + """Create a `GPUAdapter`, the object that represents an abstract wgpu + implementation, from which one can request a `GPUDevice`. + + Arguments: + power_preference (PowerPreference): "high-performance" or "low-power". 
+ force_fallback_adapter (bool): whether to use a (probably CPU-based) + fallback adapter. + canvas (WgpuCanvasInterface): The canvas that the adapter should + be able to render to. This can typically be left to None. + """ + # If this method gets called, no backend has been loaded yet, let's do that now! + from .backends.auto import gpu # noqa + + return await gpu.request_adapter_async( power_preference=power_preference, force_fallback_adapter=force_fallback_adapter, canvas=canvas, ) @apidiff.add("Method useful for multi-gpu environments") - def enumerate_adapters(self): + def enumerate_adapters_sync(self): + """Sync version of `enumerate_adapters_async()`. + + Provided by wgpu-py, but not compatible with WebGPU. + """ + + # If this method gets called, no backend has been loaded yet, let's do that now! + from .backends.auto import gpu # noqa + + return gpu.enumerate_adapters_sync() + + @apidiff.add("Method useful for multi-gpu environments") + async def enumerate_adapters_async(self): """Get a list of adapter objects available on the current system. An adapter can then be selected (e.g. using it's summary), and a device @@ -143,12 +161,7 @@ def enumerate_adapters(self): # If this method gets called, no backend has been loaded yet, let's do that now! 
from .backends.auto import gpu # noqa - return gpu.enumerate_adapters() - - @apidiff.add("Method useful on desktop") - async def enumerate_adapters_async(self): - """Async version of enumerate_adapters.""" - return self.enumerate_adapters() + return await gpu.enumerate_adapters_async() # IDL: GPUTextureFormat getPreferredCanvasFormat(); @apidiff.change("Disabled because we put it on the canvas context") @@ -564,7 +577,7 @@ def limits(self): return self._limits # IDL: Promise requestDevice(optional GPUDeviceDescriptor descriptor = {}); - def request_device( + def request_device_sync( self, *, label="", @@ -572,13 +585,9 @@ def request_device( required_limits: "Dict[str, int]" = {}, default_queue: "structs.QueueDescriptor" = {}, ): - """Request a `GPUDevice` from the adapter. + """Sync version of `request_device_async()`. - Arguments: - label (str): A human readable label. Optional. - required_features (list of str): the features (extensions) that you need. Default []. - required_limits (dict): the various limits that you need. Default {}. - default_queue (structs.QueueDescriptor): Descriptor for the default queue. Optional. + Provided by wgpu-py, but not compatible with WebGPU. """ raise NotImplementedError() @@ -591,7 +600,14 @@ async def request_device_async( required_limits: "Dict[str, int]" = {}, default_queue: "structs.QueueDescriptor" = {}, ): - """Async version of `request_device()`.""" + """Request a `GPUDevice` from the adapter. + + Arguments: + label (str): A human readable label. Optional. + required_features (list of str): the features (extensions) that you need. Default []. + required_limits (dict): the various limits that you need. Default {}. + default_queue (structs.QueueDescriptor): Descriptor for the default queue. Optional. 
+ """ raise NotImplementedError() def _release(self): @@ -709,7 +725,17 @@ def adapter(self): # IDL: readonly attribute Promise lost; @apidiff.hide("Not a Pythonic API") @property - def lost(self): + def lost_sync(self): + """Sync version of `lost`. + + Provided by wgpu-py, but not compatible with WebGPU. + """ + raise NotImplementedError() + + # IDL: readonly attribute Promise lost; + @apidiff.hide("Not a Pythonic API") + @property + async def lost_async(self): """Provides information about why the device is lost.""" # In JS you can device.lost.then ... to handle lost devices. # We may want to eventually support something similar async-like? @@ -1002,7 +1028,9 @@ async def create_compute_pipeline_async( layout: "Union[GPUPipelineLayout, enums.AutoLayoutMode]", compute: "structs.ProgrammableStage", ): - """Async version of create_compute_pipeline().""" + """Async version of `create_compute_pipeline()`. + + Both versions are compatible with WebGPU.""" raise NotImplementedError() # IDL: GPURenderPipeline createRenderPipeline(GPURenderPipelineDescriptor descriptor); @@ -1161,7 +1189,9 @@ async def create_render_pipeline_async( multisample: "structs.MultisampleState" = {}, fragment: "structs.FragmentState" = None, ): - """Async version of create_render_pipeline().""" + """Async version of `create_render_pipeline()`. + + Both versions are compatible with WebGPU.""" raise NotImplementedError() # IDL: GPUCommandEncoder createCommandEncoder(optional GPUCommandEncoderDescriptor descriptor = {}); @@ -1215,7 +1245,16 @@ def push_error_scope(self, filter): # IDL: Promise popErrorScope(); @apidiff.hide - def pop_error_scope(self): + def pop_error_scope_sync(self): + """Sync version of `pop_error_scope_async(). + + Provided by wgpu-py, but not compatible with WebGPU. 
+ """ + raise NotImplementedError() + + # IDL: Promise popErrorScope(); + @apidiff.hide + async def pop_error_scope_async(self): """Pops a GPU error scope from the stack.""" raise NotImplementedError() @@ -1290,7 +1329,15 @@ def map_state(self): # an array-like object that exposes the shared memory. # IDL: Promise mapAsync(GPUMapModeFlags mode, optional GPUSize64 offset = 0, optional GPUSize64 size); - def map(self, mode, offset=0, size=None): + def map_sync(self, mode, offset=0, size=None): + """Sync version of `map_async()`. + + Provided by wgpu-py, but not compatible with WebGPU. + """ + raise NotImplementedError() + + # IDL: Promise mapAsync(GPUMapModeFlags mode, optional GPUSize64 offset = 0, optional GPUSize64 size); + async def map_async(self, mode, offset=0, size=None): """Maps the given range of the GPUBuffer. When this call returns, the buffer content is ready to be @@ -1307,11 +1354,6 @@ def map(self, mode, offset=0, size=None): """ raise NotImplementedError() - # IDL: Promise mapAsync(GPUMapModeFlags mode, optional GPUSize64 offset = 0, optional GPUSize64 size); - async def map_async(self, mode, offset=0, size=None): - """Alternative version of map().""" - raise NotImplementedError() - # IDL: undefined unmap(); def unmap(self): """Unmaps the buffer. @@ -1617,7 +1659,15 @@ class GPUShaderModule(GPUObjectBase): """ # IDL: Promise getCompilationInfo(); - def get_compilation_info(self): + def get_compilation_info_sync(self): + """Sync version of `get_compilation_info_async()`. + + Provided by wgpu-py, but not compatible with WebGPU. + """ + raise NotImplementedError() + + # IDL: Promise getCompilationInfo(); + async def get_compilation_info_async(self): """Get shader compilation info. Always returns empty list at the moment.""" # How can this return shader errors if one cannot create a # shader module when the shader source has errors? 
@@ -2222,16 +2272,24 @@ def read_texture(self, source, data_layout, size): """ raise NotImplementedError() - # IDL: Promise onSubmittedWorkDone(); - def on_submitted_work_done(self): - """TODO""" - raise NotImplementedError() - # IDL: undefined copyExternalImageToTexture( GPUImageCopyExternalImage source, GPUImageCopyTextureTagged destination, GPUExtent3D copySize); @apidiff.hide("Specific to browsers") def copy_external_image_to_texture(self, source, destination, copy_size): raise NotImplementedError() + # IDL: Promise onSubmittedWorkDone(); + def on_submitted_work_done_sync(self): + """Sync version of `on_submitted_work_done_async()`. + + Provided by wgpu-py, but not compatible with WebGPU. + """ + raise NotImplementedError() + + # IDL: Promise onSubmittedWorkDone(); + async def on_submitted_work_done_async(self): + """TODO""" + raise NotImplementedError() + # %% Further non-GPUObject classes From 5271504caacdd5b5c1657697d8ec3c1ba6626045 Mon Sep 17 00:00:00 2001 From: Almar Klein Date: Wed, 25 Sep 2024 11:56:18 +0200 Subject: [PATCH 04/18] fix issue, plus add tests --- codegen/apipatcher.py | 8 ++-- codegen/tests/test_codegen_apipatcher.py | 56 +++++++++++++++++++++++- codegen/tests/test_codegen_result.py | 18 ++++++++ 3 files changed, 77 insertions(+), 5 deletions(-) create mode 100644 codegen/tests/test_codegen_result.py diff --git a/codegen/apipatcher.py b/codegen/apipatcher.py index 0bc19c9e..d6058a66 100644 --- a/codegen/apipatcher.py +++ b/codegen/apipatcher.py @@ -179,7 +179,7 @@ def patch_properties(self, classname, i1, i2): elif "@apidiff.hide" in pre_lines: pass # continue as normal old_line = self.lines[j1] - new_line = f" def {propname}(self):" + new_line = self.get_property_def(classname, propname) if old_line != new_line: fixme_line = " # FIXME: was " + old_line.split("def ", 1)[-1] lines = [fixme_line, new_line] @@ -321,7 +321,7 @@ def name2idl(self, classname, name_py): if name_py.endswith("_async"): return idl_async elif 
name_py.endswith("_sync"): - return name_idl_base + "InvalidVariantSync" + return name_idl_base + "InvalidVariant" else: return idl_sync elif idl_async: @@ -333,9 +333,9 @@ def name2idl(self, classname, name_py): return name_idl_base + "InvalidVariant" else: # idl_sync only if name_py.endswith("_async"): - return name_idl_base + "InvalidVariantAsync" + return name_idl_base + "InvalidVariant" elif name_py.endswith("_sync"): - return name_idl_base + "InvalidVariantSync" + return name_idl_base + "InvalidVariant" else: return idl_sync diff --git a/codegen/tests/test_codegen_apipatcher.py b/codegen/tests/test_codegen_apipatcher.py index 6ef5bb13..31d66a89 100644 --- a/codegen/tests/test_codegen_apipatcher.py +++ b/codegen/tests/test_codegen_apipatcher.py @@ -2,7 +2,7 @@ """ from codegen.utils import blacken -from codegen.apipatcher import CommentRemover, AbstractCommentInjector +from codegen.apipatcher import CommentRemover, AbstractCommentInjector, IdlPatcherMixin def dedent(code): @@ -110,6 +110,60 @@ def eggs(self): assert code2 == code3 +def test_async_api_logic(): + + class Object(object): + pass + + class OtherIdlPatcherMixin(IdlPatcherMixin): + def __init__(self): + cls = Object() + cls.attributes = { + "prop1": "x prop1 bla", + "prop2": "Promise prop2 bla", + } + cls.functions = { + "method1": "x method1 bla", + "method2": "Promise method2 bla", + "method3Async": "Promise method3 bla", + "method3": "x method3 bla", + } + + self.idl = Object() + self.idl.classes = {"Foo": cls} + + patcher = OtherIdlPatcherMixin() + patcher.detect_async_props_and_methods() + + # Normal prop + assert patcher.name2idl("Foo", "prop1") == "prop1" + assert patcher.name2idl("Foo", "prop1_sync") == "prop1InvalidVariant" + assert patcher.name2idl("Foo", "prop1_async") == "prop1InvalidVariant" + + # Unknow prop, name still works + assert patcher.name2idl("Foo", "prop_unknown") == "propUnknown" + + # Async prop + assert patcher.name2idl("Foo", "prop2_async") == "prop2" + assert 
patcher.name2idl("Foo", "prop2_sync") == "prop2" + assert patcher.name2idl("Foo", "prop2") == "prop2InvalidVariant" + + # Normal method + assert patcher.name2idl("Foo", "method1") == "method1" + assert patcher.name2idl("Foo", "method1_sync") == "method1InvalidVariant" + assert patcher.name2idl("Foo", "method1_async") == "method1InvalidVariant" + + # Async method + assert patcher.name2idl("Foo", "method2_async") == "method2" + assert patcher.name2idl("Foo", "method2_sync") == "method2" + assert patcher.name2idl("Foo", "method2") == "method2InvalidVariant" + + # Async method that also has sync variant in JS + assert patcher.name2idl("Foo", "method3_async") == "method3Async" + assert patcher.name2idl("Foo", "method3") == "method3" + assert patcher.name2idl("Foo", "method3_sync") == "method3InvalidVariant" + + if __name__ == "__main__": for func in list(globals().values()): if callable(func) and func.__name__.startswith("test_"): diff --git a/codegen/tests/test_codegen_result.py b/codegen/tests/test_codegen_result.py new file mode 100644 index 00000000..218fdc8a --- /dev/null +++ b/codegen/tests/test_codegen_result.py @@ -0,0 +1,18 @@ +""" Test some aspects of the generated code. 
+""" + +from codegen.files import read_file + + +def test_async_methods_and_props(): + # Test that only and all aync methods are suffixed with '_async' + + for fname in ["_classes.py", "backends/wgpu_native/_api.py"]: + code = read_file(fname) + for line in code.splitlines(): + line = line.strip() + if line.startswith("def "): + assert "async" not in line, line + elif line.startswith("async def "): + name = line.split("def", 1)[1].split("(")[0].strip() + assert name.endswith("_async"), line From db891880811f2e3d26cb0f00ac5630ff53a9a766 Mon Sep 17 00:00:00 2001 From: Almar Klein Date: Wed, 25 Sep 2024 11:56:54 +0200 Subject: [PATCH 05/18] add comment --- wgpu/_classes.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/wgpu/_classes.py b/wgpu/_classes.py index b7760f8a..a3908354 100644 --- a/wgpu/_classes.py +++ b/wgpu/_classes.py @@ -746,7 +746,11 @@ async def lost_async(self): @apidiff.hide("Specific to browsers") @property def onuncapturederror(self): - """Method called when an error is capured?""" + """Event handler. + + In JS you'd do ``gpuDevice.addEventListener('uncapturederror', ...)``. We'd need + to figure out how to do this in Python. + """ raise NotImplementedError() # IDL: undefined destroy(); From bc3a72648473fb794b1d3325b29ac7ad181d4319 Mon Sep 17 00:00:00 2001 From: Almar Klein Date: Wed, 25 Sep 2024 11:57:48 +0200 Subject: [PATCH 06/18] Add docs --- docs/wgpu.rst | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/docs/wgpu.rst b/docs/wgpu.rst index 8df5cf63..04900403 100644 --- a/docs/wgpu.rst +++ b/docs/wgpu.rst @@ -34,10 +34,14 @@ Some arguments have a default value. Most do not. Differences from WebGPU ----------------------- -This API is derived from the WebGPU spec, but differs in a few ways. -For example, methods that in WebGPU accept a descriptor/struct/dict, -here accept the fields in that struct as keyword arguments. 
+This API is derived from the WebGPU spec, but differs in a few ways: +* Method names are snake_case (instead of camelCase). +* Enums and flags are represented as objects with snake_case field names. +* Methods that in WebGPU accept a single descriptor, will accept the fields of that descriptor as keyword arguments. +* Async methods have a different name, read more below. + +Further changes: .. autodata:: wgpu._classes.apidiff :annotation: Differences of base API: @@ -47,6 +51,21 @@ Each backend may implement extra functionality on top of the base API. This is listed in :doc:`backends `. +Async code +---------- + +Some methods and properties in the WebGPU API are asynchronous. In wgpu-py, these methods +are always suffixed with ``_async``. These methods also have a synchronous variant, which +come in two flavours: + +* If the method has the plain method name (no suffix), the synchronous method is + available in WebGPU as well. There's no problem to use this variant. +* If the method ends with ``_sync``, this is a convenience method, added in + wgpu-py to fully support synchronous code. However, the synchronous variant is + not part of the WebGPU spec, and as a consequence, code that uses this method + is less portable (to e.g. pyodide/pyscript).
+ + Overview -------- From 5962410f79a8e1a0fa060d679fee3f3b16e534e3 Mon Sep 17 00:00:00 2001 From: Almar Klein Date: Wed, 25 Sep 2024 12:23:30 +0200 Subject: [PATCH 07/18] apply codegenn to _api.py --- wgpu/backends/wgpu_native/_api.py | 41 ++++++++++++++++++++++++++----- 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/wgpu/backends/wgpu_native/_api.py b/wgpu/backends/wgpu_native/_api.py index f360aec0..6d30945f 100644 --- a/wgpu/backends/wgpu_native/_api.py +++ b/wgpu/backends/wgpu_native/_api.py @@ -304,7 +304,8 @@ def _get_features(id: int, device: bool = False, adapter: bool = False): class GPU(classes.GPU): - def request_adapter( + + def request_adapter_sync( self, *, power_preference=None, force_fallback_adapter=False, canvas=None ): """Create a `GPUAdapter`, the object that represents an abstract wgpu @@ -394,7 +395,7 @@ async def request_adapter_async( canvas=canvas, ) # no-cover - def enumerate_adapters(self): + def enumerate_adapters_sync(self): """Get a list of adapter objects available on the current system. This is the implementation based on wgpu-native. """ @@ -409,6 +410,12 @@ def enumerate_adapters(self): libf.wgpuInstanceEnumerateAdapters(instance, ffi.NULL, adapters) return [self._create_adapter(adapter) for adapter in adapters] + async def enumerate_adapters_async(self): + """Async version of ``enumerate_adapters_sync()``. + This is the implementation based on wgpu-native. 
+ """ + return self.enumerate_adapters_sync() + def _create_adapter(self, adapter_id): # ----- Get adapter info @@ -807,7 +814,8 @@ class GPUAdapterInfo(classes.GPUAdapterInfo): class GPUAdapter(classes.GPUAdapter): - def request_device( + + def request_device_sync( self, *, label="", @@ -821,6 +829,21 @@ def request_device( label, required_features, required_limits, default_queue, "" ) + async def request_device_async( + self, + *, + label="", + required_features: "List[enums.FeatureName]" = [], + required_limits: "Dict[str, int]" = {}, + default_queue: "structs.QueueDescriptor" = {}, + ): + return self.request_device_async( + label, + required_features=required_features, + required_limits=required_limits, + default_queue=default_queue, + ) + def _request_device( self, label, required_features, required_limits, default_queue, trace_path ): @@ -1900,7 +1923,7 @@ def _check_range(self, offset, size): raise ValueError("Mapped range must not extend beyond total buffer size.") return offset, size - def map(self, mode, offset=0, size=None): + def map_sync(self, mode, offset=0, size=None): sync_on_read = True # Check mode @@ -2175,7 +2198,7 @@ class GPUShaderModule(classes.GPUShaderModule, GPUObjectBase): # GPUObjectBaseMixin _release_function = libf.wgpuShaderModuleRelease - def get_compilation_info(self): + def get_compilation_info_sync(self): # Here's a little setup to implement this method. Unfortunately, # this is not yet implemented in wgpu-native. 
Another problem # is that if there is an error in the shader source, we raise @@ -2206,6 +2229,9 @@ def get_compilation_info(self): return [] + async def get_compilation_info_async(self): + raise NotImplementedError() + class GPUPipelineBase(classes.GPUPipelineBase): def get_bind_group_layout(self, index): @@ -3206,7 +3232,7 @@ def read_texture(self, source, data_layout, size): return data - def on_submitted_work_done(self): + def on_submitted_work_done_sync(self): # In JS, this returns a Promise that can be awaited to (async) wait # for the work that is currently in the pipeline. We need to figure out # how to expose these async parts. @@ -3230,6 +3256,9 @@ def callback(status_, user_data_p): if status != 0: raise RuntimeError(f"Queue work done status: {status}") + async def on_submitted_work_done_async(self): + raise NotImplementedError() + class GPURenderBundle(classes.GPURenderBundle, GPUObjectBase): # GPUObjectBaseMixin From 2b7c3b3b96159d78a17a38b567aaa6af9b2d6df3 Mon Sep 17 00:00:00 2001 From: Almar Klein Date: Wed, 25 Sep 2024 12:29:54 +0200 Subject: [PATCH 08/18] Tweak for prop --- wgpu/_classes.py | 10 +++++++++- wgpu/backends/wgpu_native/_api.py | 6 ++++++ wgpu/resources/codegen_report.md | 8 ++++---- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/wgpu/_classes.py b/wgpu/_classes.py index a3908354..820d02de 100644 --- a/wgpu/_classes.py +++ b/wgpu/_classes.py @@ -730,7 +730,7 @@ def lost_sync(self): Provided by wgpu-py, but not compatible with WebGPU. """ - raise NotImplementedError() + return self._get_lost_sync() # IDL: readonly attribute Promise lost; @apidiff.hide("Not a Pythonic API") @@ -740,6 +740,14 @@ async def lost_async(self): # In JS you can device.lost.then ... to handle lost devices. # We may want to eventually support something similar async-like? # at some point + + # Properties don't get repeated at _api.py, so we use a proxy method. 
+ return await self._get_lost_async() + + def _get_lost_sync(self): + raise NotImplementedError() + + async def _get_lost_async(self): raise NotImplementedError() # IDL: attribute EventHandler onuncapturederror; diff --git a/wgpu/backends/wgpu_native/_api.py b/wgpu/backends/wgpu_native/_api.py index 6d30945f..6f0a191d 100644 --- a/wgpu/backends/wgpu_native/_api.py +++ b/wgpu/backends/wgpu_native/_api.py @@ -1865,6 +1865,12 @@ def create_query_set(self, *, label="", type: "enums.QueryType", count: int): query_id = libf.wgpuDeviceCreateQuerySet(self._internal, query_set_descriptor) return GPUQuerySet(label, query_id, self._internal, type, count) + def _get_lost_sync(self): + raise NotImplementedError() + + async def _get_lost_async(self): + raise NotImplementedError() + def destroy(self): # Note: not yet implemented in wgpu-core, the wgpu-native func is a noop internal = self._internal diff --git a/wgpu/resources/codegen_report.md b/wgpu/resources/codegen_report.md index 6693fa4a..7d3787bb 100644 --- a/wgpu/resources/codegen_report.md +++ b/wgpu/resources/codegen_report.md @@ -9,18 +9,18 @@ * Wrote 34 enums to enums.py * Wrote 60 structs to structs.py ### Patching API for _classes.py -* Diffs for GPU: add enumerate_adapters, add enumerate_adapters_async, change get_preferred_canvas_format, change request_adapter, change request_adapter_async +* Diffs for GPU: add enumerate_adapters_async, add enumerate_adapters_sync, change get_preferred_canvas_format, change request_adapter_async, change request_adapter_sync * Diffs for GPUCanvasContext: add get_preferred_format, add present * Diffs for GPUAdapter: add summary -* Diffs for GPUDevice: add adapter, add create_buffer_with_data, hide import_external_texture, hide lost, hide onuncapturederror, hide pop_error_scope, hide push_error_scope +* Diffs for GPUDevice: add adapter, add create_buffer_with_data, hide import_external_texture, hide lost_async, hide lost_sync, hide onuncapturederror, hide pop_error_scope_async, hide 
pop_error_scope_sync, hide push_error_scope * Diffs for GPUBuffer: add map_read, add map_write, add read_mapped, add write_mapped, hide get_mapped_range * Diffs for GPUTexture: add size * Diffs for GPUTextureView: add size, add texture * Diffs for GPUBindingCommandsMixin: change set_bind_group * Diffs for GPUQueue: add read_buffer, add read_texture, hide copy_external_image_to_texture -* Validated 37 classes, 121 methods, 45 properties +* Validated 37 classes, 126 methods, 46 properties ### Patching API for backends/wgpu_native/_api.py -* Validated 37 classes, 96 methods, 0 properties +* Validated 37 classes, 101 methods, 0 properties ## Validating backends/wgpu_native/_api.py * Enum field FeatureName.texture-compression-bc-sliced-3d missing in wgpu.h * Enum field FeatureName.clip-distances missing in wgpu.h From e52322d3727eece819f964b460dafa51e8745ba3 Mon Sep 17 00:00:00 2001 From: Almar Klein Date: Wed, 25 Sep 2024 12:54:24 +0200 Subject: [PATCH 09/18] Backwards compat --- wgpu/_classes.py | 27 +++++++++++++++++++++++++++ wgpu/backends/wgpu_native/_api.py | 22 +++++----------------- 2 files changed, 32 insertions(+), 17 deletions(-) diff --git a/wgpu/_classes.py b/wgpu/_classes.py index 820d02de..ebf81419 100644 --- a/wgpu/_classes.py +++ b/wgpu/_classes.py @@ -2517,5 +2517,32 @@ def _set_repr_methods(): cls.__repr__ = generic_repr +_async_warnings = {} + + +def _set_compat_methods_for_async(): + def create_new_method(name): + def proxy_method(self, *args, **kwargs): + warning = _async_warnings.pop(name, None) + if warning: + logger.warning(warning) + return getattr(self, name)(*args, **kwargs) + + proxy_method.__name__ = name + "_backwards_compat_proxy" + return proxy_method + + m = globals() + for class_name in __all__: + cls = m[class_name] + for name, func in list(cls.__dict__.items()): + if name.endswith("_sync") and callable(func): + old_name = name[:-5] + setattr(cls, old_name, create_new_method(name)) + _async_warnings[name] = ( + f"WGPU: {old_name}() is 
deprecated, use {name}() instead." + ) + + _seed_object_counts() _set_repr_methods() +_set_compat_methods_for_async() diff --git a/wgpu/backends/wgpu_native/_api.py b/wgpu/backends/wgpu_native/_api.py index 6f0a191d..c12d4a2d 100644 --- a/wgpu/backends/wgpu_native/_api.py +++ b/wgpu/backends/wgpu_native/_api.py @@ -389,7 +389,7 @@ async def request_adapter_async( """Async version of ``request_adapter()``. This is the implementation based on wgpu-native. """ - return self.request_adapter( + return self.request_adapter_sync( power_preference=power_preference, force_fallback_adapter=force_fallback_adapter, canvas=canvas, @@ -837,7 +837,9 @@ async def request_device_async( required_limits: "Dict[str, int]" = {}, default_queue: "structs.QueueDescriptor" = {}, ): - return self.request_device_async( + if default_queue: + check_struct("QueueDescriptor", default_queue) + return self._request_device( label, required_features=required_features, required_limits=required_limits, @@ -1030,20 +1032,6 @@ def callback(status, result, message, userdata): return device - async def request_device_async( - self, - *, - label="", - required_features: "List[enums.FeatureName]" = [], - required_limits: "Dict[str, int]" = {}, - default_queue: "structs.QueueDescriptor" = {}, - ): - if default_queue: - check_struct("QueueDescriptor", default_queue) - return self._request_device( - label, required_features, required_limits, default_queue, "" - ) # no-cover - def _release(self): if self._internal is not None and libf is not None: self._internal, internal = None, self._internal @@ -1980,7 +1968,7 @@ def callback(status_, user_data_p): self._mapped_memoryviews = [] async def map_async(self, mode, offset=0, size=None): - return self.map(mode, offset, size) # for now + return self.map_sync(mode, offset, size) # for now def unmap(self): if self._map_state != enums.BufferMapState.mapped: From f406fcddc2a4860cc7a1bf8e9e722344a1a54bfe Mon Sep 17 00:00:00 2001 From: Almar Klein Date: Wed, 25 Sep 2024 
13:23:40 +0200 Subject: [PATCH 10/18] fix codegen test --- codegen/tests/test_codegen_result.py | 2 +- wgpu/_classes.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/codegen/tests/test_codegen_result.py b/codegen/tests/test_codegen_result.py index 218fdc8a..44c3b830 100644 --- a/codegen/tests/test_codegen_result.py +++ b/codegen/tests/test_codegen_result.py @@ -12,7 +12,7 @@ def test_async_methods_and_props(): for line in code.splitlines(): line = line.strip() if line.startswith("def "): - assert "async" not in line, line + assert not line.endswith("_async"), line elif line.startswith("async def "): name = line.split("def", 1)[1].split("(")[0].strip() assert name.endswith("_async"), line diff --git a/wgpu/_classes.py b/wgpu/_classes.py index ebf81419..e0e9fb04 100644 --- a/wgpu/_classes.py +++ b/wgpu/_classes.py @@ -2520,7 +2520,7 @@ def _set_repr_methods(): _async_warnings = {} -def _set_compat_methods_for_async(): +def _set_compat_methods_for_async_methods(): def create_new_method(name): def proxy_method(self, *args, **kwargs): warning = _async_warnings.pop(name, None) @@ -2545,4 +2545,4 @@ def proxy_method(self, *args, **kwargs): _seed_object_counts() _set_repr_methods() -_set_compat_methods_for_async() +_set_compat_methods_for_async_methods() From a1183fcba6e4d1ecf24ebccba4aac80a65cda19c Mon Sep 17 00:00:00 2001 From: Almar Klein Date: Wed, 25 Sep 2024 13:32:42 +0200 Subject: [PATCH 11/18] Fix tests --- tests/test_api.py | 8 ++++---- wgpu/_classes.py | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/test_api.py b/tests/test_api.py index 313ce5b6..4ad0e391 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -16,13 +16,13 @@ def test_basic_api(): assert isinstance(wgpu.gpu, wgpu.GPU) # Entrypoint funcs - assert wgpu.gpu.request_adapter + assert wgpu.gpu.request_adapter_sync assert wgpu.gpu.request_adapter_async - code1 = wgpu.GPU.request_adapter.__code__ + code1 = wgpu.GPU.request_adapter_sync.__code__ code2 
= wgpu.GPU.request_adapter_async.__code__ - nargs1 = code1.co_argcount + code1.co_kwonlyargcount - assert code1.co_varnames[:nargs1] == code2.co_varnames + # nargs1 = code1.co_argcount + code1.co_kwonlyargcount + assert code1.co_varnames == code2.co_varnames assert repr(wgpu.classes.GPU()).startswith( " Date: Wed, 25 Sep 2024 14:02:11 +0200 Subject: [PATCH 12/18] Replace method usage, and disbale backwards compat --- docs/backends.rst | 4 ++-- docs/guide.rst | 6 ++--- docs/start.rst | 2 +- examples/compute_noop.py | 4 ++-- examples/compute_timestamps.py | 6 +++-- examples/cube.py | 6 ++--- examples/imgui_backend_sea.py | 4 ++-- examples/imgui_basic_example.py | 4 ++-- examples/imgui_cmap_picker.py | 4 ++-- examples/imgui_multi_canvas.py | 4 ++-- examples/imgui_renderer_sea.py | 4 ++-- examples/triangle.py | 4 ++-- examples/triangle_glsl.py | 4 ++-- tests/test_api.py | 2 +- tests/test_gui_glfw.py | 4 ++-- tests/test_set_constant.py | 4 ++-- tests/test_wgpu_native_basics.py | 24 +++++++++---------- tests/test_wgpu_native_buffer.py | 36 ++++++++++++++--------------- tests/test_wgpu_native_query_set.py | 4 ++-- tests/test_wgpu_native_texture.py | 8 +++---- tests/test_wgpu_vertex_instance.py | 6 ++--- tests_mem/test_destroy.py | 4 ++-- tests_mem/test_objects.py | 6 ++--- wgpu/__init__.py | 2 +- wgpu/_classes.py | 8 +++---- wgpu/backends/js_webgpu/__init__.py | 2 +- wgpu/backends/rs.py | 2 +- wgpu/backends/wgpu_native/_api.py | 6 ++--- wgpu/backends/wgpu_native/extras.py | 10 +++++++- wgpu/utils/device.py | 4 ++-- 30 files changed, 99 insertions(+), 89 deletions(-) diff --git a/docs/backends.rst b/docs/backends.rst index 8b985140..93a2d4fd 100644 --- a/docs/backends.rst +++ b/docs/backends.rst @@ -44,7 +44,7 @@ It also works out of the box, because the wgpu-native DLL is shipped with wgpu-p The wgpu_native backend provides a few extra functionalities: -.. 
py:function:: wgpu.backends.wgpu_native.request_device(adapter, trace_path, *, label="", required_features, required_limits, default_queue) +.. py:function:: wgpu.backends.wgpu_native.request_device_sync(adapter, trace_path, *, label="", required_features, required_limits, default_queue) An alternative to :func:`wgpu.GPUAdapter.request_adapter`, that streams a trace of all low level calls to disk, so the visualization can be replayed (also on other systems), @@ -88,7 +88,7 @@ You must tell the adapter to create a device that supports push constants, and you must tell it the number of bytes of push constants that you are using. Overestimating is okay:: - device = adapter.request_device( + device = adapter.request_device_sync( required_features=["push-constants"], required_limits={"max-push-constant-size": 256}, ) diff --git a/docs/guide.rst b/docs/guide.rst index 118443ba..5f03221b 100644 --- a/docs/guide.rst +++ b/docs/guide.rst @@ -43,8 +43,8 @@ you can obtain a device. .. code-block:: py - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") - device = adapter.request_device() + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") + device = adapter.request_device_sync() The ``wgpu.gpu`` object is the API entrypoint (:class:`wgpu.GPU`). It contains just a handful of functions, including ``request_adapter()``. The device is used to create most other GPU objects. @@ -232,7 +232,7 @@ You can run your application via RenderDoc, which is able to capture a frame, including all API calls, objects and the complete pipeline state, and display all of that information within a nice UI. -You can use ``adapter.request_device()`` to provide a directory path +You can use ``adapter.request_device_sync()`` to provide a directory path where a trace of all API calls will be written. This trace can then be used to re-play your use-case elsewhere (it's cross-platform). 
diff --git a/docs/start.rst b/docs/start.rst index 6ccc490b..218a9ea3 100644 --- a/docs/start.rst +++ b/docs/start.rst @@ -99,7 +99,7 @@ You can verify whether the `"DiscreteGPU"` adapters are found: import wgpu import pprint - for a in wgpu.gpu.enumerate_adapters(): + for a in wgpu.gpu.enumerate_adapters_sync(): pprint.pprint(a.info) If you are using a remote frame buffer via `jupyter-rfb `_ we also recommend installing the following for optimal performance: diff --git a/examples/compute_noop.py b/examples/compute_noop.py index 8e9d08b5..9be2b906 100644 --- a/examples/compute_noop.py +++ b/examples/compute_noop.py @@ -62,7 +62,7 @@ device = wgpu.utils.get_default_device() # Show all available adapters -adapters = wgpu.gpu.enumerate_adapters() +adapters = wgpu.gpu.enumerate_adapters_sync() for a in adapters: print(a.summary) @@ -73,7 +73,7 @@ # adapter = a # break # assert adapter is not None -# device = adapter.request_device() +# device = adapter.request_device_sync() # %% cshader = device.create_shader_module(code=shader_source) diff --git a/examples/compute_timestamps.py b/examples/compute_timestamps.py index b22564d9..60afddc9 100644 --- a/examples/compute_timestamps.py +++ b/examples/compute_timestamps.py @@ -41,10 +41,12 @@ for i in range(n): data2[i] = i * 2 -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") # Request a device with the timestamp_query feature, so we can profile our computation -device = adapter.request_device(required_features=[wgpu.FeatureName.timestamp_query]) +device = adapter.request_device_sync( + required_features=[wgpu.FeatureName.timestamp_query] +) cshader = device.create_shader_module(code=shader_source) # Create buffer objects, input buffer is mapped. 
diff --git a/examples/cube.py b/examples/cube.py index c1b1a81c..09936fe9 100644 --- a/examples/cube.py +++ b/examples/cube.py @@ -12,7 +12,7 @@ print("Available adapters on this system:") -for a in wgpu.gpu.enumerate_adapters(): +for a in wgpu.gpu.enumerate_adapters_sync(): print(a.summary) @@ -22,8 +22,8 @@ canvas = WgpuCanvas(title="wgpu cube", size=(640, 480)) # Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") -device = adapter.request_device() +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") +device = adapter.request_device_sync() # Prepare present context present_context = canvas.get_context() diff --git a/examples/imgui_backend_sea.py b/examples/imgui_backend_sea.py index f21b7996..77d6af87 100644 --- a/examples/imgui_backend_sea.py +++ b/examples/imgui_backend_sea.py @@ -15,9 +15,9 @@ canvas = WgpuCanvas(title="imgui_sea", size=(800, 450), max_fps=60) # Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") -device = adapter.request_device() +device = adapter.request_device_sync() # Prepare present context present_context = canvas.get_context() diff --git a/examples/imgui_basic_example.py b/examples/imgui_basic_example.py index 0012a942..50873dd7 100644 --- a/examples/imgui_basic_example.py +++ b/examples/imgui_basic_example.py @@ -15,8 +15,8 @@ canvas = WgpuCanvas(title="imgui", size=(640, 480)) # Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") -device = adapter.request_device() +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") +device = adapter.request_device_sync() app_state = {"text": "Hello, World\nLorem ipsum, etc.\netc."} imgui_renderer = ImguiRenderer(device, canvas) diff --git a/examples/imgui_cmap_picker.py b/examples/imgui_cmap_picker.py index 2c71e58a..3c91d18f 100644 --- 
a/examples/imgui_cmap_picker.py +++ b/examples/imgui_cmap_picker.py @@ -26,8 +26,8 @@ canvas = WgpuCanvas(title="imgui", size=(512, 256)) # Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") -device = adapter.request_device() +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") +device = adapter.request_device_sync() imgui_renderer = ImguiRenderer(device, canvas) diff --git a/examples/imgui_multi_canvas.py b/examples/imgui_multi_canvas.py index 8495666c..37972cfe 100644 --- a/examples/imgui_multi_canvas.py +++ b/examples/imgui_multi_canvas.py @@ -17,8 +17,8 @@ canvases = [canvas1, canvas2, canvas3] # Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") -device = adapter.request_device() +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") +device = adapter.request_device_sync() # create a imgui renderer for each canvas imgui_renderer1 = ImguiRenderer(device, canvas1) diff --git a/examples/imgui_renderer_sea.py b/examples/imgui_renderer_sea.py index 3fba3094..4ebba4a0 100644 --- a/examples/imgui_renderer_sea.py +++ b/examples/imgui_renderer_sea.py @@ -15,9 +15,9 @@ canvas = WgpuCanvas(title="imgui_sea", size=(800, 450), max_fps=60) # Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") -device = adapter.request_device() +device = adapter.request_device_sync() # Prepare present context present_context = canvas.get_context() diff --git a/examples/triangle.py b/examples/triangle.py index 43e82d89..11f43c62 100644 --- a/examples/triangle.py +++ b/examples/triangle.py @@ -62,8 +62,8 @@ def main(canvas, power_preference="high-performance", limits=None): """Regular function to setup a viz on the given canvas.""" - adapter = wgpu.gpu.request_adapter(power_preference=power_preference) - device = 
adapter.request_device(required_limits=limits) + adapter = wgpu.gpu.request_adapter_sync(power_preference=power_preference) + device = adapter.request_device_sync(required_limits=limits) return _main(canvas, device) diff --git a/examples/triangle_glsl.py b/examples/triangle_glsl.py index 146a525e..67b2638e 100644 --- a/examples/triangle_glsl.py +++ b/examples/triangle_glsl.py @@ -47,8 +47,8 @@ def main(canvas, power_preference="high-performance", limits=None): """Regular function to setup a viz on the given canvas.""" - adapter = wgpu.gpu.request_adapter(power_preference=power_preference) - device = adapter.request_device(required_limits=limits) + adapter = wgpu.gpu.request_adapter_sync(power_preference=power_preference) + device = adapter.request_device_sync(required_limits=limits) return _main(canvas, device) diff --git a/tests/test_api.py b/tests/test_api.py index 4ad0e391..a5f75cdb 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -111,7 +111,7 @@ def test_base_wgpu_api(): @mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") def test_backend_is_selected_automatically(): # Test this in a subprocess to have a clean wgpu with no backend imported yet - code = "import wgpu; print(wgpu.gpu.request_adapter())" + code = "import wgpu; print(wgpu.gpu.request_adapter_sync())" result = subprocess.run( [sys.executable, "-c", code], stdout=subprocess.PIPE, diff --git a/tests/test_gui_glfw.py b/tests/test_gui_glfw.py index 32a77edd..67d9be29 100644 --- a/tests/test_gui_glfw.py +++ b/tests/test_gui_glfw.py @@ -213,10 +213,10 @@ def get_context(self): canvas = CustomCanvas() # Also pass canvas here, to touch that code somewhere - adapter = wgpu.gpu.request_adapter( + adapter = wgpu.gpu.request_adapter_sync( canvas=canvas, power_preference="high-performance" ) - device = adapter.request_device() + device = adapter.request_device_sync() draw_frame = _get_draw_function(device, canvas) for i in range(5): diff --git a/tests/test_set_constant.py 
b/tests/test_set_constant.py index 1252feef..b12957a3 100644 --- a/tests/test_set_constant.py +++ b/tests/test_set_constant.py @@ -66,8 +66,8 @@ def setup_pipeline(): - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") - device = adapter.request_device( + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") + device = adapter.request_device_sync( required_features=["push-constants"], required_limits={"max-push-constant-size": 128}, ) diff --git a/tests/test_wgpu_native_basics.py b/tests/test_wgpu_native_basics.py index b4db2c87..21111d90 100644 --- a/tests/test_wgpu_native_basics.py +++ b/tests/test_wgpu_native_basics.py @@ -255,7 +255,7 @@ def test_compute_shader_wgsl(): assert isinstance(code, str) shader = device.create_shader_module(code=code) - assert shader.get_compilation_info() == [] + assert shader.get_compilation_info_sync() == [] run_compute_shader(device, shader) @@ -268,7 +268,7 @@ def test_compute_shader_glsl(): assert isinstance(code, str) shader = device.create_shader_module(label="simple comp", code=code) - assert shader.get_compilation_info() == [] + assert shader.get_compilation_info_sync() == [] run_compute_shader(device, shader) @@ -282,7 +282,7 @@ def test_compute_shader_spirv(): assert isinstance(code, bytes) shader = device.create_shader_module(code=code) - assert shader.get_compilation_info() == [] + assert shader.get_compilation_info_sync() == [] run_compute_shader(device, shader) @@ -328,7 +328,7 @@ def test_wgpu_native_tracer(): assert not os.path.isdir(tempdir) # Works! 
- wgpu.backends.wgpu_native.request_device(adapter, tempdir) + wgpu.backends.wgpu_native.request_device_sync(adapter, tempdir) assert os.path.isdir(tempdir) # Make dir not empty @@ -336,13 +336,13 @@ def test_wgpu_native_tracer(): pass # Still works, but produces warning - wgpu.backends.wgpu_native.request_device(adapter, tempdir) + wgpu.backends.wgpu_native.request_device_sync(adapter, tempdir) @mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") def test_enumerate_adapters(): # Get all available adapters - adapters = wgpu.gpu.enumerate_adapters() + adapters = wgpu.gpu.enumerate_adapters_sync() assert len(adapters) > 0 # Check adapter summaries @@ -353,13 +353,13 @@ def test_enumerate_adapters(): # Check that we can get a device from each adapter for adapter in adapters: - d = adapter.request_device() + d = adapter.request_device_sync() assert isinstance(d, wgpu.backends.wgpu_native.GPUDevice) @mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") def test_adapter_destroy(): - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") assert adapter._internal is not None adapter.__del__() assert adapter._internal is None @@ -401,9 +401,9 @@ def are_features_wgpu_legal(features): """Returns true if the list of features is legal. Determining whether a specific set of features is implemented on a particular device would make the tests fragile, so we only verify that the names are legal feature names.""" - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") try: - adapter.request_device(required_features=features) + adapter.request_device_sync(required_features=features) return True except RuntimeError as e: assert "Unsupported features were requested" in str(e) @@ -440,9 +440,9 @@ def are_limits_wgpu_legal(limits): """Returns true if the list of features is legal. 
Determining whether a specific set of features is implemented on a particular device would make the tests fragile, so we only verify that the names are legal feature names.""" - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") try: - adapter.request_device(required_limits=limits) + adapter.request_device_sync(required_limits=limits) return True except RuntimeError as e: assert "Unsupported features were requested" in str(e) diff --git a/tests/test_wgpu_native_buffer.py b/tests/test_wgpu_native_buffer.py index 31f9f5e3..22d25cf6 100644 --- a/tests/test_wgpu_native_buffer.py +++ b/tests/test_wgpu_native_buffer.py @@ -35,7 +35,7 @@ def test_buffer_init1(): ) # Download from buffer to CPU - buf.map(wgpu.MapMode.READ) + buf.map_sync(wgpu.MapMode.READ) wgpu.backends.wgpu_native._api.libf.wgpuDevicePoll( buf._device._internal, True, wgpu.backends.wgpu_native.ffi.NULL ) @@ -74,7 +74,7 @@ def test_buffer_init2(): buf.unmap() # Download from buffer to CPU - buf.map("read") + buf.map_sync("read") data2 = buf.read_mapped() buf.unmap() print(data2.tobytes()) @@ -108,7 +108,7 @@ def test_buffer_init3(): buf = device.create_buffer(size=len(data1), usage="MAP_WRITE | COPY_SRC") # Write data to it - buf.map("write") + buf.map_sync("write") buf.write_mapped(data1) buf.unmap() @@ -124,7 +124,7 @@ def test_buffer_init3(): device.queue.write_buffer(buf, 0, data1) # Download from buffer to CPU - buf.map("read") + buf.map_sync("read") data2 = buf.read_mapped() buf.unmap() assert data1 == data2 @@ -149,7 +149,7 @@ def test_consequitive_writes1(): # Write in parts for i in range(4): - buf.map("write") + buf.map_sync("write") buf.write_mapped(f"{i+1}".encode() * 8, i * 8) buf.unmap() @@ -175,7 +175,7 @@ def test_consequitive_writes2(): ) # Write in parts - buf.map("write") + buf.map_sync("write") for i in range(4): buf.write_mapped(f"{i+1}".encode() * 8, i * 8) buf.unmap() @@ -205,13 
+205,13 @@ def test_consequitive_reads(): # Read in parts, the inefficient way for i in range(4): - buf.map("read") + buf.map_sync("read") data = buf.read_mapped(i * 8, 8) assert data == f"{i+1}".encode() * 8 buf.unmap() # Read in parts, the efficient way - buf.map("read") + buf.map_sync("read") for i in range(4): data = buf.read_mapped(i * 8, 8) assert data == f"{i+1}".encode() * 8 @@ -234,15 +234,15 @@ def test_buffer_mapping_fails(): buf.read_mapped() # Not mapped with raises(ValueError): - buf.map("boo") # Invalid map mode + buf.map_sync("boo") # Invalid map mode - buf.map("write", 0, 28) + buf.map_sync("write", 0, 28) with raises(RuntimeError): - buf.map("write") # Cannot map twice + buf.map_sync("write") # Cannot map twice with raises(RuntimeError): - buf.map("read") # Cannot map twice + buf.map_sync("read") # Cannot map twice with raises(RuntimeError): buf.read_mapped() # Not mapped in read mode @@ -296,13 +296,13 @@ def test_buffer_mapping_fails(): with raises(RuntimeError): buf.write_mapped(data) # not mapped - buf.map("read", 8, 20) + buf.map_sync("read", 8, 20) with raises(RuntimeError): - buf.map("read") # Cannot map twice + buf.map_sync("read") # Cannot map twice with raises(RuntimeError): - buf.map("write") # Cannot map twice + buf.map_sync("write") # Cannot map twice with raises(RuntimeError): buf.write_mapped(data) # not mapped in write mode @@ -334,7 +334,7 @@ def test_buffer_read_no_copy(): device.queue.write_buffer(buf, 0, data1) # Download from buffer to CPU - buf.map("read") + buf.map_sync("read") data2 = buf.read_mapped(copy=False) data3 = buf.read_mapped(0, 8, copy=False) data4 = buf.read_mapped(8, 8, copy=False) @@ -502,7 +502,7 @@ def test_buffer_map_read_and_write(): # Upload data1 = b"abcdefghijkl" - buf1.map("write") + buf1.map_sync("write") buf1.write_mapped(data1) buf1.unmap() @@ -512,7 +512,7 @@ def test_buffer_map_read_and_write(): device.queue.submit([command_encoder.finish()]) # Download - buf2.map("read") + buf2.map_sync("read") 
data2 = buf2.read_mapped() buf2.unmap() assert data1 == data2 diff --git a/tests/test_wgpu_native_query_set.py b/tests/test_wgpu_native_query_set.py index 805ebba6..00ed8fd8 100644 --- a/tests/test_wgpu_native_query_set.py +++ b/tests/test_wgpu_native_query_set.py @@ -30,8 +30,8 @@ def test_query_set(): for i in range(n): data1[i] = float(i) - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") - device = adapter.request_device( + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") + device = adapter.request_device_sync( required_features=[wgpu.FeatureName.timestamp_query] ) diff --git a/tests/test_wgpu_native_texture.py b/tests/test_wgpu_native_texture.py index 6bd300e0..0861fdb5 100644 --- a/tests/test_wgpu_native_texture.py +++ b/tests/test_wgpu_native_texture.py @@ -58,7 +58,7 @@ def test_do_a_copy_roundtrip(): # Upload from CPU to buffer # assert buf1.state == "unmapped" - # mapped_data = buf1.map(wgpu.MapMode.WRITE) + # mapped_data = buf1.map_sync(wgpu.MapMode.WRITE) # assert buf1.state == "mapped" # mapped_data.cast("f")[:] = data1 # buf1.unmap() @@ -97,7 +97,7 @@ def test_do_a_copy_roundtrip(): # Download from buffer to CPU # assert buf5.state == "unmapped" # assert buf5.map_mode == 0 - # result_data = buf5.map(wgpu.MapMode.READ) # a memoryview + # result_data = buf5.map_sync(wgpu.MapMode.READ) # a memoryview # assert buf5.state == "mapped" # assert buf5.map_mode == wgpu.MapMode.READ # buf5.unmap() @@ -115,7 +115,7 @@ def test_do_a_copy_roundtrip(): # Upload from CPU to buffer # assert buf1.state == "unmapped" # assert buf1.map_mode == 0 - # mapped_data = buf1.map(wgpu.MapMode.WRITE) + # mapped_data = buf1.map_sync(wgpu.MapMode.WRITE) # assert buf1.state == "mapped" # assert buf1.map_mode == wgpu.MapMode.WRITE # mapped_data.cast("f")[:] = data3 @@ -150,7 +150,7 @@ def test_do_a_copy_roundtrip(): # Download from buffer to CPU # assert buf5.state == "unmapped" - # result_data = buf5.map(wgpu.MapMode.READ) # always 
an uint8 array + # result_data = buf5.map_sync(wgpu.MapMode.READ) # always an uint8 array # assert buf5.state == "mapped" # buf5.unmap() # assert buf5.state == "unmapped" diff --git a/tests/test_wgpu_vertex_instance.py b/tests/test_wgpu_vertex_instance.py index ecda57dc..dfba3e42 100644 --- a/tests/test_wgpu_vertex_instance.py +++ b/tests/test_wgpu_vertex_instance.py @@ -72,16 +72,16 @@ class Runner: @classmethod def is_usable(cls): - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") return set(cls.REQUIRED_FEATURES) <= adapter.features def __init__(self): - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") features = [ *self.REQUIRED_FEATURES, *[x for x in self.OPTIONAL_FEATURES if x in adapter.features], ] - self.device = adapter.request_device(required_features=features) + self.device = adapter.request_device_sync(required_features=features) self.output_texture = self.device.create_texture( # Actual size is immaterial. Could just be 1x1 size=[128, 128], diff --git a/tests_mem/test_destroy.py b/tests_mem/test_destroy.py index 3736624b..3424cc1f 100644 --- a/tests_mem/test_destroy.py +++ b/tests_mem/test_destroy.py @@ -26,7 +26,7 @@ def test_destroy_device(n): adapter = DEVICE.adapter for i in range(n): - d = adapter.request_device() + d = adapter.request_device_sync() d.destroy() # NOTE: destroy is not yet implemented in wgpu-natice - this does not actually do anything yet yield d @@ -57,7 +57,7 @@ def test_destroy_buffer(n): # Uncomment the following lines to see. These are commented because it makes wgpu-core create a command-buffer. 
# try: - # b.map("READ") + # b.map_sync("READ") # except wgpu.GPUValidationError as err: # error = err # assert "destroyed" in error.message.lower() diff --git a/tests_mem/test_objects.py b/tests_mem/test_objects.py index 6aee8068..ce34a10a 100644 --- a/tests_mem/test_objects.py +++ b/tests_mem/test_objects.py @@ -20,7 +20,7 @@ def test_release_adapter(n): yield {} for i in range(n): - yield wgpu.gpu.request_adapter(power_preference="high-performance") + yield wgpu.gpu.request_adapter_sync(power_preference="high-performance") @create_and_release @@ -33,7 +33,7 @@ def test_release_device(n): } adapter = DEVICE.adapter for i in range(n): - d = adapter.request_device() + d = adapter.request_device_sync() yield d @@ -197,7 +197,7 @@ def test_release_queue(n): } adapter = DEVICE.adapter for i in range(n): - d = adapter.request_device() + d = adapter.request_device_sync() q = d.queue d._queue = None # detach yield q diff --git a/wgpu/__init__.py b/wgpu/__init__.py index 0c9ea7cd..646eef13 100644 --- a/wgpu/__init__.py +++ b/wgpu/__init__.py @@ -25,5 +25,5 @@ def request_adapter(*args, **kwargs): """Deprecated!""" raise DeprecationWarning( - "wgpu.request_adapter() is deprecated! Use wgpu.gpu.request_adapter() instead." + "wgpu.request_adapter() is deprecated! Use wgpu.gpu.request_adapter_sync() instead." ) diff --git a/wgpu/_classes.py b/wgpu/_classes.py index fa7dde84..ad020a8e 100644 --- a/wgpu/_classes.py +++ b/wgpu/_classes.py @@ -681,7 +681,7 @@ class GPUDevice(GPUObjectBase): from it: when the device is lost, all objects created from it become invalid. - Create a device using `GPUAdapter.request_device()` or + Create a device using `GPUAdapter.request_device_sync()` or `GPUAdapter.request_device_async()`. """ @@ -2221,7 +2221,7 @@ def write_buffer(self, buffer, buffer_offset, data, data_offset=0, size=None): Alignment: the buffer offset must be a multiple of 4, the total size to write must be a multiple of 4 bytes. - Also see `GPUBuffer.map()`. 
+ Also see `GPUBuffer.map_sync()` and `GPUBuffer.map_async()`. """ raise NotImplementedError() @@ -2239,7 +2239,7 @@ def read_buffer(self, buffer, buffer_offset=0, size=None): and then maps that buffer to read the data. The given buffer's usage must include COPY_SRC. - Also see `GPUBuffer.map()`. + Also see `GPUBuffer.map_sync()` and `GPUBuffer.map_async()`. """ raise NotImplementedError() @@ -2546,4 +2546,4 @@ def proxy_method(self, *args, **kwargs): _seed_object_counts() _set_repr_methods() -_set_compat_methods_for_async_methods() +# _set_compat_methods_for_async_methods() diff --git a/wgpu/backends/js_webgpu/__init__.py b/wgpu/backends/js_webgpu/__init__.py index d19d6c24..d8842abf 100644 --- a/wgpu/backends/js_webgpu/__init__.py +++ b/wgpu/backends/js_webgpu/__init__.py @@ -12,7 +12,7 @@ class GPU: - def request_adapter(self, **parameters): + def request_adapter_sync(self, **parameters): raise NotImplementedError("Cannot use sync API functions in JS.") async def request_adapter_async(self, **parameters): diff --git a/wgpu/backends/rs.py b/wgpu/backends/rs.py index a2e4a187..cfe3f2aa 100644 --- a/wgpu/backends/rs.py +++ b/wgpu/backends/rs.py @@ -6,7 +6,7 @@ WARNING: wgpu.backends.rs is deprecated. Instead you can use: - import wgpu.backends.wgpu_native to use the backend by its new name. - import wgpu.backends.auto to do the same, but simpler and more future proof. -- simply use wgpu.gpu.request_adapter_sync() to auto-load the backend. 
""".strip() print(_deprecation_msg) diff --git a/wgpu/backends/wgpu_native/_api.py b/wgpu/backends/wgpu_native/_api.py index c12d4a2d..f372fd61 100644 --- a/wgpu/backends/wgpu_native/_api.py +++ b/wgpu/backends/wgpu_native/_api.py @@ -2224,7 +2224,7 @@ def get_compilation_info_sync(self): return [] async def get_compilation_info_async(self): - raise NotImplementedError() + return self.get_compilation_info_sync() class GPUPipelineBase(classes.GPUPipelineBase): @@ -3098,7 +3098,7 @@ def read_buffer(self, buffer, buffer_offset=0, size=None): self.submit([command_buffer]) # Download from mappable buffer - tmp_buffer.map("READ_NOSYNC") + tmp_buffer.map_sync("READ_NOSYNC") data = tmp_buffer.read_mapped() # Explicit drop. @@ -3202,7 +3202,7 @@ def read_texture(self, source, data_layout, size): self.submit([command_buffer]) # Download from mappable buffer - tmp_buffer.map("READ_NOSYNC") + tmp_buffer.map_sync("READ_NOSYNC") data = tmp_buffer.read_mapped() # Explicit drop. diff --git a/wgpu/backends/wgpu_native/extras.py b/wgpu/backends/wgpu_native/extras.py index e04196c9..e2fe8005 100644 --- a/wgpu/backends/wgpu_native/extras.py +++ b/wgpu/backends/wgpu_native/extras.py @@ -14,7 +14,7 @@ def enumerate_adapters(): raise RuntimeError("Deprecated: use wgpu.gpu.enumerate_adapters() instead.") -def request_device( +def request_device_sync( adapter, trace_path, *, @@ -35,6 +35,14 @@ def request_device( ) +# Backwards compat for deprecated function +def request_device(*args, **kwargs): + logger.warning( + "WGPU: wgpu.backends.wgpu_native.request_device() is deprecated, use request_device_sync() instead." 
+ ) + return request_device_sync(*args, **kwargs) + + def create_pipeline_layout( device, *, diff --git a/wgpu/utils/device.py b/wgpu/utils/device.py index 1a42076e..c50dbbae 100644 --- a/wgpu/utils/device.py +++ b/wgpu/utils/device.py @@ -12,6 +12,6 @@ def get_default_device(): if _default_device is None: import wgpu.backends.auto # noqa - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") - _default_device = adapter.request_device() + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") + _default_device = adapter.request_device_sync() return _default_device From b5ecbc1dd0515bd2522a9b3587a9998f311a3cd2 Mon Sep 17 00:00:00 2001 From: Almar Klein Date: Wed, 25 Sep 2024 14:04:15 +0200 Subject: [PATCH 13/18] forgot one --- wgpu/backends/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgpu/backends/__init__.py b/wgpu/backends/__init__.py index 3e78dc0f..577708cd 100644 --- a/wgpu/backends/__init__.py +++ b/wgpu/backends/__init__.py @@ -14,7 +14,7 @@ def _register_backend(gpu): root_namespace = sys.modules["wgpu"].__dict__ needed_attributes = ( - "request_adapter", + "request_adapter_sync", "request_adapter_async", "wgsl_language_features", ) From 79fec1d960eb425caa83d425044710893ec9aa13 Mon Sep 17 00:00:00 2001 From: Almar Klein Date: Wed, 25 Sep 2024 14:11:57 +0200 Subject: [PATCH 14/18] fix --- tests/test_api.py | 1 + wgpu/backends/wgpu_native/__init__.py | 2 +- wgpu/backends/wgpu_native/extras.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/test_api.py b/tests/test_api.py index a5f75cdb..52599f62 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -201,6 +201,7 @@ class GPU: with raises(RuntimeError): wgpu.backends._register_backend(fake_gpu) + fake_gpu.request_adapter_sync = lambda: None fake_gpu.request_adapter_async = lambda: None fake_gpu.wgsl_language_features = set() wgpu.backends._register_backend(fake_gpu) diff --git 
a/wgpu/backends/wgpu_native/__init__.py b/wgpu/backends/wgpu_native/__init__.py index 430200dd..60779cc0 100644 --- a/wgpu/backends/wgpu_native/__init__.py +++ b/wgpu/backends/wgpu_native/__init__.py @@ -18,4 +18,4 @@ gpu = GPU() # noqa: F405 _register_backend(gpu) # noqa: F405 -from .extras import enumerate_adapters, request_device # noqa: F401, E402 +from .extras import enumerate_adapters, request_device_sync, request_device # noqa: F401, E402 diff --git a/wgpu/backends/wgpu_native/extras.py b/wgpu/backends/wgpu_native/extras.py index e2fe8005..2fd772cc 100644 --- a/wgpu/backends/wgpu_native/extras.py +++ b/wgpu/backends/wgpu_native/extras.py @@ -11,7 +11,7 @@ def enumerate_adapters(): """Deprecated.""" - raise RuntimeError("Deprecated: use wgpu.gpu.enumerate_adapters() instead.") + raise RuntimeError("Deprecated: use wgpu.gpu.enumerate_adapters_sync() instead.") def request_device_sync( From acfeff5fe3e19145ffb2a8aefba08fd37aeb9b70 Mon Sep 17 00:00:00 2001 From: Almar Klein Date: Wed, 25 Sep 2024 14:16:50 +0200 Subject: [PATCH 15/18] format --- wgpu/backends/wgpu_native/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/wgpu/backends/wgpu_native/__init__.py b/wgpu/backends/wgpu_native/__init__.py index 60779cc0..0e81c859 100644 --- a/wgpu/backends/wgpu_native/__init__.py +++ b/wgpu/backends/wgpu_native/__init__.py @@ -18,4 +18,5 @@ gpu = GPU() # noqa: F405 _register_backend(gpu) # noqa: F405 -from .extras import enumerate_adapters, request_device_sync, request_device # noqa: F401, E402 +from .extras import enumerate_adapters # noqa: F401, E402 +from .extras import request_device_sync, request_device # noqa: F401, E402 From f7f6964534494c48b1e429bb58952665d00a9291 Mon Sep 17 00:00:00 2001 From: Almar Klein Date: Wed, 25 Sep 2024 14:35:20 +0200 Subject: [PATCH 16/18] Logic to disable sync method for portability testing --- wgpu/backends/wgpu_native/_api.py | 77 +++++++++++++++++++++---------- 1 file changed, 52 insertions(+), 25 
deletions(-) diff --git a/wgpu/backends/wgpu_native/_api.py b/wgpu/backends/wgpu_native/_api.py index f372fd61..dc97f3f4 100644 --- a/wgpu/backends/wgpu_native/_api.py +++ b/wgpu/backends/wgpu_native/_api.py @@ -45,6 +45,9 @@ # %% Helper functions and objects +def check_can_use_sync_variants(): + if False: # placeholder, let's implement a little wgpu config thingy + raise RuntimeError("Disallowed use of '_sync' API.") # Object to be able to bind the lifetime of objects to other objects _refs_per_struct = WeakKeyDictionary() @@ -307,6 +310,19 @@ class GPU(classes.GPU): def request_adapter_sync( self, *, power_preference=None, force_fallback_adapter=False, canvas=None + ): + """Sync version of ``request_adapter_async()``. + This is the implementation based on wgpu-native. + """ + check_can_use_sync_variants() + return self._request_adapter( + power_preference=power_preference, + force_fallback_adapter=force_fallback_adapter, + canvas=canvas, + ) + + async def request_adapter_async( + self, *, power_preference=None, force_fallback_adapter=False, canvas=None ): """Create a `GPUAdapter`, the object that represents an abstract wgpu implementation, from which one can request a `GPUDevice`. @@ -320,7 +336,15 @@ def request_adapter_sync( canvas (WgpuCanvasInterface): The canvas that the adapter should be able to render to. This can typically be left to None. """ + return self._request_adapter( + power_preference=power_preference, + force_fallback_adapter=force_fallback_adapter, + canvas=canvas, + ) # no-cover + def _request_adapter( + self, *, power_preference=None, force_fallback_adapter=False, canvas=None + ): # ----- Surface ID # Get surface id that the adapter must be compatible with. If we @@ -383,22 +407,20 @@ def callback(status, result, message, userdata): return self._create_adapter(adapter_id) - async def request_adapter_async( - self, *, power_preference=None, force_fallback_adapter=False, canvas=None - ): - """Async version of ``request_adapter()``. 
+ def enumerate_adapters_sync(self): + """Sync version of ``enumerate_adapters_async()``. This is the implementation based on wgpu-native. """ - return self.request_adapter_sync( - power_preference=power_preference, - force_fallback_adapter=force_fallback_adapter, - canvas=canvas, - ) # no-cover + check_can_use_sync_variants() + return self._enumerate_adapters() - def enumerate_adapters_sync(self): + async def enumerate_adapters_async(self): """Get a list of adapter objects available on the current system. This is the implementation based on wgpu-native. """ + return self._enumerate_adapters() + + def _enumerate_adapters(self): # The first call is to get the number of adapters, and the second call # is to get the actual adapters. Note that the second arg (now NULL) can # be a `WGPUInstanceEnumerateAdapterOptions` to filter by backend. @@ -410,12 +432,6 @@ def enumerate_adapters_sync(self): libf.wgpuInstanceEnumerateAdapters(instance, ffi.NULL, adapters) return [self._create_adapter(adapter) for adapter in adapters] - async def enumerate_adapters_async(self): - """Async version of ``enumerate_adapters_sync()``. - This is the implementation based on wgpu-native. 
- """ - return self.enumerate_adapters_sync() - def _create_adapter(self, adapter_id): # ----- Get adapter info @@ -823,6 +839,7 @@ def request_device_sync( required_limits: "Dict[str, int]" = {}, default_queue: "structs.QueueDescriptor" = {}, ): + check_can_use_sync_variants() if default_queue: check_struct("QueueDescriptor", default_queue) return self._request_device( @@ -1854,6 +1871,7 @@ def create_query_set(self, *, label="", type: "enums.QueryType", count: int): return GPUQuerySet(label, query_id, self._internal, type, count) def _get_lost_sync(self): + check_can_use_sync_variants() raise NotImplementedError() async def _get_lost_async(self): @@ -1918,6 +1936,13 @@ def _check_range(self, offset, size): return offset, size def map_sync(self, mode, offset=0, size=None): + check_can_use_sync_variants() + return self._map(mode, offset, size) + + async def map_async(self, mode, offset=0, size=None): + return self._map(mode, offset, size) # for now + + def _map(self, mode, offset=0, size=None): sync_on_read = True # Check mode @@ -1967,9 +1992,6 @@ def callback(status_, user_data_p): self._mapped_status = offset, offset + size, mode self._mapped_memoryviews = [] - async def map_async(self, mode, offset=0, size=None): - return self.map_sync(mode, offset, size) # for now - def unmap(self): if self._map_state != enums.BufferMapState.mapped: raise RuntimeError("Can only unmap a buffer if its currently mapped.") @@ -2193,7 +2215,14 @@ class GPUShaderModule(classes.GPUShaderModule, GPUObjectBase): _release_function = libf.wgpuShaderModuleRelease def get_compilation_info_sync(self): - # Here's a little setup to implement this method. Unfortunately, + check_can_use_sync_variants() + return self._get_compilation_info() + + async def get_compilation_info_async(self): + return self._get_compilation_info() + + def _get_compilation_info(self): + # Here's a little setup to implement this method. Unfortunately, # this is not yet implemented in wgpu-native. 
Another problem # is that if there is an error in the shader source, we raise # an exception, so the user never gets a GPUShaderModule object @@ -2223,9 +2252,6 @@ def get_compilation_info_sync(self): return [] - async def get_compilation_info_async(self): - return self.get_compilation_info_sync() - class GPUPipelineBase(classes.GPUPipelineBase): def get_bind_group_layout(self, index): @@ -3098,7 +3124,7 @@ def read_buffer(self, buffer, buffer_offset=0, size=None): self.submit([command_buffer]) # Download from mappable buffer - tmp_buffer.map_sync("READ_NOSYNC") + tmp_buffer._map("READ_NOSYNC") data = tmp_buffer.read_mapped() # Explicit drop. @@ -3202,7 +3228,7 @@ def read_texture(self, source, data_layout, size): self.submit([command_buffer]) # Download from mappable buffer - tmp_buffer.map_sync("READ_NOSYNC") + tmp_buffer._map("READ_NOSYNC") data = tmp_buffer.read_mapped() # Explicit drop. @@ -3227,6 +3253,7 @@ def read_texture(self, source, data_layout, size): return data def on_submitted_work_done_sync(self): + check_can_use_sync_variants() # In JS, this returns a Promise that can be awaited to (async) wait # for the work that is currently in the pipeline. We need to figure out # how to expose these async parts. 
From fc688f86720e182319dafb53ebbb656642aa11ee Mon Sep 17 00:00:00 2001 From: Almar Klein Date: Wed, 25 Sep 2024 14:36:27 +0200 Subject: [PATCH 17/18] format and enable backwards compat again --- wgpu/_classes.py | 2 +- wgpu/backends/wgpu_native/_api.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/wgpu/_classes.py b/wgpu/_classes.py index ad020a8e..2504554f 100644 --- a/wgpu/_classes.py +++ b/wgpu/_classes.py @@ -2546,4 +2546,4 @@ def proxy_method(self, *args, **kwargs): _seed_object_counts() _set_repr_methods() -# _set_compat_methods_for_async_methods() +_set_compat_methods_for_async_methods() diff --git a/wgpu/backends/wgpu_native/_api.py b/wgpu/backends/wgpu_native/_api.py index dc97f3f4..0326ba09 100644 --- a/wgpu/backends/wgpu_native/_api.py +++ b/wgpu/backends/wgpu_native/_api.py @@ -45,10 +45,12 @@ # %% Helper functions and objects + def check_can_use_sync_variants(): if False: # placeholder, let's implement a little wgpu config thingy raise RuntimeError("Disallowed use of '_sync' API.") + # Object to be able to bind the lifetime of objects to other objects _refs_per_struct = WeakKeyDictionary() @@ -2222,7 +2224,7 @@ async def get_compilation_info_async(self): return self._get_compilation_info() def _get_compilation_info(self): - # Here's a little setup to implement this method. Unfortunately, + # Here's a little setup to implement this method. Unfortunately, # this is not yet implemented in wgpu-native. 
Another problem # is that if there is an error in the shader source, we raise # an exception, so the user never gets a GPUShaderModule object From 453ba9ed2db2c4fce5ff1debae2f15dc87cf4c44 Mon Sep 17 00:00:00 2001 From: Almar Klein Date: Wed, 25 Sep 2024 14:41:10 +0200 Subject: [PATCH 18/18] codegen --- wgpu/resources/codegen_report.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgpu/resources/codegen_report.md b/wgpu/resources/codegen_report.md index 7d3787bb..d29d8407 100644 --- a/wgpu/resources/codegen_report.md +++ b/wgpu/resources/codegen_report.md @@ -20,7 +20,7 @@ * Diffs for GPUQueue: add read_buffer, add read_texture, hide copy_external_image_to_texture * Validated 37 classes, 126 methods, 46 properties ### Patching API for backends/wgpu_native/_api.py -* Validated 37 classes, 101 methods, 0 properties +* Validated 37 classes, 105 methods, 0 properties ## Validating backends/wgpu_native/_api.py * Enum field FeatureName.texture-compression-bc-sliced-3d missing in wgpu.h * Enum field FeatureName.clip-distances missing in wgpu.h