diff --git a/Cargo.lock b/Cargo.lock index 0855cb2be10..1b2d835a120 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2415,6 +2415,7 @@ dependencies = [ "libm", "log", "naga-test", + "naga-types", "num-traits", "once_cell", "petgraph 0.8.3", @@ -2466,6 +2467,18 @@ dependencies = [ "toml", ] +[[package]] +name = "naga-types" +version = "29.0.0" +dependencies = [ + "arbitrary", + "hashbrown 0.16.1", + "indexmap", + "rustc-hash 1.1.0", + "serde", + "thiserror 2.0.18", +] + [[package]] name = "naga-xtask" version = "0.1.0" @@ -4719,6 +4732,7 @@ dependencies = [ "log", "macro_rules_attribute", "naga", + "naga-types", "once_cell", "parking_lot", "portable-atomic", @@ -4855,6 +4869,7 @@ dependencies = [ "log", "mach-dxcompiler-rs", "naga", + "naga-types", "ndk-sys", "objc2 0.6.4", "objc2-core-foundation", @@ -4972,6 +4987,7 @@ dependencies = [ "hashbrown 0.16.1", "js-sys", "log", + "naga-types", "raw-window-handle", "serde", "serde_json", diff --git a/Cargo.toml b/Cargo.toml index 2b4c6083734..a8bc30f3445 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,6 +12,7 @@ members = [ "lock-analyzer", "naga-cli", "naga-test", + "naga-types", "naga", "naga/fuzz", "naga/hlsl-snapshots", @@ -37,6 +38,7 @@ default-members = [ "lock-analyzer", "naga-cli", "naga-test", + "naga-types", "naga", "naga/fuzz", "naga/hlsl-snapshots", @@ -72,6 +74,7 @@ authors = ["gfx-rs developers"] [workspace.dependencies] naga = { version = "29.0.0", path = "./naga" } naga-test = { path = "./naga-test" } +naga-types = { version = "29.0.0", path = "./naga-types" } wgpu = { version = "29.0.0", path = "./wgpu", default-features = false, features = [ "std", "serde", diff --git a/README.md b/README.md index 799fbac4d23..1284fd36088 100644 --- a/README.md +++ b/README.md @@ -107,7 +107,7 @@ We will avoid bumping the MSRV of `wgpu` without good reason, and such a change Due to complex dependants, we have three MSRV policies: - `wgpu`'s MSRV is **1.87** -- `wgpu-core` (and hence `wgpu-hal`, `naga`, and `wgpu-types`)'s 
MSRV is **1.87**. +- `wgpu-core` (and hence `wgpu-hal`, `naga`, `naga-types` and `wgpu-types`)'s MSRV is **1.87**. - The rest of the workspace has an MSRV of **1.93**. It is enforced on CI (in "/.github/workflows/ci.yml") with the `WGPU_MSRV`, `CORE_MSRV`, and `REPO_MSRV` variables, respectively. diff --git a/naga-types/Cargo.toml b/naga-types/Cargo.toml new file mode 100644 index 00000000000..a4163467a0f --- /dev/null +++ b/naga-types/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "naga-types" +version.workspace = true +authors.workspace = true +edition.workspace = true +description = "Common types for naga and wgpu" +homepage.workspace = true +repository.workspace = true +license.workspace = true + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[features] +serialize = ["dep:serde", "indexmap/serde"] +deserialize = ["dep:serde", "indexmap/serde"] +arbitrary = ["dep:arbitrary", "std", "indexmap/arbitrary"] +std = [] + +[dependencies] +arbitrary = { workspace = true, features = ["derive"], optional = true } +hashbrown.workspace = true +indexmap.workspace = true +rustc-hash.workspace = true +serde = { workspace = true, features = ["alloc", "derive"], optional = true } +thiserror.workspace = true diff --git a/naga-types/LICENSE.APACHE b/naga-types/LICENSE.APACHE new file mode 100644 index 00000000000..d9a10c0d8e8 --- /dev/null +++ b/naga-types/LICENSE.APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/naga-types/LICENSE.MIT b/naga-types/LICENSE.MIT new file mode 100644 index 00000000000..8d02e4dbd5f --- /dev/null +++ b/naga-types/LICENSE.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 The gfx-rs developers + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/naga-types/README.md b/naga-types/README.md new file mode 100644 index 00000000000..026046ef6d3 --- /dev/null +++ b/naga-types/README.md @@ -0,0 +1,5 @@ +# naga-types + +Naga-types contains some types used by both naga and wgpu. Naga may be an optional dependency of wgpu in the future, +so these can't live in naga. Additionally, naga is a mostly independent crate, so it cannot depend on wgpu-types. +For this reason, the types must live in a naga-specific crate but not naga itself. Naga-types serves that purpose. 
diff --git a/naga-types/src/glsl.rs b/naga-types/src/glsl.rs new file mode 100644 index 00000000000..07915793a12 --- /dev/null +++ b/naga-types/src/glsl.rs @@ -0,0 +1,233 @@ +use core::{cmp::Ordering, fmt}; + +// Must match code in glsl_built_in +pub const FIRST_INSTANCE_BINDING: &str = "naga_vs_first_instance"; + +/// List of supported `core` GLSL versions. +pub const SUPPORTED_CORE_VERSIONS: &[u16] = &[140, 150, 330, 400, 410, 420, 430, 440, 450, 460]; +/// List of supported `es` GLSL versions. +pub const SUPPORTED_ES_VERSIONS: &[u16] = &[300, 310, 320]; + +/// A GLSL version. +#[derive(Debug, Copy, Clone, PartialEq)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize))] +#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] +pub enum Version { + /// `core` GLSL. + Desktop(u16), + /// `es` GLSL. + Embedded { version: u16, is_webgl: bool }, +} + +impl Version { + /// Create a new gles version + pub const fn new_gles(version: u16) -> Self { + Self::Embedded { + version, + is_webgl: false, + } + } + + /// Returns true if self is `Version::Embedded` (i.e. is a es version) + pub const fn is_es(&self) -> bool { + match *self { + Version::Desktop(_) => false, + Version::Embedded { .. } => true, + } + } + + /// Returns true if targeting WebGL + pub const fn is_webgl(&self) -> bool { + match *self { + Version::Desktop(_) => false, + Version::Embedded { is_webgl, .. } => is_webgl, + } + } + + /// Checks the list of currently supported versions and returns true if it contains the + /// specified version + /// + /// # Notes + /// As an invalid version number will never be added to the supported version list + /// so this also checks for version validity + pub fn is_supported(&self) -> bool { + match *self { + Version::Desktop(v) => SUPPORTED_CORE_VERSIONS.contains(&v), + Version::Embedded { version: v, .. 
} => SUPPORTED_ES_VERSIONS.contains(&v), + } + } + + pub fn supports_io_locations(&self) -> bool { + *self >= Version::Desktop(330) || *self >= Version::new_gles(300) + } + + /// Checks if the version supports all of the explicit layouts: + /// - `location=` qualifiers for bindings + /// - `binding=` qualifiers for resources + /// + /// Note: `location=` for vertex inputs and fragment outputs is supported + /// unconditionally for GLES 300. + pub fn supports_explicit_locations(&self) -> bool { + *self >= Version::Desktop(420) || *self >= Version::new_gles(310) + } + + pub fn supports_early_depth_test(&self) -> bool { + *self >= Version::Desktop(130) || *self >= Version::new_gles(310) + } + + pub fn supports_std140_layout(&self) -> bool { + *self >= Version::Desktop(140) || *self >= Version::new_gles(300) + } + + pub fn supports_std430_layout(&self) -> bool { + // std430 is available from 400 via GL_ARB_shader_storage_buffer_object. + *self >= Version::Desktop(400) || *self >= Version::new_gles(310) + } + + pub fn supports_fma_function(&self) -> bool { + *self >= Version::Desktop(400) || *self >= Version::new_gles(320) + } + + pub fn supports_integer_functions(&self) -> bool { + *self >= Version::Desktop(400) || *self >= Version::new_gles(310) + } + + pub fn supports_frexp_function(&self) -> bool { + *self >= Version::Desktop(400) || *self >= Version::new_gles(310) + } + + pub fn supports_derivative_control(&self) -> bool { + *self >= Version::Desktop(450) + } + + // For supports_pack_unpack_4x8, supports_pack_unpack_snorm_2x16, supports_pack_unpack_unorm_2x16 + // see: + // https://registry.khronos.org/OpenGL-Refpages/gl4/html/unpackUnorm.xhtml + // https://registry.khronos.org/OpenGL-Refpages/es3/html/unpackUnorm.xhtml + // https://registry.khronos.org/OpenGL-Refpages/gl4/html/packUnorm.xhtml + // https://registry.khronos.org/OpenGL-Refpages/es3/html/packUnorm.xhtml + pub fn supports_pack_unpack_4x8(&self) -> bool { + *self >= Version::Desktop(400) || *self >= 
Version::new_gles(310) + } + pub fn supports_pack_unpack_snorm_2x16(&self) -> bool { + *self >= Version::Desktop(420) || *self >= Version::new_gles(300) + } + pub fn supports_pack_unpack_unorm_2x16(&self) -> bool { + *self >= Version::Desktop(400) || *self >= Version::new_gles(300) + } + + // https://registry.khronos.org/OpenGL-Refpages/gl4/html/unpackHalf2x16.xhtml + // https://registry.khronos.org/OpenGL-Refpages/gl4/html/packHalf2x16.xhtml + // https://registry.khronos.org/OpenGL-Refpages/es3/html/unpackHalf2x16.xhtml + // https://registry.khronos.org/OpenGL-Refpages/es3/html/packHalf2x16.xhtml + pub fn supports_pack_unpack_half_2x16(&self) -> bool { + *self >= Version::Desktop(420) || *self >= Version::new_gles(300) + } +} + +impl PartialOrd for Version { + fn partial_cmp(&self, other: &Self) -> Option<Ordering> { + match (*self, *other) { + (Version::Desktop(x), Version::Desktop(y)) => Some(x.cmp(&y)), + (Version::Embedded { version: x, .. }, Version::Embedded { version: y, .. }) => { + Some(x.cmp(&y)) + } + _ => None, + } + } +} + +impl fmt::Display for Version { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Version::Desktop(v) => write!(f, "{v} core"), + Version::Embedded { version: v, .. } => write!(f, "{v} es"), + } + } +} + +/// Mapping between resources and bindings.
+pub type BindingMap = alloc::collections::BTreeMap<crate::ResourceBinding, u8>; + +/// Separate type from `naga::ScalarKind` so that naga can easily add impl's +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize))] +#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] +pub enum GlslScalarKind { + Sint, + Uint, + Float, +} + +/// Separate type from `naga::VectorSize` so that naga can easily add impl's +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize))] +#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] +pub enum GlslVectorSize { + Bi = 2, + Tri = 3, + Quad = 4, +} +impl GlslVectorSize { + pub fn alignment(&self) -> u32 { + match self { + Self::Bi => 2, + Self::Tri | Self::Quad => 4, + } + } +} + +/// Separate type from `naga::Scalar` so that naga can easily add impl's +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize))] +#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] +pub struct GlslScalar { + pub kind: GlslScalarKind, + pub width: u8, +} + +impl GlslScalar { + pub const F32: Self = Self { + kind: GlslScalarKind::Float, + width: 4, + }; + pub const I32: Self = Self { + kind: GlslScalarKind::Sint, + width: 4, + }; + pub const U32: Self = Self { + kind: GlslScalarKind::Uint, + width: 4, + }; +} + +/// A subset of `naga::TypeInner` so that uniforms can be analyzed without pulling in the entire naga IR.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize))] +#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] +pub enum GlslUniformType { + Scalar(GlslScalar), + Vector { + size: GlslVectorSize, + scalar: GlslScalar, + }, + Matrix { + columns: GlslVectorSize, + rows: GlslVectorSize, + scalar: GlslScalar, + }, +} +impl GlslUniformType { + pub fn size(&self) -> u32 { + match self { + Self::Scalar(scalar) => scalar.width as u32, + Self::Vector { size, scalar } => *size as u32 * scalar.width as u32, + // matrices are treated as arrays of aligned columns + Self::Matrix { + columns, + rows, + scalar, + } => rows.alignment() * scalar.width as u32 * *columns as u32, + } + } +} diff --git a/naga-types/src/hlsl.rs b/naga-types/src/hlsl.rs new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/naga-types/src/hlsl.rs @@ -0,0 +1 @@ + diff --git a/naga-types/src/lib.rs b/naga-types/src/lib.rs new file mode 100644 index 00000000000..3932df3307a --- /dev/null +++ b/naga-types/src/lib.rs @@ -0,0 +1,551 @@ +#![cfg_attr(docsrs, feature(doc_cfg))] +#![no_std] +extern crate alloc; +#[cfg(feature = "std")] +extern crate std; + +pub mod glsl; +pub mod hlsl; +pub mod msl; +pub mod spv; + +/// Create a Markdown link definition referring to the `wgpu` crate. +/// +/// This macro should be used inside a `#[doc = ...]` attribute. +/// The two arguments should be string literals or macros that expand to string literals. +/// If the module in which the item using this macro is located is not the crate root, +/// use the `../` syntax. +/// +/// We cannot simply use rustdoc links to `wgpu` because it is one of our dependents. +/// This link adapts to work in locally generated documentation (`cargo doc`) by default, +/// and work with `docs.rs` URL structure when building for `docs.rs`. +/// +/// Note: This macro cannot be used outside this crate, because `cfg(docsrs)` will not apply. 
+#[cfg(not(docsrs))] +#[macro_export] +macro_rules! link_to_wgpu_docs { + ([$reference:expr]: $url_path:expr) => { + concat!("[", $reference, "]: ../wgpu/", $url_path) + }; + + (../ [$reference:expr]: $url_path:expr) => { + concat!("[", $reference, "]: ../../wgpu/", $url_path) + }; +} +#[cfg(docsrs)] +#[macro_export] +macro_rules! link_to_wgpu_docs { + ($(../)? [$reference:expr]: $url_path:expr) => { + concat!( + "[", + $reference, + // URL path will have a base URL of https://docs.rs/ + "]: /wgpu/", + // The version of wgpu-types is not necessarily the same as the version of wgpu + // if a patch release of either has been published, so we cannot use the full version + // number. docs.rs will interpret this single number as a Cargo-style version + // requirement and redirect to the latest compatible version. + // + // This technique would break if `wgpu` and `wgpu-types` ever switch to having distinct + // major version numbering. An alternative would be to hardcode the corresponding `wgpu` + // version, but that would give us another thing to forget to update. + env!("CARGO_PKG_VERSION_MAJOR"), + "/wgpu/", + $url_path + ) + }; +} + +/// Create a Markdown link definition referring to an item in the `wgpu` crate. +/// +/// This macro should be used inside a `#[doc = ...]` attribute. +/// See [`link_to_wgpu_docs`] for more details. +#[macro_export] +macro_rules! link_to_wgpu_item { + ($kind:ident $name:ident) => { + $crate::link_to_wgpu_docs!( + [concat!("`", stringify!($name), "`")]: concat!(stringify!($kind), ".", stringify!($name), ".html") + ) + }; +} + +/// Create a Markdown link definition referring to the `wgpu_core` crate. +/// +/// This macro should be used inside a `#[doc = ...]` attribute. +/// See [`link_to_wgpu_docs`] for more details. +#[cfg(not(docsrs))] +#[macro_export] +macro_rules! 
link_to_wgc_docs { + ([$reference:expr]: $url_path:expr) => { + concat!("[", $reference, "]: ../wgpu_core/", $url_path) + }; + + (../ [$reference:expr]: $url_path:expr) => { + concat!("[", $reference, "]: ../../wgpu_core/", $url_path) + }; +} +#[cfg(docsrs)] +#[macro_export] +macro_rules! link_to_wgc_docs { + ($(../)? [$reference:expr]: $url_path:expr) => { + concat!( + "[", + $reference, + // URL path will have a base URL of https://docs.rs/ + "]: /wgpu_core/", + // The version of wgpu-types is not necessarily the same as the version of wgpu_core + // if a patch release of either has been published, so we cannot use the full version + // number. docs.rs will interpret this single number as a Cargo-style version + // requirement and redirect to the latest compatible version. + // + // This technique would break if `wgpu_core` and `wgpu-types` ever switch to having + // distinct major version numbering. An alternative would be to hardcode the + // corresponding `wgpu_core` version, but that would give us another thing to forget + // to update. + env!("CARGO_PKG_VERSION_MAJOR"), + "/wgpu_core/", + $url_path + ) + }; +} + +/// Stage of the programmable pipeline. +#[derive(Clone, Copy, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize))] +#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum ShaderStage { + /// A vertex shader, in a render pipeline. + Vertex, + + /// A task shader, in a mesh render pipeline. + Task, + + /// A mesh shader, in a mesh render pipeline. + Mesh, + + /// A fragment shader, in a render pipeline. + Fragment, + + /// Compute pipeline shader. + Compute, + + /// A ray generation shader, in a ray tracing pipeline. + RayGeneration, + + /// A miss shader, in a ray tracing pipeline. + Miss, + + /// An any hit shader, in a ray tracing pipeline. + AnyHit, + + /// A closest hit shader, in a ray tracing pipeline.
+ ClosestHit, +} + +impl ShaderStage { + pub const fn compute_like(self) -> bool { + match self { + Self::Vertex | Self::Fragment => false, + Self::Compute | Self::Task | Self::Mesh => true, + Self::RayGeneration | Self::AnyHit | Self::ClosestHit | Self::Miss => false, + } + } + + /// Mesh or task shader + pub const fn mesh_like(self) -> bool { + matches!(self, Self::Task | Self::Mesh) + } +} + +/// Hash map that is faster but not resilient to DoS attacks. +/// (Similar to rustc_hash::FxHashMap but using hashbrown::HashMap instead of std::collections::HashMap.) +/// To construct a new instance: `FastHashMap::default()` +pub type FastHashMap<K, T> = + hashbrown::HashMap<K, T, core::hash::BuildHasherDefault<rustc_hash::FxHasher>>; + +/// Hash set that is faster but not resilient to DoS attacks. +/// (Similar to rustc_hash::FxHashSet but using hashbrown::HashSet instead of std::collections::HashSet.) +pub type FastHashSet<K> = + hashbrown::HashSet<K, core::hash::BuildHasherDefault<rustc_hash::FxHasher>>; + +/// Insertion-order-preserving hash set (`IndexSet`), but with the same +/// hasher as `FastHashSet` (faster but not resilient to DoS attacks). +pub type FastIndexSet<K> = + indexmap::IndexSet<K, core::hash::BuildHasherDefault<rustc_hash::FxHasher>>; + +/// Insertion-order-preserving hash map (`IndexMap`), but with the same +/// hasher as `FastHashMap` (faster but not resilient to DoS attacks). +pub type FastIndexMap<K, V> = + indexmap::IndexMap<K, V, core::hash::BuildHasherDefault<rustc_hash::FxHasher>>; + +/// Pipeline binding information for global resources. +#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize))] +#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct ResourceBinding { + /// The bind group index. + pub group: u32, + /// Binding number within the group.
+ pub binding: u32, +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize))] +#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] +pub struct TaskDispatchLimits { + pub max_mesh_workgroups_per_dim: u32, + pub max_mesh_workgroups_total: u32, +} + +/// Corresponds to [WebGPU `GPUVertexFormat`]( +/// https://gpuweb.github.io/gpuweb/#enumdef-gpuvertexformat). +#[repr(u32)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize))] +#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] +#[cfg_attr( + any(feature = "serialize", feature = "deserialize"), + serde(rename_all = "lowercase") +)] +pub enum VertexFormat { + /// One unsigned byte (u8). `u32` in shaders. + Uint8 = 0, + /// Two unsigned bytes (u8). `vec2<u32>` in shaders. + Uint8x2 = 1, + /// Four unsigned bytes (u8). `vec4<u32>` in shaders. + Uint8x4 = 2, + /// One signed byte (i8). `i32` in shaders. + Sint8 = 3, + /// Two signed bytes (i8). `vec2<i32>` in shaders. + Sint8x2 = 4, + /// Four signed bytes (i8). `vec4<i32>` in shaders. + Sint8x4 = 5, + /// One unsigned byte (u8). [0, 255] converted to float [0, 1] `f32` in shaders. + Unorm8 = 6, + /// Two unsigned bytes (u8). [0, 255] converted to float [0, 1] `vec2<f32>` in shaders. + Unorm8x2 = 7, + /// Four unsigned bytes (u8). [0, 255] converted to float [0, 1] `vec4<f32>` in shaders. + Unorm8x4 = 8, + /// One signed byte (i8). [-127, 127] converted to float [-1, 1] `f32` in shaders. + Snorm8 = 9, + /// Two signed bytes (i8). [-127, 127] converted to float [-1, 1] `vec2<f32>` in shaders. + Snorm8x2 = 10, + /// Four signed bytes (i8). [-127, 127] converted to float [-1, 1] `vec4<f32>` in shaders. + Snorm8x4 = 11, + /// One unsigned short (u16). `u32` in shaders. + Uint16 = 12, + /// Two unsigned shorts (u16). `vec2<u32>` in shaders. + Uint16x2 = 13, + /// Four unsigned shorts (u16). `vec4<u32>` in shaders. + Uint16x4 = 14, + /// One signed short (i16). `i32` in shaders.
+ Sint16 = 15, + /// Two signed shorts (i16). `vec2<i32>` in shaders. + Sint16x2 = 16, + /// Four signed shorts (i16). `vec4<i32>` in shaders. + Sint16x4 = 17, + /// One unsigned short (u16). [0, 65535] converted to float [0, 1] `f32` in shaders. + Unorm16 = 18, + /// Two unsigned shorts (u16). [0, 65535] converted to float [0, 1] `vec2<f32>` in shaders. + Unorm16x2 = 19, + /// Four unsigned shorts (u16). [0, 65535] converted to float [0, 1] `vec4<f32>` in shaders. + Unorm16x4 = 20, + /// One signed short (i16). [-32767, 32767] converted to float [-1, 1] `f32` in shaders. + Snorm16 = 21, + /// Two signed shorts (i16). [-32767, 32767] converted to float [-1, 1] `vec2<f32>` in shaders. + Snorm16x2 = 22, + /// Four signed shorts (i16). [-32767, 32767] converted to float [-1, 1] `vec4<f32>` in shaders. + Snorm16x4 = 23, + /// One half-precision float (no Rust equiv). `f32` in shaders. + Float16 = 24, + /// Two half-precision floats (no Rust equiv). `vec2<f32>` in shaders. + Float16x2 = 25, + /// Four half-precision floats (no Rust equiv). `vec4<f32>` in shaders. + Float16x4 = 26, + /// One single-precision float (f32). `f32` in shaders. + Float32 = 27, + /// Two single-precision floats (f32). `vec2<f32>` in shaders. + Float32x2 = 28, + /// Three single-precision floats (f32). `vec3<f32>` in shaders. + Float32x3 = 29, + /// Four single-precision floats (f32). `vec4<f32>` in shaders. + Float32x4 = 30, + /// One unsigned int (u32). `u32` in shaders. + Uint32 = 31, + /// Two unsigned ints (u32). `vec2<u32>` in shaders. + Uint32x2 = 32, + /// Three unsigned ints (u32). `vec3<u32>` in shaders. + Uint32x3 = 33, + /// Four unsigned ints (u32). `vec4<u32>` in shaders. + Uint32x4 = 34, + /// One signed int (i32). `i32` in shaders. + Sint32 = 35, + /// Two signed ints (i32). `vec2<i32>` in shaders. + Sint32x2 = 36, + /// Three signed ints (i32). `vec3<i32>` in shaders. + Sint32x3 = 37, + /// Four signed ints (i32). `vec4<i32>` in shaders. + Sint32x4 = 38, + /// One double-precision float (f64). `f32` in shaders. Requires [`Features::VERTEX_ATTRIBUTE_64BIT`].
+ /// + /// [`Features::VERTEX_ATTRIBUTE_64BIT`]: ../wgpu/struct.Features.html#associatedconstant.VERTEX_ATTRIBUTE_64BIT + Float64 = 39, + /// Two double-precision floats (f64). `vec2` in shaders. Requires [`Features::VERTEX_ATTRIBUTE_64BIT`]. + /// + /// [`Features::VERTEX_ATTRIBUTE_64BIT`]: ../wgpu/struct.Features.html#associatedconstant.VERTEX_ATTRIBUTE_64BIT + Float64x2 = 40, + /// Three double-precision floats (f64). `vec3` in shaders. Requires [`Features::VERTEX_ATTRIBUTE_64BIT`]. + /// + /// [`Features::VERTEX_ATTRIBUTE_64BIT`]: ../wgpu/struct.Features.html#associatedconstant.VERTEX_ATTRIBUTE_64BIT + Float64x3 = 41, + /// Four double-precision floats (f64). `vec4` in shaders. Requires [`Features::VERTEX_ATTRIBUTE_64BIT`]. + /// + /// [`Features::VERTEX_ATTRIBUTE_64BIT`]: ../wgpu/struct.Features.html#associatedconstant.VERTEX_ATTRIBUTE_64BIT + Float64x4 = 42, + /// Three unsigned 10-bit integers and one 2-bit integer, packed into a 32-bit integer (u32). [0, 1023] and [0, 3] converted to float [0, 1] `vec4` in shaders. + #[cfg_attr( + any(feature = "serialize", feature = "deserialize"), + serde(rename = "unorm10-10-10-2") + )] + Unorm10_10_10_2 = 43, + /// Four unsigned 8-bit integers (u8) in BGRA. [0, 255] converted to float [0, 1] `vec4` RGBA in shaders. + #[cfg_attr( + any(feature = "serialize", feature = "deserialize"), + serde(rename = "unorm8x4-bgra") + )] + Unorm8x4Bgra = 44, +} + +impl VertexFormat { + /// Returns the byte size of the format. 
+ #[must_use] + pub const fn size(&self) -> u64 { + match self { + Self::Uint8 | Self::Sint8 | Self::Unorm8 | Self::Snorm8 => 1, + Self::Uint8x2 + | Self::Sint8x2 + | Self::Unorm8x2 + | Self::Snorm8x2 + | Self::Uint16 + | Self::Sint16 + | Self::Unorm16 + | Self::Snorm16 + | Self::Float16 => 2, + Self::Uint8x4 + | Self::Sint8x4 + | Self::Unorm8x4 + | Self::Snorm8x4 + | Self::Uint16x2 + | Self::Sint16x2 + | Self::Unorm16x2 + | Self::Snorm16x2 + | Self::Float16x2 + | Self::Float32 + | Self::Uint32 + | Self::Sint32 + | Self::Unorm10_10_10_2 + | Self::Unorm8x4Bgra => 4, + Self::Uint16x4 + | Self::Sint16x4 + | Self::Unorm16x4 + | Self::Snorm16x4 + | Self::Float16x4 + | Self::Float32x2 + | Self::Uint32x2 + | Self::Sint32x2 + | Self::Float64 => 8, + Self::Float32x3 | Self::Uint32x3 | Self::Sint32x3 => 12, + Self::Float32x4 | Self::Uint32x4 | Self::Sint32x4 | Self::Float64x2 => 16, + Self::Float64x3 => 24, + Self::Float64x4 => 32, + } + } + + /// Returns the size read by an acceleration structure build of the vertex format. This is + /// slightly different from [`Self::size`] because the alpha component of 4-component formats + /// are not read in an acceleration structure build, allowing for a smaller stride. + #[must_use] + pub const fn min_acceleration_structure_vertex_stride(&self) -> u64 { + match self { + Self::Float16x2 | Self::Snorm16x2 => 4, + Self::Float32x3 => 12, + Self::Float32x2 => 8, + // This is the minimum value from DirectX + // > A16 component is ignored, other data can be packed there, such as setting vertex stride to 6 bytes + // + // https://microsoft.github.io/DirectX-Specs/d3d/Raytracing.html#d3d12_raytracing_geometry_triangles_desc + // + // Vulkan does not express a minimum stride. 
+ Self::Float16x4 | Self::Snorm16x4 => 6, + _ => unreachable!(), + } + } + + /// Returns the alignment required for `wgpu::BlasTriangleGeometry::vertex_stride` + #[must_use] + pub const fn acceleration_structure_stride_alignment(&self) -> u64 { + match self { + Self::Float16x4 | Self::Float16x2 | Self::Snorm16x4 | Self::Snorm16x2 => 2, + Self::Float32x2 | Self::Float32x3 => 4, + _ => unreachable!(), + } + } +} + +/// Vertex inputs (attributes) to shaders. +/// +/// These are used to specify the individual attributes within a [`VertexBufferLayout`]. +/// See its documentation for an example. +/// +/// The [`vertex_attr_array!`] macro can help create these with appropriate offsets. +/// +/// Corresponds to [WebGPU `GPUVertexAttribute`]( +/// https://gpuweb.github.io/gpuweb/#dictdef-gpuvertexattribute). +/// +#[doc = link_to_wgpu_docs!(["`vertex_attr_array!`"]: "macro.vertex_attr_array.html")] +#[doc = link_to_wgpu_item!(struct VertexBufferLayout)] +#[repr(C)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize))] +#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] +#[cfg_attr( + any(feature = "serialize", feature = "deserialize"), + serde(rename_all = "camelCase") +)] +pub struct VertexAttribute { + /// Format of the input + pub format: VertexFormat, + /// Byte offset of the start of the input + pub offset: u64, + /// Location for this input. Must match the location in the shader. + pub shader_location: u32, +} + +/// Whether a vertex buffer is indexed by vertex or by instance. +/// +/// Consider a call to [`RenderPass::draw`] like this: +/// +/// ```ignore +/// render_pass.draw(vertices, instances) +/// ``` +/// +/// where `vertices` is a `Range` of vertex indices, and +/// `instances` is a `Range` of instance indices. 
+/// +/// For this call, `wgpu` invokes the vertex shader entry point once +/// for every possible `(v, i)` pair, where `v` is drawn from +/// `vertices` and `i` is drawn from `instances`. These invocations +/// may happen in any order, and will usually run in parallel. +/// +/// Each vertex buffer has a step mode, established by the +/// [`step_mode`] field of its [`VertexBufferLayout`], given when the +/// pipeline was created. Buffers whose step mode is [`Vertex`] use +/// `v` as the index into their contents, whereas buffers whose step +/// mode is [`Instance`] use `i`. The indicated buffer element then +/// contributes zero or more attribute values for the `(v, i)` vertex +/// shader invocation to use, based on the [`VertexBufferLayout`]'s +/// [`attributes`] list. +/// +/// You can visualize the results from all these vertex shader +/// invocations as a matrix with a row for each `i` from `instances`, +/// and with a column for each `v` from `vertices`. In one sense, `v` +/// and `i` are symmetrical: both are used to index vertex buffers and +/// provide attribute values. But the key difference between `v` and +/// `i` is that line and triangle primitives are built from the values +/// of each row, along which `i` is constant and `v` varies, not the +/// columns. +/// +/// An indexed draw call works similarly: +/// +/// ```ignore +/// render_pass.draw_indexed(indices, base_vertex, instances) +/// ``` +/// +/// The only difference is that `v` values are drawn from the contents +/// of the index buffer—specifically, the subrange of the index +/// buffer given by `indices`—instead of simply being sequential +/// integers, as they are in a `draw` call. +/// +/// A non-instanced call, where `instances` is `0..1`, is simply a +/// matrix with only one row. +/// +/// Corresponds to [WebGPU `GPUVertexStepMode`]( +/// https://gpuweb.github.io/gpuweb/#enumdef-gpuvertexstepmode). 
+/// +#[doc = link_to_wgpu_docs!(["`RenderPass::draw`"]: "struct.RenderPass.html#method.draw")] +#[doc = link_to_wgpu_item!(struct VertexBufferLayout)] +#[doc = link_to_wgpu_docs!(["`step_mode`"]: "struct.VertexBufferLayout.html#structfield.step_mode")] +#[doc = link_to_wgpu_docs!(["`attributes`"]: "struct.VertexBufferLayout.html#structfield.attributes")] +/// [`Vertex`]: VertexStepMode::Vertex +/// [`Instance`]: VertexStepMode::Instance +#[repr(C)] +#[derive(Copy, Clone, Debug, Default, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize))] +#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] +#[cfg_attr( + any(feature = "serialize", feature = "deserialize"), + serde(rename_all = "kebab-case") +)] +pub enum VertexStepMode { + /// Vertex data is advanced every vertex. + #[default] + Vertex = 0, + /// Vertex data is advanced every instance. + Instance = 1, +} + +/// Primitive type the input mesh is composed of. +/// +/// Corresponds to [WebGPU `GPUPrimitiveTopology`]( +/// https://gpuweb.github.io/gpuweb/#enumdef-gpuprimitivetopology). +#[repr(C)] +#[derive(Copy, Clone, Debug, Default, Hash, Eq, PartialEq)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize))] +#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] +#[cfg_attr( + any(feature = "serialize", feature = "deserialize"), + serde(rename_all = "kebab-case") +)] +pub enum PrimitiveTopology { + /// Vertex data is a list of points. Each vertex is a new point. + PointList = 0, + /// Vertex data is a list of lines. Each pair of vertices composes a new line. + /// + /// Vertices `0 1 2 3` create two lines `0 1` and `2 3` + LineList = 1, + /// Vertex data is a strip of lines. Each set of two adjacent vertices form a line. + /// + /// Vertices `0 1 2 3` create three lines `0 1`, `1 2`, and `2 3`. + LineStrip = 2, + /// Vertex data is a list of triangles. Each set of 3 vertices composes a new triangle. 
+    ///
+    /// Vertices `0 1 2 3 4 5` create two triangles `0 1 2` and `3 4 5`
+    #[default]
+    TriangleList = 3,
+    /// Vertex data is a triangle strip. Each set of three adjacent vertices form a triangle.
+    ///
+    /// Vertices `0 1 2 3 4 5` create four triangles `0 1 2`, `2 1 3`, `2 3 4`, and `4 3 5`
+    TriangleStrip = 4,
+}
+
+impl PrimitiveTopology {
+    /// Returns true for strip topologies.
+    #[must_use]
+    pub fn is_strip(&self) -> bool {
+        match *self {
+            Self::PointList | Self::LineList | Self::TriangleList => false,
+            Self::LineStrip | Self::TriangleStrip => true,
+        }
+    }
+
+    /// Returns true for triangle topologies.
+    #[must_use]
+    pub fn is_triangles(&self) -> bool {
+        match *self {
+            Self::TriangleList | Self::TriangleStrip => true,
+            Self::PointList | Self::LineList | Self::LineStrip => false,
+        }
+    }
+}
diff --git a/naga-types/src/msl.rs b/naga-types/src/msl.rs
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/naga-types/src/msl.rs
@@ -0,0 +1 @@
+
diff --git a/naga-types/src/spv.rs b/naga-types/src/spv.rs
new file mode 100644
index 00000000000..f5a9c729945
--- /dev/null
+++ b/naga-types/src/spv.rs
@@ -0,0 +1,12 @@
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
+#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
+pub struct BindingInfo {
+    pub descriptor_set: u32,
+    pub binding: u32,
+    /// If the binding is an unsized binding array, this overrides the size.
+    pub binding_array_size: Option<u32>,
+}
+
+// Using `BTreeMap` instead of `HashMap` so that we can hash itself.
+pub type BindingMap = alloc::collections::BTreeMap<crate::ResourceBinding, BindingInfo>;
diff --git a/naga/Cargo.toml b/naga/Cargo.toml
index 8cc5eddc557..74aaa1b5752 100644
--- a/naga/Cargo.toml
+++ b/naga/Cargo.toml
@@ -8,6 +8,7 @@ repository.workspace = true
 keywords = ["shader", "SPIR-V", "GLSL", "MSL"]
 license.workspace = true
 exclude = ["bin/**/*", "tests/**/*", "Cargo.lock", "target/**/*"]
+readme = "README.md"
 
 # Override the workspace's `rust-version` key. Firefox uses `cargo vendor` to
 # copy the crates it actually uses out of the workspace, so it's meaningful for
@@ -40,21 +41,21 @@ serialize = [
     "bitflags/serde",
     "half/serde",
     "hashbrown/serde",
-    "indexmap/serde",
+    "naga-types/serialize",
 ]
 deserialize = [
     "dep:serde",
     "bitflags/serde",
     "half/serde",
     "hashbrown/serde",
-    "indexmap/serde",
+    "naga-types/deserialize",
 ]
 arbitrary = [
     "dep:arbitrary",
     "bitflags/arbitrary",
-    "indexmap/arbitrary",
     "half/arbitrary",
     "half/std",
+    "naga-types/arbitrary",
 ]
 spv-in = ["dep:petgraph", "petgraph/graphmap", "dep:spirv"]
 spv-out = ["dep:spirv"]
@@ -82,6 +83,8 @@ stderr = ["codespan-reporting/std"]
 fs = []
 
 [dependencies]
+naga-types = { workspace = true }
+
 arbitrary = { workspace = true, features = ["derive"], optional = true }
 arrayvec.workspace = true
 bitflags.workspace = true
@@ -90,8 +93,8 @@ cfg-if.workspace = true
 codespan-reporting = { workspace = true }
 hashbrown.workspace = true
 half = { workspace = true, features = ["num-traits"] }
-rustc-hash.workspace = true
 indexmap.workspace = true
+rustc-hash.workspace = true
 libm = { workspace = true, default-features = false }
 log.workspace = true
 num-traits.workspace = true
diff --git a/naga/src/back/glsl/mod.rs b/naga/src/back/glsl/mod.rs
index 0c768a3caa1..e8c6c04af5f 100644
--- a/naga/src/back/glsl/mod.rs
+++ b/naga/src/back/glsl/mod.rs
@@ -43,6 +43,8 @@ to output a [`Module`](crate::Module) into glsl
 // Additions that are relevant for the backend are the discard keyword, the introduction of
 // vector, matrices, samplers, image types and
functions that provide common shader operations +pub use nt::glsl::*; + pub use features::Features; pub use writer::Writer; @@ -54,7 +56,6 @@ use alloc::{ vec::Vec, }; use core::{ - cmp::Ordering, fmt::{self, Error as FmtError, Write}, mem, }; @@ -80,11 +81,6 @@ mod keywords; /// Contains the [`Writer`] type. mod writer; -/// List of supported `core` GLSL versions. -pub const SUPPORTED_CORE_VERSIONS: &[u16] = &[140, 150, 330, 400, 410, 420, 430, 440, 450, 460]; -/// List of supported `es` GLSL versions. -pub const SUPPORTED_ES_VERSIONS: &[u16] = &[300, 310, 320]; - /// The suffix of the variable that will hold the calculated clamped level /// of detail for bounds checking in `ImageLoad` const CLAMPED_LOD_SUFFIX: &str = "_clamped_lod"; @@ -92,9 +88,6 @@ const CLAMPED_LOD_SUFFIX: &str = "_clamped_lod"; pub(crate) const MODF_FUNCTION: &str = "naga_modf"; pub(crate) const FREXP_FUNCTION: &str = "naga_frexp"; -// Must match code in glsl_built_in -pub const FIRST_INSTANCE_BINDING: &str = "naga_vs_first_instance"; - #[cfg(feature = "deserialize")] #[derive(serde::Deserialize)] struct BindingMapSerialization { @@ -117,9 +110,6 @@ where Ok(map) } -/// Mapping between resources and bindings. -pub type BindingMap = alloc::collections::BTreeMap; - impl crate::AtomicFunction { const fn to_glsl(self) -> &'static str { match self { @@ -154,144 +144,6 @@ impl crate::AddressSpace { } } -/// A GLSL version. -#[derive(Debug, Copy, Clone, PartialEq)] -#[cfg_attr(feature = "serialize", derive(serde::Serialize))] -#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] -pub enum Version { - /// `core` GLSL. - Desktop(u16), - /// `es` GLSL. - Embedded { version: u16, is_webgl: bool }, -} - -impl Version { - /// Create a new gles version - pub const fn new_gles(version: u16) -> Self { - Self::Embedded { - version, - is_webgl: false, - } - } - - /// Returns true if self is `Version::Embedded` (i.e. 
is a es version) - const fn is_es(&self) -> bool { - match *self { - Version::Desktop(_) => false, - Version::Embedded { .. } => true, - } - } - - /// Returns true if targeting WebGL - const fn is_webgl(&self) -> bool { - match *self { - Version::Desktop(_) => false, - Version::Embedded { is_webgl, .. } => is_webgl, - } - } - - /// Checks the list of currently supported versions and returns true if it contains the - /// specified version - /// - /// # Notes - /// As an invalid version number will never be added to the supported version list - /// so this also checks for version validity - fn is_supported(&self) -> bool { - match *self { - Version::Desktop(v) => SUPPORTED_CORE_VERSIONS.contains(&v), - Version::Embedded { version: v, .. } => SUPPORTED_ES_VERSIONS.contains(&v), - } - } - - fn supports_io_locations(&self) -> bool { - *self >= Version::Desktop(330) || *self >= Version::new_gles(300) - } - - /// Checks if the version supports all of the explicit layouts: - /// - `location=` qualifiers for bindings - /// - `binding=` qualifiers for resources - /// - /// Note: `location=` for vertex inputs and fragment outputs is supported - /// unconditionally for GLES 300. - fn supports_explicit_locations(&self) -> bool { - *self >= Version::Desktop(420) || *self >= Version::new_gles(310) - } - - fn supports_early_depth_test(&self) -> bool { - *self >= Version::Desktop(130) || *self >= Version::new_gles(310) - } - - fn supports_std140_layout(&self) -> bool { - *self >= Version::Desktop(140) || *self >= Version::new_gles(300) - } - - fn supports_std430_layout(&self) -> bool { - // std430 is available from 400 via GL_ARB_shader_storage_buffer_object. 
- *self >= Version::Desktop(400) || *self >= Version::new_gles(310) - } - - fn supports_fma_function(&self) -> bool { - *self >= Version::Desktop(400) || *self >= Version::new_gles(320) - } - - fn supports_integer_functions(&self) -> bool { - *self >= Version::Desktop(400) || *self >= Version::new_gles(310) - } - - fn supports_frexp_function(&self) -> bool { - *self >= Version::Desktop(400) || *self >= Version::new_gles(310) - } - - fn supports_derivative_control(&self) -> bool { - *self >= Version::Desktop(450) - } - - // For supports_pack_unpack_4x8, supports_pack_unpack_snorm_2x16, supports_pack_unpack_unorm_2x16 - // see: - // https://registry.khronos.org/OpenGL-Refpages/gl4/html/unpackUnorm.xhtml - // https://registry.khronos.org/OpenGL-Refpages/es3/html/unpackUnorm.xhtml - // https://registry.khronos.org/OpenGL-Refpages/gl4/html/packUnorm.xhtml - // https://registry.khronos.org/OpenGL-Refpages/es3/html/packUnorm.xhtml - fn supports_pack_unpack_4x8(&self) -> bool { - *self >= Version::Desktop(400) || *self >= Version::new_gles(310) - } - fn supports_pack_unpack_snorm_2x16(&self) -> bool { - *self >= Version::Desktop(420) || *self >= Version::new_gles(300) - } - fn supports_pack_unpack_unorm_2x16(&self) -> bool { - *self >= Version::Desktop(400) || *self >= Version::new_gles(300) - } - - // https://registry.khronos.org/OpenGL-Refpages/gl4/html/unpackHalf2x16.xhtml - // https://registry.khronos.org/OpenGL-Refpages/gl4/html/packHalf2x16.xhtml - // https://registry.khronos.org/OpenGL-Refpages/es3/html/unpackHalf2x16.xhtml - // https://registry.khronos.org/OpenGL-Refpages/es3/html/packHalf2x16.xhtml - fn supports_pack_unpack_half_2x16(&self) -> bool { - *self >= Version::Desktop(420) || *self >= Version::new_gles(300) - } -} - -impl PartialOrd for Version { - fn partial_cmp(&self, other: &Self) -> Option { - match (*self, *other) { - (Version::Desktop(x), Version::Desktop(y)) => Some(x.cmp(&y)), - (Version::Embedded { version: x, .. 
}, Version::Embedded { version: y, .. }) => { - Some(x.cmp(&y)) - } - _ => None, - } - } -} - -impl fmt::Display for Version { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - Version::Desktop(v) => write!(f, "{v} core"), - Version::Embedded { version: v, .. } => write!(f, "{v} es"), - } - } -} - bitflags::bitflags! { /// Configuration flags for the [`Writer`]. #[cfg_attr(feature = "serialize", derive(serde::Serialize))] @@ -446,7 +298,7 @@ pub struct ImmediateItem { /// pub access_path: String, /// Type of the uniform. This will only ever be a scalar, vector, or matrix. - pub ty: Handle, + pub ty: GlslUniformType, /// The offset in the immediate data memory block this uniform maps to. /// /// The size of the uniform can be derived from the type. @@ -533,19 +385,17 @@ impl fmt::Display for VaryingName<'_> { } } -impl ShaderStage { - const fn to_str(self) -> &'static str { - match self { - ShaderStage::Compute => "cs", - ShaderStage::Fragment => "fs", - ShaderStage::Vertex => "vs", - ShaderStage::Task - | ShaderStage::Mesh - | ShaderStage::RayGeneration - | ShaderStage::AnyHit - | ShaderStage::ClosestHit - | ShaderStage::Miss => unreachable!(), - } +const fn shader_stage_to_str(st: ShaderStage) -> &'static str { + match st { + ShaderStage::Compute => "cs", + ShaderStage::Fragment => "fs", + ShaderStage::Vertex => "vs", + ShaderStage::Task + | ShaderStage::Mesh + | ShaderStage::RayGeneration + | ShaderStage::AnyHit + | ShaderStage::ClosestHit + | ShaderStage::Miss => unreachable!(), } } diff --git a/naga/src/back/glsl/writer.rs b/naga/src/back/glsl/writer.rs index dd116e03b09..0c216f61cf3 100644 --- a/naga/src/back/glsl/writer.rs +++ b/naga/src/back/glsl/writer.rs @@ -961,11 +961,14 @@ impl<'a, W: Write> Writer<'a, W> { "_group_{}_binding_{}_{}", br.group, br.binding, - self.entry_point.stage.to_str() + shader_stage_to_str(self.entry_point.stage) ) } (&None, crate::AddressSpace::Immediate) => { - format!("_immediates_binding_{}", 
self.entry_point.stage.to_str()) + format!( + "_immediates_binding_{}", + shader_stage_to_str(self.entry_point.stage) + ) } (&None, _) => self.names[&NameKey::GlobalVariable(handle)].clone(), } @@ -983,12 +986,12 @@ impl<'a, W: Write> Writer<'a, W> { "_group_{}_binding_{}_{}", br.group, br.binding, - self.entry_point.stage.to_str() + shader_stage_to_str(self.entry_point.stage) )?, (&None, crate::AddressSpace::Immediate) => write!( self.out, "_immediates_binding_{}", - self.entry_point.stage.to_str() + shader_stage_to_str(self.entry_point.stage) )?, (&None, _) => write!( self.out, @@ -4586,7 +4589,7 @@ impl<'a, W: Write> Writer<'a, W> { items.push(ImmediateItem { access_path: name, offset: *offset, - ty, + ty: (&self.module.types[ty].inner).try_into().unwrap(), }); *offset += layout.size; } diff --git a/naga/src/back/hlsl/mod.rs b/naga/src/back/hlsl/mod.rs index 6918ac113f9..d78b8cc4650 100644 --- a/naga/src/back/hlsl/mod.rs +++ b/naga/src/back/hlsl/mod.rs @@ -283,16 +283,17 @@ impl ShaderModel { } } -impl crate::ShaderStage { - pub const fn to_hlsl_str(self) -> &'static str { - match self { - Self::Vertex => "vs", - Self::Fragment => "ps", - Self::Compute => "cs", - Self::Task => "as", - Self::Mesh => "ms", - Self::RayGeneration | Self::AnyHit | Self::ClosestHit | Self::Miss => "lib", - } +pub const fn shader_stage_to_hlsl_str(st: nt::ShaderStage) -> &'static str { + match st { + nt::ShaderStage::Vertex => "vs", + nt::ShaderStage::Fragment => "ps", + nt::ShaderStage::Compute => "cs", + nt::ShaderStage::Task => "as", + nt::ShaderStage::Mesh => "ms", + nt::ShaderStage::RayGeneration + | nt::ShaderStage::AnyHit + | nt::ShaderStage::ClosestHit + | nt::ShaderStage::Miss => "lib", } } diff --git a/naga/src/back/mod.rs b/naga/src/back/mod.rs index 24b3ec83ae1..2713edb8059 100644 --- a/naga/src/back/mod.rs +++ b/naga/src/back/mod.rs @@ -30,6 +30,8 @@ pub mod pipeline_constants; #[cfg(any(hlsl_out, glsl_out))] mod continue_forward; +pub use nt::TaskDispatchLimits; + /// 
Names of vector components. pub const COMPONENTS: &[char] = &['x', 'y', 'z', 'w']; /// Indent for backends. @@ -392,11 +394,3 @@ pub enum RayIntersectionType { Triangle = 1, BoundingBox = 4, } - -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "serialize", derive(serde::Serialize))] -#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] -pub struct TaskDispatchLimits { - pub max_mesh_workgroups_per_dim: u32, - pub max_mesh_workgroups_total: u32, -} diff --git a/naga/src/back/msl/mod.rs b/naga/src/back/msl/mod.rs index 381a2b12796..da8f697814a 100644 --- a/naga/src/back/msl/mod.rs +++ b/naga/src/back/msl/mod.rs @@ -338,105 +338,6 @@ impl Default for Options { } } -/// Corresponds to [WebGPU `GPUVertexFormat`]( -/// https://gpuweb.github.io/gpuweb/#enumdef-gpuvertexformat). -#[repr(u32)] -#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] -#[cfg_attr(feature = "serialize", derive(serde::Serialize))] -#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] -pub enum VertexFormat { - /// One unsigned byte (u8). `u32` in shaders. - Uint8 = 0, - /// Two unsigned bytes (u8). `vec2` in shaders. - Uint8x2 = 1, - /// Four unsigned bytes (u8). `vec4` in shaders. - Uint8x4 = 2, - /// One signed byte (i8). `i32` in shaders. - Sint8 = 3, - /// Two signed bytes (i8). `vec2` in shaders. - Sint8x2 = 4, - /// Four signed bytes (i8). `vec4` in shaders. - Sint8x4 = 5, - /// One unsigned byte (u8). [0, 255] converted to float [0, 1] `f32` in shaders. - Unorm8 = 6, - /// Two unsigned bytes (u8). [0, 255] converted to float [0, 1] `vec2` in shaders. - Unorm8x2 = 7, - /// Four unsigned bytes (u8). [0, 255] converted to float [0, 1] `vec4` in shaders. - Unorm8x4 = 8, - /// One signed byte (i8). [-127, 127] converted to float [-1, 1] `f32` in shaders. - Snorm8 = 9, - /// Two signed bytes (i8). [-127, 127] converted to float [-1, 1] `vec2` in shaders. - Snorm8x2 = 10, - /// Four signed bytes (i8). 
[-127, 127] converted to float [-1, 1] `vec4` in shaders. - Snorm8x4 = 11, - /// One unsigned short (u16). `u32` in shaders. - Uint16 = 12, - /// Two unsigned shorts (u16). `vec2` in shaders. - Uint16x2 = 13, - /// Four unsigned shorts (u16). `vec4` in shaders. - Uint16x4 = 14, - /// One signed short (u16). `i32` in shaders. - Sint16 = 15, - /// Two signed shorts (i16). `vec2` in shaders. - Sint16x2 = 16, - /// Four signed shorts (i16). `vec4` in shaders. - Sint16x4 = 17, - /// One unsigned short (u16). [0, 65535] converted to float [0, 1] `f32` in shaders. - Unorm16 = 18, - /// Two unsigned shorts (u16). [0, 65535] converted to float [0, 1] `vec2` in shaders. - Unorm16x2 = 19, - /// Four unsigned shorts (u16). [0, 65535] converted to float [0, 1] `vec4` in shaders. - Unorm16x4 = 20, - /// One signed short (i16). [-32767, 32767] converted to float [-1, 1] `f32` in shaders. - Snorm16 = 21, - /// Two signed shorts (i16). [-32767, 32767] converted to float [-1, 1] `vec2` in shaders. - Snorm16x2 = 22, - /// Four signed shorts (i16). [-32767, 32767] converted to float [-1, 1] `vec4` in shaders. - Snorm16x4 = 23, - /// One half-precision float (no Rust equiv). `f32` in shaders. - Float16 = 24, - /// Two half-precision floats (no Rust equiv). `vec2` in shaders. - Float16x2 = 25, - /// Four half-precision floats (no Rust equiv). `vec4` in shaders. - Float16x4 = 26, - /// One single-precision float (f32). `f32` in shaders. - Float32 = 27, - /// Two single-precision floats (f32). `vec2` in shaders. - Float32x2 = 28, - /// Three single-precision floats (f32). `vec3` in shaders. - Float32x3 = 29, - /// Four single-precision floats (f32). `vec4` in shaders. - Float32x4 = 30, - /// One unsigned int (u32). `u32` in shaders. - Uint32 = 31, - /// Two unsigned ints (u32). `vec2` in shaders. - Uint32x2 = 32, - /// Three unsigned ints (u32). `vec3` in shaders. - Uint32x3 = 33, - /// Four unsigned ints (u32). `vec4` in shaders. - Uint32x4 = 34, - /// One signed int (i32). 
`i32` in shaders. - Sint32 = 35, - /// Two signed ints (i32). `vec2` in shaders. - Sint32x2 = 36, - /// Three signed ints (i32). `vec3` in shaders. - Sint32x3 = 37, - /// Four signed ints (i32). `vec4` in shaders. - Sint32x4 = 38, - /// Three unsigned 10-bit integers and one 2-bit integer, packed into a 32-bit integer (u32). [0, 1024] converted to float [0, 1] `vec4` in shaders. - #[cfg_attr( - any(feature = "serialize", feature = "deserialize"), - serde(rename = "unorm10-10-10-2") - )] - Unorm10_10_10_2 = 43, - /// Four unsigned 8-bit integers, packed into a 32-bit integer (u32). [0, 255] converted to float [0, 1] `vec4` in shaders. - #[cfg_attr( - any(feature = "serialize", feature = "deserialize"), - serde(rename = "unorm8x4-bgra") - )] - Unorm8x4Bgra = 44, -} - /// Defines how to advance the data in vertex buffers. #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serialize", derive(serde::Serialize))] @@ -463,7 +364,7 @@ pub struct AttributeMapping { /// . /// The conversion process is described by /// . 
- pub format: VertexFormat, + pub format: nt::VertexFormat, } /// A description of a vertex buffer with all the information we diff --git a/naga/src/back/msl/writer.rs b/naga/src/back/msl/writer.rs index 584bf9f86be..57fc0a08383 100644 --- a/naga/src/back/msl/writer.rs +++ b/naga/src/back/msl/writer.rs @@ -4767,10 +4767,10 @@ template fn write_unpacking_function( &mut self, - format: back::msl::VertexFormat, + format: nt::VertexFormat, ) -> Result<(String, u32, Option, crate::Scalar), Error> { use crate::{Scalar, VectorSize}; - use back::msl::VertexFormat::*; + use nt::VertexFormat::*; match format { Uint8 => { let name = self.namer.call("unpackUint8"); @@ -5568,6 +5568,7 @@ template writeln!(self.out, "}}")?; Ok((name, 4, Some(VectorSize::Quad), Scalar::F32)) } + Float64 | Float64x2 | Float64x3 | Float64x4 => unreachable!(), } } @@ -6512,7 +6513,7 @@ template options: &Options, pipeline_options: &PipelineOptions, ) -> Result { - use back::msl::VertexFormat; + use nt::VertexFormat; // Define structs to hold resolved/generated data for vertex buffers and // their attributes. diff --git a/naga/src/back/spv/mod.rs b/naga/src/back/spv/mod.rs index c7b54cdb5ff..14d402a4588 100644 --- a/naga/src/back/spv/mod.rs +++ b/naga/src/back/spv/mod.rs @@ -110,6 +110,8 @@ mod selection; mod subgroup; mod writer; +pub use nt::spv::*; + pub use mesh_shader::{MeshReturnInfo, MeshReturnMember}; pub use spirv::{Capability, SourceLanguage}; @@ -1015,19 +1017,6 @@ bitflags::bitflags! { } } -#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "serialize", derive(serde::Serialize))] -#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] -pub struct BindingInfo { - pub descriptor_set: u32, - pub binding: u32, - /// If the binding is an unsized binding array, this overrides the size. - pub binding_array_size: Option, -} - -// Using `BTreeMap` instead of `HashMap` so that we can hash itself. 
-pub type BindingMap = alloc::collections::BTreeMap; - #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum ZeroInitializeWorkgroupMemoryMode { /// Via `VK_KHR_zero_initialize_workgroup_memory` or Vulkan 1.3 diff --git a/naga/src/ir/mod.rs b/naga/src/ir/mod.rs index 7cfe810e411..2ad5d0963c4 100644 --- a/naga/src/ir/mod.rs +++ b/naga/src/ir/mod.rs @@ -236,6 +236,7 @@ use crate::diagnostic_filter::DiagnosticFilterNode; use crate::{FastIndexMap, NamedExpressions}; pub use block::Block; +pub use naga_types::{ResourceBinding, ShaderStage}; /// Explicitly allows early depth/stencil tests. /// @@ -315,40 +316,6 @@ pub enum ConservativeDepth { Unchanged, } -/// Stage of the programmable pipeline. -#[derive(Clone, Copy, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serialize", derive(Serialize))] -#[cfg_attr(feature = "deserialize", derive(Deserialize))] -#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] -pub enum ShaderStage { - /// A vertex shader, in a render pipeline. - Vertex, - - /// A task shader, in a mesh render pipeline. - Task, - - /// A mesh shader, in a mesh render pipeline. - Mesh, - - /// A fragment shader, in a render pipeline. - Fragment, - - /// Compute pipeline shader. - Compute, - - /// A ray generation shader, in a ray tracing pipeline. - RayGeneration, - - /// A miss shader, in a ray tracing pipeline. - Miss, - - /// A any hit shader, in a ray tracing pipeline. - AnyHit, - - /// A closest hit shader, in a ray tracing pipeline. - ClosestHit, -} - /// Addressing space of variables. #[derive(Clone, Copy, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serialize", derive(Serialize))] @@ -1159,18 +1126,6 @@ pub enum Binding { }, } -/// Pipeline binding information for global resources. 
-#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -#[cfg_attr(feature = "serialize", derive(Serialize))] -#[cfg_attr(feature = "deserialize", derive(Deserialize))] -#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] -pub struct ResourceBinding { - /// The bind group index. - pub group: u32, - /// Binding number within the group. - pub binding: u32, -} - /// Variable defined at module level. #[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(Serialize))] diff --git a/naga/src/lib.rs b/naga/src/lib.rs index 1bf72e47544..04a1936de3a 100644 --- a/naga/src/lib.rs +++ b/naga/src/lib.rs @@ -106,6 +106,8 @@ extern crate std; extern crate alloc; +extern crate naga_types as nt; + mod arena; pub mod back; pub mod common; @@ -126,6 +128,8 @@ use alloc::string::String; pub use crate::arena::{Arena, Handle, Range, UniqueArena}; pub use crate::span::{SourceLocation, Span, SpanContext, WithSpan}; +pub use nt::{FastHashMap, FastHashSet, FastIndexMap, FastIndexSet}; + // TODO: Eliminate this re-export and migrate uses of `crate::Foo` to `use crate::ir; ir::Foo`. pub use ir::*; @@ -135,26 +139,5 @@ pub const BOOL_WIDTH: Bytes = 1; /// Width of abstract types, in bytes. pub const ABSTRACT_WIDTH: Bytes = 8; -/// Hash map that is faster but not resilient to DoS attacks. -/// (Similar to rustc_hash::FxHashMap but using hashbrown::HashMap instead of alloc::collections::HashMap.) -/// To construct a new instance: `FastHashMap::default()` -pub type FastHashMap = - hashbrown::HashMap>; - -/// Hash set that is faster but not resilient to DoS attacks. -/// (Similar to rustc_hash::FxHashSet but using hashbrown::HashSet instead of alloc::collections::HashMap.) -pub type FastHashSet = - hashbrown::HashSet>; - -/// Insertion-order-preserving hash set (`IndexSet`), but with the same -/// hasher as `FastHashSet` (faster but not resilient to DoS attacks). 
-pub type FastIndexSet = - indexmap::IndexSet>; - -/// Insertion-order-preserving hash map (`IndexMap`), but with the same -/// hasher as `FastHashMap` (faster but not resilient to DoS attacks). -pub type FastIndexMap = - indexmap::IndexMap>; - /// Map of expressions that have associated variable names pub(crate) type NamedExpressions = FastIndexMap, String>; diff --git a/naga/src/proc/mod.rs b/naga/src/proc/mod.rs index 112399ded9a..bcdf8f2a9e2 100644 --- a/naga/src/proc/mod.rs +++ b/naga/src/proc/mod.rs @@ -677,24 +677,6 @@ pub fn flatten_compose<'arenas>( .take(size) } -impl super::ShaderStage { - pub const fn compute_like(self) -> bool { - match self { - Self::Vertex | Self::Fragment => false, - Self::Compute | Self::Task | Self::Mesh => true, - Self::RayGeneration | Self::AnyHit | Self::ClosestHit | Self::Miss => false, - } - } - - /// Mesh or task shader - pub const fn mesh_like(self) -> bool { - match self { - Self::Task | Self::Mesh => true, - _ => false, - } - } -} - #[test] fn test_matrix_size() { let module = crate::Module::default(); @@ -965,3 +947,62 @@ impl crate::AddressSpace { matches!(self, Self::WorkGroup | Self::TaskPayload) } } + +impl TryFrom for nt::glsl::GlslScalarKind { + type Error = (); + + fn try_from(value: crate::ScalarKind) -> Result { + Ok(match value { + crate::ScalarKind::Sint => nt::glsl::GlslScalarKind::Sint, + crate::ScalarKind::Uint => nt::glsl::GlslScalarKind::Uint, + crate::ScalarKind::Float => nt::glsl::GlslScalarKind::Float, + _ => return Err(()), + }) + } +} + +impl From for nt::glsl::GlslVectorSize { + fn from(val: crate::VectorSize) -> Self { + match val { + crate::VectorSize::Bi => nt::glsl::GlslVectorSize::Bi, + crate::VectorSize::Tri => nt::glsl::GlslVectorSize::Tri, + crate::VectorSize::Quad => nt::glsl::GlslVectorSize::Quad, + } + } +} + +impl TryFrom for nt::glsl::GlslScalar { + type Error = (); + + fn try_from(value: crate::Scalar) -> Result { + Ok(nt::glsl::GlslScalar { + kind: value.kind.try_into()?, + width: 
value.width, + }) + } +} + +impl TryFrom<&crate::TypeInner> for nt::glsl::GlslUniformType { + type Error = (); + fn try_from(value: &crate::TypeInner) -> Result { + match *value { + crate::TypeInner::Scalar(scalar) => { + Ok(nt::glsl::GlslUniformType::Scalar(scalar.try_into()?)) + } + crate::TypeInner::Vector { size, scalar } => Ok(nt::glsl::GlslUniformType::Vector { + size: size.into(), + scalar: scalar.try_into()?, + }), + crate::TypeInner::Matrix { + columns, + rows, + scalar, + } => Ok(nt::glsl::GlslUniformType::Matrix { + columns: columns.into(), + rows: rows.into(), + scalar: scalar.try_into()?, + }), + _ => Err(()), + } + } +} diff --git a/naga/tests/naga/snapshots.rs b/naga/tests/naga/snapshots.rs index 2bc29591b5f..ed1ebaae7c1 100644 --- a/naga/tests/naga/snapshots.rs +++ b/naga/tests/naga/snapshots.rs @@ -389,7 +389,7 @@ fn write_output_hlsl( entry_point: name.clone(), target_profile: format!( "{}_{}", - ep.stage.to_hlsl_str(), + naga::back::hlsl::shader_stage_to_hlsl_str(ep.stage), options.shader_model.to_str() ), }); diff --git a/wgpu-core/Cargo.toml b/wgpu-core/Cargo.toml index 7f08033afdb..c1c34de202c 100644 --- a/wgpu-core/Cargo.toml +++ b/wgpu-core/Cargo.toml @@ -170,6 +170,7 @@ wgpu-core-deps-windows-linux-android = { workspace = true, optional = true } [dependencies] naga.workspace = true +naga-types.workspace = true wgpu-naga-bridge.workspace = true wgpu-hal.workspace = true wgpu-types.workspace = true diff --git a/wgpu-core/src/hash_utils.rs b/wgpu-core/src/hash_utils.rs deleted file mode 100644 index 5c5b253888d..00000000000 --- a/wgpu-core/src/hash_utils.rs +++ /dev/null @@ -1,14 +0,0 @@ -//! Module for hashing utilities. -//! -//! Named hash_utils to prevent clashing with the core::hash module. - -/// HashMap using a fast, non-cryptographic hash algorithm. -pub type FastHashMap = - hashbrown::HashMap>; -/// HashSet using a fast, non-cryptographic hash algorithm. 
-pub type FastHashSet = - hashbrown::HashSet>; - -/// IndexMap using a fast, non-cryptographic hash algorithm. -pub type FastIndexMap = - indexmap::IndexMap>; diff --git a/wgpu-core/src/lib.rs b/wgpu-core/src/lib.rs index 8a981a437d7..39f15c170b1 100644 --- a/wgpu-core/src/lib.rs +++ b/wgpu-core/src/lib.rs @@ -60,6 +60,7 @@ #![cfg_attr(not(send_sync), allow(clippy::arc_with_non_send_sync))] extern crate alloc; +extern crate naga_types as nt; #[cfg(feature = "std")] extern crate std; extern crate wgpu_hal as hal; @@ -72,7 +73,6 @@ mod conv; pub mod device; pub mod error; pub mod global; -mod hash_utils; pub mod hub; pub mod id; pub mod identity; @@ -110,7 +110,7 @@ use alloc::{ string::String, }; -pub(crate) use hash_utils::*; +pub(crate) use nt::{FastHashMap, FastHashSet, FastIndexMap}; /// The index of a queue submission. /// diff --git a/wgpu-hal/Cargo.toml b/wgpu-hal/Cargo.toml index 73936b64d22..741f38ed8cb 100644 --- a/wgpu-hal/Cargo.toml +++ b/wgpu-hal/Cargo.toml @@ -200,6 +200,7 @@ required-features = ["gles"] [dependencies] naga.workspace = true wgpu-naga-bridge.workspace = true +naga-types = { workspace = true } wgpu-types = { workspace = true, default-features = false } # Dependencies in the lib and empty backend diff --git a/wgpu-hal/src/dx12/device.rs b/wgpu-hal/src/dx12/device.rs index 23a08da8ca9..e0bef3e38c3 100644 --- a/wgpu-hal/src/dx12/device.rs +++ b/wgpu-hal/src/dx12/device.rs @@ -418,7 +418,11 @@ impl super::Device { let source_name = stage.module.raw_name.as_deref(); - let full_stage = format!("{}_{}", naga_stage.to_hlsl_str(), key.shader_model.to_str()); + let full_stage = format!( + "{}_{}", + naga::back::hlsl::shader_stage_to_hlsl_str(naga_stage), + key.shader_model.to_str() + ); let compiled_shader = self.compiler_container.compile( self, diff --git a/wgpu-hal/src/gles/device.rs b/wgpu-hal/src/gles/device.rs index 1f63a808100..cd94c995bb2 100644 --- a/wgpu-hal/src/gles/device.rs +++ b/wgpu-hal/src/gles/device.rs @@ -503,22 +503,14 @@ impl 
super::Device { let mut uniforms = ArrayVec::new(); - for (stage_idx, stage_items) in immediates_items.into_iter().enumerate() { + for stage_items in immediates_items { for item in stage_items { - let source = &shaders[stage_idx].1.module.source; - let super::ShaderModuleSource::Naga(naga_module) = source else { - // ImmediateItem can only be constructed given a naga module, as it requires a type handle. - // Passthrough shaders will have immediates_items empty - unreachable!("Passthrough shaders don't currently support immediates on GLES"); - }; - let type_inner = &naga_module.module.types[item.ty].inner; - let location = unsafe { gl.get_uniform_location(program, &item.access_path) }; log::trace!( "immediate data item: name={}, ty={:?}, offset={}, location={:?}", item.access_path, - type_inner, + item.ty, item.offset, location, ); @@ -527,8 +519,8 @@ impl super::Device { uniforms.push(super::ImmediateDesc { location, offset: item.offset, - size_bytes: type_inner.size(naga_module.module.to_ctx()), - ty: type_inner.clone(), + size_bytes: item.ty.size(), + ty: item.ty, }); } } diff --git a/wgpu-hal/src/gles/mod.rs b/wgpu-hal/src/gles/mod.rs index 1a0292d4a0f..a448c5c9797 100644 --- a/wgpu-hal/src/gles/mod.rs +++ b/wgpu-hal/src/gles/mod.rs @@ -660,7 +660,7 @@ struct VertexBufferDesc { #[derive(Clone, Debug)] struct ImmediateDesc { location: glow::UniformLocation, - ty: naga::TypeInner, + ty: nt::glsl::GlslUniformType, offset: u32, size_bytes: u32, } diff --git a/wgpu-hal/src/gles/queue.rs b/wgpu-hal/src/gles/queue.rs index 09ce9da9cac..57b3f3ab423 100644 --- a/wgpu-hal/src/gles/queue.rs +++ b/wgpu-hal/src/gles/queue.rs @@ -1680,32 +1680,32 @@ impl super::Queue { } let location = Some(&uniform.location); - + use nt::glsl::{GlslScalar, GlslUniformType, GlslVectorSize}; match uniform.ty { // // --- Float 1-4 Component --- // - naga::TypeInner::Scalar(naga::Scalar::F32) => { + GlslUniformType::Scalar(GlslScalar::F32) => { let data = get_data::(data_bytes, offset)[0]; 
unsafe { gl.uniform_1_f32(location, data) }; } - naga::TypeInner::Vector { - size: naga::VectorSize::Bi, - scalar: naga::Scalar::F32, + GlslUniformType::Vector { + size: GlslVectorSize::Bi, + scalar: GlslScalar::F32, } => { let data = &get_data::(data_bytes, offset); unsafe { gl.uniform_2_f32_slice(location, data) }; } - naga::TypeInner::Vector { - size: naga::VectorSize::Tri, - scalar: naga::Scalar::F32, + GlslUniformType::Vector { + size: GlslVectorSize::Tri, + scalar: GlslScalar::F32, } => { let data = &get_data::(data_bytes, offset); unsafe { gl.uniform_3_f32_slice(location, data) }; } - naga::TypeInner::Vector { - size: naga::VectorSize::Quad, - scalar: naga::Scalar::F32, + GlslUniformType::Vector { + size: GlslVectorSize::Quad, + scalar: GlslScalar::F32, } => { let data = &get_data::(data_bytes, offset); unsafe { gl.uniform_4_f32_slice(location, data) }; @@ -1714,27 +1714,27 @@ impl super::Queue { // // --- Int 1-4 Component --- // - naga::TypeInner::Scalar(naga::Scalar::I32) => { + GlslUniformType::Scalar(GlslScalar::I32) => { let data = get_data::(data_bytes, offset)[0]; unsafe { gl.uniform_1_i32(location, data) }; } - naga::TypeInner::Vector { - size: naga::VectorSize::Bi, - scalar: naga::Scalar::I32, + GlslUniformType::Vector { + size: GlslVectorSize::Bi, + scalar: GlslScalar::I32, } => { let data = &get_data::(data_bytes, offset); unsafe { gl.uniform_2_i32_slice(location, data) }; } - naga::TypeInner::Vector { - size: naga::VectorSize::Tri, - scalar: naga::Scalar::I32, + GlslUniformType::Vector { + size: GlslVectorSize::Tri, + scalar: GlslScalar::I32, } => { let data = &get_data::(data_bytes, offset); unsafe { gl.uniform_3_i32_slice(location, data) }; } - naga::TypeInner::Vector { - size: naga::VectorSize::Quad, - scalar: naga::Scalar::I32, + GlslUniformType::Vector { + size: GlslVectorSize::Quad, + scalar: GlslScalar::I32, } => { let data = &get_data::(data_bytes, offset); unsafe { gl.uniform_4_i32_slice(location, data) }; @@ -1743,27 +1743,27 @@ impl 
super::Queue { // // --- Uint 1-4 Component --- // - naga::TypeInner::Scalar(naga::Scalar::U32) => { + GlslUniformType::Scalar(GlslScalar::U32) => { let data = get_data::(data_bytes, offset)[0]; unsafe { gl.uniform_1_u32(location, data) }; } - naga::TypeInner::Vector { - size: naga::VectorSize::Bi, - scalar: naga::Scalar::U32, + GlslUniformType::Vector { + size: GlslVectorSize::Bi, + scalar: GlslScalar::U32, } => { let data = &get_data::(data_bytes, offset); unsafe { gl.uniform_2_u32_slice(location, data) }; } - naga::TypeInner::Vector { - size: naga::VectorSize::Tri, - scalar: naga::Scalar::U32, + GlslUniformType::Vector { + size: GlslVectorSize::Tri, + scalar: GlslScalar::U32, } => { let data = &get_data::(data_bytes, offset); unsafe { gl.uniform_3_u32_slice(location, data) }; } - naga::TypeInner::Vector { - size: naga::VectorSize::Quad, - scalar: naga::Scalar::U32, + GlslUniformType::Vector { + size: GlslVectorSize::Quad, + scalar: GlslScalar::U32, } => { let data = &get_data::(data_bytes, offset); unsafe { gl.uniform_4_u32_slice(location, data) }; @@ -1772,18 +1772,18 @@ impl super::Queue { // // --- Matrix 2xR --- // - naga::TypeInner::Matrix { - columns: naga::VectorSize::Bi, - rows: naga::VectorSize::Bi, - scalar: naga::Scalar::F32, + GlslUniformType::Matrix { + columns: GlslVectorSize::Bi, + rows: GlslVectorSize::Bi, + scalar: GlslScalar::F32, } => { let data = &get_data::(data_bytes, offset); unsafe { gl.uniform_matrix_2_f32_slice(location, false, data) }; } - naga::TypeInner::Matrix { - columns: naga::VectorSize::Bi, - rows: naga::VectorSize::Tri, - scalar: naga::Scalar::F32, + GlslUniformType::Matrix { + columns: GlslVectorSize::Bi, + rows: GlslVectorSize::Tri, + scalar: GlslScalar::F32, } => { // repack 2 vec3s into 6 values. 
let unpacked_data = &get_data::(data_bytes, offset); @@ -1794,10 +1794,10 @@ impl super::Queue { ]; unsafe { gl.uniform_matrix_2x3_f32_slice(location, false, &packed_data) }; } - naga::TypeInner::Matrix { - columns: naga::VectorSize::Bi, - rows: naga::VectorSize::Quad, - scalar: naga::Scalar::F32, + GlslUniformType::Matrix { + columns: GlslVectorSize::Bi, + rows: GlslVectorSize::Quad, + scalar: GlslScalar::F32, } => { let data = &get_data::(data_bytes, offset); unsafe { gl.uniform_matrix_2x4_f32_slice(location, false, data) }; @@ -1806,18 +1806,18 @@ impl super::Queue { // // --- Matrix 3xR --- // - naga::TypeInner::Matrix { - columns: naga::VectorSize::Tri, - rows: naga::VectorSize::Bi, - scalar: naga::Scalar::F32, + GlslUniformType::Matrix { + columns: GlslVectorSize::Tri, + rows: GlslVectorSize::Bi, + scalar: GlslScalar::F32, } => { let data = &get_data::(data_bytes, offset); unsafe { gl.uniform_matrix_3x2_f32_slice(location, false, data) }; } - naga::TypeInner::Matrix { - columns: naga::VectorSize::Tri, - rows: naga::VectorSize::Tri, - scalar: naga::Scalar::F32, + GlslUniformType::Matrix { + columns: GlslVectorSize::Tri, + rows: GlslVectorSize::Tri, + scalar: GlslScalar::F32, } => { // repack 3 vec3s into 9 values. 
let unpacked_data = &get_data::(data_bytes, offset); @@ -1829,10 +1829,10 @@ impl super::Queue { ]; unsafe { gl.uniform_matrix_3_f32_slice(location, false, &packed_data) }; } - naga::TypeInner::Matrix { - columns: naga::VectorSize::Tri, - rows: naga::VectorSize::Quad, - scalar: naga::Scalar::F32, + GlslUniformType::Matrix { + columns: GlslVectorSize::Tri, + rows: GlslVectorSize::Quad, + scalar: GlslScalar::F32, } => { let data = &get_data::(data_bytes, offset); unsafe { gl.uniform_matrix_3x4_f32_slice(location, false, data) }; @@ -1841,18 +1841,18 @@ impl super::Queue { // // --- Matrix 4xR --- // - naga::TypeInner::Matrix { - columns: naga::VectorSize::Quad, - rows: naga::VectorSize::Bi, - scalar: naga::Scalar::F32, + GlslUniformType::Matrix { + columns: GlslVectorSize::Quad, + rows: GlslVectorSize::Bi, + scalar: GlslScalar::F32, } => { let data = &get_data::(data_bytes, offset); unsafe { gl.uniform_matrix_4x2_f32_slice(location, false, data) }; } - naga::TypeInner::Matrix { - columns: naga::VectorSize::Quad, - rows: naga::VectorSize::Tri, - scalar: naga::Scalar::F32, + GlslUniformType::Matrix { + columns: GlslVectorSize::Quad, + rows: GlslVectorSize::Tri, + scalar: GlslScalar::F32, } => { // repack 4 vec3s into 12 values. 
let unpacked_data = &get_data::(data_bytes, offset); @@ -1865,10 +1865,10 @@ impl super::Queue { ]; unsafe { gl.uniform_matrix_4x3_f32_slice(location, false, &packed_data) }; } - naga::TypeInner::Matrix { - columns: naga::VectorSize::Quad, - rows: naga::VectorSize::Quad, - scalar: naga::Scalar::F32, + GlslUniformType::Matrix { + columns: GlslVectorSize::Quad, + rows: GlslVectorSize::Quad, + scalar: GlslScalar::F32, } => { let data = &get_data::(data_bytes, offset); unsafe { gl.uniform_matrix_4_f32_slice(location, false, data) }; diff --git a/wgpu-hal/src/lib.rs b/wgpu-hal/src/lib.rs index 51815774dec..73ce51d2fea 100644 --- a/wgpu-hal/src/lib.rs +++ b/wgpu-hal/src/lib.rs @@ -241,6 +241,8 @@ )] extern crate alloc; +#[allow(unused_extern_crates)] +extern crate naga_types as nt; extern crate wgpu_types as wgt; // Each of these backends needs `std` in some fashion; usually `std::thread` functions. #[cfg(any(dx12, gles_with_std, metal, vulkan))] diff --git a/wgpu-hal/src/metal/device.rs b/wgpu-hal/src/metal/device.rs index 6a66826e889..400a4e8e814 100644 --- a/wgpu-hal/src/metal/device.rs +++ b/wgpu-hal/src/metal/device.rs @@ -81,49 +81,49 @@ fn create_depth_stencil_desc( desc } -const fn convert_vertex_format_to_naga(format: wgt::VertexFormat) -> naga::back::msl::VertexFormat { +const fn convert_vertex_format_to_naga(format: wgt::VertexFormat) -> nt::VertexFormat { match format { - wgt::VertexFormat::Uint8 => naga::back::msl::VertexFormat::Uint8, - wgt::VertexFormat::Uint8x2 => naga::back::msl::VertexFormat::Uint8x2, - wgt::VertexFormat::Uint8x4 => naga::back::msl::VertexFormat::Uint8x4, - wgt::VertexFormat::Sint8 => naga::back::msl::VertexFormat::Sint8, - wgt::VertexFormat::Sint8x2 => naga::back::msl::VertexFormat::Sint8x2, - wgt::VertexFormat::Sint8x4 => naga::back::msl::VertexFormat::Sint8x4, - wgt::VertexFormat::Unorm8 => naga::back::msl::VertexFormat::Unorm8, - wgt::VertexFormat::Unorm8x2 => naga::back::msl::VertexFormat::Unorm8x2, - wgt::VertexFormat::Unorm8x4 => 
naga::back::msl::VertexFormat::Unorm8x4, - wgt::VertexFormat::Snorm8 => naga::back::msl::VertexFormat::Snorm8, - wgt::VertexFormat::Snorm8x2 => naga::back::msl::VertexFormat::Snorm8x2, - wgt::VertexFormat::Snorm8x4 => naga::back::msl::VertexFormat::Snorm8x4, - wgt::VertexFormat::Uint16 => naga::back::msl::VertexFormat::Uint16, - wgt::VertexFormat::Uint16x2 => naga::back::msl::VertexFormat::Uint16x2, - wgt::VertexFormat::Uint16x4 => naga::back::msl::VertexFormat::Uint16x4, - wgt::VertexFormat::Sint16 => naga::back::msl::VertexFormat::Sint16, - wgt::VertexFormat::Sint16x2 => naga::back::msl::VertexFormat::Sint16x2, - wgt::VertexFormat::Sint16x4 => naga::back::msl::VertexFormat::Sint16x4, - wgt::VertexFormat::Unorm16 => naga::back::msl::VertexFormat::Unorm16, - wgt::VertexFormat::Unorm16x2 => naga::back::msl::VertexFormat::Unorm16x2, - wgt::VertexFormat::Unorm16x4 => naga::back::msl::VertexFormat::Unorm16x4, - wgt::VertexFormat::Snorm16 => naga::back::msl::VertexFormat::Snorm16, - wgt::VertexFormat::Snorm16x2 => naga::back::msl::VertexFormat::Snorm16x2, - wgt::VertexFormat::Snorm16x4 => naga::back::msl::VertexFormat::Snorm16x4, - wgt::VertexFormat::Float16 => naga::back::msl::VertexFormat::Float16, - wgt::VertexFormat::Float16x2 => naga::back::msl::VertexFormat::Float16x2, - wgt::VertexFormat::Float16x4 => naga::back::msl::VertexFormat::Float16x4, - wgt::VertexFormat::Float32 => naga::back::msl::VertexFormat::Float32, - wgt::VertexFormat::Float32x2 => naga::back::msl::VertexFormat::Float32x2, - wgt::VertexFormat::Float32x3 => naga::back::msl::VertexFormat::Float32x3, - wgt::VertexFormat::Float32x4 => naga::back::msl::VertexFormat::Float32x4, - wgt::VertexFormat::Uint32 => naga::back::msl::VertexFormat::Uint32, - wgt::VertexFormat::Uint32x2 => naga::back::msl::VertexFormat::Uint32x2, - wgt::VertexFormat::Uint32x3 => naga::back::msl::VertexFormat::Uint32x3, - wgt::VertexFormat::Uint32x4 => naga::back::msl::VertexFormat::Uint32x4, - wgt::VertexFormat::Sint32 => 
naga::back::msl::VertexFormat::Sint32, - wgt::VertexFormat::Sint32x2 => naga::back::msl::VertexFormat::Sint32x2, - wgt::VertexFormat::Sint32x3 => naga::back::msl::VertexFormat::Sint32x3, - wgt::VertexFormat::Sint32x4 => naga::back::msl::VertexFormat::Sint32x4, - wgt::VertexFormat::Unorm10_10_10_2 => naga::back::msl::VertexFormat::Unorm10_10_10_2, - wgt::VertexFormat::Unorm8x4Bgra => naga::back::msl::VertexFormat::Unorm8x4Bgra, + wgt::VertexFormat::Uint8 => nt::VertexFormat::Uint8, + wgt::VertexFormat::Uint8x2 => nt::VertexFormat::Uint8x2, + wgt::VertexFormat::Uint8x4 => nt::VertexFormat::Uint8x4, + wgt::VertexFormat::Sint8 => nt::VertexFormat::Sint8, + wgt::VertexFormat::Sint8x2 => nt::VertexFormat::Sint8x2, + wgt::VertexFormat::Sint8x4 => nt::VertexFormat::Sint8x4, + wgt::VertexFormat::Unorm8 => nt::VertexFormat::Unorm8, + wgt::VertexFormat::Unorm8x2 => nt::VertexFormat::Unorm8x2, + wgt::VertexFormat::Unorm8x4 => nt::VertexFormat::Unorm8x4, + wgt::VertexFormat::Snorm8 => nt::VertexFormat::Snorm8, + wgt::VertexFormat::Snorm8x2 => nt::VertexFormat::Snorm8x2, + wgt::VertexFormat::Snorm8x4 => nt::VertexFormat::Snorm8x4, + wgt::VertexFormat::Uint16 => nt::VertexFormat::Uint16, + wgt::VertexFormat::Uint16x2 => nt::VertexFormat::Uint16x2, + wgt::VertexFormat::Uint16x4 => nt::VertexFormat::Uint16x4, + wgt::VertexFormat::Sint16 => nt::VertexFormat::Sint16, + wgt::VertexFormat::Sint16x2 => nt::VertexFormat::Sint16x2, + wgt::VertexFormat::Sint16x4 => nt::VertexFormat::Sint16x4, + wgt::VertexFormat::Unorm16 => nt::VertexFormat::Unorm16, + wgt::VertexFormat::Unorm16x2 => nt::VertexFormat::Unorm16x2, + wgt::VertexFormat::Unorm16x4 => nt::VertexFormat::Unorm16x4, + wgt::VertexFormat::Snorm16 => nt::VertexFormat::Snorm16, + wgt::VertexFormat::Snorm16x2 => nt::VertexFormat::Snorm16x2, + wgt::VertexFormat::Snorm16x4 => nt::VertexFormat::Snorm16x4, + wgt::VertexFormat::Float16 => nt::VertexFormat::Float16, + wgt::VertexFormat::Float16x2 => nt::VertexFormat::Float16x2, + 
wgt::VertexFormat::Float16x4 => nt::VertexFormat::Float16x4, + wgt::VertexFormat::Float32 => nt::VertexFormat::Float32, + wgt::VertexFormat::Float32x2 => nt::VertexFormat::Float32x2, + wgt::VertexFormat::Float32x3 => nt::VertexFormat::Float32x3, + wgt::VertexFormat::Float32x4 => nt::VertexFormat::Float32x4, + wgt::VertexFormat::Uint32 => nt::VertexFormat::Uint32, + wgt::VertexFormat::Uint32x2 => nt::VertexFormat::Uint32x2, + wgt::VertexFormat::Uint32x3 => nt::VertexFormat::Uint32x3, + wgt::VertexFormat::Uint32x4 => nt::VertexFormat::Uint32x4, + wgt::VertexFormat::Sint32 => nt::VertexFormat::Sint32, + wgt::VertexFormat::Sint32x2 => nt::VertexFormat::Sint32x2, + wgt::VertexFormat::Sint32x3 => nt::VertexFormat::Sint32x3, + wgt::VertexFormat::Sint32x4 => nt::VertexFormat::Sint32x4, + wgt::VertexFormat::Unorm10_10_10_2 => nt::VertexFormat::Unorm10_10_10_2, + wgt::VertexFormat::Unorm8x4Bgra => nt::VertexFormat::Unorm8x4Bgra, wgt::VertexFormat::Float64 | wgt::VertexFormat::Float64x2 diff --git a/wgpu-types/Cargo.toml b/wgpu-types/Cargo.toml index 7c62d605ca9..a1edd2d5526 100644 --- a/wgpu-types/Cargo.toml +++ b/wgpu-types/Cargo.toml @@ -37,10 +37,15 @@ alloc_instead_of_core = "warn" [features] default = ["std"] -std = ["js-sys?/std", "web-sys?/std"] +std = ["js-sys?/std", "web-sys?/std", "naga-types/std"] strict_asserts = [] fragile-send-sync-non-atomic-wasm = [] -serde = ["dep:serde", "bitflags/serde"] +serde = [ + "dep:serde", + "bitflags/serde", + "naga-types/serialize", + "naga-types/deserialize", +] # Enables some internal instrumentation for debugging purposes. 
counters = [] # Enables variants of `Trace` other than `Trace::Off` @@ -50,6 +55,8 @@ web = ["dep:js-sys", "dep:web-sys"] exhaust = ["dep:exhaust"] [dependencies] +naga-types = { workspace = true, features = [] } + bitflags.workspace = true bytemuck = { workspace = true, features = ["derive"] } exhaust = { workspace = true, optional = true } diff --git a/wgpu-types/src/lib.rs b/wgpu-types/src/lib.rs index e68588e327b..71dc9ee0218 100644 --- a/wgpu-types/src/lib.rs +++ b/wgpu-types/src/lib.rs @@ -19,6 +19,8 @@ extern crate std; extern crate alloc; +extern crate naga_types as nt; + use core::{fmt, hash::Hash, time::Duration}; #[cfg(any(feature = "serde", test))] @@ -48,9 +50,10 @@ mod surface; mod texture; mod tokens; mod transfers; -mod vertex; mod write_only; +pub use nt::{VertexAttribute, VertexFormat, VertexStepMode}; + pub use adapter::*; pub use backend::*; pub use binding::*; @@ -70,105 +73,9 @@ pub use surface::*; pub use texture::*; pub use tokens::*; pub use transfers::*; -pub use vertex::*; pub use write_only::*; -/// Create a Markdown link definition referring to the `wgpu` crate. -/// -/// This macro should be used inside a `#[doc = ...]` attribute. -/// The two arguments should be string literals or macros that expand to string literals. -/// If the module in which the item using this macro is located is not the crate root, -/// use the `../` syntax. -/// -/// We cannot simply use rustdoc links to `wgpu` because it is one of our dependents. -/// This link adapts to work in locally generated documentation (`cargo doc`) by default, -/// and work with `docs.rs` URL structure when building for `docs.rs`. -/// -/// Note: This macro cannot be used outside this crate, because `cfg(docsrs)` will not apply. -#[cfg(not(docsrs))] -macro_rules! 
link_to_wgpu_docs { - ([$reference:expr]: $url_path:expr) => { - concat!("[", $reference, "]: ../wgpu/", $url_path) - }; - - (../ [$reference:expr]: $url_path:expr) => { - concat!("[", $reference, "]: ../../wgpu/", $url_path) - }; -} -#[cfg(docsrs)] -macro_rules! link_to_wgpu_docs { - ($(../)? [$reference:expr]: $url_path:expr) => { - concat!( - "[", - $reference, - // URL path will have a base URL of https://docs.rs/ - "]: /wgpu/", - // The version of wgpu-types is not necessarily the same as the version of wgpu - // if a patch release of either has been published, so we cannot use the full version - // number. docs.rs will interpret this single number as a Cargo-style version - // requirement and redirect to the latest compatible version. - // - // This technique would break if `wgpu` and `wgpu-types` ever switch to having distinct - // major version numbering. An alternative would be to hardcode the corresponding `wgpu` - // version, but that would give us another thing to forget to update. - env!("CARGO_PKG_VERSION_MAJOR"), - "/wgpu/", - $url_path - ) - }; -} - -/// Create a Markdown link definition referring to an item in the `wgpu` crate. -/// -/// This macro should be used inside a `#[doc = ...]` attribute. -/// See [`link_to_wgpu_docs`] for more details. -macro_rules! link_to_wgpu_item { - ($kind:ident $name:ident) => { - $crate::link_to_wgpu_docs!( - [concat!("`", stringify!($name), "`")]: concat!(stringify!($kind), ".", stringify!($name), ".html") - ) - }; -} - -/// Create a Markdown link definition referring to the `wgpu_core` crate. -/// -/// This macro should be used inside a `#[doc = ...]` attribute. -/// See [`link_to_wgpu_docs`] for more details. -#[cfg(not(docsrs))] -macro_rules! link_to_wgc_docs { - ([$reference:expr]: $url_path:expr) => { - concat!("[", $reference, "]: ../wgpu_core/", $url_path) - }; - - (../ [$reference:expr]: $url_path:expr) => { - concat!("[", $reference, "]: ../../wgpu_core/", $url_path) - }; -} -#[cfg(docsrs)] -macro_rules! 
link_to_wgc_docs { - ($(../)? [$reference:expr]: $url_path:expr) => { - concat!( - "[", - $reference, - // URL path will have a base URL of https://docs.rs/ - "]: /wgpu_core/", - // The version of wgpu-types is not necessarily the same as the version of wgpu_core - // if a patch release of either has been published, so we cannot use the full version - // number. docs.rs will interpret this single number as a Cargo-style version - // requirement and redirect to the latest compatible version. - // - // This technique would break if `wgpu_core` and `wgpu-types` ever switch to having - // distinct major version numbering. An alternative would be to hardcode the - // corresponding `wgpu_core` version, but that would give us another thing to forget - // to update. - env!("CARGO_PKG_VERSION_MAJOR"), - "/wgpu_core/", - $url_path - ) - }; -} - -pub(crate) use {link_to_wgc_docs, link_to_wgpu_docs, link_to_wgpu_item}; +pub(crate) use naga_types::{link_to_wgc_docs, link_to_wgpu_docs, link_to_wgpu_item}; /// Integral type used for [`Buffer`] offsets and sizes. /// diff --git a/wgpu-types/src/render.rs b/wgpu-types/src/render.rs index cbf8a7d8920..759a0dc07b8 100644 --- a/wgpu-types/src/render.rs +++ b/wgpu-types/src/render.rs @@ -1,5 +1,7 @@ //! Types for configuring render passes and render pipelines (except for vertex attributes). +pub use nt::PrimitiveTopology; + use bytemuck::{Pod, Zeroable}; #[cfg(any(feature = "serde", test))] @@ -273,56 +275,6 @@ impl Default for ColorWrites { } } -/// Primitive type the input mesh is composed of. -/// -/// Corresponds to [WebGPU `GPUPrimitiveTopology`]( -/// https://gpuweb.github.io/gpuweb/#enumdef-gpuprimitivetopology). -#[repr(C)] -#[derive(Copy, Clone, Debug, Default, Hash, Eq, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "serde", serde(rename_all = "kebab-case"))] -pub enum PrimitiveTopology { - /// Vertex data is a list of points. Each vertex is a new point. 
- PointList = 0, - /// Vertex data is a list of lines. Each pair of vertices composes a new line. - /// - /// Vertices `0 1 2 3` create two lines `0 1` and `2 3` - LineList = 1, - /// Vertex data is a strip of lines. Each set of two adjacent vertices form a line. - /// - /// Vertices `0 1 2 3` create three lines `0 1`, `1 2`, and `2 3`. - LineStrip = 2, - /// Vertex data is a list of triangles. Each set of 3 vertices composes a new triangle. - /// - /// Vertices `0 1 2 3 4 5` create two triangles `0 1 2` and `3 4 5` - #[default] - TriangleList = 3, - /// Vertex data is a triangle strip. Each set of three adjacent vertices form a triangle. - /// - /// Vertices `0 1 2 3 4 5` create four triangles `0 1 2`, `2 1 3`, `2 3 4`, and `4 3 5` - TriangleStrip = 4, -} - -impl PrimitiveTopology { - /// Returns true for strip topologies. - #[must_use] - pub fn is_strip(&self) -> bool { - match *self { - Self::PointList | Self::LineList | Self::TriangleList => false, - Self::LineStrip | Self::TriangleStrip => true, - } - } - - /// Returns true for triangle topologies. - #[must_use] - pub fn is_triangles(&self) -> bool { - match *self { - Self::TriangleList | Self::TriangleStrip => true, - Self::PointList | Self::LineList | Self::LineStrip => false, - } - } -} - /// Vertex winding order which classifies the "front" face of a triangle. /// /// Corresponds to [WebGPU `GPUFrontFace`]( diff --git a/wgpu-types/src/vertex.rs b/wgpu-types/src/vertex.rs deleted file mode 100644 index 4a3bc405c88..00000000000 --- a/wgpu-types/src/vertex.rs +++ /dev/null @@ -1,282 +0,0 @@ -//! Types for defining vertex attributes and their buffers. - -#[cfg(any(feature = "serde", test))] -use serde::{Deserialize, Serialize}; - -use crate::{link_to_wgpu_docs, link_to_wgpu_item}; - -#[cfg(doc)] -use crate::Features; - -/// Whether a vertex buffer is indexed by vertex or by instance. 
-/// -/// Consider a call to [`RenderPass::draw`] like this: -/// -/// ```ignore -/// render_pass.draw(vertices, instances) -/// ``` -/// -/// where `vertices` is a `Range` of vertex indices, and -/// `instances` is a `Range` of instance indices. -/// -/// For this call, `wgpu` invokes the vertex shader entry point once -/// for every possible `(v, i)` pair, where `v` is drawn from -/// `vertices` and `i` is drawn from `instances`. These invocations -/// may happen in any order, and will usually run in parallel. -/// -/// Each vertex buffer has a step mode, established by the -/// [`step_mode`] field of its [`VertexBufferLayout`], given when the -/// pipeline was created. Buffers whose step mode is [`Vertex`] use -/// `v` as the index into their contents, whereas buffers whose step -/// mode is [`Instance`] use `i`. The indicated buffer element then -/// contributes zero or more attribute values for the `(v, i)` vertex -/// shader invocation to use, based on the [`VertexBufferLayout`]'s -/// [`attributes`] list. -/// -/// You can visualize the results from all these vertex shader -/// invocations as a matrix with a row for each `i` from `instances`, -/// and with a column for each `v` from `vertices`. In one sense, `v` -/// and `i` are symmetrical: both are used to index vertex buffers and -/// provide attribute values. But the key difference between `v` and -/// `i` is that line and triangle primitives are built from the values -/// of each row, along which `i` is constant and `v` varies, not the -/// columns. -/// -/// An indexed draw call works similarly: -/// -/// ```ignore -/// render_pass.draw_indexed(indices, base_vertex, instances) -/// ``` -/// -/// The only difference is that `v` values are drawn from the contents -/// of the index buffer—specifically, the subrange of the index -/// buffer given by `indices`—instead of simply being sequential -/// integers, as they are in a `draw` call. 
-/// -/// A non-instanced call, where `instances` is `0..1`, is simply a -/// matrix with only one row. -/// -/// Corresponds to [WebGPU `GPUVertexStepMode`]( -/// https://gpuweb.github.io/gpuweb/#enumdef-gpuvertexstepmode). -/// -#[doc = link_to_wgpu_docs!(["`RenderPass::draw`"]: "struct.RenderPass.html#method.draw")] -#[doc = link_to_wgpu_item!(struct VertexBufferLayout)] -#[doc = link_to_wgpu_docs!(["`step_mode`"]: "struct.VertexBufferLayout.html#structfield.step_mode")] -#[doc = link_to_wgpu_docs!(["`attributes`"]: "struct.VertexBufferLayout.html#structfield.attributes")] -/// [`Vertex`]: VertexStepMode::Vertex -/// [`Instance`]: VertexStepMode::Instance -#[repr(C)] -#[derive(Copy, Clone, Debug, Default, Hash, Eq, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "serde", serde(rename_all = "kebab-case"))] -pub enum VertexStepMode { - /// Vertex data is advanced every vertex. - #[default] - Vertex = 0, - /// Vertex data is advanced every instance. - Instance = 1, -} - -/// Vertex inputs (attributes) to shaders. -/// -/// These are used to specify the individual attributes within a [`VertexBufferLayout`]. -/// See its documentation for an example. -/// -/// The [`vertex_attr_array!`] macro can help create these with appropriate offsets. -/// -/// Corresponds to [WebGPU `GPUVertexAttribute`]( -/// https://gpuweb.github.io/gpuweb/#dictdef-gpuvertexattribute). -/// -#[doc = link_to_wgpu_docs!(["`vertex_attr_array!`"]: "macro.vertex_attr_array.html")] -#[doc = link_to_wgpu_item!(struct VertexBufferLayout)] -#[repr(C)] -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))] -pub struct VertexAttribute { - /// Format of the input - pub format: VertexFormat, - /// Byte offset of the start of the input - pub offset: crate::BufferAddress, - /// Location for this input. 
Must match the location in the shader. - pub shader_location: crate::ShaderLocation, -} - -/// Vertex Format for a [`VertexAttribute`] (input). -/// -/// Corresponds to [WebGPU `GPUVertexFormat`]( -/// https://gpuweb.github.io/gpuweb/#enumdef-gpuvertexformat). -#[repr(C)] -#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "serde", serde(rename_all = "lowercase"))] -pub enum VertexFormat { - /// One unsigned byte (u8). `u32` in shaders. - Uint8 = 0, - /// Two unsigned bytes (u8). `vec2` in shaders. - Uint8x2 = 1, - /// Four unsigned bytes (u8). `vec4` in shaders. - Uint8x4 = 2, - /// One signed byte (i8). `i32` in shaders. - Sint8 = 3, - /// Two signed bytes (i8). `vec2` in shaders. - Sint8x2 = 4, - /// Four signed bytes (i8). `vec4` in shaders. - Sint8x4 = 5, - /// One unsigned byte (u8). [0, 255] converted to float [0, 1] `f32` in shaders. - Unorm8 = 6, - /// Two unsigned bytes (u8). [0, 255] converted to float [0, 1] `vec2` in shaders. - Unorm8x2 = 7, - /// Four unsigned bytes (u8). [0, 255] converted to float [0, 1] `vec4` in shaders. - Unorm8x4 = 8, - /// One signed byte (i8). [−127, 127] converted to float [−1, 1] `f32` in shaders. - Snorm8 = 9, - /// Two signed bytes (i8). [−127, 127] converted to float [−1, 1] `vec2` in shaders. - Snorm8x2 = 10, - /// Four signed bytes (i8). [−127, 127] converted to float [−1, 1] `vec4` in shaders. - Snorm8x4 = 11, - /// One unsigned short (u16). `u32` in shaders. - Uint16 = 12, - /// Two unsigned shorts (u16). `vec2` in shaders. - Uint16x2 = 13, - /// Four unsigned shorts (u16). `vec4` in shaders. - Uint16x4 = 14, - /// One signed short (i16). `i32` in shaders. - Sint16 = 15, - /// Two signed shorts (i16). `vec2` in shaders. - Sint16x2 = 16, - /// Four signed shorts (i16). `vec4` in shaders. - Sint16x4 = 17, - /// One unsigned short (u16). [0, 65535] converted to float [0, 1] `f32` in shaders. 
- Unorm16 = 18, - /// Two unsigned shorts (u16). [0, 65535] converted to float [0, 1] `vec2` in shaders. - Unorm16x2 = 19, - /// Four unsigned shorts (u16). [0, 65535] converted to float [0, 1] `vec4` in shaders. - Unorm16x4 = 20, - /// One signed short (i16). [−32767, 32767] converted to float [−1, 1] `f32` in shaders. - Snorm16 = 21, - /// Two signed shorts (i16). [−32767, 32767] converted to float [−1, 1] `vec2` in shaders. - Snorm16x2 = 22, - /// Four signed shorts (i16). [−32767, 32767] converted to float [−1, 1] `vec4` in shaders. - Snorm16x4 = 23, - /// One half-precision float (no Rust equiv). `f32` in shaders. - Float16 = 24, - /// Two half-precision floats (no Rust equiv). `vec2` in shaders. - Float16x2 = 25, - /// Four half-precision floats (no Rust equiv). `vec4` in shaders. - Float16x4 = 26, - /// One single-precision float (f32). `f32` in shaders. - Float32 = 27, - /// Two single-precision floats (f32). `vec2` in shaders. - Float32x2 = 28, - /// Three single-precision floats (f32). `vec3` in shaders. - Float32x3 = 29, - /// Four single-precision floats (f32). `vec4` in shaders. - Float32x4 = 30, - /// One unsigned int (u32). `u32` in shaders. - Uint32 = 31, - /// Two unsigned ints (u32). `vec2` in shaders. - Uint32x2 = 32, - /// Three unsigned ints (u32). `vec3` in shaders. - Uint32x3 = 33, - /// Four unsigned ints (u32). `vec4` in shaders. - Uint32x4 = 34, - /// One signed int (i32). `i32` in shaders. - Sint32 = 35, - /// Two signed ints (i32). `vec2` in shaders. - Sint32x2 = 36, - /// Three signed ints (i32). `vec3` in shaders. - Sint32x3 = 37, - /// Four signed ints (i32). `vec4` in shaders. - Sint32x4 = 38, - /// One double-precision float (f64). `f32` in shaders. Requires [`Features::VERTEX_ATTRIBUTE_64BIT`]. - Float64 = 39, - /// Two double-precision floats (f64). `vec2` in shaders. Requires [`Features::VERTEX_ATTRIBUTE_64BIT`]. - Float64x2 = 40, - /// Three double-precision floats (f64). `vec3` in shaders. 
Requires [`Features::VERTEX_ATTRIBUTE_64BIT`]. - Float64x3 = 41, - /// Four double-precision floats (f64). `vec4` in shaders. Requires [`Features::VERTEX_ATTRIBUTE_64BIT`]. - Float64x4 = 42, - /// Three unsigned 10-bit integers and one 2-bit integer, packed into a 32-bit integer (u32). [0, 1023] and [0, 3] converted to float [0, 1] `vec4` in shaders. - #[cfg_attr(feature = "serde", serde(rename = "unorm10-10-10-2"))] - Unorm10_10_10_2 = 43, - /// Four unsigned 8-bit integers (u8) in BGRA. [0, 255] converted to float [0, 1] `vec4` RGBA in shaders. - #[cfg_attr(feature = "serde", serde(rename = "unorm8x4-bgra"))] - Unorm8x4Bgra = 44, -} - -impl VertexFormat { - /// Returns the byte size of the format. - #[must_use] - pub const fn size(&self) -> u64 { - match self { - Self::Uint8 | Self::Sint8 | Self::Unorm8 | Self::Snorm8 => 1, - Self::Uint8x2 - | Self::Sint8x2 - | Self::Unorm8x2 - | Self::Snorm8x2 - | Self::Uint16 - | Self::Sint16 - | Self::Unorm16 - | Self::Snorm16 - | Self::Float16 => 2, - Self::Uint8x4 - | Self::Sint8x4 - | Self::Unorm8x4 - | Self::Snorm8x4 - | Self::Uint16x2 - | Self::Sint16x2 - | Self::Unorm16x2 - | Self::Snorm16x2 - | Self::Float16x2 - | Self::Float32 - | Self::Uint32 - | Self::Sint32 - | Self::Unorm10_10_10_2 - | Self::Unorm8x4Bgra => 4, - Self::Uint16x4 - | Self::Sint16x4 - | Self::Unorm16x4 - | Self::Snorm16x4 - | Self::Float16x4 - | Self::Float32x2 - | Self::Uint32x2 - | Self::Sint32x2 - | Self::Float64 => 8, - Self::Float32x3 | Self::Uint32x3 | Self::Sint32x3 => 12, - Self::Float32x4 | Self::Uint32x4 | Self::Sint32x4 | Self::Float64x2 => 16, - Self::Float64x3 => 24, - Self::Float64x4 => 32, - } - } - - /// Returns the size read by an acceleration structure build of the vertex format. This is - /// slightly different from [`Self::size`] because the alpha component of 4-component formats - /// are not read in an acceleration structure build, allowing for a smaller stride. 
- #[must_use] - pub const fn min_acceleration_structure_vertex_stride(&self) -> u64 { - match self { - Self::Float16x2 | Self::Snorm16x2 => 4, - Self::Float32x3 => 12, - Self::Float32x2 => 8, - // This is the minimum value from DirectX - // > A16 component is ignored, other data can be packed there, such as setting vertex stride to 6 bytes - // - // https://microsoft.github.io/DirectX-Specs/d3d/Raytracing.html#d3d12_raytracing_geometry_triangles_desc - // - // Vulkan does not express a minimum stride. - Self::Float16x4 | Self::Snorm16x4 => 6, - _ => unreachable!(), - } - } - - /// Returns the alignment required for `wgpu::BlasTriangleGeometry::vertex_stride` - #[must_use] - pub const fn acceleration_structure_stride_alignment(&self) -> u64 { - match self { - Self::Float16x4 | Self::Float16x2 | Self::Snorm16x4 | Self::Snorm16x2 => 2, - Self::Float32x2 | Self::Float32x3 => 4, - _ => unreachable!(), - } - } -}