Skip to content

Commit

Permalink
Remove duplicate pipeline ref (#372)
Browse files Browse the repository at this point in the history
  • Loading branch information
djeedai authored Oct 17, 2024
1 parent 1ad6298 commit 85c7c57
Show file tree
Hide file tree
Showing 2 changed files with 36 additions and 42 deletions.
6 changes: 4 additions & 2 deletions examples/utils/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,8 @@ use bevy::{
#[cfg(feature = "examples_world_inspector")]
use bevy_inspector_egui::quick::WorldInspectorPlugin;

/// Helper system to enable closing the example application by pressing the escape key (ESC).
/// Helper system to enable closing the example application by pressing the
/// escape key (ESC).
pub fn close_on_esc(mut ev_app_exit: EventWriter<AppExit>, input: Res<ButtonInput<KeyCode>>) {
if input.just_pressed(KeyCode::Escape) {
ev_app_exit.send(AppExit::Success);
Expand Down Expand Up @@ -81,7 +82,8 @@ impl Display for ExampleFailedError {

impl std::error::Error for ExampleFailedError {}

/// Convert an [`AppExit`] into a `Result`, for error code propagation to the OS.
/// Convert an [`AppExit`] into a `Result`, for error code propagation to the
/// OS.
pub trait AppExitIntoResult {
fn into_result(&self) -> Result<(), Box<dyn std::error::Error>>;
}
Expand Down
72 changes: 32 additions & 40 deletions src/render/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1777,9 +1777,6 @@ pub struct EffectsMeta {
// FIXME - This is a per-effect thing, unless we merge all meshes into a single buffer (makes
// sense) but in that case we need a vertex slice too to know which mesh to draw per effect.
vertices: BufferVec<GpuParticleVertex>,
/// The pipeline for the indirect dispatch shader, which populates the
/// indirect compute dispatch buffers.
indirect_dispatch_pipeline: Option<ComputePipeline>,
/// Various GPU limits and aligned sizes lazily allocated and cached for
/// convenience.
gpu_limits: GpuLimits,
Expand Down Expand Up @@ -1845,7 +1842,6 @@ impl EffectsMeta {
Some("hanabi:buffer:particle_group".to_string()),
),
vertices,
indirect_dispatch_pipeline: None,
gpu_limits,
}
}
Expand Down Expand Up @@ -2060,7 +2056,6 @@ pub(crate) fn prepare_effects(
render_device: Res<RenderDevice>,
render_queue: Res<RenderQueue>,
pipeline_cache: Res<PipelineCache>,
dispatch_indirect_pipeline: Res<DispatchIndirectPipeline>,
init_pipeline: Res<ParticlesInitPipeline>,
update_pipeline: Res<ParticlesUpdatePipeline>,
mut specialized_init_pipelines: ResMut<SpecializedComputePipelines<ParticlesInitPipeline>>,
Expand All @@ -2084,8 +2079,6 @@ pub(crate) fn prepare_effects(
.vertices
.write_buffer(&render_device, &render_queue);

effects_meta.indirect_dispatch_pipeline = Some(dispatch_indirect_pipeline.pipeline.clone());

// Clear last frame's buffer resizes which may have occurred during last frame,
// during `Node::run()` while the `BufferTable` could not be mutated.
effects_meta
Expand Down Expand Up @@ -3635,6 +3628,7 @@ impl Node for VfxSimulateNode {
let effects_meta = world.resource::<EffectsMeta>();
let effect_cache = world.resource::<EffectCache>();
let effect_bind_groups = world.resource::<EffectBindGroups>();
let dispatch_indirect_pipeline = world.resource::<DispatchIndirectPipeline>();
// let render_queue = world.resource::<RenderQueue>();

// Make sure to schedule any buffer copy from changed effects before accessing
Expand Down Expand Up @@ -3799,39 +3793,37 @@ impl Node for VfxSimulateNode {
});

// Dispatch indirect dispatch compute job
if let Some(indirect_dispatch_pipeline) = &effects_meta.indirect_dispatch_pipeline {
trace!("record commands for indirect dispatch pipeline...");

// FIXME - The `vfx_indirect` shader assumes a contiguous array of ParticleGroup
// structures. So we need to pass the full array size, and we
// just update the unused groups for nothing. Otherwise we might
// update some unused group and miss some used ones, if there's any gap
// in the array.
const WORKGROUP_SIZE: u32 = 64;
let total_group_count = effects_meta.particle_group_buffer.len() as u32;
let workgroup_count = (total_group_count + WORKGROUP_SIZE - 1) / WORKGROUP_SIZE;

// Setup compute pass
compute_pass.set_pipeline(indirect_dispatch_pipeline);
compute_pass.set_bind_group(
0,
// FIXME - got some unwrap() panic here, investigate... possibly race
// condition!
effects_meta.dr_indirect_bind_group.as_ref().unwrap(),
&[],
);
compute_pass.set_bind_group(
1,
effects_meta.sim_params_bind_group.as_ref().unwrap(),
&[],
);
compute_pass.dispatch_workgroups(workgroup_count, 1, 1);
trace!(
"indirect dispatch compute dispatched: num_batches={} workgroup_count={}",
total_group_count,
workgroup_count
);
}
trace!("record commands for indirect dispatch pipeline...");

// FIXME - The `vfx_indirect` shader assumes a contiguous array of ParticleGroup
// structures. So we need to pass the full array size, and we
// just update the unused groups for nothing. Otherwise we might
// update some unused group and miss some used ones, if there's any gap
// in the array.
const WORKGROUP_SIZE: u32 = 64;
let total_group_count = effects_meta.particle_group_buffer.len() as u32;
let workgroup_count = (total_group_count + WORKGROUP_SIZE - 1) / WORKGROUP_SIZE;

// Setup compute pass
compute_pass.set_pipeline(&dispatch_indirect_pipeline.pipeline);
compute_pass.set_bind_group(
0,
// FIXME - got some unwrap() panic here, investigate... possibly race
// condition!
effects_meta.dr_indirect_bind_group.as_ref().unwrap(),
&[],
);
compute_pass.set_bind_group(
1,
effects_meta.sim_params_bind_group.as_ref().unwrap(),
&[],
);
compute_pass.dispatch_workgroups(workgroup_count, 1, 1);
trace!(
"indirect dispatch compute dispatched: num_batches={} workgroup_count={}",
total_group_count,
workgroup_count
);
}

// Compute update pass
Expand Down

0 comments on commit 85c7c57

Please sign in to comment.