use std::{convert::TryInto, num::NonZeroU32};
use crate::{
vertex::SQUARE, Camera, Instance, InstanceBuffer, RenderWindowConfig, TextureAtlas, Vertex,
};
use pollster::FutureExt;
use thiserror::Error;
use wgpu::{include_wgsl, util::DeviceExt};
use winit::{
dpi::PhysicalSize,
error::OsError,
event::{Event, WindowEvent},
event_loop::{ControlFlow, EventLoop},
window::Window,
};
/// No device could be found which supports the given surface
#[derive(Clone, Copy, Debug, Error)]
#[error("No GPU could be found on this machine")]
pub struct NoGpuError {
    /// Prevents this type from being constructed outside this module
    _priv: (),
}
impl NoGpuError {
/// Create a new error
const fn new() -> Self {
Self { _priv: () }
}
}
/// A WebGPU or WebGL context could not be obtained for the surface
#[derive(Clone, Copy, Debug, Error)]
#[error("A WebGPU or WebGL context could not be obtained")]
pub struct NoWebContextError {
    /// Prevents this type from being constructed outside this module
    _priv: (),
}
impl NoWebContextError {
/// Create a new error
const fn new() -> Self {
Self { _priv: () }
}
}
/// Any of the failures that can occur while constructing a [`Renderer`]
#[derive(Debug, Error)]
pub enum NewRendererError {
    /// No GPU adapter compatible with the window surface was found
    #[error(transparent)]
    NoGpu(#[from] NoGpuError),
    /// A WebGPU/WebGL context could not be obtained
    #[error(transparent)]
    NoWebContext(#[from] NoWebContextError),
    /// The OS window itself could not be created
    #[error(transparent)]
    // TODO better error
    WindowInitError(#[from] OsError),
}
// TODO make this Debug
/// Owns the window, the GPU connection, and all resources needed to draw
/// textured sprite instances each frame.
pub struct Renderer {
    // TODO move some of this data elsewhere
    /// The portion of the window that frames are presented to
    surface: wgpu::Surface,
    /// Current surface settings (size, format, present mode); reapplied via
    /// `reconfigure` whenever a field changes
    surface_config: wgpu::SurfaceConfiguration,
    /// Present modes the adapter reported for this surface; consulted when
    /// toggling vsync
    supported_present_modes: Box<[wgpu::PresentMode]>,
    /// Logical connection to the GPU
    device: wgpu::Device,
    /// Command queue for submitting work to `device`
    queue: wgpu::Queue,
    /// Pipeline used by `render` to draw sprites
    render_pipeline: wgpu::RenderPipeline,
    /// Vertex buffer holding the unit-square geometry every sprite shares
    square_vertex_buffer: wgpu::Buffer,
    /// Number of vertices in `square_vertex_buffer`
    square_vertices: u32,
    /// Per-sprite instance data uploaded each frame
    instances: InstanceBuffer,
    /// View/projection state; resized together with the surface
    camera: Camera,
    /// Atlas holding all sprite textures
    textures: TextureAtlas,
    /// Held until `run` is called, then taken out (see `event_loop`)
    event_loop: Option<EventLoop<()>>,
    /// The OS window frames are presented to
    window: Window,
}
/// Locate a GPU adapter able to present to `surface`.
///
/// First asks for an adapter matching `power_preference`; if that yields
/// nothing, every primary-backend adapter is scanned for one whose surface
/// capabilities report at least one supported format. Returns [`NoGpuError`]
/// when both attempts come up empty.
fn get_adapter(
    instance: &wgpu::Instance,
    surface: &wgpu::Surface,
    power_preference: wgpu::PowerPreference,
) -> Result<wgpu::Adapter, NoGpuError> {
    let options = wgpu::RequestAdapterOptions {
        power_preference,
        compatible_surface: Some(surface),
        force_fallback_adapter: false,
    };
    // TODO this takes too long
    let preferred = instance.request_adapter(&options).block_on();
    match preferred {
        Some(adapter) => Ok(adapter),
        // Nothing matched the preference; fall back to scanning everything
        // available on the primary backends.
        None => instance
            .enumerate_adapters(wgpu::Backends::PRIMARY)
            .find(|candidate| !surface.get_capabilities(candidate).formats.is_empty())
            .ok_or(NoGpuError::new()),
    }
}
/// Build the render pipeline used to draw sprites.
///
/// Compiles `shaders/sprite.wgsl` and wires its `vs_main`/`fs_main` entry
/// points to the square-vertex and per-instance buffer layouts. The color
/// target uses alpha blending so sprites can have transparent regions.
fn sprite_render_pipeline(
    device: &wgpu::Device,
    texture_format: wgpu::TextureFormat,
    render_pipeline_layout: &wgpu::PipelineLayout,
) -> wgpu::RenderPipeline {
    let shader = device.create_shader_module(include_wgsl!("../shaders/sprite.wgsl"));
    device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
        label: Some("Sprite Render Pipeline"),
        layout: Some(render_pipeline_layout),
        // information about the vertex shader
        vertex: wgpu::VertexState {
            module: &shader,
            entry_point: "vs_main",
            // slot 0: shared square geometry; slot 1: per-sprite instance data
            buffers: &[Vertex::desc(), Instance::desc()],
        },
        // information about the fragment shader
        fragment: Some(wgpu::FragmentState {
            module: &shader,
            entry_point: "fs_main",
            targets: &[Some(wgpu::ColorTargetState {
                // must match the surface's format, or presentation fails
                format: texture_format,
                blend: Some(wgpu::BlendState::ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
        }),
        primitive: wgpu::PrimitiveState {
            // don't render the back of a sprite
            cull_mode: Some(wgpu::Face::Back),
            ..Default::default()
        },
        depth_stencil: None,
        multisample: wgpu::MultisampleState::default(),
        multiview: None,
    })
}
impl Renderer {
    /// Initializes the renderer
    ///
    /// # Errors
    ///
    /// Returns a [`NoGpuError`] if no device could be detected that can
    /// display to the window, a [`NoWebContextError`] if a surface could not
    /// be created, or a window-creation [`OsError`] from winit
    ///
    /// # Panics
    ///
    /// This function **must** be called on the main thread, or else it may
    /// panic on some platforms.
    // TODO make it possible to use without a window (ie, use a bitmap in memory as a surface)
    // TODO this function needs to be smaller
    pub fn new(config: &RenderWindowConfig) -> Result<Self, NewRendererError> {
        // build the window
        let event_loop = EventLoop::new();
        let window = config.to_window().build(&event_loop)?;
        // stored as Option so `run` can later take it out without moving self
        let event_loop = Some(event_loop);
        // the instance's main purpose is to create an adapter and a surface
        // NOTE(review): only the Vulkan backend is enabled here, yet a DX12
        // shader compiler is configured below — confirm whether other
        // backends were intended
        let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
            backends: wgpu::Backends::VULKAN,
            dx12_shader_compiler: wgpu::Dx12Compiler::Fxc, // TODO support DXC
        });
        // the surface is the part of the screen we'll draw to
        // (create_surface is unsafe: the surface must not outlive `window`;
        // both are owned by the returned Renderer, so they drop together)
        let surface =
            unsafe { instance.create_surface(&window) }.map_err(|_| NoWebContextError::new())?;
        let power_preference = config.power_preference();
        // the adapter is the handle to the GPU
        let adapter = get_adapter(&instance, &surface, power_preference)?;
        // gets a connection to the device, as well as a handle to its command queue
        // the options chosen here ensure that this is guaranteed to not panic
        let (device, queue) = adapter
            .request_device(
                &wgpu::DeviceDescriptor {
                    // empty feature set: every adapter supports it
                    features: wgpu::Features::empty(),
                    ..Default::default()
                },
                None,
            )
            .block_on()
            .expect("there was no device with the selected features");
        // configuration for the surface
        let capabilities = surface.get_capabilities(&adapter);
        let supported_present_modes = capabilities.present_modes.into_boxed_slice();
        let supported_alpha_modes = capabilities.alpha_modes.into_boxed_slice();
        let supported_texture_formats = capabilities.formats;
        let surface_config = config.to_surface_configuration(
            &supported_present_modes,
            &supported_alpha_modes,
            supported_texture_formats,
        );
        surface.configure(&device, &surface_config);
        // create the camera, sized to the window's current physical size
        let width = window.inner_size().width;
        let height = window.inner_size().height;
        let (camera, camera_bind_group_layout) = Camera::new(&device, width, height);
        // the vertex buffer used for rendering squares
        let square_vertices = SQUARE
            .len()
            .try_into()
            .expect("expected fewer than 3 billion vertices in a square");
        let square_vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Square Vertex Buffer"),
            contents: bytemuck::cast_slice(&SQUARE),
            usage: wgpu::BufferUsages::VERTEX,
        });
        // create the instance buffer
        let instances = InstanceBuffer::new(&device, config.instance_capacity);
        // TODO make this configurable
        let (textures, texture_layout) = TextureAtlas::new(
            &device,
            window.inner_size().width,
            window.inner_size().height,
        );
        // group 0 is the camera, group 1 is the texture atlas — must match
        // the set_bind_group calls in `render` and the shader's declarations
        let render_pipeline_layout =
            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some("Sprite Render Pipeline Layout"),
                bind_group_layouts: &[&camera_bind_group_layout, &texture_layout],
                push_constant_ranges: &[],
            });
        // set up a pipeline for sprite rendering
        let render_pipeline =
            sprite_render_pipeline(&device, surface_config.format, &render_pipeline_layout);
        Ok(Self {
            surface,
            surface_config,
            supported_present_modes,
            device,
            queue,
            render_pipeline,
            square_vertex_buffer,
            square_vertices,
            instances,
            camera,
            textures,
            event_loop,
            window,
        })
    }
    /// Reconfigure the surface with the current `surface_config`
    fn reconfigure(&mut self) {
        self.surface.configure(&self.device, &self.surface_config);
    }
    /// Resize just the renderer. The window will remain unchanged
    fn resize_renderer(&mut self, size: PhysicalSize<u32>) {
        // a zero-sized surface is invalid; log and keep the old size
        if size.width == 0 || size.height == 0 {
            log::error!("The window was somehow set to a size of zero");
            return;
        }
        self.surface_config.height = size.height;
        self.surface_config.width = size.width;
        // keep the camera's projection in sync with the surface
        self.camera.set_size(size.width, size.height);
        self.reconfigure();
    }
    /// Set the physical window and renderer size
    ///
    /// Taking `NonZeroU32` makes a zero-sized resize unrepresentable at the
    /// call site.
    pub fn resize(&mut self, width: NonZeroU32, height: NonZeroU32) {
        let size = PhysicalSize::new(width.get(), height.get());
        self.window.set_inner_size(size);
        self.resize_renderer(size);
    }
    /// Set vsync on or off. See [`RenderWindowConfig::present_mode`] for more details.
    pub fn set_vsync(&mut self, vsync: bool) {
        self.surface_config.present_mode =
            RenderWindowConfig::present_mode(vsync, &self.supported_present_modes);
        self.reconfigure();
    }
    /// Set the window's title
    pub fn set_title(&mut self, title: &str) {
        self.window.set_title(title);
    }
    /// Get a reference to the sprite instance buffer
    pub const fn instances(&self) -> &InstanceBuffer {
        &self.instances
    }
    /// Get a mutable reference to the sprite instance buffer
    pub fn instances_mut(&mut self) -> &mut InstanceBuffer {
        &mut self.instances
    }
    /// Get the camera information
    pub const fn camera(&self) -> &Camera {
        &self.camera
    }
    /// Get a mutable reference to the camera
    pub fn camera_mut(&mut self) -> &mut Camera {
        &mut self.camera
    }
    /// Get a reference to the texture atlas
    pub const fn textures(&self) -> &TextureAtlas {
        &self.textures
    }
    /// Get a mutable reference to the texture atlas
    pub fn textures_mut(&mut self) -> &mut TextureAtlas {
        &mut self.textures
    }
    /// Renders a new frame to the window
    ///
    /// Uploads the instance buffer, camera uniforms, and any pending texture
    /// data, then records and submits one render pass that draws every
    /// instance over the shared square geometry.
    ///
    /// # Errors
    ///
    /// A number of problems could occur here. A timeout could occur while
    /// trying to acquire the next frame. There may also be no more memory left
    /// that can be used for the new frame.
    // TODO this needs to be smaller
    #[profiling::function]
    fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
        // this will allow us to send commands to the gpu
        let mut encoder = self
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Render Encoder"),
            });
        // capture the count before fill_buffer, which may mutate the buffer
        let num_instances = self.instances.len();
        self.instances.fill_buffer(&self.device, &self.queue);
        self.camera.refresh(&self.queue);
        self.textures.fill_textures(&self.queue);
        // the new texture we can render to
        let output = self.surface.get_current_texture()?;
        let view = output
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());
        {
            profiling::scope!("encode render pass");
            let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("Render Pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: &view,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        // clear to black before drawing each frame
                        load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                        store: true,
                    },
                })],
                depth_stencil_attachment: None,
            });
            render_pass.set_pipeline(&self.render_pipeline);
            // bind group indices must match the pipeline layout built in `new`
            render_pass.set_bind_group(0, self.camera.bind_group(), &[]);
            render_pass.set_bind_group(1, self.textures.bind_group(), &[]);
            render_pass.set_vertex_buffer(0, self.square_vertex_buffer.slice(..));
            render_pass.set_vertex_buffer(1, self.instances.buffer_slice());
            render_pass.draw(0..self.square_vertices, 0..num_instances);
        }
        // the encoder can't finish building the command buffer until the
        // render pass is dropped
        // submit the command buffer to the GPU
        profiling::scope!("submit render");
        self.queue.submit(std::iter::once(encoder.finish()));
        output.present();
        Ok(())
    }
    /// Take the event loop out of the Renderer, without moving it
    ///
    /// # Panics
    ///
    /// This method must only be called once
    // TODO This is a quick fix to get the event loop inside the renderer.
    // In the future, we should make a separate struct that contains the
    // renderer and the event loop, which we move the event loop out of
    // while still being able to move the renderer.
    fn event_loop(&mut self) -> EventLoop<()> {
        self.event_loop.take().unwrap()
    }
    /// Run the renderer indefinitely
    ///
    /// `f` is invoked once per frame (on `MainEventsCleared`) before the
    /// frame is rendered. This call never returns; winit's `run` takes over
    /// the thread until the process exits.
    // TODO this needs to be smaller
    pub fn run<F: FnMut(&mut Self) + 'static>(mut self, mut f: F) -> ! {
        self.window.set_visible(true);
        let event_loop = self.event_loop();
        event_loop.run(move |event, _, control_flow| match event {
            Event::WindowEvent { window_id, event } => {
                // ignore events for windows we don't own
                if window_id == self.window.id() {
                    match event {
                        WindowEvent::Resized(size) => self.resize_renderer(size),
                        WindowEvent::CloseRequested => {
                            *control_flow = ControlFlow::ExitWithCode(0);
                        }
                        _ => (),
                    }
                }
            }
            Event::MainEventsCleared => {
                f(&mut self);
                // a memory leak occurs if we render a zero-size window,
                // along with a `SurfaceError::Outdated`. I don't know why that
                // happens, but let's make wgpu happy.
                // https://github.com/gfx-rs/wgpu/issues/1783#issuecomment-1328463201
                if self.window.inner_size().width != 0 && self.window.inner_size().height != 0 {
                    match self.render() {
                        Ok(_) => {}
                        // reconfigure the surface if it's been lost
                        Err(wgpu::SurfaceError::Lost) => {
                            self.reconfigure();
                        }
                        // if we ran out of memory, then we'll die
                        Err(wgpu::SurfaceError::OutOfMemory) => {
                            *control_flow = ControlFlow::ExitWithCode(1);
                        }
                        // otherwise, we'll just log the error
                        Err(e) => log::error!("{}", e),
                    }
                } else {
                    // zero-sized window: skip rendering and idle until events
                    *control_flow = ControlFlow::Wait;
                }
                profiling::finish_frame!();
            }
            _ => {}
        });
    }
}