mirror of https://github.com/doukutsu-rs/doukutsu-rs synced 2025-12-10 22:38:30 +00:00

Compare commits

...

3 commits

Author SHA1 Message Date
biroder 84032d8c55
Merge 8dc676d5d8 into e615239b73 2025-11-09 19:05:53 -08:00
Peter Opatril e615239b73 Fix SW rendering on SDL2
Unconditionally create the canvas (which is later asserted to exist)
in the SW rendering path of SDL2 renderer creation. Also remove the
requirement for HW acceleration and vsync from SDL renderers.
2025-11-09 11:54:21 +01:00
biroder bd0332d38e Reapply "sound: reuse buffers (#336)"
This reverts commit 52636fa296.
2025-11-06 22:20:29 +02:00
5 changed files with 58 additions and 57 deletions
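
For context on the "Fix SW rendering on SDL2" commit above: dropping .accelerated() and .present_vsync() lets SDL fall back to a software renderer instead of failing on machines without HW acceleration or working vsync. A minimal standalone sketch of the relaxed canvas creation, assuming the rust-sdl2 crate's CanvasBuilder API (illustration only, not doukutsu-rs code):

// Sketch: SDL2 canvas creation without the acceleration/vsync requirements.
// With no builder flags set, SDL picks any available renderer, including a software one.
use std::error::Error;

fn main() -> Result<(), Box<dyn Error>> {
    let sdl = sdl2::init()?;
    let video = sdl.video()?;
    let window = video.window("sw-rendering sketch", 640, 480).build()?;

    // Previously: window.into_canvas().accelerated().present_vsync().build()
    let mut canvas = window.into_canvas().build()?;

    canvas.set_draw_color(sdl2::pixels::Color::RGB(0, 0, 32));
    canvas.clear();
    canvas.present();
    Ok(())
}

The first file diff below applies the same idea inside WindowOrCanvas, so the SW path always gets its canvas created.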
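
The "Reapply sound: reuse buffers (#336)" commit boils down to storing sample data behind Arc, so cloning a WavSample or a SoundBank shares one allocation instead of copying the PCM bytes (see the WavSample and SoundBank diffs below). A minimal standalone sketch of the idea, using a simplified stand-in for WavSample rather than the real doukutsu-rs type:

// Sketch: Arc<[u8]> makes sample data cheap to share with the audio side.
use std::sync::Arc;

#[derive(Clone)]
struct WavSample {
    sample_rate: u32,
    data: Arc<[u8]>, // previously Vec<u8>: every clone copied the whole buffer
}

fn main() {
    let pcm = vec![0x80u8; 22_050]; // one second of 8-bit "silence"
    let sample = WavSample { sample_rate: 22_050, data: pcm.into() };

    // Cloning only bumps a reference count; both handles point at the same
    // allocation, so handing a copy to the playback side duplicates nothing.
    let for_audio_thread = sample.clone();
    assert!(Arc::ptr_eq(&sample.data, &for_audio_thread.data));
    assert_eq!(for_audio_thread.sample_rate, sample.sample_rate);
}

This is also why SoundManager::bootstrap can now take the SoundBank by value and clone it cheaply, where it previously took a reference and deep-copied the bank via to_owned().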

View file

@@ -128,8 +128,6 @@ impl WindowOrCanvas {
         if let WindowOrCanvas::Win(window) = self {
             let canvas = window
                 .into_canvas()
-                .accelerated()
-                .present_vsync()
                 .build()
                 .map_err(|e| GameError::RenderError(e.to_string()))?;
@@ -489,7 +487,9 @@ impl BackendEventLoop for SDL2EventLoop {
                 GLContext { gles2_mode: false, is_sdl: true, get_proc_address, swap_buffers, user_data, ctx };
             return Ok(Box::new(OpenGLRenderer::new(gl_context, UnsafeCell::new(imgui))));
-        } else {
+        }
+        {
             let mut refs = self.refs.borrow_mut();
             let window = std::mem::take(&mut refs.window);
             refs.window = window.make_canvas()?;

View file

@@ -84,16 +84,16 @@ impl SoundManager {
         }
         let bnk = wave_bank::SoundBank::load_from(filesystem::open(ctx, "/builtin/organya-wavetable-doukutsu.bin")?)?;
-        Ok(SoundManager::bootstrap(&bnk, tx, rx)?)
+        Ok(SoundManager::bootstrap(bnk, tx, rx)?)
     }
     fn bootstrap(
-        soundbank: &SoundBank,
+        soundbank: SoundBank,
         tx: Sender<PlaybackMessage>,
         rx: Receiver<PlaybackMessage>,
     ) -> GameResult<SoundManager> {
         let mut sound_manager = SoundManager {
-            soundbank: Some(soundbank.to_owned()),
+            soundbank: Some(soundbank.clone()),
             tx,
             prev_song_id: 0,
             current_song_id: 0,
@@ -126,16 +126,16 @@ impl SoundManager {
         let config = config_result.unwrap();
         let res = match config.sample_format() {
-            cpal::SampleFormat::I8 => run::<i8>(rx, soundbank.to_owned(), device, config.into()),
-            cpal::SampleFormat::I16 => run::<i16>(rx, soundbank.to_owned(), device, config.into()),
-            cpal::SampleFormat::I32 => run::<i32>(rx, soundbank.to_owned(), device, config.into()),
-            cpal::SampleFormat::I64 => run::<i64>(rx, soundbank.to_owned(), device, config.into()),
-            cpal::SampleFormat::U8 => run::<u8>(rx, soundbank.to_owned(), device, config.into()),
-            cpal::SampleFormat::U16 => run::<u16>(rx, soundbank.to_owned(), device, config.into()),
-            cpal::SampleFormat::U32 => run::<u32>(rx, soundbank.to_owned(), device, config.into()),
-            cpal::SampleFormat::U64 => run::<u64>(rx, soundbank.to_owned(), device, config.into()),
-            cpal::SampleFormat::F32 => run::<f32>(rx, soundbank.to_owned(), device, config.into()),
-            cpal::SampleFormat::F64 => run::<f64>(rx, soundbank.to_owned(), device, config.into()),
+            cpal::SampleFormat::I8 => run::<i8>(rx, soundbank, device, config.into()),
+            cpal::SampleFormat::I16 => run::<i16>(rx, soundbank, device, config.into()),
+            cpal::SampleFormat::I32 => run::<i32>(rx, soundbank, device, config.into()),
+            cpal::SampleFormat::I64 => run::<i64>(rx, soundbank, device, config.into()),
+            cpal::SampleFormat::U8 => run::<u8>(rx, soundbank, device, config.into()),
+            cpal::SampleFormat::U16 => run::<u16>(rx, soundbank, device, config.into()),
+            cpal::SampleFormat::U32 => run::<u32>(rx, soundbank, device, config.into()),
+            cpal::SampleFormat::U64 => run::<u64>(rx, soundbank, device, config.into()),
+            cpal::SampleFormat::F32 => run::<f32>(rx, soundbank, device, config.into()),
+            cpal::SampleFormat::F64 => run::<f64>(rx, soundbank, device, config.into()),
             _ => Err(AudioError("Unsupported sample format.".to_owned())),
         };
@@ -157,7 +157,7 @@ impl SoundManager {
         let (tx, rx): (Sender<PlaybackMessage>, Receiver<PlaybackMessage>) = mpsc::channel();
         let soundbank = self.soundbank.take().unwrap();
-        *self = SoundManager::bootstrap(&soundbank, tx, rx)?;
+        *self = SoundManager::bootstrap(soundbank, tx, rx)?;
         Ok(())
     }

View file

@@ -4,14 +4,15 @@
 use std::cmp::min;
 use std::hint::unreachable_unchecked;
 use std::mem::MaybeUninit;
+use std::sync::Arc;
 use crate::sound::fir::FIR;
 use crate::sound::fir::FIR_STEP;
-use crate::sound::InterpolationMode;
 use crate::sound::organya::{Song as Organya, Version};
 use crate::sound::stuff::*;
 use crate::sound::wav::*;
 use crate::sound::wave_bank::SoundBank;
+use crate::sound::InterpolationMode;
 #[derive(Clone)]
 pub struct FIRData {
@@ -110,7 +111,7 @@ impl OrgPlaybackEngine {
         let format = WavFormat { channels: 1, sample_rate: 22050, bit_depth: 8 };
-        let rbuf = RenderBuffer::new_organya(WavSample { format, data: sound });
+        let rbuf = RenderBuffer::new_organya(format, sound);
         for j in 0..8 {
             for &k in &[0, 64] {
@@ -123,10 +124,14 @@ impl OrgPlaybackEngine {
         for (idx, (track, buf)) in song.tracks[8..].iter().zip(self.track_buffers[128..].iter_mut()).enumerate() {
             if song.version == Version::Extended {
                 // Check for OOB track count, instruments outside of the sample range will be set to the last valid sample
-                let index = if track.inst.inst as usize >= samples.samples.len() {samples.samples.len() - 1} else {track.inst.inst as usize} ;
+                let index = if track.inst.inst as usize >= samples.samples.len() {
+                    samples.samples.len() - 1
+                } else {
+                    track.inst.inst as usize
+                };
                 *buf = RenderBuffer::new(samples.samples[index].clone());
             } else {
-                let index = if idx >= samples.samples.len() {samples.samples.len() - 1} else {idx};
+                let index = if idx >= samples.samples.len() { samples.samples.len() - 1 } else { idx };
                 *buf = RenderBuffer::new(samples.samples[index].clone());
             }
         }
@@ -359,27 +364,21 @@ impl OrgPlaybackEngine {
             let (sl1, sr1, sl2, sr2) = match (is_16bit, is_stereo) {
                 (true, true) => unsafe {
                     let ps = pos << 2;
-                    let sl1 = (*sample_data_ptr.add(ps) as u16
-                        | (*sample_data_ptr.add(ps + 1) as u16) << 8)
+                    let sl1 = (*sample_data_ptr.add(ps) as u16 | (*sample_data_ptr.add(ps + 1) as u16) << 8)
                         as f32
                         / 32768.0;
-                    let sr1 =
-                        (*sample_data_ptr.add(ps + 2) as u16
-                            | (*sample_data_ptr.add(ps + 3) as u16) << 8)
-                            as f32
-                            / 32768.0;
+                    let sr1 = (*sample_data_ptr.add(ps + 2) as u16 | (*sample_data_ptr.add(ps + 3) as u16) << 8)
+                        as f32
+                        / 32768.0;
                     let ps = min(pos + 1, buf.base_pos + buf.len - 1) << 2;
-                    let sl2 = (*sample_data_ptr.add(ps) as u16
-                        | (*sample_data_ptr.add(ps + 1) as u16) << 8)
+                    let sl2 = (*sample_data_ptr.add(ps) as u16 | (*sample_data_ptr.add(ps + 1) as u16) << 8)
                         as f32
                         / 32768.0;
-                    let sr2 =
-                        (*sample_data_ptr.add(ps + 2) as u16
-                            | (*sample_data_ptr.add(ps + 3) as u16) << 8)
-                            as f32
-                            / 32768.0;
+                    let sr2 = (*sample_data_ptr.add(ps + 2) as u16 | (*sample_data_ptr.add(ps + 3) as u16) << 8)
+                        as f32
+                        / 32768.0;
                     (sl1, sr1, sl2, sr2)
-                }
+                },
                 (false, true) => unsafe {
                     let ps = pos << 1;
                     let sl1 = (*sample_data_ptr.add(ps) as f32 - 128.0) / 128.0;
@@ -388,26 +387,24 @@ impl OrgPlaybackEngine {
                     let sl2 = (*sample_data_ptr.add(ps) as f32 - 128.0) / 128.0;
                     let sr2 = (*sample_data_ptr.add(ps + 1) as f32 - 128.0) / 128.0;
                     (sl1, sr1, sl2, sr2)
-                }
+                },
                 (true, false) => unsafe {
                     let ps = pos << 1;
-                    let s1 = (*sample_data_ptr.add(ps) as u16
-                        | (*sample_data_ptr.add(ps + 1) as u16) << 8)
+                    let s1 = (*sample_data_ptr.add(ps) as u16 | (*sample_data_ptr.add(ps + 1) as u16) << 8)
                         as f32
                         / 32768.0;
                     let ps = min(pos + 1, buf.base_pos + buf.len - 1) << 1;
-                    let s2 = (*sample_data_ptr.add(ps) as u16
-                        | (*sample_data_ptr.add(ps + 1) as u16) << 8)
+                    let s2 = (*sample_data_ptr.add(ps) as u16 | (*sample_data_ptr.add(ps + 1) as u16) << 8)
                         as f32
                         / 32768.0;
                     (s1, s1, s2, s2)
-                }
+                },
                 (false, false) => unsafe {
                     let s1 = (*sample_data_ptr.add(pos) as f32 - 128.0) / 128.0;
                     let pos = min(pos + 1, buf.base_pos + buf.len - 1);
                     let s2 = (*sample_data_ptr.add(pos) as f32 - 128.0) / 128.0;
                     (s1, s1, s2, s2)
-                }
+                },
             };
             let r1 = buf.position.fract() as f32;
@@ -637,7 +634,10 @@ impl RenderBuffer {
             volume: 0,
             pan: 0,
             len: 0,
-            sample: WavSample { format: WavFormat { channels: 2, sample_rate: 22050, bit_depth: 16 }, data: vec![] },
+            sample: WavSample {
+                format: WavFormat { channels: 2, sample_rate: 22050, bit_depth: 16 },
+                data: Arc::new([]),
+            },
             playing: false,
             looping: false,
             base_pos: 0,
@@ -648,16 +648,16 @@ impl RenderBuffer {
         }
     }
-    pub fn new_organya(mut sample: WavSample) -> RenderBuffer {
-        let wave = sample.data.clone();
-        sample.data.clear();
+    pub fn new_organya(format: WavFormat, wave: Vec<u8>) -> RenderBuffer {
+        const SIZES: &[usize] = &[256, 256, 128, 128, 64, 32, 16, 8];
+        let mut sample_data = Vec::with_capacity(SIZES.iter().sum());
-        for size in &[256_usize, 256, 128, 128, 64, 32, 16, 8] {
+        for size in SIZES {
             let step = 256 / size;
             let mut acc = 0;
             for _ in 0..*size {
-                sample.data.push(wave[acc]);
+                sample_data.push(wave[acc]);
                 acc += step;
                 if acc >= 256 {
@@ -666,7 +666,7 @@ impl RenderBuffer {
             }
         }
-        RenderBuffer::new(sample)
+        RenderBuffer::new(WavSample { format, data: sample_data.into() })
     }
     #[inline]

View file

@@ -4,6 +4,7 @@
 use std::fmt;
 use std::io;
 use std::io::ErrorKind;
+use std::sync::Arc;
 use byteorder::{LE, ReadBytesExt};
@@ -45,7 +46,7 @@ impl fmt::Display for WavFormat {
 #[derive(Clone)]
 pub struct WavSample {
     pub format: WavFormat,
-    pub data: Vec<u8>,
+    pub data: Arc<[u8]>,
 }
 impl fmt::Display for WavSample {
@@ -119,6 +120,6 @@ impl WavSample {
         f.read_exact(&mut buf)?;
-        Ok(WavSample { format: WavFormat { channels, sample_rate: samples, bit_depth: bits }, data: buf })
+        Ok(WavSample { format: WavFormat { channels, sample_rate: samples, bit_depth: bits }, data: buf.into() })
     }
 }

View file

@@ -3,21 +3,21 @@
 // Copyright (c) 2020 doukutsu-rs contributors (see AUTHORS.md)
 use std::fmt;
 use std::io;
+use std::sync::Arc;
 use crate::sound::wav;
 #[derive(Clone)]
 pub struct SoundBank {
-    pub wave100: Box<[u8; 25600]>,
-    pub samples: Vec<wav::WavSample>,
+    pub wave100: Arc<[u8; 25600]>,
+    pub samples: Arc<[wav::WavSample]>,
 }
 impl fmt::Display for SoundBank {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         writeln!(f, "WAVE100: {:2X?}...", &self.wave100[..8])?;
-        for sample in &self.samples {
+        for sample in self.samples.iter() {
             writeln!(f, "{}", sample)?;
         }
@@ -41,7 +41,7 @@ impl SoundBank {
                 }
                 Err(err) => {
                     log::error!("Failed to read next sample: {}", err);
-                    return Ok(SoundBank { wave100, samples });
+                    return Ok(SoundBank { wave100: wave100.into(), samples: samples.into() });
                 }
             }
         }