1use crate::havok::HavokAnimation;
5use crate::havok::byte_reader::ByteReader;
6use crate::havok::object::HavokObject;
7use crate::havok::transform::HavokTransform;
8use core::{cell::RefCell, cmp};
9use std::f32;
10use std::sync::Arc;
11
/// Quantization scheme used for rotation (quaternion) tracks.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(clippy::upper_case_acronyms)]
enum RotationQuantization {
    /// 4-byte polar encoding (see `unpack_signed_quaternion_32`).
    POLAR32 = 0,
    /// Three 12-bit components, fourth reconstructed; 5 bytes.
    THREECOMP40 = 1,
    /// Three 15-bit components, fourth reconstructed; 6 bytes.
    THREECOMP48 = 2,
    /// Not handled by `unpack_quaternion` in this decoder.
    THREECOMP24 = 3,
    /// Not handled by `unpack_quaternion` in this decoder.
    STRAIGHT16 = 4,
    /// Four raw f32 components; 16 bytes. Not handled by `unpack_quaternion`.
    UNCOMPRESSED = 5,
}

impl RotationQuantization {
    /// Decodes the quantization field from the per-track header byte.
    ///
    /// # Panics
    /// Panics on values outside `0..=5`.
    pub fn from_raw(raw: u8) -> Self {
        match raw {
            0 => Self::POLAR32,
            1 => Self::THREECOMP40,
            2 => Self::THREECOMP48,
            3 => Self::THREECOMP24,
            4 => Self::STRAIGHT16,
            5 => Self::UNCOMPRESSED,
            _ => panic!("invalid rotation quantization: {}", raw),
        }
    }

    /// Byte alignment required before reading packed quaternions.
    pub fn align(&self) -> usize {
        match self {
            Self::POLAR32 | Self::UNCOMPRESSED => 4,
            Self::THREECOMP48 | Self::STRAIGHT16 => 2,
            Self::THREECOMP40 | Self::THREECOMP24 => 1,
        }
    }

    /// Storage size of one packed quaternion, in bytes.
    pub fn bytes_per_quaternion(&self) -> usize {
        match self {
            Self::POLAR32 => 4,
            Self::THREECOMP40 => 5,
            Self::THREECOMP48 => 6,
            Self::THREECOMP24 => 3,
            Self::STRAIGHT16 => 2,
            Self::UNCOMPRESSED => 16,
        }
    }
}
58
/// Quantization scheme used for scalar (translation / scale) tracks.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(clippy::upper_case_acronyms)]
enum ScalarQuantization {
    /// One byte per quantized component.
    BITS8 = 0,
    /// One little-endian u16 per quantized component.
    BITS16 = 1,
}

impl ScalarQuantization {
    /// Decodes the quantization field from the per-track header byte.
    ///
    /// # Panics
    /// Panics on values other than 0 or 1.
    pub fn from_raw(raw: u8) -> Self {
        match raw {
            0 => Self::BITS8,
            1 => Self::BITS16,
            _ => panic!("invalid scalar quantization: {}", raw),
        }
    }

    /// Storage size of one quantized component, in bytes.
    pub fn bytes_per_component(&self) -> usize {
        match self {
            Self::BITS8 => 1,
            Self::BITS16 => 2,
        }
    }
}
82
/// Spline-compressed Havok animation: tracks are stored as quantized NURBS
/// curves, split into blocks covering at most `max_frames_per_block` frames.
pub struct HavokSplineCompressedAnimation {
    /// Total clip duration. `sample` divides its `time` argument by 1000
    /// before normalizing by this, so `time` appears to be in milliseconds
    /// and `duration` in seconds — TODO confirm against callers.
    duration: f32,
    /// Number of transform (bone) tracks sampled per frame.
    number_of_transform_tracks: usize,
    /// Total number of frames in the clip.
    num_frames: usize,
    /// Number of compressed blocks stored in `data`.
    num_blocks: usize,
    /// Frames covered by one block; adjacent blocks share a boundary frame
    /// (the code strides by `max_frames_per_block - 1`).
    max_frames_per_block: usize,
    /// Byte offset added to a block's start to skip its mask/quantization
    /// header and reach the track data (see `sample`).
    mask_and_quantization_size: u32,
    /// Presumably 1 / (seconds covered by one block); used to quantize the
    /// in-block time for knot lookup.
    block_inverse_duration: f32,
    /// Seconds per frame.
    frame_duration: f32,
    /// Byte offset of each block within `data`; the top bit is masked off
    /// when the offset is used (see `compute_packed_nurbs_offsets`).
    block_offsets: Vec<u32>,
    /// Raw compressed track data for all blocks.
    data: Vec<u8>,
}
95
96impl HavokSplineCompressedAnimation {
97 pub fn new(object: Arc<RefCell<HavokObject>>) -> Self {
98 let root = object.borrow();
99
100 let duration = root.get("duration").as_real();
101 let number_of_transform_tracks = root.get("numberOfTransformTracks").as_int() as usize;
102 let num_frames = root.get("numFrames").as_int() as usize;
103 let num_blocks = root.get("numBlocks").as_int() as usize;
104 let max_frames_per_block = root.get("maxFramesPerBlock").as_int() as usize;
105 let mask_and_quantization_size = root.get("maskAndQuantizationSize").as_int() as u32;
106 let block_inverse_duration = root.get("blockInverseDuration").as_real();
107 let frame_duration = root.get("frameDuration").as_real();
108
109 let raw_block_offsets = root.get("blockOffsets").as_array();
110 let block_offsets = raw_block_offsets
111 .iter()
112 .map(|x| x.as_int() as u32)
113 .collect::<Vec<_>>();
114
115 let raw_data = root.get("data").as_array();
116 let data = raw_data
117 .iter()
118 .map(|x| x.as_int() as u8)
119 .collect::<Vec<_>>();
120
121 Self {
122 duration,
123 number_of_transform_tracks,
124 num_frames,
125 num_blocks,
126 max_frames_per_block,
127 mask_and_quantization_size,
128 block_inverse_duration,
129 frame_duration,
130 block_offsets,
131 data,
132 }
133 }
134
135 fn get_block_and_time(&self, frame: usize, delta: f32) -> (usize, f32, u8) {
136 let mut block_out = frame / (self.max_frames_per_block - 1);
137
138 block_out = cmp::max(block_out, 0);
139 block_out = cmp::min(block_out, self.num_blocks - 1);
140
141 let first_frame_of_block = block_out * (self.max_frames_per_block - 1);
142 let real_frame = (frame - first_frame_of_block) as f32 + delta;
143 let block_time_out = real_frame * self.frame_duration;
144
145 let quantized_time_out = ((block_time_out * self.block_inverse_duration)
146 * (self.max_frames_per_block as f32 - 1.)) as u8;
147
148 (block_out, block_time_out, quantized_time_out)
149 }
150
151 #[allow(non_snake_case)]
152 fn find_span(n: usize, p: usize, u: u8, U: &[u8]) -> usize {
153 if u >= U[n + 1] {
154 return n;
155 }
156 if u <= U[0] {
157 return p;
158 }
159
160 let mut low = p;
161 let mut high = n + 1;
162 let mut mid = (low + high) / 2;
163 while u < U[mid] || u >= U[mid + 1] {
164 if u < U[mid] {
165 high = mid;
166 } else {
167 low = mid;
168 }
169 mid = (low + high) / 2;
170 }
171 mid
172 }
173
174 fn read_knots(
175 data: &mut ByteReader,
176 u: u8,
177 frame_duration: f32,
178 ) -> (usize, usize, Vec<f32>, usize) {
179 let n = data.read_u16_le() as usize;
180 let p = data.read() as usize;
181 let raw = data.raw();
182 let span = Self::find_span(n, p, u, raw);
183
184 #[allow(non_snake_case)]
185 let mut U = vec![0.; 2 * p];
186
187 for i in 0..2 * p {
188 let item = raw[i + 1] as usize + span - p;
189 U[i] = (item as f32) * frame_duration;
190 }
191
192 data.seek(n + p + 2);
193
194 (n, p, U, span)
195 }
196
197 fn unpack_signed_quaternion_32(data: &[u8]) -> [f32; 4] {
198 let input = u32::from_le_bytes([data[0], data[1], data[2], data[3]]);
199
200 let low = (input & 0x3FFFF) as f32;
201 let high = ((input & 0xFFC0000) >> 18) as f32 / 1023.0;
202
203 let value = 1. - high * high;
204
205 let a = f32::sqrt(low);
206 let b = low - a * a;
207 let c = if a == 0. { f32::MAX } else { 1. / (a + a) };
208
209 let theta = a / 511.0 * (f32::consts::PI / 2.);
210 let phi = b * c * (f32::consts::PI / 2.);
211
212 let mut result = [
214 f32::sin(theta) * f32::cos(phi),
215 f32::sin(theta) * f32::sin(phi),
216 f32::cos(theta),
217 1.,
218 ];
219 for item in result.iter_mut() {
220 *item *= f32::sqrt(1. - value * value);
221 }
222 result[3] = value;
223
224 let mask = input >> 28;
225 for (i, item) in result.iter_mut().enumerate() {
226 if mask & (1 << i) != 0 {
227 *item = -*item;
228 }
229 }
230
231 result
232 }
233
234 fn unpack_signed_quaternion_40(data: &[u8]) -> [f32; 4] {
235 let permute = [256, 513, 1027, 0];
236 let data_mask_and = [4095, 4095, 4095, 0];
237 let data_mask_or = [0, 0, 0, 2047];
238 let data = [
239 u32::from_le_bytes([data[0], data[1], data[2], data[3]]),
240 u32::from_le_bytes([data[4], data[5], data[6], data[7]]),
241 ];
242
243 let mut buf = [0u32; 4];
244 unsafe {
245 let m = core::slice::from_raw_parts(
246 permute.as_ptr() as *const u8,
247 permute.len() * core::mem::size_of::<u32>(),
248 );
249 let a = core::slice::from_raw_parts(
250 data.as_ptr() as *const u8,
251 data.len() * core::mem::size_of::<u32>(),
252 );
253 let r = core::slice::from_raw_parts_mut(
254 buf.as_mut_ptr() as *mut u8,
255 buf.len() * core::mem::size_of::<u32>(),
256 );
257 for i in 0..16 {
258 r[i] = a[m[i] as usize];
259 }
260 }
261
262 let mask = 2;
263 for (i, item) in buf.iter_mut().enumerate() {
264 if mask & (1 << i) != 0 {
265 *item >>= 4;
266 }
267 }
268
269 for (i, item) in buf.iter_mut().enumerate() {
270 *item = (*item & data_mask_and[i]) | data_mask_or[i];
271 }
272
273 let mut result = [0., 0., 0., 1.];
274 for (i, &item) in buf.iter().enumerate() {
275 result[i] = ((item as f32) - 2047.0) / 2895.0;
276 }
277
278 let length_square = result.iter().map(|x| x * x).sum::<f32>();
279 let mut remaining = f32::sqrt(1. - length_square);
280 if (data[1] & 64) == 64 {
281 remaining = -remaining;
282 }
283
284 match data[1] & 48 {
285 0 => [remaining, result[0], result[1], result[2]],
286 16 => [result[0], remaining, result[1], result[2]],
287 32 => [result[0], result[1], remaining, result[2]],
288 48 => [result[0], result[1], result[2], remaining],
289 _ => panic!(),
290 }
291 }
292
293 fn unpack_signed_quaternion_48(data: &[u8]) -> [f32; 4] {
294 let data = [
295 u16::from_le_bytes([data[0], data[1]]),
296 u16::from_le_bytes([data[2], data[3]]),
297 u16::from_le_bytes([data[4], data[5]]),
298 ];
299
300 let item1 = data[0] & 0x7FFF;
301 let item2 = data[1] & 0x7FFF;
302 let item3 = data[2] & 0x7FFF;
303 let missing_index = (((data[1] & 0x8000) >> 14) | ((data[0] & 0x8000) >> 15)) as usize;
304 let mut vals = [0x3fff, 0x3fff, 0x3fff, 0x3fff];
305
306 let mut index = usize::from(missing_index == 0);
307 vals[index] = item1;
308 index += 1 + (usize::from(missing_index == index + 1));
309 vals[index] = item2;
310 index += 1 + (usize::from(missing_index == index + 1));
311 vals[index] = item3;
312
313 let mut result = [0., 0., 0., 1.];
314 for (i, &item) in vals.iter().enumerate() {
315 result[i] = ((item as f32) - 16383.0) / 23169.0;
316 }
317
318 let length_square = result.iter().map(|x| x * x).sum::<f32>();
319 let mut remaining = f32::sqrt(1. - length_square);
320 if data[2] & 0x8000 != 0 {
321 remaining = -remaining;
322 }
323
324 result[missing_index] = remaining;
325
326 result
327 }
328
329 fn unpack_quaternion(quantization: &RotationQuantization, data: &[u8]) -> [f32; 4] {
330 match quantization {
331 RotationQuantization::POLAR32 => Self::unpack_signed_quaternion_32(data),
332 RotationQuantization::THREECOMP40 => Self::unpack_signed_quaternion_40(data),
333 RotationQuantization::THREECOMP48 => Self::unpack_signed_quaternion_48(data),
334 _ => panic!(),
335 }
336 }
337
338 fn read_packed_quaternions(
339 quantization: RotationQuantization,
340 data: &mut ByteReader,
341 n: usize,
342 p: usize,
343 span: usize,
344 ) -> Vec<[f32; 4]> {
345 data.align(quantization.align());
346 let bytes_per_quaternion = quantization.bytes_per_quaternion();
347
348 let mut result = Vec::new();
349 for i in 0..(p + 1) {
350 result.push(Self::unpack_quaternion(
351 &quantization,
352 &data.raw()[bytes_per_quaternion * (i + span - p)..],
353 ));
354 }
355
356 data.seek(bytes_per_quaternion * (n + 1));
357
358 result
359 }
360
361 fn unpack_vec_8(min_p: [f32; 4], max_p: [f32; 4], vals: &[u8]) -> [f32; 4] {
362 let mut result = [0., 0., 0., 1.];
363 for i in 0..4 {
364 result[i] = ((vals[i] as f32) / 255.) * (max_p[i] - min_p[i]) + min_p[i];
365 }
366
367 result
368 }
369
370 fn unpack_vec_16(min_p: [f32; 4], max_p: [f32; 4], vals: &[u16]) -> [f32; 4] {
371 let mut result = [0., 0., 0., 1.];
372 for i in 0..4 {
373 result[i] = ((vals[i] as f32) / 65535.) * (max_p[i] - min_p[i]) + min_p[i];
374 }
375
376 result
377 }
378
379 #[allow(non_snake_case)]
380 fn recompose(stat_mask: u8, dyn_mask: u8, S: [f32; 4], I: [f32; 4], in_out: &mut [f32; 4]) {
381 for i in 0..4 {
382 if stat_mask & (1 << i) != 0 {
383 in_out[i] = S[i];
384 }
385 }
386
387 for i in 0..4 {
388 if dyn_mask & (1 << i) != 0 {
389 in_out[i] = I[i];
390 }
391 }
392 }
393
394 #[allow(non_snake_case)]
395 fn evaluate(time: f32, p: usize, U: &[f32], P: &[[f32; 4]]) -> [f32; 4] {
396 if p > 3 {
397 panic!()
398 }
399
400 let mut result = [0., 0., 0., 0.];
401 if p == 1 {
402 let t = (time - U[0]) / (U[1] - U[0]);
403
404 for (i, item) in result.iter_mut().enumerate() {
405 *item = P[0][i] + t * (P[1][i] - P[0][i]);
406 }
407 } else {
408 let p_minus_1 = p - 1;
410 let mut values = [1.; 16];
411 let mut low = [0.; 16];
412 let mut high = [0.; 16];
413
414 for i in 1..(p + 1) {
415 high[4 * i] = time - U[p_minus_1 + 1 - i];
416 low[4 * i] = U[i + p_minus_1] - time;
417 let mut val = 0.;
418 for j in 0..i {
419 let a = low[4 * (j + 1)] + high[4 * (i - j)];
420 let b = values[4 * j] / a;
421 let c = low[4 * (j + 1)] * b;
422 values[4 * j] = val + c;
423 val = high[4 * (i - j)] * b;
424 }
425 values[4 * i] = val;
426 }
427 for i in 0..(p + 1) {
428 for (j, item) in result.iter_mut().enumerate() {
429 *item += values[4 * i] * P[i][j];
430 }
431 }
432 }
433
434 result
435 }
436
437 fn compute_packed_nurbs_offsets<'a>(base: &'a [u8], p: &[u32], o2: usize, o3: u32) -> &'a [u8] {
438 let offset = (p[o2] + (o3 & 0x7fff_ffff)) as usize;
439
440 &base[offset..]
441 }
442
443 fn unpack_quantization_types(
444 packed_quantization_types: u8,
445 ) -> (ScalarQuantization, RotationQuantization, ScalarQuantization) {
446 let translation = ScalarQuantization::from_raw(packed_quantization_types & 0x03);
447 let rotation = RotationQuantization::from_raw((packed_quantization_types >> 2) & 0x0F);
448 let scale = ScalarQuantization::from_raw((packed_quantization_types >> 6) & 0x03);
449
450 (translation, rotation, scale)
451 }
452
453 fn sample_translation(
454 &self,
455 quantization: ScalarQuantization,
456 time: f32,
457 quantized_time: u8,
458 mask: u8,
459 data: &mut ByteReader,
460 ) -> [f32; 4] {
461 let result = if mask != 0 {
462 Self::read_nurbs_curve(
463 quantization,
464 data,
465 quantized_time,
466 self.frame_duration,
467 time,
468 mask,
469 [0., 0., 0., 0.],
470 )
471 } else {
472 [0., 0., 0., 0.]
473 };
474
475 data.align(4);
476
477 result
478 }
479
480 fn sample_rotation(
481 &self,
482 quantization: RotationQuantization,
483 time: f32,
484 quantized_time: u8,
485 mask: u8,
486 data: &mut ByteReader,
487 ) -> [f32; 4] {
488 let result = Self::read_nurbs_quaternion(
489 quantization,
490 data,
491 quantized_time,
492 self.frame_duration,
493 time,
494 mask,
495 );
496
497 data.align(4);
498
499 result
500 }
501
502 fn sample_scale(
503 &self,
504 quantization: ScalarQuantization,
505 time: f32,
506 quantized_time: u8,
507 mask: u8,
508 data: &mut ByteReader,
509 ) -> [f32; 4] {
510 let result = if mask != 0 {
511 Self::read_nurbs_curve(
512 quantization,
513 data,
514 quantized_time,
515 self.frame_duration,
516 time,
517 mask,
518 [1., 1., 1., 1.],
519 )
520 } else {
521 [1., 1., 1., 1.]
522 };
523
524 data.align(4);
525
526 result
527 }
528
    /// Samples one scalar (translation/scale) NURBS track at parameter `u`.
    ///
    /// `mask` layout, as used by the branches below: low-nibble bits mark
    /// statically stored components, high-nibble bits mark animated
    /// (curve-sampled) components; a component in neither nibble falls back
    /// to the caller-supplied default `I` (zeros for translation, ones for
    /// scale). The reader is left positioned just past this track's data.
    #[allow(non_snake_case)]
    fn read_nurbs_curve(
        quantization: ScalarQuantization,
        data: &mut ByteReader,
        quantized_time: u8,
        frame_duration: f32,
        u: f32,
        mask: u8,
        I: [f32; 4],
    ) -> [f32; 4] {
        let mut max_p = [0., 0., 0., 1.];
        let mut min_p = [0., 0., 0., 1.];
        let mut S = [0., 0., 0., 1.];

        // A knot header is only present when at least one component is animated.
        let (n, p, U, span) = if mask & 0xf0 != 0 {
            Self::read_knots(data, quantized_time, frame_duration)
        } else {
            (0, 0, vec![0.; 10], 0)
        };
        data.align(4);

        // Per component: either a static float, or a (min, max) dequantization
        // range for the animated control points. Only x/y/z are stored.
        for i in 0..3 {
            if (1 << i) & mask != 0 {
                S[i] = data.read_f32_le();
            } else if (1 << (i + 4)) & mask != 0 {
                min_p[i] = data.read_f32_le();
                max_p[i] = data.read_f32_le();
            }
        }

        let stat_mask = mask & 0x0f;
        // Bit i set iff component i is neither static nor animated: such
        // components take the caller's default from `I` in `recompose`.
        let dyn_mask = (!mask >> 4) & (!mask & 0x0f);

        if mask & 0xf0 != 0 {
            let bytes_per_component = quantization.bytes_per_component();
            data.align(2);

            // Number of animated components = popcount of bits 4-6 of the
            // mask (bit 7 is ignored; only x/y/z participate, see `.take(3)`).
            let sizes = [0, 1, 1, 2, 1, 2, 2, 3];
            let size = sizes[((mask >> 4) & 7) as usize];
            // Secondary cursor into the control-point array: the points for
            // the current span start `span - p` points in. The primary
            // cursor stays put so it can skip the whole track afterwards.
            let mut new_data = data.clone();
            new_data.seek(bytes_per_component * size * (span - p));

            // Read the p + 1 control points that influence this span,
            // dequantize them, then overlay statics/defaults per component.
            let mut P = [[0., 0., 0., 1.]; 4];
            for pv in P.iter_mut().take(p + 1) {
                match quantization {
                    ScalarQuantization::BITS8 => {
                        let mut vals = [0; 4];
                        for (j, item) in vals.iter_mut().enumerate().take(3) {
                            if (1 << (j + 4)) & mask != 0 {
                                *item = new_data.read();
                            }
                        }

                        *pv = Self::unpack_vec_8(min_p, max_p, &vals);
                    }
                    ScalarQuantization::BITS16 => {
                        let mut vals = [0; 4];
                        for (j, item) in vals.iter_mut().enumerate().take(3) {
                            if (1 << (j + 4)) & mask != 0 {
                                *item = new_data.read_u16_le();
                            }
                        }

                        *pv = Self::unpack_vec_16(min_p, max_p, &vals);
                    }
                }

                Self::recompose(stat_mask, dyn_mask, S, I, pv);
            }

            let result = Self::evaluate(u, p, &U, &P);

            // Skip the remaining control points of this track.
            data.seek(bytes_per_component * size * (n + 1));

            result
        } else {
            // Fully static track: start from the defaults, then apply the
            // static components (dyn_mask re-applies `I` where needed).
            let mut result = I;
            Self::recompose(stat_mask, dyn_mask, S, I, &mut result);

            result
        }
    }
611
612 #[allow(non_snake_case)]
613 fn read_nurbs_quaternion(
614 quantization: RotationQuantization,
615 data: &mut ByteReader,
616 quantized_time: u8,
617 frame_duration: f32,
618 u: f32,
619 mask: u8,
620 ) -> [f32; 4] {
621 if mask & 0xf0 != 0 {
622 let (n, p, U, span) = Self::read_knots(data, quantized_time, frame_duration);
623 let P = Self::read_packed_quaternions(quantization, data, n, p, span);
624 Self::evaluate(u, p, &U, &P)
625 } else if mask & 0x0f != 0 {
626 data.align(quantization.align());
627 let result = Self::unpack_quaternion(&quantization, data.raw());
628 data.seek(quantization.bytes_per_quaternion());
629
630 result
631 } else {
632 [0., 0., 0., 1.]
633 }
634 }
635}
636
637impl HavokAnimation for HavokSplineCompressedAnimation {
638 fn duration(&self) -> f32 {
639 self.duration
640 }
641
642 fn sample(&self, time: f32) -> Vec<HavokTransform> {
643 let frame_float = ((time / 1000.) / self.duration) * (self.num_frames as f32 - 1.);
644 let frame = frame_float as usize;
645 let delta = frame_float - frame as f32;
646
647 let (block, block_time, quantized_time) = self.get_block_and_time(frame, delta);
648
649 let mut data = ByteReader::new(Self::compute_packed_nurbs_offsets(
650 &self.data,
651 &self.block_offsets,
652 block,
653 self.mask_and_quantization_size,
654 ));
655 let mut mask = ByteReader::new(Self::compute_packed_nurbs_offsets(
656 &self.data,
657 &self.block_offsets,
658 block,
659 0x8000_0000,
660 ));
661
662 let mut result = Vec::with_capacity(self.number_of_transform_tracks);
663 for _ in 0..self.number_of_transform_tracks {
664 let packed_quantization_types = mask.read();
665
666 let (translation_type, rotation_type, scale_type) =
667 Self::unpack_quantization_types(packed_quantization_types);
668
669 let translation = self.sample_translation(
670 translation_type,
671 block_time,
672 quantized_time,
673 mask.read(),
674 &mut data,
675 );
676 let rotation = self.sample_rotation(
677 rotation_type,
678 block_time,
679 quantized_time,
680 mask.read(),
681 &mut data,
682 );
683 let scale = self.sample_scale(
684 scale_type,
685 block_time,
686 quantized_time,
687 mask.read(),
688 &mut data,
689 );
690
691 result.push(HavokTransform::from_trs(translation, rotation, scale));
692 }
693
694 result
695 }
696}