File: | client/cinematic/cl_cinematic_ogm.cpp |
Location: | line 310, column 4 |
Description: | Value stored to 'usedBytes' is never read |
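
For readers unfamiliar with this diagnostic: it is clang's dead-store check, raised when a value assigned to a local variable is never read afterwards. A minimal, self-contained reproduction of the pattern is sketched below; decode_chunk() is a hypothetical stand-in, not a function from this file, and running clang's analyzer (e.g. clang --analyze) over something like it should produce the same message.

	/* Hypothetical sketch of the flagged pattern (not from cl_cinematic_ogm.cpp). */
	static int decode_chunk(const unsigned char* data, int len)
	{
		(void)data;      /* pretend we consumed half of the input */
		return len / 2;
	}

	int main(void)
	{
		unsigned char packet[32] = {0};
		int usedBytes = decode_chunk(packet, (int)sizeof(packet));

		/* "Value stored to 'usedBytes' is never read": the += result is stored and then dropped. */
		usedBytes += decode_chunk(packet + usedBytes, (int)sizeof(packet) - usedBytes);

		return 0;
	}
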
1 | /** |
2 | * @file |
3 | * |
4 | * @note Taken from World Of Padman Engine |
5 | * @note This is an "ogm" decoder, used as a "better" (smaller files, higher resolutions) cinematic format than roq |
6 | * In this code "ogm" is only: ogg wrapper, vorbis audio, xvid video (or theora video) |
7 | * (ogm (Ogg Media) in general is an ogg wrapper with all kinds of audio/video/subtitle/...) |
8 | */ |
9 | |
10 | #include "cl_cinematic_ogm.h" |
11 | #include "cl_cinematic.h" |
12 | #include "../client.h" |
13 | #include "../renderer/r_draw.h" |
14 | #include "../sound/s_main.h" |
15 | #include "../sound/s_music.h" |
16 | |
17 | #include <ogg/ogg.h> |
18 | #include <vorbis/codec.h> |
19 | |
20 | #ifdef HAVE_XVID_H |
21 | #include <xvid.h> |
22 | #endif |
23 | #ifdef HAVE_THEORA_THEORA_H |
24 | #include <theora/theora.h> |
25 | #endif |
26 | |
27 | |
28 | typedef struct { |
29 | long vr[256]; |
30 | long ug[256]; |
31 | long vg[256]; |
32 | long ub[256]; |
33 | long yy[256]; |
34 | } yuvTable_t; |
35 | |
36 | #define OGG_BUFFER_SIZE (8 * 1024) |
37 | |
38 | typedef struct |
39 | { |
40 | qFILE ogmFile; |
41 | |
42 | ogg_sync_state oy; /**< sync and verify incoming physical bitstream */ |
43 | ogg_stream_state os_audio; |
44 | ogg_stream_state os_video; |
45 | |
46 | vorbis_dsp_state vd; /**< central working state for the packet->PCM decoder */ |
47 | vorbis_info vi; /**< struct that stores all the static vorbis bitstream settings */ |
48 | vorbis_comment vc; /**< struct that stores all the bitstream user comments */ |
49 | |
50 | /** @todo at the moment there isn't really a check for this (all "video" streams are handled |
51 | * as xvid, because xvid supports more than one "subtype") */ |
52 | bool videoStreamIsXvid; |
53 | #ifdef HAVE_XVID_H |
54 | xvid_dec_stats_t xvidDecodeStats; |
55 | void *xvidDecodeHandle; |
56 | #endif |
57 | bool videoStreamIsTheora; |
58 | #ifdef HAVE_THEORA_THEORA_H |
59 | theora_info th_info; |
60 | theora_comment th_comment; |
61 | theora_state th_state; |
62 | |
63 | yuv_buffer th_yuvbuffer; |
64 | #endif |
65 | |
66 | unsigned* outputBuffer; |
67 | int outputWidth; |
68 | int outputHeight; |
69 | int outputBufferSize; /**< in pixels (so "real byte size" = outputBufferSize * 4) */ |
70 | int videoFrameCount; /**< output video-stream */ |
71 | ogg_int64_t Vtime_unit; |
72 | int currentTime; /**< input from run-function */ |
73 | int startTime; |
74 | |
75 | musicStream_t musicStream; |
76 | } ogmCinematic_t; |
77 | |
78 | static yuvTable_t ogmCin_yuvTable; |
79 | |
80 | #define OGMCIN (*((ogmCinematic_t*)cin->codecData)) |
81 | |
82 | #ifdef HAVE_XVID_H |
83 | |
84 | static int CIN_XVID_Init (cinematic_t *cin) |
85 | { |
86 | int ret; |
87 | |
88 | xvid_gbl_init_t xvid_gbl_init; |
89 | xvid_dec_create_t xvid_dec_create; |
90 | |
91 | /* Reset the structure with zeros */ |
92 | OBJZERO(xvid_gbl_init); |
93 | OBJZERO(xvid_dec_create); |
94 | |
95 | /* Version */ |
96 | xvid_gbl_init.version = XVID_VERSION; |
97 | |
98 | xvid_gbl_init.cpu_flags = 0; |
99 | xvid_gbl_init.debug = 0; |
100 | |
101 | xvid_global(NULL, 0, &xvid_gbl_init, NULL); |
102 | |
103 | /* Version */ |
104 | xvid_dec_create.version = XVID_VERSION; |
105 | |
106 | /* Image dimensions -- set to 0, xvidcore will resize whenever it is needed */ |
107 | xvid_dec_create.width = 0; |
108 | xvid_dec_create.height = 0; |
109 | |
110 | ret = xvid_decore(NULL, XVID_DEC_CREATE, &xvid_dec_create, NULL); |
111 | |
112 | OGMCIN.xvidDecodeHandle = xvid_dec_create.handle; |
113 | |
114 | return ret; |
115 | } |
116 | |
117 | static int CIN_XVID_Decode (cinematic_t *cin, unsigned char *input, int inputSize) |
118 | { |
119 | int ret; |
120 | |
121 | xvid_dec_frame_t xvid_dec_frame; |
122 | |
123 | /* Reset all structures */ |
124 | OBJZERO(xvid_dec_frame); |
125 | OBJZERO(OGMCIN.xvidDecodeStats); |
126 | |
127 | /* Set version */ |
128 | xvid_dec_frame.version = XVID_VERSION; |
129 | OGMCIN.xvidDecodeStats.version = XVID_VERSION; |
130 | |
131 | /* No general flags to set */ |
132 | xvid_dec_frame.general = XVID_LOWDELAY; |
133 | |
134 | /* Input stream */ |
135 | xvid_dec_frame.bitstream = input; |
136 | xvid_dec_frame.length = inputSize; |
137 | |
138 | /* Output frame structure */ |
139 | xvid_dec_frame.output.plane[0] = OGMCIN.outputBuffer; |
140 | xvid_dec_frame.output.stride[0] = OGMCIN.outputWidth * sizeof(*OGMCIN.outputBuffer); |
141 | if (OGMCIN.outputBuffer == NULL) |
142 | xvid_dec_frame.output.csp = XVID_CSP_NULL; |
143 | else |
144 | xvid_dec_frame.output.csp = XVID_CSP_RGBA; |
145 | |
146 | ret = xvid_decore(OGMCIN.xvidDecodeHandle, XVID_DEC_DECODE, &xvid_dec_frame, &OGMCIN.xvidDecodeStats); |
147 | |
148 | return ret; |
149 | } |
150 | |
151 | static int CIN_XVID_Shutdown (cinematic_t *cin) |
152 | { |
153 | int ret = 0; |
154 | |
155 | if (OGMCIN.xvidDecodeHandle) |
156 | ret = xvid_decore(OGMCIN.xvidDecodeHandle, XVID_DEC_DESTROY, NULL, NULL); |
157 | |
158 | return ret; |
159 | } |
160 | #endif |
161 | |
162 | /** |
163 | * @returns !0 -> no data transferred |
164 | */ |
165 | static int CIN_OGM_LoadBlockToSync (cinematic_t *cin) |
166 | { |
167 | int r = -1; |
168 | |
169 | if (OGMCIN.ogmFile.f || OGMCIN.ogmFile.z) { |
170 | char *buffer = ogg_sync_buffer(&OGMCIN.oy, OGG_BUFFER_SIZE); |
171 | const int bytes = FS_Read(buffer, OGG_BUFFER_SIZE, &OGMCIN.ogmFile); |
172 | if (bytes > 0) |
173 | ogg_sync_wrote(&OGMCIN.oy, bytes); |
174 | |
175 | r = (bytes == 0); |
176 | } |
177 | |
178 | return r; |
179 | } |
180 | |
181 | /** |
182 | * @return !0 -> no data transferred (or not for all streams) |
183 | */ |
184 | static int CIN_OGM_LoadPagesToStream (cinematic_t *cin) |
185 | { |
186 | int r = -1; |
187 | int audioPages = 0; |
188 | int videoPages = 0; |
189 | ogg_stream_state* osptr = NULL; |
190 | ogg_page og; |
191 | |
192 | while (!audioPages || !videoPages) { |
193 | if (ogg_sync_pageout(&OGMCIN.oy, &og) != 1) |
194 | break; |
195 | |
196 | if (OGMCIN.os_audio.serialno == ogg_page_serialno(&og)) { |
197 | osptr = &OGMCIN.os_audio; |
198 | ++audioPages; |
199 | } |
200 | if (OGMCIN.os_video.serialno == ogg_page_serialno(&og)) { |
201 | osptr = &OGMCIN.os_video; |
202 | ++videoPages; |
203 | } |
204 | |
205 | if (osptr != NULL) { |
206 | ogg_stream_pagein(osptr, &og); |
207 | } |
208 | } |
209 | |
210 | if (audioPages && videoPages) |
211 | r = 0; |
212 | |
213 | return r; |
214 | } |
215 | |
216 | #define SIZEOF_RAWBUFF SAMPLE_SIZE * 1024 |
217 | static byte rawBuffer[SIZEOF_RAWBUFF]; |
218 | |
219 | /** |
220 | * @return true if audio wants more packets |
221 | */ |
222 | static bool CIN_OGM_LoadAudioFrame (cinematic_t *cin) |
223 | { |
224 | vorbis_block vb; |
225 | |
226 | OBJZERO(vb); |
227 | vorbis_block_init(&OGMCIN.vd, &vb); |
228 | |
229 | while (OGMCIN.currentTime > (int) (OGMCIN.vd.granulepos * 1000 / OGMCIN.vi.rate)) { |
230 | float **pcm; |
231 | const int samples = vorbis_synthesis_pcmout(&OGMCIN.vd, &pcm); |
232 | |
233 | if (samples > 0) { |
234 | /* vorbis -> raw */ |
235 | const int width = 2; |
236 | const int channel = 2; |
237 | int samplesNeeded = sizeof(rawBuffer) / (width * channel); |
238 | const float *left = pcm[0]; |
239 | const float *right = (OGMCIN.vi.channels > 1) ? pcm[1] : pcm[0]; |
240 | short *ptr = (short*)rawBuffer; |
241 | int i; |
242 | |
243 | if (samples < samplesNeeded) |
244 | samplesNeeded = samples; |
245 | |
246 | for (i = 0; i < samplesNeeded; ++i, ptr += channel) { |
247 | ptr[0] = (left[i] >= -1.0f && left[i] <= 1.0f) ? left[i] * 32767.f : 32767 * ((left[i] > 0.0f) - (left[i] < 0.0f)); |
248 | ptr[1] = (right[i] >= -1.0f && right[i] <= 1.0f) ? right[i] * 32767.f : 32767 * ((right[i] > 0.0f) - (right[i] < 0.0f)); |
249 | } |
250 | |
251 | /* tell libvorbis how many samples we actually consumed */ |
252 | vorbis_synthesis_read(&OGMCIN.vd, i); |
253 | |
254 | if (!cin->noSound) |
255 | M_AddToSampleBuffer(&OGMCIN.musicStream, OGMCIN.vi.rate, i, rawBuffer); |
256 | } else { |
257 | ogg_packet op; |
258 | /* op -> vorbis */ |
259 | if (ogg_stream_packetout(&OGMCIN.os_audio, &op)) { |
260 | if (vorbis_synthesis(&vb, &op) == 0) |
261 | vorbis_synthesis_blockin(&OGMCIN.vd, &vb); |
262 | } else |
263 | break; |
264 | } |
265 | } |
266 | |
267 | vorbis_block_clear(&vb); |
268 | |
269 | return OGMCIN.currentTime > (int)(OGMCIN.vd.granulepos * 1000 / OGMCIN.vi.rate); |
270 | } |
271 | |
272 | /** |
273 | * @return 1 -> loaded a new frame (OGMCIN.outputBuffer points to the actual frame), 0 -> no new frame |
274 | * <0 -> error |
275 | */ |
276 | #ifdef HAVE_XVID_H |
277 | static int CIN_XVID_LoadVideoFrame (cinematic_t *cin) |
278 | { |
279 | int r = 0; |
280 | ogg_packet op; |
281 | |
282 | OBJZERO(op); |
283 | |
284 | while (!r && (ogg_stream_packetout(&OGMCIN.os_video, &op))) { |
285 | int usedBytes = CIN_XVID_Decode(cin, op.packet, op.bytes); |
286 | if (OGMCIN.xvidDecodeStats.type == XVID_TYPE_VOL) { |
287 | if (OGMCIN.outputWidth != OGMCIN.xvidDecodeStats.data.vol.width || OGMCIN.outputHeight |
288 | != OGMCIN.xvidDecodeStats.data.vol.height) { |
289 | OGMCIN.outputWidth = OGMCIN.xvidDecodeStats.data.vol.width; |
290 | OGMCIN.outputHeight = OGMCIN.xvidDecodeStats.data.vol.height; |
291 | Com_DPrintf(DEBUG_CLIENT, "[XVID]new resolution %dx%d\n", OGMCIN.outputWidth, OGMCIN.outputHeight); |
292 | } |
293 | |
294 | if (OGMCIN.outputBufferSize < OGMCIN.xvidDecodeStats.data.vol.width * OGMCIN.xvidDecodeStats.data.vol.height) { |
295 | OGMCIN.outputBufferSize = OGMCIN.xvidDecodeStats.data.vol.width * OGMCIN.xvidDecodeStats.data.vol.height; |
296 | |
297 | /* Free old output buffer*/ |
298 | Mem_Free(OGMCIN.outputBuffer); |
299 | |
300 | /* Allocate the new buffer */ |
301 | OGMCIN.outputBuffer = Mem_PoolAllocTypeN(unsigned, OGMCIN.outputBufferSize, cl_genericPool); |
302 | if (OGMCIN.outputBuffer == NULL) { |
303 | OGMCIN.outputBufferSize = 0; |
304 | r = -2; |
305 | break; |
306 | } |
307 | } |
308 | |
309 | /* use the rest of this packet */ |
310 | usedBytes += CIN_XVID_Decode(cin, op.packet + usedBytes, op.bytes - usedBytes); |
Value stored to 'usedBytes' is never read | |
311 | } |
312 | |
313 | /* we got a real output frame ... */ |
314 | if (OGMCIN.xvidDecodeStats.type > 0) { |
315 | r = 1; |
316 | |
317 | ++OGMCIN.videoFrameCount; |
318 | } |
319 | } |
320 | |
321 | return r; |
322 | } |
323 | #endif |
324 | |
325 | #ifdef HAVE_THEORA_THEORA_H |
326 | /** |
327 | * @brief how many >> are needed to make y == x (shifting y >> i) |
328 | * @return -1 -> no match, >=0 -> number of shifts |
329 | */ |
330 | static int CIN_THEORA_FindSizeShift (int x, int y) |
331 | { |
332 | int i; |
333 | |
334 | for (i = 0; (y >> i); ++i) |
335 | if (x == (y >> i)) |
336 | return i; |
337 | |
338 | return -1; |
339 | } |
340 | |
341 | |
342 | /** |
343 | * @brief Clamps integer value into byte |
344 | */ |
345 | static inline byte CIN_THEORA_ClampByte (int value) |
346 | { |
347 | if (value < 0) |
348 | return 0; |
349 | |
350 | if (value > 255) |
351 | return 255; |
352 | |
353 | return value; |
354 | } |
355 | |
356 | static void CIN_THEORA_FrameYUVtoRGB24 (const unsigned char* y, const unsigned char* u, const unsigned char* v, int width, |
357 | int height, int y_stride, int uv_stride, int yWShift, int uvWShift, int yHShift, int uvHShift, |
358 | uint32_t* output) |
359 | { |
360 | int i, j; |
361 | |
362 | for (j = 0; j < height; ++j) { |
363 | for (i = 0; i < width; ++i) { |
364 | const long YY = (long) (ogmCin_yuvTable.yy[(y[(i >> yWShift) + (j >> yHShift) * y_stride])]); |
365 | const int uvI = (i >> uvWShift) + (j >> uvHShift) * uv_stride; |
366 | |
367 | const byte r = CIN_THEORA_ClampByte((YY + ogmCin_yuvTable.vr[v[uvI]]) >> 6); |
368 | const byte g = CIN_THEORA_ClampByte((YY + ogmCin_yuvTable.ug[u[uvI]] + ogmCin_yuvTable.vg[v[uvI]]) >> 6); |
369 | const byte b = CIN_THEORA_ClampByte((YY + ogmCin_yuvTable.ub[u[uvI]]) >> 6); |
370 | |
371 | const uint32_t rgb24 = LittleLong(r | (g << 8) | (b << 16) | (255 << 24)); |
372 | *output++ = rgb24; |
373 | } |
374 | } |
375 | } |
376 | |
377 | static int CIN_THEORA_NextNeededFrame (cinematic_t *cin) |
378 | { |
379 | return (int) (OGMCIN.currentTime * (ogg_int64_t) 10000 / OGMCIN.Vtime_unit); |
380 | } |
381 | |
382 | /** |
383 | * @return 1 -> loaded a new frame (OGMCIN.outputBuffer points to the actual frame) |
384 | * 0 -> no new frame <0 -> error |
385 | */ |
386 | static int CIN_THEORA_LoadVideoFrame (cinematic_t *cin) |
387 | { |
388 | int r = 0; |
389 | ogg_packet op; |
390 | |
391 | OBJZERO(op); |
392 | |
393 | while (!r && (ogg_stream_packetout(&OGMCIN.os_video, &op))) { |
394 | ogg_int64_t th_frame; |
395 | theora_decode_packetin(&OGMCIN.th_state, &op); |
396 | |
397 | th_frame = theora_granule_frame(&OGMCIN.th_state, OGMCIN.th_state.granulepos); |
398 | |
399 | if ((OGMCIN.videoFrameCount < th_frame && th_frame >= CIN_THEORA_NextNeededFrame(cin)) || !OGMCIN.outputBuffer) { |
400 | int yWShift, uvWShift; |
401 | int yHShift, uvHShift; |
402 | |
403 | if (theora_decode_YUVout(&OGMCIN.th_state, &OGMCIN.th_yuvbuffer)) |
404 | continue; |
405 | |
406 | if (OGMCIN.outputWidth != OGMCIN.th_info.width || OGMCIN.outputHeight != OGMCIN.th_info.height) { |
407 | OGMCIN.outputWidth = OGMCIN.th_info.width; |
408 | OGMCIN.outputHeight = OGMCIN.th_info.height; |
409 | Com_DPrintf(DEBUG_CLIENT, "[Theora(ogg)]new resolution %dx%d\n", OGMCIN.outputWidth, OGMCIN.outputHeight); |
410 | } |
411 | |
412 | if (OGMCIN.outputBufferSize < OGMCIN.th_info.width * OGMCIN.th_info.height) { |
413 | OGMCIN.outputBufferSize = OGMCIN.th_info.width * OGMCIN.th_info.height; |
414 | |
415 | /* Free old output buffer*/ |
416 | Mem_Free(OGMCIN.outputBuffer); |
417 | |
418 | /* Allocate the new buffer */ |
419 | OGMCIN.outputBuffer = Mem_PoolAllocTypeN(unsigned, OGMCIN.outputBufferSize, cl_genericPool); |
420 | if (OGMCIN.outputBuffer == NULL) { |
421 | OGMCIN.outputBufferSize = 0; |
422 | r = -2; |
423 | break; |
424 | } |
425 | } |
426 | |
427 | yWShift = CIN_THEORA_FindSizeShift(OGMCIN.th_yuvbuffer.y_width, OGMCIN.th_info.width); |
428 | uvWShift = CIN_THEORA_FindSizeShift(OGMCIN.th_yuvbuffer.uv_width, OGMCIN.th_info.width); |
429 | yHShift = CIN_THEORA_FindSizeShift(OGMCIN.th_yuvbuffer.y_height, OGMCIN.th_info.height); |
430 | uvHShift = CIN_THEORA_FindSizeShift(OGMCIN.th_yuvbuffer.uv_height, OGMCIN.th_info.height); |
431 | |
432 | if (yWShift < 0 || uvWShift < 0 || yHShift < 0 || uvHShift < 0) { |
433 | Com_Printf("[Theora] unexpected resolution in a yuv-frame\n"); |
434 | r = -1; |
435 | } else { |
436 | CIN_THEORA_FrameYUVtoRGB24(OGMCIN.th_yuvbuffer.y, OGMCIN.th_yuvbuffer.u, OGMCIN.th_yuvbuffer.v, |
437 | OGMCIN.th_info.width, OGMCIN.th_info.height, OGMCIN.th_yuvbuffer.y_stride, |
438 | OGMCIN.th_yuvbuffer.uv_stride, yWShift, uvWShift, yHShift, uvHShift, |
439 | OGMCIN.outputBuffer); |
440 | |
441 | r = 1; |
442 | OGMCIN.videoFrameCount = th_frame; |
443 | } |
444 | } |
445 | } |
446 | |
447 | return r; |
448 | } |
449 | #endif |
450 | |
451 | /** |
452 | * @return 1 -> loaded a new frame (OGMCIN.outputBuffer points to the actual frame), 0 -> no new frame |
453 | * <0 -> error |
454 | */ |
455 | static int CIN_OGM_LoadVideoFrame (cinematic_t *cin) |
456 | { |
457 | #ifdef HAVE_XVID_H |
458 | if (OGMCIN.videoStreamIsXvid) |
459 | return CIN_XVID_LoadVideoFrame(cin); |
460 | #endif |
461 | #ifdef HAVE_THEORA_THEORA_H |
462 | if (OGMCIN.videoStreamIsTheora) |
463 | return CIN_THEORA_LoadVideoFrame(cin); |
464 | #endif |
465 | |
466 | /* if we get to this point, there is no codec that uses the stream content ... */ |
467 | if (OGMCIN.os_video.serialno) { |
468 | ogg_packet op; |
469 | |
470 | while (ogg_stream_packetout(&OGMCIN.os_video, &op)) |
471 | ; |
472 | } |
473 | |
474 | return 1; |
475 | } |
476 | |
477 | /** |
478 | * @return true => no data transferred |
479 | */ |
480 | static bool CIN_OGM_LoadFrame (cinematic_t *cin) |
481 | { |
482 | bool anyDataTransferred = true; |
483 | bool needVOutputData = true; |
484 | bool audioWantsMoreData = false; |
485 | int status; |
486 | |
487 | while (anyDataTransferred && (needVOutputData || audioWantsMoreData)) { |
488 | anyDataTransferred = false; |
489 | if (needVOutputData && (status = CIN_OGM_LoadVideoFrame(cin))) { |
490 | needVOutputData = false; |
491 | if (status > 0) |
492 | anyDataTransferred = true; |
493 | else |
494 | /* error (we don't need any video data and none was transferred) */ |
495 | anyDataTransferred = false; |
496 | } |
497 | |
498 | if (needVOutputData || audioWantsMoreData) { |
499 | /* try to transfer Pages to the audio- and video-Stream */ |
500 | if (CIN_OGM_LoadPagesToStream(cin)) |
501 | /* try to load a datablock from file */ |
502 | anyDataTransferred |= !CIN_OGM_LoadBlockToSync(cin); |
503 | else |
504 | /* successful loadPagesToStreams() */ |
505 | anyDataTransferred = true; |
506 | } |
507 | |
508 | /* load all audio after loading new pages ... */ |
509 | if (OGMCIN.videoFrameCount > 1) |
510 | /* wait some video frames (it's better to have some delay than laggy sound) */ |
511 | audioWantsMoreData = CIN_OGM_LoadAudioFrame(cin); |
512 | } |
513 | |
514 | return !anyDataTransferred; |
515 | } |
516 | |
517 | #ifdef HAVE_XVID_H |
518 | /** @brief from VLC ogg.c (http://trac.videolan.org/vlc/browser/modules/demux/ogg.c) */ |
519 | typedef struct |
520 | { |
521 | char streamtype[8]; |
522 | char subtype[4]; |
523 | |
524 | ogg_int32_t size; /* size of the structure */ |
525 | |
526 | /* in 10^-7 seconds (dT between frames) */ |
527 | ogg_int64_t time_unit; /* in reference time */ |
528 | ogg_int64_t samples_per_unit; |
529 | ogg_int32_t default_len; /* in media time */ |
530 | |
531 | ogg_int32_t buffersize; |
532 | ogg_int16_t bits_per_sample; |
533 | union |
534 | { |
535 | struct |
536 | { |
537 | ogg_int32_t width; |
538 | ogg_int32_t height; |
539 | } stream_header_video; |
540 | |
541 | struct |
542 | { |
543 | ogg_int16_t channels; |
544 | ogg_int16_t blockalign; |
545 | ogg_int32_t avgbytespersec; |
546 | } stream_header_audio; |
547 | } sh; |
548 | } stream_header_t; |
549 | #endif |
550 | |
551 | /** |
552 | * @return 0 -> no problem |
553 | * @todo vorbis/theora-header & init in sub-functions |
554 | * @todo "clean" error-returns ... |
555 | */ |
556 | int CIN_OGM_OpenCinematic (cinematic_t *cin, const char* filename) |
557 | { |
558 | int status; |
559 | ogg_page og; |
560 | ogg_packet op; |
561 | int i; |
562 | |
563 | if (cin->codecData && (OGMCIN.ogmFile.f || OGMCIN.ogmFile.z)) { |
564 | Com_Printf("WARNING: it seems there was already an ogm running, it will be killed to start %s\n", filename); |
565 | CIN_OGM_CloseCinematic(cin); |
566 | } |
567 | |
568 | /* alloc memory for decoding of this video */ |
569 | assert(cin->codecData == NULL); |
570 | cin->codecData = Mem_PoolAllocType(ogmCinematic_t, vid_genericPool); |
571 | |
572 | if (FS_OpenFile(filename, &OGMCIN.ogmFile, FILE_READ) == -1) { |
573 | Com_Printf("Can't open ogm-file for reading (%s)\n", filename); |
574 | return -1; |
575 | } |
576 | |
577 | cin->cinematicType = CINEMATIC_TYPE_OGM; |
578 | OGMCIN.startTime = CL_Milliseconds(); |
579 | |
580 | ogg_sync_init(&OGMCIN.oy); /* Now we can read pages */ |
581 | |
582 | /** @todo FIXME? can serialno be 0 in ogg? (better way to check initialized?) */ |
583 | /** @todo support for more than one audio stream? / detect files with one stream(or without correct ones) */ |
584 | while (!OGMCIN.os_audio.serialno || !OGMCIN.os_video.serialno) { |
585 | if (ogg_sync_pageout(&OGMCIN.oy, &og) == 1) { |
586 | if (og.body_len >= 7 && !memcmp(og.body, "\x01vorbis", 7)) { |
587 | if (OGMCIN.os_audio.serialno) { |
588 | Com_Printf("more than one audio stream, in ogm-file(%s) ... we will stay at the first one\n", |
589 | filename); |
590 | } else { |
591 | ogg_stream_init(&OGMCIN.os_audio, ogg_page_serialno(&og)); |
592 | ogg_stream_pagein(&OGMCIN.os_audio, &og); |
593 | } |
594 | } |
595 | #ifdef HAVE_THEORA_THEORA_H |
596 | else if (og.body_len >= 7 && !memcmp(og.body, "\x80theora", 7)) { |
597 | if (OGMCIN.os_video.serialno) { |
598 | Com_Printf("more than one video stream, in ogm-file(%s) ... we will stay at the first one\n", |
599 | filename); |
600 | } else { |
601 | OGMCIN.videoStreamIsTheora = true; |
602 | ogg_stream_init(&OGMCIN.os_video, ogg_page_serialno(&og)); |
603 | ogg_stream_pagein(&OGMCIN.os_video, &og); |
604 | } |
605 | } |
606 | #endif |
607 | #ifdef HAVE_XVID_H |
608 | else if (strstr((const char*) (og.body + 1), "video")) { /** @todo better way to find video stream */ |
609 | if (OGMCIN.os_video.serialno) { |
610 | Com_Printf("more than one video stream, in ogm-file(%s) ... we will stay at the first one\n", |
611 | filename); |
612 | } else { |
613 | stream_header_t* sh; |
614 | |
615 | OGMCIN.videoStreamIsXvid = true; |
616 | |
617 | sh = (stream_header_t*) (og.body + 1); |
618 | |
619 | OGMCIN.Vtime_unit = sh->time_unit; |
620 | |
621 | ogg_stream_init(&OGMCIN.os_video, ogg_page_serialno(&og)); |
622 | ogg_stream_pagein(&OGMCIN.os_video, &og); |
623 | } |
624 | } |
625 | #endif |
626 | } else if (CIN_OGM_LoadBlockToSync(cin)) |
627 | break; |
628 | } |
629 | |
630 | if (OGMCIN.videoStreamIsXvid && OGMCIN.videoStreamIsTheora) { |
631 | Com_Printf("Found \"video\"- and \"theora\"-stream, ogm-file (%s)\n", filename); |
632 | return -2; |
633 | } |
634 | |
635 | if (!OGMCIN.os_audio.serialno) { |
636 | Com_Printf("Haven't found an audio (vorbis) stream in ogm-file (%s)\n", filename); |
637 | return -2; |
638 | } |
639 | if (!OGMCIN.os_video.serialno) { |
640 | Com_Printf("Haven't found a video stream in ogm-file (%s)\n", filename); |
641 | return -3; |
642 | } |
643 | |
644 | /* load vorbis header */ |
645 | vorbis_info_init(&OGMCIN.vi); |
646 | vorbis_comment_init(&OGMCIN.vc); |
647 | i = 0; |
648 | while (i < 3) { |
649 | status = ogg_stream_packetout(&OGMCIN.os_audio, &op); |
650 | if (status < 0) { |
651 | Com_Printf("Corrupt ogg packet while loading vorbis-headers, ogm-file(%s)\n", filename); |
652 | return -8; |
653 | } |
654 | if (status > 0) { |
655 | status = vorbis_synthesis_headerin(&OGMCIN.vi, &OGMCIN.vc, &op); |
656 | if (i == 0 && status < 0) { |
657 | Com_Printf("This Ogg bitstream does not contain Vorbis audio data, ogm-file(%s)\n", filename); |
658 | return -9; |
659 | } |
660 | ++i; |
661 | } else if (CIN_OGM_LoadPagesToStream(cin)) { |
662 | if (CIN_OGM_LoadBlockToSync(cin)) { |
663 | Com_Printf("Couldn't find all vorbis headers before end of ogm-file (%s)\n", filename); |
664 | return -10; |
665 | } |
666 | } |
667 | } |
668 | |
669 | vorbis_synthesis_init(&OGMCIN.vd, &OGMCIN.vi); |
670 | |
671 | #ifdef HAVE_XVID_H |
672 | status = CIN_XVID_Init(cin); |
673 | if (status) { |
674 | Com_Printf("[Xvid]Decore INIT problem, return value %d(ogm-file: %s)\n", status, filename); |
675 | return -4; |
676 | } |
677 | #endif |
678 | |
679 | #ifdef HAVE_THEORA_THEORA_H |
680 | if (OGMCIN.videoStreamIsTheora) { |
681 | theora_info_init(&OGMCIN.th_info); |
682 | theora_comment_init(&OGMCIN.th_comment); |
683 | |
684 | i = 0; |
685 | while (i < 3) { |
686 | status = ogg_stream_packetout(&OGMCIN.os_video, &op); |
687 | if (status < 0) { |
688 | Com_Printf("Corrupt ogg packet while loading theora-headers, ogm-file(%s)\n", filename); |
689 | |
690 | return -8; |
691 | } |
692 | if (status > 0) { |
693 | status = theora_decode_header(&OGMCIN.th_info, &OGMCIN.th_comment, &op); |
694 | if (i == 0 && status != 0) { |
695 | Com_Printf("This Ogg bitstream does not contain theora data, ogm-file(%s)\n", filename); |
696 | |
697 | return -9; |
698 | } |
699 | ++i; |
700 | } else if (CIN_OGM_LoadPagesToStream(cin)) { |
701 | if (CIN_OGM_LoadBlockToSync(cin)) { |
702 | Com_Printf("Couldn't find all theora headers before end of ogm-file (%s)\n", filename); |
703 | |
704 | return -10; |
705 | } |
706 | } |
707 | } |
708 | |
709 | theora_decode_init(&OGMCIN.th_state, &OGMCIN.th_info); |
710 | OGMCIN.Vtime_unit = ((ogg_int64_t) OGMCIN.th_info.fps_denominator * 1000 * 10000 / OGMCIN.th_info.fps_numerator); |
711 | } |
712 | #endif |
713 | |
714 | M_PlayMusicStream(&OGMCIN.musicStream); |
715 | |
716 | return 0; |
717 | } |
718 | |
719 | /** |
720 | * @sa R_UploadData |
721 | */ |
722 | static void CIN_OGM_DrawCinematic (cinematic_t *cin) |
723 | { |
724 | int texnum; |
725 | |
726 | assert(cin->status != CIN_STATUS_NONE); |
727 | |
728 | if (!OGMCIN.outputBuffer) |
729 | return; |
730 | texnum = R_UploadData("***cinematic***", OGMCIN.outputBuffer, OGMCIN.outputWidth, OGMCIN.outputHeight); |
731 | R_DrawTexture(texnum, cin->x, cin->y, cin->w, cin->h); |
732 | } |
733 | |
734 | /** |
735 | * @return true if the cinematic is still running, false otherwise |
736 | */ |
737 | bool CIN_OGM_RunCinematic (cinematic_t *cin) |
738 | { |
739 | /* no video stream found */ |
740 | if (!OGMCIN.os_video.serialno) |
741 | return false; |
742 | |
743 | OGMCIN.currentTime = CL_Milliseconds() - OGMCIN.startTime; |
744 | |
745 | while (!OGMCIN.videoFrameCount || OGMCIN.currentTime + 20 >= (int) (OGMCIN.videoFrameCount * OGMCIN.Vtime_unit / 10000)) { |
746 | if (CIN_OGM_LoadFrame(cin)) |
747 | return false; |
748 | } |
749 | |
750 | CIN_OGM_DrawCinematic(cin); |
751 | |
752 | return true; |
753 | } |
754 | |
755 | void CIN_OGM_CloseCinematic (cinematic_t *cin) |
756 | { |
757 | #ifdef HAVE_XVID_H |
758 | /** @todo is this the right place? StopCinematic means we only stop one cinematic */ |
759 | CIN_XVID_Shutdown(cin); |
760 | #endif |
761 | |
762 | #ifdef HAVE_THEORA_THEORA_H |
763 | theora_clear(&OGMCIN.th_state); |
764 | theora_comment_clear(&OGMCIN.th_comment); |
765 | theora_info_clear(&OGMCIN.th_info); |
766 | #endif |
767 | |
768 | M_StopMusicStream(&OGMCIN.musicStream); |
769 | |
770 | Mem_Free(OGMCIN.outputBuffer); |
771 | OGMCIN.outputBuffer = NULL; |
772 | |
773 | vorbis_dsp_clear(&OGMCIN.vd); |
774 | vorbis_comment_clear(&OGMCIN.vc); |
775 | vorbis_info_clear(&OGMCIN.vi); /* must be called last (comment from vorbis example code) */ |
776 | |
777 | ogg_stream_clear(&OGMCIN.os_audio); |
778 | ogg_stream_clear(&OGMCIN.os_video); |
779 | |
780 | ogg_sync_clear(&OGMCIN.oy); |
781 | |
782 | FS_CloseFile(&OGMCIN.ogmFile); |
783 | |
784 | /* free data allocated for decoding */ |
785 | Mem_Free(cin->codecData); |
786 | cin->codecData = NULL; |
787 | } |
788 | |
789 | void CIN_OGM_Init (void) |
790 | { |
791 | long i; |
792 | const float t_ub = (1.77200f / 2.0f) * (float)(1 << 6) + 0.5f; |
793 | const float t_vr = (1.40200f / 2.0f) * (float)(1 << 6) + 0.5f; |
794 | const float t_ug = (0.34414f / 2.0f) * (float)(1 << 6) + 0.5f; |
795 | const float t_vg = (0.71414f / 2.0f) * (float)(1 << 6) + 0.5f; |
796 | |
797 | for (i = 0; i < 256; i++) { |
798 | const float x = (float)(2 * i - 255); |
799 | |
800 | ogmCin_yuvTable.ub[i] = (long)(( t_ub * x) + (1 << 5)); |
801 | ogmCin_yuvTable.vr[i] = (long)(( t_vr * x) + (1 << 5)); |
802 | ogmCin_yuvTable.ug[i] = (long)((-t_ug * x)); |
803 | ogmCin_yuvTable.vg[i] = (long)((-t_vg * x) + (1 << 5)); |
804 | ogmCin_yuvTable.yy[i] = (long)((i << 6) | (i >> 2)); |
805 | } |
806 | } |
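
Regarding the flagged line 310 itself: the second CIN_XVID_Decode() call processes the remainder of the ogg packet, but its return value is only accumulated into usedBytes and never read again, which is what the checker reports. A possible shape for that tail of the loop is sketched below with stand-in types (decodePacket() mimics the "bytes consumed, negative on error" convention and Packet stands in for ogg_packet); this is an illustration only, not the project's actual fix, and simply discarding the second result with a (void) cast would silence the checker as well.

	#include <cstdio>

	struct Packet {
		const unsigned char* data;
		int bytes;
	};

	/* stand-in decoder: returns bytes consumed, < 0 on error */
	static int decodePacket(const unsigned char* buf, int len)
	{
		(void)buf;
		return len;
	}

	static int loadVideoFrame(const Packet& op)
	{
		int usedBytes = decodePacket(op.data, op.bytes);
		if (usedBytes < 0 || usedBytes > op.bytes)
			return -1;

		/* use the rest of this packet, and actually inspect the result instead of storing it unread */
		const int rest = decodePacket(op.data + usedBytes, op.bytes - usedBytes);
		if (rest < 0)
			return -1;

		return usedBytes + rest;
	}

	int main(void)
	{
		const unsigned char raw[16] = {0};
		const Packet op = {raw, (int)sizeof(raw)};
		printf("consumed %d of %d bytes\n", loadVideoFrame(op), op.bytes);
		return 0;
	}
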