/*
    Copyright (c) 2005-2021 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

/*-------------------------------------------------------------*/
/*--- Compression machinery (not incl block sorting) ---*/
/*--- compress.cpp ---*/
/*-------------------------------------------------------------*/

/* ------------------------------------------------------------------
   The original source for this example:
   This file is part of bzip2/libbzip2, a program and library for
   lossless, block-sorting data compression.

   bzip2/libbzip2 version 1.0.6 of 6 September 2010
   Copyright (C) 1996-2010 Julian Seward <[email protected]>

   This program, "bzip2", the associated library "libbzip2", and all
   documentation, are copyright (C) 1996-2010 Julian R Seward. All
   rights reserved.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   1. Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

   2. The origin of this software must not be misrepresented; you must
      not claim that you wrote the original software. If you use this
      software in a product, an acknowledgment in the product
      documentation would be appreciated but is not required.

   3. Altered source versions must be plainly marked as such, and must
      not be misrepresented as being the original software.

   4. The name of the author may not be used to endorse or promote
      products derived from this software without specific prior written
      permission.

   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   Julian Seward, [email protected]
   bzip2/libbzip2 version 1.0.6 of 6 September 2010
   ------------------------------------------------------------------ */

/* CHANGES
   0.9.0 -- original version.
   0.9.0a/b -- no changes in this file.
   0.9.0c -- changed setting of nGroups in sendMTFValues()
             so as to do a bit better on small files
*/
#include "bzlib_private.hpp"

/*---------------------------------------------------*/
/*--- Bit stream I/O ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
void BZ2_bsInitWrite(EState* s) {
    s->bsLive = 0;
    s->bsBuff = 0;
}

/*---------------------------------------------------*/
static void bsFinishWrite(EState* s) {
    while (s->bsLive > 0) {
        s->zbits[s->numZ] = (UChar)(s->bsBuff >> 24);
        s->numZ++;
        s->bsBuff <<= 8;
        s->bsLive -= 8;
    }
}

/*---------------------------------------------------*/
#define bsNEEDW(nz)                                       \
    {                                                     \
        while (s->bsLive >= 8) {                          \
            s->zbits[s->numZ] = (UChar)(s->bsBuff >> 24); \
            s->numZ++;                                    \
            s->bsBuff <<= 8;                              \
            s->bsLive -= 8;                               \
        }                                                 \
    }

/*---------------------------------------------------*/
static __inline__ void bsW(EState* s, Int32 n, UInt32 v) {
    bsNEEDW(n);
    s->bsBuff |= (v << (32 - s->bsLive - n));
    s->bsLive += n;
}
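
/*
   bsBuff accumulates output bits left-justified (most significant bit
   first) and bsLive counts how many of its top bits are valid. bsNEEDW
   first drains whole bytes from the top of bsBuff into zbits, so at most
   7 bits remain live before bsW ORs the new value in just below them.
   For example (illustrative), with bsLive == 3, bsW(s, 4, 0xB) places the
   four bits 1011 at bit positions 28..25 (shift of 32 - 3 - 4 = 25) and
   leaves bsLive == 7. Any bits still pending at the end of the stream are
   flushed by bsFinishWrite above.
*/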

/*---------------------------------------------------*/
static void bsPutUInt32(EState* s, UInt32 u) {
    bsW(s, 8, (u >> 24) & 0xffL);
    bsW(s, 8, (u >> 16) & 0xffL);
    bsW(s, 8, (u >> 8) & 0xffL);
    bsW(s, 8, u & 0xffL);
}

/*---------------------------------------------------*/
static void bsPutUChar(EState* s, UChar c) {
    bsW(s, 8, (UInt32)c);
}

/*---------------------------------------------------*/
/*--- The back end proper ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static void makeMaps_e(EState* s) {
    Int32 i;
    s->nInUse = 0;
    for (i = 0; i < 256; i++)
        if (s->inUse[i]) {
            s->unseqToSeq[i] = s->nInUse;
            s->nInUse++;
        }
}

/*---------------------------------------------------*/
static void generateMTFValues(EState* s) {
    UChar yy[256];
    Int32 i, j;
    Int32 zPend;
    Int32 wr;
    Int32 EOB;

    /*
       After sorting (eg, here),
         s->arr1 [ 0 .. s->nblock-1 ] holds sorted order,
       and
         ((UChar*)s->arr2) [ 0 .. s->nblock-1 ]
       holds the original block data.

       The first thing to do is generate the MTF values,
       and put them in
         ((UInt16*)s->arr1) [ 0 .. s->nblock-1 ].
       Because there are strictly fewer or equal MTF values
       than block values, ptr values in this area are overwritten
       with MTF values only when they are no longer needed.

       The final compressed bitstream is generated into the
       area starting at
         (UChar*) (&((UChar*)s->arr2)[s->nblock])

       These storage aliases are set up in bzCompressInit(),
       except for the last one, which is arranged in
       compressBlock().
    */
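
    /*
       Move-to-front: yy[] holds the in-use symbols ordered by recency;
       each block byte is replaced by its current position in yy[] and
       the symbol is then moved to the front. Runs of position 0 (the
       most common output) are not emitted directly: their length is
       accumulated in zPend and later spelled out as RUNA/RUNB symbols,
       a bijective base-2 code in which RUNA contributes 1 and RUNB
       contributes 2 per digit, least significant digit first. For
       example (illustrative), a run of 5 zeroes becomes RUNA RUNB
       (1 + 2*2 = 5). A non-zero MTF position m is emitted as m + 1,
       because output codes 0 and 1 are reserved for RUNA and RUNB,
       and nInUse + 1 is the EOB symbol.
    */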
    UInt32* ptr = s->ptr;
    UChar* block = s->block;
    UInt16* mtfv = s->mtfv;

    makeMaps_e(s);
    EOB = s->nInUse + 1;

    for (i = 0; i <= EOB; i++)
        s->mtfFreq[i] = 0;

    wr = 0;
    zPend = 0;
    for (i = 0; i < s->nInUse; i++)
        yy[i] = (UChar)i;

    for (i = 0; i < s->nblock; i++) {
        UChar ll_i;
        AssertD(wr <= i, "generateMTFValues(1)");
        j = ptr[i] - 1;
        if (j < 0)
            j += s->nblock;
        ll_i = s->unseqToSeq[block[j]];
        AssertD(ll_i < s->nInUse, "generateMTFValues(2a)");

        if (yy[0] == ll_i) {
            zPend++;
        }
        else {
            if (zPend > 0) {
                zPend--;
                while (True) {
                    if (zPend & 1) {
                        mtfv[wr] = BZ_RUNB;
                        wr++;
                        s->mtfFreq[BZ_RUNB]++;
                    }
                    else {
                        mtfv[wr] = BZ_RUNA;
                        wr++;
                        s->mtfFreq[BZ_RUNA]++;
                    }
                    if (zPend < 2)
                        break;
                    zPend = (zPend - 2) / 2;
                };
                zPend = 0;
            }
            {
                UChar rtmp;
                UChar* ryy_j;
                UChar rll_i;
                rtmp = yy[1];
                yy[1] = yy[0];
                ryy_j = &(yy[1]);
                rll_i = ll_i;
                while (rll_i != rtmp) {
                    UChar rtmp2;
                    ryy_j++;
                    rtmp2 = rtmp;
                    rtmp = *ryy_j;
                    *ryy_j = rtmp2;
                };
                yy[0] = rtmp;
                j = ryy_j - &(yy[0]);
                mtfv[wr] = j + 1;
                wr++;
                s->mtfFreq[j + 1]++;
            }
        }
    }

    if (zPend > 0) {
        zPend--;
        while (True) {
            if (zPend & 1) {
                mtfv[wr] = BZ_RUNB;
                wr++;
                s->mtfFreq[BZ_RUNB]++;
            }
            else {
                mtfv[wr] = BZ_RUNA;
                wr++;
                s->mtfFreq[BZ_RUNA]++;
            }
            if (zPend < 2)
                break;
            zPend = (zPend - 2) / 2;
        };
        zPend = 0;
    }

    mtfv[wr] = EOB;
    wr++;
    s->mtfFreq[EOB]++;

    s->nMTF = wr;
}

/*---------------------------------------------------*/
#define BZ_LESSER_ICOST 0
#define BZ_GREATER_ICOST 15

static void sendMTFValues(EState* s) {
    Int32 t, i, j, k, gs, ge, totc, bt, bc;
    Int32 nSelectors, alphaSize, minLen, maxLen, selCtr;
    Int32 nGroups, nBytes;

    /*--
       UChar len[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
       is a global since the decoder also needs it.

       Int32 code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
       Int32 rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
       are also globals only used in this proc.
       Made global to keep stack frame size small.
    --*/

    UInt16 cost[BZ_N_GROUPS];
    Int32 fave[BZ_N_GROUPS];

    UInt16* mtfv = s->mtfv;

    if (s->verbosity >= 3)
        VPrintf3(" %d in block, %d after MTF & 1-2 coding, "
                 "%d+2 syms in use\n",
                 s->nblock,
                 s->nMTF,
                 s->nInUse);

    alphaSize = s->nInUse + 2;
    for (i = 0; i < BZ_N_GROUPS; i++)
        for (j = 0; j < alphaSize; j++)
            s->len[i][j] = BZ_GREATER_ICOST;

    /*--- Decide how many coding tables to use ---*/
    AssertH(s->nMTF > 0, 3001);
    if (s->nMTF < 200)
        nGroups = 2;
    else if (s->nMTF < 600)
        nGroups = 3;
    else if (s->nMTF < 1200)
        nGroups = 4;
    else if (s->nMTF < 2400)
        nGroups = 5;
    else
        nGroups = 6;

    /*--- Generate an initial set of coding tables ---*/
    {
        Int32 nPart, remF, tFreq, aFreq;

        nPart = nGroups;
        remF = s->nMTF;
        gs = 0;
        while (nPart > 0) {
            tFreq = remF / nPart;
            ge = gs - 1;
            aFreq = 0;
            while (aFreq < tFreq && ge < alphaSize - 1) {
                ge++;
                aFreq += s->mtfFreq[ge];
            }

            if (ge > gs && nPart != nGroups && nPart != 1 && ((nGroups - nPart) % 2 == 1)) {
                aFreq -= s->mtfFreq[ge];
                ge--;
            }

            if (s->verbosity >= 3)
                VPrintf5(" initial group %d, [%d .. %d], "
                         "has %d syms (%4.1f%%)\n",
                         nPart,
                         gs,
                         ge,
                         aFreq,
                         (100.0 * (float)aFreq) / (float)(s->nMTF));

            for (i = 0; i < alphaSize; i++)
                if (i >= gs && i <= ge)
                    s->len[nPart - 1][i] = BZ_LESSER_ICOST;
                else
                    s->len[nPart - 1][i] = BZ_GREATER_ICOST;

            nPart--;
            gs = ge + 1;
            remF -= aFreq;
        }
    }

    /*---
       Iterate up to BZ_N_ITERS times to improve the tables.
    ---*/
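    /*
       Each pass is a refinement step: every group of BZ_G_SIZE
       consecutive MTF symbols (50 in the fast path below) is assigned
       to whichever table currently codes it most cheaply, the winning
       table's symbol frequencies are accumulated in rfreq, and new code
       lengths are then rebuilt from those frequencies, so the tables
       specialise towards the groups that actually select them.
    */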
    for (k = 0; k < BZ_N_ITERS; k++) {
        for (i = 0; i < nGroups; i++)
            fave[i] = 0;

        for (i = 0; i < nGroups; i++)
            for (j = 0; j < alphaSize; j++)
                s->rfreq[i][j] = 0;

        /*---
           Set up an auxiliary length table which is used to fast-track
           the common case (nGroups == 6).
        ---*/
        if (nGroups == 6) {
            for (i = 0; i < alphaSize; i++) {
                s->len_pack[i][0] = (s->len[1][i] << 16) | s->len[0][i];
                s->len_pack[i][1] = (s->len[3][i] << 16) | s->len[2][i];
                s->len_pack[i][2] = (s->len[5][i] << 16) | s->len[4][i];
            }
        }
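
        /*
           len_pack[i][t] holds the code lengths of symbol i for tables
           2t+1 and 2t packed into the high and low halves of one 32-bit
           word, so the unrolled loop below accumulates all six per-table
           costs with three additions per symbol; the halves are split
           back out into cost[0..5] after the loop.
        */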

        nSelectors = 0;
        totc = 0;
        gs = 0;
        while (True) {
            /*--- Set group start & end marks. --*/
            if (gs >= s->nMTF)
                break;
            ge = gs + BZ_G_SIZE - 1;
            if (ge >= s->nMTF)
                ge = s->nMTF - 1;

            /*--
               Calculate the cost of this group as coded
               by each of the coding tables.
            --*/
            for (i = 0; i < nGroups; i++)
                cost[i] = 0;

            if (nGroups == 6 && 50 == ge - gs + 1) {
                /*--- fast track the common case ---*/
                UInt32 cost01, cost23, cost45;
                UInt16 icv;
                cost01 = cost23 = cost45 = 0;

#define BZ_ITER(nn)                    \
    icv = mtfv[gs + (nn)];             \
    cost01 += s->len_pack[icv][0];     \
    cost23 += s->len_pack[icv][1];     \
    cost45 += s->len_pack[icv][2];

                BZ_ITER(0);
                BZ_ITER(1);
                BZ_ITER(2);
                BZ_ITER(3);
                BZ_ITER(4);
                BZ_ITER(5);
                BZ_ITER(6);
                BZ_ITER(7);
                BZ_ITER(8);
                BZ_ITER(9);
                BZ_ITER(10);
                BZ_ITER(11);
                BZ_ITER(12);
                BZ_ITER(13);
                BZ_ITER(14);
                BZ_ITER(15);
                BZ_ITER(16);
                BZ_ITER(17);
                BZ_ITER(18);
                BZ_ITER(19);
                BZ_ITER(20);
                BZ_ITER(21);
                BZ_ITER(22);
                BZ_ITER(23);
                BZ_ITER(24);
                BZ_ITER(25);
                BZ_ITER(26);
                BZ_ITER(27);
                BZ_ITER(28);
                BZ_ITER(29);
                BZ_ITER(30);
                BZ_ITER(31);
                BZ_ITER(32);
                BZ_ITER(33);
                BZ_ITER(34);
                BZ_ITER(35);
                BZ_ITER(36);
                BZ_ITER(37);
                BZ_ITER(38);
                BZ_ITER(39);
                BZ_ITER(40);
                BZ_ITER(41);
                BZ_ITER(42);
                BZ_ITER(43);
                BZ_ITER(44);
                BZ_ITER(45);
                BZ_ITER(46);
                BZ_ITER(47);
                BZ_ITER(48);
                BZ_ITER(49);

#undef BZ_ITER

                cost[0] = cost01 & 0xffff;
                cost[1] = cost01 >> 16;
                cost[2] = cost23 & 0xffff;
                cost[3] = cost23 >> 16;
                cost[4] = cost45 & 0xffff;
                cost[5] = cost45 >> 16;
            }
            else {
                /*--- slow version which correctly handles all situations ---*/
                for (i = gs; i <= ge; i++) {
                    UInt16 icv = mtfv[i];
                    for (j = 0; j < nGroups; j++)
                        cost[j] += s->len[j][icv];
                }
            }

            /*--
               Find the coding table which is best for this group,
               and record its identity in the selector table.
            --*/
            bc = 999999999;
            bt = -1;
            for (i = 0; i < nGroups; i++)
                if (cost[i] < bc) {
                    bc = cost[i];
                    bt = i;
                };
            totc += bc;
            fave[bt]++;
            s->selector[nSelectors] = bt;
            nSelectors++;

            /*--
               Increment the symbol frequencies for the selected table.
            --*/
            if (nGroups == 6 && 50 == ge - gs + 1) {
                /*--- fast track the common case ---*/

#define BZ_ITUR(nn) s->rfreq[bt][mtfv[gs + (nn)]]++

                BZ_ITUR(0);
                BZ_ITUR(1);
                BZ_ITUR(2);
                BZ_ITUR(3);
                BZ_ITUR(4);
                BZ_ITUR(5);
                BZ_ITUR(6);
                BZ_ITUR(7);
                BZ_ITUR(8);
                BZ_ITUR(9);
                BZ_ITUR(10);
                BZ_ITUR(11);
                BZ_ITUR(12);
                BZ_ITUR(13);
                BZ_ITUR(14);
                BZ_ITUR(15);
                BZ_ITUR(16);
                BZ_ITUR(17);
                BZ_ITUR(18);
                BZ_ITUR(19);
                BZ_ITUR(20);
                BZ_ITUR(21);
                BZ_ITUR(22);
                BZ_ITUR(23);
                BZ_ITUR(24);
                BZ_ITUR(25);
                BZ_ITUR(26);
                BZ_ITUR(27);
                BZ_ITUR(28);
                BZ_ITUR(29);
                BZ_ITUR(30);
                BZ_ITUR(31);
                BZ_ITUR(32);
                BZ_ITUR(33);
                BZ_ITUR(34);
                BZ_ITUR(35);
                BZ_ITUR(36);
                BZ_ITUR(37);
                BZ_ITUR(38);
                BZ_ITUR(39);
                BZ_ITUR(40);
                BZ_ITUR(41);
                BZ_ITUR(42);
                BZ_ITUR(43);
                BZ_ITUR(44);
                BZ_ITUR(45);
                BZ_ITUR(46);
                BZ_ITUR(47);
                BZ_ITUR(48);
                BZ_ITUR(49);

#undef BZ_ITUR
            }
            else {
                /*--- slow version which correctly handles all situations ---*/
                for (i = gs; i <= ge; i++)
                    s->rfreq[bt][mtfv[i]]++;
            }

            gs = ge + 1;
        }
        if (s->verbosity >= 3) {
            VPrintf2(" pass %d: size is %d, grp uses are ", k + 1, totc / 8);
            for (i = 0; i < nGroups; i++)
                VPrintf1("%d ", fave[i]);
            VPrintf0("\n");
        }

        /*--
           Recompute the tables based on the accumulated frequencies.
        --*/
        /* maxLen was changed from 20 to 17 in bzip2-1.0.3. See
           comment in huffman.c for details. */
        for (i = 0; i < nGroups; i++)
            BZ2_hbMakeCodeLengths(&(s->len[i][0]), &(s->rfreq[i][0]), alphaSize, 17 /*20*/);
    }

    AssertH(nGroups < 8, 3002);
    AssertH(nSelectors < 32768 && nSelectors <= (2 + (900000 / BZ_G_SIZE)), 3003);

    /*--- Compute MTF values for the selectors. ---*/
    {
        UChar pos[BZ_N_GROUPS], ll_i, tmp2, tmp;
        for (i = 0; i < nGroups; i++)
            pos[i] = i;
        for (i = 0; i < nSelectors; i++) {
            ll_i = s->selector[i];
            j = 0;
            tmp = pos[j];
            while (ll_i != tmp) {
                j++;
                tmp2 = tmp;
                tmp = pos[j];
                pos[j] = tmp2;
            };
            pos[0] = tmp;
            s->selectorMtf[i] = j;
        }
    };

    /*--- Assign actual codes for the tables. --*/
    for (j = 0; j < nGroups; j++) {
        minLen = 32;
        maxLen = 0;
        for (i = 0; i < alphaSize; i++) {
            if (s->len[j][i] > maxLen)
                maxLen = s->len[j][i];
            if (s->len[j][i] < minLen)
                minLen = s->len[j][i];
        }
        AssertH(!(maxLen > 17 /*20*/), 3004);
        AssertH(!(minLen < 1), 3005);
        BZ2_hbAssignCodes(&(s->code[j][0]), &(s->len[j][0]), minLen, maxLen, alphaSize);
    }
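
    /*
       The used-byte map below is sent in two levels: 16 bits first, one
       per block of 16 byte values, marking which blocks contain at least
       one used value, then 16 further bits for each marked block to flag
       the individual byte values. This keeps the map small when only a
       few distinct bytes occur in the block.
    */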

    /*--- Transmit the mapping table. ---*/
    {
        Bool inUse16[16];
        for (i = 0; i < 16; i++) {
            inUse16[i] = False;
            for (j = 0; j < 16; j++)
                if (s->inUse[i * 16 + j])
                    inUse16[i] = True;
        }

        nBytes = s->numZ;
        for (i = 0; i < 16; i++)
            if (inUse16[i])
                bsW(s, 1, 1);
            else
                bsW(s, 1, 0);

        for (i = 0; i < 16; i++)
            if (inUse16[i])
                for (j = 0; j < 16; j++) {
                    if (s->inUse[i * 16 + j])
                        bsW(s, 1, 1);
                    else
                        bsW(s, 1, 0);
                }

        if (s->verbosity >= 3)
            VPrintf1(" bytes: mapping %d, ", s->numZ - nBytes);
    }

    /*--- Now the selectors. ---*/
    nBytes = s->numZ;
    bsW(s, 3, nGroups);
    bsW(s, 15, nSelectors);
    for (i = 0; i < nSelectors; i++) {
        for (j = 0; j < s->selectorMtf[i]; j++)
            bsW(s, 1, 1);
        bsW(s, 1, 0);
    }
    if (s->verbosity >= 3)
        VPrintf1("selectors %d, ", s->numZ - nBytes);

    /*--- Now the coding tables. ---*/
    nBytes = s->numZ;
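
    /*
       Each table's code lengths are delta-coded: a 5-bit starting
       length, then for every symbol zero or more 2-bit adjustments
       ("10" raises the current length by one, "11" lowers it by one)
       followed by a single 0 bit meaning the length for this symbol is
       final, move on to the next symbol.
    */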

    for (t = 0; t < nGroups; t++) {
        Int32 curr = s->len[t][0];
        bsW(s, 5, curr);
        for (i = 0; i < alphaSize; i++) {
            while (curr < s->len[t][i]) {
                bsW(s, 2, 2);
                curr++; /* 10 */
            };
            while (curr > s->len[t][i]) {
                bsW(s, 2, 3);
                curr--; /* 11 */
            };
            bsW(s, 1, 0);
        }
    }

    if (s->verbosity >= 3)
        VPrintf1("code lengths %d, ", s->numZ - nBytes);

    /*--- And finally, the block data proper ---*/
    nBytes = s->numZ;
    selCtr = 0;
    gs = 0;
    while (True) {
        if (gs >= s->nMTF)
            break;
        ge = gs + BZ_G_SIZE - 1;
        if (ge >= s->nMTF)
            ge = s->nMTF - 1;
        AssertH(s->selector[selCtr] < nGroups, 3006);

        if (nGroups == 6 && 50 == ge - gs + 1) {
            /*--- fast track the common case ---*/
            UInt16 mtfv_i;
            UChar* s_len_sel_selCtr = &(s->len[s->selector[selCtr]][0]);
            Int32* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);

#define BZ_ITAH(nn)               \
    mtfv_i = mtfv[gs + (nn)];     \
    bsW(s, s_len_sel_selCtr[mtfv_i], s_code_sel_selCtr[mtfv_i])

            BZ_ITAH(0);
            BZ_ITAH(1);
            BZ_ITAH(2);
            BZ_ITAH(3);
            BZ_ITAH(4);
            BZ_ITAH(5);
            BZ_ITAH(6);
            BZ_ITAH(7);
            BZ_ITAH(8);
            BZ_ITAH(9);
            BZ_ITAH(10);
            BZ_ITAH(11);
            BZ_ITAH(12);
            BZ_ITAH(13);
            BZ_ITAH(14);
            BZ_ITAH(15);
            BZ_ITAH(16);
            BZ_ITAH(17);
            BZ_ITAH(18);
            BZ_ITAH(19);
            BZ_ITAH(20);
            BZ_ITAH(21);
            BZ_ITAH(22);
            BZ_ITAH(23);
            BZ_ITAH(24);
            BZ_ITAH(25);
            BZ_ITAH(26);
            BZ_ITAH(27);
            BZ_ITAH(28);
            BZ_ITAH(29);
            BZ_ITAH(30);
            BZ_ITAH(31);
            BZ_ITAH(32);
            BZ_ITAH(33);
            BZ_ITAH(34);
            BZ_ITAH(35);
            BZ_ITAH(36);
            BZ_ITAH(37);
            BZ_ITAH(38);
            BZ_ITAH(39);
            BZ_ITAH(40);
            BZ_ITAH(41);
            BZ_ITAH(42);
            BZ_ITAH(43);
            BZ_ITAH(44);
            BZ_ITAH(45);
            BZ_ITAH(46);
            BZ_ITAH(47);
            BZ_ITAH(48);
            BZ_ITAH(49);

#undef BZ_ITAH
        }
        else {
            /*--- slow version which correctly handles all situations ---*/
            for (i = gs; i <= ge; i++) {
                bsW(s, s->len[s->selector[selCtr]][mtfv[i]], s->code[s->selector[selCtr]][mtfv[i]]);
            }
        }

        gs = ge + 1;
        selCtr++;
    }
    AssertH(selCtr == nSelectors, 3007);

    if (s->verbosity >= 3)
        VPrintf1("codes %d\n", s->numZ - nBytes);
}

/*---------------------------------------------------*/
void BZ2_compressBlock(EState* s, Bool is_last_block) {
    if (s->nblock > 0) {
        BZ_FINALISE_CRC(s->blockCRC);
        s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
        s->combinedCRC ^= s->blockCRC;
        if (s->blockNo > 1)
            s->numZ = 0;

        if (s->verbosity >= 2)
            VPrintf4(" block %d: crc = 0x%08x, "
                     "combined CRC = 0x%08x, size = %d\n",
                     s->blockNo,
                     s->blockCRC,
                     s->combinedCRC,
                     s->nblock);

        BZ2_blockSort(s);
    }

    s->zbits = (UChar*)(&((UChar*)s->arr2)[s->nblock]);

    /*-- If this is the first block, create the stream header. --*/
    if (s->blockNo == 1) {
        BZ2_bsInitWrite(s);
        bsPutUChar(s, BZ_HDR_B);
        bsPutUChar(s, BZ_HDR_Z);
        bsPutUChar(s, BZ_HDR_h);
        bsPutUChar(s, (UChar)(BZ_HDR_0 + s->blockSize100k));
    }

    if (s->nblock > 0) {
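        /*-- Block header magic: the BCD digits of pi, 0x314159265359. --*/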
        bsPutUChar(s, 0x31);
        bsPutUChar(s, 0x41);
        bsPutUChar(s, 0x59);
        bsPutUChar(s, 0x26);
        bsPutUChar(s, 0x53);
        bsPutUChar(s, 0x59);

        /*-- Now the block's CRC, so it is in a known place. --*/
        bsPutUInt32(s, s->blockCRC);

        /*--
           Now a single bit indicating (non-)randomisation.
           As of version 0.9.5, we use a better sorting algorithm
           which makes randomisation unnecessary. So always set
           the randomised bit to 'no'. Of course, the decoder
           still needs to be able to handle randomised blocks
           so as to maintain backwards compatibility with
           older versions of bzip2.
        --*/
        bsW(s, 1, 0);

        bsW(s, 24, s->origPtr);
        generateMTFValues(s);
        sendMTFValues(s);
    }

    /*-- If this is the last block, add the stream trailer. --*/
    if (is_last_block) {
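        /*-- End-of-stream magic: the BCD digits of sqrt(pi), 0x177245385090,
             followed by the combined CRC of all blocks. --*/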
        bsPutUChar(s, 0x17);
        bsPutUChar(s, 0x72);
        bsPutUChar(s, 0x45);
        bsPutUChar(s, 0x38);
        bsPutUChar(s, 0x50);
        bsPutUChar(s, 0x90);
        bsPutUInt32(s, s->combinedCRC);
        if (s->verbosity >= 2)
            VPrintf1(" final combined CRC = 0x%08x\n ", s->combinedCRC);
        bsFinishWrite(s);
    }
}

/*-------------------------------------------------------------*/
/*--- end compress.cpp ---*/
/*-------------------------------------------------------------*/