Lines Matching refs:blockTable

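All of the matches below fall inside a single benchmark routine, BMK_benchMem(), which splits the input into blocks and round-trips each block through a compressor and back. The blockParam_t definition itself is outside this listing; the following is a minimal sketch inferred from the fields referenced below (field types and order are assumptions, not taken from the source):

    #include <stddef.h>   /* size_t */

    /* Sketch only: the field set is inferred from the references below. */
    typedef struct {
        const char* srcPtr;   /* start of this block in the source buffer */
        size_t      srcSize;  /* source bytes in this block */
        char*       cPtr;     /* slot for the compressed block */
        size_t      cRoom;    /* slot capacity: ZSTD_compressBound(srcSize), line 189 */
        size_t      cSize;    /* compressed size actually produced */
        char*       resPtr;   /* slot for the regenerated (decompressed) block */
        size_t      resSize;  /* regenerated size after decompression */
    } blockParam_t;
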
159     blockParam_t* const blockTable = (blockParam_t*) malloc(maxNbBlocks * sizeof(blockParam_t));  in BMK_benchMem()  local
168 if (!compressedBuffer || !resultBuffer || !blockTable || !ctx || !dctx) in BMK_benchMem()
185 blockTable[nbBlocks].srcPtr = srcPtr; in BMK_benchMem()
186 blockTable[nbBlocks].cPtr = cPtr; in BMK_benchMem()
187 blockTable[nbBlocks].resPtr = resPtr; in BMK_benchMem()
188 blockTable[nbBlocks].srcSize = thisBlockSize; in BMK_benchMem()
189 blockTable[nbBlocks].cRoom = ZSTD_compressBound(thisBlockSize); in BMK_benchMem()
191 cPtr += blockTable[nbBlocks].cRoom; in BMK_benchMem()
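
Lines 159-191 allocate the table and fill one entry per block. A minimal sketch of that setup loop, assuming the blockParam_t above (loop bounds and local names are inferred; only the table assignments appear in the listing):

    #include <zstd.h>

    static unsigned fillBlockTable(blockParam_t* blockTable, unsigned maxNbBlocks,
                                   const char* srcPtr, size_t srcSize,
                                   char* cPtr, char* resPtr, size_t blockSize)
    {
        unsigned nbBlocks = 0;
        while (srcSize > 0 && nbBlocks < maxNbBlocks) {
            size_t const thisBlockSize = (srcSize < blockSize) ? srcSize : blockSize;
            blockTable[nbBlocks].srcPtr  = srcPtr;                            /* line 185 */
            blockTable[nbBlocks].cPtr    = cPtr;                              /* line 186 */
            blockTable[nbBlocks].resPtr  = resPtr;                            /* line 187 */
            blockTable[nbBlocks].srcSize = thisBlockSize;                     /* line 188 */
            blockTable[nbBlocks].cRoom   = ZSTD_compressBound(thisBlockSize); /* line 189 */
            srcPtr  += thisBlockSize;
            cPtr    += blockTable[nbBlocks].cRoom;                            /* line 191 */
            resPtr  += thisBlockSize;
            srcSize -= thisBlockSize;
            nbBlocks++;
        }
        return nbBlocks;
    }

The layout decision visible at line 191: compressed slots are packed back to back, each sized pessimistically with ZSTD_compressBound(), so a worst-case block cannot overflow into its neighbor's slot.
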
247 blockTable[blockNb].cPtr, blockTable[blockNb].cRoom, in BMK_benchMem()
248 blockTable[blockNb].srcPtr, blockTable[blockNb].srcSize, in BMK_benchMem()
252 blockTable[blockNb].cPtr, blockTable[blockNb].cRoom, in BMK_benchMem()
253 blockTable[blockNb].srcPtr, blockTable[blockNb].srcSize, cLevel); in BMK_benchMem()
256 blockTable[blockNb].cSize = rSize; in BMK_benchMem()
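
The matches at 247-253 are argument lines only; the enclosing calls were truncated out of the listing. Their (dst, dstCapacity, src, srcSize, ..., cLevel) shape matches zstd's one-shot context APIs, so a plausible reconstruction of the per-block loop uses ZSTD_compressCCtx() (the 247-248 variant presumably adds a dictionary, e.g. ZSTD_compress_usingDict()):

    static void compressBlocksOneShot(ZSTD_CCtx* ctx, blockParam_t* blockTable,
                                      unsigned nbBlocks, int cLevel)
    {
        unsigned blockNb;
        for (blockNb = 0; blockNb < nbBlocks; blockNb++) {
            size_t const rSize = ZSTD_compressCCtx(ctx,
                    blockTable[blockNb].cPtr,   blockTable[blockNb].cRoom,   /* line 252 */
                    blockTable[blockNb].srcPtr, blockTable[blockNb].srcSize, /* line 253 */
                    cLevel);
            if (ZSTD_isError(rSize)) return;     /* error handling elided */
            blockTable[blockNb].cSize = rSize;   /* line 256 */
        }
    }
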
273 rSize = ZSTD_resetCStream(zbc, blockTable[blockNb].srcSize); in BMK_benchMem()
275 inBuffer.src = blockTable[blockNb].srcPtr; in BMK_benchMem()
276 inBuffer.size = blockTable[blockNb].srcSize; in BMK_benchMem()
278 outBuffer.dst = blockTable[blockNb].cPtr; in BMK_benchMem()
279 outBuffer.size = blockTable[blockNb].cRoom; in BMK_benchMem()
285 blockTable[blockNb].cSize = outBuffer.pos; in BMK_benchMem()
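
Lines 273-285 are the streaming counterpart: the CStream is reset per block with the pledged source size, the whole block is pushed through in one ZSTD_compressStream() call, and outBuffer.pos becomes the compressed size. A sketch; the pos initializations and the ZSTD_endStream() flush are inferred, since those lines did not match the search:

    static void compressBlocksStreaming(ZSTD_CStream* zbc, blockParam_t* blockTable,
                                        unsigned nbBlocks)
    {
        unsigned blockNb;
        for (blockNb = 0; blockNb < nbBlocks; blockNb++) {
            ZSTD_inBuffer  inBuffer;
            ZSTD_outBuffer outBuffer;
            size_t rSize = ZSTD_resetCStream(zbc, blockTable[blockNb].srcSize); /* line 273 */
            if (ZSTD_isError(rSize)) return;
            inBuffer.src   = blockTable[blockNb].srcPtr;    /* line 275 */
            inBuffer.size  = blockTable[blockNb].srcSize;   /* line 276 */
            inBuffer.pos   = 0;
            outBuffer.dst  = blockTable[blockNb].cPtr;      /* line 278 */
            outBuffer.size = blockTable[blockNb].cRoom;     /* line 279 */
            outBuffer.pos  = 0;
            rSize = ZSTD_compressStream(zbc, &outBuffer, &inBuffer);
            if (ZSTD_isError(rSize)) return;
            rSize = ZSTD_endStream(zbc, &outBuffer);        /* write frame epilogue */
            if (rSize != 0) return;                         /* 0 means fully flushed */
            blockTable[blockNb].cSize = outBuffer.pos;      /* line 285 */
        }
    }
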
318 def.next_in = (z_const z_Bytef*) blockTable[blockNb].srcPtr; in BMK_benchMem()
319 def.avail_in = (uInt)blockTable[blockNb].srcSize; in BMK_benchMem()
321 def.next_out = (z_Bytef*) blockTable[blockNb].cPtr; in BMK_benchMem()
322 def.avail_out = (uInt)blockTable[blockNb].cRoom; in BMK_benchMem()
325 if (ret != Z_STREAM_END) EXM_THROW(1, "deflate failure ret=%d srcSize=%d", ret, (int)blockTable[blockNb].srcSize); in BMK_benchMem()
326 blockTable[blockNb].cSize = def.total_out; in BMK_benchMem()
349 def.next_in = (z_const z_Bytef*) blockTable[blockNb].srcPtr; in BMK_benchMem()
350 def.avail_in = (uInt)blockTable[blockNb].srcSize; in BMK_benchMem()
352 def.next_out = (z_Bytef*) blockTable[blockNb].cPtr; in BMK_benchMem()
353 def.avail_out = (uInt)blockTable[blockNb].cRoom; in BMK_benchMem()
359 blockTable[blockNb].cSize = def.total_out; in BMK_benchMem()
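
The spans at 318-326 and 349-359 are two zlib variants of the same per-block loop (judging by the duplication, presumably one that re-initializes the z_stream per block and one that reuses it with deflateReset()). One block's round, written against plain zlib; the listing's z_Bytef cast is assumed to alias zlib's Bytef:

    #include <zlib.h>

    /* Assumes the stream was set up with deflateInit(), or reset with
     * deflateReset() between blocks (which also zeroes total_out). */
    static int deflateOneBlock(z_stream* def, blockParam_t* bp)
    {
        int ret;
        def->next_in   = (Bytef*) bp->srcPtr;   /* lines 318/349 (listing casts via z_const) */
        def->avail_in  = (uInt) bp->srcSize;    /* lines 319/350 */
        def->next_out  = (Bytef*) bp->cPtr;     /* lines 321/352 */
        def->avail_out = (uInt) bp->cRoom;      /* lines 322/353 */
        ret = deflate(def, Z_FINISH);           /* whole block in one call */
        if (ret != Z_STREAM_END) return -1;     /* the EXM_THROW at line 325 */
        bp->cSize = (size_t) def->total_out;    /* lines 326/359 */
        return 0;
    }
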
371 … { U32 blockNb; for (blockNb=0; blockNb<nbBlocks; blockNb++) cSize += blockTable[blockNb].cSize; } in BMK_benchMem()
396 blockTable[blockNb].resPtr, blockTable[blockNb].srcSize, in BMK_benchMem()
397 blockTable[blockNb].cPtr, blockTable[blockNb].cSize, in BMK_benchMem()
405 blockTable[blockNb].resSize = regenSize; in BMK_benchMem()
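
Lines 396-405 mirror the one-shot compression path in reverse: each compressed block is expanded back into its result slot and the regenerated size recorded. The call name is again outside the matched columns; the (dst, dstCapacity, src, srcSize) order fits zstd's context decompression APIs, for example:

    static void decompressBlocksOneShot(ZSTD_DCtx* dctx, blockParam_t* blockTable,
                                        unsigned nbBlocks)
    {
        unsigned blockNb;
        for (blockNb = 0; blockNb < nbBlocks; blockNb++) {
            size_t const regenSize = ZSTD_decompressDCtx(dctx,
                    blockTable[blockNb].resPtr, blockTable[blockNb].srcSize, /* line 396 */
                    blockTable[blockNb].cPtr,   blockTable[blockNb].cSize);  /* line 397 */
            if (ZSTD_isError(regenSize)) return;      /* error handling elided */
            blockTable[blockNb].resSize = regenSize;  /* line 405 */
        }
    }
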
423 inBuffer.src = blockTable[blockNb].cPtr; in BMK_benchMem()
424 inBuffer.size = blockTable[blockNb].cSize; in BMK_benchMem()
426 outBuffer.dst = blockTable[blockNb].resPtr; in BMK_benchMem()
427 outBuffer.size = blockTable[blockNb].srcSize; in BMK_benchMem()
431 blockTable[blockNb].resSize = outBuffer.pos; in BMK_benchMem()
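
Lines 423-431 are the streaming decompression counterpart. Note that outBuffer.size is the original srcSize: exactly the room the regenerated data must fit in, so an over-long result surfaces as an error rather than a buffer overrun. A sketch, with the per-block reset assumed (it is not among the matches):

    static void decompressBlocksStreaming(ZSTD_DStream* zbd, blockParam_t* blockTable,
                                          unsigned nbBlocks)
    {
        unsigned blockNb;
        for (blockNb = 0; blockNb < nbBlocks; blockNb++) {
            ZSTD_inBuffer  inBuffer;
            ZSTD_outBuffer outBuffer;
            size_t rSize = ZSTD_resetDStream(zbd);         /* assumed, not in the listing */
            if (ZSTD_isError(rSize)) return;
            inBuffer.src   = blockTable[blockNb].cPtr;     /* line 423 */
            inBuffer.size  = blockTable[blockNb].cSize;    /* line 424 */
            inBuffer.pos   = 0;
            outBuffer.dst  = blockTable[blockNb].resPtr;   /* line 426 */
            outBuffer.size = blockTable[blockNb].srcSize;  /* line 427 */
            outBuffer.pos  = 0;
            rSize = ZSTD_decompressStream(zbd, &outBuffer, &inBuffer);
            if (ZSTD_isError(rSize)) return;
            blockTable[blockNb].resSize = outBuffer.pos;   /* line 431 */
        }
    }
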
454 inf.next_in = (z_const z_Bytef*) blockTable[blockNb].cPtr; in BMK_benchMem()
455 inf.avail_in = (uInt)blockTable[blockNb].cSize; in BMK_benchMem()
457 inf.next_out = (z_Bytef*) blockTable[blockNb].resPtr; in BMK_benchMem()
458 inf.avail_out = (uInt)blockTable[blockNb].srcSize; in BMK_benchMem()
467 blockTable[blockNb].resSize = inf.total_out; in BMK_benchMem()
486 inf.next_in = (z_const z_Bytef*) blockTable[blockNb].cPtr; in BMK_benchMem()
487 inf.avail_in = (uInt)blockTable[blockNb].cSize; in BMK_benchMem()
489 inf.next_out = (z_Bytef*) blockTable[blockNb].resPtr; in BMK_benchMem()
490 inf.avail_out = (uInt)blockTable[blockNb].srcSize; in BMK_benchMem()
501 blockTable[blockNb].resSize = inf.total_out; in BMK_benchMem()
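
Lines 454-467 and 486-501 repeat the two-variant zlib pattern on the way back: again one Z_FINISH call per block, with the known srcSize used as the output capacity. Mirroring deflateOneBlock() above:

    static int inflateOneBlock(z_stream* inf, blockParam_t* bp)
    {
        int ret;
        inf->next_in   = (Bytef*) bp->cPtr;     /* lines 454/486 */
        inf->avail_in  = (uInt) bp->cSize;      /* lines 455/487 */
        inf->next_out  = (Bytef*) bp->resPtr;   /* lines 457/489 */
        inf->avail_out = (uInt) bp->srcSize;    /* lines 458/490 */
        ret = inflate(inf, Z_FINISH);
        if (ret != Z_STREAM_END) return -1;
        bp->resSize = (size_t) inf->total_out;  /* lines 467/501 */
        return 0;
    }
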
529 if (bacc + blockTable[segNb].srcSize > u) break; in BMK_benchMem()
530 bacc += blockTable[segNb].srcSize; in BMK_benchMem()
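
Lines 529-530 are the classic "which block contains global offset u" walk: accumulate per-block source sizes until adding the next block would pass u. In the benchmark this presumably serves the error report after a round-trip mismatch, naming the offending segment. As a standalone helper:

    static unsigned findBlockOfOffset(const blockParam_t* blockTable, unsigned nbBlocks,
                                      size_t u, size_t* posInBlock)
    {
        size_t bacc = 0;   /* bytes accumulated before the current block */
        unsigned segNb;
        for (segNb = 0; segNb < nbBlocks; segNb++) {
            if (bacc + blockTable[segNb].srcSize > u) break;   /* line 529 */
            bacc += blockTable[segNb].srcSize;                 /* line 530 */
        }
        *posInBlock = u - bacc;   /* offset of u within block segNb */
        return segNb;
    }
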
557 free(blockTable); in BMK_benchMem()