/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#ifndef _RTE_COMP_H_
#define _RTE_COMP_H_

/**
 * @file rte_comp.h
 *
 * RTE definitions for Data Compression Service
 *
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_mempool.h>
#include <rte_mbuf.h>

/**
 * Compression service feature flags
 *
 * @note New feature flags should be added to the end of the list
 *
 * Keep these flags synchronised with rte_comp_get_feature_name()
 */
#define RTE_COMP_FF_STATEFUL_COMPRESSION	(1ULL << 0)
/**< Stateful compression is supported */
#define RTE_COMP_FF_STATEFUL_DECOMPRESSION	(1ULL << 1)
/**< Stateful decompression is supported */
#define RTE_COMP_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 2)
/**< Out-of-place Scatter-gather (SGL) buffers,
 * with multiple segments, are supported in input and output
 */
#define RTE_COMP_FF_OOP_SGL_IN_LB_OUT		(1ULL << 3)
/**< Out-of-place Scatter-gather (SGL) buffers are supported
 * in input, combined with linear buffers (LB), with a
 * single segment, in output
 */
#define RTE_COMP_FF_OOP_LB_IN_SGL_OUT		(1ULL << 4)
/**< Out-of-place Scatter-gather (SGL) buffers are supported
 * in output, combined with linear buffers (LB) in input
 */
#define RTE_COMP_FF_ADLER32_CHECKSUM		(1ULL << 5)
/**< Adler-32 checksum is supported */
#define RTE_COMP_FF_CRC32_CHECKSUM		(1ULL << 6)
/**< CRC32 checksum is supported */
#define RTE_COMP_FF_CRC32_ADLER32_CHECKSUM	(1ULL << 7)
/**< Adler-32/CRC32 checksum is supported */
#define RTE_COMP_FF_MULTI_PKT_CHECKSUM		(1ULL << 8)
/**< Generation of checksum across multiple stateless packets is supported */
#define RTE_COMP_FF_SHA1_HASH			(1ULL << 9)
/**< SHA1 hash is supported */
#define RTE_COMP_FF_SHA2_SHA256_HASH		(1ULL << 10)
/**< SHA256 hash of SHA2 family is supported */
#define RTE_COMP_FF_NONCOMPRESSED_BLOCKS	(1ULL << 11)
/**< Creation of non-compressed blocks using RTE_COMP_LEVEL_NONE is supported */
#define RTE_COMP_FF_SHAREABLE_PRIV_XFORM	(1ULL << 12)
/**< Private xforms created by the PMD can be shared
 * across multiple stateless operations. If not set, then the application
 * needs to create as many priv_xforms as it expects to have stateless
 * operations in-flight.
 */
#define RTE_COMP_FF_HUFFMAN_FIXED		(1ULL << 13)
/**< Fixed Huffman encoding is supported */
#define RTE_COMP_FF_HUFFMAN_DYNAMIC		(1ULL << 14)
/**< Dynamic Huffman encoding is supported */
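
/*
 * Illustrative sketch (not part of the API): walking a 64-bit feature-flag
 * mask and printing the name of each supported flag via
 * rte_comp_get_feature_name(), which is declared later in this file. How the
 * mask is obtained (e.g. from a device capability query) is assumed and is
 * outside the scope of this header.
 *
 *	static void
 *	print_comp_features(uint64_t feature_flags)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < 64; i++) {
 *			uint64_t flag = feature_flags & (1ULL << i);
 *			const char *name;
 *
 *			if (flag == 0)
 *				continue;
 *			name = rte_comp_get_feature_name(flag);
 *			if (name != NULL)
 *				printf("  %s\n", name);
 *		}
 *	}
 */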

/** Status of comp operation */
enum rte_comp_op_status {
	RTE_COMP_OP_STATUS_SUCCESS = 0,
	/**< Operation completed successfully */
	RTE_COMP_OP_STATUS_NOT_PROCESSED,
	/**< Operation has not yet been processed by the device */
	RTE_COMP_OP_STATUS_INVALID_ARGS,
	/**< Operation failed due to invalid arguments in request */
	RTE_COMP_OP_STATUS_ERROR,
	/**< Error occurred while handling the operation */
	RTE_COMP_OP_STATUS_INVALID_STATE,
	/**< Operation was invoked in an invalid state */
	RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED,
	/**< Output buffer ran out of space before operation completed.
	 * Error case. Application must resubmit all data with a larger
	 * output buffer.
	 */
	RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE,
	/**< Output buffer ran out of space before operation completed, but this
	 * is not an error case. Output data up to op.produced can be used and
	 * the next op in the stream should continue on from op.consumed+1.
	 */
};
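
/*
 * Illustrative sketch (an assumption about typical usage, not a definitive
 * recipe): after dequeuing a stateful op, a recoverable out-of-space status
 * means the produced output is valid and the next op should resume from the
 * first unconsumed source byte. The helper below only adjusts the offsets of
 * a follow-up op; enqueueing it is left to the application. The struct
 * rte_comp_op fields used here are defined later in this file.
 *
 *	static int
 *	resume_after_out_of_space(const struct rte_comp_op *op,
 *			struct rte_comp_op *next)
 *	{
 *		if (op->status != RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE)
 *			return -1;
 *		next->src.offset = op->src.offset + op->consumed;
 *		next->src.length = op->src.length - op->consumed;
 *		next->dst.offset = 0;
 *		return 0;
 *	}
 */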

/** Compression Algorithms */
enum rte_comp_algorithm {
	RTE_COMP_ALGO_UNSPECIFIED = 0,
	/**< No compression algorithm specified */
	RTE_COMP_ALGO_NULL,
	/**< No compression.
	 * Pass-through, data is copied unchanged from source buffer to
	 * destination buffer.
	 */
	RTE_COMP_ALGO_DEFLATE,
	/**< DEFLATE compression algorithm
	 * https://tools.ietf.org/html/rfc1951
	 */
	RTE_COMP_ALGO_LZS,
	/**< LZS compression algorithm
	 * https://tools.ietf.org/html/rfc2395
	 */
	RTE_COMP_ALGO_LIST_END
};

/** Compression Hash Algorithms */
enum rte_comp_hash_algorithm {
	RTE_COMP_HASH_ALGO_NONE = 0,
	/**< No hash */
	RTE_COMP_HASH_ALGO_SHA1,
	/**< SHA1 hash algorithm */
	RTE_COMP_HASH_ALGO_SHA2_256,
	/**< SHA256 hash algorithm of SHA2 family */
	RTE_COMP_HASH_ALGO_LIST_END
};

/** Compression Level.
 * The number is interpreted by each PMD differently. However, lower numbers
 * give the fastest compression, at the expense of compression ratio, while
 * higher numbers may give better compression ratios but are likely slower.
 */
#define	RTE_COMP_LEVEL_PMD_DEFAULT	(-1)
/** Use PMD Default */
#define	RTE_COMP_LEVEL_NONE		(0)
/** Output uncompressed blocks if supported by the specified algorithm */
#define RTE_COMP_LEVEL_MIN		(1)
/** Use minimum compression level supported by the PMD */
#define RTE_COMP_LEVEL_MAX		(9)
/** Use maximum compression level supported by the PMD */

/** Compression checksum types */
enum rte_comp_checksum_type {
	RTE_COMP_CHECKSUM_NONE,
	/**< No checksum generated */
	RTE_COMP_CHECKSUM_CRC32,
	/**< Generates a CRC32 checksum, as used by gzip */
	RTE_COMP_CHECKSUM_ADLER32,
	/**< Generates an Adler-32 checksum, as used by zlib */
	RTE_COMP_CHECKSUM_CRC32_ADLER32,
	/**< Generates both Adler-32 and CRC32 checksums, concatenated.
	 * CRC32 is in the lower 32 bits, Adler-32 in the upper 32 bits.
	 */
};


/** Compression Huffman Type - used by DEFLATE algorithm */
enum rte_comp_huffman {
	RTE_COMP_HUFFMAN_DEFAULT,
	/**< PMD may choose which Huffman codes to use */
	RTE_COMP_HUFFMAN_FIXED,
	/**< Use Fixed Huffman codes */
	RTE_COMP_HUFFMAN_DYNAMIC,
	/**< Use Dynamic Huffman codes */
};

/** Compression flush flags */
enum rte_comp_flush_flag {
	RTE_COMP_FLUSH_NONE,
	/**< Data is not flushed. Output may remain in the compressor and be
	 * processed during a following op. It may not be possible to decompress
	 * output until a later op with some other flush flag has been sent.
	 */
	RTE_COMP_FLUSH_SYNC,
	/**< All data should be flushed to output buffer. Output data can be
	 * decompressed. However, state and history are not cleared, so future
	 * operations may use history from this operation.
	 */
	RTE_COMP_FLUSH_FULL,
	/**< All data should be flushed to output buffer. Output data can be
	 * decompressed. State and history data are cleared, so future
	 * ops will be independent of ops processed before this.
	 */
	RTE_COMP_FLUSH_FINAL
	/**< Same as RTE_COMP_FLUSH_FULL but if op.algo is RTE_COMP_ALGO_DEFLATE
	 * then the BFINAL bit is set in the last block.
	 */
};

/** Compression transform types */
enum rte_comp_xform_type {
	RTE_COMP_COMPRESS,
	/**< Compression service - compress */
	RTE_COMP_DECOMPRESS,
	/**< Compression service - decompress */
};

/** Compression operation type */
enum rte_comp_op_type {
	RTE_COMP_OP_STATELESS,
	/**< All data to be processed is submitted in the op, no state or
	 * history from previous ops is used and none will be stored for future
	 * ops. Flush flag must be set to either RTE_COMP_FLUSH_FULL or
	 * RTE_COMP_FLUSH_FINAL.
	 */
	RTE_COMP_OP_STATEFUL
	/**< There may be more data to be processed after this op, it is part of
	 * a stream of data. State and history from previous ops can be used
	 * and resulting state and history can be stored for future ops,
	 * depending on flush flag.
	 */
};
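
/*
 * Illustrative sketch (an assumption about typical usage): how op_type and
 * flush_flag relate. A stateless op carries a complete input and must use
 * RTE_COMP_FLUSH_FULL or RTE_COMP_FLUSH_FINAL; a stateful stream typically
 * submits intermediate ops with RTE_COMP_FLUSH_NONE and the last op with
 * RTE_COMP_FLUSH_FINAL. The struct rte_comp_op fields used here are defined
 * later in this file.
 *
 *	static void
 *	set_flush(struct rte_comp_op *op, int is_last_chunk)
 *	{
 *		if (op->op_type == RTE_COMP_OP_STATELESS) {
 *			op->flush_flag = RTE_COMP_FLUSH_FINAL;
 *			return;
 *		}
 *		op->flush_flag = is_last_chunk ?
 *			RTE_COMP_FLUSH_FINAL : RTE_COMP_FLUSH_NONE;
 *	}
 */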


/** Parameters specific to the deflate algorithm */
struct rte_comp_deflate_params {
	enum rte_comp_huffman huffman;
	/**< Compression Huffman encoding type */
};

/** Setup Data for compression */
struct rte_comp_compress_xform {
	enum rte_comp_algorithm algo;
	/**< Algorithm to use for compress operation */
	union {
		struct rte_comp_deflate_params deflate;
		/**< Parameters specific to the deflate algorithm */
	}; /**< Algorithm specific parameters */
	int level;
	/**< Compression level */
	uint8_t window_size;
	/**< Base two log value of sliding window to be used. If window size
	 * can't be supported by the PMD then it may fall back to a smaller
	 * size. This is likely to result in a worse compression ratio.
	 */
	enum rte_comp_checksum_type chksum;
	/**< Type of checksum to generate on the uncompressed data */
	enum rte_comp_hash_algorithm hash_algo;
	/**< Hash algorithm to be used with compress operation. Hash is always
	 * done on plaintext.
	 */
};

/**
 * Setup Data for decompression.
 */
struct rte_comp_decompress_xform {
	enum rte_comp_algorithm algo;
	/**< Algorithm to use for decompression */
	enum rte_comp_checksum_type chksum;
	/**< Type of checksum to generate on the decompressed data */
	uint8_t window_size;
	/**< Base two log value of sliding window which was used to generate
	 * compressed data. If window size can't be supported by the PMD then
	 * setup of stream or private_xform should fail.
	 */
	enum rte_comp_hash_algorithm hash_algo;
	/**< Hash algorithm to be used with decompress operation. Hash is always
	 * done on plaintext.
	 */
};

/**
 * Compression transform structure.
 *
 * This is used to specify the compression transforms required.
 * Each transform structure can hold a single transform, the type field is
 * used to specify which transform is contained within the union.
 */
struct rte_comp_xform {
	enum rte_comp_xform_type type;
	/**< xform type */
	union {
		struct rte_comp_compress_xform compress;
		/**< xform for compress operation */
		struct rte_comp_decompress_xform decompress;
		/**< decompress xform */
	};
};
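
/*
 * Illustrative sketch (not a definitive configuration): filling in an
 * rte_comp_xform for DEFLATE compression with dynamic Huffman encoding, a
 * 2^15 (32KB) sliding window and a CRC32 checksum. The chosen values are
 * examples only; a real application should pick them from the PMD's
 * advertised capabilities.
 *
 *	struct rte_comp_xform xform = {
 *		.type = RTE_COMP_COMPRESS,
 *		.compress = {
 *			.algo = RTE_COMP_ALGO_DEFLATE,
 *			.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC,
 *			.level = RTE_COMP_LEVEL_PMD_DEFAULT,
 *			.window_size = 15,
 *			.chksum = RTE_COMP_CHECKSUM_CRC32,
 *			.hash_algo = RTE_COMP_HASH_ALGO_NONE,
 *		},
 *	};
 */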

/**
 * Compression Operation.
 *
 * This structure contains data relating to performing a compression
 * operation on the referenced mbuf data buffers.
 *
 * Comp operations are enqueued and dequeued in comp PMDs using the
 * rte_compressdev_enqueue_burst() / rte_compressdev_dequeue_burst() APIs.
 */
struct rte_comp_op {
	enum rte_comp_op_type op_type;
	union {
		void *private_xform;
		/**< Stateless private PMD data derived from an rte_comp_xform.
		 * A handle returned by rte_compressdev_private_xform_create()
		 * must be attached to operations of op_type
		 * RTE_COMP_OP_STATELESS.
		 */
		void *stream;
		/**< Private PMD data derived initially from an rte_comp_xform,
		 * which holds state and history data and evolves as operations
		 * are processed. rte_compressdev_stream_create() must be called
		 * on a device for all STATEFUL data streams and the resulting
		 * stream attached to the one or more operations associated
		 * with the data stream.
		 * All operations in a stream must be sent to the same device.
		 */
	};

	struct rte_mempool *mempool;
	/**< Pool from which operation is allocated */
	rte_iova_t iova_addr;
	/**< IOVA address of this operation */
	struct rte_mbuf *m_src;
	/**< Source mbuf.
	 * The total size of the input buffer(s) can be retrieved using
	 * rte_pktmbuf_pkt_len(m_src). The max data size which can fit in a
	 * single mbuf is limited by the uint16_t rte_mbuf.data_len to 64k-1.
	 * If the input data is bigger than this it can be passed to the PMD in
	 * a chain of mbufs if the PMD's capabilities indicate it supports this.
	 */
	struct rte_mbuf *m_dst;
	/**< Destination mbuf.
	 * The total size of the output buffer(s) can be retrieved using
	 * rte_pktmbuf_pkt_len(m_dst). The max data size which can fit in a
	 * single mbuf is limited by the uint16_t rte_mbuf.data_len to 64k-1.
	 * If the output data is expected to be bigger than this a chain of
	 * mbufs can be passed to the PMD if the PMD's capabilities indicate
	 * it supports this.
	 */

	struct {
		uint32_t offset;
		/**< Starting point for compression or decompression,
		 * specified as number of bytes from start of packet in
		 * source buffer.
		 * This offset starts from the first segment
		 * of the buffer, in case the m_src is a chain of mbufs.
		 * Also the starting point for checksum generation in
		 * compress direction.
		 */
		uint32_t length;
		/**< The length, in bytes, of the data in source buffer
		 * to be compressed or decompressed.
		 * Also the length of the data over which the checksum
		 * should be generated in compress direction.
		 */
	} src;
	struct {
		uint32_t offset;
		/**< Starting point for writing output data, specified as
		 * number of bytes from start of packet in dest
		 * buffer.
		 * This offset starts from the first segment
		 * of the buffer, in case the m_dst is a chain of mbufs.
		 * Also the starting point for checksum generation in
		 * decompress direction.
		 */
	} dst;
	struct {
		uint8_t *digest;
		/**< Output buffer to store hash output, if enabled in xform.
		 * The buffer contains a valid value only after an op with
		 * flush flag = RTE_COMP_FLUSH_FULL or RTE_COMP_FLUSH_FINAL
		 * has been processed successfully.
		 *
		 * The buffer should be contiguous and large enough to
		 * accommodate the digest produced by the specific hash algo.
		 */
		rte_iova_t iova_addr;
		/**< IO address of the buffer */
	} hash;
	enum rte_comp_flush_flag flush_flag;
	/**< Defines flush characteristics for the output data.
	 * Only applicable in compress direction
	 */
	uint64_t input_chksum;
	/**< An input checksum can be provided to generate a
	 * cumulative checksum across sequential blocks in a STATELESS stream.
	 * Checksum type is as specified in the xform chksum field.
	 */
	uint64_t output_chksum;
	/**< If a checksum is generated it will be written in here.
	 * Checksum type is as specified in the xform chksum field.
	 */
	uint32_t consumed;
	/**< The number of bytes from the source buffer
	 * which were compressed/decompressed.
	 */
	uint32_t produced;
	/**< The number of compressed/decompressed bytes written to the
	 * destination buffer.
	 */
	uint64_t debug_status;
	/**<
	 * Status of the operation is returned in the status field.
	 * This field allows the PMD to pass back extra
	 * PMD-specific debug information. Value is not defined on the API.
	 */
	uint8_t status;
	/**<
	 * Operation status - use values from enum rte_comp_op_status.
	 * This is reset to
	 * RTE_COMP_OP_STATUS_NOT_PROCESSED on allocation from mempool and
	 * will be set to RTE_COMP_OP_STATUS_SUCCESS after operation
	 * is successfully processed by a PMD.
	 */
} __rte_cache_aligned;
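
/*
 * Illustrative sketch (an assumption about typical usage): preparing a
 * stateless compression op. The mbufs, their data and the private_xform
 * handle are assumed to have been set up elsewhere; enqueueing the op to a
 * device queue pair through the compressdev API is not shown.
 *
 *	static void
 *	prepare_stateless_op(struct rte_comp_op *op, void *priv_xform,
 *			struct rte_mbuf *src, struct rte_mbuf *dst,
 *			uint32_t src_len)
 *	{
 *		op->op_type = RTE_COMP_OP_STATELESS;
 *		op->private_xform = priv_xform;
 *		op->m_src = src;
 *		op->m_dst = dst;
 *		op->src.offset = 0;
 *		op->src.length = src_len;
 *		op->dst.offset = 0;
 *		op->flush_flag = RTE_COMP_FLUSH_FINAL;
 *	}
 */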

/**
 * Creates an operation pool
 *
 * @param name
 *   Compress pool name
 * @param nb_elts
 *   Number of elements in pool
 * @param cache_size
 *   Number of elements to cache on each lcore, see
 *   *rte_mempool_create* for further details about cache size
 * @param user_size
 *   Size of private data to allocate for user with each operation
 * @param socket_id
 *   Socket identifier on which to allocate the memory
 * @return
 *  - On success pointer to mempool
 *  - On failure NULL
 */
struct rte_mempool * __rte_experimental
rte_comp_op_pool_create(const char *name,
		unsigned int nb_elts, unsigned int cache_size,
		uint16_t user_size, int socket_id);

/**
 * Allocate an operation from a mempool with default parameters set
 *
 * @param mempool
 *   Compress operation mempool
 *
 * @return
 * - On success returns a valid rte_comp_op structure
 * - On failure returns NULL
 */
struct rte_comp_op * __rte_experimental
rte_comp_op_alloc(struct rte_mempool *mempool);

/**
 * Bulk allocate operations from a mempool with default parameters set
 *
 * @param mempool
 *   Compress operation mempool
 * @param ops
 *   Array to place allocated operations
 * @param nb_ops
 *   Number of operations to allocate
 * @return
 *   - nb_ops: Success, the nb_ops requested were allocated
 *   - 0: Not enough entries in the mempool; no ops are retrieved.
 */
int __rte_experimental
rte_comp_op_bulk_alloc(struct rte_mempool *mempool,
		struct rte_comp_op **ops, uint16_t nb_ops);

/**
 * Free operation structure
 * If the operation has been allocated from an rte_mempool, then the operation
 * will be returned to the mempool.
 *
 * @param op
 *   Compress operation
 */
void __rte_experimental
rte_comp_op_free(struct rte_comp_op *op);
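
/*
 * Illustrative sketch (pool sizing values are examples only): creating an op
 * pool, allocating an op from it and returning the op when the application
 * is done with it.
 *
 *	struct rte_mempool *pool;
 *	struct rte_comp_op *op;
 *
 *	pool = rte_comp_op_pool_create("comp_op_pool", 8192, 256, 0,
 *			rte_socket_id());
 *	if (pool == NULL)
 *		rte_exit(EXIT_FAILURE, "Cannot create comp op pool\n");
 *
 *	op = rte_comp_op_alloc(pool);
 *	if (op == NULL)
 *		rte_exit(EXIT_FAILURE, "Comp op pool exhausted\n");
 *
 *	... fill in and submit the op ...
 *
 *	rte_comp_op_free(op);
 */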

/**
 * Get the name of a compress service feature flag
 *
 * @param flag
 *   The mask describing the flag
 *
 * @return
 *   The name of this flag, or NULL if it's not a valid feature flag.
 */
const char * __rte_experimental
rte_comp_get_feature_name(uint64_t flag);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_COMP_H_ */