/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#include "rte_comp.h"
#include "rte_compressdev_internal.h"

const char *
rte_comp_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_COMP_FF_STATEFUL_COMPRESSION:
		return "STATEFUL_COMPRESSION";
	case RTE_COMP_FF_STATEFUL_DECOMPRESSION:
		return "STATEFUL_DECOMPRESSION";
	case RTE_COMP_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_COMP_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_COMP_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_COMP_FF_MULTI_PKT_CHECKSUM:
		return "MULTI_PKT_CHECKSUM";
	case RTE_COMP_FF_ADLER32_CHECKSUM:
		return "ADLER32_CHECKSUM";
	case RTE_COMP_FF_CRC32_CHECKSUM:
		return "CRC32_CHECKSUM";
	case RTE_COMP_FF_CRC32_ADLER32_CHECKSUM:
		return "CRC32_ADLER32_CHECKSUM";
	case RTE_COMP_FF_NONCOMPRESSED_BLOCKS:
		return "NONCOMPRESSED_BLOCKS";
	case RTE_COMP_FF_SHA1_HASH:
		return "SHA1_HASH";
	case RTE_COMP_FF_SHA2_SHA256_HASH:
		return "SHA2_SHA256_HASH";
	case RTE_COMP_FF_SHAREABLE_PRIV_XFORM:
		return "SHAREABLE_PRIV_XFORM";
	case RTE_COMP_FF_HUFFMAN_FIXED:
		return "HUFFMAN_FIXED";
	case RTE_COMP_FF_HUFFMAN_DYNAMIC:
		return "HUFFMAN_DYNAMIC";
	default:
		return NULL;
	}
}
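
/*
 * Illustrative sketch only (not part of this file): an application that has
 * already probed a compressdev can pair rte_comp_get_feature_name() with the
 * feature_flags reported by rte_compressdev_info_get() (declared in
 * rte_compressdev.h) to print the device capabilities. The device id 0 is an
 * assumed example value.
 *
 *	struct rte_compressdev_info info;
 *	uint64_t bit;
 *
 *	rte_compressdev_info_get(0, &info);
 *	for (bit = 1; bit != 0; bit <<= 1) {
 *		const char *name =
 *			rte_comp_get_feature_name(info.feature_flags & bit);
 *		if (name != NULL)
 *			printf("  %s\n", name);
 *	}
 */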

/**
 * Reset the fields of an operation to their default values.
 *
 * @note The private data associated with the operation is not zeroed.
 *
 * @param op
 *   The operation to be reset
 */
static inline void
rte_comp_op_reset(struct rte_comp_op *op)
{
	struct rte_mempool *tmp_mp = op->mempool;
	rte_iova_t tmp_iova_addr = op->iova_addr;

	memset(op, 0, sizeof(struct rte_comp_op));
	op->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
	op->iova_addr = tmp_iova_addr;
	op->mempool = tmp_mp;
}

/**
 * Private data structure belonging to an operation pool.
 */
struct rte_comp_op_pool_private {
	uint16_t user_size;
	/**< Size of private user data with each operation. */
};
/**
 * Bulk allocate raw elements from the mempool and return them as
 * comp operations.
 *
 * @param mempool
 *   Compress operation mempool
 * @param ops
 *   Array to place allocated operations
 * @param nb_ops
 *   Number of operations to allocate
 * @return
 *   - nb_ops: Success, the requested number of operations was allocated
 *   - 0: Not enough entries in the mempool; no ops are retrieved.
 */
static inline int
rte_comp_op_raw_bulk_alloc(struct rte_mempool *mempool,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	if (rte_mempool_get_bulk(mempool, (void **)ops, nb_ops) == 0)
		return nb_ops;

	return 0;
}

/** Initialise rte_comp_op mempool element */
static void
rte_comp_op_init(struct rte_mempool *mempool,
		__rte_unused void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned int i)
{
	struct rte_comp_op *op = _op_data;

	memset(_op_data, 0, mempool->elt_size);

	op->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
	op->iova_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}

struct rte_mempool *
rte_comp_op_pool_create(const char *name,
		unsigned int nb_elts, unsigned int cache_size,
		uint16_t user_size, int socket_id)
{
	struct rte_comp_op_pool_private *priv;

	unsigned int elt_size = sizeof(struct rte_comp_op) + user_size;

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_comp_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->user_size < user_size) {
			mp = NULL;
			COMPRESSDEV_LOG(ERR,
				"Mempool %s already exists but with incompatible parameters",
				name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_comp_op_pool_private),
			NULL,
			NULL,
			rte_comp_op_init,
			NULL,
			socket_id,
			0);

	if (mp == NULL) {
		COMPRESSDEV_LOG(ERR, "Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_comp_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->user_size = user_size;

	return mp;
}
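
/*
 * Illustrative usage sketch only (example name, sizes and socket, assuming
 * the EAL has been initialised): create a pool of operations, take one
 * operation from it and return it when done.
 *
 *	struct rte_mempool *pool = rte_comp_op_pool_create("comp_op_pool",
 *			8192, 128, 0, rte_socket_id());
 *	if (pool == NULL)
 *		rte_exit(EXIT_FAILURE, "Cannot create comp op pool\n");
 *
 *	struct rte_comp_op *op = rte_comp_op_alloc(pool);
 *	if (op != NULL) {
 *		... fill in op->m_src, op->m_dst, op->private_xform, etc.
 *		    before enqueueing to a device ...
 *		rte_comp_op_free(op);
 *	}
 */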

struct rte_comp_op *
rte_comp_op_alloc(struct rte_mempool *mempool)
{
	struct rte_comp_op *op = NULL;
	int retval;

	retval = rte_comp_op_raw_bulk_alloc(mempool, &op, 1);
	if (unlikely(retval != 1))
		return NULL;

	rte_comp_op_reset(op);

	return op;
}

int
rte_comp_op_bulk_alloc(struct rte_mempool *mempool,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	int retval;
	uint16_t i;

	retval = rte_comp_op_raw_bulk_alloc(mempool, ops, nb_ops);
	if (unlikely(retval != nb_ops))
		return 0;

	for (i = 0; i < nb_ops; i++)
		rte_comp_op_reset(ops[i]);

	return nb_ops;
}
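
/*
 * Illustrative sketch only (example burst size, "pool" as created above):
 * the bulk API either hands back all nb_ops operations or none, so a single
 * comparison is enough to check the result; the whole burst can later be
 * returned with rte_comp_op_bulk_free().
 *
 *	struct rte_comp_op *burst[32];
 *
 *	if (rte_comp_op_bulk_alloc(pool, burst, 32) == 32) {
 *		... set up and enqueue the operations, then after dequeue ...
 *		rte_comp_op_bulk_free(burst, 32);
 *	}
 */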

/**
 * Free an operation structure.
 * If the operation has been allocated from an rte_mempool, it is returned
 * to the mempool.
 *
 * @param op
 *   Compress operation
 */
void
rte_comp_op_free(struct rte_comp_op *op)
{
	if (op != NULL && op->mempool != NULL)
		rte_mempool_put(op->mempool, op);
}

void
rte_comp_op_bulk_free(struct rte_comp_op **ops, uint16_t nb_ops)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		if (ops[i] != NULL && ops[i]->mempool != NULL)
			rte_mempool_put(ops[i]->mempool, ops[i]);
		ops[i] = NULL;
	}
}