/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "otx2_common.h"
#include "otx2_dev.h"
#include "otx2_mbox.h"

/**
 * @internal
 * Set default NPA configuration.
 */
void
otx2_npa_set_defaults(struct otx2_idev_cfg *idev)
{
	idev->npa_pf_func = 0;
	rte_atomic16_set(&idev->npa_refcnt, 0);
}

/**
 * @internal
 * Get intra device config structure.
 */
struct otx2_idev_cfg *
otx2_intra_dev_get_cfg(void)
{
	const char name[] = "octeontx2_intra_device_conf";
	const struct rte_memzone *mz;
	struct otx2_idev_cfg *idev;

	mz = rte_memzone_lookup(name);
	if (mz != NULL)
		return mz->addr;

	/* Request for the first time */
	mz = rte_memzone_reserve_aligned(name, sizeof(struct otx2_idev_cfg),
					 SOCKET_ID_ANY, 0, OTX2_ALIGN);
	if (mz != NULL) {
		idev = mz->addr;
		idev->sso_pf_func = 0;
		idev->npa_lf = NULL;
		otx2_npa_set_defaults(idev);
		return idev;
	}
	return NULL;
}
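
/*
 * Note: the config above lives in a named memzone, so every OCTEON TX2 PMD
 * in the process (and a secondary process, via rte_memzone_lookup()) gets
 * the same structure. Illustrative use only, not part of this file:
 *
 *	struct otx2_idev_cfg *idev = otx2_intra_dev_get_cfg();
 *
 *	if (idev != NULL)
 *		... read or update the shared fields, preferably through the
 *		    accessors below ...
 */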

/**
 * @internal
 * Get SSO PF_FUNC.
 */
uint16_t
otx2_sso_pf_func_get(void)
{
	struct otx2_idev_cfg *idev;
	uint16_t sso_pf_func;

	sso_pf_func = 0;
	idev = otx2_intra_dev_get_cfg();

	if (idev != NULL)
		sso_pf_func = idev->sso_pf_func;

	return sso_pf_func;
}

/**
 * @internal
 * Set SSO PF_FUNC.
 */
void
otx2_sso_pf_func_set(uint16_t sso_pf_func)
{
	struct otx2_idev_cfg *idev;

	idev = otx2_intra_dev_get_cfg();

	if (idev != NULL) {
		idev->sso_pf_func = sso_pf_func;
		/* Publish the new PF_FUNC before any subsequent stores */
		rte_smp_wmb();
	}
}

/**
 * @internal
 * Get NPA PF_FUNC.
 */
uint16_t
otx2_npa_pf_func_get(void)
{
	struct otx2_idev_cfg *idev;
	uint16_t npa_pf_func;

	npa_pf_func = 0;
	idev = otx2_intra_dev_get_cfg();

	if (idev != NULL)
		npa_pf_func = idev->npa_pf_func;

	return npa_pf_func;
}

/**
 * @internal
 * Get NPA LF object.
 */
struct otx2_npa_lf *
otx2_npa_lf_obj_get(void)
{
	struct otx2_idev_cfg *idev;

	idev = otx2_intra_dev_get_cfg();

	if (idev != NULL && rte_atomic16_read(&idev->npa_refcnt))
		return idev->npa_lf;

	return NULL;
}

/**
 * @internal
 * Is the NPA LF active for the given device?
 */
int
otx2_npa_lf_active(void *otx2_dev)
{
	struct otx2_dev *dev = otx2_dev;
	struct otx2_idev_cfg *idev;

	/* Check if the NPA LF is actively used by this device */
	idev = otx2_intra_dev_get_cfg();
	if (!idev || !idev->npa_lf || idev->npa_lf->mbox != dev->mbox)
		return 0;

	return rte_atomic16_read(&idev->npa_refcnt);
}

/**
 * @internal
 * Take a reference on the NPA LF object, but only if it already exists.
 */
int otx2_npa_lf_obj_ref(void)
{
	struct otx2_idev_cfg *idev;
	uint16_t cnt;
	int rc;

	idev = otx2_intra_dev_get_cfg();

	/* Taking a reference is not possible without the shared config */
	if (idev == NULL)
		return -EINVAL;

	/*
	 * Take a reference only if the count is already non-zero, i.e. the
	 * NPA LF has been set up by some device. Retry the CAS until it
	 * succeeds or the count drops to zero.
	 */
	cnt = rte_atomic16_read(&idev->npa_refcnt);
	while (cnt != 0) {
		rc = rte_atomic16_cmpset(&idev->npa_refcnt_u16, cnt, cnt + 1);
		if (rc)
			break;

		cnt = rte_atomic16_read(&idev->npa_refcnt);
	}

	return cnt ? 0 : -EINVAL;
}
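
/*
 * Illustrative use only, not part of this file: a consumer that needs the
 * shared NPA LF would typically pair the reference with the object lookup,
 * e.g.
 *
 *	if (otx2_npa_lf_obj_ref() == 0) {
 *		struct otx2_npa_lf *lf = otx2_npa_lf_obj_get();
 *
 *		... use lf, and drop the reference when done (handled by the
 *		    NPA teardown path elsewhere in the driver) ...
 *	}
 */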
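
/*
 * Devargs handler for OTX2_NPA_LOCK_MASK: parse the value string as a
 * hexadecimal mask into the uint64_t pointed to by extra_args. strtoull()
 * is called with a NULL endptr, so a malformed value silently yields 0.
 */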
static int
parse_npa_lock_mask(const char *key, const char *value, void *extra_args)
{
	uint64_t val;

	RTE_SET_USED(key);

	val = strtoull(value, NULL, 16);

	*(uint64_t *)extra_args = val;

	return 0;
}

/**
 * @internal
 * Parse common device arguments.
 */
void otx2_parse_common_devargs(struct rte_kvargs *kvlist)
{
	struct otx2_idev_cfg *idev;
	uint64_t npa_lock_mask = 0;

	idev = otx2_intra_dev_get_cfg();

	if (idev == NULL)
		return;

	rte_kvargs_process(kvlist, OTX2_NPA_LOCK_MASK,
			   &parse_npa_lock_mask, &npa_lock_mask);

	idev->npa_lock_mask = npa_lock_mask;
}
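
/*
 * Illustrative use only, not part of this file: assuming OTX2_NPA_LOCK_MASK
 * expands to the "npa_lock_mask" devargs key (see otx2_common.h), passing a
 * devargs string such as "npa_lock_mask=0xF" for the device makes the parsed
 * mask visible as idev->npa_lock_mask to every PMD sharing this intra device
 * config.
 */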

RTE_LOG_REGISTER(otx2_logtype_base, pmd.octeontx2.base, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_mbox, pmd.octeontx2.mbox, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_npa, pmd.mempool.octeontx2, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_nix, pmd.net.octeontx2, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_npc, pmd.net.octeontx2.flow, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_tm, pmd.net.octeontx2.tm, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_sso, pmd.event.octeontx2, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_tim, pmd.event.octeontx2.timer, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_dpi, pmd.raw.octeontx2.dpi, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_ep, pmd.raw.octeontx2.ep, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_ree, pmd.regex.octeontx2, NOTICE);