/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <rte_ethdev_driver.h>
#include <rte_io.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_logs.h"
#include "hns3_intr.h"
#include "hns3_rxtx.h"

#define HNS3_CMD_CODE_OFFSET	2

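/*
 * Map the response codes carried in PF-to-VF mailbox replies to the negative
 * errno values used throughout the driver; codes missing from this table are
 * treated as -EIO by hns3_resp_to_errno().
 */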
static const struct errno_respcode_map err_code_map[] = {
	{0, 0},
	{1, -EPERM},
	{2, -ENOENT},
	{5, -EIO},
	{11, -EAGAIN},
	{12, -ENOMEM},
	{16, -EBUSY},
	{22, -EINVAL},
	{28, -ENOSPC},
	{95, -EOPNOTSUPP},
};

static int
hns3_resp_to_errno(uint16_t resp_code)
{
	uint32_t i, num;

	num = sizeof(err_code_map) / sizeof(struct errno_respcode_map);
	for (i = 0; i < num; i++) {
		if (err_code_map[i].resp_code == resp_code)
			return err_code_map[i].err_no;
	}

	return -EIO;
}

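/*
 * Poll the mailbox response on every active hns3/hns3vf port. Only used when
 * a synchronous mailbox message is sent from the interrupt thread itself,
 * which cannot sleep while waiting for its own handler to run.
 */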
static void
hns3_poll_all_sync_msg(void)
{
	struct rte_eth_dev *eth_dev;
	struct hns3_adapter *adapter;
	const char *name;
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV(port_id) {
		eth_dev = &rte_eth_devices[port_id];
		name = eth_dev->device->driver->name;
		if (strcmp(name, "net_hns3") && strcmp(name, "net_hns3_vf"))
			continue;
		adapter = eth_dev->data->dev_private;
		if (!adapter || adapter->hw.adapter_state == HNS3_NIC_CLOSED)
			continue;
		/* Synchronous msg, the mbx_resp.req_msg_data is non-zero */
		if (adapter->hw.mbx_resp.req_msg_data)
			hns3_dev_handle_mbx_msg(&adapter->hw);
	}
}

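/*
 * Busy-wait up to HNS3_MAX_RETRY_MS for the PF response that matches the
 * request identified by (code0, code1). Returns the PF's response status
 * (0 on success, with up to resp_len bytes of payload copied to resp_data),
 * -ETIME on timeout, or an error immediately if the command queue has been
 * disabled or a reset is pending.
 */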
static int
hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
		  uint8_t *resp_data, uint16_t resp_len)
{
#define HNS3_MAX_RETRY_MS	500
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_mbx_resp_status *mbx_resp;
	bool in_irq = false;
	uint64_t now;
	uint64_t end;

	if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) {
		hns3_err(hw, "VF mbx response len(=%u) exceeds maximum(=%d)",
			 resp_len, HNS3_MBX_MAX_RESP_DATA_SIZE);
		return -EINVAL;
	}

	now = get_timeofday_ms();
	end = now + HNS3_MAX_RETRY_MS;
	while ((hw->mbx_resp.head != hw->mbx_resp.tail + hw->mbx_resp.lost) &&
	       (now < end)) {
		if (rte_atomic16_read(&hw->reset.disable_cmd)) {
			hns3_err(hw, "Don't wait for mbx response because of "
				 "disable_cmd");
			return -EBUSY;
		}

		if (is_reset_pending(hns)) {
			hw->mbx_resp.req_msg_data = 0;
			hns3_err(hw, "Don't wait for mbx response because of "
				 "reset pending");
			return -EIO;
		}

		/*
		 * The mailbox response is handled on the interrupt thread.
		 * A mailbox message sent from the interrupt thread cannot
		 * sleep and wait for that handler, so poll for the response
		 * directly on the interrupt thread instead.
		 */
		if (pthread_equal(hw->irq_thread_id, pthread_self())) {
			in_irq = true;
			hns3_poll_all_sync_msg();
		} else {
			rte_delay_ms(HNS3_POLL_RESPONE_MS);
		}
		now = get_timeofday_ms();
	}
	hw->mbx_resp.req_msg_data = 0;
	if (now >= end) {
		hw->mbx_resp.lost++;
		hns3_err(hw,
			 "VF could not get mbx(%u,%u) head(%u) tail(%u) lost(%u) from PF in_irq:%d",
			 code0, code1, hw->mbx_resp.head, hw->mbx_resp.tail,
			 hw->mbx_resp.lost, in_irq);
		return -ETIME;
	}
	rte_io_rmb();
	mbx_resp = &hw->mbx_resp;

	if (mbx_resp->resp_status)
		return mbx_resp->resp_status;

	if (resp_data)
		memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);

	return 0;
}

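/*
 * Build and send a VF-to-PF mailbox message. With need_resp the call is
 * synchronous: mbx_resp.lock serializes the request/response pair and the
 * function blocks in hns3_get_mbx_resp() until the PF answers or the wait
 * times out; otherwise the message is sent asynchronously.
 *
 * Usage sketch (illustrative only; 'code' and 'subcode' stand for message
 * IDs defined in hns3_mbx.h, not values taken from this file):
 *
 *	uint8_t resp[HNS3_MBX_MAX_RESP_DATA_SIZE];
 *	int ret = hns3_send_mbx_msg(hw, code, subcode, NULL, 0, true,
 *				    resp, sizeof(resp));
 */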
int
hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
		  const uint8_t *msg_data, uint8_t msg_len, bool need_resp,
		  uint8_t *resp_data, uint16_t resp_len)
{
	struct hns3_mbx_vf_to_pf_cmd *req;
	struct hns3_cmd_desc desc;
	bool is_ring_vector_msg;
	int offset;
	int ret;

	req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;

	/* first two bytes are reserved for code & subcode */
	if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) {
		hns3_err(hw,
			 "VF send mbx msg fail, msg len %u exceeds max payload len %d",
			 msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET);
		return -EINVAL;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = code;
	is_ring_vector_msg = (code == HNS3_MBX_MAP_RING_TO_VECTOR) ||
			     (code == HNS3_MBX_UNMAP_RING_TO_VECTOR) ||
			     (code == HNS3_MBX_GET_RING_VECTOR_MAP);
	if (!is_ring_vector_msg)
		req->msg[1] = subcode;
	if (msg_data) {
		offset = is_ring_vector_msg ? 1 : HNS3_CMD_CODE_OFFSET;
		memcpy(&req->msg[offset], msg_data, msg_len);
	}

	/* synchronous send */
	if (need_resp) {
		req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT;
		rte_spinlock_lock(&hw->mbx_resp.lock);
		hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode;
		hw->mbx_resp.head++;
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret) {
			rte_spinlock_unlock(&hw->mbx_resp.lock);
			hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
				 ret);
			return ret;
		}

		ret = hns3_get_mbx_resp(hw, code, subcode, resp_data, resp_len);
		rte_spinlock_unlock(&hw->mbx_resp.lock);
	} else {
		/* asynchronous send */
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret) {
			hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
				 ret);
			return ret;
		}
	}

	return ret;
}

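/*
 * The CRQ (the receive ring of the command queue, which carries PF-to-VF
 * mailbox messages) is empty when the hardware tail pointer has caught up
 * with the driver's next_to_use index.
 */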
static bool
hns3_cmd_crq_empty(struct hns3_hw *hw)
{
	uint32_t tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);

	return tail == hw->cmq.crq.next_to_use;
}

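/*
 * Drain the asynchronous message queue (ARQ). hns3_dev_handle_mbx_msg()
 * copies link-change and reset notifications into this queue; they are
 * decoded and acted upon here.
 */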
static void
hns3_mbx_handler(struct hns3_hw *hw)
{
	struct hns3_mac *mac = &hw->mac;
	enum hns3_reset_level reset_level;
	uint16_t *msg_q;
	uint8_t opcode;
	uint32_t tail;

	tail = hw->arq.tail;

	/* process all the async queue messages */
	while (tail != hw->arq.head) {
		msg_q = hw->arq.msg_q[hw->arq.head];

		opcode = msg_q[0] & 0xff;
		switch (opcode) {
		case HNS3_MBX_LINK_STAT_CHANGE:
			memcpy(&mac->link_speed, &msg_q[2],
			       sizeof(mac->link_speed));
			mac->link_status = rte_le_to_cpu_16(msg_q[1]);
			mac->link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]);
			break;
		case HNS3_MBX_ASSERTING_RESET:
			/* PF has asserted a reset, so the VF should enter the
			 * pending state and poll the hardware reset status
			 * until the reset completes; afterwards the stack is
			 * eventually re-initialized.
			 */
			reset_level = rte_le_to_cpu_16(msg_q[1]);
			hns3_atomic_set_bit(reset_level, &hw->reset.pending);

			hns3_warn(hw, "PF informs reset level %d", reset_level);
			hw->reset.stats.request_cnt++;
			hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
			break;
		default:
			hns3_err(hw, "Fetched unsupported(%u) message from arq",
				 opcode);
			break;
		}

		hns3_mbx_head_ptr_move_arq(hw->arq);
		msg_q = hw->arq.msg_q[hw->arq.head];
	}
}

/*
 * Case 1: the response arrives after the timeout: req_msg_data has been
 * cleared to 0 and no longer matches resp_msg, so decrement 'lost'.
 * Case 2: the previous response arrives while a new send_mbx_msg() is in
 * flight: req_msg_data differs from resp_msg, so decrement 'lost' and keep
 * waiting for the matching response.
 */
static void
hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg)
{
	struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
	uint32_t tail = resp->tail + 1;

	if (tail > resp->head)
		tail = resp->head;
	if (resp->req_msg_data != resp_msg) {
		if (resp->lost)
			resp->lost--;
		hns3_warn(hw, "Received a mismatched response req_msg(%x) "
			  "resp_msg(%x) head(%u) tail(%u) lost(%u)",
			  resp->req_msg_data, resp_msg, resp->head, tail,
			  resp->lost);
	} else if (tail + resp->lost > resp->head) {
		resp->lost--;
		hns3_warn(hw, "Received a new response again resp_msg(%x) "
			  "head(%u) tail(%u) lost(%u)", resp_msg,
			  resp->head, tail, resp->lost);
	}
	rte_io_wmb();
	resp->tail = tail;
}

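/* Log the reason the PF reported for a link failure. */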
static void
hns3_link_fail_parse(struct hns3_hw *hw, uint8_t link_fail_code)
{
	switch (link_fail_code) {
	case HNS3_MBX_LF_NORMAL:
		break;
	case HNS3_MBX_LF_REF_CLOCK_LOST:
		hns3_warn(hw, "Reference clock lost!");
		break;
	case HNS3_MBX_LF_XSFP_TX_DISABLE:
		hns3_warn(hw, "SFP tx is disabled!");
		break;
	case HNS3_MBX_LF_XSFP_ABSENT:
		hns3_warn(hw, "SFP is absent!");
		break;
	default:
		hns3_warn(hw, "Unknown fail code:%u!", link_fail_code);
		break;
	}
}

static void
hns3_handle_link_change_event(struct hns3_hw *hw,
			      struct hns3_mbx_pf_to_vf_cmd *req)
{
#define LINK_STATUS_OFFSET	1
#define LINK_FAIL_CODE_OFFSET	2

	if (!req->msg[LINK_STATUS_OFFSET])
		hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]);

	hns3_update_link_status(hw);
}

static void
hns3_update_port_base_vlan_info(struct hns3_hw *hw,
				struct hns3_mbx_pf_to_vf_cmd *req)
{
#define PVID_STATE_OFFSET	1
	uint16_t new_pvid_state = req->msg[PVID_STATE_OFFSET] ?
		HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
	/*
	 * The hns3 network engine cannot offload more than two VLAN layers,
	 * so such packets may be dropped or carry wrong VLAN tags. If the
	 * hns3 PF kernel ethdev driver sets a PVID for a VF device after
	 * that VF device has been initialized, the PF driver notifies the
	 * VF driver to update its PVID configuration state. The VF driver
	 * applies the new state immediately so that VLAN processing in Tx
	 * and Rx stays correct, but during the window of this state
	 * transition packets may still be lost or tagged incorrectly.
	 */
	if (hw->port_base_vlan_cfg.state != new_pvid_state) {
		hw->port_base_vlan_cfg.state = new_pvid_state;
		hns3_update_all_queues_pvid_proc_en(hw);
	}
}

static void
hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)
{
	if (!promisc_en) {
		/*
		 * When the host closes promiscuous/allmulticast mode via the
		 * hns3 PF kernel ethdev driver because the VF is untrusted,
		 * bring the VF's local state in line with it.
		 */
		hns3_warn(hw, "Promisc mode will be closed by host for being "
			  "untrusted.");
		hw->data->promiscuous = 0;
		hw->data->all_multicast = 0;
	}
}

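/*
 * Main mailbox receive loop: drain the CRQ, record synchronous PF responses
 * in hw->mbx_resp, queue link-change/reset notifications on the ARQ for
 * hns3_mbx_handler(), and handle pushed events (link status, PVID, promisc
 * state) inline. The head pointer is written back afterwards so the
 * firmware can reuse the descriptors.
 */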
void
hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
{
	struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
	struct hns3_cmq_ring *crq = &hw->cmq.crq;
	struct hns3_mbx_pf_to_vf_cmd *req;
	struct hns3_cmd_desc *desc;
	uint32_t msg_data;
	uint16_t *msg_q;
	uint8_t opcode;
	uint16_t flag;
	uint8_t *temp;
	int i;

	while (!hns3_cmd_crq_empty(hw)) {
		if (rte_atomic16_read(&hw->reset.disable_cmd))
			return;

		desc = &crq->desc[crq->next_to_use];
		req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
		opcode = req->msg[0] & 0xff;

		flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag);
		if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) {
			hns3_warn(hw,
				  "dropped invalid mailbox message, code = %u",
				  opcode);

			/* dropping/not processing this invalid message */
			crq->desc[crq->next_to_use].flag = 0;
			hns3_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		switch (opcode) {
		case HNS3_MBX_PF_VF_RESP:
			resp->resp_status = hns3_resp_to_errno(req->msg[3]);

			temp = (uint8_t *)&req->msg[4];
			for (i = 0; i < HNS3_MBX_MAX_RESP_DATA_SIZE; i++) {
				resp->additional_info[i] = *temp;
				temp++;
			}
			msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2];
			hns3_update_resp_position(hw, msg_data);
			break;
		case HNS3_MBX_LINK_STAT_CHANGE:
		case HNS3_MBX_ASSERTING_RESET:
			msg_q = hw->arq.msg_q[hw->arq.tail];
			memcpy(&msg_q[0], req->msg,
			       HNS3_MBX_MAX_ARQ_MSG_SIZE * sizeof(uint16_t));
			hns3_mbx_tail_ptr_move_arq(hw->arq);

			hns3_mbx_handler(hw);
			break;
		case HNS3_MBX_PUSH_LINK_STATUS:
			hns3_handle_link_change_event(hw, req);
			break;
		case HNS3_MBX_PUSH_VLAN_INFO:
			/*
			 * The PF driver sends this message when the hns3 PF
			 * kernel driver changes the PVID configuration state
			 * of the VF device.
			 */
			hns3_update_port_base_vlan_info(hw, req);
			break;
		case HNS3_MBX_PUSH_PROMISC_INFO:
			/*
			 * The PF driver sends this message when the hns3 PF
			 * kernel driver changes the trust status of the VF
			 * device.
			 */
			hns3_handle_promisc_info(hw, req->msg[1]);
			break;
		default:
			hns3_err(hw,
				 "VF received unsupported(%u) mbx msg from PF",
				 opcode);
			break;
		}

		crq->desc[crq->next_to_use].flag = 0;
		hns3_mbx_ring_ptr_move_crq(crq);
	}

	/* Write back the CMDQ_RQ head pointer; the IMP needs this pointer */
	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use);
}