Lines matching refs: dev_info

All matches fall in the two ioctl handlers of the DPDK KNI kernel module, kni_ioctl_create() and kni_ioctl_release() in kni_misc.c; the numbers below are source line numbers.
in kni_ioctl_create():

    295  struct rte_kni_device_info dev_info;  (local)
    301  if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
    305  if (copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info)))
    309  if (strnlen(dev_info.name, sizeof(dev_info.name)) == sizeof(dev_info.name)) {
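The three checks above are the usual ioctl argument pattern: bound the copy by the size encoded in the command number, copy the struct in from userspace, and insist on a NUL-terminated name. A hypothetical userspace sketch of the call that reaches them, assuming the /dev/kni node and the RTE_KNI_IOCTL_CREATE command from rte_kni_common.h (create_kni() itself is invented for illustration):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    #include <rte_kni_common.h>  /* struct rte_kni_device_info, RTE_KNI_IOCTL_CREATE */

    /* Hypothetical helper, for illustration only. */
    static int create_kni(const char *name)
    {
        struct rte_kni_device_info dev_info;
        int fd, ret;

        fd = open("/dev/kni", O_RDWR);
        if (fd < 0)
            return -1;

        memset(&dev_info, 0, sizeof(dev_info));
        /* Leave room for the NUL: the strnlen() check above rejects a
         * name that fills the whole buffer. */
        strncpy(dev_info.name, name, sizeof(dev_info.name) - 1);

        /* _IOC_SIZE(RTE_KNI_IOCTL_CREATE) equals sizeof(dev_info), so the
         * size check passes and copy_from_user() copies the full struct. */
        ret = ioctl(fd, RTE_KNI_IOCTL_CREATE, &dev_info);
        close(fd);
        return ret;
    }

In practice the real caller, rte_kni_alloc() in librte_kni, also fills in the queue addresses and MTU fields before issuing the ioctl.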
    317  if (dev_info.force_bind && !cpu_online(dev_info.core_id)) {
    318  pr_err("cpu %u is not online\n", dev_info.core_id);
    325  if (kni_check_param(dev, &dev_info) < 0) {
    332  net_dev = alloc_netdev(sizeof(struct kni_dev), dev_info.name,
    338  pr_err("error allocating device \"%s\"\n", dev_info.name);
    347  kni->core_id = dev_info.core_id;
    348  strncpy(kni->name, dev_info.name, RTE_KNI_NAMESIZE);
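The alloc_netdev() call on line 332 is truncated by the match. A sketch of its completion, assuming the kni_net_init() setup callback from kni_net.c; the real call is wrapped in kernel-version #ifdefs around the NET_NAME_USER argument:

    /* Allocate the net_device plus a trailing private area sized for
     * struct kni_dev; kni_net_init() fills in the device ops. */
    net_dev = alloc_netdev(sizeof(struct kni_dev), dev_info.name,
                           NET_NAME_USER, kni_net_init);
    if (net_dev == NULL) {
        pr_err("error allocating device \"%s\"\n", dev_info.name);
        return -EBUSY;
    }

    /* kni points into that private area; the core_id and name copies on
     * lines 347-348 land here. */
    kni = netdev_priv(net_dev);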
    351  if (dev_info.iova_mode) {
    353  kni->tx_q = iova_to_kva(current, dev_info.tx_phys);
    354  kni->rx_q = iova_to_kva(current, dev_info.rx_phys);
    355  kni->alloc_q = iova_to_kva(current, dev_info.alloc_phys);
    356  kni->free_q = iova_to_kva(current, dev_info.free_phys);
    358  kni->req_q = iova_to_kva(current, dev_info.req_phys);
    359  kni->resp_q = iova_to_kva(current, dev_info.resp_phys);
    360  kni->sync_va = dev_info.sync_va;
    361  kni->sync_kva = iova_to_kva(current, dev_info.sync_phys);
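In IOVA mode the *_phys fields carry I/O virtual addresses of the calling process rather than physical addresses, so each one is resolved through current's page tables. A condensed sketch following the shape of the helpers in kni_dev.h; the get_user_pages_remote() signature has changed across kernel versions, so treat this as one plausible variant:

    static inline phys_addr_t
    iova_to_phys(struct task_struct *tsk, unsigned long iova)
    {
        phys_addr_t offset, phys_addr;
        struct page *page = NULL;
        long ret;

        offset = iova & (PAGE_SIZE - 1);

        /* Pin the backing page through the caller's mm. */
        ret = get_user_pages_remote(tsk, tsk->mm, iova, 1, FOLL_TOUCH,
                                    &page, NULL, NULL);
        if (ret < 0)
            return 0;

        phys_addr = page_to_phys(page) | offset;
        put_page(page);
        return phys_addr;
    }

    static inline void *
    iova_to_kva(struct task_struct *tsk, unsigned long iova)
    {
        /* Physical address -> kernel linear-map address. */
        return phys_to_virt(iova_to_phys(tsk, iova));
    }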
    370  kni->tx_q = phys_to_virt(dev_info.tx_phys);
    371  kni->rx_q = phys_to_virt(dev_info.rx_phys);
    372  kni->alloc_q = phys_to_virt(dev_info.alloc_phys);
    373  kni->free_q = phys_to_virt(dev_info.free_phys);
    375  kni->req_q = phys_to_virt(dev_info.req_phys);
    376  kni->resp_q = phys_to_virt(dev_info.resp_phys);
    377  kni->sync_va = dev_info.sync_va;
    378  kni->sync_kva = phys_to_virt(dev_info.sync_phys);
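In the default (non-IOVA) branch the same fields really are physical addresses, and phys_to_virt() suffices because hugepage-backed DPDK memory lives in the kernel's linear mapping. For reference, an abridged excerpt of the structure these fields come from, with names taken from the listing and types as assumed from rte_kni_common.h:

    struct rte_kni_device_info {
        char name[RTE_KNI_NAMESIZE];  /* interface name */

        phys_addr_t tx_phys;          /* packet fifos */
        phys_addr_t rx_phys;
        phys_addr_t alloc_phys;
        phys_addr_t free_phys;

        phys_addr_t req_phys;         /* request/response fifos */
        phys_addr_t resp_phys;
        phys_addr_t sync_phys;
        void *sync_va;

        uint32_t core_id;             /* core to bind the kthread to */
        uint8_t force_bind : 1;       /* enforce that binding */

        unsigned int mbuf_size;
        unsigned int mtu;
        unsigned int min_mtu;
        unsigned int max_mtu;
        uint8_t mac_addr[6];
        uint8_t iova_mode;            /* 0: physical addresses, 1: IOVAs */
        /* ... remaining fields omitted ... */
    };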
    382  kni->mbuf_size = dev_info.mbuf_size;

    (lines 385-395 are the argument halves of pr_debug() pairs logging each
    translated queue address)

    385  (unsigned long long) dev_info.tx_phys, kni->tx_q);
    387  (unsigned long long) dev_info.rx_phys, kni->rx_q);
    389  (unsigned long long) dev_info.alloc_phys, kni->alloc_q);
    391  (unsigned long long) dev_info.free_phys, kni->free_q);
    393  (unsigned long long) dev_info.req_phys, kni->req_q);
    395  (unsigned long long) dev_info.resp_phys, kni->resp_q);

    399  if (is_valid_ether_addr(dev_info.mac_addr))
    400  memcpy(net_dev->dev_addr, dev_info.mac_addr, ETH_ALEN);
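The match shows only the valid-address branch; when userspace supplies no usable MAC, the handler falls back to a generated one. A sketch of that else branch, assuming the kernel's standard random-address helper:

    if (is_valid_ether_addr(dev_info.mac_addr))
        memcpy(net_dev->dev_addr, dev_info.mac_addr, ETH_ALEN);
    else
        /* Assumed fallback: a random locally administered unicast MAC,
         * with addr_assign_type marked as NET_ADDR_RANDOM. */
        eth_hw_addr_random(net_dev);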
    408  if (dev_info.mtu)
    409  net_dev->mtu = dev_info.mtu;
    413  if (dev_info.min_mtu)
    414  net_dev->min_mtu = dev_info.min_mtu;
    416  if (dev_info.max_mtu)
    417  net_dev->max_mtu = dev_info.max_mtu;
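All three MTU fields are optional: zero leaves the netdev default in place, while non-zero values override the initial MTU and, on kernels that have them, the min_mtu/max_mtu bounds enforced by the core dev_set_mtu() path. A hypothetical jumbo-frame configuration on the userspace side might look like:

    dev_info.mtu     = 9000;  /* initial net_dev->mtu */
    dev_info.min_mtu = 68;    /* smallest MTU the interface accepts */
    dev_info.max_mtu = 9600;  /* dev_set_mtu() rejects anything larger */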
    (line 423 is the argument line of the error report printed when
    register_netdev() fails)

    423  ret, dev_info.name);
    432  ret = kni_run_thread(knet, kni, dev_info.force_bind);
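Line 432 hands force_bind to the thread setup. A condensed sketch of that step, following the shape of kni_run_thread() in kni_misc.c: one shared receive kthread by default, or one kthread per device in multiple-kthread mode, pinned to core_id when force_bind is set (error handling abridged):

    if (multiple_kthread_on) {
        kni->pthread = kthread_create(kni_thread_multiple, (void *)kni,
                                      "kni_%s", kni->name);
        if (force_bind)
            kthread_bind(kni->pthread, kni->core_id);
        wake_up_process(kni->pthread);
    }
    /* else: a single shared "kni_single" kthread is created once, under
     * knet->kni_kthread_lock, and serves every device. */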
in kni_ioctl_release():

    450  struct rte_kni_device_info dev_info;  (local)
    452  if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
    455  if (copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info)))
    459  if (strlen(dev_info.name) == 0)
    464  if (strncmp(dev->name, dev_info.name, RTE_KNI_NAMESIZE) != 0)

    (line 479 is the argument line of the pr_info() reporting the result)

    479  (ret == 0 ? "Successfully" : "Unsuccessfully"), dev_info.name);
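Release keys only on the interface name, which is why the handler rejects an empty name and matches devices with strncmp(). A hypothetical userspace sketch mirroring the create call above, assuming RTE_KNI_IOCTL_RELEASE from rte_kni_common.h:

    struct rte_kni_device_info dev_info;

    memset(&dev_info, 0, sizeof(dev_info));
    /* Only the name matters on release; "vEth0" is an example name. */
    strncpy(dev_info.name, "vEth0", sizeof(dev_info.name) - 1);

    if (ioctl(fd, RTE_KNI_IOCTL_RELEASE, &dev_info) < 0)
        perror("RTE_KNI_IOCTL_RELEASE");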