Lines matching refs: dev_info

Every reference to dev_info falls in one of two ioctl handlers, kni_ioctl_create() and kni_ioctl_release(). The matched lines are grouped by function below; "..." marks unmatched lines elided by the search.
In kni_ioctl_create():

    struct rte_kni_device_info dev_info;    /* local */
    ...
    if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
        ...
    if (copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info)))
        ...
    if (strnlen(dev_info.name, sizeof(dev_info.name)) == sizeof(dev_info.name)) {
        ...
    if (dev_info.force_bind && !cpu_online(dev_info.core_id)) {
        pr_err("cpu %u is not online\n", dev_info.core_id);
        ...
    if (kni_check_param(dev, &dev_info) < 0) {
        ...
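Everything dev_info-related up to this point is input validation: the payload size is bounded by _IOC_SIZE(ioctl_num) before copy_from_user() fills the struct, strnlen() rejects names with no NUL terminator inside the buffer, cpu_online() vets the requested core when force_bind is set, and kni_check_param() guards against a clash with an existing device. A minimal, self-contained sketch of the same copy-and-validate pattern; the struct and handler names (demo_info, demo_ioctl) are hypothetical, not KNI's:

    #include <linux/ioctl.h>      /* _IOC_SIZE() */
    #include <linux/uaccess.h>    /* copy_from_user() */
    #include <linux/string.h>     /* strnlen() */
    #include <linux/errno.h>

    struct demo_info {
        char name[32];
        unsigned int core_id;
    };

    static long demo_ioctl(unsigned int ioctl_num, unsigned long ioctl_param)
    {
        struct demo_info info;

        /* Refuse payloads larger than the kernel-side buffer. */
        if (_IOC_SIZE(ioctl_num) > sizeof(info))
            return -EINVAL;

        /* copy_from_user() returns the number of bytes it could NOT copy. */
        if (copy_from_user(&info, (void __user *)ioctl_param, sizeof(info)))
            return -EFAULT;

        /* strnlen() == sizeof(buf) means there is no NUL in the buffer. */
        if (strnlen(info.name, sizeof(info.name)) == sizeof(info.name))
            return -EINVAL;

        return 0;
    }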
    net_dev = alloc_netdev(sizeof(struct kni_dev), dev_info.name,
        ...
        pr_err("error allocating device \"%s\"\n", dev_info.name);
    ...
    kni->core_id = dev_info.core_id;
    strncpy(kni->name, dev_info.name, RTE_KNI_NAMESIZE);
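alloc_netdev() reserves a struct kni_dev private area alongside the net_device, and the validated name is then copied into it; the strncpy() is only safe because the strnlen() check above already guaranteed a terminator within the buffer. A sketch of the alloc_netdev()/netdev_priv() pairing on kernels that take a name-assignment type; demo_priv, demo_setup, and demo_create are illustrative names, not KNI's:

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/string.h>

    struct demo_priv {
        char name[32];
    };

    static void demo_setup(struct net_device *dev)
    {
        ether_setup(dev);    /* sane Ethernet defaults */
    }

    static struct net_device *demo_create(const char *name)
    {
        /* The private area is allocated together with the net_device
         * and retrieved with netdev_priv(). */
        struct net_device *dev = alloc_netdev(sizeof(struct demo_priv),
                                              name, NET_NAME_USER,
                                              demo_setup);
        struct demo_priv *priv;

        if (!dev)
            return NULL;

        priv = netdev_priv(dev);
        strscpy(priv->name, name, sizeof(priv->name));
        return dev;
    }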
    if (dev_info.iova_mode) {
        ...
        kni->tx_q = iova_to_kva(current, dev_info.tx_phys);
        kni->rx_q = iova_to_kva(current, dev_info.rx_phys);
        kni->alloc_q = iova_to_kva(current, dev_info.alloc_phys);
        kni->free_q = iova_to_kva(current, dev_info.free_phys);

        kni->req_q = iova_to_kva(current, dev_info.req_phys);
        kni->resp_q = iova_to_kva(current, dev_info.resp_phys);
        kni->sync_va = dev_info.sync_va;
        kni->sync_kva = iova_to_kva(current, dev_info.sync_phys);
        ...
    } else {
        kni->tx_q = phys_to_virt(dev_info.tx_phys);
        kni->rx_q = phys_to_virt(dev_info.rx_phys);
        kni->alloc_q = phys_to_virt(dev_info.alloc_phys);
        kni->free_q = phys_to_virt(dev_info.free_phys);

        kni->req_q = phys_to_virt(dev_info.req_phys);
        kni->resp_q = phys_to_virt(dev_info.resp_phys);
        kni->sync_va = dev_info.sync_va;
        kni->sync_kva = phys_to_virt(dev_info.sync_phys);
    }
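Both branches turn addresses supplied by userspace into kernel virtual addresses for the shared FIFOs. In IOVA mode the translation goes through iova_to_kva() and takes current, which suggests it resolves against the calling process's address space; otherwise the values are treated as physical addresses and mapped with phys_to_virt(), which is only valid for normal system RAM covered by the kernel's linear mapping. A sketch of the physical-address branch under that assumption; the demo_* names are illustrative:

    #include <linux/io.h>       /* phys_to_virt() */
    #include <linux/types.h>    /* phys_addr_t */

    struct demo_queues {
        void *tx_q;
        void *rx_q;
    };

    static void demo_map_queues(struct demo_queues *q,
                                phys_addr_t tx_phys, phys_addr_t rx_phys)
    {
        /* No ioremap() needed: the pages are ordinary system RAM
         * already present in the kernel's linear mapping. */
        q->tx_q = phys_to_virt(tx_phys);
        q->rx_q = phys_to_virt(rx_phys);
    }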
    kni->mbuf_size = dev_info.mbuf_size;

    /* argument lines of six pr_debug() calls; each format string sits on
     * the preceding line, which the search did not match */
        (unsigned long long) dev_info.tx_phys, kni->tx_q);
        (unsigned long long) dev_info.rx_phys, kni->rx_q);
        (unsigned long long) dev_info.alloc_phys, kni->alloc_q);
        (unsigned long long) dev_info.free_phys, kni->free_q);
        (unsigned long long) dev_info.req_phys, kni->req_q);
        (unsigned long long) dev_info.resp_phys, kni->resp_q);
    if (is_valid_ether_addr(dev_info.mac_addr))
        memcpy(net_dev->dev_addr, dev_info.mac_addr, ETH_ALEN);
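is_valid_ether_addr() accepts only a non-zero unicast MAC, so a bogus user-supplied address is ignored here. One caveat for newer kernels: net_device->dev_addr became const (around Linux 5.17), so the memcpy() would have to go through a helper instead. A sketch with a random fallback, assuming a kernel recent enough to have eth_hw_addr_set():

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>

    static void demo_set_mac(struct net_device *dev, const u8 *mac)
    {
        if (is_valid_ether_addr(mac))
            eth_hw_addr_set(dev, mac);    /* unicast and non-zero */
        else
            eth_hw_addr_random(dev);      /* locally administered random */
    }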
    if (dev_info.mtu)
        net_dev->mtu = dev_info.mtu;
    ...
    if (dev_info.min_mtu)
        net_dev->min_mtu = dev_info.min_mtu;

    if (dev_info.max_mtu)
        net_dev->max_mtu = dev_info.max_mtu;
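Each MTU field is forwarded only when non-zero, so unset values keep the kernel defaults. The min_mtu/max_mtu fields exist since Linux 4.10, where dev_set_mtu() enforces the range centrally instead of every driver open-coding the check. A sketch of the same override-only-when-set pattern:

    #include <linux/netdevice.h>

    static void demo_set_mtu(struct net_device *dev,
                             unsigned int mtu,
                             unsigned int min_mtu,
                             unsigned int max_mtu)
    {
        /* Zero means "not supplied"; keep the kernel's defaults. */
        if (mtu)
            dev->mtu = mtu;
        if (min_mtu)
            dev->min_mtu = min_mtu;
        if (max_mtu)
            dev->max_mtu = max_mtu;
    }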
    /* argument line of a registration failure message; the pr_err()
     * format string is on the preceding, unmatched line */
        ret, dev_info.name);
    ...
    ret = kni_run_thread(knet, kni, dev_info.force_bind);
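The last two uses hand dev_info off: its name feeds the registration error message, and force_bind tells kni_run_thread() whether to pin the per-device kernel thread to the core_id validated by the cpu_online() check earlier. A sketch of registration with error reporting; demo_register is an illustrative name:

    #include <linux/netdevice.h>
    #include <linux/printk.h>

    static int demo_register(struct net_device *dev)
    {
        int ret = register_netdev(dev);

        if (ret)
            pr_err("error %d registering device \"%s\"\n",
                   ret, dev->name);
        return ret;
    }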
In kni_ioctl_release():

    struct rte_kni_device_info dev_info;    /* local */

    if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
        ...
    if (copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info)))
        ...
    if (strlen(dev_info.name) == 0)
        ...
    if (strncmp(dev->name, dev_info.name, RTE_KNI_NAMESIZE) != 0)
        ...
    /* argument line of the final status pr_info(); format string unmatched */
        (ret == 0 ? "Successfully" : "Unsuccessfully"), dev_info.name);
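The release path repeats the size check and copy_from_user(), then looks the device up by name. Note the asymmetry with the create path: release tests the name with plain strlen() rather than strnlen(), so it relies on the userspace buffer being NUL-terminated without re-verifying it. The lookup itself is a strncmp() against each registered device; a sketch of that name-based search, where the demo_* names and the lock-free list walk are simplifications:

    #include <linux/list.h>
    #include <linux/string.h>

    #define DEMO_NAMESIZE 32

    struct demo_dev {
        struct list_head list;
        char name[DEMO_NAMESIZE];
    };

    /* Return the first device whose name matches, or NULL. */
    static struct demo_dev *demo_find(struct list_head *head, const char *name)
    {
        struct demo_dev *dev;

        list_for_each_entry(dev, head, list) {
            if (strncmp(dev->name, name, DEMO_NAMESIZE) == 0)
                return dev;
        }
        return NULL;
    }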