Lines matching refs: pkg (each match shows the source line, followed by the enclosing function and whether pkg is an argument or a local)
167 tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg) in tb_cfg_request_find() argument
171 mutex_lock(&pkg->ctl->request_queue_lock); in tb_cfg_request_find()
172 list_for_each_entry(iter, &pkg->ctl->request_queue, list) { in tb_cfg_request_find()
174 if (iter->match(iter, pkg)) { in tb_cfg_request_find()
180 mutex_unlock(&pkg->ctl->request_queue_lock); in tb_cfg_request_find()
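
A sketch of how the tb_cfg_request_find() fragments above fit together: the lookup walks the control channel's request queue under request_queue_lock and returns the first pending request whose match() callback accepts the incoming packet. The reference get/put helpers and the return path are not part of the listing and are assumptions.

static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
	struct tb_cfg_request *req = NULL, *iter;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
		/* Pin the candidate before testing it (assumed helpers). */
		tb_cfg_request_get(iter);
		if (iter->match(iter, pkg)) {
			req = iter;
			break;
		}
		tb_cfg_request_put(iter);
	}
	mutex_unlock(&pkg->ctl->request_queue_lock);

	return req;
}
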
188 static int check_header(const struct ctl_pkg *pkg, u32 len, in check_header() argument
191 struct tb_cfg_header *header = pkg->buffer; in check_header()
194 if (WARN(len != pkg->frame.size, in check_header()
196 len, pkg->frame.size)) in check_header()
198 if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n", in check_header()
199 type, pkg->frame.eof)) in check_header()
201 if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n", in check_header()
202 pkg->frame.sof)) in check_header()
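
Assembled from the fragments, check_header() validates a received frame against what the caller expects: the length must equal pkg->frame.size, the end-of-frame type must match, and the start-of-frame field must be zero. The -EIO returns and the trailing route comparison are assumptions (additional header sanity checks are omitted here).

static int check_header(const struct ctl_pkg *pkg, u32 len,
			enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* Frame metadata checks taken from the fragments above. */
	if (WARN(len != pkg->frame.size,
		 "wrong framesize (expected %#x, got %#x)\n",
		 len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
		 type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
		 pkg->frame.sof))
		return -EIO;

	/* Assumed: the header is checked against the expected route. */
	if (WARN(route != tb_cfg_get_route(header),
		 "wrong route (expected %llx, got %llx)\n",
		 route, tb_cfg_get_route(header)))
		return -EIO;

	return 0;
}
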
240 struct cfg_error_pkg *pkg = response->buffer; in decode_error() local
242 res.response_route = tb_cfg_get_route(&pkg->header); in decode_error()
244 res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR, in decode_error()
245 tb_cfg_get_route(&pkg->header)); in decode_error()
250 res.tb_error = pkg->error; in decode_error()
251 res.response_port = pkg->port; in decode_error()
256 static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len, in parse_header() argument
259 struct tb_cfg_header *header = pkg->buffer; in parse_header()
262 if (pkg->frame.eof == TB_CFG_PKG_ERROR) in parse_header()
263 return decode_error(pkg); in parse_header()
267 res.err = check_header(pkg, len, type, route); in parse_header()
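
parse_header() and decode_error() work as a pair: error frames (pkg->frame.eof == TB_CFG_PKG_ERROR) are decoded into a tb_cfg_result carrying the route, port and Thunderbolt error code from struct cfg_error_pkg, while everything else goes through check_header(). A sketch of the non-error path, with the result initialization assumed:

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_route = tb_cfg_get_route(header);	/* assumed here */
	res.err = check_header(pkg, len, type, route);
	return res;
}
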
318 static void tb_ctl_pkg_free(struct ctl_pkg *pkg) in tb_ctl_pkg_free() argument
320 if (pkg) { in tb_ctl_pkg_free()
321 dma_pool_free(pkg->ctl->frame_pool, in tb_ctl_pkg_free()
322 pkg->buffer, pkg->frame.buffer_phy); in tb_ctl_pkg_free()
323 kfree(pkg); in tb_ctl_pkg_free()
329 struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL); in tb_ctl_pkg_alloc() local
330 if (!pkg) in tb_ctl_pkg_alloc()
332 pkg->ctl = ctl; in tb_ctl_pkg_alloc()
333 pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL, in tb_ctl_pkg_alloc()
334 &pkg->frame.buffer_phy); in tb_ctl_pkg_alloc()
335 if (!pkg->buffer) { in tb_ctl_pkg_alloc()
336 kfree(pkg); in tb_ctl_pkg_alloc()
339 return pkg; in tb_ctl_pkg_alloc()
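
The allocation and free paths shown above pair a kzalloc'ed struct ctl_pkg with a frame buffer taken from the control channel's DMA pool; the bus address is stored in pkg->frame.buffer_phy so the ring can DMA into or out of it. A sketch, with the NULL returns assumed:

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool, pkg->buffer,
			      pkg->frame.buffer_phy);
		kfree(pkg);
	}
}
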
348 struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame); in tb_ctl_tx_callback() local
349 tb_ctl_pkg_free(pkg); in tb_ctl_tx_callback()
363 struct ctl_pkg *pkg; in tb_ctl_tx() local
373 pkg = tb_ctl_pkg_alloc(ctl); in tb_ctl_tx()
374 if (!pkg) in tb_ctl_tx()
376 pkg->frame.callback = tb_ctl_tx_callback; in tb_ctl_tx()
377 pkg->frame.size = len + 4; in tb_ctl_tx()
378 pkg->frame.sof = type; in tb_ctl_tx()
379 pkg->frame.eof = type; in tb_ctl_tx()
383 cpu_to_be32_array(pkg->buffer, data, len / 4); in tb_ctl_tx()
384 *(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len); in tb_ctl_tx()
386 res = tb_ring_tx(ctl->tx, &pkg->frame); in tb_ctl_tx()
388 tb_ctl_pkg_free(pkg); in tb_ctl_tx()
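
The transmit fragments show the on-wire layout: the payload is byte-swapped to big endian in 32-bit words, a 4-byte CRC is appended (hence frame.size = len + 4), and both sof and eof carry the packet type. A sketch assembled from the fragments, with the length validation and error returns assumed:

static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
		     enum tb_cfg_pkg_type type)
{
	struct ctl_pkg *pkg;
	int res;

	if (len % 4 != 0)		/* assumed: needed for the word swap */
		return -EINVAL;

	pkg = tb_ctl_pkg_alloc(ctl);
	if (!pkg)
		return -ENOMEM;
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;	/* payload + trailing CRC */
	pkg->frame.sof = type;
	pkg->frame.eof = type;
	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *)(pkg->buffer + len) = tb_crc(pkg->buffer, len);

	res = tb_ring_tx(ctl->tx, &pkg->frame);
	if (res)			/* ring stopped, nobody else frees it */
		tb_ctl_pkg_free(pkg);
	return res;
}
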
396 struct ctl_pkg *pkg, size_t size) in tb_ctl_handle_event() argument
398 trace_tb_event(ctl->index, type, pkg->buffer, size); in tb_ctl_handle_event()
399 return ctl->callback(ctl->callback_data, type, pkg->buffer, size); in tb_ctl_handle_event()
402 static void tb_ctl_rx_submit(struct ctl_pkg *pkg) in tb_ctl_rx_submit() argument
404 tb_ring_rx(pkg->ctl->rx, &pkg->frame); /* in tb_ctl_rx_submit()
412 static int tb_async_error(const struct ctl_pkg *pkg) in tb_async_error() argument
414 const struct cfg_error_pkg *error = pkg->buffer; in tb_async_error()
416 if (pkg->frame.eof != TB_CFG_PKG_ERROR) in tb_async_error()
441 struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame); in tb_ctl_rx_callback() local
452 tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n", in tb_ctl_rx_callback()
458 crc32 = tb_crc(pkg->buffer, frame->size); in tb_ctl_rx_callback()
459 be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4); in tb_ctl_rx_callback()
467 if (*(__be32 *)(pkg->buffer + frame->size) != crc32) { in tb_ctl_rx_callback()
468 tb_ctl_err(pkg->ctl, in tb_ctl_rx_callback()
472 if (tb_async_error(pkg)) { in tb_ctl_rx_callback()
473 tb_ctl_handle_event(pkg->ctl, frame->eof, in tb_ctl_rx_callback()
474 pkg, frame->size); in tb_ctl_rx_callback()
482 if (*(__be32 *)(pkg->buffer + frame->size) != crc32) { in tb_ctl_rx_callback()
483 tb_ctl_err(pkg->ctl, in tb_ctl_rx_callback()
489 if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size)) in tb_ctl_rx_callback()
503 req = tb_cfg_request_find(pkg->ctl, pkg); in tb_ctl_rx_callback()
505 trace_tb_rx(pkg->ctl->index, frame->eof, pkg->buffer, frame->size, !req); in tb_ctl_rx_callback()
508 if (req->copy(req, pkg)) in tb_ctl_rx_callback()
514 tb_ctl_rx_submit(pkg); in tb_ctl_rx_callback()
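
A condensed sketch of the RX completion path implied by the fragments above: each received frame is CRC-checked and byte-swapped in place, asynchronous error notifications and events go to the event callback, other packets try to complete a matching pending request, and the buffer is always resubmitted to the RX ring. Size validation, the full per-packet-type handling and the completion helpers (schedule_work on req->work, the request put) are omitted or assumed.

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	struct tb_cfg_request *req;
	__be32 crc32;

	if (canceled)
		return;			/* ring stopped, keep the buffer */

	frame->size -= 4;		/* strip the trailing checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
		tb_ctl_err(pkg->ctl,
			   "RX: checksum mismatch, dropping packet\n");
		goto rx;
	}

	if (tb_async_error(pkg)) {
		/* Unsolicited error notification, hand it to the callback. */
		tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size);
		goto rx;
	}

	if (frame->eof == TB_CFG_PKG_EVENT) {
		/* Notifications (hot plug etc.); done if consumed. */
		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
			goto rx;
	}

	/* Otherwise try to complete a pending request that matches. */
	req = tb_cfg_request_find(pkg->ctl, pkg);
	if (req) {
		if (req->copy(req, pkg))
			schedule_work(&req->work);	/* assumed */
		tb_cfg_request_put(req);		/* assumed helper */
	}

rx:
	tb_ctl_rx_submit(pkg);
}
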
768 struct cfg_ack_pkg pkg = { in tb_cfg_ack_notification() local
815 return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_NOTIFY_ACK); in tb_cfg_ack_notification()
830 struct cfg_error_pkg pkg = { in tb_cfg_ack_plug() local
839 return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR); in tb_cfg_ack_plug()
843 const struct ctl_pkg *pkg) in tb_cfg_match() argument
845 u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63); in tb_cfg_match()
847 if (pkg->frame.eof == TB_CFG_PKG_ERROR) in tb_cfg_match()
850 if (pkg->frame.eof != req->response_type) in tb_cfg_match()
854 if (pkg->frame.size != req->response_size) in tb_cfg_match()
857 if (pkg->frame.eof == TB_CFG_PKG_READ || in tb_cfg_match()
858 pkg->frame.eof == TB_CFG_PKG_WRITE) { in tb_cfg_match()
860 const struct cfg_read_pkg *res_hdr = pkg->buffer; in tb_cfg_match()
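
tb_cfg_match() decides whether an incoming packet answers a queued request: the route comparison masks off the topmost route bit, the frame type and size must match the expected response, and for read/write packets the sequence number in the header must match too. A sketch with the unlisted request-side fields assumed:

static bool tb_cfg_match(const struct tb_cfg_request *req,
			 const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;	/* error replies always match */

	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))	/* assumed field */
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	if (pkg->frame.eof == TB_CFG_PKG_READ ||
	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
		const struct cfg_read_pkg *req_hdr = req->request;
		const struct cfg_read_pkg *res_hdr = pkg->buffer;

		if (req_hdr->addr.seq != res_hdr->addr.seq)	/* assumed */
			return false;
	}

	return true;
}
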
869 static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg) in tb_cfg_copy() argument
874 res = parse_header(pkg, req->response_size, req->response_type, in tb_cfg_copy()
877 memcpy(req->response, pkg->buffer, req->response_size); in tb_cfg_copy()
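
tb_cfg_copy() is the copy callback used once a matching response arrives: it re-validates the packet with parse_header() and, only on success, copies the payload into the request's response buffer. The result bookkeeping and return value are assumptions:

static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	struct tb_cfg_result res;

	/* Re-check size, type and route before trusting the payload. */
	res = parse_header(pkg, req->response_size, req->response_type,
			   tb_cfg_get_route(req->request));	/* assumed field */
	if (!res.err)
		memcpy(req->response, pkg->buffer, req->response_size);

	req->result = res;	/* assumed: stash the outcome for the waiter */

	/* Complete on the first matching response. */
	return true;
}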