| /linux-6.15/net/rxrpc/ |
| peer_event.c |
    105  unsigned int max_data; in rxrpc_adjust_mtu() local
    126  if (max_data < peer->max_data) { in rxrpc_adjust_mtu()
    133  peer->max_data = max_data; in rxrpc_adjust_mtu()
    371  unsigned int max_data = peer->max_data; in rxrpc_input_probe_for_pmtud() local
    398  if (bad <= max_data) in rxrpc_input_probe_for_pmtud()
    399  max_data = bad - 1; in rxrpc_input_probe_for_pmtud()
    404  if (good > max_data) in rxrpc_input_probe_for_pmtud()
    405  max_data = good; in rxrpc_input_probe_for_pmtud()
    408  max_data = umin(max_data, peer->ackr_max_data); in rxrpc_input_probe_for_pmtud()
    409  if (max_data != peer->max_data) in rxrpc_input_probe_for_pmtud()
    [all …]
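The rxrpc_input_probe_for_pmtud() hits above show the clamp that folds a path-MTU probe result into the peer state: the usable payload size drops below the smallest probe known to be lost, rises to the largest probe known to get through, and is capped by what the peer advertised in ACKs. A minimal standalone sketch of that pattern, using a hypothetical struct peer and a local umin() helper in place of the kernel types:

    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel's peer state and umin() helper. */
    struct peer {
        unsigned int max_data;      /* current usable payload size */
        unsigned int ackr_max_data; /* ceiling advertised by the peer in ACKs */
    };

    static unsigned int umin(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    /*
     * Fold one probe result into peer->max_data: shrink below the smallest
     * size known to be dropped ("bad", 0 = none), grow to the largest size
     * known to get through ("good"), and never exceed the advertised ceiling.
     */
    static void pmtud_update(struct peer *peer, unsigned int good, unsigned int bad)
    {
        unsigned int max_data = peer->max_data;

        if (bad && bad <= max_data)
            max_data = bad - 1;
        if (good > max_data)
            max_data = good;

        max_data = umin(max_data, peer->ackr_max_data);
        if (max_data != peer->max_data)
            peer->max_data = max_data;
    }

    int main(void)
    {
        struct peer p = { .max_data = 1444, .ackr_max_data = 8192 };

        pmtud_update(&p, 4000, 0);   /* a 4000-byte probe succeeded */
        pmtud_update(&p, 0, 9000);   /* a 9000-byte probe was lost */
        printf("max_data = %u\n", p.max_data);  /* prints 4000 */
        return 0;
    }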
|
| peer_object.c |
    165  if (peer->max_data < peer->if_mtu - peer->hdrsize) { in rxrpc_assess_MTU_size()
    168  peer->max_data = peer->if_mtu - peer->hdrsize; in rxrpc_assess_MTU_size()
    211  peer->max_data = umin(RXRPC_JUMBO(1), peer->if_mtu - peer->hdrsize); in rxrpc_assess_MTU_size()
    214  peer->pmtud_trial = umin(peer->max_data, peer->pmtud_bad - 1); in rxrpc_assess_MTU_size()
    279  peer->max_data = peer->if_mtu - peer->hdrsize; in rxrpc_init_peer()
|
| input.c |
    791  unsigned int max_data, capacity; in rxrpc_input_ack_trailer() local
    810  if (max_mtu < peer->max_data) { in rxrpc_input_ack_trailer()
    813  peer->max_data = max_mtu; in rxrpc_input_ack_trailer()
    816  max_data = umin(max_mtu, peer->max_data); in rxrpc_input_ack_trailer()
    817  capacity = max_data; in rxrpc_input_ack_trailer()
|
| output.c |
    223  max_mtu = umax(call->peer->max_data, rxrpc_rx_mtu); in rxrpc_fill_out_ack()
    679  len >= sizeof(struct rxrpc_wire_header) + call->peer->max_data) { in rxrpc_send_data_packet()
    744  _leave(" = %d [%u]", ret, call->peer->max_data); in rxrpc_send_data_packet()
|
| conn_event.c |
    155  max_mtu = umax(conn->peer->max_data, rxrpc_rx_mtu); in rxrpc_conn_retransmit_call()
|
| proc.c |
    304  peer->max_data, in rxrpc_peer_seq_show()
|
| ar-internal.h |
    365  unsigned int max_data; /* Maximum packet data capacity for this peer */ member
|
| /linux-6.15/net/sctp/ |
| chunk.c |
    150  size_t len, first_len, max_data, remaining; in sctp_datamsg_from_user() local
    174  max_data = asoc->frag_point; in sctp_datamsg_from_user()
    175  if (unlikely(!max_data)) { in sctp_datamsg_from_user()
    176  max_data = sctp_min_frag_point(sctp_sk(asoc->base.sk), in sctp_datamsg_from_user()
    179  __func__, asoc, max_data); in sctp_datamsg_from_user()
    190  max_data -= SCTP_PAD4(sizeof(struct sctp_auth_chunk) + in sctp_datamsg_from_user()
    206  first_len = max_data; in sctp_datamsg_from_user()
    217  msg_len > max_data) in sctp_datamsg_from_user()
    245  len = max_data; in sctp_datamsg_from_user()
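sctp_datamsg_from_user() splits one user message into data chunks no larger than the association's frag point, with a separate budget (first_len) for the first fragment. A rough standalone sketch of that splitting loop; the sizes and the simplified first/later budgets are illustrative, not the real SCTP header accounting:

    #include <stdio.h>
    #include <stddef.h>

    /*
     * Split msg_len bytes into fragments: the first fragment gets at most
     * first_len bytes, every later fragment at most max_data bytes.  This
     * mirrors the first_len/max_data bookkeeping in the hits above, without
     * any of the real chunk allocation.
     */
    static void fragment(size_t msg_len, size_t first_len, size_t max_data)
    {
        size_t remaining = msg_len;
        int first = 1;

        while (remaining > 0) {
            size_t budget = first ? first_len : max_data;
            size_t len = remaining < budget ? remaining : budget;

            printf("fragment of %zu bytes\n", len);
            remaining -= len;
            first = 0;
        }
    }

    int main(void)
    {
        /* e.g. a 4000-byte message, 1400-byte frag point, smaller first fragment */
        fragment(4000, 1200, 1400);
        return 0;
    }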
|
| /linux-6.15/drivers/crypto/intel/qat/qat_common/ |
| adf_pfvf_vf_proto.c |
    114  u8 max_data; in adf_vf2pf_blkmsg_data_req() local
    122  max_data = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX; in adf_vf2pf_blkmsg_data_req()
    128  max_data = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX; in adf_vf2pf_blkmsg_data_req()
    134  max_data = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX; in adf_vf2pf_blkmsg_data_req()
    141  if (*data > max_data) { in adf_vf2pf_blkmsg_data_req()
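adf_vf2pf_blkmsg_data_req() picks a per-category byte limit (small, medium or large block) and rejects a request whose byte index exceeds it. A compact sketch of that select-then-validate shape, with made-up limits in place of the real ADF_VF2PF_*_BLOCK_BYTE_MAX constants:

    #include <stdio.h>
    #include <stdbool.h>

    /* Hypothetical per-category limits; the real driver uses the
     * ADF_VF2PF_{SMALL,MEDIUM,LARGE}_BLOCK_BYTE_MAX constants. */
    enum blk_type { BLK_SMALL, BLK_MEDIUM, BLK_LARGE };

    static bool blkmsg_data_req_ok(enum blk_type type, unsigned int offset)
    {
        unsigned int max_data;

        switch (type) {
        case BLK_SMALL:
            max_data = 30;
            break;
        case BLK_MEDIUM:
            max_data = 62;
            break;
        case BLK_LARGE:
            max_data = 126;
            break;
        default:
            return false;
        }

        /* Reject requests that index past the block's byte limit. */
        if (offset > max_data) {
            fprintf(stderr, "offset %u exceeds limit %u\n", offset, max_data);
            return false;
        }
        return true;
    }

    int main(void)
    {
        printf("%d\n", blkmsg_data_req_ok(BLK_MEDIUM, 40));  /* 1: within limit */
        printf("%d\n", blkmsg_data_req_ok(BLK_SMALL, 40));   /* 0: too large   */
        return 0;
    }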
|
| /linux-6.15/drivers/gpu/drm/ |
| drm_panic_qr.rs |
    214  .find(|&v| v.max_data() * 8 >= segments.iter().map(|s| s.total_size_bits(v)).sum()) in from_segments()
    221  fn max_data(&self) -> usize { in max_data() method
    511  for i in pad_offset..self.version.max_data() { in add_segments()
    956  let max_data = Version(version as usize).max_data(); in drm_panic_qr_max_data_size() localVariable
    960  if url_len + 5 >= max_data { in drm_panic_qr_max_data_size()
    963  let max = max_data - url_len - 5; in drm_panic_qr_max_data_size()
    968  max_data - 3 in drm_panic_qr_max_data_size()
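from_segments() walks the QR versions in order and takes the first one whose max_data() capacity, converted to bits, can hold every encoded segment. The same smallest-fit search, reduced to a C sketch over a hypothetical capacity table (the real capacities come from the Rust Version::max_data() method):

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical data capacities (bytes) for a few QR "versions"; the
     * real driver derives these from its version table in Rust. */
    static const size_t capacity[] = { 17, 32, 53, 78, 106 };

    /* Return the smallest version number whose capacity, in bits, can hold
     * needed_bits of encoded segment data, or -1 if none fits. */
    static int smallest_version(size_t needed_bits)
    {
        for (size_t v = 0; v < sizeof(capacity) / sizeof(capacity[0]); v++)
            if (capacity[v] * 8 >= needed_bits)
                return (int)v + 1;
        return -1;
    }

    int main(void)
    {
        printf("version %d\n", smallest_version(300));  /* needs >= 38 bytes -> 3 */
        return 0;
    }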
|
| /linux-6.15/drivers/net/ethernet/intel/idpf/ |
| idpf_singleq_txrx.c |
    220  unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED; in idpf_tx_singleq_map() local
    231  max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1); in idpf_tx_singleq_map()
    240  max_data, in idpf_tx_singleq_map()
    253  dma += max_data; in idpf_tx_singleq_map()
    254  size -= max_data; in idpf_tx_singleq_map()
    256  max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED; in idpf_tx_singleq_map()
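idpf_tx_singleq_map(), like idpf_tx_splitq_map(), iavf_tx_map(), ice_tx_map() and i40e_tx_map() further down, carves a buffer that exceeds the per-descriptor limit into max_data-sized pieces, and stretches the first piece by -dma & (read-request size - 1) so the later pieces start on a read-request boundary; one sketch here stands in for all of them. The limits below are made up in place of the IDPF_* constants:

    #include <stdio.h>

    /* Hypothetical limits standing in for IDPF_TX_MAX_DESC_DATA_ALIGNED and
     * IDPF_TX_MAX_READ_REQ_SIZE (both powers of two in the real drivers). */
    #define MAX_DESC_DATA_ALIGNED 4096u
    #define MAX_READ_REQ_SIZE     1024u

    /* Split a [dma, dma + size) buffer into per-descriptor chunks. */
    static void map_buffer(unsigned long long dma, unsigned int size)
    {
        unsigned int max_data = MAX_DESC_DATA_ALIGNED;

        /* Stretch the first chunk so the next one starts read-request aligned. */
        max_data += -dma & (MAX_READ_REQ_SIZE - 1);

        while (size > max_data) {
            printf("desc: addr=%#llx len=%u\n", dma, max_data);
            dma += max_data;
            size -= max_data;
            max_data = MAX_DESC_DATA_ALIGNED;
        }
        printf("desc: addr=%#llx len=%u (last)\n", dma, size);
    }

    int main(void)
    {
        map_buffer(0x1000f00, 9000);  /* unaligned start, spans three descriptors */
        return 0;
    }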
|
| idpf_txrx.c |
    2368  unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED; in idpf_tx_splitq_map() local
    2424  max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1); in idpf_tx_splitq_map()
    2427  max_data); in idpf_tx_splitq_map()
    2459  dma += max_data; in idpf_tx_splitq_map()
    2460  size -= max_data; in idpf_tx_splitq_map()
    2465  max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED; in idpf_tx_splitq_map()
|
| /linux-6.15/fs/gfs2/ |
| file.c |
    1235  unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1); in calc_max_reserv() local
    1237  for (tmp = max_data; tmp > sdp->sd_diptrs;) { in calc_max_reserv()
    1239  max_data -= tmp; in calc_max_reserv()
    1242  *data_blocks = max_data; in calc_max_reserv()
    1243  *ind_blocks = max_blocks - max_data; in calc_max_reserv()
    1244  *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift; in calc_max_reserv()
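calc_max_reserv() splits a fixed block budget into blocks that can hold file data and blocks that must be reserved for indirect (pointer) metadata, peeling off one indirect level per loop iteration. A simplified standalone sketch of that split; it skips the small height-dependent overhead the real code subtracts first, and ptrs_per_indirect is a stand-in for the gfs2 superblock fields:

    #include <stdio.h>

    /*
     * Given a total block budget, estimate how many blocks can carry file
     * data and how many must be reserved for indirect (pointer) blocks.
     */
    static void split_budget(unsigned int max_blocks, unsigned int ptrs_per_indirect,
                             unsigned int *data_blocks, unsigned int *ind_blocks)
    {
        unsigned int max_data = max_blocks;
        unsigned int tmp;

        /* Each round accounts for one level of indirect blocks. */
        for (tmp = max_data; tmp > ptrs_per_indirect;) {
            tmp = (tmp + ptrs_per_indirect - 1) / ptrs_per_indirect;
            max_data -= tmp;
        }

        *data_blocks = max_data;
        *ind_blocks = max_blocks - max_data;
    }

    int main(void)
    {
        unsigned int data, ind;

        split_budget(100000, 509, &data, &ind);
        printf("data=%u indirect=%u\n", data, ind);
        return 0;
    }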
|
| /linux-6.15/drivers/net/ethernet/intel/iavf/ |
| iavf_txrx.c |
    2173  unsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED; in iavf_tx_map() local
    2183  max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1); in iavf_tx_map()
    2189  max_data, td_tag); in iavf_tx_map()
    2199  dma += max_data; in iavf_tx_map()
    2200  size -= max_data; in iavf_tx_map()
    2202  max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED; in iavf_tx_map()
|
| /linux-6.15/include/trace/events/ |
| rxrpc.h |
    2359  __field(unsigned short, max_data)
    2368  __entry->max_data = conn->peer->max_data;
    2377  __entry->max_data,
    2409  unsigned int max_data, enum rxrpc_pmtud_reduce_trace reason),
    2411  TP_ARGS(peer, serial, max_data, reason),
    2416  __field(unsigned int, max_data)
    2423  __entry->max_data = max_data;
    2430  __entry->serial, __entry->max_data)
|
| /linux-6.15/drivers/net/ethernet/intel/ice/ |
| ice_txrx.c |
    1691  unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; in ice_tx_map() local
    1701  max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1); in ice_tx_map()
    1709  ice_build_ctob(td_cmd, td_offset, max_data, in ice_tx_map()
    1720  dma += max_data; in ice_tx_map()
    1721  size -= max_data; in ice_tx_map()
    1723  max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; in ice_tx_map()
|
| /linux-6.15/drivers/net/ethernet/intel/i40e/ |
| i40e_txrx.c |
    3599  unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; in i40e_tx_map() local
    3609  max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); in i40e_tx_map()
    3615  max_data, td_tag); in i40e_tx_map()
    3626  dma += max_data; in i40e_tx_map()
    3627  size -= max_data; in i40e_tx_map()
    3629  max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; in i40e_tx_map()
|
| /linux-6.15/kernel/trace/ |
| trace.c |
    1914  struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu); in __update_max_tr() local
    1919  max_data->saved_latency = tr->max_latency; in __update_max_tr()
    1920  max_data->critical_start = data->critical_start; in __update_max_tr()
    1921  max_data->critical_end = data->critical_end; in __update_max_tr()
    1923  strscpy(max_data->comm, tsk->comm); in __update_max_tr()
    1924  max_data->pid = tsk->pid; in __update_max_tr()
    1930  max_data->uid = current_uid(); in __update_max_tr()
    1932  max_data->uid = task_uid(tsk); in __update_max_tr()
    1934  max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; in __update_max_tr()
    1935  max_data->policy = tsk->policy; in __update_max_tr()
    [all …]
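__update_max_tr() snapshots the fields that describe a new worst-case latency (the latency itself, the critical-section bounds, and the task's comm, pid, uid, nice and policy) into the per-CPU max buffer so they survive after the live buffer moves on. A toy sketch of that record-the-new-maximum idea, with a reduced struct in place of trace_array_cpu:

    #include <stdio.h>
    #include <string.h>

    /* Reduced stand-in for the fields __update_max_tr() snapshots. */
    struct max_snapshot {
        unsigned long saved_latency;
        char comm[16];
        int pid;
        int nice;
    };

    static struct max_snapshot max_data;   /* per-CPU in the real tracer */

    /* Record the task and latency that set a new maximum. */
    static void update_max(unsigned long latency, const char *comm, int pid, int nice)
    {
        if (latency <= max_data.saved_latency)
            return;

        max_data.saved_latency = latency;
        strncpy(max_data.comm, comm, sizeof(max_data.comm) - 1);
        max_data.comm[sizeof(max_data.comm) - 1] = '\0';
        max_data.pid = pid;
        max_data.nice = nice;
    }

    int main(void)
    {
        update_max(120, "kworker/0:1", 42, 0);
        update_max(80, "sshd", 900, 0);      /* not a new max; ignored */
        printf("worst: %lu us by %s (pid %d)\n",
               max_data.saved_latency, max_data.comm, max_data.pid);
        return 0;
    }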
|
| /linux-6.15/drivers/media/platform/ti/am437x/ |
| am437x-vpfe.c |
    280  u8 max_gamma, max_data; in vpfe_ccdc_validate_param() local
    286  max_data = ccdc_data_size_max_bit(ccdcparam->data_sz); in vpfe_ccdc_validate_param()
    290  max_gamma > max_data) { in vpfe_ccdc_validate_param()
|