Lines matching refs: pfvf

21 static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf)  in otx2_is_pfc_enabled()  argument
23 return IS_ENABLED(CONFIG_DCB) && !!pfvf->pfc_en; in otx2_is_pfc_enabled()
27 struct otx2_nic *pfvf, int qidx) in otx2_nix_rq_op_stats() argument
32 ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS); in otx2_nix_rq_op_stats()
35 ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS); in otx2_nix_rq_op_stats()
40 struct otx2_nic *pfvf, int qidx) in otx2_nix_sq_op_stats() argument
45 ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS); in otx2_nix_sq_op_stats()
48 ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS); in otx2_nix_sq_op_stats()
52 void otx2_update_lmac_stats(struct otx2_nic *pfvf) in otx2_update_lmac_stats() argument
56 if (!netif_running(pfvf->netdev)) in otx2_update_lmac_stats()
59 mutex_lock(&pfvf->mbox.lock); in otx2_update_lmac_stats()
60 req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox); in otx2_update_lmac_stats()
62 mutex_unlock(&pfvf->mbox.lock); in otx2_update_lmac_stats()
66 otx2_sync_mbox_msg(&pfvf->mbox); in otx2_update_lmac_stats()
67 mutex_unlock(&pfvf->mbox.lock); in otx2_update_lmac_stats()
70 void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf) in otx2_update_lmac_fec_stats() argument
74 if (!netif_running(pfvf->netdev)) in otx2_update_lmac_fec_stats()
76 mutex_lock(&pfvf->mbox.lock); in otx2_update_lmac_fec_stats()
77 req = otx2_mbox_alloc_msg_cgx_fec_stats(&pfvf->mbox); in otx2_update_lmac_fec_stats()
79 otx2_sync_mbox_msg(&pfvf->mbox); in otx2_update_lmac_fec_stats()
80 mutex_unlock(&pfvf->mbox.lock); in otx2_update_lmac_fec_stats()
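
The two LMAC stats helpers above, like most mbox callers in this listing, share one control flow: take the mbox mutex, allocate a request, drop the lock and bail out if allocation fails, send the message to the AF, then unlock. Below is a minimal, self-contained sketch of that shape; struct nic_mbox, alloc_msg() and sync_msg() are hypothetical stand-ins for the driver's struct mbox machinery, not the real API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the kernel mbox types and helpers. */
struct msg_req { int id; };
struct nic_mbox { pthread_mutex_t lock; };

static struct msg_req *alloc_msg(struct nic_mbox *mbox)
{
	/* The driver carves requests out of a shared mbox region and
	 * can fail, which is why every caller checks for NULL and
	 * unlocks on that path. */
	(void)mbox;
	return malloc(sizeof(struct msg_req));
}

static void sync_msg(struct nic_mbox *mbox, struct msg_req *req)
{
	/* Stands in for otx2_sync_mbox_msg(): send and wait for the AF.
	 * Any response is delivered to a mbox_handler_*() callback. */
	(void)mbox;
	printf("sent msg %d\n", req->id);
	free(req);
}

static void update_stats(struct nic_mbox *mbox)
{
	struct msg_req *req;

	pthread_mutex_lock(&mbox->lock);
	req = alloc_msg(mbox);
	if (!req) {
		pthread_mutex_unlock(&mbox->lock);
		return;		/* allocation failed: leave unlocked */
	}
	req->id = 1;
	sync_msg(mbox, req);
	pthread_mutex_unlock(&mbox->lock);
}

int main(void)
{
	struct nic_mbox mbox = { .lock = PTHREAD_MUTEX_INITIALIZER };

	update_stats(&mbox);
	return 0;
}
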
83 int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx) in otx2_update_rq_stats() argument
85 struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx]; in otx2_update_rq_stats()
87 if (!pfvf->qset.rq) in otx2_update_rq_stats()
90 otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx); in otx2_update_rq_stats()
95 int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx) in otx2_update_sq_stats() argument
97 struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx]; in otx2_update_sq_stats()
99 if (!pfvf->qset.sq) in otx2_update_sq_stats()
102 if (qidx >= pfvf->hw.non_qos_queues) { in otx2_update_sq_stats()
103 if (!test_bit(qidx - pfvf->hw.non_qos_queues, pfvf->qos.qos_sq_bmap)) in otx2_update_sq_stats()
107 otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx); in otx2_update_sq_stats()
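
otx2_update_sq_stats() above treats queue indices at or beyond non_qos_queues as QoS send queues, tracked in a bitmap that starts at zero, hence the (qidx - pfvf->hw.non_qos_queues) rebasing before test_bit(). A small standalone demo of that indexing, with made-up queue counts and an open-coded bitmap in place of test_bit():

#include <stdio.h>

int main(void)
{
	unsigned int non_qos_queues = 8;	/* hypothetical */
	unsigned long qos_sq_bmap = 0x5;	/* QoS SQs 0 and 2 active */
	unsigned int qidx;

	for (qidx = 6; qidx < 12; qidx++) {
		if (qidx >= non_qos_queues &&
		    !(qos_sq_bmap & (1UL << (qidx - non_qos_queues)))) {
			printf("sq %2u: qos slot %u inactive, skip\n",
			       qidx, qidx - non_qos_queues);
			continue;
		}
		printf("sq %2u: read stats\n", qidx);
	}
	return 0;
}
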
112 void otx2_get_dev_stats(struct otx2_nic *pfvf) in otx2_get_dev_stats() argument
114 struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats; in otx2_get_dev_stats()
138 struct otx2_nic *pfvf = netdev_priv(netdev); in otx2_get_stats64() local
141 otx2_get_dev_stats(pfvf); in otx2_get_stats64()
143 dev_stats = &pfvf->hw.dev_stats; in otx2_get_stats64()
156 static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac) in otx2_hw_set_mac_addr() argument
161 mutex_lock(&pfvf->mbox.lock); in otx2_hw_set_mac_addr()
162 req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox); in otx2_hw_set_mac_addr()
164 mutex_unlock(&pfvf->mbox.lock); in otx2_hw_set_mac_addr()
170 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_hw_set_mac_addr()
171 mutex_unlock(&pfvf->mbox.lock); in otx2_hw_set_mac_addr()
175 static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf, in otx2_hw_get_mac_addr() argument
183 mutex_lock(&pfvf->mbox.lock); in otx2_hw_get_mac_addr()
184 req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox); in otx2_hw_get_mac_addr()
186 mutex_unlock(&pfvf->mbox.lock); in otx2_hw_get_mac_addr()
190 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_hw_get_mac_addr()
192 mutex_unlock(&pfvf->mbox.lock); in otx2_hw_get_mac_addr()
196 msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); in otx2_hw_get_mac_addr()
198 mutex_unlock(&pfvf->mbox.lock); in otx2_hw_get_mac_addr()
203 mutex_unlock(&pfvf->mbox.lock); in otx2_hw_get_mac_addr()
210 struct otx2_nic *pfvf = netdev_priv(netdev); in otx2_set_mac_address() local
216 if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) { in otx2_set_mac_address()
220 pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) in otx2_set_mac_address()
221 otx2_install_rxvlan_offload_flow(pfvf); in otx2_set_mac_address()
223 if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) in otx2_set_mac_address()
224 otx2_dmacflt_update_pfmac_flow(pfvf); in otx2_set_mac_address()
233 int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu) in otx2_hw_set_mtu() argument
239 maxlen = pfvf->hw.max_mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; in otx2_hw_set_mtu()
241 mutex_lock(&pfvf->mbox.lock); in otx2_hw_set_mtu()
242 req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox); in otx2_hw_set_mtu()
244 mutex_unlock(&pfvf->mbox.lock); in otx2_hw_set_mtu()
251 if (is_otx2_lbkvf(pfvf->pdev)) in otx2_hw_set_mtu()
254 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_hw_set_mtu()
255 mutex_unlock(&pfvf->mbox.lock); in otx2_hw_set_mtu()
260 int otx2_config_pause_frm(struct otx2_nic *pfvf) in otx2_config_pause_frm() argument
265 if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) in otx2_config_pause_frm()
268 mutex_lock(&pfvf->mbox.lock); in otx2_config_pause_frm()
269 req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox); in otx2_config_pause_frm()
275 req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED); in otx2_config_pause_frm()
276 req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED); in otx2_config_pause_frm()
279 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_config_pause_frm()
281 mutex_unlock(&pfvf->mbox.lock); in otx2_config_pause_frm()
286 int otx2_set_flowkey_cfg(struct otx2_nic *pfvf) in otx2_set_flowkey_cfg() argument
288 struct otx2_rss_info *rss = &pfvf->hw.rss_info; in otx2_set_flowkey_cfg()
293 mutex_lock(&pfvf->mbox.lock); in otx2_set_flowkey_cfg()
294 req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox); in otx2_set_flowkey_cfg()
296 mutex_unlock(&pfvf->mbox.lock); in otx2_set_flowkey_cfg()
303 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_set_flowkey_cfg()
308 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); in otx2_set_flowkey_cfg()
314 pfvf->hw.flowkey_alg_idx = rsp->alg_idx; in otx2_set_flowkey_cfg()
316 mutex_unlock(&pfvf->mbox.lock); in otx2_set_flowkey_cfg()
320 int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id) in otx2_set_rss_table() argument
322 struct otx2_rss_info *rss = &pfvf->hw.rss_info; in otx2_set_rss_table()
324 struct mbox *mbox = &pfvf->mbox; in otx2_set_rss_table()
362 void otx2_set_rss_key(struct otx2_nic *pfvf) in otx2_set_rss_key() argument
364 struct otx2_rss_info *rss = &pfvf->hw.rss_info; in otx2_set_rss_key()
376 otx2_write64(pfvf, NIX_LF_RX_SECRETX(5), in otx2_set_rss_key()
381 otx2_write64(pfvf, NIX_LF_RX_SECRETX(idx), *key++); in otx2_set_rss_key()
385 int otx2_rss_init(struct otx2_nic *pfvf) in otx2_rss_init() argument
387 struct otx2_rss_info *rss = &pfvf->hw.rss_info; in otx2_rss_init()
396 otx2_set_rss_key(pfvf); in otx2_rss_init()
398 if (!netif_is_rxfh_configured(pfvf->netdev)) { in otx2_rss_init()
409 pfvf->hw.rx_queues); in otx2_rss_init()
411 ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP); in otx2_rss_init()
422 ret = otx2_set_flowkey_cfg(pfvf); in otx2_rss_init()
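
When userspace has not configured RSS (the netif_is_rxfh_configured() check above), otx2_rss_init() fills a default indirection table spread across the RX queues; the truncated fragment at source line 409 looks like a call to ethtool_rxfh_indir_default(), which is a plain modulo. A sketch of the resulting table, with made-up sizes:

#include <stdio.h>

int main(void)
{
	unsigned int rss_size = 16;	/* hypothetical table size */
	unsigned int rx_queues = 4;	/* hypothetical pfvf->hw.rx_queues */
	unsigned int idx;

	/* ethtool_rxfh_indir_default(idx, n) is idx % n: entries are
	 * dealt round-robin across the RX queues. */
	for (idx = 0; idx < rss_size; idx++)
		printf("ind_tbl[%2u] = rq %u\n", idx, idx % rx_queues);
	return 0;
}
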
464 void otx2_setup_segmentation(struct otx2_nic *pfvf) in otx2_setup_segmentation() argument
468 struct otx2_hw *hw = &pfvf->hw; in otx2_setup_segmentation()
471 mutex_lock(&pfvf->mbox.lock); in otx2_setup_segmentation()
474 lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox); in otx2_setup_segmentation()
481 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_setup_segmentation()
486 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr); in otx2_setup_segmentation()
493 lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox); in otx2_setup_segmentation()
500 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_setup_segmentation()
505 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr); in otx2_setup_segmentation()
510 mutex_unlock(&pfvf->mbox.lock); in otx2_setup_segmentation()
513 mutex_unlock(&pfvf->mbox.lock); in otx2_setup_segmentation()
514 netdev_info(pfvf->netdev, in otx2_setup_segmentation()
516 pfvf->netdev->hw_features &= ~NETIF_F_GSO_UDP_L4; in otx2_setup_segmentation()
519 void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx) in otx2_config_irq_coalescing() argument
527 otx2_write64(pfvf, NIX_LF_CINTX_WAIT(qidx), in otx2_config_irq_coalescing()
528 ((u64)(pfvf->hw.cq_time_wait * 10) << 48) | in otx2_config_irq_coalescing()
529 ((u64)pfvf->hw.cq_qcount_wait << 32) | in otx2_config_irq_coalescing()
530 (pfvf->hw.cq_ecount_wait - 1)); in otx2_config_irq_coalescing()
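
The write to NIX_LF_CINTX_WAIT above packs all three interrupt-coalescing knobs into one 64-bit register: the time wait (scaled by 10) in bits 63:48, the queue-count wait in bits 47:32, and the entry-count wait minus one in the low bits. A standalone reproduction of that packing with illustrative values; only the shifts and the *10 scaling are taken from the fragment, the hardware field semantics beyond that are not asserted here:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cq_time_wait = 25;	/* hypothetical settings */
	uint64_t cq_qcount_wait = 16;
	uint64_t cq_ecount_wait = 32;
	uint64_t regval;

	regval = ((cq_time_wait * 10) << 48) |
		 (cq_qcount_wait << 32) |
		 (cq_ecount_wait - 1);

	printf("NIX_LF_CINTX_WAIT = 0x%016" PRIx64 "\n", regval);
	return 0;
}
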
533 static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool, in otx2_alloc_pool_buf() argument
551 static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, in __otx2_alloc_rbuf() argument
557 return otx2_alloc_pool_buf(pfvf, pool, dma); in __otx2_alloc_rbuf()
563 *dma = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize, in __otx2_alloc_rbuf()
565 if (unlikely(dma_mapping_error(pfvf->dev, *dma))) { in __otx2_alloc_rbuf()
573 int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, in otx2_alloc_rbuf() argument
579 ret = __otx2_alloc_rbuf(pfvf, pool, dma); in otx2_alloc_rbuf()
584 int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, in otx2_alloc_buffer() argument
587 if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) in otx2_alloc_buffer()
594 struct otx2_nic *pfvf = netdev_priv(netdev); in otx2_tx_timeout() local
596 schedule_work(&pfvf->reset_task); in otx2_tx_timeout()
602 struct otx2_nic *pfvf = netdev_priv(netdev); in otx2_get_mac_from_af() local
605 err = otx2_hw_get_mac_addr(pfvf, netdev); in otx2_get_mac_from_af()
607 dev_warn(pfvf->dev, "Failed to read mac from hardware\n"); in otx2_get_mac_from_af()
615 int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for_pfc) in otx2_txschq_config() argument
618 struct otx2_hw *hw = &pfvf->hw; in otx2_txschq_config()
623 dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); in otx2_txschq_config()
625 req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); in otx2_txschq_config()
635 schq_list = pfvf->pfc_schq_list; in otx2_txschq_config()
642 req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU; in otx2_txschq_config()
646 if (!is_dev_otx2(pfvf->pdev)) in otx2_txschq_config()
661 if (is_otx2_sdp_rep(pfvf->pdev)) in otx2_txschq_config()
669 if (is_otx2_sdp_rep(pfvf->pdev)) { in otx2_txschq_config()
683 !is_otx2_sdp_rep(pfvf->pdev)) { in otx2_txschq_config()
701 !is_otx2_sdp_rep(pfvf->pdev)) { in otx2_txschq_config()
730 return otx2_sync_mbox_msg(&pfvf->mbox); in otx2_txschq_config()
734 int otx2_smq_flush(struct otx2_nic *pfvf, int smq) in otx2_smq_flush() argument
739 mutex_lock(&pfvf->mbox.lock); in otx2_smq_flush()
741 req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); in otx2_smq_flush()
743 mutex_unlock(&pfvf->mbox.lock); in otx2_smq_flush()
752 rc = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_smq_flush()
753 mutex_unlock(&pfvf->mbox.lock); in otx2_smq_flush()
758 int otx2_txsch_alloc(struct otx2_nic *pfvf) in otx2_txsch_alloc() argument
760 int chan_cnt = pfvf->hw.tx_chan_cnt; in otx2_txsch_alloc()
766 req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox); in otx2_txsch_alloc()
774 if (is_otx2_sdp_rep(pfvf->pdev) && chan_cnt > 1) { in otx2_txsch_alloc()
779 rc = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_txsch_alloc()
784 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); in otx2_txsch_alloc()
790 pfvf->hw.txschq_cnt[lvl] = rsp->schq[lvl]; in otx2_txsch_alloc()
792 pfvf->hw.txschq_list[lvl][schq] = in otx2_txsch_alloc()
796 pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl; in otx2_txsch_alloc()
797 pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio; in otx2_txsch_alloc()
802 void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq) in otx2_txschq_free_one() argument
807 mutex_lock(&pfvf->mbox.lock); in otx2_txschq_free_one()
809 free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox); in otx2_txschq_free_one()
811 mutex_unlock(&pfvf->mbox.lock); in otx2_txschq_free_one()
812 netdev_err(pfvf->netdev, in otx2_txschq_free_one()
820 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_txschq_free_one()
822 netdev_err(pfvf->netdev, in otx2_txschq_free_one()
826 mutex_unlock(&pfvf->mbox.lock); in otx2_txschq_free_one()
830 void otx2_txschq_stop(struct otx2_nic *pfvf) in otx2_txschq_stop() argument
836 for (idx = 0; idx < pfvf->hw.txschq_cnt[lvl]; idx++) { in otx2_txschq_stop()
837 otx2_txschq_free_one(pfvf, lvl, in otx2_txschq_stop()
838 pfvf->hw.txschq_list[lvl][idx]); in otx2_txschq_stop()
845 pfvf->hw.txschq_list[lvl][schq] = 0; in otx2_txschq_stop()
850 void otx2_sqb_flush(struct otx2_nic *pfvf) in otx2_sqb_flush() argument
856 ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); in otx2_sqb_flush()
857 for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) { in otx2_sqb_flush()
858 sq = &pfvf->qset.sq[qidx]; in otx2_sqb_flush()
887 static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura) in otx2_rq_init() argument
889 struct otx2_qset *qset = &pfvf->qset; in otx2_rq_init()
893 aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); in otx2_rq_init()
901 aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1; in otx2_rq_init()
907 aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); in otx2_rq_init()
908 aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); in otx2_rq_init()
917 return otx2_sync_mbox_msg(&pfvf->mbox); in otx2_rq_init()
922 struct otx2_nic *pfvf = dev; in otx2_sq_aq_init() local
926 sq = &pfvf->qset.sq[qidx]; in otx2_sq_aq_init()
927 sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx)); in otx2_sq_aq_init()
929 aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); in otx2_sq_aq_init()
933 aq->sq.cq = pfvf->hw.rx_queues + qidx; in otx2_sq_aq_init()
937 aq->sq.smq = otx2_get_smq_idx(pfvf, qidx); in otx2_sq_aq_init()
938 aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); in otx2_sq_aq_init()
939 aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset; in otx2_sq_aq_init()
947 aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt)); in otx2_sq_aq_init()
954 return otx2_sync_mbox_msg(&pfvf->mbox); in otx2_sq_aq_init()
957 int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) in otx2_sq_init() argument
959 struct otx2_qset *qset = &pfvf->qset; in otx2_sq_init()
965 pool = &pfvf->qset.pool[sqb_aura]; in otx2_sq_init()
970 err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size); in otx2_sq_init()
988 err = qmem_alloc(pfvf->dev, &sq->sqe_ring, qset->sqe_cnt, in otx2_sq_init()
993 err = qmem_alloc(pfvf->dev, &sq->cpt_resp, qset->sqe_cnt, 64); in otx2_sq_init()
997 if (qidx < pfvf->hw.tx_queues) { in otx2_sq_init()
998 err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt, in otx2_sq_init()
1009 if (pfvf->ptp && qidx < pfvf->hw.tx_queues) { in otx2_sq_init()
1010 err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt, in otx2_sq_init()
1021 sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1; in otx2_sq_init()
1027 sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0)); in otx2_sq_init()
1032 chan_offset = qidx % pfvf->hw.tx_chan_cnt; in otx2_sq_init()
1033 err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, chan_offset, sqb_aura); in otx2_sq_init()
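
Two bits of sizing arithmetic from the send-queue setup above are easy to work by hand. In otx2_sq_init(), sqe_per_sqb divides the SQB into SQE slots and holds one slot back; in otx2_sq_aq_init(), cq_limit scales SEND_CQ_SKID into 1/256ths of the ring (that unit reading is an inference, the arithmetic itself is verbatim). Worked with made-up sizes, including a hypothetical SEND_CQ_SKID:

#include <stdio.h>

#define SEND_CQ_SKID 2048	/* hypothetical; the real value lives in the driver headers */

int main(void)
{
	unsigned int sqb_size = 4096;	/* hypothetical hw->sqb_size */
	unsigned int sqe_size = 128;	/* hypothetical sq->sqe_size */
	unsigned int sqe_cnt = 4096;	/* hypothetical qset->sqe_cnt */

	/* 4096 / 128 = 32 slots per SQB; one is reserved, 31 usable. */
	printf("sqe_per_sqb = %u\n", (sqb_size / sqe_size) - 1);

	/* (2048 * 256) / 4096 = 128 */
	printf("cq_limit    = %u\n", (SEND_CQ_SKID * 256) / sqe_cnt);
	return 0;
}
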
1044 static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) in otx2_cq_init() argument
1046 struct otx2_qset *qset = &pfvf->qset; in otx2_cq_init()
1053 non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues; in otx2_cq_init()
1054 if (qidx < pfvf->hw.rx_queues) { in otx2_cq_init()
1058 if (pfvf->xdp_prog) in otx2_cq_init()
1059 xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0); in otx2_cq_init()
1062 cq->cint_idx = qidx - pfvf->hw.rx_queues; in otx2_cq_init()
1065 if (pfvf->hw.xdp_queues && in otx2_cq_init()
1066 qidx < non_xdp_queues + pfvf->hw.xdp_queues) { in otx2_cq_init()
1073 pfvf->hw.xdp_queues; in otx2_cq_init()
1077 cq->cqe_size = pfvf->qset.xqe_size; in otx2_cq_init()
1080 err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size); in otx2_cq_init()
1090 (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx; in otx2_cq_init()
1095 aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); in otx2_cq_init()
1108 if (qidx < pfvf->hw.rx_queues) { in otx2_cq_init()
1109 aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt); in otx2_cq_init()
1112 if (!is_otx2_lbkvf(pfvf->pdev)) { in otx2_cq_init()
1116 aq->cq.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[qidx]]; in otx2_cq_init()
1118 aq->cq.bpid = pfvf->bpid[0]; in otx2_cq_init()
1122 aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); in otx2_cq_init()
1131 return otx2_sync_mbox_msg(&pfvf->mbox); in otx2_cq_init()
1138 struct otx2_nic *pfvf; in otx2_pool_refill_task() local
1142 pfvf = wrk->pf; in otx2_pool_refill_task()
1143 qidx = wrk - pfvf->refill_wrk; in otx2_pool_refill_task()
1144 cq = &pfvf->qset.cq[qidx]; in otx2_pool_refill_task()
1153 int otx2_config_nix_queues(struct otx2_nic *pfvf) in otx2_config_nix_queues() argument
1158 for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { in otx2_config_nix_queues()
1159 u16 lpb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx); in otx2_config_nix_queues()
1161 err = otx2_rq_init(pfvf, qidx, lpb_aura); in otx2_config_nix_queues()
1167 for (qidx = 0; qidx < pfvf->hw.non_qos_queues; qidx++) { in otx2_config_nix_queues()
1168 u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); in otx2_config_nix_queues()
1170 err = otx2_sq_init(pfvf, qidx, sqb_aura); in otx2_config_nix_queues()
1176 for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) { in otx2_config_nix_queues()
1177 err = otx2_cq_init(pfvf, qidx); in otx2_config_nix_queues()
1182 pfvf->cq_op_addr = (__force u64 *)otx2_get_regaddr(pfvf, in otx2_config_nix_queues()
1186 pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt, in otx2_config_nix_queues()
1188 if (!pfvf->refill_wrk) in otx2_config_nix_queues()
1191 for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) { in otx2_config_nix_queues()
1192 pfvf->refill_wrk[qidx].pf = pfvf; in otx2_config_nix_queues()
1193 INIT_DELAYED_WORK(&pfvf->refill_wrk[qidx].pool_refill_work, in otx2_config_nix_queues()
1199 int otx2_config_nix(struct otx2_nic *pfvf) in otx2_config_nix() argument
1205 pfvf->qset.xqe_size = pfvf->hw.xqe_size; in otx2_config_nix()
1208 nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox); in otx2_config_nix()
1213 nixlf->rq_cnt = pfvf->hw.rx_queues; in otx2_config_nix()
1214 nixlf->sq_cnt = otx2_get_total_tx_queues(pfvf); in otx2_config_nix()
1215 nixlf->cq_cnt = pfvf->qset.cq_cnt; in otx2_config_nix()
1218 nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64; in otx2_config_nix()
1229 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_config_nix()
1233 rsp = (struct nix_lf_alloc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, in otx2_config_nix()
1244 void otx2_sq_free_sqbs(struct otx2_nic *pfvf) in otx2_sq_free_sqbs() argument
1246 struct otx2_qset *qset = &pfvf->qset; in otx2_sq_free_sqbs()
1247 struct otx2_hw *hw = &pfvf->hw; in otx2_sq_free_sqbs()
1252 for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) { in otx2_sq_free_sqbs()
1260 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); in otx2_sq_free_sqbs()
1261 dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size, in otx2_sq_free_sqbs()
1270 void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool, in otx2_free_bufs() argument
1276 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); in otx2_free_bufs()
1282 dma_unmap_page_attrs(pfvf->dev, iova, size, in otx2_free_bufs()
1290 void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type) in otx2_free_aura_ptr() argument
1297 pool_start = otx2_get_pool_idx(pfvf, type, 0); in otx2_free_aura_ptr()
1298 pool_end = pool_start + pfvf->hw.sqpool_cnt; in otx2_free_aura_ptr()
1299 size = pfvf->hw.sqb_size; in otx2_free_aura_ptr()
1302 pool_start = otx2_get_pool_idx(pfvf, type, 0); in otx2_free_aura_ptr()
1303 pool_end = pfvf->hw.rqpool_cnt; in otx2_free_aura_ptr()
1304 size = pfvf->rbsize; in otx2_free_aura_ptr()
1309 iova = otx2_aura_allocptr(pfvf, pool_id); in otx2_free_aura_ptr()
1310 pool = &pfvf->qset.pool[pool_id]; in otx2_free_aura_ptr()
1315 otx2_free_bufs(pfvf, pool, iova, size); in otx2_free_aura_ptr()
1317 iova = otx2_aura_allocptr(pfvf, pool_id); in otx2_free_aura_ptr()
1322 void otx2_aura_pool_free(struct otx2_nic *pfvf) in otx2_aura_pool_free() argument
1327 if (!pfvf->qset.pool) in otx2_aura_pool_free()
1330 for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) { in otx2_aura_pool_free()
1331 pool = &pfvf->qset.pool[pool_id]; in otx2_aura_pool_free()
1332 qmem_free(pfvf->dev, pool->stack); in otx2_aura_pool_free()
1333 qmem_free(pfvf->dev, pool->fc_addr); in otx2_aura_pool_free()
1337 devm_kfree(pfvf->dev, pfvf->qset.pool); in otx2_aura_pool_free()
1338 pfvf->qset.pool = NULL; in otx2_aura_pool_free()
1341 int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, in otx2_aura_init() argument
1348 pool = &pfvf->qset.pool[pool_id]; in otx2_aura_init()
1354 err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN); in otx2_aura_init()
1360 aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); in otx2_aura_init()
1363 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_aura_init()
1366 aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); in otx2_aura_init()
1385 if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) { in otx2_aura_init()
1399 if (pfvf->nix_blkaddr == BLKADDR_NIX1) in otx2_aura_init()
1402 aq->aura.nix0_bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]]; in otx2_aura_init()
1404 aq->aura.nix0_bpid = pfvf->bpid[0]; in otx2_aura_init()
1418 int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, in otx2_pool_init() argument
1426 pool = &pfvf->qset.pool[pool_id]; in otx2_pool_init()
1428 err = qmem_alloc(pfvf->dev, &pool->stack, in otx2_pool_init()
1429 stack_pages, pfvf->hw.stack_pg_bytes); in otx2_pool_init()
1436 aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); in otx2_pool_init()
1439 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_pool_init()
1441 qmem_free(pfvf->dev, pool->stack); in otx2_pool_init()
1444 aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); in otx2_pool_init()
1446 qmem_free(pfvf->dev, pool->stack); in otx2_pool_init()
1474 pp_params.dev = pfvf->dev; in otx2_pool_init()
1478 netdev_err(pfvf->netdev, "Creation of page pool failed\n"); in otx2_pool_init()
1485 int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) in otx2_sq_aura_pool_init() argument
1488 struct otx2_qset *qset = &pfvf->qset; in otx2_sq_aura_pool_init()
1489 struct otx2_hw *hw = &pfvf->hw; in otx2_sq_aura_pool_init()
1508 pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); in otx2_sq_aura_pool_init()
1510 err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs); in otx2_sq_aura_pool_init()
1515 err = otx2_pool_init(pfvf, pool_id, stack_pages, in otx2_sq_aura_pool_init()
1522 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_sq_aura_pool_init()
1528 pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); in otx2_sq_aura_pool_init()
1529 pool = &pfvf->qset.pool[pool_id]; in otx2_sq_aura_pool_init()
1540 err = otx2_alloc_rbuf(pfvf, pool, &bufptr); in otx2_sq_aura_pool_init()
1543 pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr); in otx2_sq_aura_pool_init()
1552 otx2_mbox_reset(&pfvf->mbox.mbox, 0); in otx2_sq_aura_pool_init()
1553 otx2_aura_pool_free(pfvf); in otx2_sq_aura_pool_init()
1557 int otx2_rq_aura_pool_init(struct otx2_nic *pfvf) in otx2_rq_aura_pool_init() argument
1559 struct otx2_hw *hw = &pfvf->hw; in otx2_rq_aura_pool_init()
1565 num_ptrs = pfvf->qset.rqe_cnt; in otx2_rq_aura_pool_init()
1571 pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq); in otx2_rq_aura_pool_init()
1573 err = otx2_aura_init(pfvf, pool_id, pool_id, num_ptrs); in otx2_rq_aura_pool_init()
1578 err = otx2_pool_init(pfvf, pool_id, stack_pages, in otx2_rq_aura_pool_init()
1579 num_ptrs, pfvf->rbsize, AURA_NIX_RQ); in otx2_rq_aura_pool_init()
1585 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_rq_aura_pool_init()
1591 pool = &pfvf->qset.pool[pool_id]; in otx2_rq_aura_pool_init()
1593 err = otx2_alloc_rbuf(pfvf, pool, &bufptr); in otx2_rq_aura_pool_init()
1596 pfvf->hw_ops->aura_freeptr(pfvf, pool_id, in otx2_rq_aura_pool_init()
1602 otx2_mbox_reset(&pfvf->mbox.mbox, 0); in otx2_rq_aura_pool_init()
1603 otx2_aura_pool_free(pfvf); in otx2_rq_aura_pool_init()
1607 int otx2_config_npa(struct otx2_nic *pfvf) in otx2_config_npa() argument
1609 struct otx2_qset *qset = &pfvf->qset; in otx2_config_npa()
1611 struct otx2_hw *hw = &pfvf->hw; in otx2_config_npa()
1621 qset->pool = devm_kcalloc(pfvf->dev, hw->pool_cnt, in otx2_config_npa()
1627 npalf = otx2_mbox_alloc_msg_npa_lf_alloc(&pfvf->mbox); in otx2_config_npa()
1636 return otx2_sync_mbox_msg(&pfvf->mbox); in otx2_config_npa()
1660 int otx2_attach_npa_nix(struct otx2_nic *pfvf) in otx2_attach_npa_nix() argument
1666 mutex_lock(&pfvf->mbox.lock); in otx2_attach_npa_nix()
1668 attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox); in otx2_attach_npa_nix()
1670 mutex_unlock(&pfvf->mbox.lock); in otx2_attach_npa_nix()
1678 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_attach_npa_nix()
1680 mutex_unlock(&pfvf->mbox.lock); in otx2_attach_npa_nix()
1684 pfvf->nix_blkaddr = BLKADDR_NIX0; in otx2_attach_npa_nix()
1689 if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL) in otx2_attach_npa_nix()
1690 pfvf->nix_blkaddr = BLKADDR_NIX1; in otx2_attach_npa_nix()
1693 msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox); in otx2_attach_npa_nix()
1695 mutex_unlock(&pfvf->mbox.lock); in otx2_attach_npa_nix()
1699 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_attach_npa_nix()
1701 mutex_unlock(&pfvf->mbox.lock); in otx2_attach_npa_nix()
1704 mutex_unlock(&pfvf->mbox.lock); in otx2_attach_npa_nix()
1706 if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID || in otx2_attach_npa_nix()
1707 pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) { in otx2_attach_npa_nix()
1708 dev_err(pfvf->dev, in otx2_attach_npa_nix()
1736 dev_err(mbox->pfvf->dev, "%s failed to disable context\n", in otx2_ctx_disable()
1742 int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable) in otx2_nix_config_bp() argument
1747 req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox); in otx2_nix_config_bp()
1749 req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox); in otx2_nix_config_bp()
1755 if (otx2_is_pfc_enabled(pfvf)) { in otx2_nix_config_bp()
1763 return otx2_sync_mbox_msg(&pfvf->mbox); in otx2_nix_config_bp()
1767 int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable) in otx2_nix_cpt_config_bp() argument
1772 req = otx2_mbox_alloc_msg_nix_cpt_bp_enable(&pfvf->mbox); in otx2_nix_cpt_config_bp()
1774 req = otx2_mbox_alloc_msg_nix_cpt_bp_disable(&pfvf->mbox); in otx2_nix_cpt_config_bp()
1780 if (otx2_is_pfc_enabled(pfvf)) { in otx2_nix_cpt_config_bp()
1788 return otx2_sync_mbox_msg(&pfvf->mbox); in otx2_nix_cpt_config_bp()
1793 void mbox_handler_cgx_stats(struct otx2_nic *pfvf, in mbox_handler_cgx_stats() argument
1799 pfvf->hw.cgx_rx_stats[id] = rsp->rx_stats[id]; in mbox_handler_cgx_stats()
1801 pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id]; in mbox_handler_cgx_stats()
1804 void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf, in mbox_handler_cgx_fec_stats() argument
1807 pfvf->hw.cgx_fec_corr_blks += rsp->fec_corr_blks; in mbox_handler_cgx_fec_stats()
1808 pfvf->hw.cgx_fec_uncorr_blks += rsp->fec_uncorr_blks; in mbox_handler_cgx_fec_stats()
1811 void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf, in mbox_handler_npa_lf_alloc() argument
1814 pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs; in mbox_handler_npa_lf_alloc()
1815 pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes; in mbox_handler_npa_lf_alloc()
1819 void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf, in mbox_handler_nix_lf_alloc() argument
1822 pfvf->hw.sqb_size = rsp->sqb_size; in mbox_handler_nix_lf_alloc()
1823 pfvf->hw.rx_chan_base = rsp->rx_chan_base; in mbox_handler_nix_lf_alloc()
1824 pfvf->hw.tx_chan_base = rsp->tx_chan_base; in mbox_handler_nix_lf_alloc()
1825 pfvf->hw.rx_chan_cnt = rsp->rx_chan_cnt; in mbox_handler_nix_lf_alloc()
1826 pfvf->hw.tx_chan_cnt = rsp->tx_chan_cnt; in mbox_handler_nix_lf_alloc()
1827 pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx; in mbox_handler_nix_lf_alloc()
1828 pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx; in mbox_handler_nix_lf_alloc()
1829 pfvf->hw.cgx_links = rsp->cgx_links; in mbox_handler_nix_lf_alloc()
1830 pfvf->hw.lbk_links = rsp->lbk_links; in mbox_handler_nix_lf_alloc()
1831 pfvf->hw.tx_link = rsp->tx_link; in mbox_handler_nix_lf_alloc()
1835 void mbox_handler_msix_offset(struct otx2_nic *pfvf, in mbox_handler_msix_offset() argument
1838 pfvf->hw.npa_msixoff = rsp->npa_msixoff; in mbox_handler_msix_offset()
1839 pfvf->hw.nix_msixoff = rsp->nix_msixoff; in mbox_handler_msix_offset()
1843 void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf, in mbox_handler_nix_bp_enable() argument
1850 pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF; in mbox_handler_nix_bp_enable()
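
The & 0x3FF in mbox_handler_nix_bp_enable() above keeps only the low 10 bits of each chan_bpid the AF returns, i.e. backpressure IDs are treated as 10-bit values and anything the AF packs above them is stripped. A one-line demonstration with a made-up reply value:

#include <stdio.h>

int main(void)
{
	unsigned int chan_bpid = 0x8123;	/* hypothetical AF reply */

	printf("bpid = 0x%x\n", chan_bpid & 0x3FF);	/* prints 0x123 */
	return 0;
}
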
1855 void otx2_free_cints(struct otx2_nic *pfvf, int n) in otx2_free_cints() argument
1857 struct otx2_qset *qset = &pfvf->qset; in otx2_free_cints()
1858 struct otx2_hw *hw = &pfvf->hw; in otx2_free_cints()
1864 int vector = pci_irq_vector(pfvf->pdev, irq); in otx2_free_cints()
1873 void otx2_set_cints_affinity(struct otx2_nic *pfvf) in otx2_set_cints_affinity() argument
1875 struct otx2_hw *hw = &pfvf->hw; in otx2_set_cints_affinity()
1882 for (cint = 0; cint < pfvf->hw.cint_cnt; cint++, vec++) { in otx2_set_cints_affinity()
1888 irq = pci_irq_vector(pfvf->pdev, vec); in otx2_set_cints_affinity()
1897 static u32 get_dwrr_mtu(struct otx2_nic *pfvf, struct nix_hw_info *hw) in get_dwrr_mtu() argument
1899 if (is_otx2_lbkvf(pfvf->pdev)) { in get_dwrr_mtu()
1900 pfvf->hw.smq_link_type = SMQ_LINK_TYPE_LBK; in get_dwrr_mtu()
1904 pfvf->hw.smq_link_type = SMQ_LINK_TYPE_RPM; in get_dwrr_mtu()
1908 u16 otx2_get_max_mtu(struct otx2_nic *pfvf) in otx2_get_max_mtu() argument
1915 mutex_lock(&pfvf->mbox.lock); in otx2_get_max_mtu()
1917 req = otx2_mbox_alloc_msg_nix_get_hw_info(&pfvf->mbox); in otx2_get_max_mtu()
1923 rc = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_get_max_mtu()
1926 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); in otx2_get_max_mtu()
1941 pfvf->hw.dwrr_mtu = get_dwrr_mtu(pfvf, rsp); in otx2_get_max_mtu()
1942 if (!pfvf->hw.dwrr_mtu) in otx2_get_max_mtu()
1943 pfvf->hw.dwrr_mtu = 1; in otx2_get_max_mtu()
1947 mutex_unlock(&pfvf->mbox.lock); in otx2_get_max_mtu()
1949 dev_warn(pfvf->dev, in otx2_get_max_mtu()
1960 struct otx2_nic *pfvf = netdev_priv(netdev); in otx2_handle_ntuple_tc_features() local
1965 otx2_destroy_ntuple_flows(pfvf); in otx2_handle_ntuple_tc_features()
1968 if (!pfvf->flow_cfg->max_flows) { in otx2_handle_ntuple_tc_features()
1976 otx2_tc_flower_rule_cnt(pfvf)) { in otx2_handle_ntuple_tc_features()
1982 otx2_tc_flower_rule_cnt(pfvf) && !(changed & NETIF_F_HW_TC)) { in otx2_handle_ntuple_tc_features()
1994 otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
2006 dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, in otx2_dma_map_skb_frag() argument
2031 return otx2_dma_map_page(pfvf, page, offset, *len, dir); in otx2_dma_map_skb_frag()
2034 void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg) in otx2_dma_unmap_skb_frags() argument
2045 otx2_dma_unmap_page(pfvf, sg->dma_addr[seg], in otx2_dma_unmap_skb_frags()