143 lines
4.3 KiB
Diff
From: Arthur Kiyanovski <akiyano@amazon.com>
Date: Thu, 11 Oct 2018 11:26:16 +0300
Subject: [PATCH 02/19] net: ena: minor performance improvement
Origin: 0e575f8542d1f4d74df30b5a9ba419c5373d01a1

Reduce fastpath overhead by making ena_com_tx_comp_req_id_get() inline.
Also move it to ena_eth_com.h file with its dependency function
ena_com_cq_inc_head().

Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/ethernet/amazon/ena/ena_eth_com.c | 43 -----------------
 drivers/net/ethernet/amazon/ena/ena_eth_com.h | 46 ++++++++++++++++++-
 2 files changed, 44 insertions(+), 45 deletions(-)

diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 2b3ff0c20155..9c0511e9f9a2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -59,15 +59,6 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
 	return cdesc;
 }
 
-static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
-{
-	io_cq->head++;
-
-	/* Switch phase bit in case of wrap around */
-	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
-		io_cq->phase ^= 1;
-}
-
 static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
 {
 	u16 tail_masked;
@@ -477,40 +468,6 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
 	return 0;
 }
 
-int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
-{
-	u8 expected_phase, cdesc_phase;
-	struct ena_eth_io_tx_cdesc *cdesc;
-	u16 masked_head;
-
-	masked_head = io_cq->head & (io_cq->q_depth - 1);
-	expected_phase = io_cq->phase;
-
-	cdesc = (struct ena_eth_io_tx_cdesc *)
-		((uintptr_t)io_cq->cdesc_addr.virt_addr +
-		(masked_head * io_cq->cdesc_entry_size_in_bytes));
-
-	/* When the current completion descriptor phase isn't the same as the
-	 * expected, it mean that the device still didn't update
-	 * this completion.
-	 */
-	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
-	if (cdesc_phase != expected_phase)
-		return -EAGAIN;
-
-	dma_rmb();
-	if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
-		pr_err("Invalid req id %d\n", cdesc->req_id);
-		return -EINVAL;
-	}
-
-	ena_com_cq_inc_head(io_cq);
-
-	*req_id = READ_ONCE(cdesc->req_id);
-
-	return 0;
-}
-
 bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
 {
 	struct ena_eth_io_rx_cdesc_base *cdesc;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 2f7657227cfe..4930324e9d8d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -86,8 +86,6 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
 			       struct ena_com_buf *ena_buf,
 			       u16 req_id);
 
-int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
-
 bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
 
 static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
@@ -159,4 +157,48 @@ static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
 	io_sq->next_to_comp += elem;
 }
 
+static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
+{
+	io_cq->head++;
+
+	/* Switch phase bit in case of wrap around */
+	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
+		io_cq->phase ^= 1;
+}
+
+static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
+					     u16 *req_id)
+{
+	u8 expected_phase, cdesc_phase;
+	struct ena_eth_io_tx_cdesc *cdesc;
+	u16 masked_head;
+
+	masked_head = io_cq->head & (io_cq->q_depth - 1);
+	expected_phase = io_cq->phase;
+
+	cdesc = (struct ena_eth_io_tx_cdesc *)
+		((uintptr_t)io_cq->cdesc_addr.virt_addr +
+		(masked_head * io_cq->cdesc_entry_size_in_bytes));
+
+	/* When the current completion descriptor phase isn't the same as the
+	 * expected, it mean that the device still didn't update
+	 * this completion.
+	 */
+	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+	if (cdesc_phase != expected_phase)
+		return -EAGAIN;
+
+	dma_rmb();
+
+	*req_id = READ_ONCE(cdesc->req_id);
+	if (unlikely(*req_id >= io_cq->q_depth)) {
+		pr_err("Invalid req id %d\n", cdesc->req_id);
+		return -EINVAL;
+	}
+
+	ena_com_cq_inc_head(io_cq);
+
+	return 0;
+}
+
 #endif /* ENA_ETH_COM_H_ */
-- 
2.19.2