static void
forward_packet(struct rte_mbuf *mbuf, struct dev_info *s_dev)
{
    struct rte_ether_hdr *pkt_hdr;
    struct dev_info *dev;
    struct rte_mbuf *tbuf;
    int ret;

    /* Get the Ethernet header and look up the output device */
    pkt_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
    ret = rte_hash_lookup(Mac_output_map, &pkt_hdr->d_addr);

    /* Broadcast: destination MAC unknown, flood to every other device */
    if (ret < 0) {
        TAILQ_FOREACH(dev, &Dev_list, dev_entry) {
            printf("forward_packet\n");
            if (dev == s_dev)
                continue;
            if (dev->virtual) {
                if (unlikely(dev->state == DEVICE_CLOSING))
                    continue;
                /*
                 * No rte_pktmbuf_clone() needed here:
                 * virtio_dev_rx_split() and friends copy the packet
                 * data into guest memory, so the same mbuf can be
                 * enqueued to every vhost device.
                 */
                rte_vhost_enqueue_burst(dev->id, VIRTIO_RXQ, &mbuf, 1);
            } else {
                tbuf = rte_pktmbuf_clone(mbuf, Mbuf_pool);
                ret = rte_eth_tx_burst(dev->id, 0, &tbuf, 1);
                if (unlikely(ret == 0))
                    rte_pktmbuf_free(tbuf);
            }
        }
        rte_pktmbuf_free(mbuf);
        return;
    }

    /* Unicast: ret is the key's position in the hash table */
    dev = Output_table[ret];
    if (dev->virtual) {
        if (likely(dev->state != DEVICE_CLOSING))   /* was unlikely(): a device is normally not closing */
            rte_vhost_enqueue_burst(dev->id, VIRTIO_RXQ, &mbuf, 1);
        /* the enqueue copied the data, so the mbuf is freed either way */
        rte_pktmbuf_free(mbuf);
    } else {
        ret = rte_eth_tx_burst(dev->id, 0, &mbuf, 1);
        if (unlikely(ret == 0))
            rte_pktmbuf_free(mbuf);
    }
}
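The unicast branch works because rte_hash_add_key() and rte_hash_lookup() return the same key position, which the code above uses as an index into Output_table. A minimal sketch of how Mac_output_map and Output_table might be populated; the capacity, the choice of rte_jhash, and the mac_table_learn() helper are illustrative assumptions, not from the original post:

#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_ether.h>
#include <rte_lcore.h>

struct dev_info;                          /* defined elsewhere in the post's app */

#define MAX_DEVS 1024                     /* assumed table capacity */
static struct dev_info *Output_table[MAX_DEVS];
static struct rte_hash *Mac_output_map;

static int
mac_table_init(void)
{
    struct rte_hash_parameters params = {
        .name = "mac_output_map",
        .entries = MAX_DEVS,
        .key_len = sizeof(struct rte_ether_addr),
        .hash_func = rte_jhash,
        .socket_id = (int)rte_socket_id(),
    };
    Mac_output_map = rte_hash_create(&params);
    return Mac_output_map ? 0 : -1;
}

static int
mac_table_learn(const struct rte_ether_addr *mac, struct dev_info *dev)
{
    /* rte_hash_add_key() returns the key position on success; the same
     * position comes back from rte_hash_lookup() in forward_packet(). */
    int pos = rte_hash_add_key(Mac_output_map, mac);
    if (pos < 0)
        return pos;
    Output_table[pos] = dev;
    return 0;
}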
uint16_t
rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
        struct rte_mbuf **pkts, uint16_t count)
{
    struct virtio_net *dev = get_device(vid);

    if (!dev)
        return 0;

    if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
        RTE_LOG(ERR, VHOST_DATA,
            "(%d) %s: built-in vhost net backend is disabled.\n",
            dev->vid, __func__);
        return 0;
    }

    return virtio_dev_rx(dev, queue_id, pkts, count);
}
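rte_vhost_enqueue_burst() returns how many packets were actually copied into the guest's ring, and it never takes ownership of the mbufs. A hedged caller-side sketch (vid, pkts, and nb_rx are illustrative names):

/* Drain a received burst into a vhost device. Because the enqueue path
 * (virtio_dev_rx_split()/..._packed()) copies packet data into guest
 * memory, the caller still owns every mbuf and frees them all, whether
 * or not they fit into the avail ring. */
uint16_t enq = rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts, nb_rx);
if (enq < nb_rx) {
    /* packets [enq, nb_rx) were dropped: no free buffers in the ring */
}
for (uint16_t i = 0; i < nb_rx; i++)
    rte_pktmbuf_free(pkts[i]);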
static __rte_always_inline uint32_t
virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
        struct rte_mbuf **pkts, uint32_t count)
{
    struct vhost_virtqueue *vq;
    uint32_t nb_tx = 0;

    VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
    if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
        RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
            dev->vid, __func__, queue_id);
        return 0;
    }

    vq = dev->virtqueue[queue_id];

    rte_spinlock_lock(&vq->access_lock);

    if (unlikely(vq->enabled == 0))
        goto out_access_unlock;

    if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
        vhost_user_iotlb_rd_lock(vq);

    if (unlikely(vq->access_ok == 0))
        if (unlikely(vring_translate(dev, vq) < 0))
            goto out;

    count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
    if (count == 0)
        goto out;

    if (vq_is_packed(dev))
        nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
    else
        nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);

out:
    if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
        vhost_user_iotlb_rd_unlock(vq);

out_access_unlock:
    rte_spinlock_unlock(&vq->access_lock);

    return nb_tx;
}
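The queue_id check at the top matters because vhost numbers virtqueues in RX/TX pairs: seen from the backend, even indices are RX rings and odd indices TX rings. is_valid_virt_queue_idx() in DPDK's vhost.h is roughly the following (quoted from memory of this DPDK version, so treat it as a sketch):

static __rte_always_inline bool
is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
{
    /* called with is_tx = 0 above, so only even idx below nr_vring pass */
    return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}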
static __rte_noinline uint32_t
virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
        struct rte_mbuf **pkts, uint32_t count)
{
    uint32_t pkt_idx = 0;
    uint32_t remained = count;

    do {
        rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);

        if (remained >= PACKED_BATCH_SIZE) {
            if (!virtio_dev_rx_batch_packed(dev, vq, &pkts[pkt_idx])) {
                pkt_idx += PACKED_BATCH_SIZE;
                remained -= PACKED_BATCH_SIZE;
                continue;
            }
        }

        if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
            break;
        pkt_idx++;
        remained--;

    } while (pkt_idx < count);

    if (vq->shadow_used_idx) {
        do_data_copy_enqueue(dev, vq);
        vhost_flush_enqueue_shadow_packed(dev, vq);
    }

    if (pkt_idx)
        vhost_vring_call_packed(dev, vq);

    return pkt_idx;
}
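The packed path first tries a whole batch of PACKED_BATCH_SIZE descriptors (in this DPDK version, one cache line's worth of 16-byte packed descriptors, i.e. 4) and falls back to single-packet enqueue. Descriptor availability in a packed ring is encoded by two flag bits compared against a wrap counter; a sketch modeled on desc_is_avail() in DPDK's vhost.h (flag values are from the virtio spec):

#include <stdbool.h>
#include <stdint.h>

#define VRING_DESC_F_AVAIL (1 << 7)
#define VRING_DESC_F_USED  (1 << 15)

/* A packed-ring descriptor belongs to the backend when its AVAIL bit
 * matches the ring's current wrap counter and its USED bit does not. */
static inline bool
desc_is_avail_sketch(uint16_t flags, bool wrap_counter)
{
    return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
           wrap_counter != !!(flags & VRING_DESC_F_USED);
}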
static __rte_always_inline uint32_t
virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
        struct rte_mbuf **pkts, uint32_t count)
{
    uint32_t pkt_idx = 0;
    uint16_t num_buffers;
    struct buf_vector buf_vec[BUF_VECTOR_MAX];
    uint16_t avail_head;

    rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
    avail_head = *((volatile uint16_t *)&vq->avail->idx);

    for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
        uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
        uint16_t nr_vec = 0;

        /* Reserve enough avail descriptors to hold the current mbuf */
        if (unlikely(reserve_avail_buf_split(dev, vq, pkt_len, buf_vec,
                &num_buffers, avail_head, &nr_vec) < 0)) {
            vq->shadow_used_idx -= num_buffers;
            break;
        }

        /* Copy the mbuf into the reserved descriptors */
        if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec,
                nr_vec, num_buffers) < 0) {
            vq->shadow_used_idx -= num_buffers;
            break;
        }

        /* Advance last_avail_idx past the consumed descriptors */
        vq->last_avail_idx += num_buffers;
    }

    /* Flush the batched copies of small packets */
    do_data_copy_enqueue(dev, vq);

    if (likely(vq->shadow_used_idx)) {
        flush_shadow_used_ring_split(dev, vq);  /* update the used ring */
        vhost_vring_call_split(dev, vq);        /* notify the frontend */
    }

    return pkt_idx;
}
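The split path snapshots avail->idx once into avail_head and then walks forward with last_avail_idx. Both are free-running 16-bit counters, so unsigned wraparound keeps the arithmetic correct; a sketch of the invariant that reserve_avail_buf_split() relies on:

#include <stdint.h>

/* Number of buffers the guest has posted but the backend has not yet
 * consumed. Correct across the 65535 -> 0 wrap because the subtraction
 * is done in uint16_t; the actual ring slot is idx & (size - 1). */
static inline uint16_t
avail_entries_sketch(uint16_t avail_head, uint16_t last_avail_idx)
{
    return (uint16_t)(avail_head - last_avail_idx);
}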
Source: https://www.cnblogs.com/dream397/p/14155582.html