Hi Alan,
On 08/25/2016 05:39 PM, Alan Ott wrote:
Change the receive buffers to be single-use only. This involves a few changes that work together:
- Change msk_newbuf() to not attempt to re-use or free a buffer,
- Change msk_rxeof() to free the buffer when it is done with it,
- Store a temporary copy of the received data for passing to the Receive Queue,
- Call the required Flush() and Unmap() on the DMA buffer.
In addition, this means that a failure to allocate a new buffer is a failure of msk_rxeof().
Note that this change makes the driver work the way the FreeBSD driver (from which this driver was derived) works; it simply removes an optimization (the code in msk_newbuf() which re-used the buffers). The optimization is removed for two reasons:
- The optimization failed to work for 64-bit DMA transfers;
- The UEFI specification, version 2.6, section 13.4 requires calls to Flush() and Unmap() before reading a DMA write buffer from the CPU, which doesn't fit with the optimization as it existed.
Reverting to the behavior of the FreeBSD driver solves both problems above and makes this driver more consistent with code we know to be working. There is slightly more overhead, but the result follows the UEFI specification (the required DMA sequence is sketched below).
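For reference, the sequence that section describes for a bus-master write (receive) buffer is roughly the following. This is only a sketch against the generic EFI_PCI_IO_PROTOCOL, not code from this patch; PciIo, Length, and the device-programming step are placeholders, and error handling is omitted:

  EFI_STATUS            Status;
  VOID                  *Buf;
  VOID                  *Mapping;
  UINTN                 Bytes = Length;
  EFI_PHYSICAL_ADDRESS  DeviceAddress;

  // Allocate and map a buffer the device will DMA into (bus-master write).
  Status = PciIo->AllocateBuffer (PciIo, AllocateAnyPages, EfiBootServicesData,
                                  EFI_SIZE_TO_PAGES (Length), &Buf, 0);
  Status = PciIo->Map (PciIo, EfiPciIoOperationBusMasterWrite, Buf, &Bytes,
                       &DeviceAddress, &Mapping);

  // ... program DeviceAddress into the NIC and wait for it to write a frame ...

  // Flush() and Unmap() must complete before the CPU reads the buffer.
  Status = PciIo->Flush (PciIo);
  Status = PciIo->Unmap (PciIo, Mapping);

  // ... the CPU may now copy the received frame out of Buf ...

  Status = PciIo->FreeBuffer (PciIo, EFI_SIZE_TO_PAGES (Length), Buf);

The new msk_rxeof() follows this order: Flush(), Unmap(), copy the frame into a pool buffer for the Receive Queue, then FreeBuffer() on the DMA buffer.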
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Alan Ott <alan@softiron.co.uk>
 Drivers/Net/MarvellYukonDxe/if_msk.c | 30 +++++++++++++++++++-----------
 1 file changed, 19 insertions(+), 11 deletions(-)
diff --git a/Drivers/Net/MarvellYukonDxe/if_msk.c b/Drivers/Net/MarvellYukonDxe/if_msk.c
index 7979bf3..573dea5 100644
--- a/Drivers/Net/MarvellYukonDxe/if_msk.c
+++ b/Drivers/Net/MarvellYukonDxe/if_msk.c
@@ -581,13 +581,6 @@ msk_newbuf (
   rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
-  if ((rxd->rx_m.Buf != NULL) && (rxd->rx_m.Length >= Length)) {
-    return EFI_ALREADY_STARTED;
-  } else if (rxd->rx_m.Buf != NULL) {
-    mPciIo->Unmap (mPciIo, rxd->rx_m.DmaMapping);
-    mPciIo->FreeBuffer (mPciIo, EFI_SIZE_TO_PAGES (rxd->rx_m.Length), rxd->rx_m.Buf);
-  }
-
   Status = mPciIo->AllocateBuffer (mPciIo, AllocateAnyPages, EfiBootServicesData, EFI_SIZE_TO_PAGES (Length), &Buffer, 0);
   if (EFI_ERROR (Status)) {
     return Status;
@@ -1848,6 +1841,7 @@ msk_rxeof (
   struct msk_rxdesc  *rxd;
   INTN               cons;
   INTN               rxlen;
+  MSK_DMA_BUF        m;
 
   DEBUG ((EFI_D_NET, "Marvell Yukon: rxeof\n"));
@@ -1871,26 +1865,40 @@ msk_rxeof (
       break;
     }
+    m = rxd->rx_m;
     Status = msk_newbuf (sc_if, cons);
     if (EFI_ERROR (Status)) {
       // This is a dropped packet, but we aren't counting drops
       // Reuse old buffer
       msk_discard_rxbuf (sc_if, cons);
       break;
+    }
+
+    Status = mPciIo->Flush (mPciIo);
+    if (EFI_ERROR (Status)) {
+      DEBUG ((EFI_D_ERROR, "Marvell Yukon: failed to Flush DMA\n"));
This is a network-related error message; please change the debug level to EFI_D_NET.
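i.e. something like:

      DEBUG ((EFI_D_NET, "Marvell Yukon: failed to Flush DMA\n"));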
     }
 
+    Status = mPciIo->Unmap (mPciIo, m.DmaMapping);
+    if (EFI_ERROR (Status)) {
+      DEBUG ((EFI_D_ERROR, "Marvell Yukon: failed to Unmap DMA\n"));
Likewise, this is a network-related error message; please use EFI_D_NET here as well.
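i.e.:

      DEBUG ((EFI_D_NET, "Marvell Yukon: failed to Unmap DMA\n"));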
+    }
 
     Status = gBS->AllocatePool (EfiBootServicesData, sizeof (MSK_LINKED_DMA_BUF), (VOID**) &m_link);
     if (!EFI_ERROR (Status)) {
       gBS->SetMem (m_link, sizeof (MSK_LINKED_DMA_BUF), 0);
       rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
       m_link->Signature = RX_MBUF_SIGNATURE;
       Status = gBS->AllocatePool (EfiBootServicesData, len, (VOID**) &m_link->DmaBuf.Buf);
       if(!EFI_ERROR (Status)) {
-        gBS->CopyMem (m_link->DmaBuf.Buf, rxd->rx_m.Buf, len);
+        CopyMem (m_link->DmaBuf.Buf, m.Buf, len);
Please use gBS->CopyMem() instead of CopyMem(). It's needed for EBC code optimization.
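That is, keeping the new local 'm':

        gBS->CopyMem (m_link->DmaBuf.Buf, m.Buf, len);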
         m_link->DmaBuf.Length = len;
-        m_link->DmaBuf.DmaMapping = rxd->rx_m.DmaMapping;
+        m_link->DmaBuf.DmaMapping = m.DmaMapping;
+        mPciIo->FreeBuffer (mPciIo, EFI_SIZE_TO_PAGES (m.Length), m.Buf);
         InsertTailList (&mSoftc->ReceiveQueueHead, &m_link->Link);
       } else {