On Fri, Apr 17, 2020 at 02:49:11PM -0700, Scott Branden wrote:
> > > +static int bcm_vk_dma_alloc(struct device *dev,
> > > +			    struct bcm_vk_dma *dma,
> > > +			    int direction,
> > > +			    struct _vk_data *vkdata)
> > > +{
> > > +	dma_addr_t addr, sg_addr;
> > > +	int err;
> > > +	int i;
> > > +	int offset;
> > > +	uint32_t size;
> > > +	uint32_t remaining_size;
> > > +	uint32_t transfer_size;
> > > +	uint64_t data;
> > > +	unsigned long first, last;
> > > +	struct _vk_data *sgdata;
> > > +
> > > +	/* Get 64-bit user address */
> > > +	data = get_unaligned(&(vkdata->address));
> >
> > Extra parens.
> >
> removed
> > > +	/* offset into first page */
> > > +	offset = offset_in_page(data);
> > > +
> > > +	/* Calculate number of pages */
> > > +	first = (data & PAGE_MASK) >> PAGE_SHIFT;
> > > +	last = ((data + vkdata->size - 1) & PAGE_MASK) >> PAGE_SHIFT;
> > > +	dma->nr_pages = last - first + 1;
> > > +
> > > +	/* Allocate DMA pages */
> > > +	dma->pages = kmalloc_array(dma->nr_pages,
> > > +				   sizeof(struct page *),
> > > +				   GFP_KERNEL);
> > > +	if (dma->pages == NULL)
> > > +		return -ENOMEM;
> > > +
> > > +	dev_dbg(dev, "Alloc DMA Pages [0x%llx+0x%x => %d pages]\n",
> > > +		data, vkdata->size, dma->nr_pages);
> > > +
> > > +	dma->direction = direction;
> > > +
> > > +	/* Get user pages into memory */
> > > +	err = get_user_pages_fast(data & PAGE_MASK,
> > > +				  dma->nr_pages,
> > > +				  direction == DMA_FROM_DEVICE,
> > > +				  dma->pages);
> > > +	if (err != dma->nr_pages) {
> > > +		dma->nr_pages = (err >= 0) ? err : 0;
> > > +		dev_err(dev, "get_user_pages_fast, err=%d [%d]\n",
> > > +			err, dma->nr_pages);
> > > +		return err < 0 ? err : -EINVAL;
> > > +	}
> > > +
> > > +	/* Max size of sg list is 1 per mapped page + fields at start */
> > > +	dma->sglen = (dma->nr_pages * sizeof(*sgdata)) +
> > > +		     (sizeof(uint32_t) * SGLIST_VKDATA_START);
> > > +
> > > +	/* Allocate sglist */
> > > +	dma->sglist = dma_alloc_coherent(dev,
> > > +					 dma->sglen,
> > > +					 &dma->handle,
> > > +					 GFP_KERNEL);
> >
> > This fits on one line:
> >
> > 	dma->sglist = dma_alloc_coherent(dev, dma->sglen, &dma->handle, GFP_KERNEL);
> >
> done
> > > +	if (!dma->sglist)
> > > +		return -ENOMEM;
> >
> > No cleanup?
> >
> what needs to be cleaned up?

dma->pages should be freed probably?  And a put_user_pages_fast()?

regards,
dan carpenter
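
For reference, a minimal sketch of the unwind being asked for here, using
only names visible in the quoted patch (an illustration of the idea, not
necessarily the fix that was eventually merged):

	dma->sglist = dma_alloc_coherent(dev, dma->sglen, &dma->handle, GFP_KERNEL);
	if (!dma->sglist) {
		/* drop the page references taken by get_user_pages_fast() */
		for (i = 0; i < dma->nr_pages; i++)
			put_page(dma->pages[i]);
		kfree(dma->pages);
		dma->pages = NULL;
		return -ENOMEM;
	}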
On Sat, Apr 18, 2020 at 02:45:16PM +0300, Dan Carpenter wrote:
> On Fri, Apr 17, 2020 at 02:49:11PM -0700, Scott Branden wrote:
>
> [ quoted patch and review snipped ]
>
> dma->pages should be freed probably?  And a put_user_pages_fast()?

Sorry, put_user_pages_fast() isn't a function.  My bad.
regards,
dan carpenter
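
There is indeed no put_user_pages_fast(); pages pinned with
get_user_pages_fast() are released one at a time with put_page().  A small
helper keeps the error path and normal teardown identical; the helper name
below is hypothetical, and it assumes <linux/mm.h> for put_page():

static void bcm_vk_put_pages(struct page **pages, int nr)
{
	int i;

	/* release each page reference taken by get_user_pages_fast() */
	for (i = 0; i < nr; i++)
		put_page(pages[i]);
}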
Thanks Dan.  I'll send out a new version as soon as my other patch (the
test_fw mutex cleanups you had requested),
https://lore.kernel.org/linux-arm-msm/20200415002517.4328-1-scott.branden@br...
hits the linux-next tree, so this patch series will apply cleanly to
linux-next.
On 2020-04-18 4:47 a.m., Dan Carpenter wrote:
> On Sat, Apr 18, 2020 at 02:45:16PM +0300, Dan Carpenter wrote:
> > On Fri, Apr 17, 2020 at 02:49:11PM -0700, Scott Branden wrote:
> >
> > [ quoted patch and review snipped ]
> >
> > dma->pages should be freed probably?  And a put_user_pages_fast()?
>
> Sorry, put_user_pages_fast() isn't a function.  My bad.
>
> regards,
> dan carpenter
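
For background, the page-count arithmetic the quoted patch relies on,
worked through with hypothetical values (PAGE_SHIFT == 12, i.e. 4 KiB
pages):

	/*
	 * data = 0x1234, vkdata->size = 0x2000 (an 8 KiB buffer)
	 *
	 * first    = (0x1234 & PAGE_MASK) >> 12               = 1
	 * last     = ((0x1234 + 0x2000 - 1) & PAGE_MASK) >> 12
	 *          = (0x3233 & PAGE_MASK) >> 12               = 3
	 * nr_pages = last - first + 1                         = 3
	 */

An 8 KiB buffer that starts mid-page spans three pages, which is why the
sglist is sized at one entry per mapped page rather than at
vkdata->size / PAGE_SIZE.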