From: Rob Clark rob@ti.com
Re-sending first patch, with a wider audience. Apparently I didn't spam enough inboxes the first time.
And, at Daniel Vetter's suggestion, adding some helper functions in dma-buf to get the most restrictive parameters of all the attached devices.
Rob Clark (2):
  device: add dma_params->max_segment_count
  dma-buf: add helpers for attacher dma-parms
 drivers/base/dma-buf.c      | 63 +++++++++++++++++++++++++++++++++++++++++++
 include/linux/device.h      |  1 +
 include/linux/dma-buf.h     | 19 +++++++++++++
 include/linux/dma-mapping.h | 16 +++++++++++
 4 files changed, 99 insertions(+)
From: Rob Clark rob@ti.com
For devices which have constraints on the maximum number of segments in an sglist. For example, a device which could only deal with contiguous buffers would set max_segment_count to 1.
The initial motivation is for devices sharing buffers via dma-buf, to allow the buffer exporter to know the constraints of other devices which have attached to the buffer. The dma_mask and fields in 'struct device_dma_parameters' tell the exporter everything else that is needed, except whether the importer has constraints about maximum number of segments.
Signed-off-by: Rob Clark rob@ti.com
---
 include/linux/device.h      |  1 +
 include/linux/dma-mapping.h | 16 ++++++++++++++++
 2 files changed, 17 insertions(+)
diff --git a/include/linux/device.h b/include/linux/device.h
index 161d962..3813735 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -568,6 +568,7 @@ struct device_dma_parameters {
 	 * sg limitations.
 	 */
 	unsigned int max_segment_size;
+	unsigned int max_segment_count;	/* zero for unlimited */
 	unsigned long segment_boundary_mask;
 };

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index dfc099e..f380f79 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -111,6 +111,22 @@ static inline unsigned int dma_set_max_seg_size(struct device *dev,
 		return -EIO;
 }
 
+static inline unsigned int dma_get_max_seg_count(struct device *dev)
+{
+	return dev->dma_parms ? dev->dma_parms->max_segment_count : 0;
+}
+
+static inline int dma_set_max_seg_count(struct device *dev,
+					unsigned int count)
+{
+	if (dev->dma_parms) {
+		dev->dma_parms->max_segment_count = count;
+		return 0;
+	} else
+		return -EIO;
+}
+
+
 static inline unsigned long dma_get_seg_boundary(struct device *dev)
 {
 	return dev->dma_parms ?
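(Not part of the patch -- just to illustrate the intended use: a driver for a device that can only handle contiguous buffers might advertise its limit at probe time roughly like the sketch below. The driver and function names are made up.)

	static int foo_scanout_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;

		/* dma_parms is not allocated by default */
		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
					      GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;

		/* this device can only deal with a single contiguous segment */
		dma_set_max_seg_count(dev, 1);

		return 0;
	}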
Hello,
On Thursday, July 19, 2012 6:24 PM Rob Clark wrote:
From: Rob Clark rob@ti.com
For devices which have constraints on the maximum number of segments in an sglist. For example, a device which could only deal with contiguous buffers would set max_segment_count to 1.
The initial motivation is for devices sharing buffers via dma-buf, to allow the buffer exporter to know the constraints of other devices which have attached to the buffer. The dma_mask and fields in 'struct device_dma_parameters' tell the exporter everything else that is needed, except whether the importer has constraints about maximum number of segments.
Signed-off-by: Rob Clark rob@ti.com
Yea, it is a really good idea to add this to struct device_dma_parameters. We only need to initialize it to '1' in platform startup code for all devices relevant to buffer sharing.
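Something along these lines in the board/platform startup code, I suppose (the device and structure names below are made up, just to illustrate the idea):

	/* e.g. in a board file, for a device that needs contiguous buffers */
	static struct device_dma_parameters foo_vpu_dma_parms = {
		.max_segment_count = 1,
	};

	static void __init foo_board_init(void)
	{
		foo_vpu_device.dev.dma_parms = &foo_vpu_dma_parms;
		/* ... */
	}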
Acked-by: Marek Szyprowski m.szyprowski@samsung.com
 include/linux/device.h      |  1 +
 include/linux/dma-mapping.h | 16 ++++++++++++++++
 2 files changed, 17 insertions(+)
diff --git a/include/linux/device.h b/include/linux/device.h
index 161d962..3813735 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -568,6 +568,7 @@ struct device_dma_parameters {
 	 * sg limitations.
 	 */
 	unsigned int max_segment_size;
+	unsigned int max_segment_count;	/* zero for unlimited */
 	unsigned long segment_boundary_mask;
 };

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index dfc099e..f380f79 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -111,6 +111,22 @@ static inline unsigned int dma_set_max_seg_size(struct device *dev,
 		return -EIO;
 }
 
+static inline unsigned int dma_get_max_seg_count(struct device *dev)
+{
+	return dev->dma_parms ? dev->dma_parms->max_segment_count : 0;
+}
+
+static inline int dma_set_max_seg_count(struct device *dev,
+					unsigned int count)
+{
+	if (dev->dma_parms) {
+		dev->dma_parms->max_segment_count = count;
+		return 0;
+	} else
+		return -EIO;
+}
+
 static inline unsigned long dma_get_seg_boundary(struct device *dev)
 {
 	return dev->dma_parms ?
-- 
1.7.9.5
Best regards
From: Rob Clark rob@ti.com
Add some helpers to iterate through all attachers and get the most restrictive segment size/count/boundary.
Signed-off-by: Rob Clark rob@ti.com
---
 drivers/base/dma-buf.c  | 63 +++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/dma-buf.h | 19 ++++++++++++++
 2 files changed, 82 insertions(+)
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 24e88fe..757ee20 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -192,6 +192,69 @@ void dma_buf_put(struct dma_buf *dmabuf)
 EXPORT_SYMBOL_GPL(dma_buf_put);
 /**
+ * dma_buf_max_seg_size - helper for exporters to get the minimum of
+ * all attached device's max segment size
+ */
+unsigned int dma_buf_max_seg_size(struct dma_buf *dmabuf)
+{
+	struct dma_buf_attachment *attach;
+	unsigned int max = (unsigned int)-1;
+
+	if (WARN_ON(!dmabuf))
+		return 0;
+
+	mutex_lock(&dmabuf->lock);
+	list_for_each_entry(attach, &dmabuf->attachments, node)
+		max = min(max, dma_get_max_seg_size(attach->dev));
+	mutex_unlock(&dmabuf->lock);
+
+	return max;
+}
+EXPORT_SYMBOL_GPL(dma_buf_max_seg_size);
+
+/**
+ * dma_buf_max_seg_count - helper for exporters to get the minimum of
+ * all attached device's max segment count
+ */
+unsigned int dma_buf_max_seg_count(struct dma_buf *dmabuf)
+{
+	struct dma_buf_attachment *attach;
+	unsigned int max = (unsigned int)-1;
+
+	if (WARN_ON(!dmabuf))
+		return 0;
+
+	mutex_lock(&dmabuf->lock);
+	list_for_each_entry(attach, &dmabuf->attachments, node)
+		max = min(max, dma_get_max_seg_count(attach->dev));
+	mutex_unlock(&dmabuf->lock);
+
+	return max;
+}
+EXPORT_SYMBOL_GPL(dma_buf_max_seg_count);
+
+/**
+ * dma_buf_get_seg_boundary - helper for exporters to get the most
+ * restrictive segment alignment of all the attached devices
+ */
+unsigned int dma_buf_get_seg_boundary(struct dma_buf *dmabuf)
+{
+	struct dma_buf_attachment *attach;
+	unsigned int mask = (unsigned int)-1;
+
+	if (WARN_ON(!dmabuf))
+		return 0;
+
+	mutex_lock(&dmabuf->lock);
+	list_for_each_entry(attach, &dmabuf->attachments, node)
+		mask &= dma_get_seg_boundary(attach->dev);
+	mutex_unlock(&dmabuf->lock);
+
+	return mask;
+}
+EXPORT_SYMBOL_GPL(dma_buf_get_seg_boundary);
+
+/**
  * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
  * calls attach() of dma_buf_ops to allow device-specific attach functionality
  * @dmabuf: [in] buffer to attach device to.

diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index eb48f38..9533b9b 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -167,6 +167,10 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags);
 struct dma_buf *dma_buf_get(int fd);
 void dma_buf_put(struct dma_buf *dmabuf);
 
+unsigned int dma_buf_max_seg_size(struct dma_buf *dmabuf);
+unsigned int dma_buf_max_seg_count(struct dma_buf *dmabuf);
+unsigned int dma_buf_get_seg_boundary(struct dma_buf *dmabuf);
+
 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
 					enum dma_data_direction);
 void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
@@ -220,6 +224,21 @@ static inline void dma_buf_put(struct dma_buf *dmabuf)
 	return;
 }
 
+static inline unsigned int dma_buf_max_seg_size(struct dma_buf *dmabuf)
+{
+	return 0;
+}
+
+static inline unsigned int dma_buf_max_seg_count(struct dma_buf *dmabuf)
+{
+	return 0;
+}
+
+static inline unsigned int dma_buf_get_seg_boundary(struct dma_buf *dmabuf)
+{
+	return 0;
+}
+
 static inline struct sg_table *dma_buf_map_attachment(
 	struct dma_buf_attachment *attach, enum dma_data_direction write)
 {
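(Not part of the patch -- just to show the intended use on the exporter side: a map_dma_buf() implementation could consult the helpers before deciding how to back the buffer. The exporter and its foo_build_sgt() allocator below are hypothetical.)

	static struct sg_table *foo_map_dma_buf(struct dma_buf_attachment *attach,
						enum dma_data_direction dir)
	{
		struct dma_buf *dmabuf = attach->dmabuf;
		unsigned int seg_size  = dma_buf_max_seg_size(dmabuf);
		unsigned int seg_count = dma_buf_max_seg_count(dmabuf);
		unsigned int boundary  = dma_buf_get_seg_boundary(dmabuf);

		/* pick a backing layout that satisfies the most restrictive
		 * of the currently attached devices */
		return foo_build_sgt(dmabuf->priv, dir,
				     seg_size, seg_count, boundary);
	}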
Fyi, Daniel Vetter had suggested on IRC that it would be cleaner to have a single helper fxn that returns the most-restrictive union of all attached devices' dma_parms. Really this should include dma_mask and coherent_dma_mask, I think. But that touches a lot of other places in the code. If no one objects to the cleanup of moving dma_mask/coherent_dma_mask into dma_parms, I'll do this first.
So anyways, don't consider this patch yet for inclusion, I'll make an updated one based on dma_parms..
BR, -R
On Thu, Jul 19, 2012 at 11:23 AM, Rob Clark rob.clark@linaro.org wrote:
From: Rob Clark rob@ti.com
Add some helpers to iterate through all attachers and get the most restrictive segment size/count/boundary.
Signed-off-by: Rob Clark rob@ti.com
On Fri, Jul 20, 2012 at 10:09 PM, Rob Clark rob.clark@linaro.org wrote:
Fyi, Daniel Vetter had suggested on IRC that it would be cleaner to have a single helper fxn that returns the most-restrictive union of all attached devices' dma_parms. Really this should include dma_mask and coherent_dma_mask, I think. But that touches a lot of other places in the code. If no one objects to the cleanup of moving dma_mask/coherent_dma_mask into dma_parms, I'll do this first.
So anyways, don't consider this patch yet for inclusion, I'll make an updated one based on dma_parms..
BR, -R

Hi Rob,

Any news on this patch-set?

BR, ~Sumit.
Hi Rob,
On 07/19/2012 06:23 PM, Rob Clark wrote:
From: Rob Clark rob@ti.com
Add some helpers to iterate through all attachers and get the most restrictive segment size/count/boundary.
Signed-off-by: Rob Clark rob@ti.com
 drivers/base/dma-buf.c  | 63 +++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/dma-buf.h | 19 ++++++++++++++
 2 files changed, 82 insertions(+)
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 24e88fe..757ee20 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -192,6 +192,69 @@ void dma_buf_put(struct dma_buf *dmabuf)
 EXPORT_SYMBOL_GPL(dma_buf_put);
 
 /**
+ * dma_buf_max_seg_size - helper for exporters to get the minimum of
+ * all attached device's max segment size
+ */
+unsigned int dma_buf_max_seg_size(struct dma_buf *dmabuf)
+{
+	struct dma_buf_attachment *attach;
+	unsigned int max = (unsigned int)-1;
+
+	if (WARN_ON(!dmabuf))
+		return 0;
Maybe you should change return type to 'int' and return -EINVAL here?
+
+	mutex_lock(&dmabuf->lock);
+	list_for_each_entry(attach, &dmabuf->attachments, node)
+		max = min(max, dma_get_max_seg_size(attach->dev));
+	mutex_unlock(&dmabuf->lock);
+
+	return max;
+}
+EXPORT_SYMBOL_GPL(dma_buf_max_seg_size);
+
+/**
+ * dma_buf_max_seg_count - helper for exporters to get the minimum of
+ * all attached device's max segment count
+ */
+unsigned int dma_buf_max_seg_count(struct dma_buf *dmabuf)
+{
+	struct dma_buf_attachment *attach;
+	unsigned int max = (unsigned int)-1;
+
+	if (WARN_ON(!dmabuf))
+		return 0;
maybe return -EINVAL here?
+
+	mutex_lock(&dmabuf->lock);
+	list_for_each_entry(attach, &dmabuf->attachments, node)
+		max = min(max, dma_get_max_seg_count(attach->dev));
I think that there is a bug here. Assume that there are two devices on the list: one that accepts an unlimited number of segments (value 0), and a second one that needs a contiguous buffer (value 1). The result of the function is 0 == min(0, 1).
The return value 0 indicates that an unlimited number of sg segments is accepted, which is *wrong* because the correct value should be 1.
I recommend changing the semantics for an unlimited number of segments from 'value 0' to:
#define DMA_SEGMENTS_COUNT_UNLIMITED ((unsigned long)INT_MAX)
Using INT_MAX will allow using safe conversions between signed and unsigned integers.
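For illustration, assuming dma_get_max_seg_count() is also changed to return this value when no limit has been set, the loop above would then read:

	unsigned int max = DMA_SEGMENTS_COUNT_UNLIMITED;

	list_for_each_entry(attach, &dmabuf->attachments, node)
		max = min(max, dma_get_max_seg_count(attach->dev));

	/* unlimited (INT_MAX) importer + contiguous-only (1) importer:
	 * min(INT_MAX, 1) == 1, which is the correct combined limit */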
+	mutex_unlock(&dmabuf->lock);
+
+	return max;
+}
+EXPORT_SYMBOL_GPL(dma_buf_max_seg_count);
+
+/**
+ * dma_buf_get_seg_boundary - helper for exporters to get the most
+ * restrictive segment alignment of all the attached devices
+ */
+unsigned int dma_buf_get_seg_boundary(struct dma_buf *dmabuf)
+{
+	struct dma_buf_attachment *attach;
+	unsigned int mask = (unsigned int)-1;
+
+	if (WARN_ON(!dmabuf))
+		return 0;
+
+	mutex_lock(&dmabuf->lock);
+	list_for_each_entry(attach, &dmabuf->attachments, node)
+		mask &= dma_get_seg_boundary(attach->dev);
+	mutex_unlock(&dmabuf->lock);
+
+	return mask;
+}
+EXPORT_SYMBOL_GPL(dma_buf_get_seg_boundary);
+
+/**
  * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
  * calls attach() of dma_buf_ops to allow device-specific attach functionality
  * @dmabuf: [in] buffer to attach device to.

diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index eb48f38..9533b9b 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -167,6 +167,10 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags);
 struct dma_buf *dma_buf_get(int fd);
 void dma_buf_put(struct dma_buf *dmabuf);
 
+unsigned int dma_buf_max_seg_size(struct dma_buf *dmabuf);
+unsigned int dma_buf_max_seg_count(struct dma_buf *dmabuf);
+unsigned int dma_buf_get_seg_boundary(struct dma_buf *dmabuf);
Instead of adding an army of new handlers you could provide a single helper:
int dma_buf_get_parameters(struct dma_buf *dmabuf, struct device_dma_parameters *params);
This function will fill *params with lowest common DMA requirements for all devices on attachment list. Return value can be used to diagnose errors like incorrectly initialized dma_buf pointer (like no attachments on an attachment list).
Moreover, there will be no need to add a new handler every time device_dma_parameters is extended.
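For example, a rough sketch of such a helper (just to illustrate the idea -- the initial "no restriction" values and the handling of the 'unlimited' segment count assume the semantics proposed above):

	int dma_buf_get_parameters(struct dma_buf *dmabuf,
				   struct device_dma_parameters *params)
	{
		struct dma_buf_attachment *attach;
		int ret = -EINVAL;	/* diagnose a buffer with no attachments */

		if (WARN_ON(!dmabuf || !params))
			return -EINVAL;

		/* start from "no restrictions" and tighten per attachment */
		params->max_segment_size = UINT_MAX;
		params->max_segment_count = DMA_SEGMENTS_COUNT_UNLIMITED;
		params->segment_boundary_mask = ULONG_MAX;

		mutex_lock(&dmabuf->lock);
		list_for_each_entry(attach, &dmabuf->attachments, node) {
			params->max_segment_size = min(params->max_segment_size,
					dma_get_max_seg_size(attach->dev));
			params->max_segment_count = min(params->max_segment_count,
					dma_get_max_seg_count(attach->dev));
			params->segment_boundary_mask &=
					dma_get_seg_boundary(attach->dev);
			ret = 0;
		}
		mutex_unlock(&dmabuf->lock);

		return ret;
	}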
Regards, Tomasz Stanislawski
 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
 					enum dma_data_direction);
 void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
@@ -220,6 +224,21 @@ static inline void dma_buf_put(struct dma_buf *dmabuf)
 	return;
 }
 
+static inline unsigned int dma_buf_max_seg_size(struct dma_buf *dmabuf)
+{
+	return 0;
+}
+
+static inline unsigned int dma_buf_max_seg_count(struct dma_buf *dmabuf)
+{
+	return 0;
+}
+
+static inline unsigned int dma_buf_get_seg_boundary(struct dma_buf *dmabuf)
+{
+	return 0;
+}
+
 static inline struct sg_table *dma_buf_map_attachment(
 	struct dma_buf_attachment *attach, enum dma_data_direction write)
 {
Tomasz Stanislawski t.stanislaws@samsung.com writes:
I recommend changing the semantics for an unlimited number of segments from 'value 0' to:
#define DMA_SEGMENTS_COUNT_UNLIMITED ((unsigned long)INT_MAX)
Using INT_MAX will allow using safe conversions between signed and unsigned integers.
LONG_MAX seems cleaner regardless.
On 08/06/2012 01:58 PM, Michal Nazarewicz wrote:
Tomasz Stanislawski t.stanislaws@samsung.com writes:
I recommend changing the semantics for an unlimited number of segments from 'value 0' to:
#define DMA_SEGMENTS_COUNT_UNLIMITED ((unsigned long)INT_MAX)
Sorry. It should be:

#define DMA_SEGMENTS_COUNT_UNLIMITED ((unsigned int)INT_MAX)
Using INT_MAX will allow using safe conversions between signed and unsigned integers.
LONG_MAX seems cleaner regardless.