Commit da84dfe

---
r: 297198
b: refs/heads/master
c: fc13020
h: refs/heads/master
v: v3
Daniel Vetter authored and Sumit Semwal committed Mar 26, 2012
1 parent 1355702 commit da84dfe
Showing 3 changed files with 183 additions and 2 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 6b607e3a658fee490bdabfdeb739a3eb498b1bff
+refs/heads/master: fc13020e086bfedf2afb95c91c026d5af1f80107
124 changes: 123 additions & 1 deletion trunk/drivers/base/dma-buf.c
@@ -80,7 +80,9 @@ struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
 	if (WARN_ON(!priv || !ops
 		  || !ops->map_dma_buf
 		  || !ops->unmap_dma_buf
-		  || !ops->release)) {
+		  || !ops->release
+		  || !ops->kmap_atomic
+		  || !ops->kmap)) {
 		return ERR_PTR(-EINVAL);
 	}

@@ -284,3 +286,123 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);


/**
* dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
* cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
* preparations. Coherency is only guaranteed in the specified range for the
* specified access direction.
* @dmabuf: [in] buffer to prepare cpu access for.
* @start: [in] start of range for cpu access.
* @len: [in] length of range for cpu access.
* @direction: [in] direction of cpu access.
*
* Can return negative error values, returns 0 on success.
*/
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
* dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
* cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
* actions. Coherency is only guaranteed in the specified range for the
* specified access direction.
* @dmabuf: [in] buffer to complete cpu access for.
* @start: [in] start of range for cpu access.
* @len: [in] length of range for cpu access.
* @direction: [in] direction of cpu access.
*
* This call must always succeed.
*/
void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
			    enum dma_data_direction direction)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
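
Editorial aside, not part of the patch: a consumer is expected to bracket any kernel-context CPU access with this pair. A minimal sketch, assuming an already attached dmabuf whose contents were written by a device (example_cpu_read is a hypothetical name):

/* Hypothetical consumer-side sketch (not from this patch). Assumes
 * <linux/dma-buf.h> and <linux/dma-mapping.h>, and a valid dmabuf.
 */
static int example_cpu_read(struct dma_buf *dmabuf)
{
	int ret;

	/* Make the first page coherent for a CPU read of device-written data. */
	ret = dma_buf_begin_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* ... CPU access of the range [0, PAGE_SIZE) goes here ... */

	/* The end call must always succeed. */
	dma_buf_end_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	return 0;
}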

/**
* dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
* space. The same restrictions as for kmap_atomic and friends apply.
* @dmabuf: [in] buffer to map page from.
* @page_num: [in] page in PAGE_SIZE units to map.
*
* This call must always succeed; any necessary preparations that might fail
* need to be done in begin_cpu_access.
*/
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);

/**
* dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
* @dmabuf: [in] buffer to unmap page from.
* @page_num: [in] page in PAGE_SIZE units to unmap.
* @vaddr: [in] kernel space pointer obtained from dma_buf_kmap_atomic.
*
* This call must always succeed.
*/
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
			   void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap_atomic)
		dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
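
Editorial sketch (hypothetical, not part of the patch): because the atomic callbacks must not sleep, this pair suits short non-blocking peeks, e.g. reading a status word. It assumes dma_buf_begin_cpu_access() was already called for the range:

/* Peek at a 32-bit word at the start of page 0 without sleeping.
 * No blocking is allowed between the map and the unmap call.
 */
static u32 example_peek_status(struct dma_buf *dmabuf)
{
	void *vaddr;
	u32 status;

	vaddr = dma_buf_kmap_atomic(dmabuf, 0);
	status = *(u32 *)vaddr;
	dma_buf_kunmap_atomic(dmabuf, 0, vaddr);

	return status;
}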

/**
* dma_buf_kmap - Map a page of the buffer object into kernel address space. The
* same restrictions as for kmap and friends apply.
* @dmabuf: [in] buffer to map page from.
* @page_num: [in] page in PAGE_SIZE units to map.
*
* This call must always succeed; any necessary preparations that might fail
* need to be done in begin_cpu_access.
*/
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
* dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
* @dmabuf: [in] buffer to unmap page from.
* @page_num: [in] page in PAGE_SIZE units to unmap.
* @vaddr: [in] kernel space pointer obtained from dma_buf_kmap.
*
* This call must always succeed.
*/
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		    void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap)
		dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);
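
Putting the pieces together, a plausible end-to-end sequence (editorial sketch; example_copy_out and its parameters are invented for illustration) copies data out of an exported buffer page by page using the sleepable variants:

/* Copy 'len' bytes from the start of the buffer into 'dst'.
 * Assumes <linux/dma-buf.h>, <linux/kernel.h> and <linux/string.h>.
 */
static int example_copy_out(struct dma_buf *dmabuf, u8 *dst, size_t len)
{
	unsigned long pg = 0;
	size_t done = 0;
	int ret;

	ret = dma_buf_begin_cpu_access(dmabuf, 0, len, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	while (done < len) {
		size_t chunk = min_t(size_t, len - done, PAGE_SIZE);
		void *vaddr = dma_buf_kmap(dmabuf, pg);	/* may sleep */

		memcpy(dst + done, vaddr, chunk);
		dma_buf_kunmap(dmabuf, pg, vaddr);
		done += chunk;
		pg++;
	}

	dma_buf_end_cpu_access(dmabuf, 0, len, DMA_FROM_DEVICE);
	return 0;
}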
59 changes: 59 additions & 0 deletions trunk/include/linux/dma-buf.h
@@ -50,6 +50,17 @@ struct dma_buf_attachment;
* @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
*		  pages.
* @release: release this buffer; to be called after the last dma_buf_put.
* @begin_cpu_access: [optional] called before cpu access to invalidate cpu
*		     caches and allocate backing storage (if not yet done),
*		     or to pin the object into memory.
* @end_cpu_access: [optional] called after cpu access to flush caches.
* @kmap_atomic: maps a page from the buffer into kernel address
*		space; users may not block between this call and the
*		subsequent unmap call. This callback must not sleep.
* @kunmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
*		  This callback must not sleep.
* @kmap: maps a page from the buffer into kernel address space.
* @kunmap: [optional] unmaps a page from the buffer.
*/
struct dma_buf_ops {
	int (*attach)(struct dma_buf *, struct device *,
@@ -73,6 +84,14 @@ struct dma_buf_ops {
	/* after final dma_buf_put() */
	void (*release)(struct dma_buf *);

	int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
				enum dma_data_direction);
	void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
			       enum dma_data_direction);
	void *(*kmap_atomic)(struct dma_buf *, unsigned long);
	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
	void *(*kmap)(struct dma_buf *, unsigned long);
	void (*kunmap)(struct dma_buf *, unsigned long, void *);
};
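
On the exporter side, the two new mandatory callbacks can often be thin wrappers around the regular highmem helpers. A minimal sketch, assuming a hypothetical exporter that keeps its backing pages in an array reachable through dmabuf->priv (struct example_buffer and all example_* names are invented for illustration; <linux/highmem.h> is assumed):

/* Hypothetical exporter: backing pages kept in a plain array, so the
 * mandatory map callbacks reduce to kmap()/kmap_atomic().
 */
struct example_buffer {
	struct page **pages;
};

static void *example_kmap_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
{
	struct example_buffer *buf = dmabuf->priv;

	return kmap_atomic(buf->pages[pgnum]);	/* must not sleep */
}

static void example_kunmap_atomic(struct dma_buf *dmabuf, unsigned long pgnum,
				  void *vaddr)
{
	kunmap_atomic(vaddr);
}

static void *example_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
{
	struct example_buffer *buf = dmabuf->priv;

	return kmap(buf->pages[pgnum]);		/* may sleep */
}

static void example_kunmap(struct dma_buf *dmabuf, unsigned long pgnum,
			   void *vaddr)
{
	struct example_buffer *buf = dmabuf->priv;

	kunmap(buf->pages[pgnum]);
}

static const struct dma_buf_ops example_ops = {
	/* attach/detach, map_dma_buf/unmap_dma_buf and release elided */
	.kmap_atomic	= example_kmap_atomic,
	.kunmap_atomic	= example_kunmap_atomic,
	.kmap		= example_kmap,
	.kunmap		= example_kunmap,
};

Note that dma_buf_export() now WARN()s and returns -EINVAL if kmap_atomic or kmap is missing, so an exporter must provide at least those two.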

/**
@@ -140,6 +159,14 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
				enum dma_data_direction);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
			     enum dma_data_direction dir);
void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
			    enum dma_data_direction dir);
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
#else

static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
@@ -188,6 +215,38 @@ static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
	return;
}

static inline int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					   size_t start, size_t len,
					   enum dma_data_direction dir)
{
	return -ENODEV;
}

static inline void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					  size_t start, size_t len,
					  enum dma_data_direction dir)
{
}

static inline void *dma_buf_kmap_atomic(struct dma_buf *dmabuf,
					unsigned long pnum)
{
	return NULL;
}

static inline void dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
					 unsigned long pnum, void *vaddr)
{
}

static inline void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long pnum)
{
	return NULL;
}

static inline void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long pnum,
				  void *vaddr)
{
}
#endif /* CONFIG_DMA_SHARED_BUFFER */

#endif /* __DMA_BUF_H__ */
