Copyright notice: This is an original article by CSDN blogger 何小龙 (hexiaolong2009), licensed under CC 4.0 BY-SA. Please include the original source link and this notice when reposting.
Original link: https://blog.csdn.net/hexiaolong2009/article/details/102596845
1. Preface
In the previous articles of this dma-buf series, the memory allocated by the exporter was always obtained with kzalloc(). In this article we switch to allocating it with alloc_page() instead.
Comparison
Compared with the earlier kzalloc approach, the main differences of the alloc_page approach are summarized in the table below (a sketch of the old kzalloc-style implementation follows the table for reference):
dma_buf_ops | kzalloc approach | alloc_page approach |
---|---|---|
map_dma_buf | dma_map_single() | dma_map_page() |
unmap_dma_buf | dma_unmap_single() | dma_unmap_page() |
begin_cpu_access | dma_sync_single_for_cpu() | dma_sync_sg_for_cpu() |
end_cpu_access | dma_sync_single_for_device() | dma_sync_sg_for_device() |
kmap | return dmabuf->priv; | kmap() |
kmap_atomic | return dmabuf->priv; | kmap_atomic() |
vmap | return dmabuf->priv; | vmap() |
release | kfree() | put_page() |
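For reference, here is a minimal sketch of what the kzalloc-style map_dma_buf / unmap_dma_buf pair looked like in the earlier articles. It assumes, as those articles did, that the kzalloc()'d buffer pointer is stored in dmabuf->priv; error handling is omitted, so treat it as an illustration of the dma_map_single() rows in the table rather than a drop-in implementation:
static struct sg_table *exporter_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction dir)
{
        void *vaddr = attachment->dmabuf->priv; /* buffer allocated with kzalloc() */
        struct sg_table *table;

        table = kmalloc(sizeof(*table), GFP_KERNEL);
        sg_alloc_table(table, 1, GFP_KERNEL);
        sg_dma_len(table->sgl) = PAGE_SIZE;
        /* map the kernel virtual address directly: the dma_map_single() row in the table */
        sg_dma_address(table->sgl) = dma_map_single(NULL, vaddr, PAGE_SIZE, dir);

        return table;
}

static void exporter_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *table,
                                   enum dma_data_direction dir)
{
        dma_unmap_single(NULL, sg_dma_address(table->sgl), PAGE_SIZE, dir);
        sg_free_table(table);
        kfree(table);
}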
2. Example
2.1 The exporter driver
Building on the sample code from the previous articles, all of the dma_buf_ops callbacks are replaced with their page-based counterparts.
exporter-page.c
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h> /* vmap()/vunmap() */
#include <linux/uaccess.h> /* copy_to_user() */
static struct dma_buf *dmabuf_exported;
static int exporter_attach(struct dma_buf *dmabuf, struct device *dev,
struct dma_buf_attachment *attachment)
{
pr_info("dmabuf attach device: %s\n", dev_name(dev));
return 0;
}
static void exporter_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
{
pr_info("dmabuf detach device: %s\n", dev_name(attachment->dev));
}
static struct sg_table *exporter_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
{
struct page *page = attachment->dmabuf->priv;
struct sg_table *table;
table = kmalloc(sizeof(*table), GFP_KERNEL);
sg_alloc_table(table, 1, GFP_KERNEL);
sg_set_page(table->sgl, page, PAGE_SIZE, 0);
sg_dma_address(table->sgl) = dma_map_page(NULL, page, 0, PAGE_SIZE, dir);
attachment->priv = table; /* stash the table so begin/end_cpu_access below can find it */
return table;
}
static void exporter_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *table,
enum dma_data_direction dir)
{
dma_unmap_page(NULL, sg_dma_address(table->sgl), PAGE_SIZE, dir);
sg_free_table(table);
kfree(table);
}
static void exporter_release(struct dma_buf *dma_buf)
{
struct page *page = dma_buf->priv;
pr_info("dmabuf release\n");
put_page(page);
}
static void *exporter_vmap(struct dma_buf *dma_buf)
{
struct page *page = dma_buf->priv;
return vmap(&page, 1, 0, PAGE_KERNEL);
}
static void exporter_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
vunmap(vaddr);
}
static void *exporter_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
struct page *page = dma_buf->priv;
return kmap_atomic(page);
}
static void exporter_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
kunmap_atomic(addr);
}
static void *exporter_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
struct page *page = dma_buf->priv;
return kmap(page);
}
static void exporter_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
struct page *page = dma_buf->priv;
kunmap(page);
}
static int exporter_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
struct page *page = dma_buf->priv;
return remap_pfn_range(vma, vma->vm_start, page_to_pfn(page),
PAGE_SIZE, vma->vm_page_prot);
}
static int exporter_begin_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction dir)
{
struct dma_buf_attachment *attachment;
struct sg_table *table;
if (list_empty(&dmabuf->attachments))
return 0;
attachment = list_first_entry(&dmabuf->attachments, struct dma_buf_attachment, node);
table = attachment->priv;
dma_sync_sg_for_cpu(NULL, table->sgl, 1, dir);
return 0;
}
static int exporter_end_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction dir)
{
struct dma_buf_attachment *attachment;
struct sg_table *table;
if (list_empty(&dmabuf->attachments))
return 0;
attachment = list_first_entry(&dmabuf->attachments, struct dma_buf_attachment, node);
table = attachment->priv;
dma_sync_sg_for_device(NULL, table->sgl, 1, dir);
return 0;
}
static const struct dma_buf_ops exp_dmabuf_ops = {
.attach = exporter_attach,
.detach = exporter_detach,
.map_dma_buf = exporter_map_dma_buf,
.unmap_dma_buf = exporter_unmap_dma_buf,
.release = exporter_release,
.map = exporter_kmap,
.unmap = exporter_kunmap,
.map_atomic = exporter_kmap_atomic,
.unmap_atomic = exporter_kunmap_atomic,
.mmap = exporter_mmap,
.vmap = exporter_vmap,
.vunmap = exporter_vunmap,
.begin_cpu_access = exporter_begin_cpu_access,
.end_cpu_access = exporter_end_cpu_access,
};
static struct dma_buf *exporter_alloc_page(void)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct dma_buf *dmabuf;
struct page *page;
page = alloc_page(GFP_KERNEL);
exp_info.ops = &exp_dmabuf_ops;
exp_info.size = PAGE_SIZE;
exp_info.flags = O_CLOEXEC;
exp_info.priv = page;
dmabuf = dma_buf_export(&exp_info);
sprintf(page_address(page), "hello world!");
return dmabuf;
}
static long exporter_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int fd = dma_buf_fd(dmabuf_exported, O_CLOEXEC);
if (copy_to_user((int __user *)arg, &fd, sizeof(fd)))
return -EFAULT;
return 0;
}
static struct file_operations exporter_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = exporter_ioctl,
};
static struct miscdevice mdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "exporter",
.fops = &exporter_fops,
};
static int __init exporter_init(void)
{
dmabuf_exported = exporter_alloc_page();
return misc_register(&mdev);
}
static void __exit exporter_exit(void)
{
misc_deregister(&mdev);
}
module_init(exporter_init);
module_exit(exporter_exit);
MODULE_LICENSE("GPL"); /* the dma-buf core symbols are exported GPL-only */
2.2 The importer driver
The importer-kmap.c and importer-sg.c drivers from the previous articles are merged into one, as follows:
importer-page.c
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/device.h> /* dev_set_name() */
static int importer_test1(struct dma_buf *dmabuf)
{
void *vaddr;
vaddr = dma_buf_kmap(dmabuf, 0);
pr_info("read from dmabuf kmap: %s\n", (char *)vaddr);
dma_buf_kunmap(dmabuf, 0, vaddr);
vaddr = dma_buf_vmap(dmabuf);
pr_info("read from dmabuf vmap: %s\n", (char *)vaddr);
dma_buf_vunmap(dmabuf, vaddr);
return 0;
}
static int importer_test2(struct dma_buf *dmabuf)
{
struct dma_buf_attachment *attachment;
struct sg_table *table;
struct device *dev;
unsigned int reg_addr, reg_size;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
dev_set_name(dev, "importer");
attachment = dma_buf_attach(dmabuf, dev);
table = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
reg_addr = sg_dma_address(table->sgl);
reg_size = sg_dma_len(table->sgl);
pr_info("reg_addr = 0x%08x, reg_size = 0x%08x\n", reg_addr, reg_size);
dma_buf_unmap_attachment(attachment, table, DMA_BIDIRECTIONAL);
dma_buf_detach(dmabuf, attachment);
kfree(dev); /* free the dummy device used only for attach/detach */
return 0;
}
static long importer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int fd;
struct dma_buf *dmabuf;
if (copy_from_user(&fd, (void __user *)arg, sizeof(int)))
return -EFAULT;
dmabuf = dma_buf_get(fd);
importer_test1(dmabuf);
importer_test2(dmabuf);
dma_buf_put(dmabuf);
return 0;
}
static struct file_operations importer_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = importer_ioctl,
};
static struct miscdevice mdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "importer",
.fops = &importer_fops,
};
static int __init importer_init(void)
{
return misc_register(&mdev);
}
static void __exit importer_exit(void)
{
misc_deregister(&mdev);
}
module_init(importer_init);
module_exit(importer_exit);
MODULE_LICENSE("GPL"); /* the dma-buf core symbols are exported GPL-only */
3. Running
Run the following commands in the my-qemu simulation environment:
# insmod /lib/modules/4.14.143/kernel/drivers/dma-buf/exporter-page.ko
# insmod /lib/modules/4.14.143/kernel/drivers/dma-buf/importer-page.ko
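Note that the test1/test2 paths above are only reachable through the two drivers' ioctl interfaces, so a small user-space program is needed to trigger them after both modules are loaded: it asks the exporter for the dma-buf fd and then hands that fd to the importer. The original post's test program is not reproduced here; the following is a minimal sketch matching the two ioctl handlers shown above (the /dev/exporter and /dev/importer nodes correspond to the misc device names registered by the drivers):
/* dmabuf_test.c -- minimal user-space sketch, not part of the original post */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
        int exporter_fd, importer_fd;
        int dmabuf_fd = -1;

        exporter_fd = open("/dev/exporter", O_RDONLY);
        importer_fd = open("/dev/importer", O_RDONLY);
        if (exporter_fd < 0 || importer_fd < 0) {
                perror("open");
                return 1;
        }

        /* both ioctl handlers ignore the command number, so 0 is used here */
        ioctl(exporter_fd, 0, &dmabuf_fd);      /* exporter_ioctl copies the dma-buf fd out */
        ioctl(importer_fd, 0, &dmabuf_fd);      /* importer_ioctl runs test1/test2 on that fd */

        close(importer_fd);
        close(exporter_fd);
        return 0;
}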
Running this test program then produces the following output:
read from dmabuf kmap: hello world!
read from dmabuf vmap: hello world!
dmabuf attach device: importer
reg_addr = 0x7f6ee000, reg_size = 0x00001000
dmabuf detach device: importer
4. Closing remarks
The real purpose of this article is to pave the way for the next one, "dma-buf 由浅入深 —— ION 简化版" (a simplified ION driver). Getting familiar with the page-related operations here will make that article much easier to follow. On to the next one!