// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 */

#include <linux/cma.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-contiguous.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sched/signal.h>

#include "heap-helpers.h"

struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};

static void cma_heap_free(struct heap_helper_buffer *buffer)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap);
	unsigned long nr_pages = buffer->pagecount;
	struct page *cma_pages = buffer->priv_virt;

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, cma_pages, nr_pages);
	kfree(buffer);
}

/* dmabuf heap CMA operations functions */
static int cma_heap_allocate(struct dma_heap *heap,
			     unsigned long len,
			     unsigned long fd_flags,
			     unsigned long heap_flags)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct heap_helper_buffer *helper_buffer;
	struct page *cma_pages;
	size_t size = PAGE_ALIGN(len);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
	if (!helper_buffer)
		return -ENOMEM;

	init_heap_helper_buffer(helper_buffer, cma_heap_free);
	helper_buffer->heap = heap;
	helper_buffer->size = len;

	cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
	if (!cma_pages)
		goto free_buf;

	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = nr_pages;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL
			 */
			if (fatal_signal_pending(current))
				goto free_cma;

			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	helper_buffer->pagecount = nr_pages;
	helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
					     sizeof(*helper_buffer->pages),
					     GFP_KERNEL);
	if (!helper_buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < helper_buffer->pagecount; pg++)
		helper_buffer->pages[pg] = &cma_pages[pg];

	/* create the dmabuf */
	dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}

	helper_buffer->dmabuf = dmabuf;
	helper_buffer->priv_virt = cma_pages;

	ret = dma_buf_fd(dmabuf, fd_flags);
	if (ret < 0) {
		dma_buf_put(dmabuf);
		/* just return, as put will call release and that will free */
		return ret;
	}

	return ret;

free_pages:
	kfree(helper_buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, nr_pages);
free_buf:
	kfree(helper_buffer);
	return ret;
}

static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
};

static int __add_cma_heap(struct cma *cma, void *data)
{
	struct cma_heap *cma_heap;
	struct dma_heap_export_info exp_info;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = cma_get_name(cma);
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	return 0;
}

static int add_default_cma_heap(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	int ret = 0;

	if (default_cma)
		ret = __add_cma_heap(default_cma, NULL);

	return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
MODULE_LICENSE("GPL v2");
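
/*
 * Userspace usage: a minimal sketch of allocating a dma-buf from this heap
 * through the DMA-BUF heaps UAPI (<linux/dma-heap.h>). The node name under
 * /dev/dma_heap/ comes from cma_get_name() above and is platform dependent;
 * "reserved" (the default DT CMA region name) and the helper name
 * alloc_from_cma_heap() are assumptions for illustration only.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-heap.h>
 *
 *	int alloc_from_cma_heap(unsigned long long len)
 *	{
 *		struct dma_heap_allocation_data data = {
 *			.len = len,
 *			.fd_flags = O_RDWR | O_CLOEXEC,
 *		};
 *		int heap_fd = open("/dev/dma_heap/reserved", O_RDONLY);
 *
 *		if (heap_fd < 0)
 *			return -1;
 *		if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) < 0) {
 *			close(heap_fd);
 *			return -1;
 *		}
 *		close(heap_fd);
 *		return data.fd;		// exported dma-buf fd
 *	}
 */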