mm: remove alloc_vm_area
All users are gone now.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Link: https://lkml.kernel.org/r/20201002122204.1534411-12-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 301fa9f2dd
parent 5dd63bf1d0
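For context, alloc_vm_area() reserved a range of kernel virtual address space, pre-built the page tables covering it, and handed the init_mm PTE pointers back to the caller, which then installed mappings by hand. A minimal sketch of that (now removed) usage pattern, not taken from any actual caller; map_foreign_pages() and NR_PAGES are hypothetical names, and teardown is elided:

#include <linux/mm.h>
#include <linux/pgtable.h>
#include <linux/vmalloc.h>

#define NR_PAGES 4	/* hypothetical size of the mapping */

/* Hypothetical former user of the interface this commit removes. */
static void *map_foreign_pages(unsigned long pfns[NR_PAGES])
{
	pte_t *ptes[NR_PAGES];
	struct vm_struct *area;
	unsigned long addr;
	int i;

	/* Reserve KVA and collect PTE pointers; no mappings exist yet. */
	area = alloc_vm_area(NR_PAGES * PAGE_SIZE, ptes);
	if (!area)
		return NULL;

	addr = (unsigned long)area->addr;
	for (i = 0; i < NR_PAGES; i++)	/* install each mapping by hand */
		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptes[i],
			   pfn_pte(pfns[i], PAGE_KERNEL));

	return area->addr;
}

Earlier patches in this series converted the remaining callers (Xen and i915, among others) to vmap()/vmap_pfn() or to open-coded equivalents, leaving the function unused.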
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
@@ -169,6 +169,7 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size,
 					unsigned long flags,
 					unsigned long start, unsigned long end,
 					const void *caller);
+void free_vm_area(struct vm_struct *area);
 extern struct vm_struct *remove_vm_area(const void *addr);
 extern struct vm_struct *find_vm_area(const void *addr);
 
@@ -204,10 +205,6 @@ static inline void set_vm_flush_reset_perms(void *addr)
 }
 #endif
 
-/* Allocate/destroy a 'vmalloc' VM area. */
-extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
-extern void free_vm_area(struct vm_struct *area);
-
 /* for /dev/kmem */
 extern long vread(char *buf, char *addr, unsigned long count);
 extern long vwrite(char *buf, char *addr, unsigned long count);
diff --git a/mm/nommu.c b/mm/nommu.c
@@ -354,13 +354,6 @@ void vm_unmap_aliases(void)
 }
 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 
-struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
-{
-	BUG();
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(alloc_vm_area);
-
 void free_vm_area(struct vm_struct *area)
 {
 	BUG();
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
@@ -3083,54 +3083,6 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 }
 EXPORT_SYMBOL(remap_vmalloc_range);
 
-static int f(pte_t *pte, unsigned long addr, void *data)
-{
-	pte_t ***p = data;
-
-	if (p) {
-		*(*p) = pte;
-		(*p)++;
-	}
-	return 0;
-}
-
-/**
- * alloc_vm_area - allocate a range of kernel address space
- * @size: size of the area
- * @ptes: returns the PTEs for the address space
- *
- * Returns: NULL on failure, vm_struct on success
- *
- * This function reserves a range of kernel address space, and
- * allocates pagetables to map that range.  No actual mappings
- * are created.
- *
- * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
- * allocated for the VM area are returned.
- */
-struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
-{
-	struct vm_struct *area;
-
-	area = get_vm_area_caller(size, VM_IOREMAP,
-				__builtin_return_address(0));
-	if (area == NULL)
-		return NULL;
-
-	/*
-	 * This ensures that page tables are constructed for this region
-	 * of kernel virtual address space and mapped into init_mm.
-	 */
-	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
-				size, f, ptes ? &ptes : NULL)) {
-		free_vm_area(area);
-		return NULL;
-	}
-
-	return area;
-}
-EXPORT_SYMBOL_GPL(alloc_vm_area);
-
 void free_vm_area(struct vm_struct *area)
 {
 	struct vm_struct *ret;
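Code that still needs this reserve-then-populate pattern after the removal can open-code it with APIs that remain exported, which is essentially what the converted callers in this series do. A minimal sketch mirroring the implementation deleted above; reserve_kva() and stash_pte() are hypothetical names, and @ptes must point to an array large enough for @size:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Callback for apply_to_page_range(): record each init_mm PTE pointer. */
static int stash_pte(pte_t *pte, unsigned long addr, void *data)
{
	pte_t ***p = data;

	*(*p) = pte;
	(*p)++;
	return 0;
}

/* Reserve @size bytes of kernel address space and pre-build page tables. */
static struct vm_struct *reserve_kva(size_t size, pte_t **ptes)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				  __builtin_return_address(0));
	if (!area)
		return NULL;

	/* Construct page tables for the range; no mappings are created. */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				size, stash_pte, &ptes)) {
		free_vm_area(area);
		return NULL;
	}

	return area;
}

Callers that only need an ordinary mapping were instead moved to vmap() or the vmap_pfn() helper added earlier in the same series, which avoids touching PTEs by hand altogether.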