/* Copyright (C) 2009 Red Hat, Inc.
 *
 * See ../COPYING for licensing terms.
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>

/*
 * use_mm
 *	Makes the calling kernel thread take on the specified
 *	mm context.
 *	Called by the retry thread to execute retries within the
 *	iocb issuer's mm context, so that copy_from/to_user
 *	operations work seamlessly for aio.
 *	(Note: this routine is intended to be called only
 *	from a kernel thread context)
 */
void use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	task_lock(tsk);
	active_mm = tsk->active_mm;
	/* Pin the new mm's mm_count while this thread is attached to it. */
	atomic_inc(&mm->mm_count);
	tsk->mm = mm;
	tsk->active_mm = mm;
	switch_mm(active_mm, mm, tsk);
	task_unlock(tsk);

	/* Drop the reference the kernel thread held on its old active_mm. */
	mmdrop(active_mm);
}

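/*
 * Illustrative sketch (not part of this file): how a kernel thread such as
 * the aio retry worker is expected to bracket user-memory access with
 * use_mm()/unuse_mm().  The function name and the 'uptr'/'result'
 * parameters below are hypothetical and exist only to show the call
 * pattern; the #if 0 keeps the example out of the build.
 */
#if 0	/* example only, never compiled */
static long example_retry_copy(struct mm_struct *mm,
			       long __user *uptr, long result)
{
	long ret = 0;

	use_mm(mm);		/* adopt the iocb issuer's address space */
	if (copy_to_user(uptr, &result, sizeof(result)))
		ret = -EFAULT;	/* uptr is now resolved against 'mm' */
	unuse_mm(mm);		/* detach again before doing anything else */

	return ret;
}
#endif
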
/*
 * unuse_mm
 *	Reverses the effect of use_mm, i.e. releases the
 *	specified mm context which was earlier taken on
 *	by the calling kernel thread
 *	(Note: this routine is intended to be called only
 *	from a kernel thread context)
 */
void unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	task_lock(tsk);
	tsk->mm = NULL;
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	task_unlock(tsk);
}
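
/*
 * Illustrative sketch (not part of this file): the reference-counting
 * discipline around use_mm()/unuse_mm().  use_mm() only pins mm_count, so
 * the code that hands an mm to a kernel thread is expected to hold an
 * mm_users reference (e.g. via get_task_mm()) for as long as the thread
 * may touch user memory, and to drop it with mmput() afterwards.  The
 * 'example_worker' structure and function names are hypothetical.
 */
#if 0	/* example only, never compiled */
struct example_worker {
	struct mm_struct *mm;	/* pinned with get_task_mm() by the issuer */
};

static void example_issuer_setup(struct example_worker *w)
{
	w->mm = get_task_mm(current);	/* take an mm_users reference */
}

static void example_worker_fn(struct example_worker *w)
{
	use_mm(w->mm);
	/* ... copy_from_user()/copy_to_user() against the issuer's mm ... */
	unuse_mm(w->mm);
}

static void example_issuer_teardown(struct example_worker *w)
{
	if (w->mm)
		mmput(w->mm);		/* release it once the worker is done */
}
#endif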