[Original] (13) Linux Memory Management: vma/malloc/mmap

Background

  • Read the fucking source code! --By 魯迅
  • A picture is worth a thousand words. --By 高爾基

Notes:

  1. Kernel version: 4.14
  2. ARM64 processor, Cortex-A53, dual-core
  3. Tools: Source Insight 3.5, Visio

1. Overview

In this article, let's look at the address space of a user-space process. It mainly covers the following:

  • vma;
  • malloc;
  • mmap;

In a process address space, the familiar code segment, data segment, bss segment, and so on are in fact all just regions of that address space. Linux calls such a region a Virtual Memory Area, or VMA for short, and describes it with struct vm_area_struct.

Whenever memory is allocated or mapped, a range of virtual addresses is carved out of the address space, and these operations are closely tied to VMAs. That is why this article analyzes vma, malloc, and mmap together.
Let's start exploring.

2. Data Structures

Two structures are mainly involved: struct mm_struct and struct vm_area_struct.

  • struct mm_struct
    Describes everything related to a process's address space; it is embedded in the process descriptor. The key fields are explained in the comments.
struct mm_struct {
    struct vm_area_struct *mmap;        /* list of VMAs */                              // head of the linked list of VMA objects
    struct rb_root mm_rb;                                                                     // root of the red-black tree of VMA objects
    u64 vmacache_seqnum;                   /* per-thread vmacache */
#ifdef CONFIG_MMU
    unsigned long (*get_unmapped_area) (struct file *filp,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags);              // method used to search the process address space for a free linear address range
#endif
    unsigned long mmap_base;        /* base of mmap area */
    unsigned long mmap_legacy_base;         /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
    /* Base adresses for compatible mmap() */
    unsigned long mmap_compat_base;
    unsigned long mmap_compat_legacy_base;
#endif
    unsigned long task_size;        /* size of task vm space */
    unsigned long highest_vm_end;       /* highest vma end address */
    pgd_t * pgd;        // pointer to the page global directory

    /**
     * @mm_users: The number of users including userspace.
     *
     * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
     * to 0 (i.e. when the task exits and there are no other temporary
     * reference holders), we also release a reference on @mm_count
     * (which may then free the &struct mm_struct if @mm_count also
     * drops to 0).
     */
    atomic_t mm_users;      // user reference counter (including userspace)

    /**
     * @mm_count: The number of references to &struct mm_struct
     * (@mm_users count as 1).
     *
     * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
     * &struct mm_struct is freed.
     */
    atomic_t mm_count;      // reference counter on the mm_struct itself

    atomic_long_t nr_ptes;          /* PTE page table pages */      // number of PTE page table pages of the process
#if CONFIG_PGTABLE_LEVELS > 2
    atomic_long_t nr_pmds;          /* PMD page table pages */
#endif
    int map_count;              /* number of VMAs */        // number of VMAs

    spinlock_t page_table_lock;     /* Protects page tables and some counters */
    struct rw_semaphore mmap_sem;

    struct list_head mmlist;        /* List of maybe swapped mm's.  These are globally strung
                         * together off init_mm.mmlist, and are protected
                         * by mmlist_lock
                         */


    unsigned long hiwater_rss;  /* High-watermark of RSS usage */
    unsigned long hiwater_vm;   /* High-water virtual memory usage */

    unsigned long total_vm;     /* Total pages mapped */    // total pages mapped in the address space
    unsigned long locked_vm;    /* Pages that have PG_mlocked set */    // locked pages that cannot be swapped out
    unsigned long pinned_vm;    /* Refcount permanently increased */
    unsigned long data_vm;      /* VM_WRITE & ~VM_SHARED & ~VM_STACK */     // pages of data mappings
    unsigned long exec_vm;      /* VM_EXEC & ~VM_WRITE & ~VM_STACK */         // pages of executable mappings
    unsigned long stack_vm;     /* VM_STACK */                                              // pages of the user-space stack
    unsigned long def_flags;
    unsigned long start_code, end_code, start_data, end_data;       // addresses of the code segment, data segment, etc.
    unsigned long start_brk, brk, start_stack;      // heap and stack addresses: start_stack is the start of the user-space stack, brk is the current end of the heap
    unsigned long arg_start, arg_end, env_start, env_end;  // addresses of the command-line arguments and the environment variables

    unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

    /*
     * Special counters, in some configurations protected by the
     * page_table_lock, in other configurations by being atomic.
     */
    struct mm_rss_stat rss_stat;

    struct linux_binfmt *binfmt;

    cpumask_var_t cpu_vm_mask_var;

    /* Architecture-specific MM context */
    mm_context_t context;

    unsigned long flags; /* Must use atomic bitops to access the bits */

    struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_MEMBARRIER
    atomic_t membarrier_state;
#endif
#ifdef CONFIG_AIO
    spinlock_t          ioctx_lock;
    struct kioctx_table __rcu   *ioctx_table;
#endif
#ifdef CONFIG_MEMCG
    /*
     * "owner" points to a task that is regarded as the canonical
     * user/owner of this mm. All of the following must be true in
     * order for it to be changed:
     *
     * current == mm->owner
     * current->mm != mm
     * new_owner->mm == mm
     * new_owner->alloc_lock is held
     */
    struct task_struct __rcu *owner;
#endif
    struct user_namespace *user_ns;

    /* store ref to file /proc/<pid>/exe symlink points to */
    struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
    struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
    pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
    struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
    /*
     * numa_next_scan is the next time that the PTEs will be marked
     * pte_numa. NUMA hinting faults will gather statistics and migrate
     * pages to new nodes if necessary.
     */
    unsigned long numa_next_scan;

    /* Restart point for scanning and setting pte_numa */
    unsigned long numa_scan_offset;

    /* numa_scan_seq prevents two threads setting pte_numa */
    int numa_scan_seq;
#endif
    /*
     * An operation with batched TLB flushing is going on. Anything that
     * can move process memory needs to flush the TLB when moving a
     * PROT_NONE or PROT_NUMA mapped page.
     */
    atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
    /* See flush_tlb_batched_pending() */
    bool tlb_flush_batched;
#endif
    struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
    atomic_long_t hugetlb_usage;
#endif
    struct work_struct async_put_work;

#if IS_ENABLED(CONFIG_HMM)
    /* HMM needs to track a few things per mm */
    struct hmm *hmm;
#endif
} __randomize_layout;
  • struct vm_area_struct
    Describes a single virtual region of a process address space; every VMA corresponds to one struct vm_area_struct.
/*
 * This struct defines a memory VMM memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
    /* The first cache line has the info for VMA tree walking. */

    unsigned long vm_start;     /* Our start address within vm_mm. */       // start address
    unsigned long vm_end;       /* The first byte after our end address
                       within vm_mm. */         // end address, not included in the interval

    /* linked list of VM areas per task, sorted by address */       // list sorted by start address
    struct vm_area_struct *vm_next, *vm_prev;

    struct rb_node vm_rb;       // red-black tree node

    /*
     * Largest free memory gap in bytes to the left of this VMA.
     * Either between this VMA and vma->vm_prev, or between one of the
     * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
     * get_unmapped_area find a free area of the right size.
     */
    unsigned long rb_subtree_gap;

    /* Second cache line starts here. */

    struct mm_struct *vm_mm;    /* The address space we belong to. */
    pgprot_t vm_page_prot;      /* Access permissions of this VMA. */
    unsigned long vm_flags;     /* Flags, see mm.h. */

    /*
     * For areas with an address space and backing store,
     * linkage into the address_space->i_mmap interval tree.
     */
    struct {
        struct rb_node rb;
        unsigned long rb_subtree_last;
    } shared;

    /*
     * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
     * list, after a COW of one of the file pages.  A MAP_SHARED vma
     * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
     * or brk vma (with NULL file) can only be in an anon_vma list.
     */
    struct list_head anon_vma_chain; /* Serialized by mmap_sem &
                      * page_table_lock */
    struct anon_vma *anon_vma;  /* Serialized by page_table_lock */

    /* Function pointers to deal with this struct. */
    const struct vm_operations_struct *vm_ops;

    /* Information about our backing store: */
    unsigned long vm_pgoff;     /* Offset (within vm_file) in PAGE_SIZE
                       units */
    struct file * vm_file;      /* File we map to (can be NULL). */     // points to an open instance of the mapped file
    void * vm_private_data;     /* was vm_pte (shared mem) */

    atomic_long_t swap_readahead_info;
#ifndef CONFIG_MMU
    struct vm_region *vm_region;    /* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
    struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
#endif
    struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

Here is the relationship diagram:

Does it look familiar? It is quite similar to the kernel's vmap mechanism.

Now take a macroscopic look at the VMAs in a process address space:
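To see these VMAs on a running system, you can simply dump /proc/self/maps. The short program below is a minimal user-space illustration (no kernel internals involved); each line of output corresponds to one struct vm_area_struct of the process.

#include <stdio.h>
#include <stdlib.h>

/* Dump this process's VMAs: every line of /proc/self/maps
 * corresponds to one struct vm_area_struct in the kernel. */
int main(void)
{
    FILE *fp = fopen("/proc/self/maps", "r");
    char line[512];

    if (!fp) {
        perror("fopen");
        return EXIT_FAILURE;
    }

    /* Typical columns: start-end perms offset dev inode path */
    while (fgets(line, sizeof(line), fp))
        fputs(line, stdout);

    fclose(fp);
    return EXIT_SUCCESS;
}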

The following interfaces operate on VMAs:

/* VMA lookup */
/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); // find the first VMA satisfying addr < vm_end
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
                         struct vm_area_struct **pprev); // like find_vma, but additionally returns the previous VMA in the linked list
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr); // find a VMA that intersects the range start_addr ~ end_addr

/* VMA insertion */
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); // insert a VMA into the red-black tree and the linked list

/* VMA merging */
extern struct vm_area_struct *vma_merge(struct mm_struct *,
    struct vm_area_struct *prev, unsigned long addr, unsigned long end,
    unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
    struct mempolicy *, struct vm_userfaultfd_ctx); // merge a VMA with neighbouring VMAs where possible

/* VMA splitting */
extern int split_vma(struct mm_struct *, struct vm_area_struct *,
    unsigned long addr, int new_below); // split a VMA into two VMAs at addr

上述的操做基本上也就是針對紅黑樹的操做。this

3. malloc

Everyone is familiar with malloc, but how does it interact with the lower layers to actually obtain memory?

Here is the diagram:

As the figure shows, malloc ultimately reaches the kernel through sys_brk and sys_mmap: small allocations go through sys_brk, which dynamically adjusts the position of brk in the process address space, while large allocations go through sys_mmap, which finds a region between the heap and the stack to map.
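A quick way to observe this split from user space is to allocate one small and one large block and watch the system calls with strace. This assumes the glibc allocator, whose default mmap threshold is on the order of 128 KB and can be tuned with mallopt(M_MMAP_THRESHOLD, ...):

#include <stdio.h>
#include <stdlib.h>

/* Allocate one small and one large block.  Run under
 * "strace -e trace=brk,mmap ./a.out" to see the small request being
 * served via brk and the large one via mmap (glibc behaviour). */
int main(void)
{
    void *small = malloc(64);               /* well below the mmap threshold */
    void *large = malloc(4 * 1024 * 1024);  /* well above the mmap threshold */

    printf("small block at %p\n", small);
    printf("large block at %p\n", large);

    free(small);
    free(large);
    return 0;
}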

Let's look at sys_brk first. It is defined via SYSCALL_DEFINE1, and the overall call flow is as follows:

The call flow shows that quite a few of the steps operate on VMAs; putting it all together, the effect looks like this:

The whole picture is now fairly clear and simple: each process describes its address space with a struct mm_struct; the space consists of VMA regions, which are managed through a red-black tree and a linked list. To service malloc, the kernel dynamically adjusts the position of brk; the exact extent of the region is given by vm_start ~ vm_end in struct vm_area_struct. In practice, the kernel either handles the request by adjusting an existing VMA when the requested range overlaps one, or allocates a new VMA to describe the range and finally inserts it into the red-black tree and the linked list.
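The movement of brk can also be seen directly from user space: sbrk(0) returns the current program break, and with the glibc allocator a small malloc on a fresh heap typically pushes it upward. A minimal sketch (the exact behaviour depends on the allocator's state, so treat it as an illustration rather than a guarantee):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Print the program break before and after a small allocation.
 * With glibc, a small malloc on a fresh heap is usually served by
 * extending brk, so the break moves up. */
int main(void)
{
    void *brk_before = sbrk(0);
    void *p = malloc(1024);
    void *brk_after = sbrk(0);

    printf("brk before: %p\n", brk_before);
    printf("allocated : %p\n", p);
    printf("brk after : %p\n", brk_after);

    free(p);
    return 0;
}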

Completing the request only reserves a region of address space; physical memory is usually not allocated right away. The physical pages are allocated later, when a page fault is taken on first access. A follow-up article will analyze this in more detail.
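Demand paging is visible from user space too: VmRSS barely changes right after a large allocation and only grows once the pages are actually written. The sketch below reads VmRSS from /proc/self/status (Linux-specific) before and after touching the memory:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return VmRSS in kB as reported by /proc/self/status, or -1 on error. */
static long read_vmrss_kb(void)
{
    FILE *fp = fopen("/proc/self/status", "r");
    char line[256];
    long rss = -1;

    if (!fp)
        return -1;
    while (fgets(line, sizeof(line), fp)) {
        if (sscanf(line, "VmRSS: %ld kB", &rss) == 1)
            break;
    }
    fclose(fp);
    return rss;
}

int main(void)
{
    size_t size = 64 * 1024 * 1024;   /* 64 MB */
    char *buf;

    printf("VmRSS at start      : %ld kB\n", read_vmrss_kb());

    buf = malloc(size);               /* only reserves address space */
    printf("VmRSS after malloc  : %ld kB\n", read_vmrss_kb());

    memset(buf, 0x5a, size);          /* page faults allocate the pages */
    printf("VmRSS after touching: %ld kB\n", read_vmrss_kb());

    free(buf);
    return 0;
}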

4. mmap

mmap performs memory mapping, that is, it maps a region into the calling process's address space. There are two kinds of mapping:

  • File mapping: maps a region of a file, which lives on a storage device, into the process address space;
  • Anonymous mapping: a mapping with no backing file; its contents live only in physical memory;

In addition, depending on whether the mapping is visible to other processes, there are two further kinds:

  • Private mapping: the process works on a copy of the data source, so changes do not affect other processes;
  • Shared mapping: all sharing processes see the same contents;

Combining the two dimensions gives the following cases (a small example follows the list):

  1. Private anonymous mapping: typically used for large allocations, the heap, the stack, the bss segment, and so on;
  2. Shared anonymous mapping: commonly used for parent-child process communication; implemented by creating a file on the in-memory filesystem (e.g. by mapping /dev/zero shared);
  3. Private file mapping: used, for example, to load shared libraries, the code segment, the data segment, and so on;
  4. Shared file mapping: commonly used for inter-process communication and file I/O;
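As a minimal sketch of case 2, the program below creates a shared anonymous mapping and uses it to pass a string from a child process back to its parent; error handling is kept to the essentials:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

/* Parent and child communicate through a MAP_SHARED | MAP_ANONYMOUS
 * mapping: both see the same physical pages. */
int main(void)
{
    char *shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                        MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (shared == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    if (fork() == 0) {
        /* child: write into the shared page */
        strcpy(shared, "hello from child");
        return 0;
    }

    wait(NULL);   /* wait for the child to finish writing */
    printf("parent read: %s\n", shared);

    munmap(shared, 4096);
    return 0;
}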

The common prot permissions and flags are listed below:

#define PROT_READ   0x1     /* page can be read */
#define PROT_WRITE  0x2     /* page can be written */
#define PROT_EXEC   0x4     /* page can be executed */
#define PROT_SEM    0x8     /* page may be used for atomic ops */
#define PROT_NONE   0x0     /* page can not be accessed */
#define PROT_GROWSDOWN  0x01000000  /* mprotect flag: extend change to start of growsdown vma */
#define PROT_GROWSUP    0x02000000  /* mprotect flag: extend change to end of growsup vma */

#define MAP_SHARED  0x01        /* Share changes */
#define MAP_PRIVATE 0x02        /* Changes are private */
#define MAP_TYPE    0x0f        /* Mask for type of mapping */
#define MAP_FIXED   0x10        /* Interpret addr exactly */
#define MAP_ANONYMOUS   0x20        /* don't use a file */

#define MAP_GROWSDOWN   0x0100      /* stack-like segment */
#define MAP_DENYWRITE   0x0800      /* ETXTBSY */
#define MAP_EXECUTABLE  0x1000      /* mark it as an executable */
#define MAP_LOCKED  0x2000      /* pages are locked */
#define MAP_NORESERVE   0x4000      /* don't check for reservations */
#define MAP_POPULATE    0x8000      /* populate (prefault) pagetables */
#define MAP_NONBLOCK    0x10000     /* do not block on IO */
#define MAP_STACK   0x20000     /* give out an address that is best suited for process/thread stacks */
#define MAP_HUGETLB 0x40000     /* create a huge page mapping */
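Using the flags above, a shared file mapping (case 4) might look like the sketch below: the file's contents appear directly in the address space, and because the mapping is MAP_SHARED, stores propagate back to the file. The file name example.txt is just a placeholder:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

/* Map an existing file shared and access its first byte through the
 * mapping.  "example.txt" is only a placeholder file name. */
int main(void)
{
    int fd = open("example.txt", O_RDWR);
    struct stat st;
    char *data;

    if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0) {
        perror("open/fstat");
        return 1;
    }

    data = mmap(NULL, st.st_size, PROT_READ | PROT_WRITE,
                MAP_SHARED, fd, 0);
    if (data == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    printf("first byte of the file: 0x%02x\n", (unsigned char)data[0]);
    data[0] = '#';           /* with MAP_SHARED this change reaches the file */

    munmap(data, st.st_size);
    close(fd);
    return 0;
}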

mmap的操做,最終會調用到do_mmap函數,最後來一張調用圖:
