Binder is the inter-process communication (IPC) mechanism used by Android. Android is a complex operating system running a large number of applications and services, so communication between them is especially important. For an application, using Binder for IPC is very simple: you only need to write an AIDL file, and the build system generates the IPC code at compile time. This simplicity means application developers can get by without understanding the underlying Binder transport. For platform developers, however, knowing how Binder works is essential, because most debugging ends up tangled with Binder in one way or another. This article covers the first step of using Binder: opening and initializing a Binder device.
During initialization the Binder driver registers itself as a character device, "/dev/binder", and implements the poll, ioctl, and mmap file operations. All control of Binder from user space goes through ioctl, and the actual IPC payload is carried by the ioctl command BINDER_WRITE_READ.
Most Binder communication in Android initializes the Binder device through ProcessState.
frameworks/native/libs/binder/ProcessState.cpp
```cpp
#define BINDER_VM_SIZE ((1*1024*1024) - (4096 *2))
......
ProcessState::ProcessState()
    : mDriverFD(open_driver())          // open the binder device
    , mVMStart(MAP_FAILED)
    , mManagesContexts(false)
    , mBinderContextCheckFunc(NULL)
    , mBinderContextUserData(NULL)
    , mThreadPoolStarted(false)
    , mThreadPoolSeq(1)
{
    if (mDriverFD >= 0) {
        // XXX Ideally, there should be a specific define for whether we
        // have mmap (or whether we could possibly have the kernel module
        // availabla).
#if !defined(HAVE_WIN32_IPC)
        // mmap the binder, providing a chunk of virtual address space to receive transactions.
        // map the binder address space
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        if (mVMStart == MAP_FAILED) {
            // *sigh*
            ALOGE("Using /dev/binder failed: unable to mmap transaction memory.\n");
            close(mDriverFD);
            mDriverFD = -1;
        }
#else
        mDriverFD = -1;
#endif
    }

    LOG_ALWAYS_FATAL_IF(mDriverFD < 0, "Binder driver could not be opened.  Terminating.");
}
```
As the code shows, the ProcessState constructor first opens the Binder device to obtain a file descriptor, then uses mmap to map a (1MB - 8KB) region of address space for data transfer. Let's look at open_driver() first.
frameworks/native/libs/binder/ProcessState.cpp
```cpp
static int open_driver()
{
    int fd = open("/dev/binder", O_RDWR);
    if (fd >= 0) {
        fcntl(fd, F_SETFD, FD_CLOEXEC);
        int vers = 0;
        status_t result = ioctl(fd, BINDER_VERSION, &vers);
        if (result == -1) {
            ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
            close(fd);
            fd = -1;
        }
        if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
            ALOGE("Binder driver protocol does not match user space protocol!");
            close(fd);
            fd = -1;
        }
        size_t maxThreads = 15;
        result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
        if (result == -1) {
            ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
        }
    } else {
        ALOGW("Opening '/dev/binder' failed: %s\n", strerror(errno));
    }
    return fd;
}
```
Here the device "/dev/binder" is opened and the protocol version is verified. Then BINDER_SET_MAX_THREADS sets the maximum number of Binder threads for the process, which the driver stores in proc->max_threads. The value set in user space is 15, but the driver counts from 0, so the actual maximum number of Binder threads is 16. Next, look at the driver's open function; it is fairly simple: it creates the Binder process structure and initializes it.
drivers/staging/android/binder.c
```c
static int binder_open(struct inode *nodp, struct file *filp)
{
    struct binder_proc *proc;   // binder process structure
    ......
    proc = kzalloc(sizeof(*proc), GFP_KERNEL);
    if (proc == NULL)
        return -ENOMEM;
    get_task_struct(current);
    proc->tsk = current;
    INIT_LIST_HEAD(&proc->todo);
    init_waitqueue_head(&proc->wait);
    proc->default_priority = task_nice(current);    // default priority

    binder_lock(__func__);

    binder_stats_created(BINDER_STAT_PROC);         // update binder statistics
    hlist_add_head(&proc->proc_node, &binder_procs);
    proc->pid = current->group_leader->pid;
    INIT_LIST_HEAD(&proc->delivered_death);
    filp->private_data = proc;                      // the device's private data is the binder_proc
    binder_unlock(__func__);
    ......
}
```
binder_proc manages a Binder process; each process has only one binder_proc, and it records all the state of that process's Binder transfers. Its definition is shown below.
drivers/staging/android/binder.c
```c
struct binder_proc {
    struct hlist_node proc_node;          // node for this binder process, linked into the binder_procs list
    struct rb_root threads;               // red-black tree of binder threads, keyed by thread ID
    struct rb_root nodes;                 // red-black tree of binder nodes (entities)
    struct rb_root refs_by_desc;          // red-black tree of binder references, keyed by handle
    struct rb_root refs_by_node;          // red-black tree of binder references, keyed by node
    int pid;                              // process ID
    struct vm_area_struct *vma;           // pointer to the process's mapped virtual address space
    struct mm_struct *vma_vm_mm;          // the process's memory descriptor
    struct task_struct *tsk;              // the process's task struct
    struct files_struct *files;           // the process's files struct
    struct hlist_node deferred_work_node; // deferred work node, linked into the binder_deferred_list
    int deferred_work;                    // type of deferred work
    void *buffer;                         // mapped kernel-space address
    ptrdiff_t user_buffer_offset;         // offset between the mapped kernel space and user space
    struct list_head buffers;             // list of all buffers
    struct rb_root free_buffers;          // red-black tree of free buffers
    struct rb_root allocated_buffers;     // red-black tree of allocated buffers
    size_t free_async_space;              // remaining space for asynchronous transactions
    struct page **pages;                  // physical pages
    size_t buffer_size;                   // size of the mapped buffer
    uint32_t buffer_free;                 // remaining free buffer size
    struct list_head todo;                // work list of the process
    wait_queue_head_t wait;               // wait queue
    struct binder_stats stats;            // binder statistics
    struct list_head delivered_death;     // list of delivered death notifications
    int max_threads;                      // maximum number of threads
    int requested_threads;                // number of threads requested
    int requested_threads_started;        // number of requested threads already started
    int ready_threads;                    // number of ready (idle) threads
    long default_priority;                // default priority
    struct dentry *debugfs_entry;         // debugfs entry pointer
};
```
After binder_proc has been created and initialized, the application layer calls mmap() to map the address space.
drivers/staging/android/binder.c
```c
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
    int ret;
    struct vm_struct *area;
    struct binder_proc *proc = filp->private_data;
    const char *failure_string;
    struct binder_buffer *buffer;

    if (proc->tsk != current)
        return -EINVAL;

    // the mapped area may not be larger than 4MB
    if ((vma->vm_end - vma->vm_start) > SZ_4M)
        vma->vm_end = vma->vm_start + SZ_4M;
    ......
    // forked children cannot copy this mapping, and it may never become writable
    vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
    ......
    // reserve kernel virtual address space
    area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
    ......
    proc->buffer = area->addr;
    proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
    mutex_unlock(&binder_mmap_lock);
    ......
    // allocate the array of physical page pointers
    proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
    ......
    proc->buffer_size = vma->vm_end - vma->vm_start;

    vma->vm_ops = &binder_vm_ops;
    vma->vm_private_data = proc;

    // allocate one physical page and map it into the address space
    if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
    ......
    // create the buffers list and insert the first free buffer
    buffer = proc->buffer;
    INIT_LIST_HEAD(&proc->buffers);
    list_add(&buffer->entry, &proc->buffers);
    buffer->free = 1;
    binder_insert_free_buffer(proc, buffer);
    // the space available for asynchronous transactions is half of the mapped size
    proc->free_async_space = proc->buffer_size / 2;
    barrier();
    proc->files = get_files_struct(current);
    proc->vma = vma;
    proc->vma_vm_mm = vma->vm_mm;
    ......
}
```
binder_mmap() mainly allocates kernel virtual address space, sets up the address-space mapping, and initializes the memory-related fields of binder_proc. The actual physical pages are allocated by binder_update_page_range(); here only a single physical page is allocated, and the first free buffer is inserted into the free_buffers tree. During later Binder transfers, buffers are allocated and reclaimed as needed.
drivers/staging/android/binder.c
```c
static int binder_update_page_range(struct binder_proc *proc, int allocate,
                                    void *start, void *end,
                                    struct vm_area_struct *vma)
{
    void *page_addr;
    unsigned long user_page_addr;
    struct vm_struct tmp_area;
    struct page **page;
    struct mm_struct *mm;
    ......
    // vma is non-NULL only when called from mmap; in every other case the memory data is taken from proc
    if (vma)
        mm = NULL;
    else
        // get the memory descriptor and raise its user count so the mm_struct is not freed
        mm = get_task_mm(proc->tsk);

    if (mm) {
        down_write(&mm->mmap_sem);
        vma = proc->vma;
        if (vma && mm != proc->vma_vm_mm) {
            pr_err("%d: vma mm and task mm mismatch\n", proc->pid);
            vma = NULL;
        }
    }

    // allocate is 0 when memory is being reclaimed
    if (allocate == 0)
        goto free_range;
    ......
    // allocate physical pages in a loop, one page at a time
    for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
        int ret;
        struct page **page_array_ptr;
        page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

        BUG_ON(*page);
        // allocate one physical page and store its address in proc
        *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
        ......
        tmp_area.addr = page_addr;
        tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
        page_array_ptr = page;
        // map the physical page into the kernel page table
        ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
        ......
        user_page_addr = (uintptr_t)page_addr + proc->user_buffer_offset;
        // insert the physical page into the user virtual address space
        ret = vm_insert_page(vma, user_page_addr, page[0]);
        ......
    }
    if (mm) {
        up_write(&mm->mmap_sem);
        // drop the user count of the memory descriptor
        mmput(mm);
    }
    return 0;

free_range:
    for (page_addr = end - PAGE_SIZE; page_addr >= start; page_addr -= PAGE_SIZE) {
        page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
        if (vma)
            // unmap the physical page from the user virtual address space
            zap_page_range(vma, (uintptr_t)page_addr + proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
        // unmap the physical page from the kernel page table
        unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
        // free the physical page
        __free_page(*page);
        *page = NULL;
err_alloc_page_failed:
        ;
    }
    ........
}
```
Once the ProcessState constructor has run, the Binder device is initialized and the memory mapping is in place. The application layer now holds the Binder device descriptor mDriverFD, and from this point on IPC transfers are performed through ioctl.
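To make that ioctl-based transport concrete, here is a minimal sketch (not part of the framework sources) of what a single write-only BINDER_WRITE_READ call looks like at the raw file-descriptor level. It assumes the binder uapi definitions are available via `<linux/android/binder.h>` (older trees expose the same structures from the staging driver header), and `binderFd` stands for the descriptor returned by open_driver().

```cpp
#include <cstdint>
#include <cstring>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   // assumed location of the binder uapi header

// Write a single BC_ENTER_LOOPER command to an already-opened binder fd.
// This is roughly what IPCThreadState::talkWithDriver() does with mOut.
int enterLooperRaw(int binderFd)
{
    uint32_t cmd = BC_ENTER_LOOPER;                  // one command, no payload

    binder_write_read bwr;
    std::memset(&bwr, 0, sizeof(bwr));
    bwr.write_buffer = reinterpret_cast<binder_uintptr_t>(&cmd);
    bwr.write_size   = sizeof(cmd);                  // bytes the driver may consume
    bwr.read_size    = 0;                            // nothing to read back in this sketch

    // The driver consumes the command in binder_thread_write().
    return ioctl(binderFd, BINDER_WRITE_READ, &bwr);
}
```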
As analyzed above, a Binder process can have at most 16 Binder threads, and each Binder thread can carry out Binder transfers independently. The first Binder thread is usually created right after the ProcessState object is constructed, through ProcessState::startThreadPool().
frameworks/native/libs/binder/ProcessState.cpp
```cpp
class PoolThread : public Thread
{
public:
    PoolThread(bool isMain)
        : mIsMain(isMain)
    {
    }

protected:
    virtual bool threadLoop()
    {
        // when created via startThreadPool, mIsMain is true: this is the main Binder thread
        IPCThreadState::self()->joinThreadPool(mIsMain);
        return false;
    }

    const bool mIsMain;
};
......
void ProcessState::startThreadPool()
{
    AutoMutex _l(mLock);
    // the first thread must be created through startThreadPool
    if (!mThreadPoolStarted) {
        mThreadPoolStarted = true;
        spawnPooledThread(true);
    }
}
......
void ProcessState::spawnPooledThread(bool isMain)
{
    if (mThreadPoolStarted) {
        // build the binder thread name: Binder_XX
        String8 name = makeBinderThreadName();
        ALOGV("Spawning new pooled thread, name=%s\n", name.string());
        sp<Thread> t = new PoolThread(isMain);
        t->run(name.string());
    }
}
```
The actual Binder thread creation is ultimately done by IPCThreadState::joinThreadPool().
frameworks/native/libs/binder/IPCThreadState.cpp
```cpp
void IPCThreadState::joinThreadPool(bool isMain)
{
    ......
    // the main thread uses BC_ENTER_LOOPER; threads created later use BC_REGISTER_LOOPER
    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
    ......
    // move the thread into the foreground group so the first Binder transaction is not handled in the background
    set_sched_policy(mMyThreadId, SP_FOREGROUND);

    status_t result;
    do {
        // flush the pending command queue
        processPendingDerefs();
        // now get the next command to be processed, waiting if necessary
        // fetch one command and execute it
        result = getAndExecuteCommand();
        ......
        // a non-main thread exits when it is no longer needed; the main thread lives forever
        if(result == TIMED_OUT && !isMain) {
            break;
        }
    } while (result != -ECONNREFUSED && result != -EBADF);
    ......
    // tell the driver this looper thread is exiting
    mOut.writeInt32(BC_EXIT_LOOPER);
    talkWithDriver(false);
}
```
joinThreadPool() sends the BC_ENTER_LOOPER command to create the first Binder thread. It then enters a loop, fetching and executing commands through getAndExecuteCommand(). Let's take a quick look at how it interacts with the driver.
frameworks/native/libs/binder/IPCThreadState.cpp
```cpp
status_t IPCThreadState::getAndExecuteCommand()
{
    status_t result;
    int32_t cmd;

    // talk to the Binder driver
    result = talkWithDriver();
    if (result >= NO_ERROR) {
        size_t IN = mIn.dataAvail();
        ......
        // execute the command
        result = executeCommand(cmd);
        ......
        // executing the command may have moved the thread to the background group; move it back to foreground
        set_sched_policy(mMyThreadId, SP_FOREGROUND);
    }

    return result;
}
......
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    ......
    // the write data is mOut
    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data();

    // This is what we'll read.
    // the read data is mIn
    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }
    ......
    // read and write the driver through BINDER_WRITE_READ
    if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
    ......
}
```
The application reads and writes the Binder driver through the ioctl BINDER_WRITE_READ command, with mIn and mOut as the read and write buffers. mIn and mOut are Parcel instances: all Binder data is serialized through Parcel before being transferred, and the receiving side deserializes it when reading. For now we are interested in thread creation, so let's look at how the driver handles BC_ENTER_LOOPER.
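As a side note, here is a minimal sketch (assumed, not taken from the article's sources) of the Parcel serialization that mOut and mIn rely on: the writer flattens values in order, and the reader rewinds the data position and reads them back in the same order.

```cpp
#include <binder/Parcel.h>
#include <utils/String16.h>

using namespace android;

// Serialize two values, then read them back the way a receiver would.
void parcelRoundTrip()
{
    Parcel data;
    data.writeInt32(42);                        // flattened into the Parcel's buffer
    data.writeString16(String16("hello"));

    data.setDataPosition(0);                    // the reader starts from the beginning
    int32_t number = data.readInt32();          // 42
    String16 text  = data.readString16();       // "hello"
    (void)number;
    (void)text;
}
```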
drivers/staging/android/binder.c
```c
static int binder_thread_write(struct binder_proc *proc,
                               struct binder_thread *thread,
                               binder_uintptr_t binder_buffer, size_t size,
                               binder_size_t *consumed)
{
    ......
    case BC_ENTER_LOOPER:
        ......
        // set the thread's looper state
        thread->looper |= BINDER_LOOPER_STATE_ENTERED;
        break;
    ......
}
......
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
    struct binder_thread *thread = NULL;
    struct rb_node *parent = NULL;
    struct rb_node **p = &proc->threads.rb_node;

    // look up the current thread in proc's threads tree
    while (*p) {
        parent = *p;
        thread = rb_entry(parent, struct binder_thread, rb_node);

        if (current->pid < thread->pid)
            p = &(*p)->rb_left;
        else if (current->pid > thread->pid)
            p = &(*p)->rb_right;
        else
            break;
    }
    // if no thread is found in the threads tree, create one and insert it into the tree
    if (*p == NULL) {
        thread = kzalloc(sizeof(*thread), GFP_KERNEL);
        if (thread == NULL)
            return NULL;
        binder_stats_created(BINDER_STAT_THREAD);
        thread->proc = proc;
        thread->pid = current->pid;
        init_waitqueue_head(&thread->wait);
        INIT_LIST_HEAD(&thread->todo);
        rb_link_node(&thread->rb_node, parent, p);
        rb_insert_color(&thread->rb_node, &proc->threads);
        thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
        thread->return_error = BR_OK;
        thread->return_error2 = BR_OK;
    }
    return thread;
}
......
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    ......
    // get the binder thread for the current thread within this binder process
    thread = binder_get_thread(proc);
    ......
    switch (cmd) {
    case BINDER_WRITE_READ: {
        if (bwr.write_size > 0) {
            // write data
            ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
            ......
        if (bwr.read_size > 0) {
            // read data
            ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
            ......
        if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
            ret = -EFAULT;
            goto err;
        }
        break;
    }
    ......
}
```
To recap how the first Binder thread is created:
1. ProcessState::startThreadPool() calls spawnPooledThread(true), which creates a PoolThread named "Binder_XX".
2. The PoolThread runs IPCThreadState::joinThreadPool(true), which queues the BC_ENTER_LOOPER command into mOut.
3. talkWithDriver() delivers the command via ioctl(BINDER_WRITE_READ); in binder_ioctl(), binder_get_thread() creates a binder_thread for the calling thread and inserts it into proc->threads.
4. binder_thread_write() handles BC_ENTER_LOOPER and marks the thread's looper state with BINDER_LOOPER_STATE_ENTERED.
After the first Binder thread of a process has been created, additional Binder threads can be created in two ways: the driver can request one with BR_SPAWN_LOOPER, or the application can create a thread itself and have it call joinThreadPool() directly.
Most subsequent Binder threads are created through the BR_SPAWN_LOOPER command, which is issued by the Binder driver. When the driver detects that there are not enough Binder threads, it sends BR_SPAWN_LOOPER up to the application layer to have a new Binder thread created.
drivers/staging/android/binder.c
```c
static int binder_thread_read(struct binder_proc *proc,
                              struct binder_thread *thread,
                              binder_uintptr_t binder_buffer, size_t size,
                              binder_size_t *consumed, int non_block)
{
    ......
    // whether to take work items from the proc work list
    wait_for_proc_work = thread->transaction_stack == NULL &&
                         list_empty(&thread->todo);
    ......
    // mark this binder thread as waiting for work
    thread->looper |= BINDER_LOOPER_STATE_WAITING;
    // if it will handle proc work, increase ready_threads
    if (wait_for_proc_work)
        proc->ready_threads++;

    binder_unlock(__func__);
    ......
    // the thread sleeps here, waiting for data to arrive
    if (wait_for_proc_work) {
        ......
        ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
    } else {
        ......
        ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
    }

    binder_lock(__func__);

    // if it handled proc work, decrease ready_threads: this binder thread is now processing data
    if (wait_for_proc_work)
        proc->ready_threads--;
    // clear the waiting state
    thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
    ......
    // if no idle binder thread is left and the maximum has not been reached, send BR_SPAWN_LOOPER
    if (proc->requested_threads + proc->ready_threads == 0 &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
         BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
         /*spawn a new thread if we leave this out */) {
        proc->requested_threads++;
        binder_debug(BINDER_DEBUG_THREADS,
                     "%d:%d BR_SPAWN_LOOPER\n",
                     proc->pid, thread->pid);
        if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
            return -EFAULT;
        binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
    }
    return 0;
}
```
When the process has no idle Binder threads left, the driver sends the BR_SPAWN_LOOPER command. On receiving it, the application layer calls spawnPooledThread(false). As analyzed above, the new thread then sends the BC_REGISTER_LOOPER command to the driver.
drivers/staging/android/binder.c
```c
static int binder_thread_write(struct binder_proc *proc,
                               struct binder_thread *thread,
                               binder_uintptr_t binder_buffer, size_t size,
                               binder_size_t *consumed)
{
    ......
    case BC_REGISTER_LOOPER:
        ......
        } else {
            // update the thread counters in proc
            proc->requested_threads--;
            proc->requested_threads_started++;
        }
        // set the thread's looper state
        thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
        break;
    ......
}
```
As the code shows, binder_proc keeps three thread-related counters:
- requested_threads: threads the driver has asked user space to spawn (incremented when BR_SPAWN_LOOPER is sent) that have not yet registered;
- requested_threads_started: requested threads that have registered with BC_REGISTER_LOOPER;
- ready_threads: idle Binder threads currently waiting for work from the process.
Based on the analysis above, Binder threads are created in two ways:
- actively by the application: the first Binder thread is created through startThreadPool() (or a thread calls joinThreadPool() itself) and registers with BC_ENTER_LOOPER;
- on demand by the driver: when no ready thread is available and the maximum has not been reached, the driver sends BR_SPAWN_LOOPER, user space calls spawnPooledThread(false), and the new thread registers with BC_REGISTER_LOOPER.
To summarize the Binder thread creation flow: once the main Binder thread receives a message, another Binder thread is spawned; after that, a new thread is created only when a message arrives and no idle thread is available. This is why some native services call joinThreadPool() during initialization to create an extra thread: they know for certain that they will use cross-process communication, so the service starts out with two Binder threads. A rough sketch of that pattern is shown below.
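The following is a minimal sketch (assumed, not taken from any particular service) of a native service's main(); "my.hypothetical.service" and MyService are placeholders for a real service implementation.

```cpp
#include <binder/ProcessState.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>

using namespace android;

int main()
{
    // First use of ProcessState::self() runs the constructor analyzed above:
    // it opens /dev/binder and mmaps the (1MB - 8KB) transaction buffer.
    sp<ProcessState> proc(ProcessState::self());

    // Register the service with servicemanager.
    // "my.hypothetical.service" and MyService are placeholders for a real BnInterface implementation.
    // defaultServiceManager()->addService(String16("my.hypothetical.service"), new MyService());

    // Spawn the pool's main Binder thread (registers with BC_ENTER_LOOPER).
    proc->startThreadPool();

    // Let the current thread serve Binder transactions as well,
    // so the service starts out with two Binder threads.
    IPCThreadState::self()->joinThreadPool();
    return 0;
}
```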