If you have read the previous article on the C service application, you should already understand one complete use of binder, but you probably still have plenty of questions. Starting with this article we analyse the data exchange together with the binder driver.
| Data structure | Description |
|---|---|
| binder_proc | Every process that opens the binder device file gets a binder_proc structure created in the driver. It records the process's state and bookkeeping, for example the thread table, the binder node table and the node reference table. |
| binder_thread | Every binder thread has a corresponding binder_thread structure in the driver. It records thread-related information such as the work the thread still has to complete. |
| binder_node | binder_proc holds a table of binder node objects; each entry is a binder_node structure. |
| binder_ref | binder_proc also holds a node reference table; each entry is a binder_ref structure that stores a pointer to the referenced binder_node. |
| binder_buffer | The driver sets up a large buffer area via mmap; for every binder transfer a binder_buffer structure is allocated from that area to hold the data. |
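To make the relationships concrete, here is a heavily trimmed sketch of the bookkeeping that binder_proc keeps. This is an illustrative excerpt only, not the full kernel definition; the field names follow the driver code quoted below.

```c
/* Illustrative excerpt only -- the real binder.c definition has many more fields. */
struct binder_proc {
	struct hlist_node proc_node;   /* links this proc into the global binder_procs list */
	struct rb_root threads;        /* red-black tree of binder_thread, keyed by pid */
	struct rb_root nodes;          /* binder_node objects owned by this process */
	struct rb_root refs_by_desc;   /* binder_ref entries, keyed by handle (desc) */
	struct rb_root refs_by_node;   /* binder_ref entries, keyed by target node */
	struct list_head todo;         /* work queued for the whole process */
	wait_queue_head_t wait;        /* threads sleep here waiting for work */
	void *buffer;                  /* kernel address of the mmap'ed transaction area */
	struct list_head buffers;      /* binder_buffer blocks carved out of that area */
};
```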
Let us first walk through the driver code behind the common flow triggered when the application layer calls binder_open(). When the application opens the binder driver, the corresponding driver-level code is as follows:
static int binder_open(struct inode *nodp, struct file *filp) { struct binder_proc *proc; binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", current->group_leader->pid, current->pid); proc = kzalloc(sizeof(*proc), GFP_KERNEL); ① if (proc == NULL) return -ENOMEM; get_task_struct(current); ② proc->tsk = current; INIT_LIST_HEAD(&proc->todo); ③ init_waitqueue_head(&proc->wait); proc->default_priority = task_nice(current); binder_lock(__func__); binder_stats_created(BINDER_STAT_PROC); hlist_add_head(&proc->proc_node, &binder_procs); ④ proc->pid = current->group_leader->pid; INIT_LIST_HEAD(&proc->delivered_death); filp->private_data = proc; ⑤ binder_unlock(__func__); if (binder_debugfs_dir_entry_proc) { char strbuf[11]; snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, binder_debugfs_dir_entry_proc, proc, &binder_proc_fops); } return 0; }
①: Allocate a struct binder_proc for the current process and assign it to proc;
②: Take a reference on the current process's task structure;
③: Initialize the todo list in binder_proc;
④: Insert the current process's binder_proc into the global variable binder_procs;
⑤: Save proc in the file structure's private_data so later calls can retrieve it.
binder_procs is a global hash list, initialized at the top of the binder driver with static HLIST_HEAD(binder_procs);.
The main job of binder_open() is to open the binder device file, create and initialize the binder_proc structure proc for the current process, and insert proc into the global binder_procs list for later lookup.
At the same time proc is stored in the private_data field of the file structure, so that other driver operations can fetch the binder_proc representing the current process from the file structure.
When the application layer calls ioctl, the corresponding driver code is:
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int ret; struct binder_proc *proc = filp->private_data; struct binder_thread *thread; unsigned int size = _IOC_SIZE(cmd); void __user *ubuf = (void __user *)arg; /*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ trace_binder_ioctl(cmd, arg); ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); if (ret) goto err_unlocked; binder_lock(__func__); thread = binder_get_thread(proc); ① if (thread == NULL) { ret = -ENOMEM; goto err; } switch (cmd) { case BINDER_WRITE_READ: ret = binder_ioctl_write_read(filp, cmd, arg, thread); if (ret) goto err; break; case BINDER_SET_MAX_THREADS: if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { ret = -EINVAL; goto err; } break; case BINDER_SET_CONTEXT_MGR: ret = binder_ioctl_set_ctx_mgr(filp); if (ret) goto err; break; case BINDER_THREAD_EXIT: binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", proc->pid, thread->pid); binder_free_thread(proc, thread); thread = NULL; break; case BINDER_VERSION: { struct binder_version __user *ver = ubuf; if (size != sizeof(struct binder_version)) { ret = -EINVAL; goto err; } if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version)) { ret = -EINVAL; goto err; } break; } default: ret = -EINVAL; goto err; } ret = 0; err: if (thread) thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; binder_unlock(__func__); wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); if (ret && ret != -ERESTARTSYS) pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); err_unlocked: trace_binder_ioctl_done(ret); return ret; }
Getting the binder version number is simple: send the BINDER_VERSION command to the driver, and the driver writes BINDER_CURRENT_PROTOCOL_VERSION back to user space.
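As a concrete illustration, a user-space version check looks roughly like this. It is a minimal sketch: the ioctl code and struct come from the binder UAPI header (whose include path may differ on older Android trees), everything else is just scaffolding.

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   /* struct binder_version, BINDER_VERSION; path may differ on older trees */

int main(void)
{
	struct binder_version vers;
	int fd = open("/dev/binder", O_RDWR);
	if (fd < 0)
		return 1;

	/* The driver fills vers.protocol_version with BINDER_CURRENT_PROTOCOL_VERSION. */
	if (ioctl(fd, BINDER_VERSION, &vers) < 0)
		return 1;

	printf("binder protocol version: %d\n", vers.protocol_version);
	return 0;
}
```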
Here is what each ioctl command means:
| Command | Meaning | Data format |
|---|---|---|
| BINDER_WRITE_READ | Read data from and write data to the driver; a single call can do both | struct binder_write_read |
| BINDER_SET_MAX_THREADS | Set the maximum number of threads in the thread pool; once the limit is reached the driver no longer asks the application layer to start new threads | size_t |
| BINDER_SET_CONTEXT_MGR | Make the calling process the manager of the binder system; only the servicemanager process uses this command, and it may only be called once | int |
| BINDER_THREAD_EXIT | Tell the driver that the current thread is exiting so it can clean up the thread's data | int |
| BINDER_VERSION | Get the binder version number | struct binder_version |
One thing worth noting: the first time ioctl is called, a binder_thread is created for the calling thread.
static struct binder_thread *binder_get_thread(struct binder_proc *proc) { struct binder_thread *thread = NULL; struct rb_node *parent = NULL; struct rb_node **p = &proc->threads.rb_node; while (*p) { ① parent = *p; thread = rb_entry(parent, struct binder_thread, rb_node); if (current->pid < thread->pid) p = &(*p)->rb_left; else if (current->pid > thread->pid) p = &(*p)->rb_right; else break; } if (*p == NULL) { thread = kzalloc(sizeof(*thread), GFP_KERNEL); ② if (thread == NULL) return NULL; binder_stats_created(BINDER_STAT_THREAD); thread->proc = proc; thread->pid = current->pid; init_waitqueue_head(&thread->wait); ③ INIT_LIST_HEAD(&thread->todo); rb_link_node(&thread->rb_node, parent, p); rb_insert_color(&thread->rb_node, &proc->threads); ④ thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; thread->return_error = BR_OK; thread->return_error2 = BR_OK; } return thread; }
The main job of this function is to find the thread hanging under the current process's bookkeeping: struct binder_thread entries are kept in the red-black tree rooted at the threads field of binder_proc.
①: First walk the red-black tree rooted at threads;
②: If no match is found, allocate space for a struct binder_thread;
③: Initialize the wait queue head and the thread's todo list;
④: Insert the new thread into the process's threads tree.
In short, the tree is searched first; if no matching thread is found, a binder_thread is created, its wait queue and todo list are initialized, and the new node is inserted into the threads red-black tree of the process's binder_proc.
static int binder_mmap(struct file *filp, struct vm_area_struct *vma) { int ret; struct vm_struct *area; struct binder_proc *proc = filp->private_data; ① const char *failure_string; struct binder_buffer *buffer; if (proc->tsk != current) return -EINVAL; if ((vma->vm_end - vma->vm_start) > SZ_4M) vma->vm_end = vma->vm_start + SZ_4M; binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { ret = -EPERM; failure_string = "bad vm_flags"; goto err_bad_arg; } vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; mutex_lock(&binder_mmap_lock); if (proc->buffer) { ret = -EBUSY; failure_string = "already mapped"; goto err_already_mapped; } area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); ② if (area == NULL) { ret = -ENOMEM; failure_string = "get_vm_area"; goto err_get_vm_area_failed; } proc->buffer = area->addr; ③ proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; mutex_unlock(&binder_mmap_lock); #ifdef CONFIG_CPU_CACHE_VIPT if (cache_is_vipt_aliasing()) { while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); vma->vm_start += PAGE_SIZE; } } #endif proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); ④ if (proc->pages == NULL) { ret = -ENOMEM; failure_string = "alloc page array"; goto err_alloc_pages_failed; } proc->buffer_size = vma->vm_end - vma->vm_start; vma->vm_ops = &binder_vm_ops; vma->vm_private_data = proc; if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) { ⑤ ret = -ENOMEM; failure_string = "alloc small buf"; goto err_alloc_small_buf_failed; } buffer = proc->buffer; INIT_LIST_HEAD(&proc->buffers); list_add(&buffer->entry, &proc->buffers); ⑥ buffer->free = 1; binder_insert_free_buffer(proc, buffer); proc->free_async_space = proc->buffer_size / 2; barrier(); proc->files = get_files_struct(current); proc->vma = vma; proc->vma_vm_mm = vma->vm_mm; /*pr_info("binder_mmap: %d %lx-%lx maps %p\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ return 0; err_alloc_small_buf_failed: kfree(proc->pages); proc->pages = NULL; err_alloc_pages_failed: mutex_lock(&binder_mmap_lock); vfree(proc->buffer); proc->buffer = NULL; err_get_vm_area_failed: err_already_mapped: mutex_unlock(&binder_mmap_lock); err_bad_arg: pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); return ret; }
①: filp->private_data holds the binder_proc created when the device was opened;
②: Reserve a region of kernel virtual address space, the same size as the user vma, to serve as the buffer area;
③: Store the start address of that area in proc->buffer and record the offset between the user-space address and the kernel address in proc->user_buffer_offset;
④: Allocate the pages array used to track the physical pages;
⑤: Allocate a first page of physical memory and map it into both the kernel area and the user-space vma (binder_update_page_range());
⑥: Add the resulting block to the process's buffers list as one big free buffer.
binder_mmap() first calls get_vm_area() to reserve a range of kernel virtual address space (the user-space side is the vma passed in by mmap), then calls binder_update_page_range() to allocate physical pages and map them into both that kernel range and the user-space vma. As a result, user space and kernel space share the same physical memory.
Binder uses this mmap mechanism to cut down the number of copies during inter-process transfers. Without mmap, the data would be copied once from the sending process into the kernel with copy_from_user and once more from the kernel into the target process with copy_to_user, i.e. two copies. With mmap only the single copy_from_user is needed: the sender's user-space data is copied into a kernel buffer that is shared with (mapped into) the target process, so the second copy disappears.
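A hedged sketch of how a binder client typically sets this mapping up from user space, mirroring what the C service application code does. The 128 KB size and the struct/function names here are illustrative choices, not something the driver requires (the driver itself only caps a mapping at 4 MB).

```c
#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>

#define BINDER_MAP_SIZE (128 * 1024)   /* illustrative size; the driver caps a mapping at 4 MB */

struct binder_state {
	int fd;
	void *mapped;
	size_t mapsize;
};

/* Minimal sketch: open the driver and mmap the shared buffer area.
 * A read-only mapping is enough because only the driver writes into it. */
static int binder_open_map(struct binder_state *bs)
{
	bs->fd = open("/dev/binder", O_RDWR);
	if (bs->fd < 0)
		return -1;

	bs->mapsize = BINDER_MAP_SIZE;
	bs->mapped = mmap(NULL, bs->mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
	if (bs->mapped == MAP_FAILED)
		return -1;

	return 0;
}
```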
static int binder_ioctl_set_ctx_mgr(struct file *filp) { int ret = 0; struct binder_proc *proc = filp->private_data; kuid_t curr_euid = current_euid(); if (binder_context_mgr_node != NULL) { pr_err("BINDER_SET_CONTEXT_MGR already set\n"); ret = -EBUSY; goto out; } ret = security_binder_set_context_mgr(proc->tsk); if (ret < 0) goto out; if (uid_valid(binder_context_mgr_uid)) { if (!uid_eq(binder_context_mgr_uid, curr_euid)) { pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", from_kuid(&init_user_ns, curr_euid), from_kuid(&init_user_ns, binder_context_mgr_uid)); ret = -EPERM; goto out; } } else { binder_context_mgr_uid = curr_euid; ① } binder_context_mgr_node = binder_new_node(proc, 0, 0); ② if (binder_context_mgr_node == NULL) { ret = -ENOMEM; goto out; } binder_context_mgr_node->local_weak_refs++; binder_context_mgr_node->local_strong_refs++; binder_context_mgr_node->has_strong_ref = 1; binder_context_mgr_node->has_weak_ref = 1; out: return ret; }
①: Save the current process's user id in the global variable binder_context_mgr_uid;
②: Create a binder_node for the current process and store it in the global variable binder_context_mgr_node.
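For reference, this is roughly how servicemanager-style code claims that role from user space. A minimal sketch; only one process may succeed, and only once.

```c
#include <sys/ioctl.h>
#include <linux/android/binder.h>   /* BINDER_SET_CONTEXT_MGR; path may differ on older trees */

/* Sketch: ask the driver to make this process the context manager (handle 0). */
static int become_context_manager(int binder_fd)
{
	return ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0);
}
```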
Next, ioctl is used to write the BC_ENTER_LOOPER command to the driver and the process enters its loop. When ioctl is called with the BINDER_WRITE_READ command, the following function is invoked:
static int binder_ioctl_write_read(struct file *filp, unsigned int cmd, unsigned long arg, struct binder_thread *thread) { int ret = 0; struct binder_proc *proc = filp->private_data; unsigned int size = _IOC_SIZE(cmd); void __user *ubuf = (void __user *)arg; struct binder_write_read bwr; if (size != sizeof(struct binder_write_read)) { ret = -EINVAL; goto out; } if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { ① ret = -EFAULT; goto out; } binder_debug(BINDER_DEBUG_READ_WRITE, "%d:%d write %lld at %016llx, read %lld at %016llx\n", proc->pid, thread->pid, (u64)bwr.write_size, (u64)bwr.write_buffer, (u64)bwr.read_size, (u64)bwr.read_buffer); if (bwr.write_size > 0) { ② ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed); trace_binder_write_done(ret); if (ret < 0) { bwr.read_consumed = 0; if (copy_to_user(ubuf, &bwr, sizeof(bwr))) ret = -EFAULT; goto out; } } if (bwr.read_size > 0) { ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK); trace_binder_read_done(ret); if (!list_empty(&proc->todo)) wake_up_interruptible(&proc->wait); if (ret < 0) { if (copy_to_user(ubuf, &bwr, sizeof(bwr))) ret = -EFAULT; goto out; } } binder_debug(BINDER_DEBUG_READ_WRITE, "%d:%d wrote %lld of %lld, read return %lld of %lld\n", proc->pid, thread->pid, (u64)bwr.write_consumed, (u64)bwr.write_size, (u64)bwr.read_consumed, (u64)bwr.read_size); if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { ret = -EFAULT; goto out; } out: return ret; }
①: Copy the binder_write_read header from user space into kernel space;
②: Whether the call writes, reads, or both is decided purely by the write and read buffer sizes, and the two directions do not interfere with each other.
```c
struct binder_write_read {
	binder_size_t    write_size;     /* bytes to write */
	binder_size_t    write_consumed; /* bytes consumed by driver */
	binder_uintptr_t write_buffer;
	binder_size_t    read_size;      /* bytes to read */
	binder_size_t    read_consumed;  /* bytes consumed by driver */
	binder_uintptr_t read_buffer;
};
```
The structure is simple: normally you only fill in the sizes and the buffer pointers; the layout of the buffer contents was covered in the C service application article.
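A hedged user-space sketch of how such a request is assembled, here writing the BC_ENTER_LOOPER command discussed next. The helper name binder_write is an assumption for illustration; the constants and structures come from the binder UAPI header.

```c
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   /* header path may differ on older trees */

/* Sketch: push a block of BC_* commands to the driver, with no read side. */
static int binder_write(int fd, void *data, size_t len)
{
	struct binder_write_read bwr;

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_size = len;
	bwr.write_buffer = (uintptr_t)data;   /* read_size stays 0: write-only call */

	return ioctl(fd, BINDER_WRITE_READ, &bwr);
}

static int enter_looper(int fd)
{
	uint32_t cmd = BC_ENTER_LOOPER;
	return binder_write(fd, &cmd, sizeof(cmd));
}
```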
Because binder_thread_write() is very long, each command is examined in detail only when it is actually used.
```c
case BC_ENTER_LOOPER:
	binder_debug(BINDER_DEBUG_THREADS,
		     "%d:%d BC_ENTER_LOOPER\n",
		     proc->pid, thread->pid);
	if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
		thread->looper |= BINDER_LOOPER_STATE_INVALID;
		binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
				  proc->pid, thread->pid);
	}
	thread->looper |= BINDER_LOOPER_STATE_ENTERED;
	break;
```
As explained before, this command marks the thread as having entered its loop; the thread in question is the struct binder_thread created on the first ioctl call.
After entering its for loop, the process calls ioctl again, this time to read:
static int binder_thread_read(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size, binder_size_t *consumed, int non_block) { void __user *buffer = (void __user *)(uintptr_t)binder_buffer; void __user *ptr = buffer + *consumed; void __user *end = buffer + size; int ret = 0; int wait_for_proc_work; if (*consumed == 0) { ① if (put_user(BR_NOOP, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); } retry: wait_for_proc_work = thread->transaction_stack == NULL && ② list_empty(&thread->todo); if (thread->return_error != BR_OK && ptr < end) { if (thread->return_error2 != BR_OK) { if (put_user(thread->return_error2, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); binder_stat_br(proc, thread, thread->return_error2); if (ptr == end) goto done; thread->return_error2 = BR_OK; } if (put_user(thread->return_error, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); binder_stat_br(proc, thread, thread->return_error); thread->return_error = BR_OK; goto done; } thread->looper |= BINDER_LOOPER_STATE_WAITING; if (wait_for_proc_work) ③ proc->ready_threads++; binder_unlock(__func__); trace_binder_wait_for_work(wait_for_proc_work, !!thread->transaction_stack, !list_empty(&thread->todo)); if (wait_for_proc_work) { if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | ④ BINDER_LOOPER_STATE_ENTERED))) { binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", proc->pid, thread->pid, thread->looper); wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); } binder_set_nice(proc->default_priority); if (non_block) { if (!binder_has_proc_work(proc, thread)) ret = -EAGAIN; } else ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); ⑤ } else { if (non_block) { if (!binder_has_thread_work(thread)) ret = -EAGAIN; } else ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread)); } binder_lock(__func__); if (wait_for_proc_work) ⑥ proc->ready_threads--; thread->looper &= ~BINDER_LOOPER_STATE_WAITING; if (ret) return ret; while (1) { uint32_t cmd; struct binder_transaction_data tr; struct binder_work *w; struct binder_transaction *t = NULL; if (!list_empty(&thread->todo)) { ⑦ w = list_first_entry(&thread->todo, struct binder_work, entry); } else if (!list_empty(&proc->todo) && wait_for_proc_work) { ⑧ w = list_first_entry(&proc->todo, struct binder_work, entry); } else { /* no data added */ if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) goto retry; break; } if (end - ptr < sizeof(tr) + 4) break; switch (w->type) { case BINDER_WORK_TRANSACTION: { t = container_of(w, struct binder_transaction, work); ⑨ } break; case BINDER_WORK_TRANSACTION_COMPLETE: { cmd = BR_TRANSACTION_COMPLETE; if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, "%d:%d BR_TRANSACTION_COMPLETE\n", proc->pid, thread->pid); list_del(&w->entry); ⑩ kfree(w); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); } break; case BINDER_WORK_NODE: { struct binder_node *node = container_of(w, struct binder_node, work); uint32_t cmd = BR_NOOP; const char *cmd_name; int strong = node->internal_strong_refs || node->local_strong_refs; int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; if (weak && !node->has_weak_ref) { cmd = BR_INCREFS; cmd_name = "BR_INCREFS"; 
node->has_weak_ref = 1; node->pending_weak_ref = 1; node->local_weak_refs++; } else if (strong && !node->has_strong_ref) { cmd = BR_ACQUIRE; cmd_name = "BR_ACQUIRE"; node->has_strong_ref = 1; node->pending_strong_ref = 1; node->local_strong_refs++; } else if (!strong && node->has_strong_ref) { cmd = BR_RELEASE; cmd_name = "BR_RELEASE"; node->has_strong_ref = 0; } else if (!weak && node->has_weak_ref) { cmd = BR_DECREFS; cmd_name = "BR_DECREFS"; node->has_weak_ref = 0; } if (cmd != BR_NOOP) { if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (put_user(node->ptr, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); if (put_user(node->cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n", proc->pid, thread->pid, cmd_name, node->debug_id, (u64)node->ptr, (u64)node->cookie); } else { list_del_init(&w->entry); if (!weak && !strong) { binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d:%d node %d u%016llx c%016llx deleted\n", proc->pid, thread->pid, node->debug_id, (u64)node->ptr, (u64)node->cookie); rb_erase(&node->rb_node, &proc->nodes); kfree(node); binder_stats_deleted(BINDER_STAT_NODE); } else { binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d:%d node %d u%016llx c%016llx state unchanged\n", proc->pid, thread->pid, node->debug_id, (u64)node->ptr, (u64)node->cookie); } } } break; case BINDER_WORK_DEAD_BINDER: case BINDER_WORK_DEAD_BINDER_AND_CLEAR: case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { struct binder_ref_death *death; uint32_t cmd; death = container_of(w, struct binder_ref_death, work); if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; else cmd = BR_DEAD_BINDER; if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (put_user(death->cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, "%d:%d %s %016llx\n", proc->pid, thread->pid, cmd == BR_DEAD_BINDER ? 
"BR_DEAD_BINDER" : "BR_CLEAR_DEATH_NOTIFICATION_DONE", (u64)death->cookie); if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { list_del(&w->entry); kfree(death); binder_stats_deleted(BINDER_STAT_DEATH); } else list_move(&w->entry, &proc->delivered_death); if (cmd == BR_DEAD_BINDER) goto done; /* DEAD_BINDER notifications can cause transactions */ } break; } if (!t) continue; BUG_ON(t->buffer == NULL); if (t->buffer->target_node) { struct binder_node *target_node = t->buffer->target_node; tr.target.ptr = target_node->ptr; tr.cookie = target_node->cookie; t->saved_priority = task_nice(current); if (t->priority < target_node->min_priority && !(t->flags & TF_ONE_WAY)) binder_set_nice(t->priority); else if (!(t->flags & TF_ONE_WAY) || t->saved_priority > target_node->min_priority) binder_set_nice(target_node->min_priority); cmd = BR_TRANSACTION; ①① } else { tr.target.ptr = 0; tr.cookie = 0; cmd = BR_REPLY; } tr.code = t->code; ①② tr.flags = t->flags; tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); if (t->from) { struct task_struct *sender = t->from->proc->tsk; tr.sender_pid = task_tgid_nr_ns(sender, task_active_pid_ns(current)); } else { tr.sender_pid = 0; } tr.data_size = t->buffer->data_size; tr.offsets_size = t->buffer->offsets_size; tr.data.ptr.buffer = (binder_uintptr_t)( (uintptr_t)t->buffer->data + proc->user_buffer_offset); tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (copy_to_user(ptr, &tr, sizeof(tr))) ①③ return -EFAULT; ptr += sizeof(tr); trace_binder_transaction_received(t); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_TRANSACTION, "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", proc->pid, thread->pid, (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : "BR_REPLY", t->debug_id, t->from ? t->from->proc->pid : 0, t->from ? t->from->pid : 0, cmd, t->buffer->data_size, t->buffer->offsets_size, (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); list_del(&t->work.entry); t->buffer->allow_user_free = 1; if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { t->to_parent = thread->transaction_stack; t->to_thread = thread; thread->transaction_stack = t; } else { t->buffer->transaction = NULL; kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); } break; } done: *consumed = ptr - buffer; ①④ if (proc->requested_threads + proc->ready_threads == 0 && proc->requested_threads_started < proc->max_threads && (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ /*spawn a new thread if we leave this out */) { proc->requested_threads++; binder_debug(BINDER_DEBUG_THREADS, "%d:%d BR_SPAWN_LOOPER\n", proc->pid, thread->pid); if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) return -EFAULT; binder_stat_br(proc, thread, BR_SPAWN_LOOPER); } return 0; }
①: If *consumed is 0, a BR_NOOP is written into the user read buffer first (the user-space thread is still blocked inside the ioctl at this point);
②: If the thread's transaction stack is empty and its own todo list is empty, the thread will wait for process-wide work (wait_for_proc_work);
③: A thread that is about to wait for process work increments the process's ready_threads counter;
④: If the thread started reading before writing BC_REGISTER_LOOPER or BC_ENTER_LOOPER, the driver reports a user error and may put the thread to sleep on the error wait queue;
⑤: The thread then sleeps on proc->wait; when woken it re-checks binder_has_proc_work() (the process todo list plus the thread's looper state) and goes back to sleep if there is still nothing to do. A thread that is not waiting for process work sleeps on its own thread->wait and checks binder_has_thread_work() instead;
⑥: Reaching this point means the thread has been woken up: ready_threads is decremented and the BINDER_LOOPER_STATE_WAITING flag is cleared;
⑦: The thread's own todo list is checked first for work to handle;
⑧: Then the process's todo list is checked (only if the thread was waiting for process work);
⑨: container_of() recovers the address of the enclosing struct binder_transaction from the binder_work entry;
⑩: If the work type is BINDER_WORK_TRANSACTION_COMPLETE, the transfer is finished and the work item is removed from the todo list and freed;
①①: The command returned to user space is BR_TRANSACTION (there is a target node) or BR_REPLY;
①②: The binder_transaction_data to be returned is filled in;
①③: tr is copied to user space; ptr points into the user-space read buffer;
①④: Finally, *consumed records how many bytes were written back by this read.
Take a closer look at point ①③:

```c
tr.data.ptr.buffer = (binder_uintptr_t)(
		(uintptr_t)t->buffer->data +
		proc->user_buffer_offset);
```

Only the address of the data buffer is copied to user space. Because mmap is used, user space can access that memory directly, which is exactly where the single-copy efficiency comes from.
Notice that the data returned by a read generally has the format BR_NOOP + CMD + data + CMD + data ....
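A hedged sketch of the user-space loop that consumes such a read buffer. The BR_* constants are the real ones; the commented-out handler calls are placeholders for illustration.

```c
#include <stddef.h>
#include <stdint.h>
#include <linux/android/binder.h>   /* header path may differ on older trees */

/* Sketch: walk a buffer of BR_NOOP + cmd + payload + cmd + payload ... */
static int binder_parse(uint8_t *ptr, size_t size)
{
	uint8_t *end = ptr + size;

	while (ptr < end) {
		uint32_t cmd = *(uint32_t *)ptr;
		ptr += sizeof(uint32_t);

		switch (cmd) {
		case BR_NOOP:
		case BR_TRANSACTION_COMPLETE:
			break;                  /* no payload, nothing to do */
		case BR_TRANSACTION: {
			struct binder_transaction_data *txn =
				(struct binder_transaction_data *)ptr;
			ptr += sizeof(*txn);
			/* handle_transaction(txn); placeholder: dispatch to the service code */
			break;
		}
		case BR_REPLY: {
			struct binder_transaction_data *txn =
				(struct binder_transaction_data *)ptr;
			ptr += sizeof(*txn);
			/* handle_reply(txn); placeholder */
			break;
		}
		default:
			return -1;              /* command not handled by this sketch */
		}
	}
	return 0;
}
```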
When ServiceManager enters its loop and performs its first read, no other work is pending yet, so the driver just returns a BR_NOOP and the thread goes to sleep.
In led_control_server.c, the call

svcmgr_publish(bs, svcmgr, LED_CONTROL_SERVER_NAME, led_control);

registers the process as a service provider. Internally it uses binder_call(), which writes a BC_TRANSACTION command; the detailed user-space flow was covered in the C service application article, so here we concentrate on the driver side.
Note that binder_call() both writes to and reads from the binder driver in a single call; let us look at the write side first and then the read side.
```c
case BC_TRANSACTION:
case BC_REPLY: {
	struct binder_transaction_data tr;

	if (copy_from_user(&tr, ptr, sizeof(tr)))
		return -EFAULT;
	ptr += sizeof(tr);
	binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
	break;
}
```
The binder_transaction_data in the user-space write_buffer is first copied into the kernel variable tr, and binder_transaction() is then called to process it.
This function is enormous...
static void binder_transaction(struct binder_proc *proc, struct binder_thread *thread, struct binder_transaction_data *tr, int reply) { struct binder_transaction *t; struct binder_work *tcomplete; binder_size_t *offp, *off_end; struct binder_proc *target_proc; struct binder_thread *target_thread = NULL; struct binder_node *target_node = NULL; struct list_head *target_list; wait_queue_head_t *target_wait; struct binder_transaction *in_reply_to = NULL; struct binder_transaction_log_entry *e; uint32_t return_error; e = binder_transaction_log_add(&binder_transaction_log); e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); e->from_proc = proc->pid; e->from_thread = thread->pid; e->target_handle = tr->target.handle; e->data_size = tr->data_size; e->offsets_size = tr->offsets_size; if (reply) { ...... } else { if (tr->target.handle) { struct binder_ref *ref; ref = binder_get_ref(proc, tr->target.handle); ① if (ref == NULL) { binder_user_error("%d:%d got transaction to invalid handle\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; goto err_invalid_target_handle; } target_node = ref->node; } else { target_node = binder_context_mgr_node; ② if (target_node == NULL) { return_error = BR_DEAD_REPLY; goto err_no_context_mgr_node; } } e->to_node = target_node->debug_id; target_proc = target_node->proc; ③ if (target_proc == NULL) { return_error = BR_DEAD_REPLY; goto err_dead_binder; } if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) { return_error = BR_FAILED_REPLY; goto err_invalid_target_handle; } if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { ④ struct binder_transaction *tmp; tmp = thread->transaction_stack; if (tmp->to_thread != thread) { binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", proc->pid, thread->pid, tmp->debug_id, tmp->to_proc ? tmp->to_proc->pid : 0, tmp->to_thread ? 
tmp->to_thread->pid : 0); return_error = BR_FAILED_REPLY; goto err_bad_call_stack; } while (tmp) { if (tmp->from && tmp->from->proc == target_proc) ⑤ target_thread = tmp->from; tmp = tmp->from_parent; } } } if (target_thread) { e->to_thread = target_thread->pid; target_list = &target_thread->todo; target_wait = &target_thread->wait; } else { target_list = &target_proc->todo; target_wait = &target_proc->wait; } e->to_proc = target_proc->pid; /* TODO: reuse incoming transaction for reply */ t = kzalloc(sizeof(*t), GFP_KERNEL); if (t == NULL) { return_error = BR_FAILED_REPLY; goto err_alloc_t_failed; } binder_stats_created(BINDER_STAT_TRANSACTION); tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); if (tcomplete == NULL) { return_error = BR_FAILED_REPLY; goto err_alloc_tcomplete_failed; } binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); t->debug_id = ++binder_last_id; e->debug_id = t->debug_id; if (reply) binder_debug(BINDER_DEBUG_TRANSACTION, "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n", proc->pid, thread->pid, t->debug_id, target_proc->pid, target_thread->pid, (u64)tr->data.ptr.buffer, (u64)tr->data.ptr.offsets, (u64)tr->data_size, (u64)tr->offsets_size); else binder_debug(BINDER_DEBUG_TRANSACTION, "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n", proc->pid, thread->pid, t->debug_id, target_proc->pid, target_node->debug_id, (u64)tr->data.ptr.buffer, (u64)tr->data.ptr.offsets, (u64)tr->data_size, (u64)tr->offsets_size); if (!reply && !(tr->flags & TF_ONE_WAY)) t->from = thread; else t->from = NULL; t->sender_euid = task_euid(proc->tsk); t->to_proc = target_proc; t->to_thread = target_thread; t->code = tr->code; t->flags = tr->flags; t->priority = task_nice(current); trace_binder_transaction(reply, t, target_node); t->buffer = binder_alloc_buf(target_proc, tr->data_size, ⑥ tr->offsets_size, !reply && (t->flags & TF_ONE_WAY)); if (t->buffer == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_alloc_buf_failed; } t->buffer->allow_user_free = 0; t->buffer->debug_id = t->debug_id; t->buffer->transaction = t; t->buffer->target_node = target_node; trace_binder_transaction_alloc_buf(t->buffer); if (target_node) binder_inc_node(target_node, 1, 0, NULL); offp = (binder_size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *))); if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) tr->data.ptr.buffer, tr->data_size)) { binder_user_error("%d:%d got transaction with invalid data ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; goto err_copy_data_failed; } if (copy_from_user(offp, (const void __user *)(uintptr_t) tr->data.ptr.offsets, tr->offsets_size)) { binder_user_error("%d:%d got transaction with invalid offsets ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; goto err_copy_data_failed; } if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", proc->pid, thread->pid, (u64)tr->offsets_size); return_error = BR_FAILED_REPLY; goto err_bad_offset; } off_end = (void *)offp + tr->offsets_size; for (; offp < off_end; offp++) { struct flat_binder_object *fp; if (*offp > t->buffer->data_size - sizeof(*fp) || t->buffer->data_size < sizeof(*fp) || !IS_ALIGNED(*offp, sizeof(u32))) { binder_user_error("%d:%d got transaction with invalid offset, %lld\n", proc->pid, thread->pid, (u64)*offp); return_error = BR_FAILED_REPLY; goto err_bad_offset; } fp = (struct flat_binder_object *)(t->buffer->data + *offp); ⑦ switch 
(fp->type) { case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: { struct binder_ref *ref; struct binder_node *node = binder_get_node(proc, fp->binder); ⑧ if (node == NULL) { node = binder_new_node(proc, fp->binder, fp->cookie); if (node == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_new_node_failed; } node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); } if (fp->cookie != node->cookie) { binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", proc->pid, thread->pid, (u64)fp->binder, node->debug_id, (u64)fp->cookie, (u64)node->cookie); return_error = BR_FAILED_REPLY; goto err_binder_get_ref_for_node_failed; } if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { return_error = BR_FAILED_REPLY; goto err_binder_get_ref_for_node_failed; } ref = binder_get_ref_for_node(target_proc, node); ⑨ if (ref == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_get_ref_for_node_failed; } if (fp->type == BINDER_TYPE_BINDER) ⑩ fp->type = BINDER_TYPE_HANDLE; else fp->type = BINDER_TYPE_WEAK_HANDLE; fp->handle = ref->desc; ①① binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo); trace_binder_transaction_node_to_ref(t, node, ref); binder_debug(BINDER_DEBUG_TRANSACTION, " node %d u%016llx -> ref %d desc %d\n", node->debug_id, (u64)node->ptr, ref->debug_id, ref->desc); } break; ......... if (reply) { BUG_ON(t->buffer->async_transaction != 0); binder_pop_transaction(target_thread, in_reply_to); } else if (!(t->flags & TF_ONE_WAY)) { BUG_ON(t->buffer->async_transaction != 0); t->need_reply = 1; t->from_parent = thread->transaction_stack; thread->transaction_stack = t; } else { BUG_ON(target_node == NULL); BUG_ON(t->buffer->async_transaction != 1); if (target_node->has_async_transaction) { target_list = &target_node->async_todo; target_wait = NULL; } else target_node->has_async_transaction = 1; } t->work.type = BINDER_WORK_TRANSACTION; list_add_tail(&t->work.entry, target_list); ①② tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; list_add_tail(&tcomplete->entry, &thread->todo); if (target_wait) wake_up_interruptible(target_wait); return; ..... }
①: A non-zero target handle means a client is calling a server: look up the binder_ref for the handle and take its node as the target;
②: A target handle of 0 means the request is for ServiceManager: the target node is the global binder_context_mgr_node;
③: Get the target binder_proc from the target binder_node;
④: If the call expects a reply, sanity-check the thread's transaction stack;
⑤: Use the transaction stack to find the target thread (not taken on the first transmission);
⑥: Allocate a buffer from the target process's mmap area, then copy_from_user the user-space data into it so the target process can read it directly;
⑦: Get the address of each struct flat_binder_object; offp holds the offsets of the objects from the start of the data (see section 3.2 of the C service application article);
⑧: Look up, or create, a binder_node for the binder entity that was passed in;
⑨: Check whether the target process's refs_by_node tree already has a ref pointing at this node; if not, create one for the target process;
⑩: The object was sent as a binder entity; before handing it to ServiceManager its type is changed to a handle reference;
①①: The desc of that ref is written into the data that will later be delivered to the application layer as the handle; the node's reference count is increased and the related work is queued on the calling thread's todo list;
①②: The transaction is queued on the todo list of the target thread or target process, which is then woken up.
At this point the refs_by_node tree in ServiceManager's binder_proc holds a new binder_ref pointing at the binder entity that was passed in.
That new binder_ref has desc == 1 (the handle handed to the application layer), because it is the first reference to that node; later references get increasing values.
After the write completes, the read starts; because the process's and thread's todo lists hold nothing to handle at first, a BR_NOOP is returned and the thread goes to sleep.
During the write, references were taken on the node of the newly created binder entity and work was queued on the todo list, so the led_control_service process is woken up and starts handling the BINDER_WORK_NODE work, with commands such as BR_INCREFS and BR_ACQUIRE.
This read happens after the BR_INCREFS, BR_ACQUIRE and related commands have been handled; the thread reads once more and goes to sleep.
PS: You can skip this for now and continue with the flow in which ServiceManager is woken up.
Now back to the led_control_service process, which has been woken up by ServiceManager.
Not much happens here: the data is assembled and passed back to user space, and then a free-buffer command is sent down so the driver releases the buffer it allocated from the mmap area.
A detailed walk-through of this function is in section 3.2.3; here is what the ServiceManager process does after being woken up:
①: Take the pending work off the process's todo list;
②: Recover the address of the struct binder_transaction for that work and process the data that was copied from the sending process into the mmap'ed buffer;
③: Deliver it to ServiceManager's user space.
User space then starts processing the data, whose layout follows the BR_NOOP + command + payload format described earlier.
The flow user space follows after receiving the data and parsing the BR_TRANSACTION command is analysed in section 2.4 of the C service application article.
After handling the request, ServiceManager's user space writes a set of commands back to the driver: the reference and death-notification commands (BC_ACQUIRE and BC_REQUEST_DEATH_NOTIFICATION), followed by the buffer-release and reply commands (BC_FREE_BUFFER and BC_REPLY). The reference commands are handled in binder_thread_write() as follows:
switch (cmd) { case BC_INCREFS: case BC_ACQUIRE: case BC_RELEASE: case BC_DECREFS: { uint32_t target; struct binder_ref *ref; const char *debug_string; if (get_user(target, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (target == 0 && binder_context_mgr_node && ① (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { ref = binder_get_ref_for_node(proc, binder_context_mgr_node); if (ref->desc != target) { binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n", proc->pid, thread->pid, ref->desc); } } else ref = binder_get_ref(proc, target); if (ref == NULL) { binder_user_error("%d:%d refcount change on invalid ref %d\n", proc->pid, thread->pid, target); break; } switch (cmd) { case BC_INCREFS: debug_string = "IncRefs"; binder_inc_ref(ref, 0, NULL); break; case BC_ACQUIRE: ② debug_string = "Acquire"; binder_inc_ref(ref, 1, NULL); break; case BC_RELEASE: debug_string = "Release"; binder_dec_ref(ref, 1); break; case BC_DECREFS: default: debug_string = "DecRefs"; binder_dec_ref(ref, 0); break; } binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s ref %d desc %d s %d w %d for node %d\n", proc->pid, thread->pid, debug_string, ref->debug_id, ref->desc, ref->strong, ref->weak, ref->node->debug_id); break; }
①: Look up the binder_ref from the handle passed in (handle 0 maps to the context manager node);
②: Take a strong reference on the ref just obtained.
case BC_REQUEST_DEATH_NOTIFICATION: case BC_CLEAR_DEATH_NOTIFICATION: { uint32_t target; binder_uintptr_t cookie; struct binder_ref *ref; struct binder_ref_death *death; if (get_user(target, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (get_user(cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); ref = binder_get_ref(proc, target); ① if (ref == NULL) { break; } if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { if (ref->death) { break; } death = kzalloc(sizeof(*death), GFP_KERNEL); if (death == NULL) { thread->return_error = BR_ERROR; break; } binder_stats_created(BINDER_STAT_DEATH); INIT_LIST_HEAD(&death->work.entry); death->cookie = cookie; ref->death = death; ② if (ref->node->proc == NULL) { ref->death->work.type = BINDER_WORK_DEAD_BINDER; if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { list_add_tail(&ref->death->work.entry, &thread->todo); } else { list_add_tail(&ref->death->work.entry, &proc->todo); wake_up_interruptible(&proc->wait); } } } else { if (ref->death == NULL) { break; } death = ref->death; if (death->cookie != cookie) { break; } ref->death = NULL; if (list_empty(&death->work.entry)) { death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { list_add_tail(&death->work.entry, &thread->todo); } else { list_add_tail(&death->work.entry, &proc->todo); wake_up_interruptible(&proc->wait); } } else { BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; } } } break;
①: Look up the ref from the handle;
②: Hang the death notification on the ref's death field.
After this, when the process owning the node this ref points to (led_control_service in this scenario) dies, ServiceManager will be notified.
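A hedged sketch of the corresponding user-space request. The packed layout below simply matches what binder_thread_write() reads (command, then handle, then cookie); binder_write() is the illustrative helper sketched earlier, not a real library call.

```c
#include <stdint.h>
#include <linux/android/binder.h>   /* header path may differ on older trees */

/* Sketch: queue a BC_REQUEST_DEATH_NOTIFICATION for `handle`.
 * `cookie` is an opaque value the driver hands back in the later BR_DEAD_BINDER. */
static int request_death_notification(int fd, uint32_t handle, binder_uintptr_t cookie)
{
	struct {
		uint32_t cmd;
		uint32_t handle;
		binder_uintptr_t cookie;
	} __attribute__((packed)) data;

	data.cmd = BC_REQUEST_DEATH_NOTIFICATION;
	data.handle = handle;
	data.cookie = cookie;

	return binder_write(fd, &data, sizeof(data));
}
```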
case BC_FREE_BUFFER: { binder_uintptr_t data_ptr; struct binder_buffer *buffer; if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); buffer = binder_buffer_lookup(proc, data_ptr); ① if (buffer == NULL) { break; } if (!buffer->allow_user_free) { break; } if (buffer->transaction) { buffer->transaction->buffer = NULL; buffer->transaction = NULL; } if (buffer->async_transaction && buffer->target_node) { BUG_ON(!buffer->target_node->has_async_transaction); if (list_empty(&buffer->target_node->async_todo)) buffer->target_node->has_async_transaction = 0; else list_move_tail(buffer->target_node->async_todo.next, &thread->todo); } trace_binder_transaction_buffer_release(buffer); binder_transaction_buffer_release(proc, buffer, NULL); ② binder_free_buf(proc, buffer); break; }
①: Use the data.ptr.buffer address to find the mmap buffer that was allocated when led_control_service's data was copied into the kernel (see 4.1.1.2);
②: Release that buffer.
Note that although data_ptr arrives from user space, it is the same value the kernel previously handed to user space, and user space has not modified it.
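For completeness, a hedged sketch of the user-space side: once a BR_TRANSACTION payload has been consumed, the receiver hands the buffer address straight back with BC_FREE_BUFFER, again using the illustrative binder_write() helper from earlier.

```c
#include <stdint.h>
#include <linux/android/binder.h>   /* header path may differ on older trees */

/* Sketch: tell the driver it may reclaim the mmap'ed buffer backing `txn`. */
static int free_transaction_buffer(int fd, struct binder_transaction_data *txn)
{
	struct {
		uint32_t cmd;
		binder_uintptr_t buffer;
	} __attribute__((packed)) data;

	data.cmd = BC_FREE_BUFFER;
	data.buffer = txn->data.ptr.buffer;   /* same address the kernel reported in BR_TRANSACTION */

	return binder_write(fd, &data, sizeof(data));
}
```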
```c
case BC_TRANSACTION:
case BC_REPLY: {
	struct binder_transaction_data tr;

	if (copy_from_user(&tr, ptr, sizeof(tr)))
		return -EFAULT;
	ptr += sizeof(tr);
	binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
	break;
}
```
This path was already touched on when BC_TRANSACTION was discussed; here we look at the reply case on its own.
static void binder_transaction(struct binder_proc *proc, struct binder_thread *thread, struct binder_transaction_data *tr, int reply) { ....... if (reply) { in_reply_to = thread->transaction_stack; if (in_reply_to == NULL) { binder_user_error("%d:%d got reply transaction with no transaction stack\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; goto err_empty_call_stack; } binder_set_nice(in_reply_to->saved_priority); if (in_reply_to->to_thread != thread) { return_error = BR_FAILED_REPLY; in_reply_to = NULL; goto err_bad_call_stack; } thread->transaction_stack = in_reply_to->to_parent; ① target_thread = in_reply_to->from; if (target_thread == NULL) { return_error = BR_DEAD_REPLY; goto err_dead_binder; } if (target_thread->transaction_stack != in_reply_to) { return_error = BR_FAILED_REPLY; in_reply_to = NULL; target_thread = NULL; goto err_dead_binder; } target_proc = target_thread->proc; ② } else { ...... } ..... if (target_thread) { ③ e->to_thread = target_thread->pid; target_list = &target_thread->todo; target_wait = &target_thread->wait; } else { target_list = &target_proc->todo; target_wait = &target_proc->wait; } ......
①: Find the target thread from the current thread's transaction stack (the current process here is ServiceManager);
②: Obtain the target process from that target thread;
③: This shows that a reply is delivered per-thread, since the work ends up on the target thread's todo list.
Next the data is copied from user space, the transaction is queued on the target thread's todo list, and the target thread is woken up.
Control thus returns to the led_control_service process; see 4.1.2.
```c
case BINDER_SET_MAX_THREADS:
	if (copy_from_user(&proc->max_threads, ubuf,
			   sizeof(proc->max_threads))) {
		ret = -EINVAL;
		goto err;
	}
	break;
```
The limit is simply copied into the max_threads member of proc.
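The matching user-space call is a one-liner; a hedged sketch (the value 15 is just a commonly used figure, not something the driver requires):

```c
#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   /* header path may differ on older trees */

/* Sketch: cap how many extra looper threads the driver may ask this process to spawn. */
static int set_max_threads(int binder_fd)
{
	size_t max_threads = 15;   /* illustrative value */
	return ioctl(binder_fd, BINDER_SET_MAX_THREADS, &max_threads);
}
```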
Starting from service registration:
The flow in which a client obtains a service is almost identical to the one in which a service registers itself, and the driver code involved has all been covered above, so only the concrete flow is given here, shown in the figure below:
When the service is invoked, the handle just obtained is used, and the flow is the same as when the client obtained the service, except that the target process is now led_control_service; it is not repeated here, please refer to the figure above.
The following describes the processes while they are executing in kernel mode.
Looking at the driver source, when a service is registered the led_control_service process copies its user-space data into the kernel, and once ServiceManager is woken up its kernel side can use that data directly.
One more point: when ServiceManager "copies the kernel data to user space", only the address of the buffer that was allocated from ServiceManager's mmap area while led_control_service's transaction was being processed is handed to ServiceManager's user space, and user space can access the data directly through that address.
These are the two ways binder exploits mmap: sharing across processes, and sharing between kernel and user space.
PS: This post is already very long; binder has many more details that still need to be written up. Read the flow from top to bottom, in order.