Binder Driver: The Simplest Communication

In Android it is fair to say that Binder is everywhere; Binder transactions are happening at every moment. A process rarely has just one Binder transaction in isolation: usually several run concurrently, and nested Binder transactions occur as well. This is especially true of an important process like system_server, which carries even more Binder traffic. When a system problem is traced into system_server, most of the time it turns out to be in the middle of a Binder transaction. Yet no matter how many transactions are in flight or how deeply they nest, they are all built from just two kinds of transmission: synchronous and asynchronous. This article tries to explain the Binder communication flow through the simplest possible transactions.

Binder Synchronous Transactions

The most common kind of Binder transmission is the synchronous transaction, in which the initiating side of the IPC must wait until the peer has finished processing the message before it can continue. A complete synchronous transaction looks like the figure below.
(figure: flow of a complete synchronous Binder transaction)
Skipping over Binder device initialization, we go straight to the transaction itself. The client sends the BC_TRANSACTION command to the Binder driver through the BINDER_WRITE_READ ioctl.
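
Before diving into the kernel side, here is a minimal user-space sketch of that call. It assumes the modern uapi header <linux/android/binder.h>; the helper name and buffer sizes are illustrative and error handling is omitted.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   /* binder_write_read, BC_*, BR_* */

/* hypothetical helper: issue one BC_TRANSACTION and collect BR_* replies */
static int send_bc_transaction(int binder_fd, struct binder_transaction_data *tr)
{
    uint8_t wbuf[sizeof(uint32_t) + sizeof(*tr)];
    uint32_t rbuf[64];
    uint32_t cmd = BC_TRANSACTION;
    struct binder_write_read bwr;

    /* write buffer layout: a u32 command followed by its payload,
     * exactly what binder_thread_write() parses below */
    memcpy(wbuf, &cmd, sizeof(cmd));
    memcpy(wbuf + sizeof(cmd), tr, sizeof(*tr));

    memset(&bwr, 0, sizeof(bwr));
    bwr.write_buffer = (binder_uintptr_t)(uintptr_t)wbuf;
    bwr.write_size   = sizeof(wbuf);
    bwr.read_buffer  = (binder_uintptr_t)(uintptr_t)rbuf;  /* driver fills BR_* commands here */
    bwr.read_size    = sizeof(rbuf);

    /* a single ioctl both writes the command and blocks reading the result */
    return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}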

drivers/staging/android/binder.c

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    ......
    switch (cmd) {
    case BINDER_WRITE_READ: {
        struct binder_write_read bwr;
        ......
        // there is data to write
        if (bwr.write_size > 0) {
            ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
            trace_binder_write_done(ret);
            if (ret < 0) {
                bwr.read_consumed = 0;
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        // there is data to read
        if (bwr.read_size > 0) {
            ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
            trace_binder_read_done(ret);
            if (!list_empty(&proc->todo))
                wake_up_interruptible(&proc->wait);
            if (ret < 0) {
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        ......
        break;
    }
    ......

1.BC_TRANSACTION

To initiate a Binder transaction, the client writes the BC_TRANSACTION command and then waits for the command's result.

drivers/staging/android/binder.c

static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    ......
        case BC_TRANSACTION:
        case BC_REPLY: {
            struct binder_transaction_data tr;

            if (copy_from_user(&tr, ptr, sizeof(tr)))
                return -EFAULT;
            ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
            break;
        }
    ......
}

Both BC_TRANSACTION and BC_REPLY end up in binder_transaction(); the only difference is whether the reply argument is set. binder_transaction() is also the core function of the write path. It is long and carries a lot of logic, so let's walk through it as carefully as we can.

drivers/staging/android/binder.c

static void binder_transaction(struct binder_proc *proc,
                   struct binder_thread *thread,
                   struct binder_transaction_data *tr, int reply)
{
    ......
    if (reply) {
    ......
    } else {
        if (tr->target.handle) {
            // look up the binder node corresponding to this handle
            struct binder_ref *ref;
            ref = binder_get_ref(proc, tr->target.handle, true);
            ......
            target_node = ref->node;
        } else {
            // handle 0 refers to the service manager's binder node
            target_node = binder_context_mgr_node;
            ......
        }
        e->to_node = target_node->debug_id;
        // the binder_proc that owns the target node
        target_proc = target_node->proc;
        ......
        if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
            struct binder_transaction *tmp;
            tmp = thread->transaction_stack;
            ......
            // for a synchronous transaction, check whether the transaction stack holds
            // a transaction coming from the target process; if so, reuse that thread
            while (tmp) {
                if (tmp->from && tmp->from->proc == target_proc)
                    target_thread = tmp->from;
                tmp = tmp->from_parent;
            }
        }
    }
    // if a target thread was found use its todo list, otherwise use the process todo list
    if (target_thread) {
        e->to_thread = target_thread->pid;
        target_list = &target_thread->todo;
        target_wait = &target_thread->wait;
    } else {
        target_list = &target_proc->todo;
        target_wait = &target_proc->wait;
    }
    e->to_proc = target_proc->pid;

    // allocate the binder_transaction
    t = kzalloc(sizeof(*t), GFP_KERNEL);
    ......
    // allocate a binder_work to report transaction completion
    tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
    ......
    // for a synchronous, non-reply transaction, record the current thread as from
    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread;
    else
        t->from = NULL;
    t->sender_euid = proc->tsk->cred->euid;
    // set the target process and thread of the transaction
    t->to_proc = target_proc;
    t->to_thread = target_thread;
    t->code = tr->code;
    t->flags = tr->flags;
    t->priority = task_nice(current);

    
    // allocate the transaction buffer from the target process
    t->buffer = binder_alloc_buf(target_proc, tr->data_size,
        tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
    ......
    t->buffer->allow_user_free = 0;
    t->buffer->debug_id = t->debug_id;
    t->buffer->transaction = t;
    t->buffer->target_node = target_node;
    // increase the reference count on the target node
    if (target_node)
        binder_inc_node(target_node, 1, 0, NULL);

    offp = (binder_size_t *)(t->buffer->data +
                 ALIGN(tr->data_size, sizeof(void *)));
    // copy the user data into the buffer allocated from the target process
    if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
               tr->data.ptr.buffer, tr->data_size)) {
        ......
    }
    // copy the offsets describing the user data's flat_binder_object entries
    if (copy_from_user(offp, (const void __user *)(uintptr_t)
               tr->data.ptr.offsets, tr->offsets_size)) {
        ......
    }
    ......
    off_end = (void *)offp + tr->offsets_size;
    off_min = 0;
    // process each flat_binder_object
    for (; offp < off_end; offp++) {
        struct flat_binder_object *fp;
        ......
        fp = (struct flat_binder_object *)(t->buffer->data + *offp);
        off_min = *offp + sizeof(struct flat_binder_object);
        switch (fp->type) {
        // a binder node: used when a server registers itself
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER: {
            struct binder_ref *ref;
            // create a binder node if none exists yet
            struct binder_node *node = binder_get_node(proc, fp->binder);
            if (node == NULL) {
                node = binder_new_node(proc, fp->binder, fp->cookie);
                ......
            }
            ......
            // create a reference in the target process
            ref = binder_get_ref_for_node(target_proc, node);
            ......
            // rewrite the object type from binder to handle
            if (fp->type == BINDER_TYPE_BINDER)
                fp->type = BINDER_TYPE_HANDLE;
            else
                fp->type = BINDER_TYPE_WEAK_HANDLE;
            fp->binder = 0;
            // store the reference's handle in the object
            fp->handle = ref->desc;
            fp->cookie = 0;
            // increase the reference count
            binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
                       &thread->todo);
            ......
        } break;
        // a binder reference: used when a client sends a handle to a server
        case BINDER_TYPE_HANDLE:
        case BINDER_TYPE_WEAK_HANDLE: {
            // look up the binder reference in the current process
            struct binder_ref *ref = binder_get_ref(
                    proc, fp->handle,
                    fp->type == BINDER_TYPE_HANDLE);
            ......
            if (ref->node->proc == target_proc) {
                // if the transaction stays within one process, use the binder node directly
                if (fp->type == BINDER_TYPE_HANDLE)
                    fp->type = BINDER_TYPE_BINDER;
                else
                    fp->type = BINDER_TYPE_WEAK_BINDER;
                fp->binder = ref->node->ptr;
                fp->cookie = ref->node->cookie;
                binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
                ......
            } else {
                struct binder_ref *new_ref;
                // create a binder reference in the target process
                new_ref = binder_get_ref_for_node(target_proc, ref->node);
                ......
                fp->binder = 0;
                fp->handle = new_ref->desc;
                fp->cookie = 0;
                binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
                ......
            }
        } break;
        // a file descriptor: used to share files or memory
        case BINDER_TYPE_FD: {
            ......
        } break;
        ......
        }
    }
    if (reply) {
        ......
    } else if (!(t->flags & TF_ONE_WAY)) {
        // push the transaction onto the current thread's stack
        t->need_reply = 1;
        t->from_parent = thread->transaction_stack;
        thread->transaction_stack = t;
    } else {
        // asynchronous transactions use the async todo list
        if (target_node->has_async_transaction) {
            target_list = &target_node->async_todo;
            target_wait = NULL;
        } else
            target_node->has_async_transaction = 1;
    }
    // queue the transaction on the target list
    t->work.type = BINDER_WORK_TRANSACTION;
    list_add_tail(&t->work.entry, target_list);
    // queue the completion work on the current thread's todo list
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    list_add_tail(&tcomplete->entry, &thread->todo);
    // wake up the target thread or process
    if (target_wait)
        wake_up_interruptible(target_wait);
    return;
    ......
}

BC_TRANSACTION簡單來講流程以下,異步

  • Find the target process or thread.
  • Copy the user-space data into the target process's buffer and parse the flat_binder_object entries.
  • Push the transaction onto the current thread's transaction stack.
  • Queue BINDER_WORK_TRANSACTION on the target list and BINDER_WORK_TRANSACTION_COMPLETE on the current thread's todo list.
  • Wake up the target process or thread for processing.
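
The following hypothetical sketch shows the two buffers the driver parses in step two: data.ptr.buffer carries the flattened payload containing a flat_binder_object, and data.ptr.offsets records where each object sits inside it. All names are illustrative.

#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

/* build a transaction whose payload is a single binder object at offset 0 */
static void build_tr_with_object(struct binder_transaction_data *tr,
                                 struct flat_binder_object *obj,
                                 binder_size_t *offsets,
                                 uint32_t target_handle)
{
    memset(obj, 0, sizeof(*obj));
    obj->type = BINDER_TYPE_BINDER;   /* the driver rewrites this to BINDER_TYPE_HANDLE */
    obj->binder = (binder_uintptr_t)(uintptr_t)obj;  /* illustrative node id in the sender */

    offsets[0] = 0;                   /* the object sits at offset 0 of the data buffer */

    memset(tr, 0, sizeof(*tr));
    tr->target.handle = target_handle;
    tr->data.ptr.buffer = (binder_uintptr_t)(uintptr_t)obj;
    tr->data.ptr.offsets = (binder_uintptr_t)(uintptr_t)offsets;
    tr->data_size = sizeof(*obj);
    tr->offsets_size = sizeof(binder_size_t);
}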

2.BR_TRANSACTION_COMPLETE

When the client executes BINDER_WRITE_READ, it first writes data through binder_thread_write(), queuing the BINDER_WORK_TRANSACTION_COMPLETE work as shown above, and then immediately calls binder_thread_read() to read the returned data. This is where the BR_TRANSACTION_COMPLETE command is returned to the client thread.

drivers/staging/android/binder.c

static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{
    ......
    // on the first read, prepend a BR_NOOP command for the user
    if (*consumed == 0) {
        if (put_user(BR_NOOP, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
    }

retry:
    // if the thread has no transaction in flight and its todo list is empty,
    // service the process work queue
    wait_for_proc_work = thread->transaction_stack == NULL &&
                list_empty(&thread->todo);
    ......
    thread->looper |= BINDER_LOOPER_STATE_WAITING;
    // a thread servicing the process queue counts as an idle (ready) thread
    if (wait_for_proc_work)
        proc->ready_threads++;
    ......
    // wait until the process or thread work queue is woken up
    if (wait_for_proc_work) {
        ......
            ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
    } else {
        ......
            ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
    }
    ......
    // once woken, start processing; one fewer idle thread
    if (wait_for_proc_work)
        proc->ready_threads--;
    thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
    ......
    while (1) {
        ......
        // the thread's own todo list is served before the process todo list
        if (!list_empty(&thread->todo))
            w = list_first_entry(&thread->todo, struct binder_work, entry);
        else if (!list_empty(&proc->todo) && wait_for_proc_work)
            w = list_first_entry(&proc->todo, struct binder_work, entry);
        else {
            if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
                goto retry;
            break;
        }
        ......
        switch (w->type) {
        ......
        case BINDER_WORK_TRANSACTION_COMPLETE: {
            // return the BR_TRANSACTION_COMPLETE command to the user
            cmd = BR_TRANSACTION_COMPLETE;
            if (put_user(cmd, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            ......
            list_del(&w->entry);
            kfree(w);
            binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
        } break;
        ......
        if (!t)
            continue;
        ......
    }
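
Back in user space, the read buffer now holds a stream of u32 BR_* commands, each followed by its payload. Below is a minimal sketch of that walk, which is effectively what IPCThreadState::waitForResponse() does; the helper name is illustrative.

#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

/* walk the buffer filled in by binder_thread_read() */
static void consume_read_buffer(const uint8_t *buf, size_t size)
{
    const uint8_t *ptr = buf, *end = buf + size;
    uint32_t cmd;

    while (ptr + sizeof(cmd) <= end) {
        memcpy(&cmd, ptr, sizeof(cmd));
        ptr += sizeof(cmd);

        switch (cmd) {
        case BR_NOOP:                  /* prepended on every first read; ignore */
            break;
        case BR_TRANSACTION_COMPLETE:  /* our BC_TRANSACTION reached the driver */
            break;
        case BR_REPLY: {               /* payload: a binder_transaction_data */
            struct binder_transaction_data tr;
            memcpy(&tr, ptr, sizeof(tr));
            ptr += sizeof(tr);
            /* ... hand tr.data to the caller, then issue BC_FREE_BUFFER ... */
            break;
        }
        default:                       /* commands this sketch does not handle */
            return;
        }
    }
}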

3.BR_TRANSACTION

After a server thread starts, it calls talkWithDriver() and waits to read data. Once the Binder driver has processed the client's BC_TRANSACTION command, it wakes the server thread up. The server thread's reading and processing likewise happen in binder_thread_read().

drivers/staging/android/binder.c

static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{
    ......
    while (1) {
        switch (w->type) {
        // binder_transaction() queued BINDER_WORK_TRANSACTION and woke this process
        case BINDER_WORK_TRANSACTION: {
            t = container_of(w, struct binder_transaction, work);
        } break;
        ......
        // only BINDER_WORK_TRANSACTION yields a transaction; otherwise loop again
        if (!t)
            continue;

        BUG_ON(t->buffer == NULL);
        // a non-NULL target_node means this work came from BC_TRANSACTION, so reply with BR_TRANSACTION;
        // otherwise it came from BC_REPLY, so reply with BR_REPLY
        if (t->buffer->target_node) {
            struct binder_node *target_node = t->buffer->target_node;
            tr.target.ptr = target_node->ptr;
            tr.cookie =  target_node->cookie;
            t->saved_priority = task_nice(current);
            if (t->priority < target_node->min_priority &&
                !(t->flags & TF_ONE_WAY))
                binder_set_nice(t->priority);
            else if (!(t->flags & TF_ONE_WAY) ||
                 t->saved_priority > target_node->min_priority)
                binder_set_nice(target_node->min_priority);
            cmd = BR_TRANSACTION;
        } else {
            tr.target.ptr = 0;
            tr.cookie = 0;
            cmd = BR_REPLY;
        }
        tr.code = t->code;
        tr.flags = t->flags;
        tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

        // for synchronous transactions sender_pid is the caller's pid; for asynchronous ones it is 0
        if (t->from) {
            struct task_struct *sender = t->from->proc->tsk;
            tr.sender_pid = task_tgid_nr_ns(sender,
                            task_active_pid_ns(current));
        } else {
            tr.sender_pid = 0;
        }
        ......
        // copy the command and transaction data to user space
        if (put_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (copy_to_user(ptr, &tr, sizeof(tr)))
            return -EFAULT;
        ptr += sizeof(tr);
        ......
        // remove the current work item from the queue
        list_del(&t->work.entry);
        t->buffer->allow_user_free = 1;
        if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
            // for a synchronous BR_TRANSACTION, push the transaction onto this thread's stack
            t->to_parent = thread->transaction_stack;
            t->to_thread = thread;
            thread->transaction_stack = t;
        } else {
            // otherwise the transaction has completed; free it
            t->buffer->transaction = NULL;
            kfree(t);
            binder_stats_deleted(BINDER_STAT_TRANSACTION);
        }
        break;
    }
    ......
}

BR_REPLY follows the same flow; the difference is that sending BR_REPLY means the transaction has completed, so the transaction can be freed.

4.BC_REPLY

After the server receives the BR_TRANSACTION command, it takes out the buffer and processes it; when finished, it sends BC_REPLY back to the Binder driver.

frameworks/native/libs/binder/IPCThreadState.cpp

status_t IPCThreadState::executeCommand(int32_t cmd)
{
    ......
    case BR_TRANSACTION:
        {
            // read the transaction data
            binder_transaction_data tr;
            result = mIn.read(&tr, sizeof(tr));
            ......
            Parcel reply;
            ......
            // let the BBinder parse the data
            if (tr.target.ptr) {
                sp<BBinder> b((BBinder*)tr.cookie);
                error = b->transact(tr.code, buffer, &reply, tr.flags);

            } else {
                error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
            }

            if ((tr.flags & TF_ONE_WAY) == 0) {
                LOG_ONEWAY("Sending reply to %d!", mCallingPid);
                if (error < NO_ERROR) reply.setError(error);
                // a synchronous transaction must send BC_REPLY
                sendReply(reply, 0);
            } else {
                LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
            }
            ......
        }
        break;
    ......
}
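
On the wire, sendReply() boils down to writing a BC_REPLY command followed by a binder_transaction_data describing the reply data, through the same BINDER_WRITE_READ ioctl as before. A hedged sketch, with an illustrative helper name:

#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

/* serialize BC_REPLY + its payload into a write buffer; returns bytes used */
static size_t fill_bc_reply(uint8_t *out, const void *reply_data, size_t reply_size)
{
    uint32_t cmd = BC_REPLY;
    struct binder_transaction_data tr;

    memset(&tr, 0, sizeof(tr));
    /* a reply carries no target handle: the driver locates the caller by
     * popping the transaction stack, as binder_transaction() shows below */
    tr.data.ptr.buffer = (binder_uintptr_t)(uintptr_t)reply_data;
    tr.data_size = reply_size;

    memcpy(out, &cmd, sizeof(cmd));
    memcpy(out + sizeof(cmd), &tr, sizeof(tr));
    return sizeof(cmd) + sizeof(tr);
}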

BC_REPLY is also handled by binder_transaction(), just with the reply argument set. Only the parts that differ from the earlier path are analyzed below.

drivers/staging/android/binder.c

static void binder_transaction(struct binder_proc *proc,
                   struct binder_thread *thread,
                   struct binder_transaction_data *tr, int reply)
{
    ......
    if (reply) {
        // pop from the current thread's transaction stack
        in_reply_to = thread->transaction_stack;
        ......
        thread->transaction_stack = in_reply_to->to_parent;
        // the target thread is the thread that originated the transaction
        target_thread = in_reply_to->from;
        ......
        target_proc = target_thread->proc;
    } else {
        ......
    }
    if (target_thread) {
        e->to_thread = target_thread->pid;
        target_list = &target_thread->todo;
        target_wait = &target_thread->wait;
    } else {
        ......
    }
    ......
    // a reply transaction leaves from empty
    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread;
    else
        t->from = NULL;
    ......
    if (reply) {
        // pop the original transaction from the target thread's stack
        binder_pop_transaction(target_thread, in_reply_to);
    } else if (!(t->flags & TF_ONE_WAY)) {
        ......
    } else {
        ......
    }
    t->work.type = BINDER_WORK_TRANSACTION;
    list_add_tail(&t->work.entry, target_list);
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    list_add_tail(&tcomplete->entry, &thread->todo);
    if (target_wait)
        wake_up_interruptible(target_wait);
    return;
    ......
}

After binder_transaction() has handled BC_REPLY, it likewise queues the work items and wakes the target. BINDER_WORK_TRANSACTION_COMPLETE causes BR_TRANSACTION_COMPLETE to be returned to the current thread, i.e. the server. BINDER_WORK_TRANSACTION is processed by the target, which this time is the client; following the analysis above, the driver then returns BR_REPLY to the client.

5.BC_FREE_BUFFER

Every Binder transmission, whether from client to server or from server to client, ends with the receiver issuing BC_FREE_BUFFER to release the transaction buffer once it has received and processed the data. A synchronous transaction contains two such transmissions: the BC_TRANSACTION issued by the client and the BC_REPLY issued by the server.
For BC_TRANSACTION, the server starts processing the Binder data when it receives the BR_TRANSACTION command and issues BC_FREE_BUFFER once processing is done. The command is not issued directly but through the Parcel's release function: freeBuffer is installed as the release function of the Parcel instance buffer, so freeBuffer is invoked when the Parcel is destroyed.

frameworks/native/libs/binder/IPCThreadState.cpp

status_t IPCThreadState::executeCommand(int32_t cmd)
{
......
    case BR_TRANSACTION:
        {
            binder_transaction_data tr;
            result = mIn.read(&tr, sizeof(tr));
            ......
            Parcel buffer;
            // install freeBuffer as the Parcel's release function
            buffer.ipcSetDataReference(
                reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                tr.data_size,
                reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);
            ......
                sp<BBinder> b((BBinder*)tr.cookie);
                error = b->transact(tr.code, buffer, &reply, tr.flags);
            ......
        }
        break;
    ......
}
......
void IPCThreadState::freeBuffer(Parcel* parcel, const uint8_t* data,                                                                                                                                        
                                size_t /*dataSize*/,
                                const binder_size_t* /*objects*/,
                                size_t /*objectsSize*/, void* /*cookie*/)
{
    ......
    if (parcel != NULL) parcel->closeFileDescriptors();
    // queue the BC_FREE_BUFFER command
    IPCThreadState* state = self();
    state->mOut.writeInt32(BC_FREE_BUFFER);
    state->mOut.writePointer((uintptr_t)data);
}

For BC_REPLY, when the client receives BR_REPLY it either installs freeBuffer as the release function or calls freeBuffer directly.

frameworks/native/libs/binder/IPCThreadState.cpp

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    int32_t cmd;
    int32_t err;

    while (1) {
    ......
        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        // install freeBuffer as the release function
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t),
                            freeBuffer, this);
                    } else {
                        // on error, call freeBuffer directly
                        err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
                        freeBuffer(NULL,
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t), this);
                    }
                } else {
                    freeBuffer(NULL,
                        reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                        tr.data_size,
                        reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                        tr.offsets_size/sizeof(binder_size_t), this);
                    continue;
                }
            }
            goto finish;
        ......
    }
    ......

freeBuffer() queues the BC_FREE_BUFFER command for the Binder driver, which handles it in binder_thread_write() and releases the memory in binder_free_buf().
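
This is the simplest command stream of all: a u32 command followed by the user-space address of the buffer being released, which the driver maps back to its binder_buffer. A sketch with an illustrative helper name:

#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

/* serialize BC_FREE_BUFFER + buffer address; returns bytes used */
static size_t fill_bc_free_buffer(uint8_t *out, binder_uintptr_t buffer_addr)
{
    uint32_t cmd = BC_FREE_BUFFER;

    memcpy(out, &cmd, sizeof(cmd));
    memcpy(out + sizeof(cmd), &buffer_addr, sizeof(buffer_addr));
    return sizeof(cmd) + sizeof(buffer_addr);
}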

drivers/staging/android/binder.c 

static void binder_free_buf(struct binder_proc *proc,
                struct binder_buffer *buffer)
{
    size_t size, buffer_size;

    // compute the total size of this buffer
    buffer_size = binder_buffer_size(proc, buffer);

    size = ALIGN(buffer->data_size, sizeof(void *)) +
        ALIGN(buffer->offsets_size, sizeof(void *));
    ......
    // for an asynchronous transaction, return the space to free_async_space
    if (buffer->async_transaction) {
        proc->free_async_space += size + sizeof(struct binder_buffer);
        ......
    }

    // release the physical pages
    binder_update_page_range(proc, 0,
        (void *)PAGE_ALIGN((uintptr_t)buffer->data),
        (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
        NULL);
    // erase the buffer from the allocated_buffers tree
    rb_erase(&buffer->rb_node, &proc->allocated_buffers);
    buffer->free = 1;
    // merge with the following buffer if it is free
    if (!list_is_last(&buffer->entry, &proc->buffers)) {
        struct binder_buffer *next = list_entry(buffer->entry.next,
                        struct binder_buffer, entry);
        if (next->free) {
            rb_erase(&next->rb_node, &proc->free_buffers);
            binder_delete_free_buffer(proc, next);
        }
    }
    // merge with the preceding buffer if it is free
    if (proc->buffers.next != &buffer->entry) {
        struct binder_buffer *prev = list_entry(buffer->entry.prev,
                        struct binder_buffer, entry);
        if (prev->free) {
            binder_delete_free_buffer(proc, buffer);
            rb_erase(&prev->rb_node, &proc->free_buffers);
            buffer = prev;
        }
    }
    // insert the merged buffer into free_buffers
    binder_insert_free_buffer(proc, buffer);
}
......
static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    ......
        case BC_FREE_BUFFER: {
            binder_uintptr_t data_ptr;
            struct binder_buffer *buffer;

            // fetch the buffer address from user space
            if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(binder_uintptr_t);

            // look up the corresponding binder_buffer in the allocated tree
            buffer = binder_buffer_lookup(proc, data_ptr);
            ......
            if (buffer->transaction) {
                buffer->transaction->buffer = NULL;
                buffer->transaction = NULL;
            }
            // for async transactions, freeing the buffer moves the next pending
            // async_todo work onto the thread's todo list
            if (buffer->async_transaction && buffer->target_node) {
                BUG_ON(!buffer->target_node->has_async_transaction);
                if (list_empty(&buffer->target_node->async_todo))
                    buffer->target_node->has_async_transaction = 0;
                else
                    list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
            }
            trace_binder_transaction_buffer_release(buffer);
            // drop the binder reference counts held by the buffer
            binder_transaction_buffer_release(proc, buffer, NULL);
            // free the buffer's memory
            binder_free_buf(proc, buffer);
            break;
        }
    ......
}

Binder Asynchronous Transactions

In Binder communication, if the client only wants to send data and does not care about the server's result, it can use an asynchronous transaction. An asynchronous transaction requires the TF_ONE_WAY bit to be set in the transaction flags; the simplified flow is shown in the figure below.
(figure: flow of an asynchronous Binder transaction)
Inside the Binder driver, an asynchronous transaction goes through the same path as a synchronous one, so here we focus on how the TF_ONE_WAY flag is handled. A sketch of issuing one follows.
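
A hedged sketch of building a one-way transaction, with an illustrative helper name: it is identical to the synchronous case except that TF_ONE_WAY is set and the caller never waits for BR_REPLY; only BR_TRANSACTION_COMPLETE comes back.

#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

/* illustrative: fill a fire-and-forget transaction */
static void make_oneway(struct binder_transaction_data *tr,
                        uint32_t target_handle,
                        const void *data, size_t size)
{
    memset(tr, 0, sizeof(*tr));
    tr->target.handle = target_handle;
    tr->flags = TF_ONE_WAY;            /* no reply will be delivered */
    tr->data.ptr.buffer = (binder_uintptr_t)(uintptr_t)data;
    tr->data_size = size;
    /* send with BC_TRANSACTION via BINDER_WRITE_READ as in the first sketch;
     * if another async transaction is already pending on the target node,
     * the driver parks this one on the node's async_todo list */
}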

drivers/staging/android/binder.c

static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
                          size_t data_size,
                          size_t offsets_size, int is_async)
{
    ......
    // asynchronous transactions must fit in free_async_space
    // (initialized to half of the mmapped buffer in binder_mmap())
    if (is_async &&
        proc->free_async_space < size + sizeof(struct binder_buffer)) {
        binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
                 "%d: binder_alloc_buf size %zd failed, no async space left\n",
                  proc->pid, size);
        return NULL;
    }
    ......
    buffer->data_size = data_size;
    buffer->offsets_size = offsets_size;
    // mark the buffer as belonging to an async transaction
    buffer->async_transaction = is_async;
    if (is_async) {
        // charge the allocation against free_async_space
        proc->free_async_space -= size + sizeof(struct binder_buffer);
        binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                 "%d: binder_alloc_buf size %zd async free %zd\n",
                  proc->pid, size, proc->free_async_space);
    }

    return buffer;
}
......
static void binder_transaction(struct binder_proc *proc,
                   struct binder_thread *thread,
                   struct binder_transaction_data *tr, int reply)
{
    ......
    // an asynchronous transaction leaves the event's from empty
    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread;
    else
        t->from = NULL;
    ......
    // the buffer is allocated with the async flag set
    t->buffer = binder_alloc_buf(target_proc, tr->data_size,
        tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
    ......
    if (reply) {
        ......
    } else if (!(t->flags & TF_ONE_WAY)) {
        ......
    } else {
        // asynchronous transactions use the async_todo queue
        if (target_node->has_async_transaction) {
            target_list = &target_node->async_todo;
            target_wait = NULL;
        } else
            target_node->has_async_transaction = 1;
    }
    t->work.type = BINDER_WORK_TRANSACTION;
    list_add_tail(&t->work.entry, target_list);
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    list_add_tail(&tcomplete->entry, &thread->todo);
    if (target_wait)
        wake_up_interruptible(target_wait);
    return;
    ......
}
......
static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{
    ......
        if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
            ......
        } else {
            // an asynchronous transaction is one-way and needs no reply; free it now
            t->buffer->transaction = NULL;
            kfree(t);
            binder_stats_deleted(BINDER_STAT_TRANSACTION);
        }
        break;
    ......
}