Android servicemanager and Binder Source Code Analysis, Part 1 ------ ServiceManager at the Native Layer

I have been busy with a project lately and haven't updated much; starting with this post I will analyze some of the internals of the Android source code, using the 6.0.1_r10 version of the Android source as the example.
servicemanager is Android's service manager and one of its most fundamental components; analyzing it lets us look closely at how Binder handles things. Before starting, a word on how I read source code (or any very complex codebase): I go in layer by layer, grasp the overall structure of one layer first, dive into specific points only if they interest me, and summarize each layer as I go. I find this easier to understand, and it avoids chasing a single point all the way down until you get lost in complicated, tedious code. Of course this only reflects my personal experience. These notes are written for myself; if they help someone else, all the better.

I also recommend Mr. Luo Shengyang's (羅昇陽) blog articles; they are excellent and make a good companion reference.

The servicemanager source lives at /frameworks/native/cmds/servicemanager/service_manager.c:

int main(int argc, char **argv)
{
    struct binder_state *bs;

    bs = binder_open(128*1024);
    if (!bs) {
        ALOGE("failed to open binder driver\n");
        return -1;
    }

    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    selinux_enabled = is_selinux_enabled();
    sehandle = selinux_android_service_context_handle();
    selinux_status_open(true);

    if (selinux_enabled > 0) {
        if (sehandle == NULL) {
            ALOGE("SELinux: Failed to acquire sehandle. Aborting.\n");
            abort();
        }

        if (getcon(&service_manager_context) != 0) {
            ALOGE("SELinux: Failed to acquire service_manager context. Aborting.\n");
            abort();
        }
    }

    union selinux_callback cb;
    cb.func_audit = audit_callback;
    selinux_set_callback(SELINUX_CB_AUDIT, cb);
    cb.func_log = selinux_log_callback;
    selinux_set_callback(SELINUX_CB_LOG, cb);

    binder_loop(bs, svcmgr_handler);

    return 0;
}

1. binder_open opens the binder driver device;
2. binder_become_context_manager(bs) registers the process as the Binder context manager;
3. binder_loop(bs, svcmgr_handler) enters a loop, waiting as the server for client requests.

binder_open

Located in /frameworks/native/cmds/servicemanager/binder.c:

struct binder_state *binder_open(size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return NULL;
    }

    bs->fd = open("/dev/binder", O_RDWR);
    if (bs->fd < 0) {
        fprintf(stderr,"binder: cannot open device (%s)\n",
                strerror(errno));
        goto fail_open;
    }

    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        fprintf(stderr,
                "binder: kernel driver version (%d) differs from user space version (%d)\n",
                vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
        goto fail_open;
    }

    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr,"binder: cannot map device (%s)\n",
                strerror(errno));
        goto fail_map;
    }

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}

First, a binder_state struct is allocated, and the rest of the function simply fills in its members: bs->fd gets the file descriptor of the opened driver device, and bs->mapped gets the memory-mapped address.
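For reference, binder_state itself is just a tiny struct defined near the top of the same binder.c; it looks roughly like this:

struct binder_state
{
    int fd;         /* file descriptor of the opened /dev/binder */
    void *mapped;   /* start address returned by mmap */
    size_t mapsize; /* size of the mapped region, 128*1024 here */
};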
As an aside, the use of goto here is quite disciplined, which shows that no language construct is inherently good or bad; it all depends on how you use it.
At this point we can already guess that Binder's mechanism is memory mapping, or file mapping if you like, since on Linux any device can be treated as a file.
Let's not go deeper for now. Going back to main in service_manager.c, the next step is binder_become_context_manager, which makes the process the Binder context manager.

int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}

This does exactly one thing: it issues a control code telling the driver to set the context manager, passing the value 0. We can guess that this 0 carries a specific meaning, namely servicemanager itself; we will come back to this question later.
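In fact the servicemanager sources give this magic 0 a name. A minimal sketch of the relevant define, assuming the 6.0-era binder.h in the same directory (the exact spelling may differ slightly by version):

/* handle 0 is reserved for the context manager, i.e. servicemanager itself */
#define BINDER_SERVICE_MANAGER  0U

We will see it again in svcmgr_handler below, which rejects any transaction whose target is not this handle.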

binder_loop

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

1. It first sends a BC_ENTER_LOOPER command via binder_write, telling the driver that this thread is entering the looper state (internally, binder_write also writes to the driver through the BINDER_WRITE_READ ioctl; see the sketch below);
2. It then enters an infinite loop, continuously reading data from the device; once a read succeeds, it hands the data to binder_parse;
3. binder_parse literally means "parse binder"; what exactly it does isn't clear yet, but we can guess it processes the content just read.
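Regarding point 1, binder_write (also in binder.c) is just a thin wrapper: it fills a binder_write_read with only the write half set and issues the same BINDER_WRITE_READ ioctl. Roughly:

int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    /* only the write side carries data; the read side is left empty */
    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr, "binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}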
binder_parse belongs to the same binder.c layer, so let's look at what it actually does:

int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);
#if TRACE
        fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
        switch(cmd) {
        case BR_NOOP:
            break;
        case BR_TRANSACTION_COMPLETE:
            break;
        case BR_INCREFS:
        case BR_ACQUIRE:
        case BR_RELEASE:
        case BR_DECREFS:
#if TRACE
            fprintf(stderr,"  %p, %p\n", (void *)ptr, (void *)(ptr + sizeof(void *)));
#endif
            ptr += sizeof(struct binder_ptr_cookie);
            break;
        case BR_TRANSACTION: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                res = func(bs, txn, &msg, &reply);
                binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
            }
            ptr += sizeof(*txn);
            break;
        }
        case BR_REPLY: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: reply too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (bio) {
                bio_init_from_txn(bio, txn);
                bio = 0;
            } else {
                /* todo FREE BUFFER */
            }
            ptr += sizeof(*txn);
            r = 0;
            break;
        }
        case BR_DEAD_BINDER: {
            struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr;
            ptr += sizeof(binder_uintptr_t);
            death->func(bs, death->ptr);
            break;
        }
        case BR_FAILED_REPLY:
            r = -1;
            break;
        case BR_DEAD_REPLY:
            r = -1;
            break;
        default:
            ALOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }

    return r;
}

The first 32 bits of the buffer just read from the driver are taken as cmd and dispatched in a switch. The BR_ prefix marks commands coming back from the driver. BR_TRANSACTION literally reads as "transaction", so we can guess it handles content received from the sending side (the client). Looking further, the BR_TRANSACTION branch first casts the received data to a binder_transaction_data struct, then calls binder_dump_txn, which basically just prints some information and isn't very interesting. After that comes the key part: it calls func, which is a binder_handler; a glance at the definition shows it is a callback function. Going back to main in servicemanager, we can see it is svcmgr_handler, whose body also lives in service_manager.c.
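For reference, binder_handler is declared in binder.h roughly as follows, which is why svcmgr_handler has exactly this signature:

typedef int (*binder_handler)(struct binder_state *bs,
                              struct binder_transaction_data *txn,
                              struct binder_io *msg,
                              struct binder_io *reply);

Here is svcmgr_handler itself: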

int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    int allow_isolated;

    //ALOGI("target=%p code=%d pid=%d uid=%d\n",
    //      (void*) txn->target.ptr, txn->code, txn->sender_pid, txn->sender_euid);

    if (txn->target.ptr != BINDER_SERVICE_MANAGER)
        return -1;

    if (txn->code == PING_TRANSACTION)
        return 0;

    // Equivalent to Parcel::enforceInterface(), reading the RPC
    // header with the strict mode policy mask and the interface name.
    // Note that we ignore the strict_policy and don't propagate it
    // further (since we do no outbound RPCs anyway).
    strict_policy = bio_get_uint32(msg);
    s = bio_get_string16(msg, &len);
    if (s == NULL) {
        return -1;
    }

    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s, len));
        return -1;
    }

    if (sehandle && selinux_status_updated() > 0) {
        struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
        if (tmp_sehandle) {
            selabel_close(sehandle);
            sehandle = tmp_sehandle;
        }
    }

    switch(txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
        if (!handle)
            break;
        bio_put_ref(reply, handle);
        return 0;

    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        handle = bio_get_ref(msg);
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        if (do_add_service(bs, s, len, handle, txn->sender_euid,
            allow_isolated, txn->sender_pid))
            return -1;
        break;

    case SVC_MGR_LIST_SERVICES: {
        uint32_t n = bio_get_uint32(msg);

        if (!svc_can_list(txn->sender_pid)) {
            ALOGE("list_service() uid=%d - PERMISSION DENIED\n",
                    txn->sender_euid);
            return -1;
        }
        si = svclist;
        while ((n-- > 0) && si)
            si = si->next;
        if (si) {
            bio_put_string16(reply, si->name);
            return 0;
        }
        return -1;
    }
    default:
        ALOGE("unknown code %d\n", txn->code);
        return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}

A quick look shows this is the concrete handling of the transmitted data, including the add-service flow and the other operations. We won't dig into it for now.
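One detail worth keeping in mind, though: the registered services are held in a simple singly linked list (svclist) of svcinfo nodes, which is what the SVC_MGR_LIST_SERVICES branch above walks. The struct looks roughly like this in service_manager.c:

struct svcinfo
{
    struct svcinfo *next;      /* next node in svclist */
    uint32_t handle;           /* binder handle of the registered service */
    struct binder_death death; /* death-notification callback and cookie */
    int allow_isolated;        /* whether isolated processes may access it */
    size_t len;                /* length of the name */
    uint16_t name[0];          /* UTF-16 service name, stored inline */
};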

At this point we can see that, in the servicemanager -> binder.c split, servicemanager provides the system's service management while binder.c provides the API for operating on the driver device. To recap the whole flow:
1. open the binder driver device;
2. make itself the Binder context manager, by passing 0 down to the driver through binder.c (via ioctl);
3. enter the binder_loop loop, continuously reading from the binder driver, parsing what arrives, and, based on the cmd, handing it to servicemanager for the real processing;
4. inside servicemanager, decide from the data read which cmd action to perform, including add-service and the rest.
Seen this way, the structure of this layer is fairly clear. Written like this, Binder is factored out as an API layer that any caller can sit on top of; in other words, binder.c only handles communication with the binder driver device and throws everything else back to the caller -- a very standard, clever piece of decoupling.
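To illustrate that last point, here is a minimal sketch of a native client built purely on binder.c's API, modeled on bctest.c in the same directory (treat the exact helper names as assumptions taken from the 6.0-era sources): it only needs binder_open plus a call whose target is handle 0.

#include <stdio.h>
#include "binder.h"

int main(int argc, char **argv)
{
    struct binder_state *bs;
    uint32_t svcmgr = BINDER_SERVICE_MANAGER;   /* handle 0: servicemanager */
    uint32_t handle;
    unsigned iodata[512/4];
    struct binder_io msg, reply;

    bs = binder_open(128*1024);
    if (!bs)
        return -1;

    /* ask servicemanager (handle 0) to look up a service by name */
    bio_init(&msg, iodata, sizeof(iodata), 4);
    bio_put_uint32(&msg, 0);                    /* strict-mode header */
    bio_put_string16_x(&msg, SVC_MGR_NAME);
    bio_put_string16_x(&msg, argc > 1 ? argv[1] : "activity");

    if (binder_call(bs, &msg, &reply, svcmgr, SVC_MGR_CHECK_SERVICE))
        return -1;

    handle = bio_get_ref(&reply);               /* handle of the target service */
    if (handle)
        binder_acquire(bs, handle);
    binder_done(bs, &msg, &reply);

    fprintf(stderr, "lookup returned handle %u\n", handle);
    return 0;
}

Everything here goes through binder.c (binder_call again boils down to the BINDER_WRITE_READ ioctl); the caller never touches the driver directly, which is exactly the decoupling described above.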
