binder驱动

  • startActivity执行的时候跨进程了吗?
  • 如果是跨进程,那么是与哪个进程通信呢?
  • service, broadcast这些呢?

binder的概念

  • Binder是android提供的一种IPC机制,Binder通信机制类似于C/S架构,除了C/S架构外,还有一个管理全局的ServiceManager

avatar

穿梭于各个进程之间的binder

  • avatar

  • servicemanager 的作用是什么?

  • servicemanager 是如何告知binder驱动它是binder机制的上下文管理者?

MediaServer作为例子

MediaServer包括以下这些服务:
  • AudioFlinger
  • AudioPolicyService
  • MediaPlayerService
  • CameraService

来看下MediaServer入口的源码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15

int main(int argc __unused, char **argv __unused)
{
signal(SIGPIPE, SIG_IGN);

sp<ProcessState> proc(ProcessState::self());
sp<IServiceManager> sm(defaultServiceManager());
ALOGI("ServiceManager: %p", sm.get());
InitializeIcuOrDie();
MediaPlayerService::instantiate();
ResourceManagerService::instantiate();
registerExtensions();
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
}

ProcessState

1
2
3
4
5
6
7
8
9
10
11

sp<ProcessState> ProcessState::self()
{
//可以看得出这是一个单例:从这里得到一个信息,每个进程只有一个ProcessState对象
Mutex::Autolock _l(gProcessMutex);
if (gProcess != NULL) {
return gProcess;
}
gProcess = new ProcessState("/dev/binder");
return gProcess;
}
  • ProcessState的构造函数
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30

ProcessState::ProcessState(const char *driver)
: mDriverName(String8(driver))
, mDriverFD(open_driver(driver)) //打开/dev/binder 设备
, mVMStart(MAP_FAILED) //映射内存的起始地址
, mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
, mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
, mExecutingThreadsCount(0)
, mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
, mStarvationStartTimeMs(0)
, mManagesContexts(false)
, mBinderContextCheckFunc(NULL)
, mBinderContextUserData(NULL)
, mThreadPoolStarted(false)
, mThreadPoolSeq(1)
{
if (mDriverFD >= 0) {
// mmap the binder, providing a chunk of virtual address space to receive transactions.
mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
if (mVMStart == MAP_FAILED) {
// *sigh*
ALOGE("Using %s failed: unable to mmap transaction memory.\n", mDriverName.c_str());
close(mDriverFD);
mDriverFD = -1;
mDriverName.clear();
}
}

LOG_ALWAYS_FATAL_IF(mDriverFD < 0, "Binder driver could not be opened. Terminating.");
}
  • ProcessState做了什么?总结一下:
  1. 打开/dev/binder设备,相当于与内核的Binder驱动有了交互的通道(ioctl)。
  2. mmap为Binder驱动分配一块内存来接收数据。
  3. ProcessState具有唯一性,因此一个进程只会打开一次设备

IServiceManager

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17

sp<IServiceManager> defaultServiceManager()
{
if (gDefaultServiceManager != NULL) return gDefaultServiceManager;

{
AutoMutex _l(gDefaultServiceManagerLock);
while (gDefaultServiceManager == NULL) {
gDefaultServiceManager = interface_cast<IServiceManager>(
ProcessState::self()->getContextObject(NULL));
if (gDefaultServiceManager == NULL)
sleep(1);
}
}

return gDefaultServiceManager;
}
1
2
3
4
5

sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
return getStrongProxyForHandle(0); //这个其实是个资源数组
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57

sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;

AutoMutex _l(mLock);

handle_entry* e = lookupHandleLocked(handle);

if (e != NULL) {
// We need to create a new BpBinder if there isn't currently one, OR we
// are unable to acquire a weak reference on this current one. See comment
// in getWeakProxyForHandle() for more info about this.
IBinder* b = e->binder;
if (b == NULL || !e->refs->attemptIncWeak(this)) {
if (handle == 0) {
// Special case for context manager...
// The context manager is the only object for which we create
// a BpBinder proxy without already holding a reference.
// Perform a dummy transaction to ensure the context manager
// is registered before we create the first local reference
// to it (which will occur when creating the BpBinder).
// If a local reference is created for the BpBinder when the
// context manager is not present, the driver will fail to
// provide a reference to the context manager, but the
// driver API does not return status.
//
// Note that this is not race-free if the context manager
// dies while this code runs.
//
// TODO: add a driver API to wait for context manager, or
// stop special casing handle 0 for context manager and add
// a driver API to get a handle to the context manager with
// proper reference counting.

Parcel data;
status_t status = IPCThreadState::self()->transact(
0, IBinder::PING_TRANSACTION, data, NULL, 0);
if (status == DEAD_OBJECT)
return NULL;
}

b = BpBinder::create(handle);
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
// This little bit of nastyness is to allow us to add a primary
// reference to the remote proxy when this team doesn't have one
// but another team is sending the handle to us.
result.force_set(b);
e->refs->decWeak(this);
}
}

return result; //这里的hande=0,所以此处的result为BpBinder::create(handle)
}

这里引出了一个BpBinder的概念,BpBinder和BBinder都是IBinder类派生而来,BpBinder和BBinder是一一对应的,上面创建了BpBinder,对应的BBinder会找对应的handle等于0的BBinder, 在android系统中handle=0的是ServiceManager

BpBinder

看得出interface_cast<IServiceManager>(ProcessState::self()->getContextObject(NULL));相当于interface_cast<IServiceManager>(new BpBinder(0));

IInterface.h

1
2
3
4
5
6

template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
return INTERFACE::asInterface(obj);
}

模板函数,所以interface_cast()相当于

1
2
3
4
5

inline sp<IServiceManager> interface_cast(const sp<IBinder>& obj)
{
return IServiceManager::asInterface(obj);
}

所以回过头来再看IServiceManager.h这个类

1
2
3

//这里我们特别需要关注下这个宏
DECLARE_META_INTERFACE(ServiceManager)

这个宏的定义在IInterface.h文件中

1
2
3
4
5
6
7
8

#define DECLARE_META_INTERFACE(INTERFACE) \
static const ::android::String16 descriptor; \
static ::android::sp<I##INTERFACE> asInterface( \
const ::android::sp<::android::IBinder>& obj); \
virtual const ::android::String16& getInterfaceDescriptor() const; \
I##INTERFACE(); \
virtual ~I##INTERFACE();

我们将ServiceManager对宏定义替换下,得到如下:

1
2
3
4
5
6
7
8

static const ::android::String16 descriptor; \
static ::android::sp<IServiceManager> asInterface( \
const ::android::sp<::android::IBinder>& obj); \
virtual const ::android::String16& getInterfaceDescriptor() const; \
IServiceManager(); \
virtual ~IServiceManager();

IServiceManager对IMPLEMENT_META_INTERFACE宏的使用如下:

1
2

IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");

对宏定义展开便是如下所示:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23

#define IMPLEMENT_META_INTERFACE(INTERFACE, NAME) \
const ::android::String16 IServiceManager::descriptor("android.os.IServiceManager"); \
const ::android::String16& \
IServiceManager::getInterfaceDescriptor() const { \
return IServiceManager::descriptor; \
} \
::android::sp<IServiceManager> IServiceManager::asInterface( \
const ::android::sp<::android::IBinder>& obj) \
{ \
::android::sp<IServiceManager> intr; \
if (obj != NULL) { \
intr = static_cast<IServiceManager*>( \
obj->queryLocalInterface( \
IServiceManager::descriptor).get()); \
if (intr == NULL) { \
intr = new BpServiceManager(obj); \
} \
} \
return intr; \
} \
IServiceManager::IServiceManager() { } \
IServiceManager::~IServiceManager() { } \

从上面的代码上可以看出asInterface得到的是一个BpServiceManager对象。简单总结一下:上面传递new BpBinder(0)作为参数,通过IServiceManager::asInterface()方法得到一个BpServiceManager对象。

BpServiceManager又是什么鬼?

avatar

来看下BpServiceManager的源码,还是在IServiceManager.cpp类中

1
2
3
4
5
6
7
8
9
10

class BpServiceManager : public BpInterface<IServiceManager>
{
public:
explicit BpServiceManager(const sp<IBinder>& impl)
: BpInterface<IServiceManager>(impl)
//这里知道传进来的impl就是BpBinder()
{
}
...

从下面转换的代码中可以看出remote就是BpBinder

1
2
3
4
5
6
7
8

//http://www.androidos.net.cn/android/9.0.0_r8/xref/frameworks/native/libs/binder/include/binder/IInterface.h

template<typename INTERFACE>
inline BpInterface<INTERFACE>::BpInterface(const sp<IBinder>& remote)
: BpRefBase(remote)
{
}

因此BpServiceManager实现IServiceManager的业务函数,而BpBinder作为通信对象。
总结:defaultServiceManager()实际上像是初始化了BpServiceManager对象,以及建立了以BpBinder作为通信参数的通道.

接下来继续分析MediaPlayerService.cpp

1
2
3
4
5
6

//注册服务,defaultServiceManager()函数其实得到的是BpServiceManager对象
void MediaPlayerService::instantiate() {
defaultServiceManager()->addService(
String16("media.player"), new MediaPlayerService());
}

那么就看下BpServiceManager.addService()业务方法

1
2
3
4
5
6
7
8
9
10
11
12

virtual status_t addService(const String16& name, const sp<IBinder>& service,
bool allowIsolated, int dumpsysPriority) {
Parcel data, reply; //打包数据到data
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
data.writeString16(name);
data.writeStrongBinder(service);
data.writeInt32(allowIsolated ? 1 : 0);
data.writeInt32(dumpsysPriority);
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);//上面提到过remote就是BpBinder,所以这里就是请求的数据data打包给了BpBinder通信层
return err == NO_ERROR ? reply.readExceptionCode() : err;
}

接下来再看下BpBinder::transact()方法

1
2
3
4
5
6
7
8
9
10
11
12
13
14

status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
// Once a binder has died, it will never come back to life.
if (mAlive) {
status_t status = IPCThreadState::self()->transact(
mHandle, code, data, reply, flags);//这里把工作交给了IPCThreadState去处理了
if (status == DEAD_OBJECT) mAlive = 0;
return status;
}

return DEAD_OBJECT;
}

继续看IPCThreadState::self()

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38

/*

TLS是Thread Local Storage(线程本地存储空间)的简称。
这种空间每个线程都会有,线程间不共享这些空间。
通过pthread_getspecific()可以获取这些空间
有pthread_getspecific的地方,肯定有调用pthread_setspecific的地方
这里创建了IPCThreadState指针
*/
IPCThreadState* IPCThreadState::self()
{
if (gHaveTLS) {
restart:
const pthread_key_t k = gTLS;
IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
if (st) return st;
return new IPCThreadState;
}

if (gShutdown) {
ALOGW("Calling IPCThreadState::self() during shutdown is dangerous, expect a crash.\n");
return NULL;
}

pthread_mutex_lock(&gTLSMutex);
if (!gHaveTLS) {
int key_create_value = pthread_key_create(&gTLS, threadDestructor);
if (key_create_value != 0) {
pthread_mutex_unlock(&gTLSMutex);
ALOGW("IPCThreadState::self() unable to create TLS key, expect a crash: %s\n",
strerror(key_create_value));
return NULL;
}
gHaveTLS = true;
}
pthread_mutex_unlock(&gTLSMutex);
goto restart;
}

IPCThreadState的构造函数

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20

IPCThreadState::IPCThreadState()
: mProcess(ProcessState::self()),
mStrictModePolicy(0),
mLastTransactionBinderFlags(0)
{
//把自己设置到线程本地存储中
pthread_setspecific(gTLS, this);
clearCaller();
mIn.setDataCapacity(256);
mOut.setDataCapacity(256);
}

/*

每个线程都有一个IPCThreadState,每个IPCThreadState都有一个mIn,一个mOut,
mIn:接收来自Binder设备的数据
mOut: 存储发往Binder设备的数据

*/

ok, 以上获得了IPCThreadState对象,下一步看transact()方法

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31

status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
...
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);

if (err != NO_ERROR) {
if (reply) reply->setError(err);
return (mLastError = err);
}

if ((flags & TF_ONE_WAY) == 0) {
#if 0
if (code == 4) { // relayout
ALOGI(">>>>>> CALLING transaction 4");
} else {
ALOGI(">>>>>> CALLING transaction %d", code);
}
#endif
if (reply) {
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
...

return err;
}

继续分析writeTransactionData()方法

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36

status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;

tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
tr.target.handle = handle; //用来标识目的端,其中0就是ServiceManager的标志, 这里的handle是BpBinder构造方法里传过来的handle==0
tr.code = code; //code消息码,用来switch/case
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0; //进程id
tr.sender_euid = 0; //用户id

const status_t err = data.errorCheck();
if (err == NO_ERROR) {
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
tr.data.ptr.offsets = data.ipcObjects();
} else if (statusBuffer) {
tr.flags |= TF_STATUS_CODE;
*statusBuffer = err;
tr.data_size = sizeof(status_t);
tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
tr.offsets_size = 0;
tr.data.ptr.offsets = 0;
} else {
return (mLastError = err);
}

mOut.writeInt32(cmd);//把命令写到mOut中
mOut.write(&tr, sizeof(tr));

return NO_ERROR;
}

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
uint32_t cmd;
int32_t err;

while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;
err = mIn.errorCheck();
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;

cmd = (uint32_t)mIn.readInt32();

IF_LOG_COMMANDS() {
alog << "Processing waitForResponse Command: "
<< getReturnString(cmd) << endl;
}
...

default:
err = executeCommand(cmd);
if (err != NO_ERROR) goto finish;
break;
}


finish:
if (err != NO_ERROR) {
if (acquireResult) *acquireResult = err;
if (reply) reply->setError(err);
mLastError = err;
}

return err;
}

来看下talkWithDriver()方法

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103

status_t IPCThreadState::talkWithDriver(bool doReceive)
{
if (mProcess->mDriverFD <= 0) {
return -EBADF;
}

binder_write_read bwr;//用来与binder设备交换数据的结构

// Is the read buffer empty?
const bool needRead = mIn.dataPosition() >= mIn.dataSize();

// We don't want to write anything if we are still reading
// from data left in the input buffer and the caller
// has requested to read the next data.
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();

// This is what we'll read.
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();//接收缓冲区信息
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}

IF_LOG_COMMANDS() {
TextOutput::Bundle _b(alog);
if (outAvail != 0) {
alog << "Sending commands to driver: " << indent;
const void* cmds = (const void*)bwr.write_buffer;
const void* end = ((const uint8_t*)cmds)+bwr.write_size;
alog << HexDump(cmds, bwr.write_size) << endl;
while (cmds < end) cmds = printCommand(alog, cmds);
alog << dedent;
}
alog << "Size of receive buffer: " << bwr.read_size
<< ", needRead: " << needRead << ", doReceive: " << doReceive << endl;
}

// Return immediately if there is nothing to do.
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;

bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
IF_LOG_COMMANDS() {
alog << "About to read/write, write size = " << mOut.dataSize() << endl;
}
#if defined(__ANDROID__)
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0) //ioctl. 原来是ioctl通信的方式
err = NO_ERROR;
else
err = -errno;
#else
err = INVALID_OPERATION;
#endif
if (mProcess->mDriverFD <= 0) {
err = -EBADF;
}
IF_LOG_COMMANDS() {
alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
}
} while (err == -EINTR);

IF_LOG_COMMANDS() {
alog << "Our err: " << (void*)(intptr_t)err << ", write consumed: "
<< bwr.write_consumed << " (of " << mOut.dataSize()
<< "), read consumed: " << bwr.read_consumed << endl;
}

if (err >= NO_ERROR) {
if (bwr.write_consumed > 0) {
if (bwr.write_consumed < mOut.dataSize())
mOut.remove(0, bwr.write_consumed);
else {
mOut.setDataSize(0);
processPostWriteDerefs();
}
}
if (bwr.read_consumed > 0) {
mIn.setDataSize(bwr.read_consumed);
mIn.setDataPosition(0);
}
IF_LOG_COMMANDS() {
TextOutput::Bundle _b(alog);
alog << "Remaining data size: " << mOut.dataSize() << endl;
alog << "Received commands from driver: " << indent;
const void* cmds = mIn.data();
const void* end = mIn.data() + mIn.dataSize();
alog << HexDump(cmds, mIn.dataSize()) << endl;
while (cmds < end) cmds = printReturnCommand(alog, cmds);
alog << dedent;
}
return NO_ERROR;
}

return err;
}

再看看status_t IPCThreadState::executeCommand(int32_t cmd)方法

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114

status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;

switch ((uint32_t)cmd) {
...

case BR_TRANSACTION:
{
binder_transaction_data tr;
result = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(result == NO_ERROR,
"Not enough command data for brTRANSACTION");
if (result != NO_ERROR) break;

Parcel buffer;
buffer.ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);

const pid_t origPid = mCallingPid;
const uid_t origUid = mCallingUid;
const int32_t origStrictModePolicy = mStrictModePolicy;
const int32_t origTransactionBinderFlags = mLastTransactionBinderFlags;

mCallingPid = tr.sender_pid;
mCallingUid = tr.sender_euid;
mLastTransactionBinderFlags = tr.flags;

//ALOGI(">>>> TRANSACT from pid %d uid %d\n", mCallingPid, mCallingUid);

Parcel reply;
status_t error;
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BR_TRANSACTION thr " << (void*)pthread_self()
<< " / obj " << tr.target.ptr << " / code "
<< TypeCode(tr.code) << ": " << indent << buffer
<< dedent << endl
<< "Data addr = "
<< reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer)
<< ", offsets addr="
<< reinterpret_cast<const size_t*>(tr.data.ptr.offsets) << endl;
}
if (tr.target.ptr) {
// We only have a weak reference on the target object, so we must first try to
// safely acquire a strong reference before doing anything else with it.
if (reinterpret_cast<RefBase::weakref_type*>(
tr.target.ptr)->attemptIncStrong(this)) {
error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
&reply, tr.flags); //BBinder,实际上实现的是BnServiceXXX对象
reinterpret_cast<BBinder*>(tr.cookie)->decStrong(this);
} else {
error = UNKNOWN_TRANSACTION;
}

} else {
error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
}

//ALOGI("<<<< TRANSACT from pid %d restore pid %d uid %d\n",
// mCallingPid, origPid, origUid);

if ((tr.flags & TF_ONE_WAY) == 0) {
LOG_ONEWAY("Sending reply to %d!", mCallingPid);
if (error < NO_ERROR) reply.setError(error);
sendReply(reply, 0);
} else {
LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
}

mCallingPid = origPid;
mCallingUid = origUid;
mStrictModePolicy = origStrictModePolicy;
mLastTransactionBinderFlags = origTransactionBinderFlags;

IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BC_REPLY thr " << (void*)pthread_self() << " / obj "
<< tr.target.ptr << ": " << indent << reply << dedent << endl;
}

}
break;

case BR_DEAD_BINDER://收到binder驱动发来的service死掉的消息
{
BpBinder *proxy = (BpBinder*)mIn.readPointer();
proxy->sendObituary();
mOut.writeInt32(BC_DEAD_BINDER_DONE);
mOut.writePointer((uintptr_t)proxy);
} break;
...
case BR_SPAWN_LOOPER:
mProcess->spawnPooledThread(false);
break;

default:
ALOGE("*** BAD COMMAND %d received from Binder driver\n", cmd);
result = UNKNOWN_ERROR;
break;
}

if (result != NO_ERROR) {
mLastError = result;
}

return result;
}

总结一下: 上面MediaPlayerService::instantiate();大概执行的是通过ioctl与Binder通信,Binder通过handle通知到ServiceManager

再来看下线程池的内容startThreadPool

1
2
3
4
5
6
7
8
9

void ProcessState::startThreadPool()
{
AutoMutex _l(mLock);
if (!mThreadPoolStarted) {
mThreadPoolStarted = true;
spawnPooledThread(true);
}
}
1
2
3
4
5
6
7
8
9
10

void ProcessState::spawnPooledThread(bool isMain)
{
if (mThreadPoolStarted) {
String8 name = makeBinderThreadName();
ALOGV("Spawning new pooled thread, name=%s\n", name.string());
sp<Thread> t = new PoolThread(isMain);
t->run(name.string());
}
}

再来看下PoolThread类

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18

class PoolThread : public Thread
{
public:
explicit PoolThread(bool isMain)
: mIsMain(isMain)
{
}

protected:
virtual bool threadLoop()
{
IPCThreadState::self()->joinThreadPool(mIsMain);
return false;
}

const bool mIsMain;
};

又回到了joinThreadPool

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32

void IPCThreadState::joinThreadPool(bool isMain)
{
LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n", (void*)pthread_self(), getpid());

mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);

status_t result;
do {
processPendingDerefs();
// now get the next command to be processed, waiting if necessary
result = getAndExecuteCommand(); //这个方法中执行了result = talkWithDriver();

if (result < NO_ERROR && result != TIMED_OUT && result != -ECONNREFUSED && result != -EBADF) {
ALOGE("getAndExecuteCommand(fd=%d) returned unexpected error %d, aborting",
mProcess->mDriverFD, result);
abort();
}

// Let this thread exit the thread pool if it is no longer
// needed and it is not the main process thread.
if(result == TIMED_OUT && !isMain) {
break;
}
} while (result != -ECONNREFUSED && result != -EBADF);

LOG_THREADPOOL("**** THREAD %p (PID %d) IS LEAVING THE THREAD POOL err=%d\n",
(void*)pthread_self(), getpid(), result);

mOut.writeInt32(BC_EXIT_LOOPER);
talkWithDriver(false);
}

这个子线程还是通过talkWithDriver,也就是ioctl与binder驱动建立通信,所以看得出mediaPlayerService除了自己主线程joinThreadPool读取binder设备外,还通过startThreadPool新启动了一个线程读取Binder设备。

Binder是通信机制,BpBinder,BpServiceManager…这些是业务,要把这些区分开,才便于理解,Binder之所以难于理解,就在于层层封装,巧妙的把通信与业务融合在一起。

上面提到,defaultServiceManager()实际上像是初始化了BpServiceManager对象,以及建立了以BpBinder作为通信参数的通道。传递的handle为0,表示的是ServiceManager。 那么来看下ServiceManager是如何处理请求的。
service_manager.c的源代码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58

int main(int argc, char** argv)
{
struct binder_state *bs;
union selinux_callback cb;
char *driver;

if (argc > 1) {
driver = argv[1];
} else {
driver = "/dev/binder";
}

bs = binder_open(driver, 128*1024);//打开binder设备 128k
if (!bs) {
#ifdef VENDORSERVICEMANAGER
ALOGW("failed to open binder driver %s\n", driver);
while (true) {
sleep(UINT_MAX);
}
#else
ALOGE("failed to open binder driver %s\n", driver);
#endif
return -1;
}

if (binder_become_context_manager(bs)) { //成为manager
ALOGE("cannot become context manager (%s)\n", strerror(errno));
return -1;
}

cb.func_audit = audit_callback;
selinux_set_callback(SELINUX_CB_AUDIT, cb);
cb.func_log = selinux_log_callback;
selinux_set_callback(SELINUX_CB_LOG, cb);

#ifdef VENDORSERVICEMANAGER
sehandle = selinux_android_vendor_service_context_handle();
#else
sehandle = selinux_android_service_context_handle();
#endif
selinux_status_open(true);

if (sehandle == NULL) {
ALOGE("SELinux: Failed to acquire sehandle. Aborting.\n");
abort();
}

if (getcon(&service_manager_context) != 0) {
ALOGE("SELinux: Failed to acquire service_manager context. Aborting.\n");
abort();
}


binder_loop(bs, svcmgr_handler);//开启一个loop,执行客户端请求

return 0;
}

binder_open()方法要看看

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43

struct binder_state *binder_open(const char* driver, size_t mapsize)
{
struct binder_state *bs;
struct binder_version vers;

bs = malloc(sizeof(*bs));
if (!bs) {
errno = ENOMEM;
return NULL;
}

bs->fd = open(driver, O_RDWR | O_CLOEXEC);//打开binder设备
if (bs->fd < 0) {
fprintf(stderr,"binder: cannot open %s (%s)\n",
driver, strerror(errno));
goto fail_open;
}

if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
(vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
fprintf(stderr,
"binder: kernel driver version (%d) differs from user space version (%d)\n",
vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
goto fail_open;
}

bs->mapsize = mapsize;
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);//内存映射
if (bs->mapped == MAP_FAILED) {
fprintf(stderr,"binder: cannot map device (%s)\n",
strerror(errno));
goto fail_map;
}

return bs;

fail_map:
close(bs->fd);
fail_open:
free(bs);
return NULL;
}

binder_become_context_manager(bs)也要看看

1
2
3
4
5

int binder_become_context_manager(struct binder_state *bs)
{
return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);//直接调用ioctl与binder驱动通信
}

最后还在看看binder_loop(bs, svcmgr_handler);

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37

void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
struct binder_write_read bwr;
uint32_t readbuf[32];

bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;

readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(uint32_t));

for (;;) { //开启循环
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;

res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

if (res < 0) {
ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
break;
}

res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func); //收到请求交给binder_parse去处理, func是一个函数指针
if (res == 0) {
ALOGE("binder_loop: unexpected reply?!\n");
break;
}
if (res < 0) {
ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
break;
}
}
}

继续看binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func)方法

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24

int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
...
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;

bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
res = func(bs, txn, &msg, &reply);
if (txn->flags & TF_ONE_WAY) {
binder_free_buffer(bs, txn->data.ptr.buffer);
} else {
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
}
...
return r;
}

最终交给的是func来处理,func是一个函数指针,从main()方法中传递的是svcmgr_handler

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110

int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
int allow_isolated;
uint32_t dumpsys_priority;

//ALOGI("target=%p code=%d pid=%d uid=%d\n",
// (void*) txn->target.ptr, txn->code, txn->sender_pid, txn->sender_euid);

if (txn->target.ptr != BINDER_SERVICE_MANAGER)
return -1;

if (txn->code == PING_TRANSACTION)
return 0;

// Equivalent to Parcel::enforceInterface(), reading the RPC
// header with the strict mode policy mask and the interface name.
// Note that we ignore the strict_policy and don't propagate it
// further (since we do no outbound RPCs anyway).
strict_policy = bio_get_uint32(msg);
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}

if ((len != (sizeof(svcmgr_id) / 2)) ||
memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
fprintf(stderr,"invalid id %s\n", str8(s, len));
return -1;
}

if (sehandle && selinux_status_updated() > 0) {
#ifdef VENDORSERVICEMANAGER
struct selabel_handle *tmp_sehandle = selinux_android_vendor_service_context_handle();
#else
struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
#endif
if (tmp_sehandle) {
selabel_close(sehandle);
sehandle = tmp_sehandle;
}
}

switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);
if (!handle)
break;
bio_put_ref(reply, handle);
return 0;

case SVC_MGR_ADD_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
dumpsys_priority = bio_get_uint32(msg);
if (do_add_service(bs, s, len, handle, txn->sender_euid, allow_isolated, dumpsys_priority,
txn->sender_pid)) //处理addService的请求
return -1;
break;

case SVC_MGR_LIST_SERVICES: {//当前系统所有已注册的service的名字
uint32_t n = bio_get_uint32(msg);
uint32_t req_dumpsys_priority = bio_get_uint32(msg);

if (!svc_can_list(txn->sender_pid, txn->sender_euid)) {
ALOGE("list_service() uid=%d - PERMISSION DENIED\n",
txn->sender_euid);
return -1;
}
si = svclist;
// walk through the list of services n times skipping services that
// do not support the requested priority
while (si) {
if (si->dumpsys_priority & req_dumpsys_priority) {
if (n == 0) break;
n--;
}
si = si->next;
}
if (si) {
bio_put_string16(reply, si->name);
return 0;
}
return -1;
}
default:
ALOGE("unknown code %d\n", txn->code);
return -1;
}

bio_put_uint32(reply, 0);
return 0;
}

来看下do_add_service(bs, s, len, handle, txn->sender_euid, allow_isolated, dumpsys_priority,txn->sender_pid)的请求

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48

/*
 * Register the service named s (UTF-16, len characters) under the given
 * binder handle in ServiceManager's global list.
 *
 * Returns 0 on success, -1 if the request is rejected (bad arguments,
 * SELinux permission denied) or memory allocation fails.
 */
int do_add_service(struct binder_state *bs, const uint16_t *s, size_t len, uint32_t handle,
                   uid_t uid, int allow_isolated, uint32_t dumpsys_priority, pid_t spid) {
    struct svcinfo *svc;

    /* Reject a null handle and unreasonable name lengths up front. */
    if (!handle || (len == 0) || (len > 127))
        return -1;

    /* SELinux check: may this (pid, uid) register services at all? */
    if (!svc_can_register(s, len, spid, uid)) {
        ALOGE("add_service('%s',%x) uid=%d - PERMISSION DENIED\n",
              str8(s, len), handle, uid);
        return -1;
    }

    svc = find_svc(s, len);
    if (svc) {
        /* The name is already registered: release the old binder, then
         * override the entry with the new handle. */
        if (svc->handle) {
            ALOGE("add_service('%s',%x) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
                  str8(s, len), handle, uid);
            svcinfo_death(bs, svc);
        }
        svc->handle = handle;
    } else {
        /* First registration: allocate a record with room for the
         * NUL-terminated UTF-16 name appended after the struct. */
        svc = malloc(sizeof(*svc) + (len + 1) * sizeof(uint16_t));
        if (!svc) {
            ALOGE("add_service('%s',%x) uid=%d - OUT OF MEMORY\n",
                  str8(s, len), handle, uid);
            return -1;
        }
        svc->handle = handle;
        svc->len = len;
        memcpy(svc->name, s, (len + 1) * sizeof(uint16_t));
        svc->name[len] = '\0';
        svc->death.func = (void*) svcinfo_death;
        svc->death.ptr = svc;
        svc->allow_isolated = allow_isolated;
        svc->dumpsys_priority = dumpsys_priority;
        /* Link the new record onto the head of the global service list. */
        svc->next = svclist;
        svclist = svc;
    }

    /* Hold a strong reference on the handle and request a death
     * notification so the entry is cleaned up if the service dies. */
    binder_acquire(bs, handle);
    binder_link_to_death(bs, handle, &svc->death);
    return 0;
}

还是简单看看svc_can_register(s, len, spid, uid)这个方法吧

1
2
3
4
5
6
7
8
9
10
11

/*
 * Decide whether the caller (spid, uid) is allowed to register a service
 * with the given UTF-16 name.  Returns 1 if allowed, 0 otherwise.
 */
static int svc_can_register(const uint16_t *name, size_t name_len, pid_t spid, uid_t uid)
{
    /* Ordinary application UIDs may never register services. */
    if (multiuser_get_app_id(uid) >= AID_APP) {
        return 0;
    }

    /* Otherwise defer to the SELinux "add" permission lookup for this
     * service name. */
    return check_mac_perms_from_lookup(spid, uid, "add", str8(name, name_len)) ? 1 : 0;
}

总结：从上面来看，ServiceManager打开了binder设备，通过ioctl与binder驱动通信，保存了addService的信息。

  • 集中管理系统内的所有服务，施加权限控制。
  • ServiceManager支持通过字符串名字来查找对应的Service。
  • Client只需要查询ServiceManager，就可以知道对应的Server是否存活以及如何与之通信，这个非常方便。

上面分析了ServiceManager和他的client,接下来从业务的角度来看,MediaPlayerService通信层如何与client端交互的。

一个client想得到某个Service的信息,就必须与ServiceManager打交道,通过getService方法来获取Service信息,来看下IMediaDeathNotifier::getMediaPlayerService()的源码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26

// Lazily obtains — and caches in the static sMediaPlayerService — a proxy
// to the "media.player" service, polling ServiceManager every 0.5s until
// the service has been published.
// NOTE(review): this snippet omits the return-type line of the original
// (it returns the cached sp<IMediaPlayerService>) — confirm against AOSP.
IMediaDeathNotifier::getMediaPlayerService()
{
ALOGV("getMediaPlayerService");
// Serialize initialization of the shared cached proxy.
Mutex::Autolock _l(sServiceLock);
if (sMediaPlayerService == 0) {
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder;
do {
binder = sm->getService(String16("media.player")); // the name the service was registered under earlier
if (binder != 0) {
break;
}
// Not published yet: wait and retry (busy-poll loop, no timeout).
ALOGW("Media player service not published, waiting...");
usleep(500000); // 0.5 s
} while (true);

// Register a death recipient so we are notified if the remote
// media.player process dies.
if (sDeathNotifier == NULL) {
sDeathNotifier = new DeathNotifier();
}
binder->linkToDeath(sDeathNotifier);
sMediaPlayerService = interface_cast<IMediaPlayerService>(binder);// interface_cast wraps this binder into a [BpMediaPlayerService](http://www.androidos.net.cn/android/9.0.0_r8/xref/frameworks/av/media/libmedia/IMediaPlayerService.cpp)
}
ALOGE_IF(sMediaPlayerService == 0, "no media player service!?");
return sMediaPlayerService;
}

有了BpMediaPlayerService，就能够使用任何IMediaPlayerService提供的业务函数，像createMediaRecorder，createMetadataRetriever等。这些方法都调用了remote()->transact()，把数据打包交给了binder驱动，说明都是调用了BpBinder，通过对应的handle找到对应的服务端。

上面的分析中，MediaPlayerService在MediaServer进程中，这个进程有两个线程在talkWithDriver。假设其中一个线程收到消息时，最终会调用executeCommand()方法

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35

// Handles one command received from the binder driver (snippet is elided
// with "..." — only the BR_TRANSACTION path is shown).  For an incoming
// transaction it dispatches to the local BBinder named by the driver.
status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;

switch ((uint32_t)cmd) {

...
case BR_TRANSACTION:
{
...
// tr.target.ptr is the weak-ref cookie of the local target object.
if (tr.target.ptr) {
// We only have a weak reference on the target object, so we must first try to
// safely acquire a strong reference before doing anything else with it.
if (reinterpret_cast<RefBase::weakref_type*>(
tr.target.ptr)->attemptIncStrong(this)) {
// Dispatch the transaction to the local BBinder; tr.cookie holds
// its raw pointer.
error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
&reply, tr.flags);
reinterpret_cast<BBinder*>(tr.cookie)->decStrong(this);// the BBinder here is in practice the BnServiceXXX implementation object
} else {
error = UNKNOWN_TRANSACTION;
}

} else {
// No target pointer: route to the context manager object.
error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
}
...

}
break;
}
return result;
}

avatar

从类图关系中可以看出,BnMediaPlayerService实现了onTransact()方法

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62

// Server-side dispatch for IMediaPlayerService: unmarshals the request
// Parcel according to the transaction code, invokes the corresponding
// virtual business method (implemented by the concrete service subclass),
// and marshals the result into the reply Parcel.  The read order here must
// exactly mirror the write order in the BpMediaPlayerService proxy.
status_t BnMediaPlayerService::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
switch (code) {
case CREATE: {
// CHECK_INTERFACE verifies the interface token written by the proxy.
CHECK_INTERFACE(IMediaPlayerService, data, reply);
sp<IMediaPlayerClient> client =
interface_cast<IMediaPlayerClient>(data.readStrongBinder());
audio_session_t audioSessionId = (audio_session_t) data.readInt32();
sp<IMediaPlayer> player = create(client, audioSessionId);
// Return the new player as a binder object in the reply.
reply->writeStrongBinder(IInterface::asBinder(player));
return NO_ERROR;
} break;
case CREATE_MEDIA_RECORDER: {
CHECK_INTERFACE(IMediaPlayerService, data, reply);
const String16 opPackageName = data.readString16();
sp<IMediaRecorder> recorder = createMediaRecorder(opPackageName);
reply->writeStrongBinder(IInterface::asBinder(recorder));
return NO_ERROR;
} break;
case CREATE_METADATA_RETRIEVER: {
CHECK_INTERFACE(IMediaPlayerService, data, reply);
sp<IMediaMetadataRetriever> retriever = createMetadataRetriever(); // delegated to the subclass implementation
reply->writeStrongBinder(IInterface::asBinder(retriever));
return NO_ERROR;
} break;
case ADD_BATTERY_DATA: {
CHECK_INTERFACE(IMediaPlayerService, data, reply);
uint32_t params = data.readInt32();
addBatteryData(params);
return NO_ERROR;
} break;
case PULL_BATTERY_DATA: {
CHECK_INTERFACE(IMediaPlayerService, data, reply);
pullBatteryData(reply);
return NO_ERROR;
} break;
case LISTEN_FOR_REMOTE_DISPLAY: {
CHECK_INTERFACE(IMediaPlayerService, data, reply);
const String16 opPackageName = data.readString16();
sp<IRemoteDisplayClient> client(
interface_cast<IRemoteDisplayClient>(data.readStrongBinder()));
// A null client binder is answered with a null display binder.
if (client == NULL) {
reply->writeStrongBinder(NULL);
return NO_ERROR;
}
String8 iface(data.readString8());
sp<IRemoteDisplay> display(listenForRemoteDisplay(opPackageName, client, iface));
reply->writeStrongBinder(IInterface::asBinder(display));
return NO_ERROR;
} break;
case GET_CODEC_LIST: {
CHECK_INTERFACE(IMediaPlayerService, data, reply);
sp<IMediaCodecList> mcl = getCodecList();
reply->writeStrongBinder(IInterface::asBinder(mcl));
return NO_ERROR;
} break;
default:
// Unknown code: let the base class handle standard transactions.
return BBinder::onTransact(code, data, reply, flags);
}
}

看出来了吧，BnServiceXXX实现的就是与client通信的业务逻辑

来个demo,纯native实现c/s框架

aidl demo