Commit 6d4bd1f

Preview of Binder IPC driver in Rust.
1 parent 7ed5aa1 commit 6d4bd1f

18 files changed: +3910 −14 lines

drivers/android/Kconfig

Lines changed: 7 additions & 0 deletions
@@ -20,6 +20,13 @@ config ANDROID_BINDER_IPC
 	  Android process, using Binder to identify, invoke and pass arguments
 	  between said processes.
 
+config ANDROID_BINDER_IPC_RUST
+	bool "Android Binder IPC Driver in Rust"
+	depends on MMU && RUST
+	default n
+	help
+	  Implementation of the Binder IPC in Rust.
+
 config ANDROID_BINDERFS
 	bool "Android Binderfs filesystem"
 	depends on ANDROID_BINDER_IPC
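
Note: as the `depends on MMU && RUST` line suggests, the new option only becomes visible on kernels built with Rust support. A minimal configuration fragment that enables it might look like the following (option names taken from the dependencies above; exact requirements may differ on other trees):

CONFIG_MMU=y
CONFIG_RUST=y
CONFIG_ANDROID_BINDER_IPC_RUST=y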

drivers/android/Makefile

Lines changed: 2 additions & 0 deletions
@@ -4,3 +4,5 @@ ccflags-y += -I$(src)			# needed for trace events
 obj-$(CONFIG_ANDROID_BINDERFS)		+= binderfs.o
 obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o binder_alloc.o
 obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST)	+= binder_alloc_selftest.o
+
+obj-$(CONFIG_ANDROID_BINDER_IPC_RUST)	+= rust_binder.o

drivers/android/allocation.rs

Lines changed: 253 additions & 0 deletions
@@ -0,0 +1,253 @@ (new file)

// SPDX-License-Identifier: GPL-2.0

use alloc::sync::Arc;
use core::mem::{size_of, MaybeUninit};
use kernel::{bindings, prelude::*, user_ptr::UserSlicePtrReader, Error};

use crate::{
    node::NodeRef,
    process::{AllocationInfo, Process},
    thread::{BinderError, BinderResult},
    Pages,
};

pub(crate) struct Allocation<'a> {
    pub(crate) offset: usize,
    size: usize,
    pub(crate) ptr: usize,
    pages: Arc<[Pages<0>]>,
    pub(crate) process: &'a Process,
    allocation_info: Option<AllocationInfo>,
    free_on_drop: bool,
}

impl<'a> Allocation<'a> {
    pub(crate) fn new(
        process: &'a Process,
        offset: usize,
        size: usize,
        ptr: usize,
        pages: Arc<[Pages<0>]>,
    ) -> Self {
        Self {
            process,
            offset,
            size,
            ptr,
            pages,
            allocation_info: None,
            free_on_drop: true,
        }
    }

    fn iterate<T>(&self, mut offset: usize, mut size: usize, mut cb: T) -> KernelResult
    where
        T: FnMut(&Pages<0>, usize, usize) -> KernelResult,
    {
        // Check that the request is within the buffer.
        if offset.checked_add(size).ok_or(Error::EINVAL)? > self.size {
            return Err(Error::EINVAL);
        }
        offset += self.offset;
        let mut page_index = offset >> bindings::PAGE_SHIFT;
        offset &= (1 << bindings::PAGE_SHIFT) - 1;
        while size > 0 {
            let available = core::cmp::min(size, (1 << bindings::PAGE_SHIFT) as usize - offset);
            cb(&self.pages[page_index], offset, available)?;
            size -= available;
            page_index += 1;
            offset = 0;
        }
        Ok(())
    }

    pub(crate) fn copy_into(
        &self,
        reader: &mut UserSlicePtrReader,
        offset: usize,
        size: usize,
    ) -> KernelResult {
        self.iterate(offset, size, |page, offset, to_copy| {
            page.copy_into_page(reader, offset, to_copy)
        })
    }

    pub(crate) fn read<T>(&self, offset: usize) -> KernelResult<T> {
        let mut out = MaybeUninit::<T>::uninit();
        let mut out_offset = 0;
        self.iterate(offset, size_of::<T>(), |page, offset, to_copy| {
            // SAFETY: Data buffer is allocated on the stack.
            unsafe {
                page.read(
                    (out.as_mut_ptr() as *mut u8).add(out_offset),
                    offset,
                    to_copy,
                )
            };
            out_offset += to_copy;
            Ok(())
        })?;
        // SAFETY: We just initialised the data.
        Ok(unsafe { out.assume_init() })
    }

    pub(crate) fn write<T>(&self, offset: usize, obj: &T) -> KernelResult {
        let mut obj_offset = 0;
        self.iterate(offset, size_of::<T>(), |page, offset, to_copy| {
            // SAFETY: The sum of `offset` and `to_copy` is bounded by the size of T.
            let obj_ptr = unsafe { (obj as *const T as *const u8).add(obj_offset) };
            // SAFETY: We have a reference to the object, so the pointer is valid.
            unsafe { page.write(obj_ptr, offset, to_copy) };
            obj_offset += to_copy;
            Ok(())
        })
    }

    pub(crate) fn keep_alive(mut self) {
        self.process
            .buffer_make_freeable(self.offset, self.allocation_info.take());
        self.free_on_drop = false;
    }

    pub(crate) fn set_info(&mut self, info: AllocationInfo) {
        self.allocation_info = Some(info);
    }

    fn cleanup_object(&self, index_offset: usize, view: &AllocationView) -> KernelResult {
        let offset = self.read(index_offset)?;
        let header = view.read::<bindings::binder_object_header>(offset)?;
        // TODO: Handle other types.
        match header.type_ {
            bindings::BINDER_TYPE_WEAK_BINDER | bindings::BINDER_TYPE_BINDER => {
                let obj = view.read::<bindings::flat_binder_object>(offset)?;
                let strong = header.type_ == bindings::BINDER_TYPE_BINDER;
                // SAFETY: The type is `BINDER_TYPE_{WEAK_}BINDER`, so the `binder` field is
                // populated.
                let ptr = unsafe { obj.__bindgen_anon_1.binder } as usize;
                let cookie = obj.cookie as usize;
                self.process.update_node(ptr, cookie, strong, false);
                Ok(())
            }
            bindings::BINDER_TYPE_WEAK_HANDLE | bindings::BINDER_TYPE_HANDLE => {
                let obj = view.read::<bindings::flat_binder_object>(offset)?;
                let strong = header.type_ == bindings::BINDER_TYPE_HANDLE;
                // SAFETY: The type is `BINDER_TYPE_{WEAK_}HANDLE`, so the `handle` field is
                // populated.
                let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
                self.process.update_ref(handle, false, strong)
            }
            _ => Ok(()),
        }
    }
}

impl Drop for Allocation<'_> {
    fn drop(&mut self) {
        if !self.free_on_drop {
            return;
        }

        if let Some(info) = &self.allocation_info {
            let view = AllocationView::new(self, info.offsets.start);
            for i in info.offsets.clone().step_by(size_of::<usize>()) {
                if self.cleanup_object(i, &view).is_err() {
                    println!("Error cleaning up object at offset {}", i)
                }
            }
        }

        self.process.buffer_raw_free(self.ptr);
    }
}

pub(crate) struct AllocationView<'a> {
    alloc: &'a Allocation<'a>,
    limit: usize,
}

impl<'a> AllocationView<'a> {
    pub(crate) fn new(alloc: &'a Allocation, limit: usize) -> Self {
        AllocationView { alloc, limit }
    }

    pub fn read<T>(&self, offset: usize) -> KernelResult<T> {
        if offset.checked_add(size_of::<T>()).ok_or(Error::EINVAL)? > self.limit {
            return Err(Error::EINVAL);
        }
        self.alloc.read(offset)
    }

    pub fn write<T>(&self, offset: usize, obj: &T) -> KernelResult {
        if offset.checked_add(size_of::<T>()).ok_or(Error::EINVAL)? > self.limit {
            return Err(Error::EINVAL);
        }
        self.alloc.write(offset, obj)
    }

    pub(crate) fn transfer_binder_object<T>(
        &self,
        offset: usize,
        strong: bool,
        get_node: T,
    ) -> BinderResult
    where
        T: FnOnce(&bindings::flat_binder_object) -> BinderResult<NodeRef>,
    {
        // TODO: Do we want this function to take a &mut self?
        let obj = self.read::<bindings::flat_binder_object>(offset)?;
        let node_ref = get_node(&obj)?;

        if core::ptr::eq(&*node_ref.node.owner, self.alloc.process) {
            // The receiving process is the owner of the node, so send it a binder object (instead
            // of a handle).
            let (ptr, cookie) = node_ref.node.get_id();
            let newobj = bindings::flat_binder_object {
                hdr: bindings::binder_object_header {
                    type_: if strong {
                        bindings::BINDER_TYPE_BINDER
                    } else {
                        bindings::BINDER_TYPE_WEAK_BINDER
                    },
                },
                flags: obj.flags,
                __bindgen_anon_1: bindings::flat_binder_object__bindgen_ty_1 { binder: ptr as _ },
                cookie: cookie as _,
            };
            self.write(offset, &newobj)?;

            // Increment the user ref count on the node. It will be decremented as part of the
            // destruction of the buffer, when we see a binder or weak-binder object.
            node_ref.node.update_refcount(true, strong);
        } else {
            // The receiving process is different from the owner, so we need to insert a handle to
            // the binder object.
            let handle = self
                .alloc
                .process
                .insert_or_update_handle(node_ref, false)?;

            let newobj = bindings::flat_binder_object {
                hdr: bindings::binder_object_header {
                    type_: if strong {
                        bindings::BINDER_TYPE_HANDLE
                    } else {
                        bindings::BINDER_TYPE_WEAK_HANDLE
                    },
                },
                flags: obj.flags,
                // TODO: To avoid padding, we write to `binder` instead of `handle` here. We need a
                // better solution though.
                __bindgen_anon_1: bindings::flat_binder_object__bindgen_ty_1 {
                    binder: handle as _,
                },
                ..bindings::flat_binder_object::default()
            };
            if self.write(offset, &newobj).is_err() {
                // Decrement ref count on the handle we just created.
                let _ = self.alloc.process.update_ref(handle, false, strong);
                return Err(BinderError::new_failed());
            }
        }
        Ok(())
    }
}
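
Note: the heart of this file is `Allocation::iterate`, which splits an (offset, size) request into per-page chunks of the mapped buffer so that `copy_into`, `read` and `write` never cross a page boundary in a single call. Below is a minimal userspace sketch of that page-walk arithmetic, with a hard-coded 4 KiB page size and a plain closure standing in for the kernel's `Pages` type; it illustrates the technique and is not code from this commit.

// Standalone model of Allocation::iterate's page walk (userspace only).
// PAGE_SHIFT is assumed to be 12 (4 KiB pages) for illustration.
const PAGE_SHIFT: usize = 12;
const PAGE_SIZE: usize = 1 << PAGE_SHIFT;

/// Calls `cb(page_index, offset_in_page, len)` for each page-sized chunk of the
/// byte range `[offset, offset + size)` within a buffer that starts `buf_offset`
/// bytes into the mapping and is `buf_size` bytes long.
fn iterate(
    buf_offset: usize,
    buf_size: usize,
    mut offset: usize,
    mut size: usize,
    mut cb: impl FnMut(usize, usize, usize),
) -> Result<(), &'static str> {
    // Reject ranges that overflow or fall outside the buffer.
    if offset.checked_add(size).ok_or("overflow")? > buf_size {
        return Err("out of bounds");
    }
    offset += buf_offset;
    let mut page_index = offset >> PAGE_SHIFT;
    offset &= PAGE_SIZE - 1;
    while size > 0 {
        // Never hand out more than what remains of the current page.
        let available = core::cmp::min(size, PAGE_SIZE - offset);
        cb(page_index, offset, available);
        size -= available;
        page_index += 1;
        offset = 0; // Subsequent pages are consumed from their start.
    }
    Ok(())
}

fn main() {
    // A 5000-byte access starting 100 bytes into a buffer that itself starts
    // 8000 bytes into the mapping: it spans pages 1, 2 and 3.
    iterate(8000, 16384, 100, 5000, |page, off, len| {
        println!("page {page}: {len} bytes at offset {off}");
    })
    .unwrap();
}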

drivers/android/context.rs

Lines changed: 80 additions & 0 deletions
@@ -0,0 +1,80 @@ (new file)

// SPDX-License-Identifier: GPL-2.0

extern crate alloc;

use alloc::sync::Arc;
use core::pin::Pin;
use kernel::{bindings, prelude::*, sync::Mutex, Error};

use crate::{
    node::NodeRef,
    thread::{BinderError, BinderResult},
};

struct Manager {
    node: Option<NodeRef>,
    uid: Option<bindings::kuid_t>,
}

pub(crate) struct Context {
    manager: Mutex<Manager>,
}

unsafe impl Send for Context {}
unsafe impl Sync for Context {}

impl Context {
    pub(crate) fn new() -> KernelResult<Pin<Arc<Self>>> {
        let mut ctx_ref = Arc::try_new(Self {
            // SAFETY: Init is called below.
            manager: unsafe {
                Mutex::new(Manager {
                    node: None,
                    uid: None,
                })
            },
        })?;
        let ctx = Arc::get_mut(&mut ctx_ref).unwrap();

        // SAFETY: `manager` is also pinned when `ctx` is.
        let manager = unsafe { Pin::new_unchecked(&ctx.manager) };
        kernel::mutex_init!(manager, "Context::manager");

        // SAFETY: `ctx_ref` is pinned behind the `Arc` reference.
        Ok(unsafe { Pin::new_unchecked(ctx_ref) })
    }

    pub(crate) fn set_manager_node(&self, node_ref: NodeRef) -> KernelResult {
        let mut manager = self.manager.lock();
        if manager.node.is_some() {
            return Err(Error::EBUSY);
        }
        // TODO: Call security_binder_set_context_mgr.

        // TODO: Get the actual caller id.
        let caller_uid = bindings::kuid_t::default();
        if let Some(ref uid) = manager.uid {
            if uid.val != caller_uid.val {
                return Err(Error::EPERM);
            }
        }

        manager.node = Some(node_ref);
        manager.uid = Some(caller_uid);
        Ok(())
    }

    pub(crate) fn unset_manager_node(&self) {
        let node_ref = self.manager.lock().node.take();
        drop(node_ref);
    }

    pub(crate) fn get_manager_node(&self, strong: bool) -> BinderResult<NodeRef> {
        self.manager
            .lock()
            .node
            .as_ref()
            .ok_or_else(BinderError::new_dead)?
            .clone(strong)
    }
}
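
Note: `Context` enforces that at most one binder context manager exists at a time and that, once a uid has claimed the role, later claims must come from the same uid (the TODOs show the caller uid and the security hook are not wired up yet in this preview). A small userspace model of that invariant, using `std::sync::Mutex` and plain integers in place of `NodeRef` and `kuid_t`, is sketched below for illustration; it is not code from this commit.

// Standalone model of Context's manager bookkeeping (userspace only).
use std::sync::Mutex;

struct Manager {
    node: Option<u32>, // stand-in for NodeRef
    uid: Option<u32>,  // stand-in for bindings::kuid_t
}

struct Context {
    manager: Mutex<Manager>,
}

#[derive(Debug, PartialEq)]
enum Error {
    Busy,
    Perm,
}

impl Context {
    fn new() -> Self {
        Self {
            manager: Mutex::new(Manager { node: None, uid: None }),
        }
    }

    /// Registers `node` as the context manager. Fails with `Busy` if a manager
    /// is already set, and with `Perm` if a different uid registered one before.
    fn set_manager_node(&self, node: u32, caller_uid: u32) -> Result<(), Error> {
        let mut manager = self.manager.lock().unwrap();
        if manager.node.is_some() {
            return Err(Error::Busy);
        }
        if let Some(uid) = manager.uid {
            if uid != caller_uid {
                return Err(Error::Perm);
            }
        }
        manager.node = Some(node);
        manager.uid = Some(caller_uid);
        Ok(())
    }

    fn unset_manager_node(&self) {
        // Drops the node but keeps the uid, so the role stays tied to it.
        self.manager.lock().unwrap().node.take();
    }
}

fn main() {
    let ctx = Context::new();
    assert!(ctx.set_manager_node(1, 1000).is_ok());
    assert_eq!(ctx.set_manager_node(2, 1000), Err(Error::Busy));
    ctx.unset_manager_node();
    // The uid sticks even after the node is dropped, so another uid is rejected.
    assert_eq!(ctx.set_manager_node(3, 2000), Err(Error::Perm));
    assert!(ctx.set_manager_node(3, 1000).is_ok());
}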
