Skip to content

Commit

Permalink
[RFC] drivers: android: Binder IPC in Rust
Browse files Browse the repository at this point in the history
A port to Rust of the Android Binder IPC mechanism.

This module is a work in progress and will be sent for review later
on, as well as separately from the Rust support.

However, it is included to show what an actual working module
written in Rust looks like.

Co-developed-by: Alex Gaynor <alex.gaynor@gmail.com>
Signed-off-by: Alex Gaynor <alex.gaynor@gmail.com>
Co-developed-by: Finn Behrens <me@kloenk.de>
Signed-off-by: Finn Behrens <me@kloenk.de>
Co-developed-by: Sven Van Asbroeck <thesven73@gmail.com>
Signed-off-by: Sven Van Asbroeck <thesven73@gmail.com>
Co-developed-by: Gary Guo <gary@garyguo.net>
Signed-off-by: Gary Guo <gary@garyguo.net>
Co-developed-by: Wayne Campbell <wcampbell1995@gmail.com>
Signed-off-by: Wayne Campbell <wcampbell1995@gmail.com>
Signed-off-by: Wedson Almeida Filho <wedsonaf@google.com>
Co-developed-by: Miguel Ojeda <ojeda@kernel.org>
Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
  • Loading branch information
Wedson Almeida Filho authored and Miguel Ojeda committed Feb 11, 2022
1 parent 3191308 commit 255b71c
Show file tree
Hide file tree
Showing 12 changed files with 3,403 additions and 13 deletions.
7 changes: 7 additions & 0 deletions drivers/android/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,13 @@ config ANDROID_BINDER_IPC
Android process, using Binder to identify, invoke and pass arguments
between said processes.

config ANDROID_BINDER_IPC_RUST
bool "Android Binder IPC Driver in Rust"
depends on MMU && RUST
default n
help
Implementation of the Binder IPC in Rust.

config ANDROID_BINDERFS
bool "Android Binderfs filesystem"
depends on ANDROID_BINDER_IPC
Expand Down
2 changes: 2 additions & 0 deletions drivers/android/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -4,3 +4,5 @@ ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o

obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += rust_binder.o
266 changes: 266 additions & 0 deletions drivers/android/allocation.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,266 @@
// SPDX-License-Identifier: GPL-2.0

use core::mem::{replace, size_of, MaybeUninit};
use kernel::{
bindings, linked_list::List, pages::Pages, prelude::*, sync::Ref, user_ptr::UserSlicePtrReader,
};

use crate::{
defs::*,
node::NodeRef,
process::{AllocationInfo, Process},
thread::{BinderError, BinderResult},
transaction::FileInfo,
};

/// A region of a process's binder buffer that has been handed out for a
/// transaction, together with the pages that back it.
///
/// Unless [`Allocation::keep_alive`] is called, the region is released back to
/// the owning process when the `Allocation` is dropped.
pub(crate) struct Allocation<'a> {
    /// Offset of this region within the process's buffer; page lookups in
    /// `iterate` are relative to this.
    pub(crate) offset: usize,
    /// Size of the region in bytes; reads/writes are bounds-checked against it.
    size: usize,
    /// Address passed to `Process::buffer_raw_free` on drop.
    // NOTE(review): presumably the address of the region within the mapped
    // buffer — confirm against `Process`.
    pub(crate) ptr: usize,
    /// Pages backing the process's buffer, indexed by absolute buffer offset
    /// (i.e. including `offset`).
    pages: Ref<[Pages<0>]>,
    /// The process that owns the buffer this region was carved from.
    pub(crate) process: &'a Process,
    /// Cleanup information (object offsets) consumed on drop to release
    /// references held by objects embedded in the buffer.
    allocation_info: Option<AllocationInfo>,
    /// Whether dropping this allocation frees the underlying region; cleared
    /// by `keep_alive`.
    free_on_drop: bool,
    /// Files attached to this buffer, drained via `take_file_list`.
    file_list: List<Box<FileInfo>>,
}

impl<'a> Allocation<'a> {
    /// Creates an allocation over the region `[offset, offset + size)` of
    /// `process`'s buffer. It is freed on drop unless `keep_alive` is called.
    pub(crate) fn new(
        process: &'a Process,
        offset: usize,
        size: usize,
        ptr: usize,
        pages: Ref<[Pages<0>]>,
    ) -> Self {
        Self {
            process,
            offset,
            size,
            ptr,
            pages,
            allocation_info: None,
            free_on_drop: true,
            file_list: List::new(),
        }
    }

    /// Takes ownership of the attached file list, leaving an empty one behind.
    pub(crate) fn take_file_list(&mut self) -> List<Box<FileInfo>> {
        replace(&mut self.file_list, List::new())
    }

    /// Appends `file` to the list of files attached to this buffer.
    pub(crate) fn add_file_info(&mut self, file: Box<FileInfo>) {
        self.file_list.push_back(file);
    }

    /// Invokes `cb` once per page-bounded chunk of the range
    /// `[offset, offset + size)` of this allocation, passing the page, the
    /// offset within that page, and the chunk length.
    ///
    /// Returns `EINVAL` if the range is not fully within the allocation;
    /// otherwise propagates the first error from `cb`.
    fn iterate<T>(&self, mut offset: usize, mut size: usize, mut cb: T) -> Result
    where
        T: FnMut(&Pages<0>, usize, usize) -> Result,
    {
        // Check that the request is within the buffer.
        if offset.checked_add(size).ok_or(Error::EINVAL)? > self.size {
            return Err(Error::EINVAL);
        }
        // Translate to an absolute buffer offset, then split into a page
        // index and an offset within that page.
        offset += self.offset;
        let mut page_index = offset >> bindings::PAGE_SHIFT;
        offset &= (1 << bindings::PAGE_SHIFT) - 1;
        while size > 0 {
            // Process at most the remainder of the current page per iteration.
            let available = core::cmp::min(size, (1 << bindings::PAGE_SHIFT) as usize - offset);
            cb(&self.pages[page_index], offset, available)?;
            size -= available;
            page_index += 1;
            // Subsequent pages are consumed from their start.
            offset = 0;
        }
        Ok(())
    }

    /// Copies `size` bytes from the user-space `reader` into this allocation,
    /// starting at `offset`, splitting the copy across page boundaries.
    pub(crate) fn copy_into(
        &self,
        reader: &mut UserSlicePtrReader,
        offset: usize,
        size: usize,
    ) -> Result {
        self.iterate(offset, size, |page, offset, to_copy| {
            page.copy_into_page(reader, offset, to_copy)
        })
    }

    /// Reads a value of type `T` from the allocation at `offset`.
    ///
    /// The value may straddle page boundaries, so it is assembled piecewise
    /// into a stack-allocated `MaybeUninit` before being returned.
    pub(crate) fn read<T>(&self, offset: usize) -> Result<T> {
        let mut out = MaybeUninit::<T>::uninit();
        // Number of bytes of `out` written so far.
        let mut out_offset = 0;
        self.iterate(offset, size_of::<T>(), |page, offset, to_copy| {
            // SAFETY: Data buffer is allocated on the stack.
            unsafe {
                page.read(
                    (out.as_mut_ptr() as *mut u8).add(out_offset),
                    offset,
                    to_copy,
                )
            }?;
            out_offset += to_copy;
            Ok(())
        })?;
        // SAFETY: We just initialised the data.
        Ok(unsafe { out.assume_init() })
    }

    /// Writes `obj` into the allocation at `offset`, splitting the copy
    /// across page boundaries as needed.
    pub(crate) fn write<T>(&self, offset: usize, obj: &T) -> Result {
        // Number of bytes of `obj` copied so far.
        let mut obj_offset = 0;
        self.iterate(offset, size_of::<T>(), |page, offset, to_copy| {
            // SAFETY: The sum of `offset` and `to_copy` is bounded by the size of T.
            let obj_ptr = unsafe { (obj as *const T as *const u8).add(obj_offset) };
            // SAFETY: We have a reference to the object, so the pointer is valid.
            unsafe { page.write(obj_ptr, offset, to_copy) }?;
            obj_offset += to_copy;
            Ok(())
        })
    }

    /// Consumes the allocation without freeing the underlying region:
    /// ownership of the region (and its cleanup info) is handed back to the
    /// owning process so it can be freed later.
    pub(crate) fn keep_alive(mut self) {
        self.process
            .buffer_make_freeable(self.offset, self.allocation_info.take());
        // Drop still runs, but returns early once this is cleared.
        self.free_on_drop = false;
    }

    /// Records cleanup information for objects embedded in this buffer; it is
    /// consumed by `Drop` (or transferred by `keep_alive`).
    pub(crate) fn set_info(&mut self, info: AllocationInfo) {
        self.allocation_info = Some(info);
    }
}

impl Drop for Allocation<'_> {
fn drop(&mut self) {
if !self.free_on_drop {
return;
}

if let Some(info) = &self.allocation_info {
let offsets = info.offsets.clone();
let view = AllocationView::new(self, offsets.start);
for i in offsets.step_by(size_of::<usize>()) {
if view.cleanup_object(i).is_err() {
pr_warn!("Error cleaning up object at offset {}\n", i)
}
}
}

self.process.buffer_raw_free(self.ptr);
}
}

/// A bounds-limited view over an [`Allocation`].
///
/// Reads and writes through the view must end at or before `limit`; this is
/// used to restrict object access to the already-processed prefix of a buffer.
pub(crate) struct AllocationView<'a, 'b> {
    /// The underlying allocation being viewed.
    pub(crate) alloc: &'a mut Allocation<'b>,
    /// Exclusive upper bound (in bytes) for reads/writes through this view.
    limit: usize,
}

impl<'a, 'b> AllocationView<'a, 'b> {
    /// Creates a view over `alloc` whose reads/writes must stay below `limit`.
    pub(crate) fn new(alloc: &'a mut Allocation<'b>, limit: usize) -> Self {
        AllocationView { alloc, limit }
    }

    /// Reads a `T` at `offset`, failing with `EINVAL` if any part of the value
    /// would lie at or beyond `limit`.
    pub(crate) fn read<T>(&self, offset: usize) -> Result<T> {
        if offset.checked_add(size_of::<T>()).ok_or(Error::EINVAL)? > self.limit {
            return Err(Error::EINVAL);
        }
        self.alloc.read(offset)
    }

    /// Writes `obj` at `offset`, failing with `EINVAL` if any part of the
    /// value would land at or beyond `limit`.
    pub(crate) fn write<T>(&self, offset: usize, obj: &T) -> Result {
        if offset.checked_add(size_of::<T>()).ok_or(Error::EINVAL)? > self.limit {
            return Err(Error::EINVAL);
        }
        self.alloc.write(offset, obj)
    }

    /// Rewrites the `flat_binder_object` at `offset` for the receiving
    /// process.
    ///
    /// `get_node` resolves the object (in the sender's context) to a node
    /// reference. If the receiving process owns the node, the object is
    /// rewritten in place as a `BINDER_TYPE_{WEAK_}BINDER` carrying the node's
    /// id/cookie and the node's user refcount is incremented. Otherwise, a
    /// handle to the node is inserted into the receiving process and the
    /// object is rewritten as a `BINDER_TYPE_{WEAK_}HANDLE`.
    pub(crate) fn transfer_binder_object<T>(
        &self,
        offset: usize,
        strong: bool,
        get_node: T,
    ) -> BinderResult
    where
        T: FnOnce(&bindings::flat_binder_object) -> BinderResult<NodeRef>,
    {
        // TODO: Do we want this function to take a &mut self?
        let obj = self.read::<bindings::flat_binder_object>(offset)?;
        let node_ref = get_node(&obj)?;

        if core::ptr::eq(&*node_ref.node.owner, self.alloc.process) {
            // The receiving process is the owner of the node, so send it a binder object (instead
            // of a handle).
            let (ptr, cookie) = node_ref.node.get_id();
            let newobj = bindings::flat_binder_object {
                hdr: bindings::binder_object_header {
                    type_: if strong {
                        BINDER_TYPE_BINDER
                    } else {
                        BINDER_TYPE_WEAK_BINDER
                    },
                },
                flags: obj.flags,
                __bindgen_anon_1: bindings::flat_binder_object__bindgen_ty_1 { binder: ptr as _ },
                cookie: cookie as _,
            };
            self.write(offset, &newobj)?;

            // Increment the user ref count on the node. It will be decremented as part of the
            // destruction of the buffer, when we see a binder or weak-binder object.
            node_ref.node.update_refcount(true, strong);
        } else {
            // The receiving process is different from the owner, so we need to insert a handle to
            // the binder object.
            let handle = self
                .alloc
                .process
                .insert_or_update_handle(node_ref, false)?;

            let newobj = bindings::flat_binder_object {
                hdr: bindings::binder_object_header {
                    type_: if strong {
                        BINDER_TYPE_HANDLE
                    } else {
                        BINDER_TYPE_WEAK_HANDLE
                    },
                },
                flags: obj.flags,
                // TODO: To avoid padding, we write to `binder` instead of `handle` here. We need a
                // better solution though.
                __bindgen_anon_1: bindings::flat_binder_object__bindgen_ty_1 {
                    binder: handle as _,
                },
                ..bindings::flat_binder_object::default()
            };
            if self.write(offset, &newobj).is_err() {
                // Decrement ref count on the handle we just created.
                let _ = self.alloc.process.update_ref(handle, false, strong);
                return Err(BinderError::new_failed());
            }
        }
        Ok(())
    }

    /// Releases the reference held by the binder object whose buffer offset is
    /// stored at `index_offset`; called while freeing a buffer.
    ///
    /// Unrecognised object types are ignored (returns `Ok`).
    fn cleanup_object(&self, index_offset: usize) -> Result {
        // The slot at `index_offset` holds the offset of the object itself.
        let offset = self.alloc.read(index_offset)?;
        let header = self.read::<bindings::binder_object_header>(offset)?;
        // TODO: Handle other types.
        match header.type_ {
            BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => {
                let obj = self.read::<bindings::flat_binder_object>(offset)?;
                let strong = header.type_ == BINDER_TYPE_BINDER;
                // SAFETY: The type is `BINDER_TYPE_{WEAK_}BINDER`, so the `binder` field is
                // populated.
                let ptr = unsafe { obj.__bindgen_anon_1.binder } as usize;
                let cookie = obj.cookie as usize;
                self.alloc.process.update_node(ptr, cookie, strong, false);
                Ok(())
            }
            BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => {
                let obj = self.read::<bindings::flat_binder_object>(offset)?;
                let strong = header.type_ == BINDER_TYPE_HANDLE;
                // SAFETY: The type is `BINDER_TYPE_{WEAK_}HANDLE`, so the `handle` field is
                // populated.
                let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
                self.alloc.process.update_ref(handle, false, strong)
            }
            _ => Ok(()),
        }
    }
}
80 changes: 80 additions & 0 deletions drivers/android/context.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
// SPDX-License-Identifier: GPL-2.0

use kernel::{
bindings,
prelude::*,
security,
sync::{Mutex, Ref, UniqueRef},
};

use crate::{
node::NodeRef,
thread::{BinderError, BinderResult},
};

/// Mutable state behind [`Context::manager`]: the currently registered
/// context-manager node, if any, and the uid that registered it.
struct Manager {
    /// The node registered as the context manager, if any.
    node: Option<NodeRef>,
    /// Uid recorded when the manager was first set; later registrations must
    /// present the same uid (see `Context::set_manager_node`).
    uid: Option<bindings::kuid_t>,
}

/// Binder context: tracks the registered context-manager node.
pub(crate) struct Context {
    /// Context-manager state; the mutex is initialised in `Context::new`.
    manager: Mutex<Manager>,
}

// SAFETY: The only state in `Context` is behind the `manager` mutex.
// NOTE(review): `Manager` holds a `NodeRef`, which is presumably what stops
// the auto traits from being derived — confirm that mutex-guarded access is
// sufficient for these impls to be sound.
#[allow(clippy::non_send_fields_in_send_ty)]
unsafe impl Send for Context {}
unsafe impl Sync for Context {}

impl Context {
    /// Allocates a new context with no manager registered.
    ///
    /// The `manager` mutex is created uninitialised and then initialised via
    /// `mutex_init!` once the struct is pinned — the two steps must stay in
    /// this order.
    pub(crate) fn new() -> Result<Ref<Self>> {
        let mut ctx = Pin::from(UniqueRef::try_new(Self {
            // SAFETY: Init is called below.
            manager: unsafe {
                Mutex::new(Manager {
                    node: None,
                    uid: None,
                })
            },
        })?);

        // SAFETY: `manager` is also pinned when `ctx` is.
        let manager = unsafe { ctx.as_mut().map_unchecked_mut(|c| &mut c.manager) };
        kernel::mutex_init!(manager, "Context::manager");

        Ok(ctx.into())
    }

    /// Registers `node_ref` as the context manager.
    ///
    /// Fails with `EBUSY` if a manager is already set, propagates any security
    /// (LSM) denial, and fails with `EPERM` if a previously recorded manager
    /// uid does not match the caller's.
    pub(crate) fn set_manager_node(&self, node_ref: NodeRef) -> Result {
        let mut manager = self.manager.lock();
        if manager.node.is_some() {
            return Err(Error::EBUSY);
        }
        security::binder_set_context_mgr(&node_ref.node.owner.cred)?;

        // TODO: Get the actual caller id.
        let caller_uid = bindings::kuid_t::default();
        if let Some(ref uid) = manager.uid {
            if uid.val != caller_uid.val {
                return Err(Error::EPERM);
            }
        }

        manager.node = Some(node_ref);
        manager.uid = Some(caller_uid);
        Ok(())
    }

    /// Clears the registered manager node, dropping the node reference
    /// outside the lock. The recorded uid is kept.
    pub(crate) fn unset_manager_node(&self) {
        let node_ref = self.manager.lock().node.take();
        drop(node_ref);
    }

    /// Returns a (strong or weak) clone of the manager node reference, or a
    /// "dead" binder error if no manager is registered.
    pub(crate) fn get_manager_node(&self, strong: bool) -> BinderResult<NodeRef> {
        self.manager
            .lock()
            .node
            .as_ref()
            .ok_or_else(BinderError::new_dead)?
            .clone(strong)
    }
}
Loading

0 comments on commit 255b71c

Please sign in to comment.