//! IPC sessions.
//!
//! A session is a one-to-one IPC channel between a client and a server. The
//! client sends requests through its [ClientSession]; the server waits on its
//! [ServerSession], receives each request and replies to it. Once every
//! ServerSession has been dropped, pending and future requests fail with
//! `PortRemoteDead`.
use crate::scheduler;
use alloc::vec::Vec;
use alloc::sync::{Arc, Weak};
use crate::sync::SpinLock;
use crate::error::UserspaceError;
use crate::event::Waitable;
use crate::process::ThreadStruct;
use crate::sync::MutexGuard;
use core::convert::TryInto;
use core::sync::atomic::{AtomicUsize, Ordering};
use core::slice;
use crate::paging::{PAGE_SIZE, MappingAccessRights, process_memory::ProcessMemory};
use crate::paging::process_memory::QueryMemory;
use crate::paging::mapping::MappingFrames;
use crate::mem::{UserSpacePtr, UserSpacePtrMut, VirtualAddress};
use bit_field::BitField;
use crate::error::KernelError;
use crate::checks::check_lower_than_usize;
use sunrise_libkern::MemoryType;
use sunrise_libutils::align_up;
use failure::Backtrace;
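/// Request queue of a session: the request currently being serviced and the
/// requests still waiting to be received by the server.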
#[derive(Debug)]
struct SessionRequests {
/// Request currently being serviced by the server, if any.
active_request: Option<Request>,
/// Requests sent by clients but not yet picked up by a `receive` call.
incoming_requests: Vec<Request>,
}
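/// Shared part of a session, referenced by both the [ClientSession] and the
/// [ServerSession] endpoints.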
#[derive(Debug)]
struct Session {
/// Active and pending requests.
internal: SpinLock<SessionRequests>,
/// Threads that registered on the [ServerSession] and are waiting for it to
/// be signaled.
accepters: SpinLock<Vec<Weak<ThreadStruct>>>,
/// Number of live [ServerSession] handles. When it drops to 0, every pending
/// request is answered with `PortRemoteDead`.
servercount: AtomicUsize,
}
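/// Client endpoint of a session. Cloning it shares the same underlying
/// session.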
#[derive(Debug, Clone)]
pub struct ClientSession(Arc<Session>);
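/// Server endpoint of a session. The number of live `ServerSession`s is
/// tracked in `Session::servercount` so clients can detect a dead server.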
#[derive(Debug)]
pub struct ServerSession(Arc<Session>);
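/// Cloning a ServerSession increments the server refcount of the underlying
/// session.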
impl Clone for ServerSession {
fn clone(&self) -> Self {
assert!(self.0.servercount.fetch_add(1, Ordering::SeqCst) != usize::max_value(), "Overflow when incrementing servercount");
ServerSession(self.0.clone())
}
}
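/// Dropping a ServerSession decrements the server refcount. When the last one
/// goes away, every pending request is answered with `PortRemoteDead` and its
/// sender is rescheduled.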
impl Drop for ServerSession {
fn drop(&mut self) {
let count = self.0.servercount.fetch_sub(1, Ordering::SeqCst);
assert!(count != 0, "Underflow when decrementing servercount");
if count == 1 {
debug!("Last ServerSession dropped");
let mut internal = self.0.internal.lock();
if let Some(request) = internal.active_request.take() {
*request.answered.lock() = Some(Err(UserspaceError::PortRemoteDead));
scheduler::add_to_schedule_queue(request.sender);
}
for request in internal.incoming_requests.drain(..) {
*request.answered.lock() = Some(Err(UserspaceError::PortRemoteDead));
scheduler::add_to_schedule_queue(request.sender.clone());
}
}
}
}
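// Packed header found in the first 8 bytes of an IPC message: the message
// type, the number of X/A/B/W descriptors, the size (in 32-bit words) of the
// raw data section, the C descriptor flags, and whether a handle descriptor
// follows.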
bitfield! {
pub struct MsgPackedHdr(u64);
impl Debug;
u16, ty, _: 15, 0;
u8, num_x_descriptors, set_num_x_descriptors: 19, 16;
u8, num_a_descriptors, set_num_a_descriptors: 23, 20;
u8, num_b_descriptors, set_num_b_descriptors: 27, 24;
u8, num_w_descriptors, set_num_w_descriptors: 31, 28;
u16, raw_section_size, set_raw_section_size: 41, 32;
u8, c_descriptor_flags, set_c_descriptor_flags: 45, 42;
enable_handle_descriptor, set_enable_handle_descriptor: 63;
}
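// Optional handle descriptor header: whether the sender's PID is transmitted,
// and how many handles are copied or moved along with the message.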
bitfield! {
pub struct HandleDescriptorHeader(u32);
impl Debug;
send_pid, set_send_pid: 0;
u8, num_copy_handles, set_num_copy_handles: 4, 1;
u8, num_move_handles, set_num_move_handles: 8, 5;
}
impl Session {
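/// Returns a client endpoint to this session.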
fn client(this: Arc<Self>) -> ClientSession {
ClientSession(this)
}
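/// Returns a server endpoint to this session, incrementing the server
/// refcount.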
fn server(this: Arc<Self>) -> ServerSession {
this.servercount.fetch_add(1, Ordering::SeqCst);
ServerSession(this)
}
}
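/// Creates a new session and returns its server and client endpoints.
///
/// A rough usage sketch (not a doctest: the two halves run in different
/// threads, and `req`, `resp` and `answer` stand for hypothetical user-space
/// buffers):
///
/// ```rust,ignore
/// let (server, client) = new();
/// // Client thread: blocks until the server replies, or fails with
/// // PortRemoteDead once every ServerSession has been dropped.
/// client.send_request(req)?;
/// // Server thread: once the session is signaled, receive then reply.
/// server.receive(resp, false)?;
/// server.reply(answer)?;
/// ```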
pub fn new() -> (ServerSession, ClientSession) {
let sess = Arc::new(Session {
internal: SpinLock::new(SessionRequests {
incoming_requests: Vec::new(),
active_request: None
}),
accepters: SpinLock::new(Vec::new()),
servercount: AtomicUsize::new(0)
});
(Session::server(sess.clone()), Session::client(sess))
}
impl Waitable for ServerSession {
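/// A ServerSession is signaled when a request is waiting to be serviced: if
/// no request is currently active, the next incoming request (if any) is
/// promoted to active.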
fn is_signaled(&self) -> bool {
let mut internal = self.0.internal.lock();
if internal.active_request.is_none() {
if let Some(s) = internal.incoming_requests.pop() {
internal.active_request = Some(s);
true
} else {
false
}
} else {
true
}
}
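/// Registers the current thread as an accepter, so that it is woken up when a
/// client sends a request.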
fn register(&self) {
let mut accepters = self.0.accepters.lock();
let curproc = scheduler::get_current_thread();
if !accepters.iter().filter_map(|v| v.upgrade()).any(|v| Arc::ptr_eq(&curproc, &v)) {
accepters.push(Arc::downgrade(&curproc));
}
}
}
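/// An in-flight IPC request.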
#[derive(Debug)]
struct Request {
/// Address of the sender's message buffer, in the sender's address space.
sender_buf: VirtualAddress,
/// Size of the sender's message buffer.
sender_bufsize: usize,
/// Thread that sent the request, rescheduled when the request is answered.
sender: Arc<ThreadStruct>,
/// Result of the request, filled in when the server replies or dies.
answered: Arc<SpinLock<Option<Result<(), UserspaceError>>>>,
/// A/B/W buffers mapped into the server for this request, unmapped on reply.
buffers: Vec<Buffer>,
}
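/// An A/B/W buffer mapped from the sender's address space into the
/// receiver's.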
#[derive(Debug)]
struct Buffer {
/// Whether the receiver may write to the buffer; if so, its unaligned head
/// and tail pages are copied back to the sender when it is unmapped.
writable: bool,
/// Address of the buffer in the sender's address space.
source_addr: VirtualAddress,
/// Address the buffer was mapped at in the receiver's address space.
dest_addr: VirtualAddress,
/// Size of the buffer in bytes.
size: usize,
}
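/// Maps an A/B/W buffer descriptor into the receiver.
///
/// Reads the descriptor at `*curoff` in `from_buf`, maps the described region
/// of `from_mem` into `to_mem`, writes the translated descriptor into
/// `to_buf`, and records the mapping in `buffers` so that it can be unmapped
/// once the request has been replied to.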
#[allow(unused)]
fn buf_map(from_buf: &[u8], to_buf: &mut [u8], curoff: &mut usize, from_mem: &mut ProcessMemory, to_mem: &mut ProcessMemory, flags: MappingAccessRights, buffers: &mut Vec<Buffer>) -> Result<(), UserspaceError> {
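// A buffer descriptor is 12 bytes: the lower 32 bits of the size, the lower
// 32 bits of the address, and a packed word carrying the flags and the high
// bits of both.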
let lowersize = u32::from_le_bytes(from_buf[*curoff..*curoff + 4].try_into().unwrap());
let loweraddr = u32::from_le_bytes(from_buf[*curoff + 4..*curoff + 8].try_into().unwrap());
let rest = u32::from_le_bytes(from_buf[*curoff + 8..*curoff + 12].try_into().unwrap());
let bufflags = rest.get_bits(0..2);
let addr = *(u64::from(loweraddr))
.set_bits(32..36, u64::from(rest.get_bits(28..32)))
.set_bits(36..39, u64::from(rest.get_bits(2..5)));
let size = *(u64::from(lowersize))
.set_bits(32..36, u64::from(rest.get_bits(24..28)));
check_lower_than_usize(addr, UserspaceError::InvalidAddress)?;
check_lower_than_usize(size, UserspaceError::InvalidSize)?;
check_lower_than_usize(addr.saturating_add(size), UserspaceError::InvalidSize)?;
let addr = addr as usize;
let size = size as usize;
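// A null address means there is no buffer: translate it to a null descriptor.
// Otherwise, reserve space in the receiver keeping the same offset within the
// page, so that the unaligned head and tail can be copied while the aligned
// middle is shared.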
let to_addr = if addr == 0 {
0usize
} else {
let to_addr_full = to_mem.find_available_space(align_up(size + (addr % PAGE_SIZE), PAGE_SIZE))?;
let to_addr = to_addr_full + (addr % PAGE_SIZE);
let mut first_page_info_opt: Option<(VirtualAddress, usize)> = None;
let mut middle_page_info_opt: Option<(VirtualAddress, usize)> = None;
let mut last_page_info_opt: Option<(VirtualAddress, usize)> = None;
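// On error, unmap whatever part of the buffer was already mapped into the
// receiver before propagating the error.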
let mut mapping_error_handling_logic =
|to_mem: &mut ProcessMemory, error: KernelError, mut first_page_info_opt: Option<(VirtualAddress, usize)>, mut middle_page_info_opt: Option<(VirtualAddress, usize)>, mut last_page_info_opt: Option<(VirtualAddress, usize)>| {
if let Some(first_page_info) = first_page_info_opt.take() {
to_mem.unmap(first_page_info.0, first_page_info.1).expect("Cannot unmap first unaligned page of buffer");
}
if let Some(middle_page_info) = middle_page_info_opt {
to_mem.unmap(middle_page_info.0, middle_page_info.1).expect("Cannot unmap buffer");
}
if let Some(last_page_info) = last_page_info_opt.take() {
to_mem.unmap(last_page_info.0, last_page_info.1).expect("Cannot unmap last unaligned page of buffer");
}
Err(error.into())
};
let mut size_handled = 0;
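// Copy the unaligned head of the buffer (or the whole buffer if it is smaller
// than a page) into a freshly mapped page of the receiver.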
if addr % PAGE_SIZE != 0 || size < PAGE_SIZE {
let first_page_size = core::cmp::min(PAGE_SIZE - (addr % PAGE_SIZE), size);
let from_mapping = from_mem.mirror_mapping(VirtualAddress(addr), first_page_size)?;
let from = UserSpacePtr::from_raw_parts(from_mapping.addr().addr() as *const u8, from_mapping.len());
let res_mapping = to_mem.create_regular_mapping(to_addr_full, PAGE_SIZE, MemoryType::Ipc, MappingAccessRights::u_rw());
if let Err(error) = res_mapping {
return mapping_error_handling_logic(to_mem, error, first_page_info_opt, middle_page_info_opt, last_page_info_opt);
}
first_page_info_opt = Some((to_addr_full, PAGE_SIZE));
let mut to = UserSpacePtrMut::from_raw_parts_mut(to_addr.addr() as *mut u8, first_page_size);
to.copy_from_slice(&from);
size_handled += first_page_size;
}
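// Copy the unaligned tail of the buffer, unless it lives in the page already
// handled above.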
if (addr + size) % PAGE_SIZE != 0 && (to_addr + size).floor() != to_addr_full {
let last_page = (VirtualAddress(addr) + size).floor();
let last_page_size = (addr + size) % PAGE_SIZE;
let from_mapping = from_mem.mirror_mapping(last_page, last_page_size)?;
let from = UserSpacePtr::from_raw_parts(from_mapping.addr().addr() as *const u8, from_mapping.len());
let to_last_page = (to_addr + size).floor();
let res_mapping = to_mem.create_regular_mapping(to_last_page, PAGE_SIZE, MemoryType::Ipc, MappingAccessRights::u_rw());
if let Err(error) = res_mapping {
return mapping_error_handling_logic(to_mem, error, first_page_info_opt, middle_page_info_opt, last_page_info_opt);
}
last_page_info_opt = Some((to_last_page, PAGE_SIZE));
let mut to = UserSpacePtrMut::from_raw_parts_mut(to_last_page.addr() as *mut u8, last_page_size);
to.copy_from_slice(&from);
size_handled += last_page_size;
}
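// Share the page-aligned middle of the buffer with the receiver directly,
// without copying.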
if size - size_handled != 0 {
assert!((size - size_handled) % PAGE_SIZE == 0, "Remaining size ({} - {}, {}) should be a multiple of PAGE_SIZE", size, size_handled, size - size_handled);
let addr = align_up(addr, PAGE_SIZE);
let to_addr = to_addr.ceil();
let mapping = match from_mem.query_memory(VirtualAddress(addr)) {
QueryMemory::Used(mapping) => mapping,
QueryMemory::Available(mapping) =>
return mapping_error_handling_logic(to_mem, KernelError::InvalidMemState { address: mapping.address(), ty: mapping.state().ty(), backtrace: Backtrace::new() },
first_page_info_opt,
middle_page_info_opt,
last_page_info_opt),
};
let frames = match mapping.frames() {
MappingFrames::Shared(shared) => shared.clone(),
_ =>
return mapping_error_handling_logic(to_mem, KernelError::InvalidMemState { address: mapping.address(), ty: mapping.state().ty(), backtrace: Backtrace::new() },
first_page_info_opt,
middle_page_info_opt,
last_page_info_opt),
};
let offset = addr - mapping.address().addr();
let res_mapping = to_mem.map_partial_shared_mapping(frames, to_addr, mapping.phys_offset() + offset, size - size_handled, MemoryType::Ipc, MappingAccessRights::u_rw());
if let Err(error) = res_mapping {
return mapping_error_handling_logic(to_mem, error, first_page_info_opt, middle_page_info_opt, last_page_info_opt);
}
middle_page_info_opt = Some((to_addr, size - size_handled));
}
to_addr.addr()
};
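// Write the translated descriptor, now pointing into the receiver's address
// space, into the outgoing message.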
let loweraddr = to_addr as u32;
let rest = *0u32
.set_bits(0..2, bufflags)
.set_bits(2..5, (to_addr as u64).get_bits(36..39) as u32)
.set_bits(24..28, (size as u64).get_bits(32..36) as u32)
.set_bits(28..32, (to_addr as u64).get_bits(32..36) as u32);
(&mut to_buf[*curoff + 0..*curoff + 4]).copy_from_slice(&lowersize.to_le_bytes()[..]);
(&mut to_buf[*curoff + 4..*curoff + 8]).copy_from_slice(&loweraddr.to_le_bytes()[..]);
(&mut to_buf[*curoff + 8..*curoff + 12]).copy_from_slice(&rest.to_le_bytes()[..]);
buffers.push(Buffer {
writable: flags.contains(MappingAccessRights::WRITABLE),
source_addr: VirtualAddress(addr),
dest_addr: VirtualAddress(to_addr),
size
});
*curoff += 12;
Ok(())
}
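/// Unmaps a buffer previously mapped by [buf_map] from the receiver's address
/// space. If the buffer was writable, its unaligned head and tail pages are
/// copied back to the sender first; the aligned middle was shared and needs
/// no copy.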
fn buf_unmap(buffer: &Buffer, from_mem: &mut ProcessMemory, to_mem: &mut ProcessMemory) -> Result<(), UserspaceError> {
let addr = buffer.dest_addr;
let size = buffer.size;
let to_addr = buffer.source_addr;
let to_addr_full = to_addr.floor();
let mut size_handled = 0;
let mut result: Result<(), UserspaceError> = Ok(());
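// Copy back (if writable) and unmap the unaligned head page.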
if addr.addr() % PAGE_SIZE != 0 || size < PAGE_SIZE {
let first_page_size = core::cmp::min(PAGE_SIZE - (addr.addr() % PAGE_SIZE), size);
if buffer.writable {
let from = UserSpacePtr::from_raw_parts(addr.addr() as *const u8, first_page_size);
result = match to_mem.mirror_mapping(to_addr, first_page_size) {
Ok(to_mapping) => {
let mut to = UserSpacePtrMut::from_raw_parts_mut(to_mapping.addr().addr() as *mut u8, first_page_size);
to.copy_from_slice(&from);
Ok(())
},
Err(err) => Err(err.into()),
};
}
from_mem.unmap(addr.floor(), PAGE_SIZE).expect("Cannot unmap first unaligned page of buffer");
size_handled += first_page_size;
}
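// Copy back (if writable) and unmap the unaligned tail page, unless it is the
// page already handled above.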
if (addr.addr() + size) % PAGE_SIZE != 0 && (to_addr + size).floor() != to_addr_full {
let last_page = (addr + size).floor();
let last_page_size = (addr.addr() + size) % PAGE_SIZE;
if buffer.writable {
let from = UserSpacePtr::from_raw_parts(last_page.addr() as *const u8, last_page_size);
let to_last_page = (to_addr + size).floor();
result = match to_mem.mirror_mapping(to_last_page, last_page_size) {
Ok(to_mapping) => {
let mut to = UserSpacePtrMut::from_raw_parts_mut(to_mapping.addr().addr() as *mut u8, last_page_size);
to.copy_from_slice(&from);
Ok(())
},
Err(err) => Err(err.into()),
};
}
from_mem.unmap((addr + size).floor(), PAGE_SIZE).expect("Cannot unmap last unaligned page of buffer");
size_handled += last_page_size;
}
assert!((size - size_handled) % PAGE_SIZE == 0, "Remaining size should be a multiple of PAGE_SIZE");
// Unmap the page-aligned middle chunk. It was shared rather than copied, so
// the sender already sees any modification made by the receiver.
if size > size_handled {
from_mem.unmap(addr.ceil(), size - size_handled).expect("Cannot unmap buffer");
}
result
}
impl ClientSession {
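/// Sends a request through this session and blocks until the server replies.
///
/// `buf` is the message buffer in the caller's address space: the server
/// reads the request from it and writes its reply back into it. Fails with
/// `PortRemoteDead` if every ServerSession has been dropped.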
pub fn send_request(&self, buf: UserSpacePtrMut<[u8]>) -> Result<(), UserspaceError> {
let answered = Arc::new(SpinLock::new(None));
{
let mut internal = self.0.internal.lock();
if self.0.servercount.load(Ordering::SeqCst) == 0 {
return Err(UserspaceError::PortRemoteDead);
}
internal.incoming_requests.push(Request {
sender_buf: VirtualAddress(buf.as_ptr() as usize),
sender_bufsize: buf.len(),
answered: answered.clone(),
sender: scheduler::get_current_thread(),
buffers: Vec::new(),
})
}
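// Wake up a thread waiting to accept a request, then sleep until the server
// answers (or dies).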
let mut guard = answered.lock();
while guard.is_none() {
while let Some(item) = self.0.accepters.lock().pop() {
if let Some(process) = item.upgrade() {
scheduler::add_to_schedule_queue(process);
break;
}
}
guard = scheduler::unschedule(&*answered, guard)?;
}
(*guard).unwrap()
}
}
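/// Parses the header of the message in `buf` to locate its C descriptors,
/// which describe where incoming X buffers should be written.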
fn find_c_descriptors(buf: &mut [u8]) -> Result<CBufBehavior, KernelError> {
let mut curoff = 0;
let hdr = MsgPackedHdr(u64::from_le_bytes(buf[curoff..curoff + 8].try_into().unwrap()));
curoff += 8;
let cflag = hdr.c_descriptor_flags();
match cflag {
0 => return Ok(CBufBehavior::Disabled),
1 => return Ok(CBufBehavior::Inlined),
_ => ()
}
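// Skip the handle descriptor, the X/A/B/W descriptors and the raw data
// section: the C descriptors sit at the end of the message.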
if hdr.enable_handle_descriptor() {
let descriptor = HandleDescriptorHeader(u32::from_le_bytes(buf[curoff..curoff + 4].try_into().unwrap()));
curoff += 4;
if descriptor.send_pid() {
curoff += 8;
}
curoff += 4 * usize::from(descriptor.num_copy_handles() + descriptor.num_move_handles());
}
curoff += 8 * usize::from(hdr.num_x_descriptors());
curoff += 12 * usize::from(hdr.num_a_descriptors() + hdr.num_b_descriptors() + hdr.num_w_descriptors());
curoff += 4 * usize::from(hdr.raw_section_size());
match hdr.c_descriptor_flags() {
0 | 1 => unreachable!(),
2 => {
let word1 = u32::from_le_bytes(buf[curoff..curoff + 4].try_into().unwrap());
let word2 = u32::from_le_bytes(buf[curoff + 4..curoff + 8].try_into().unwrap());
let addr = *u64::from(word1).set_bits(32..48, u64::from(word2.get_bits(0..16)));
let size = u64::from(word2.get_bits(16..32));
Ok(CBufBehavior::Single(addr, size))
},
x => {
let mut bufs = [(0, 0); 13];
for i in 0..x - 2 {
let word1 = u32::from_le_bytes(buf[curoff..curoff + 4].try_into().unwrap());
let word2 = u32::from_le_bytes(buf[curoff + 4..curoff + 8].try_into().unwrap());
let addr = *u64::from(word1).set_bits(32..48, u64::from(word2.get_bits(0..16)));
let size = u64::from(word2.get_bits(16..32));
bufs[i as usize] = (addr, size);
// Each C descriptor is 8 bytes; advance to the next one.
curoff += 8;
}
Ok(CBufBehavior::Numbered(bufs, (x - 2) as usize))
}
}
}
impl ServerSession {
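/// Receives the currently active request of this session: maps the sender's
/// message buffer and translates the message into `buf`.
///
/// `has_c_descriptors` tells whether `buf` already contains a header with C
/// descriptors describing where X buffers should be received.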
pub fn receive(&self, mut buf: UserSpacePtrMut<[u8]>, has_c_descriptors: bool) -> Result<(), UserspaceError> {
let mut internal = self.0.internal.lock();
let active = internal.active_request.as_mut().unwrap();
let sender = active.sender.process.clone();
let memlock = sender.pmemory.lock();
let mapping = memlock.mirror_mapping(active.sender_buf, active.sender_bufsize)?;
let sender_buf = unsafe {
slice::from_raw_parts_mut(mapping.addr().addr() as *mut u8, mapping.len())
};
let c_bufs = if has_c_descriptors {
find_c_descriptors(&mut *buf)?
} else {
CBufBehavior::Disabled
};
pass_message(sender_buf, active.sender.clone(), &mut *buf, scheduler::get_current_thread(), false, memlock, &mut active.buffers, c_bufs)?;
Ok(())
}
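/// Replies to the currently active request: translates `buf` back into the
/// sender's message buffer, unmaps the buffers that were mapped for the
/// request, stores the result and reschedules the sender.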
pub fn reply(&self, buf: UserSpacePtr<[u8]>) -> Result<(), UserspaceError> {
assert!(self.0.internal.lock().active_request.is_some(), "Called reply without an active session");
let mut active = self.0.internal.lock().active_request.take().unwrap();
let sender = active.sender.process.clone();
let memlock = sender.pmemory.lock();
let mapping = memlock.mirror_mapping(active.sender_buf, active.sender_bufsize)?;
let sender_buf = unsafe {
slice::from_raw_parts_mut(mapping.addr().addr() as *mut u8, mapping.len())
};
pass_message(&*buf, scheduler::get_current_thread(), sender_buf, active.sender.clone(), true, memlock, &mut active.buffers, CBufBehavior::Disabled)?;
*active.answered.lock() = Some(Ok(()));
scheduler::add_to_schedule_queue(active.sender);
Ok(())
}
}
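/// How incoming X buffers should be stored, as described by the C descriptor
/// flags of the receiving message.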
#[allow(clippy::large_enum_variant)]
enum CBufBehavior {
/// No C descriptors: receiving an X buffer is an error.
Disabled,
/// The receive list is inlined after the raw data section (unimplemented).
Inlined,
/// A single buffer (address, size) receiving every X buffer back to back.
Single(u64, u64),
/// Up to 13 numbered buffers (address, size), indexed by the X descriptor's
/// counter field; the second field is the number of valid entries.
Numbered([(u64, u64); 13], usize)
}
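/// Copies and translates an IPC message from `from_buf` (belonging to
/// `from_proc`) into `to_buf` (belonging to `to_proc`): the PID and handles
/// are translated between processes, X buffers are copied into the area
/// described by `c_bufs`, A/B/W buffers are mapped into the receiver via
/// [buf_map], and the raw data section is copied verbatim. On a reply, the
/// buffers recorded in `buffers` while receiving the request are unmapped.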
#[allow(unused, clippy::too_many_arguments)]
fn pass_message(from_buf: &[u8], from_proc: Arc<ThreadStruct>, to_buf: &mut [u8], to_proc: Arc<ThreadStruct>, is_reply: bool, mut other_memlock: MutexGuard<ProcessMemory>, buffers: &mut Vec<Buffer>, c_bufs: CBufBehavior) -> Result<(), UserspaceError> {
let mut curoff = 0;
let hdr = MsgPackedHdr(u64::from_le_bytes(from_buf[curoff..curoff + 8].try_into().unwrap()));
(&mut to_buf[curoff..curoff + 8]).copy_from_slice(&hdr.0.to_le_bytes()[..]);
curoff += 8;
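// Copy the handle descriptor header, if the message carries one.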
let descriptor = if hdr.enable_handle_descriptor() {
let descriptor = HandleDescriptorHeader(u32::from_le_bytes(from_buf[curoff..curoff + 4].try_into().unwrap()));
(&mut to_buf[curoff..curoff + 4]).copy_from_slice(&descriptor.0.to_le_bytes()[..]);
curoff += 4;
descriptor
} else {
HandleDescriptorHeader(0)
};
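// If requested, write the sender's process ID into the message.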
if descriptor.send_pid() {
(&mut to_buf[curoff..curoff + 8]).copy_from_slice(&(from_proc.process.pid as u64).to_le_bytes()[..]);
curoff += 8;
}
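// Translate copied and moved handles from the sender's handle table into the
// receiver's.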
if descriptor.num_copy_handles() != 0 || descriptor.num_move_handles() != 0 {
let mut from_handle_table = from_proc.process.phandles.lock();
let mut to_handle_table = to_proc.process.phandles.lock();
for i in 0..descriptor.num_copy_handles() {
let handle = u32::from_le_bytes(from_buf[curoff..curoff + 4].try_into().unwrap());
let handle = from_handle_table.get_handle(handle)?;
let handle = to_handle_table.add_handle(handle);
(&mut to_buf[curoff..curoff + 4]).copy_from_slice(&handle.to_le_bytes()[..]);
curoff += 4;
}
for i in 0..descriptor.num_move_handles() {
let handle = u32::from_le_bytes(from_buf[curoff..curoff + 4].try_into().unwrap());
let handle = from_handle_table.delete_handle(handle)?;
let handle = to_handle_table.add_handle(handle);
(&mut to_buf[curoff..curoff + 4]).copy_from_slice(&handle.to_le_bytes()[..]);
curoff += 4;
}
}
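// Translate X descriptors: copy each pointed buffer into the receive area
// described by the C descriptors, and rewrite the descriptor so it points
// into the receiver's address space.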
{
let mut coff = 0;
for i in 0..hdr.num_x_descriptors() {
let word1 = u32::from_le_bytes(from_buf[curoff..curoff + 4].try_into().unwrap());
let counter = word1.get_bits(0..6);
let from_addr = *u64::from(u32::from_le_bytes(from_buf[curoff + 4..curoff + 8].try_into().unwrap()))
.set_bits(32..36, u64::from(word1.get_bits(12..16)))
.set_bits(36..39, u64::from(word1.get_bits(6..9)));
let from_size = u64::from(word1.get_bits(16..32));
let (to_addr, to_size) = match c_bufs {
CBufBehavior::Disabled => return Err(UserspaceError::PortRemoteDead),
CBufBehavior::Inlined => unimplemented!(),
CBufBehavior::Single(addr, size) => {
(addr + coff, size - coff)
},
CBufBehavior::Numbered(bufs, count) => {
let (addr, size) = bufs[..count][counter as usize];
(addr, size)
}
};
check_lower_than_usize(from_addr, UserspaceError::InvalidAddress)?;
check_lower_than_usize(from_size, UserspaceError::InvalidAddress)?;
check_lower_than_usize(from_addr.saturating_add(from_size), UserspaceError::InvalidAddress)?;
check_lower_than_usize(to_addr, UserspaceError::InvalidAddress)?;
check_lower_than_usize(to_size, UserspaceError::InvalidAddress)?;
check_lower_than_usize(to_addr.saturating_add(to_size), UserspaceError::InvalidAddress)?;
let (mapping, mut uspaceptr) = if !is_reply {
let mapping = other_memlock.mirror_mapping(VirtualAddress(from_addr as usize), from_size as usize)?;
let uspaceptr = UserSpacePtrMut::from_raw_parts_mut(to_addr as *mut u8, to_size as usize);
(mapping, uspaceptr)
} else {
let mapping = other_memlock.mirror_mapping(VirtualAddress(to_addr as usize), to_size as usize)?;
let uspaceptr = UserSpacePtrMut::from_raw_parts_mut(from_addr as *mut u8, from_size as usize);
(mapping, uspaceptr)
};
let (from, to) = {
let ref_mapping = unsafe {
slice::from_raw_parts_mut(mapping.addr().addr() as *mut u8, mapping.len())
};
let ref_uspace = &mut *uspaceptr;
if !is_reply {
(ref_mapping, ref_uspace)
} else {
(ref_uspace, ref_mapping)
}
};
to[..from.len()].copy_from_slice(from);
coff += from.len() as u64;
let mut counter = counter;
let counter = *counter
.set_bits(6..9, to_addr.get_bits(36..39) as u32)
.set_bits(12..16, to_addr.get_bits(32..36) as u32)
.set_bits(16..32, from_size as u32);
(&mut to_buf[curoff..curoff + 4]).copy_from_slice(&counter.to_le_bytes()[..]);
(&mut to_buf[curoff + 4..curoff + 8]).copy_from_slice(&(to_addr as u32).to_le_bytes()[..]);
curoff += 8;
}
}
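// Map A (input) and B/W (output) buffers into the receiver's address space.
// A/B buffers cannot be sent in a reply.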
if hdr.num_a_descriptors() != 0 || hdr.num_b_descriptors() != 0 {
if is_reply {
return Err(UserspaceError::PortRemoteDead)
}
let mut current_memlock = to_proc.process.pmemory.lock();
let (mut from_mem, mut to_mem) = (&mut *other_memlock, &mut *current_memlock);
for i in 0..hdr.num_a_descriptors() {
buf_map(from_buf, to_buf, &mut curoff, &mut *from_mem, &mut *to_mem, MappingAccessRights::empty(), buffers)?;
}
for i in 0..hdr.num_b_descriptors() {
buf_map(from_buf, to_buf, &mut curoff, &mut *from_mem, &mut *to_mem, MappingAccessRights::WRITABLE, buffers)?;
}
for i in 0..hdr.num_w_descriptors() {
buf_map(from_buf, to_buf, &mut curoff, &mut *from_mem, &mut *to_mem, MappingAccessRights::WRITABLE, buffers)?;
}
}
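// On a reply, unmap the buffers that were mapped into the server when the
// request was received.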
if is_reply && !buffers.is_empty() {
let (mut from_mem, mut to_mem) = (from_proc.process.pmemory.lock(), other_memlock);
for buffer in buffers {
buf_unmap(buffer, &mut *from_mem, &mut *to_mem)?;
}
}
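// Copy the raw data section verbatim.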
(&mut to_buf[curoff..curoff + (hdr.raw_section_size() as usize) * 4])
.copy_from_slice(&from_buf[curoff..curoff + (hdr.raw_section_size() as usize) * 4]);
if hdr.c_descriptor_flags() == 1 {
unimplemented!("Inline C Descriptor");
} else if hdr.c_descriptor_flags() == 2 {
unimplemented!("Single C Descriptor");
} else if hdr.c_descriptor_flags() != 0 {
unimplemented!("Multi C Descriptor");
for i in 0..hdr.c_descriptor_flags() - 2 {
}
}
Ok(())
}