capsules_extra/net/udp/driver.rs
// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.

//! UDP userspace interface for transmit and receive.
//!
//! Implements a userspace interface for sending and receiving UDP messages.
//! Processes use this driver to send UDP packets from a common interface
//! and bind to UDP ports for receiving packets.
//! Also exposes a list of interface addresses to the application (currently
//! hard-coded).
13use crate::net::ipv6::ip_utils::IPAddr;
14use crate::net::network_capabilities::NetworkCapability;
15use crate::net::stream::encode_u16;
16use crate::net::stream::encode_u8;
17use crate::net::stream::SResult;
18use crate::net::udp::udp_port_table::{PortQuery, UdpPortManager};
19use crate::net::udp::udp_recv::UDPRecvClient;
20use crate::net::udp::udp_send::{UDPSendClient, UDPSender};
21use crate::net::util::host_slice_to_u16;
22
23use core::cell::Cell;
24use core::mem::size_of;
25use core::{cmp, mem};
26
27use kernel::capabilities::UdpDriverCapability;
28use kernel::debug;
29use kernel::grant::{AllowRoCount, AllowRwCount, Grant, UpcallCount};
30use kernel::processbuffer::{ReadableProcessBuffer, WriteableProcessBuffer};
31use kernel::syscall::{CommandReturn, SyscallDriver};
32use kernel::utilities::cells::MapCell;
33use kernel::utilities::leasable_buffer::SubSliceMut;
34use kernel::{ErrorCode, ProcessId};
35
use capsules_core::driver;
/// Syscall driver number used by userspace to reach this capsule.
pub const DRIVER_NUM: usize = driver::NUM::Udp as usize;
38
/// IDs for subscribed upcalls.
mod upcall {
    /// Callback for when packet is received. If no port has been bound, return
    /// `RESERVE` to indicate that port binding is a prerequisite to
    /// reception.
    pub const PACKET_RECEIVED: usize = 0;
    /// Callback for when packet is transmitted. Notably, this callback receives
    /// the result of the send_done callback from udp_send.rs, which does not
    /// currently pass information regarding whether packets were acked at the
    /// link layer.
    pub const PACKET_TRANSMITTED: usize = 1;
    /// Number of upcalls.
    pub const COUNT: u8 = 2;
}
53
/// IDs for read-only allow buffers.
mod ro_allow {
    /// Write buffer. Contains the UDP payload to be transmitted. Returns SIZE
    /// if the passed buffer is too long, and NOSUPPORT if an invalid
    /// `allow_num` is passed.
    pub const WRITE: usize = 0;
    /// The number of allow buffers the kernel stores for this grant.
    pub const COUNT: u8 = 1;
}
63
/// IDs for read-write allow buffers.
mod rw_allow {
    /// Read buffer. Will contain the received payload.
    pub const READ: usize = 0;
    /// Config buffer. Used to contain miscellaneous data associated with some
    /// commands, namely source/destination addresses and ports.
    pub const CFG: usize = 1;
    /// Rx config buffer. Used to contain source/destination addresses and ports
    /// for receives (separate from `CFG` because receives may be waiting for an
    /// incoming packet asynchronously).
    pub const RX_CFG: usize = 2;
    /// The number of allow buffers the kernel stores for this grant.
    pub const COUNT: u8 = 3;
}
78
/// An IPv6 address paired with a UDP port, identifying one end of a UDP flow.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct UDPEndpoint {
    // IPv6 address of this endpoint.
    addr: IPAddr,
    // UDP port number of this endpoint.
    port: u16,
}
84
85impl UDPEndpoint {
86 /// This function serializes the `UDPEndpoint` into the provided buffer.
87 ///
88 /// # Arguments
89 ///
90 /// - `buf` - A mutable buffer to serialize the `UDPEndpoint` into
91 /// - `offset` - The current offset into the provided buffer
92 ///
93 /// # Return Value
94 ///
95 /// This function returns the new offset into the buffer wrapped in an
96 /// SResult.
97 pub fn encode(&self, buf: &mut [u8], offset: usize) -> SResult<usize> {
98 stream_len_cond!(buf, size_of::<UDPEndpoint>() + offset);
99
100 let mut off = offset;
101 for i in 0..16 {
102 off = enc_consume!(buf, off; encode_u8, self.addr.0[i]);
103 }
104 off = enc_consume!(buf, off; encode_u16, self.port);
105 stream_done!(off, off);
106 }
107
108 /// This function checks if the UDPEndpoint specified is the 0 address + 0 port.
109 pub fn is_zero(&self) -> bool {
110 self.addr.is_unspecified() && self.port == 0
111 }
112}
113
/// Per-process state kept in the grant region.
#[derive(Default)]
pub struct App {
    // Endpoints of a transmission waiting to be sent: index 0 is the source,
    // index 1 the destination. `None` when this process has nothing queued.
    pending_tx: Option<[UDPEndpoint; 2]>,
    // Endpoint this process has bound to (used for both reception and as the
    // required transmit source), if any.
    bound_port: Option<UDPEndpoint>,
}
119
/// Userspace driver for transmitting and receiving UDP packets.
#[allow(dead_code)]
pub struct UDPDriver<'a> {
    /// UDP sender
    sender: &'a dyn UDPSender<'a>,

    /// Grant of apps that use this radio driver.
    apps: Grant<
        App,
        UpcallCount<{ upcall::COUNT }>,
        AllowRoCount<{ ro_allow::COUNT }>,
        AllowRwCount<{ rw_allow::COUNT }>,
    >,
    /// ID of app whose transmission request is being processed.
    current_app: Cell<Option<ProcessId>>,

    /// List of IP Addresses of the interfaces on the device
    interface_list: &'static [IPAddr],

    /// Maximum length payload that an app can transmit via this driver
    max_tx_pyld_len: usize,

    /// UDP bound port table (manages kernel bindings)
    port_table: &'static UdpPortManager,

    /// Kernel-owned staging buffer app payloads are copied into before being
    /// handed to `sender`; empty (checked out) while a send is in flight.
    kernel_buffer: MapCell<SubSliceMut<'static, u8>>,

    /// Capability required to call `UDPSender::driver_send_to`.
    driver_send_cap: &'static dyn UdpDriverCapability,

    /// Network capability passed to the sender on every transmission.
    net_cap: &'static NetworkCapability,
}
150
impl<'a> UDPDriver<'a> {
    /// Creates a new UDP driver.
    ///
    /// `kernel_buffer` is the kernel-owned buffer used to stage app payloads
    /// before they are handed to `sender`.
    pub fn new(
        sender: &'a dyn UDPSender<'a>,
        grant: Grant<
            App,
            UpcallCount<{ upcall::COUNT }>,
            AllowRoCount<{ ro_allow::COUNT }>,
            AllowRwCount<{ rw_allow::COUNT }>,
        >,
        interface_list: &'static [IPAddr],
        max_tx_pyld_len: usize,
        port_table: &'static UdpPortManager,
        kernel_buffer: SubSliceMut<'static, u8>,
        driver_send_cap: &'static dyn UdpDriverCapability,
        net_cap: &'static NetworkCapability,
    ) -> UDPDriver<'a> {
        UDPDriver {
            sender,
            apps: grant,
            current_app: Cell::new(None),
            interface_list,
            max_tx_pyld_len,
            port_table,
            kernel_buffer: MapCell::new(kernel_buffer),
            driver_send_cap,
            net_cap,
        }
    }

    /// If the driver is currently idle and there are pending transmissions,
    /// pick an app with a pending transmission and return its `ProcessId`.
    ///
    /// Apps are scanned in grant-iteration order, so the first app found with
    /// a pending tx wins (this is the source of the starvation noted in the
    /// `command` documentation).
    fn get_next_tx_if_idle(&self) -> Option<ProcessId> {
        if self.current_app.get().is_some() {
            // Tx already in progress
            return None;
        }
        let mut pending_app = None;
        for app in self.apps.iter() {
            let processid = app.processid();
            app.enter(|app, _| {
                if app.pending_tx.is_some() {
                    pending_app = Some(processid);
                }
            });
            if pending_app.is_some() {
                break;
            }
        }
        pending_app
    }

    /// Performs `processid`'s pending transmission asynchronously. If the
    /// transmission is not successful, the error is returned to the app via its
    /// `tx_callback`. Assumes that the driver is currently idle and the app has
    /// a pending transmission.
    #[inline]
    fn perform_tx_async(&self, processid: ProcessId) {
        let result = self.perform_tx_sync(processid);
        if result != Ok(()) {
            // Report the failure through the transmit upcall since there is no
            // synchronous return path to the app here.
            let _ = self.apps.enter(processid, |_app, upcalls| {
                upcalls
                    .schedule_upcall(
                        upcall::PACKET_TRANSMITTED,
                        (kernel::errorcode::into_statuscode(result), 0, 0),
                    )
                    .ok();
            });
        }
    }

    /// Performs `processid`'s pending transmission synchronously. The result is
    /// returned immediately to the app. Assumes that the driver is currently
    /// idle and the app has a pending transmission.
    #[inline]
    fn perform_tx_sync(&self, processid: ProcessId) -> Result<(), ErrorCode> {
        self.apps.enter(processid, |app, kernel_data| {
            // Take (and thereby clear) the pending tx; nothing to do if the
            // app has none queued.
            let addr_ports = match app.pending_tx.take() {
                Some(pending_tx) => pending_tx,
                None => {
                    return Ok(());
                }
            };
            // pending_tx layout is [source, destination].
            let dst_addr = addr_ports[1].addr;
            let dst_port = addr_ports[1].port;
            let src_port = addr_ports[0].port;

            // Send UDP payload. Copy payload into packet buffer held by this driver, then queue
            // it on the udp_mux.
            let result = kernel_data
                .get_readonly_processbuffer(ro_allow::WRITE)
                .and_then(|write| {
                    write.enter(|payload| {
                        self.kernel_buffer.take().map_or(
                            // Buffer checked out (send in flight) -> NOMEM.
                            Err(ErrorCode::NOMEM),
                            |mut kernel_buffer| {
                                if payload.len() > kernel_buffer.len() {
                                    // Payload too large: put the buffer back
                                    // before returning the error.
                                    self.kernel_buffer.replace(kernel_buffer);
                                    return Err(ErrorCode::SIZE);
                                }
                                payload.copy_to_slice(&mut kernel_buffer[0..payload.len()]);
                                // Narrow the buffer window to the payload length.
                                kernel_buffer.slice(0..payload.len());
                                match self.sender.driver_send_to(
                                    dst_addr,
                                    dst_port,
                                    src_port,
                                    kernel_buffer,
                                    self.driver_send_cap,
                                    self.net_cap,
                                ) {
                                    Ok(()) => Ok(()),
                                    Err(mut buf) => {
                                        // Sender rejected the packet and handed the
                                        // buffer back; restore it for future sends.
                                        buf.reset();
                                        self.kernel_buffer.replace(buf);
                                        Err(ErrorCode::FAIL)
                                    }
                                }
                            },
                        )
                    })
                })
                .unwrap_or(Err(ErrorCode::NOMEM));
            if result == Ok(()) {
                // Remember which app owns the in-flight transmission so
                // send_done() can schedule its upcall.
                self.current_app.set(Some(processid));
            }
            result
        })?
    }

    /// Schedule the next transmission if there is one pending. Performs the
    /// transmission eventually, returning any errors via asynchronous callbacks.
    #[inline]
    #[allow(dead_code)]
    fn do_next_tx_queued(&self) {
        self.get_next_tx_if_idle()
            .map(|processid| self.perform_tx_async(processid));
    }

    /// Schedule the next transmission if there is one pending. If the next
    /// transmission happens to be the one that was just queued, then the
    /// transmission is immediate. Hence, errors must be returned immediately.
    /// On the other hand, if it is some other app, then return any errors via
    /// callbacks.
    ///
    /// Returns `Ok(1)` for an immediate (synchronous) send, `Ok(0)` when the
    /// send was queued asynchronously or nothing was pending.
    #[inline]
    fn do_next_tx_immediate(&self, new_processid: ProcessId) -> Result<u32, ErrorCode> {
        self.get_next_tx_if_idle().map_or(Ok(0), |processid| {
            if processid == new_processid {
                let sync_result = self.perform_tx_sync(processid);
                if sync_result == Ok(()) {
                    Ok(1) //Indicates packet passed to radio
                } else {
                    Err(ErrorCode::try_from(sync_result).unwrap())
                }
            } else {
                self.perform_tx_async(processid);
                Ok(0) //indicates async transmission
            }
        })
    }

    /// Deserializes a `UDPEndpoint` from `buf` (16 address bytes followed by
    /// a port). Returns `None` (and logs a debug message) unless `buf` is
    /// exactly `size_of::<UDPEndpoint>()` bytes long.
    #[inline]
    fn parse_ip_port_pair(&self, buf: &[u8]) -> Option<UDPEndpoint> {
        if buf.len() != size_of::<UDPEndpoint>() {
            debug!(
                "[parse] len is {:?}, not {:?} as expected",
                buf.len(),
                size_of::<UDPEndpoint>()
            );
            None
        } else {
            // First size_of::<IPAddr>() bytes are the address, the rest the port.
            let (a, p) = buf.split_at(size_of::<IPAddr>());
            let mut addr = IPAddr::new();
            addr.0.copy_from_slice(a);

            let pair = UDPEndpoint {
                addr,
                port: host_slice_to_u16(p),
            };
            Some(pair)
        }
    }
}
332
impl SyscallDriver for UDPDriver<'_> {
    /// UDP control
    ///
    /// ### `command_num`
    ///
    /// - `0`: Driver existence check.
    /// - `1`: Get the interface list app_cfg (out): 16 * `n` bytes: the list of
    ///   interface IPv6 addresses, length limited by `app_cfg` length. Returns
    ///   INVAL if the cfg buffer is the wrong size, or not available.
    /// - `2`: Transmit payload.
    ///   - Returns BUSY if this process already has a pending tx.
    ///   - Returns INVAL if no valid buffer has been loaded into the write
    ///     buffer, or if the config buffer is the wrong length, or if the
    ///     destination and source port/address pairs cannot be parsed.
    ///   - Otherwise, returns the result of do_next_tx_immediate(). Notably, a
    ///     successful transmit can produce two different success values. If
    ///     success is returned, this simply means that the packet was queued.
    ///     In this case, the app still needs to wait for a callback to
    ///     check if any errors occurred before the packet was passed to the
    ///     radio. However, if Success_U32 is returned with value 1, this means
    ///     the packet was successfully passed to the radio without any errors,
    ///     which tells the userland application that it does not need to wait
    ///     for a callback to check if any errors occurred while the packet was
    ///     being passed down to the radio. Any successful return value
    ///     indicates that the app should wait for a send_done() callback before
    ///     attempting to queue another packet. Currently, only will transmit if
    ///     the app has bound to the port passed in the tx_cfg buf as the source
    ///     address.
    ///   - If no port is bound, returns RESERVE,
    ///   - if it tries to send on a port other than the port which is bound,
    ///     returns INVALID. Notably, the current transmit implementation
    ///     allows for starvation: an app with a lower app id can send
    ///     constantly and starve an app with a later ID.
    /// - `3`: Bind to the address in rx_cfg. Returns Ok(()) if that addr/port
    ///   combo is free, returns INVAL if the address requested is not a local
    ///   interface, or if the port requested is 0. Returns BUSY if that port is
    ///   already bound to by another app. This command should be called after
    ///   allow() is called on the rx_cfg buffer, and before subscribe() is used
    ///   to set up the recv callback. Additionally, apps can only send on ports
    ///   after they have bound to said port. If this command is called and the
    ///   address in rx_cfg is the unspecified address (`::`) with port 0, this
    ///   command will reset the option containing the bound port to None.
    ///   Notably, the current implementation of this only allows for each app
    ///   to bind to a single port at a time, as such an implementation
    ///   conserves memory (and is similar to the approach applied by TinyOS
    ///   and Riot).
    /// - `4`: Returns the maximum payload that can be transmitted by apps using
    ///   this driver. This represents the size of the payload buffer in the
    ///   kernel. Apps can use this syscall to ensure they do not attempt to
    ///   send too-large messages.
    fn command(
        &self,
        command_num: usize,
        arg1: usize,
        _: usize,
        processid: ProcessId,
    ) -> CommandReturn {
        match command_num {
            0 => CommandReturn::success(),

            // Writes the requested number of network interface addresses
            // `arg1`: number of interfaces requested that will fit into the buffer
            1 => {
                self.apps
                    .enter(processid, |_, kernel_data| {
                        kernel_data
                            .get_readwrite_processbuffer(rw_allow::CFG)
                            .and_then(|cfg| {
                                cfg.mut_enter(|cfg| {
                                    // The cfg buffer must be sized for exactly
                                    // `arg1` addresses.
                                    if cfg.len() != arg1 * size_of::<IPAddr>() {
                                        return CommandReturn::failure(ErrorCode::INVAL);
                                    }
                                    let n_ifaces_to_copy =
                                        cmp::min(arg1, self.interface_list.len());
                                    let iface_size = size_of::<IPAddr>();
                                    for i in 0..n_ifaces_to_copy {
                                        cfg[i * iface_size..(i + 1) * iface_size]
                                            .copy_from_slice(&self.interface_list[i].0);
                                    }
                                    // Returns total number of interfaces
                                    CommandReturn::success_u32(self.interface_list.len() as u32)
                                })
                            })
                            .unwrap_or(CommandReturn::failure(ErrorCode::INVAL))
                    })
                    .unwrap_or_else(|err| CommandReturn::failure(err.into()))
            }

            // Transmits UDP packet stored in tx_buf
            2 => {
                let res = self
                    .apps
                    .enter(processid, |app, kernel_data| {
                        if app.pending_tx.is_some() {
                            // Cannot support more than one pending tx per process.
                            return Err(ErrorCode::BUSY);
                        }
                        if app.bound_port.is_none() {
                            // Currently, apps need to bind to a port before they can send from said port
                            return Err(ErrorCode::RESERVE);
                        }
                        // The cfg buffer holds two serialized endpoints:
                        // source first, then destination.
                        let next_tx = kernel_data
                            .get_readwrite_processbuffer(rw_allow::CFG)
                            .and_then(|cfg| {
                                cfg.enter(|cfg| {
                                    if cfg.len() != 2 * size_of::<UDPEndpoint>() {
                                        return None;
                                    }

                                    let mut tmp_cfg_buffer: [u8; size_of::<UDPEndpoint>() * 2] =
                                        [0; size_of::<UDPEndpoint>() * 2];
                                    cfg.copy_to_slice(&mut tmp_cfg_buffer);

                                    if let (Some(dst), Some(src)) = (
                                        self.parse_ip_port_pair(
                                            &tmp_cfg_buffer[size_of::<UDPEndpoint>()..],
                                        ),
                                        self.parse_ip_port_pair(
                                            &tmp_cfg_buffer[..size_of::<UDPEndpoint>()],
                                        ),
                                    ) {
                                        // Only allow sending from the endpoint
                                        // the app has actually bound.
                                        if Some(src) == app.bound_port {
                                            Some([src, dst])
                                        } else {
                                            None
                                        }
                                    } else {
                                        None
                                    }
                                })
                            })
                            .unwrap_or(None);
                        if next_tx.is_none() {
                            return Err(ErrorCode::INVAL);
                        }
                        app.pending_tx = next_tx;
                        Ok(())
                    })
                    .unwrap_or_else(|err| Err(err.into()));
                match res {
                    Ok(()) => self.do_next_tx_immediate(processid).map_or_else(
                        |err| CommandReturn::failure(err),
                        |v| CommandReturn::success_u32(v),
                    ),
                    Err(e) => CommandReturn::failure(e),
                }
            }
            3 => {
                let err = self
                    .apps
                    .enter(processid, |app, kernel_data| {
                        // Move UDPEndpoint into udp.rs?
                        // Parse the requested endpoint out of the second half
                        // of the rx_cfg buffer.
                        let requested_addr_opt = kernel_data
                            .get_readwrite_processbuffer(rw_allow::RX_CFG)
                            .and_then(|rx_cfg| {
                                rx_cfg.enter(|cfg| {
                                    if cfg.len() != 2 * mem::size_of::<UDPEndpoint>() {
                                        None
                                    } else {
                                        let mut tmp_endpoint: [u8; mem::size_of::<UDPEndpoint>()] =
                                            [0; mem::size_of::<UDPEndpoint>()];
                                        cfg[mem::size_of::<UDPEndpoint>()..]
                                            .copy_to_slice(&mut tmp_endpoint);

                                        self.parse_ip_port_pair(&tmp_endpoint)
                                    }
                                })
                            })
                            .unwrap_or(None);
                        requested_addr_opt.map_or(Err(Err(ErrorCode::INVAL)), |requested_addr| {
                            // If zero address, close any already bound socket
                            if requested_addr.is_zero() {
                                app.bound_port = None;
                                return Ok(None);
                            }
                            // Check that requested addr is a local interface
                            let mut requested_is_local = false;
                            for i in 0..self.interface_list.len() {
                                if requested_addr.addr == self.interface_list[i] {
                                    requested_is_local = true;
                                }
                            }
                            if !requested_is_local {
                                return Err(Err(ErrorCode::INVAL));
                            }
                            Ok(Some(requested_addr))
                        })
                    })
                    .unwrap_or_else(|err| Err(err.into()));
                match err {
                    Ok(requested_addr_opt) => {
                        requested_addr_opt.map_or(CommandReturn::success(), |requested_addr| {
                            // Check bound ports in the kernel.
                            match self.port_table.is_bound(requested_addr.port) {
                                Ok(bound) => {
                                    if bound {
                                        CommandReturn::failure(ErrorCode::BUSY)
                                    } else {
                                        self.apps
                                            .enter(processid, |app, _| {
                                                // The requested addr is free and valid
                                                app.bound_port = Some(requested_addr);
                                                CommandReturn::success()
                                            })
                                            .unwrap_or_else(|err| {
                                                CommandReturn::failure(err.into())
                                            })
                                    }
                                }
                                Err(()) => CommandReturn::failure(ErrorCode::FAIL), //error in port table
                            }
                        })
                    }
                    Err(retcode) => CommandReturn::failure(retcode.try_into().unwrap()),
                }
            }
            4 => CommandReturn::success_u32(self.max_tx_pyld_len as u32),
            _ => CommandReturn::failure(ErrorCode::NOSUPPORT),
        }
    }

    fn allocate_grant(&self, processid: ProcessId) -> Result<(), kernel::process::Error> {
        self.apps.enter(processid, |_, _| {})
    }
}
558
559impl UDPSendClient for UDPDriver<'_> {
560 fn send_done(&self, result: Result<(), ErrorCode>, mut dgram: SubSliceMut<'static, u8>) {
561 // Replace the returned kernel buffer. Now we can send the next msg.
562 dgram.reset();
563 self.kernel_buffer.replace(dgram);
564 self.current_app.get().map(|processid| {
565 let _ = self.apps.enter(processid, |_app, upcalls| {
566 upcalls
567 .schedule_upcall(
568 upcall::PACKET_TRANSMITTED,
569 (kernel::errorcode::into_statuscode(result), 0, 0),
570 )
571 .ok();
572 });
573 });
574 self.current_app.set(None);
575 self.do_next_tx_queued();
576 }
577}
578
impl UDPRecvClient for UDPDriver<'_> {
    /// Delivers a received UDP packet to every app whose bound endpoint
    /// matches the packet's destination address and port: the payload is
    /// copied into the app's read buffer, the sender's endpoint is written
    /// into rx_cfg, and the receive upcall is scheduled.
    fn receive(
        &self,
        src_addr: IPAddr,
        dst_addr: IPAddr,
        src_port: u16,
        dst_port: u16,
        payload: &[u8],
    ) {
        self.apps.each(|_, app, kernel_data| {
            if app.bound_port.is_some() {
                // Deliver only if this app's binding matches the packet's
                // destination address/port.
                let mut for_me = false;
                app.bound_port.as_ref().map(|requested_addr| {
                    if requested_addr.addr == dst_addr && requested_addr.port == dst_port {
                        for_me = true;
                    }
                });
                if for_me {
                    let len = payload.len();
                    // Copy the payload into the app's read buffer. Note that a
                    // missing/unavailable read buffer maps to Ok here, so the
                    // upcall below is still scheduled in that case; only a
                    // too-small buffer (SIZE) suppresses delivery.
                    let res = kernel_data
                        .get_readwrite_processbuffer(rw_allow::READ)
                        .and_then(|read| {
                            read.mut_enter(|rbuf| {
                                if rbuf.len() >= len {
                                    rbuf[..len].copy_from_slice(&payload[..len]);
                                    Ok(())
                                } else {
                                    Err(ErrorCode::SIZE) //packet does not fit
                                }
                            })
                        })
                        .unwrap_or(Ok(()));
                    if res.is_ok() {
                        // Write address of sender into rx_cfg so it can be read by client
                        let sender_addr = UDPEndpoint {
                            addr: src_addr,
                            port: src_port,
                        };
                        kernel_data
                            .schedule_upcall(upcall::PACKET_RECEIVED, (len, 0, 0))
                            .ok();
                        const CFG_LEN: usize = 2 * size_of::<UDPEndpoint>();
                        // Encode the sender endpoint into the first half of the
                        // (two-endpoint-sized) rx_cfg buffer; encode/copy errors
                        // are deliberately ignored (best-effort).
                        let _ = kernel_data
                            .get_readwrite_processbuffer(rw_allow::RX_CFG)
                            .and_then(|rx_cfg| {
                                rx_cfg.mut_enter(|cfg| {
                                    if cfg.len() != CFG_LEN {
                                        return Err(ErrorCode::INVAL);
                                    }
                                    let mut tmp_cfg_buffer: [u8; CFG_LEN] = [0; CFG_LEN];
                                    sender_addr.encode(&mut tmp_cfg_buffer, 0);
                                    cfg.copy_from_slice(&tmp_cfg_buffer);
                                    Ok(())
                                })
                            })
                            .unwrap_or(Err(ErrorCode::INVAL));
                    }
                }
            }
        });
    }
}
641
642impl PortQuery for UDPDriver<'_> {
643 // Returns true if |port| is bound (on any iface), false otherwise.
644 fn is_bound(&self, port: u16) -> bool {
645 let mut port_bound = false;
646 for app in self.apps.iter() {
647 app.enter(|other_app, _| {
648 if other_app.bound_port.is_some() {
649 let other_addr_opt = other_app.bound_port;
650 let other_addr = other_addr_opt.unwrap(); // Unwrap fail = Missing other_addr
651 if other_addr.port == port {
652 port_bound = true;
653 }
654 }
655 });
656 }
657 port_bound
658 }
659}