All kernel headers have been moved from include/mango to include/kernel; they now contain only definitions that are relevant to kernel-space. Any definitions relevant to both kernel- and user-space (e.g. type definitions, syscall IDs) have been moved to include/mango within libmango.
174 lines
3.9 KiB
C
174 lines
3.9 KiB
C
#include <kernel/ringbuffer.h>
|
|
#include <kernel/sched.h>
|
|
|
|
size_t ringbuffer_unread(struct ringbuffer *ring_buffer)
|
|
{
|
|
if (ring_buffer->r_read_ptr == ring_buffer->r_write_ptr) {
|
|
return 0;
|
|
}
|
|
|
|
if (ring_buffer->r_read_ptr > ring_buffer->r_write_ptr) {
|
|
return (ring_buffer->r_size - ring_buffer->r_read_ptr)
|
|
+ ring_buffer->r_write_ptr;
|
|
} else {
|
|
return (ring_buffer->r_write_ptr - ring_buffer->r_read_ptr);
|
|
}
|
|
}
|
|
|
|
size_t ringbuffer_avail(struct ringbuffer *ring_buffer)
|
|
{
|
|
if (ring_buffer->r_read_ptr == ring_buffer->r_write_ptr) {
|
|
return ring_buffer->r_size - 1;
|
|
}
|
|
|
|
if (ring_buffer->r_read_ptr > ring_buffer->r_write_ptr) {
|
|
return ring_buffer->r_read_ptr - ring_buffer->r_write_ptr - 1;
|
|
} else {
|
|
return (ring_buffer->r_size - ring_buffer->r_write_ptr)
|
|
+ ring_buffer->r_read_ptr - 1;
|
|
}
|
|
}
|
|
|
|
static inline void increment_read(struct ringbuffer *ring_buffer)
|
|
{
|
|
ring_buffer->r_read_ptr++;
|
|
if (ring_buffer->r_read_ptr == ring_buffer->r_size) {
|
|
ring_buffer->r_read_ptr = 0;
|
|
}
|
|
}
|
|
|
|
static inline void increment_write(struct ringbuffer *ring_buffer)
|
|
{
|
|
ring_buffer->r_write_ptr++;
|
|
if (ring_buffer->r_write_ptr == ring_buffer->r_size) {
|
|
ring_buffer->r_write_ptr = 0;
|
|
}
|
|
}
|
|
|
|
/*
 * Copy bytes out of the ring buffer into `p`.
 *
 * Without S_NOBLOCK the caller sleeps until exactly `size` bytes have
 * been collected; with S_NOBLOCK it takes whatever is immediately
 * available and returns.  Returns the number of bytes copied (0 on a
 * NULL ring buffer).
 */
size_t ringbuffer_read(struct ringbuffer *ring_buffer, size_t size, void *p, mango_flags_t flags)
{
	if (!ring_buffer) {
		return 0;
	}

	unsigned char *buffer = p;
	unsigned long lock_flags;
	size_t collected = 0;

	while (collected < size) {
		/* Drain whatever is currently buffered, under the lock. */
		spin_lock_irqsave(&ring_buffer->r_lock, &lock_flags);
		while (ringbuffer_unread(ring_buffer) > 0 && collected < size) {
			buffer[collected] = ring_buffer->r_buffer[ring_buffer->r_read_ptr];
			increment_read(ring_buffer);
			collected++;
		}

		/* We may have freed space; let any blocked writers retry. */
		wakeup_queue(&ring_buffer->r_wait_writers);

		/* Non-blocking callers take one pass and leave. */
		if (flags & S_NOBLOCK) {
			spin_unlock_irqrestore(&ring_buffer->r_lock, lock_flags);
			break;
		}

		/* Enqueue on the reader wait queue BEFORE dropping the lock so
		 * a writer's wakeup issued after the unlock still finds us.
		 * NOTE(review): assumes thread_wait_begin()/wakeup_queue()
		 * implement the usual sleep/wakeup handshake (a wakeup between
		 * unlock and schedule() is not lost) — confirm in sched.h. */
		struct wait_item waiter;
		wait_item_init(&waiter, current_thread());
		thread_wait_begin(&waiter, &ring_buffer->r_wait_readers);
		spin_unlock_irqrestore(&ring_buffer->r_lock, lock_flags);

		/* Only yield if we still need more data; a full request falls
		 * straight through to deregistration and loop exit. */
		if (collected < size) {
			schedule(SCHED_NORMAL);
		}

		thread_wait_end(&waiter, &ring_buffer->r_wait_readers);
	}

	/* Final nudge for writers in case the last pass freed space. */
	wakeup_queue(&ring_buffer->r_wait_writers);
	return collected;
}
|
|
|
|
/*
 * Copy bytes from `p` into the ring buffer.
 *
 * Without S_NOBLOCK the caller sleeps until all `size` bytes have been
 * stored; with S_NOBLOCK it stores as much as currently fits and
 * returns.  Returns the number of bytes written (0 on a NULL ring
 * buffer or a zero-length request).
 */
size_t ringbuffer_write(struct ringbuffer *ring_buffer, size_t size, const void *p, mango_flags_t flags)
{
	if (!ring_buffer || !size) {
		return 0;
	}

	const unsigned char *buffer = p;
	unsigned long lock_flags;
	size_t written = 0;

	while (written < size) {
		spin_lock_irqsave(&ring_buffer->r_lock, &lock_flags);

		/* Fill as many free slots as are currently available. */
		while (ringbuffer_avail(ring_buffer) > 0 && written < size) {
			ring_buffer->r_buffer[ring_buffer->r_write_ptr] = buffer[written];
			increment_write(ring_buffer);
			written++;
		}

		/* New data is in the ring; let any blocked readers retry. */
		wakeup_queue(&ring_buffer->r_wait_readers);

		/* Non-blocking callers take one pass and leave. */
		if (flags & S_NOBLOCK) {
			spin_unlock_irqrestore(&ring_buffer->r_lock, lock_flags);
			break;
		}

		/* Enqueue on the writer wait queue BEFORE dropping the lock so
		 * a reader's wakeup issued after the unlock still finds us.
		 * NOTE(review): assumes thread_wait_begin()/wakeup_queue()
		 * implement the usual sleep/wakeup handshake (a wakeup between
		 * unlock and schedule() is not lost) — confirm in sched.h. */
		struct wait_item waiter;
		wait_item_init(&waiter, current_thread());
		thread_wait_begin(&waiter, &ring_buffer->r_wait_writers);
		spin_unlock_irqrestore(&ring_buffer->r_lock, lock_flags);

		/* Only yield if the request is not yet fully written. */
		if (written < size) {
			schedule(SCHED_NORMAL);
		}

		thread_wait_end(&waiter, &ring_buffer->r_wait_writers);
	}

	/* Final nudge for readers in case the last pass buffered data. */
	wakeup_queue(&ring_buffer->r_wait_readers);
	return written;
}
|
|
|
|
/*
 * Allocate and initialize a ring buffer with a backing store of `size`
 * bytes.  Returns the new ring buffer, or NULL if either the descriptor
 * or the backing store could not be obtained.  The caller owns the
 * result and releases it with ringbuffer_destroy().
 */
struct ringbuffer *ringbuffer_create(size_t size)
{
	struct ringbuffer *rb = kzalloc(sizeof(*rb), VM_NORMAL);

	if (!rb) {
		return NULL;
	}

	if (ringbuffer_init(rb, size) == KERN_OK) {
		return rb;
	}

	/* Backing-store allocation failed; undo the descriptor allocation. */
	kfree(rb);
	return NULL;
}
|
|
|
|
/*
 * Free a ring buffer previously obtained from ringbuffer_create():
 * releases the backing store, then the descriptor itself.
 *
 * A NULL argument is tolerated (consistent with the NULL guards in
 * ringbuffer_read()/ringbuffer_write()); without this guard,
 * ringbuffer_deinit() would dereference the NULL pointer.
 */
void ringbuffer_destroy(struct ringbuffer *ring_buffer)
{
	if (!ring_buffer) {
		return;
	}

	ringbuffer_deinit(ring_buffer);
	kfree(ring_buffer);
}
|
|
|
|
/*
 * Initialize an already-allocated ring buffer descriptor with a backing
 * store of `size` bytes.  One slot is always kept empty to distinguish
 * full from empty, so the ring holds at most size - 1 bytes.
 *
 * Returns KERN_OK on success, KERN_NO_MEMORY if the backing store
 * could not be obtained or `size` is 0.
 */
kern_status_t ringbuffer_init(struct ringbuffer *buf, size_t size)
{
	/* A zero-size ring can never hold a byte, and worse, the unsigned
	 * `r_size - 1` in ringbuffer_avail() would wrap to SIZE_MAX while
	 * the wrap checks in increment_read()/increment_write() would never
	 * fire, allowing out-of-bounds writes.  Reject it up front; no
	 * dedicated invalid-argument status is visible here, so report it
	 * as a failed allocation. */
	if (!size) {
		return KERN_NO_MEMORY;
	}

	buf->r_buffer = kmalloc(size, VM_NORMAL);
	if (!buf->r_buffer) {
		return KERN_NO_MEMORY;
	}

	buf->r_write_ptr = 0;
	buf->r_read_ptr = 0;
	buf->r_size = size;
	buf->r_lock = SPIN_LOCK_INIT;

	return KERN_OK;
}
|
|
|
|
kern_status_t ringbuffer_deinit(struct ringbuffer *buf)
|
|
{
|
|
kfree(buf->r_buffer);
|
|
buf->r_buffer = NULL;
|
|
|
|
return KERN_OK;
|
|
}
|