*/
#define TTYB_MEM_LIMIT 65536
+/*
+ * We default to dicing tty buffer allocations to this many characters
+ * in order to avoid multiple page allocations. We know the size of
+ * tty_buffer itself but it must also be taken into account that the
+ * buffer is 256 byte aligned. See tty_buffer_find for the allocation
+ * logic this must match.
+ */
+
+#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
+
+
+/**
+ * tty_buffer_lock_exclusive - gain exclusive access to buffer
+ * tty_buffer_unlock_exclusive - release exclusive access
+ *
+ * @port: tty_port owning the flip buffer
+ *
+ * Guarantees safe use of the line discipline's receive_buf() method by
+ * excluding the buffer work and any pending flush from using the flip
+ * buffer. Data can continue to be added concurrently to the flip buffer
+ * from the driver side.
+ *
+ * On release, the buffer work is restarted if there is data in the
+ * flip buffer.
+ */
+
+void tty_buffer_lock_exclusive(struct tty_port *port)
+{
+ struct tty_bufhead *buf = &port->buf;
+
+ /* Raise priority before taking the lock: flush_to_ldisc() polls
+  * buf->priority inside its loop and breaks out early, so the lock
+  * is surrendered promptly rather than after a full buffer pass.
+  */
+ atomic_inc(&buf->priority);
+ mutex_lock(&buf->lock);
+}
+
+void tty_buffer_unlock_exclusive(struct tty_port *port)
+{
+ struct tty_bufhead *buf = &port->buf;
+ int restart;
+
+ /* Committed-but-unread data in the head buffer means the buffer
+  * work was (or may have been) stalled by our priority claim and
+  * must be rekicked once we drop exclusive access.
+  */
+ restart = buf->head->commit != buf->head->read;
+
+ /* Drop priority before unlocking so a rescheduled flush_to_ldisc()
+  * does not immediately see a stale exclusive-access request.
+  */
+ atomic_dec(&buf->priority);
+ mutex_unlock(&buf->lock);
+ if (restart)
+  queue_work(system_unbound_wq, &buf->work);
+}
/**
* tty_buffer_space_avail - return unused buffer space
llist_add(&b->free, &buf->free);
}
-/**
- * __tty_buffer_flush - flush full tty buffers
- * @tty: tty to flush
- *
- * flush all the buffers containing receive data. Caller must
- * hold the buffer lock and must have ensured no parallel flush to
- * ldisc is running.
- */
-
-static void __tty_buffer_flush(struct tty_port *port)
-{
- struct tty_bufhead *buf = &port->buf;
- struct tty_buffer *next;
-
- while ((next = buf->head->next) != NULL) {
- tty_buffer_free(port, buf->head);
- buf->head = next;
- }
- WARN_ON(buf->head != buf->tail);
- buf->head->read = buf->head->commit;
-}
-
/**
* tty_buffer_flush - flush full tty buffers
* @tty: tty to flush
* being processed by flush_to_ldisc then we defer the processing
* to that function
*
- * Locking: takes flush_mutex to ensure single-threaded flip buffer
+ * Locking: takes buffer lock to ensure single-threaded flip buffer
* 'consumer'
*/
{
struct tty_port *port = tty->port;
struct tty_bufhead *buf = &port->buf;
+ struct tty_buffer *next;
- buf->flushpending = 1;
+ atomic_inc(&buf->priority);
- mutex_lock(&buf->flush_mutex);
- __tty_buffer_flush(port);
- buf->flushpending = 0;
- mutex_unlock(&buf->flush_mutex);
+ mutex_lock(&buf->lock);
+ while ((next = buf->head->next) != NULL) {
+ tty_buffer_free(port, buf->head);
+ buf->head = next;
+ }
+ buf->head->read = buf->head->commit;
+ atomic_dec(&buf->priority);
+ mutex_unlock(&buf->lock);
}
/**
*
* The receive_buf method is single threaded for each tty instance.
*
- * Locking: takes flush_mutex to ensure single-threaded flip buffer
+ * Locking: takes buffer lock to ensure single-threaded flip buffer
* 'consumer'
*/
if (disc == NULL)
return;
- mutex_lock(&buf->flush_mutex);
+ mutex_lock(&buf->lock);
while (1) {
struct tty_buffer *head = buf->head;
int count;
- /* Ldisc or user is trying to flush the buffers. */
- if (buf->flushpending)
+ /* Ldisc or user is trying to gain exclusive access */
+ if (atomic_read(&buf->priority))
break;
count = head->commit - head->read;
break;
}
- mutex_unlock(&buf->flush_mutex);
+ mutex_unlock(&buf->lock);
tty_ldisc_deref(disc);
}
{
struct tty_bufhead *buf = &port->buf;
- mutex_init(&buf->flush_mutex);
+ mutex_init(&buf->lock);
tty_buffer_reset(&buf->sentinel, 0);
buf->head = &buf->sentinel;
buf->tail = &buf->sentinel;
init_llist_head(&buf->free);
atomic_set(&buf->memory_used, 0);
- buf->flushpending = 0;
+ atomic_set(&buf->priority, 0);
INIT_WORK(&buf->work, flush_to_ldisc);
}
-