From 1a401a749cb1f06e637ef0e91fb8c120963aa356 Mon Sep 17 00:00:00 2001
From: Dave Airlie <airlied@gmail.com>
Date: Mon, 25 Feb 2013 14:47:55 +1000
Subject: [PATCH 2/2] drm: add new QXL driver. (v1.3)

QXL is a paravirtual graphics device used by the Spice virtual desktop
interface.

The driver uses GEM and TTM to manage memory. QXL hardware fencing,
however, is quite different from what TTM normally expects: we have to
keep track of a number of non-linear fence IDs per BO that we need the
hardware to release.
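
The idea is that each BO owns a small fence structure recording which
release IDs are still outstanding; instead of waiting on a linear seqno,
TTM waits for that set to drain to zero. Roughly (a simplified sketch of
the shape of it - see qxl_fence.c for the real code; the field and
helper bodies here are illustrative):

    struct qxl_fence {
            struct radix_tree_root tree;   /* outstanding release ids */
            uint32_t num_active_releases;  /* ids the hw hasn't returned */
    };

    /* a command referencing the bo was queued */
    static int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id)
    {
            radix_tree_insert(&qfence->tree, rel_id, qfence);
            qfence->num_active_releases++;
            return 0;
    }

    /* the release-ring worker got the id back from the hardware */
    static int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
    {
            radix_tree_delete(&qfence->tree, rel_id);
            qfence->num_active_releases--;
            return 0;
    }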

The releases are freed from a workqueue that wakes up and processes the
release ring.
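
The worker itself just drains the ring via qxl_garbage_collect(); only
its shape is shown here, since the actual hook is wired up during device
init and not part of this excerpt:

    static void qxl_gc_work(struct work_struct *work)
    {
            struct qxl_device *qdev =
                    container_of(work, struct qxl_device, gc_work);

            qxl_garbage_collect(qdev);
    }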

Releases are suballocated from a BO. There are three release categories:
drawables, surfaces and cursor commands. The hardware also has three
rings, for command, cursor and release handling.
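
Command submission then follows the same pattern everywhere in the
driver - allocate and fill a release, fence it against the BO, push it
on the right ring (compare qxl_hide_cursor() in qxl_display.c below):

    qxl_alloc_release_reserved(qdev, sizeof(*cmd),
                               QXL_RELEASE_CURSOR_CMD, &release, NULL);
    cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
    cmd->type = QXL_CURSOR_HIDE;            /* fill in the command */
    qxl_release_unmap(qdev, release, &cmd->release_info);

    qxl_fence_releaseable(qdev, release);   /* track the id on the bo */
    qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
    qxl_release_unreserve(qdev, release);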

The hardware also has a surface ID tracking mechanism, which the driver
encapsulates completely inside the kernel; userspace never sees the
actual hardware surface IDs.
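
Surface IDs are handed out from an idr under a spinlock, and when the
device runs out of hardware surfaces the driver reaps idle ones and
retries (condensed from qxl_surface_id_alloc() in qxl_cmd.c below):

    again:
            idr_pre_get(&qdev->surf_id_idr, GFP_ATOMIC);
            spin_lock(&qdev->surf_id_idr_lock);
            idr_ret = idr_get_new_above(&qdev->surf_id_idr, NULL, 1, &handle);
            spin_unlock(&qdev->surf_id_idr_lock);
            if (idr_ret == -EAGAIN)
                    goto again;
            if (handle >= qdev->rom->n_surfaces) {
                    /* out of hw surface ids - reap some and retry */
                    qxl_reap_surface_id(qdev, 2);
                    goto again;
            }
            surf->surface_id = handle;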

This requires a newer version of the QXL userspace driver, so it
shouldn't be enabled until that has landed in your distro of choice.

Authors: Dave Airlie, Alon Levy

v1.1: fixup some issues in the ioctl interface with padding
v1.2: add module device table
v1.3: fix nomodeset, fbcon leak, dumb bo create, release ring irq,
      don't try flush release ring (broken hw), fix -modesetting.

Signed-off-by: Alon Levy <alevy@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
---
 drivers/gpu/drm/Kconfig           |   2 +
 drivers/gpu/drm/Makefile          |   1 +
 drivers/gpu/drm/qxl/Kconfig       |  10 +
 drivers/gpu/drm/qxl/Makefile      |   9 +
 drivers/gpu/drm/qxl/qxl_cmd.c     | 707 +++++++++++++++++++++++++++
 drivers/gpu/drm/qxl/qxl_debugfs.c | 135 ++++++
 drivers/gpu/drm/qxl/qxl_dev.h     | 879 ++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/qxl/qxl_display.c | 981 ++++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/qxl/qxl_draw.c    | 384 +++++++++++++++
 drivers/gpu/drm/qxl/qxl_drv.c     | 145 ++++++
 drivers/gpu/drm/qxl/qxl_drv.h     | 566 ++++++++++++++++++++++
 drivers/gpu/drm/qxl/qxl_dumb.c    |  93 ++++
 drivers/gpu/drm/qxl/qxl_fb.c      | 567 ++++++++++++++++++++++
 drivers/gpu/drm/qxl/qxl_fence.c   |  97 ++++
 drivers/gpu/drm/qxl/qxl_gem.c     | 178 +++++++
 drivers/gpu/drm/qxl/qxl_image.c   | 120 +++++
 drivers/gpu/drm/qxl/qxl_ioctl.c   | 411 ++++++++++++++++
 drivers/gpu/drm/qxl/qxl_irq.c     |  97 ++++
 drivers/gpu/drm/qxl/qxl_kms.c     | 302 ++++++++++++
 drivers/gpu/drm/qxl/qxl_object.c  | 365 ++++++++++++++
 drivers/gpu/drm/qxl/qxl_object.h  | 112 +++++
 drivers/gpu/drm/qxl/qxl_release.c | 307 ++++++++++++
 drivers/gpu/drm/qxl/qxl_ttm.c     | 577 ++++++++++++++++++++++
 include/uapi/drm/Kbuild           |   1 +
 include/uapi/drm/qxl_drm.h        | 152 ++++++
 25 files changed, 7198 insertions(+)
 create mode 100644 drivers/gpu/drm/qxl/Kconfig
 create mode 100644 drivers/gpu/drm/qxl/Makefile
 create mode 100644 drivers/gpu/drm/qxl/qxl_cmd.c
 create mode 100644 drivers/gpu/drm/qxl/qxl_debugfs.c
 create mode 100644 drivers/gpu/drm/qxl/qxl_dev.h
 create mode 100644 drivers/gpu/drm/qxl/qxl_display.c
 create mode 100644 drivers/gpu/drm/qxl/qxl_draw.c
 create mode 100644 drivers/gpu/drm/qxl/qxl_drv.c
 create mode 100644 drivers/gpu/drm/qxl/qxl_drv.h
 create mode 100644 drivers/gpu/drm/qxl/qxl_dumb.c
 create mode 100644 drivers/gpu/drm/qxl/qxl_fb.c
 create mode 100644 drivers/gpu/drm/qxl/qxl_fence.c
 create mode 100644 drivers/gpu/drm/qxl/qxl_gem.c
 create mode 100644 drivers/gpu/drm/qxl/qxl_image.c
 create mode 100644 drivers/gpu/drm/qxl/qxl_ioctl.c
 create mode 100644 drivers/gpu/drm/qxl/qxl_irq.c
 create mode 100644 drivers/gpu/drm/qxl/qxl_kms.c
 create mode 100644 drivers/gpu/drm/qxl/qxl_object.c
 create mode 100644 drivers/gpu/drm/qxl/qxl_object.h
 create mode 100644 drivers/gpu/drm/qxl/qxl_release.c
 create mode 100644 drivers/gpu/drm/qxl/qxl_ttm.c
 create mode 100644 include/uapi/drm/qxl_drm.h

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1e82882..19b8e0d 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -220,3 +220,5 @@ source "drivers/gpu/drm/tegra/Kconfig"
 source "drivers/gpu/drm/omapdrm/Kconfig"
 
 source "drivers/gpu/drm/tilcdc/Kconfig"
+
+source "drivers/gpu/drm/qxl/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 0d59b24..6a42115 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -52,4 +52,5 @@ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-$(CONFIG_DRM_OMAP)	+= omapdrm/
 obj-$(CONFIG_DRM_TILCDC)	+= tilcdc/
+obj-$(CONFIG_DRM_QXL) += qxl/
 obj-y			+= i2c/
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
new file mode 100644
index 0000000..2f1a57e
--- /dev/null
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -0,0 +1,10 @@
+config DRM_QXL
+	tristate "QXL virtual GPU"
+	depends on DRM && PCI
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select DRM_KMS_HELPER
+	select DRM_TTM
+	help
+		QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/Makefile b/drivers/gpu/drm/qxl/Makefile
new file mode 100644
index 0000000..ea046ba
--- /dev/null
+++ b/drivers/gpu/drm/qxl/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_fb.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_fence.o qxl_release.o
+
+obj-$(CONFIG_DRM_QXL) += qxl.o
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
new file mode 100644
index 0000000..804b411
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -0,0 +1,707 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+/* QXL cmd/ring handling */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
+
+struct ring {
+	struct qxl_ring_header      header;
+	uint8_t                     elements[0];
+};
+
+struct qxl_ring {
+	struct ring	       *ring;
+	int			element_size;
+	int			n_elements;
+	int			prod_notify;
+	wait_queue_head_t      *push_event;
+	spinlock_t             lock;
+};
+
+void qxl_ring_free(struct qxl_ring *ring)
+{
+	kfree(ring);
+}
+
+struct qxl_ring *
+qxl_ring_create(struct qxl_ring_header *header,
+		int element_size,
+		int n_elements,
+		int prod_notify,
+		bool set_prod_notify,
+		wait_queue_head_t *push_event)
+{
+	struct qxl_ring *ring;
+
+	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
+	if (!ring)
+		return NULL;
+
+	ring->ring = (struct ring *)header;
+	ring->element_size = element_size;
+	ring->n_elements = n_elements;
+	ring->prod_notify = prod_notify;
+	ring->push_event = push_event;
+	if (set_prod_notify)
+		header->notify_on_prod = ring->n_elements;
+	spin_lock_init(&ring->lock);
+	return ring;
+}
+
+static int qxl_check_header(struct qxl_ring *ring)
+{
+	int ret;
+	struct qxl_ring_header *header = &(ring->ring->header);
+	unsigned long flags;
+	spin_lock_irqsave(&ring->lock, flags);
+	ret = header->prod - header->cons < header->num_items;
+	if (ret == 0)
+		header->notify_on_cons = header->cons + 1;
+	spin_unlock_irqrestore(&ring->lock, flags);
+	return ret;
+}
+
+static int qxl_check_idle(struct qxl_ring *ring)
+{
+	int ret;
+	struct qxl_ring_header *header = &(ring->ring->header);
+	unsigned long flags;
+	spin_lock_irqsave(&ring->lock, flags);
+	ret = header->prod == header->cons;
+	spin_unlock_irqrestore(&ring->lock, flags);
+	return ret;
+}
+
+int qxl_ring_push(struct qxl_ring *ring,
+		  const void *new_elt, bool interruptible)
+{
+	struct qxl_ring_header *header = &(ring->ring->header);
+	uint8_t *elt;
+	int idx, ret;
+	unsigned long flags;
+	spin_lock_irqsave(&ring->lock, flags);
+	if (header->prod - header->cons == header->num_items) {
+		header->notify_on_cons = header->cons + 1;
+		mb();
+		spin_unlock_irqrestore(&ring->lock, flags);
+		if (!drm_can_sleep()) {
+			while (!qxl_check_header(ring))
+				udelay(1);
+		} else {
+			if (interruptible) {
+				ret = wait_event_interruptible(*ring->push_event,
+							       qxl_check_header(ring));
+				if (ret)
+					return ret;
+			} else {
+				wait_event(*ring->push_event,
+					   qxl_check_header(ring));
+			}
+
+		}
+		spin_lock_irqsave(&ring->lock, flags);
+	}
+
+	idx = header->prod & (ring->n_elements - 1);
+	elt = ring->ring->elements + idx * ring->element_size;
+
+	memcpy((void *)elt, new_elt, ring->element_size);
+
+	header->prod++;
+
+	mb();
+
+	if (header->prod == header->notify_on_prod)
+		outb(0, ring->prod_notify);
+
+	spin_unlock_irqrestore(&ring->lock, flags);
+	return 0;
+}
+
+bool qxl_ring_pop(struct qxl_ring *ring,
+		  void *element)
+{
+	volatile struct qxl_ring_header *header = &(ring->ring->header);
+	volatile uint8_t *ring_elt;
+	int idx;
+	unsigned long flags;
+	spin_lock_irqsave(&ring->lock, flags);
+	if (header->cons == header->prod) {
+		header->notify_on_prod = header->cons + 1;
+		spin_unlock_irqrestore(&ring->lock, flags);
+		return false;
+	}
+
+	idx = header->cons & (ring->n_elements - 1);
+	ring_elt = ring->ring->elements + idx * ring->element_size;
+
+	memcpy(element, (void *)ring_elt, ring->element_size);
+
+	header->cons++;
+
+	spin_unlock_irqrestore(&ring->lock, flags);
+	return true;
+}
+
+void qxl_ring_wait_idle(struct qxl_ring *ring)
+{
+	struct qxl_ring_header *header = &(ring->ring->header);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ring->lock, flags);
+	if (ring->ring->header.cons < ring->ring->header.prod) {
+		header->notify_on_cons = header->prod;
+		mb();
+		spin_unlock_irqrestore(&ring->lock, flags);
+		wait_event_interruptible(*ring->push_event,
+					 qxl_check_idle(ring));
+		spin_lock_irqsave(&ring->lock, flags);
+	}
+	spin_unlock_irqrestore(&ring->lock, flags);
+}
+
+int
+qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+			      uint32_t type, bool interruptible)
+{
+	struct qxl_command cmd;
+
+	cmd.type = type;
+	cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+
+	return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
+}
+
+int
+qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+			     uint32_t type, bool interruptible)
+{
+	struct qxl_command cmd;
+
+	cmd.type = type;
+	cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+
+	return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
+}
+
+bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
+{
+	if (!qxl_check_idle(qdev->release_ring)) {
+		queue_work(qdev->gc_queue, &qdev->gc_work);
+		if (flush)
+			flush_work(&qdev->gc_work);
+		return true;
+	}
+	return false;
+}
+
+int qxl_garbage_collect(struct qxl_device *qdev)
+{
+	struct qxl_release *release;
+	uint64_t id, next_id;
+	int i = 0;
+	int ret;
+	union qxl_release_info *info;
+
+	while (qxl_ring_pop(qdev->release_ring, &id)) {
+		QXL_INFO(qdev, "popped %lld\n", id);
+		while (id) {
+			release = qxl_release_from_id_locked(qdev, id);
+			if (release == NULL)
+				break;
+
+			ret = qxl_release_reserve(qdev, release, false);
+			if (ret) {
+				qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id);
+				DRM_ERROR("failed to reserve release %lld\n", id);
+			}
+
+			info = qxl_release_map(qdev, release);
+			next_id = info->next;
+			qxl_release_unmap(qdev, release, info);
+
+			qxl_release_unreserve(qdev, release);
+			QXL_INFO(qdev, "popped %lld, next %lld\n", id,
+				next_id);
+
+			switch (release->type) {
+			case QXL_RELEASE_DRAWABLE:
+			case QXL_RELEASE_SURFACE_CMD:
+			case QXL_RELEASE_CURSOR_CMD:
+				break;
+			default:
+				DRM_ERROR("unexpected release type\n");
+				break;
+			}
+			id = next_id;
+
+			qxl_release_free(qdev, release);
+			++i;
+		}
+	}
+
+	QXL_INFO(qdev, "%s: %d\n", __func__, i);
+
+	return i;
+}
+
+int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+			  struct qxl_bo **_bo)
+{
+	struct qxl_bo *bo;
+	int ret;
+
+	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
+			    QXL_GEM_DOMAIN_VRAM, NULL, &bo);
+	if (ret) {
+		DRM_ERROR("failed to allocate VRAM BO\n");
+		return ret;
+	}
+	ret = qxl_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		goto out_unref;
+
+	*_bo = bo;
+	return 0;
+out_unref:
+	qxl_bo_unref(&bo);
+	return ret;
+}
+
+static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
+{
+	int irq_num;
+	long addr = qdev->io_base + port;
+	int ret;
+
+	mutex_lock(&qdev->async_io_mutex);
+	irq_num = atomic_read(&qdev->irq_received_io_cmd);
+
+
+	if (qdev->last_sent_io_cmd > irq_num) {
+		ret = wait_event_interruptible(qdev->io_cmd_event,
+					       atomic_read(&qdev->irq_received_io_cmd) > irq_num);
+		if (ret)
+			goto out;
+		irq_num = atomic_read(&qdev->irq_received_io_cmd);
+	}
+	outb(val, addr);
+	qdev->last_sent_io_cmd = irq_num + 1;
+	ret = wait_event_interruptible(qdev->io_cmd_event,
+				       atomic_read(&qdev->irq_received_io_cmd) > irq_num);
+out:
+	mutex_unlock(&qdev->async_io_mutex);
+	return ret;
+}
+
+static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
+{
+	int ret;
+
+restart:
+	ret = wait_for_io_cmd_user(qdev, val, port);
+	if (ret == -ERESTARTSYS)
+		goto restart;
+}
+
+int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
+			const struct qxl_rect *area)
+{
+	int surface_id;
+	uint32_t surface_width, surface_height;
+	int ret;
+
+	if (!surf->hw_surf_alloc)
+		DRM_ERROR("got io update area with no hw surface\n");
+
+	if (surf->is_primary)
+		surface_id = 0;
+	else
+		surface_id = surf->surface_id;
+	surface_width = surf->surf.width;
+	surface_height = surf->surf.height;
+
+	if (area->left < 0 || area->top < 0 ||
+	    area->right > surface_width || area->bottom > surface_height) {
+		qxl_io_log(qdev, "%s: not doing area update for "
+			   "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left,
+			   area->top, area->right, area->bottom, surface_width, surface_height);
+		return -EINVAL;
+	}
+	mutex_lock(&qdev->update_area_mutex);
+	qdev->ram_header->update_area = *area;
+	qdev->ram_header->update_surface = surface_id;
+	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC);
+	mutex_unlock(&qdev->update_area_mutex);
+	return ret;
+}
+
+void qxl_io_notify_oom(struct qxl_device *qdev)
+{
+	outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
+}
+
+void qxl_io_flush_release(struct qxl_device *qdev)
+{
+	outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
+}
+
+void qxl_io_flush_surfaces(struct qxl_device *qdev)
+{
+	wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
+}
+
+
+void qxl_io_destroy_primary(struct qxl_device *qdev)
+{
+	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
+}
+
+void qxl_io_create_primary(struct qxl_device *qdev, unsigned width,
+			   unsigned height, unsigned offset, struct qxl_bo *bo)
+{
+	struct qxl_surface_create *create;
+
+	QXL_INFO(qdev, "%s: qdev %p, ram_header %p\n", __func__, qdev,
+		 qdev->ram_header);
+	create = &qdev->ram_header->create_surface;
+	create->format = bo->surf.format;
+	create->width = width;
+	create->height = height;
+	create->stride = bo->surf.stride;
+	create->mem = qxl_bo_physical_address(qdev, bo, offset);
+
+	QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem,
+		 bo->kptr);
+
+	create->flags = QXL_SURF_FLAG_KEEP_DATA;
+	create->type = QXL_SURF_TYPE_PRIMARY;
+
+	wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
+}
+
+void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
+{
+	QXL_INFO(qdev, "qxl_memslot_add %d\n", id);
+	wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
+}
+
+void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args);
+	va_end(args);
+	/*
+	 * Do not do a DRM output here - this will call printk, which will
+	 * call back into qxl for rendering (qxl_fb)
+	 */
+	outb(0, qdev->io_base + QXL_IO_LOG);
+}
+
+void qxl_io_reset(struct qxl_device *qdev)
+{
+	outb(0, qdev->io_base + QXL_IO_RESET);
+}
+
+void qxl_io_monitors_config(struct qxl_device *qdev)
+{
+	qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__,
+		   qdev->monitors_config ?
+		   qdev->monitors_config->count : -1,
+		   qdev->monitors_config && qdev->monitors_config->count ?
+		   qdev->monitors_config->heads[0].width : -1,
+		   qdev->monitors_config && qdev->monitors_config->count ?
+		   qdev->monitors_config->heads[0].height : -1,
+		   qdev->monitors_config && qdev->monitors_config->count ?
+		   qdev->monitors_config->heads[0].x : -1,
+		   qdev->monitors_config && qdev->monitors_config->count ?
+		   qdev->monitors_config->heads[0].y : -1
+		   );
+
+	wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
+}
+
+int qxl_surface_id_alloc(struct qxl_device *qdev,
+		      struct qxl_bo *surf)
+{
+	uint32_t handle = -ENOMEM;
+	int idr_ret;
+	int count = 0;
+again:
+	if (idr_pre_get(&qdev->surf_id_idr, GFP_ATOMIC) == 0) {
+		DRM_ERROR("Out of memory for surf idr\n");
+		/* don't free surf here - the caller owns the bo */
+		goto alloc_fail;
+	}
+
+	spin_lock(&qdev->surf_id_idr_lock);
+	idr_ret = idr_get_new_above(&qdev->surf_id_idr, NULL, 1, &handle);
+	spin_unlock(&qdev->surf_id_idr_lock);
+
+	if (idr_ret == -EAGAIN)
+		goto again;
+
+	if (handle >= qdev->rom->n_surfaces) {
+		count++;
+		spin_lock(&qdev->surf_id_idr_lock);
+		idr_remove(&qdev->surf_id_idr, handle);
+		spin_unlock(&qdev->surf_id_idr_lock);
+		qxl_reap_surface_id(qdev, 2);
+		goto again;
+	}
+	surf->surface_id = handle;
+
+	spin_lock(&qdev->surf_id_idr_lock);
+	qdev->last_alloced_surf_id = handle;
+	spin_unlock(&qdev->surf_id_idr_lock);
+ alloc_fail:
+	return 0;
+}
+
+void qxl_surface_id_dealloc(struct qxl_device *qdev,
+			    uint32_t surface_id)
+{
+	spin_lock(&qdev->surf_id_idr_lock);
+	idr_remove(&qdev->surf_id_idr, surface_id);
+	spin_unlock(&qdev->surf_id_idr_lock);
+}
+
+int qxl_hw_surface_alloc(struct qxl_device *qdev,
+			 struct qxl_bo *surf,
+			 struct ttm_mem_reg *new_mem)
+{
+	struct qxl_surface_cmd *cmd;
+	struct qxl_release *release;
+	int ret;
+
+	if (surf->hw_surf_alloc)
+		return 0;
+
+	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
+						 NULL,
+						 &release);
+	if (ret)
+		return ret;
+
+	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
+	cmd->type = QXL_SURFACE_CMD_CREATE;
+	cmd->u.surface_create.format = surf->surf.format;
+	cmd->u.surface_create.width = surf->surf.width;
+	cmd->u.surface_create.height = surf->surf.height;
+	cmd->u.surface_create.stride = surf->surf.stride;
+	if (new_mem) {
+		int slot_id = surf->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
+		struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);
+
+		/* TODO - need to hold one of the locks to read tbo.offset */
+		cmd->u.surface_create.data = slot->high_bits;
+
+		cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset;
+	} else
+		cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
+	cmd->surface_id = surf->surface_id;
+	qxl_release_unmap(qdev, release, &cmd->release_info);
+
+	surf->surf_create = release;
+
+	/* no need to add a release to the fence for this bo,
+	   since it is only released when we ask to destroy the surface
+	   and it would never signal otherwise */
+	qxl_fence_releaseable(qdev, release);
+
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
+
+	qxl_release_unreserve(qdev, release);
+
+	surf->hw_surf_alloc = true;
+	spin_lock(&qdev->surf_id_idr_lock);
+	idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
+	spin_unlock(&qdev->surf_id_idr_lock);
+	return 0;
+}
+
+int qxl_hw_surface_dealloc(struct qxl_device *qdev,
+			   struct qxl_bo *surf)
+{
+	struct qxl_surface_cmd *cmd;
+	struct qxl_release *release;
+	int ret;
+	int id;
+
+	if (!surf->hw_surf_alloc)
+		return 0;
+
+	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
+						 surf->surf_create,
+						 &release);
+	if (ret)
+		return ret;
+
+	surf->surf_create = NULL;
+	/* remove the surface from the idr, but not the surface id yet */
+	spin_lock(&qdev->surf_id_idr_lock);
+	idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
+	spin_unlock(&qdev->surf_id_idr_lock);
+	surf->hw_surf_alloc = false;
+
+	id = surf->surface_id;
+	surf->surface_id = 0;
+
+	release->surface_release_id = id;
+	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
+	cmd->type = QXL_SURFACE_CMD_DESTROY;
+	cmd->surface_id = id;
+	qxl_release_unmap(qdev, release, &cmd->release_info);
+
+	qxl_fence_releaseable(qdev, release);
+
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
+
+	qxl_release_unreserve(qdev, release);
+
+
+	return 0;
+}
+
+int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
+{
+	struct qxl_rect rect;
+	int ret;
+
+	/* if we are evicting, we need to make sure the surface is up
+	   to date */
+	rect.left = 0;
+	rect.right = surf->surf.width;
+	rect.top = 0;
+	rect.bottom = surf->surf.height;
+retry:
+	ret = qxl_io_update_area(qdev, surf, &rect);
+	if (ret == -ERESTARTSYS)
+		goto retry;
+	return ret;
+}
+
+void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
+{
+	/* no need to update area if we are just freeing the surface normally */
+	if (do_update_area)
+		qxl_update_surface(qdev, surf);
+
+	/* nuke the surface id at the hw */
+	qxl_hw_surface_dealloc(qdev, surf);
+}
+
+void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
+{
+	mutex_lock(&qdev->surf_evict_mutex);
+	qxl_surface_evict_locked(qdev, surf, do_update_area);
+	mutex_unlock(&qdev->surf_evict_mutex);
+}
+
+static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
+{
+	int ret;
+
+	ret = qxl_bo_reserve(surf, false);
+	if (ret == -EBUSY)
+		return -EBUSY;
+
+	if (surf->fence.num_active_releases > 0 && stall == false) {
+		qxl_bo_unreserve(surf);
+		return -EBUSY;
+	}
+
+	if (stall)
+		mutex_unlock(&qdev->surf_evict_mutex);
+
+	spin_lock(&surf->tbo.bdev->fence_lock);
+	ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
+	spin_unlock(&surf->tbo.bdev->fence_lock);
+
+	if (stall)
+		mutex_lock(&qdev->surf_evict_mutex);
+	if (ret == -EBUSY) {
+		qxl_bo_unreserve(surf);
+		return -EBUSY;
+	}
+
+	qxl_surface_evict_locked(qdev, surf, true);
+	qxl_bo_unreserve(surf);
+	return 0;
+}
+
+static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
+{
+	int num_reaped = 0;
+	int i, ret;
+	bool stall = false;
+	int start = 0;
+
+	mutex_lock(&qdev->surf_evict_mutex);
+again:
+
+	spin_lock(&qdev->surf_id_idr_lock);
+	start = qdev->last_alloced_surf_id + 1;
+	spin_unlock(&qdev->surf_id_idr_lock);
+
+	for (i = start; i < start + qdev->rom->n_surfaces; i++) {
+		void *objptr;
+		int surfid = i % qdev->rom->n_surfaces;
+
+		/* this avoids the case where the object is in the
+		   idr but has been evicted halfway - it makes the
+		   idr lookup atomic with the eviction */
+		spin_lock(&qdev->surf_id_idr_lock);
+		objptr = idr_find(&qdev->surf_id_idr, surfid);
+		spin_unlock(&qdev->surf_id_idr_lock);
+
+		if (!objptr)
+			continue;
+
+		ret = qxl_reap_surf(qdev, objptr, stall);
+		if (ret == 0)
+			num_reaped++;
+		if (num_reaped >= max_to_reap)
+			break;
+	}
+	if (num_reaped == 0 && stall == false) {
+		stall = true;
+		goto again;
+	}
+
+	mutex_unlock(&qdev->surf_evict_mutex);
+	if (num_reaped) {
+		usleep_range(500, 1000);
+		qxl_queue_garbage_collect(qdev, true);
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
new file mode 100644
index 0000000..c630152
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ *  Alon Levy <alevy@redhat.com>
+ */
+
+#include <linux/debugfs.h>
+
+#include "drmP.h"
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+
+static int
+qxl_debugfs_irq_received(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct qxl_device *qdev = node->minor->dev->dev_private;
+
+	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
+	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
+	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
+	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
+	seq_printf(m, "%d\n", qdev->irq_received_error);
+	return 0;
+}
+
+static int
+qxl_debugfs_buffers_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct qxl_device *qdev = node->minor->dev->dev_private;
+	struct qxl_bo *bo;
+
+	list_for_each_entry(bo, &qdev->gem.objects, list) {
+		seq_printf(m, "size %ld, pc %d, sync obj %p, num releases %d\n",
+			   (unsigned long)bo->gem_base.size, bo->pin_count,
+			   bo->tbo.sync_obj, bo->fence.num_active_releases);
+	}
+	return 0;
+}
+
+static struct drm_info_list qxl_debugfs_list[] = {
+	{ "irq_received", qxl_debugfs_irq_received, 0, NULL },
+	{ "qxl_buffers", qxl_debugfs_buffers_info, 0, NULL },
+};
+#define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list)
+
+int
+qxl_debugfs_init(struct drm_minor *minor)
+{
+	drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
+				 minor->debugfs_root, minor);
+	return 0;
+}
+
+void
+qxl_debugfs_takedown(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
+				 minor);
+}
+
+int qxl_debugfs_add_files(struct qxl_device *qdev,
+			  struct drm_info_list *files,
+			  unsigned nfiles)
+{
+	unsigned i;
+
+	for (i = 0; i < qdev->debugfs_count; i++) {
+		if (qdev->debugfs[i].files == files) {
+			/* Already registered */
+			return 0;
+		}
+	}
+
+	i = qdev->debugfs_count + 1;
+	if (i > QXL_DEBUGFS_MAX_COMPONENTS) {
+		DRM_ERROR("Reached maximum number of debugfs components.\n");
+		DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n");
+		return -EINVAL;
+	}
+	qdev->debugfs[qdev->debugfs_count].files = files;
+	qdev->debugfs[qdev->debugfs_count].num_files = nfiles;
+	qdev->debugfs_count = i;
+#if defined(CONFIG_DEBUG_FS)
+	drm_debugfs_create_files(files, nfiles,
+				 qdev->ddev->control->debugfs_root,
+				 qdev->ddev->control);
+	drm_debugfs_create_files(files, nfiles,
+				 qdev->ddev->primary->debugfs_root,
+				 qdev->ddev->primary);
+#endif
+	return 0;
+}
+
+void qxl_debugfs_remove_files(struct qxl_device *qdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	unsigned i;
+
+	for (i = 0; i < qdev->debugfs_count; i++) {
+		drm_debugfs_remove_files(qdev->debugfs[i].files,
+					 qdev->debugfs[i].num_files,
+					 qdev->ddev->control);
+		drm_debugfs_remove_files(qdev->debugfs[i].files,
+					 qdev->debugfs[i].num_files,
+					 qdev->ddev->primary);
+	}
+#endif
+}
diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h
new file mode 100644
index 0000000..94c5aec
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_dev.h
@@ -0,0 +1,879 @@
+/*
+   Copyright (C) 2009 Red Hat, Inc.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+	 notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above copyright
+	 notice, this list of conditions and the following disclaimer in
+	 the documentation and/or other materials provided with the
+	 distribution.
+       * Neither the name of the copyright holder nor the names of its
+	 contributors may be used to endorse or promote products derived
+	 from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS
+   IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+   TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+   PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+
+#ifndef H_QXL_DEV
+#define H_QXL_DEV
+
+#include <linux/types.h>
+
+/*
+ * from spice-protocol
+ * Release 0.10.0
+ */
+
+/* enums.h */
+
+enum SpiceImageType {
+	SPICE_IMAGE_TYPE_BITMAP,
+	SPICE_IMAGE_TYPE_QUIC,
+	SPICE_IMAGE_TYPE_RESERVED,
+	SPICE_IMAGE_TYPE_LZ_PLT = 100,
+	SPICE_IMAGE_TYPE_LZ_RGB,
+	SPICE_IMAGE_TYPE_GLZ_RGB,
+	SPICE_IMAGE_TYPE_FROM_CACHE,
+	SPICE_IMAGE_TYPE_SURFACE,
+	SPICE_IMAGE_TYPE_JPEG,
+	SPICE_IMAGE_TYPE_FROM_CACHE_LOSSLESS,
+	SPICE_IMAGE_TYPE_ZLIB_GLZ_RGB,
+	SPICE_IMAGE_TYPE_JPEG_ALPHA,
+
+	SPICE_IMAGE_TYPE_ENUM_END
+};
+
+enum SpiceBitmapFmt {
+	SPICE_BITMAP_FMT_INVALID,
+	SPICE_BITMAP_FMT_1BIT_LE,
+	SPICE_BITMAP_FMT_1BIT_BE,
+	SPICE_BITMAP_FMT_4BIT_LE,
+	SPICE_BITMAP_FMT_4BIT_BE,
+	SPICE_BITMAP_FMT_8BIT,
+	SPICE_BITMAP_FMT_16BIT,
+	SPICE_BITMAP_FMT_24BIT,
+	SPICE_BITMAP_FMT_32BIT,
+	SPICE_BITMAP_FMT_RGBA,
+
+	SPICE_BITMAP_FMT_ENUM_END
+};
+
+enum SpiceSurfaceFmt {
+	SPICE_SURFACE_FMT_INVALID,
+	SPICE_SURFACE_FMT_1_A,
+	SPICE_SURFACE_FMT_8_A = 8,
+	SPICE_SURFACE_FMT_16_555 = 16,
+	SPICE_SURFACE_FMT_32_xRGB = 32,
+	SPICE_SURFACE_FMT_16_565 = 80,
+	SPICE_SURFACE_FMT_32_ARGB = 96,
+
+	SPICE_SURFACE_FMT_ENUM_END
+};
+
+enum SpiceClipType {
+	SPICE_CLIP_TYPE_NONE,
+	SPICE_CLIP_TYPE_RECTS,
+
+	SPICE_CLIP_TYPE_ENUM_END
+};
+
+enum SpiceRopd {
+	SPICE_ROPD_INVERS_SRC = (1 << 0),
+	SPICE_ROPD_INVERS_BRUSH = (1 << 1),
+	SPICE_ROPD_INVERS_DEST = (1 << 2),
+	SPICE_ROPD_OP_PUT = (1 << 3),
+	SPICE_ROPD_OP_OR = (1 << 4),
+	SPICE_ROPD_OP_AND = (1 << 5),
+	SPICE_ROPD_OP_XOR = (1 << 6),
+	SPICE_ROPD_OP_BLACKNESS = (1 << 7),
+	SPICE_ROPD_OP_WHITENESS = (1 << 8),
+	SPICE_ROPD_OP_INVERS = (1 << 9),
+	SPICE_ROPD_INVERS_RES = (1 << 10),
+
+	SPICE_ROPD_MASK = 0x7ff
+};
+
+enum SpiceBrushType {
+	SPICE_BRUSH_TYPE_NONE,
+	SPICE_BRUSH_TYPE_SOLID,
+	SPICE_BRUSH_TYPE_PATTERN,
+
+	SPICE_BRUSH_TYPE_ENUM_END
+};
+
+enum SpiceCursorType {
+	SPICE_CURSOR_TYPE_ALPHA,
+	SPICE_CURSOR_TYPE_MONO,
+	SPICE_CURSOR_TYPE_COLOR4,
+	SPICE_CURSOR_TYPE_COLOR8,
+	SPICE_CURSOR_TYPE_COLOR16,
+	SPICE_CURSOR_TYPE_COLOR24,
+	SPICE_CURSOR_TYPE_COLOR32,
+
+	SPICE_CURSOR_TYPE_ENUM_END
+};
+
+/* qxl_dev.h */
+
+#pragma pack(push, 1)
+
+#define REDHAT_PCI_VENDOR_ID 0x1b36
+
+/* 0x100-0x11f reserved for spice, 0x1ff used for unstable work */
+#define QXL_DEVICE_ID_STABLE 0x0100
+
+enum {
+	QXL_REVISION_STABLE_V04 = 0x01,
+	QXL_REVISION_STABLE_V06 = 0x02,
+	QXL_REVISION_STABLE_V10 = 0x03,
+	QXL_REVISION_STABLE_V12 = 0x04,
+};
+
+#define QXL_DEVICE_ID_DEVEL 0x01ff
+#define QXL_REVISION_DEVEL 0x01
+
+#define QXL_ROM_MAGIC (*(uint32_t *)"QXRO")
+#define QXL_RAM_MAGIC (*(uint32_t *)"QXRA")
+
+enum {
+	QXL_RAM_RANGE_INDEX,
+	QXL_VRAM_RANGE_INDEX,
+	QXL_ROM_RANGE_INDEX,
+	QXL_IO_RANGE_INDEX,
+
+	QXL_PCI_RANGES
+};
+
+/* qxl-1 compat: append only */
+enum {
+	QXL_IO_NOTIFY_CMD,
+	QXL_IO_NOTIFY_CURSOR,
+	QXL_IO_UPDATE_AREA,
+	QXL_IO_UPDATE_IRQ,
+	QXL_IO_NOTIFY_OOM,
+	QXL_IO_RESET,
+	QXL_IO_SET_MODE,                  /* qxl-1 */
+	QXL_IO_LOG,
+	/* appended for qxl-2 */
+	QXL_IO_MEMSLOT_ADD,
+	QXL_IO_MEMSLOT_DEL,
+	QXL_IO_DETACH_PRIMARY,
+	QXL_IO_ATTACH_PRIMARY,
+	QXL_IO_CREATE_PRIMARY,
+	QXL_IO_DESTROY_PRIMARY,
+	QXL_IO_DESTROY_SURFACE_WAIT,
+	QXL_IO_DESTROY_ALL_SURFACES,
+	/* appended for qxl-3 */
+	QXL_IO_UPDATE_AREA_ASYNC,
+	QXL_IO_MEMSLOT_ADD_ASYNC,
+	QXL_IO_CREATE_PRIMARY_ASYNC,
+	QXL_IO_DESTROY_PRIMARY_ASYNC,
+	QXL_IO_DESTROY_SURFACE_ASYNC,
+	QXL_IO_DESTROY_ALL_SURFACES_ASYNC,
+	QXL_IO_FLUSH_SURFACES_ASYNC,
+	QXL_IO_FLUSH_RELEASE,
+	/* appended for qxl-4 */
+	QXL_IO_MONITORS_CONFIG_ASYNC,
+
+	QXL_IO_RANGE_SIZE
+};
+
+typedef uint64_t QXLPHYSICAL;
+typedef int32_t QXLFIXED; /* fixed 28.4 */
+
+struct qxl_point_fix {
+	QXLFIXED x;
+	QXLFIXED y;
+};
+
+struct qxl_point {
+	int32_t x;
+	int32_t y;
+};
+
+struct qxl_point_1_6 {
+	int16_t x;
+	int16_t y;
+};
+
+struct qxl_rect {
+	int32_t top;
+	int32_t left;
+	int32_t bottom;
+	int32_t right;
+};
+
+struct qxl_urect {
+	uint32_t top;
+	uint32_t left;
+	uint32_t bottom;
+	uint32_t right;
+};
+
+/* qxl-1 compat: append only */
+struct qxl_rom {
+	uint32_t magic;
+	uint32_t id;
+	uint32_t update_id;
+	uint32_t compression_level;
+	uint32_t log_level;
+	uint32_t mode;			  /* qxl-1 */
+	uint32_t modes_offset;
+	uint32_t num_io_pages;
+	uint32_t pages_offset;		  /* qxl-1 */
+	uint32_t draw_area_offset;	  /* qxl-1 */
+	uint32_t surface0_area_size;	  /* qxl-1 name: draw_area_size */
+	uint32_t ram_header_offset;
+	uint32_t mm_clock;
+	/* appended for qxl-2 */
+	uint32_t n_surfaces;
+	uint64_t flags;
+	uint8_t slots_start;
+	uint8_t slots_end;
+	uint8_t slot_gen_bits;
+	uint8_t slot_id_bits;
+	uint8_t slot_generation;
+	/* appended for qxl-4 */
+	uint8_t client_present;
+	uint8_t client_capabilities[58];
+	uint32_t client_monitors_config_crc;
+	struct {
+		uint16_t count;
+		uint16_t padding;
+		struct qxl_urect heads[64];
+	} client_monitors_config;
+};
+
+/* qxl-1 compat: fixed */
+struct qxl_mode {
+	uint32_t id;
+	uint32_t x_res;
+	uint32_t y_res;
+	uint32_t bits;
+	uint32_t stride;
+	uint32_t x_mili;
+	uint32_t y_mili;
+	uint32_t orientation;
+};
+
+/* qxl-1 compat: fixed */
+struct qxl_modes {
+	uint32_t n_modes;
+	struct qxl_mode modes[0];
+};
+
+/* qxl-1 compat: append only */
+enum qxl_cmd_type {
+	QXL_CMD_NOP,
+	QXL_CMD_DRAW,
+	QXL_CMD_UPDATE,
+	QXL_CMD_CURSOR,
+	QXL_CMD_MESSAGE,
+	QXL_CMD_SURFACE,
+};
+
+/* qxl-1 compat: fixed */
+struct qxl_command {
+	QXLPHYSICAL data;
+	uint32_t type;
+	uint32_t padding;
+};
+
+#define QXL_COMMAND_FLAG_COMPAT		(1<<0)
+#define QXL_COMMAND_FLAG_COMPAT_16BPP	(2<<0)
+
+struct qxl_command_ext {
+	struct qxl_command cmd;
+	uint32_t group_id;
+	uint32_t flags;
+};
+
+struct qxl_mem_slot {
+	uint64_t mem_start;
+	uint64_t mem_end;
+};
+
+#define QXL_SURF_TYPE_PRIMARY	   0
+
+#define QXL_SURF_FLAG_KEEP_DATA	   (1 << 0)
+
+struct qxl_surface_create {
+	uint32_t width;
+	uint32_t height;
+	int32_t stride;
+	uint32_t format;
+	uint32_t position;
+	uint32_t mouse_mode;
+	uint32_t flags;
+	uint32_t type;
+	QXLPHYSICAL mem;
+};
+
+#define QXL_COMMAND_RING_SIZE 32
+#define QXL_CURSOR_RING_SIZE 32
+#define QXL_RELEASE_RING_SIZE 8
+
+#define QXL_LOG_BUF_SIZE 4096
+
+#define QXL_INTERRUPT_DISPLAY (1 << 0)
+#define QXL_INTERRUPT_CURSOR (1 << 1)
+#define QXL_INTERRUPT_IO_CMD (1 << 2)
+#define QXL_INTERRUPT_ERROR  (1 << 3)
+#define QXL_INTERRUPT_CLIENT (1 << 4)
+#define QXL_INTERRUPT_CLIENT_MONITORS_CONFIG  (1 << 5)
+
+struct qxl_ring_header {
+	uint32_t num_items;
+	uint32_t prod;
+	uint32_t notify_on_prod;
+	uint32_t cons;
+	uint32_t notify_on_cons;
+};
+
+/* qxl-1 compat: append only */
+struct qxl_ram_header {
+	uint32_t magic;
+	uint32_t int_pending;
+	uint32_t int_mask;
+	uint8_t log_buf[QXL_LOG_BUF_SIZE];
+	struct qxl_ring_header  cmd_ring_hdr;
+	struct qxl_command	cmd_ring[QXL_COMMAND_RING_SIZE];
+	struct qxl_ring_header  cursor_ring_hdr;
+	struct qxl_command	cursor_ring[QXL_CURSOR_RING_SIZE];
+	struct qxl_ring_header  release_ring_hdr;
+	uint64_t		release_ring[QXL_RELEASE_RING_SIZE];
+	struct qxl_rect update_area;
+	/* appended for qxl-2 */
+	uint32_t update_surface;
+	struct qxl_mem_slot mem_slot;
+	struct qxl_surface_create create_surface;
+	uint64_t flags;
+
+	/* appended for qxl-4 */
+
+	/* used by QXL_IO_MONITORS_CONFIG_ASYNC */
+	QXLPHYSICAL monitors_config;
+	uint8_t guest_capabilities[64];
+};
+
+union qxl_release_info {
+	uint64_t id;	  /* in  */
+	uint64_t next;	  /* out */
+};
+
+struct qxl_release_info_ext {
+	union qxl_release_info *info;
+	uint32_t group_id;
+};
+
+struct qxl_data_chunk {
+	uint32_t data_size;
+	QXLPHYSICAL prev_chunk;
+	QXLPHYSICAL next_chunk;
+	uint8_t data[0];
+};
+
+struct qxl_message {
+	union qxl_release_info release_info;
+	uint8_t data[0];
+};
+
+struct qxl_compat_update_cmd {
+	union qxl_release_info release_info;
+	struct qxl_rect area;
+	uint32_t update_id;
+};
+
+struct qxl_update_cmd {
+	union qxl_release_info release_info;
+	struct qxl_rect area;
+	uint32_t update_id;
+	uint32_t surface_id;
+};
+
+struct qxl_cursor_header {
+	uint64_t unique;
+	uint16_t type;
+	uint16_t width;
+	uint16_t height;
+	uint16_t hot_spot_x;
+	uint16_t hot_spot_y;
+};
+
+struct qxl_cursor {
+	struct qxl_cursor_header header;
+	uint32_t data_size;
+	struct qxl_data_chunk chunk;
+};
+
+enum {
+	QXL_CURSOR_SET,
+	QXL_CURSOR_MOVE,
+	QXL_CURSOR_HIDE,
+	QXL_CURSOR_TRAIL,
+};
+
+#define QXL_CURSOR_DEVICE_DATA_SIZE 128
+
+struct qxl_cursor_cmd {
+	union qxl_release_info release_info;
+	uint8_t type;
+	union {
+		struct {
+			struct qxl_point_1_6 position;
+			uint8_t visible;
+			QXLPHYSICAL shape;
+		} set;
+		struct {
+			uint16_t length;
+			uint16_t frequency;
+		} trail;
+		struct qxl_point_1_6 position;
+	} u;
+	/* todo: dynamic size from rom */
+	uint8_t device_data[QXL_CURSOR_DEVICE_DATA_SIZE];
+};
+
+enum {
+	QXL_DRAW_NOP,
+	QXL_DRAW_FILL,
+	QXL_DRAW_OPAQUE,
+	QXL_DRAW_COPY,
+	QXL_COPY_BITS,
+	QXL_DRAW_BLEND,
+	QXL_DRAW_BLACKNESS,
+	QXL_DRAW_WHITENESS,
+	QXL_DRAW_INVERS,
+	QXL_DRAW_ROP3,
+	QXL_DRAW_STROKE,
+	QXL_DRAW_TEXT,
+	QXL_DRAW_TRANSPARENT,
+	QXL_DRAW_ALPHA_BLEND,
+	QXL_DRAW_COMPOSITE
+};
+
+struct qxl_raster_glyph {
+	struct qxl_point render_pos;
+	struct qxl_point glyph_origin;
+	uint16_t width;
+	uint16_t height;
+	uint8_t data[0];
+};
+
+struct qxl_string {
+	uint32_t data_size;
+	uint16_t length;
+	uint16_t flags;
+	struct qxl_data_chunk chunk;
+};
+
+struct qxl_copy_bits {
+	struct qxl_point src_pos;
+};
+
+enum qxl_effect_type {
+	QXL_EFFECT_BLEND = 0,
+	QXL_EFFECT_OPAQUE = 1,
+	QXL_EFFECT_REVERT_ON_DUP = 2,
+	QXL_EFFECT_BLACKNESS_ON_DUP = 3,
+	QXL_EFFECT_WHITENESS_ON_DUP = 4,
+	QXL_EFFECT_NOP_ON_DUP = 5,
+	QXL_EFFECT_NOP = 6,
+	QXL_EFFECT_OPAQUE_BRUSH = 7
+};
+
+struct qxl_pattern {
+	QXLPHYSICAL pat;
+	struct qxl_point pos;
+};
+
+struct qxl_brush {
+	uint32_t type;
+	union {
+		uint32_t color;
+		struct qxl_pattern pattern;
+	} u;
+};
+
+struct qxl_q_mask {
+	uint8_t flags;
+	struct qxl_point pos;
+	QXLPHYSICAL bitmap;
+};
+
+struct qxl_fill {
+	struct qxl_brush brush;
+	uint16_t rop_descriptor;
+	struct qxl_q_mask mask;
+};
+
+struct qxl_opaque {
+	QXLPHYSICAL src_bitmap;
+	struct qxl_rect src_area;
+	struct qxl_brush brush;
+	uint16_t rop_descriptor;
+	uint8_t scale_mode;
+	struct qxl_q_mask mask;
+};
+
+struct qxl_copy {
+	QXLPHYSICAL src_bitmap;
+	struct qxl_rect src_area;
+	uint16_t rop_descriptor;
+	uint8_t scale_mode;
+	struct qxl_q_mask mask;
+};
+
+struct qxl_transparent {
+	QXLPHYSICAL src_bitmap;
+	struct qxl_rect src_area;
+	uint32_t src_color;
+	uint32_t true_color;
+};
+
+struct qxl_alpha_blend {
+	uint16_t alpha_flags;
+	uint8_t alpha;
+	QXLPHYSICAL src_bitmap;
+	struct qxl_rect src_area;
+};
+
+struct qxl_compat_alpha_blend {
+	uint8_t alpha;
+	QXLPHYSICAL src_bitmap;
+	struct qxl_rect src_area;
+};
+
+struct qxl_rop_3 {
+	QXLPHYSICAL src_bitmap;
+	struct qxl_rect src_area;
+	struct qxl_brush brush;
+	uint8_t rop3;
+	uint8_t scale_mode;
+	struct qxl_q_mask mask;
+};
+
+struct qxl_line_attr {
+	uint8_t flags;
+	uint8_t join_style;
+	uint8_t end_style;
+	uint8_t style_nseg;
+	QXLFIXED width;
+	QXLFIXED miter_limit;
+	QXLPHYSICAL style;
+};
+
+struct qxl_stroke {
+	QXLPHYSICAL path;
+	struct qxl_line_attr attr;
+	struct qxl_brush brush;
+	uint16_t fore_mode;
+	uint16_t back_mode;
+};
+
+struct qxl_text {
+	QXLPHYSICAL str;
+	struct qxl_rect back_area;
+	struct qxl_brush fore_brush;
+	struct qxl_brush back_brush;
+	uint16_t fore_mode;
+	uint16_t back_mode;
+};
+
+struct qxl_mask {
+	struct qxl_q_mask mask;
+};
+
+struct qxl_clip {
+	uint32_t type;
+	QXLPHYSICAL data;
+};
+
+enum qxl_operator {
+	QXL_OP_CLEAR			 = 0x00,
+	QXL_OP_SOURCE			 = 0x01,
+	QXL_OP_DST			 = 0x02,
+	QXL_OP_OVER			 = 0x03,
+	QXL_OP_OVER_REVERSE		 = 0x04,
+	QXL_OP_IN			 = 0x05,
+	QXL_OP_IN_REVERSE		 = 0x06,
+	QXL_OP_OUT			 = 0x07,
+	QXL_OP_OUT_REVERSE		 = 0x08,
+	QXL_OP_ATOP			 = 0x09,
+	QXL_OP_ATOP_REVERSE		 = 0x0a,
+	QXL_OP_XOR			 = 0x0b,
+	QXL_OP_ADD			 = 0x0c,
+	QXL_OP_SATURATE			 = 0x0d,
+	/* Note the jump here from 0x0d to 0x30 */
+	QXL_OP_MULTIPLY			 = 0x30,
+	QXL_OP_SCREEN			 = 0x31,
+	QXL_OP_OVERLAY			 = 0x32,
+	QXL_OP_DARKEN			 = 0x33,
+	QXL_OP_LIGHTEN			 = 0x34,
+	QXL_OP_COLOR_DODGE		 = 0x35,
+	QXL_OP_COLOR_BURN		 = 0x36,
+	QXL_OP_HARD_LIGHT		 = 0x37,
+	QXL_OP_SOFT_LIGHT		 = 0x38,
+	QXL_OP_DIFFERENCE		 = 0x39,
+	QXL_OP_EXCLUSION		 = 0x3a,
+	QXL_OP_HSL_HUE			 = 0x3b,
+	QXL_OP_HSL_SATURATION		 = 0x3c,
+	QXL_OP_HSL_COLOR		 = 0x3d,
+	QXL_OP_HSL_LUMINOSITY		 = 0x3e
+};
+
+struct qxl_transform {
+	uint32_t	t00;
+	uint32_t	t01;
+	uint32_t	t02;
+	uint32_t	t10;
+	uint32_t	t11;
+	uint32_t	t12;
+};
+
+/* The flags field has the following bit fields:
+ *
+ *     operator:		[  0 -  7 ]
+ *     src_filter:		[  8 - 10 ]
+ *     mask_filter:		[ 11 - 13 ]
+ *     src_repeat:		[ 14 - 15 ]
+ *     mask_repeat:		[ 16 - 17 ]
+ *     component_alpha:		[ 18 - 18 ]
+ *     reserved:		[ 19 - 31 ]
+ *
+ * The repeat and filter values are those of pixman:
+ *		REPEAT_NONE =		0
+ *              REPEAT_NORMAL =		1
+ *		REPEAT_PAD =		2
+ *		REPEAT_REFLECT =	3
+ *
+ * The filter values are:
+ *		FILTER_NEAREST =	0
+ *		FILTER_BILINEAR	=	1
+ */
+struct qxl_composite {
+	uint32_t		flags;
+
+	QXLPHYSICAL			src;
+	QXLPHYSICAL			src_transform;	/* May be NULL */
+	QXLPHYSICAL			mask;		/* May be NULL */
+	QXLPHYSICAL			mask_transform;	/* May be NULL */
+	struct qxl_point_1_6	src_origin;
+	struct qxl_point_1_6	mask_origin;
+};
+
+struct qxl_compat_drawable {
+	union qxl_release_info release_info;
+	uint8_t effect;
+	uint8_t type;
+	uint16_t bitmap_offset;
+	struct qxl_rect bitmap_area;
+	struct qxl_rect bbox;
+	struct qxl_clip clip;
+	uint32_t mm_time;
+	union {
+		struct qxl_fill fill;
+		struct qxl_opaque opaque;
+		struct qxl_copy copy;
+		struct qxl_transparent transparent;
+		struct qxl_compat_alpha_blend alpha_blend;
+		struct qxl_copy_bits copy_bits;
+		struct qxl_copy blend;
+		struct qxl_rop_3 rop3;
+		struct qxl_stroke stroke;
+		struct qxl_text text;
+		struct qxl_mask blackness;
+		struct qxl_mask invers;
+		struct qxl_mask whiteness;
+	} u;
+};
+
+struct qxl_drawable {
+	union qxl_release_info release_info;
+	uint32_t surface_id;
+	uint8_t effect;
+	uint8_t type;
+	uint8_t self_bitmap;
+	struct qxl_rect self_bitmap_area;
+	struct qxl_rect bbox;
+	struct qxl_clip clip;
+	uint32_t mm_time;
+	int32_t surfaces_dest[3];
+	struct qxl_rect surfaces_rects[3];
+	union {
+		struct qxl_fill fill;
+		struct qxl_opaque opaque;
+		struct qxl_copy copy;
+		struct qxl_transparent transparent;
+		struct qxl_alpha_blend alpha_blend;
+		struct qxl_copy_bits copy_bits;
+		struct qxl_copy blend;
+		struct qxl_rop_3 rop3;
+		struct qxl_stroke stroke;
+		struct qxl_text text;
+		struct qxl_mask blackness;
+		struct qxl_mask invers;
+		struct qxl_mask whiteness;
+		struct qxl_composite composite;
+	} u;
+};
+
+enum qxl_surface_cmd_type {
+	QXL_SURFACE_CMD_CREATE,
+	QXL_SURFACE_CMD_DESTROY,
+};
+
+struct qxl_surface {
+	uint32_t format;
+	uint32_t width;
+	uint32_t height;
+	int32_t stride;
+	QXLPHYSICAL data;
+};
+
+struct qxl_surface_cmd {
+	union qxl_release_info release_info;
+	uint32_t surface_id;
+	uint8_t type;
+	uint32_t flags;
+	union {
+		struct qxl_surface surface_create;
+	} u;
+};
+
+struct qxl_clip_rects {
+	uint32_t num_rects;
+	struct qxl_data_chunk chunk;
+};
+
+enum {
+	QXL_PATH_BEGIN = (1 << 0),
+	QXL_PATH_END = (1 << 1),
+	QXL_PATH_CLOSE = (1 << 3),
+	QXL_PATH_BEZIER = (1 << 4),
+};
+
+struct qxl_path_seg {
+	uint32_t flags;
+	uint32_t count;
+	struct qxl_point_fix points[0];
+};
+
+struct qxl_path {
+	uint32_t data_size;
+	struct qxl_data_chunk chunk;
+};
+
+enum {
+	QXL_IMAGE_GROUP_DRIVER,
+	QXL_IMAGE_GROUP_DEVICE,
+	QXL_IMAGE_GROUP_RED,
+	QXL_IMAGE_GROUP_DRIVER_DONT_CACHE,
+};
+
+struct qxl_image_id {
+	uint32_t group;
+	uint32_t unique;
+};
+
+union qxl_image_id_union {
+	struct qxl_image_id id;
+	uint64_t value;
+};
+
+enum qxl_image_flags {
+	QXL_IMAGE_CACHE = (1 << 0),
+	QXL_IMAGE_HIGH_BITS_SET = (1 << 1),
+};
+
+enum qxl_bitmap_flags {
+	QXL_BITMAP_DIRECT = (1 << 0),
+	QXL_BITMAP_UNSTABLE = (1 << 1),
+	QXL_BITMAP_TOP_DOWN = (1 << 2), /* == SPICE_BITMAP_FLAGS_TOP_DOWN */
+};
+
+#define QXL_SET_IMAGE_ID(image, _group, _unique) {              \
+	(image)->descriptor.id = (((uint64_t)_unique) << 32) | _group;	\
+}
+
+struct qxl_image_descriptor {
+	uint64_t id;
+	uint8_t type;
+	uint8_t flags;
+	uint32_t width;
+	uint32_t height;
+};
+
+struct qxl_palette {
+	uint64_t unique;
+	uint16_t num_ents;
+	uint32_t ents[0];
+};
+
+struct qxl_bitmap {
+	uint8_t format;
+	uint8_t flags;
+	uint32_t x;
+	uint32_t y;
+	uint32_t stride;
+	QXLPHYSICAL palette;
+	QXLPHYSICAL data; /* data[0] ? */
+};
+
+struct qxl_surface_id {
+	uint32_t surface_id;
+};
+
+struct qxl_encoder_data {
+	uint32_t data_size;
+	uint8_t data[0];
+};
+
+struct qxl_image {
+	struct qxl_image_descriptor descriptor;
+	union { /* variable length */
+		struct qxl_bitmap bitmap;
+		struct qxl_encoder_data quic;
+		struct qxl_surface_id surface_image;
+	} u;
+};
+
+/* A QXLHead is a single monitor output backed by a QXLSurface.
+ * x and y offsets are unsigned since they are used in relation to
+ * the given surface, not the same as the x, y coordinates in the guest
+ * screen reference frame. */
+struct qxl_head {
+	uint32_t id;
+	uint32_t surface_id;
+	uint32_t width;
+	uint32_t height;
+	uint32_t x;
+	uint32_t y;
+	uint32_t flags;
+};
+
+struct qxl_monitors_config {
+	uint16_t count;
+	uint16_t max_allowed; /* If it is 0 no fixed limit is given by the
+				 driver */
+	struct qxl_head heads[0];
+};
+
+#pragma pack(pop)
+
+#endif /* H_QXL_DEV */
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
new file mode 100644
index 0000000..c80ddfe
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -0,0 +1,981 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+
+#include "linux/crc32.h"
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+#include "drm_crtc_helper.h"
+
+static void qxl_crtc_set_to_mode(struct qxl_device *qdev,
+				 struct drm_connector *connector,
+				 struct qxl_head *head)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *mode, *t;
+	int width = head->width;
+	int height = head->height;
+
+	if (width < 320 || height < 240) {
+		qxl_io_log(qdev, "%s: bad head: %dx%d", width, height);
+		width = 1024;
+		height = 768;
+	}
+	if (width * height * 4 > 16*1024*1024) {
+		width = 1024;
+		height = 768;
+	}
+	/* TODO: go over regular modes and removed preferred? */
+	list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
+		drm_mode_remove(connector, mode);
+	mode = drm_cvt_mode(dev, width, height, 60, false, false, false);
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+	mode->status = MODE_OK;
+	drm_mode_probed_add(connector, mode);
+	qxl_io_log(qdev, "%s: %d x %d\n", __func__, width, height);
+}
+
+void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev)
+{
+	struct drm_connector *connector;
+	int i;
+	struct drm_device *dev = qdev->ddev;
+
+	i = 0;
+	qxl_io_log(qdev, "%s: %d, %d\n", __func__,
+		   dev->mode_config.num_connector,
+		   qdev->monitors_config->count);
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (i >= qdev->monitors_config->count) {
+			/* crtc will be reported as disabled */
+			continue;
+		}
+		qxl_crtc_set_to_mode(qdev, connector,
+				     &qdev->monitors_config->heads[i]);
+		++i;
+	}
+}
+
+void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
+{
+	if (qdev->client_monitors_config &&
+	    count > qdev->client_monitors_config->count) {
+		kfree(qdev->client_monitors_config);
+	}
+	if (!qdev->client_monitors_config) {
+		qdev->client_monitors_config = kzalloc(
+				sizeof(struct qxl_monitors_config) +
+				sizeof(struct qxl_head) * count, GFP_KERNEL);
+		if (!qdev->client_monitors_config) {
+			qxl_io_log(qdev,
+				   "%s: allocation failure for %u heads\n",
+				   __func__, count);
+			return;
+		}
+	}
+	qdev->client_monitors_config->count = count;
+}
+
+static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
+{
+	int i;
+	int num_monitors;
+	uint32_t crc;
+
+	BUG_ON(!qdev->monitors_config);
+	num_monitors = qdev->rom->client_monitors_config.count;
+	crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
+		  sizeof(qdev->rom->client_monitors_config));
+	if (crc != qdev->rom->client_monitors_config_crc) {
+		qxl_io_log(qdev, "crc mismatch: have %X (%d) != %X\n", crc,
+			   sizeof(qdev->rom->client_monitors_config),
+			   qdev->rom->client_monitors_config_crc);
+		return 1;
+	}
+	if (num_monitors > qdev->monitors_config->max_allowed) {
+		DRM_INFO("client monitors list will be truncated: %d < %d\n",
+			 qdev->monitors_config->max_allowed, num_monitors);
+		num_monitors = qdev->monitors_config->max_allowed;
+	} else {
+		num_monitors = qdev->rom->client_monitors_config.count;
+	}
+	qxl_alloc_client_monitors_config(qdev, num_monitors);
+	/* we copy max from the client but it isn't used */
+	qdev->client_monitors_config->max_allowed =
+				qdev->monitors_config->max_allowed;
+	for (i = 0 ; i < qdev->client_monitors_config->count ; ++i) {
+		struct qxl_urect *c_rect =
+			&qdev->rom->client_monitors_config.heads[i];
+		struct qxl_head *client_head =
+			&qdev->client_monitors_config->heads[i];
+		struct qxl_head *head = &qdev->monitors_config->heads[i];
+		client_head->x = head->x = c_rect->left;
+		client_head->y = head->y = c_rect->top;
+		client_head->width = head->width =
+						c_rect->right - c_rect->left;
+		client_head->height = head->height =
+						c_rect->bottom - c_rect->top;
+		client_head->surface_id = head->surface_id = 0;
+		client_head->id = head->id = i;
+		client_head->flags = head->flags = 0;
+		QXL_DEBUG(qdev, "read %dx%d+%d+%d\n", head->width, head->height,
+			  head->x, head->y);
+	}
+	return 0;
+}
+
+void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
+{
+
+	while (qxl_display_copy_rom_client_monitors_config(qdev)) {
+		qxl_io_log(qdev, "failed crc check for client_monitors_config,"
+				 " retrying\n");
+	}
+	qxl_crtc_set_from_monitors_config(qdev);
+	/* fire off a uevent and let userspace tell us what to do */
+	qxl_io_log(qdev, "calling drm_sysfs_hotplug_event\n");
+	drm_sysfs_hotplug_event(qdev->ddev);
+}
+
+static int qxl_add_monitors_config_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	struct qxl_output *output = drm_connector_to_qxl_output(connector);
+	int h = output->index;
+	struct drm_display_mode *mode = NULL;
+	struct qxl_head *head;
+
+	if (!qdev->monitors_config)
+		return 0;
+	head = &qdev->monitors_config->heads[h];
+
+	mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
+			    false);
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+	drm_mode_probed_add(connector, mode);
+	return 1;
+}
+
+static int qxl_add_common_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *mode = NULL;
+	int i;
+	struct mode_size {
+		int w;
+		int h;
+	} common_modes[] = {
+		{ 640,  480},
+		{ 720,  480},
+		{ 800,  600},
+		{ 848,  480},
+		{1024,  768},
+		{1152,  768},
+		{1280,  720},
+		{1280,  800},
+		{1280,  854},
+		{1280,  960},
+		{1280, 1024},
+		{1440,  900},
+		{1400, 1050},
+		{1680, 1050},
+		{1600, 1200},
+		{1920, 1080},
+		{1920, 1200}
+	};
+
+	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+		if (common_modes[i].w < 320 || common_modes[i].h < 200)
+			continue;
+
+		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
+				    60, false, false, false);
+		if (common_modes[i].w == 1024 && common_modes[i].h == 768)
+			mode->type |= DRM_MODE_TYPE_PREFERRED;
+		drm_mode_probed_add(connector, mode);
+	}
+	return i - 1;
+}
+
+static void qxl_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+			       u16 *blue, uint32_t start, uint32_t size)
+{
+	/* TODO */
+}
+
+static void qxl_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);
+
+	drm_crtc_cleanup(crtc);
+	kfree(qxl_crtc);
+}
+
+static void
+qxl_hide_cursor(struct qxl_device *qdev)
+{
+	struct qxl_release *release;
+	struct qxl_cursor_cmd *cmd;
+	int ret;
+
+	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
+					 &release, NULL);
+
+	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+	cmd->type = QXL_CURSOR_HIDE;
+	qxl_release_unmap(qdev, release, &cmd->release_info);
+
+	qxl_fence_releaseable(qdev, release);
+	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+	qxl_release_unreserve(qdev, release);
+}
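+
+/*
+ * Every command pushed to the device in this file follows the pattern
+ * used above (a sketch of the recurring sequence, not an additional API):
+ *
+ *	qxl_alloc_release_reserved()	reserve a release for the command
+ *	qxl_release_map()		fill in the command via a CPU mapping
+ *	qxl_release_unmap()
+ *	qxl_fence_releaseable()		track the release on the bo fence
+ *	qxl_push_*_ring_release()	hand the command to the hardware
+ *	qxl_release_unreserve()
+ *
+ * The device returns the release id on its release ring when it has
+ * finished with the command; the garbage collector (qxl_garbage_collect(),
+ * driven by gc_work) then frees it.
+ */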
+
+static int qxl_crtc_cursor_set(struct drm_crtc *crtc,
+			       struct drm_file *file_priv,
+			       uint32_t handle,
+			       uint32_t width,
+			       uint32_t height)
+{
+	struct drm_device *dev = crtc->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+	struct drm_gem_object *obj;
+	struct qxl_cursor *cursor;
+	struct qxl_cursor_cmd *cmd;
+	struct qxl_bo *cursor_bo, *user_bo;
+	struct qxl_release *release;
+	void *user_ptr;
+
+	int size = 64 * 64 * 4;
+	int ret = 0;
+
+	if (!handle) {
+		qxl_hide_cursor(qdev);
+		return 0;
+	}
+
+	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+	if (!obj) {
+		DRM_ERROR("cannot find cursor object\n");
+		return -ENOENT;
+	}
+
+	user_bo = gem_to_qxl_bo(obj);
+
+	ret = qxl_bo_reserve(user_bo, false);
+	if (ret)
+		goto out_unref;
+
+	ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
+	if (ret)
+		goto out_unreserve;
+
+	ret = qxl_bo_kmap(user_bo, &user_ptr);
+	if (ret)
+		goto out_unpin;
+
+	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
+					 QXL_RELEASE_CURSOR_CMD,
+					 &release, NULL);
+	if (ret)
+		goto out_kunmap;
+	ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size,
+				    &cursor_bo);
+	if (ret)
+		goto out_free_release;
+	ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
+	if (ret)
+		goto out_free_bo;
+
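+	/* the shape is currently hard-coded as a 64x64 ALPHA cursor; the
+	 * width/height arguments from userspace are not consulted */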
+	cursor->header.unique = 0;
+	cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
+	cursor->header.width = 64;
+	cursor->header.height = 64;
+	cursor->header.hot_spot_x = 0;
+	cursor->header.hot_spot_y = 0;
+	cursor->data_size = size;
+	cursor->chunk.next_chunk = 0;
+	cursor->chunk.prev_chunk = 0;
+	cursor->chunk.data_size = size;
+
+	memcpy(cursor->chunk.data, user_ptr, size);
+
+	qxl_bo_kunmap(cursor_bo);
+
+	/* finish with the userspace bo */
+	qxl_bo_kunmap(user_bo);
+	qxl_bo_unpin(user_bo);
+	qxl_bo_unreserve(user_bo);
+	drm_gem_object_unreference_unlocked(obj);
+
+	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+	cmd->type = QXL_CURSOR_SET;
+	cmd->u.set.position.x = qcrtc->cur_x;
+	cmd->u.set.position.y = qcrtc->cur_y;
+
+	cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
+	qxl_release_add_res(qdev, release, cursor_bo);
+
+	cmd->u.set.visible = 1;
+	qxl_release_unmap(qdev, release, &cmd->release_info);
+
+	qxl_fence_releaseable(qdev, release);
+	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+	qxl_release_unreserve(qdev, release);
+
+	qxl_bo_unreserve(cursor_bo);
+	qxl_bo_unref(&cursor_bo);
+
+	return ret;
+out_free_bo:
+	qxl_bo_unref(&cursor_bo);
+out_free_release:
+	qxl_release_unreserve(qdev, release);
+	qxl_release_free(qdev, release);
+out_kunmap:
+	qxl_bo_kunmap(user_bo);
+out_unpin:
+	qxl_bo_unpin(user_bo);
+out_unreserve:
+	qxl_bo_unreserve(user_bo);
+out_unref:
+	drm_gem_object_unreference_unlocked(obj);
+	return ret;
+}
+
+static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
+				int x, int y)
+{
+	struct drm_device *dev = crtc->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+	struct qxl_release *release;
+	struct qxl_cursor_cmd *cmd;
+	int ret;
+
+	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
+					 &release, NULL);
+	if (ret)
+		return ret;
+
+	qcrtc->cur_x = x;
+	qcrtc->cur_y = y;
+
+	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+	cmd->type = QXL_CURSOR_MOVE;
+	cmd->u.position.x = qcrtc->cur_x;
+	cmd->u.position.y = qcrtc->cur_y;
+	qxl_release_unmap(qdev, release, &cmd->release_info);
+
+	qxl_fence_releaseable(qdev, release);
+	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+	qxl_release_unreserve(qdev, release);
+	return 0;
+}
+
+static const struct drm_crtc_funcs qxl_crtc_funcs = {
+	.cursor_set = qxl_crtc_cursor_set,
+	.cursor_move = qxl_crtc_cursor_move,
+	.gamma_set = qxl_crtc_gamma_set,
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = qxl_crtc_destroy,
+};
+
+static void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
+
+	if (qxl_fb->obj)
+		drm_gem_object_unreference_unlocked(qxl_fb->obj);
+	drm_framebuffer_cleanup(fb);
+	kfree(qxl_fb);
+}
+
+int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
+				  struct drm_file *file_priv,
+				  unsigned flags, unsigned color,
+				  struct drm_clip_rect *clips,
+				  unsigned num_clips)
+{
+	/* TODO: vmwgfx, which this was based on, held a lock here;
+	 * work out whether we need one too */
+	struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
+	struct qxl_device *qdev = qxl_fb->base.dev->dev_private;
+	struct drm_clip_rect norect;
+	struct qxl_bo *qobj;
+	int inc = 1;
+
+	qobj = gem_to_qxl_bo(qxl_fb->obj);
+	if (qxl_fb != qdev->active_user_framebuffer) {
+		DRM_INFO("%s: qxl_fb 0x%p != qdev->active_user_framebuffer 0x%p\n",
+			__func__, qxl_fb, qdev->active_user_framebuffer);
+	}
+	if (!num_clips) {
+		num_clips = 1;
+		clips = &norect;
+		norect.x1 = norect.y1 = 0;
+		norect.x2 = fb->width;
+		norect.y2 = fb->height;
+	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+		num_clips /= 2;
+		inc = 2; /* skip source rects */
+	}
+
+	qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color,
+			  clips, num_clips, inc);
+	return 0;
+}
+
+static const struct drm_framebuffer_funcs qxl_fb_funcs = {
+	.destroy = qxl_user_framebuffer_destroy,
+	.dirty = qxl_framebuffer_surface_dirty,
+/*	TODO?
+ *	.create_handle = qxl_user_framebuffer_create_handle, */
+};
+
+int
+qxl_framebuffer_init(struct drm_device *dev,
+		     struct qxl_framebuffer *qfb,
+		     struct drm_mode_fb_cmd2 *mode_cmd,
+		     struct drm_gem_object *obj)
+{
+	int ret;
+
+	qfb->obj = obj;
+	ret = drm_framebuffer_init(dev, &qfb->base, &qxl_fb_funcs);
+	if (ret) {
+		qfb->obj = NULL;
+		return ret;
+	}
+	drm_helper_mode_fill_fb_struct(&qfb->base, mode_cmd);
+	return 0;
+}
+
+static void qxl_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct qxl_device *qdev = dev->dev_private;
+
+	qxl_io_log(qdev, "%s: (%d,%d) => (%d,%d)\n",
+		   __func__,
+		   mode->hdisplay, mode->vdisplay,
+		   adjusted_mode->hdisplay,
+		   adjusted_mode->vdisplay);
+	return true;
+}
+
+void
+qxl_send_monitors_config(struct qxl_device *qdev)
+{
+	int i;
+
+	BUG_ON(!qdev->ram_header->monitors_config);
+
+	if (qdev->monitors_config->count == 0) {
+		qxl_io_log(qdev, "%s: 0 monitors??\n", __func__);
+		return;
+	}
+	for (i = 0; i < qdev->monitors_config->count; ++i) {
+		struct qxl_head *head = &qdev->monitors_config->heads[i];
+
+		if (head->x > 8192 || head->y > 8192 ||
+		    head->width > 8192 || head->height > 8192) {
+			DRM_ERROR("head %d wrong: %dx%d+%d+%d\n",
+				  i, head->width, head->height,
+				  head->x, head->y);
+			return;
+		}
+	}
+	qxl_io_monitors_config(qdev);
+}
+
+static void qxl_monitors_config_set_single(struct qxl_device *qdev,
+					   unsigned x, unsigned y,
+					   unsigned width, unsigned height)
+{
+	DRM_DEBUG("%dx%d+%d+%d\n", width, height, x, y);
+	qdev->monitors_config->count = 1;
+	qdev->monitors_config->heads[0].x = x;
+	qdev->monitors_config->heads[0].y = y;
+	qdev->monitors_config->heads[0].width = width;
+	qdev->monitors_config->heads[0].height = height;
+}
+
+static int qxl_crtc_mode_set(struct drm_crtc *crtc,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode,
+			       int x, int y,
+			       struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	struct qxl_mode *m = (void *)mode->private;
+	struct qxl_framebuffer *qfb;
+	struct qxl_bo *bo, *old_bo = NULL;
+	uint32_t width, height, base_offset;
+	bool recreate_primary = false;
+	int ret;
+
+	if (!crtc->fb) {
+		DRM_DEBUG_KMS("No FB bound\n");
+		return 0;
+	}
+
+	if (old_fb) {
+		qfb = to_qxl_framebuffer(old_fb);
+		old_bo = gem_to_qxl_bo(qfb->obj);
+	}
+	qfb = to_qxl_framebuffer(crtc->fb);
+	bo = gem_to_qxl_bo(qfb->obj);
+	if (!m)
+		/* and do we care? */
+		DRM_DEBUG("%dx%d: not a native mode\n", x, y);
+	else
+		DRM_DEBUG("%dx%d: qxl id %d\n",
+			  mode->hdisplay, mode->vdisplay, m->id);
+	DRM_DEBUG("+%d+%d (%d,%d) => (%d,%d)\n",
+		  x, y,
+		  mode->hdisplay, mode->vdisplay,
+		  adjusted_mode->hdisplay,
+		  adjusted_mode->vdisplay);
+
+	recreate_primary = true;
+
+	width = mode->hdisplay;
+	height = mode->vdisplay;
+	base_offset = 0;
+
+	ret = qxl_bo_reserve(bo, false);
+	if (ret != 0)
+		return ret;
+	ret = qxl_bo_pin(bo, bo->type, NULL);
+	if (ret != 0) {
+		qxl_bo_unreserve(bo);
+		return -EINVAL;
+	}
+	qxl_bo_unreserve(bo);
+	if (recreate_primary) {
+		qxl_io_destroy_primary(qdev);
+		qxl_io_log(qdev,
+			   "recreate primary: %dx%d (was %dx%d,%d,%d)\n",
+			   width, height, bo->surf.width,
+			   bo->surf.height, bo->surf.stride, bo->surf.format);
+		qxl_io_create_primary(qdev, width, height, base_offset, bo);
+		bo->is_primary = true;
+	}
+
+	if (old_bo && old_bo != bo) {
+		old_bo->is_primary = false;
+		ret = qxl_bo_reserve(old_bo, false);
+		if (ret == 0) {
+			qxl_bo_unpin(old_bo);
+			qxl_bo_unreserve(old_bo);
+		}
+	}
+
+	if (qdev->monitors_config->count == 0) {
+		qxl_monitors_config_set_single(qdev, x, y,
+					       mode->hdisplay,
+					       mode->vdisplay);
+	}
+	qdev->mode_set = true;
+	return 0;
+}
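+
+/*
+ * The device exposes a single primary surface: every successful mode set
+ * tears down the old primary and recreates it on top of the freshly
+ * pinned framebuffer bo. monitors_config is only seeded here for the
+ * single-head case; multi-head updates go through the encoder commit
+ * path further below.
+ */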
+
+static void qxl_crtc_prepare(struct drm_crtc *crtc)
+{
+	DRM_DEBUG("current: %dx%d+%d+%d (%d).\n",
+		  crtc->mode.hdisplay, crtc->mode.vdisplay,
+		  crtc->x, crtc->y, crtc->enabled);
+}
+
+static void qxl_crtc_commit(struct drm_crtc *crtc)
+{
+	DRM_DEBUG("\n");
+}
+
+void qxl_crtc_load_lut(struct drm_crtc *crtc)
+{
+	DRM_DEBUG("\n");
+}
+
+static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
+	.dpms = qxl_crtc_dpms,
+	.mode_fixup = qxl_crtc_mode_fixup,
+	.mode_set = qxl_crtc_mode_set,
+	.prepare = qxl_crtc_prepare,
+	.commit = qxl_crtc_commit,
+	.load_lut = qxl_crtc_load_lut,
+};
+
+int qdev_crtc_init(struct drm_device *dev, int num_crtc)
+{
+	struct qxl_crtc *qxl_crtc;
+
+	qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL);
+	if (!qxl_crtc)
+		return -ENOMEM;
+
+	drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256);
+	drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
+	return 0;
+}
+
+static void qxl_enc_dpms(struct drm_encoder *encoder, int mode)
+{
+	DRM_DEBUG("\n");
+}
+
+static bool qxl_enc_mode_fixup(struct drm_encoder *encoder,
+			       const struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+	DRM_DEBUG("\n");
+	return true;
+}
+
+static void qxl_enc_prepare(struct drm_encoder *encoder)
+{
+	DRM_DEBUG("\n");
+}
+
+static void qxl_write_monitors_config_for_encoder(struct qxl_device *qdev,
+		struct drm_encoder *encoder)
+{
+	int i;
+	struct qxl_head *head;
+	struct drm_display_mode *mode;
+
+	BUG_ON(!encoder);
+	/* TODO: ugly, do better */
+	for (i = 0; i < 32 && encoder->possible_crtcs != (1 << i); ++i)
+		;
+	if (i == 32) {
+		DRM_ERROR("encoder has wrong possible_crtcs: %x\n",
+			  encoder->possible_crtcs);
+		return;
+	}
+	if (!qdev->monitors_config ||
+	    qdev->monitors_config->max_allowed <= i) {
+		DRM_ERROR(
+		"head number too large or missing monitors config: %p, %d\n",
+		qdev->monitors_config,
+		qdev->monitors_config ?
+			qdev->monitors_config->max_allowed : -1);
+		return;
+	}
+	if (!encoder->crtc) {
+		DRM_ERROR("missing crtc on encoder %p\n", encoder);
+		return;
+	}
+	if (i != 0)
+		DRM_DEBUG("missing for multiple monitors: no head holes\n");
+	head = &qdev->monitors_config->heads[i];
+	head->id = i;
+	head->surface_id = 0;
+	if (encoder->crtc->enabled) {
+		mode = &encoder->crtc->mode;
+		head->width = mode->hdisplay;
+		head->height = mode->vdisplay;
+		head->x = encoder->crtc->x;
+		head->y = encoder->crtc->y;
+		if (qdev->monitors_config->count < i + 1)
+			qdev->monitors_config->count = i + 1;
+	} else {
+		head->width = 0;
+		head->height = 0;
+		head->x = 0;
+		head->y = 0;
+	}
+	DRM_DEBUG("setting head %d to +%d+%d %dx%d\n",
+		  i, head->x, head->y, head->width, head->height);
+	head->flags = 0;
+	/* TODO - somewhere else to call this for multiple monitors
+	 * (config_commit?) */
+	qxl_send_monitors_config(qdev);
+}
+
+static void qxl_enc_commit(struct drm_encoder *encoder)
+{
+	struct qxl_device *qdev = encoder->dev->dev_private;
+
+	qxl_write_monitors_config_for_encoder(qdev, encoder);
+	DRM_DEBUG("\n");
+}
+
+static void qxl_enc_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	DRM_DEBUG("\n");
+}
+
+static int qxl_conn_get_modes(struct drm_connector *connector)
+{
+	int ret = 0;
+	struct qxl_device *qdev = connector->dev->dev_private;
+
+	DRM_DEBUG_KMS("monitors_config=%p\n", qdev->monitors_config);
+	/* TODO: what should we do here? only show the configured modes for the
+	 * device, or allow the full list, or both? */
+	if (qdev->monitors_config && qdev->monitors_config->count) {
+		ret = qxl_add_monitors_config_modes(connector);
+		if (ret < 0)
+			return ret;
+	}
+	ret += qxl_add_common_modes(connector);
+	return ret;
+}
+
+static int qxl_conn_mode_valid(struct drm_connector *connector,
+			       struct drm_display_mode *mode)
+{
+	/* TODO: is this called for user defined modes? (xrandr --add-mode)
+	 * TODO: check that the mode fits in the framebuffer */
+	DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay,
+		  mode->vdisplay, mode->status);
+	return MODE_OK;
+}
+
+struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
+{
+	struct qxl_output *qxl_output =
+		drm_connector_to_qxl_output(connector);
+
+	DRM_DEBUG("\n");
+	return &qxl_output->enc;
+}
+
+static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
+	.dpms = qxl_enc_dpms,
+	.mode_fixup = qxl_enc_mode_fixup,
+	.prepare = qxl_enc_prepare,
+	.mode_set = qxl_enc_mode_set,
+	.commit = qxl_enc_commit,
+};
+
+static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
+	.get_modes = qxl_conn_get_modes,
+	.mode_valid = qxl_conn_mode_valid,
+	.best_encoder = qxl_best_encoder,
+};
+
+static void qxl_conn_save(struct drm_connector *connector)
+{
+	DRM_DEBUG("\n");
+}
+
+static void qxl_conn_restore(struct drm_connector *connector)
+{
+	DRM_DEBUG("\n");
+}
+
+static enum drm_connector_status qxl_conn_detect(
+			struct drm_connector *connector,
+			bool force)
+{
+	struct qxl_output *output =
+		drm_connector_to_qxl_output(connector);
+	struct drm_device *ddev = connector->dev;
+	struct qxl_device *qdev = ddev->dev_private;
+	int connected;
+
+	/* The first monitor is always connected */
+	connected = (output->index == 0) ||
+		    (qdev->monitors_config &&
+		     qdev->monitors_config->count > output->index);
+
+	DRM_DEBUG("\n");
+	return connected ? connector_status_connected
+			 : connector_status_disconnected;
+}
+
+static int qxl_conn_set_property(struct drm_connector *connector,
+				   struct drm_property *property,
+				   uint64_t value)
+{
+	DRM_DEBUG("\n");
+	return 0;
+}
+
+static void qxl_conn_destroy(struct drm_connector *connector)
+{
+	struct qxl_output *qxl_output =
+		drm_connector_to_qxl_output(connector);
+
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(qxl_output);
+}
+
+static const struct drm_connector_funcs qxl_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.save = qxl_conn_save,
+	.restore = qxl_conn_restore,
+	.detect = qxl_conn_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = qxl_conn_set_property,
+	.destroy = qxl_conn_destroy,
+};
+
+static void qxl_enc_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs qxl_enc_funcs = {
+	.destroy = qxl_enc_destroy,
+};
+
+int qdev_output_init(struct drm_device *dev, int num_output)
+{
+	struct qxl_output *qxl_output;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+
+	qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL);
+	if (!qxl_output)
+		return -ENOMEM;
+
+	qxl_output->index = num_output;
+
+	connector = &qxl_output->base;
+	encoder = &qxl_output->enc;
+	drm_connector_init(dev, &qxl_output->base,
+			   &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
+
+	drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
+			 DRM_MODE_ENCODER_VIRTUAL);
+
+	encoder->possible_crtcs = 1 << num_output;
+	drm_mode_connector_attach_encoder(&qxl_output->base,
+					  &qxl_output->enc);
+	drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
+	drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
+
+	drm_sysfs_connector_add(connector);
+	return 0;
+}
+
+static struct drm_framebuffer *
+qxl_user_framebuffer_create(struct drm_device *dev,
+			    struct drm_file *file_priv,
+			    struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj;
+	struct qxl_framebuffer *qxl_fb;
+	struct qxl_device *qdev = dev->dev_private;
+	int ret;
+
+	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+	if (!obj)
+		return NULL;
+
+	qxl_fb = kzalloc(sizeof(*qxl_fb), GFP_KERNEL);
+	if (qxl_fb == NULL) {
+		drm_gem_object_unreference_unlocked(obj);
+		return NULL;
+	}
+
+	ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj);
+	if (ret) {
+		kfree(qxl_fb);
+		drm_gem_object_unreference_unlocked(obj);
+		return NULL;
+	}
+
+	if (qdev->active_user_framebuffer) {
+		DRM_INFO("%s: active_user_framebuffer %p -> %p\n",
+			 __func__,
+			 qdev->active_user_framebuffer, qxl_fb);
+	}
+	qdev->active_user_framebuffer = qxl_fb;
+
+	return &qxl_fb->base;
+}
+
+static const struct drm_mode_config_funcs qxl_mode_funcs = {
+	.fb_create = qxl_user_framebuffer_create,
+};
+
+int qxl_modeset_init(struct qxl_device *qdev)
+{
+	int i;
+	int ret;
+	struct drm_gem_object *gobj;
+	int max_allowed = QXL_NUM_OUTPUTS;
+	int monitors_config_size = sizeof(struct qxl_monitors_config) +
+				   max_allowed * sizeof(struct qxl_head);
+
+	drm_mode_config_init(qdev->ddev);
+	ret = qxl_gem_object_create(qdev, monitors_config_size, 0,
+				    QXL_GEM_DOMAIN_VRAM,
+				    false, false, NULL, &gobj);
+	if (ret) {
+		DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret);
+		return ret;
+	}
+	qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
+	qxl_bo_kmap(qdev->monitors_config_bo, NULL);
+	qdev->monitors_config = qdev->monitors_config_bo->kptr;
+	qdev->ram_header->monitors_config =
+		qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0);
+
+	memset(qdev->monitors_config, 0, monitors_config_size);
+	qdev->monitors_config->max_allowed = max_allowed;
+
+	qdev->ddev->mode_config.funcs = (void *)&qxl_mode_funcs;
+
+	/* modes will be validated against the framebuffer size */
+	qdev->ddev->mode_config.min_width = 320;
+	qdev->ddev->mode_config.min_height = 200;
+	qdev->ddev->mode_config.max_width = 8192;
+	qdev->ddev->mode_config.max_height = 8192;
+
+	qdev->ddev->mode_config.fb_base = qdev->vram_base;
+	for (i = 0; i < QXL_NUM_OUTPUTS; ++i) {
+		qdev_crtc_init(qdev->ddev, i);
+		qdev_output_init(qdev->ddev, i);
+	}
+
+	qdev->mode_info.mode_config_initialized = true;
+
+	/* primary surface must be created by this point, to allow
+	 * issuing command queue commands and having them read by
+	 * spice server. */
+	qxl_fbdev_init(qdev);
+	return 0;
+}
+
+void qxl_modeset_fini(struct qxl_device *qdev)
+{
+	qxl_fbdev_fini(qdev);
+	if (qdev->mode_info.mode_config_initialized) {
+		drm_mode_config_cleanup(qdev->ddev);
+		qdev->mode_info.mode_config_initialized = false;
+	}
+}
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
new file mode 100644
index 0000000..7d5396d2
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright 2011 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/* returns a pointer to the already allocated qxl_rect array inside
+ * the qxl_clip_rects bo. Note this is the kernel mapping of the bo,
+ * offset to qxl_clip_rects.chunk.data, not a device address */
+static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
+					      struct qxl_drawable *drawable,
+					      unsigned num_clips,
+					      struct qxl_bo **clips_bo,
+					      struct qxl_release *release)
+{
+	struct qxl_clip_rects *dev_clips;
+	int ret;
+	int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips;
+
+	ret = qxl_alloc_bo_reserved(qdev, size, clips_bo);
+	if (ret)
+		return NULL;
+
+	ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips);
+	if (ret) {
+		qxl_bo_unref(clips_bo);
+		return NULL;
+	}
+	dev_clips->num_rects = num_clips;
+	dev_clips->chunk.next_chunk = 0;
+	dev_clips->chunk.prev_chunk = 0;
+	dev_clips->chunk.data_size = sizeof(struct qxl_rect) * num_clips;
+	return (struct qxl_rect *)dev_clips->chunk.data;
+}
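+
+/*
+ * Usage sketch for the helper above (see qxl_draw_dirty_fb below): the
+ * caller fills the returned rects through the CPU mapping, points
+ * drawable->clip.data at the clips bo's device physical address, adds the
+ * bo to the release with qxl_release_add_res(), and then drops its own
+ * kmap/reservation/reference; the release keeps the bo alive until the
+ * device is done with it.
+ */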
+
+static int
+make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
+	      const struct qxl_rect *rect,
+	      struct qxl_release **release)
+{
+	struct qxl_drawable *drawable;
+	int i, ret;
+
+	ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable),
+					 QXL_RELEASE_DRAWABLE, release,
+					 NULL);
+	if (ret)
+		return ret;
+
+	drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release);
+	drawable->type = type;
+
+	drawable->surface_id = surface;		/* Only primary for now */
+	drawable->effect = QXL_EFFECT_OPAQUE;
+	drawable->self_bitmap = 0;
+	drawable->self_bitmap_area.top = 0;
+	drawable->self_bitmap_area.left = 0;
+	drawable->self_bitmap_area.bottom = 0;
+	drawable->self_bitmap_area.right = 0;
+	/* FIXME: add clipping */
+	drawable->clip.type = SPICE_CLIP_TYPE_NONE;
+
+	/*
+	 * surfaces_dest[i] should apparently be filled out with the
+	 * surfaces that we depend on, and surface_rects should be
+	 * filled with the rectangles of those surfaces that we
+	 * are going to use.
+	 */
+	for (i = 0; i < 3; ++i)
+		drawable->surfaces_dest[i] = -1;
+
+	if (rect)
+		drawable->bbox = *rect;
+
+	drawable->mm_time = qdev->rom->mm_clock;
+	qxl_release_unmap(qdev, *release, &drawable->release_info);
+	return 0;
+}
+
+static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
+				   const struct qxl_fb_image *qxl_fb_image)
+{
+	struct qxl_device *qdev = qxl_fb_image->qdev;
+	const struct fb_image *fb_image = &qxl_fb_image->fb_image;
+	uint32_t visual = qxl_fb_image->visual;
+	const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette;
+	struct qxl_palette *pal;
+	int ret;
+	uint32_t fgcolor, bgcolor;
+	static uint64_t unique; /* we make no attempt to actually set this
+				 * correctly globally, since that would require
+				 * tracking all of our palettes. */
+
+	ret = qxl_alloc_bo_reserved(qdev,
+				    sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
+				    palette_bo);
+	if (ret)
+		return ret;
+
+	ret = qxl_bo_kmap(*palette_bo, (void **)&pal);
+	if (ret) {
+		qxl_bo_unref(palette_bo);
+		return ret;
+	}
+	pal->num_ents = 2;
+	pal->unique = unique++;
+	if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
+		/* NB: this is the only used branch currently. */
+		fgcolor = pseudo_palette[fb_image->fg_color];
+		bgcolor = pseudo_palette[fb_image->bg_color];
+	} else {
+		fgcolor = fb_image->fg_color;
+		bgcolor = fb_image->bg_color;
+	}
+	pal->ents[0] = bgcolor;
+	pal->ents[1] = fgcolor;
+	qxl_bo_kunmap(*palette_bo);
+	return 0;
+}
+
+void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
+			int stride /* filled in if 0 */)
+{
+	struct qxl_device *qdev = qxl_fb_image->qdev;
+	struct qxl_drawable *drawable;
+	struct qxl_rect rect;
+	const struct fb_image *fb_image = &qxl_fb_image->fb_image;
+	int x = fb_image->dx;
+	int y = fb_image->dy;
+	int width = fb_image->width;
+	int height = fb_image->height;
+	const char *src = fb_image->data;
+	int depth = fb_image->depth;
+	struct qxl_release *release;
+	struct qxl_bo *image_bo;
+	struct qxl_image *image;
+	int ret;
+
+	if (stride == 0)
+		stride = depth * width / 8;
+
+	rect.left = x;
+	rect.right = x + width;
+	rect.top = y;
+	rect.bottom = y + height;
+
+	ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release);
+	if (ret)
+		return;
+
+	ret = qxl_image_create(qdev, release, &image_bo,
+			       (const uint8_t *)src, 0, 0,
+			       width, height, depth, stride);
+	if (ret) {
+		qxl_release_unreserve(qdev, release);
+		qxl_release_free(qdev, release);
+		return;
+	}
+	QXL_INFO(qdev, "image_bo offset %llx\n",
+		 image_bo->tbo.addr_space_offset - DRM_FILE_OFFSET);
+	if (depth == 1) {
+		struct qxl_bo *palette_bo;
+
+		ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image);
+		if (ret == 0) {
+			qxl_release_add_res(qdev, release, palette_bo);
+			ret = qxl_bo_kmap(image_bo, (void **)&image);
+			if (ret == 0) {
+				image->u.bitmap.palette =
+					qxl_bo_physical_address(qdev, palette_bo, 0);
+				qxl_bo_kunmap(image_bo);
+			}
+			qxl_bo_unreserve(palette_bo);
+			qxl_bo_unref(&palette_bo);
+		}
+	}
+
+	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+
+	drawable->u.copy.src_area.top = 0;
+	drawable->u.copy.src_area.bottom = height;
+	drawable->u.copy.src_area.left = 0;
+	drawable->u.copy.src_area.right = width;
+
+	drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
+	drawable->u.copy.scale_mode = 0;
+	drawable->u.copy.mask.flags = 0;
+	drawable->u.copy.mask.pos.x = 0;
+	drawable->u.copy.mask.pos.y = 0;
+	drawable->u.copy.mask.bitmap = 0;
+
+	drawable->u.copy.src_bitmap =
+		qxl_bo_physical_address(qdev, image_bo, 0);
+	qxl_release_unmap(qdev, release, &drawable->release_info);
+
+	qxl_release_add_res(qdev, release, image_bo);
+	qxl_bo_unreserve(image_bo);
+	qxl_bo_unref(&image_bo);
+
+	qxl_fence_releaseable(qdev, release);
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+	qxl_release_unreserve(qdev, release);
+}
+
+/* push a draw command using the given clipping rectangles as
+ * the sources from the shadow framebuffer.
+ *
+ * Right now implementing with a single draw and a clip list. Clip
+ * lists are known to be a problem performance wise, this can be solved
+ * by treating them differently in the server.
+ */
+void qxl_draw_dirty_fb(struct qxl_device *qdev,
+		       struct qxl_framebuffer *qxl_fb,
+		       struct qxl_bo *bo,
+		       unsigned flags, unsigned color,
+		       struct drm_clip_rect *clips,
+		       unsigned num_clips, int inc)
+{
+	/*
+	 * TODO: if flags & DRM_MODE_FB_DIRTY_ANNOTATE_FILL then we should
+	 * send a fill command instead, much cheaper.
+	 *
+	 * See include/drm/drm_mode.h
+	 */
+	struct drm_clip_rect *clips_ptr;
+	int i;
+	int left, right, top, bottom;
+	int width, height;
+	struct qxl_drawable *drawable;
+	struct qxl_rect drawable_rect;
+	struct qxl_rect *rects;
+	int stride = qxl_fb->base.pitches[0];
+	/* depth is not actually interesting, we don't mask with it */
+	int depth = qxl_fb->base.bits_per_pixel;
+	uint8_t *surface_base;
+	struct qxl_release *release;
+	struct qxl_bo *image_bo;
+	struct qxl_bo *clips_bo;
+	int ret;
+
+	left = clips->x1;
+	right = clips->x2;
+	top = clips->y1;
+	bottom = clips->y2;
+
+	/* skip the first clip rect */
+	for (i = 1, clips_ptr = clips + inc;
+	     i < num_clips; i++, clips_ptr += inc) {
+		left = min_t(int, left, (int)clips_ptr->x1);
+		right = max_t(int, right, (int)clips_ptr->x2);
+		top = min_t(int, top, (int)clips_ptr->y1);
+		bottom = max_t(int, bottom, (int)clips_ptr->y2);
+	}
+
+	width = right - left;
+	height = bottom - top;
+	drawable_rect.left = left;
+	drawable_rect.right = right;
+	drawable_rect.top = top;
+	drawable_rect.bottom = bottom;
+	ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
+			    &release);
+	if (ret)
+		return;
+
+	ret = qxl_bo_kmap(bo, (void **)&surface_base);
+	if (ret)
+		goto out_unref;
+
+	ret = qxl_image_create(qdev, release, &image_bo, surface_base,
+			       left, top, width, height, depth, stride);
+	qxl_bo_kunmap(bo);
+	if (ret)
+		goto out_unref;
+
+	rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo,
+				      release);
+	if (!rects) {
+		qxl_bo_unref(&image_bo);
+		goto out_unref;
+	}
+	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+
+	drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
+	drawable->clip.data = qxl_bo_physical_address(qdev,
+						      clips_bo, 0);
+	qxl_release_add_res(qdev, release, clips_bo);
+
+	drawable->u.copy.src_area.top = 0;
+	drawable->u.copy.src_area.bottom = height;
+	drawable->u.copy.src_area.left = 0;
+	drawable->u.copy.src_area.right = width;
+
+	drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
+	drawable->u.copy.scale_mode = 0;
+	drawable->u.copy.mask.flags = 0;
+	drawable->u.copy.mask.pos.x = 0;
+	drawable->u.copy.mask.pos.y = 0;
+	drawable->u.copy.mask.bitmap = 0;
+
+	drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0);
+	qxl_release_unmap(qdev, release, &drawable->release_info);
+	qxl_release_add_res(qdev, release, image_bo);
+	qxl_bo_unreserve(image_bo);
+	qxl_bo_unref(&image_bo);
+	clips_ptr = clips;
+	for (i = 0; i < num_clips; i++, clips_ptr += inc) {
+		rects[i].left   = clips_ptr->x1;
+		rects[i].right  = clips_ptr->x2;
+		rects[i].top    = clips_ptr->y1;
+		rects[i].bottom = clips_ptr->y2;
+	}
+	qxl_bo_kunmap(clips_bo);
+	qxl_bo_unreserve(clips_bo);
+	qxl_bo_unref(&clips_bo);
+
+	qxl_fence_releaseable(qdev, release);
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+	qxl_release_unreserve(qdev, release);
+	return;
+
+out_unref:
+	qxl_release_unreserve(qdev, release);
+	qxl_release_free(qdev, release);
+}
+
+void qxl_draw_copyarea(struct qxl_device *qdev,
+		       u32 width, u32 height,
+		       u32 sx, u32 sy,
+		       u32 dx, u32 dy)
+{
+	struct qxl_drawable *drawable;
+	struct qxl_rect rect;
+	struct qxl_release *release;
+	int ret;
+
+	rect.left = dx;
+	rect.top = dy;
+	rect.right = dx + width;
+	rect.bottom = dy + height;
+	ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release);
+	if (ret)
+		return;
+
+	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+	drawable->u.copy_bits.src_pos.x = sx;
+	drawable->u.copy_bits.src_pos.y = sy;
+
+	qxl_release_unmap(qdev, release, &drawable->release_info);
+	qxl_fence_releaseable(qdev, release);
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+	qxl_release_unreserve(qdev, release);
+}
+
+void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
+{
+	struct qxl_device *qdev = qxl_draw_fill_rec->qdev;
+	struct qxl_rect rect = qxl_draw_fill_rec->rect;
+	uint32_t color = qxl_draw_fill_rec->color;
+	uint16_t rop = qxl_draw_fill_rec->rop;
+	struct qxl_drawable *drawable;
+	struct qxl_release *release;
+	int ret;
+
+	ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release);
+	if (ret)
+		return;
+
+	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+	drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID;
+	drawable->u.fill.brush.u.color = color;
+	drawable->u.fill.rop_descriptor = rop;
+	drawable->u.fill.mask.flags = 0;
+	drawable->u.fill.mask.pos.x = 0;
+	drawable->u.fill.mask.pos.y = 0;
+	drawable->u.fill.mask.bitmap = 0;
+
+	qxl_release_unmap(qdev, release, &drawable->release_info);
+	qxl_fence_releaseable(qdev, release);
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+	qxl_release_unreserve(qdev, release);
+}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
new file mode 100644
index 0000000..d337da0
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -0,0 +1,145 @@
+/* vim: set ts=8 sw=8 tw=78 ai noexpandtab */
+/* qxl_drv.c -- QXL driver -*- linux-c -*-
+ *
+ * Copyright 2011 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dave Airlie <airlie@redhat.com>
+ *    Alon Levy <alevy@redhat.com>
+ */
+
+#include <linux/module.h>
+#include <linux/console.h>
+
+#include "drmP.h"
+#include "drm/drm.h"
+
+#include "qxl_drv.h"
+
+extern int qxl_max_ioctls;
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+	{ 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8,
+	  0xffff00, 0 },
+	{ 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_OTHER << 8,
+	  0xffff00, 0 },
+	{ 0, 0, 0 },
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+int qxl_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, qxl_modeset, int, 0400);
+
+static struct drm_driver qxl_driver;
+static struct pci_driver qxl_pci_driver;
+
+static int
+qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	if (pdev->revision < 4) {
+		DRM_ERROR("qxl too old, doesn't support client_monitors_config,"
+			  " use xf86-video-qxl in user mode");
+		return -EINVAL; /* TODO: ENODEV ? */
+	}
+	return drm_get_pci_dev(pdev, ent, &qxl_driver);
+}
+
+static void
+qxl_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
+static struct pci_driver qxl_pci_driver = {
+	 .name = DRIVER_NAME,
+	 .id_table = pciidlist,
+	 .probe = qxl_pci_probe,
+	 .remove = qxl_pci_remove,
+};
+
+static const struct file_operations qxl_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+	.mmap = qxl_mmap,
+};
+
+static struct drm_driver qxl_driver = {
+	.driver_features = DRIVER_GEM | DRIVER_MODESET |
+			   DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
+	.dev_priv_size = 0,
+	.load = qxl_driver_load,
+	.unload = qxl_driver_unload,
+
+	.dumb_create = qxl_mode_dumb_create,
+	.dumb_map_offset = qxl_mode_dumb_mmap,
+	.dumb_destroy = qxl_mode_dumb_destroy,
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = qxl_debugfs_init,
+	.debugfs_cleanup = qxl_debugfs_takedown,
+#endif
+	.gem_init_object = qxl_gem_object_init,
+	.gem_free_object = qxl_gem_object_free,
+	.gem_open_object = qxl_gem_object_open,
+	.gem_close_object = qxl_gem_object_close,
+	.fops = &qxl_fops,
+	.ioctls = qxl_ioctls,
+	.irq_handler = qxl_irq_handler,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = 0,
+	.minor = 1,
+	.patchlevel = 0,
+};
+
+static int __init qxl_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force() && qxl_modeset == -1)
+		return -EINVAL;
+#endif
+
+	if (qxl_modeset == 0)
+		return -EINVAL;
+	qxl_driver.num_ioctls = qxl_max_ioctls;
+	return drm_pci_init(&qxl_driver, &qxl_pci_driver);
+}
+
+static void __exit qxl_exit(void)
+{
+	drm_pci_exit(&qxl_driver, &qxl_pci_driver);
+}
+
+module_init(qxl_init);
+module_exit(qxl_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
new file mode 100644
index 0000000..52b582c
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -0,0 +1,566 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#ifndef QXL_DRV_H
+#define QXL_DRV_H
+
+/*
+ * Definitions taken from spice-protocol, plus kernel driver specific bits.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+
+#include <drm/qxl_drm.h>
+#include "qxl_dev.h"
+
+#define DRIVER_AUTHOR		"Dave Airlie"
+
+#define DRIVER_NAME		"qxl"
+#define DRIVER_DESC		"RH QXL"
+#define DRIVER_DATE		"20120117"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 0
+
+#define QXL_NUM_OUTPUTS 1
+
+#define QXL_DEBUGFS_MAX_COMPONENTS		32
+
+extern int qxl_log_level;
+
+enum {
+	QXL_INFO_LEVEL = 1,
+	QXL_DEBUG_LEVEL = 2,
+};
+
+#define QXL_INFO(qdev, fmt, ...) do { \
+		if (qxl_log_level >= QXL_INFO_LEVEL) {	\
+			qxl_io_log(qdev, fmt, __VA_ARGS__); \
+		}	\
+	} while (0)
+#define QXL_DEBUG(qdev, fmt, ...) do { \
+		if (qxl_log_level >= QXL_DEBUG_LEVEL) {	\
+			qxl_io_log(qdev, fmt, __VA_ARGS__); \
+		}	\
+	} while (0)
+#define QXL_INFO_ONCE(qdev, fmt, ...) do { \
+		static int done;		\
+		if (!done) {			\
+			done = 1;			\
+			QXL_INFO(qdev, fmt, __VA_ARGS__);	\
+		}						\
+	} while (0)
+
+#define DRM_FILE_OFFSET 0x100000000ULL
+#define DRM_FILE_PAGE_OFFSET (DRM_FILE_OFFSET >> PAGE_SHIFT)
+
+#define QXL_INTERRUPT_MASK (\
+	QXL_INTERRUPT_DISPLAY |\
+	QXL_INTERRUPT_CURSOR |\
+	QXL_INTERRUPT_IO_CMD |\
+	QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)
+
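+/*
+ * A bo can be referenced by several in-flight commands at once, each with
+ * its own device-assigned release id, so the per-bo fence tracks a set of
+ * outstanding ids in a radix tree rather than a single sequence number.
+ */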
+struct qxl_fence {
+	struct qxl_device *qdev;
+	uint32_t num_active_releases;
+	uint32_t *release_ids;
+	struct radix_tree_root tree;
+};
+
+struct qxl_bo {
+	/* Protected by gem.mutex */
+	struct list_head		list;
+	/* Protected by tbo.reserved */
+	u32				placements[3];
+	struct ttm_placement		placement;
+	struct ttm_buffer_object	tbo;
+	struct ttm_bo_kmap_obj		kmap;
+	unsigned			pin_count;
+	void				*kptr;
+	int                             type;
+	/* Constant after initialization */
+	struct drm_gem_object		gem_base;
+	bool is_primary; /* is this bo currently the primary surface? */
+	bool hw_surf_alloc;
+	struct qxl_surface surf;
+	uint32_t surface_id;
+	struct qxl_fence fence; /* per bo fence  - list of releases */
+	struct qxl_release *surf_create;
+	atomic_t reserve_count;
+};
+#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
+
+struct qxl_gem {
+	struct mutex		mutex;
+	struct list_head	objects;
+};
+
+struct qxl_bo_list {
+	struct list_head lhead;
+	struct qxl_bo *bo;
+};
+
+struct qxl_reloc_list {
+	struct list_head bos;
+};
+
+struct qxl_crtc {
+	struct drm_crtc base;
+	int cur_x;
+	int cur_y;
+};
+
+struct qxl_output {
+	int index;
+	struct drm_connector base;
+	struct drm_encoder enc;
+};
+
+struct qxl_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj;
+};
+
+#define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base)
+#define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base)
+#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, base)
+#define to_qxl_framebuffer(x) container_of(x, struct qxl_framebuffer, base)
+
+struct qxl_mman {
+	struct ttm_bo_global_ref        bo_global_ref;
+	struct drm_global_reference	mem_global_ref;
+	bool				mem_global_referenced;
+	struct ttm_bo_device		bdev;
+};
+
+struct qxl_mode_info {
+	int num_modes;
+	struct qxl_mode *modes;
+	bool mode_config_initialized;
+
+	/* pointer to fbdev info structure */
+	struct qxl_fbdev *qfbdev;
+};
+
+struct qxl_memslot {
+	uint8_t		generation;
+	uint64_t	start_phys_addr;
+	uint64_t	end_phys_addr;
+	uint64_t	high_bits;
+};
+
+enum {
+	QXL_RELEASE_DRAWABLE,
+	QXL_RELEASE_SURFACE_CMD,
+	QXL_RELEASE_CURSOR_CMD,
+};
+
+/* driver-side release bookkeeping; distinct from the device-visible
+ * qxl_release_info in spice-protocol/qxl_dev.h */
+struct qxl_release {
+	int id;
+	int type;
+	int bo_count;
+	uint32_t release_offset;
+	uint32_t surface_release_id;
+	struct qxl_bo *bos[QXL_MAX_RES];
+};
+
+struct qxl_fb_image {
+	struct qxl_device *qdev;
+	uint32_t pseudo_palette[16];
+	struct fb_image fb_image;
+	uint32_t visual;
+};
+
+struct qxl_draw_fill {
+	struct qxl_device *qdev;
+	struct qxl_rect rect;
+	uint32_t color;
+	uint16_t rop;
+};
+
+/*
+ * Debugfs
+ */
+struct qxl_debugfs {
+	struct drm_info_list	*files;
+	unsigned		num_files;
+};
+
+int qxl_debugfs_add_files(struct qxl_device *rdev,
+			     struct drm_info_list *files,
+			     unsigned nfiles);
+int qxl_debugfs_fence_init(struct qxl_device *rdev);
+void qxl_debugfs_remove_files(struct qxl_device *qdev);
+
+struct qxl_device {
+	struct device			*dev;
+	struct drm_device		*ddev;
+	struct pci_dev			*pdev;
+	unsigned long flags;
+
+	resource_size_t vram_base, vram_size;
+	resource_size_t surfaceram_base, surfaceram_size;
+	resource_size_t rom_base, rom_size;
+	struct qxl_rom *rom;
+
+	struct qxl_mode *modes;
+	struct qxl_bo *monitors_config_bo;
+	struct qxl_monitors_config *monitors_config;
+
+	/* last received client_monitors_config */
+	struct qxl_monitors_config *client_monitors_config;
+
+	int io_base;
+	void *ram;
+	struct qxl_mman		mman;
+	struct qxl_gem		gem;
+	struct qxl_mode_info mode_info;
+
+	/*
+	 * last created framebuffer with fb_create
+	 * only used by debugfs dumbppm
+	 */
+	struct qxl_framebuffer *active_user_framebuffer;
+
+	struct fb_info			*fbdev_info;
+	struct qxl_framebuffer	*fbdev_qfb;
+	void *ram_physical;
+
+	struct qxl_ring *release_ring;
+	struct qxl_ring *command_ring;
+	struct qxl_ring *cursor_ring;
+
+	struct qxl_ram_header *ram_header;
+	bool mode_set;
+
+	bool primary_created;
+
+	struct qxl_memslot	*mem_slots;
+	uint8_t		n_mem_slots;
+
+	uint8_t		main_mem_slot;
+	uint8_t		surfaces_mem_slot;
+	uint8_t		slot_id_bits;
+	uint8_t		slot_gen_bits;
+	uint64_t	va_slot_mask;
+
+	struct idr	release_idr;
+	spinlock_t release_idr_lock;
+	struct mutex	async_io_mutex;
+	unsigned int last_sent_io_cmd;
+
+	/* interrupt handling */
+	atomic_t irq_received;
+	atomic_t irq_received_display;
+	atomic_t irq_received_cursor;
+	atomic_t irq_received_io_cmd;
+	unsigned irq_received_error;
+	wait_queue_head_t display_event;
+	wait_queue_head_t cursor_event;
+	wait_queue_head_t io_cmd_event;
+	struct work_struct client_monitors_config_work;
+
+	/* debugfs */
+	struct qxl_debugfs	debugfs[QXL_DEBUGFS_MAX_COMPONENTS];
+	unsigned		debugfs_count;
+
+	struct mutex		update_area_mutex;
+
+	struct idr	surf_id_idr;
+	spinlock_t surf_id_idr_lock;
+	int last_alloced_surf_id;
+
+	struct mutex surf_evict_mutex;
+	struct io_mapping *vram_mapping;
+	struct io_mapping *surface_mapping;
+
+	/* release bo suballocation state, one current bo per release type */
+	struct mutex release_mutex;
+	struct qxl_bo *current_release_bo[3];
+	int current_release_bo_offset[3];
+
+	struct workqueue_struct *gc_queue;
+	struct work_struct gc_work;
+};
+
+/* forward declaration for QXL_INFO_IO */
+void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
+
+extern struct drm_ioctl_desc qxl_ioctls[];
+extern int qxl_max_ioctls;
+
+int qxl_driver_load(struct drm_device *dev, unsigned long flags);
+int qxl_driver_unload(struct drm_device *dev);
+
+int qxl_modeset_init(struct qxl_device *qdev);
+void qxl_modeset_fini(struct qxl_device *qdev);
+
+int qxl_bo_init(struct qxl_device *qdev);
+void qxl_bo_fini(struct qxl_device *qdev);
+
+struct qxl_ring *qxl_ring_create(struct qxl_ring_header *header,
+				 int element_size,
+				 int n_elements,
+				 int prod_notify,
+				 bool set_prod_notify,
+				 wait_queue_head_t *push_event);
+void qxl_ring_free(struct qxl_ring *ring);
+
+static inline void *
+qxl_fb_virtual_address(struct qxl_device *qdev, unsigned long physical)
+{
+	QXL_INFO(qdev, "not implemented (%lu)\n", physical);
+	return NULL;
+}
+
+static inline uint64_t
+qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
+			unsigned long offset)
+{
+	int slot_id = bo->type == QXL_GEM_DOMAIN_VRAM ?
+		qdev->main_mem_slot : qdev->surfaces_mem_slot;
+	struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);
+
+	/* TODO - need to hold one of the locks to read tbo.offset */
+	return slot->high_bits | (bo->tbo.offset + offset);
+}
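+
+/*
+ * Addresses handed to the device are not raw bus addresses: the memslot's
+ * precomputed high_bits encode the slot id and generation and are or'ed
+ * onto the bo's offset within the slot. A sketch of the encoding, assuming
+ * the bit widths negotiated at init time (see the slot fields above):
+ *
+ *	high_bits = ((slot_id << slot_gen_bits) | generation)
+ *			<< (64 - (slot_gen_bits + slot_id_bits));
+ *	device_addr = high_bits | (bo->tbo.offset + offset);
+ */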
+
+/* qxl_fb.c */
+#define QXLFB_CONN_LIMIT 1
+
+int qxl_fbdev_init(struct qxl_device *qdev);
+void qxl_fbdev_fini(struct qxl_device *qdev);
+int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
+				  struct drm_file *file_priv,
+				  uint32_t *handle);
+
+/* qxl_display.c */
+int
+qxl_framebuffer_init(struct drm_device *dev,
+		     struct qxl_framebuffer *rfb,
+		     struct drm_mode_fb_cmd2 *mode_cmd,
+		     struct drm_gem_object *obj);
+void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
+void qxl_send_monitors_config(struct qxl_device *qdev);
+
+/* used by qxl_debugfs only */
+void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev);
+void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count);
+
+/* qxl_gem.c */
+int qxl_gem_init(struct qxl_device *qdev);
+void qxl_gem_fini(struct qxl_device *qdev);
+int qxl_gem_object_create(struct qxl_device *qdev, int size,
+			  int alignment, int initial_domain,
+			  bool discardable, bool kernel,
+			  struct qxl_surface *surf,
+			  struct drm_gem_object **obj);
+int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+			  uint64_t *gpu_addr);
+void qxl_gem_object_unpin(struct drm_gem_object *obj);
+int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+				      struct drm_file *file_priv,
+				      u32 domain,
+				      size_t size,
+				      struct qxl_surface *surf,
+				      struct qxl_bo **qobj,
+				      uint32_t *handle);
+int qxl_gem_object_init(struct drm_gem_object *obj);
+void qxl_gem_object_free(struct drm_gem_object *gobj);
+int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
+void qxl_gem_object_close(struct drm_gem_object *obj,
+			  struct drm_file *file_priv);
+void qxl_bo_force_delete(struct qxl_device *qdev);
+int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
+
+/* qxl_dumb.c */
+int qxl_mode_dumb_create(struct drm_file *file_priv,
+			 struct drm_device *dev,
+			 struct drm_mode_create_dumb *args);
+int qxl_mode_dumb_destroy(struct drm_file *file_priv,
+			  struct drm_device *dev,
+			  uint32_t handle);
+int qxl_mode_dumb_mmap(struct drm_file *filp,
+		       struct drm_device *dev,
+		       uint32_t handle, uint64_t *offset_p);
+
+
+/* qxl ttm */
+int qxl_ttm_init(struct qxl_device *qdev);
+void qxl_ttm_fini(struct qxl_device *qdev);
+int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* qxl image */
+
+int qxl_image_create(struct qxl_device *qdev,
+		     struct qxl_release *release,
+		     struct qxl_bo **image_bo,
+		     const uint8_t *data,
+		     int x, int y, int width, int height,
+		     int depth, int stride);
+void qxl_update_screen(struct qxl_device *qxl);
+
+/* qxl io operations (qxl_cmd.c) */
+
+void qxl_io_create_primary(struct qxl_device *qdev,
+			   unsigned width, unsigned height, unsigned offset,
+			   struct qxl_bo *bo);
+void qxl_io_destroy_primary(struct qxl_device *qdev);
+void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id);
+void qxl_io_notify_oom(struct qxl_device *qdev);
+
+int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
+		       const struct qxl_rect *area);
+
+void qxl_io_reset(struct qxl_device *qdev);
+void qxl_io_monitors_config(struct qxl_device *qdev);
+int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible);
+void qxl_io_flush_release(struct qxl_device *qdev);
+void qxl_io_flush_surfaces(struct qxl_device *qdev);
+
+int qxl_release_reserve(struct qxl_device *qdev,
+			struct qxl_release *release, bool no_wait);
+void qxl_release_unreserve(struct qxl_device *qdev,
+			   struct qxl_release *release);
+union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
+					struct qxl_release *release);
+void qxl_release_unmap(struct qxl_device *qdev,
+		       struct qxl_release *release,
+		       union qxl_release_info *info);
+void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource);
+
+int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
+				       enum qxl_surface_cmd_type surface_cmd_type,
+				       struct qxl_release *create_rel,
+				       struct qxl_release **release);
+int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
+			       int type, struct qxl_release **release,
+			       struct qxl_bo **rbo);
+int qxl_fence_releaseable(struct qxl_device *qdev,
+			  struct qxl_release *release);
+int
+qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+			      uint32_t type, bool interruptible);
+int
+qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+			     uint32_t type, bool interruptible);
+int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+			  struct qxl_bo **_bo);
+/* qxl drawing commands */
+
+void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
+			int stride /* filled in if 0 */);
+
+void qxl_draw_dirty_fb(struct qxl_device *qdev,
+		       struct qxl_framebuffer *qxl_fb,
+		       struct qxl_bo *bo,
+		       unsigned flags, unsigned color,
+		       struct drm_clip_rect *clips,
+		       unsigned num_clips, int inc);
+
+void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec);
+
+void qxl_draw_copyarea(struct qxl_device *qdev,
+		       u32 width, u32 height,
+		       u32 sx, u32 sy,
+		       u32 dx, u32 dy);
+
+uint64_t
+qxl_release_alloc(struct qxl_device *qdev, int type,
+		  struct qxl_release **ret);
+
+void qxl_release_free(struct qxl_device *qdev,
+		      struct qxl_release *release);
+void qxl_release_add_res(struct qxl_device *qdev,
+			 struct qxl_release *release,
+			 struct qxl_bo *bo);
+/* used by qxl_debugfs_release */
+struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
+						   uint64_t id);
+
+bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush);
+int qxl_garbage_collect(struct qxl_device *qdev);
+
+/* debugfs */
+
+int qxl_debugfs_init(struct drm_minor *minor);
+void qxl_debugfs_takedown(struct drm_minor *minor);
+
+/* qxl_irq.c */
+int qxl_irq_init(struct qxl_device *qdev);
+irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS);
+
+/* qxl_fb.c */
+int qxl_fb_init(struct qxl_device *qdev);
+
+int qxl_debugfs_add_files(struct qxl_device *qdev,
+			  struct drm_info_list *files,
+			  unsigned nfiles);
+
+int qxl_surface_id_alloc(struct qxl_device *qdev,
+			 struct qxl_bo *surf);
+void qxl_surface_id_dealloc(struct qxl_device *qdev,
+			    uint32_t surface_id);
+int qxl_hw_surface_alloc(struct qxl_device *qdev,
+			 struct qxl_bo *surf,
+			 struct ttm_mem_reg *mem);
+int qxl_hw_surface_dealloc(struct qxl_device *qdev,
+			   struct qxl_bo *surf);
+
+int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
+
+struct qxl_drv_surface *
+qxl_surface_lookup(struct drm_device *dev, int surface_id);
+void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
+int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
+
+/* qxl_fence.c */
+int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id);
+int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
+int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
+void qxl_fence_fini(struct qxl_fence *qfence);
+
+#endif
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
new file mode 100644
index 0000000..847c4ee
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/* dumb ioctls implementation */
+
+int qxl_mode_dumb_create(struct drm_file *file_priv,
+			    struct drm_device *dev,
+			    struct drm_mode_create_dumb *args)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct qxl_bo *qobj;
+	uint32_t handle;
+	int r;
+	struct qxl_surface surf;
+	uint32_t pitch, format;
+
+	pitch = args->width * ((args->bpp + 1) / 8);
+	args->size = pitch * args->height;
+	args->size = ALIGN(args->size, PAGE_SIZE);
+
+	switch (args->bpp) {
+	case 16:
+		format = SPICE_SURFACE_FMT_16_565;
+		break;
+	case 32:
+		format = SPICE_SURFACE_FMT_32_xRGB;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	surf.width = args->width;
+	surf.height = args->height;
+	surf.stride = pitch;
+	surf.format = format;
+	r = qxl_gem_object_create_with_handle(qdev, file_priv,
+					      QXL_GEM_DOMAIN_VRAM,
+					      args->size, &surf, &qobj,
+					      &handle);
+	if (r)
+		return r;
+	args->pitch = pitch;
+	args->handle = handle;
+	return 0;
+}
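+
+/*
+ * Worked example: a 1024x768, 32 bpp dumb buffer gets pitch = 1024 * 4 =
+ * 4096 bytes and size = ALIGN(4096 * 768, PAGE_SIZE) = 3145728 bytes, and
+ * is created as a SPICE_SURFACE_FMT_32_xRGB surface in VRAM.
+ */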
+
+int qxl_mode_dumb_destroy(struct drm_file *file_priv,
+			     struct drm_device *dev,
+			     uint32_t handle)
+{
+	return drm_gem_handle_delete(file_priv, handle);
+}
+
+int qxl_mode_dumb_mmap(struct drm_file *file_priv,
+		       struct drm_device *dev,
+		       uint32_t handle, uint64_t *offset_p)
+{
+	struct drm_gem_object *gobj;
+	struct qxl_bo *qobj;
+
+	BUG_ON(!offset_p);
+	gobj = drm_gem_object_lookup(dev, file_priv, handle);
+	if (gobj == NULL)
+		return -ENOENT;
+	qobj = gem_to_qxl_bo(gobj);
+	*offset_p = qxl_bo_mmap_offset(qobj);
+	drm_gem_object_unreference_unlocked(gobj);
+	return 0;
+}
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
new file mode 100644
index 0000000..0c5067d
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright © 2013 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     David Airlie
+ */
+#include <linux/module.h>
+#include <linux/fb.h>
+
+#include "drmP.h"
+#include "drm/drm.h"
+#include "drm/drm_crtc.h"
+#include "drm/drm_crtc_helper.h"
+#include "qxl_drv.h"
+
+#include "qxl_object.h"
+#include "drm_fb_helper.h"
+
+#define QXL_DIRTY_DELAY (HZ / 30)
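+/* flush accumulated dirty rectangles to the hw at most 30 times a second */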
+
+struct qxl_fbdev {
+	struct drm_fb_helper helper;
+	struct qxl_framebuffer	qfb;
+	struct list_head	fbdev_list;
+	struct qxl_device	*qdev;
+
+	void *shadow;
+	int size;
+
+	/* dirty memory logging */
+	struct {
+		spinlock_t lock;
+		bool active;
+		unsigned x1;
+		unsigned y1;
+		unsigned x2;
+		unsigned y2;
+	} dirty;
+};
+
+static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
+			      struct qxl_device *qdev, struct fb_info *info,
+			      const struct fb_image *image)
+{
+	qxl_fb_image->qdev = qdev;
+	if (info) {
+		qxl_fb_image->visual = info->fix.visual;
+		if (qxl_fb_image->visual == FB_VISUAL_TRUECOLOR ||
+		    qxl_fb_image->visual == FB_VISUAL_DIRECTCOLOR)
+			memcpy(&qxl_fb_image->pseudo_palette,
+			       info->pseudo_palette,
+			       sizeof(qxl_fb_image->pseudo_palette));
+	} else {
+		 /* fallback */
+		if (image->depth == 1)
+			qxl_fb_image->visual = FB_VISUAL_MONO10;
+		else
+			qxl_fb_image->visual = FB_VISUAL_DIRECTCOLOR;
+	}
+	if (image) {
+		memcpy(&qxl_fb_image->fb_image, image,
+		       sizeof(qxl_fb_image->fb_image));
+	}
+}
+
+static void qxl_fb_dirty_flush(struct fb_info *info)
+{
+	struct qxl_fbdev *qfbdev = info->par;
+	struct qxl_device *qdev = qfbdev->qdev;
+	struct qxl_fb_image qxl_fb_image;
+	struct fb_image *image = &qxl_fb_image.fb_image;
+	u32 x1, x2, y1, y2;
+
+	/* TODO: hard coding 32 bpp; pitches[0] is already in bytes */
+	int stride = qfbdev->qfb.base.pitches[0];
+
+	x1 = qfbdev->dirty.x1;
+	x2 = qfbdev->dirty.x2;
+	y1 = qfbdev->dirty.y1;
+	y2 = qfbdev->dirty.y2;
+	/*
+	 * we are using a shadow draw buffer, at qdev->surface0_shadow
+	 */
+	qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);
+	image->dx = x1;
+	image->dy = y1;
+	image->width = x2 - x1;
+	image->height = y2 - y1;
+	image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
+					 warnings */
+	image->bg_color = 0;
+	image->depth = 32;	     /* TODO: take from somewhere? */
+	image->cmap.start = 0;
+	image->cmap.len = 0;
+	image->cmap.red = NULL;
+	image->cmap.green = NULL;
+	image->cmap.blue = NULL;
+	image->cmap.transp = NULL;
+	image->data = qfbdev->shadow + (x1 * 4) + (stride * y1);
+
+	qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
+	qxl_draw_opaque_fb(&qxl_fb_image, stride);
+	qfbdev->dirty.x1 = 0;
+	qfbdev->dirty.x2 = 0;
+	qfbdev->dirty.y1 = 0;
+	qfbdev->dirty.y2 = 0;
+}
+
+static void qxl_deferred_io(struct fb_info *info,
+			    struct list_head *pagelist)
+{
+	struct qxl_fbdev *qfbdev = info->par;
+	unsigned long start, end, min, max;
+	struct page *page;
+	int y1, y2;
+
+	min = ULONG_MAX;
+	max = 0;
+	list_for_each_entry(page, pagelist, lru) {
+		start = page->index << PAGE_SHIFT;
+		end = start + PAGE_SIZE - 1;
+		min = min(min, start);
+		max = max(max, end);
+	}
+
+	if (min < max) {
+		y1 = min / info->fix.line_length;
+		y2 = (max / info->fix.line_length) + 1;
+
+		/* TODO: add spin lock? */
+		/* spin_lock_irqsave(&qfbdev->dirty.lock, flags); */
+		qfbdev->dirty.x1 = 0;
+		qfbdev->dirty.y1 = y1;
+		qfbdev->dirty.x2 = info->var.xres;
+		qfbdev->dirty.y2 = y2;
+		/* spin_unlock_irqrestore(&qfbdev->dirty.lock, flags); */
+	}
+
+	qxl_fb_dirty_flush(info);
+}
+
+
+static struct fb_deferred_io qxl_defio = {
+	.delay		= QXL_DIRTY_DELAY,
+	.deferred_io	= qxl_deferred_io,
+};
+
+static void qxl_fb_fillrect(struct fb_info *info,
+			    const struct fb_fillrect *fb_rect)
+{
+	struct qxl_fbdev *qfbdev = info->par;
+	struct qxl_device *qdev = qfbdev->qdev;
+	struct qxl_rect rect;
+	uint32_t color;
+	int x = fb_rect->dx;
+	int y = fb_rect->dy;
+	int width = fb_rect->width;
+	int height = fb_rect->height;
+	uint16_t rop;
+	struct qxl_draw_fill qxl_draw_fill_rec;
+
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+		color = ((u32 *) (info->pseudo_palette))[fb_rect->color];
+	else
+		color = fb_rect->color;
+	rect.left = x;
+	rect.right = x + width;
+	rect.top = y;
+	rect.bottom = y + height;
+	switch (fb_rect->rop) {
+	case ROP_XOR:
+		rop = SPICE_ROPD_OP_XOR;
+		break;
+	case ROP_COPY:
+		rop = SPICE_ROPD_OP_PUT;
+		break;
+	default:
+		pr_err("qxl_fb_fillrect(): unknown rop, "
+		       "defaulting to SPICE_ROPD_OP_PUT\n");
+		rop = SPICE_ROPD_OP_PUT;
+	}
+	qxl_draw_fill_rec.qdev = qdev;
+	qxl_draw_fill_rec.rect = rect;
+	qxl_draw_fill_rec.color = color;
+	qxl_draw_fill_rec.rop = rop;
+	if (!drm_can_sleep()) {
+		qxl_io_log(qdev,
+			"%s: TODO use RCU, mysterious locks with spin_lock\n",
+			__func__);
+		return;
+	}
+	qxl_draw_fill(&qxl_draw_fill_rec);
+}
+
+static void qxl_fb_copyarea(struct fb_info *info,
+			    const struct fb_copyarea *region)
+{
+	struct qxl_fbdev *qfbdev = info->par;
+
+	qxl_draw_copyarea(qfbdev->qdev,
+			  region->width, region->height,
+			  region->sx, region->sy,
+			  region->dx, region->dy);
+}
+
+static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
+{
+	qxl_draw_opaque_fb(qxl_fb_image, 0);
+}
+
+static void qxl_fb_imageblit(struct fb_info *info,
+			     const struct fb_image *image)
+{
+	struct qxl_fbdev *qfbdev = info->par;
+	struct qxl_device *qdev = qfbdev->qdev;
+	struct qxl_fb_image qxl_fb_image;
+
+	if (!drm_can_sleep()) {
+		/* we cannot do any ttm_bo allocation since that will fail on
+		 * ioremap_wc..__get_vm_area_node, so queue the work item
+		 * instead. This can happen from a printk inside an interrupt
+		 * context, i.e. smp_apic_timer_interrupt..check_cpu_stall */
+		qxl_io_log(qdev,
+			"%s: TODO use RCU, mysterious locks with spin_lock\n",
+			   __func__);
+		return;
+	}
+
+	/* ensure proper order of rendering operations - TODO: must do this
+	 * for everything. */
+	qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
+	qxl_fb_imageblit_safe(&qxl_fb_image);
+}
+
+int qxl_fb_init(struct qxl_device *qdev)
+{
+	return 0;
+}
+
+static struct fb_ops qxlfb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
+	.fb_fillrect = qxl_fb_fillrect,
+	.fb_copyarea = qxl_fb_copyarea,
+	.fb_imageblit = qxl_fb_imageblit,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
+{
+	struct qxl_bo *qbo = gem_to_qxl_bo(gobj);
+	int ret;
+
+	ret = qxl_bo_reserve(qbo, false);
+	if (likely(ret == 0)) {
+		qxl_bo_kunmap(qbo);
+		qxl_bo_unpin(qbo);
+		qxl_bo_unreserve(qbo);
+	}
+	drm_gem_object_unreference_unlocked(gobj);
+}
+
+int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
+				  struct drm_file *file_priv,
+				  uint32_t *handle)
+{
+	int r;
+	struct drm_gem_object *gobj = qdev->fbdev_qfb->obj;
+
+	BUG_ON(!gobj);
+	/* drm_gem_handle_create adds a reference - good */
+	r = drm_gem_handle_create(file_priv, gobj, handle);
+	if (r)
+		return r;
+	return 0;
+}
+
+static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
+				      struct drm_mode_fb_cmd2 *mode_cmd,
+				      struct drm_gem_object **gobj_p)
+{
+	struct qxl_device *qdev = qfbdev->qdev;
+	struct drm_gem_object *gobj = NULL;
+	struct qxl_bo *qbo = NULL;
+	int ret;
+	int aligned_size, size;
+	int height = mode_cmd->height;
+	int bpp;
+	int depth;
+
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);
+
+	size = mode_cmd->pitches[0] * height;
+	aligned_size = ALIGN(size, PAGE_SIZE);
+	/* TODO: unallocate and reallocate surface0 for real. Hack to just
+	 * have a large enough surface0 for 1024x768 Xorg 32bpp mode */
+	ret = qxl_gem_object_create(qdev, aligned_size, 0,
+				    QXL_GEM_DOMAIN_SURFACE,
+				    false, /* is discardable */
+				    false, /* is kernel (false means device) */
+				    NULL,
+				    &gobj);
+	if (ret) {
+		pr_err("failed to allocate framebuffer (%d)\n",
+		       aligned_size);
+		return ret;
+	}
+	qbo = gem_to_qxl_bo(gobj);
+
+	qbo->surf.width = mode_cmd->width;
+	qbo->surf.height = mode_cmd->height;
+	qbo->surf.stride = mode_cmd->pitches[0];
+	qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
+	ret = qxl_bo_reserve(qbo, false);
+	if (unlikely(ret != 0))
+		goto out_unref;
+	ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL);
+	if (ret) {
+		qxl_bo_unreserve(qbo);
+		goto out_unref;
+	}
+	ret = qxl_bo_kmap(qbo, NULL);
+	qxl_bo_unreserve(qbo); /* unreserve, will be mmaped */
+	if (ret)
+		goto out_unref;
+
+	*gobj_p = gobj;
+	return 0;
+out_unref:
+	qxlfb_destroy_pinned_object(gobj);
+	*gobj_p = NULL;
+	return ret;
+}
+
+static int qxlfb_create(struct qxl_fbdev *qfbdev,
+			struct drm_fb_helper_surface_size *sizes)
+{
+	struct qxl_device *qdev = qfbdev->qdev;
+	struct fb_info *info;
+	struct drm_framebuffer *fb = NULL;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	struct drm_gem_object *gobj = NULL;
+	struct qxl_bo *qbo = NULL;
+	struct device *device = &qdev->pdev->dev;
+	int ret;
+	int size;
+	int bpp = sizes->surface_bpp;
+	int depth = sizes->surface_depth;
+	void *shadow;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+
+	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
+	ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
+	if (ret)
+		return ret;
+	qbo = gem_to_qxl_bo(gobj);
+	QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
+		 mode_cmd.height, mode_cmd.pitches[0]);
+
+	shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
+	if (!shadow) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+	QXL_INFO(qdev,
+	"surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
+		 qxl_bo_gpu_offset(qbo),
+		 qxl_bo_mmap_offset(qbo),
+		 qbo->kptr,
+		 shadow);
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+
+	info = framebuffer_alloc(0, device);
+	if (info == NULL) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+
+	info->par = qfbdev;
+
+	qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj);
+
+	fb = &qfbdev->qfb.base;
+
+	/* setup helper with fb data */
+	qfbdev->helper.fb = fb;
+	qfbdev->helper.fbdev = info;
+	qfbdev->shadow = shadow;
+	strcpy(info->fix.id, "qxldrmfb");
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+
+	info->flags = FBINFO_DEFAULT;
+	info->fbops = &qxlfb_ops;
+
+	/*
+	 * TODO: using gobj->size in various places in this function. Not sure
+	 * what the difference between the different sizes is.
+	 */
+	info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
+	info->fix.smem_len = gobj->size;
+	info->screen_base = qfbdev->shadow;
+	info->screen_size = gobj->size;
+
+	drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width,
+			       sizes->fb_height);
+
+	/* setup aperture base/size for vesafb takeover */
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+	info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
+	info->apertures->ranges[0].size = qdev->vram_size;
+
+	info->fix.mmio_start = 0;
+	info->fix.mmio_len = 0;
+
+	if (info->screen_base == NULL) {
+		ret = -ENOSPC;
+		goto out_unref;
+	}
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+
+	info->fbdefio = &qxl_defio;
+	fb_deferred_io_init(info);
+
+	qdev->fbdev_info = info;
+	qdev->fbdev_qfb = &qfbdev->qfb;
+	DRM_INFO("fb mappable at 0x%lX, size %lu\n",  info->fix.smem_start, (unsigned long)info->screen_size);
+	DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
+	return 0;
+
+out_unref:
+	if (qbo) {
+		/* don't let qxl_bo_reserve() clobber the error we return */
+		if (likely(qxl_bo_reserve(qbo, false) == 0)) {
+			qxl_bo_kunmap(qbo);
+			qxl_bo_unpin(qbo);
+			qxl_bo_unreserve(qbo);
+		}
+	}
+	/* the framebuffer is embedded in qfbdev - clean it up, don't free it */
+	if (fb)
+		drm_framebuffer_cleanup(fb);
+	drm_gem_object_unreference(gobj);
+	return ret;
+}
+
+static int qxl_fb_find_or_create_single(
+		struct drm_fb_helper *helper,
+		struct drm_fb_helper_surface_size *sizes)
+{
+	struct qxl_fbdev *qfbdev =
+		container_of(helper, struct qxl_fbdev, helper);
+	int new_fb = 0;
+	int ret;
+
+	if (!helper->fb) {
+		ret = qxlfb_create(qfbdev, sizes);
+		if (ret)
+			return ret;
+		new_fb = 1;
+	}
+	return new_fb;
+}
+
+static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
+{
+	struct fb_info *info;
+	struct qxl_framebuffer *qfb = &qfbdev->qfb;
+
+	if (qfbdev->helper.fbdev) {
+		info = qfbdev->helper.fbdev;
+
+		unregister_framebuffer(info);
+		framebuffer_release(info);
+	}
+	if (qfb->obj) {
+		qxlfb_destroy_pinned_object(qfb->obj);
+		qfb->obj = NULL;
+	}
+	drm_fb_helper_fini(&qfbdev->helper);
+	vfree(qfbdev->shadow);
+	drm_framebuffer_cleanup(&qfb->base);
+
+	return 0;
+}
+
+static struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
+	/* TODO
+	.gamma_set = qxl_crtc_fb_gamma_set,
+	.gamma_get = qxl_crtc_fb_gamma_get,
+	*/
+	.fb_probe = qxl_fb_find_or_create_single,
+};
+
+int qxl_fbdev_init(struct qxl_device *qdev)
+{
+	struct qxl_fbdev *qfbdev;
+	int bpp_sel = 32; /* TODO: parameter from somewhere? */
+	int ret;
+
+	qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
+	if (!qfbdev)
+		return -ENOMEM;
+
+	qfbdev->qdev = qdev;
+	qdev->mode_info.qfbdev = qfbdev;
+	qfbdev->helper.funcs = &qxl_fb_helper_funcs;
+
+	ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
+				 1 /* num_crtc - QXL supports just 1 */,
+				 QXLFB_CONN_LIMIT);
+	if (ret) {
+		kfree(qfbdev);
+		return ret;
+	}
+
+	drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
+	drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
+	return 0;
+}
+
+void qxl_fbdev_fini(struct qxl_device *qdev)
+{
+	if (!qdev->mode_info.qfbdev)
+		return;
+
+	qxl_fbdev_destroy(qdev->ddev, qdev->mode_info.qfbdev);
+	kfree(qdev->mode_info.qfbdev);
+	qdev->mode_info.qfbdev = NULL;
+}
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c
new file mode 100644
index 0000000..63c6715
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_fence.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+
+#include "qxl_drv.h"
+
+/* QXL fencing-
+
+   When we submit operations to the GPU we pass a release reference to the
+   GPU with them; the release id is added to the release ring when the GPU
+   is finished with that particular operation and has removed it from its
+   tree.
+
+   So we can have multiple outstanding, non-linear fences per object.
+
+   From a TTM POV we only care whether the object has any outstanding
+   releases on it; we wait until all outstanding releases are processed.
+
+   The sync object is just the list of release ids that represent the
+   fences on that buffer; we simply add new releases onto the sync object
+   attached to the object.
+
+   This currently uses a radix tree to store the list of release ids.
+
+   For some reason every so often the qxl hw fails to release and things go
+   wrong.
+*/
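+
+/* Typical lifecycle of a release id on a bo (a sketch of the flow, not an
+   exhaustive list of callers):
+
+   qxl_fence_add_release()    - when a command referencing the bo is pushed
+                                to a hw ring
+   qxl_fence_remove_release() - from the garbage-collect worker, once the id
+                                shows up on the release ring
+*/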
+
+int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id)
+{
+	struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
+
+	spin_lock(&bo->tbo.bdev->fence_lock);
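+	/* the tree was initialized with GFP_ATOMIC, so the insert may
+	 * allocate while we hold the spinlock; a failed insert is ignored */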
+	radix_tree_insert(&qfence->tree, rel_id, qfence);
+	qfence->num_active_releases++;
+	spin_unlock(&bo->tbo.bdev->fence_lock);
+	return 0;
+}
+
+int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
+{
+	void *ret;
+	int retval = 0;
+	struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
+
+	spin_lock(&bo->tbo.bdev->fence_lock);
+
+	ret = radix_tree_delete(&qfence->tree, rel_id);
+	if (ret == qfence)
+		qfence->num_active_releases--;
+	else {
+		DRM_DEBUG("didn't find fence in radix tree for %d\n", rel_id);
+		retval = -ENOENT;
+	}
+	spin_unlock(&bo->tbo.bdev->fence_lock);
+	return retval;
+}
+
+
+int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence)
+{
+	qfence->qdev = qdev;
+	qfence->num_active_releases = 0;
+	INIT_RADIX_TREE(&qfence->tree, GFP_ATOMIC);
+	return 0;
+}
+
+void qxl_fence_fini(struct qxl_fence *qfence)
+{
+	kfree(qfence->release_ids);
+	qfence->num_active_releases = 0;
+}
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
new file mode 100644
index 0000000..adc1ee2
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "drmP.h"
+#include "drm/drm.h"
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+int qxl_gem_object_init(struct drm_gem_object *obj)
+{
+	/* nothing to do here */
+	return 0;
+}
+
+void qxl_gem_object_free(struct drm_gem_object *gobj)
+{
+	struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
+
+	if (qobj)
+		qxl_bo_unref(&qobj);
+}
+
+int qxl_gem_object_create(struct qxl_device *qdev, int size,
+			  int alignment, int initial_domain,
+			  bool discardable, bool kernel,
+			  struct qxl_surface *surf,
+			  struct drm_gem_object **obj)
+{
+	struct qxl_bo *qbo;
+	int r;
+
+	*obj = NULL;
+	/* At least align on page size */
+	if (alignment < PAGE_SIZE)
+		alignment = PAGE_SIZE;
+	r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo);
+	if (r) {
+		if (r != -ERESTARTSYS)
+			DRM_ERROR(
+			"Failed to allocate GEM object (%d, %d, %u, %d)\n",
+				  size, initial_domain, alignment, r);
+		return r;
+	}
+	*obj = &qbo->gem_base;
+
+	mutex_lock(&qdev->gem.mutex);
+	list_add_tail(&qbo->list, &qdev->gem.objects);
+	mutex_unlock(&qdev->gem.mutex);
+
+	return 0;
+}
+
+int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+				      struct drm_file *file_priv,
+				      u32 domain,
+				      size_t size,
+				      struct qxl_surface *surf,
+				      struct qxl_bo **qobj,
+				      uint32_t *handle)
+{
+	struct drm_gem_object *gobj;
+	int r;
+
+	BUG_ON(!qobj);
+	BUG_ON(!handle);
+
+	r = qxl_gem_object_create(qdev, size, 0,
+				  domain,
+				  false, false, surf,
+				  &gobj);
+	if (r)
+		return r;
+	r = drm_gem_handle_create(file_priv, gobj, handle);
+	if (r)
+		return r;
+	/* drop reference from allocate - handle holds it now */
+	*qobj = gem_to_qxl_bo(gobj);
+	drm_gem_object_unreference_unlocked(gobj);
+	return 0;
+}
+
+int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+			  uint64_t *gpu_addr)
+{
+	struct qxl_bo *qobj = obj->driver_private;
+	int r;
+
+	r = qxl_bo_reserve(qobj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = qxl_bo_pin(qobj, pin_domain, gpu_addr);
+	qxl_bo_unreserve(qobj);
+	return r;
+}
+
+void qxl_gem_object_unpin(struct drm_gem_object *obj)
+{
+	struct qxl_bo *qobj = obj->driver_private;
+	int r;
+
+	r = qxl_bo_reserve(qobj, false);
+	if (likely(r == 0)) {
+		qxl_bo_unpin(qobj);
+		qxl_bo_unreserve(qobj);
+	}
+}
+
+int qxl_gem_set_domain(struct drm_gem_object *gobj,
+			  uint32_t rdomain, uint32_t wdomain)
+{
+	struct qxl_bo *qobj;
+	uint32_t domain;
+	int r;
+
+	/* FIXME: reimplement */
+	qobj = gobj->driver_private;
+	/* work out where to validate the buffer to */
+	domain = wdomain;
+	if (!domain)
+		domain = rdomain;
+	if (!domain) {
+		/* Do nothing */
+		pr_warn("Set domain without a domain!\n");
+		return 0;
+	}
+	if (domain == QXL_GEM_DOMAIN_CPU) {
+		/* Asking for cpu access, wait for the object to be idle */
+		r = qxl_bo_wait(qobj, NULL, false);
+		if (r) {
+			pr_err("Failed to wait for object!\n");
+			return r;
+		}
+	}
+	return 0;
+}
+
+int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+	return 0;
+}
+
+void qxl_gem_object_close(struct drm_gem_object *obj,
+			  struct drm_file *file_priv)
+{
+}
+
+int qxl_gem_init(struct qxl_device *qdev)
+{
+	INIT_LIST_HEAD(&qdev->gem.objects);
+	return 0;
+}
+
+void qxl_gem_fini(struct qxl_device *qdev)
+{
+	qxl_bo_force_delete(qdev);
+}
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
new file mode 100644
index 0000000..7fc7204
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <linux/gfp.h>
+#include <linux/slab.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+static int
+qxl_image_create_helper(struct qxl_device *qdev,
+			struct qxl_release *release,
+			struct qxl_bo **image_bo,
+			const uint8_t *data,
+			int width, int height,
+			int depth, unsigned int hash,
+			int stride)
+{
+	struct qxl_image *image;
+	struct qxl_data_chunk *chunk;
+	int i;
+	int chunk_stride;
+	int linesize = width * depth / 8;
+	struct qxl_bo *chunk_bo;
+	int ret;
+	/* Chunk */
+	/* FIXME: Check integer overflow */
+	/* TODO: variable number of chunks */
+	chunk_stride = stride; /* TODO: should use linesize, but it renders
+				  wrong (check the bitmaps are sent correctly
+				  first) */
+	ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride,
+				    &chunk_bo);
+	if (ret)
+		return ret;
+	ret = qxl_bo_kmap(chunk_bo, (void **)&chunk);
+	if (ret) {
+		qxl_bo_unreserve(chunk_bo);
+		qxl_bo_unref(&chunk_bo);
+		return ret;
+	}
+	chunk->data_size = height * chunk_stride;
+	chunk->prev_chunk = 0;
+	chunk->next_chunk = 0;
+
+	if (stride == linesize && chunk_stride == stride)
+		memcpy(chunk->data, data, linesize * height);
+	else
+		for (i = 0 ; i < height ; ++i)
+			memcpy(chunk->data + i*chunk_stride, data + i*stride,
+			       linesize);
+
+	qxl_bo_kunmap(chunk_bo);
+
+	/* Image */
+	ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo);
+	if (ret)
+		return ret;
+	ret = qxl_bo_kmap(*image_bo, (void **)&image);
+	if (ret)
+		return ret;
+
+	image->descriptor.id = 0;
+	image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP;
+
+	image->descriptor.flags = 0;
+	image->descriptor.width = width;
+	image->descriptor.height = height;
+
+	switch (depth) {
+	case 1:
+		/* TODO: BE? check by arch? */
+		image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE;
+		break;
+	case 24:
+		image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT;
+		break;
+	case 32:
+		image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT;
+		break;
+	default:
+		DRM_ERROR("unsupported image bit depth\n");
+		return -EINVAL; /* TODO: cleanup */
+	}
+	image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
+	image->u.bitmap.x = width;
+	image->u.bitmap.y = height;
+	image->u.bitmap.stride = chunk_stride;
+	image->u.bitmap.palette = 0;
+	image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
+	qxl_release_add_res(qdev, release, chunk_bo);
+	qxl_bo_unreserve(chunk_bo);
+	qxl_bo_unref(&chunk_bo);
+
+	qxl_bo_kunmap(*image_bo);
+	return 0;
+}
+
+int qxl_image_create(struct qxl_device *qdev,
+		     struct qxl_release *release,
+		     struct qxl_bo **image_bo,
+		     const uint8_t *data,
+		     int x, int y, int width, int height,
+		     int depth, int stride)
+{
+	data += y * stride + x * (depth / 8);
+	return qxl_image_create_helper(qdev, release, image_bo, data,
+				       width, height, depth, 0, stride);
+}
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
new file mode 100644
index 0000000..83ca4f7
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/*
+ * TODO: we allocate a new gem (a qxl_bo) for each request.
+ * This is wasteful since bos are page aligned.
+ */
+int qxl_alloc_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_alloc *qxl_alloc = data;
+	int ret;
+	struct qxl_bo *qobj;
+	uint32_t handle;
+	u32 domain = QXL_GEM_DOMAIN_VRAM;
+
+	if (qxl_alloc->size == 0) {
+		DRM_ERROR("invalid size %d\n", qxl_alloc->size);
+		return -EINVAL;
+	}
+	ret = qxl_gem_object_create_with_handle(qdev, file_priv,
+						domain,
+						qxl_alloc->size,
+						NULL,
+						&qobj, &handle);
+	if (ret) {
+		DRM_ERROR("%s: failed to create gem ret=%d\n",
+			  __func__, ret);
+		return ret;
+	}
+	qxl_alloc->handle = handle;
+	return 0;
+}
+
+int qxl_map_ioctl(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_map *qxl_map = data;
+
+	return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
+				  &qxl_map->offset);
+}
+
+/*
+ * dst must be validated, i.e. the whole bo is on vram/surfaceram (right now
+ * all bos are on vram).
+ * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
+ */
+static void
+apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
+	    struct qxl_bo *src, uint64_t src_off)
+{
+	void *reloc_page;
+
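+	/* atomically map the page containing dst_off, then write the 64-bit
+	 * device-physical address of src at dst_off's offset within it */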
+	reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
+	*(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
+								     src, src_off);
+	qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+}
+
+static void
+apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
+		 struct qxl_bo *src)
+{
+	uint32_t id = 0;
+	void *reloc_page;
+
+	if (src && !src->is_primary)
+		id = src->surface_id;
+
+	reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
+	*(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id;
+	qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+}
+
+/* return holding the reference to this object */
+struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
+				  struct drm_file *file_priv, uint64_t handle,
+				  struct qxl_reloc_list *reloc_list)
+{
+	struct drm_gem_object *gobj;
+	struct qxl_bo *qobj;
+	int ret;
+
+	gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
+	if (!gobj) {
+		DRM_ERROR("bad bo handle %lld\n", handle);
+		return NULL;
+	}
+	qobj = gem_to_qxl_bo(gobj);
+
+	ret = qxl_bo_list_add(reloc_list, qobj);
+	if (ret)
+		return NULL;
+
+	return qobj;
+}
+
+/*
+ * Usage of execbuffer:
+ * Relocations need to take into account the full QXLDrawable size.
+ * However, the command as passed from user space must *not* contain the initial
+ * QXLReleaseInfo struct (first XXX bytes)
+ */
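+
+/*
+ * A minimal sketch of a single-drawable submission from userspace, using the
+ * structures this ioctl decodes below; 'drawable' points at the command body
+ * without the leading QXLReleaseInfo, and DRM_IOCTL_QXL_EXECBUFFER is the
+ * ioctl number assumed to be defined for this driver in qxl_drm.h:
+ *
+ *	struct drm_qxl_command cmd = {
+ *		.type		= QXL_CMD_DRAW,
+ *		.command	= (uintptr_t)drawable,
+ *		.command_size	= drawable_size,
+ *		.relocs		= (uintptr_t)relocs,
+ *		.relocs_num	= nr_relocs,
+ *	};
+ *	struct drm_qxl_execbuffer eb = {
+ *		.commands	= (uintptr_t)&cmd,
+ *		.commands_num	= 1,
+ *	};
+ *	drmIoctl(fd, DRM_IOCTL_QXL_EXECBUFFER, &eb);
+ */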
+int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_execbuffer *execbuffer = data;
+	struct drm_qxl_command user_cmd;
+	int cmd_num;
+	struct qxl_bo *reloc_src_bo;
+	struct qxl_bo *reloc_dst_bo;
+	struct drm_qxl_reloc reloc;
+	void *fb_cmd;
+	int i, ret;
+	struct qxl_reloc_list reloc_list;
+	int unwritten;
+	uint32_t reloc_dst_offset;
+	INIT_LIST_HEAD(&reloc_list.bos);
+
+	for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
+		struct qxl_release *release;
+		struct qxl_bo *cmd_bo;
+		int release_type;
+		struct drm_qxl_command *commands =
+			(struct drm_qxl_command *)execbuffer->commands;
+
+		if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
+				       sizeof(user_cmd)))
+			return -EFAULT;
+		switch (user_cmd.type) {
+		case QXL_CMD_DRAW:
+			release_type = QXL_RELEASE_DRAWABLE;
+			break;
+		case QXL_CMD_SURFACE:
+		case QXL_CMD_CURSOR:
+		default:
+			DRM_DEBUG("Only draw commands in execbuffers\n");
+			return -EINVAL;
+		}
+
+		if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
+			return -EINVAL;
+
+		ret = qxl_alloc_release_reserved(qdev,
+						 sizeof(union qxl_release_info) +
+						 user_cmd.command_size,
+						 release_type,
+						 &release,
+						 &cmd_bo);
+		if (ret)
+			return ret;
+
+		/* TODO copy slow path code from i915 */
+		fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo,
+						 (release->release_offset & PAGE_MASK));
+		unwritten = __copy_from_user_inatomic_nocache
+			(fb_cmd + sizeof(union qxl_release_info) +
+			 (release->release_offset & ~PAGE_MASK),
+			 (void *)(unsigned long)user_cmd.command,
+			 user_cmd.command_size);
+		qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
+		if (unwritten) {
+			DRM_ERROR("got unwritten %d\n", unwritten);
+			qxl_release_unreserve(qdev, release);
+			qxl_release_free(qdev, release);
+			return -EFAULT;
+		}
+
+		for (i = 0 ; i < user_cmd.relocs_num; ++i) {
+			if (DRM_COPY_FROM_USER(&reloc,
+					       &((struct drm_qxl_reloc *)user_cmd.relocs)[i],
+					       sizeof(reloc))) {
+				qxl_bo_list_unreserve(&reloc_list, true);
+				qxl_release_unreserve(qdev, release);
+				qxl_release_free(qdev, release);
+				return -EFAULT;
+			}
+
+			/* add the bos to the list of bos to validate -
+			   need to validate first then process relocs? */
+			if (reloc.dst_handle) {
+				reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
+								  reloc.dst_handle, &reloc_list);
+				if (!reloc_dst_bo) {
+					qxl_bo_list_unreserve(&reloc_list, true);
+					qxl_release_unreserve(qdev, release);
+					qxl_release_free(qdev, release);
+					return -EINVAL;
+				}
+				reloc_dst_offset = 0;
+			} else {
+				reloc_dst_bo = cmd_bo;
+				reloc_dst_offset = release->release_offset;
+			}
+
+			/* reserve and validate the reloc dst bo */
+			if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
+				reloc_src_bo =
+					qxlhw_handle_to_bo(qdev, file_priv,
+							   reloc.src_handle, &reloc_list);
+				if (!reloc_src_bo) {
+					if (reloc_dst_bo != cmd_bo)
+						drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
+					qxl_bo_list_unreserve(&reloc_list, true);
+					qxl_release_unreserve(qdev, release);
+					qxl_release_free(qdev, release);
+					return -EINVAL;
+				}
+			} else
+				reloc_src_bo = NULL;
+			if (reloc.reloc_type == QXL_RELOC_TYPE_BO) {
+				apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
+					    reloc_src_bo, reloc.src_offset);
+			} else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
+				apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
+			} else {
+				DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
+				if (reloc_src_bo && reloc_src_bo != cmd_bo)
+					drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base);
+				if (reloc_dst_bo != cmd_bo)
+					drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
+				qxl_bo_list_unreserve(&reloc_list, true);
+				qxl_release_unreserve(qdev, release);
+				qxl_release_free(qdev, release);
+				return -EINVAL;
+			}
+
+			if (reloc_src_bo && reloc_src_bo != cmd_bo) {
+				qxl_release_add_res(qdev, release, reloc_src_bo);
+				drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base);
+			}
+
+			if (reloc_dst_bo != cmd_bo)
+				drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
+		}
+		qxl_fence_releaseable(qdev, release);
+
+		ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true);
+		if (ret == -ERESTARTSYS) {
+			qxl_release_unreserve(qdev, release);
+			qxl_release_free(qdev, release);
+			qxl_bo_list_unreserve(&reloc_list, true);
+			return ret;
+		}
+		qxl_release_unreserve(qdev, release);
+	}
+	qxl_bo_list_unreserve(&reloc_list, false);
+	return 0;
+}
+
+int qxl_update_area_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_update_area *update_area = data;
+	struct qxl_rect area = {.left = update_area->left,
+				.top = update_area->top,
+				.right = update_area->right,
+				.bottom = update_area->bottom};
+	int ret;
+	struct drm_gem_object *gobj = NULL;
+	struct qxl_bo *qobj = NULL;
+
+	if (update_area->left >= update_area->right ||
+	    update_area->top >= update_area->bottom)
+		return -EINVAL;
+
+	gobj = drm_gem_object_lookup(dev, file, update_area->handle);
+	if (gobj == NULL)
+		return -ENOENT;
+
+	qobj = gem_to_qxl_bo(gobj);
+
+	ret = qxl_bo_reserve(qobj, false);
+	if (ret)
+		goto out;
+
+	if (!qobj->pin_count) {
+		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
+				      true, false);
+		if (unlikely(ret))
+			goto out;
+	}
+
+	ret = qxl_bo_check_id(qdev, qobj);
+	if (ret)
+		goto out2;
+	if (!qobj->surface_id)
+		DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
+	ret = qxl_io_update_area(qdev, qobj, &area);
+
+out2:
+	qxl_bo_unreserve(qobj);
+
+out:
+	drm_gem_object_unreference_unlocked(gobj);
+	return ret;
+}
+
+static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_getparam *param = data;
+
+	switch (param->param) {
+	case QXL_PARAM_NUM_SURFACES:
+		param->value = qdev->rom->n_surfaces;
+		break;
+	case QXL_PARAM_MAX_RELOCS:
+		param->value = QXL_MAX_RES;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_clientcap *param = data;
+	int byte, idx;
+
+	byte = param->index / 8;
+	idx = param->index % 8;
+
+	if (qdev->pdev->revision < 4)
+		return -ENOSYS;
+
+	if (byte >= 58)
+		return -ENOSYS;
+
+	if (qdev->rom->client_capabilities[byte] & (1 << idx))
+		return 0;
+	return -ENOSYS;
+}
+
+static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_alloc_surf *param = data;
+	struct qxl_bo *qobj;
+	uint32_t handle;
+	int ret;
+	int size, actual_stride;
+	struct qxl_surface surf;
+
+	/* work out size allocate bo with handle */
+	actual_stride = param->stride < 0 ? -param->stride : param->stride;
+	size = actual_stride * param->height + actual_stride;
+
+	surf.format = param->format;
+	surf.width = param->width;
+	surf.height = param->height;
+	surf.stride = param->stride;
+	surf.data = 0;
+
+	ret = qxl_gem_object_create_with_handle(qdev, file,
+						QXL_GEM_DOMAIN_SURFACE,
+						size,
+						&surf,
+						&qobj, &handle);
+	if (ret) {
+		DRM_ERROR("%s: failed to create gem ret=%d\n",
+			  __func__, ret);
+		return ret;
+	}
+	param->handle = handle;
+	return 0;
+}
+
+struct drm_ioctl_desc qxl_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
+							DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
+							DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
+							DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
+							DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+};
+
+int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls);
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
new file mode 100644
index 0000000..21393dc
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+
+irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
+	uint32_t pending;
+
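+	/* atomically fetch and clear the pending bits; the device may raise
+	 * new ones the moment we have read them */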
+	pending = xchg(&qdev->ram_header->int_pending, 0);
+
+	atomic_inc(&qdev->irq_received);
+
+	if (pending & QXL_INTERRUPT_DISPLAY) {
+		atomic_inc(&qdev->irq_received_display);
+		wake_up_all(&qdev->display_event);
+		qxl_queue_garbage_collect(qdev, false);
+	}
+	if (pending & QXL_INTERRUPT_CURSOR) {
+		atomic_inc(&qdev->irq_received_cursor);
+		wake_up_all(&qdev->cursor_event);
+	}
+	if (pending & QXL_INTERRUPT_IO_CMD) {
+		atomic_inc(&qdev->irq_received_io_cmd);
+		wake_up_all(&qdev->io_cmd_event);
+	}
+	if (pending & QXL_INTERRUPT_ERROR) {
+		/* TODO: log it, reset device (only way to exit this condition)
+		 * (do it a certain number of times, afterwards admit defeat,
+		 * to avoid endless loops).
+		 */
+		qdev->irq_received_error++;
+		qxl_io_log(qdev, "%s: driver is in bug mode.\n", __func__);
+	}
+	if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) {
+		qxl_io_log(qdev, "QXL_INTERRUPT_CLIENT_MONITORS_CONFIG\n");
+		schedule_work(&qdev->client_monitors_config_work);
+	}
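+	/* re-enable all interrupt sources and tell the device to
+	 * re-evaluate its irq line */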
+	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
+	outb(0, qdev->io_base + QXL_IO_UPDATE_IRQ);
+	return IRQ_HANDLED;
+}
+
+static void qxl_client_monitors_config_work_func(struct work_struct *work)
+{
+	struct qxl_device *qdev = container_of(work, struct qxl_device,
+					       client_monitors_config_work);
+
+	qxl_display_read_client_monitors_config(qdev);
+}
+
+int qxl_irq_init(struct qxl_device *qdev)
+{
+	int ret;
+
+	init_waitqueue_head(&qdev->display_event);
+	init_waitqueue_head(&qdev->cursor_event);
+	init_waitqueue_head(&qdev->io_cmd_event);
+	INIT_WORK(&qdev->client_monitors_config_work,
+		  qxl_client_monitors_config_work_func);
+	atomic_set(&qdev->irq_received, 0);
+	atomic_set(&qdev->irq_received_display, 0);
+	atomic_set(&qdev->irq_received_cursor, 0);
+	atomic_set(&qdev->irq_received_io_cmd, 0);
+	qdev->irq_received_error = 0;
+	ret = drm_irq_install(qdev->ddev);
+	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed installing irq: %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
new file mode 100644
index 0000000..036e0de
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+#include <linux/io-mapping.h>
+
+int qxl_log_level;
+
+static void qxl_dump_mode(struct qxl_device *qdev, void *p)
+{
+	struct qxl_mode *m = p;
+	DRM_DEBUG_KMS("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n",
+		      m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili,
+		      m->y_mili, m->orientation);
+}
+
+static bool qxl_check_device(struct qxl_device *qdev)
+{
+	struct qxl_rom *rom = qdev->rom;
+	int mode_offset;
+	int i;
+
+	if (rom->magic != 0x4f525851) {
+		DRM_ERROR("bad rom signature %x\n", rom->magic);
+		return false;
+	}
+
+	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
+	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
+		 rom->log_level);
+	DRM_INFO("Currently using mode #%d, list at 0x%x\n",
+		 rom->mode, rom->modes_offset);
+	DRM_INFO("%d io pages at offset 0x%x\n",
+		 rom->num_io_pages, rom->pages_offset);
+	DRM_INFO("%d byte draw area at offset 0x%x\n",
+		 rom->surface0_area_size, rom->draw_area_offset);
+
+	qdev->vram_size = rom->surface0_area_size;
+	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
+
+	mode_offset = rom->modes_offset / 4;
+	qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset];
+	DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset,
+		 qdev->mode_info.num_modes);
+	qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1);
+	for (i = 0; i < qdev->mode_info.num_modes; i++)
+		qxl_dump_mode(qdev, qdev->mode_info.modes + i);
+	return true;
+}
+
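+/*
+ * Device-physical addresses handed to the hw encode which memslot they live
+ * in: the slot index and the slot's generation are packed into the top
+ * (slot_id_bits + slot_gen_bits) bits of the 64-bit address.  setup_slot()
+ * precomputes those high bits so an address can later be formed by just
+ * ORing them onto an offset within the slot.
+ */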
+static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
+	unsigned long start_phys_addr, unsigned long end_phys_addr)
+{
+	uint64_t high_bits;
+	struct qxl_memslot *slot;
+	uint8_t slot_index;
+	struct qxl_ram_header *ram_header = qdev->ram_header;
+
+	slot_index = qdev->rom->slots_start + slot_index_offset;
+	slot = &qdev->mem_slots[slot_index];
+	slot->start_phys_addr = start_phys_addr;
+	slot->end_phys_addr = end_phys_addr;
+	ram_header->mem_slot.mem_start = slot->start_phys_addr;
+	ram_header->mem_slot.mem_end = slot->end_phys_addr;
+	qxl_io_memslot_add(qdev, slot_index);
+	slot->generation = qdev->rom->slot_generation;
+	high_bits = slot_index << qdev->slot_gen_bits;
+	high_bits |= slot->generation;
+	high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
+	slot->high_bits = high_bits;
+	return slot_index;
+}
+
+static void qxl_gc_work(struct work_struct *work)
+{
+	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
+	qxl_garbage_collect(qdev);
+}
+
+int qxl_device_init(struct qxl_device *qdev,
+		    struct drm_device *ddev,
+		    struct pci_dev *pdev,
+		    unsigned long flags)
+{
+	int r;
+
+	qdev->dev = &pdev->dev;
+	qdev->ddev = ddev;
+	qdev->pdev = pdev;
+	qdev->flags = flags;
+
+	mutex_init(&qdev->gem.mutex);
+	mutex_init(&qdev->update_area_mutex);
+	mutex_init(&qdev->release_mutex);
+	mutex_init(&qdev->surf_evict_mutex);
+	INIT_LIST_HEAD(&qdev->gem.objects);
+
+	qdev->rom_base = pci_resource_start(pdev, 2);
+	qdev->rom_size = pci_resource_len(pdev, 2);
+	qdev->vram_base = pci_resource_start(pdev, 0);
+	qdev->surfaceram_base = pci_resource_start(pdev, 1);
+	qdev->surfaceram_size = pci_resource_len(pdev, 1);
+	qdev->io_base = pci_resource_start(pdev, 3);
+
+	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
+	qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
+	DRM_DEBUG_KMS("qxl: vram %p-%p(%dM %dk), surface %p-%p(%dM %dk)\n",
+		 (void *)qdev->vram_base, (void *)pci_resource_end(pdev, 0),
+		 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
+		 (int)pci_resource_len(pdev, 0) / 1024,
+		 (void *)qdev->surfaceram_base,
+		 (void *)pci_resource_end(pdev, 1),
+		 (int)qdev->surfaceram_size / 1024 / 1024,
+		 (int)qdev->surfaceram_size / 1024);
+
+	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
+	if (!qdev->rom) {
+		pr_err("Unable to ioremap ROM\n");
+		return -ENOMEM;
+	}
+
+	qxl_check_device(qdev);
+
+	r = qxl_bo_init(qdev);
+	if (r) {
+		DRM_ERROR("bo init failed %d\n", r);
+		return r;
+	}
+
+	qdev->ram_header = ioremap(qdev->vram_base +
+				   qdev->rom->ram_header_offset,
+				   sizeof(*qdev->ram_header));
+
+	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
+					     sizeof(struct qxl_command),
+					     QXL_COMMAND_RING_SIZE,
+					     qdev->io_base + QXL_IO_NOTIFY_CMD,
+					     false,
+					     &qdev->display_event);
+
+	qdev->cursor_ring = qxl_ring_create(
+				&(qdev->ram_header->cursor_ring_hdr),
+				sizeof(struct qxl_command),
+				QXL_CURSOR_RING_SIZE,
+				qdev->io_base + QXL_IO_NOTIFY_CMD,
+				false,
+				&qdev->cursor_event);
+
+	qdev->release_ring = qxl_ring_create(
+				&(qdev->ram_header->release_ring_hdr),
+				sizeof(uint64_t),
+				QXL_RELEASE_RING_SIZE, 0, true,
+				NULL);
+
+	/* TODO - slot initialization should happen on reset. where is our
+	 * reset handler? */
+	qdev->n_mem_slots = qdev->rom->slots_end;
+	qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
+	qdev->slot_id_bits = qdev->rom->slot_id_bits;
+	qdev->va_slot_mask =
+		(~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);
+
+	qdev->mem_slots =
+		kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
+			GFP_KERNEL);
+
+	idr_init(&qdev->release_idr);
+	spin_lock_init(&qdev->release_idr_lock);
+
+	idr_init(&qdev->surf_id_idr);
+	spin_lock_init(&qdev->surf_id_idr_lock);
+
+	mutex_init(&qdev->async_io_mutex);
+
+	/* reset the device into a known state - no memslots, no primary
+	 * created, no surfaces. */
+	qxl_io_reset(qdev);
+
+	/* must initialize irq before first async io - slot creation */
+	r = qxl_irq_init(qdev);
+	if (r)
+		return r;
+
+	/*
+	 * Note that virtual is surface0. We rely on the single ioremap done
+	 * before.
+	 */
+	qdev->main_mem_slot = setup_slot(qdev, 0,
+		(unsigned long)qdev->vram_base,
+		(unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
+	qdev->surfaces_mem_slot = setup_slot(qdev, 1,
+		(unsigned long)qdev->surfaceram_base,
+		(unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
+	DRM_INFO("main mem slot %d [%lx,%x)\n",
+		qdev->main_mem_slot,
+		(unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
+
+
+	qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
+	INIT_WORK(&qdev->gc_work, qxl_gc_work);
+
+	r = qxl_fb_init(qdev);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+void qxl_device_fini(struct qxl_device *qdev)
+{
+	if (qdev->current_release_bo[0])
+		qxl_bo_unref(&qdev->current_release_bo[0]);
+	if (qdev->current_release_bo[1])
+		qxl_bo_unref(&qdev->current_release_bo[1]);
+	flush_workqueue(qdev->gc_queue);
+	destroy_workqueue(qdev->gc_queue);
+	qdev->gc_queue = NULL;
+
+	qxl_ring_free(qdev->command_ring);
+	qxl_ring_free(qdev->cursor_ring);
+	qxl_ring_free(qdev->release_ring);
+	qxl_bo_fini(qdev);
+	io_mapping_free(qdev->surface_mapping);
+	io_mapping_free(qdev->vram_mapping);
+	iounmap(qdev->ram_header);
+	iounmap(qdev->rom);
+	qdev->rom = NULL;
+	qdev->mode_info.modes = NULL;
+	qdev->mode_info.num_modes = 0;
+	qxl_debugfs_remove_files(qdev);
+}
+
+int qxl_driver_unload(struct drm_device *dev)
+{
+	struct qxl_device *qdev = dev->dev_private;
+
+	if (qdev == NULL)
+		return 0;
+	qxl_modeset_fini(qdev);
+	qxl_device_fini(qdev);
+
+	kfree(qdev);
+	dev->dev_private = NULL;
+	return 0;
+}
+
+int qxl_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	struct qxl_device *qdev;
+	int r;
+
+	/* require kms */
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
+	if (qdev == NULL)
+		return -ENOMEM;
+
+	dev->dev_private = qdev;
+
+	r = qxl_device_init(qdev, dev, dev->pdev, flags);
+	if (r)
+		goto out;
+
+	r = qxl_modeset_init(qdev);
+	if (r) {
+		/* qxl_driver_unload() frees qdev, don't hit the kfree below */
+		qxl_driver_unload(dev);
+		return r;
+	}
+
+	return 0;
+out:
+	kfree(qdev);
+	return r;
+}
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
new file mode 100644
index 0000000..51efb94
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+#include <linux/io-mapping.h>
+static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+{
+	struct qxl_bo *bo;
+	struct qxl_device *qdev;
+
+	bo = container_of(tbo, struct qxl_bo, tbo);
+	qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+
+	qxl_surface_evict(qdev, bo, false);
+	qxl_fence_fini(&bo->fence);
+	mutex_lock(&qdev->gem.mutex);
+	list_del_init(&bo->list);
+	mutex_unlock(&qdev->gem.mutex);
+	drm_gem_object_release(&bo->gem_base);
+	kfree(bo);
+}
+
+bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
+{
+	if (bo->destroy == &qxl_ttm_bo_destroy)
+		return true;
+	return false;
+}
+
+void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
+{
+	u32 c = 0;
+
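+	/* QXL_GEM_DOMAIN_VRAM lives in the main RAM BAR (TTM_PL_VRAM) and
+	 * QXL_GEM_DOMAIN_SURFACE in the surfaces BAR (TTM_PL_PRIV0) */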
+	qbo->placement.fpfn = 0;
+	qbo->placement.lpfn = 0;
+	qbo->placement.placement = qbo->placements;
+	qbo->placement.busy_placement = qbo->placements;
+	if (domain & QXL_GEM_DOMAIN_VRAM)
+		qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM;
+	if (domain & QXL_GEM_DOMAIN_SURFACE)
+		qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0;
+	if (domain & QXL_GEM_DOMAIN_CPU)
+		qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	if (!c)
+		qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	qbo->placement.num_placement = c;
+	qbo->placement.num_busy_placement = c;
+}
+
+
+int qxl_bo_create(struct qxl_device *qdev,
+		  unsigned long size, bool kernel, u32 domain,
+		  struct qxl_surface *surf,
+		  struct qxl_bo **bo_ptr)
+{
+	struct qxl_bo *bo;
+	enum ttm_bo_type type;
+	int r;
+
+	if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
+		qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
+	if (kernel)
+		type = ttm_bo_type_kernel;
+	else
+		type = ttm_bo_type_device;
+	*bo_ptr = NULL;
+	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
+	if (bo == NULL)
+		return -ENOMEM;
+	size = roundup(size, PAGE_SIZE);
+	r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
+	if (unlikely(r)) {
+		kfree(bo);
+		return r;
+	}
+	bo->gem_base.driver_private = NULL;
+	bo->type = domain;
+	bo->pin_count = 0;
+	bo->surface_id = 0;
+	qxl_fence_init(qdev, &bo->fence);
+	INIT_LIST_HEAD(&bo->list);
+	atomic_set(&bo->reserve_count, 0);
+	if (surf)
+		bo->surf = *surf;
+
+	qxl_ttm_placement_from_domain(bo, domain);
+
+	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
+			&bo->placement, 0, !kernel, NULL, size,
+			NULL, &qxl_ttm_bo_destroy);
+	if (unlikely(r != 0)) {
+		if (r != -ERESTARTSYS)
+			dev_err(qdev->dev,
+				"object_init failed for (%lu, 0x%08X)\n",
+				size, domain);
+		return r;
+	}
+	*bo_ptr = bo;
+	return 0;
+}
+
+int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
+{
+	bool is_iomem;
+	int r;
+
+	if (bo->kptr) {
+		if (ptr)
+			*ptr = bo->kptr;
+		return 0;
+	}
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+	if (r)
+		return r;
+	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+	if (ptr)
+		*ptr = bo->kptr;
+	return 0;
+}
+
+void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
+			      struct qxl_bo *bo, int page_offset)
+{
+	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
+	void *rptr;
+	int ret;
+	struct io_mapping *map;
+
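+	/* bos in device memory get a per-page atomic mapping through the
+	 * device's io_mapping; anything else falls back to a full kmap
+	 */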
+	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+		map = qdev->vram_mapping;
+	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+		map = qdev->surface_mapping;
+	else
+		goto fallback;
+
+	(void) ttm_mem_io_lock(man, false);
+	ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
+	ttm_mem_io_unlock(man);
+
+	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
+fallback:
+	if (bo->kptr) {
+		rptr = bo->kptr + (page_offset * PAGE_SIZE);
+		return rptr;
+	}
+
+	ret = qxl_bo_kmap(bo, &rptr);
+	if (ret)
+		return NULL;
+
+	rptr += page_offset * PAGE_SIZE;
+	return rptr;
+}
+
+void qxl_bo_kunmap(struct qxl_bo *bo)
+{
+	if (bo->kptr == NULL)
+		return;
+	bo->kptr = NULL;
+	ttm_bo_kunmap(&bo->kmap);
+}
+
+void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
+			       struct qxl_bo *bo, void *pmap)
+{
+	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
+	struct io_mapping *map;
+
+	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+		map = qdev->vram_mapping;
+	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+		map = qdev->surface_mapping;
+	else
+		goto fallback;
+
+	io_mapping_unmap_atomic(pmap);
+
+	(void) ttm_mem_io_lock(man, false);
+	ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
+	ttm_mem_io_unlock(man);
+	return;
+fallback:
+	qxl_bo_kunmap(bo);
+}
+
+void qxl_bo_unref(struct qxl_bo **bo)
+{
+	struct ttm_buffer_object *tbo;
+
+	if ((*bo) == NULL)
+		return;
+	tbo = &((*bo)->tbo);
+	ttm_bo_unref(&tbo);
+	if (tbo == NULL)
+		*bo = NULL;
+}
+
+struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
+{
+	ttm_bo_reference(&bo->tbo);
+	return bo;
+}
+
+int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
+{
+	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+	int r, i;
+
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = qxl_bo_gpu_offset(bo);
+		return 0;
+	}
+	qxl_ttm_placement_from_domain(bo, domain);
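+	/* mark every allowed placement no-evict so validation pins the bo */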
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	if (likely(r == 0)) {
+		bo->pin_count = 1;
+		if (gpu_addr != NULL)
+			*gpu_addr = qxl_bo_gpu_offset(bo);
+	}
+	if (unlikely(r != 0))
+		dev_err(qdev->dev, "%p pin failed\n", bo);
+	return r;
+}
+
+int qxl_bo_unpin(struct qxl_bo *bo)
+{
+	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+	int r, i;
+
+	if (!bo->pin_count) {
+		dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	if (unlikely(r != 0))
+		dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
+	return r;
+}
+
+void qxl_bo_force_delete(struct qxl_device *qdev)
+{
+	struct qxl_bo *bo, *n;
+
+	if (list_empty(&qdev->gem.objects))
+		return;
+	dev_err(qdev->dev, "Userspace still has active objects!\n");
+	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
+		mutex_lock(&qdev->ddev->struct_mutex);
+		dev_err(qdev->dev, "%p %p %lu %lu force free\n",
+			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
+			*((unsigned long *)&bo->gem_base.refcount));
+		mutex_lock(&qdev->gem.mutex);
+		list_del_init(&bo->list);
+		mutex_unlock(&qdev->gem.mutex);
+		/* this should unref the ttm bo */
+		drm_gem_object_unreference(&bo->gem_base);
+		mutex_unlock(&qdev->ddev->struct_mutex);
+	}
+}
+
+int qxl_bo_init(struct qxl_device *qdev)
+{
+	return qxl_ttm_init(qdev);
+}
+
+void qxl_bo_fini(struct qxl_device *qdev)
+{
+	qxl_ttm_fini(qdev);
+}
+
+int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
+{
+	int ret;
+
+	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
+		/* allocate a surface id for this surface now */
+		ret = qxl_surface_id_alloc(qdev, bo);
+		if (ret)
+			return ret;
+
+		ret = qxl_hw_surface_alloc(qdev, bo, NULL);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
+{
+	struct qxl_bo_list *entry, *sf;
+
+	list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
+		qxl_bo_unreserve(entry->bo);
+		list_del(&entry->lhead);
+		kfree(entry);
+	}
+}
+
+int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
+{
+	struct qxl_bo_list *entry;
+	int ret;
+
+	list_for_each_entry(entry, &reloc_list->bos, lhead) {
+		if (entry->bo == bo)
+			return 0;
+	}
+
+	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->bo = bo;
+	list_add(&entry->lhead, &reloc_list->bos);
+
+	ret = qxl_bo_reserve(bo, false);
+	if (ret)
+		return ret;
+
+	if (!bo->pin_count) {
+		qxl_ttm_placement_from_domain(bo, bo->type);
+		ret = ttm_bo_validate(&bo->tbo, &bo->placement,
+				      true, false);
+		if (ret)
+			return ret;
+	}
+
+	/* allocate a surface for reserved + validated buffers */
+	ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
+	if (ret)
+		return ret;
+	return 0;
+}
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
new file mode 100644
index 0000000..b4fd89f
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+#ifndef QXL_OBJECT_H
+#define QXL_OBJECT_H
+
+#include "qxl_drv.h"
+
+static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
+{
+	int r;
+
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+	if (unlikely(r != 0)) {
+		if (r != -ERESTARTSYS) {
+			struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+			dev_err(qdev->dev, "%p reserve failed\n", bo);
+		}
+		return r;
+	}
+	return 0;
+}
+
+static inline void qxl_bo_unreserve(struct qxl_bo *bo)
+{
+	ttm_bo_unreserve(&bo->tbo);
+}
+
+static inline u64 qxl_bo_gpu_offset(struct qxl_bo *bo)
+{
+	return bo->tbo.offset;
+}
+
+static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
+{
+	return bo->tbo.num_pages << PAGE_SHIFT;
+}
+
+static inline bool qxl_bo_is_reserved(struct qxl_bo *bo)
+{
+	return !!atomic_read(&bo->tbo.reserved);
+}
+
+static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
+{
+	return bo->tbo.addr_space_offset;
+}
+
+static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
+			      bool no_wait)
+{
+	int r;
+
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+	if (unlikely(r != 0)) {
+		if (r != -ERESTARTSYS) {
+			struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+			dev_err(qdev->dev, "%p reserve failed for wait\n",
+				bo);
+		}
+		return r;
+	}
+	spin_lock(&bo->tbo.bdev->fence_lock);
+	if (mem_type)
+		*mem_type = bo->tbo.mem.mem_type;
+	if (bo->tbo.sync_obj)
+		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+	spin_unlock(&bo->tbo.bdev->fence_lock);
+	ttm_bo_unreserve(&bo->tbo);
+	return r;
+}
+
+extern int qxl_bo_create(struct qxl_device *qdev,
+			 unsigned long size,
+			 bool kernel, u32 domain,
+			 struct qxl_surface *surf,
+			 struct qxl_bo **bo_ptr);
+extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
+extern void qxl_bo_kunmap(struct qxl_bo *bo);
+void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
+void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
+extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
+extern void qxl_bo_unref(struct qxl_bo **bo);
+extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
+extern int qxl_bo_unpin(struct qxl_bo *bo);
+extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain);
+extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
+
+extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo);
+extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed);
+#endif
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
new file mode 100644
index 0000000..1600781
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2011 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/*
+ * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
+ * into 256-byte chunks for now - gives 16 cmds per page.
+ *
+ * use an ida to index into the chunks?
+ */
+/* manage releasables */
+/* stack them 16 high for now - the drawable object is 191 bytes */
+#define RELEASE_SIZE 256
+#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
+/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
+#define SURFACE_RELEASE_SIZE 128
+#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
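+/*
+ * a one-page release bo thus holds 4096 / 256 = 16 drawable or cursor
+ * releases, or 4096 / 128 = 32 surface releases
+ */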
+
+static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
+static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
+uint64_t
+qxl_release_alloc(struct qxl_device *qdev, int type,
+		  struct qxl_release **ret)
+{
+	struct qxl_release *release;
+	int handle = 0;
+	size_t size = sizeof(*release);
+	int idr_ret;
+
+	release = kmalloc(size, GFP_KERNEL);
+	if (!release) {
+		DRM_ERROR("Out of memory\n");
+		return 0;
+	}
+	release->type = type;
+	release->bo_count = 0;
+	release->release_offset = 0;
+	release->surface_release_id = 0;
+again:
+	if (idr_pre_get(&qdev->release_idr, GFP_KERNEL) == 0) {
+		DRM_ERROR("Out of memory for release idr\n");
+		kfree(release);
+		goto release_fail;
+	}
+	spin_lock(&qdev->release_idr_lock);
+	idr_ret = idr_get_new_above(&qdev->release_idr, release, 1, &handle);
+	spin_unlock(&qdev->release_idr_lock);
+	if (idr_ret == -EAGAIN)
+		goto again;
+	if (ret)
+		*ret = release;
+	QXL_INFO(qdev, "allocated release %d\n", handle);
+	release->id = handle;
+release_fail:
+
+	return handle;
+}
+
+void
+qxl_release_free(struct qxl_device *qdev,
+		 struct qxl_release *release)
+{
+	int i;
+
+	QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id,
+		 release->type, release->bo_count);
+
+	if (release->surface_release_id)
+		qxl_surface_id_dealloc(qdev, release->surface_release_id);
+
+	for (i = 0; i < release->bo_count; i++) {
+		QXL_INFO(qdev, "release %llx\n",
+			 release->bos[i]->tbo.addr_space_offset
+			 - DRM_FILE_OFFSET);
+		qxl_fence_remove_release(&release->bos[i]->fence, release->id);
+		qxl_bo_unref(&release->bos[i]);
+	}
+	spin_lock(&qdev->release_idr_lock);
+	idr_remove(&qdev->release_idr, release->id);
+	spin_unlock(&qdev->release_idr_lock);
+	kfree(release);
+}
+
+void
+qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release,
+		    struct qxl_bo *bo)
+{
+	int i;
+
+	for (i = 0; i < release->bo_count; i++)
+		if (release->bos[i] == bo)
+			return;
+
+	if (release->bo_count >= QXL_MAX_RES) {
+		DRM_ERROR("exceeded max resource on a qxl_release item\n");
+		return;
+	}
+	release->bos[release->bo_count++] = qxl_bo_ref(bo);
+}
+
+int qxl_release_bo_alloc(struct qxl_device *qdev,
+			 struct qxl_bo **bo)
+{
+	int ret;
+
+	ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL,
+			    bo);
+	return ret;
+}
+
+int qxl_release_reserve(struct qxl_device *qdev,
+			struct qxl_release *release, bool no_wait)
+{
+	int ret;
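+
+	/* only the first user actually reserves the release bo; nested
+	 * callers just bump the shared reserve count
+	 */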
+	if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) {
+		ret = qxl_bo_reserve(release->bos[0], no_wait);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+void qxl_release_unreserve(struct qxl_device *qdev,
+			  struct qxl_release *release)
+{
+	if (atomic_dec_and_test(&release->bos[0]->reserve_count))
+		qxl_bo_unreserve(release->bos[0]);
+}
+
+int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
+				       enum qxl_surface_cmd_type surface_cmd_type,
+				       struct qxl_release *create_rel,
+				       struct qxl_release **release)
+{
+	int ret;
+
+	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
+		int idr_ret;
+		struct qxl_bo *bo;
+		union qxl_release_info *info;
+
+		/* stash the destroy release after the create command;
+		 * the create/destroy pair shares one SURFACE_RELEASE_SIZE
+		 * slot, so the destroy info lives 64 bytes in
+		 */
+		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
+		bo = qxl_bo_ref(create_rel->bos[0]);
+
+		(*release)->release_offset = create_rel->release_offset + 64;
+
+		qxl_release_add_res(qdev, *release, bo);
+
+		ret = qxl_release_reserve(qdev, *release, false);
+		if (ret) {
+			DRM_ERROR("release reserve failed\n");
+			goto out_unref;
+		}
+		info = qxl_release_map(qdev, *release);
+		info->id = idr_ret;
+		qxl_release_unmap(qdev, *release, info);
+
+out_unref:
+		qxl_bo_unref(&bo);
+		return ret;
+	}
+
+	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
+					 QXL_RELEASE_SURFACE_CMD, release, NULL);
+}
+
+int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
+				       int type, struct qxl_release **release,
+				       struct qxl_bo **rbo)
+{
+	struct qxl_bo *bo;
+	int idr_ret;
+	int ret;
+	union qxl_release_info *info;
+	int cur_idx;
+
+	if (type == QXL_RELEASE_DRAWABLE)
+		cur_idx = 0;
+	else if (type == QXL_RELEASE_SURFACE_CMD)
+		cur_idx = 1;
+	else if (type == QXL_RELEASE_CURSOR_CMD)
+		cur_idx = 2;
+	else {
+		DRM_ERROR("got illegal type: %d\n", type);
+		return -EINVAL;
+	}
+
+	idr_ret = qxl_release_alloc(qdev, type, release);
+
+	mutex_lock(&qdev->release_mutex);
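+	/* if the current release bo for this type is full, drop our
+	 * reference (releases still holding it free it later) and start
+	 * a fresh one below
+	 */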
+	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
+		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
+		qdev->current_release_bo_offset[cur_idx] = 0;
+		qdev->current_release_bo[cur_idx] = NULL;
+	}
+	if (!qdev->current_release_bo[cur_idx]) {
+		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
+		if (ret) {
+			mutex_unlock(&qdev->release_mutex);
+			return ret;
+		}
+
+		/* pin release bos; they are too messy to evict */
+		ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false);
+		qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL);
+		qxl_bo_unreserve(qdev->current_release_bo[cur_idx]);
+	}
+
+	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
+
+	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
+	qdev->current_release_bo_offset[cur_idx]++;
+
+	if (rbo)
+		*rbo = bo;
+
+	qxl_release_add_res(qdev, *release, bo);
+
+	ret = qxl_release_reserve(qdev, *release, false);
+	mutex_unlock(&qdev->release_mutex);
+	if (ret)
+		goto out_unref;
+
+	info = qxl_release_map(qdev, *release);
+	info->id = idr_ret;
+	qxl_release_unmap(qdev, *release, info);
+
+out_unref:
+	qxl_bo_unref(&bo);
+	return ret;
+}
+
+int qxl_fence_releaseable(struct qxl_device *qdev,
+			  struct qxl_release *release)
+{
+	int i, ret;
+
+	for (i = 0; i < release->bo_count; i++) {
+		if (!release->bos[i]->tbo.sync_obj)
+			release->bos[i]->tbo.sync_obj = &release->bos[i]->fence;
+		ret = qxl_fence_add_release(&release->bos[i]->fence, release->id);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
+						   uint64_t id)
+{
+	struct qxl_release *release;
+
+	spin_lock(&qdev->release_idr_lock);
+	release = idr_find(&qdev->release_idr, id);
+	spin_unlock(&qdev->release_idr_lock);
+	if (!release) {
+		DRM_ERROR("failed to find id in release_idr\n");
+		return NULL;
+	}
+	if (release->bo_count < 1) {
+		DRM_ERROR("read a released resource with 0 bos\n");
+		return NULL;
+	}
+	return release;
+}
+
+union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
+					struct qxl_release *release)
+{
+	void *ptr;
+	union qxl_release_info *info;
+	struct qxl_bo *bo = release->bos[0];
+
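+	/* releases always fit inside the first page of their bo, so
+	 * "& PAGE_SIZE" selects page 0 and "& ~PAGE_SIZE" is the byte
+	 * offset within it
+	 */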
+	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
+	info = ptr + (release->release_offset & ~PAGE_SIZE);
+	return info;
+}
+
+void qxl_release_unmap(struct qxl_device *qdev,
+		       struct qxl_release *release,
+		       union qxl_release_info *info)
+{
+	struct qxl_bo *bo = release->bos[0];
+	void *ptr;
+
+	ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
+	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
+}
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
new file mode 100644
index 0000000..aa9fb9a
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -0,0 +1,577 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_page_alloc.h>
+#include <ttm/ttm_module.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/qxl_drm.h>
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+#include <linux/delay.h>
+static int qxl_ttm_debugfs_init(struct qxl_device *qdev);
+
+static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
+{
+	struct qxl_mman *mman;
+	struct qxl_device *qdev;
+
+	mman = container_of(bdev, struct qxl_mman, bdev);
+	qdev = container_of(mman, struct qxl_device, mman);
+	return qdev;
+}
+
+static int qxl_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void qxl_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+static int qxl_ttm_global_init(struct qxl_device *qdev)
+{
+	struct drm_global_reference *global_ref;
+	int r;
+
+	qdev->mman.mem_global_referenced = false;
+	global_ref = &qdev->mman.mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &qxl_ttm_mem_global_init;
+	global_ref->release = &qxl_ttm_mem_global_release;
+
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM memory accounting "
+			  "subsystem.\n");
+		return r;
+	}
+
+	qdev->mman.bo_global_ref.mem_glob =
+		qdev->mman.mem_global_ref.object;
+	global_ref = &qdev->mman.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref(&qdev->mman.mem_global_ref);
+		return r;
+	}
+
+	qdev->mman.mem_global_referenced = true;
+	return 0;
+}
+
+static void qxl_ttm_global_fini(struct qxl_device *qdev)
+{
+	if (qdev->mman.mem_global_referenced) {
+		drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
+		drm_global_item_unref(&qdev->mman.mem_global_ref);
+		qdev->mman.mem_global_referenced = false;
+	}
+}
+
+static struct vm_operations_struct qxl_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops;
+
+static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct ttm_buffer_object *bo;
+	struct qxl_device *qdev;
+	int r;
+
+	bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	if (bo == NULL)
+		return VM_FAULT_NOPAGE;
+	qdev = qxl_get_qdev(bo->bdev);
+	r = ttm_vm_ops->fault(vma, vmf);
+	return r;
+}
+
+int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct qxl_device *qdev;
+	int r;
+
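+	/* offsets below DRM_FILE_PAGE_OFFSET belong to legacy maps;
+	 * hand those off to the drm core
+	 */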
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
+		pr_info("%s: vma->vm_pgoff (%lu) < DRM_FILE_PAGE_OFFSET\n",
+			__func__, vma->vm_pgoff);
+		return drm_mmap(filp, vma);
+	}
+
+	file_priv = filp->private_data;
+	qdev = file_priv->minor->dev->dev_private;
+	if (qdev == NULL) {
+		DRM_ERROR(
+		 "filp->private_data->minor->dev->dev_private == NULL\n");
+		return -EINVAL;
+	}
+	QXL_INFO(qdev, "%s: filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
+		 __func__, filp->private_data, vma->vm_pgoff);
+
+	r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
+	if (unlikely(r != 0))
+		return r;
+	if (unlikely(ttm_vm_ops == NULL)) {
+		ttm_vm_ops = vma->vm_ops;
+		qxl_ttm_vm_ops = *ttm_vm_ops;
+		qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
+	}
+	vma->vm_ops = &qxl_ttm_vm_ops;
+	return 0;
+}
+
+static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+	return 0;
+}
+
+static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+			     struct ttm_mem_type_manager *man)
+{
+	struct qxl_device *qdev;
+
+	qdev = qxl_get_qdev(bdev);
+
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_VRAM:
+	case TTM_PL_PRIV0:
+		/* "On-card" video ram */
+		man->func = &ttm_bo_manager_func;
+		man->gpu_offset = 0;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+			     TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void qxl_evict_flags(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement)
+{
+	struct qxl_bo *qbo;
+	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+
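+	/* bos that are not ours can only be evicted to system memory */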
+	if (!qxl_ttm_bo_is_qxl_bo(bo)) {
+		placement->fpfn = 0;
+		placement->lpfn = 0;
+		placement->placement = &placements;
+		placement->busy_placement = &placements;
+		placement->num_placement = 1;
+		placement->num_busy_placement = 1;
+		return;
+	}
+	qbo = container_of(bo, struct qxl_bo, tbo);
+	qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU);
+	*placement = qbo->placement;
+}
+
+static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+	return 0;
+}
+
+static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+				  struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct qxl_device *qdev = qxl_get_qdev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.is_iomem = true;
+		mem->bus.base = qdev->vram_base;
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		break;
+	case TTM_PL_PRIV0:
+		mem->bus.is_iomem = true;
+		mem->bus.base = qdev->surfaceram_base;
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev,
+				struct ttm_mem_reg *mem)
+{
+}
+
+/*
+ * TTM backend functions.
+ */
+struct qxl_ttm_tt {
+	struct ttm_dma_tt		ttm;
+	struct qxl_device		*qdev;
+	u64				offset;
+};
+
+static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
+				struct ttm_mem_reg *bo_mem)
+{
+	struct qxl_ttm_tt *gtt = (void *)ttm;
+
+	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+	if (!ttm->num_pages) {
+		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+		     ttm->num_pages, bo_mem, ttm);
+	}
+	/* Not implemented */
+	return -1;
+}
+
+static int qxl_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+	/* Not implemented */
+	return -1;
+}
+
+static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+	struct qxl_ttm_tt *gtt = (void *)ttm;
+
+	ttm_dma_tt_fini(&gtt->ttm);
+	kfree(gtt);
+}
+
+static struct ttm_backend_func qxl_backend_func = {
+	.bind = &qxl_ttm_backend_bind,
+	.unbind = &qxl_ttm_backend_unbind,
+	.destroy = &qxl_ttm_backend_destroy,
+};
+
+static int qxl_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	int r;
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	r = ttm_pool_populate(ttm);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+static void qxl_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	ttm_pool_unpopulate(ttm);
+}
+
+static struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev,
+				 unsigned long size, uint32_t page_flags,
+				 struct page *dummy_read_page)
+{
+	struct qxl_device *qdev;
+	struct qxl_ttm_tt *gtt;
+
+	qdev = qxl_get_qdev(bdev);
+	gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
+	if (gtt == NULL)
+		return NULL;
+	gtt->ttm.ttm.func = &qxl_backend_func;
+	gtt->qdev = qdev;
+	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
+			    dummy_read_page)) {
+		kfree(gtt);
+		return NULL;
+	}
+	return &gtt->ttm.ttm;
+}
+
+static void qxl_move_null(struct ttm_buffer_object *bo,
+			     struct ttm_mem_reg *new_mem)
+{
+	struct ttm_mem_reg *old_mem = &bo->mem;
+
+	BUG_ON(old_mem->mm_node != NULL);
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+}
+
+static int qxl_bo_move(struct ttm_buffer_object *bo,
+		       bool evict, bool interruptible,
+		       bool no_wait_gpu,
+		       struct ttm_mem_reg *new_mem)
+{
+	struct ttm_mem_reg *old_mem = &bo->mem;
+
+	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+		qxl_move_null(bo, new_mem);
+		return 0;
+	}
+	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+}
+
+static int qxl_sync_obj_wait(void *sync_obj,
+			     bool lazy, bool interruptible)
+{
+	struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
+	int count = 0, sc = 0;
+	struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
+
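+	/* wait by polling: kick a surface update on the first pass and
+	 * out-of-memory notifications on later ones, running the release
+	 * ring garbage collector in between until this bo's releases
+	 * drain (or we give up after ~300 passes with drawable releases
+	 * still outstanding)
+	 */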
+	if (qfence->num_active_releases == 0)
+		return 0;
+
+retry:
+	if (sc == 0) {
+		if (bo->type == QXL_GEM_DOMAIN_SURFACE)
+			qxl_update_surface(qfence->qdev, bo);
+	} else if (sc >= 1) {
+		qxl_io_notify_oom(qfence->qdev);
+	}
+
+	sc++;
+
+	for (count = 0; count < 10; count++) {
+		bool ret;
+		ret = qxl_queue_garbage_collect(qfence->qdev, true);
+		if (!ret)
+			break;
+
+		if (qfence->num_active_releases == 0)
+			return 0;
+	}
+
+	if (qfence->num_active_releases) {
+		bool have_drawable_releases = false;
+		void **slot;
+		struct radix_tree_iter iter;
+		int release_id;
+
+		radix_tree_for_each_slot(slot, &qfence->tree, &iter, 0) {
+			struct qxl_release *release;
+
+			release_id = iter.index;
+			release = qxl_release_from_id_locked(qfence->qdev, release_id);
+			if (release == NULL)
+				continue;
+
+			if (release->type == QXL_RELEASE_DRAWABLE)
+				have_drawable_releases = true;
+		}
+
+		qxl_queue_garbage_collect(qfence->qdev, true);
+
+		if (have_drawable_releases || sc < 4) {
+			if (sc > 2)
+				/* back off */
+				usleep_range(500, 1000);
+			if (have_drawable_releases && sc > 300) {
+				WARN(1, "sync obj %d still has outstanding releases %d %d %d %lu %d\n",
+				     sc, bo->surface_id, bo->is_primary,
+				     bo->pin_count,
+				     (unsigned long)bo->gem_base.size,
+				     qfence->num_active_releases);
+				return -EBUSY;
+			}
+			goto retry;
+		}
+	}
+	return 0;
+}
+
+static int qxl_sync_obj_flush(void *sync_obj)
+{
+	return 0;
+}
+
+static void qxl_sync_obj_unref(void **sync_obj)
+{
+}
+
+static void *qxl_sync_obj_ref(void *sync_obj)
+{
+	return sync_obj;
+}
+
+static bool qxl_sync_obj_signaled(void *sync_obj)
+{
+	struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
+	return (qfence->num_active_releases == 0);
+}
+
+static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
+			       struct ttm_mem_reg *new_mem)
+{
+	struct qxl_bo *qbo;
+	struct qxl_device *qdev;
+
+	if (!qxl_ttm_bo_is_qxl_bo(bo))
+		return;
+	qbo = container_of(bo, struct qxl_bo, tbo);
+	qdev = qbo->gem_base.dev->dev_private;
+
+	if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id)
+		qxl_surface_evict(qdev, qbo, new_mem ? true : false);
+}
+
+static struct ttm_bo_driver qxl_bo_driver = {
+	.ttm_tt_create = &qxl_ttm_tt_create,
+	.ttm_tt_populate = &qxl_ttm_tt_populate,
+	.ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate,
+	.invalidate_caches = &qxl_invalidate_caches,
+	.init_mem_type = &qxl_init_mem_type,
+	.evict_flags = &qxl_evict_flags,
+	.move = &qxl_bo_move,
+	.verify_access = &qxl_verify_access,
+	.io_mem_reserve = &qxl_ttm_io_mem_reserve,
+	.io_mem_free = &qxl_ttm_io_mem_free,
+	.sync_obj_signaled = &qxl_sync_obj_signaled,
+	.sync_obj_wait = &qxl_sync_obj_wait,
+	.sync_obj_flush = &qxl_sync_obj_flush,
+	.sync_obj_unref = &qxl_sync_obj_unref,
+	.sync_obj_ref = &qxl_sync_obj_ref,
+	.move_notify = &qxl_bo_move_notify,
+};
+
+int qxl_ttm_init(struct qxl_device *qdev)
+{
+	int r;
+	int num_io_pages; /* != rom->num_io_pages, we include surface0 */
+
+	r = qxl_ttm_global_init(qdev);
+	if (r)
+		return r;
+	/* No other users of the address space, so set it to 0 */
+	r = ttm_bo_device_init(&qdev->mman.bdev,
+			       qdev->mman.bo_global_ref.ref.object,
+			       &qxl_bo_driver, DRM_FILE_PAGE_OFFSET, 0);
+	if (r) {
+		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+		return r;
+	}
+	/* NOTE: this includes the framebuffer (aka surface 0) */
+	num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
+	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM,
+			   num_io_pages);
+	if (r) {
+		DRM_ERROR("Failed initializing VRAM heap.\n");
+		return r;
+	}
+	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0,
+			   qdev->surfaceram_size / PAGE_SIZE);
+	if (r) {
+		DRM_ERROR("Failed initializing Surfaces heap.\n");
+		return r;
+	}
+	DRM_INFO("qxl: %uM of VRAM memory size\n",
+		 (unsigned)(qdev->vram_size / (1024 * 1024)));
+	DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
+		 ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
+	if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
+		qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
+	r = qxl_ttm_debugfs_init(qdev);
+	if (r) {
+		DRM_ERROR("Failed to init debugfs\n");
+		return r;
+	}
+	return 0;
+}
+
+void qxl_ttm_fini(struct qxl_device *qdev)
+{
+	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
+	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+	ttm_bo_device_release(&qdev->mman.bdev);
+	qxl_ttm_global_fini(qdev);
+	DRM_INFO("qxl: ttm finalized\n");
+}
+
+#define QXL_DEBUGFS_MEM_TYPES 2
+
+#if defined(CONFIG_DEBUG_FS)
+static int qxl_mm_dump_table(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
+	struct drm_device *dev = node->minor->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	int ret;
+	struct ttm_bo_global *glob = qdev->mman.bdev.glob;
+
+	spin_lock(&glob->lru_lock);
+	ret = drm_mm_dump_table(m, mm);
+	spin_unlock(&glob->lru_lock);
+	return ret;
+}
+#endif
+
+static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
+{
+	static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
+	static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
+	unsigned i;
+
+	for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
+		if (i == 0)
+			sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
+		else
+			sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
+		qxl_mem_types_list[i].name = qxl_mem_types_names[i];
+		qxl_mem_types_list[i].show = &qxl_mm_dump_table;
+		qxl_mem_types_list[i].driver_features = 0;
+		if (i == 0)
+			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
+		else
+			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
+
+	}
+	return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
+}
diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild
index ba99ce3..a042a95 100644
--- a/include/uapi/drm/Kbuild
+++ b/include/uapi/drm/Kbuild
@@ -8,6 +8,7 @@ header-y += i810_drm.h
 header-y += i915_drm.h
 header-y += mga_drm.h
 header-y += nouveau_drm.h
+header-y += qxl_drm.h
 header-y += r128_drm.h
 header-y += radeon_drm.h
 header-y += savage_drm.h
diff --git a/include/uapi/drm/qxl_drm.h b/include/uapi/drm/qxl_drm.h
new file mode 100644
index 0000000..ebebd36
--- /dev/null
+++ b/include/uapi/drm/qxl_drm.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2013 Red Hat
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef QXL_DRM_H
+#define QXL_DRM_H
+
+#include <stddef.h>
+#include "drm/drm.h"
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ *
+ * Do not use pointers, use uint64_t instead for 32 bit / 64 bit
+ * user/kernel compatibility. Keep fields aligned to their size.
+ */
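+/*
+ * e.g. struct drm_qxl_map below carries an explicit 32-bit pad so its
+ * total size stays a multiple of 8 bytes and matches between 32-bit
+ * and 64-bit userspace
+ */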
+
+#define QXL_GEM_DOMAIN_CPU 0
+#define QXL_GEM_DOMAIN_VRAM 1
+#define QXL_GEM_DOMAIN_SURFACE 2
+
+#define DRM_QXL_ALLOC       0x00
+#define DRM_QXL_MAP         0x01
+#define DRM_QXL_EXECBUFFER  0x02
+#define DRM_QXL_UPDATE_AREA 0x03
+#define DRM_QXL_GETPARAM    0x04
+#define DRM_QXL_CLIENTCAP   0x05
+
+#define DRM_QXL_ALLOC_SURF  0x06
+
+struct drm_qxl_alloc {
+	uint32_t size;
+	uint32_t handle; /* 0 is an invalid handle */
+};
+
+struct drm_qxl_map {
+	uint64_t offset; /* use for mmap system call */
+	uint32_t handle;
+	uint32_t pad;
+};
+
+/*
+ * dest is the bo we are writing the relocation into
+ * src is the bo we are relocating.
+ * *(dest_handle.base_addr + dest_offset) = physical_address(src_handle.addr +
+ * src_offset)
+ */
+#define QXL_RELOC_TYPE_BO 1
+#define QXL_RELOC_TYPE_SURF 2
+
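+/*
+ * e.g. a QXL_RELOC_TYPE_BO reloc with dst_handle == 0 patches the
+ * command buffer itself at dst_offset with
+ * physical_address(src_handle + src_offset)
+ */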
+struct drm_qxl_reloc {
+	uint64_t src_offset; /* offset into src_handle or src buffer */
+	uint64_t dst_offset; /* offset in dest handle */
+	uint32_t src_handle; /* dest handle to compute address from */
+	uint32_t dst_handle; /* 0 if to command buffer */
+	uint32_t reloc_type;
+	uint32_t pad;
+};
+
+struct drm_qxl_command {
+	uint64_t	 __user command; /* void* */
+	uint64_t	 __user relocs; /* struct drm_qxl_reloc* */
+	uint32_t		type;
+	uint32_t		command_size;
+	uint32_t		relocs_num;
+	uint32_t                pad;
+};
+
+/* XXX: call it drm_qxl_commands? */
+struct drm_qxl_execbuffer {
+	uint32_t		flags;		/* for future use */
+	uint32_t		commands_num;
+	uint64_t	 __user commands;	/* struct drm_qxl_command* */
+};
+
+struct drm_qxl_update_area {
+	uint32_t handle;
+	uint32_t top;
+	uint32_t left;
+	uint32_t bottom;
+	uint32_t right;
+	uint32_t pad;
+};
+
+#define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */
+#define QXL_PARAM_MAX_RELOCS 2
+struct drm_qxl_getparam {
+	uint64_t param;
+	uint64_t value;
+};
+
+/* these are one bit values */
+struct drm_qxl_clientcap {
+	uint32_t index;
+	uint32_t pad;
+};
+
+struct drm_qxl_alloc_surf {
+	uint32_t format;
+	uint32_t width;
+	uint32_t height;
+	int32_t stride;
+	uint32_t handle;
+	uint32_t pad;
+};
+
+#define DRM_IOCTL_QXL_ALLOC \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC, struct drm_qxl_alloc)
+
+#define DRM_IOCTL_QXL_MAP \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_MAP, struct drm_qxl_map)
+
+#define DRM_IOCTL_QXL_EXECBUFFER \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_EXECBUFFER,\
+		struct drm_qxl_execbuffer)
+
+#define DRM_IOCTL_QXL_UPDATE_AREA \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_UPDATE_AREA,\
+		struct drm_qxl_update_area)
+
+#define DRM_IOCTL_QXL_GETPARAM \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_GETPARAM,\
+		struct drm_qxl_getparam)
+
+#define DRM_IOCTL_QXL_CLIENTCAP \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_CLIENTCAP,\
+		struct drm_qxl_clientcap)
+
+#define DRM_IOCTL_QXL_ALLOC_SURF \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC_SURF,\
+		struct drm_qxl_alloc_surf)
+
+#endif
-- 
1.8.1.4