From b5010d0d562335d5065296f2aced54ec26616294 Mon Sep 17 00:00:00 2001 From: Chuck Ebbert Date: Aug 15 2010 02:13:27 +0000 Subject: Add 2.6.33 branch to git repository. --- diff --git a/add-appleir-driver.patch b/add-appleir-driver.patch new file mode 100644 index 0000000..0eb350c --- /dev/null +++ b/add-appleir-driver.patch @@ -0,0 +1,682 @@ +commit 92c912df2a0725d719263357176f98b2201a2acd +Author: Bastien Nocera +Date: Wed Apr 21 14:51:58 2010 +0100 + + Input: add appleir USB driver + + This driver was originally written by James McKenzie, updated by + Greg Kroah-Hartman, further updated by myself, with suspend support + added. + + More recent versions of the IR receiver are also supported through + a patch by Alex Karpenko. + + Tested on a MacbookAir1,1 + + Signed-off-by: Bastien Nocera + +commit 6ffcbf68913840e9e882db14441576ffee6eba0c +Author: Bastien Nocera +Date: Fri Apr 16 17:19:50 2010 +0100 + + Add HID_QUIRK_HIDDEV_FORCE and HID_QUIRK_NO_IGNORE + + Add two quirks to make it possible for usbhid module options to + override whether a device is ignored (HID_QUIRK_NO_IGNORE) and + whether to connect a hiddev device (HID_QUIRK_HIDDEV_FORCE). + + Passing HID_QUIRK_NO_IGNORE for your device means that it will + not be ignored by the HID layer, even if present in a blacklist. + + HID_QUIRK_HIDDEV_FORCE will force the creation of a hiddev for that + device, making it accessible from user-space. + + Tested with an Apple IR Receiver, switching it from using appleir + to using lirc's macmini driver. + + Signed-off-by: Bastien Nocera + +diff --git a/Documentation/input/appleir.txt b/Documentation/input/appleir.txt +new file mode 100644 +index 0000000..0267a4b +--- /dev/null ++++ b/Documentation/input/appleir.txt +@@ -0,0 +1,45 @@ ++Apple IR receiver Driver (appleir) ++---------------------------------- ++ Copyright (C) 2009 Bastien Nocera ++ ++The appleir driver is a kernel input driver to handle Apple's IR ++receivers (and associated remotes) in the kernel. ++ ++The driver is an input driver which only handles "official" remotes ++as built and sold by Apple. ++ ++Authors ++------- ++ ++James McKenzie (original driver) ++Alex Karpenko (05ac:8242 support) ++Greg Kroah-Hartman (cleanups and original submission) ++Bastien Nocera (further cleanups and suspend support) ++ ++Supported hardware ++------------------ ++ ++- All Apple laptops and desktops from 2005 onwards, except: ++ - the unibody Macbook (2009) ++ - Mac Pro (all versions) ++- Apple TV (all revisions) ++ ++The remote will only support the 6 buttons of the original remotes ++as sold by Apple. See the next section if you want to use other remotes ++or want to use lirc with the device instead of the kernel driver. ++ ++Using lirc (native) instead of the kernel driver ++------------------------------------------------ ++ ++First, you will need to disable the kernel driver for the receiver. ++ ++This can be achieved by passing quirks to the usbhid driver. ++The quirk line would be: ++usbhid.quirks=0x05ac:0x8242:0x40000010 ++ ++With 0x05ac being the vendor ID (Apple, you shouldn't need to change this) ++With 0x8242 being the product ID (check the output of lsusb for your hardware) ++And 0x10 being "HID_QUIRK_HIDDEV_FORCE" and 0x40000000 being "HID_QUIRK_NO_IGNORE" ++ ++This should force the creation of a hiddev device for the receiver, and ++make it usable under lirc. 
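Aside (not part of the patch): the documentation above builds the third field of the usbhid.quirks= option by OR-ing the two new quirk flags together. Their values come from the include/linux/hid.h hunk at the end of this patch; the stand-alone snippet below is purely illustrative and just verifies that arithmetic.

    #include <stdio.h>

    /* values as added to include/linux/hid.h by this patch */
    #define HID_QUIRK_HIDDEV_FORCE  0x00000010
    #define HID_QUIRK_NO_IGNORE     0x40000000

    int main(void)
    {
            /* prints 0x40000010 -- the third field of
             * usbhid.quirks=0x05ac:0x8242:0x40000010 */
            printf("0x%08x\n", HID_QUIRK_NO_IGNORE | HID_QUIRK_HIDDEV_FORCE);
            return 0;
    }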
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c +index 5b4d66d..b0e1811 100644 +--- a/drivers/hid/hid-apple.c ++++ b/drivers/hid/hid-apple.c +@@ -353,10 +353,6 @@ static void apple_remove(struct hid_device *hdev) + } + + static const struct hid_device_id apple_devices[] = { +- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL), +- .driver_data = APPLE_HIDDEV | APPLE_IGNORE_HIDINPUT }, +- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4), +- .driver_data = APPLE_HIDDEV | APPLE_IGNORE_HIDINPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE), + .driver_data = APPLE_MIGHTYMOUSE | APPLE_INVERT_HWHEEL }, + +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index 8455f3d..e795d8c 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -1170,6 +1170,8 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask) + unsigned int i; + int len; + ++ if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE) ++ connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV); + if (hdev->bus != BUS_USB) + connect_mask &= ~HID_CONNECT_HIDDEV; + if (hid_hiddev(hdev)) +@@ -1250,8 +1252,6 @@ EXPORT_SYMBOL_GPL(hid_disconnect); + static const struct hid_device_id hid_blacklist[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, + { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, +- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, +- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) }, +@@ -1545,6 +1545,9 @@ static const struct hid_device_id hid_ignore_list[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM)}, + { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM2)}, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, + { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, + { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) }, +@@ -1749,7 +1752,7 @@ int hid_add_device(struct hid_device *hdev) + + /* we need to kill them here, otherwise they will stay allocated to + * wait for coming driver */ +- if (hid_ignore(hdev)) ++ if (!(hdev->quirks & HID_QUIRK_NO_IGNORE) && hid_ignore(hdev)) + return -ENODEV; + + /* XXX hack, any other cleaner solution after the driver core +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 793691f..9255c1a 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -93,6 +93,7 @@ + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b + #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a + #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b ++#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240 + #define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241 + #define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 + +diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c +index 2f84237..2de42e1 100644 +--- a/drivers/hid/usbhid/hid-core.c ++++ b/drivers/hid/usbhid/hid-core.c +@@ 
-1142,6 +1142,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id * + hid->vendor = le16_to_cpu(dev->descriptor.idVendor); + hid->product = le16_to_cpu(dev->descriptor.idProduct); + hid->name[0] = 0; ++ hid->quirks = usbhid_lookup_quirk(hid->vendor, hid->product); + if (intf->cur_altsetting->desc.bInterfaceProtocol == + USB_INTERFACE_PROTOCOL_MOUSE) + hid->type = HID_TYPE_USBMOUSE; +diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig +index 16ec523..4340986 100644 +--- a/drivers/input/misc/Kconfig ++++ b/drivers/input/misc/Kconfig +@@ -149,6 +149,19 @@ config INPUT_KEYSPAN_REMOTE + To compile this driver as a module, choose M here: the module will + be called keyspan_remote. + ++config INPUT_APPLEIR ++ tristate "Apple infrared receiver (built in)" ++ depends on USB_ARCH_HAS_HCD ++ select USB ++ help ++ Say Y here if you want to use a Apple infrared remote control. All ++ the Apple computers from 2005 onwards include such a port, except ++ the unibody Macbook (2009), and Mac Pros. This receiver is also ++ used in the Apple TV set-top box. ++ ++ To compile this driver as a module, choose M here: the module will ++ be called appleir. ++ + config INPUT_POWERMATE + tristate "Griffin PowerMate and Contour Jog support" + depends on USB_ARCH_HAS_HCD +diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile +index a8b8485..041e6f5 100644 +--- a/drivers/input/misc/Makefile ++++ b/drivers/input/misc/Makefile +@@ -5,6 +5,7 @@ + # Each configuration option enables a list of files. + + obj-$(CONFIG_INPUT_APANEL) += apanel.o ++obj-$(CONFIG_INPUT_APPLEIR) += appleir.o + obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o + obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o + obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o +diff --git a/drivers/input/misc/appleir.c b/drivers/input/misc/appleir.c +new file mode 100644 +index 0000000..cff4df6 +--- /dev/null ++++ b/drivers/input/misc/appleir.c +@@ -0,0 +1,453 @@ ++/* ++ * appleir: USB driver for the apple ir device ++ * ++ * Original driver written by James McKenzie ++ * Ported to recent 2.6 kernel versions by Greg Kroah-Hartman ++ * ++ * Copyright (C) 2006 James McKenzie ++ * Copyright (C) 2008 Greg Kroah-Hartman ++ * Copyright (C) 2008 Novell Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation, version 2. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DRIVER_VERSION "v1.2" ++#define DRIVER_AUTHOR "James McKenzie" ++#define DRIVER_DESC "Apple infrared receiver driver" ++#define DRIVER_LICENSE "GPL" ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE(DRIVER_LICENSE); ++ ++#define USB_VENDOR_ID_APPLE 0x05ac ++#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240 ++#define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241 ++#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 ++ ++#define URB_SIZE 32 ++ ++#define MAX_KEYS 8 ++#define MAX_KEYS_MASK (MAX_KEYS - 1) ++ ++#define dbginfo(dev, format, arg...) 
do { if (debug) dev_info(dev , format , ## arg); } while (0) ++ ++static int debug; ++module_param(debug, int, 0644); ++MODULE_PARM_DESC(debug, "Enable extra debug messages and information"); ++ ++/* I have two devices both of which report the following */ ++/* 25 87 ee 83 0a + */ ++/* 25 87 ee 83 0c - */ ++/* 25 87 ee 83 09 << */ ++/* 25 87 ee 83 06 >> */ ++/* 25 87 ee 83 05 >" */ ++/* 25 87 ee 83 03 menu */ ++/* 26 00 00 00 00 for key repeat*/ ++ ++/* Thomas Glanzmann reports the following responses */ ++/* 25 87 ee ca 0b + */ ++/* 25 87 ee ca 0d - */ ++/* 25 87 ee ca 08 << */ ++/* 25 87 ee ca 07 >> */ ++/* 25 87 ee ca 04 >" */ ++/* 25 87 ee ca 02 menu */ ++/* 26 00 00 00 00 for key repeat*/ ++/* He also observes the following event sometimes */ ++/* sent after a key is release, which I interpret */ ++/* as a flat battery message */ ++/* 25 87 e0 ca 06 flat battery */ ++ ++/* Alexandre Karpenko reports the following responses for Device ID 0x8242 */ ++/* 25 87 ee 47 0b + */ ++/* 25 87 ee 47 0d - */ ++/* 25 87 ee 47 08 << */ ++/* 25 87 ee 47 07 >> */ ++/* 25 87 ee 47 04 >" */ ++/* 25 87 ee 47 02 menu */ ++/* 26 87 ee 47 ** for key repeat (** is the code of the key being held) */ ++ ++static const unsigned short appleir_key_table[] = { ++ KEY_RESERVED, ++ KEY_MENU, ++ KEY_PLAYPAUSE, ++ KEY_FORWARD, ++ KEY_BACK, ++ KEY_VOLUMEUP, ++ KEY_VOLUMEDOWN, ++ KEY_RESERVED, ++}; ++ ++struct appleir { ++ struct input_dev *input_dev; ++ unsigned short keymap[ARRAY_SIZE(appleir_key_table)]; ++ u8 *data; ++ dma_addr_t dma_buf; ++ struct usb_device *usbdev; ++ unsigned int flags; ++ struct urb *urb; ++ struct timer_list key_up_timer; ++ int current_key; ++ char phys[32]; ++}; ++ ++static DEFINE_MUTEX(appleir_mutex); ++ ++enum { ++ APPLEIR_OPENED = 0x1, ++ APPLEIR_SUSPENDED = 0x2, ++}; ++ ++static struct usb_device_id appleir_ids[] = { ++ { USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) }, ++ { USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, ++ { USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, ++ {} ++}; ++MODULE_DEVICE_TABLE(usb, appleir_ids); ++ ++static void dump_packet(struct appleir *appleir, char *msg, u8 *data, int len) ++{ ++ int i; ++ ++ printk(KERN_ERR "appleir: %s (%d bytes)", msg, len); ++ ++ for (i = 0; i < len; ++i) ++ printk(" %02x", data[i]); ++ printk("\n"); ++} ++ ++static void key_up(struct appleir *appleir, int key) ++{ ++ dbginfo(&appleir->input_dev->dev, "key %d up\n", key); ++ input_report_key(appleir->input_dev, key, 0); ++ input_sync(appleir->input_dev); ++} ++ ++static void key_down(struct appleir *appleir, int key) ++{ ++ dbginfo(&appleir->input_dev->dev, "key %d down\n", key); ++ input_report_key(appleir->input_dev, key, 1); ++ input_sync(appleir->input_dev); ++} ++ ++static void battery_flat(struct appleir *appleir) ++{ ++ dev_err(&appleir->input_dev->dev, "possible flat battery?\n"); ++} ++ ++static void key_up_tick(unsigned long data) ++{ ++ struct appleir *appleir = (struct appleir *)data; ++ ++ if (appleir->current_key) { ++ key_up(appleir, appleir->current_key); ++ appleir->current_key = 0; ++ } ++} ++ ++static void new_data(struct appleir *appleir, u8 *data, int len) ++{ ++ static const u8 keydown[] = { 0x25, 0x87, 0xee }; ++ static const u8 keyrepeat[] = { 0x26, }; ++ static const u8 flatbattery[] = { 0x25, 0x87, 0xe0 }; ++ ++ if (debug) ++ dump_packet(appleir, "received", data, len); ++ ++ if (len != 5) ++ return; ++ ++ if (!memcmp(data, keydown, sizeof(keydown))) { ++ /* If we already have a key down, take it up 
before marking ++ this one down */ ++ if (appleir->current_key) ++ key_up(appleir, appleir->current_key); ++ appleir->current_key = appleir->keymap[(data[4] >> 1) & MAX_KEYS_MASK]; ++ ++ key_down(appleir, appleir->current_key); ++ /* Remote doesn't do key up, either pull them up, in the test ++ above, or here set a timer which pulls them up after 1/8 s */ ++ mod_timer(&appleir->key_up_timer, jiffies + HZ / 8); ++ ++ return; ++ } ++ ++ if (!memcmp(data, keyrepeat, sizeof(keyrepeat))) { ++ key_down(appleir, appleir->current_key); ++ /* Remote doesn't do key up, either pull them up, in the test ++ above, or here set a timer which pulls them up after 1/8 s */ ++ mod_timer(&appleir->key_up_timer, jiffies + HZ / 8); ++ return; ++ } ++ ++ if (!memcmp(data, flatbattery, sizeof(flatbattery))) { ++ battery_flat(appleir); ++ /* Fall through */ ++ } ++ ++ dump_packet(appleir, "unknown packet", data, len); ++} ++ ++static void appleir_urb(struct urb *urb) ++{ ++ struct appleir *appleir = urb->context; ++ int status = urb->status; ++ int retval; ++ ++ switch (status) { ++ case 0: ++ new_data(appleir, urb->transfer_buffer, urb->actual_length); ++ break; ++ case -ECONNRESET: ++ case -ENOENT: ++ case -ESHUTDOWN: ++ /* This urb is terminated, clean up */ ++ dbginfo(&appleir->input_dev->dev, "%s - urb shutting down with status: %d", __func__, ++ urb->status); ++ return; ++ default: ++ dbginfo(&appleir->input_dev->dev, "%s - nonzero urb status received: %d", __func__, ++ urb->status); ++ } ++ ++ retval = usb_submit_urb(urb, GFP_ATOMIC); ++ if (retval) ++ err("%s - usb_submit_urb failed with result %d", __func__, ++ retval); ++} ++ ++static int appleir_open(struct input_dev *dev) ++{ ++ struct appleir *appleir = input_get_drvdata(dev); ++ struct usb_interface *intf = usb_ifnum_to_if(appleir->usbdev, 0); ++ int r; ++ ++ r = usb_autopm_get_interface(intf); ++ if (r) { ++ dev_err(&intf->dev, ++ "%s(): usb_autopm_get_interface() = %d\n", __func__, r); ++ return r; ++ } ++ ++ mutex_lock(&appleir_mutex); ++ ++ if (usb_submit_urb(appleir->urb, GFP_ATOMIC)) { ++ r = -EIO; ++ goto fail; ++ } ++ ++ appleir->flags |= APPLEIR_OPENED; ++ ++ mutex_unlock(&appleir_mutex); ++ ++ usb_autopm_put_interface(intf); ++ ++ return 0; ++fail: ++ mutex_unlock(&appleir_mutex); ++ usb_autopm_put_interface(intf); ++ return r; ++} ++ ++static void appleir_close(struct input_dev *dev) ++{ ++ struct appleir *appleir = input_get_drvdata(dev); ++ ++ mutex_lock(&appleir_mutex); ++ ++ if (!(appleir->flags & APPLEIR_SUSPENDED)) { ++ usb_kill_urb(appleir->urb); ++ del_timer_sync(&appleir->key_up_timer); ++ } ++ ++ appleir->flags &= ~APPLEIR_OPENED; ++ ++ mutex_unlock(&appleir_mutex); ++} ++ ++static int appleir_probe(struct usb_interface *intf, ++ const struct usb_device_id *id) ++{ ++ struct usb_device *dev = interface_to_usbdev(intf); ++ struct usb_endpoint_descriptor *endpoint; ++ struct appleir *appleir = NULL; ++ struct input_dev *input_dev; ++ int retval = -ENOMEM; ++ int i; ++ ++ appleir = kzalloc(sizeof(struct appleir), GFP_KERNEL); ++ if (!appleir) ++ goto allocfail; ++ ++ appleir->data = usb_buffer_alloc(dev, URB_SIZE, GFP_KERNEL, ++ &appleir->dma_buf); ++ if (!appleir->data) ++ goto usbfail; ++ ++ appleir->urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (!appleir->urb) ++ goto urbfail; ++ ++ appleir->usbdev = dev; ++ ++ input_dev = input_allocate_device(); ++ if (!input_dev) ++ goto inputfail; ++ ++ appleir->input_dev = input_dev; ++ ++ usb_make_path(dev, appleir->phys, sizeof(appleir->phys)); ++ strlcpy(appleir->phys, "/input0", 
sizeof(appleir->phys)); ++ ++ input_dev->name = "Apple Infrared Remote Controller"; ++ input_dev->phys = appleir->phys; ++ usb_to_input_id(dev, &input_dev->id); ++ input_dev->dev.parent = &intf->dev; ++ input_dev->keycode = appleir->keymap; ++ input_dev->keycodesize = sizeof(unsigned short); ++ input_dev->keycodemax = ARRAY_SIZE(appleir->keymap); ++ ++ input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP); ++ ++ memcpy(appleir->keymap, appleir_key_table, sizeof(appleir->keymap)); ++ for (i = 0; i < ARRAY_SIZE(appleir_key_table); i++) ++ set_bit(appleir->keymap[i], input_dev->keybit); ++ clear_bit(KEY_RESERVED, input_dev->keybit); ++ ++ input_set_drvdata(input_dev, appleir); ++ input_dev->open = appleir_open; ++ input_dev->close = appleir_close; ++ ++ endpoint = &intf->cur_altsetting->endpoint[0].desc; ++ ++ usb_fill_int_urb(appleir->urb, dev, ++ usb_rcvintpipe(dev, endpoint->bEndpointAddress), ++ appleir->data, 8, ++ appleir_urb, appleir, endpoint->bInterval); ++ ++ appleir->urb->transfer_dma = appleir->dma_buf; ++ appleir->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; ++ ++ setup_timer(&appleir->key_up_timer, ++ key_up_tick, (unsigned long) appleir); ++ ++ retval = input_register_device(appleir->input_dev); ++ if (retval) ++ goto inputfail; ++ ++ usb_set_intfdata(intf, appleir); ++ ++ return 0; ++ ++inputfail: ++ input_free_device(appleir->input_dev); ++ ++urbfail: ++ usb_free_urb(appleir->urb); ++ ++usbfail: ++ usb_buffer_free(dev, URB_SIZE, appleir->data, ++ appleir->dma_buf); ++ ++allocfail: ++ kfree(appleir); ++ ++ return retval; ++} ++ ++static void appleir_disconnect(struct usb_interface *intf) ++{ ++ struct appleir *appleir = usb_get_intfdata(intf); ++ ++ usb_set_intfdata(intf, NULL); ++ input_unregister_device(appleir->input_dev); ++ usb_free_urb(appleir->urb); ++ usb_buffer_free(interface_to_usbdev(intf), URB_SIZE, ++ appleir->data, appleir->dma_buf); ++ kfree(appleir); ++} ++ ++static int appleir_suspend(struct usb_interface *interface, ++ pm_message_t message) ++{ ++ struct appleir *appleir = usb_get_intfdata(interface); ++ ++ mutex_lock(&appleir_mutex); ++ if (appleir->flags & APPLEIR_OPENED) ++ usb_kill_urb(appleir->urb); ++ ++ appleir->flags |= APPLEIR_SUSPENDED; ++ ++ mutex_unlock(&appleir_mutex); ++ ++ return 0; ++} ++ ++static int appleir_resume(struct usb_interface *interface) ++{ ++ struct appleir *appleir; ++ int r = 0; ++ ++ appleir = usb_get_intfdata(interface); ++ ++ mutex_lock(&appleir_mutex); ++ if (appleir->flags & APPLEIR_OPENED) { ++ struct usb_endpoint_descriptor *endpoint; ++ ++ endpoint = &interface->cur_altsetting->endpoint[0].desc; ++ usb_fill_int_urb(appleir->urb, appleir->usbdev, ++ usb_rcvintpipe(appleir->usbdev, endpoint->bEndpointAddress), ++ appleir->data, 8, ++ appleir_urb, appleir, endpoint->bInterval); ++ appleir->urb->transfer_dma = appleir->dma_buf; ++ appleir->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; ++ ++ /* And reset the USB device */ ++ if (usb_submit_urb(appleir->urb, GFP_ATOMIC)) ++ r = -EIO; ++ } ++ ++ appleir->flags &= ~APPLEIR_SUSPENDED; ++ ++ mutex_unlock(&appleir_mutex); ++ ++ return r; ++} ++ ++static struct usb_driver appleir_driver = { ++ .name = "appleir", ++ .probe = appleir_probe, ++ .disconnect = appleir_disconnect, ++ .suspend = appleir_suspend, ++ .resume = appleir_resume, ++ .reset_resume = appleir_resume, ++ .id_table = appleir_ids, ++}; ++ ++static int __init appleir_init(void) ++{ ++ return usb_register(&appleir_driver); ++} ++ ++static void __exit appleir_exit(void) ++{ ++ usb_deregister(&appleir_driver); ++} ++ 
++module_init(appleir_init); ++module_exit(appleir_exit); +diff --git a/include/linux/hid.h b/include/linux/hid.h +index 8709365..662596b 100644 +--- a/include/linux/hid.h ++++ b/include/linux/hid.h +@@ -308,11 +308,13 @@ struct hid_item { + #define HID_QUIRK_NOTOUCH 0x00000002 + #define HID_QUIRK_IGNORE 0x00000004 + #define HID_QUIRK_NOGET 0x00000008 ++#define HID_QUIRK_HIDDEV_FORCE 0x00000010 + #define HID_QUIRK_BADPAD 0x00000020 + #define HID_QUIRK_MULTI_INPUT 0x00000040 + #define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 + #define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 + #define HID_QUIRK_NO_INIT_REPORTS 0x20000000 ++#define HID_QUIRK_NO_IGNORE 0x40000000 + + /* + * This is the global environment of the parser. This information is diff --git a/add-appleir-usb-driver.patch b/add-appleir-usb-driver.patch deleted file mode 100644 index e8073de..0000000 --- a/add-appleir-usb-driver.patch +++ /dev/null @@ -1,666 +0,0 @@ -From e2e8fc4ed31157e9e9e9cbc70febf08c77233aea Mon Sep 17 00:00:00 2001 -From: Kyle McMartin -Date: Thu, 20 May 2010 10:17:58 -0400 -Subject: add-appleir-driver - ---- - Documentation/input/appleir.txt | 45 ++++ - drivers/hid/hid-apple.c | 4 - - drivers/hid/hid-core.c | 9 +- - drivers/hid/hid-ids.h | 1 + - drivers/hid/usbhid/hid-core.c | 1 + - drivers/input/misc/Kconfig | 13 ++ - drivers/input/misc/Makefile | 1 + - drivers/input/misc/appleir.c | 453 +++++++++++++++++++++++++++++++++++++++ - include/linux/hid.h | 2 + - 9 files changed, 522 insertions(+), 7 deletions(-) - create mode 100644 Documentation/input/appleir.txt - create mode 100644 drivers/input/misc/appleir.c - -diff --git a/Documentation/input/appleir.txt b/Documentation/input/appleir.txt -new file mode 100644 -index 0000000..0267a4b ---- /dev/null -+++ b/Documentation/input/appleir.txt -@@ -0,0 +1,45 @@ -+Apple IR receiver Driver (appleir) -+---------------------------------- -+ Copyright (C) 2009 Bastien Nocera -+ -+The appleir driver is a kernel input driver to handle Apple's IR -+receivers (and associated remotes) in the kernel. -+ -+The driver is an input driver which only handles "official" remotes -+as built and sold by Apple. -+ -+Authors -+------- -+ -+James McKenzie (original driver) -+Alex Karpenko (05ac:8242 support) -+Greg Kroah-Hartman (cleanups and original submission) -+Bastien Nocera (further cleanups and suspend support) -+ -+Supported hardware -+------------------ -+ -+- All Apple laptops and desktops from 2005 onwards, except: -+ - the unibody Macbook (2009) -+ - Mac Pro (all versions) -+- Apple TV (all revisions) -+ -+The remote will only support the 6 buttons of the original remotes -+as sold by Apple. See the next section if you want to use other remotes -+or want to use lirc with the device instead of the kernel driver. -+ -+Using lirc (native) instead of the kernel driver -+------------------------------------------------ -+ -+First, you will need to disable the kernel driver for the receiver. -+ -+This can be achieved by passing quirks to the usbhid driver. -+The quirk line would be: -+usbhid.quirks=0x05ac:0x8242:0x40000010 -+ -+With 0x05ac being the vendor ID (Apple, you shouldn't need to change this) -+With 0x8242 being the product ID (check the output of lsusb for your hardware) -+And 0x10 being "HID_QUIRK_HIDDEV_FORCE" and 0x40000000 being "HID_QUIRK_NO_IGNORE" -+ -+This should force the creation of a hiddev device for the receiver, and -+make it usable under lirc. 
-diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c -index bba05d0..0059d5a 100644 ---- a/drivers/hid/hid-apple.c -+++ b/drivers/hid/hid-apple.c -@@ -361,10 +361,6 @@ static void apple_remove(struct hid_device *hdev) - } - - static const struct hid_device_id apple_devices[] = { -- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL), -- .driver_data = APPLE_HIDDEV | APPLE_IGNORE_HIDINPUT }, -- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4), -- .driver_data = APPLE_HIDDEV | APPLE_IGNORE_HIDINPUT }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE), - .driver_data = APPLE_MIGHTYMOUSE | APPLE_INVERT_HWHEEL }, - -diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c -index 143e788..387bb59 100644 ---- a/drivers/hid/hid-core.c -+++ b/drivers/hid/hid-core.c -@@ -1167,6 +1167,8 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask) - unsigned int i; - int len; - -+ if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE) -+ connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV); - if (hdev->bus != BUS_USB) - connect_mask &= ~HID_CONNECT_HIDDEV; - if (hid_hiddev(hdev)) -@@ -1248,8 +1250,6 @@ static const struct hid_device_id hid_blacklist[] = { - { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) }, - { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, - { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, -- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, -- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) }, - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, -@@ -1553,6 +1553,9 @@ static const struct hid_device_id hid_ignore_list[] = { - { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)}, - { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)}, - { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)}, -+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) }, -+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, -+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, - { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, - { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) }, - { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) }, -@@ -1757,7 +1760,7 @@ int hid_add_device(struct hid_device *hdev) - - /* we need to kill them here, otherwise they will stay allocated to - * wait for coming driver */ -- if (hid_ignore(hdev)) -+ if (!(hdev->quirks & HID_QUIRK_NO_IGNORE) && hid_ignore(hdev)) - return -ENODEV; - - /* XXX hack, any other cleaner solution after the driver core -diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h -index 09d2764..7275a9d 100644 ---- a/drivers/hid/hid-ids.h -+++ b/drivers/hid/hid-ids.h -@@ -97,6 +97,7 @@ - #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b - #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a - #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b -+#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240 - #define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241 - #define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 - -diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c -index 7b85b69..66b512c 100644 ---- a/drivers/hid/usbhid/hid-core.c -+++ 
b/drivers/hid/usbhid/hid-core.c -@@ -1133,6 +1133,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id * - hid->vendor = le16_to_cpu(dev->descriptor.idVendor); - hid->product = le16_to_cpu(dev->descriptor.idProduct); - hid->name[0] = 0; -+ hid->quirks = usbhid_lookup_quirk(hid->vendor, hid->product); - if (intf->cur_altsetting->desc.bInterfaceProtocol == - USB_INTERFACE_PROTOCOL_MOUSE) - hid->type = HID_TYPE_USBMOUSE; -diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig -index 23140a3..46614b2 100644 ---- a/drivers/input/misc/Kconfig -+++ b/drivers/input/misc/Kconfig -@@ -159,6 +159,19 @@ config INPUT_KEYSPAN_REMOTE - To compile this driver as a module, choose M here: the module will - be called keyspan_remote. - -+config INPUT_APPLEIR -+ tristate "Apple infrared receiver (built in)" -+ depends on USB_ARCH_HAS_HCD -+ select USB -+ help -+ Say Y here if you want to use a Apple infrared remote control. All -+ the Apple computers from 2005 onwards include such a port, except -+ the unibody Macbook (2009), and Mac Pros. This receiver is also -+ used in the Apple TV set-top box. -+ -+ To compile this driver as a module, choose M here: the module will -+ be called appleir. -+ - config INPUT_POWERMATE - tristate "Griffin PowerMate and Contour Jog support" - depends on USB_ARCH_HAS_HCD -diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile -index 7e95a5d..3fa4404 100644 ---- a/drivers/input/misc/Makefile -+++ b/drivers/input/misc/Makefile -@@ -6,6 +6,7 @@ - - obj-$(CONFIG_INPUT_88PM860X_ONKEY) += 88pm860x_onkey.o - obj-$(CONFIG_INPUT_APANEL) += apanel.o -+obj-$(CONFIG_INPUT_APPLEIR) += appleir.o - obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o - obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o - obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o -diff --git a/drivers/input/misc/appleir.c b/drivers/input/misc/appleir.c -new file mode 100644 -index 0000000..cff4df6 ---- /dev/null -+++ b/drivers/input/misc/appleir.c -@@ -0,0 +1,453 @@ -+/* -+ * appleir: USB driver for the apple ir device -+ * -+ * Original driver written by James McKenzie -+ * Ported to recent 2.6 kernel versions by Greg Kroah-Hartman -+ * -+ * Copyright (C) 2006 James McKenzie -+ * Copyright (C) 2008 Greg Kroah-Hartman -+ * Copyright (C) 2008 Novell Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the Free -+ * Software Foundation, version 2. -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define DRIVER_VERSION "v1.2" -+#define DRIVER_AUTHOR "James McKenzie" -+#define DRIVER_DESC "Apple infrared receiver driver" -+#define DRIVER_LICENSE "GPL" -+ -+MODULE_AUTHOR(DRIVER_AUTHOR); -+MODULE_DESCRIPTION(DRIVER_DESC); -+MODULE_LICENSE(DRIVER_LICENSE); -+ -+#define USB_VENDOR_ID_APPLE 0x05ac -+#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240 -+#define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241 -+#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 -+ -+#define URB_SIZE 32 -+ -+#define MAX_KEYS 8 -+#define MAX_KEYS_MASK (MAX_KEYS - 1) -+ -+#define dbginfo(dev, format, arg...) 
do { if (debug) dev_info(dev , format , ## arg); } while (0) -+ -+static int debug; -+module_param(debug, int, 0644); -+MODULE_PARM_DESC(debug, "Enable extra debug messages and information"); -+ -+/* I have two devices both of which report the following */ -+/* 25 87 ee 83 0a + */ -+/* 25 87 ee 83 0c - */ -+/* 25 87 ee 83 09 << */ -+/* 25 87 ee 83 06 >> */ -+/* 25 87 ee 83 05 >" */ -+/* 25 87 ee 83 03 menu */ -+/* 26 00 00 00 00 for key repeat*/ -+ -+/* Thomas Glanzmann reports the following responses */ -+/* 25 87 ee ca 0b + */ -+/* 25 87 ee ca 0d - */ -+/* 25 87 ee ca 08 << */ -+/* 25 87 ee ca 07 >> */ -+/* 25 87 ee ca 04 >" */ -+/* 25 87 ee ca 02 menu */ -+/* 26 00 00 00 00 for key repeat*/ -+/* He also observes the following event sometimes */ -+/* sent after a key is release, which I interpret */ -+/* as a flat battery message */ -+/* 25 87 e0 ca 06 flat battery */ -+ -+/* Alexandre Karpenko reports the following responses for Device ID 0x8242 */ -+/* 25 87 ee 47 0b + */ -+/* 25 87 ee 47 0d - */ -+/* 25 87 ee 47 08 << */ -+/* 25 87 ee 47 07 >> */ -+/* 25 87 ee 47 04 >" */ -+/* 25 87 ee 47 02 menu */ -+/* 26 87 ee 47 ** for key repeat (** is the code of the key being held) */ -+ -+static const unsigned short appleir_key_table[] = { -+ KEY_RESERVED, -+ KEY_MENU, -+ KEY_PLAYPAUSE, -+ KEY_FORWARD, -+ KEY_BACK, -+ KEY_VOLUMEUP, -+ KEY_VOLUMEDOWN, -+ KEY_RESERVED, -+}; -+ -+struct appleir { -+ struct input_dev *input_dev; -+ unsigned short keymap[ARRAY_SIZE(appleir_key_table)]; -+ u8 *data; -+ dma_addr_t dma_buf; -+ struct usb_device *usbdev; -+ unsigned int flags; -+ struct urb *urb; -+ struct timer_list key_up_timer; -+ int current_key; -+ char phys[32]; -+}; -+ -+static DEFINE_MUTEX(appleir_mutex); -+ -+enum { -+ APPLEIR_OPENED = 0x1, -+ APPLEIR_SUSPENDED = 0x2, -+}; -+ -+static struct usb_device_id appleir_ids[] = { -+ { USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) }, -+ { USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, -+ { USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, -+ {} -+}; -+MODULE_DEVICE_TABLE(usb, appleir_ids); -+ -+static void dump_packet(struct appleir *appleir, char *msg, u8 *data, int len) -+{ -+ int i; -+ -+ printk(KERN_ERR "appleir: %s (%d bytes)", msg, len); -+ -+ for (i = 0; i < len; ++i) -+ printk(" %02x", data[i]); -+ printk("\n"); -+} -+ -+static void key_up(struct appleir *appleir, int key) -+{ -+ dbginfo(&appleir->input_dev->dev, "key %d up\n", key); -+ input_report_key(appleir->input_dev, key, 0); -+ input_sync(appleir->input_dev); -+} -+ -+static void key_down(struct appleir *appleir, int key) -+{ -+ dbginfo(&appleir->input_dev->dev, "key %d down\n", key); -+ input_report_key(appleir->input_dev, key, 1); -+ input_sync(appleir->input_dev); -+} -+ -+static void battery_flat(struct appleir *appleir) -+{ -+ dev_err(&appleir->input_dev->dev, "possible flat battery?\n"); -+} -+ -+static void key_up_tick(unsigned long data) -+{ -+ struct appleir *appleir = (struct appleir *)data; -+ -+ if (appleir->current_key) { -+ key_up(appleir, appleir->current_key); -+ appleir->current_key = 0; -+ } -+} -+ -+static void new_data(struct appleir *appleir, u8 *data, int len) -+{ -+ static const u8 keydown[] = { 0x25, 0x87, 0xee }; -+ static const u8 keyrepeat[] = { 0x26, }; -+ static const u8 flatbattery[] = { 0x25, 0x87, 0xe0 }; -+ -+ if (debug) -+ dump_packet(appleir, "received", data, len); -+ -+ if (len != 5) -+ return; -+ -+ if (!memcmp(data, keydown, sizeof(keydown))) { -+ /* If we already have a key down, take it up 
before marking -+ this one down */ -+ if (appleir->current_key) -+ key_up(appleir, appleir->current_key); -+ appleir->current_key = appleir->keymap[(data[4] >> 1) & MAX_KEYS_MASK]; -+ -+ key_down(appleir, appleir->current_key); -+ /* Remote doesn't do key up, either pull them up, in the test -+ above, or here set a timer which pulls them up after 1/8 s */ -+ mod_timer(&appleir->key_up_timer, jiffies + HZ / 8); -+ -+ return; -+ } -+ -+ if (!memcmp(data, keyrepeat, sizeof(keyrepeat))) { -+ key_down(appleir, appleir->current_key); -+ /* Remote doesn't do key up, either pull them up, in the test -+ above, or here set a timer which pulls them up after 1/8 s */ -+ mod_timer(&appleir->key_up_timer, jiffies + HZ / 8); -+ return; -+ } -+ -+ if (!memcmp(data, flatbattery, sizeof(flatbattery))) { -+ battery_flat(appleir); -+ /* Fall through */ -+ } -+ -+ dump_packet(appleir, "unknown packet", data, len); -+} -+ -+static void appleir_urb(struct urb *urb) -+{ -+ struct appleir *appleir = urb->context; -+ int status = urb->status; -+ int retval; -+ -+ switch (status) { -+ case 0: -+ new_data(appleir, urb->transfer_buffer, urb->actual_length); -+ break; -+ case -ECONNRESET: -+ case -ENOENT: -+ case -ESHUTDOWN: -+ /* This urb is terminated, clean up */ -+ dbginfo(&appleir->input_dev->dev, "%s - urb shutting down with status: %d", __func__, -+ urb->status); -+ return; -+ default: -+ dbginfo(&appleir->input_dev->dev, "%s - nonzero urb status received: %d", __func__, -+ urb->status); -+ } -+ -+ retval = usb_submit_urb(urb, GFP_ATOMIC); -+ if (retval) -+ err("%s - usb_submit_urb failed with result %d", __func__, -+ retval); -+} -+ -+static int appleir_open(struct input_dev *dev) -+{ -+ struct appleir *appleir = input_get_drvdata(dev); -+ struct usb_interface *intf = usb_ifnum_to_if(appleir->usbdev, 0); -+ int r; -+ -+ r = usb_autopm_get_interface(intf); -+ if (r) { -+ dev_err(&intf->dev, -+ "%s(): usb_autopm_get_interface() = %d\n", __func__, r); -+ return r; -+ } -+ -+ mutex_lock(&appleir_mutex); -+ -+ if (usb_submit_urb(appleir->urb, GFP_ATOMIC)) { -+ r = -EIO; -+ goto fail; -+ } -+ -+ appleir->flags |= APPLEIR_OPENED; -+ -+ mutex_unlock(&appleir_mutex); -+ -+ usb_autopm_put_interface(intf); -+ -+ return 0; -+fail: -+ mutex_unlock(&appleir_mutex); -+ usb_autopm_put_interface(intf); -+ return r; -+} -+ -+static void appleir_close(struct input_dev *dev) -+{ -+ struct appleir *appleir = input_get_drvdata(dev); -+ -+ mutex_lock(&appleir_mutex); -+ -+ if (!(appleir->flags & APPLEIR_SUSPENDED)) { -+ usb_kill_urb(appleir->urb); -+ del_timer_sync(&appleir->key_up_timer); -+ } -+ -+ appleir->flags &= ~APPLEIR_OPENED; -+ -+ mutex_unlock(&appleir_mutex); -+} -+ -+static int appleir_probe(struct usb_interface *intf, -+ const struct usb_device_id *id) -+{ -+ struct usb_device *dev = interface_to_usbdev(intf); -+ struct usb_endpoint_descriptor *endpoint; -+ struct appleir *appleir = NULL; -+ struct input_dev *input_dev; -+ int retval = -ENOMEM; -+ int i; -+ -+ appleir = kzalloc(sizeof(struct appleir), GFP_KERNEL); -+ if (!appleir) -+ goto allocfail; -+ -+ appleir->data = usb_buffer_alloc(dev, URB_SIZE, GFP_KERNEL, -+ &appleir->dma_buf); -+ if (!appleir->data) -+ goto usbfail; -+ -+ appleir->urb = usb_alloc_urb(0, GFP_KERNEL); -+ if (!appleir->urb) -+ goto urbfail; -+ -+ appleir->usbdev = dev; -+ -+ input_dev = input_allocate_device(); -+ if (!input_dev) -+ goto inputfail; -+ -+ appleir->input_dev = input_dev; -+ -+ usb_make_path(dev, appleir->phys, sizeof(appleir->phys)); -+ strlcpy(appleir->phys, "/input0", 
sizeof(appleir->phys)); -+ -+ input_dev->name = "Apple Infrared Remote Controller"; -+ input_dev->phys = appleir->phys; -+ usb_to_input_id(dev, &input_dev->id); -+ input_dev->dev.parent = &intf->dev; -+ input_dev->keycode = appleir->keymap; -+ input_dev->keycodesize = sizeof(unsigned short); -+ input_dev->keycodemax = ARRAY_SIZE(appleir->keymap); -+ -+ input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP); -+ -+ memcpy(appleir->keymap, appleir_key_table, sizeof(appleir->keymap)); -+ for (i = 0; i < ARRAY_SIZE(appleir_key_table); i++) -+ set_bit(appleir->keymap[i], input_dev->keybit); -+ clear_bit(KEY_RESERVED, input_dev->keybit); -+ -+ input_set_drvdata(input_dev, appleir); -+ input_dev->open = appleir_open; -+ input_dev->close = appleir_close; -+ -+ endpoint = &intf->cur_altsetting->endpoint[0].desc; -+ -+ usb_fill_int_urb(appleir->urb, dev, -+ usb_rcvintpipe(dev, endpoint->bEndpointAddress), -+ appleir->data, 8, -+ appleir_urb, appleir, endpoint->bInterval); -+ -+ appleir->urb->transfer_dma = appleir->dma_buf; -+ appleir->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; -+ -+ setup_timer(&appleir->key_up_timer, -+ key_up_tick, (unsigned long) appleir); -+ -+ retval = input_register_device(appleir->input_dev); -+ if (retval) -+ goto inputfail; -+ -+ usb_set_intfdata(intf, appleir); -+ -+ return 0; -+ -+inputfail: -+ input_free_device(appleir->input_dev); -+ -+urbfail: -+ usb_free_urb(appleir->urb); -+ -+usbfail: -+ usb_buffer_free(dev, URB_SIZE, appleir->data, -+ appleir->dma_buf); -+ -+allocfail: -+ kfree(appleir); -+ -+ return retval; -+} -+ -+static void appleir_disconnect(struct usb_interface *intf) -+{ -+ struct appleir *appleir = usb_get_intfdata(intf); -+ -+ usb_set_intfdata(intf, NULL); -+ input_unregister_device(appleir->input_dev); -+ usb_free_urb(appleir->urb); -+ usb_buffer_free(interface_to_usbdev(intf), URB_SIZE, -+ appleir->data, appleir->dma_buf); -+ kfree(appleir); -+} -+ -+static int appleir_suspend(struct usb_interface *interface, -+ pm_message_t message) -+{ -+ struct appleir *appleir = usb_get_intfdata(interface); -+ -+ mutex_lock(&appleir_mutex); -+ if (appleir->flags & APPLEIR_OPENED) -+ usb_kill_urb(appleir->urb); -+ -+ appleir->flags |= APPLEIR_SUSPENDED; -+ -+ mutex_unlock(&appleir_mutex); -+ -+ return 0; -+} -+ -+static int appleir_resume(struct usb_interface *interface) -+{ -+ struct appleir *appleir; -+ int r = 0; -+ -+ appleir = usb_get_intfdata(interface); -+ -+ mutex_lock(&appleir_mutex); -+ if (appleir->flags & APPLEIR_OPENED) { -+ struct usb_endpoint_descriptor *endpoint; -+ -+ endpoint = &interface->cur_altsetting->endpoint[0].desc; -+ usb_fill_int_urb(appleir->urb, appleir->usbdev, -+ usb_rcvintpipe(appleir->usbdev, endpoint->bEndpointAddress), -+ appleir->data, 8, -+ appleir_urb, appleir, endpoint->bInterval); -+ appleir->urb->transfer_dma = appleir->dma_buf; -+ appleir->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; -+ -+ /* And reset the USB device */ -+ if (usb_submit_urb(appleir->urb, GFP_ATOMIC)) -+ r = -EIO; -+ } -+ -+ appleir->flags &= ~APPLEIR_SUSPENDED; -+ -+ mutex_unlock(&appleir_mutex); -+ -+ return r; -+} -+ -+static struct usb_driver appleir_driver = { -+ .name = "appleir", -+ .probe = appleir_probe, -+ .disconnect = appleir_disconnect, -+ .suspend = appleir_suspend, -+ .resume = appleir_resume, -+ .reset_resume = appleir_resume, -+ .id_table = appleir_ids, -+}; -+ -+static int __init appleir_init(void) -+{ -+ return usb_register(&appleir_driver); -+} -+ -+static void __exit appleir_exit(void) -+{ -+ usb_deregister(&appleir_driver); -+} -+ 
-+module_init(appleir_init); -+module_exit(appleir_exit); -diff --git a/include/linux/hid.h b/include/linux/hid.h -index b1344ec..f1f2b6f 100644 ---- a/include/linux/hid.h -+++ b/include/linux/hid.h -@@ -308,11 +308,13 @@ struct hid_item { - #define HID_QUIRK_NOTOUCH 0x00000002 - #define HID_QUIRK_IGNORE 0x00000004 - #define HID_QUIRK_NOGET 0x00000008 -+#define HID_QUIRK_HIDDEV_FORCE 0x00000010 - #define HID_QUIRK_BADPAD 0x00000020 - #define HID_QUIRK_MULTI_INPUT 0x00000040 - #define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 - #define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 - #define HID_QUIRK_NO_INIT_REPORTS 0x20000000 -+#define HID_QUIRK_NO_IGNORE 0x40000000 - - /* - * This is the global environment of the parser. This information is --- -1.7.0.1 - diff --git a/alsa-usbmixer-add-possibility-to-remap-dB-values.patch b/alsa-usbmixer-add-possibility-to-remap-dB-values.patch new file mode 100644 index 0000000..b2729bb --- /dev/null +++ b/alsa-usbmixer-add-possibility-to-remap-dB-values.patch @@ -0,0 +1,348 @@ +From 82ff9c3b767ec5bfaed3d99c9ed1160e44cbfd53 Mon Sep 17 00:00:00 2001 +From: Jaroslav Kysela +Date: Thu, 11 Feb 2010 17:50:44 +0100 +Subject: ALSA: usbmixer - add possibility to remap dB values + +USB devices tends to represent dB ranges in different way than ALSA expects. +Add possibility to override these values and add guessed values for +SoundBlaster MP3+. + +Also rename 'Capture Input Source' control to 'Capture Source' for +SoundBlaster MP3+ and Extigy. + +Signed-off-by: Jaroslav Kysela +--- + sound/usb/usbmixer.c | 125 +++++++++++++++++++++++++++------------------ + sound/usb/usbmixer_maps.c | 23 ++++++-- + 2 files changed, 93 insertions(+), 55 deletions(-) + +diff --git a/sound/usb/usbmixer.c b/sound/usb/usbmixer.c +index c998220..c72ad0c 100644 +--- a/sound/usb/usbmixer.c ++++ b/sound/usb/usbmixer.c +@@ -123,6 +123,7 @@ struct usb_mixer_elem_info { + int channels; + int val_type; + int min, max, res; ++ int dBmin, dBmax; + int cached; + int cache_val[MAX_CHANNELS]; + u8 initialized; +@@ -194,42 +195,50 @@ enum { + */ + #include "usbmixer_maps.c" + +-/* get the mapped name if the unit matches */ +-static int check_mapped_name(struct mixer_build *state, int unitid, int control, char *buf, int buflen) ++static const struct usbmix_name_map * ++find_map(struct mixer_build *state, int unitid, int control) + { +- const struct usbmix_name_map *p; ++ const struct usbmix_name_map *p = state->map; + +- if (! state->map) +- return 0; ++ if (!p) ++ return NULL; + + for (p = state->map; p->id; p++) { +- if (p->id == unitid && p->name && +- (! control || ! p->control || control == p->control)) { +- buflen--; +- return strlcpy(buf, p->name, buflen); +- } ++ if (p->id == unitid && ++ (!control || !p->control || control == p->control)) ++ return p; + } +- return 0; ++ return NULL; + } + +-/* check whether the control should be ignored */ +-static int check_ignored_ctl(struct mixer_build *state, int unitid, int control) ++/* get the mapped name if the unit matches */ ++static int ++check_mapped_name(const struct usbmix_name_map *p, char *buf, int buflen) + { +- const struct usbmix_name_map *p; ++ if (!p || !p->name) ++ return 0; + +- if (! state->map) ++ buflen--; ++ return strlcpy(buf, p->name, buflen); ++} ++ ++/* check whether the control should be ignored */ ++static inline int ++check_ignored_ctl(const struct usbmix_name_map *p) ++{ ++ if (!p || p->name || p->dB) + return 0; +- for (p = state->map; p->id; p++) { +- if (p->id == unitid && ! p->name && +- (! control || ! 
p->control || control == p->control)) { +- /* +- printk(KERN_DEBUG "ignored control %d:%d\n", +- unitid, control); +- */ +- return 1; +- } ++ return 1; ++} ++ ++/* dB mapping */ ++static inline void check_mapped_dB(const struct usbmix_name_map *p, ++ struct usb_mixer_elem_info *cval) ++{ ++ if (p && p->dB) { ++ cval->dBmin = p->dB->min; ++ cval->dBmax = p->dB->max; + } +- return 0; + } + + /* get the mapped selector source name */ +@@ -466,20 +475,8 @@ static int mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag, + + if (size < sizeof(scale)) + return -ENOMEM; +- /* USB descriptions contain the dB scale in 1/256 dB unit +- * while ALSA TLV contains in 1/100 dB unit +- */ +- scale[2] = (convert_signed_value(cval, cval->min) * 100) / 256; +- scale[3] = (convert_signed_value(cval, cval->max) * 100) / 256; +- if (scale[3] <= scale[2]) { +- /* something is wrong; assume it's either from/to 0dB */ +- if (scale[2] < 0) +- scale[3] = 0; +- else if (scale[2] > 0) +- scale[2] = 0; +- else /* totally crap, return an error */ +- return -EINVAL; +- } ++ scale[2] = cval->dBmin; ++ scale[3] = cval->dBmax; + if (copy_to_user(_tlv, scale, sizeof(scale))) + return -EFAULT; + return 0; +@@ -720,6 +717,7 @@ static int get_min_max(struct usb_mixer_elem_info *cval, int default_min) + cval->min = default_min; + cval->max = cval->min + 1; + cval->res = 1; ++ cval->dBmin = cval->dBmax = 0; + + if (cval->val_type == USB_MIXER_BOOLEAN || + cval->val_type == USB_MIXER_INV_BOOLEAN) { +@@ -787,6 +785,24 @@ static int get_min_max(struct usb_mixer_elem_info *cval, int default_min) + + cval->initialized = 1; + } ++ ++ /* USB descriptions contain the dB scale in 1/256 dB unit ++ * while ALSA TLV contains in 1/100 dB unit ++ */ ++ cval->dBmin = (convert_signed_value(cval, cval->min) * 100) / 256; ++ cval->dBmax = (convert_signed_value(cval, cval->max) * 100) / 256; ++ if (cval->dBmin > cval->dBmax) { ++ /* something is wrong; assume it's either from/to 0dB */ ++ if (cval->dBmin < 0) ++ cval->dBmax = 0; ++ else if (cval->dBmin > 0) ++ cval->dBmin = 0; ++ if (cval->dBmin > cval->dBmax) { ++ /* totally crap, return an error */ ++ return -EINVAL; ++ } ++ } ++ + return 0; + } + +@@ -912,6 +928,7 @@ static void build_feature_ctl(struct mixer_build *state, unsigned char *desc, + int nameid = desc[desc[0] - 1]; + struct snd_kcontrol *kctl; + struct usb_mixer_elem_info *cval; ++ const struct usbmix_name_map *map; + + control++; /* change from zero-based to 1-based value */ + +@@ -920,7 +937,8 @@ static void build_feature_ctl(struct mixer_build *state, unsigned char *desc, + return; + } + +- if (check_ignored_ctl(state, unitid, control)) ++ map = find_map(state, unitid, control); ++ if (check_ignored_ctl(map)) + return; + + cval = kzalloc(sizeof(*cval), GFP_KERNEL); +@@ -954,10 +972,11 @@ static void build_feature_ctl(struct mixer_build *state, unsigned char *desc, + } + kctl->private_free = usb_mixer_elem_free; + +- len = check_mapped_name(state, unitid, control, kctl->id.name, sizeof(kctl->id.name)); ++ len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); + mapped_name = len != 0; + if (! 
len && nameid) +- len = snd_usb_copy_string_desc(state, nameid, kctl->id.name, sizeof(kctl->id.name)); ++ len = snd_usb_copy_string_desc(state, nameid, ++ kctl->id.name, sizeof(kctl->id.name)); + + switch (control) { + case USB_FEATURE_MUTE: +@@ -995,6 +1014,7 @@ static void build_feature_ctl(struct mixer_build *state, unsigned char *desc, + kctl->vd[0].access |= + SNDRV_CTL_ELEM_ACCESS_TLV_READ | + SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK; ++ check_mapped_dB(map, cval); + } + break; + +@@ -1122,8 +1142,10 @@ static void build_mixer_unit_ctl(struct mixer_build *state, unsigned char *desc, + unsigned int num_outs = desc[5 + input_pins]; + unsigned int i, len; + struct snd_kcontrol *kctl; ++ const struct usbmix_name_map *map; + +- if (check_ignored_ctl(state, unitid, 0)) ++ map = find_map(state, unitid, 0); ++ if (check_ignored_ctl(map)) + return; + + cval = kzalloc(sizeof(*cval), GFP_KERNEL); +@@ -1152,7 +1174,7 @@ static void build_mixer_unit_ctl(struct mixer_build *state, unsigned char *desc, + } + kctl->private_free = usb_mixer_elem_free; + +- len = check_mapped_name(state, unitid, 0, kctl->id.name, sizeof(kctl->id.name)); ++ len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); + if (! len) + len = get_term_name(state, iterm, kctl->id.name, sizeof(kctl->id.name), 0); + if (! len) +@@ -1342,6 +1364,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, unsigned + int i, err, nameid, type, len; + struct procunit_info *info; + struct procunit_value_info *valinfo; ++ const struct usbmix_name_map *map; + static struct procunit_value_info default_value_info[] = { + { 0x01, "Switch", USB_MIXER_BOOLEAN }, + { 0 } +@@ -1371,7 +1394,8 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, unsigned + /* FIXME: bitmap might be longer than 8bit */ + if (! (dsc[12 + num_ins] & (1 << (valinfo->control - 1)))) + continue; +- if (check_ignored_ctl(state, unitid, valinfo->control)) ++ map = find_map(state, unitid, valinfo->control); ++ if (check_ignored_ctl(map)) + continue; + cval = kzalloc(sizeof(*cval), GFP_KERNEL); + if (! cval) { +@@ -1402,8 +1426,9 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, unsigned + } + kctl->private_free = usb_mixer_elem_free; + +- if (check_mapped_name(state, unitid, cval->control, kctl->id.name, sizeof(kctl->id.name))) +- ; ++ if (check_mapped_name(map, kctl->id.name, ++ sizeof(kctl->id.name))) ++ /* nothing */ ; + else if (info->name) + strlcpy(kctl->id.name, info->name, sizeof(kctl->id.name)); + else { +@@ -1542,6 +1567,7 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, unsi + int err; + struct usb_mixer_elem_info *cval; + struct snd_kcontrol *kctl; ++ const struct usbmix_name_map *map; + char **namelist; + + if (! num_ins || desc[0] < 5 + num_ins) { +@@ -1557,7 +1583,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, unsi + if (num_ins == 1) /* only one ? nonsense! 
*/ + return 0; + +- if (check_ignored_ctl(state, unitid, 0)) ++ map = find_map(state, unitid, 0); ++ if (check_ignored_ctl(map)) + return 0; + + cval = kzalloc(sizeof(*cval), GFP_KERNEL); +@@ -1612,7 +1639,7 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, unsi + kctl->private_free = usb_mixer_selector_elem_free; + + nameid = desc[desc[0] - 1]; +- len = check_mapped_name(state, unitid, 0, kctl->id.name, sizeof(kctl->id.name)); ++ len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); + if (len) + ; + else if (nameid) +diff --git a/sound/usb/usbmixer_maps.c b/sound/usb/usbmixer_maps.c +index 77c3588..79e903a 100644 +--- a/sound/usb/usbmixer_maps.c ++++ b/sound/usb/usbmixer_maps.c +@@ -19,11 +19,16 @@ + * + */ + ++struct usbmix_dB_map { ++ u32 min; ++ u32 max; ++}; + + struct usbmix_name_map { + int id; + const char *name; + int control; ++ struct usbmix_dB_map *dB; + }; + + struct usbmix_selector_map { +@@ -72,7 +77,7 @@ static struct usbmix_name_map extigy_map[] = { + { 8, "Line Playback" }, /* FU */ + /* 9: IT mic */ + { 10, "Mic Playback" }, /* FU */ +- { 11, "Capture Input Source" }, /* SU */ ++ { 11, "Capture Source" }, /* SU */ + { 12, "Capture" }, /* FU */ + /* 13: OT pcm capture */ + /* 14: MU (w/o controls) */ +@@ -102,6 +107,9 @@ static struct usbmix_name_map extigy_map[] = { + * e.g. no Master and fake PCM volume + * Pavel Mihaylov + */ ++static struct usbmix_dB_map mp3plus_dB_1 = {-4781, 0}; /* just guess */ ++static struct usbmix_dB_map mp3plus_dB_2 = {-1781, 618}; /* just guess */ ++ + static struct usbmix_name_map mp3plus_map[] = { + /* 1: IT pcm */ + /* 2: IT mic */ +@@ -110,16 +118,19 @@ static struct usbmix_name_map mp3plus_map[] = { + /* 5: OT digital out */ + /* 6: OT speaker */ + /* 7: OT pcm capture */ +- { 8, "Capture Input Source" }, /* FU, default PCM Capture Source */ ++ { 8, "Capture Source" }, /* FU, default PCM Capture Source */ + /* (Mic, Input 1 = Line input, Input 2 = Optical input) */ + { 9, "Master Playback" }, /* FU, default Speaker 1 */ + /* { 10, "Mic Capture", 1 }, */ /* FU, Mic Capture */ +- /* { 10, "Mic Capture", 2 }, */ /* FU, Mic Capture */ ++ { 10, /* "Mic Capture", */ NULL, 2, .dB = &mp3plus_dB_2 }, ++ /* FU, Mic Capture */ + { 10, "Mic Boost", 7 }, /* FU, default Auto Gain Input */ +- { 11, "Line Capture" }, /* FU, default PCM Capture */ ++ { 11, "Line Capture", .dB = &mp3plus_dB_2 }, ++ /* FU, default PCM Capture */ + { 12, "Digital In Playback" }, /* FU, default PCM 1 */ +- /* { 13, "Mic Playback" }, */ /* FU, default Mic Playback */ +- { 14, "Line Playback" }, /* FU, default Speaker */ ++ { 13, /* "Mic Playback", */ .dB = &mp3plus_dB_1 }, ++ /* FU, default Mic Playback */ ++ { 14, "Line Playback", .dB = &mp3plus_dB_1 }, /* FU, default Speaker */ + /* 15: MU */ + { 0 } /* terminator */ + }; +-- +1.7.0.1 + diff --git a/cifs-fix-dns-resolver.patch b/cifs-fix-dns-resolver.patch deleted file mode 100644 index 6a74fba..0000000 --- a/cifs-fix-dns-resolver.patch +++ /dev/null @@ -1,47 +0,0 @@ -From: Chuck Ebbert - -CIFS: Fix DNS resolver build - -In file included from fs/cifs/dns_resolve.c:29: -fs/cifs/dns_resolve.h:27: error: expected '=', ',', ';', 'asm' or '__attribute__' before 'cifs_init_dns_resolver' - -Just remove the __init and __exit attributes from the init and exit -functions. __exit was removed upstream in 51c20fcced5badee0e2021c6c89f44aa3cbd72aa -anyway, and there's no point trying to save every byte by fixing -this properly. 
- -Signed-Off-By: Chuck Ebbert - ---- a/fs/cifs/dns_resolve.c -+++ b/fs/cifs/dns_resolve.c -@@ -176,7 +176,7 @@ out: - return rc; - } - --int __init cifs_init_dns_resolver(void) -+int cifs_init_dns_resolver(void) - { - struct cred *cred; - struct key *keyring; -@@ -226,7 +226,7 @@ failed_put_cred: - return ret; - } - --void __exit cifs_exit_dns_resolver(void) -+void cifs_exit_dns_resolver(void) - { - key_revoke(dns_resolver_cache->thread_keyring); - unregister_key_type(&key_type_dns_resolver); ---- a/fs/cifs/dns_resolve.h -+++ b/fs/cifs/dns_resolve.h -@@ -24,8 +24,8 @@ - #define _DNS_RESOLVE_H - - #ifdef __KERNEL__ --extern int __init cifs_init_dns_resolver(void); --extern void __exit cifs_exit_dns_resolver(void); -+extern int cifs_init_dns_resolver(void); -+extern void cifs_exit_dns_resolver(void); - extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr); - #endif /* KERNEL */ - diff --git a/cifs-fix-malicious-redirect-problem-in-the-dns-lookup-code.patch b/cifs-fix-malicious-redirect-problem-in-the-dns-lookup-code.patch new file mode 100644 index 0000000..5b2cd7d --- /dev/null +++ b/cifs-fix-malicious-redirect-problem-in-the-dns-lookup-code.patch @@ -0,0 +1,202 @@ +From: David Howells +Date: Thu, 22 Jul 2010 11:53:18 +0000 (+0100) +Subject: CIFS: Fix a malicious redirect problem in the DNS lookup code +X-Git-Tag: v2.6.35-rc6~6 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=4c0c03ca54f72fdd5912516ad0a23ec5cf01bda7 + +CIFS: Fix a malicious redirect problem in the DNS lookup code + +[ trivial backport to 2.6.3[23] : cebbert@redhat.com ] + +Fix the security problem in the CIFS filesystem DNS lookup code in which a +malicious redirect could be installed by a random user by simply adding a +result record into one of their keyrings with add_key() and then invoking a +CIFS CFS lookup [CVE-2010-2524]. + +This is done by creating an internal keyring specifically for the caching of +DNS lookups. To enforce the use of this keyring, the module init routine +creates a set of override credentials with the keyring installed as the thread +keyring and instructs request_key() to only install lookup result keys in that +keyring. + +The override is then applied around the call to request_key(). + +This has some additional benefits when a kernel service uses this module to +request a key: + + (1) The result keys are owned by root, not the user that caused the lookup. + + (2) The result keys don't pop up in the user's keyrings. + + (3) The result keys don't come out of the quota of the user that caused the + lookup. + +The keyring can be viewed as root by doing cat /proc/keys: + +2a0ca6c3 I----- 1 perm 1f030000 0 0 keyring .dns_resolver: 1/4 + +It can then be listed with 'keyctl list' by root. 
+ + # keyctl list 0x2a0ca6c3 + 1 key in keyring: + 726766307: --alswrv 0 0 dns_resolver: foo.bar.com + +Signed-off-by: David Howells +Reviewed-and-Tested-by: Jeff Layton +Acked-by: Steve French +Signed-off-by: Linus Torvalds +--- + +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c +index 484e52b..2cb1a70 100644 +--- a/fs/cifs/cifsfs.c ++++ b/fs/cifs/cifsfs.c +@@ -923,7 +923,7 @@ init_cifs(void) + goto out_unregister_filesystem; + #endif + #ifdef CONFIG_CIFS_DFS_UPCALL +- rc = register_key_type(&key_type_dns_resolver); ++ rc = cifs_init_dns_resolver(); + if (rc) + goto out_unregister_key_type; + #endif +@@ -935,7 +935,7 @@ init_cifs(void) + + out_unregister_resolver_key: + #ifdef CONFIG_CIFS_DFS_UPCALL +- unregister_key_type(&key_type_dns_resolver); ++ cifs_exit_dns_resolver(); + out_unregister_key_type: + #endif + #ifdef CONFIG_CIFS_UPCALL +@@ -961,7 +961,7 @@ exit_cifs(void) + cifs_proc_clean(); + #ifdef CONFIG_CIFS_DFS_UPCALL + cifs_dfs_release_automount_timer(); +- unregister_key_type(&key_type_dns_resolver); ++ cifs_exit_dns_resolver(); + #endif + #ifdef CONFIG_CIFS_UPCALL + unregister_key_type(&cifs_spnego_key_type); +diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c +index 4db2c5e..49315cb 100644 +--- a/fs/cifs/dns_resolve.c ++++ b/fs/cifs/dns_resolve.c +@@ -24,12 +24,16 @@ + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + ++#include ++#include + #include + #include "dns_resolve.h" + #include "cifsglob.h" + #include "cifsproto.h" + #include "cifs_debug.h" + ++static const struct cred *dns_resolver_cache; ++ + /* Checks if supplied name is IP address + * returns: + * 1 - name is IP +@@ -94,6 +98,7 @@ struct key_type key_type_dns_resolver = { + int + dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) + { ++ const struct cred *saved_cred; + int rc = -EAGAIN; + struct key *rkey = ERR_PTR(-EAGAIN); + char *name; +@@ -133,8 +138,15 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) + goto skip_upcall; + } + ++ saved_cred = override_creds(dns_resolver_cache); + rkey = request_key(&key_type_dns_resolver, name, ""); ++ revert_creds(saved_cred); + if (!IS_ERR(rkey)) { ++ if (!(rkey->perm & KEY_USR_VIEW)) { ++ down_read(&rkey->sem); ++ rkey->perm |= KEY_USR_VIEW; ++ up_read(&rkey->sem); ++ } + len = rkey->type_data.x[0]; + data = rkey->payload.data; + } else { +@@ -165,4 +177,61 @@ out: + return rc; + } + ++int __init cifs_init_dns_resolver(void) ++{ ++ struct cred *cred; ++ struct key *keyring; ++ int ret; ++ ++ printk(KERN_NOTICE "Registering the %s key type\n", ++ key_type_dns_resolver.name); ++ ++ /* create an override credential set with a special thread keyring in ++ * which DNS requests are cached ++ * ++ * this is used to prevent malicious redirections from being installed ++ * with add_key(). 
++ */ ++ cred = prepare_kernel_cred(NULL); ++ if (!cred) ++ return -ENOMEM; ++ ++ keyring = key_alloc(&key_type_keyring, ".dns_resolver", 0, 0, cred, ++ (KEY_POS_ALL & ~KEY_POS_SETATTR) | ++ KEY_USR_VIEW | KEY_USR_READ, ++ KEY_ALLOC_NOT_IN_QUOTA); ++ if (IS_ERR(keyring)) { ++ ret = PTR_ERR(keyring); ++ goto failed_put_cred; ++ } ++ ++ ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL); ++ if (ret < 0) ++ goto failed_put_key; ++ ++ ret = register_key_type(&key_type_dns_resolver); ++ if (ret < 0) ++ goto failed_put_key; ++ ++ /* instruct request_key() to use this special keyring as a cache for ++ * the results it looks up */ ++ cred->thread_keyring = keyring; ++ cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; ++ dns_resolver_cache = cred; ++ return 0; ++ ++failed_put_key: ++ key_put(keyring); ++failed_put_cred: ++ put_cred(cred); ++ return ret; ++} + ++void __exit cifs_exit_dns_resolver(void) ++{ ++ key_revoke(dns_resolver_cache->thread_keyring); ++ unregister_key_type(&key_type_dns_resolver); ++ put_cred(dns_resolver_cache); ++ printk(KERN_NOTICE "Unregistered %s key type\n", ++ key_type_dns_resolver.name); ++} +diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h +index 966e928..26b9eaa 100644 +--- a/fs/cifs/dns_resolve.h ++++ b/fs/cifs/dns_resolve.h +@@ -24,8 +24,8 @@ + #define _DNS_RESOLVE_H + + #ifdef __KERNEL__ +-#include +-extern struct key_type key_type_dns_resolver; ++extern int __init cifs_init_dns_resolver(void); ++extern void __exit cifs_exit_dns_resolver(void); + extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr); + #endif /* KERNEL */ + diff --git a/config-arm b/config-arm index ebc5d6c..923ebc3 100644 --- a/config-arm +++ b/config-arm @@ -105,6 +105,3 @@ CONFIG_RCU_FANOUT=32 # CONFIG_OC_ETM is not set # CONFIG_MTD_PISMO is not set - -CONFIG_PERF_EVENTS=y -CONFIG_PERF_COUNTERS=y diff --git a/config-debug b/config-debug index d66f251..1e51d46 100644 --- a/config-debug +++ b/config-debug @@ -8,7 +8,6 @@ CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y CONFIG_DEBUG_VM=y CONFIG_DEBUG_SPINLOCK=y -CONFIG_PROVE_RCU=y CONFIG_FAULT_INJECTION=y CONFIG_FAILSLAB=y @@ -64,6 +63,10 @@ CONFIG_DEBUG_PERF_USE_VMALLOC=y CONFIG_JBD2_DEBUG=y +CONFIG_FUNCTION_TRACER=y +CONFIG_STACK_TRACER=y +CONFIG_DYNAMIC_FTRACE=y + CONFIG_DEBUG_CFQ_IOSCHED=y CONFIG_DRBD_FAULT_INJECTION=y @@ -73,11 +76,3 @@ CONFIG_IWLWIFI_DEVICE_TRACING=y CONFIG_DEBUG_OBJECTS_WORK=y # CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set - -CONFIG_DMADEVICES_DEBUG=y -CONFIG_DMADEVICES_VDEBUG=y - -CONFIG_PM_ADVANCED_DEBUG=y - -CONFIG_CEPH_FS_PRETTYDEBUG=y -CONFIG_QUOTA_DEBUG=y diff --git a/config-generic b/config-generic index 3b23aab..2338def 100644 --- a/config-generic +++ b/config-generic @@ -38,6 +38,7 @@ CONFIG_LOG_BUF_SHIFT=17 CONFIG_KALLSYMS=y CONFIG_KALLSYMS_ALL=y CONFIG_KALLSYMS_EXTRA_PASS=y +CONFIG_KALLSYMS_STRIP_GENERATED=y CONFIG_FUTEX=y CONFIG_EPOLL=y CONFIG_IOSCHED_NOOP=y @@ -55,6 +56,7 @@ CONFIG_POSIX_MQUEUE=y # CONFIG_PREEMPT_NONE is not set CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set +CONFIG_PREEMPT_BKL=y CONFIG_SLUB=y # CONFIG_SLUB_STATS is not set @@ -74,6 +76,7 @@ CONFIG_MODULE_UNLOAD=y # -- MODULE_FORCE_UNLOAD is controlled by config-debug/nodebug # CONFIG_MODVERSIONS is not set CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_KMOD=y # # Bus options (PCI, PCMCIA, EISA, MCA, ISA) @@ -120,6 +123,7 @@ CONFIG_SDIO_UART=m # CONFIG_MMC_DEBUG is not set # CONFIG_MMC_UNSAFE_RESUME is not set CONFIG_MMC_BLOCK=m +CONFIG_MMC_RICOH_MMC=m CONFIG_MMC_SDHCI=m CONFIG_MMC_SDHCI_PCI=m 
CONFIG_MMC_SDRICOH_CS=m @@ -128,7 +132,6 @@ CONFIG_MMC_WBSD=m CONFIG_MMC_VIA_SDMMC=m CONFIG_MMC_SDHCI_PLTFM=m CONFIG_MMC_CB710=m -CONFIG_MMC_RICOH_MMC=y CONFIG_CB710_CORE=m # CONFIG_CB710_DEBUG is not set @@ -227,6 +230,7 @@ CONFIG_MTD_ABSENT=m # CONFIG_MTD_COMPLEX_MAPPINGS=y # CONFIG_MTD_PHYSMAP is not set +# CONFIG_MTD_PNC2000 is not set CONFIG_MTD_SC520CDP=m CONFIG_MTD_NETSC520=m # CONFIG_MTD_SBC_GXX is not set @@ -361,10 +365,12 @@ CONFIG_BLK_DEV_OSD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_BLK_DEV_RAM_BLOCKSIZE=4096 CONFIG_BLK_DEV_INITRD=y CONFIG_BLK_DEV_ATIIXP=y CONFIG_LBD=y CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_LSF=y CONFIG_BLK_DEV_DELKIN=m # CONFIG_BLK_DEV_IT8213 is not set @@ -384,9 +390,13 @@ CONFIG_BLK_DEV_IDE=y # # Please see Documentation/ide.txt for help/info on IDE drives # +# CONFIG_BLK_DEV_HD_IDE is not set +CONFIG_BLK_DEV_IDEDISK=y +CONFIG_IDEDISK_MULTI_MODE=y CONFIG_BLK_DEV_IDECS=m CONFIG_BLK_DEV_IDECD=m # CONFIG_BLK_DEV_IDETAPE is not set +CONFIG_BLK_DEV_IDEFLOPPY=y CONFIG_IDE_TASK_IOCTL=y # CONFIG_BLK_DEV_IDE_SATA is not set @@ -397,6 +407,7 @@ CONFIG_BLK_DEV_CMD640=y CONFIG_BLK_DEV_CMD640_ENHANCED=y CONFIG_BLK_DEV_IDEPNP=y CONFIG_BLK_DEV_IDEPCI=y +CONFIG_IDEPCI_SHARE_IRQ=y # CONFIG_BLK_DEV_OFFBOARD is not set CONFIG_BLK_DEV_GENERIC=y # CONFIG_BLK_DEV_OPTI621 is not set @@ -404,6 +415,7 @@ CONFIG_BLK_DEV_RZ1000=y CONFIG_BLK_DEV_IDEDMA_PCI=y CONFIG_BLK_DEV_AEC62XX=y CONFIG_BLK_DEV_ALI15X3=y +# CONFIG_WDC_ALI15X3 is not set # CONFIG_BLK_DEV_AMD74XX is not set CONFIG_BLK_DEV_CMD64X=y CONFIG_BLK_DEV_TRIFLEX=y @@ -411,6 +423,7 @@ CONFIG_BLK_DEV_TRIFLEX=y CONFIG_BLK_DEV_CS5520=y CONFIG_BLK_DEV_CS5530=y CONFIG_BLK_DEV_CS5535=y +CONFIG_BLK_DEV_HPT34X=y CONFIG_BLK_DEV_HPT366=y CONFIG_BLK_DEV_IT821X=y CONFIG_BLK_DEV_JMICRON=y @@ -418,6 +431,7 @@ CONFIG_BLK_DEV_JMICRON=y CONFIG_BLK_DEV_PIIX=y # CONFIG_BLK_DEV_NS87415 is not set CONFIG_BLK_DEV_PDC202XX_OLD=y +# CONFIG_PDC202XX_BURST is not set CONFIG_BLK_DEV_PDC202XX_NEW=y CONFIG_BLK_DEV_SVWKS=y CONFIG_BLK_DEV_SIIMAGE=y @@ -540,7 +554,6 @@ CONFIG_SCSI_HPSA=m CONFIG_SCSI_3W_SAS=m CONFIG_SCSI_PM8001=m CONFIG_VMWARE_PVSCSI=m -CONFIG_VMWARE_BALLOON=m CONFIG_ATA=y CONFIG_ATA_VERBOSE_ERROR=y @@ -638,6 +651,8 @@ CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1 CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 CONFIG_SCSI_SYM53C8XX_MMIO=y +# CONFIG_SCSI_QLOGIC_FC is not set +# CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set CONFIG_SCSI_QLOGIC_1280=m CONFIG_SCSI_DC395x=m # CONFIG_SCSI_NSP32 is not set @@ -648,6 +663,7 @@ CONFIG_SCSI_QLA_ISCSI=m # CONFIG_SCSI_IPR is not set # CONFIG_SCSI_DPT_I2O is not set CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_SEAGATE is not set # # PCMCIA SCSI adapter support @@ -671,6 +687,7 @@ CONFIG_MD_LINEAR=m CONFIG_MD_MULTIPATH=m CONFIG_MD_RAID0=m CONFIG_MD_RAID1=m +CONFIG_MD_RAID5_RESHAPE=y CONFIG_MD_RAID10=m CONFIG_MD_RAID456=m # CONFIG_MULTICORE_RAID456 is not set @@ -681,6 +698,9 @@ CONFIG_DM_DEBUG=y # CONFIG_DM_DELAY is not set CONFIG_DM_MIRROR=y CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_EMC=m +CONFIG_DM_MULTIPATH_HP=m +CONFIG_DM_MULTIPATH_RDAC=m CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_ZERO=y @@ -787,7 +807,6 @@ CONFIG_IP_VS_PROTO_TCP=y CONFIG_IP_VS_PROTO_UDP=y CONFIG_IP_VS_PROTO_ESP=y CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_PROTO_SCTP=y CONFIG_IP_VS_RR=m CONFIG_IP_VS_WRR=m CONFIG_IP_VS_LC=m @@ -823,6 +842,7 @@ CONFIG_RDS_RDMA=m CONFIG_RDS_TCP=m CONFIG_NET_9P=m +CONFIG_NET_9P_FD=m CONFIG_NET_9P_VIRTIO=m # CONFIG_NET_9P_DEBUG is not set 
CONFIG_NET_9P_RDMA=m @@ -831,7 +851,6 @@ CONFIG_DECNET=m CONFIG_DECNET_ROUTER=y # CONFIG_DECNET_NF_GRABULATOR is not set CONFIG_BRIDGE=m -CONFIG_BRIDGE_IGMP_SNOOPING=y CONFIG_NETFILTER=y CONFIG_NETFILTER_ADVANCED=y CONFIG_NF_CONNTRACK=y @@ -853,7 +872,6 @@ CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_LED=m -CONFIG_NETFILTER_XT_TARGET_CT=m CONFIG_NETFILTER_XT_MATCH_COMMENT=m CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m @@ -894,12 +912,12 @@ CONFIG_BRIDGE_NETFILTER=y # # IP: Netfilter Configuration # +CONFIG_NF_CONNTRACK_ENABLED=y CONFIG_NF_CT_ACCT=y CONFIG_NF_CONNTRACK_MARK=y CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_ZONES=y # CONFIG_NF_CONNTRACK_PROC_COMPAT is not set CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m @@ -922,11 +940,17 @@ CONFIG_NF_CT_PROTO_UDPLITE=m CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_IPRANGE=m +CONFIG_IP_NF_MATCH_OWNER=m +CONFIG_IP_NF_MATCH_RECENT=m +CONFIG_IP_NF_MATCH_TOS=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_MASQUERADE=m CONFIG_IP_NF_TARGET_REDIRECT=m CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_SAME=m +CONFIG_IP_NF_TARGET_TOS=m CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m @@ -955,6 +979,7 @@ CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_OWNER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_QUEUE=m CONFIG_IP6_NF_RAW=m @@ -1058,6 +1083,7 @@ CONFIG_NET_SCH_INGRESS=m CONFIG_NET_SCH_NETEM=m CONFIG_NET_SCH_PRIO=m CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_RR=m CONFIG_NET_SCH_SFQ=m CONFIG_NET_SCH_TBF=m CONFIG_NET_SCH_TEQL=m @@ -1068,6 +1094,7 @@ CONFIG_NET_CLS_CGROUP=y CONFIG_NET_CLS_FLOW=m CONFIG_NET_CLS_FW=m CONFIG_NET_CLS_IND=y +CONFIG_NET_CLS_POLICE=y CONFIG_NET_CLS_ROUTE4=m CONFIG_NET_CLS_ROUTE=y CONFIG_NET_CLS_RSVP=m @@ -1104,6 +1131,7 @@ CONFIG_NET_DROP_MONITOR=y CONFIG_NETDEVICES=y # disable later --kyle +CONFIG_COMPAT_NET_DEV_OPS=y # # ARCnet devices @@ -1113,7 +1141,6 @@ CONFIG_IFB=m CONFIG_DUMMY=m CONFIG_BONDING=m CONFIG_MACVLAN=m -CONFIG_MACVTAP=m CONFIG_EQUALIZER=m CONFIG_TUN=m CONFIG_VETH=m @@ -1176,6 +1203,10 @@ CONFIG_BROADCOM_PHY=m CONFIG_CICADA_PHY=m CONFIG_DAVICOM_PHY=m CONFIG_FIXED_PHY=y +CONFIG_FIXED_MII_10_FDX=y +CONFIG_FIXED_MII_100_FDX=y +CONFIG_FIXED_MII_1000_FDX=y +CONFIG_FIXED_MII_AMNT=1 CONFIG_MDIO_BITBANG=m CONFIG_NATIONAL_PHY=m CONFIG_ICPLUS_PHY=m @@ -1187,7 +1218,6 @@ CONFIG_REALTEK_PHY=m CONFIG_SMSC_PHY=m CONFIG_STE10XP=m CONFIG_VITESSE_PHY=m -CONFIG_MICREL_PHY=m CONFIG_MII=m CONFIG_HAPPYMEAL=m @@ -1208,6 +1238,7 @@ CONFIG_TULIP=m # CONFIG_TULIP_MWI is not set CONFIG_TULIP_MMIO=y # CONFIG_NI5010 is not set +# CONFIG_PCMCIA_XIRTULIP is not set CONFIG_DE4X5=m CONFIG_WINBOND_840=m CONFIG_DM9102=m @@ -1219,9 +1250,11 @@ CONFIG_NE3210=m CONFIG_ES3210=m CONFIG_NET_PCI=y CONFIG_PCNET32=m +CONFIG_PCNET32_NAPI=y CONFIG_AMD8111_ETH=m +CONFIG_AMD8111E_NAPI=y CONFIG_ADAPTEC_STARFIRE=m -CONFIG_KSZ884X_PCI=m +CONFIG_ADAPTEC_STARFIRE_NAPI=y CONFIG_B44=m CONFIG_B44_PCI=y CONFIG_BNX2=m @@ -1231,6 +1264,7 @@ CONFIG_ATL1=m CONFIG_ATL1C=m CONFIG_ATL2=m CONFIG_ATL1E=m +# CONFIG_EEPRO100 is not set CONFIG_E100=m CONFIG_FEALNX=m CONFIG_FORCEDETH=m @@ -1253,12 +1287,14 @@ CONFIG_SUNDANCE=m CONFIG_TLAN=m CONFIG_VIA_RHINE=m CONFIG_VIA_RHINE_MMIO=y +CONFIG_VIA_RHINE_NAPI=y CONFIG_VIA_VELOCITY=m 
CONFIG_NET_POCKET=y CONFIG_ATP=m CONFIG_DE600=m CONFIG_DE620=m CONFIG_CASSINI=m +# CONFIG_FEC_8XX is not set CONFIG_ETHOC=m # CONFIG_KS8842 is not set # CONFIG_KS8851_MLL is not set @@ -1270,15 +1306,20 @@ CONFIG_ACENIC=m # CONFIG_ACENIC_OMIT_TIGON_I is not set CONFIG_DL2K=m CONFIG_E1000=m +CONFIG_E1000_NAPI=y +# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set CONFIG_E1000E=m CONFIG_IGB=m +# CONFIG_IGB_LRO is not set CONFIG_IGB_DCA=y CONFIG_IGBVF=m CONFIG_NS83820=m CONFIG_HAMACHI=m CONFIG_YELLOWFIN=m CONFIG_R8169=m +CONFIG_R8169_NAPI=y CONFIG_R8169_VLAN=y +# CONFIG_SK98LIN is not set CONFIG_SKGE=m # CONFIG_SKGE_DEBUG is not set CONFIG_TIGON3=m @@ -1291,11 +1332,11 @@ CONFIG_JME=m # CONFIG_CHELSIO_T1=m CONFIG_CHELSIO_T1_1G=y +CONFIG_CHELSIO_T1_NAPI=y CONFIG_CHELSIO_T3=m -CONFIG_CHELSIO_T4=m CONFIG_IP1000=m CONFIG_IXGB=m -CONFIG_IXGBEVF=m +CONFIG_IXGB_NAPI=y CONFIG_IXGBE=m CONFIG_IXGBE_DCA=y CONFIG_IXGBE_DCB=y @@ -1304,13 +1345,13 @@ CONFIG_MYRI10GE_DCA=y CONFIG_NETXEN_NIC=m CONFIG_NIU=m CONFIG_S2IO=m +CONFIG_S2IO_NAPI=y CONFIG_VXGE=m # CONFIG_VXGE_DEBUG_TRACE_ALL is not set CONFIG_TEHUTI=m CONFIG_ENIC=m CONFIG_MLX4_EN=m # CONFIG_MLX4_DEBUG is not set -CONFIG_QLCNIC=m CONFIG_QLGE=m CONFIG_SFC=m CONFIG_SFC_MTD=y @@ -1341,10 +1382,12 @@ CONFIG_SLIP_SMART=y # # CONFIG_WLAN=y +# CONFIG_WLAN_PRE80211 is not set # CONFIG_STRIP is not set # CONFIG_ARLAN is not set CONFIG_PCMCIA_WAVELAN=m CONFIG_PCMCIA_NETWAVE=m +CONFIG_WLAN_80211=y # CONFIG_PCMCIA_RAYCS is not set CONFIG_WIRELESS=y @@ -1366,6 +1409,7 @@ CONFIG_LIB80211_CRYPT_TKIP=m # CONFIG_LIB80211_DEBUG is not set CONFIG_MAC80211=m +CONFIG_MAC80211_QOS=y CONFIG_MAC80211_RC_MINSTREL=y # CONFIG_MAC80211_RC_DEFAULT_PID is not set CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y @@ -1374,6 +1418,7 @@ CONFIG_MAC80211_MESH=y CONFIG_MAC80211_LEDS=y CONFIG_MAC80211_DEBUGFS=y # CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_DEFAULT_PS=y CONFIG_WIMAX=m CONFIG_WIMAX_DEBUG_LEVEL=8 @@ -1393,6 +1438,7 @@ CONFIG_AT76C50X_USB=m CONFIG_AIRO=m CONFIG_AIRO_CS=m CONFIG_ATMEL=m +# CONFIG_BCM43XX is not set CONFIG_B43=m CONFIG_B43_PCMCIA=y CONFIG_B43_SDIO=y @@ -1426,16 +1472,22 @@ CONFIG_LIBERTAS_SDIO=m CONFIG_LIBERTAS_DEBUG=y CONFIG_LIBERTAS_THINFIRM=m CONFIG_LIBERTAS_THINFIRM_USB=m -CONFIG_LIBERTAS_MESH=y CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_LEDS=y +CONFIG_IWLWIFI_RUN_TIME_CALIB=y CONFIG_IWLWIFI_DEBUG=y CONFIG_IWLWIFI_DEBUGFS=y +CONFIG_IWLWIFI_RFKILL=y CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT=y CONFIG_IWLAGN=m CONFIG_IWL4965=y CONFIG_IWL5000=y +CONFIG_IWL5000_RUN_TIME_CALIB=y CONFIG_IWL3945=m +CONFIG_IWL3945_RFKILL=y +CONFIG_IWL3945_DEBUG=y CONFIG_IWL3945_SPECTRUM_MEASUREMENT=y +CONFIG_IWL3945_LEDS=y CONFIG_IWM=m # CONFIG_IWM_DEBUG is not set CONFIG_MAC80211_HWSIM=m @@ -1456,9 +1508,16 @@ CONFIG_RT2X00=m CONFIG_RT2X00_LIB_DEBUGFS=y # CONFIG_RT2X00_DEBUG is not set CONFIG_RT2400PCI=m +CONFIG_RT2400PCI_RFKILL=y +CONFIG_RT2400PCI_LEDS=y CONFIG_RT2500PCI=m +CONFIG_RT2500PCI_RFKILL=y +CONFIG_RT2500PCI_LEDS=y CONFIG_RT61PCI=m +CONFIG_RT61PCI_RFKILL=y +CONFIG_RT61PCI_LEDS=y CONFIG_RT2500USB=m +CONFIG_RT2500USB_LEDS=y CONFIG_RT2800USB=m # CONFIG_RT2800USB_RT30XX is not set # CONFIG_RT2800USB_RT35XX is not set @@ -1467,12 +1526,12 @@ CONFIG_RT2800PCI=m # CONFIG_RT2800PCI_RT30XX is not set # CONFIG_RT2800PCI_RT35XX is not set CONFIG_RT73USB=m +CONFIG_RT73USB_LEDS=y CONFIG_RTL8180=m CONFIG_RTL8187=m CONFIG_TMD_HERMES=m CONFIG_USB_ZD1201=m CONFIG_USB_NET_RNDIS_WLAN=m -CONFIG_USB_NET_SMSC75XX=m CONFIG_ZD1211RW=m # CONFIG_ZD1211RW_DEBUG is not set CONFIG_AR9170_USB=m @@ -1498,6 +1557,7 @@ 
CONFIG_WL1271=m CONFIG_NET_FC=y +# CONFIG_SHAPER is not set # # Wan interfaces @@ -1535,7 +1595,6 @@ CONFIG_CAN_SJA1000_PLATFORM=m CONFIG_CAN_EMS_PCI=m CONFIG_CAN_EMS_USB=m CONFIG_CAN_KVASER_PCI=m -CONFIG_CAN_PLX_PCI=m CONFIG_NETROM=m CONFIG_ROSE=m CONFIG_MKISS=m @@ -1568,6 +1627,7 @@ CONFIG_ACTISYS_DONGLE=m CONFIG_ACT200L_DONGLE=m CONFIG_ESI_DONGLE=m CONFIG_GIRBIL_DONGLE=m +CONFIG_IRPORT_SIR=m CONFIG_KINGSUN_DONGLE=m CONFIG_KSDAZZLE_DONGLE=m CONFIG_KS959_DONGLE=m @@ -1577,6 +1637,7 @@ CONFIG_MCP2120_DONGLE=m CONFIG_OLD_BELKIN_DONGLE=m CONFIG_TEKRAM_DONGLE=m CONFIG_TOIM3232_DONGLE=m +# CONFIG_DONGLE_OLD is not set CONFIG_ALI_FIR=m CONFIG_MCS_FIR=m @@ -1609,6 +1670,7 @@ CONFIG_BT_HIDP=m CONFIG_BT_HCIBTUSB=m # Disable the BT_HCIUSB driver. # It sucks more power than BT_HCIBTUSB which has the same functionality. +# CONFIG_BT_HCIUSB is not set CONFIG_BT_HCIUART=m CONFIG_BT_HCIUART_H4=y CONFIG_BT_HCIUART_BCSP=y @@ -1617,6 +1679,7 @@ CONFIG_BT_HCIBT3C=m CONFIG_BT_HCIBLUECARD=m CONFIG_BT_HCIBTUART=m CONFIG_BT_HCIVHCI=m +CONFIG_BT_HCIUSB_SCO=y CONFIG_BT_HCIBCM203X=m CONFIG_BT_HCIBFUSB=m CONFIG_BT_HCIBPA10X=m @@ -1745,6 +1808,7 @@ CONFIG_GIGASET_BASE=m CONFIG_GIGASET_M101=m CONFIG_GIGASET_M105=m # CONFIG_GIGASET_DEBUG is not set +# CONFIG_GIGASET_UNDOCREQ is not set # # Telephony Support @@ -1936,8 +2000,10 @@ CONFIG_SERIAL_8250_RSA=y CONFIG_CYCLADES=m # CONFIG_CYZ_INTR is not set # CONFIG_DIGIEPCA is not set +# CONFIG_ESPSERIAL is not set # CONFIG_MOXA_INTELLIO is not set # CONFIG_MOXA_SMARTIO is not set +# CONFIG_MOXA_SMARTIO_NEW is not set # CONFIG_ISI is not set # CONFIG_RISCOM8 is not set # CONFIG_SPECIALIX is not set @@ -1952,7 +2018,6 @@ CONFIG_SERIAL_JSM=m # CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y -# CONFIG_SERIAL_TIMBERDALE is not set CONFIG_UNIX98_PTYS=y CONFIG_DEVPTS_MULTIPLE_INSTANCES=y # CONFIG_LEGACY_PTYS is not set @@ -1991,6 +2056,7 @@ CONFIG_I2C_ALGOPCA=m # CONFIG_I2C_DEBUG_CHIP is not set # CONFIG_I2C_ELEKTOR is not set # CONFIG_I2C_I801 is not set +# CONFIG_I2C_I810 is not set # CONFIG_I2C_ISCH is not set # CONFIG_I2C_NFORCE2_S4985 is not set @@ -2007,6 +2073,8 @@ CONFIG_I2C_PASEMI=m CONFIG_I2C_PCA_ISA=m CONFIG_I2C_PCA_PLATFORM=m # CONFIG_I2C_PIIX4 is not set +CONFIG_I2C_PROSAVAGE=m +CONFIG_I2C_SAVAGE4=m # CONFIG_SCx200_ACB is not set # CONFIG_I2C_SIS5595 is not set # CONFIG_I2C_SIS630 is not set @@ -2017,8 +2085,8 @@ CONFIG_I2C_TINY_USB=m # CONFIG_I2C_TAOS_EVM is not set # CONFIG_I2C_VIA is not set # CONFIG_I2C_VIAPRO is not set +CONFIG_I2C_VOODOO3=m # CONFIG_I2C_DESIGNWARE is not set -# CONFIG_I2C_XILINX is not set # # I2C Hardware Sensors Chip support @@ -2045,7 +2113,11 @@ CONFIG_SENSORS_ATXP1=m CONFIG_SENSORS_CORETEMP=m CONFIG_SENSORS_DME1737=m CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_DS1337=m +CONFIG_SENSORS_DS1374=m # CONFIG_DS1682 is not set +CONFIG_SENSORS_FSCHER=m +CONFIG_SENSORS_FSCPOS=m CONFIG_SENSORS_F71805F=m CONFIG_SENSORS_F71882FG=m CONFIG_SENSORS_F75375S=m @@ -2077,8 +2149,11 @@ CONFIG_SENSORS_LM93=m CONFIG_SENSORS_LTC4245=m CONFIG_SENSORS_MAX1619=m CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6875=m CONFIG_SENSORS_PC87360=m CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_PCA9539=m +CONFIG_SENSORS_PCF8574=m CONFIG_SENSORS_PCF8591=m CONFIG_SENSORS_SHT15=m CONFIG_SENSORS_SIS5595=m @@ -2107,8 +2182,6 @@ CONFIG_SENSORS_WM8350=m CONFIG_SENSORS_WM831X=m CONFIG_SENSORS_LM73=m CONFIG_SENSORS_AMC6821=m -CONFIG_SENSORS_ADT7411=m -CONFIG_SENSORS_ASC7621=m CONFIG_W1=m CONFIG_W1_CON=y @@ -2145,7 +2218,9 @@ CONFIG_IPMI_POWEROFF=m CONFIG_WATCHDOG=y # CONFIG_WATCHDOG_NOWAYOUT 
is not set CONFIG_SOFT_WATCHDOG=m +# CONFIG_WDT_501 is not set CONFIG_WDTPCI=m +CONFIG_WDT_501_PCI=y # CONFIG_ACQUIRE_WDT is not set # CONFIG_ADVANTECH_WDT is not set # CONFIG_EUROTECH_WDT is not set @@ -2176,7 +2251,6 @@ CONFIG_USBPCWATCHDOG=m # CONFIG_SBC_EPX_C3_WATCHDOG is not set CONFIG_WM8350_WATCHDOG=m CONFIG_WM831X_WATCHDOG=m -# CONFIG_MAX63XX_WATCHDOG is not set CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_TIMERIOMEM=m @@ -2220,7 +2294,7 @@ CONFIG_RTC_DRV_DS1286=m CONFIG_RTC_DRV_M48T35=m CONFIG_RTC_DRV_BQ4802=m CONFIG_RTC_DRV_WM8350=m -# CONFIG_RTC_DRV_AB3100 is not set +CONFIG_RTC_DRV_AB3100=m CONFIG_RTC_DRV_WM831X=m CONFIG_RTC_DRV_BQ32K=m CONFIG_RTC_DRV_MSM6242=m @@ -2246,7 +2320,6 @@ CONFIG_AGP_SWORKS=y CONFIG_AGP_VIA=y CONFIG_AGP_EFFICEON=y CONFIG_VGA_ARB=y -CONFIG_VGA_ARB_MAX_GPUS=16 CONFIG_DRM=m CONFIG_DRM_TDFX=m CONFIG_DRM_R128=m @@ -2261,10 +2334,11 @@ CONFIG_DRM_I915=m CONFIG_DRM_I915_KMS=y CONFIG_DRM_VIA=m CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_NOUVEAU_KMS=y CONFIG_DRM_NOUVEAU_BACKLIGHT=y CONFIG_DRM_NOUVEAU_DEBUG=y CONFIG_DRM_I2C_CH7006=m -CONFIG_DRM_VMWGFX=m +CONFIG_DRM_VMWGFX=n # # PCMCIA character devices @@ -2310,12 +2384,12 @@ CONFIG_VIDEO_CPIA2=m CONFIG_VIDEO_CQCAM=m CONFIG_VIDEO_CX23885=m CONFIG_VIDEO_CX18=m -CONFIG_VIDEO_CX18_ALSA=m CONFIG_VIDEO_CX88=m CONFIG_VIDEO_CX88_DVB=m CONFIG_VIDEO_CX88_ALSA=m CONFIG_VIDEO_CX88_BLACKBIRD=m CONFIG_VIDEO_CX88_VP3054=m +CONFIG_VIDEO_DPC=m CONFIG_VIDEO_EM28XX=m CONFIG_VIDEO_EM28XX_ALSA=m CONFIG_VIDEO_EM28XX_DVB=m @@ -2349,7 +2423,6 @@ CONFIG_VIDEO_ZORAN_LML33R10=m CONFIG_VIDEO_ZORAN_ZR36060=m CONFIG_VIDEO_FB_IVTV=m CONFIG_VIDEO_SAA7164=m -CONFIG_VIDEO_TLG2300=m CONFIG_USB_VIDEO_CLASS=m CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y @@ -2470,7 +2543,6 @@ CONFIG_DVB_ISL6405=m CONFIG_DVB_LGS8GL5=m CONFIG_DVB_DUMMY_FE=m CONFIG_DVB_FIREDTV=m -CONFIG_DVB_NGENE=m # # Supported SAA7146 based PCI Adapters @@ -2518,7 +2590,6 @@ CONFIG_DVB_USB_TTUSB2=m CONFIG_DVB_USB_UMT_010=m CONFIG_DVB_USB_VP702X=m CONFIG_DVB_USB_VP7045=m -CONFIG_DVB_USB_AZ6027=m CONFIG_DVB_PT1=m @@ -2529,8 +2600,11 @@ CONFIG_DVB_HOPPER=m CONFIG_VIDEO_SAA7146=m CONFIG_VIDEO_SAA7146_VV=m CONFIG_VIDEO_TUNER=m +# CONFIG_VIDEO_TUNER_CUSTOMISE is not set CONFIG_VIDEO_BTCX=m CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_PVRUSB2_24XXX=y +CONFIG_VIDEO_PVRUSB2_29XXX=y CONFIG_VIDEO_PVRUSB2_SYSFS=y # CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set @@ -2547,6 +2621,7 @@ CONFIG_DISPLAY_SUPPORT=m CONFIG_VIDEO_OUTPUT_CONTROL=m CONFIG_FB=y +CONFIG_VIDEO_SELECT=y # CONFIG_FB_FOREIGN_ENDIAN is not set CONFIG_FB_3DFX=m CONFIG_FB_3DFX_ACCEL=y @@ -2562,6 +2637,7 @@ CONFIG_FB_ATY_GENERIC_LCD=y # CONFIG_FB_CARMINE is not set CONFIG_FB_CIRRUS=m # CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_CYBLA is not set # CONFIG_FB_GEODE is not set # CONFIG_FB_HECUBA is not set # CONFIG_FB_HGA is not set @@ -2569,6 +2645,7 @@ CONFIG_FB_I810=m CONFIG_FB_I810_GTF=y CONFIG_FB_I810_I2C=y # CONFIG_FB_IMSTT is not set +# CONFIG_FB_IMAC is not set # CONFIG_FB_INTEL is not set # CONFIG_FB_INTEL_DEBUG is not set # CONFIG_FB_INTEL_I2C is not set @@ -2580,6 +2657,7 @@ CONFIG_FB_MATROX_MYSTIQUE=y CONFIG_FB_MATROX_G=y CONFIG_FB_MATROX_I2C=m CONFIG_FB_MATROX_MAVEN=m +CONFIG_FB_MATROX_MULTIHEAD=y CONFIG_FB_NEOMAGIC=m CONFIG_FB_NVIDIA=m # CONFIG_FB_NVIDIA_DEBUG is not set @@ -2604,6 +2682,7 @@ CONFIG_FB_SIS_315=y CONFIG_FB_SM501=m CONFIG_FB_TILEBLITTING=y CONFIG_FB_TRIDENT=m +CONFIG_FB_TRIDENT_ACCEL=y # CONFIG_FB_UVESA is not set CONFIG_FB_VESA=y CONFIG_FB_VGA16=m @@ -2755,7 +2834,9 @@ CONFIG_SND_ICE1724=m CONFIG_SND_INTEL8X0=y CONFIG_SND_INTEL8X0M=m 
CONFIG_SND_KORG1212=m +CONFIG_SND_KORG1212_FIRMWARE_IN_KERNEL=y CONFIG_SND_MAESTRO3=m +CONFIG_SND_MAESTRO3_FIRMWARE_IN_KERNEL=y CONFIG_SND_MIRO=m CONFIG_SND_MIXART=m CONFIG_SND_NM256=m @@ -2776,6 +2857,7 @@ CONFIG_SND_VIA82XX_MODEM=m CONFIG_SND_VIRTUOSO=m CONFIG_SND_VX222=m CONFIG_SND_YMFPCI=m +CONFIG_SND_YMFPCI_FIRMWARE_IN_KERNEL=y # # ALSA USB devices @@ -2786,7 +2868,6 @@ CONFIG_SND_USB_CAIAQ=m CONFIG_SND_USB_CAIAQ_INPUT=y CONFIG_SND_USB_USX2Y=m CONFIG_SND_USB_US122L=m -CONFIG_SND_USB_UA101=m # # PCMCIA devices @@ -2810,6 +2891,7 @@ CONFIG_USB_SUPPORT=y # DEPRECATED: See bug 362221. Fix udev. # CONFIG_USB_DEVICE_CLASS is not set +CONFIG_USB_PERSIST=y # # Miscellaneous USB options @@ -2822,6 +2904,7 @@ CONFIG_USB_SUSPEND=y # USB Host Controller Drivers # CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_SPLIT_ISO=y CONFIG_USB_EHCI_ROOT_HUB_TT=y CONFIG_USB_EHCI_TT_NEWSCHED=y CONFIG_USB_OHCI_HCD=y @@ -2851,8 +2934,10 @@ CONFIG_USB_STORAGE_CYPRESS_ATACB=y CONFIG_USB_STORAGE_DATAFAB=y CONFIG_USB_STORAGE_FREECOM=y CONFIG_USB_STORAGE_ISD200=y +CONFIG_USB_STORAGE_DPCM=y CONFIG_USB_STORAGE_SDDR09=y CONFIG_USB_STORAGE_SDDR55=y +CONFIG_USB_STORAGE_SIERRA=y CONFIG_USB_STORAGE_JUMPSHOT=y CONFIG_USB_STORAGE_USBAT=y CONFIG_USB_STORAGE_ONETOUCH=y @@ -2869,8 +2954,11 @@ CONFIG_HID_SUPPORT=y CONFIG_HID=m # debugging default is y upstream now +CONFIG_HID_DEBUG=y CONFIG_HIDRAW=y +CONFIG_HID_FF=y CONFIG_HID_PID=y +# CONFIG_HID_COMPAT is not set CONFIG_LOGITECH_FF=y CONFIG_LOGIRUMBLEPAD2_FF=y CONFIG_PANTHERLORD_FF=y @@ -2882,13 +2970,6 @@ CONFIG_USB_IDMOUSE=m CONFIG_DRAGONRISE_FF=y CONFIG_GREENASIA_FF=y CONFIG_SMARTJOYPLUS_FF=y -CONFIG_HID_3M_PCT=y -CONFIG_LOGIG940_FF=y -CONFIG_HID_MAGICMOUSE=y -CONFIG_HID_MOSART=y -CONFIG_HID_NTRIG=y -CONFIG_HID_QUANTA=y -CONFIG_HID_STANTUM=y # # USB Imaging devices @@ -2988,8 +3069,6 @@ CONFIG_USB_NET_CDC_EEM=m CONFIG_USB_NET_ZAURUS=m CONFIG_USB_NET_INT51X1=m CONFIG_USB_CDC_PHONET=m -CONFIG_USB_IPHETH=m -CONFIG_USB_SIERRA_NET=m # # USB Host-to-Host Cables @@ -3017,9 +3096,11 @@ CONFIG_USB_USS720=m CONFIG_USB_SERIAL=m CONFIG_USB_SERIAL_GENERIC=y CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_AIRPRIME=m CONFIG_USB_SERIAL_ARK3116=m CONFIG_USB_SERIAL_BELKIN=m CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_CP2101=m CONFIG_USB_SERIAL_CYPRESS_M8=m CONFIG_USB_SERIAL_CYBERJACK=m CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m @@ -3072,8 +3153,6 @@ CONFIG_USB_SERIAL_TI=m CONFIG_USB_SERIAL_VISOR=m CONFIG_USB_SERIAL_WHITEHEAT=m CONFIG_USB_SERIAL_XIRCOM=m -CONFIG_USB_SERIAL_QCAUX=m -CONFIG_USB_SERIAL_VIVOPAY_SERIAL=m CONFIG_USB_SERIAL_DEBUG=m CONFIG_USB_EZUSB=y @@ -3091,6 +3170,7 @@ CONFIG_USB_SEVSEG=m CONFIG_USB_ALI_M5632=y CONFIG_USB_APPLEDISPLAY=m CONFIG_USB_ATM=m +CONFIG_USB_AUERSWALD=m CONFIG_USB_BERRY_CHARGE=m CONFIG_USB_CXACRU=m # CONFIG_USB_C67X00_HCD is not set @@ -3101,8 +3181,10 @@ CONFIG_USB_FTDI_ELAN=m CONFIG_USB_FILE_STORAGE=m # CONFIG_USB_FILE_STORAGE_TEST is not set # CONFIG_USB_GADGET is not set +# CONFIG_USB_GADGET_PXA2XX is not set # CONFIG_USB_GADGET_GOKU is not set # CONFIG_USB_GADGETFS is not set +# CONFIG_USB_HIDINPUT_POWERBOOK is not set # CONFIG_USB_ISP116X_HCD is not set # CONFIG_USB_ISP1760_HCD is not set # CONFIG_USB_OXU210HP_HCD is not set @@ -3113,6 +3195,10 @@ CONFIG_USB_LCD=m CONFIG_USB_LD=m CONFIG_USB_LEGOTOWER=m CONFIG_USB_MON=y +CONFIG_USB_PHIDGET=m +CONFIG_USB_PHIDGETSERVO=m +CONFIG_USB_PHIDGETKIT=m +CONFIG_USB_PHIDGETMOTORCONTROL=m CONFIG_USB_PWC=m CONFIG_USB_PWC_INPUT_EVDEV=y # CONFIG_USB_PWC_DEBUG is not set @@ -3132,6 +3218,7 @@ CONFIG_USB_STKWEBCAM=m 
CONFIG_USB_TRANCEVIBRATOR=m CONFIG_USB_U132_HCD=m CONFIG_USB_UEAGLEATM=m +CONFIG_USB_UVCVIDEO=m CONFIG_USB_XUSBATM=m # CONFIG_USB_ZC0301 is not set CONFIG_USB_ZERO=m @@ -3153,26 +3240,21 @@ CONFIG_SSB_DRIVER_PCICORE=y # CONFIG_MFD_PCF50633 is not set CONFIG_PCF50633_ADC=m CONFIG_PCF50633_GPIO=m -# CONFIG_AB3100_CORE is not set +CONFIG_AB3100_CORE=m CONFIG_INPUT_PCF50633_PMU=m CONFIG_INPUT_GPIO_ROTARY_ENCODER=m CONFIG_CHARGER_PCF50633=m +CONFIG_REGULATOR_PCF50633=m CONFIG_RTC_DRV_PCF50633=m CONFIG_MFD_SM501=m CONFIG_MFD_SM501_GPIO=y # CONFIG_MFD_TC6393XB is not set CONFIG_MFD_WM8400=m -# CONFIG_MFD_WM8350_I2C is not set -# CONFIG_MFD_WM8350 is not set +CONFIG_MFD_WM8350_I2C=m +CONFIG_MFD_WM8350=m # CONFIG_MFD_WM831X is not set -# CONFIG_AB3100_OTP is not set -# CONFIG_MFD_TIMBERDALE is not set -# CONFIG_MFD_WM8994 is not set -# CONFIG_MFD_88PM860X is not set -# CONFIG_LPC_SCH is not set -# CONFIG_HTC_I2CPLD is not set -# CONFIG_MFD_MAX8925 is not set +CONFIG_AB3100_OTP=m # # File systems @@ -3195,6 +3277,7 @@ CONFIG_EXT4_FS=y CONFIG_EXT4_FS_XATTR=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4DEV_COMPAT=y CONFIG_JBD2=y CONFIG_FS_MBCACHE=y CONFIG_REISERFS_FS=m @@ -3213,6 +3296,7 @@ CONFIG_XFS_FS=m # CONFIG_XFS_RT is not set CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y +CONFIG_XFS_SECURITY=y CONFIG_MINIX_FS=m CONFIG_ROMFS_FS=m CONFIG_QUOTA=y @@ -3228,8 +3312,6 @@ CONFIG_AUTOFS4_FS=m CONFIG_EXOFS_FS=m # CONFIG_EXOFS_DEBUG is not set CONFIG_NILFS2_FS=m -CONFIG_LOGFS=m -CONFIG_CEPH_FS=m CONFIG_FSCACHE=m CONFIG_FSCACHE_STATS=y @@ -3298,6 +3380,7 @@ CONFIG_SQUASHFS=m CONFIG_VXFS_FS=m # CONFIG_HPFS_FS is not set CONFIG_QNX4FS_FS=m +# CONFIG_QNX4FS_RW is not set CONFIG_SYSV_FS=m CONFIG_UFS_FS=m # CONFIG_UFS_FS_WRITE is not set @@ -3317,17 +3400,21 @@ CONFIG_NFS_V3=y CONFIG_NFS_V3_ACL=y CONFIG_NFS_V4=y CONFIG_NFS_V4_1=y +CONFIG_NFS_DIRECTIO=y CONFIG_NFSD=m CONFIG_NFSD_V3=y CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y +CONFIG_NFSD_TCP=y CONFIG_NFS_FSCACHE=y CONFIG_LOCKD=m CONFIG_LOCKD_V4=y CONFIG_EXPORTFS=m CONFIG_SUNRPC=m +CONFIG_SUNRPC_BIND34=y CONFIG_SUNRPC_GSS=m CONFIG_SUNRPC_XPRT_RDMA=m +# CONFIG_SUNRPC_REGISTER_V4 is not set CONFIG_RPCSEC_GSS_KRB5=m CONFIG_RPCSEC_GSS_SPKM3=m # CONFIG_SMB_FS is not set @@ -3352,6 +3439,7 @@ CONFIG_NCPFS_SMALLDOS=y CONFIG_NCPFS_NLS=y CONFIG_NCPFS_EXTRAS=y CONFIG_CODA_FS=m +# CONFIG_CODA_FS_OLD_API is not set # CONFIG_AFS_FS is not set # CONFIG_AF_RXRPC is not set @@ -3361,6 +3449,8 @@ CONFIG_OCFS2_FS=m CONFIG_OCFS2_FS_O2CB=m CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m # CONFIG_OCFS2_FS_STATS is not set +# CONFIG_OCFS2_COMPAT_JBD is not set +CONFIG_OCFS2_FS_POSIX_ACL=y CONFIG_BTRFS_FS=m CONFIG_BTRFS_FS_POSIX_ACL=y @@ -3482,13 +3572,18 @@ CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_NETWORK_XFRM=y # CONFIG_SECURITY_PATH is not set +CONFIG_SECURITY_CAPABILITIES=y +CONFIG_SECURITY_FILE_CAPABILITIES=y +# CONFIG_SECURITY_ROOTPLUG is not set CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_SECURITY_SELINUX_DEVELOP=y CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +CONFIG_SECURITY_SELINUX_ENABLE_SECMARK_DEFAULT=y CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536 # CONFIG_SECURITY_SMACK is not set # CONFIG_SECURITY_TOMOYO is not set CONFIG_AUDIT=y @@ -3557,9 +3652,9 @@ CONFIG_CRYPTO_GHASH=m CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DEV_HIFN_795X=m CONFIG_CRYPTO_DEV_HIFN_795X_RNG=y -CONFIG_CRYPTO_PCRYPT=m # Random number generation 
+CONFIG_CRYPTO_PRNG=m # # Library routines @@ -3637,6 +3732,7 @@ CONFIG_PROC_EVENTS=y CONFIG_IBMASR=m +CONFIG_PM_LEGACY=y CONFIG_PM_DEBUG=y CONFIG_PM_TRACE=y # CONFIG_PM_VERBOSE is not set @@ -3661,6 +3757,7 @@ CONFIG_SCSI_ARCMSR_AER=y # CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set # CONFIG_SCSI_NCR53C406A is not set # CONFIG_SCSI_PAS16 is not set +# CONFIG_SCSI_PSI240I is not set # CONFIG_SCSI_QLOGIC_FAS is not set # CONFIG_SCSI_SYM53C416 is not set # CONFIG_SCSI_T128 is not set @@ -3748,21 +3845,23 @@ CONFIG_RADIO_ADAPTERS=y # CONFIG_RADIO_TEA5764 is not set # CONFIG_RADIO_TYPHOON is not set # CONFIG_RADIO_ZOLTRIX is not set -# CONFIG_RADIO_SAA7706H is not set # CONFIG_SND_OPL4_LIB is not set +# CONFIG_SND_AD1848_LIB is not set # CONFIG_SND_AD1816A is not set # CONFIG_SND_AD1848 is not set # CONFIG_SND_CS4231 is not set +# CONFIG_SND_CS4232 is not set +CONFIG_SND_CS4231_LIB=m CONFIG_SND_CS4236=m # CONFIG_SND_ES968 is not set # CONFIG_SND_ES1688 is not set # CONFIG_SND_ES18XX is not set +# CONFIG_SND_GUS_SYNTH is not set # CONFIG_SND_GUSCLASSIC is not set # CONFIG_SND_GUSEXTREME is not set # CONFIG_SND_GUSMAX is not set # CONFIG_SND_INTERWAVE is not set -# CONFIG_SND_JAZZ16 is not set # CONFIG_SND_INTERWAVE_STB is not set # CONFIG_SND_OPTI92X_AD1848 is not set # CONFIG_SND_OPTI92X_CS4231 is not set @@ -3796,18 +3895,25 @@ CONFIG_SND_INDIGODJX=m ## END of ISA options. +# CONFIG_FORCED_INLINING is not set CONFIG_MIGRATION=y +CONFIG_RESOURCES_64BIT=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y # CONFIG_LEDS_AMS_DELTA is not set +# CONFIG_LEDS_CORGI is not set +# CONFIG_LEDS_IXP4XX is not set # CONFIG_LEDS_LOCOMO is not set # CONFIG_LEDS_NET48XX is not set # CONFIG_LEDS_PCA9532 is not set # CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_LP5521 is not set # CONFIG_LEDS_BD2802 is not set # CONFIG_LEDS_S3C24XX is not set -CONFIG_LEDS_DELL_NETBOOKS=m +# CONFIG_LEDS_SPITZ is not set +# CONFIG_LEDS_TOSA is not set +CONFIG_LEDS_HP_DISK=m CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=m CONFIG_LEDS_TRIGGER_IDE_DISK=y @@ -3836,8 +3942,10 @@ CONFIG_UTRACE=y CONFIG_FTRACE=y # CONFIG_IRQSOFF_TRACER is not set CONFIG_SCHED_TRACER=y +CONFIG_PROCESS_TRACER=y CONFIG_CONTEXT_SWITCH_TRACER=y CONFIG_WORKQUEUE_TRACER=y +CONFIG_EVENT_TRACER=y CONFIG_FTRACE_SYSCALLS=y CONFIG_KMEMTRACE=y CONFIG_FTRACE_MCOUNT_RECORD=y @@ -3845,12 +3953,8 @@ CONFIG_FTRACE_MCOUNT_RECORD=y # CONFIG_TRACE_BRANCH_PROFILING is not set CONFIG_FUNCTION_PROFILER=y CONFIG_RING_BUFFER_BENCHMARK=m -CONFIG_FUNCTION_TRACER=y -CONFIG_STACK_TRACER=y -CONFIG_DYNAMIC_FTRACE=y CONFIG_KPROBES=y -CONFIG_OPTPROBES=y # CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set @@ -3888,10 +3992,12 @@ CONFIG_UIO_PDRV_GENIRQ=m CONFIG_UIO_AEC=m CONFIG_UIO_SERCOS3=m CONFIG_UIO_PCI_GENERIC=m -# CONFIG_UIO_NETX is not set +CONFIG_INSTRUMENTATION=y +CONFIG_MARKERS=y # CONFIG_CRC7 is not set +CONFIG_DEFAULT_RELATIME=y # LIRC CONFIG_INPUT_LIRC=m @@ -3921,6 +4027,7 @@ CONFIG_R6040=m CONFIG_BNX2X=m CONFIG_NOZOMI=m +CONFIG_PCF8575=m # CONFIG_TPS65010 is not set # CONFIG_DEBUG_SECTION_MISMATCH is not set # CONFIG_KPROBES_SANITY_TEST is not set @@ -3929,6 +4036,7 @@ CONFIG_LATENCYTOP=y CONFIG_RESOURCE_COUNTERS=y # CONFIG_COMPAT_BRK is not set +CONFIG_ACPI_CUSTOM_DSDT_INITRD=y #FIXME: x86 generic? 
CONFIG_LEDS_CLEVO_MAIL=m @@ -3942,6 +4050,7 @@ CONFIG_ENCLOSURE_SERVICES=m CONFIG_ISL29003=m CONFIG_IPWIRELESS=m CONFIG_RTC_DRV_DS1511=m +CONFIG_CGROUP_MEM_CONT=y # CONFIG_BLK_DEV_XIP is not set CONFIG_MEMSTICK=m @@ -3967,6 +4076,7 @@ CONFIG_OPTIMIZE_INLINING=y # CONFIG_GPIOLIB is not set +CONFIG_UNEVICTABLE_LRU=y CONFIG_NETFILTER_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TPROXY=m @@ -3994,8 +4104,22 @@ CONFIG_W1_SLAVE_BQ27000=m CONFIG_IT87_WDT=m CONFIG_W83697UG_WDT=m -# CONFIG_REGULATOR is not set +CONFIG_REGULATOR=y # CONFIG_REGULATOR_DEBUG is not set +CONFIG_REGULATOR_FIXED_VOLTAGE=m +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +CONFIG_REGULATOR_BQ24022=m +CONFIG_REGULATOR_WM8350=m +CONFIG_REGULATOR_WM8400=m +CONFIG_REGULATOR_DA903X=m +CONFIG_REGULATOR_USERSPACE_CONSUMER=m +CONFIG_REGULATOR_MAX1586=m +CONFIG_REGULATOR_LP3971=m +CONFIG_REGULATOR_AB3100=m +CONFIG_REGULATOR_TPS65023=m +CONFIG_REGULATOR_TPS6507X=m +CONFIG_REGULATOR_WM831X=m +CONFIG_REGULATOR_MAX8660=m CONFIG_WM8350_POWER=m @@ -4018,29 +4142,41 @@ CONFIG_STAGING=y # CONFIG_STAGING_EXCLUDE_BUILD is not set # CONFIG_ET131X is not set # CONFIG_SLICOSS is not set +# CONFIG_SXG is not set +# CONFIG_ME4000 is not set +# CONFIG_MEILHAUS is not set # CONFIG_VIDEO_GO7007 is not set # CONFIG_USB_IP_COMMON is not set -# CONFIG_DT3155 is not set # CONFIG_W35UND is not set # CONFIG_PRISM2_USB is not set # CONFIG_ECHO is not set CONFIG_USB_ATMEL=m # CONFIG_POCH is not set +# CONFIG_AGNX is not set # CONFIG_OTUS is not set # CONFIG_RT2860 is not set # CONFIG_RT2870 is not set +# CONFIG_BENET is not set # CONFIG_COMEDI is not set # CONFIG_ASUS_OLED is not set # CONFIG_PANEL is not set # CONFIG_ALTERA_PCIE_CHDMA is not set +# CONFIG_RTL8187SE is not set # CONFIG_INPUT_MIMIO is not set # CONFIG_TRANZPORT is not set +# CONFIG_EPL is not set # CONFIG_POHMELFS is not set +# CONFIG_USB_SERIAL_ATEN2011 is not set # CONFIG_B3DFG is not set +# CONFIG_DST is not set # CONFIG_IDE_PHISON is not set # CONFIG_PLAN9AUTH is not set +# CONFIG_HECI is not set # CONFIG_LINE6_USB is not set +# CONFIG_USB_SERIAL_QUATECH_ESU100 is not set +# CONFIG_RT3070 is not set # CONFIG_RTL8192SU is not set +# CONFIG_COWLOOP is not set # CONFIG_IIO is not set # CONFIG_VME_BUS is not set # CONFIG_RAR_REGISTER is not set @@ -4049,6 +4185,7 @@ CONFIG_USB_ATMEL=m # CONFIG_RTL8192E is not set # CONFIG_INPUT_GPIO is not set # CONFIG_VIDEO_CX25821 is not set +# CONFIG_RT3090 is not set # CONFIG_HYPERV is not set # CONFIG_R8187SE is not set # CONFIG_RTL8192U is not set @@ -4059,12 +4196,19 @@ CONFIG_USB_ATMEL=m # # Android # +# CONFIG_ANDROID is not set +# CONFIG_ANDROID_BINDER_IPC is not set +# CONFIG_ANDROID_LOGGER is not set +# CONFIG_ANDROID_RAM_CONSOLE is not set +# CONFIG_ANDROID_LOW_MEMORY_KILLER is not set +# CONFIG_ANDROID_TIMED_GPIO is not set # CONFIG_DEBUG_VIRTUAL is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set # CONFIG_FUNCTION_GRAPH_TRACER is not set # CONFIG_BOOT_TRACER is not set +# CONFIG_DYNAMIC_PRINTK_DEBUG is not set CONFIG_EARLY_PRINTK_DBGP=y CONFIG_SECURITYFS=y @@ -4096,7 +4240,6 @@ CONFIG_SLOW_WORK_DEBUG=y CONFIG_STRIP_ASM_SYMS=y # CONFIG_RCU_FANOUT_EXACT is not set -CONFIG_RCU_FAST_NO_HZ=y CONFIG_KSM=y CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 @@ -4110,12 +4253,12 @@ CONFIG_IEEE802154_FAKEHARD=m # CONFIG_GCOV_KERNEL is not set CONFIG_PPS=m -# CONFIG_PPS_CLIENT_KTIMER is not set -CONFIG_PPS_CLIENT_LDISC=m # CONFIG_PPS_DEBUG is not set # CONFIG_USB_SERIAL_QUATECH2 is not set # CONFIG_VT6655 is not set +# CONFIG_USB_CPC is 
not set +# CONFIG_RDC_17F3101X is not set # CONFIG_FB_UDL is not set # DEBUG options that don't get enabled/disabled with 'make debug/release' @@ -4163,8 +4306,6 @@ CONFIG_BLK_DEV_DRBD=m # CONFIG_GPIO_PCF857X is not set # CONFIG_GPIO_CS5535 is not set # CONFIG_GPIO_ADP5588 is not set -# CONFIG_GPIO_IT8761E is not set -# CONFIG_GPIO_MAX7300 is not set # CONFIG_UCB1400_CORE is not set # CONFIG_RADIO_MIROPCM20 is not set # CONFIG_USB_GPIO_VBUS is not set diff --git a/config-ia64-generic b/config-ia64-generic index 746fdf2..281ce46 100644 --- a/config-ia64-generic +++ b/config-ia64-generic @@ -8,7 +8,11 @@ CONFIG_IA64=y CONFIG_64BIT=y # CONFIG_XEN is not set +# CONFIG_ARCH_XEN is not set +# CONFIG_XEN_PRIVILEGED_GUEST is not set +# CONFIG_XEN_VT is not set CONFIG_MMU=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y CONFIG_EFI=y # CONFIG_ITANIUM is not set CONFIG_MCKINLEY=y @@ -52,6 +56,7 @@ CONFIG_EFI_PCDP=y # # IDE chipset support/bugfixes # +CONFIG_IDE_MAX_HWIFS=4 CONFIG_BLK_DEV_SGIIOC4=y # @@ -99,6 +104,7 @@ CONFIG_AGP_SGI_TIOCA=y # SGI # CONFIG_SGI_SNSC=y +CONFIG_IA64_SGI_SN_XP=y CONFIG_SGI_TIOCX=y CONFIG_SGI_MBCS=m CONFIG_SGI_IOC3=m @@ -122,10 +128,12 @@ CONFIG_ACPI_AC=y # CONFIG_ACPI_ASUS is not set CONFIG_ACPI_PROCFS_POWER=y CONFIG_ACPI_SYSFS_POWER=y +# CONFIG_ACPI_BAY is not set # CONFIG_ACPI_BATTERY is not set CONFIG_ACPI_BLACKLIST_YEAR=0 CONFIG_ACPI_BUTTON=y # CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_EC=y CONFIG_ACPI_FAN=y CONFIG_ACPI_HOTPLUG_MEMORY=y CONFIG_ACPI_NUMA=y @@ -133,6 +141,7 @@ CONFIG_ACPI_POWER=y CONFIG_ACPI_PROCESSOR=y CONFIG_ACPI_PROCFS=y CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_SYSTEM=y CONFIG_ACPI_THERMAL=y # CONFIG_ACPI_TOSHIBA is not set CONFIG_ACPI_VIDEO=m @@ -168,6 +177,7 @@ CONFIG_IA64_ACPI_CPUFREQ=m CONFIG_NODES_SHIFT=10 +# CONFIG_BCM43XX is not set CONFIG_HW_RANDOM_INTEL=m diff --git a/config-nodebug b/config-nodebug index dcbb617..107955d 100644 --- a/config-nodebug +++ b/config-nodebug @@ -8,7 +8,6 @@ CONFIG_SND_PCM_XRUN_DEBUG=y # CONFIG_PROVE_LOCKING is not set # CONFIG_DEBUG_VM is not set # CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_PROVE_RCU is not set # CONFIG_FAULT_INJECTION is not set # CONFIG_FAILSLAB is not set @@ -64,6 +63,10 @@ CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 # CONFIG_JBD2_DEBUG is not set +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_DYNAMIC_FTRACE is not set + # CONFIG_DEBUG_CFQ_IOSCHED is not set # CONFIG_DRBD_FAULT_INJECTION is not set @@ -73,11 +76,3 @@ CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 # CONFIG_DEBUG_OBJECTS_WORK is not set # CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set - -# CONFIG_DMADEVICES_DEBUG is not set -# CONFIG_DMADEVICES_VDEBUG is not set - -CONFIG_PM_ADVANCED_DEBUG=y - -# CONFIG_CEPH_FS_PRETTYDEBUG is not set -# CONFIG_QUOTA_DEBUG is not set diff --git a/config-powerpc-generic b/config-powerpc-generic index ceace82..3bcecab 100644 --- a/config-powerpc-generic +++ b/config-powerpc-generic @@ -35,9 +35,11 @@ CONFIG_HIBERNATION=y # CONFIG_RTC is not set # CONFIG_GEN_RTC is not set # CONFIG_GEN_RTC_X is not set +CONFIG_RTC_DRV_PPC=y CONFIG_RTC_DRV_GENERIC=y CONFIG_PROC_DEVICETREE=y # CONFIG_CMDLINE_BOOL is not set +CONFIG_ELECTRA_IDE=y CONFIG_ADB=y CONFIG_ADB_PMU=y @@ -103,6 +105,7 @@ CONFIG_MEMORY_HOTPLUG=y # CONFIG_SCSI_DC390T is not set # CONFIG_SCSI_BUSLOGIC is not set +CONFIG_USB_HIDINPUT_POWERBOOK=y # CONFIG_PPC_EARLY_DEBUG is not set @@ -140,9 +143,13 @@ CONFIG_BLK_DEV_IDE=y # Please see Documentation/ide.txt for help/info on IDE drives # # CONFIG_BLK_DEV_IDE_SATA is not set +CONFIG_BLK_DEV_IDEDISK=y 
+CONFIG_IDEDISK_MULTI_MODE=y # CONFIG_BLK_DEV_IDECS is not set CONFIG_BLK_DEV_IDECD=m # CONFIG_BLK_DEV_IDETAPE is not set +CONFIG_BLK_DEV_IDEFLOPPY=m +# CONFIG_BLK_DEV_IDESCSI is not set CONFIG_IDE_TASK_IOCTL=y # # IDE chipset support/bugfixes @@ -150,6 +157,7 @@ CONFIG_IDE_TASK_IOCTL=y # CONFIG_IDE_GENERIC is not set # CONFIG_BLK_DEV_IDEPNP is not set # CONFIG_BLK_DEV_IDEPCI is not set +CONFIG_IDEPCI_SHARE_IRQ=y # CONFIG_BLK_DEV_AEC62XX is not set # CONFIG_BLK_DEV_ALI15X3 is not set # CONFIG_BLK_DEV_AMD74XX is not set @@ -158,6 +166,7 @@ CONFIG_IDE_TASK_IOCTL=y # CONFIG_BLK_DEV_CY82C693 is not set # CONFIG_BLK_DEV_CS5520 is not set # CONFIG_BLK_DEV_CS5530 is not set +# CONFIG_BLK_DEV_HPT34X is not set # CONFIG_BLK_DEV_HPT366 is not set # CONFIG_BLK_DEV_JMICRON is not set # CONFIG_BLK_DEV_SC1200 is not set @@ -174,6 +183,8 @@ CONFIG_IDE_TASK_IOCTL=y # CONFIG_BLK_DEV_VIA82CXXX is not set CONFIG_BLK_DEV_IDE_PMAC=y CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y +CONFIG_BLK_DEV_IDEDMA_PMAC=y +# CONFIG_IDE_ARM is not set CONFIG_BLK_DEV_IDEDMA=y # CONFIG_BLK_DEV_HD is not set # CONFIG_USB_STORAGE_ISD200 is not set @@ -231,7 +242,11 @@ CONFIG_EXTRA_TARGETS="" # CONFIG_SERIAL_CPM is not set # CONFIG_SERIAL_QE is not set # CONFIG_I2C_CPM is not set +# CONFIG_MT9M001_PCA9536_SWITCH is not set +# CONFIG_MT9V022_PCA9536_SWITCH is not set +# CONFIG_KGDB_CONSOLE is not set +CONFIG_MDIO_OF_GPIO=m CONFIG_SERIO_XILINX_XPS_PS2=m @@ -328,4 +343,3 @@ CONFIG_SERIAL_GRLIB_GAISLER_APBUART=m # CONFIG_MMC_SDHCI_OF_ESDHC is not set # CONFIG_MMC_SDHCI_OF_HLWD is not set -# CONFIG_GPIO_SCH is not set diff --git a/config-powerpc32-generic b/config-powerpc32-generic index a36ca81..ccd4a21 100644 --- a/config-powerpc32-generic +++ b/config-powerpc32-generic @@ -4,7 +4,9 @@ CONFIG_PPC32=y # CONFIG_RTAS_PROC is not set # CONFIG_PCMCIA_M8XX is not set # CONFIG_HOTPLUG_PCI is not set +CONFIG_CLASSIC32=y CONFIG_CPU_FREQ_PMAC=y +CONFIG_PPC_MULTIPLATFORM=y CONFIG_PPC_CHRP=y CONFIG_PPC_PMAC=y CONFIG_PPC_MPC52xx=y @@ -17,6 +19,7 @@ CONFIG_SATA_FSL=m # busted in .28git1 # ERROR: "cacheable_memzero" [drivers/net/gianfar_driver.ko] undefined! 
# CONFIG_GIANFAR is not set +CONFIG_GFAR_NAPI=y CONFIG_USB_EHCI_FSL=y CONFIG_PMAC_APM_EMU=y @@ -51,6 +54,7 @@ CONFIG_THERM_WINDTUNNEL=m CONFIG_THERM_ADT746X=m # CONFIG_ANSLCD is not set +CONFIG_SENSORS_M41T00=m CONFIG_FB_PLATINUM=y CONFIG_FB_VALKYRIE=y CONFIG_FB_CT65550=y @@ -65,6 +69,7 @@ CONFIG_FB_MATROX=y # CONFIG_KEXEC is not set # CONFIG_HVC_RTAS is not set +# CONFIG_MAMBO is not set # CONFIG_UDBG_RTAS_CONSOLE is not set CONFIG_BRIQ_PANEL=m @@ -82,6 +87,7 @@ CONFIG_SERIAL_MPC52xx=y CONFIG_SERIAL_MPC52xx_CONSOLE=y CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200 # CONFIG_MPC5200_WDT is not set +CONFIG_83xx_WDT=m CONFIG_8xxx_WDT=m CONFIG_GEF_WDT=m @@ -115,6 +121,7 @@ CONFIG_PPC_BESTCOMM_GEN_BD=m CONFIG_FORCE_MAX_ZONEORDER=11 # CONFIG_PAGE_OFFSET_BOOL is not set +CONFIG_BLK_DEV_HD_ONLY=y # CONFIG_FB_FSL_DIU is not set CONFIG_IRQSTACKS=y CONFIG_VIRTUALIZATION=y @@ -171,6 +178,7 @@ CONFIG_MPC8610_HPCD=y # drivers/mtd/maps/sbc8240.c: In function 'init_sbc8240_mtd': # drivers/mtd/maps/sbc8240.c:172: warning: passing argument 1 of 'simple_map_init' from incompatible pointer type # drivers/mtd/maps/sbc8240.c:177: error: 'struct mtd_info' has no member named 'module' +# CONFIG_MTD_SBC8240 is not set CONFIG_MTD_NAND_FSL_UPM=m diff --git a/config-powerpc64 b/config-powerpc64 index e2e8f99..3dd228a 100644 --- a/config-powerpc64 +++ b/config-powerpc64 @@ -3,6 +3,9 @@ CONFIG_WINDFARM_PM91=y CONFIG_WINDFARM_PM121=y CONFIG_PPC_PMAC64=y CONFIG_PPC_MAPLE=y +CONFIG_PPC_SYSTEMSIM=y +CONFIG_BLK_DEV_SYSTEMSIM=m +CONFIG_SYSTEMSIM_NET=m CONFIG_PPC_CELL=y CONFIG_PPC_IBM_CELL_BLADE=y CONFIG_PPC_ISERIES=y @@ -15,12 +18,16 @@ CONFIG_PPC_CELLEB=y CONFIG_PPC_CELL_QPACE=y CONFIG_PS3_HTAB_SIZE=20 # CONFIG_PS3_DYNAMIC_DMA is not set +# CONFIG_PS3_USE_LPAR_ADDR is not set CONFIG_PS3_ADVANCED=y CONFIG_PS3_HTAB_SIZE=20 # CONFIG_PS3_DYNAMIC_DMA is not set +CONFIG_PS3_USE_LPAR_ADDR=y CONFIG_PS3_VUART=y CONFIG_PS3_PS3AV=y CONFIG_PS3_STORAGE=m +CONFIG_PS3_STORAGE_EXPECTED_NUM_DRIVES=3 +CONFIG_PS3_STORAGE_MAX_SPINUP_WAIT_TIME=10 CONFIG_PS3_DISK=m CONFIG_PS3_ROM=m CONFIG_PS3_FLASH=m @@ -62,11 +69,15 @@ CONFIG_SCSI_IPR_DUMP=y CONFIG_SPIDER_NET=m CONFIG_HVC_RTAS=y CONFIG_HVC_ISERIES=y +CONFIG_MAMBO=y +CONFIG_MAMBO_DISK=m +CONFIG_MAMBO_NET=m CONFIG_CBE_RAS=y # iSeries device drivers # CONFIG_ISERIES_VETH=m +# CONFIG_VIOCONS is not set CONFIG_VIODASD=m CONFIG_VIOCD=m CONFIG_VIOTAPE=m @@ -141,6 +152,7 @@ CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0 # CONFIG_VIRQ_DEBUG is not set CONFIG_ELECTRA_CF=m +CONFIG_SPU_TRACE=m CONFIG_MTD_NAND_PASEMI=m CONFIG_EDAC_CELL=m CONFIG_EDAC_CPC925=m @@ -148,6 +160,7 @@ CONFIG_FRAME_WARN=2048 CONFIG_PHYP_DUMP=y CONFIG_FORCE_MAX_ZONEORDER=13 +CONFIG_BLK_DEV_HD_ONLY=y CONFIG_VIRTUALIZATION=y CONFIG_VSX=y diff --git a/config-rhel-generic b/config-rhel-generic index 09dbf81..e70447b 100644 --- a/config-rhel-generic +++ b/config-rhel-generic @@ -57,6 +57,7 @@ # CONFIG_SND_AD1816A is not set # CONFIG_SND_AD1848 is not set # CONFIG_SND_CS4231 is not set +# CONFIG_SND_CS4232 is not set # CONFIG_SND_CS4236 is not set # CONFIG_SND_ES968 is not set # CONFIG_SND_ES1688 is not set @@ -102,6 +103,7 @@ # CONFIG_ROCKETPORT is not set # CONFIG_R3964 is not set +# CONFIG_TIPAR is not set # CONFIG_JOYSTICK_ANALOG is not set # CONFIG_JOYSTICK_A3D is not set @@ -115,6 +117,8 @@ # CONFIG_JOYSTICK_SIDEWINDER is not set # CONFIG_JOYSTICK_TMDC is not set # CONFIG_JOYSTICK_IFORCE is not set +# CONFIG_JOYSTICK_IFORCE_USB=y +# CONFIG_JOYSTICK_IFORCE_232=y # CONFIG_JOYSTICK_WARRIOR is not set # CONFIG_JOYSTICK_MAGELLAN is not set # 
CONFIG_JOYSTICK_SPACEORB is not set @@ -133,6 +137,7 @@ # CONFIG_RADIO_MAXIRADIO is not set # CONFIG_RADIO_MAESTRO is not set # CONFIG_RADIO_MIROPCM20 is not set +# CONFIG_RADIO_MIROPCM20_RDS is not set # CONFIG_RADIO_SF16FMI is not set # CONFIG_RADIO_SF16FMR2 is not set # CONFIG_RADIO_TERRATEC is not set @@ -140,6 +145,7 @@ # CONFIG_RADIO_TYPHOON is not set # CONFIG_RADIO_ZOLTRIX is not set +# CONFIG_TUNER_3036 is not set # CONFIG_VIDEO_PMS is not set # CONFIG_VIDEO_BWQCAM is not set @@ -159,6 +165,7 @@ # CONFIG_VIDEO_MEYE is not set # CONFIG_VIDEO_SAA7134 is not set # CONFIG_VIDEO_MXB is not set +# CONFIG_VIDEO_DPC is not set # CONFIG_VIDEO_HEXIUM_ORION is not set # CONFIG_VIDEO_HEXIUM_GEMINI is not set # CONFIG_VIDEO_CX88 is not set @@ -183,6 +190,7 @@ # CONFIG_FB_ASILIANT is not set # CONFIG_FB_HGA_ACCEL is not set # CONFIG_FB_3DFX_ACCEL is not set +# CONFIG_FB_TRIDENT_ACCEL is not set # CONFIG_JFS_FS is not set # CONFIG_NCP_FS is not set diff --git a/config-s390x b/config-s390x index 331c30c..05910a0 100644 --- a/config-s390x +++ b/config-s390x @@ -13,16 +13,20 @@ CONFIG_HZ_100=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_MMU=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y CONFIG_LOG_BUF_SHIFT=16 CONFIG_NO_IDLE_HZ=y +CONFIG_NO_IDLE_HZ_INIT=y CONFIG_SMP=y # # I/O subsystem configuration # +CONFIG_MACHCHK_WARNING=y CONFIG_QDIO=m +# CONFIG_QDIO_DEBUG is not set # # Misc @@ -35,6 +39,7 @@ CONFIG_PFAULT=y CONFIG_SHARED_KERNEL=y CONFIG_CMM=m CONFIG_CMM_PROC=y +CONFIG_VIRT_TIMER=y # CONFIG_NETIUCV is not set CONFIG_SMSGIUCV=m @@ -99,6 +104,7 @@ CONFIG_S390_TAPE_34XX=m # Token Ring devices # CONFIG_TR=y +# CONFIG_SHAPER is not set CONFIG_NETCONSOLE=m # @@ -114,12 +120,15 @@ CONFIG_CTC=m CONFIG_IUCV=m CONFIG_QETH=m CONFIG_QETH_IPV6=y +CONFIG_QETH_VLAN=y CONFIG_CCWGROUP=m # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_WIRELESS_EXT is not set +# CONFIG_WLAN_80211 is not set # CONFIG_MAC80211 is not set +# CONFIG_IEEE80211 is not set # CONFIG_B44 is not set # @@ -170,6 +179,7 @@ CONFIG_VMCP=m # CONFIG_ATA_OVER_ETH is not set # CONFIG_MII is not set +# CONFIG_BOOT_DELAY is not set CONFIG_STACK_GUARD=256 CONFIG_CMM_IUCV=y @@ -182,6 +192,7 @@ CONFIG_MONWRITER=m CONFIG_ZCRYPT=m CONFIG_ZCRYPT_MONOLITHIC=y +CONFIG_S390_SWITCH_AMODE=y CONFIG_S390_EXEC_PROTECT=y CONFIG_AFIUCV=m CONFIG_S390_PRNG=m @@ -192,6 +203,7 @@ CONFIG_S390_VMUR=m CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_PAGE_STATES=y CONFIG_CTCM=m CONFIG_QETH_L2=m CONFIG_QETH_L3=m @@ -222,6 +234,4 @@ CONFIG_PERF_COUNTERS=y CONFIG_PERF_EVENTS=y CONFIG_EVENT_PROFILE=y -CONFIG_SMSGIUCV_EVENT=m - # CONFIG_PREEMPT_TRACER is not set diff --git a/config-sparc64-generic b/config-sparc64-generic index 7d9bef1..1cf6d2a 100644 --- a/config-sparc64-generic +++ b/config-sparc64-generic @@ -35,9 +35,13 @@ CONFIG_NR_CPUS=256 CONFIG_US3_FREQ=m CONFIG_US2E_FREQ=m CONFIG_SUN_OPENPROMFS=m +CONFIG_SPARC32_COMPAT=y CONFIG_COMPAT=y CONFIG_UID16=y CONFIG_BINFMT_ELF32=y +CONFIG_BINFMT_AOUT32=y +CONFIG_SUNOS_EMUL=y +CONFIG_SOLARIS_EMUL=m CONFIG_ENVCTRL=m CONFIG_DISPLAY7SEG=m CONFIG_WATCHDOG_CP1XXX=m @@ -46,10 +50,14 @@ CONFIG_WATCHDOG_RIO=m # CONFIG_PREVENT_FIRMWARE_BUILD is not set # CONFIG_PARPORT is not set # CONFIG_BLK_DEV_FD is not set +# CONFIG_LIRC_PVR150 is not set # CONFIG_LIRC_PARALLEL is not set # CONFIG_I2C_NFORCE2 is not set # CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_PROSAVAGE is not set +# CONFIG_I2C_SAVAGE4 is not set # CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_VOODOO3 is not set CONFIG_I2C_ALI1535=m # CONFIG_VGASTATE is not set # 
CONFIG_FB_DDC is not set @@ -75,6 +83,7 @@ CONFIG_FB_PM2=y CONFIG_FB_P9100=y # CONFIG_FB_LEO is not set CONFIG_FB_XVR500=y +CONFIG_FB_XVR1000=y CONFIG_FB_XVR2500=y # CONFIG_VGASTATE is not set # CONFIG_FB_DDC is not set @@ -84,6 +93,7 @@ CONFIG_FB_XVR2500=y # CONFIG_AGP is not set # CONFIG_DRM_NOUVEAU is not set # CONFIG_MDA_CONSOLE is not set +# CONFIG_PROM_CONSOLE is not set CONFIG_FONTS=y # CONFIG_FONT_8x8 is not set # CONFIG_FONT_8x16 is not set @@ -105,12 +115,15 @@ CONFIG_SERIAL_SUNSAB=y CONFIG_SERIAL_SUNSAB_CONSOLE=y CONFIG_SERIAL_SUNHV=y CONFIG_SUN_OPENPROMIO=y +CONFIG_SUN_MOSTEK_RTC=y CONFIG_OBP_FLASH=m +# CONFIG_SUN_VIDEOPIX is not set # CONFIG_SERIO_SERPORT is not set CONFIG_BLK_DEV_FD=y CONFIG_SUNVDC=m CONFIG_SUNVNET=m # CONFIG_BLK_DEV_AEC62XX is not set +# CONFIG_BLK_DEV_HPT34X is not set # CONFIG_BLK_DEV_HPT366 is not set # CONFIG_BLK_DEV_PDC202XX_OLD is not set # CONFIG_BLK_DEV_PDC202XX_NEW is not set @@ -132,6 +145,9 @@ CONFIG_SUNQE=m # CONFIG_DM9102 is not set # CONFIG_HAMACHI is not set # CONFIG_R8169 is not set +CONFIG_ATM_FORE200E_SBA=y +CONFIG_ATM_FORE200E_SBA_DEFAULT_FW=y +# CONFIG_ATM_FORE200E_SBA_FW is not set CONFIG_ATM_FORE200E_USE_TASKLET=y CONFIG_ATM_FORE200E_DEBUG=0 CONFIG_ATM_FORE200E_TX_RETRY=16 @@ -146,6 +162,7 @@ CONFIG_SND_SUN_CS4231=m # CONFIG_SND_SUN_DBRI is not set CONFIG_PARPORT_SUNBPP=m CONFIG_LOGO_SUN_CLUT224=y +CONFIG_SUN_BPP=m CONFIG_MTD_SUN_UFLASH=m CONFIG_MYRI_SBUS=m # CONFIG_SGI_IOC4 is not set @@ -196,6 +213,3 @@ CONFIG_EVENT_PROFILE=y CONFIG_EARLYFB=y CONFIG_SERIAL_GRLIB_GAISLER_APBUART=m - -CONFIG_GRETH=m -CONFIG_FB_XVR1000=y diff --git a/config-x86-generic b/config-x86-generic index 9179350..6a1af33 100644 --- a/config-x86-generic +++ b/config-x86-generic @@ -8,10 +8,13 @@ CONFIG_UID16=y # # Enable summit and co via the generic arch # +# CONFIG_X86_PC is not set +CONFIG_X86_GENERICARCH=y CONFIG_X86_EXTENDED_PLATFORM=y CONFIG_X86_32_NON_STANDARD=y # CONFIG_X86_ELAN is not set +# CONFIG_X86_VOYAGER is not set # CONFIG_X86_NUMAQ is not set # CONFIG_X86_SUMMIT is not set CONFIG_X86_BIGSMP=y @@ -33,17 +36,34 @@ CONFIG_M686=y # CONFIG_MK8 is not set # CONFIG_MCRUSOE is not set # CONFIG_MWINCHIPC6 is not set +# CONFIG_MWINCHIP2 is not set # CONFIG_MWINCHIP3D is not set # CONFIG_MCYRIXIII is not set # CONFIG_MVIAC3_2 is not set CONFIG_SMP=y CONFIG_NR_CPUS=32 CONFIG_X86_GENERIC=y -# CONFIG_X86_PPRO_FENCE is not set +CONFIG_X86_CMPXCHG=y +CONFIG_X86_L1_CACHE_SHIFT=7 +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_X86_PPRO_FENCE=y +CONFIG_X86_WP_WORKS_OK=y +CONFIG_X86_INVLPG=y +CONFIG_X86_BSWAP=y +CONFIG_X86_POPAD_OK=y +CONFIG_X86_GOOD_APIC=y +CONFIG_X86_INTEL_USERCOPY=y +CONFIG_X86_USE_PPRO_CHECKSUM=y CONFIG_HPET=y CONFIG_HPET_TIMER=y +# CONFIG_HPET_RTC_IRQ is not set # CONFIG_HPET_MMAP is not set +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_TSC=y CONFIG_X86_MCE=y +# CONFIG_X86_MCE_NONFATAL is not set +CONFIG_X86_MCE_P4THERMAL=y CONFIG_TOSHIBA=m CONFIG_I8K=m CONFIG_SONYPI=m @@ -51,6 +71,7 @@ CONFIG_SONYPI_COMPAT=y CONFIG_MICROCODE=m CONFIG_X86_MSR=y CONFIG_X86_CPUID=y +# CONFIG_X86_CPU_DEBUG is not set CONFIG_EDD=m # CONFIG_EDD_OFF is not set # CONFIG_NUMA is not set @@ -100,6 +121,16 @@ CONFIG_SECCOMP=y CONFIG_CAPI_EICON=y +CONFIG_I2O=m +CONFIG_I2O_BLOCK=m +CONFIG_I2O_SCSI=m +CONFIG_I2O_PROC=m +CONFIG_I2O_CONFIG=y +CONFIG_I2O_EXT_ADAPTEC=y +CONFIG_I2O_EXT_ADAPTEC_DMA64=y +CONFIG_I2O_CONFIG_OLD_IOCTL=y +CONFIG_I2O_BUS=m + # # APM (Advanced Power Management) BIOS Support # @@ -109,10 +140,12 @@ CONFIG_APM=y CONFIG_APM_CPU_IDLE=y # 
CONFIG_APM_DISPLAY_BLANK is not set # CONFIG_APM_ALLOW_INTS is not set +# CONFIG_APM_REAL_MODE_POWER_OFF is not set # # Kernel debugging # +CONFIG_X86_FIND_SMP_CONFIG=y CONFIG_X86_MPPARSE=y CONFIG_ACPI=y @@ -121,10 +154,12 @@ CONFIG_ACPI_AC=y CONFIG_ACPI_PROCFS_POWER=y CONFIG_ACPI_SYSFS_POWER=y CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BAY=y CONFIG_ACPI_BLACKLIST_YEAR=1999 CONFIG_ACPI_BUTTON=y CONFIG_ACPI_CONTAINER=m CONFIG_ACPI_DOCK=y +CONFIG_ACPI_EC=y CONFIG_ACPI_FAN=y CONFIG_ACPI_NUMA=y CONFIG_ACPI_PROCESSOR=y @@ -132,6 +167,7 @@ CONFIG_ACPI_POWER=y CONFIG_ACPI_PROCFS=y CONFIG_ACPI_SBS=m CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_SYSTEM=y CONFIG_ACPI_THERMAL=y CONFIG_TOPSTAR_LAPTOP=m CONFIG_ACPI_TOSHIBA=m @@ -159,10 +195,11 @@ CONFIG_CPU_FREQ_STAT=m CONFIG_CPU_FREQ_STAT_DETAILS=y CONFIG_X86_ACPI_CPUFREQ=m -CONFIG_X86_PCC_CPUFREQ=m +# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set # CONFIG_X86_POWERNOW_K6 is not set CONFIG_X86_POWERNOW_K7=y CONFIG_X86_POWERNOW_K8=m +CONFIG_X86_POWERNOW_K8_ACPI=y # CONFIG_X86_GX_SUSPMOD is not set # CONFIG_X86_SPEEDSTEP_CENTRINO is not set CONFIG_X86_SPEEDSTEP_ICH=y @@ -176,7 +213,9 @@ CONFIG_X86_LONGRUN=y # e_powersaver is dangerous # CONFIG_X86_E_POWERSAVER is not set +CONFIG_X86_SMP=y CONFIG_X86_HT=y +CONFIG_X86_BIOS_REBOOT=y CONFIG_X86_TRAMPOLINE=y # @@ -231,16 +270,20 @@ CONFIG_I2C_AMD756=m CONFIG_I2C_AMD756_S4882=m CONFIG_I2C_AMD8111=m CONFIG_I2C_I801=m +CONFIG_I2C_I810=m CONFIG_I2C_ISCH=m CONFIG_I2C_NFORCE2=m CONFIG_I2C_NFORCE2_S4985=m CONFIG_I2C_PIIX4=m +CONFIG_I2C_PROSAVAGE=m +CONFIG_I2C_SAVAGE4=m CONFIG_I2C_SIS5595=m CONFIG_I2C_SIS630=m CONFIG_I2C_SIS96X=m CONFIG_I2C_VIA=m CONFIG_I2C_VIAPRO=m +CONFIG_I2C_VOODOO3=m CONFIG_SCx200_ACB=m @@ -249,7 +292,6 @@ CONFIG_SCx200_ACB=m CONFIG_DELL_RBU=m CONFIG_DCDBAS=m -CONFIG_GPIO_SCH=m CONFIG_PC8736x_GPIO=m # CONFIG_NSC_GPIO is not set CONFIG_CS5535_GPIO=m @@ -283,16 +325,17 @@ CONFIG_HW_RANDOM_AMD=m CONFIG_HW_RANDOM_GEODE=m CONFIG_HW_RANDOM_VIA=m +CONFIG_USB_HIDINPUT_POWERBOOK=y # CONFIG_COMPAT_VDSO is not set # CONFIG_SGI_IOC4 is not set CONFIG_X86_PLATFORM_DEVICES=y +CONFIG_ACERHDF=m CONFIG_ASUS_LAPTOP=m CONFIG_COMPAL_LAPTOP=m CONFIG_EEEPC_LAPTOP=m -CONFIG_EEEPC_WMI=m CONFIG_FUJITSU_LAPTOP=m # CONFIG_FUJITSU_LAPTOP_DEBUG is not set CONFIG_MSI_LAPTOP=m @@ -300,7 +343,6 @@ CONFIG_SONY_LAPTOP=m CONFIG_DELL_LAPTOP=m CONFIG_ACPI_WMI=m CONFIG_ACER_WMI=m -CONFIG_ACERHDF=m CONFIG_TC1100_WMI=m CONFIG_HP_WMI=m CONFIG_DELL_WMI=m @@ -325,6 +367,7 @@ CONFIG_VIRTUALIZATION=y CONFIG_KVM=m CONFIG_KVM_INTEL=m CONFIG_KVM_AMD=m +CONFIG_KVM_TRACE=y CONFIG_LGUEST=m CONFIG_PARAVIRT_GUEST=y @@ -365,6 +408,7 @@ CONFIG_THINKPAD_ACPI=m # CONFIG_THINKPAD_ACPI_DEBUG is not set # CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y +CONFIG_THINKPAD_ACPI_BAY=y CONFIG_THINKPAD_ACPI_VIDEO=y CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y # CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set @@ -391,8 +435,6 @@ CONFIG_MOUSE_PS2_OLPC=y CONFIG_STRICT_DEVMEM=y -# CONFIG_NO_BOOTMEM is not set - # CONFIG_MEMTEST is not set # CONFIG_MAXSMP is not set CONFIG_MTRR_SANITIZER=y @@ -438,6 +480,7 @@ CONFIG_PERF_COUNTERS=y CONFIG_PERF_EVENTS=y CONFIG_EVENT_PROFILE=y +# CONFIG_X86_OLD_MCE is not set CONFIG_X86_MCE_INTEL=y CONFIG_X86_MCE_AMD=y # CONFIG_X86_ANCIENT_MCE is not set @@ -468,6 +511,3 @@ CONFIG_ACPI_CMPC=m CONFIG_MSI_WMI=m CONFIG_TOSHIBA_BT_RFKILL=m # CONFIG_SAMSUNG_LAPTOP is not set - -CONFIG_VGA_SWITCHEROO=y -CONFIG_LPC_SCH=m diff --git a/config-x86_64-generic b/config-x86_64-generic index 64f87e6..e938813 100644 --- a/config-x86_64-generic +++ 
b/config-x86_64-generic @@ -10,6 +10,7 @@ CONFIG_X86_EXTENDED_PLATFORM=y # CONFIG_X86_UV is not set CONFIG_X86_MSR=y CONFIG_X86_CPUID=y +# CONFIG_X86_CPU_DEBUG is not set CONFIG_MTRR=y CONFIG_NUMA=y CONFIG_K8_NUMA=y @@ -17,6 +18,7 @@ CONFIG_X86_64_ACPI_NUMA=y # CONFIG_NUMA_EMU is not set CONFIG_NR_CPUS=256 CONFIG_X86_POWERNOW_K8=m +CONFIG_X86_POWERNOW_K8_ACPI=y CONFIG_X86_P4_CLOCKMOD=m CONFIG_IA32_EMULATION=y # CONFIG_IA32_AOUT is not set @@ -43,6 +45,16 @@ CONFIG_EFI_VARS=y CONFIG_EFI_PCDP=y CONFIG_FB_EFI=y +CONFIG_I2O=m +CONFIG_I2O_BLOCK=m +CONFIG_I2O_SCSI=m +CONFIG_I2O_PROC=m +CONFIG_I2O_CONFIG=y +CONFIG_I2O_EXT_ADAPTEC=y +CONFIG_I2O_EXT_ADAPTEC_DMA64=y +CONFIG_I2O_CONFIG_OLD_IOCTL=y +CONFIG_I2O_BUS=m + CONFIG_SECCOMP=y CONFIG_CAPI_EICON=y @@ -64,8 +76,8 @@ CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m CONFIG_CPU_FREQ_TABLE=y CONFIG_CPU_FREQ_DEBUG=y # CONFIG_X86_SPEEDSTEP_CENTRINO is not set -CONFIG_X86_PCC_CPUFREQ=m CONFIG_X86_ACPI_CPUFREQ=m +# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set CONFIG_CPU_FREQ_STAT=m CONFIG_CPU_FREQ_STAT_DETAILS=y @@ -75,10 +87,12 @@ CONFIG_ACPI_AC=y CONFIG_ACPI_PROCFS_POWER=y CONFIG_ACPI_SYSFS_POWER=y CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BAY=m CONFIG_ACPI_BLACKLIST_YEAR=0 CONFIG_ACPI_BUTTON=y CONFIG_ACPI_CONTAINER=m CONFIG_ACPI_DOCK=y +CONFIG_ACPI_EC=y CONFIG_ACPI_FAN=y CONFIG_ACPI_HOTPLUG_MEMORY=m CONFIG_ACPI_NUMA=y @@ -89,6 +103,7 @@ CONFIG_ACPI_SLEEP=y CONFIG_ACPI_THERMAL=y CONFIG_ACPI_TOSHIBA=m CONFIG_ACPI_POWER=y +CONFIG_ACPI_SYSTEM=y CONFIG_ACPI_VIDEO=m # Disable in F9. CONFIG_ACPI_PROC_EVENT=y @@ -96,6 +111,7 @@ CONFIG_ACPI_POWER_METER=m CONFIG_ACPI_PROCESSOR_AGGREGATOR=m CONFIG_X86_PLATFORM_DEVICES=y +CONFIG_ACERHDF=m CONFIG_ASUS_LAPTOP=m CONFIG_COMPAL_LAPTOP=m CONFIG_FUJITSU_LAPTOP=m @@ -104,11 +120,9 @@ CONFIG_MSI_LAPTOP=m CONFIG_SONY_LAPTOP=m CONFIG_SONYPI_COMPAT=y CONFIG_EEEPC_LAPTOP=m -CONFIG_EEEPC_WMI=m CONFIG_DELL_LAPTOP=m CONFIG_ACPI_WMI=m CONFIG_ACER_WMI=m -CONFIG_ACERHDF=m CONFIG_HP_WMI=m CONFIG_DELL_WMI=m @@ -116,6 +130,7 @@ CONFIG_THINKPAD_ACPI=m # CONFIG_THINKPAD_ACPI_DEBUG is not set # CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y +CONFIG_THINKPAD_ACPI_BAY=y CONFIG_THINKPAD_ACPI_VIDEO=y CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y # CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set @@ -130,6 +145,7 @@ CONFIG_HOTPLUG_PCI_SHPC=m CONFIG_HPET=y # CONFIG_HPET_MMAP is not set +# CONFIG_HPET_RTC_IRQ is not set CONFIG_PM=y CONFIG_IPW2100=m @@ -153,8 +169,7 @@ CONFIG_CRYPTO_AES_X86_64=m CONFIG_CRYPTO_TWOFISH_X86_64=m # CONFIG_CRYPTO_SALSA20 is not set CONFIG_CRYPTO_SALSA20_X86_64=m -# -- Temporarily disabled until bugs can be sorted -# CONFIG_CRYPTO_AES_NI_INTEL is not set +CONFIG_CRYPTO_AES_NI_INTEL=m CONFIG_X86_MCE=y CONFIG_X86_MCE_INTEL=y @@ -168,6 +183,7 @@ CONFIG_I2C_AMD756_S4882=m CONFIG_I2C_AMD8111=m CONFIG_I2C_I801=m CONFIG_I2C_ISCH=m +# CONFIG_I2C_I810 is not set CONFIG_I2C_NFORCE2_S4985=m CONFIG_I2C_PIIX4=m # CONFIG_I2C_SIS5595 is not set @@ -211,11 +227,11 @@ CONFIG_HW_RANDOM_VIA=m # CONFIG_HW_RANDOM_GEODE is not set +CONFIG_USB_HIDINPUT_POWERBOOK=y CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_DEBUG_NMI_TIMEOUT=5 -CONFIG_GPIO_SCH=m # CONFIG_PC8736x_GPIO is not set # CONFIG_DISCONTIGMEM_MANUAL is not set @@ -235,6 +251,7 @@ CONFIG_MEMORY_HOTREMOVE=y # CONFIG_BLK_DEV_CS5535 is not set CONFIG_CC_STACKPROTECTOR=y +# CONFIG_CC_STACKPROTECTOR_ALL is not set CONFIG_SGI_IOC4=m CONFIG_SGI_XP=m @@ -272,6 +289,7 @@ CONFIG_VIRTUALIZATION=y CONFIG_KVM=m CONFIG_KVM_INTEL=m CONFIG_KVM_AMD=m +CONFIG_KVM_TRACE=y CONFIG_PARAVIRT_GUEST=y 
 CONFIG_PARAVIRT=y
@@ -310,6 +328,8 @@ CONFIG_HP_WATCHDOG=m
 CONFIG_FRAME_WARN=2048
+CONFIG_MEMTEST_BOOTPARAM=y
+CONFIG_MEMTEST_BOOTPARAM_VALUE=0
 CONFIG_NODES_SHIFT=9
 CONFIG_X86_PAT=y
 # FIXME: These should be 32bit only
@@ -318,8 +338,6 @@ CONFIG_STRICT_DEVMEM=y
 CONFIG_DIRECT_GBPAGES=y
-# CONFIG_NO_BOOTMEM is not set
-
 # CONFIG_MEMTEST is not set
 CONFIG_AMD_IOMMU=y
 CONFIG_AMD_IOMMU_STATS=y
@@ -359,6 +377,7 @@ CONFIG_HW_BRANCH_TRACER=y
 CONFIG_X86_X2APIC=y
 CONFIG_SPARSE_IRQ=y
+CONFIG_NUMA_MIGRATE_IRQ_DESC=y
 CONFIG_RCU_FANOUT=64
@@ -393,6 +412,3 @@ CONFIG_CS5535_CLOCK_EVENT_SRC=m
 CONFIG_X86_DECODER_SELFTEST=y
 CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
-
-CONFIG_VGA_SWITCHEROO=y
-CONFIG_LPC_SCH=m
diff --git a/coredump-uid-pipe-check.patch b/coredump-uid-pipe-check.patch
deleted file mode 100644
index 4b98a07..0000000
--- a/coredump-uid-pipe-check.patch
+++ /dev/null
@@ -1,14 +0,0 @@
-diff -up linux-2.6.32.noarch/fs/exec.c.orig linux-2.6.32.noarch/fs/exec.c
---- linux-2.6.32.noarch/fs/exec.c.orig 2010-02-22 12:40:06.000000000 -0500
-+++ linux-2.6.32.noarch/fs/exec.c 2010-02-22 12:48:34.000000000 -0500
-@@ -1973,8 +1973,9 @@ void do_coredump(long signr, int exit_co
- 	/*
- 	 * Dont allow local users get cute and trick others to coredump
- 	 * into their pre-created files:
-+	 * Note this isn't relevant to pipes
- 	 */
--	if (inode->i_uid != current_fsuid())
-+	if (!ispipe && (inode->i_uid != current_fsuid()))
- 		goto close_fail;
- 	if (!cprm.file->f_op)
- 		goto close_fail;
diff --git a/cred-dont-resurrect-dead-credentials.patch b/cred-dont-resurrect-dead-credentials.patch
deleted file mode 100644
index 6ffab63..0000000
--- a/cred-dont-resurrect-dead-credentials.patch
+++ /dev/null
@@ -1,176 +0,0 @@
-From: David Howells
-Date: Thu, 29 Jul 2010 11:45:49 +0000 (+0100)
-Subject: CRED: Fix get_task_cred() and task_state() to not resurrect dead credentials
-X-Git-Tag: v2.6.35~11
-X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=de09a9771a5346029f4d11e4ac886be7f9b
-
-CRED: Fix get_task_cred() and task_state() to not resurrect dead credentials
-
-It's possible for get_task_cred() as it currently stands to 'corrupt' a set of
-credentials by incrementing their usage count after their replacement by the
-task being accessed.
-
-What happens is that get_task_cred() can race with commit_creds():
-
-        TASK_1                          TASK_2                  RCU_CLEANER
-        -->get_task_cred(TASK_2)
-        rcu_read_lock()
-        __cred = __task_cred(TASK_2)
-                                        -->commit_creds()
-                                        old_cred = TASK_2->real_cred
-                                        TASK_2->real_cred = ...
-                                        put_cred(old_cred)
-                                          call_rcu(old_cred)
-        [__cred->usage == 0]
-        get_cred(__cred)
-        [__cred->usage == 1]
-        rcu_read_unlock()
-                                                                -->put_cred_rcu()
-                                                                [__cred->usage == 1]
-                                                                panic()
-
-However, since a tasks credentials are generally not changed very often, we can
-reasonably make use of a loop involving reading the creds pointer and using
-atomic_inc_not_zero() to attempt to increment it if it hasn't already hit zero.
-
-If successful, we can safely return the credentials in the knowledge that, even
-if the task we're accessing has released them, they haven't gone to the RCU
-cleanup code.
-
-We then change task_state() in procfs to use get_task_cred() rather than
-calling get_cred() on the result of __task_cred(), as that suffers from the
-same problem.
-
-Without this change, a BUG_ON in __put_cred() or in put_cred_rcu() can be
-tripped when it is noticed that the usage count is not zero as it ought to be,
-for example:
-
-kernel BUG at kernel/cred.c:168!
-invalid opcode: 0000 [#1] SMP
-last sysfs file: /sys/kernel/mm/ksm/run
-CPU 0
-Pid: 2436, comm: master Not tainted 2.6.33.3-85.fc13.x86_64 #1 0HR330/OptiPlex
-745
-RIP: 0010:[] [] __put_cred+0xc/0x45
-RSP: 0018:ffff88019e7e9eb8 EFLAGS: 00010202
-RAX: 0000000000000001 RBX: ffff880161514480 RCX: 00000000ffffffff
-RDX: 00000000ffffffff RSI: ffff880140c690c0 RDI: ffff880140c690c0
-RBP: ffff88019e7e9eb8 R08: 00000000000000d0 R09: 0000000000000000
-R10: 0000000000000001 R11: 0000000000000040 R12: ffff880140c690c0
-R13: ffff88019e77aea0 R14: 00007fff336b0a5c R15: 0000000000000001
-FS: 00007f12f50d97c0(0000) GS:ffff880007400000(0000) knlGS:0000000000000000
-CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
-CR2: 00007f8f461bc000 CR3: 00000001b26ce000 CR4: 00000000000006f0
-DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
-DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
-Process master (pid: 2436, threadinfo ffff88019e7e8000, task ffff88019e77aea0)
-Stack:
- ffff88019e7e9ec8 ffffffff810698cd ffff88019e7e9ef8 ffffffff81069b45
-<0> ffff880161514180 ffff880161514480 ffff880161514180 0000000000000000
-<0> ffff88019e7e9f28 ffffffff8106aace 0000000000000001 0000000000000246
-Call Trace:
- [] put_cred+0x13/0x15
- [] commit_creds+0x16b/0x175
- [] set_current_groups+0x47/0x4e
- [] sys_setgroups+0xf6/0x105
- [] system_call_fastpath+0x16/0x1b
-Code: 48 8d 71 ff e8 7e 4e 15 00 85 c0 78 0b 8b 75 ec 48 89 df e8 ef 4a 15 00
-48 83 c4 18 5b c9 c3 55 8b 07 8b 07 48 89 e5 85 c0 74 04 <0f> 0b eb fe 65 48 8b
-04 25 00 cc 00 00 48 3b b8 58 04 00 00 75
-RIP [] __put_cred+0xc/0x45
- RSP
----[ end trace df391256a100ebdd ]---
-
-Signed-off-by: David Howells
-Acked-by: Jiri Olsa
-Signed-off-by: Linus Torvalds
----
-
-diff --git a/fs/proc/array.c b/fs/proc/array.c
-index 9b58d38..fff6572 100644
---- a/fs/proc/array.c
-+++ b/fs/proc/array.c
-@@ -176,7 +176,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
- 	if (tracer)
- 		tpid = task_pid_nr_ns(tracer, ns);
- 	}
--	cred = get_cred((struct cred *) __task_cred(p));
-+	cred = get_task_cred(p);
- 	seq_printf(m,
- 		"State:\t%s\n"
- 		"Tgid:\t%d\n"
-diff --git a/include/linux/cred.h b/include/linux/cred.h
-index 75c0fa8..ce40cbc 100644
---- a/include/linux/cred.h
-+++ b/include/linux/cred.h
-@@ -153,6 +153,7 @@ struct cred {
- extern void __put_cred(struct cred *);
- extern void exit_creds(struct task_struct *);
- extern int copy_creds(struct task_struct *, unsigned long);
-+extern const struct cred *get_task_cred(struct task_struct *);
- extern struct cred *cred_alloc_blank(void);
- extern struct cred *prepare_creds(void);
- extern struct cred *prepare_exec_creds(void);
-@@ -282,26 +283,6 @@ static inline void put_cred(const struct cred *_cred)
- ((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_tasklist_lock_is_held())))
-
- /**
-- * get_task_cred - Get another task's objective credentials
-- * @task: The task to query
-- *
-- * Get the objective credentials of a task, pinning them so that they can't go
-- * away. Accessing a task's credentials directly is not permitted.
-- *
-- * The caller must make sure task doesn't go away, either by holding a ref on
-- task or by holding tasklist_lock to prevent it from being unlinked.
-- */ --#define get_task_cred(task) \ --({ \ -- struct cred *__cred; \ -- rcu_read_lock(); \ -- __cred = (struct cred *) __task_cred((task)); \ -- get_cred(__cred); \ -- rcu_read_unlock(); \ -- __cred; \ --}) -- --/** - * get_current_cred - Get the current task's subjective credentials - * - * Get the subjective credentials of the current task, pinning them so that -diff --git a/kernel/cred.c b/kernel/cred.c -index a2d5504..60bc8b1 100644 ---- a/kernel/cred.c -+++ b/kernel/cred.c -@@ -209,6 +209,31 @@ void exit_creds(struct task_struct *tsk) - } - } - -+/** -+ * get_task_cred - Get another task's objective credentials -+ * @task: The task to query -+ * -+ * Get the objective credentials of a task, pinning them so that they can't go -+ * away. Accessing a task's credentials directly is not permitted. -+ * -+ * The caller must also make sure task doesn't get deleted, either by holding a -+ * ref on task or by holding tasklist_lock to prevent it from being unlinked. -+ */ -+const struct cred *get_task_cred(struct task_struct *task) -+{ -+ const struct cred *cred; -+ -+ rcu_read_lock(); -+ -+ do { -+ cred = __task_cred((task)); -+ BUG_ON(!cred); -+ } while (!atomic_inc_not_zero(&((struct cred *)cred)->usage)); -+ -+ rcu_read_unlock(); -+ return cred; -+} -+ - /* - * Allocate blank credentials, such that the credentials can be filled in at a - * later date without risk of ENOMEM. diff --git a/crypto-add-async-hash-testing.patch b/crypto-add-async-hash-testing.patch deleted file mode 100644 index 8df0ad4..0000000 --- a/crypto-add-async-hash-testing.patch +++ /dev/null @@ -1,111 +0,0 @@ -From e45009229be6a7fae49bdfa3459905668c0b0fb1 Mon Sep 17 00:00:00 2001 -From: David S. Miller -Date: Wed, 19 May 2010 14:12:03 +1000 -Subject: crypto: testmgr - Add testing for async hashing and update/final - -Extend testmgr such that it tests async hash algorithms, -and that for both sync and async hashes it tests both -->digest() and ->update()/->final() sequences. - -Signed-off-by: David S. 
Miller -Signed-off-by: Herbert Xu ---- - crypto/testmgr.c | 66 +++++++++++++++++++++++++++++++++++++++-------------- - 1 files changed, 48 insertions(+), 18 deletions(-) - -diff --git a/crypto/testmgr.c b/crypto/testmgr.c -index c494d76..5c8aaa0 100644 ---- a/crypto/testmgr.c -+++ b/crypto/testmgr.c -@@ -153,8 +153,21 @@ static void testmgr_free_buf(char *buf[XBUFSIZE]) - free_page((unsigned long)buf[i]); - } - -+static int do_one_async_hash_op(struct ahash_request *req, -+ struct tcrypt_result *tr, -+ int ret) -+{ -+ if (ret == -EINPROGRESS || ret == -EBUSY) { -+ ret = wait_for_completion_interruptible(&tr->completion); -+ if (!ret) -+ ret = tr->err; -+ INIT_COMPLETION(tr->completion); -+ } -+ return ret; -+} -+ - static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, -- unsigned int tcount) -+ unsigned int tcount, bool use_digest) - { - const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); - unsigned int i, j, k, temp; -@@ -206,23 +219,36 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, - } - - ahash_request_set_crypt(req, sg, result, template[i].psize); -- ret = crypto_ahash_digest(req); -- switch (ret) { -- case 0: -- break; -- case -EINPROGRESS: -- case -EBUSY: -- ret = wait_for_completion_interruptible( -- &tresult.completion); -- if (!ret && !(ret = tresult.err)) { -- INIT_COMPLETION(tresult.completion); -- break; -+ if (use_digest) { -+ ret = do_one_async_hash_op(req, &tresult, -+ crypto_ahash_digest(req)); -+ if (ret) { -+ pr_err("alg: hash: digest failed on test %d " -+ "for %s: ret=%d\n", j, algo, -ret); -+ goto out; -+ } -+ } else { -+ ret = do_one_async_hash_op(req, &tresult, -+ crypto_ahash_init(req)); -+ if (ret) { -+ pr_err("alt: hash: init failed on test %d " -+ "for %s: ret=%d\n", j, algo, -ret); -+ goto out; -+ } -+ ret = do_one_async_hash_op(req, &tresult, -+ crypto_ahash_update(req)); -+ if (ret) { -+ pr_err("alt: hash: update failed on test %d " -+ "for %s: ret=%d\n", j, algo, -ret); -+ goto out; -+ } -+ ret = do_one_async_hash_op(req, &tresult, -+ crypto_ahash_final(req)); -+ if (ret) { -+ pr_err("alt: hash: final failed on test %d " -+ "for %s: ret=%d\n", j, algo, -ret); -+ goto out; - } -- /* fall through */ -- default: -- printk(KERN_ERR "alg: hash: digest failed on test %d " -- "for %s: ret=%d\n", j, algo, -ret); -- goto out; - } - - if (memcmp(result, template[i].digest, -@@ -1402,7 +1428,11 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver, - return PTR_ERR(tfm); - } - -- err = test_hash(tfm, desc->suite.hash.vecs, desc->suite.hash.count); -+ err = test_hash(tfm, desc->suite.hash.vecs, -+ desc->suite.hash.count, true); -+ if (!err) -+ err = test_hash(tfm, desc->suite.hash.vecs, -+ desc->suite.hash.count, false); - - crypto_free_ahash(tfm); - return err; --- -1.7.0.1 - diff --git a/crypto-aesni-kill-module_alias.patch b/crypto-aesni-kill-module_alias.patch new file mode 100644 index 0000000..a3ade8a --- /dev/null +++ b/crypto-aesni-kill-module_alias.patch @@ -0,0 +1,9 @@ +diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c +index 49c552c..ae88694 100644 +--- a/arch/x86/crypto/aesni-intel_glue.c ++++ b/arch/x86/crypto/aesni-intel_glue.c +@@ -720,4 +720,3 @@ module_exit(aesni_exit); + + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized"); + MODULE_LICENSE("GPL"); +-MODULE_ALIAS("aes"); diff --git a/crystalhd-2.6.34-staging.patch b/crystalhd-2.6.34-staging.patch new file mode 100644 index 
0000000..f1eb397 --- /dev/null +++ b/crystalhd-2.6.34-staging.patch @@ -0,0 +1,8287 @@ +Broadcom Crystal HD video decoder driver from upstream staging/linux-next. + +Signed-off-by: Jarod Wilson + +--- + drivers/staging/Kconfig | 2 + + drivers/staging/Makefile | 1 + + drivers/staging/crystalhd/Kconfig | 6 + + drivers/staging/crystalhd/Makefile | 6 + + drivers/staging/crystalhd/TODO | 16 + + drivers/staging/crystalhd/bc_dts_defs.h | 498 ++++++ + drivers/staging/crystalhd/bc_dts_glob_lnx.h | 299 ++++ + drivers/staging/crystalhd/bc_dts_types.h | 121 ++ + drivers/staging/crystalhd/bcm_70012_regs.h | 757 +++++++++ + drivers/staging/crystalhd/crystalhd_cmds.c | 1058 ++++++++++++ + drivers/staging/crystalhd/crystalhd_cmds.h | 88 + + drivers/staging/crystalhd/crystalhd_fw_if.h | 369 ++++ + drivers/staging/crystalhd/crystalhd_hw.c | 2395 +++++++++++++++++++++++++++ + drivers/staging/crystalhd/crystalhd_hw.h | 398 +++++ + drivers/staging/crystalhd/crystalhd_lnx.c | 780 +++++++++ + drivers/staging/crystalhd/crystalhd_lnx.h | 96 ++ + drivers/staging/crystalhd/crystalhd_misc.c | 1029 ++++++++++++ + drivers/staging/crystalhd/crystalhd_misc.h | 229 +++ + 18 files changed, 8148 insertions(+), 0 deletions(-) + +diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig +index 94eb863..61ec152 100644 +--- a/drivers/staging/Kconfig ++++ b/drivers/staging/Kconfig +@@ -145,5 +145,7 @@ source "drivers/staging/netwave/Kconfig" + + source "drivers/staging/sm7xx/Kconfig" + ++source "drivers/staging/crystalhd/Kconfig" ++ + endif # !STAGING_EXCLUDE_BUILD + endif # STAGING +diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile +index b5e67b8..dc40493 100644 +--- a/drivers/staging/Makefile ++++ b/drivers/staging/Makefile +@@ -53,3 +53,4 @@ obj-$(CONFIG_WAVELAN) += wavelan/ + obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan/ + obj-$(CONFIG_PCMCIA_NETWAVE) += netwave/ + obj-$(CONFIG_FB_SM7XX) += sm7xx/ ++obj-$(CONFIG_CRYSTALHD) += crystalhd/ +diff --git a/drivers/staging/crystalhd/Kconfig b/drivers/staging/crystalhd/Kconfig +new file mode 100644 +index 0000000..56b414b +--- /dev/null ++++ b/drivers/staging/crystalhd/Kconfig +@@ -0,0 +1,6 @@ ++config CRYSTALHD ++ tristate "Broadcom Crystal HD video decoder support" ++ depends on PCI ++ default n ++ help ++ Support for the Broadcom Crystal HD video decoder chipset +diff --git a/drivers/staging/crystalhd/Makefile b/drivers/staging/crystalhd/Makefile +new file mode 100644 +index 0000000..e2af0ce +--- /dev/null ++++ b/drivers/staging/crystalhd/Makefile +@@ -0,0 +1,6 @@ ++obj-$(CONFIG_CRYSTALHD) += crystalhd.o ++ ++crystalhd-objs := crystalhd_cmds.o \ ++ crystalhd_hw.o \ ++ crystalhd_lnx.o \ ++ crystalhd_misc.o +diff --git a/drivers/staging/crystalhd/TODO b/drivers/staging/crystalhd/TODO +new file mode 100644 +index 0000000..69be5d0 +--- /dev/null ++++ b/drivers/staging/crystalhd/TODO +@@ -0,0 +1,16 @@ ++- Testing ++- Cleanup return codes ++- Cleanup typedefs ++- Cleanup all WIN* references ++- Allocate an Accelerator device class specific Major number, ++ since we don't have any other open sourced accelerators, it is the only ++ one in that category for now. 
++ A somewhat similar device is the DXR2/3 ++ ++Please send patches to: ++Greg Kroah-Hartman ++Naren Sankar ++Jarod Wilson ++Scott Davilla ++Manu Abraham ++ +diff --git a/drivers/staging/crystalhd/bc_dts_defs.h b/drivers/staging/crystalhd/bc_dts_defs.h +new file mode 100644 +index 0000000..c34cc07 +--- /dev/null ++++ b/drivers/staging/crystalhd/bc_dts_defs.h +@@ -0,0 +1,498 @@ ++/******************************************************************** ++ * Copyright(c) 2006-2009 Broadcom Corporation. ++ * ++ * Name: bc_dts_defs.h ++ * ++ * Description: Common definitions for all components. Only types ++ * is allowed to be included from this file. ++ * ++ * AU ++ * ++ * HISTORY: ++ * ++ ******************************************************************** ++ * This header is free software: you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License as published ++ * by the Free Software Foundation, either version 2.1 of the License. ++ * ++ * This header is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU Lesser General Public License for more details. ++ * You should have received a copy of the GNU Lesser General Public License ++ * along with this header. If not, see . ++ *******************************************************************/ ++ ++#ifndef _BC_DTS_DEFS_H_ ++#define _BC_DTS_DEFS_H_ ++ ++#include "bc_dts_types.h" ++ ++/* BIT Mask */ ++#define BC_BIT(_x) (1 << (_x)) ++ ++typedef enum _BC_STATUS { ++ BC_STS_SUCCESS = 0, ++ BC_STS_INV_ARG = 1, ++ BC_STS_BUSY = 2, ++ BC_STS_NOT_IMPL = 3, ++ BC_STS_PGM_QUIT = 4, ++ BC_STS_NO_ACCESS = 5, ++ BC_STS_INSUFF_RES = 6, ++ BC_STS_IO_ERROR = 7, ++ BC_STS_NO_DATA = 8, ++ BC_STS_VER_MISMATCH = 9, ++ BC_STS_TIMEOUT = 10, ++ BC_STS_FW_CMD_ERR = 11, ++ BC_STS_DEC_NOT_OPEN = 12, ++ BC_STS_ERR_USAGE = 13, ++ BC_STS_IO_USER_ABORT = 14, ++ BC_STS_IO_XFR_ERROR = 15, ++ BC_STS_DEC_NOT_STARTED = 16, ++ BC_STS_FWHEX_NOT_FOUND = 17, ++ BC_STS_FMT_CHANGE = 18, ++ BC_STS_HIF_ACCESS = 19, ++ BC_STS_CMD_CANCELLED = 20, ++ BC_STS_FW_AUTH_FAILED = 21, ++ BC_STS_BOOTLOADER_FAILED = 22, ++ BC_STS_CERT_VERIFY_ERROR = 23, ++ BC_STS_DEC_EXIST_OPEN = 24, ++ BC_STS_PENDING = 25, ++ BC_STS_CLK_NOCHG = 26, ++ ++ /* Must be the last one.*/ ++ BC_STS_ERROR = -1 ++} BC_STATUS; ++ ++/*------------------------------------------------------* ++ * Registry Key Definitions * ++ *------------------------------------------------------*/ ++#define BC_REG_KEY_MAIN_PATH "Software\\Broadcom\\MediaPC\\70010" ++#define BC_REG_KEY_FWPATH "FirmwareFilePath" ++#define BC_REG_KEY_SEC_OPT "DbgOptions" ++ ++/* ++ * Options: ++ * ++ * b[5] = Enable RSA KEY in EEPROM Support ++ * b[6] = Enable Old PIB scheme. 
(0 = Use PIB with video scheme) ++ * ++ * b[12] = Enable send message to NotifyIcon ++ * ++ */ ++ ++typedef enum _BC_SW_OPTIONS { ++ BC_OPT_DOSER_OUT_ENCRYPT = BC_BIT(3), ++ BC_OPT_LINK_OUT_ENCRYPT = BC_BIT(29), ++} BC_SW_OPTIONS; ++ ++typedef struct _BC_REG_CONFIG{ ++ uint32_t DbgOptions; ++} BC_REG_CONFIG; ++ ++#if defined(__KERNEL__) || defined(__LINUX_USER__) ++#else ++/* Align data structures */ ++#define ALIGN(x) __declspec(align(x)) ++#endif ++ ++/* mode ++ * b[0]..b[7] = _DtsDeviceOpenMode ++ * b[8] = Load new FW ++ * b[9] = Load file play back FW ++ * b[10] = Disk format (0 for HD DVD and 1 for BLU ray) ++ * b[11]-b[15] = default output resolution ++ * b[16] = Skip TX CPB Buffer Check ++ * b[17] = Adaptive Output Encrypt/Scramble Scheme ++ * b[18]-b[31] = reserved for future use ++ */ ++ ++/* To allow multiple apps to open the device. */ ++enum _DtsDeviceOpenMode { ++ DTS_PLAYBACK_MODE = 0, ++ DTS_DIAG_MODE, ++ DTS_MONITOR_MODE, ++ DTS_HWINIT_MODE ++}; ++ ++/* To enable the filter to selectively enable/disable fixes or erratas */ ++enum _DtsDeviceFixMode { ++ DTS_LOAD_NEW_FW = BC_BIT(8), ++ DTS_LOAD_FILE_PLAY_FW = BC_BIT(9), ++ DTS_DISK_FMT_BD = BC_BIT(10), ++ /* b[11]-b[15] : Default output resolution */ ++ DTS_SKIP_TX_CHK_CPB = BC_BIT(16), ++ DTS_ADAPTIVE_OUTPUT_PER = BC_BIT(17), ++ DTS_INTELLIMAP = BC_BIT(18), ++ /* b[19]-b[21] : select clock frequency */ ++ DTS_PLAYBACK_DROP_RPT_MODE = BC_BIT(22) ++}; ++ ++#define DTS_DFLT_RESOLUTION(x) (x<<11) ++ ++#define DTS_DFLT_CLOCK(x) (x<<19) ++ ++/* F/W File Version corresponding to S/W Releases */ ++enum _FW_FILE_VER { ++ /* S/W release: 02.04.02 F/W release 2.12.2.0 */ ++ BC_FW_VER_020402 = ((12<<16) | (2<<8) | (0)) ++}; ++ ++/*------------------------------------------------------* ++ * Stream Types for DtsOpenDecoder() * ++ *------------------------------------------------------*/ ++enum _DtsOpenDecStreamTypes { ++ BC_STREAM_TYPE_ES = 0, ++ BC_STREAM_TYPE_PES = 1, ++ BC_STREAM_TYPE_TS = 2, ++ BC_STREAM_TYPE_ES_TSTAMP = 6, ++}; ++ ++/*------------------------------------------------------* ++ * Video Algorithms for DtsSetVideoParams() * ++ *------------------------------------------------------*/ ++enum _DtsSetVideoParamsAlgo { ++ BC_VID_ALGO_H264 = 0, ++ BC_VID_ALGO_MPEG2 = 1, ++ BC_VID_ALGO_VC1 = 4, ++ BC_VID_ALGO_VC1MP = 7, ++}; ++ ++/*------------------------------------------------------* ++ * MPEG Extension to the PPB * ++ *------------------------------------------------------*/ ++#define BC_MPEG_VALID_PANSCAN (1) ++ ++typedef struct _BC_PIB_EXT_MPEG { ++ uint32_t valid; ++ /* Always valid, defaults to picture size if no ++ * sequence display extension in the stream. */ ++ uint32_t display_horizontal_size; ++ uint32_t display_vertical_size; ++ ++ /* MPEG_VALID_PANSCAN ++ * Offsets are a copy values from the MPEG stream. */ ++ uint32_t offset_count; ++ int32_t horizontal_offset[3]; ++ int32_t vertical_offset[3]; ++ ++} BC_PIB_EXT_MPEG; ++ ++/*------------------------------------------------------* ++ * H.264 Extension to the PPB * ++ *------------------------------------------------------*/ ++/* Bit definitions for 'other.h264.valid' field */ ++#define H264_VALID_PANSCAN (1) ++#define H264_VALID_SPS_CROP (2) ++#define H264_VALID_VUI (4) ++ ++typedef struct _BC_PIB_EXT_H264 { ++ /* 'valid' specifies which fields (or sets of ++ * fields) below are valid. If the corresponding ++ * bit in 'valid' is NOT set then that field(s) ++ * is (are) not initialized. 
*/ ++ uint32_t valid; ++ ++ /* H264_VALID_PANSCAN */ ++ uint32_t pan_scan_count; ++ int32_t pan_scan_left[3]; ++ int32_t pan_scan_right[3]; ++ int32_t pan_scan_top[3]; ++ int32_t pan_scan_bottom[3]; ++ ++ /* H264_VALID_SPS_CROP */ ++ int32_t sps_crop_left; ++ int32_t sps_crop_right; ++ int32_t sps_crop_top; ++ int32_t sps_crop_bottom; ++ ++ /* H264_VALID_VUI */ ++ uint32_t chroma_top; ++ uint32_t chroma_bottom; ++ ++} BC_PIB_EXT_H264; ++ ++/*------------------------------------------------------* ++ * VC1 Extension to the PPB * ++ *------------------------------------------------------*/ ++#define VC1_VALID_PANSCAN (1) ++ ++typedef struct _BC_PIB_EXT_VC1 { ++ uint32_t valid; ++ ++ /* Always valid, defaults to picture size if no ++ * sequence display extension in the stream. */ ++ uint32_t display_horizontal_size; ++ uint32_t display_vertical_size; ++ ++ /* VC1 pan scan windows */ ++ uint32_t num_panscan_windows; ++ int32_t ps_horiz_offset[4]; ++ int32_t ps_vert_offset[4]; ++ int32_t ps_width[4]; ++ int32_t ps_height[4]; ++ ++} BC_PIB_EXT_VC1; ++ ++ ++/*------------------------------------------------------* ++ * Picture Information Block * ++ *------------------------------------------------------*/ ++#if defined(_WIN32) || defined(_WIN64) || defined(__LINUX_USER__) ++/* Values for 'pulldown' field. '0' means no pulldown information ++ * was present for this picture. */ ++enum { ++ vdecNoPulldownInfo = 0, ++ vdecTop = 1, ++ vdecBottom = 2, ++ vdecTopBottom = 3, ++ vdecBottomTop = 4, ++ vdecTopBottomTop = 5, ++ vdecBottomTopBottom = 6, ++ vdecFrame_X2 = 7, ++ vdecFrame_X3 = 8, ++ vdecFrame_X1 = 9, ++ vdecFrame_X4 = 10, ++}; ++ ++/* Values for the 'frame_rate' field. */ ++enum { ++ vdecFrameRateUnknown = 0, ++ vdecFrameRate23_97, ++ vdecFrameRate24, ++ vdecFrameRate25, ++ vdecFrameRate29_97, ++ vdecFrameRate30, ++ vdecFrameRate50, ++ vdecFrameRate59_94, ++ vdecFrameRate60, ++}; ++ ++/* Values for the 'aspect_ratio' field. */ ++enum { ++ vdecAspectRatioUnknown = 0, ++ vdecAspectRatioSquare, ++ vdecAspectRatio12_11, ++ vdecAspectRatio10_11, ++ vdecAspectRatio16_11, ++ vdecAspectRatio40_33, ++ vdecAspectRatio24_11, ++ vdecAspectRatio20_11, ++ vdecAspectRatio32_11, ++ vdecAspectRatio80_33, ++ vdecAspectRatio18_11, ++ vdecAspectRatio15_11, ++ vdecAspectRatio64_33, ++ vdecAspectRatio160_99, ++ vdecAspectRatio4_3, ++ vdecAspectRatio16_9, ++ vdecAspectRatio221_1, ++ vdecAspectRatioOther = 255, ++}; ++ ++/* Values for the 'colour_primaries' field. 
*/ ++enum { ++ vdecColourPrimariesUnknown = 0, ++ vdecColourPrimariesBT709, ++ vdecColourPrimariesUnspecified, ++ vdecColourPrimariesReserved, ++ vdecColourPrimariesBT470_2M = 4, ++ vdecColourPrimariesBT470_2BG, ++ vdecColourPrimariesSMPTE170M, ++ vdecColourPrimariesSMPTE240M, ++ vdecColourPrimariesGenericFilm, ++}; ++ ++enum { ++ vdecRESOLUTION_CUSTOM = 0x00000000, /* custom */ ++ vdecRESOLUTION_480i = 0x00000001, /* 480i */ ++ vdecRESOLUTION_1080i = 0x00000002, /* 1080i (1920x1080, 60i) */ ++ vdecRESOLUTION_NTSC = 0x00000003, /* NTSC (720x483, 60i) */ ++ vdecRESOLUTION_480p = 0x00000004, /* 480p (720x480, 60p) */ ++ vdecRESOLUTION_720p = 0x00000005, /* 720p (1280x720, 60p) */ ++ vdecRESOLUTION_PAL1 = 0x00000006, /* PAL_1 (720x576, 50i) */ ++ vdecRESOLUTION_1080i25 = 0x00000007, /* 1080i25 (1920x1080, 50i) */ ++ vdecRESOLUTION_720p50 = 0x00000008, /* 720p50 (1280x720, 50p) */ ++ vdecRESOLUTION_576p = 0x00000009, /* 576p (720x576, 50p) */ ++ vdecRESOLUTION_1080i29_97 = 0x0000000A, /* 1080i (1920x1080, 59.94i) */ ++ vdecRESOLUTION_720p59_94 = 0x0000000B, /* 720p (1280x720, 59.94p) */ ++ vdecRESOLUTION_SD_DVD = 0x0000000C, /* SD DVD (720x483, 60i) */ ++ vdecRESOLUTION_480p656 = 0x0000000D, /* 480p (720x480, 60p), output bus width 8 bit, clock 74.25MHz */ ++ vdecRESOLUTION_1080p23_976 = 0x0000000E, /* 1080p23_976 (1920x1080, 23.976p) */ ++ vdecRESOLUTION_720p23_976 = 0x0000000F, /* 720p23_976 (1280x720p, 23.976p) */ ++ vdecRESOLUTION_240p29_97 = 0x00000010, /* 240p (1440x240, 29.97p ) */ ++ vdecRESOLUTION_240p30 = 0x00000011, /* 240p (1440x240, 30p) */ ++ vdecRESOLUTION_288p25 = 0x00000012, /* 288p (1440x288p, 25p) */ ++ vdecRESOLUTION_1080p29_97 = 0x00000013, /* 1080p29_97 (1920x1080, 29.97p) */ ++ vdecRESOLUTION_1080p30 = 0x00000014, /* 1080p30 (1920x1080, 30p) */ ++ vdecRESOLUTION_1080p24 = 0x00000015, /* 1080p24 (1920x1080, 24p) */ ++ vdecRESOLUTION_1080p25 = 0x00000016, /* 1080p25 (1920x1080, 25p) */ ++ vdecRESOLUTION_720p24 = 0x00000017, /* 720p24 (1280x720, 25p) */ ++ vdecRESOLUTION_720p29_97 = 0x00000018, /* 720p29.97 (1280x720, 29.97p) */ ++ vdecRESOLUTION_480p23_976 = 0x00000019, /* 480p23.976 (720*480, 23.976) */ ++ vdecRESOLUTION_480p29_97 = 0x0000001A, /* 480p29.976 (720*480, 29.97p) */ ++ vdecRESOLUTION_576p25 = 0x0000001B, /* 576p25 (720*576, 25p) */ ++ /* For Zero Frame Rate */ ++ vdecRESOLUTION_480p0 = 0x0000001C, /* 480p (720x480, 0p) */ ++ vdecRESOLUTION_480i0 = 0x0000001D, /* 480i (720x480, 0i) */ ++ vdecRESOLUTION_576p0 = 0x0000001E, /* 576p (720x576, 0p) */ ++ vdecRESOLUTION_720p0 = 0x0000001F, /* 720p (1280x720, 0p) */ ++ vdecRESOLUTION_1080p0 = 0x00000020, /* 1080p (1920x1080, 0p) */ ++ vdecRESOLUTION_1080i0 = 0x00000021, /* 1080i (1920x1080, 0i) */ ++}; ++ ++/* Bit definitions for 'flags' field */ ++#define VDEC_FLAG_EOS (0x0004) ++ ++#define VDEC_FLAG_FRAME (0x0000) ++#define VDEC_FLAG_FIELDPAIR (0x0008) ++#define VDEC_FLAG_TOPFIELD (0x0010) ++#define VDEC_FLAG_BOTTOMFIELD (0x0018) ++ ++#define VDEC_FLAG_PROGRESSIVE_SRC (0x0000) ++#define VDEC_FLAG_INTERLACED_SRC (0x0020) ++#define VDEC_FLAG_UNKNOWN_SRC (0x0040) ++ ++#define VDEC_FLAG_BOTTOM_FIRST (0x0080) ++#define VDEC_FLAG_LAST_PICTURE (0x0100) ++ ++#define VDEC_FLAG_PICTURE_META_DATA_PRESENT (0x40000) ++ ++#endif /* _WIN32 || _WIN64 */ ++ ++enum _BC_OUTPUT_FORMAT { ++ MODE420 = 0x0, ++ MODE422_YUY2 = 0x1, ++ MODE422_UYVY = 0x2, ++}; ++ ++typedef struct _BC_PIC_INFO_BLOCK { ++ /* Common fields. 
*/ ++ uint64_t timeStamp; /* Timestamp */ ++ uint32_t picture_number; /* Ordinal display number */ ++ uint32_t width; /* pixels */ ++ uint32_t height; /* pixels */ ++ uint32_t chroma_format; /* 0x420, 0x422 or 0x444 */ ++ uint32_t pulldown; ++ uint32_t flags; ++ uint32_t frame_rate; ++ uint32_t aspect_ratio; ++ uint32_t colour_primaries; ++ uint32_t picture_meta_payload; ++ uint32_t sess_num; ++ uint32_t ycom; ++ uint32_t custom_aspect_ratio_width_height; ++ uint32_t n_drop; /* number of non-reference frames remaining to be dropped */ ++ ++ /* Protocol-specific extensions. */ ++ union { ++ BC_PIB_EXT_H264 h264; ++ BC_PIB_EXT_MPEG mpeg; ++ BC_PIB_EXT_VC1 vc1; ++ } other; ++ ++} BC_PIC_INFO_BLOCK, *PBC_PIC_INFO_BLOCK; ++ ++/*------------------------------------------------------* ++ * ProcOut Info * ++ *------------------------------------------------------*/ ++/* Optional flags for ProcOut Interface.*/ ++enum _POUT_OPTIONAL_IN_FLAGS_{ ++ /* Flags from App to Device */ ++ BC_POUT_FLAGS_YV12 = 0x01, /* Copy Data in YV12 format */ ++ BC_POUT_FLAGS_STRIDE = 0x02, /* Stride size is valid. */ ++ BC_POUT_FLAGS_SIZE = 0x04, /* Take size information from Application */ ++ BC_POUT_FLAGS_INTERLACED = 0x08, /* copy only half the bytes */ ++ BC_POUT_FLAGS_INTERLEAVED = 0x10, /* interleaved frame */ ++ ++ /* Flags from Device to APP */ ++ BC_POUT_FLAGS_FMT_CHANGE = 0x10000, /* Data is not VALID when this flag is set */ ++ BC_POUT_FLAGS_PIB_VALID = 0x20000, /* PIB Information valid */ ++ BC_POUT_FLAGS_ENCRYPTED = 0x40000, /* Data is encrypted. */ ++ BC_POUT_FLAGS_FLD_BOT = 0x80000, /* Bottom Field data */ ++}; ++ ++#if defined(__KERNEL__) || defined(__LINUX_USER__) ++typedef BC_STATUS(*dts_pout_callback)(void *shnd, uint32_t width, uint32_t height, uint32_t stride, void *pOut); ++#else ++typedef BC_STATUS(*dts_pout_callback)(void *shnd, uint32_t width, uint32_t height, uint32_t stride, struct _BC_DTS_PROC_OUT *pOut); ++#endif ++ ++/* Line 21 Closed Caption */ ++/* User Data */ ++#define MAX_UD_SIZE 1792 /* 1920 - 128 */ ++ ++typedef struct _BC_DTS_PROC_OUT { ++ uint8_t *Ybuff; /* Caller Supplied buffer for Y data */ ++ uint32_t YbuffSz; /* Caller Supplied Y buffer size */ ++ uint32_t YBuffDoneSz; /* Transferred Y datasize */ ++ ++ uint8_t *UVbuff; /* Caller Supplied buffer for UV data */ ++ uint32_t UVbuffSz; /* Caller Supplied UV buffer size */ ++ uint32_t UVBuffDoneSz; /* Transferred UV data size */ ++ ++ uint32_t StrideSz; /* Caller supplied Stride Size */ ++ uint32_t PoutFlags; /* Call IN Flags */ ++ ++ uint32_t discCnt; /* Picture discontinuity count */ ++ ++ BC_PIC_INFO_BLOCK PicInfo; /* Picture Information Block Data */ ++ ++ /* Line 21 Closed Caption */ ++ /* User Data */ ++ uint32_t UserDataSz; ++ uint8_t UserData[MAX_UD_SIZE]; ++ ++ void *hnd; ++ dts_pout_callback AppCallBack; ++ uint8_t DropFrames; ++ uint8_t b422Mode; /* Picture output Mode */ ++ uint8_t bPibEnc; /* PIB encrypted */ ++ uint8_t bRevertScramble; ++ ++} BC_DTS_PROC_OUT; ++ ++typedef struct _BC_DTS_STATUS { ++ uint8_t ReadyListCount; /* Number of frames in ready list (reported by driver) */ ++ uint8_t FreeListCount; /* Number of frame buffers free. (reported by driver) */ ++ uint8_t PowerStateChange; /* Number of active state power transitions (reported by driver) */ ++ uint8_t reserved_[1]; ++ ++ uint32_t FramesDropped; /* Number of frames dropped. (reported by DIL) */ ++ uint32_t FramesCaptured; /* Number of frames captured. (reported by DIL) */ ++ uint32_t FramesRepeated; /* Number of frames repeated. 
(reported by DIL) */ ++ ++ uint32_t InputCount; /* Times compressed video has been sent to the HW. ++ * i.e. Successful DtsProcInput() calls (reported by DIL) */ ++ uint64_t InputTotalSize; /* Amount of compressed video that has been sent to the HW. ++ * (reported by DIL) */ ++ uint32_t InputBusyCount; /* Times compressed video has attempted to be sent to the HW ++ * but the input FIFO was full. (reported by DIL) */ ++ ++ uint32_t PIBMissCount; /* Amount of times a PIB is invalid. (reported by DIL) */ ++ ++ uint32_t cpbEmptySize; /* supported only for H.264, specifically changed for ++ * Adobe. Report size of CPB buffer available. ++ * Reported by DIL */ ++ uint64_t NextTimeStamp; /* TimeStamp of the next picture that will be returned ++ * by a call to ProcOutput. Added for Adobe. Reported ++ * back from the driver */ ++ uint8_t reserved__[16]; ++ ++} BC_DTS_STATUS; ++ ++#define BC_SWAP32(_v) \ ++ ((((_v) & 0xFF000000)>>24)| \ ++ (((_v) & 0x00FF0000)>>8)| \ ++ (((_v) & 0x0000FF00)<<8)| \ ++ (((_v) & 0x000000FF)<<24)) ++ ++#define WM_AGENT_TRAYICON_DECODER_OPEN 10001 ++#define WM_AGENT_TRAYICON_DECODER_CLOSE 10002 ++#define WM_AGENT_TRAYICON_DECODER_START 10003 ++#define WM_AGENT_TRAYICON_DECODER_STOP 10004 ++#define WM_AGENT_TRAYICON_DECODER_RUN 10005 ++#define WM_AGENT_TRAYICON_DECODER_PAUSE 10006 ++ ++ ++#endif /* _BC_DTS_DEFS_H_ */ +diff --git a/drivers/staging/crystalhd/bc_dts_glob_lnx.h b/drivers/staging/crystalhd/bc_dts_glob_lnx.h +new file mode 100644 +index 0000000..b3125e3 +--- /dev/null ++++ b/drivers/staging/crystalhd/bc_dts_glob_lnx.h +@@ -0,0 +1,299 @@ ++/******************************************************************** ++ * Copyright(c) 2006-2009 Broadcom Corporation. ++ * ++ * Name: bc_dts_glob_lnx.h ++ * ++ * Description: Wrapper to Windows dts_glob.h for Link-Linux usage. ++ * The idea is to define additional Linux related defs ++ * in this file to avoid changes to existing Windows ++ * glob file. ++ * ++ * AU ++ * ++ * HISTORY: ++ * ++ ******************************************************************** ++ * This header is free software: you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License as published ++ * by the Free Software Foundation, either version 2.1 of the License. ++ * ++ * This header is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU Lesser General Public License for more details. ++ * You should have received a copy of the GNU Lesser General Public License ++ * along with this header. If not, see . ++ *******************************************************************/ ++ ++#ifndef _BC_DTS_GLOB_LNX_H_ ++#define _BC_DTS_GLOB_LNX_H_ ++ ++#ifdef __LINUX_USER__ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DRVIFLIB_INT_API ++ ++#endif ++ ++#include "bc_dts_defs.h" ++#include "bcm_70012_regs.h" /* Link Register defs */ ++ ++#define CRYSTALHD_API_NAME "crystalhd" ++#define CRYSTALHD_API_DEV_NAME "/dev/crystalhd" ++ ++/* ++ * These are SW stack tunable parameters shared ++ * between the driver and the application. 
++ */ ++enum _BC_DTS_GLOBALS { ++ BC_MAX_FW_CMD_BUFF_SZ = 0x40, /* FW passthrough cmd/rsp buffer size */ ++ PCI_CFG_SIZE = 256, /* PCI config size buffer */ ++ BC_IOCTL_DATA_POOL_SIZE = 8, /* BC_IOCTL_DATA Pool size */ ++ BC_LINK_MAX_OPENS = 3, /* Maximum simultaneous opens*/ ++ BC_LINK_MAX_SGLS = 1024, /* Maximum SG elements 4M/4K */ ++ BC_TX_LIST_CNT = 2, /* Max Tx DMA Rings */ ++ BC_RX_LIST_CNT = 8, /* Max Rx DMA Rings*/ ++ BC_PROC_OUTPUT_TIMEOUT = 3000, /* Milliseconds */ ++ BC_INFIFO_THRESHOLD = 0x10000, ++}; ++ ++typedef struct _BC_CMD_REG_ACC { ++ uint32_t Offset; ++ uint32_t Value; ++} BC_CMD_REG_ACC; ++ ++typedef struct _BC_CMD_DEV_MEM { ++ uint32_t StartOff; ++ uint32_t NumDwords; ++ uint32_t Rsrd; ++} BC_CMD_DEV_MEM; ++ ++/* FW Passthrough command structure */ ++enum _bc_fw_cmd_flags { ++ BC_FW_CMD_FLAGS_NONE = 0, ++ BC_FW_CMD_PIB_QS = 0x01, ++}; ++ ++typedef struct _BC_FW_CMD { ++ uint32_t cmd[BC_MAX_FW_CMD_BUFF_SZ]; ++ uint32_t rsp[BC_MAX_FW_CMD_BUFF_SZ]; ++ uint32_t flags; ++ uint32_t add_data; ++} BC_FW_CMD, *PBC_FW_CMD; ++ ++typedef struct _BC_HW_TYPE { ++ uint16_t PciDevId; ++ uint16_t PciVenId; ++ uint8_t HwRev; ++ uint8_t Align[3]; ++} BC_HW_TYPE; ++ ++typedef struct _BC_PCI_CFG { ++ uint32_t Size; ++ uint32_t Offset; ++ uint8_t pci_cfg_space[PCI_CFG_SIZE]; ++} BC_PCI_CFG; ++ ++typedef struct _BC_VERSION_INFO_ { ++ uint8_t DriverMajor; ++ uint8_t DriverMinor; ++ uint16_t DriverRevision; ++} BC_VERSION_INFO; ++ ++typedef struct _BC_START_RX_CAP_ { ++ uint32_t Rsrd; ++ uint32_t StartDeliveryThsh; ++ uint32_t PauseThsh; ++ uint32_t ResumeThsh; ++} BC_START_RX_CAP; ++ ++typedef struct _BC_FLUSH_RX_CAP_ { ++ uint32_t Rsrd; ++ uint32_t bDiscardOnly; ++} BC_FLUSH_RX_CAP; ++ ++typedef struct _BC_DTS_STATS { ++ uint8_t drvRLL; ++ uint8_t drvFLL; ++ uint8_t eosDetected; ++ uint8_t pwr_state_change; ++ ++ /* Stats from App */ ++ uint32_t opFrameDropped; ++ uint32_t opFrameCaptured; ++ uint32_t ipSampleCnt; ++ uint64_t ipTotalSize; ++ uint32_t reptdFrames; ++ uint32_t pauseCount; ++ uint32_t pibMisses; ++ uint32_t discCounter; ++ ++ /* Stats from Driver */ ++ uint32_t TxFifoBsyCnt; ++ uint32_t intCount; ++ uint32_t DrvIgnIntrCnt; ++ uint32_t DrvTotalFrmDropped; ++ uint32_t DrvTotalHWErrs; ++ uint32_t DrvTotalPIBFlushCnt; ++ uint32_t DrvTotalFrmCaptured; ++ uint32_t DrvPIBMisses; ++ uint32_t DrvPauseTime; ++ uint32_t DrvRepeatedFrms; ++ uint32_t res1[13]; ++ ++} BC_DTS_STATS; ++ ++typedef struct _BC_PROC_INPUT_ { ++ uint8_t *pDmaBuff; ++ uint32_t BuffSz; ++ uint8_t Mapped; ++ uint8_t Encrypted; ++ uint8_t Rsrd[2]; ++ uint32_t DramOffset; /* For debug use only */ ++} BC_PROC_INPUT, *PBC_PROC_INPUT; ++ ++typedef struct _BC_DEC_YUV_BUFFS { ++ uint32_t b422Mode; ++ uint8_t *YuvBuff; ++ uint32_t YuvBuffSz; ++ uint32_t UVbuffOffset; ++ uint32_t YBuffDoneSz; ++ uint32_t UVBuffDoneSz; ++ uint32_t RefCnt; ++} BC_DEC_YUV_BUFFS; ++ ++enum _DECOUT_COMPLETION_FLAGS{ ++ COMP_FLAG_NO_INFO = 0x00, ++ COMP_FLAG_FMT_CHANGE = 0x01, ++ COMP_FLAG_PIB_VALID = 0x02, ++ COMP_FLAG_DATA_VALID = 0x04, ++ COMP_FLAG_DATA_ENC = 0x08, ++ COMP_FLAG_DATA_BOT = 0x10, ++}; ++ ++typedef struct _BC_DEC_OUT_BUFF{ ++ BC_DEC_YUV_BUFFS OutPutBuffs; ++ BC_PIC_INFO_BLOCK PibInfo; ++ uint32_t Flags; ++ uint32_t BadFrCnt; ++} BC_DEC_OUT_BUFF; ++ ++typedef struct _BC_NOTIFY_MODE { ++ uint32_t Mode; ++ uint32_t Rsvr[3]; ++} BC_NOTIFY_MODE; ++ ++typedef struct _BC_CLOCK { ++ uint32_t clk; ++ uint32_t Rsvr[3]; ++} BC_CLOCK; ++ ++typedef struct _BC_IOCTL_DATA { ++ BC_STATUS RetSts; ++ uint32_t IoctlDataSz; ++ uint32_t Timeout; ++ 
union { ++ BC_CMD_REG_ACC regAcc; ++ BC_CMD_DEV_MEM devMem; ++ BC_FW_CMD fwCmd; ++ BC_HW_TYPE hwType; ++ BC_PCI_CFG pciCfg; ++ BC_VERSION_INFO VerInfo; ++ BC_PROC_INPUT ProcInput; ++ BC_DEC_YUV_BUFFS RxBuffs; ++ BC_DEC_OUT_BUFF DecOutData; ++ BC_START_RX_CAP RxCap; ++ BC_FLUSH_RX_CAP FlushRxCap; ++ BC_DTS_STATS drvStat; ++ BC_NOTIFY_MODE NotifyMode; ++ BC_CLOCK clockValue; ++ } u; ++ struct _BC_IOCTL_DATA *next; ++} BC_IOCTL_DATA; ++ ++typedef enum _BC_DRV_CMD{ ++ DRV_CMD_VERSION = 0, /* Get SW version */ ++ DRV_CMD_GET_HWTYPE, /* Get HW version and type Dozer/Tank */ ++ DRV_CMD_REG_RD, /* Read Device Register */ ++ DRV_CMD_REG_WR, /* Write Device Register */ ++ DRV_CMD_FPGA_RD, /* Read FPGA Register */ ++ DRV_CMD_FPGA_WR, /* Wrtie FPGA Reister */ ++ DRV_CMD_MEM_RD, /* Read Device Memory */ ++ DRV_CMD_MEM_WR, /* Write Device Memory */ ++ DRV_CMD_RD_PCI_CFG, /* Read PCI Config Space */ ++ DRV_CMD_WR_PCI_CFG, /* Write the PCI Configuration Space*/ ++ DRV_CMD_FW_DOWNLOAD, /* Download Firmware */ ++ DRV_ISSUE_FW_CMD, /* Issue FW Cmd (pass through mode) */ ++ DRV_CMD_PROC_INPUT, /* Process Input Sample */ ++ DRV_CMD_ADD_RXBUFFS, /* Add Rx side buffers to driver pool */ ++ DRV_CMD_FETCH_RXBUFF, /* Get Rx DMAed buffer */ ++ DRV_CMD_START_RX_CAP, /* Start Rx Buffer Capture */ ++ DRV_CMD_FLUSH_RX_CAP, /* Stop the capture for now...we will enhance this later*/ ++ DRV_CMD_GET_DRV_STAT, /* Get Driver Internal Statistics */ ++ DRV_CMD_RST_DRV_STAT, /* Reset Driver Internal Statistics */ ++ DRV_CMD_NOTIFY_MODE, /* Notify the Mode to driver in which the application is Operating*/ ++ DRV_CMD_CHANGE_CLOCK, /* Change the core clock to either save power or improve performance */ ++ ++ /* MUST be the last one.. */ ++ DRV_CMD_END, /* End of the List.. */ ++} BC_DRV_CMD; ++ ++#define BC_IOC_BASE 'b' ++#define BC_IOC_VOID _IOC_NONE ++#define BC_IOC_IOWR(nr, type) _IOWR(BC_IOC_BASE, nr, type) ++#define BC_IOCTL_MB BC_IOCTL_DATA ++ ++#define BCM_IOC_GET_VERSION BC_IOC_IOWR(DRV_CMD_VERSION, BC_IOCTL_MB) ++#define BCM_IOC_GET_HWTYPE BC_IOC_IOWR(DRV_CMD_GET_HWTYPE, BC_IOCTL_MB) ++#define BCM_IOC_REG_RD BC_IOC_IOWR(DRV_CMD_REG_RD, BC_IOCTL_MB) ++#define BCM_IOC_REG_WR BC_IOC_IOWR(DRV_CMD_REG_WR, BC_IOCTL_MB) ++#define BCM_IOC_MEM_RD BC_IOC_IOWR(DRV_CMD_MEM_RD, BC_IOCTL_MB) ++#define BCM_IOC_MEM_WR BC_IOC_IOWR(DRV_CMD_MEM_WR, BC_IOCTL_MB) ++#define BCM_IOC_FPGA_RD BC_IOC_IOWR(DRV_CMD_FPGA_RD, BC_IOCTL_MB) ++#define BCM_IOC_FPGA_WR BC_IOC_IOWR(DRV_CMD_FPGA_WR, BC_IOCTL_MB) ++#define BCM_IOC_RD_PCI_CFG BC_IOC_IOWR(DRV_CMD_RD_PCI_CFG, BC_IOCTL_MB) ++#define BCM_IOC_WR_PCI_CFG BC_IOC_IOWR(DRV_CMD_WR_PCI_CFG, BC_IOCTL_MB) ++#define BCM_IOC_PROC_INPUT BC_IOC_IOWR(DRV_CMD_PROC_INPUT, BC_IOCTL_MB) ++#define BCM_IOC_ADD_RXBUFFS BC_IOC_IOWR(DRV_CMD_ADD_RXBUFFS, BC_IOCTL_MB) ++#define BCM_IOC_FETCH_RXBUFF BC_IOC_IOWR(DRV_CMD_FETCH_RXBUFF, BC_IOCTL_MB) ++#define BCM_IOC_FW_CMD BC_IOC_IOWR(DRV_ISSUE_FW_CMD, BC_IOCTL_MB) ++#define BCM_IOC_START_RX_CAP BC_IOC_IOWR(DRV_CMD_START_RX_CAP, BC_IOCTL_MB) ++#define BCM_IOC_FLUSH_RX_CAP BC_IOC_IOWR(DRV_CMD_FLUSH_RX_CAP, BC_IOCTL_MB) ++#define BCM_IOC_GET_DRV_STAT BC_IOC_IOWR(DRV_CMD_GET_DRV_STAT, BC_IOCTL_MB) ++#define BCM_IOC_RST_DRV_STAT BC_IOC_IOWR(DRV_CMD_RST_DRV_STAT, BC_IOCTL_MB) ++#define BCM_IOC_NOTIFY_MODE BC_IOC_IOWR(DRV_CMD_NOTIFY_MODE, BC_IOCTL_MB) ++#define BCM_IOC_FW_DOWNLOAD BC_IOC_IOWR(DRV_CMD_FW_DOWNLOAD, BC_IOCTL_MB) ++#define BCM_IOC_CHG_CLK BC_IOC_IOWR(DRV_CMD_CHANGE_CLOCK, BC_IOCTL_MB) ++#define BCM_IOC_END BC_IOC_VOID ++ ++/* Wrapper for main IOCTL data */ ++typedef 
struct _crystalhd_ioctl_data { ++ BC_IOCTL_DATA udata; /* IOCTL from App..*/ ++ uint32_t u_id; /* Driver specific user ID */ ++ uint32_t cmd; /* Cmd ID for driver's use. */ ++ void *add_cdata; /* Additional command specific data..*/ ++ uint32_t add_cdata_sz; /* Additional command specific data size */ ++ struct _crystalhd_ioctl_data *next; /* List/Fifo management */ ++} crystalhd_ioctl_data; ++ ++ ++enum _crystalhd_kmod_ver{ ++ crystalhd_kmod_major = 0, ++ crystalhd_kmod_minor = 9, ++ crystalhd_kmod_rev = 27, ++}; ++ ++#endif +diff --git a/drivers/staging/crystalhd/bc_dts_types.h b/drivers/staging/crystalhd/bc_dts_types.h +new file mode 100644 +index 0000000..ac0c817 +--- /dev/null ++++ b/drivers/staging/crystalhd/bc_dts_types.h +@@ -0,0 +1,121 @@ ++/******************************************************************** ++ * Copyright(c) 2006-2009 Broadcom Corporation. ++ * ++ * Name: bc_dts_types.h ++ * ++ * Description: Data types ++ * ++ * AU ++ * ++ * HISTORY: ++ * ++ ******************************************************************** ++ * This header is free software: you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License as published ++ * by the Free Software Foundation, either version 2.1 of the License. ++ * ++ * This header is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU Lesser General Public License for more details. ++ * You should have received a copy of the GNU Lesser General Public License ++ * along with this header. If not, see . ++ *******************************************************************/ ++ ++#ifndef _BC_DTS_TYPES_H_ ++#define _BC_DTS_TYPES_H_ ++ ++#ifdef __LINUX_USER__ // Don't include these for KERNEL.. ++#include ++#endif ++ ++#if defined(_WIN64) || defined(_WIN32) ++typedef uint32_t U32; ++typedef int32_t S32; ++typedef uint16_t U16; ++typedef int16_t S16; ++typedef unsigned char U8; ++typedef char S8; ++#endif ++ ++#ifndef PVOID ++typedef void *PVOID; ++#endif ++ ++#ifndef BOOL ++typedef int BOOL; ++#endif ++ ++#ifdef WIN32 ++ typedef unsigned __int64 U64; ++#elif defined(_WIN64) ++ typedef uint64_t U64; ++#endif ++ ++#ifdef _WIN64 ++#if !(defined(POINTER_32)) ++#define POINTER_32 __ptr32 ++#endif ++#else /* _WIN32 */ ++#define POINTER_32 ++#endif ++ ++#if defined(__KERNEL__) || defined(__LINUX_USER__) ++ ++#ifdef __LINUX_USER__ /* Don't include these for KERNEL */ ++typedef uint32_t ULONG; ++typedef int32_t LONG; ++typedef void *HANDLE; ++#ifndef VOID ++typedef void VOID; ++#endif ++typedef void *LPVOID; ++typedef uint32_t DWORD; ++typedef uint32_t UINT32; ++typedef uint32_t *LPDWORD; ++typedef unsigned char *PUCHAR; ++ ++#ifndef TRUE ++ #define TRUE 1 ++#endif ++ ++#ifndef FALSE ++ #define FALSE 0 ++#endif ++ ++#define TEXT ++ ++#else ++ ++/* For Kernel usage.. 
*/ ++typedef bool bc_bool_t; ++#endif ++ ++#else ++ ++#ifndef uint64_t ++typedef struct _uint64_t { ++ uint32_t low_dw; ++ uint32_t hi_dw; ++} uint64_t; ++#endif ++ ++#ifndef int32_t ++typedef signed long int32_t; ++#endif ++ ++#ifndef uint32_t ++typedef unsigned long uint32_t; ++#endif ++ ++#ifndef uint16_t ++typedef unsigned short uint16_t; ++#endif ++ ++#ifndef uint8_t ++typedef unsigned char uint8_t; ++#endif ++#endif ++ ++#endif ++ +diff --git a/drivers/staging/crystalhd/bcm_70012_regs.h b/drivers/staging/crystalhd/bcm_70012_regs.h +new file mode 100644 +index 0000000..6922f54 +--- /dev/null ++++ b/drivers/staging/crystalhd/bcm_70012_regs.h +@@ -0,0 +1,757 @@ ++/*************************************************************************** ++ * Copyright (c) 1999-2009, Broadcom Corporation. ++ * ++ * Name: bcm_70012_regs.h ++ * ++ * Description: BCM70012 registers ++ * ++ ******************************************************************** ++ * This header is free software: you can redistribute it and/or modify ++ * it under the terms of the GNU Lesser General Public License as published ++ * by the Free Software Foundation, either version 2.1 of the License. ++ * ++ * This header is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU Lesser General Public License for more details. ++ * You should have received a copy of the GNU Lesser General Public License ++ * along with this header. If not, see . ++ ***************************************************************************/ ++ ++#ifndef MACFILE_H__ ++#define MACFILE_H__ ++ ++/** ++ * m = memory, c = core, r = register, f = field, d = data. ++ */ ++#if !defined(GET_FIELD) && !defined(SET_FIELD) ++#define BRCM_ALIGN(c,r,f) c##_##r##_##f##_ALIGN ++#define BRCM_BITS(c,r,f) c##_##r##_##f##_BITS ++#define BRCM_MASK(c,r,f) c##_##r##_##f##_MASK ++#define BRCM_SHIFT(c,r,f) c##_##r##_##f##_SHIFT ++ ++#define GET_FIELD(m,c,r,f) \ ++ ((((m) & BRCM_MASK(c,r,f)) >> BRCM_SHIFT(c,r,f)) << BRCM_ALIGN(c,r,f)) ++ ++#define SET_FIELD(m,c,r,f,d) \ ++ ((m) = (((m) & ~BRCM_MASK(c,r,f)) | ((((d) >> BRCM_ALIGN(c,r,f)) << \ ++ BRCM_SHIFT(c,r,f)) & BRCM_MASK(c,r,f))) \ ++ ) ++ ++#define SET_TYPE_FIELD(m,c,r,f,d) SET_FIELD(m,c,r,f,c##_##d) ++#define SET_NAME_FIELD(m,c,r,f,d) SET_FIELD(m,c,r,f,c##_##r##_##f##_##d) ++#define SET_VALUE_FIELD(m,c,r,f,d) SET_FIELD(m,c,r,f,d) ++ ++#endif /* GET & SET */ ++ ++/**************************************************************************** ++ * Core Enums. 
++ ***************************************************************************/ ++/**************************************************************************** ++ * Enums: AES_RGR_BRIDGE_RESET_CTRL ++ ***************************************************************************/ ++#define AES_RGR_BRIDGE_RESET_CTRL_DEASSERT 0 ++#define AES_RGR_BRIDGE_RESET_CTRL_ASSERT 1 ++ ++/**************************************************************************** ++ * Enums: CCE_RGR_BRIDGE_RESET_CTRL ++ ***************************************************************************/ ++#define CCE_RGR_BRIDGE_RESET_CTRL_DEASSERT 0 ++#define CCE_RGR_BRIDGE_RESET_CTRL_ASSERT 1 ++ ++/**************************************************************************** ++ * Enums: DBU_RGR_BRIDGE_RESET_CTRL ++ ***************************************************************************/ ++#define DBU_RGR_BRIDGE_RESET_CTRL_DEASSERT 0 ++#define DBU_RGR_BRIDGE_RESET_CTRL_ASSERT 1 ++ ++/**************************************************************************** ++ * Enums: DCI_RGR_BRIDGE_RESET_CTRL ++ ***************************************************************************/ ++#define DCI_RGR_BRIDGE_RESET_CTRL_DEASSERT 0 ++#define DCI_RGR_BRIDGE_RESET_CTRL_ASSERT 1 ++ ++/**************************************************************************** ++ * Enums: GISB_ARBITER_DEASSERT_ASSERT ++ ***************************************************************************/ ++#define GISB_ARBITER_DEASSERT_ASSERT_DEASSERT 0 ++#define GISB_ARBITER_DEASSERT_ASSERT_ASSERT 1 ++ ++/**************************************************************************** ++ * Enums: GISB_ARBITER_UNMASK_MASK ++ ***************************************************************************/ ++#define GISB_ARBITER_UNMASK_MASK_UNMASK 0 ++#define GISB_ARBITER_UNMASK_MASK_MASK 1 ++ ++/**************************************************************************** ++ * Enums: GISB_ARBITER_DISABLE_ENABLE ++ ***************************************************************************/ ++#define GISB_ARBITER_DISABLE_ENABLE_DISABLE 0 ++#define GISB_ARBITER_DISABLE_ENABLE_ENABLE 1 ++ ++/**************************************************************************** ++ * Enums: I2C_GR_BRIDGE_RESET_CTRL ++ ***************************************************************************/ ++#define I2C_GR_BRIDGE_RESET_CTRL_DEASSERT 0 ++#define I2C_GR_BRIDGE_RESET_CTRL_ASSERT 1 ++ ++/**************************************************************************** ++ * Enums: MISC_GR_BRIDGE_RESET_CTRL ++ ***************************************************************************/ ++#define MISC_GR_BRIDGE_RESET_CTRL_DEASSERT 0 ++#define MISC_GR_BRIDGE_RESET_CTRL_ASSERT 1 ++ ++/**************************************************************************** ++ * Enums: OTP_GR_BRIDGE_RESET_CTRL ++ ***************************************************************************/ ++#define OTP_GR_BRIDGE_RESET_CTRL_DEASSERT 0 ++#define OTP_GR_BRIDGE_RESET_CTRL_ASSERT 1 ++ ++/**************************************************************************** ++ * BCM70012_TGT_TOP_PCIE_CFG ++ ***************************************************************************/ ++#define PCIE_CFG_DEVICE_VENDOR_ID 0x00000000 /* DEVICE_VENDOR_ID Register */ ++#define PCIE_CFG_STATUS_COMMAND 0x00000004 /* STATUS_COMMAND Register */ ++#define PCIE_CFG_PCI_CLASSCODE_AND_REVISION_ID 0x00000008 /* PCI_CLASSCODE_AND_REVISION_ID Register */ ++#define PCIE_CFG_BIST_HEADER_TYPE_LATENCY_TIMER_CACHE_LINE_SIZE 0x0000000c /* 
BIST_HEADER_TYPE_LATENCY_TIMER_CACHE_LINE_SIZE Register */ ++#define PCIE_CFG_BASE_ADDRESS_1 0x00000010 /* BASE_ADDRESS_1 Register */ ++#define PCIE_CFG_BASE_ADDRESS_2 0x00000014 /* BASE_ADDRESS_2 Register */ ++#define PCIE_CFG_BASE_ADDRESS_3 0x00000018 /* BASE_ADDRESS_3 Register */ ++#define PCIE_CFG_BASE_ADDRESS_4 0x0000001c /* BASE_ADDRESS_4 Register */ ++#define PCIE_CFG_CARDBUS_CIS_POINTER 0x00000028 /* CARDBUS_CIS_POINTER Register */ ++#define PCIE_CFG_SUBSYSTEM_DEVICE_VENDOR_ID 0x0000002c /* SUBSYSTEM_DEVICE_VENDOR_ID Register */ ++#define PCIE_CFG_EXPANSION_ROM_BASE_ADDRESS 0x00000030 /* EXPANSION_ROM_BASE_ADDRESS Register */ ++#define PCIE_CFG_CAPABILITIES_POINTER 0x00000034 /* CAPABILITIES_POINTER Register */ ++#define PCIE_CFG_INTERRUPT 0x0000003c /* INTERRUPT Register */ ++#define PCIE_CFG_VPD_CAPABILITIES 0x00000040 /* VPD_CAPABILITIES Register */ ++#define PCIE_CFG_VPD_DATA 0x00000044 /* VPD_DATA Register */ ++#define PCIE_CFG_POWER_MANAGEMENT_CAPABILITY 0x00000048 /* POWER_MANAGEMENT_CAPABILITY Register */ ++#define PCIE_CFG_POWER_MANAGEMENT_CONTROL_STATUS 0x0000004c /* POWER_MANAGEMENT_CONTROL_STATUS Register */ ++#define PCIE_CFG_MSI_CAPABILITY_HEADER 0x00000050 /* MSI_CAPABILITY_HEADER Register */ ++#define PCIE_CFG_MSI_LOWER_ADDRESS 0x00000054 /* MSI_LOWER_ADDRESS Register */ ++#define PCIE_CFG_MSI_UPPER_ADDRESS_REGISTER 0x00000058 /* MSI_UPPER_ADDRESS_REGISTER Register */ ++#define PCIE_CFG_MSI_DATA 0x0000005c /* MSI_DATA Register */ ++#define PCIE_CFG_BROADCOM_VENDOR_SPECIFIC_CAPABILITY_HEADER 0x00000060 /* BROADCOM_VENDOR_SPECIFIC_CAPABILITY_HEADER Register */ ++#define PCIE_CFG_RESET_COUNTERS_INITIAL_VALUES 0x00000064 /* RESET_COUNTERS_INITIAL_VALUES Register */ ++#define PCIE_CFG_MISCELLANEOUS_HOST_CONTROL 0x00000068 /* MISCELLANEOUS_HOST_CONTROL Register */ ++#define PCIE_CFG_SPARE 0x0000006c /* SPARE Register */ ++#define PCIE_CFG_PCI_STATE 0x00000070 /* PCI_STATE Register */ ++#define PCIE_CFG_CLOCK_CONTROL 0x00000074 /* CLOCK_CONTROL Register */ ++#define PCIE_CFG_REGISTER_BASE 0x00000078 /* REGISTER_BASE Register */ ++#define PCIE_CFG_MEMORY_BASE 0x0000007c /* MEMORY_BASE Register */ ++#define PCIE_CFG_REGISTER_DATA 0x00000080 /* REGISTER_DATA Register */ ++#define PCIE_CFG_MEMORY_DATA 0x00000084 /* MEMORY_DATA Register */ ++#define PCIE_CFG_EXPANSION_ROM_BAR_SIZE 0x00000088 /* EXPANSION_ROM_BAR_SIZE Register */ ++#define PCIE_CFG_EXPANSION_ROM_ADDRESS 0x0000008c /* EXPANSION_ROM_ADDRESS Register */ ++#define PCIE_CFG_EXPANSION_ROM_DATA 0x00000090 /* EXPANSION_ROM_DATA Register */ ++#define PCIE_CFG_VPD_INTERFACE 0x00000094 /* VPD_INTERFACE Register */ ++#define PCIE_CFG_UNDI_RECEIVE_BD_STANDARD_PRODUCER_RING_PRODUCER_INDEX_MAILBOX_UPPER 0x00000098 /* UNDI_RECEIVE_BD_STANDARD_PRODUCER_RING_PRODUCER_INDEX_MAILBOX_UPPER Register */ ++#define PCIE_CFG_UNDI_RECEIVE_BD_STANDARD_PRODUCER_RING_PRODUCER_INDEX_MAILBOX_LOWER 0x0000009c /* UNDI_RECEIVE_BD_STANDARD_PRODUCER_RING_PRODUCER_INDEX_MAILBOX_LOWER Register */ ++#define PCIE_CFG_UNDI_RECEIVE_RETURN_RING_CONSUMER_INDEX_UPPER 0x000000a0 /* UNDI_RECEIVE_RETURN_RING_CONSUMER_INDEX_UPPER Register */ ++#define PCIE_CFG_UNDI_RECEIVE_RETURN_RING_CONSUMER_INDEX_LOWER 0x000000a4 /* UNDI_RECEIVE_RETURN_RING_CONSUMER_INDEX_LOWER Register */ ++#define PCIE_CFG_UNDI_SEND_BD_PRODUCER_INDEX_MAILBOX_UPPER 0x000000a8 /* UNDI_SEND_BD_PRODUCER_INDEX_MAILBOX_UPPER Register */ ++#define PCIE_CFG_UNDI_SEND_BD_PRODUCER_INDEX_MAILBOX_LOWER 0x000000ac /* UNDI_SEND_BD_PRODUCER_INDEX_MAILBOX_LOWER Register */ ++#define 
PCIE_CFG_INT_MAILBOX_UPPER 0x000000b0 /* INT_MAILBOX_UPPER Register */ ++#define PCIE_CFG_INT_MAILBOX_LOWER 0x000000b4 /* INT_MAILBOX_LOWER Register */ ++#define PCIE_CFG_PRODUCT_ID_AND_ASIC_REVISION 0x000000bc /* PRODUCT_ID_AND_ASIC_REVISION Register */ ++#define PCIE_CFG_FUNCTION_EVENT 0x000000c0 /* FUNCTION_EVENT Register */ ++#define PCIE_CFG_FUNCTION_EVENT_MASK 0x000000c4 /* FUNCTION_EVENT_MASK Register */ ++#define PCIE_CFG_FUNCTION_PRESENT 0x000000c8 /* FUNCTION_PRESENT Register */ ++#define PCIE_CFG_PCIE_CAPABILITIES 0x000000cc /* PCIE_CAPABILITIES Register */ ++#define PCIE_CFG_DEVICE_CAPABILITIES 0x000000d0 /* DEVICE_CAPABILITIES Register */ ++#define PCIE_CFG_DEVICE_STATUS_CONTROL 0x000000d4 /* DEVICE_STATUS_CONTROL Register */ ++#define PCIE_CFG_LINK_CAPABILITY 0x000000d8 /* LINK_CAPABILITY Register */ ++#define PCIE_CFG_LINK_STATUS_CONTROL 0x000000dc /* LINK_STATUS_CONTROL Register */ ++#define PCIE_CFG_DEVICE_CAPABILITIES_2 0x000000f0 /* DEVICE_CAPABILITIES_2 Register */ ++#define PCIE_CFG_DEVICE_STATUS_CONTROL_2 0x000000f4 /* DEVICE_STATUS_CONTROL_2 Register */ ++#define PCIE_CFG_LINK_CAPABILITIES_2 0x000000f8 /* LINK_CAPABILITIES_2 Register */ ++#define PCIE_CFG_LINK_STATUS_CONTROL_2 0x000000fc /* LINK_STATUS_CONTROL_2 Register */ ++#define PCIE_CFG_ADVANCED_ERROR_REPORTING_ENHANCED_CAPABILITY_HEADER 0x00000100 /* ADVANCED_ERROR_REPORTING_ENHANCED_CAPABILITY_HEADER Register */ ++#define PCIE_CFG_UNCORRECTABLE_ERROR_STATUS 0x00000104 /* UNCORRECTABLE_ERROR_STATUS Register */ ++#define PCIE_CFG_UNCORRECTABLE_ERROR_MASK 0x00000108 /* UNCORRECTABLE_ERROR_MASK Register */ ++#define PCIE_CFG_UNCORRECTABLE_ERROR_SEVERITY 0x0000010c /* UNCORRECTABLE_ERROR_SEVERITY Register */ ++#define PCIE_CFG_CORRECTABLE_ERROR_STATUS 0x00000110 /* CORRECTABLE_ERROR_STATUS Register */ ++#define PCIE_CFG_CORRECTABLE_ERROR_MASK 0x00000114 /* CORRECTABLE_ERROR_MASK Register */ ++#define PCIE_CFG_ADVANCED_ERROR_CAPABILITIES_AND_CONTROL 0x00000118 /* ADVANCED_ERROR_CAPABILITIES_AND_CONTROL Register */ ++#define PCIE_CFG_HEADER_LOG_1 0x0000011c /* HEADER_LOG_1 Register */ ++#define PCIE_CFG_HEADER_LOG_2 0x00000120 /* HEADER_LOG_2 Register */ ++#define PCIE_CFG_HEADER_LOG_3 0x00000124 /* HEADER_LOG_3 Register */ ++#define PCIE_CFG_HEADER_LOG_4 0x00000128 /* HEADER_LOG_4 Register */ ++#define PCIE_CFG_VIRTUAL_CHANNEL_ENHANCED_CAPABILITY_HEADER 0x0000013c /* VIRTUAL_CHANNEL_ENHANCED_CAPABILITY_HEADER Register */ ++#define PCIE_CFG_PORT_VC_CAPABILITY 0x00000140 /* PORT_VC_CAPABILITY Register */ ++#define PCIE_CFG_PORT_VC_CAPABILITY_2 0x00000144 /* PORT_VC_CAPABILITY_2 Register */ ++#define PCIE_CFG_PORT_VC_STATUS_CONTROL 0x00000148 /* PORT_VC_STATUS_CONTROL Register */ ++#define PCIE_CFG_VC_RESOURCE_CAPABILITY 0x0000014c /* VC_RESOURCE_CAPABILITY Register */ ++#define PCIE_CFG_VC_RESOURCE_CONTROL 0x00000150 /* VC_RESOURCE_CONTROL Register */ ++#define PCIE_CFG_VC_RESOURCE_STATUS 0x00000154 /* VC_RESOURCE_STATUS Register */ ++#define PCIE_CFG_DEVICE_SERIAL_NO_ENHANCED_CAPABILITY_HEADER 0x00000160 /* DEVICE_SERIAL_NO_ENHANCED_CAPABILITY_HEADER Register */ ++#define PCIE_CFG_DEVICE_SERIAL_NO_LOWER_DW 0x00000164 /* DEVICE_SERIAL_NO_LOWER_DW Register */ ++#define PCIE_CFG_DEVICE_SERIAL_NO_UPPER_DW 0x00000168 /* DEVICE_SERIAL_NO_UPPER_DW Register */ ++#define PCIE_CFG_POWER_BUDGETING_ENHANCED_CAPABILITY_HEADER 0x0000016c /* POWER_BUDGETING_ENHANCED_CAPABILITY_HEADER Register */ ++#define PCIE_CFG_POWER_BUDGETING_DATA_SELECT 0x00000170 /* POWER_BUDGETING_DATA_SELECT Register */ ++#define 
PCIE_CFG_POWER_BUDGETING_DATA 0x00000174 /* POWER_BUDGETING_DATA Register */ ++#define PCIE_CFG_POWER_BUDGETING_CAPABILITY 0x00000178 /* POWER_BUDGETING_CAPABILITY Register */ ++#define PCIE_CFG_FIRMWARE_POWER_BUDGETING_2_1 0x0000017c /* FIRMWARE_POWER_BUDGETING_2_1 Register */ ++#define PCIE_CFG_FIRMWARE_POWER_BUDGETING_4_3 0x00000180 /* FIRMWARE_POWER_BUDGETING_4_3 Register */ ++#define PCIE_CFG_FIRMWARE_POWER_BUDGETING_6_5 0x00000184 /* FIRMWARE_POWER_BUDGETING_6_5 Register */ ++#define PCIE_CFG_FIRMWARE_POWER_BUDGETING_8_7 0x00000188 /* FIRMWARE_POWER_BUDGETING_8_7 Register */ ++#define PCIE_CFG_PCIE_1_1_ADVISORY_NON_FATAL_ERROR_MASKING 0x0000018c /* PCIE_1_1_ADVISORY_NON_FATAL_ERROR_MASKING Register */ ++ ++ ++/**************************************************************************** ++ * BCM70012_TGT_TOP_PCIE_TL ++ ***************************************************************************/ ++#define PCIE_TL_TL_CONTROL 0x00000400 /* TL_CONTROL Register */ ++#define PCIE_TL_TRANSACTION_CONFIGURATION 0x00000404 /* TRANSACTION_CONFIGURATION Register */ ++ ++ ++/**************************************************************************** ++ * BCM70012_TGT_TOP_PCIE_DLL ++ ***************************************************************************/ ++#define PCIE_DLL_DATA_LINK_CONTROL 0x00000500 /* DATA_LINK_CONTROL Register */ ++#define PCIE_DLL_DATA_LINK_STATUS 0x00000504 /* DATA_LINK_STATUS Register */ ++ ++ ++/**************************************************************************** ++ * BCM70012_TGT_TOP_INTR ++ ***************************************************************************/ ++#define INTR_INTR_STATUS 0x00000700 /* Interrupt Status Register */ ++#define INTR_INTR_SET 0x00000704 /* Interrupt Set Register */ ++#define INTR_INTR_CLR_REG 0x00000708 /* Interrupt Clear Register */ ++#define INTR_INTR_MSK_STS_REG 0x0000070c /* Interrupt Mask Status Register */ ++#define INTR_INTR_MSK_SET_REG 0x00000710 /* Interrupt Mask Set Register */ ++#define INTR_INTR_MSK_CLR_REG 0x00000714 /* Interrupt Mask Clear Register */ ++#define INTR_EOI_CTRL 0x00000720 /* End of interrupt control register */ ++ ++ ++/**************************************************************************** ++ * BCM70012_MISC_TOP_MISC1 ++ ***************************************************************************/ ++#define MISC1_TX_FIRST_DESC_L_ADDR_LIST0 0x00000c00 /* Tx DMA Descriptor List0 First Descriptor lower Address */ ++#define MISC1_TX_FIRST_DESC_U_ADDR_LIST0 0x00000c04 /* Tx DMA Descriptor List0 First Descriptor Upper Address */ ++#define MISC1_TX_FIRST_DESC_L_ADDR_LIST1 0x00000c08 /* Tx DMA Descriptor List1 First Descriptor Lower Address */ ++#define MISC1_TX_FIRST_DESC_U_ADDR_LIST1 0x00000c0c /* Tx DMA Descriptor List1 First Descriptor Upper Address */ ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS 0x00000c10 /* Tx DMA Software Descriptor List Control and Status */ ++#define MISC1_TX_DMA_ERROR_STATUS 0x00000c18 /* Tx DMA Engine Error Status */ ++#define MISC1_TX_DMA_LIST0_CUR_DESC_L_ADDR 0x00000c1c /* Tx DMA List0 Current Descriptor Lower Address */ ++#define MISC1_TX_DMA_LIST0_CUR_DESC_U_ADDR 0x00000c20 /* Tx DMA List0 Current Descriptor Upper Address */ ++#define MISC1_TX_DMA_LIST0_CUR_BYTE_CNT_REM 0x00000c24 /* Tx DMA List0 Current Descriptor Upper Address */ ++#define MISC1_TX_DMA_LIST1_CUR_DESC_L_ADDR 0x00000c28 /* Tx DMA List1 Current Descriptor Lower Address */ ++#define MISC1_TX_DMA_LIST1_CUR_DESC_U_ADDR 0x00000c2c /* Tx DMA List1 Current Descriptor Upper Address */ ++#define 
MISC1_TX_DMA_LIST1_CUR_BYTE_CNT_REM 0x00000c30 /* Tx DMA List1 Current Descriptor Upper Address */ ++#define MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0 0x00000c34 /* Y Rx Descriptor List0 First Descriptor Lower Address */ ++#define MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST0 0x00000c38 /* Y Rx Descriptor List0 First Descriptor Upper Address */ ++#define MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1 0x00000c3c /* Y Rx Descriptor List1 First Descriptor Lower Address */ ++#define MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST1 0x00000c40 /* Y Rx Descriptor List1 First Descriptor Upper Address */ ++#define MISC1_Y_RX_SW_DESC_LIST_CTRL_STS 0x00000c44 /* Y Rx Software Descriptor List Control and Status */ ++#define MISC1_Y_RX_ERROR_STATUS 0x00000c4c /* Y Rx Engine Error Status */ ++#define MISC1_Y_RX_LIST0_CUR_DESC_L_ADDR 0x00000c50 /* Y Rx List0 Current Descriptor Lower Address */ ++#define MISC1_Y_RX_LIST0_CUR_DESC_U_ADDR 0x00000c54 /* Y Rx List0 Current Descriptor Upper Address */ ++#define MISC1_Y_RX_LIST0_CUR_BYTE_CNT 0x00000c58 /* Y Rx List0 Current Descriptor Byte Count */ ++#define MISC1_Y_RX_LIST1_CUR_DESC_L_ADDR 0x00000c5c /* Y Rx List1 Current Descriptor Lower address */ ++#define MISC1_Y_RX_LIST1_CUR_DESC_U_ADDR 0x00000c60 /* Y Rx List1 Current Descriptor Upper address */ ++#define MISC1_Y_RX_LIST1_CUR_BYTE_CNT 0x00000c64 /* Y Rx List1 Current Descriptor Byte Count */ ++#define MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0 0x00000c68 /* UV Rx Descriptor List0 First Descriptor lower Address */ ++#define MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST0 0x00000c6c /* UV Rx Descriptor List0 First Descriptor Upper Address */ ++#define MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1 0x00000c70 /* UV Rx Descriptor List1 First Descriptor Lower Address */ ++#define MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST1 0x00000c74 /* UV Rx Descriptor List1 First Descriptor Upper Address */ ++#define MISC1_UV_RX_SW_DESC_LIST_CTRL_STS 0x00000c78 /* UV Rx Software Descriptor List Control and Status */ ++#define MISC1_UV_RX_ERROR_STATUS 0x00000c7c /* UV Rx Engine Error Status */ ++#define MISC1_UV_RX_LIST0_CUR_DESC_L_ADDR 0x00000c80 /* UV Rx List0 Current Descriptor Lower Address */ ++#define MISC1_UV_RX_LIST0_CUR_DESC_U_ADDR 0x00000c84 /* UV Rx List0 Current Descriptor Upper Address */ ++#define MISC1_UV_RX_LIST0_CUR_BYTE_CNT 0x00000c88 /* UV Rx List0 Current Descriptor Byte Count */ ++#define MISC1_UV_RX_LIST1_CUR_DESC_L_ADDR 0x00000c8c /* UV Rx List1 Current Descriptor Lower Address */ ++#define MISC1_UV_RX_LIST1_CUR_DESC_U_ADDR 0x00000c90 /* UV Rx List1 Current Descriptor Upper Address */ ++#define MISC1_UV_RX_LIST1_CUR_BYTE_CNT 0x00000c94 /* UV Rx List1 Current Descriptor Byte Count */ ++#define MISC1_DMA_DEBUG_OPTIONS_REG 0x00000c98 /* DMA Debug Options Register */ ++#define MISC1_READ_CHANNEL_ERROR_STATUS 0x00000c9c /* Read Channel Error Status */ ++#define MISC1_PCIE_DMA_CTRL 0x00000ca0 /* PCIE DMA Control Register */ ++ ++ ++/**************************************************************************** ++ * BCM70012_MISC_TOP_MISC2 ++ ***************************************************************************/ ++#define MISC2_GLOBAL_CTRL 0x00000d00 /* Global Control Register */ ++#define MISC2_INTERNAL_STATUS 0x00000d04 /* Internal Status Register */ ++#define MISC2_INTERNAL_STATUS_MUX_CTRL 0x00000d08 /* Internal Debug Mux Control */ ++#define MISC2_DEBUG_FIFO_LENGTH 0x00000d0c /* Debug FIFO Length */ ++ ++ ++/**************************************************************************** ++ * BCM70012_MISC_TOP_MISC3 ++ 
***************************************************************************/ ++#define MISC3_RESET_CTRL 0x00000e00 /* Reset Control Register */ ++#define MISC3_BIST_CTRL 0x00000e04 /* BIST Control Register */ ++#define MISC3_BIST_STATUS 0x00000e08 /* BIST Status Register */ ++#define MISC3_RX_CHECKSUM 0x00000e0c /* Receive Checksum */ ++#define MISC3_TX_CHECKSUM 0x00000e10 /* Transmit Checksum */ ++#define MISC3_ECO_CTRL_CORE 0x00000e14 /* ECO Core Reset Control Register */ ++#define MISC3_CSI_TEST_CTRL 0x00000e18 /* CSI Test Control Register */ ++#define MISC3_HD_DVI_TEST_CTRL 0x00000e1c /* HD DVI Test Control Register */ ++ ++ ++/**************************************************************************** ++ * BCM70012_MISC_TOP_MISC_PERST ++ ***************************************************************************/ ++#define MISC_PERST_ECO_CTRL_PERST 0x00000e80 /* ECO PCIE Reset Control Register */ ++#define MISC_PERST_DECODER_CTRL 0x00000e84 /* Decoder Control Register */ ++#define MISC_PERST_CCE_STATUS 0x00000e88 /* Config Copy Engine Status */ ++#define MISC_PERST_PCIE_DEBUG 0x00000e8c /* PCIE Debug Control Register */ ++#define MISC_PERST_PCIE_DEBUG_STATUS 0x00000e90 /* PCIE Debug Status Register */ ++#define MISC_PERST_VREG_CTRL 0x00000e94 /* Voltage Regulator Control Register */ ++#define MISC_PERST_MEM_CTRL 0x00000e98 /* Memory Control Register */ ++#define MISC_PERST_CLOCK_CTRL 0x00000e9c /* Clock Control Register */ ++ ++ ++/**************************************************************************** ++ * BCM70012_MISC_TOP_GISB_ARBITER ++ ***************************************************************************/ ++#define GISB_ARBITER_REVISION 0x00000f00 /* GISB ARBITER REVISION */ ++#define GISB_ARBITER_SCRATCH 0x00000f04 /* GISB ARBITER Scratch Register */ ++#define GISB_ARBITER_REQ_MASK 0x00000f08 /* GISB ARBITER Master Request Mask Register */ ++#define GISB_ARBITER_TIMER 0x00000f0c /* GISB ARBITER Timer Value Register */ ++ ++ ++/**************************************************************************** ++ * BCM70012_OTP_TOP_OTP ++ ***************************************************************************/ ++#define OTP_CONFIG_INFO 0x00001400 /* OTP Configuration Register */ ++#define OTP_CMD 0x00001404 /* OTP Command Register */ ++#define OTP_STATUS 0x00001408 /* OTP Status Register */ ++#define OTP_CONTENT_MISC 0x0000140c /* Content : Miscellaneous Register */ ++#define OTP_CONTENT_AES_0 0x00001410 /* Content : AES Key 0 Register */ ++#define OTP_CONTENT_AES_1 0x00001414 /* Content : AES Key 1 Register */ ++#define OTP_CONTENT_AES_2 0x00001418 /* Content : AES Key 2 Register */ ++#define OTP_CONTENT_AES_3 0x0000141c /* Content : AES Key 3 Register */ ++#define OTP_CONTENT_SHA_0 0x00001420 /* Content : SHA Key 0 Register */ ++#define OTP_CONTENT_SHA_1 0x00001424 /* Content : SHA Key 1 Register */ ++#define OTP_CONTENT_SHA_2 0x00001428 /* Content : SHA Key 2 Register */ ++#define OTP_CONTENT_SHA_3 0x0000142c /* Content : SHA Key 3 Register */ ++#define OTP_CONTENT_SHA_4 0x00001430 /* Content : SHA Key 4 Register */ ++#define OTP_CONTENT_SHA_5 0x00001434 /* Content : SHA Key 5 Register */ ++#define OTP_CONTENT_SHA_6 0x00001438 /* Content : SHA Key 6 Register */ ++#define OTP_CONTENT_SHA_7 0x0000143c /* Content : SHA Key 7 Register */ ++#define OTP_CONTENT_CHECKSUM 0x00001440 /* Content : Checksum Register */ ++#define OTP_PROG_CTRL 0x00001444 /* Programming Control Register */ ++#define OTP_PROG_STATUS 0x00001448 /* Programming Status Register */ ++#define 
OTP_PROG_PULSE 0x0000144c /* Program Pulse Width Register */ ++#define OTP_VERIFY_PULSE 0x00001450 /* Verify Pulse Width Register */ ++#define OTP_PROG_MASK 0x00001454 /* Program Mask Register */ ++#define OTP_DATA_INPUT 0x00001458 /* Data Input Register */ ++#define OTP_DATA_OUTPUT 0x0000145c /* Data Output Register */ ++ ++ ++/**************************************************************************** ++ * BCM70012_AES_TOP_AES ++ ***************************************************************************/ ++#define AES_CONFIG_INFO 0x00001800 /* AES Configuration Information Register */ ++#define AES_CMD 0x00001804 /* AES Command Register */ ++#define AES_STATUS 0x00001808 /* AES Status Register */ ++#define AES_EEPROM_CONFIG 0x0000180c /* AES EEPROM Configuration Register */ ++#define AES_EEPROM_DATA_0 0x00001810 /* AES EEPROM Data Register 0 */ ++#define AES_EEPROM_DATA_1 0x00001814 /* AES EEPROM Data Register 1 */ ++#define AES_EEPROM_DATA_2 0x00001818 /* AES EEPROM Data Register 2 */ ++#define AES_EEPROM_DATA_3 0x0000181c /* AES EEPROM Data Register 3 */ ++ ++ ++/**************************************************************************** ++ * BCM70012_DCI_TOP_DCI ++ ***************************************************************************/ ++#define DCI_CMD 0x00001c00 /* DCI Command Register */ ++#define DCI_STATUS 0x00001c04 /* DCI Status Register */ ++#define DCI_DRAM_BASE_ADDR 0x00001c08 /* DRAM Base Address Register */ ++#define DCI_FIRMWARE_ADDR 0x00001c0c /* Firmware Address Register */ ++#define DCI_FIRMWARE_DATA 0x00001c10 /* Firmware Data Register */ ++#define DCI_SIGNATURE_DATA_0 0x00001c14 /* Signature Data Register 0 */ ++#define DCI_SIGNATURE_DATA_1 0x00001c18 /* Signature Data Register 1 */ ++#define DCI_SIGNATURE_DATA_2 0x00001c1c /* Signature Data Register 2 */ ++#define DCI_SIGNATURE_DATA_3 0x00001c20 /* Signature Data Register 3 */ ++#define DCI_SIGNATURE_DATA_4 0x00001c24 /* Signature Data Register 4 */ ++#define DCI_SIGNATURE_DATA_5 0x00001c28 /* Signature Data Register 5 */ ++#define DCI_SIGNATURE_DATA_6 0x00001c2c /* Signature Data Register 6 */ ++#define DCI_SIGNATURE_DATA_7 0x00001c30 /* Signature Data Register 7 */ ++ ++ ++/**************************************************************************** ++ * BCM70012_TGT_TOP_INTR ++ ***************************************************************************/ ++/**************************************************************************** ++ * INTR :: INTR_STATUS ++ ***************************************************************************/ ++/* INTR :: INTR_STATUS :: reserved0 [31:26] */ ++#define INTR_INTR_STATUS_reserved0_MASK 0xfc000000 ++#define INTR_INTR_STATUS_reserved0_ALIGN 0 ++#define INTR_INTR_STATUS_reserved0_BITS 6 ++#define INTR_INTR_STATUS_reserved0_SHIFT 26 ++ ++/* INTR :: INTR_STATUS :: PCIE_TGT_CA_ATTN [25:25] */ ++#define INTR_INTR_STATUS_PCIE_TGT_CA_ATTN_MASK 0x02000000 ++#define INTR_INTR_STATUS_PCIE_TGT_CA_ATTN_ALIGN 0 ++#define INTR_INTR_STATUS_PCIE_TGT_CA_ATTN_BITS 1 ++#define INTR_INTR_STATUS_PCIE_TGT_CA_ATTN_SHIFT 25 ++ ++/* INTR :: INTR_STATUS :: PCIE_TGT_UR_ATTN [24:24] */ ++#define INTR_INTR_STATUS_PCIE_TGT_UR_ATTN_MASK 0x01000000 ++#define INTR_INTR_STATUS_PCIE_TGT_UR_ATTN_ALIGN 0 ++#define INTR_INTR_STATUS_PCIE_TGT_UR_ATTN_BITS 1 ++#define INTR_INTR_STATUS_PCIE_TGT_UR_ATTN_SHIFT 24 ++ ++/* INTR :: INTR_STATUS :: reserved1 [23:14] */ ++#define INTR_INTR_STATUS_reserved1_MASK 0x00ffc000 ++#define INTR_INTR_STATUS_reserved1_ALIGN 0 ++#define INTR_INTR_STATUS_reserved1_BITS 10 
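
A note for readers of this register map: every field in these blocks is described by a _MASK/_ALIGN/_BITS/_SHIFT quadruple, and a field value is recovered by masking the raw register word and shifting it down. A minimal sketch of that idiom, assuming the header above (include guard MACFILE_H__) is included; the helper name is invented for illustration only and is not part of the patch:

#include <linux/types.h>

/* Extract the PCIe target unsupported-request attention flag [24:24]
 * from a raw INTR_INTR_STATUS word, using the mask/shift pair defined above. */
static inline u32 intr_status_tgt_ur_attn(u32 status)
{
	return (status & INTR_INTR_STATUS_PCIE_TGT_UR_ATTN_MASK)
		>> INTR_INTR_STATUS_PCIE_TGT_UR_ATTN_SHIFT;
}

The same pattern applies to every field defined in the blocks that follow.
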
++#define INTR_INTR_STATUS_reserved1_SHIFT 14 ++ ++/* INTR :: INTR_STATUS :: L1_UV_RX_DMA_ERR_INTR [13:13] */ ++#define INTR_INTR_STATUS_L1_UV_RX_DMA_ERR_INTR_MASK 0x00002000 ++#define INTR_INTR_STATUS_L1_UV_RX_DMA_ERR_INTR_ALIGN 0 ++#define INTR_INTR_STATUS_L1_UV_RX_DMA_ERR_INTR_BITS 1 ++#define INTR_INTR_STATUS_L1_UV_RX_DMA_ERR_INTR_SHIFT 13 ++ ++/* INTR :: INTR_STATUS :: L1_UV_RX_DMA_DONE_INTR [12:12] */ ++#define INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK 0x00001000 ++#define INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_ALIGN 0 ++#define INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_BITS 1 ++#define INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_SHIFT 12 ++ ++/* INTR :: INTR_STATUS :: L1_Y_RX_DMA_ERR_INTR [11:11] */ ++#define INTR_INTR_STATUS_L1_Y_RX_DMA_ERR_INTR_MASK 0x00000800 ++#define INTR_INTR_STATUS_L1_Y_RX_DMA_ERR_INTR_ALIGN 0 ++#define INTR_INTR_STATUS_L1_Y_RX_DMA_ERR_INTR_BITS 1 ++#define INTR_INTR_STATUS_L1_Y_RX_DMA_ERR_INTR_SHIFT 11 ++ ++/* INTR :: INTR_STATUS :: L1_Y_RX_DMA_DONE_INTR [10:10] */ ++#define INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK 0x00000400 ++#define INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_ALIGN 0 ++#define INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_BITS 1 ++#define INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_SHIFT 10 ++ ++/* INTR :: INTR_STATUS :: L1_TX_DMA_ERR_INTR [09:09] */ ++#define INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_MASK 0x00000200 ++#define INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_ALIGN 0 ++#define INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_BITS 1 ++#define INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_SHIFT 9 ++ ++/* INTR :: INTR_STATUS :: L1_TX_DMA_DONE_INTR [08:08] */ ++#define INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_MASK 0x00000100 ++#define INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_ALIGN 0 ++#define INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_BITS 1 ++#define INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_SHIFT 8 ++ ++/* INTR :: INTR_STATUS :: reserved2 [07:06] */ ++#define INTR_INTR_STATUS_reserved2_MASK 0x000000c0 ++#define INTR_INTR_STATUS_reserved2_ALIGN 0 ++#define INTR_INTR_STATUS_reserved2_BITS 2 ++#define INTR_INTR_STATUS_reserved2_SHIFT 6 ++ ++/* INTR :: INTR_STATUS :: L0_UV_RX_DMA_ERR_INTR [05:05] */ ++#define INTR_INTR_STATUS_L0_UV_RX_DMA_ERR_INTR_MASK 0x00000020 ++#define INTR_INTR_STATUS_L0_UV_RX_DMA_ERR_INTR_ALIGN 0 ++#define INTR_INTR_STATUS_L0_UV_RX_DMA_ERR_INTR_BITS 1 ++#define INTR_INTR_STATUS_L0_UV_RX_DMA_ERR_INTR_SHIFT 5 ++ ++/* INTR :: INTR_STATUS :: L0_UV_RX_DMA_DONE_INTR [04:04] */ ++#define INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK 0x00000010 ++#define INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_ALIGN 0 ++#define INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_BITS 1 ++#define INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_SHIFT 4 ++ ++/* INTR :: INTR_STATUS :: L0_Y_RX_DMA_ERR_INTR [03:03] */ ++#define INTR_INTR_STATUS_L0_Y_RX_DMA_ERR_INTR_MASK 0x00000008 ++#define INTR_INTR_STATUS_L0_Y_RX_DMA_ERR_INTR_ALIGN 0 ++#define INTR_INTR_STATUS_L0_Y_RX_DMA_ERR_INTR_BITS 1 ++#define INTR_INTR_STATUS_L0_Y_RX_DMA_ERR_INTR_SHIFT 3 ++ ++/* INTR :: INTR_STATUS :: L0_Y_RX_DMA_DONE_INTR [02:02] */ ++#define INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK 0x00000004 ++#define INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_ALIGN 0 ++#define INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_BITS 1 ++#define INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_SHIFT 2 ++ ++/* INTR :: INTR_STATUS :: L0_TX_DMA_ERR_INTR [01:01] */ ++#define INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_MASK 0x00000002 ++#define INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_ALIGN 0 ++#define INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_BITS 1 ++#define INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_SHIFT 1 ++ ++/* INTR :: 
INTR_STATUS :: L0_TX_DMA_DONE_INTR [00:00] */ ++#define INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_MASK 0x00000001 ++#define INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_ALIGN 0 ++#define INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_BITS 1 ++#define INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_SHIFT 0 ++ ++ ++/**************************************************************************** ++ * MISC1 :: TX_SW_DESC_LIST_CTRL_STS ++ ***************************************************************************/ ++/* MISC1 :: TX_SW_DESC_LIST_CTRL_STS :: reserved0 [31:04] */ ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_reserved0_MASK 0xfffffff0 ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_reserved0_ALIGN 0 ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_reserved0_BITS 28 ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_reserved0_SHIFT 4 ++ ++/* MISC1 :: TX_SW_DESC_LIST_CTRL_STS :: DMA_DATA_SERV_PTR [03:03] */ ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_DMA_DATA_SERV_PTR_MASK 0x00000008 ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_DMA_DATA_SERV_PTR_ALIGN 0 ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_DMA_DATA_SERV_PTR_BITS 1 ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_DMA_DATA_SERV_PTR_SHIFT 3 ++ ++/* MISC1 :: TX_SW_DESC_LIST_CTRL_STS :: DESC_SERV_PTR [02:02] */ ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_DESC_SERV_PTR_MASK 0x00000004 ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_DESC_SERV_PTR_ALIGN 0 ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_DESC_SERV_PTR_BITS 1 ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_DESC_SERV_PTR_SHIFT 2 ++ ++/* MISC1 :: TX_SW_DESC_LIST_CTRL_STS :: TX_DMA_HALT_ON_ERROR [01:01] */ ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_HALT_ON_ERROR_MASK 0x00000002 ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_HALT_ON_ERROR_ALIGN 0 ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_HALT_ON_ERROR_BITS 1 ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_HALT_ON_ERROR_SHIFT 1 ++ ++/* MISC1 :: TX_SW_DESC_LIST_CTRL_STS :: TX_DMA_RUN_STOP [00:00] */ ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_RUN_STOP_MASK 0x00000001 ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_RUN_STOP_ALIGN 0 ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_RUN_STOP_BITS 1 ++#define MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_RUN_STOP_SHIFT 0 ++ ++ ++/**************************************************************************** ++ * MISC1 :: TX_DMA_ERROR_STATUS ++ ***************************************************************************/ ++/* MISC1 :: TX_DMA_ERROR_STATUS :: reserved0 [31:10] */ ++#define MISC1_TX_DMA_ERROR_STATUS_reserved0_MASK 0xfffffc00 ++#define MISC1_TX_DMA_ERROR_STATUS_reserved0_ALIGN 0 ++#define MISC1_TX_DMA_ERROR_STATUS_reserved0_BITS 22 ++#define MISC1_TX_DMA_ERROR_STATUS_reserved0_SHIFT 10 ++ ++/* MISC1 :: TX_DMA_ERROR_STATUS :: TX_L1_DESC_TX_ABORT_ERRORS [09:09] */ ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_MASK 0x00000200 ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_ALIGN 0 ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_BITS 1 ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_SHIFT 9 ++ ++/* MISC1 :: TX_DMA_ERROR_STATUS :: reserved1 [08:08] */ ++#define MISC1_TX_DMA_ERROR_STATUS_reserved1_MASK 0x00000100 ++#define MISC1_TX_DMA_ERROR_STATUS_reserved1_ALIGN 0 ++#define MISC1_TX_DMA_ERROR_STATUS_reserved1_BITS 1 ++#define MISC1_TX_DMA_ERROR_STATUS_reserved1_SHIFT 8 ++ ++/* MISC1 :: TX_DMA_ERROR_STATUS :: TX_L0_DESC_TX_ABORT_ERRORS [07:07] */ ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_MASK 0x00000080 ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_ALIGN 0 ++#define 
MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_BITS 1 ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_SHIFT 7 ++ ++/* MISC1 :: TX_DMA_ERROR_STATUS :: reserved2 [06:06] */ ++#define MISC1_TX_DMA_ERROR_STATUS_reserved2_MASK 0x00000040 ++#define MISC1_TX_DMA_ERROR_STATUS_reserved2_ALIGN 0 ++#define MISC1_TX_DMA_ERROR_STATUS_reserved2_BITS 1 ++#define MISC1_TX_DMA_ERROR_STATUS_reserved2_SHIFT 6 ++ ++/* MISC1 :: TX_DMA_ERROR_STATUS :: TX_L1_DMA_DATA_TX_ABORT_ERRORS [05:05] */ ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_MASK 0x00000020 ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_ALIGN 0 ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_BITS 1 ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_SHIFT 5 ++ ++/* MISC1 :: TX_DMA_ERROR_STATUS :: TX_L1_FIFO_FULL_ERRORS [04:04] */ ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK 0x00000010 ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_ALIGN 0 ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_BITS 1 ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_SHIFT 4 ++ ++/* MISC1 :: TX_DMA_ERROR_STATUS :: reserved3 [03:03] */ ++#define MISC1_TX_DMA_ERROR_STATUS_reserved3_MASK 0x00000008 ++#define MISC1_TX_DMA_ERROR_STATUS_reserved3_ALIGN 0 ++#define MISC1_TX_DMA_ERROR_STATUS_reserved3_BITS 1 ++#define MISC1_TX_DMA_ERROR_STATUS_reserved3_SHIFT 3 ++ ++/* MISC1 :: TX_DMA_ERROR_STATUS :: TX_L0_DMA_DATA_TX_ABORT_ERRORS [02:02] */ ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_MASK 0x00000004 ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_ALIGN 0 ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_BITS 1 ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_SHIFT 2 ++ ++/* MISC1 :: TX_DMA_ERROR_STATUS :: TX_L0_FIFO_FULL_ERRORS [01:01] */ ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK 0x00000002 ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_ALIGN 0 ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_BITS 1 ++#define MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_SHIFT 1 ++ ++/* MISC1 :: TX_DMA_ERROR_STATUS :: reserved4 [00:00] */ ++#define MISC1_TX_DMA_ERROR_STATUS_reserved4_MASK 0x00000001 ++#define MISC1_TX_DMA_ERROR_STATUS_reserved4_ALIGN 0 ++#define MISC1_TX_DMA_ERROR_STATUS_reserved4_BITS 1 ++#define MISC1_TX_DMA_ERROR_STATUS_reserved4_SHIFT 0 ++ ++ ++/**************************************************************************** ++ * MISC1 :: Y_RX_ERROR_STATUS ++ ***************************************************************************/ ++/* MISC1 :: Y_RX_ERROR_STATUS :: reserved0 [31:14] */ ++#define MISC1_Y_RX_ERROR_STATUS_reserved0_MASK 0xffffc000 ++#define MISC1_Y_RX_ERROR_STATUS_reserved0_ALIGN 0 ++#define MISC1_Y_RX_ERROR_STATUS_reserved0_BITS 18 ++#define MISC1_Y_RX_ERROR_STATUS_reserved0_SHIFT 14 ++ ++/* MISC1 :: Y_RX_ERROR_STATUS :: RX_L1_UNDERRUN_ERROR [13:13] */ ++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK 0x00002000 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_ALIGN 0 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_BITS 1 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_SHIFT 13 ++ ++/* MISC1 :: Y_RX_ERROR_STATUS :: RX_L1_OVERRUN_ERROR [12:12] */ ++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_MASK 0x00001000 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_ALIGN 0 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_BITS 1 ++#define 
MISC1_Y_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_SHIFT 12 ++ ++/* MISC1 :: Y_RX_ERROR_STATUS :: RX_L0_UNDERRUN_ERROR [11:11] */ ++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK 0x00000800 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_ALIGN 0 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_BITS 1 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_SHIFT 11 ++ ++/* MISC1 :: Y_RX_ERROR_STATUS :: RX_L0_OVERRUN_ERROR [10:10] */ ++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_MASK 0x00000400 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_ALIGN 0 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_BITS 1 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_SHIFT 10 ++ ++/* MISC1 :: Y_RX_ERROR_STATUS :: RX_L1_DESC_TX_ABORT_ERRORS [09:09] */ ++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_MASK 0x00000200 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_ALIGN 0 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_BITS 1 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_SHIFT 9 ++ ++/* MISC1 :: Y_RX_ERROR_STATUS :: reserved1 [08:08] */ ++#define MISC1_Y_RX_ERROR_STATUS_reserved1_MASK 0x00000100 ++#define MISC1_Y_RX_ERROR_STATUS_reserved1_ALIGN 0 ++#define MISC1_Y_RX_ERROR_STATUS_reserved1_BITS 1 ++#define MISC1_Y_RX_ERROR_STATUS_reserved1_SHIFT 8 ++ ++/* MISC1 :: Y_RX_ERROR_STATUS :: RX_L0_DESC_TX_ABORT_ERRORS [07:07] */ ++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_MASK 0x00000080 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_ALIGN 0 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_BITS 1 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_SHIFT 7 ++ ++/* MISC1 :: Y_RX_ERROR_STATUS :: reserved2 [06:05] */ ++#define MISC1_Y_RX_ERROR_STATUS_reserved2_MASK 0x00000060 ++#define MISC1_Y_RX_ERROR_STATUS_reserved2_ALIGN 0 ++#define MISC1_Y_RX_ERROR_STATUS_reserved2_BITS 2 ++#define MISC1_Y_RX_ERROR_STATUS_reserved2_SHIFT 5 ++ ++/* MISC1 :: Y_RX_ERROR_STATUS :: RX_L1_FIFO_FULL_ERRORS [04:04] */ ++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK 0x00000010 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_ALIGN 0 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_BITS 1 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_SHIFT 4 ++ ++/* MISC1 :: Y_RX_ERROR_STATUS :: reserved3 [03:02] */ ++#define MISC1_Y_RX_ERROR_STATUS_reserved3_MASK 0x0000000c ++#define MISC1_Y_RX_ERROR_STATUS_reserved3_ALIGN 0 ++#define MISC1_Y_RX_ERROR_STATUS_reserved3_BITS 2 ++#define MISC1_Y_RX_ERROR_STATUS_reserved3_SHIFT 2 ++ ++/* MISC1 :: Y_RX_ERROR_STATUS :: RX_L0_FIFO_FULL_ERRORS [01:01] */ ++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK 0x00000002 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_ALIGN 0 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_BITS 1 ++#define MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_SHIFT 1 ++ ++/* MISC1 :: Y_RX_ERROR_STATUS :: reserved4 [00:00] */ ++#define MISC1_Y_RX_ERROR_STATUS_reserved4_MASK 0x00000001 ++#define MISC1_Y_RX_ERROR_STATUS_reserved4_ALIGN 0 ++#define MISC1_Y_RX_ERROR_STATUS_reserved4_BITS 1 ++#define MISC1_Y_RX_ERROR_STATUS_reserved4_SHIFT 0 ++ ++ ++/**************************************************************************** ++ * MISC1 :: UV_RX_ERROR_STATUS ++ ***************************************************************************/ ++/* MISC1 :: UV_RX_ERROR_STATUS :: reserved0 [31:14] */ ++#define MISC1_UV_RX_ERROR_STATUS_reserved0_MASK 0xffffc000 ++#define 
MISC1_UV_RX_ERROR_STATUS_reserved0_ALIGN 0 ++#define MISC1_UV_RX_ERROR_STATUS_reserved0_BITS 18 ++#define MISC1_UV_RX_ERROR_STATUS_reserved0_SHIFT 14 ++ ++/* MISC1 :: UV_RX_ERROR_STATUS :: RX_L1_UNDERRUN_ERROR [13:13] */ ++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK 0x00002000 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_ALIGN 0 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_BITS 1 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_SHIFT 13 ++ ++/* MISC1 :: UV_RX_ERROR_STATUS :: RX_L1_OVERRUN_ERROR [12:12] */ ++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_MASK 0x00001000 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_ALIGN 0 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_BITS 1 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_SHIFT 12 ++ ++/* MISC1 :: UV_RX_ERROR_STATUS :: RX_L0_UNDERRUN_ERROR [11:11] */ ++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK 0x00000800 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_ALIGN 0 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_BITS 1 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_SHIFT 11 ++ ++/* MISC1 :: UV_RX_ERROR_STATUS :: RX_L0_OVERRUN_ERROR [10:10] */ ++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_MASK 0x00000400 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_ALIGN 0 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_BITS 1 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_SHIFT 10 ++ ++/* MISC1 :: UV_RX_ERROR_STATUS :: RX_L1_DESC_TX_ABORT_ERRORS [09:09] */ ++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_MASK 0x00000200 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_ALIGN 0 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_BITS 1 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_SHIFT 9 ++ ++/* MISC1 :: UV_RX_ERROR_STATUS :: reserved1 [08:08] */ ++#define MISC1_UV_RX_ERROR_STATUS_reserved1_MASK 0x00000100 ++#define MISC1_UV_RX_ERROR_STATUS_reserved1_ALIGN 0 ++#define MISC1_UV_RX_ERROR_STATUS_reserved1_BITS 1 ++#define MISC1_UV_RX_ERROR_STATUS_reserved1_SHIFT 8 ++ ++/* MISC1 :: UV_RX_ERROR_STATUS :: RX_L0_DESC_TX_ABORT_ERRORS [07:07] */ ++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_MASK 0x00000080 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_ALIGN 0 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_BITS 1 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_SHIFT 7 ++ ++/* MISC1 :: UV_RX_ERROR_STATUS :: reserved2 [06:05] */ ++#define MISC1_UV_RX_ERROR_STATUS_reserved2_MASK 0x00000060 ++#define MISC1_UV_RX_ERROR_STATUS_reserved2_ALIGN 0 ++#define MISC1_UV_RX_ERROR_STATUS_reserved2_BITS 2 ++#define MISC1_UV_RX_ERROR_STATUS_reserved2_SHIFT 5 ++ ++/* MISC1 :: UV_RX_ERROR_STATUS :: RX_L1_FIFO_FULL_ERRORS [04:04] */ ++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK 0x00000010 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_ALIGN 0 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_BITS 1 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_SHIFT 4 ++ ++/* MISC1 :: UV_RX_ERROR_STATUS :: reserved3 [03:02] */ ++#define MISC1_UV_RX_ERROR_STATUS_reserved3_MASK 0x0000000c ++#define MISC1_UV_RX_ERROR_STATUS_reserved3_ALIGN 0 ++#define MISC1_UV_RX_ERROR_STATUS_reserved3_BITS 2 ++#define MISC1_UV_RX_ERROR_STATUS_reserved3_SHIFT 2 ++ ++/* MISC1 :: UV_RX_ERROR_STATUS :: RX_L0_FIFO_FULL_ERRORS [01:01] */ ++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK 0x00000002 ++#define 
MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_ALIGN 0 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_BITS 1 ++#define MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_SHIFT 1 ++ ++/* MISC1 :: UV_RX_ERROR_STATUS :: reserved4 [00:00] */ ++#define MISC1_UV_RX_ERROR_STATUS_reserved4_MASK 0x00000001 ++#define MISC1_UV_RX_ERROR_STATUS_reserved4_ALIGN 0 ++#define MISC1_UV_RX_ERROR_STATUS_reserved4_BITS 1 ++#define MISC1_UV_RX_ERROR_STATUS_reserved4_SHIFT 0 ++ ++/**************************************************************************** ++ * Datatype Definitions. ++ ***************************************************************************/ ++#endif /* #ifndef MACFILE_H__ */ ++ ++/* End of File */ ++ +diff --git a/drivers/staging/crystalhd/crystalhd_cmds.c b/drivers/staging/crystalhd/crystalhd_cmds.c +new file mode 100644 +index 0000000..39c641d +--- /dev/null ++++ b/drivers/staging/crystalhd/crystalhd_cmds.c +@@ -0,0 +1,1058 @@ ++/*************************************************************************** ++ * Copyright (c) 2005-2009, Broadcom Corporation. ++ * ++ * Name: crystalhd_cmds . c ++ * ++ * Description: ++ * BCM70010 Linux driver user command interfaces. ++ * ++ * HISTORY: ++ * ++ ********************************************************************** ++ * This file is part of the crystalhd device driver. ++ * ++ * This driver is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, version 2 of the License. ++ * ++ * This driver is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this driver. If not, see . 
++ **********************************************************************/ ++ ++#include "crystalhd_cmds.h" ++#include "crystalhd_hw.h" ++ ++static struct crystalhd_user *bc_cproc_get_uid(struct crystalhd_cmd *ctx) ++{ ++ struct crystalhd_user *user = NULL; ++ int i; ++ ++ for (i = 0; i < BC_LINK_MAX_OPENS; i++) { ++ if (!ctx->user[i].in_use) { ++ user = &ctx->user[i]; ++ break; ++ } ++ } ++ ++ return user; ++} ++ ++static int bc_cproc_get_user_count(struct crystalhd_cmd *ctx) ++{ ++ int i, count = 0; ++ ++ for (i = 0; i < BC_LINK_MAX_OPENS; i++) { ++ if (ctx->user[i].in_use) ++ count++; ++ } ++ ++ return count; ++} ++ ++static void bc_cproc_mark_pwr_state(struct crystalhd_cmd *ctx) ++{ ++ int i; ++ ++ for (i = 0; i < BC_LINK_MAX_OPENS; i++) { ++ if (!ctx->user[i].in_use) ++ continue; ++ if (ctx->user[i].mode == DTS_DIAG_MODE || ++ ctx->user[i].mode == DTS_PLAYBACK_MODE) { ++ ctx->pwr_state_change = 1; ++ break; ++ } ++ } ++} ++ ++static BC_STATUS bc_cproc_notify_mode(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata) ++{ ++ int rc = 0, i = 0; ++ ++ if (!ctx || !idata) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ if (ctx->user[idata->u_id].mode != DTS_MODE_INV) { ++ BCMLOG_ERR("Close the handle first..\n"); ++ return BC_STS_ERR_USAGE; ++ } ++ if (idata->udata.u.NotifyMode.Mode == DTS_MONITOR_MODE) { ++ ctx->user[idata->u_id].mode = idata->udata.u.NotifyMode.Mode; ++ return BC_STS_SUCCESS; ++ } ++ if (ctx->state != BC_LINK_INVALID) { ++ BCMLOG_ERR("Link invalid state %d \n", ctx->state); ++ return BC_STS_ERR_USAGE; ++ } ++ /* Check for duplicate playback sessions..*/ ++ for (i = 0; i < BC_LINK_MAX_OPENS; i++) { ++ if (ctx->user[i].mode == DTS_DIAG_MODE || ++ ctx->user[i].mode == DTS_PLAYBACK_MODE) { ++ BCMLOG_ERR("multiple playback sessions are not " ++ "supported..\n"); ++ return BC_STS_ERR_USAGE; ++ } ++ } ++ ctx->cin_wait_exit = 0; ++ ctx->user[idata->u_id].mode = idata->udata.u.NotifyMode.Mode; ++ /* Setup mmap pool for uaddr sgl mapping..*/ ++ rc = crystalhd_create_dio_pool(ctx->adp, BC_LINK_MAX_SGLS); ++ if (rc) ++ return BC_STS_ERROR; ++ ++ /* Setup Hardware DMA rings */ ++ return crystalhd_hw_setup_dma_rings(&ctx->hw_ctx); ++} ++ ++static BC_STATUS bc_cproc_get_version(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata) ++{ ++ ++ if (!ctx || !idata) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return BC_STS_INV_ARG; ++ } ++ idata->udata.u.VerInfo.DriverMajor = crystalhd_kmod_major; ++ idata->udata.u.VerInfo.DriverMinor = crystalhd_kmod_minor; ++ idata->udata.u.VerInfo.DriverRevision = crystalhd_kmod_rev; ++ return BC_STS_SUCCESS; ++} ++ ++ ++static BC_STATUS bc_cproc_get_hwtype(struct crystalhd_cmd *ctx, crystalhd_ioctl_data *idata) ++{ ++ if (!ctx || !idata) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ crystalhd_pci_cfg_rd(ctx->adp, 0, 2, ++ (uint32_t *)&idata->udata.u.hwType.PciVenId); ++ crystalhd_pci_cfg_rd(ctx->adp, 2, 2, ++ (uint32_t *)&idata->udata.u.hwType.PciDevId); ++ crystalhd_pci_cfg_rd(ctx->adp, 8, 1, ++ (uint32_t *)&idata->udata.u.hwType.HwRev); ++ ++ return BC_STS_SUCCESS; ++} ++ ++static BC_STATUS bc_cproc_reg_rd(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata) ++{ ++ if (!ctx || !idata) ++ return BC_STS_INV_ARG; ++ idata->udata.u.regAcc.Value = bc_dec_reg_rd(ctx->adp, ++ idata->udata.u.regAcc.Offset); ++ return BC_STS_SUCCESS; ++} ++ ++static BC_STATUS bc_cproc_reg_wr(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata) ++{ ++ if (!ctx || !idata) ++ return BC_STS_INV_ARG; ++ ++ 
bc_dec_reg_wr(ctx->adp, idata->udata.u.regAcc.Offset, ++ idata->udata.u.regAcc.Value); ++ ++ return BC_STS_SUCCESS; ++} ++ ++static BC_STATUS bc_cproc_link_reg_rd(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata) ++{ ++ if (!ctx || !idata) ++ return BC_STS_INV_ARG; ++ ++ idata->udata.u.regAcc.Value = crystalhd_reg_rd(ctx->adp, ++ idata->udata.u.regAcc.Offset); ++ return BC_STS_SUCCESS; ++} ++ ++static BC_STATUS bc_cproc_link_reg_wr(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata) ++{ ++ if (!ctx || !idata) ++ return BC_STS_INV_ARG; ++ ++ crystalhd_reg_wr(ctx->adp, idata->udata.u.regAcc.Offset, ++ idata->udata.u.regAcc.Value); ++ ++ return BC_STS_SUCCESS; ++} ++ ++static BC_STATUS bc_cproc_mem_rd(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata) ++{ ++ BC_STATUS sts = BC_STS_SUCCESS; ++ ++ if (!ctx || !idata || !idata->add_cdata) ++ return BC_STS_INV_ARG; ++ ++ if (idata->udata.u.devMem.NumDwords > (idata->add_cdata_sz / 4)) { ++ BCMLOG_ERR("insufficient buffer\n"); ++ return BC_STS_INV_ARG; ++ } ++ sts = crystalhd_mem_rd(ctx->adp, idata->udata.u.devMem.StartOff, ++ idata->udata.u.devMem.NumDwords, ++ (uint32_t *)idata->add_cdata); ++ return sts; ++ ++} ++ ++static BC_STATUS bc_cproc_mem_wr(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata) ++{ ++ BC_STATUS sts = BC_STS_SUCCESS; ++ ++ if (!ctx || !idata || !idata->add_cdata) ++ return BC_STS_INV_ARG; ++ ++ if (idata->udata.u.devMem.NumDwords > (idata->add_cdata_sz / 4)) { ++ BCMLOG_ERR("insufficient buffer\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ sts = crystalhd_mem_wr(ctx->adp, idata->udata.u.devMem.StartOff, ++ idata->udata.u.devMem.NumDwords, ++ (uint32_t *)idata->add_cdata); ++ return sts; ++} ++ ++static BC_STATUS bc_cproc_cfg_rd(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata) ++{ ++ uint32_t ix, cnt, off, len; ++ BC_STATUS sts = BC_STS_SUCCESS; ++ uint32_t *temp; ++ ++ if (!ctx || !idata) ++ return BC_STS_INV_ARG; ++ ++ temp = (uint32_t *) idata->udata.u.pciCfg.pci_cfg_space; ++ off = idata->udata.u.pciCfg.Offset; ++ len = idata->udata.u.pciCfg.Size; ++ ++ if (len <= 4) ++ return crystalhd_pci_cfg_rd(ctx->adp, off, len, temp); ++ ++ /* Truncate to dword alignment..*/ ++ len = 4; ++ cnt = idata->udata.u.pciCfg.Size / len; ++ for (ix = 0; ix < cnt; ix++) { ++ sts = crystalhd_pci_cfg_rd(ctx->adp, off, len, &temp[ix]); ++ if (sts != BC_STS_SUCCESS) { ++ BCMLOG_ERR("config read : %d\n", sts); ++ return sts; ++ } ++ off += len; ++ } ++ ++ return sts; ++} ++ ++static BC_STATUS bc_cproc_cfg_wr(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata) ++{ ++ uint32_t ix, cnt, off, len; ++ BC_STATUS sts = BC_STS_SUCCESS; ++ uint32_t *temp; ++ ++ if (!ctx || !idata) ++ return BC_STS_INV_ARG; ++ ++ temp = (uint32_t *) idata->udata.u.pciCfg.pci_cfg_space; ++ off = idata->udata.u.pciCfg.Offset; ++ len = idata->udata.u.pciCfg.Size; ++ ++ if (len <= 4) ++ return crystalhd_pci_cfg_wr(ctx->adp, off, len, temp[0]); ++ ++ /* Truncate to dword alignment..*/ ++ len = 4; ++ cnt = idata->udata.u.pciCfg.Size / len; ++ for (ix = 0; ix < cnt; ix++) { ++ sts = crystalhd_pci_cfg_wr(ctx->adp, off, len, temp[ix]); ++ if (sts != BC_STS_SUCCESS) { ++ BCMLOG_ERR("config write : %d\n", sts); ++ return sts; ++ } ++ off += len; ++ } ++ ++ return sts; ++} ++ ++static BC_STATUS bc_cproc_download_fw(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata) ++{ ++ BC_STATUS sts = BC_STS_SUCCESS; ++ ++ if (!ctx || !idata || !idata->add_cdata || !idata->add_cdata_sz) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return BC_STS_INV_ARG; 
++ } ++ ++ if (ctx->state != BC_LINK_INVALID) { ++ BCMLOG_ERR("Link invalid state %d \n", ctx->state); ++ return BC_STS_ERR_USAGE; ++ } ++ ++ sts = crystalhd_download_fw(ctx->adp, (uint8_t *)idata->add_cdata, ++ idata->add_cdata_sz); ++ ++ if (sts != BC_STS_SUCCESS) { ++ BCMLOG_ERR("Firmware Download Failure!! - %d\n", sts); ++ } else ++ ctx->state |= BC_LINK_INIT; ++ ++ return sts; ++} ++ ++/* ++ * We use the FW_CMD interface to sync up playback state with application ++ * and firmware. This function will perform the required pre and post ++ * processing of the Firmware commands. ++ * ++ * Pause - ++ * Disable capture after decoder pause. ++ * Resume - ++ * First enable capture and issue decoder resume command. ++ * Flush - ++ * Abort pending input transfers and issue decoder flush command. ++ * ++ */ ++static BC_STATUS bc_cproc_do_fw_cmd(struct crystalhd_cmd *ctx, crystalhd_ioctl_data *idata) ++{ ++ BC_STATUS sts; ++ uint32_t *cmd; ++ ++ if (!(ctx->state & BC_LINK_INIT)) { ++ BCMLOG_ERR("Link invalid state %d \n", ctx->state); ++ return BC_STS_ERR_USAGE; ++ } ++ ++ cmd = idata->udata.u.fwCmd.cmd; ++ ++ /* Pre-Process */ ++ if (cmd[0] == eCMD_C011_DEC_CHAN_PAUSE) { ++ if (!cmd[3]) { ++ ctx->state &= ~BC_LINK_PAUSED; ++ crystalhd_hw_unpause(&ctx->hw_ctx); ++ } ++ } else if (cmd[0] == eCMD_C011_DEC_CHAN_FLUSH) { ++ BCMLOG(BCMLOG_INFO, "Flush issued\n"); ++ if (cmd[3]) ++ ctx->cin_wait_exit = 1; ++ } ++ ++ sts = crystalhd_do_fw_cmd(&ctx->hw_ctx, &idata->udata.u.fwCmd); ++ ++ if (sts != BC_STS_SUCCESS) { ++ BCMLOG(BCMLOG_INFO, "fw cmd %x failed\n", cmd[0]); ++ return sts; ++ } ++ ++ /* Post-Process */ ++ if (cmd[0] == eCMD_C011_DEC_CHAN_PAUSE) { ++ if (cmd[3]) { ++ ctx->state |= BC_LINK_PAUSED; ++ crystalhd_hw_pause(&ctx->hw_ctx); ++ } ++ } ++ ++ return sts; ++} ++ ++static void bc_proc_in_completion(crystalhd_dio_req *dio_hnd, ++ wait_queue_head_t *event, BC_STATUS sts) ++{ ++ if (!dio_hnd || !event) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return; ++ } ++ if (sts == BC_STS_IO_USER_ABORT) ++ return; ++ ++ dio_hnd->uinfo.comp_sts = sts; ++ dio_hnd->uinfo.ev_sts = 1; ++ crystalhd_set_event(event); ++} ++ ++static BC_STATUS bc_cproc_codein_sleep(struct crystalhd_cmd *ctx) ++{ ++ wait_queue_head_t sleep_ev; ++ int rc = 0; ++ ++ if (ctx->state & BC_LINK_SUSPEND) ++ return BC_STS_IO_USER_ABORT; ++ ++ if (ctx->cin_wait_exit) { ++ ctx->cin_wait_exit = 0; ++ return BC_STS_CMD_CANCELLED; ++ } ++ crystalhd_create_event(&sleep_ev); ++ crystalhd_wait_on_event(&sleep_ev, 0, 100, rc, 0); ++ if (rc == -EINTR) ++ return BC_STS_IO_USER_ABORT; ++ ++ return BC_STS_SUCCESS; ++} ++ ++static BC_STATUS bc_cproc_hw_txdma(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata, ++ crystalhd_dio_req *dio) ++{ ++ uint32_t tx_listid = 0; ++ BC_STATUS sts = BC_STS_SUCCESS; ++ wait_queue_head_t event; ++ int rc = 0; ++ ++ if (!ctx || !idata || !dio) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ crystalhd_create_event(&event); ++ ++ ctx->tx_list_id = 0; ++ /* msleep_interruptible(2000); */ ++ sts = crystalhd_hw_post_tx(&ctx->hw_ctx, dio, bc_proc_in_completion, ++ &event, &tx_listid, ++ idata->udata.u.ProcInput.Encrypted); ++ ++ while (sts == BC_STS_BUSY) { ++ sts = bc_cproc_codein_sleep(ctx); ++ if (sts != BC_STS_SUCCESS) ++ break; ++ sts = crystalhd_hw_post_tx(&ctx->hw_ctx, dio, ++ bc_proc_in_completion, ++ &event, &tx_listid, ++ idata->udata.u.ProcInput.Encrypted); ++ } ++ if (sts != BC_STS_SUCCESS) { ++ BCMLOG(BCMLOG_DBG, "_hw_txdma returning sts:%d\n", sts); ++ return sts; ++ } ++ if 
(ctx->cin_wait_exit) ++ ctx->cin_wait_exit = 0; ++ ++ ctx->tx_list_id = tx_listid; ++ ++ /* _post() succeeded.. wait for the completion. */ ++ crystalhd_wait_on_event(&event, (dio->uinfo.ev_sts), 3000, rc, 0); ++ ctx->tx_list_id = 0; ++ if (!rc) { ++ return dio->uinfo.comp_sts; ++ } else if (rc == -EBUSY) { ++ BCMLOG(BCMLOG_DBG, "_tx_post() T/O \n"); ++ sts = BC_STS_TIMEOUT; ++ } else if (rc == -EINTR) { ++ BCMLOG(BCMLOG_DBG, "Tx Wait Signal int.\n"); ++ sts = BC_STS_IO_USER_ABORT; ++ } else { ++ sts = BC_STS_IO_ERROR; ++ } ++ ++ /* We are cancelling the IO from the same context as the _post(). ++ * so no need to wait on the event again.. the return itself ++ * ensures the release of our resources. ++ */ ++ crystalhd_hw_cancel_tx(&ctx->hw_ctx, tx_listid); ++ ++ return sts; ++} ++ ++/* Helper function to check on user buffers */ ++static BC_STATUS bc_cproc_check_inbuffs(bool pin, void *ubuff, uint32_t ub_sz, ++ uint32_t uv_off, bool en_422) ++{ ++ if (!ubuff || !ub_sz) { ++ BCMLOG_ERR("%s->Invalid Arg %p %x\n", ++ ((pin) ? "TX" : "RX"), ubuff, ub_sz); ++ return BC_STS_INV_ARG; ++ } ++ ++ /* Check for alignment */ ++ if (((uintptr_t)ubuff) & 0x03) { ++ BCMLOG_ERR("%s-->Un-aligned address not implemented yet.. %p \n", ++ ((pin) ? "TX" : "RX"), ubuff); ++ return BC_STS_NOT_IMPL; ++ } ++ if (pin) ++ return BC_STS_SUCCESS; ++ ++ if (!en_422 && !uv_off) { ++ BCMLOG_ERR("Need UV offset for 420 mode.\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ if (en_422 && uv_off) { ++ BCMLOG_ERR("UV offset in 422 mode ??\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ return BC_STS_SUCCESS; ++} ++ ++static BC_STATUS bc_cproc_proc_input(struct crystalhd_cmd *ctx, crystalhd_ioctl_data *idata) ++{ ++ void *ubuff; ++ uint32_t ub_sz; ++ crystalhd_dio_req *dio_hnd = NULL; ++ BC_STATUS sts = BC_STS_SUCCESS; ++ ++ if (!ctx || !idata) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ ubuff = idata->udata.u.ProcInput.pDmaBuff; ++ ub_sz = idata->udata.u.ProcInput.BuffSz; ++ ++ sts = bc_cproc_check_inbuffs(1, ubuff, ub_sz, 0, 0); ++ if (sts != BC_STS_SUCCESS) ++ return sts; ++ ++ sts = crystalhd_map_dio(ctx->adp, ubuff, ub_sz, 0, 0, 1, &dio_hnd); ++ if (sts != BC_STS_SUCCESS) { ++ BCMLOG_ERR("dio map - %d \n", sts); ++ return sts; ++ } ++ ++ if (!dio_hnd) ++ return BC_STS_ERROR; ++ ++ sts = bc_cproc_hw_txdma(ctx, idata, dio_hnd); ++ ++ crystalhd_unmap_dio(ctx->adp, dio_hnd); ++ ++ return sts; ++} ++ ++static BC_STATUS bc_cproc_add_cap_buff(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata) ++{ ++ void *ubuff; ++ uint32_t ub_sz, uv_off; ++ bool en_422; ++ crystalhd_dio_req *dio_hnd = NULL; ++ BC_STATUS sts = BC_STS_SUCCESS; ++ ++ if (!ctx || !idata) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ ubuff = idata->udata.u.RxBuffs.YuvBuff; ++ ub_sz = idata->udata.u.RxBuffs.YuvBuffSz; ++ uv_off = idata->udata.u.RxBuffs.UVbuffOffset; ++ en_422 = idata->udata.u.RxBuffs.b422Mode; ++ ++ sts = bc_cproc_check_inbuffs(0, ubuff, ub_sz, uv_off, en_422); ++ if (sts != BC_STS_SUCCESS) ++ return sts; ++ ++ sts = crystalhd_map_dio(ctx->adp, ubuff, ub_sz, uv_off, ++ en_422, 0, &dio_hnd); ++ if (sts != BC_STS_SUCCESS) { ++ BCMLOG_ERR("dio map - %d \n", sts); ++ return sts; ++ } ++ ++ if (!dio_hnd) ++ return BC_STS_ERROR; ++ ++ sts = crystalhd_hw_add_cap_buffer(&ctx->hw_ctx, dio_hnd, (ctx->state == BC_LINK_READY)); ++ if ((sts != BC_STS_SUCCESS) && (sts != BC_STS_BUSY)) { ++ crystalhd_unmap_dio(ctx->adp, dio_hnd); ++ return sts; ++ } ++ ++ return BC_STS_SUCCESS; ++} ++ ++static BC_STATUS 
bc_cproc_fmt_change(struct crystalhd_cmd *ctx, ++ crystalhd_dio_req *dio) ++{ ++ BC_STATUS sts = BC_STS_SUCCESS; ++ ++ sts = crystalhd_hw_add_cap_buffer(&ctx->hw_ctx, dio, 0); ++ if (sts != BC_STS_SUCCESS) ++ return sts; ++ ++ ctx->state |= BC_LINK_FMT_CHG; ++ if (ctx->state == BC_LINK_READY) ++ sts = crystalhd_hw_start_capture(&ctx->hw_ctx); ++ ++ return sts; ++} ++ ++static BC_STATUS bc_cproc_fetch_frame(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata) ++{ ++ crystalhd_dio_req *dio = NULL; ++ BC_STATUS sts = BC_STS_SUCCESS; ++ BC_DEC_OUT_BUFF *frame; ++ ++ if (!ctx || !idata) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ if (!(ctx->state & BC_LINK_CAP_EN)) { ++ BCMLOG(BCMLOG_DBG, "Capture not enabled..%x\n", ctx->state); ++ return BC_STS_ERR_USAGE; ++ } ++ ++ frame = &idata->udata.u.DecOutData; ++ ++ sts = crystalhd_hw_get_cap_buffer(&ctx->hw_ctx, &frame->PibInfo, &dio); ++ if (sts != BC_STS_SUCCESS) ++ return (ctx->state & BC_LINK_SUSPEND) ? BC_STS_IO_USER_ABORT : sts; ++ ++ frame->Flags = dio->uinfo.comp_flags; ++ ++ if (frame->Flags & COMP_FLAG_FMT_CHANGE) ++ return bc_cproc_fmt_change(ctx, dio); ++ ++ frame->OutPutBuffs.YuvBuff = dio->uinfo.xfr_buff; ++ frame->OutPutBuffs.YuvBuffSz = dio->uinfo.xfr_len; ++ frame->OutPutBuffs.UVbuffOffset = dio->uinfo.uv_offset; ++ frame->OutPutBuffs.b422Mode = dio->uinfo.b422mode; ++ ++ frame->OutPutBuffs.YBuffDoneSz = dio->uinfo.y_done_sz; ++ frame->OutPutBuffs.UVBuffDoneSz = dio->uinfo.uv_done_sz; ++ ++ crystalhd_unmap_dio(ctx->adp, dio); ++ ++ return BC_STS_SUCCESS; ++} ++ ++static BC_STATUS bc_cproc_start_capture(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata) ++{ ++ ctx->state |= BC_LINK_CAP_EN; ++ if (ctx->state == BC_LINK_READY) ++ return crystalhd_hw_start_capture(&ctx->hw_ctx); ++ ++ return BC_STS_SUCCESS; ++} ++ ++static BC_STATUS bc_cproc_flush_cap_buffs(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata) ++{ ++ crystalhd_dio_req *dio = NULL; ++ BC_STATUS sts = BC_STS_SUCCESS; ++ BC_DEC_OUT_BUFF *frame; ++ uint32_t count; ++ ++ if (!ctx || !idata) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ if (!(ctx->state & BC_LINK_CAP_EN)) ++ return BC_STS_ERR_USAGE; ++ ++ /* We should ack flush even when we are in paused/suspend state */ ++ if (!(ctx->state & BC_LINK_READY)) ++ return crystalhd_hw_stop_capture(&ctx->hw_ctx); ++ ++ ctx->state &= ~(BC_LINK_CAP_EN|BC_LINK_FMT_CHG); ++ ++ frame = &idata->udata.u.DecOutData; ++ for (count = 0; count < BC_RX_LIST_CNT; count++) { ++ ++ sts = crystalhd_hw_get_cap_buffer(&ctx->hw_ctx, &frame->PibInfo, &dio); ++ if (sts != BC_STS_SUCCESS) ++ break; ++ ++ crystalhd_unmap_dio(ctx->adp, dio); ++ } ++ ++ return crystalhd_hw_stop_capture(&ctx->hw_ctx); ++} ++ ++static BC_STATUS bc_cproc_get_stats(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata) ++{ ++ BC_DTS_STATS *stats; ++ struct crystalhd_hw_stats hw_stats; ++ ++ if (!ctx || !idata) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ crystalhd_hw_stats(&ctx->hw_ctx, &hw_stats); ++ ++ stats = &idata->udata.u.drvStat; ++ stats->drvRLL = hw_stats.rdyq_count; ++ stats->drvFLL = hw_stats.freeq_count; ++ stats->DrvTotalFrmDropped = hw_stats.rx_errors; ++ stats->DrvTotalHWErrs = hw_stats.rx_errors + hw_stats.tx_errors; ++ stats->intCount = hw_stats.num_interrupts; ++ stats->DrvIgnIntrCnt = hw_stats.num_interrupts - ++ hw_stats.dev_interrupts; ++ stats->TxFifoBsyCnt = hw_stats.cin_busy; ++ stats->pauseCount = hw_stats.pause_cnt; ++ ++ if 
(ctx->pwr_state_change) ++ stats->pwr_state_change = 1; ++ if (ctx->state & BC_LINK_PAUSED) ++ stats->DrvPauseTime = 1; ++ ++ return BC_STS_SUCCESS; ++} ++ ++static BC_STATUS bc_cproc_reset_stats(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata) ++{ ++ crystalhd_hw_stats(&ctx->hw_ctx, NULL); ++ ++ return BC_STS_SUCCESS; ++} ++ ++static BC_STATUS bc_cproc_chg_clk(struct crystalhd_cmd *ctx, ++ crystalhd_ioctl_data *idata) ++{ ++ BC_CLOCK *clock; ++ uint32_t oldClk; ++ BC_STATUS sts = BC_STS_SUCCESS; ++ ++ if (!ctx || !idata) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ clock = &idata->udata.u.clockValue; ++ oldClk = ctx->hw_ctx.core_clock_mhz; ++ ctx->hw_ctx.core_clock_mhz = clock->clk; ++ ++ if (ctx->state & BC_LINK_READY) { ++ sts = crystalhd_hw_set_core_clock(&ctx->hw_ctx); ++ if (sts == BC_STS_CLK_NOCHG) ++ ctx->hw_ctx.core_clock_mhz = oldClk; ++ } ++ ++ clock->clk = ctx->hw_ctx.core_clock_mhz; ++ ++ return sts; ++} ++ ++/*=============== Cmd Proc Table.. ======================================*/ ++static const crystalhd_cmd_tbl_t g_crystalhd_cproc_tbl[] = { ++ { BCM_IOC_GET_VERSION, bc_cproc_get_version, 0}, ++ { BCM_IOC_GET_HWTYPE, bc_cproc_get_hwtype, 0}, ++ { BCM_IOC_REG_RD, bc_cproc_reg_rd, 0}, ++ { BCM_IOC_REG_WR, bc_cproc_reg_wr, 0}, ++ { BCM_IOC_FPGA_RD, bc_cproc_link_reg_rd, 0}, ++ { BCM_IOC_FPGA_WR, bc_cproc_link_reg_wr, 0}, ++ { BCM_IOC_MEM_RD, bc_cproc_mem_rd, 0}, ++ { BCM_IOC_MEM_WR, bc_cproc_mem_wr, 0}, ++ { BCM_IOC_RD_PCI_CFG, bc_cproc_cfg_rd, 0}, ++ { BCM_IOC_WR_PCI_CFG, bc_cproc_cfg_wr, 1}, ++ { BCM_IOC_FW_DOWNLOAD, bc_cproc_download_fw, 1}, ++ { BCM_IOC_FW_CMD, bc_cproc_do_fw_cmd, 1}, ++ { BCM_IOC_PROC_INPUT, bc_cproc_proc_input, 1}, ++ { BCM_IOC_ADD_RXBUFFS, bc_cproc_add_cap_buff, 1}, ++ { BCM_IOC_FETCH_RXBUFF, bc_cproc_fetch_frame, 1}, ++ { BCM_IOC_START_RX_CAP, bc_cproc_start_capture, 1}, ++ { BCM_IOC_FLUSH_RX_CAP, bc_cproc_flush_cap_buffs, 1}, ++ { BCM_IOC_GET_DRV_STAT, bc_cproc_get_stats, 0}, ++ { BCM_IOC_RST_DRV_STAT, bc_cproc_reset_stats, 0}, ++ { BCM_IOC_NOTIFY_MODE, bc_cproc_notify_mode, 0}, ++ { BCM_IOC_CHG_CLK, bc_cproc_chg_clk, 0}, ++ { BCM_IOC_END, NULL}, ++}; ++ ++/*=============== Cmd Proc Functions.. ===================================*/ ++ ++/** ++ * crystalhd_suspend - Power management suspend request. ++ * @ctx: Command layer context. ++ * @idata: Iodata - required for internal use. ++ * ++ * Return: ++ * status ++ * ++ * 1. Set the state to Suspend. ++ * 2. Flush the Rx Buffers it will unmap all the buffers and ++ * stop the RxDMA engine. ++ * 3. Cancel The TX Io and Stop Dma Engine. ++ * 4. Put the DDR in to deep sleep. ++ * 5. Stop the hardware putting it in to Reset State. ++ * ++ * Current gstreamer frame work does not provide any power management ++ * related notification to user mode decoder plug-in. As a work-around ++ * we pass on the power mangement notification to our plug-in by completing ++ * all outstanding requests with BC_STS_IO_USER_ABORT return code. 
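
To make the sequence in the comment above concrete, the OS power-management glue would drive this entry point roughly as sketched below. This is an assumption-laden illustration, not part of the patch: the function name is invented, and a zeroed scratch crystalhd_ioctl_data is passed only so the capture-flush path has an idata to fill in.

#include <linux/errno.h>
#include <linux/string.h>
#include "crystalhd_cmds.h"

/* Quiesce the command layer before the device is powered down. */
static int example_suspend_glue(struct crystalhd_cmd *cmd_ctx)
{
	crystalhd_ioctl_data idata;

	memset(&idata, 0, sizeof(idata));
	if (crystalhd_suspend(cmd_ctx, &idata) != BC_STS_SUCCESS)
		return -EIO;
	return 0;
}

Real glue code would more likely allocate the scratch idata than keep a structure of this size on the kernel stack, and would follow up with the usual pci_save_state()/pci_set_power_state() steps before the device loses power.
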
++ */
++BC_STATUS crystalhd_suspend(struct crystalhd_cmd *ctx, crystalhd_ioctl_data *idata)
++{
++	BC_STATUS sts = BC_STS_SUCCESS;
++
++	if (!ctx || !idata) {
++		BCMLOG_ERR("Invalid Parameters\n");
++		return BC_STS_ERROR;
++	}
++
++	if (ctx->state & BC_LINK_SUSPEND)
++		return BC_STS_SUCCESS;
++
++	if (ctx->state == BC_LINK_INVALID) {
++		BCMLOG(BCMLOG_DBG, "Nothing To Do Suspend Success\n");
++		return BC_STS_SUCCESS;
++	}
++
++	ctx->state |= BC_LINK_SUSPEND;
++
++	bc_cproc_mark_pwr_state(ctx);
++
++	if (ctx->state & BC_LINK_CAP_EN) {
++		sts = bc_cproc_flush_cap_buffs(ctx, idata);
++		if (sts != BC_STS_SUCCESS)
++			return sts;
++	}
++
++	if (ctx->tx_list_id) {
++		sts = crystalhd_hw_cancel_tx(&ctx->hw_ctx, ctx->tx_list_id);
++		if (sts != BC_STS_SUCCESS)
++			return sts;
++	}
++
++	sts = crystalhd_hw_suspend(&ctx->hw_ctx);
++	if (sts != BC_STS_SUCCESS)
++		return sts;
++
++	BCMLOG(BCMLOG_DBG, "BCM70012 suspend success\n");
++
++	return BC_STS_SUCCESS;
++}
++
++/**
++ * crystalhd_resume - Resume frame capture.
++ * @ctx: Command layer context.
++ *
++ * Return:
++ *	status
++ *
++ * Resume frame capture.
++ *
++ * PM_Resume can't resume the playback state back to the pre-suspend state
++ * because we don't keep video clip related information within the driver.
++ * To get back to the pre-suspend state, the app will re-open the device and
++ * start a new playback session from the pre-suspend clip position.
++ *
++ */
++BC_STATUS crystalhd_resume(struct crystalhd_cmd *ctx)
++{
++	BCMLOG(BCMLOG_DBG, "crystalhd_resume Success %x\n", ctx->state);
++
++	bc_cproc_mark_pwr_state(ctx);
++
++	return BC_STS_SUCCESS;
++}
++
++/**
++ * crystalhd_user_open - Create application handle.
++ * @ctx: Command layer context.
++ * @user_ctx: User ID context.
++ *
++ * Return:
++ *	status
++ *
++ * Creates an application-specific UID and allocates
++ * application-specific resources. HW layer initialization
++ * is done for the first open request.
++ */
++BC_STATUS crystalhd_user_open(struct crystalhd_cmd *ctx,
++			      struct crystalhd_user **user_ctx)
++{
++	struct crystalhd_user *uc;
++
++	if (!ctx || !user_ctx) {
++		BCMLOG_ERR("Invalid arg..\n");
++		return BC_STS_INV_ARG;
++	}
++
++	uc = bc_cproc_get_uid(ctx);
++	if (!uc) {
++		BCMLOG(BCMLOG_INFO, "No free user context...\n");
++		return BC_STS_BUSY;
++	}
++
++	BCMLOG(BCMLOG_INFO, "Opening new user[%x] handle\n", uc->uid);
++
++	crystalhd_hw_open(&ctx->hw_ctx, ctx->adp);
++
++	uc->in_use = 1;
++
++	*user_ctx = uc;
++
++	return BC_STS_SUCCESS;
++}
++
++/**
++ * crystalhd_user_close - Close application handle.
++ * @ctx: Command layer context.
++ * @uc: User ID context.
++ *
++ * Return:
++ *	status
++ *
++ * Close the application handle and release app-specific
++ * resources.
++ */
++BC_STATUS crystalhd_user_close(struct crystalhd_cmd *ctx, struct crystalhd_user *uc)
++{
++	uint32_t mode = uc->mode;
++
++	ctx->user[uc->uid].mode = DTS_MODE_INV;
++	ctx->user[uc->uid].in_use = 0;
++	ctx->cin_wait_exit = 1;
++	ctx->pwr_state_change = 0;
++
++	BCMLOG(BCMLOG_INFO, "Closing user[%x] handle\n", uc->uid);
++
++	if ((mode == DTS_DIAG_MODE) || (mode == DTS_PLAYBACK_MODE)) {
++		crystalhd_hw_free_dma_rings(&ctx->hw_ctx);
++		crystalhd_destroy_dio_pool(ctx->adp);
++	} else if (bc_cproc_get_user_count(ctx)) {
++		return BC_STS_SUCCESS;
++	}
++
++	crystalhd_hw_close(&ctx->hw_ctx);
++
++	ctx->state = BC_LINK_INVALID;
++
++	return BC_STS_SUCCESS;
++}
++
++/**
++ * crystalhd_setup_cmd_context - Setup Command layer resources.
++ * @ctx: Command layer context.
++ * @adp: Adapter context ++ * ++ * Return: ++ * status ++ * ++ * Called at the time of driver load. ++ */ ++BC_STATUS crystalhd_setup_cmd_context(struct crystalhd_cmd *ctx, ++ struct crystalhd_adp *adp) ++{ ++ int i = 0; ++ ++ if (!ctx || !adp) { ++ BCMLOG_ERR("Invalid arg!!\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ if (ctx->adp) ++ BCMLOG(BCMLOG_DBG, "Resetting Cmd context delete missing..\n"); ++ ++ ctx->adp = adp; ++ for (i = 0; i < BC_LINK_MAX_OPENS; i++) { ++ ctx->user[i].uid = i; ++ ctx->user[i].in_use = 0; ++ ctx->user[i].mode = DTS_MODE_INV; ++ } ++ ++ /*Open and Close the Hardware to put it in to sleep state*/ ++ crystalhd_hw_open(&ctx->hw_ctx, ctx->adp); ++ crystalhd_hw_close(&ctx->hw_ctx); ++ return BC_STS_SUCCESS; ++} ++ ++/** ++ * crystalhd_delete_cmd_context - Release Command layer resources. ++ * @ctx: Command layer contextx. ++ * ++ * Return: ++ * status ++ * ++ * Called at the time of driver un-load. ++ */ ++BC_STATUS crystalhd_delete_cmd_context(struct crystalhd_cmd *ctx) ++{ ++ BCMLOG(BCMLOG_DBG, "Deleting Command context..\n"); ++ ++ ctx->adp = NULL; ++ ++ return BC_STS_SUCCESS; ++} ++ ++/** ++ * crystalhd_get_cmd_proc - Cproc table lookup. ++ * @ctx: Command layer contextx. ++ * @cmd: IOCTL command code. ++ * @uc: User ID context. ++ * ++ * Return: ++ * command proc function pointer ++ * ++ * This function checks the process context, application's ++ * mode of operation and returns the function pointer ++ * from the cproc table. ++ */ ++crystalhd_cmd_proc crystalhd_get_cmd_proc(struct crystalhd_cmd *ctx, uint32_t cmd, ++ struct crystalhd_user *uc) ++{ ++ crystalhd_cmd_proc cproc = NULL; ++ unsigned int i, tbl_sz; ++ ++ if (!ctx) { ++ BCMLOG_ERR("Invalid arg.. Cmd[%d]\n", cmd); ++ return NULL; ++ } ++ ++ if ((cmd != BCM_IOC_GET_DRV_STAT) && (ctx->state & BC_LINK_SUSPEND)) { ++ BCMLOG_ERR("Invalid State [suspend Set].. Cmd[%d]\n", cmd); ++ return NULL; ++ } ++ ++ tbl_sz = sizeof(g_crystalhd_cproc_tbl) / sizeof(crystalhd_cmd_tbl_t); ++ for (i = 0; i < tbl_sz; i++) { ++ if (g_crystalhd_cproc_tbl[i].cmd_id == cmd) { ++ if ((uc->mode == DTS_MONITOR_MODE) && ++ (g_crystalhd_cproc_tbl[i].block_mon)) { ++ BCMLOG(BCMLOG_INFO, "Blocking cmd %d \n", cmd); ++ break; ++ } ++ cproc = g_crystalhd_cproc_tbl[i].cmd_proc; ++ break; ++ } ++ } ++ ++ return cproc; ++} ++ ++/** ++ * crystalhd_cmd_interrupt - ISR entry point ++ * @ctx: Command layer contextx. ++ * ++ * Return: ++ * TRUE: If interrupt from bcm70012 device. ++ * ++ * ++ * ISR entry point from OS layer. ++ */ ++bool crystalhd_cmd_interrupt(struct crystalhd_cmd *ctx) ++{ ++ if (!ctx) { ++ BCMLOG_ERR("Invalid arg..\n"); ++ return 0; ++ } ++ ++ return crystalhd_hw_interrupt(ctx->adp, &ctx->hw_ctx); ++} +diff --git a/drivers/staging/crystalhd/crystalhd_cmds.h b/drivers/staging/crystalhd/crystalhd_cmds.h +new file mode 100644 +index 0000000..6b290ae +--- /dev/null ++++ b/drivers/staging/crystalhd/crystalhd_cmds.h +@@ -0,0 +1,88 @@ ++/*************************************************************************** ++ * Copyright (c) 2005-2009, Broadcom Corporation. ++ * ++ * Name: crystalhd_cmds . h ++ * ++ * Description: ++ * BCM70010 Linux driver user command interfaces. ++ * ++ * HISTORY: ++ * ++ ********************************************************************** ++ * This file is part of the crystalhd device driver. ++ * ++ * This driver is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, version 2 of the License. 
++ * ++ * This driver is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this driver. If not, see . ++ **********************************************************************/ ++ ++#ifndef _CRYSTALHD_CMDS_H_ ++#define _CRYSTALHD_CMDS_H_ ++ ++/* ++ * NOTE:: This is the main interface file between the Linux layer ++ * and the harware layer. This file will use the definitions ++ * from _dts_glob and dts_defs etc.. which are defined for ++ * windows. ++ */ ++#include "crystalhd_misc.h" ++#include "crystalhd_hw.h" ++ ++enum _crystalhd_state{ ++ BC_LINK_INVALID = 0x00, ++ BC_LINK_INIT = 0x01, ++ BC_LINK_CAP_EN = 0x02, ++ BC_LINK_FMT_CHG = 0x04, ++ BC_LINK_SUSPEND = 0x10, ++ BC_LINK_PAUSED = 0x20, ++ BC_LINK_READY = (BC_LINK_INIT | BC_LINK_CAP_EN | BC_LINK_FMT_CHG), ++}; ++ ++struct crystalhd_user { ++ uint32_t uid; ++ uint32_t in_use; ++ uint32_t mode; ++}; ++ ++#define DTS_MODE_INV (-1) ++ ++struct crystalhd_cmd { ++ uint32_t state; ++ struct crystalhd_adp *adp; ++ struct crystalhd_user user[BC_LINK_MAX_OPENS]; ++ ++ spinlock_t ctx_lock; ++ uint32_t tx_list_id; ++ uint32_t cin_wait_exit; ++ uint32_t pwr_state_change; ++ struct crystalhd_hw hw_ctx; ++}; ++ ++typedef BC_STATUS (*crystalhd_cmd_proc)(struct crystalhd_cmd *, crystalhd_ioctl_data *); ++ ++typedef struct _crystalhd_cmd_tbl { ++ uint32_t cmd_id; ++ const crystalhd_cmd_proc cmd_proc; ++ uint32_t block_mon; ++} crystalhd_cmd_tbl_t; ++ ++ ++BC_STATUS crystalhd_suspend(struct crystalhd_cmd *ctx, crystalhd_ioctl_data *idata); ++BC_STATUS crystalhd_resume(struct crystalhd_cmd *ctx); ++crystalhd_cmd_proc crystalhd_get_cmd_proc(struct crystalhd_cmd *ctx, uint32_t cmd, ++ struct crystalhd_user *uc); ++BC_STATUS crystalhd_user_open(struct crystalhd_cmd *ctx, struct crystalhd_user **user_ctx); ++BC_STATUS crystalhd_user_close(struct crystalhd_cmd *ctx, struct crystalhd_user *uc); ++BC_STATUS crystalhd_setup_cmd_context(struct crystalhd_cmd *ctx, struct crystalhd_adp *adp); ++BC_STATUS crystalhd_delete_cmd_context(struct crystalhd_cmd *ctx); ++bool crystalhd_cmd_interrupt(struct crystalhd_cmd *ctx); ++ ++#endif +diff --git a/drivers/staging/crystalhd/crystalhd_fw_if.h b/drivers/staging/crystalhd/crystalhd_fw_if.h +new file mode 100644 +index 0000000..261cd19 +--- /dev/null ++++ b/drivers/staging/crystalhd/crystalhd_fw_if.h +@@ -0,0 +1,369 @@ ++/*************************************************************************** ++ * Copyright (c) 2005-2009, Broadcom Corporation. ++ * ++ * Name: crystalhd_fw_if . h ++ * ++ * Description: ++ * BCM70012 Firmware interface definitions. ++ * ++ * HISTORY: ++ * ++ ********************************************************************** ++ * This file is part of the crystalhd device driver. ++ * ++ * This driver is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, version 2 of the License. ++ * ++ * This driver is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this driver. If not, see . ++ **********************************************************************/ ++ ++#ifndef _CRYSTALHD_FW_IF_H_ ++#define _CRYSTALHD_FW_IF_H_ ++ ++/* TBD: Pull in only required defs into this file.. */ ++ ++ ++ ++/* User Data Header */ ++typedef struct user_data { ++ struct user_data *next; ++ uint32_t type; ++ uint32_t size; ++} UD_HDR; ++ ++ ++ ++/*------------------------------------------------------* ++ * MPEG Extension to the PPB * ++ *------------------------------------------------------*/ ++typedef struct { ++ uint32_t to_be_defined; ++ uint32_t valid; ++ ++ /* Always valid, defaults to picture size if no ++ sequence display extension in the stream. */ ++ uint32_t display_horizontal_size; ++ uint32_t display_vertical_size; ++ ++ /* MPEG_VALID_PANSCAN ++ Offsets are a copy values from the MPEG stream. */ ++ uint32_t offset_count; ++ int32_t horizontal_offset[3]; ++ int32_t vertical_offset[3]; ++ ++ /* MPEG_VALID_USERDATA ++ User data is in the form of a linked list. */ ++ int32_t userDataSize; ++ UD_HDR *userData; ++ ++} PPB_MPEG; ++ ++ ++/*------------------------------------------------------* ++ * VC1 Extension to the PPB * ++ *------------------------------------------------------*/ ++typedef struct { ++ uint32_t to_be_defined; ++ uint32_t valid; ++ ++ /* Always valid, defaults to picture size if no ++ sequence display extension in the stream. */ ++ uint32_t display_horizontal_size; ++ uint32_t display_vertical_size; ++ ++ /* VC1 pan scan windows */ ++ uint32_t num_panscan_windows; ++ int32_t ps_horiz_offset[4]; ++ int32_t ps_vert_offset[4]; ++ int32_t ps_width[4]; ++ int32_t ps_height[4]; ++ ++ /* VC1_VALID_USERDATA ++ User data is in the form of a linked list. */ ++ int32_t userDataSize; ++ UD_HDR *userData; ++ ++} PPB_VC1; ++ ++/*------------------------------------------------------* ++ * H.264 Extension to the PPB * ++ *------------------------------------------------------*/ ++ ++/** ++ * @brief Film grain SEI message. ++ * ++ * Content of the film grain SEI message. ++ */ ++ ++/* maximum number of model-values as for Thomson spec(standard says 5) */ ++#define MAX_FGT_MODEL_VALUE (3) ++ ++/* maximum number of intervals(as many as 256 intervals?) */ ++#define MAX_FGT_VALUE_INTERVAL (256) ++ ++typedef struct FGT_SEI { ++ struct FGT_SEI *next; ++ unsigned char model_values[3][MAX_FGT_VALUE_INTERVAL][MAX_FGT_MODEL_VALUE]; ++ unsigned char upper_bound[3][MAX_FGT_VALUE_INTERVAL]; ++ unsigned char lower_bound[3][MAX_FGT_VALUE_INTERVAL]; ++ ++ unsigned char cancel_flag; /* Cancel flag: 1 no film grain. */ ++ unsigned char model_id; /* Model id. */ ++ ++ /* +unused SE based on Thomson spec */ ++ unsigned char color_desc_flag; /* Separate color descrition flag. */ ++ unsigned char bit_depth_luma; /* Bit depth luma minus 8. */ ++ unsigned char bit_depth_chroma; /* Bit depth chroma minus 8. */ ++ unsigned char full_range_flag; /* Full range flag. */ ++ unsigned char color_primaries; /* Color primaries. */ ++ unsigned char transfer_charact; /* Transfer characteristics. */ ++ unsigned char matrix_coeff; /*< Matrix coefficients. */ ++ /* -unused SE based on Thomson spec */ ++ ++ unsigned char blending_mode_id; /* Blending mode. */ ++ unsigned char log2_scale_factor; /* Log2 scale factor (2-7). */ ++ unsigned char comp_flag[3]; /* Components [0,2] parameters present flag. */ ++ unsigned char num_intervals_minus1[3]; /* Number of intensity level intervals. 
*/ ++ unsigned char num_model_values[3]; /* Number of model values. */ ++ uint16_t repetition_period; /* Repetition period (0-16384) */ ++ ++} FGT_SEI; ++ ++typedef struct { ++ /* 'valid' specifies which fields (or sets of ++ * fields) below are valid. If the corresponding ++ * bit in 'valid' is NOT set then that field(s) ++ * is (are) not initialized. */ ++ uint32_t valid; ++ ++ int32_t poc_top; /* POC for Top Field/Frame */ ++ int32_t poc_bottom; /* POC for Bottom Field */ ++ uint32_t idr_pic_id; ++ ++ /* H264_VALID_PANSCAN */ ++ uint32_t pan_scan_count; ++ int32_t pan_scan_left[3]; ++ int32_t pan_scan_right[3]; ++ int32_t pan_scan_top[3]; ++ int32_t pan_scan_bottom[3]; ++ ++ /* H264_VALID_CT_TYPE */ ++ uint32_t ct_type_count; ++ uint32_t ct_type[3]; ++ ++ /* H264_VALID_SPS_CROP */ ++ int32_t sps_crop_left; ++ int32_t sps_crop_right; ++ int32_t sps_crop_top; ++ int32_t sps_crop_bottom; ++ ++ /* H264_VALID_VUI */ ++ uint32_t chroma_top; ++ uint32_t chroma_bottom; ++ ++ /* H264_VALID_USER */ ++ uint32_t user_data_size; ++ UD_HDR *user_data; ++ ++ /* H264 VALID FGT */ ++ FGT_SEI *pfgt; ++ ++} PPB_H264; ++ ++typedef struct { ++ /* Common fields. */ ++ uint32_t picture_number; /* Ordinal display number */ ++ uint32_t video_buffer; /* Video (picbuf) number */ ++ uint32_t video_address; /* Address of picbuf Y */ ++ uint32_t video_address_uv; /* Address of picbuf UV */ ++ uint32_t video_stripe; /* Picbuf stripe */ ++ uint32_t video_width; /* Picbuf width */ ++ uint32_t video_height; /* Picbuf height */ ++ ++ uint32_t channel_id; /* Decoder channel ID */ ++ uint32_t status; /* reserved */ ++ uint32_t width; /* pixels */ ++ uint32_t height; /* pixels */ ++ uint32_t chroma_format; /* see above */ ++ uint32_t pulldown; /* see above */ ++ uint32_t flags; /* see above */ ++ uint32_t pts; /* 32 LSBs of PTS */ ++ uint32_t protocol; /* protocolXXX (above) */ ++ ++ uint32_t frame_rate; /* see above */ ++ uint32_t matrix_coeff; /* see above */ ++ uint32_t aspect_ratio; /* see above */ ++ uint32_t colour_primaries; /* see above */ ++ uint32_t transfer_char; /* see above */ ++ uint32_t pcr_offset; /* 45kHz if PCR type; else 27MHz */ ++ uint32_t n_drop; /* Number of pictures to be dropped */ ++ ++ uint32_t custom_aspect_ratio_width_height; ++ /* upper 16-bits is Y and lower 16-bits is X */ ++ ++ uint32_t picture_tag; /* Indexing tag from BUD packets */ ++ uint32_t picture_done_payload; ++ uint32_t picture_meta_payload; ++ uint32_t reserved[1]; ++ ++ /* Protocol-specific extensions. 
*/ ++ union { ++ PPB_H264 h264; ++ PPB_MPEG mpeg; ++ PPB_VC1 vc1; ++ } other; ++ ++} PPB; ++ ++typedef struct { ++ uint32_t bFormatChange; ++ uint32_t resolution; ++ uint32_t channelId; ++ uint32_t ppbPtr; ++ int32_t ptsStcOffset; ++ uint32_t zeroPanscanValid; ++ uint32_t dramOutBufAddr; ++ uint32_t yComponent; ++ PPB ppb; ++ ++} C011_PIB; ++ ++ ++ ++typedef struct { ++ uint32_t command; ++ uint32_t sequence; ++ uint32_t status; ++ uint32_t picBuf; ++ uint32_t picRelBuf; ++ uint32_t picInfoDeliveryQ; ++ uint32_t picInfoReleaseQ; ++ uint32_t channelStatus; ++ uint32_t userDataDeliveryQ; ++ uint32_t userDataReleaseQ; ++ uint32_t transportStreamCaptureAddr; ++ uint32_t asyncEventQ; ++ ++} DecRspChannelStartVideo; ++ ++#define eCMD_C011_CMD_BASE (0x73763000) ++ ++/* host commands */ ++typedef enum { ++ eCMD_TS_GET_NEXT_PIC = 0x7376F100, /* debug get next picture */ ++ eCMD_TS_GET_LAST_PIC = 0x7376F102, /* debug get last pic status */ ++ eCMD_TS_READ_WRITE_MEM = 0x7376F104, /* debug read write memory */ ++ ++ /* New API commands */ ++ /* General commands */ ++ eCMD_C011_INIT = eCMD_C011_CMD_BASE + 0x01, ++ eCMD_C011_RESET = eCMD_C011_CMD_BASE + 0x02, ++ eCMD_C011_SELF_TEST = eCMD_C011_CMD_BASE + 0x03, ++ eCMD_C011_GET_VERSION = eCMD_C011_CMD_BASE + 0x04, ++ eCMD_C011_GPIO = eCMD_C011_CMD_BASE + 0x05, ++ eCMD_C011_DEBUG_SETUP = eCMD_C011_CMD_BASE + 0x06, ++ ++ /* Decoding commands */ ++ eCMD_C011_DEC_CHAN_OPEN = eCMD_C011_CMD_BASE + 0x100, ++ eCMD_C011_DEC_CHAN_CLOSE = eCMD_C011_CMD_BASE + 0x101, ++ eCMD_C011_DEC_CHAN_ACTIVATE = eCMD_C011_CMD_BASE + 0x102, ++ eCMD_C011_DEC_CHAN_STATUS = eCMD_C011_CMD_BASE + 0x103, ++ eCMD_C011_DEC_CHAN_FLUSH = eCMD_C011_CMD_BASE + 0x104, ++ eCMD_C011_DEC_CHAN_TRICK_PLAY = eCMD_C011_CMD_BASE + 0x105, ++ eCMD_C011_DEC_CHAN_TS_PIDS = eCMD_C011_CMD_BASE + 0x106, ++ eCMD_C011_DEC_CHAN_PS_STREAM_ID = eCMD_C011_CMD_BASE + 0x107, ++ eCMD_C011_DEC_CHAN_INPUT_PARAMS = eCMD_C011_CMD_BASE + 0x108, ++ eCMD_C011_DEC_CHAN_VIDEO_OUTPUT = eCMD_C011_CMD_BASE + 0x109, ++ eCMD_C011_DEC_CHAN_OUTPUT_FORMAT = eCMD_C011_CMD_BASE + 0x10A, ++ eCMD_C011_DEC_CHAN_SCALING_FILTERS = eCMD_C011_CMD_BASE + 0x10B, ++ eCMD_C011_DEC_CHAN_OSD_MODE = eCMD_C011_CMD_BASE + 0x10D, ++ eCMD_C011_DEC_CHAN_DROP = eCMD_C011_CMD_BASE + 0x10E, ++ eCMD_C011_DEC_CHAN_RELEASE = eCMD_C011_CMD_BASE + 0x10F, ++ eCMD_C011_DEC_CHAN_STREAM_SETTINGS = eCMD_C011_CMD_BASE + 0x110, ++ eCMD_C011_DEC_CHAN_PAUSE_OUTPUT = eCMD_C011_CMD_BASE + 0x111, ++ eCMD_C011_DEC_CHAN_CHANGE = eCMD_C011_CMD_BASE + 0x112, ++ eCMD_C011_DEC_CHAN_SET_STC = eCMD_C011_CMD_BASE + 0x113, ++ eCMD_C011_DEC_CHAN_SET_PTS = eCMD_C011_CMD_BASE + 0x114, ++ eCMD_C011_DEC_CHAN_CC_MODE = eCMD_C011_CMD_BASE + 0x115, ++ eCMD_C011_DEC_CREATE_AUDIO_CONTEXT = eCMD_C011_CMD_BASE + 0x116, ++ eCMD_C011_DEC_COPY_AUDIO_CONTEXT = eCMD_C011_CMD_BASE + 0x117, ++ eCMD_C011_DEC_DELETE_AUDIO_CONTEXT = eCMD_C011_CMD_BASE + 0x118, ++ eCMD_C011_DEC_CHAN_SET_DECYPTION = eCMD_C011_CMD_BASE + 0x119, ++ eCMD_C011_DEC_CHAN_START_VIDEO = eCMD_C011_CMD_BASE + 0x11A, ++ eCMD_C011_DEC_CHAN_STOP_VIDEO = eCMD_C011_CMD_BASE + 0x11B, ++ eCMD_C011_DEC_CHAN_PIC_CAPTURE = eCMD_C011_CMD_BASE + 0x11C, ++ eCMD_C011_DEC_CHAN_PAUSE = eCMD_C011_CMD_BASE + 0x11D, ++ eCMD_C011_DEC_CHAN_PAUSE_STATE = eCMD_C011_CMD_BASE + 0x11E, ++ eCMD_C011_DEC_CHAN_SET_SLOWM_RATE = eCMD_C011_CMD_BASE + 0x11F, ++ eCMD_C011_DEC_CHAN_GET_SLOWM_RATE = eCMD_C011_CMD_BASE + 0x120, ++ eCMD_C011_DEC_CHAN_SET_FF_RATE = eCMD_C011_CMD_BASE + 0x121, ++ eCMD_C011_DEC_CHAN_GET_FF_RATE = eCMD_C011_CMD_BASE + 0x122, ++ 
eCMD_C011_DEC_CHAN_FRAME_ADVANCE = eCMD_C011_CMD_BASE + 0x123, ++ eCMD_C011_DEC_CHAN_SET_SKIP_PIC_MODE = eCMD_C011_CMD_BASE + 0x124, ++ eCMD_C011_DEC_CHAN_GET_SKIP_PIC_MODE = eCMD_C011_CMD_BASE + 0x125, ++ eCMD_C011_DEC_CHAN_FILL_PIC_BUF = eCMD_C011_CMD_BASE + 0x126, ++ eCMD_C011_DEC_CHAN_SET_CONTINUITY_CHECK = eCMD_C011_CMD_BASE + 0x127, ++ eCMD_C011_DEC_CHAN_GET_CONTINUITY_CHECK = eCMD_C011_CMD_BASE + 0x128, ++ eCMD_C011_DEC_CHAN_SET_BRCM_TRICK_MODE = eCMD_C011_CMD_BASE + 0x129, ++ eCMD_C011_DEC_CHAN_GET_BRCM_TRICK_MODE = eCMD_C011_CMD_BASE + 0x12A, ++ eCMD_C011_DEC_CHAN_REVERSE_FIELD_STATUS = eCMD_C011_CMD_BASE + 0x12B, ++ eCMD_C011_DEC_CHAN_I_PICTURE_FOUND = eCMD_C011_CMD_BASE + 0x12C, ++ eCMD_C011_DEC_CHAN_SET_PARAMETER = eCMD_C011_CMD_BASE + 0x12D, ++ eCMD_C011_DEC_CHAN_SET_USER_DATA_MODE = eCMD_C011_CMD_BASE + 0x12E, ++ eCMD_C011_DEC_CHAN_SET_PAUSE_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x12F, ++ eCMD_C011_DEC_CHAN_SET_SLOW_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x130, ++ eCMD_C011_DEC_CHAN_SET_FF_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x131, ++ eCMD_C011_DEC_CHAN_SET_DISPLAY_TIMING_MODE = eCMD_C011_CMD_BASE + 0x132, ++ eCMD_C011_DEC_CHAN_SET_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x133, ++ eCMD_C011_DEC_CHAN_GET_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x134, ++ eCMD_C011_DEC_CHAN_SET_REVERSE_FIELD = eCMD_C011_CMD_BASE + 0x135, ++ eCMD_C011_DEC_CHAN_STREAM_OPEN = eCMD_C011_CMD_BASE + 0x136, ++ eCMD_C011_DEC_CHAN_SET_PCR_PID = eCMD_C011_CMD_BASE + 0x137, ++ eCMD_C011_DEC_CHAN_SET_VID_PID = eCMD_C011_CMD_BASE + 0x138, ++ eCMD_C011_DEC_CHAN_SET_PAN_SCAN_MODE = eCMD_C011_CMD_BASE + 0x139, ++ eCMD_C011_DEC_CHAN_START_DISPLAY_AT_PTS = eCMD_C011_CMD_BASE + 0x140, ++ eCMD_C011_DEC_CHAN_STOP_DISPLAY_AT_PTS = eCMD_C011_CMD_BASE + 0x141, ++ eCMD_C011_DEC_CHAN_SET_DISPLAY_ORDER = eCMD_C011_CMD_BASE + 0x142, ++ eCMD_C011_DEC_CHAN_GET_DISPLAY_ORDER = eCMD_C011_CMD_BASE + 0x143, ++ eCMD_C011_DEC_CHAN_SET_HOST_TRICK_MODE = eCMD_C011_CMD_BASE + 0x144, ++ eCMD_C011_DEC_CHAN_SET_OPERATION_MODE = eCMD_C011_CMD_BASE + 0x145, ++ eCMD_C011_DEC_CHAN_DISPLAY_PAUSE_UNTO_PTS = eCMD_C011_CMD_BASE + 0x146, ++ eCMD_C011_DEC_CHAN_SET_PTS_STC_DIFF_THRESHOLD = eCMD_C011_CMD_BASE + 0x147, ++ eCMD_C011_DEC_CHAN_SEND_COMPRESSED_BUF = eCMD_C011_CMD_BASE + 0x148, ++ eCMD_C011_DEC_CHAN_SET_CLIPPING = eCMD_C011_CMD_BASE + 0x149, ++ eCMD_C011_DEC_CHAN_SET_PARAMETERS_FOR_HARD_RESET_INTERRUPT_TO_HOST ++ = eCMD_C011_CMD_BASE + 0x150, ++ ++ /* Decoder RevD commands */ ++ eCMD_C011_DEC_CHAN_SET_CSC = eCMD_C011_CMD_BASE + 0x180, /* color space conversion */ ++ eCMD_C011_DEC_CHAN_SET_RANGE_REMAP = eCMD_C011_CMD_BASE + 0x181, ++ eCMD_C011_DEC_CHAN_SET_FGT = eCMD_C011_CMD_BASE + 0x182, ++ /* Note: 0x183 not implemented yet in Rev D main */ ++ eCMD_C011_DEC_CHAN_SET_LASTPICTURE_PADDING = eCMD_C011_CMD_BASE + 0x183, ++ ++ /* Decoder 7412 commands (7412-only) */ ++ eCMD_C011_DEC_CHAN_SET_CONTENT_KEY = eCMD_C011_CMD_BASE + 0x190, ++ eCMD_C011_DEC_CHAN_SET_SESSION_KEY = eCMD_C011_CMD_BASE + 0x191, ++ eCMD_C011_DEC_CHAN_FMT_CHANGE_ACK = eCMD_C011_CMD_BASE + 0x192, ++ ++ eCMD_C011_DEC_CHAN_CUSTOM_VIDOUT = eCMD_C011_CMD_BASE + 0x1FF, ++ ++ /* Encoding commands */ ++ eCMD_C011_ENC_CHAN_OPEN = eCMD_C011_CMD_BASE + 0x200, ++ eCMD_C011_ENC_CHAN_CLOSE = eCMD_C011_CMD_BASE + 0x201, ++ eCMD_C011_ENC_CHAN_ACTIVATE = eCMD_C011_CMD_BASE + 0x202, ++ eCMD_C011_ENC_CHAN_CONTROL = eCMD_C011_CMD_BASE + 0x203, ++ eCMD_C011_ENC_CHAN_STATISTICS = eCMD_C011_CMD_BASE + 0x204, ++ ++ eNOTIFY_C011_ENC_CHAN_EVENT = eCMD_C011_CMD_BASE + 0x210, ++ ++} eC011_TS_CMD; ++ ++#endif 
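[Editor's note: the firmware command space in crystalhd_fw_if.h is laid out as fixed offsets from eCMD_C011_CMD_BASE, with general commands in the 0x01-0xFF range, decoder-channel commands in 0x100-0x1FF, and encoder-channel commands in 0x200-0x2FF. The standalone sketch below is NOT part of the patch; the helper name and the range boundaries are assumptions inferred from the enum layout above, shown only to illustrate how a host-side tool might decode a raw command word.]

/*
 * Standalone illustration only (not driver code): decode a C011 firmware
 * command word into its offset from eCMD_C011_CMD_BASE and a rough
 * category. The 0x100/0x200/0x300 boundaries are read off the enum layout
 * above, not a documented firmware contract.
 */
#include <stdint.h>
#include <stdio.h>

#define eCMD_C011_CMD_BASE 0x73763000u

static const char *c011_cmd_category(uint32_t cmd)
{
	uint32_t off = cmd - eCMD_C011_CMD_BASE;

	if (cmd < eCMD_C011_CMD_BASE)
		return "not a C011 command";
	if (off >= 0x01 && off <= 0xFF)
		return "general";
	if (off >= 0x100 && off <= 0x1FF)
		return "decoder channel";
	if (off >= 0x200 && off <= 0x2FF)
		return "encoder channel";
	return "debug/other (e.g. eCMD_TS_*)";
}

int main(void)
{
	/* eCMD_C011_DEC_CHAN_START_VIDEO = eCMD_C011_CMD_BASE + 0x11A above */
	uint32_t cmd = eCMD_C011_CMD_BASE + 0x11A;

	printf("cmd 0x%08x: offset 0x%03x, %s\n",
	       (unsigned)cmd, (unsigned)(cmd - eCMD_C011_CMD_BASE),
	       c011_cmd_category(cmd));
	return 0;
}

[The driver itself never dispatches on these ranges; it sends the enum values to the firmware, while the ioctl-to-handler mapping on the host side goes through the g_crystalhd_cproc_tbl shown earlier in crystalhd_cmds.c.]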
+diff --git a/drivers/staging/crystalhd/crystalhd_hw.c b/drivers/staging/crystalhd/crystalhd_hw.c +new file mode 100644 +index 0000000..01819d3 +--- /dev/null ++++ b/drivers/staging/crystalhd/crystalhd_hw.c +@@ -0,0 +1,2395 @@ ++/*************************************************************************** ++ * Copyright (c) 2005-2009, Broadcom Corporation. ++ * ++ * Name: crystalhd_hw . c ++ * ++ * Description: ++ * BCM70010 Linux driver HW layer. ++ * ++ ********************************************************************** ++ * This file is part of the crystalhd device driver. ++ * ++ * This driver is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, version 2 of the License. ++ * ++ * This driver is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this driver. If not, see . ++ **********************************************************************/ ++ ++#include ++#include ++#include "crystalhd_hw.h" ++ ++/* Functions internal to this file */ ++ ++static void crystalhd_enable_uarts(struct crystalhd_adp *adp) ++{ ++ bc_dec_reg_wr(adp, UartSelectA, BSVS_UART_STREAM); ++ bc_dec_reg_wr(adp, UartSelectB, BSVS_UART_DEC_OUTER); ++} ++ ++ ++static void crystalhd_start_dram(struct crystalhd_adp *adp) ++{ ++ bc_dec_reg_wr(adp, SDRAM_PARAM, ((40 / 5 - 1) << 0) | ++ /* tras (40ns tras)/(5ns period) -1 ((15/5 - 1) << 4) | // trcd */ ++ ((15 / 5 - 1) << 7) | /* trp */ ++ ((10 / 5 - 1) << 10) | /* trrd */ ++ ((15 / 5 + 1) << 12) | /* twr */ ++ ((2 + 1) << 16) | /* twtr */ ++ ((70 / 5 - 2) << 19) | /* trfc */ ++ (0 << 23)); ++ ++ bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0); ++ bc_dec_reg_wr(adp, SDRAM_EXT_MODE, 2); ++ bc_dec_reg_wr(adp, SDRAM_MODE, 0x132); ++ bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0); ++ bc_dec_reg_wr(adp, SDRAM_REFRESH, 0); ++ bc_dec_reg_wr(adp, SDRAM_REFRESH, 0); ++ bc_dec_reg_wr(adp, SDRAM_MODE, 0x32); ++ /* setting the refresh rate here */ ++ bc_dec_reg_wr(adp, SDRAM_REF_PARAM, ((1 << 12) | 96)); ++} ++ ++ ++static bool crystalhd_bring_out_of_rst(struct crystalhd_adp *adp) ++{ ++ link_misc_perst_deco_ctrl rst_deco_cntrl; ++ link_misc_perst_clk_ctrl rst_clk_cntrl; ++ uint32_t temp; ++ ++ /* ++ * Link clocks: MISC_PERST_CLOCK_CTRL Clear PLL power down bit, ++ * delay to allow PLL to lock Clear alternate clock, stop clock bits ++ */ ++ rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL); ++ rst_clk_cntrl.pll_pwr_dn = 0; ++ crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg); ++ msleep_interruptible(50); ++ ++ rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL); ++ rst_clk_cntrl.stop_core_clk = 0; ++ rst_clk_cntrl.sel_alt_clk = 0; ++ ++ crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg); ++ msleep_interruptible(50); ++ ++ /* ++ * Bus Arbiter Timeout: GISB_ARBITER_TIMER ++ * Set internal bus arbiter timeout to 40us based on core clock speed ++ * (63MHz * 40us = 0x9D8) ++ */ ++ crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x9D8); ++ ++ /* ++ * Decoder clocks: MISC_PERST_DECODER_CTRL ++ * Enable clocks while 7412 reset is asserted, delay ++ * De-assert 7412 reset ++ */ ++ rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL); ++ 
rst_deco_cntrl.stop_bcm_7412_clk = 0; ++ rst_deco_cntrl.bcm7412_rst = 1; ++ crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg); ++ msleep_interruptible(10); ++ ++ rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL); ++ rst_deco_cntrl.bcm7412_rst = 0; ++ crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg); ++ msleep_interruptible(50); ++ ++ /* Disable OTP_CONTENT_MISC to 0 to disable all secure modes */ ++ crystalhd_reg_wr(adp, OTP_CONTENT_MISC, 0); ++ ++ /* Clear bit 29 of 0x404 */ ++ temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION); ++ temp &= ~BC_BIT(29); ++ crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp); ++ ++ /* 2.5V regulator must be set to 2.6 volts (+6%) */ ++ /* FIXME: jarod: what's the point of this reg read? */ ++ temp = crystalhd_reg_rd(adp, MISC_PERST_VREG_CTRL); ++ crystalhd_reg_wr(adp, MISC_PERST_VREG_CTRL, 0xF3); ++ ++ return true; ++} ++ ++static bool crystalhd_put_in_reset(struct crystalhd_adp *adp) ++{ ++ link_misc_perst_deco_ctrl rst_deco_cntrl; ++ link_misc_perst_clk_ctrl rst_clk_cntrl; ++ uint32_t temp; ++ ++ /* ++ * Decoder clocks: MISC_PERST_DECODER_CTRL ++ * Assert 7412 reset, delay ++ * Assert 7412 stop clock ++ */ ++ rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL); ++ rst_deco_cntrl.stop_bcm_7412_clk = 1; ++ crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg); ++ msleep_interruptible(50); ++ ++ /* Bus Arbiter Timeout: GISB_ARBITER_TIMER ++ * Set internal bus arbiter timeout to 40us based on core clock speed ++ * (6.75MHZ * 40us = 0x10E) ++ */ ++ crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x10E); ++ ++ /* Link clocks: MISC_PERST_CLOCK_CTRL ++ * Stop core clk, delay ++ * Set alternate clk, delay, set PLL power down ++ */ ++ rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL); ++ rst_clk_cntrl.stop_core_clk = 1; ++ rst_clk_cntrl.sel_alt_clk = 1; ++ crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg); ++ msleep_interruptible(50); ++ ++ rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL); ++ rst_clk_cntrl.pll_pwr_dn = 1; ++ crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg); ++ ++ /* ++ * Read and restore the Transaction Configuration Register ++ * after core reset ++ */ ++ temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION); ++ ++ /* ++ * Link core soft reset: MISC3_RESET_CTRL ++ * - Write BIT[0]=1 and read it back for core reset to take place ++ */ ++ crystalhd_reg_wr(adp, MISC3_RESET_CTRL, 1); ++ rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC3_RESET_CTRL); ++ msleep_interruptible(50); ++ ++ /* restore the transaction configuration register */ ++ crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp); ++ ++ return true; ++} ++ ++static void crystalhd_disable_interrupts(struct crystalhd_adp *adp) ++{ ++ intr_mask_reg intr_mask; ++ intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG); ++ intr_mask.mask_pcie_err = 1; ++ intr_mask.mask_pcie_rbusmast_err = 1; ++ intr_mask.mask_pcie_rgr_bridge = 1; ++ intr_mask.mask_rx_done = 1; ++ intr_mask.mask_rx_err = 1; ++ intr_mask.mask_tx_done = 1; ++ intr_mask.mask_tx_err = 1; ++ crystalhd_reg_wr(adp, INTR_INTR_MSK_SET_REG, intr_mask.whole_reg); ++ ++ return; ++} ++ ++static void crystalhd_enable_interrupts(struct crystalhd_adp *adp) ++{ ++ intr_mask_reg intr_mask; ++ intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG); ++ intr_mask.mask_pcie_err = 1; ++ 
intr_mask.mask_pcie_rbusmast_err = 1; ++ intr_mask.mask_pcie_rgr_bridge = 1; ++ intr_mask.mask_rx_done = 1; ++ intr_mask.mask_rx_err = 1; ++ intr_mask.mask_tx_done = 1; ++ intr_mask.mask_tx_err = 1; ++ crystalhd_reg_wr(adp, INTR_INTR_MSK_CLR_REG, intr_mask.whole_reg); ++ ++ return; ++} ++ ++static void crystalhd_clear_errors(struct crystalhd_adp *adp) ++{ ++ uint32_t reg; ++ ++ /* FIXME: jarod: wouldn't we want to write a 0 to the reg? Or does the write clear the bits specified? */ ++ reg = crystalhd_reg_rd(adp, MISC1_Y_RX_ERROR_STATUS); ++ if (reg) ++ crystalhd_reg_wr(adp, MISC1_Y_RX_ERROR_STATUS, reg); ++ ++ reg = crystalhd_reg_rd(adp, MISC1_UV_RX_ERROR_STATUS); ++ if (reg) ++ crystalhd_reg_wr(adp, MISC1_UV_RX_ERROR_STATUS, reg); ++ ++ reg = crystalhd_reg_rd(adp, MISC1_TX_DMA_ERROR_STATUS); ++ if (reg) ++ crystalhd_reg_wr(adp, MISC1_TX_DMA_ERROR_STATUS, reg); ++} ++ ++static void crystalhd_clear_interrupts(struct crystalhd_adp *adp) ++{ ++ uint32_t intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS); ++ ++ if (intr_sts) { ++ crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts); ++ ++ /* Write End Of Interrupt for PCIE */ ++ crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1); ++ } ++} ++ ++static void crystalhd_soft_rst(struct crystalhd_adp *adp) ++{ ++ uint32_t val; ++ ++ /* Assert c011 soft reset*/ ++ bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000001); ++ msleep_interruptible(50); ++ ++ /* Release c011 soft reset*/ ++ bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000000); ++ ++ /* Disable Stuffing..*/ ++ val = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL); ++ val |= BC_BIT(8); ++ crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, val); ++} ++ ++static bool crystalhd_load_firmware_config(struct crystalhd_adp *adp) ++{ ++ uint32_t i = 0, reg; ++ ++ crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (BC_DRAM_FW_CFG_ADDR >> 19)); ++ ++ crystalhd_reg_wr(adp, AES_CMD, 0); ++ crystalhd_reg_wr(adp, AES_CONFIG_INFO, (BC_DRAM_FW_CFG_ADDR & 0x7FFFF)); ++ crystalhd_reg_wr(adp, AES_CMD, 0x1); ++ ++ /* FIXME: jarod: I've seen this fail, and introducing extra delays helps... */ ++ for (i = 0; i < 100; ++i) { ++ reg = crystalhd_reg_rd(adp, AES_STATUS); ++ if (reg & 0x1) ++ return true; ++ msleep_interruptible(10); ++ } ++ ++ return false; ++} ++ ++ ++static bool crystalhd_start_device(struct crystalhd_adp *adp) ++{ ++ uint32_t dbg_options, glb_cntrl = 0, reg_pwrmgmt = 0; ++ ++ BCMLOG(BCMLOG_INFO, "Starting BCM70012 Device\n"); ++ ++ reg_pwrmgmt = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL); ++ reg_pwrmgmt &= ~ASPM_L1_ENABLE; ++ ++ crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg_pwrmgmt); ++ ++ if (!crystalhd_bring_out_of_rst(adp)) { ++ BCMLOG_ERR("Failed To Bring Link Out Of Reset\n"); ++ return false; ++ } ++ ++ crystalhd_disable_interrupts(adp); ++ ++ crystalhd_clear_errors(adp); ++ ++ crystalhd_clear_interrupts(adp); ++ ++ crystalhd_enable_interrupts(adp); ++ ++ /* Enable the option for getting the total no. 
of DWORDS ++ * that have been transfered by the RXDMA engine ++ */ ++ dbg_options = crystalhd_reg_rd(adp, MISC1_DMA_DEBUG_OPTIONS_REG); ++ dbg_options |= 0x10; ++ crystalhd_reg_wr(adp, MISC1_DMA_DEBUG_OPTIONS_REG, dbg_options); ++ ++ /* Enable PCI Global Control options */ ++ glb_cntrl = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL); ++ glb_cntrl |= 0x100; ++ glb_cntrl |= 0x8000; ++ crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, glb_cntrl); ++ ++ crystalhd_enable_interrupts(adp); ++ ++ crystalhd_soft_rst(adp); ++ crystalhd_start_dram(adp); ++ crystalhd_enable_uarts(adp); ++ ++ return true; ++} ++ ++static bool crystalhd_stop_device(struct crystalhd_adp *adp) ++{ ++ uint32_t reg; ++ ++ BCMLOG(BCMLOG_INFO, "Stopping BCM70012 Device\n"); ++ /* Clear and disable interrupts */ ++ crystalhd_disable_interrupts(adp); ++ crystalhd_clear_errors(adp); ++ crystalhd_clear_interrupts(adp); ++ ++ if (!crystalhd_put_in_reset(adp)) ++ BCMLOG_ERR("Failed to Put Link To Reset State\n"); ++ ++ reg = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL); ++ reg |= ASPM_L1_ENABLE; ++ crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg); ++ ++ /* Set PCI Clk Req */ ++ reg = crystalhd_reg_rd(adp, PCIE_CLK_REQ_REG); ++ reg |= PCI_CLK_REQ_ENABLE; ++ crystalhd_reg_wr(adp, PCIE_CLK_REQ_REG, reg); ++ ++ return true; ++} ++ ++static crystalhd_rx_dma_pkt *crystalhd_hw_alloc_rx_pkt(struct crystalhd_hw *hw) ++{ ++ unsigned long flags = 0; ++ crystalhd_rx_dma_pkt *temp = NULL; ++ ++ if (!hw) ++ return NULL; ++ ++ spin_lock_irqsave(&hw->lock, flags); ++ temp = hw->rx_pkt_pool_head; ++ if (temp) { ++ hw->rx_pkt_pool_head = hw->rx_pkt_pool_head->next; ++ temp->dio_req = NULL; ++ temp->pkt_tag = 0; ++ temp->flags = 0; ++ } ++ spin_unlock_irqrestore(&hw->lock, flags); ++ ++ return temp; ++} ++ ++static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw *hw, ++ crystalhd_rx_dma_pkt *pkt) ++{ ++ unsigned long flags = 0; ++ ++ if (!hw || !pkt) ++ return; ++ ++ spin_lock_irqsave(&hw->lock, flags); ++ pkt->next = hw->rx_pkt_pool_head; ++ hw->rx_pkt_pool_head = pkt; ++ spin_unlock_irqrestore(&hw->lock, flags); ++} ++ ++/* ++ * Call back from TX - IOQ deletion. ++ * ++ * This routine will release the TX DMA rings allocated ++ * druing setup_dma rings interface. ++ * ++ * Memory is allocated per DMA ring basis. This is just ++ * a place holder to be able to create the dio queues. ++ */ ++static void crystalhd_tx_desc_rel_call_back(void *context, void *data) ++{ ++} ++ ++/* ++ * Rx Packet release callback.. ++ * ++ * Release All user mapped capture buffers and Our DMA packets ++ * back to our free pool. The actual cleanup of the DMA ++ * ring descriptors happen during dma ring release. 
++ */ ++static void crystalhd_rx_pkt_rel_call_back(void *context, void *data) ++{ ++ struct crystalhd_hw *hw = (struct crystalhd_hw *)context; ++ crystalhd_rx_dma_pkt *pkt = (crystalhd_rx_dma_pkt *)data; ++ ++ if (!pkt || !hw) { ++ BCMLOG_ERR("Invalid arg - %p %p\n", hw, pkt); ++ return; ++ } ++ ++ if (pkt->dio_req) ++ crystalhd_unmap_dio(hw->adp, pkt->dio_req); ++ else ++ BCMLOG_ERR("Missing dio_req: 0x%x\n", pkt->pkt_tag); ++ ++ crystalhd_hw_free_rx_pkt(hw, pkt); ++} ++ ++#define crystalhd_hw_delete_ioq(adp, q) \ ++ if (q) { \ ++ crystalhd_delete_dioq(adp, q); \ ++ q = NULL; \ ++ } ++ ++static void crystalhd_hw_delete_ioqs(struct crystalhd_hw *hw) ++{ ++ if (!hw) ++ return; ++ ++ BCMLOG(BCMLOG_DBG, "Deleting IOQs \n"); ++ crystalhd_hw_delete_ioq(hw->adp, hw->tx_actq); ++ crystalhd_hw_delete_ioq(hw->adp, hw->tx_freeq); ++ crystalhd_hw_delete_ioq(hw->adp, hw->rx_actq); ++ crystalhd_hw_delete_ioq(hw->adp, hw->rx_freeq); ++ crystalhd_hw_delete_ioq(hw->adp, hw->rx_rdyq); ++} ++ ++#define crystalhd_hw_create_ioq(sts, hw, q, cb) \ ++do { \ ++ sts = crystalhd_create_dioq(hw->adp, &q, cb, hw); \ ++ if (sts != BC_STS_SUCCESS) \ ++ goto hw_create_ioq_err; \ ++} while (0) ++ ++/* ++ * Create IOQs.. ++ * ++ * TX - Active & Free ++ * RX - Active, Ready and Free. ++ */ ++static BC_STATUS crystalhd_hw_create_ioqs(struct crystalhd_hw *hw) ++{ ++ BC_STATUS sts = BC_STS_SUCCESS; ++ ++ if (!hw) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ crystalhd_hw_create_ioq(sts, hw, hw->tx_freeq, ++ crystalhd_tx_desc_rel_call_back); ++ crystalhd_hw_create_ioq(sts, hw, hw->tx_actq, ++ crystalhd_tx_desc_rel_call_back); ++ ++ crystalhd_hw_create_ioq(sts, hw, hw->rx_freeq, ++ crystalhd_rx_pkt_rel_call_back); ++ crystalhd_hw_create_ioq(sts, hw, hw->rx_rdyq, ++ crystalhd_rx_pkt_rel_call_back); ++ crystalhd_hw_create_ioq(sts, hw, hw->rx_actq, ++ crystalhd_rx_pkt_rel_call_back); ++ ++ return sts; ++ ++hw_create_ioq_err: ++ crystalhd_hw_delete_ioqs(hw); ++ ++ return sts; ++} ++ ++ ++static bool crystalhd_code_in_full(struct crystalhd_adp *adp, uint32_t needed_sz, ++ bool b_188_byte_pkts, uint8_t flags) ++{ ++ uint32_t base, end, writep, readp; ++ uint32_t cpbSize, cpbFullness, fifoSize; ++ ++ if (flags & 0x02) { /* ASF Bit is set */ ++ base = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Base); ++ end = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2End); ++ writep = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Wrptr); ++ readp = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Rdptr); ++ } else if (b_188_byte_pkts) { /*Encrypted 188 byte packets*/ ++ base = bc_dec_reg_rd(adp, REG_Dec_TsUser0Base); ++ end = bc_dec_reg_rd(adp, REG_Dec_TsUser0End); ++ writep = bc_dec_reg_rd(adp, REG_Dec_TsUser0Wrptr); ++ readp = bc_dec_reg_rd(adp, REG_Dec_TsUser0Rdptr); ++ } else { ++ base = bc_dec_reg_rd(adp, REG_DecCA_RegCinBase); ++ end = bc_dec_reg_rd(adp, REG_DecCA_RegCinEnd); ++ writep = bc_dec_reg_rd(adp, REG_DecCA_RegCinWrPtr); ++ readp = bc_dec_reg_rd(adp, REG_DecCA_RegCinRdPtr); ++ } ++ ++ cpbSize = end - base; ++ if (writep >= readp) ++ cpbFullness = writep - readp; ++ else ++ cpbFullness = (end - base) - (readp - writep); ++ ++ fifoSize = cpbSize - cpbFullness; ++ ++ if (fifoSize < BC_INFIFO_THRESHOLD) ++ return true; ++ ++ if (needed_sz > (fifoSize - BC_INFIFO_THRESHOLD)) ++ return true; ++ ++ return false; ++} ++ ++static BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw, ++ uint32_t list_id, BC_STATUS cs) ++{ ++ tx_dma_pkt *tx_req; ++ ++ if (!hw || !list_id) { ++ BCMLOG_ERR("Invalid Arg..\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ 
hw->pwr_lock--; ++ ++ tx_req = (tx_dma_pkt *)crystalhd_dioq_find_and_fetch(hw->tx_actq, list_id); ++ if (!tx_req) { ++ if (cs != BC_STS_IO_USER_ABORT) ++ BCMLOG_ERR("Find and Fetch Did not find req\n"); ++ return BC_STS_NO_DATA; ++ } ++ ++ if (tx_req->call_back) { ++ tx_req->call_back(tx_req->dio_req, tx_req->cb_event, cs); ++ tx_req->dio_req = NULL; ++ tx_req->cb_event = NULL; ++ tx_req->call_back = NULL; ++ } else { ++ BCMLOG(BCMLOG_DBG, "Missing Tx Callback - %X\n", ++ tx_req->list_tag); ++ } ++ ++ /* Now put back the tx_list back in FreeQ */ ++ tx_req->list_tag = 0; ++ ++ return crystalhd_dioq_add(hw->tx_freeq, tx_req, false, 0); ++} ++ ++static bool crystalhd_tx_list0_handler(struct crystalhd_hw *hw, uint32_t err_sts) ++{ ++ uint32_t err_mask, tmp; ++ unsigned long flags = 0; ++ ++ err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_MASK | ++ MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_MASK | ++ MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK; ++ ++ if (!(err_sts & err_mask)) ++ return false; ++ ++ BCMLOG_ERR("Error on Tx-L0 %x \n", err_sts); ++ ++ tmp = err_mask; ++ ++ if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK) ++ tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK; ++ ++ if (tmp) { ++ spin_lock_irqsave(&hw->lock, flags); ++ /* reset list index.*/ ++ hw->tx_list_post_index = 0; ++ spin_unlock_irqrestore(&hw->lock, flags); ++ } ++ ++ tmp = err_sts & err_mask; ++ crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp); ++ ++ return true; ++} ++ ++static bool crystalhd_tx_list1_handler(struct crystalhd_hw *hw, uint32_t err_sts) ++{ ++ uint32_t err_mask, tmp; ++ unsigned long flags = 0; ++ ++ err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_MASK | ++ MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_MASK | ++ MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK; ++ ++ if (!(err_sts & err_mask)) ++ return false; ++ ++ BCMLOG_ERR("Error on Tx-L1 %x \n", err_sts); ++ ++ tmp = err_mask; ++ ++ if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK) ++ tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK; ++ ++ if (tmp) { ++ spin_lock_irqsave(&hw->lock, flags); ++ /* reset list index.*/ ++ hw->tx_list_post_index = 0; ++ spin_unlock_irqrestore(&hw->lock, flags); ++ } ++ ++ tmp = err_sts & err_mask; ++ crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp); ++ ++ return true; ++} ++ ++static void crystalhd_tx_isr(struct crystalhd_hw *hw, uint32_t int_sts) ++{ ++ uint32_t err_sts; ++ ++ if (int_sts & INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_MASK) ++ crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0, ++ BC_STS_SUCCESS); ++ ++ if (int_sts & INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_MASK) ++ crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1, ++ BC_STS_SUCCESS); ++ ++ if (!(int_sts & (INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_MASK | ++ INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_MASK))) { ++ /* No error mask set.. */ ++ return; ++ } ++ ++ /* Handle Tx errors. 
*/ ++ err_sts = crystalhd_reg_rd(hw->adp, MISC1_TX_DMA_ERROR_STATUS); ++ ++ if (crystalhd_tx_list0_handler(hw, err_sts)) ++ crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0, ++ BC_STS_ERROR); ++ ++ if (crystalhd_tx_list1_handler(hw, err_sts)) ++ crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1, ++ BC_STS_ERROR); ++ ++ hw->stats.tx_errors++; ++} ++ ++static void crystalhd_hw_dump_desc(pdma_descriptor p_dma_desc, ++ uint32_t ul_desc_index, uint32_t cnt) ++{ ++ uint32_t ix, ll = 0; ++ ++ if (!p_dma_desc || !cnt) ++ return; ++ ++ /* FIXME: jarod: perhaps a modparam desc_debug to enable this, rather than ++ * setting ll (log level, I presume) to non-zero? */ ++ if (!ll) ++ return; ++ ++ for (ix = ul_desc_index; ix < (ul_desc_index + cnt); ix++) { ++ BCMLOG(ll, "%s[%d] Buff[%x:%x] Next:[%x:%x] XferSz:%x Intr:%x,Last:%x\n", ++ ((p_dma_desc[ul_desc_index].dma_dir) ? "TDesc" : "RDesc"), ++ ul_desc_index, ++ p_dma_desc[ul_desc_index].buff_addr_high, ++ p_dma_desc[ul_desc_index].buff_addr_low, ++ p_dma_desc[ul_desc_index].next_desc_addr_high, ++ p_dma_desc[ul_desc_index].next_desc_addr_low, ++ p_dma_desc[ul_desc_index].xfer_size, ++ p_dma_desc[ul_desc_index].intr_enable, ++ p_dma_desc[ul_desc_index].last_rec_indicator); ++ } ++ ++} ++ ++static BC_STATUS crystalhd_hw_fill_desc(crystalhd_dio_req *ioreq, ++ dma_descriptor *desc, ++ dma_addr_t desc_paddr_base, ++ uint32_t sg_cnt, uint32_t sg_st_ix, ++ uint32_t sg_st_off, uint32_t xfr_sz) ++{ ++ uint32_t count = 0, ix = 0, sg_ix = 0, len = 0, last_desc_ix = 0; ++ dma_addr_t desc_phy_addr = desc_paddr_base; ++ addr_64 addr_temp; ++ ++ if (!ioreq || !desc || !desc_paddr_base || !xfr_sz || ++ (!sg_cnt && !ioreq->uinfo.dir_tx)) { ++ BCMLOG_ERR("Invalid Args\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ for (ix = 0; ix < sg_cnt; ix++) { ++ ++ /* Setup SGLE index. */ ++ sg_ix = ix + sg_st_ix; ++ ++ /* Get SGLE length */ ++ len = crystalhd_get_sgle_len(ioreq, sg_ix); ++ if (len % 4) { ++ BCMLOG_ERR(" len in sg %d %d %d\n", len, sg_ix, sg_cnt); ++ return BC_STS_NOT_IMPL; ++ } ++ /* Setup DMA desc with Phy addr & Length at current index. */ ++ addr_temp.full_addr = crystalhd_get_sgle_paddr(ioreq, sg_ix); ++ if (sg_ix == sg_st_ix) { ++ addr_temp.full_addr += sg_st_off; ++ len -= sg_st_off; ++ } ++ memset(&desc[ix], 0, sizeof(desc[ix])); ++ desc[ix].buff_addr_low = addr_temp.low_part; ++ desc[ix].buff_addr_high = addr_temp.high_part; ++ desc[ix].dma_dir = ioreq->uinfo.dir_tx; ++ ++ /* Chain DMA descriptor. */ ++ addr_temp.full_addr = desc_phy_addr + sizeof(dma_descriptor); ++ desc[ix].next_desc_addr_low = addr_temp.low_part; ++ desc[ix].next_desc_addr_high = addr_temp.high_part; ++ ++ if ((count + len) > xfr_sz) ++ len = xfr_sz - count; ++ ++ /* Debug.. 
*/ ++ if ((!len) || (len > crystalhd_get_sgle_len(ioreq, sg_ix))) { ++ BCMLOG_ERR("inv-len(%x) Ix(%d) count:%x xfr_sz:%x sg_cnt:%d\n", ++ len, ix, count, xfr_sz, sg_cnt); ++ return BC_STS_ERROR; ++ } ++ /* Length expects Multiple of 4 */ ++ desc[ix].xfer_size = (len / 4); ++ ++ crystalhd_hw_dump_desc(desc, ix, 1); ++ ++ count += len; ++ desc_phy_addr += sizeof(dma_descriptor); ++ } ++ ++ last_desc_ix = ix - 1; ++ ++ if (ioreq->fb_size) { ++ memset(&desc[ix], 0, sizeof(desc[ix])); ++ addr_temp.full_addr = ioreq->fb_pa; ++ desc[ix].buff_addr_low = addr_temp.low_part; ++ desc[ix].buff_addr_high = addr_temp.high_part; ++ desc[ix].dma_dir = ioreq->uinfo.dir_tx; ++ desc[ix].xfer_size = 1; ++ desc[ix].fill_bytes = 4 - ioreq->fb_size; ++ count += ioreq->fb_size; ++ last_desc_ix++; ++ } ++ ++ /* setup last descriptor..*/ ++ desc[last_desc_ix].last_rec_indicator = 1; ++ desc[last_desc_ix].next_desc_addr_low = 0; ++ desc[last_desc_ix].next_desc_addr_high = 0; ++ desc[last_desc_ix].intr_enable = 1; ++ ++ crystalhd_hw_dump_desc(desc, last_desc_ix, 1); ++ ++ if (count != xfr_sz) { ++ BCMLOG_ERR("interal error sz curr:%x exp:%x\n", count, xfr_sz); ++ return BC_STS_ERROR; ++ } ++ ++ return BC_STS_SUCCESS; ++} ++ ++static BC_STATUS crystalhd_xlat_sgl_to_dma_desc(crystalhd_dio_req *ioreq, ++ pdma_desc_mem pdesc_mem, ++ uint32_t *uv_desc_index) ++{ ++ dma_descriptor *desc = NULL; ++ dma_addr_t desc_paddr_base = 0; ++ uint32_t sg_cnt = 0, sg_st_ix = 0, sg_st_off = 0; ++ uint32_t xfr_sz = 0; ++ BC_STATUS sts = BC_STS_SUCCESS; ++ ++ /* Check params.. */ ++ if (!ioreq || !pdesc_mem || !uv_desc_index) { ++ BCMLOG_ERR("Invalid Args\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ if (!pdesc_mem->sz || !pdesc_mem->pdma_desc_start || ++ !ioreq->sg || (!ioreq->sg_cnt && !ioreq->uinfo.dir_tx)) { ++ BCMLOG_ERR("Invalid Args\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ if ((ioreq->uinfo.dir_tx) && (ioreq->uinfo.uv_offset)) { ++ BCMLOG_ERR("UV offset for TX??\n"); ++ return BC_STS_INV_ARG; ++ ++ } ++ ++ desc = pdesc_mem->pdma_desc_start; ++ desc_paddr_base = pdesc_mem->phy_addr; ++ ++ if (ioreq->uinfo.dir_tx || (ioreq->uinfo.uv_offset == 0)) { ++ sg_cnt = ioreq->sg_cnt; ++ xfr_sz = ioreq->uinfo.xfr_len; ++ } else { ++ sg_cnt = ioreq->uinfo.uv_sg_ix + 1; ++ xfr_sz = ioreq->uinfo.uv_offset; ++ } ++ ++ sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt, ++ sg_st_ix, sg_st_off, xfr_sz); ++ ++ if ((sts != BC_STS_SUCCESS) || !ioreq->uinfo.uv_offset) ++ return sts; ++ ++ /* Prepare for UV mapping.. */ ++ desc = &pdesc_mem->pdma_desc_start[sg_cnt]; ++ desc_paddr_base = pdesc_mem->phy_addr + ++ (sg_cnt * sizeof(dma_descriptor)); ++ ++ /* Done with desc addr.. now update sg stuff.*/ ++ sg_cnt = ioreq->sg_cnt - ioreq->uinfo.uv_sg_ix; ++ xfr_sz = ioreq->uinfo.xfr_len - ioreq->uinfo.uv_offset; ++ sg_st_ix = ioreq->uinfo.uv_sg_ix; ++ sg_st_off = ioreq->uinfo.uv_sg_off; ++ ++ sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt, ++ sg_st_ix, sg_st_off, xfr_sz); ++ if (sts != BC_STS_SUCCESS) ++ return sts; ++ ++ *uv_desc_index = sg_st_ix; ++ ++ return sts; ++} ++ ++static void crystalhd_start_tx_dma_engine(struct crystalhd_hw *hw) ++{ ++ uint32_t dma_cntrl; ++ ++ dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS); ++ if (!(dma_cntrl & DMA_START_BIT)) { ++ dma_cntrl |= DMA_START_BIT; ++ crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS, ++ dma_cntrl); ++ } ++ ++ return; ++} ++ ++/* _CHECK_THIS_ ++ * ++ * Verify if the Stop generates a completion interrupt or not. 
++ * if it does not generate an interrupt, then add polling here. ++ */ ++static BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw) ++{ ++ uint32_t dma_cntrl, cnt = 30; ++ uint32_t l1 = 1, l2 = 1; ++ unsigned long flags = 0; ++ ++ dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS); ++ ++ BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n"); ++ ++ /* FIXME: jarod: invert dma_ctrl and check bit? or are there missing parens? */ ++ if (!dma_cntrl & DMA_START_BIT) { ++ BCMLOG(BCMLOG_DBG, "Already Stopped\n"); ++ return BC_STS_SUCCESS; ++ } ++ ++ crystalhd_disable_interrupts(hw->adp); ++ ++ /* Issue stop to HW */ ++ /* This bit when set gave problems. Please check*/ ++ dma_cntrl &= ~DMA_START_BIT; ++ crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS, dma_cntrl); ++ ++ BCMLOG(BCMLOG_DBG, "Cleared the DMA Start bit\n"); ++ ++ /* Poll for 3seconds (30 * 100ms) on both the lists..*/ ++ while ((l1 || l2) && cnt) { ++ ++ if (l1) { ++ l1 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST0); ++ l1 &= DMA_START_BIT; ++ } ++ ++ if (l2) { ++ l2 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST1); ++ l2 &= DMA_START_BIT; ++ } ++ ++ msleep_interruptible(100); ++ ++ cnt--; ++ } ++ ++ if (!cnt) { ++ BCMLOG_ERR("Failed to stop TX DMA.. l1 %d, l2 %d\n", l1, l2); ++ crystalhd_enable_interrupts(hw->adp); ++ return BC_STS_ERROR; ++ } ++ ++ spin_lock_irqsave(&hw->lock, flags); ++ hw->tx_list_post_index = 0; ++ spin_unlock_irqrestore(&hw->lock, flags); ++ BCMLOG(BCMLOG_DBG, "stopped TX DMA..\n"); ++ crystalhd_enable_interrupts(hw->adp); ++ ++ return BC_STS_SUCCESS; ++} ++ ++static uint32_t crystalhd_get_pib_avail_cnt(struct crystalhd_hw *hw) ++{ ++ /* ++ * Position of the PIB Entries can be found at ++ * 0th and the 1st location of the Circular list. 
++ */ ++ uint32_t Q_addr; ++ uint32_t pib_cnt, r_offset, w_offset; ++ ++ Q_addr = hw->pib_del_Q_addr; ++ ++ /* Get the Read Pointer */ ++ crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset); ++ ++ /* Get the Write Pointer */ ++ crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset); ++ ++ if (r_offset == w_offset) ++ return 0; /* Queue is empty */ ++ ++ if (w_offset > r_offset) ++ pib_cnt = w_offset - r_offset; ++ else ++ pib_cnt = (w_offset + MAX_PIB_Q_DEPTH) - ++ (r_offset + MIN_PIB_Q_DEPTH); ++ ++ if (pib_cnt > MAX_PIB_Q_DEPTH) { ++ BCMLOG_ERR("Invalid PIB Count (%u)\n", pib_cnt); ++ return 0; ++ } ++ ++ return pib_cnt; ++} ++ ++static uint32_t crystalhd_get_addr_from_pib_Q(struct crystalhd_hw *hw) ++{ ++ uint32_t Q_addr; ++ uint32_t addr_entry, r_offset, w_offset; ++ ++ Q_addr = hw->pib_del_Q_addr; ++ ++ /* Get the Read Pointer 0Th Location is Read Pointer */ ++ crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset); ++ ++ /* Get the Write Pointer 1st Location is Write pointer */ ++ crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset); ++ ++ /* Queue is empty */ ++ if (r_offset == w_offset) ++ return 0; ++ ++ if ((r_offset < MIN_PIB_Q_DEPTH) || (r_offset >= MAX_PIB_Q_DEPTH)) ++ return 0; ++ ++ /* Get the Actual Address of the PIB */ ++ crystalhd_mem_rd(hw->adp, Q_addr + (r_offset * sizeof(uint32_t)), ++ 1, &addr_entry); ++ ++ /* Increment the Read Pointer */ ++ r_offset++; ++ ++ if (MAX_PIB_Q_DEPTH == r_offset) ++ r_offset = MIN_PIB_Q_DEPTH; ++ ++ /* Write back the read pointer to It's Location */ ++ crystalhd_mem_wr(hw->adp, Q_addr, 1, &r_offset); ++ ++ return addr_entry; ++} ++ ++static bool crystalhd_rel_addr_to_pib_Q(struct crystalhd_hw *hw, uint32_t addr_to_rel) ++{ ++ uint32_t Q_addr; ++ uint32_t r_offset, w_offset, n_offset; ++ ++ Q_addr = hw->pib_rel_Q_addr; ++ ++ /* Get the Read Pointer */ ++ crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset); ++ ++ /* Get the Write Pointer */ ++ crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset); ++ ++ if ((r_offset < MIN_PIB_Q_DEPTH) || ++ (r_offset >= MAX_PIB_Q_DEPTH)) ++ return false; ++ ++ n_offset = w_offset + 1; ++ ++ if (MAX_PIB_Q_DEPTH == n_offset) ++ n_offset = MIN_PIB_Q_DEPTH; ++ ++ if (r_offset == n_offset) ++ return false; /* should never happen */ ++ ++ /* Write the DRAM ADDR to the Queue at Next Offset */ ++ crystalhd_mem_wr(hw->adp, Q_addr + (w_offset * sizeof(uint32_t)), ++ 1, &addr_to_rel); ++ ++ /* Put the New value of the write pointer in Queue */ ++ crystalhd_mem_wr(hw->adp, Q_addr + sizeof(uint32_t), 1, &n_offset); ++ ++ return true; ++} ++ ++static void cpy_pib_to_app(C011_PIB *src_pib, BC_PIC_INFO_BLOCK *dst_pib) ++{ ++ if (!src_pib || !dst_pib) { ++ BCMLOG_ERR("Invalid Arguments\n"); ++ return; ++ } ++ ++ dst_pib->timeStamp = 0; ++ dst_pib->picture_number = src_pib->ppb.picture_number; ++ dst_pib->width = src_pib->ppb.width; ++ dst_pib->height = src_pib->ppb.height; ++ dst_pib->chroma_format = src_pib->ppb.chroma_format; ++ dst_pib->pulldown = src_pib->ppb.pulldown; ++ dst_pib->flags = src_pib->ppb.flags; ++ dst_pib->sess_num = src_pib->ptsStcOffset; ++ dst_pib->aspect_ratio = src_pib->ppb.aspect_ratio; ++ dst_pib->colour_primaries = src_pib->ppb.colour_primaries; ++ dst_pib->picture_meta_payload = src_pib->ppb.picture_meta_payload; ++ dst_pib->frame_rate = src_pib->resolution ; ++ return; ++} ++ ++static void crystalhd_hw_proc_pib(struct crystalhd_hw *hw) ++{ ++ unsigned int cnt; ++ C011_PIB src_pib; ++ uint32_t pib_addr, pib_cnt; ++ BC_PIC_INFO_BLOCK *AppPib; ++ crystalhd_rx_dma_pkt 
*rx_pkt = NULL; ++ ++ pib_cnt = crystalhd_get_pib_avail_cnt(hw); ++ ++ if (!pib_cnt) ++ return; ++ ++ for (cnt = 0; cnt < pib_cnt; cnt++) { ++ ++ pib_addr = crystalhd_get_addr_from_pib_Q(hw); ++ crystalhd_mem_rd(hw->adp, pib_addr, sizeof(C011_PIB) / 4, ++ (uint32_t *)&src_pib); ++ ++ if (src_pib.bFormatChange) { ++ rx_pkt = (crystalhd_rx_dma_pkt *)crystalhd_dioq_fetch(hw->rx_freeq); ++ if (!rx_pkt) ++ return; ++ rx_pkt->flags = 0; ++ rx_pkt->flags |= COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE; ++ AppPib = &rx_pkt->pib; ++ cpy_pib_to_app(&src_pib, AppPib); ++ ++ BCMLOG(BCMLOG_DBG, ++ "App PIB:%x %x %x %x %x %x %x %x %x %x\n", ++ rx_pkt->pib.picture_number, ++ rx_pkt->pib.aspect_ratio, ++ rx_pkt->pib.chroma_format, ++ rx_pkt->pib.colour_primaries, ++ rx_pkt->pib.frame_rate, ++ rx_pkt->pib.height, ++ rx_pkt->pib.height, ++ rx_pkt->pib.n_drop, ++ rx_pkt->pib.pulldown, ++ rx_pkt->pib.ycom); ++ ++ crystalhd_dioq_add(hw->rx_rdyq, (void *)rx_pkt, true, rx_pkt->pkt_tag); ++ ++ } ++ ++ crystalhd_rel_addr_to_pib_Q(hw, pib_addr); ++ } ++} ++ ++static void crystalhd_start_rx_dma_engine(struct crystalhd_hw *hw) ++{ ++ uint32_t dma_cntrl; ++ ++ dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS); ++ if (!(dma_cntrl & DMA_START_BIT)) { ++ dma_cntrl |= DMA_START_BIT; ++ crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl); ++ } ++ ++ dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS); ++ if (!(dma_cntrl & DMA_START_BIT)) { ++ dma_cntrl |= DMA_START_BIT; ++ crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl); ++ } ++ ++ return; ++} ++ ++static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw *hw) ++{ ++ uint32_t dma_cntrl = 0, count = 30; ++ uint32_t l0y = 1, l0uv = 1, l1y = 1, l1uv = 1; ++ ++ dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS); ++ if ((dma_cntrl & DMA_START_BIT)) { ++ dma_cntrl &= ~DMA_START_BIT; ++ crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl); ++ } ++ ++ dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS); ++ if ((dma_cntrl & DMA_START_BIT)) { ++ dma_cntrl &= ~DMA_START_BIT; ++ crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl); ++ } ++ ++ /* Poll for 3seconds (30 * 100ms) on both the lists..*/ ++ while ((l0y || l0uv || l1y || l1uv) && count) { ++ ++ if (l0y) { ++ l0y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0); ++ l0y &= DMA_START_BIT; ++ if (!l0y) { ++ hw->rx_list_sts[0] &= ~rx_waiting_y_intr; ++ } ++ } ++ ++ if (l1y) { ++ l1y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1); ++ l1y &= DMA_START_BIT; ++ if (!l1y) { ++ hw->rx_list_sts[1] &= ~rx_waiting_y_intr; ++ } ++ } ++ ++ if (l0uv) { ++ l0uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0); ++ l0uv &= DMA_START_BIT; ++ if (!l0uv) { ++ hw->rx_list_sts[0] &= ~rx_waiting_uv_intr; ++ } ++ } ++ ++ if (l1uv) { ++ l1uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1); ++ l1uv &= DMA_START_BIT; ++ if (!l1uv) { ++ hw->rx_list_sts[1] &= ~rx_waiting_uv_intr; ++ } ++ } ++ msleep_interruptible(100); ++ count--; ++ } ++ ++ hw->rx_list_post_index = 0; ++ ++ BCMLOG(BCMLOG_SSTEP, "Capture Stop: %d List0:Sts:%x List1:Sts:%x\n", ++ count, hw->rx_list_sts[0], hw->rx_list_sts[1]); ++} ++ ++static BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw, crystalhd_rx_dma_pkt *rx_pkt) ++{ ++ uint32_t y_low_addr_reg, y_high_addr_reg; ++ uint32_t uv_low_addr_reg, uv_high_addr_reg; ++ addr_64 desc_addr; ++ unsigned long 
flags; ++ ++ if (!hw || !rx_pkt) { ++ BCMLOG_ERR("Invalid Arguments\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ if (hw->rx_list_post_index >= DMA_ENGINE_CNT) { ++ BCMLOG_ERR("List Out Of bounds %x\n", hw->rx_list_post_index); ++ return BC_STS_INV_ARG; ++ } ++ ++ spin_lock_irqsave(&hw->rx_lock, flags); ++ /* FIXME: jarod: sts_free is an enum for 0, in crystalhd_hw.h... yuk... */ ++ if (sts_free != hw->rx_list_sts[hw->rx_list_post_index]) { ++ spin_unlock_irqrestore(&hw->rx_lock, flags); ++ return BC_STS_BUSY; ++ } ++ ++ if (!hw->rx_list_post_index) { ++ y_low_addr_reg = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0; ++ y_high_addr_reg = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST0; ++ uv_low_addr_reg = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0; ++ uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST0; ++ } else { ++ y_low_addr_reg = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1; ++ y_high_addr_reg = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST1; ++ uv_low_addr_reg = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1; ++ uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST1; ++ } ++ rx_pkt->pkt_tag = hw->rx_pkt_tag_seed + hw->rx_list_post_index; ++ hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_y_intr; ++ if (rx_pkt->uv_phy_addr) ++ hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_uv_intr; ++ hw->rx_list_post_index = (hw->rx_list_post_index + 1) % DMA_ENGINE_CNT; ++ spin_unlock_irqrestore(&hw->rx_lock, flags); ++ ++ crystalhd_dioq_add(hw->rx_actq, (void *)rx_pkt, false, rx_pkt->pkt_tag); ++ ++ crystalhd_start_rx_dma_engine(hw); ++ /* Program the Y descriptor */ ++ desc_addr.full_addr = rx_pkt->desc_mem.phy_addr; ++ crystalhd_reg_wr(hw->adp, y_high_addr_reg, desc_addr.high_part); ++ crystalhd_reg_wr(hw->adp, y_low_addr_reg, desc_addr.low_part | 0x01); ++ ++ if (rx_pkt->uv_phy_addr) { ++ /* Program the UV descriptor */ ++ desc_addr.full_addr = rx_pkt->uv_phy_addr; ++ crystalhd_reg_wr(hw->adp, uv_high_addr_reg, desc_addr.high_part); ++ crystalhd_reg_wr(hw->adp, uv_low_addr_reg, desc_addr.low_part | 0x01); ++ } ++ ++ return BC_STS_SUCCESS; ++} ++ ++static BC_STATUS crystalhd_hw_post_cap_buff(struct crystalhd_hw *hw, ++ crystalhd_rx_dma_pkt *rx_pkt) ++{ ++ BC_STATUS sts = crystalhd_hw_prog_rxdma(hw, rx_pkt); ++ ++ if (sts == BC_STS_BUSY) ++ crystalhd_dioq_add(hw->rx_freeq, (void *)rx_pkt, ++ false, rx_pkt->pkt_tag); ++ ++ return sts; ++} ++ ++static void crystalhd_get_dnsz(struct crystalhd_hw *hw, uint32_t list_index, ++ uint32_t *y_dw_dnsz, uint32_t *uv_dw_dnsz) ++{ ++ uint32_t y_dn_sz_reg, uv_dn_sz_reg; ++ ++ if (!list_index) { ++ y_dn_sz_reg = MISC1_Y_RX_LIST0_CUR_BYTE_CNT; ++ uv_dn_sz_reg = MISC1_UV_RX_LIST0_CUR_BYTE_CNT; ++ } else { ++ y_dn_sz_reg = MISC1_Y_RX_LIST1_CUR_BYTE_CNT; ++ uv_dn_sz_reg = MISC1_UV_RX_LIST1_CUR_BYTE_CNT; ++ } ++ ++ *y_dw_dnsz = crystalhd_reg_rd(hw->adp, y_dn_sz_reg); ++ *uv_dw_dnsz = crystalhd_reg_rd(hw->adp, uv_dn_sz_reg); ++} ++ ++/* ++ * This function should be called only after making sure that the two DMA ++ * lists are free. This function does not check if DMA's are active, before ++ * turning off the DMA. 
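++ *
++ * A minimal sketch of the expected call pattern, based on the two callers
++ * that appear later in this file (crystalhd_hw_pause() and the RX ISR);
++ * this is illustration only, not additional driver logic:
++ *
++ *	hw->stop_pending = 1;
++ *	if ((hw->rx_list_sts[0] == sts_free) &&
++ *	    (hw->rx_list_sts[1] == sts_free))
++ *		crystalhd_hw_finalize_pause(hw);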
++ */ ++static void crystalhd_hw_finalize_pause(struct crystalhd_hw *hw) ++{ ++ uint32_t dma_cntrl, aspm; ++ ++ hw->stop_pending = 0; ++ ++ dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS); ++ if (dma_cntrl & DMA_START_BIT) { ++ dma_cntrl &= ~DMA_START_BIT; ++ crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl); ++ } ++ ++ dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS); ++ if (dma_cntrl & DMA_START_BIT) { ++ dma_cntrl &= ~DMA_START_BIT; ++ crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl); ++ } ++ hw->rx_list_post_index = 0; ++ ++ aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL); ++ aspm |= ASPM_L1_ENABLE; ++ /* NAREN BCMLOG(BCMLOG_INFO, "aspm on\n"); */ ++ crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm); ++} ++ ++static BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw, uint32_t list_index, ++ BC_STATUS comp_sts) ++{ ++ crystalhd_rx_dma_pkt *rx_pkt = NULL; ++ uint32_t y_dw_dnsz, uv_dw_dnsz; ++ BC_STATUS sts = BC_STS_SUCCESS; ++ ++ if (!hw || list_index >= DMA_ENGINE_CNT) { ++ BCMLOG_ERR("Invalid Arguments\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ rx_pkt = crystalhd_dioq_find_and_fetch(hw->rx_actq, ++ hw->rx_pkt_tag_seed + list_index); ++ if (!rx_pkt) { ++ BCMLOG_ERR("Act-Q:PostIx:%x L0Sts:%x L1Sts:%x current L:%x tag:%x comp:%x\n", ++ hw->rx_list_post_index, hw->rx_list_sts[0], ++ hw->rx_list_sts[1], list_index, ++ hw->rx_pkt_tag_seed + list_index, comp_sts); ++ return BC_STS_INV_ARG; ++ } ++ ++ if (comp_sts == BC_STS_SUCCESS) { ++ crystalhd_get_dnsz(hw, list_index, &y_dw_dnsz, &uv_dw_dnsz); ++ rx_pkt->dio_req->uinfo.y_done_sz = y_dw_dnsz; ++ rx_pkt->flags = COMP_FLAG_DATA_VALID; ++ if (rx_pkt->uv_phy_addr) ++ rx_pkt->dio_req->uinfo.uv_done_sz = uv_dw_dnsz; ++ crystalhd_dioq_add(hw->rx_rdyq, rx_pkt, true, ++ hw->rx_pkt_tag_seed + list_index); ++ return sts; ++ } ++ ++ /* Check if we can post this DIO again. 
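++	 * On a failed completion the buffer is not handed to user space;
++	 * crystalhd_hw_post_cap_buff() re-programs it on a free DMA list, or
++	 * parks it back on rx_freeq when both lists are busy (BC_STS_BUSY).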
*/ ++ return crystalhd_hw_post_cap_buff(hw, rx_pkt); ++} ++ ++static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw, uint32_t int_sts, ++ uint32_t y_err_sts, uint32_t uv_err_sts) ++{ ++ uint32_t tmp; ++ list_sts tmp_lsts; ++ ++ if (!(y_err_sts & GET_Y0_ERR_MSK) && !(uv_err_sts & GET_UV0_ERR_MSK)) ++ return false; ++ ++ tmp_lsts = hw->rx_list_sts[0]; ++ ++ /* Y0 - DMA */ ++ tmp = y_err_sts & GET_Y0_ERR_MSK; ++ if (int_sts & INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK) ++ hw->rx_list_sts[0] &= ~rx_waiting_y_intr; ++ ++ if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) { ++ hw->rx_list_sts[0] &= ~rx_waiting_y_intr; ++ tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK; ++ } ++ ++ if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) { ++ hw->rx_list_sts[0] &= ~rx_y_mask; ++ hw->rx_list_sts[0] |= rx_y_error; ++ tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK; ++ } ++ ++ if (tmp) { ++ hw->rx_list_sts[0] &= ~rx_y_mask; ++ hw->rx_list_sts[0] |= rx_y_error; ++ hw->rx_list_post_index = 0; ++ } ++ ++ /* UV0 - DMA */ ++ tmp = uv_err_sts & GET_UV0_ERR_MSK; ++ if (int_sts & INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK) ++ hw->rx_list_sts[0] &= ~rx_waiting_uv_intr; ++ ++ if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) { ++ hw->rx_list_sts[0] &= ~rx_waiting_uv_intr; ++ tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK; ++ } ++ ++ if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) { ++ hw->rx_list_sts[0] &= ~rx_uv_mask; ++ hw->rx_list_sts[0] |= rx_uv_error; ++ tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK; ++ } ++ ++ if (tmp) { ++ hw->rx_list_sts[0] &= ~rx_uv_mask; ++ hw->rx_list_sts[0] |= rx_uv_error; ++ hw->rx_list_post_index = 0; ++ } ++ ++ if (y_err_sts & GET_Y0_ERR_MSK) { ++ tmp = y_err_sts & GET_Y0_ERR_MSK; ++ crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp); ++ } ++ ++ if (uv_err_sts & GET_UV0_ERR_MSK) { ++ tmp = uv_err_sts & GET_UV0_ERR_MSK; ++ crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp); ++ } ++ ++ return (tmp_lsts != hw->rx_list_sts[0]); ++} ++ ++static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw, uint32_t int_sts, ++ uint32_t y_err_sts, uint32_t uv_err_sts) ++{ ++ uint32_t tmp; ++ list_sts tmp_lsts; ++ ++ if (!(y_err_sts & GET_Y1_ERR_MSK) && !(uv_err_sts & GET_UV1_ERR_MSK)) ++ return false; ++ ++ tmp_lsts = hw->rx_list_sts[1]; ++ ++ /* Y1 - DMA */ ++ tmp = y_err_sts & GET_Y1_ERR_MSK; ++ if (int_sts & INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK) ++ hw->rx_list_sts[1] &= ~rx_waiting_y_intr; ++ ++ if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) { ++ hw->rx_list_sts[1] &= ~rx_waiting_y_intr; ++ tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK; ++ } ++ ++ if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) { ++ /* Add retry-support..*/ ++ hw->rx_list_sts[1] &= ~rx_y_mask; ++ hw->rx_list_sts[1] |= rx_y_error; ++ tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK; ++ } ++ ++ if (tmp) { ++ hw->rx_list_sts[1] &= ~rx_y_mask; ++ hw->rx_list_sts[1] |= rx_y_error; ++ hw->rx_list_post_index = 0; ++ } ++ ++ /* UV1 - DMA */ ++ tmp = uv_err_sts & GET_UV1_ERR_MSK; ++ if (int_sts & INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK) { ++ hw->rx_list_sts[1] &= ~rx_waiting_uv_intr; ++ } ++ ++ if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) { ++ hw->rx_list_sts[1] &= ~rx_waiting_uv_intr; ++ tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK; ++ } ++ ++ if (uv_err_sts & 
MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) { ++ /* Add retry-support*/ ++ hw->rx_list_sts[1] &= ~rx_uv_mask; ++ hw->rx_list_sts[1] |= rx_uv_error; ++ tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK; ++ } ++ ++ if (tmp) { ++ hw->rx_list_sts[1] &= ~rx_uv_mask; ++ hw->rx_list_sts[1] |= rx_uv_error; ++ hw->rx_list_post_index = 0; ++ } ++ ++ if (y_err_sts & GET_Y1_ERR_MSK) { ++ tmp = y_err_sts & GET_Y1_ERR_MSK; ++ crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp); ++ } ++ ++ if (uv_err_sts & GET_UV1_ERR_MSK) { ++ tmp = uv_err_sts & GET_UV1_ERR_MSK; ++ crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp); ++ } ++ ++ return (tmp_lsts != hw->rx_list_sts[1]); ++} ++ ++ ++static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts) ++{ ++ unsigned long flags; ++ uint32_t i, list_avail = 0; ++ BC_STATUS comp_sts = BC_STS_NO_DATA; ++ uint32_t y_err_sts, uv_err_sts, y_dn_sz = 0, uv_dn_sz = 0; ++ bool ret = 0; ++ ++ if (!hw) { ++ BCMLOG_ERR("Invalid Arguments\n"); ++ return; ++ } ++ ++ if (!(intr_sts & GET_RX_INTR_MASK)) ++ return; ++ ++ y_err_sts = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_ERROR_STATUS); ++ uv_err_sts = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_ERROR_STATUS); ++ ++ for (i = 0; i < DMA_ENGINE_CNT; i++) { ++ /* Update States..*/ ++ spin_lock_irqsave(&hw->rx_lock, flags); ++ if (i == 0) ++ ret = crystalhd_rx_list0_handler(hw, intr_sts, y_err_sts, uv_err_sts); ++ else ++ ret = crystalhd_rx_list1_handler(hw, intr_sts, y_err_sts, uv_err_sts); ++ if (ret) { ++ switch (hw->rx_list_sts[i]) { ++ case sts_free: ++ comp_sts = BC_STS_SUCCESS; ++ list_avail = 1; ++ break; ++ case rx_y_error: ++ case rx_uv_error: ++ case rx_sts_error: ++ /* We got error on both or Y or uv. */ ++ hw->stats.rx_errors++; ++ crystalhd_get_dnsz(hw, i, &y_dn_sz, &uv_dn_sz); ++ /* FIXME: jarod: this is where my mini pci-e card is tripping up */ ++ BCMLOG(BCMLOG_DBG, "list_index:%x rx[%d] Y:%x " ++ "UV:%x Int:%x YDnSz:%x UVDnSz:%x\n", ++ i, hw->stats.rx_errors, y_err_sts, ++ uv_err_sts, intr_sts, y_dn_sz, uv_dn_sz); ++ hw->rx_list_sts[i] = sts_free; ++ comp_sts = BC_STS_ERROR; ++ break; ++ default: ++ /* Wait for completion..*/ ++ comp_sts = BC_STS_NO_DATA; ++ break; ++ } ++ } ++ spin_unlock_irqrestore(&hw->rx_lock, flags); ++ ++ /* handle completion...*/ ++ if (comp_sts != BC_STS_NO_DATA) { ++ crystalhd_rx_pkt_done(hw, i, comp_sts); ++ comp_sts = BC_STS_NO_DATA; ++ } ++ } ++ ++ if (list_avail) { ++ if (hw->stop_pending) { ++ if ((hw->rx_list_sts[0] == sts_free) && ++ (hw->rx_list_sts[1] == sts_free)) ++ crystalhd_hw_finalize_pause(hw); ++ } else { ++ crystalhd_hw_start_capture(hw); ++ } ++ } ++} ++ ++static BC_STATUS crystalhd_fw_cmd_post_proc(struct crystalhd_hw *hw, ++ BC_FW_CMD *fw_cmd) ++{ ++ BC_STATUS sts = BC_STS_SUCCESS; ++ DecRspChannelStartVideo *st_rsp = NULL; ++ ++ switch (fw_cmd->cmd[0]) { ++ case eCMD_C011_DEC_CHAN_START_VIDEO: ++ st_rsp = (DecRspChannelStartVideo *)fw_cmd->rsp; ++ hw->pib_del_Q_addr = st_rsp->picInfoDeliveryQ; ++ hw->pib_rel_Q_addr = st_rsp->picInfoReleaseQ; ++ BCMLOG(BCMLOG_DBG, "DelQAddr:%x RelQAddr:%x\n", ++ hw->pib_del_Q_addr, hw->pib_rel_Q_addr); ++ break; ++ case eCMD_C011_INIT: ++ if (!(crystalhd_load_firmware_config(hw->adp))) { ++ BCMLOG_ERR("Invalid Params.\n"); ++ sts = BC_STS_FW_AUTH_FAILED; ++ } ++ break; ++ default: ++ break; ++ } ++ return sts; ++} ++ ++static BC_STATUS crystalhd_put_ddr2sleep(struct crystalhd_hw *hw) ++{ ++ uint32_t reg; ++ link_misc_perst_decoder_ctrl rst_cntrl_reg; ++ ++ /* Pulse reset pin of 7412 (MISC_PERST_DECODER_CTRL) 
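++	 * Power-down sequence, as implemented below: pulse the 7412 reset
++	 * (assert, wait ~50ms, de-assert), close all DRAM banks and drop the
++	 * CKE pin, reset the audio block, then power down the decoder PLLs,
++	 * the audio PLLs and the video/core clocks.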
*/ ++ rst_cntrl_reg.whole_reg = crystalhd_reg_rd(hw->adp, MISC_PERST_DECODER_CTRL); ++ ++ rst_cntrl_reg.bcm_7412_rst = 1; ++ crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg); ++ msleep_interruptible(50); ++ ++ rst_cntrl_reg.bcm_7412_rst = 0; ++ crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg); ++ ++ /* Close all banks, put DDR in idle */ ++ bc_dec_reg_wr(hw->adp, SDRAM_PRECHARGE, 0); ++ ++ /* Set bit 25 (drop CKE pin of DDR) */ ++ reg = bc_dec_reg_rd(hw->adp, SDRAM_PARAM); ++ reg |= 0x02000000; ++ bc_dec_reg_wr(hw->adp, SDRAM_PARAM, reg); ++ ++ /* Reset the audio block */ ++ bc_dec_reg_wr(hw->adp, AUD_DSP_MISC_SOFT_RESET, 0x1); ++ ++ /* Power down Raptor PLL */ ++ reg = bc_dec_reg_rd(hw->adp, DecHt_PllCCtl); ++ reg |= 0x00008000; ++ bc_dec_reg_wr(hw->adp, DecHt_PllCCtl, reg); ++ ++ /* Power down all Audio PLL */ ++ bc_dec_reg_wr(hw->adp, AIO_MISC_PLL_RESET, 0x1); ++ ++ /* Power down video clock (75MHz) */ ++ reg = bc_dec_reg_rd(hw->adp, DecHt_PllECtl); ++ reg |= 0x00008000; ++ bc_dec_reg_wr(hw->adp, DecHt_PllECtl, reg); ++ ++ /* Power down video clock (75MHz) */ ++ reg = bc_dec_reg_rd(hw->adp, DecHt_PllDCtl); ++ reg |= 0x00008000; ++ bc_dec_reg_wr(hw->adp, DecHt_PllDCtl, reg); ++ ++ /* Power down core clock (200MHz) */ ++ reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl); ++ reg |= 0x00008000; ++ bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg); ++ ++ /* Power down core clock (200MHz) */ ++ reg = bc_dec_reg_rd(hw->adp, DecHt_PllBCtl); ++ reg |= 0x00008000; ++ bc_dec_reg_wr(hw->adp, DecHt_PllBCtl, reg); ++ ++ return BC_STS_SUCCESS; ++} ++ ++/************************************************ ++** ++*************************************************/ ++ ++BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer, uint32_t sz) ++{ ++ uint32_t reg_data, cnt, *temp_buff; ++ uint32_t fw_sig_len = 36; ++ uint32_t dram_offset = BC_FWIMG_ST_ADDR, sig_reg; ++ ++ BCMLOG_ENTER; ++ ++ if (!adp || !buffer || !sz) { ++ BCMLOG_ERR("Invalid Params.\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ reg_data = crystalhd_reg_rd(adp, OTP_CMD); ++ if (!(reg_data & 0x02)) { ++ BCMLOG_ERR("Invalid hw config.. 
otp not programmed\n"); ++ return BC_STS_ERROR; ++ } ++ ++ reg_data = 0; ++ crystalhd_reg_wr(adp, DCI_CMD, 0); ++ reg_data |= BC_BIT(0); ++ crystalhd_reg_wr(adp, DCI_CMD, reg_data); ++ ++ reg_data = 0; ++ cnt = 1000; ++ msleep_interruptible(10); ++ ++ while (reg_data != BC_BIT(4)) { ++ reg_data = crystalhd_reg_rd(adp, DCI_STATUS); ++ reg_data &= BC_BIT(4); ++ if (--cnt == 0) { ++ BCMLOG_ERR("Firmware Download RDY Timeout.\n"); ++ return BC_STS_TIMEOUT; ++ } ++ } ++ ++ msleep_interruptible(10); ++ /* Load the FW to the FW_ADDR field in the DCI_FIRMWARE_ADDR */ ++ crystalhd_reg_wr(adp, DCI_FIRMWARE_ADDR, dram_offset); ++ temp_buff = (uint32_t *)buffer; ++ for (cnt = 0; cnt < (sz - fw_sig_len); cnt += 4) { ++ crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (dram_offset >> 19)); ++ crystalhd_reg_wr(adp, DCI_FIRMWARE_DATA, *temp_buff); ++ dram_offset += 4; ++ temp_buff++; ++ } ++ msleep_interruptible(10); ++ ++ temp_buff++; ++ ++ sig_reg = (uint32_t)DCI_SIGNATURE_DATA_7; ++ for (cnt = 0; cnt < 8; cnt++) { ++ uint32_t swapped_data = *temp_buff; ++ swapped_data = bswap_32_1(swapped_data); ++ crystalhd_reg_wr(adp, sig_reg, swapped_data); ++ sig_reg -= 4; ++ temp_buff++; ++ } ++ msleep_interruptible(10); ++ ++ reg_data = 0; ++ reg_data |= BC_BIT(1); ++ crystalhd_reg_wr(adp, DCI_CMD, reg_data); ++ msleep_interruptible(10); ++ ++ reg_data = 0; ++ reg_data = crystalhd_reg_rd(adp, DCI_STATUS); ++ ++ if ((reg_data & BC_BIT(9)) == BC_BIT(9)) { ++ cnt = 1000; ++ while ((reg_data & BC_BIT(0)) != BC_BIT(0)) { ++ reg_data = crystalhd_reg_rd(adp, DCI_STATUS); ++ reg_data &= BC_BIT(0); ++ if (!(--cnt)) ++ break; ++ msleep_interruptible(10); ++ } ++ reg_data = 0; ++ reg_data = crystalhd_reg_rd(adp, DCI_CMD); ++ reg_data |= BC_BIT(4); ++ crystalhd_reg_wr(adp, DCI_CMD, reg_data); ++ ++ } else { ++ BCMLOG_ERR("F/w Signature mismatch\n"); ++ return BC_STS_FW_AUTH_FAILED; ++ } ++ ++ BCMLOG(BCMLOG_INFO, "Firmware Downloaded Successfully\n"); ++ return BC_STS_SUCCESS;; ++} ++ ++BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw, BC_FW_CMD *fw_cmd) ++{ ++ uint32_t cnt = 0, cmd_res_addr; ++ uint32_t *cmd_buff, *res_buff; ++ wait_queue_head_t fw_cmd_event; ++ int rc = 0; ++ BC_STATUS sts; ++ ++ crystalhd_create_event(&fw_cmd_event); ++ ++ BCMLOG_ENTER; ++ ++ if (!hw || !fw_cmd) { ++ BCMLOG_ERR("Invalid Arguments\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ cmd_buff = fw_cmd->cmd; ++ res_buff = fw_cmd->rsp; ++ ++ if (!cmd_buff || !res_buff) { ++ BCMLOG_ERR("Invalid Parameters for F/W Command \n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ hw->pwr_lock++; ++ ++ hw->fwcmd_evt_sts = 0; ++ hw->pfw_cmd_event = &fw_cmd_event; ++ ++ /*Write the command to the memory*/ ++ crystalhd_mem_wr(hw->adp, TS_Host2CpuSnd, FW_CMD_BUFF_SZ, cmd_buff); ++ ++ /*Memory Read for memory arbitrator flush*/ ++ crystalhd_mem_rd(hw->adp, TS_Host2CpuSnd, 1, &cnt); ++ ++ /* Write the command address to mailbox */ ++ bc_dec_reg_wr(hw->adp, Hst2CpuMbx1, TS_Host2CpuSnd); ++ msleep_interruptible(50); ++ ++ crystalhd_wait_on_event(&fw_cmd_event, hw->fwcmd_evt_sts, 20000, rc, 0); ++ ++ if (!rc) { ++ sts = BC_STS_SUCCESS; ++ } else if (rc == -EBUSY) { ++ BCMLOG_ERR("Firmware command T/O\n"); ++ sts = BC_STS_TIMEOUT; ++ } else if (rc == -EINTR) { ++ BCMLOG(BCMLOG_DBG, "FwCmd Wait Signal int.\n"); ++ sts = BC_STS_IO_USER_ABORT; ++ } else { ++ BCMLOG_ERR("FwCmd IO Error.\n"); ++ sts = BC_STS_IO_ERROR; ++ } ++ ++ if (sts != BC_STS_SUCCESS) { ++ BCMLOG_ERR("FwCmd Failed.\n"); ++ hw->pwr_lock--; ++ return sts; ++ } ++ ++ /*Get the Responce Address*/ ++ cmd_res_addr = 
bc_dec_reg_rd(hw->adp, Cpu2HstMbx1); ++ ++ /*Read the Response*/ ++ crystalhd_mem_rd(hw->adp, cmd_res_addr, FW_CMD_BUFF_SZ, res_buff); ++ ++ hw->pwr_lock--; ++ ++ if (res_buff[2] != C011_RET_SUCCESS) { ++ BCMLOG_ERR("res_buff[2] != C011_RET_SUCCESS\n"); ++ return BC_STS_FW_CMD_ERR; ++ } ++ ++ sts = crystalhd_fw_cmd_post_proc(hw, fw_cmd); ++ if (sts != BC_STS_SUCCESS) ++ BCMLOG_ERR("crystalhd_fw_cmd_post_proc Failed.\n"); ++ ++ return sts; ++} ++ ++bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw) ++{ ++ uint32_t intr_sts = 0; ++ uint32_t deco_intr = 0; ++ bool rc = 0; ++ ++ if (!adp || !hw->dev_started) ++ return rc; ++ ++ hw->stats.num_interrupts++; ++ hw->pwr_lock++; ++ ++ deco_intr = bc_dec_reg_rd(adp, Stream2Host_Intr_Sts); ++ intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS); ++ ++ if (intr_sts) { ++ /* let system know we processed interrupt..*/ ++ rc = 1; ++ hw->stats.dev_interrupts++; ++ } ++ ++ if (deco_intr && (deco_intr != 0xdeaddead)) { ++ ++ if (deco_intr & 0x80000000) { ++ /*Set the Event and the status flag*/ ++ if (hw->pfw_cmd_event) { ++ hw->fwcmd_evt_sts = 1; ++ crystalhd_set_event(hw->pfw_cmd_event); ++ } ++ } ++ ++ if (deco_intr & BC_BIT(1)) ++ crystalhd_hw_proc_pib(hw); ++ ++ bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, deco_intr); ++ /* FIXME: jarod: No udelay? might this be the real reason mini pci-e cards were stalling out? */ ++ bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, 0); ++ rc = 1; ++ } ++ ++ /* Rx interrupts */ ++ crystalhd_rx_isr(hw, intr_sts); ++ ++ /* Tx interrupts*/ ++ crystalhd_tx_isr(hw, intr_sts); ++ ++ /* Clear interrupts */ ++ if (rc) { ++ if (intr_sts) ++ crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts); ++ ++ crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1); ++ } ++ ++ hw->pwr_lock--; ++ ++ return rc; ++} ++ ++BC_STATUS crystalhd_hw_open(struct crystalhd_hw *hw, struct crystalhd_adp *adp) ++{ ++ if (!hw || !adp) { ++ BCMLOG_ERR("Invalid Arguments\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ if (hw->dev_started) ++ return BC_STS_SUCCESS; ++ ++ memset(hw, 0, sizeof(struct crystalhd_hw)); ++ ++ hw->adp = adp; ++ spin_lock_init(&hw->lock); ++ spin_lock_init(&hw->rx_lock); ++ /* FIXME: jarod: what are these magic numbers?!? 
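++	 * The values themselves look arbitrary, but their use is visible in
++	 * this file: packets are tagged with (seed + list index) when posted
++	 * and looked up by the same tag on completion, e.g.
++	 *
++	 *	rx_pkt->pkt_tag = hw->rx_pkt_tag_seed + hw->rx_list_post_index;
++	 *	...
++	 *	crystalhd_dioq_find_and_fetch(hw->rx_actq,
++	 *			hw->rx_pkt_tag_seed + list_index);
++	 *
++	 * so the seeds appear to matter only insofar as they keep the RX and
++	 * TX tag spaces apart.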
*/ ++ hw->tx_ioq_tag_seed = 0x70023070; ++ hw->rx_pkt_tag_seed = 0x70029070; ++ ++ hw->stop_pending = 0; ++ crystalhd_start_device(hw->adp); ++ hw->dev_started = true; ++ ++ /* set initial core clock */ ++ hw->core_clock_mhz = CLOCK_PRESET; ++ hw->prev_n = 0; ++ hw->pwr_lock = 0; ++ crystalhd_hw_set_core_clock(hw); ++ ++ return BC_STS_SUCCESS; ++} ++ ++BC_STATUS crystalhd_hw_close(struct crystalhd_hw *hw) ++{ ++ if (!hw) { ++ BCMLOG_ERR("Invalid Arguments\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ if (!hw->dev_started) ++ return BC_STS_SUCCESS; ++ ++ /* Stop and DDR sleep will happen in here */ ++ crystalhd_hw_suspend(hw); ++ hw->dev_started = false; ++ ++ return BC_STS_SUCCESS; ++} ++ ++BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw) ++{ ++ unsigned int i; ++ void *mem; ++ size_t mem_len; ++ dma_addr_t phy_addr; ++ BC_STATUS sts = BC_STS_SUCCESS; ++ crystalhd_rx_dma_pkt *rpkt; ++ ++ if (!hw || !hw->adp) { ++ BCMLOG_ERR("Invalid Arguments\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ sts = crystalhd_hw_create_ioqs(hw); ++ if (sts != BC_STS_SUCCESS) { ++ BCMLOG_ERR("Failed to create IOQs..\n"); ++ return sts; ++ } ++ ++ mem_len = BC_LINK_MAX_SGLS * sizeof(dma_descriptor); ++ ++ for (i = 0; i < BC_TX_LIST_CNT; i++) { ++ mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr); ++ if (mem) { ++ memset(mem, 0, mem_len); ++ } else { ++ BCMLOG_ERR("Insufficient Memory For TX\n"); ++ crystalhd_hw_free_dma_rings(hw); ++ return BC_STS_INSUFF_RES; ++ } ++ /* rx_pkt_pool -- static memory allocation */ ++ hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = mem; ++ hw->tx_pkt_pool[i].desc_mem.phy_addr = phy_addr; ++ hw->tx_pkt_pool[i].desc_mem.sz = BC_LINK_MAX_SGLS * ++ sizeof(dma_descriptor); ++ hw->tx_pkt_pool[i].list_tag = 0; ++ ++ /* Add TX dma requests to Free Queue..*/ ++ sts = crystalhd_dioq_add(hw->tx_freeq, ++ &hw->tx_pkt_pool[i], false, 0); ++ if (sts != BC_STS_SUCCESS) { ++ crystalhd_hw_free_dma_rings(hw); ++ return sts; ++ } ++ } ++ ++ for (i = 0; i < BC_RX_LIST_CNT; i++) { ++ rpkt = kzalloc(sizeof(*rpkt), GFP_KERNEL); ++ if (!rpkt) { ++ BCMLOG_ERR("Insufficient Memory For RX\n"); ++ crystalhd_hw_free_dma_rings(hw); ++ return BC_STS_INSUFF_RES; ++ } ++ ++ mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr); ++ if (mem) { ++ memset(mem, 0, mem_len); ++ } else { ++ BCMLOG_ERR("Insufficient Memory For RX\n"); ++ crystalhd_hw_free_dma_rings(hw); ++ return BC_STS_INSUFF_RES; ++ } ++ rpkt->desc_mem.pdma_desc_start = mem; ++ rpkt->desc_mem.phy_addr = phy_addr; ++ rpkt->desc_mem.sz = BC_LINK_MAX_SGLS * sizeof(dma_descriptor); ++ rpkt->pkt_tag = hw->rx_pkt_tag_seed + i; ++ crystalhd_hw_free_rx_pkt(hw, rpkt); ++ } ++ ++ return BC_STS_SUCCESS; ++} ++ ++BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *hw) ++{ ++ unsigned int i; ++ crystalhd_rx_dma_pkt *rpkt = NULL; ++ ++ if (!hw || !hw->adp) { ++ BCMLOG_ERR("Invalid Arguments\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ /* Delete all IOQs.. 
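++	 * Teardown mirrors crystalhd_hw_setup_dma_rings(): each TX slot and
++	 * each RX packet owns a descriptor area of
++	 * BC_LINK_MAX_SGLS * sizeof(dma_descriptor) bytes obtained from
++	 * bc_kern_dma_alloc(), released below with bc_kern_dma_free(); the
++	 * RX packet structs themselves came from kzalloc() and are kfree()d.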
*/ ++ crystalhd_hw_delete_ioqs(hw); ++ ++ for (i = 0; i < BC_TX_LIST_CNT; i++) { ++ if (hw->tx_pkt_pool[i].desc_mem.pdma_desc_start) { ++ bc_kern_dma_free(hw->adp, ++ hw->tx_pkt_pool[i].desc_mem.sz, ++ hw->tx_pkt_pool[i].desc_mem.pdma_desc_start, ++ hw->tx_pkt_pool[i].desc_mem.phy_addr); ++ ++ hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = NULL; ++ } ++ } ++ ++ BCMLOG(BCMLOG_DBG, "Releasing RX Pkt pool\n"); ++ do { ++ rpkt = crystalhd_hw_alloc_rx_pkt(hw); ++ if (!rpkt) ++ break; ++ bc_kern_dma_free(hw->adp, rpkt->desc_mem.sz, ++ rpkt->desc_mem.pdma_desc_start, ++ rpkt->desc_mem.phy_addr); ++ kfree(rpkt); ++ } while (rpkt); ++ ++ return BC_STS_SUCCESS; ++} ++ ++BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, crystalhd_dio_req *ioreq, ++ hw_comp_callback call_back, ++ wait_queue_head_t *cb_event, uint32_t *list_id, ++ uint8_t data_flags) ++{ ++ tx_dma_pkt *tx_dma_packet = NULL; ++ uint32_t first_desc_u_addr, first_desc_l_addr; ++ uint32_t low_addr, high_addr; ++ addr_64 desc_addr; ++ BC_STATUS sts, add_sts; ++ uint32_t dummy_index = 0; ++ unsigned long flags; ++ bool rc; ++ ++ if (!hw || !ioreq || !call_back || !cb_event || !list_id) { ++ BCMLOG_ERR("Invalid Arguments\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ /* ++ * Since we hit code in busy condition very frequently, ++ * we will check the code in status first before ++ * checking the availability of free elem. ++ * ++ * This will avoid the Q fetch/add in normal condition. ++ */ ++ rc = crystalhd_code_in_full(hw->adp, ioreq->uinfo.xfr_len, ++ false, data_flags); ++ if (rc) { ++ hw->stats.cin_busy++; ++ return BC_STS_BUSY; ++ } ++ ++ /* Get a list from TxFreeQ */ ++ tx_dma_packet = (tx_dma_pkt *)crystalhd_dioq_fetch(hw->tx_freeq); ++ if (!tx_dma_packet) { ++ BCMLOG_ERR("No empty elements..\n"); ++ return BC_STS_ERR_USAGE; ++ } ++ ++ sts = crystalhd_xlat_sgl_to_dma_desc(ioreq, ++ &tx_dma_packet->desc_mem, ++ &dummy_index); ++ if (sts != BC_STS_SUCCESS) { ++ add_sts = crystalhd_dioq_add(hw->tx_freeq, tx_dma_packet, ++ false, 0); ++ if (add_sts != BC_STS_SUCCESS) ++ BCMLOG_ERR("double fault..\n"); ++ ++ return sts; ++ } ++ ++ hw->pwr_lock++; ++ ++ desc_addr.full_addr = tx_dma_packet->desc_mem.phy_addr; ++ low_addr = desc_addr.low_part; ++ high_addr = desc_addr.high_part; ++ ++ tx_dma_packet->call_back = call_back; ++ tx_dma_packet->cb_event = cb_event; ++ tx_dma_packet->dio_req = ioreq; ++ ++ spin_lock_irqsave(&hw->lock, flags); ++ ++ if (hw->tx_list_post_index == 0) { ++ first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST0; ++ first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST0; ++ } else { ++ first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST1; ++ first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST1; ++ } ++ ++ *list_id = tx_dma_packet->list_tag = hw->tx_ioq_tag_seed + ++ hw->tx_list_post_index; ++ ++ hw->tx_list_post_index = (hw->tx_list_post_index + 1) % DMA_ENGINE_CNT; ++ ++ spin_unlock_irqrestore(&hw->lock, flags); ++ ++ ++ /* Insert in Active Q..*/ ++ crystalhd_dioq_add(hw->tx_actq, tx_dma_packet, false, ++ tx_dma_packet->list_tag); ++ ++ /* ++ * Interrupt will come as soon as you write ++ * the valid bit. So be ready for that. All ++ * the initialization should happen before that. ++ */ ++ crystalhd_start_tx_dma_engine(hw); ++ crystalhd_reg_wr(hw->adp, first_desc_u_addr, desc_addr.high_part); ++ ++ crystalhd_reg_wr(hw->adp, first_desc_l_addr, desc_addr.low_part | 0x01); ++ /* Be sure we set the valid bit ^^^^ */ ++ ++ return BC_STS_SUCCESS; ++} ++ ++/* ++ * This is a force cancel and we are racing with ISR. 
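++ *
++ * An assumed caller pattern (the real caller sits in the command layer,
++ * outside this file), for illustration only:
++ *
++ *	uint32_t list_id = 0;
++ *	sts = crystalhd_hw_post_tx(hw, ioreq, tx_cb, &cb_event, &list_id, 0);
++ *	... and if the wait on cb_event then times out:
++ *	crystalhd_hw_cancel_tx(hw, list_id);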
++ * ++ * Will try to remove the req from ActQ before ISR gets it. ++ * If ISR gets it first then the completion happens in the ++ * normal path and we will return _STS_NO_DATA from here. ++ * ++ * FIX_ME: Not Tested the actual condition.. ++ */ ++BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id) ++{ ++ if (!hw || !list_id) { ++ BCMLOG_ERR("Invalid Arguments\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ crystalhd_stop_tx_dma_engine(hw); ++ crystalhd_hw_tx_req_complete(hw, list_id, BC_STS_IO_USER_ABORT); ++ ++ return BC_STS_SUCCESS; ++} ++ ++BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw, ++ crystalhd_dio_req *ioreq, bool en_post) ++{ ++ crystalhd_rx_dma_pkt *rpkt; ++ uint32_t tag, uv_desc_ix = 0; ++ BC_STATUS sts; ++ ++ if (!hw || !ioreq) { ++ BCMLOG_ERR("Invalid Arguments\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ rpkt = crystalhd_hw_alloc_rx_pkt(hw); ++ if (!rpkt) { ++ BCMLOG_ERR("Insufficient resources\n"); ++ return BC_STS_INSUFF_RES; ++ } ++ ++ rpkt->dio_req = ioreq; ++ tag = rpkt->pkt_tag; ++ ++ sts = crystalhd_xlat_sgl_to_dma_desc(ioreq, &rpkt->desc_mem, &uv_desc_ix); ++ if (sts != BC_STS_SUCCESS) ++ return sts; ++ ++ rpkt->uv_phy_addr = 0; ++ ++ /* Store the address of UV in the rx packet for post*/ ++ if (uv_desc_ix) ++ rpkt->uv_phy_addr = rpkt->desc_mem.phy_addr + ++ (sizeof(dma_descriptor) * (uv_desc_ix + 1)); ++ ++ if (en_post) ++ sts = crystalhd_hw_post_cap_buff(hw, rpkt); ++ else ++ sts = crystalhd_dioq_add(hw->rx_freeq, rpkt, false, tag); ++ ++ return sts; ++} ++ ++BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw, ++ BC_PIC_INFO_BLOCK *pib, ++ crystalhd_dio_req **ioreq) ++{ ++ crystalhd_rx_dma_pkt *rpkt; ++ uint32_t timeout = BC_PROC_OUTPUT_TIMEOUT / 1000; ++ uint32_t sig_pending = 0; ++ ++ ++ if (!hw || !ioreq || !pib) { ++ BCMLOG_ERR("Invalid Arguments\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ rpkt = crystalhd_dioq_fetch_wait(hw->rx_rdyq, timeout, &sig_pending); ++ if (!rpkt) { ++ if (sig_pending) { ++ BCMLOG(BCMLOG_INFO, "wait on frame time out %d\n", sig_pending); ++ return BC_STS_IO_USER_ABORT; ++ } else { ++ return BC_STS_TIMEOUT; ++ } ++ } ++ ++ rpkt->dio_req->uinfo.comp_flags = rpkt->flags; ++ ++ if (rpkt->flags & COMP_FLAG_PIB_VALID) ++ memcpy(pib, &rpkt->pib, sizeof(*pib)); ++ ++ *ioreq = rpkt->dio_req; ++ ++ crystalhd_hw_free_rx_pkt(hw, rpkt); ++ ++ return BC_STS_SUCCESS; ++} ++ ++BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw) ++{ ++ crystalhd_rx_dma_pkt *rx_pkt; ++ BC_STATUS sts; ++ uint32_t i; ++ ++ if (!hw) { ++ BCMLOG_ERR("Invalid Arguments\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ /* This is start of capture.. Post to both the lists.. 
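++	 * Assumed sequence on the caller's side (illustration only):
++	 *
++	 *	crystalhd_hw_add_cap_buffer(hw, ioreq, false);	(per capture buffer)
++	 *	crystalhd_hw_start_capture(hw);			(primes both DMA lists)
++	 *	crystalhd_hw_get_cap_buffer(hw, &pib, &ioreq);	(waits on rx_rdyq)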
*/ ++ for (i = 0; i < DMA_ENGINE_CNT; i++) { ++ rx_pkt = crystalhd_dioq_fetch(hw->rx_freeq); ++ if (!rx_pkt) ++ return BC_STS_NO_DATA; ++ sts = crystalhd_hw_post_cap_buff(hw, rx_pkt); ++ if (BC_STS_SUCCESS != sts) ++ break; ++ ++ } ++ ++ return BC_STS_SUCCESS; ++} ++ ++BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw) ++{ ++ void *temp = NULL; ++ ++ if (!hw) { ++ BCMLOG_ERR("Invalid Arguments\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ crystalhd_stop_rx_dma_engine(hw); ++ ++ do { ++ temp = crystalhd_dioq_fetch(hw->rx_freeq); ++ if (temp) ++ crystalhd_rx_pkt_rel_call_back(hw, temp); ++ } while (temp); ++ ++ return BC_STS_SUCCESS; ++} ++ ++BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw) ++{ ++ hw->stats.pause_cnt++; ++ hw->stop_pending = 1; ++ ++ if ((hw->rx_list_sts[0] == sts_free) && ++ (hw->rx_list_sts[1] == sts_free)) ++ crystalhd_hw_finalize_pause(hw); ++ ++ return BC_STS_SUCCESS; ++} ++ ++BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw) ++{ ++ BC_STATUS sts; ++ uint32_t aspm; ++ ++ hw->stop_pending = 0; ++ ++ aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL); ++ aspm &= ~ASPM_L1_ENABLE; ++/* NAREN BCMLOG(BCMLOG_INFO, "aspm off\n"); */ ++ crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm); ++ ++ sts = crystalhd_hw_start_capture(hw); ++ return sts; ++} ++ ++BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw) ++{ ++ BC_STATUS sts; ++ ++ if (!hw) { ++ BCMLOG_ERR("Invalid Arguments\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ sts = crystalhd_put_ddr2sleep(hw); ++ if (sts != BC_STS_SUCCESS) { ++ BCMLOG_ERR("Failed to Put DDR To Sleep!!\n"); ++ return BC_STS_ERROR; ++ } ++ ++ if (!crystalhd_stop_device(hw->adp)) { ++ BCMLOG_ERR("Failed to Stop Device!!\n"); ++ return BC_STS_ERROR; ++ } ++ ++ return BC_STS_SUCCESS; ++} ++ ++void crystalhd_hw_stats(struct crystalhd_hw *hw, struct crystalhd_hw_stats *stats) ++{ ++ if (!hw) { ++ BCMLOG_ERR("Invalid Arguments\n"); ++ return; ++ } ++ ++ /* if called w/NULL stats, its a req to zero out the stats */ ++ if (!stats) { ++ memset(&hw->stats, 0, sizeof(hw->stats)); ++ return; ++ } ++ ++ hw->stats.freeq_count = crystalhd_dioq_count(hw->rx_freeq); ++ hw->stats.rdyq_count = crystalhd_dioq_count(hw->rx_rdyq); ++ memcpy(stats, &hw->stats, sizeof(*stats)); ++} ++ ++BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *hw) ++{ ++ uint32_t reg, n, i; ++ uint32_t vco_mg, refresh_reg; ++ ++ if (!hw) { ++ BCMLOG_ERR("Invalid Arguments\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ /* FIXME: jarod: wha? 
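++	 * Worked example with the default CLOCK_PRESET of 175 MHz set by
++	 * crystalhd_hw_open():
++	 *
++	 *	n           = 175 / 5        = 35
++	 *	i           = 35 * 27        = 945  ->  vco_mg = 2  (900 <= i < 1030)
++	 *	refresh_reg = (7 * 175) / 16 = 76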
*/ ++ /*n = (hw->core_clock_mhz * 3) / 20 + 1; */ ++ n = hw->core_clock_mhz/5; ++ ++ if (n == hw->prev_n) ++ return BC_STS_CLK_NOCHG; ++ ++ if (hw->pwr_lock > 0) { ++ /* BCMLOG(BCMLOG_INFO,"pwr_lock is %u\n", hw->pwr_lock) */ ++ return BC_STS_CLK_NOCHG; ++ } ++ ++ i = n * 27; ++ if (i < 560) ++ vco_mg = 0; ++ else if (i < 900) ++ vco_mg = 1; ++ else if (i < 1030) ++ vco_mg = 2; ++ else ++ vco_mg = 3; ++ ++ reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl); ++ ++ reg &= 0xFFFFCFC0; ++ reg |= n; ++ reg |= vco_mg << 12; ++ ++ BCMLOG(BCMLOG_INFO, "clock is moving to %d with n %d with vco_mg %d\n", ++ hw->core_clock_mhz, n, vco_mg); ++ ++ /* Change the DRAM refresh rate to accomodate the new frequency */ ++ /* refresh reg = ((refresh_rate * clock_rate)/16) - 1; rounding up*/ ++ refresh_reg = (7 * hw->core_clock_mhz / 16); ++ bc_dec_reg_wr(hw->adp, SDRAM_REF_PARAM, ((1 << 12) | refresh_reg)); ++ ++ bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg); ++ ++ i = 0; ++ ++ for (i = 0; i < 10; i++) { ++ reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl); ++ ++ if (reg & 0x00020000) { ++ hw->prev_n = n; ++ /* FIXME: jarod: outputting a random "C" is... confusing... */ ++ BCMLOG(BCMLOG_INFO, "C"); ++ return BC_STS_SUCCESS; ++ } else { ++ msleep_interruptible(10); ++ } ++ } ++ BCMLOG(BCMLOG_INFO, "clk change failed\n"); ++ return BC_STS_CLK_NOCHG; ++} +diff --git a/drivers/staging/crystalhd/crystalhd_hw.h b/drivers/staging/crystalhd/crystalhd_hw.h +new file mode 100644 +index 0000000..1c6318e +--- /dev/null ++++ b/drivers/staging/crystalhd/crystalhd_hw.h +@@ -0,0 +1,398 @@ ++/*************************************************************************** ++ * Copyright (c) 2005-2009, Broadcom Corporation. ++ * ++ * Name: crystalhd_hw . h ++ * ++ * Description: ++ * BCM70012 Linux driver hardware layer. ++ * ++ * HISTORY: ++ * ++ ********************************************************************** ++ * This file is part of the crystalhd device driver. ++ * ++ * This driver is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, version 2 of the License. ++ * ++ * This driver is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this driver. If not, see . ++ **********************************************************************/ ++ ++#ifndef _CRYSTALHD_HW_H_ ++#define _CRYSTALHD_HW_H_ ++ ++#include "crystalhd_misc.h" ++#include "crystalhd_fw_if.h" ++ ++/* HW constants..*/ ++#define DMA_ENGINE_CNT 2 ++#define MAX_PIB_Q_DEPTH 64 ++#define MIN_PIB_Q_DEPTH 2 ++#define WR_POINTER_OFF 4 ++ ++#define ASPM_L1_ENABLE (BC_BIT(27)) ++ ++/************************************************* ++ 7412 Decoder Registers. ++**************************************************/ ++#define FW_CMD_BUFF_SZ 64 ++#define TS_Host2CpuSnd 0x00000100 ++#define Hst2CpuMbx1 0x00100F00 ++#define Cpu2HstMbx1 0x00100F04 ++#define MbxStat1 0x00100F08 ++#define Stream2Host_Intr_Sts 0x00100F24 ++#define C011_RET_SUCCESS 0x0 /* Reutrn status of firmware command. 
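++ * A firmware command round trip (see crystalhd_do_fw_cmd() in
++ * crystalhd_hw.c) writes the command to TS_Host2CpuSnd, posts that
++ * address to Hst2CpuMbx1, reads the response address back from
++ * Cpu2HstMbx1, and then compares word 2 of the response buffer
++ * against this value.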
*/ ++ ++/* TS input status register */ ++#define TS_StreamAFIFOStatus 0x0010044C ++#define TS_StreamBFIFOStatus 0x0010084C ++ ++/*UART Selection definitions*/ ++#define UartSelectA 0x00100300 ++#define UartSelectB 0x00100304 ++ ++#define BSVS_UART_DEC_NONE 0x00 ++#define BSVS_UART_DEC_OUTER 0x01 ++#define BSVS_UART_DEC_INNER 0x02 ++#define BSVS_UART_STREAM 0x03 ++ ++/* Code-In fifo */ ++#define REG_DecCA_RegCinCTL 0xa00 ++#define REG_DecCA_RegCinBase 0xa0c ++#define REG_DecCA_RegCinEnd 0xa10 ++#define REG_DecCA_RegCinWrPtr 0xa04 ++#define REG_DecCA_RegCinRdPtr 0xa08 ++ ++#define REG_Dec_TsUser0Base 0x100864 ++#define REG_Dec_TsUser0Rdptr 0x100868 ++#define REG_Dec_TsUser0Wrptr 0x10086C ++#define REG_Dec_TsUser0End 0x100874 ++ ++/* ASF Case ...*/ ++#define REG_Dec_TsAudCDB2Base 0x10036c ++#define REG_Dec_TsAudCDB2Rdptr 0x100378 ++#define REG_Dec_TsAudCDB2Wrptr 0x100374 ++#define REG_Dec_TsAudCDB2End 0x100370 ++ ++/* DRAM bringup Registers */ ++#define SDRAM_PARAM 0x00040804 ++#define SDRAM_PRECHARGE 0x000408B0 ++#define SDRAM_EXT_MODE 0x000408A4 ++#define SDRAM_MODE 0x000408A0 ++#define SDRAM_REFRESH 0x00040890 ++#define SDRAM_REF_PARAM 0x00040808 ++ ++#define DecHt_PllACtl 0x34000C ++#define DecHt_PllBCtl 0x340010 ++#define DecHt_PllCCtl 0x340014 ++#define DecHt_PllDCtl 0x340034 ++#define DecHt_PllECtl 0x340038 ++#define AUD_DSP_MISC_SOFT_RESET 0x00240104 ++#define AIO_MISC_PLL_RESET 0x0026000C ++#define PCIE_CLK_REQ_REG 0xDC ++#define PCI_CLK_REQ_ENABLE (BC_BIT(8)) ++ ++/************************************************* ++ F/W Copy engine definitions.. ++**************************************************/ ++#define BC_FWIMG_ST_ADDR 0x00000000 ++/* FIXME: jarod: there's a kernel function that'll do this for us... */ ++#define rotr32_1(x, n) (((x) >> n) | ((x) << (32 - n))) ++#define bswap_32_1(x) ((rotr32_1((x), 24) & 0x00ff00ff) | (rotr32_1((x), 8) & 0xff00ff00)) ++ ++#define DecHt_HostSwReset 0x340000 ++#define BC_DRAM_FW_CFG_ADDR 0x001c2000 ++ ++typedef union _addr_64_ { ++ struct { ++ uint32_t low_part; ++ uint32_t high_part; ++ }; ++ ++ uint64_t full_addr; ++ ++} addr_64; ++ ++typedef union _intr_mask_reg_ { ++ struct { ++ uint32_t mask_tx_done:1; ++ uint32_t mask_tx_err:1; ++ uint32_t mask_rx_done:1; ++ uint32_t mask_rx_err:1; ++ uint32_t mask_pcie_err:1; ++ uint32_t mask_pcie_rbusmast_err:1; ++ uint32_t mask_pcie_rgr_bridge:1; ++ uint32_t reserved:25; ++ }; ++ ++ uint32_t whole_reg; ++ ++} intr_mask_reg; ++ ++typedef union _link_misc_perst_deco_ctrl_ { ++ struct { ++ uint32_t bcm7412_rst:1; /* 1 -> BCM7412 is held in reset. Reset value 1.*/ ++ uint32_t reserved0:3; /* Reserved.No Effect*/ ++ uint32_t stop_bcm_7412_clk:1; /* 1 ->Stops branch of 27MHz clk used to clk BCM7412*/ ++ uint32_t reserved1:27; /* Reseved. No Effect*/ ++ }; ++ ++ uint32_t whole_reg; ++ ++} link_misc_perst_deco_ctrl; ++ ++typedef union _link_misc_perst_clk_ctrl_ { ++ struct { ++ uint32_t sel_alt_clk:1; /* When set, selects a 6.75MHz clock as the source of core_clk */ ++ uint32_t stop_core_clk:1; /* When set, stops the branch of core_clk that is not needed for low power operation */ ++ uint32_t pll_pwr_dn:1; /* When set, powers down the main PLL. The alternate clock bit should be set ++ to select an alternate clock before setting this bit.*/ ++ uint32_t reserved0:5; /* Reserved */ ++ uint32_t pll_mult:8; /* This setting controls the multiplier for the PLL. */ ++ uint32_t pll_div:4; /* This setting controls the divider for the PLL. 
*/ ++ uint32_t reserved1:12; /* Reserved */ ++ }; ++ ++ uint32_t whole_reg; ++ ++} link_misc_perst_clk_ctrl; ++ ++ ++typedef union _link_misc_perst_decoder_ctrl_ { ++ struct { ++ uint32_t bcm_7412_rst:1; /* 1 -> BCM7412 is held in reset. Reset value 1.*/ ++ uint32_t res0:3; /* Reserved.No Effect*/ ++ uint32_t stop_7412_clk:1; /* 1 ->Stops branch of 27MHz clk used to clk BCM7412*/ ++ uint32_t res1:27; /* Reseved. No Effect */ ++ }; ++ ++ uint32_t whole_reg; ++ ++} link_misc_perst_decoder_ctrl; ++ ++ ++typedef union _desc_low_addr_reg_ { ++ struct { ++ uint32_t list_valid:1; ++ uint32_t reserved:4; ++ uint32_t low_addr:27; ++ }; ++ ++ uint32_t whole_reg; ++ ++} desc_low_addr_reg; ++ ++typedef struct _dma_descriptor_ { /* 8 32-bit values */ ++ /* 0th u32 */ ++ uint32_t sdram_buff_addr:28; /* bits 0-27: SDRAM Address */ ++ uint32_t res0:4; /* bits 28-31: Reserved */ ++ ++ /* 1st u32 */ ++ uint32_t buff_addr_low; /* 1 buffer address low */ ++ uint32_t buff_addr_high; /* 2 buffer address high */ ++ ++ /* 3rd u32 */ ++ uint32_t res2:2; /* 0-1 - Reserved */ ++ uint32_t xfer_size:23; /* 2-24 = Xfer size in words */ ++ uint32_t res3:6; /* 25-30 reserved */ ++ uint32_t intr_enable:1; /* 31 - Interrupt After this desc */ ++ ++ /* 4th u32 */ ++ uint32_t endian_xlat_align:2; /* 0-1 Endian Translation */ ++ uint32_t next_desc_cont:1; /* 2 - Next desc is in contig memory */ ++ uint32_t res4:25; /* 3 - 27 Reserved bits */ ++ uint32_t fill_bytes:2; /* 28-29 Bits Fill Bytes */ ++ uint32_t dma_dir:1; /* 30 bit DMA Direction */ ++ uint32_t last_rec_indicator:1; /* 31 bit Last Record Indicator */ ++ ++ /* 5th u32 */ ++ uint32_t next_desc_addr_low; /* 32-bits Next Desc Addr lower */ ++ ++ /* 6th u32 */ ++ uint32_t next_desc_addr_high; /* 32-bits Next Desc Addr Higher */ ++ ++ /* 7th u32 */ ++ uint32_t res8; /* Last 32bits reserved */ ++ ++} dma_descriptor, *pdma_descriptor; ++ ++/* ++ * We will allocate the memory in 4K pages ++ * the linked list will be a list of 32 byte descriptors. ++ * The virtual address will determine what should be freed. ++ */ ++typedef struct _dma_desc_mem_ { ++ pdma_descriptor pdma_desc_start; /* 32-bytes for dma descriptor. 
should be first element */ ++ dma_addr_t phy_addr; /* physical address of each DMA desc */ ++ uint32_t sz; ++ struct _dma_desc_mem_ *Next; /* points to Next Descriptor in chain */ ++ ++} dma_desc_mem, *pdma_desc_mem; ++ ++ ++ ++typedef enum _list_sts_ { ++ sts_free = 0, ++ ++ /* RX-Y Bits 0:7 */ ++ rx_waiting_y_intr = 0x00000001, ++ rx_y_error = 0x00000004, ++ ++ /* RX-UV Bits 8:16 */ ++ rx_waiting_uv_intr = 0x0000100, ++ rx_uv_error = 0x0000400, ++ ++ rx_sts_waiting = (rx_waiting_y_intr|rx_waiting_uv_intr), ++ rx_sts_error = (rx_y_error|rx_uv_error), ++ ++ rx_y_mask = 0x000000FF, ++ rx_uv_mask = 0x0000FF00, ++ ++} list_sts; ++ ++typedef struct _tx_dma_pkt_ { ++ dma_desc_mem desc_mem; ++ hw_comp_callback call_back; ++ crystalhd_dio_req *dio_req; ++ wait_queue_head_t *cb_event; ++ uint32_t list_tag; ++ ++} tx_dma_pkt; ++ ++typedef struct _crystalhd_rx_dma_pkt { ++ dma_desc_mem desc_mem; ++ crystalhd_dio_req *dio_req; ++ uint32_t pkt_tag; ++ uint32_t flags; ++ BC_PIC_INFO_BLOCK pib; ++ dma_addr_t uv_phy_addr; ++ struct _crystalhd_rx_dma_pkt *next; ++ ++} crystalhd_rx_dma_pkt; ++ ++struct crystalhd_hw_stats{ ++ uint32_t rx_errors; ++ uint32_t tx_errors; ++ uint32_t freeq_count; ++ uint32_t rdyq_count; ++ uint32_t num_interrupts; ++ uint32_t dev_interrupts; ++ uint32_t cin_busy; ++ uint32_t pause_cnt; ++}; ++ ++struct crystalhd_hw { ++ tx_dma_pkt tx_pkt_pool[DMA_ENGINE_CNT]; ++ spinlock_t lock; ++ ++ uint32_t tx_ioq_tag_seed; ++ uint32_t tx_list_post_index; ++ ++ crystalhd_rx_dma_pkt *rx_pkt_pool_head; ++ uint32_t rx_pkt_tag_seed; ++ ++ bool dev_started; ++ void *adp; ++ ++ wait_queue_head_t *pfw_cmd_event; ++ int fwcmd_evt_sts; ++ ++ uint32_t pib_del_Q_addr; ++ uint32_t pib_rel_Q_addr; ++ ++ crystalhd_dioq_t *tx_freeq; ++ crystalhd_dioq_t *tx_actq; ++ ++ /* Rx DMA Engine Specific Locks */ ++ spinlock_t rx_lock; ++ uint32_t rx_list_post_index; ++ list_sts rx_list_sts[DMA_ENGINE_CNT]; ++ crystalhd_dioq_t *rx_rdyq; ++ crystalhd_dioq_t *rx_freeq; ++ crystalhd_dioq_t *rx_actq; ++ uint32_t stop_pending; ++ ++ /* HW counters.. */ ++ struct crystalhd_hw_stats stats; ++ ++ /* Core clock in MHz */ ++ uint32_t core_clock_mhz; ++ uint32_t prev_n; ++ uint32_t pwr_lock; ++}; ++ ++/* Clock defines for power control */ ++#define CLOCK_PRESET 175 ++ ++/* DMA engine register BIT mask wrappers.. 
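++ *
++ * Note: DMA_START_BIT is used both as the run/stop bit of the
++ * *_SW_DESC_LIST_CTRL_STS registers and as the poll mask applied to the
++ * *_FIRST_DESC_L_ADDR_LIST* registers in crystalhd_stop_rx_dma_engine(),
++ * i.e. it is expected to line up with the list_valid bit that
++ * crystalhd_hw_prog_rxdma() sets via "low_part | 0x01" (see
++ * desc_low_addr_reg above).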
*/ ++#define DMA_START_BIT MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_RUN_STOP_MASK ++ ++#define GET_RX_INTR_MASK (INTR_INTR_STATUS_L1_UV_RX_DMA_ERR_INTR_MASK | \ ++ INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK | \ ++ INTR_INTR_STATUS_L1_Y_RX_DMA_ERR_INTR_MASK | \ ++ INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK | \ ++ INTR_INTR_STATUS_L0_UV_RX_DMA_ERR_INTR_MASK | \ ++ INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK | \ ++ INTR_INTR_STATUS_L0_Y_RX_DMA_ERR_INTR_MASK | \ ++ INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK) ++ ++#define GET_Y0_ERR_MSK (MISC1_Y_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_MASK | \ ++ MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK | \ ++ MISC1_Y_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_MASK | \ ++ MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) ++ ++#define GET_UV0_ERR_MSK (MISC1_UV_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_MASK | \ ++ MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK | \ ++ MISC1_UV_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_MASK | \ ++ MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) ++ ++#define GET_Y1_ERR_MSK (MISC1_Y_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_MASK | \ ++ MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK | \ ++ MISC1_Y_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_MASK | \ ++ MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) ++ ++#define GET_UV1_ERR_MSK (MISC1_UV_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_MASK | \ ++ MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK | \ ++ MISC1_UV_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_MASK | \ ++ MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) ++ ++ ++/**** API Exposed to the other layers ****/ ++BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, ++ void *buffer, uint32_t sz); ++BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw, BC_FW_CMD *fw_cmd); ++bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw); ++BC_STATUS crystalhd_hw_open(struct crystalhd_hw *, struct crystalhd_adp *); ++BC_STATUS crystalhd_hw_close(struct crystalhd_hw *); ++BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *); ++BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *); ++ ++ ++BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, crystalhd_dio_req *ioreq, ++ hw_comp_callback call_back, ++ wait_queue_head_t *cb_event, ++ uint32_t *list_id, uint8_t data_flags); ++ ++BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw); ++BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw); ++BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw); ++BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id); ++BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw, ++ crystalhd_dio_req *ioreq, bool en_post); ++BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw, ++ BC_PIC_INFO_BLOCK *pib, ++ crystalhd_dio_req **ioreq); ++BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw); ++BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw); ++void crystalhd_hw_stats(struct crystalhd_hw *hw, struct crystalhd_hw_stats *stats); ++ ++/* API to program the core clock on the decoder */ ++BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *); ++ ++#endif +diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c +new file mode 100644 +index 0000000..1f36b4d +--- /dev/null ++++ b/drivers/staging/crystalhd/crystalhd_lnx.c +@@ -0,0 +1,780 @@ ++/*************************************************************************** ++ * Copyright (c) 2005-2009, Broadcom Corporation. ++ * ++ * Name: crystalhd_lnx . 
c ++ * ++ * Description: ++ * BCM70010 Linux driver ++ * ++ * HISTORY: ++ * ++ ********************************************************************** ++ * This file is part of the crystalhd device driver. ++ * ++ * This driver is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, version 2 of the License. ++ * ++ * This driver is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this driver. If not, see . ++ **********************************************************************/ ++ ++#include ++ ++#include "crystalhd_lnx.h" ++ ++static struct class *crystalhd_class; ++ ++static struct crystalhd_adp *g_adp_info; ++ ++static irqreturn_t chd_dec_isr(int irq, void *arg) ++{ ++ struct crystalhd_adp *adp = (struct crystalhd_adp *) arg; ++ int rc = 0; ++ if (adp) ++ rc = crystalhd_cmd_interrupt(&adp->cmds); ++ ++ return IRQ_RETVAL(rc); ++} ++ ++static int chd_dec_enable_int(struct crystalhd_adp *adp) ++{ ++ int rc = 0; ++ ++ if (!adp || !adp->pdev) { ++ BCMLOG_ERR("Invalid arg!!\n"); ++ return -EINVAL; ++ } ++ ++ if (adp->pdev->msi_enabled) ++ adp->msi = 1; ++ else ++ adp->msi = pci_enable_msi(adp->pdev); ++ ++ rc = request_irq(adp->pdev->irq, chd_dec_isr, IRQF_SHARED, ++ adp->name, (void *)adp); ++ if (rc) { ++ BCMLOG_ERR("Interrupt request failed.. \n"); ++ pci_disable_msi(adp->pdev); ++ } ++ ++ return rc; ++} ++ ++static int chd_dec_disable_int(struct crystalhd_adp *adp) ++{ ++ if (!adp || !adp->pdev) { ++ BCMLOG_ERR("Invalid arg!!\n"); ++ return -EINVAL; ++ } ++ ++ free_irq(adp->pdev->irq, adp); ++ ++ if (adp->msi) ++ pci_disable_msi(adp->pdev); ++ ++ return 0; ++} ++ ++crystalhd_ioctl_data *chd_dec_alloc_iodata(struct crystalhd_adp *adp, bool isr) ++{ ++ unsigned long flags = 0; ++ crystalhd_ioctl_data *temp; ++ ++ if (!adp) ++ return NULL; ++ ++ spin_lock_irqsave(&adp->lock, flags); ++ ++ temp = adp->idata_free_head; ++ if (temp) { ++ adp->idata_free_head = adp->idata_free_head->next; ++ memset(temp, 0, sizeof(*temp)); ++ } ++ ++ spin_unlock_irqrestore(&adp->lock, flags); ++ return temp; ++} ++ ++void chd_dec_free_iodata(struct crystalhd_adp *adp, crystalhd_ioctl_data *iodata, ++ bool isr) ++{ ++ unsigned long flags = 0; ++ ++ if (!adp || !iodata) ++ return; ++ ++ spin_lock_irqsave(&adp->lock, flags); ++ iodata->next = adp->idata_free_head; ++ adp->idata_free_head = iodata; ++ spin_unlock_irqrestore(&adp->lock, flags); ++} ++ ++static inline int crystalhd_user_data(unsigned long ud, void *dr, int size, int set) ++{ ++ int rc; ++ ++ if (!ud || !dr) { ++ BCMLOG_ERR("Invalid arg \n"); ++ return -EINVAL; ++ } ++ ++ if (set) ++ rc = copy_to_user((void *)ud, dr, size); ++ else ++ rc = copy_from_user(dr, (void *)ud, size); ++ ++ if (rc) { ++ BCMLOG_ERR("Invalid args for command \n"); ++ rc = -EFAULT; ++ } ++ ++ return rc; ++} ++ ++static int chd_dec_fetch_cdata(struct crystalhd_adp *adp, crystalhd_ioctl_data *io, ++ uint32_t m_sz, unsigned long ua) ++{ ++ unsigned long ua_off; ++ int rc = 0; ++ ++ if (!adp || !io || !ua || !m_sz) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return -EINVAL; ++ } ++ ++ io->add_cdata = vmalloc(m_sz); ++ if (!io->add_cdata) { ++ BCMLOG_ERR("kalloc fail for sz:%x\n", m_sz); ++ return -ENOMEM; ++ } 
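++	/* FIXME: add_cdata is allocated with vmalloc() above, but the
++	 * copy-failure path below frees it with kfree(); it should use
++	 * vfree(), as chd_dec_release_cdata() does.
++	 */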
++ ++ io->add_cdata_sz = m_sz; ++ ua_off = ua + sizeof(io->udata); ++ rc = crystalhd_user_data(ua_off, io->add_cdata, io->add_cdata_sz, 0); ++ if (rc) { ++ BCMLOG_ERR("failed to pull add_cdata sz:%x ua_off:%x\n", ++ io->add_cdata_sz, (unsigned int)ua_off); ++ if (io->add_cdata) { ++ kfree(io->add_cdata); ++ io->add_cdata = NULL; ++ } ++ return -ENODATA; ++ } ++ ++ return rc; ++} ++ ++static int chd_dec_release_cdata(struct crystalhd_adp *adp, ++ crystalhd_ioctl_data *io, unsigned long ua) ++{ ++ unsigned long ua_off; ++ int rc; ++ ++ if (!adp || !io || !ua) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return -EINVAL; ++ } ++ ++ if (io->cmd != BCM_IOC_FW_DOWNLOAD) { ++ ua_off = ua + sizeof(io->udata); ++ rc = crystalhd_user_data(ua_off, io->add_cdata, ++ io->add_cdata_sz, 1); ++ if (rc) { ++ BCMLOG_ERR("failed to push add_cdata sz:%x ua_off:%x\n", ++ io->add_cdata_sz, (unsigned int)ua_off); ++ return -ENODATA; ++ } ++ } ++ ++ if (io->add_cdata) { ++ vfree(io->add_cdata); ++ io->add_cdata = NULL; ++ } ++ ++ return 0; ++} ++ ++static int chd_dec_proc_user_data(struct crystalhd_adp *adp, ++ crystalhd_ioctl_data *io, ++ unsigned long ua, int set) ++{ ++ int rc; ++ uint32_t m_sz = 0; ++ ++ if (!adp || !io || !ua) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return -EINVAL; ++ } ++ ++ rc = crystalhd_user_data(ua, &io->udata, sizeof(io->udata), set); ++ if (rc) { ++ BCMLOG_ERR("failed to %s iodata \n", (set ? "set" : "get")); ++ return rc; ++ } ++ ++ switch (io->cmd) { ++ case BCM_IOC_MEM_RD: ++ case BCM_IOC_MEM_WR: ++ case BCM_IOC_FW_DOWNLOAD: ++ m_sz = io->udata.u.devMem.NumDwords * 4; ++ if (set) ++ rc = chd_dec_release_cdata(adp, io, ua); ++ else ++ rc = chd_dec_fetch_cdata(adp, io, m_sz, ua); ++ break; ++ default: ++ break; ++ } ++ ++ return rc; ++} ++ ++static int chd_dec_api_cmd(struct crystalhd_adp *adp, unsigned long ua, ++ uint32_t uid, uint32_t cmd, crystalhd_cmd_proc func) ++{ ++ int rc; ++ crystalhd_ioctl_data *temp; ++ BC_STATUS sts = BC_STS_SUCCESS; ++ ++ temp = chd_dec_alloc_iodata(adp, 0); ++ if (!temp) { ++ BCMLOG_ERR("Failed to get iodata..\n"); ++ return -EINVAL; ++ } ++ ++ temp->u_id = uid; ++ temp->cmd = cmd; ++ ++ rc = chd_dec_proc_user_data(adp, temp, ua, 0); ++ if (!rc) { ++ sts = func(&adp->cmds, temp); ++ if (sts == BC_STS_PENDING) ++ sts = BC_STS_NOT_IMPL; ++ temp->udata.RetSts = sts; ++ rc = chd_dec_proc_user_data(adp, temp, ua, 1); ++ } ++ ++ if (temp) { ++ chd_dec_free_iodata(adp, temp, 0); ++ temp = NULL; ++ } ++ ++ return rc; ++} ++ ++/* ========================= API interfaces =================================*/ ++static int chd_dec_ioctl(struct inode *in, struct file *fd, ++ unsigned int cmd, unsigned long ua) ++{ ++ struct crystalhd_adp *adp = chd_get_adp(); ++ crystalhd_cmd_proc cproc; ++ struct crystalhd_user *uc; ++ ++ if (!adp || !fd) { ++ BCMLOG_ERR("Invalid adp\n"); ++ return -EINVAL; ++ } ++ ++ uc = (struct crystalhd_user *)fd->private_data; ++ if (!uc) { ++ BCMLOG_ERR("Failed to get uc\n"); ++ return -ENODATA; ++ } ++ ++ cproc = crystalhd_get_cmd_proc(&adp->cmds, cmd, uc); ++ if (!cproc) { ++ BCMLOG_ERR("Unhandled command: %d\n", cmd); ++ return -EINVAL; ++ } ++ ++ return chd_dec_api_cmd(adp, ua, uc->uid, cmd, cproc); ++} ++ ++static int chd_dec_open(struct inode *in, struct file *fd) ++{ ++ struct crystalhd_adp *adp = chd_get_adp(); ++ int rc = 0; ++ BC_STATUS sts = BC_STS_SUCCESS; ++ struct crystalhd_user *uc = NULL; ++ ++ BCMLOG_ENTER; ++ if (!adp) { ++ BCMLOG_ERR("Invalid adp\n"); ++ return -EINVAL; ++ } ++ ++ if (adp->cfg_users >= BC_LINK_MAX_OPENS) { ++ 
BCMLOG(BCMLOG_INFO, "Already in use.%d\n", adp->cfg_users); ++ return -EBUSY; ++ } ++ ++ sts = crystalhd_user_open(&adp->cmds, &uc); ++ if (sts != BC_STS_SUCCESS) { ++ BCMLOG_ERR("cmd_user_open - %d \n", sts); ++ rc = -EBUSY; ++ } ++ ++ adp->cfg_users++; ++ ++ fd->private_data = uc; ++ ++ return rc; ++} ++ ++static int chd_dec_close(struct inode *in, struct file *fd) ++{ ++ struct crystalhd_adp *adp = chd_get_adp(); ++ struct crystalhd_user *uc; ++ ++ BCMLOG_ENTER; ++ if (!adp) { ++ BCMLOG_ERR("Invalid adp \n"); ++ return -EINVAL; ++ } ++ ++ uc = (struct crystalhd_user *)fd->private_data; ++ if (!uc) { ++ BCMLOG_ERR("Failed to get uc\n"); ++ return -ENODATA; ++ } ++ ++ crystalhd_user_close(&adp->cmds, uc); ++ ++ adp->cfg_users--; ++ ++ return 0; ++} ++ ++static const struct file_operations chd_dec_fops = { ++ .owner = THIS_MODULE, ++ .ioctl = chd_dec_ioctl, ++ .open = chd_dec_open, ++ .release = chd_dec_close, ++}; ++ ++static int chd_dec_init_chdev(struct crystalhd_adp *adp) ++{ ++ crystalhd_ioctl_data *temp; ++ struct device *dev; ++ int rc = -ENODEV, i = 0; ++ ++ if (!adp) ++ goto fail; ++ ++ adp->chd_dec_major = register_chrdev(0, CRYSTALHD_API_NAME, ++ &chd_dec_fops); ++ if (adp->chd_dec_major < 0) { ++ BCMLOG_ERR("Failed to create config dev\n"); ++ rc = adp->chd_dec_major; ++ goto fail; ++ } ++ ++ /* register crystalhd class */ ++ crystalhd_class = class_create(THIS_MODULE, "crystalhd"); ++ if (IS_ERR(crystalhd_class)) { ++ BCMLOG_ERR("failed to create class\n"); ++ goto fail; ++ } ++ ++ dev = device_create(crystalhd_class, NULL, MKDEV(adp->chd_dec_major, 0), ++ NULL, "crystalhd"); ++ if (!dev) { ++ BCMLOG_ERR("failed to create device\n"); ++ goto device_create_fail; ++ } ++ ++ rc = crystalhd_create_elem_pool(adp, BC_LINK_ELEM_POOL_SZ); ++ if (rc) { ++ BCMLOG_ERR("failed to create device\n"); ++ goto elem_pool_fail; ++ } ++ ++ /* Allocate general purpose ioctl pool. */ ++ for (i = 0; i < CHD_IODATA_POOL_SZ; i++) { ++ /* FIXME: jarod: why atomic? */ ++ temp = kzalloc(sizeof(crystalhd_ioctl_data), GFP_ATOMIC); ++ if (!temp) { ++ BCMLOG_ERR("ioctl data pool kzalloc failed\n"); ++ rc = -ENOMEM; ++ goto kzalloc_fail; ++ } ++ /* Add to global pool.. */ ++ chd_dec_free_iodata(adp, temp, 0); ++ } ++ ++ return 0; ++ ++kzalloc_fail: ++ crystalhd_delete_elem_pool(adp); ++elem_pool_fail: ++ device_destroy(crystalhd_class, MKDEV(adp->chd_dec_major, 0)); ++device_create_fail: ++ class_destroy(crystalhd_class); ++fail: ++ return rc; ++} ++ ++static void chd_dec_release_chdev(struct crystalhd_adp *adp) ++{ ++ crystalhd_ioctl_data *temp = NULL; ++ if (!adp) ++ return; ++ ++ if (adp->chd_dec_major > 0) { ++ /* unregister crystalhd class */ ++ device_destroy(crystalhd_class, MKDEV(adp->chd_dec_major, 0)); ++ unregister_chrdev(adp->chd_dec_major, CRYSTALHD_API_NAME); ++ BCMLOG(BCMLOG_INFO, "released api device - %d\n", ++ adp->chd_dec_major); ++ class_destroy(crystalhd_class); ++ } ++ adp->chd_dec_major = 0; ++ ++ /* Clear iodata pool.. 
*/ ++ do { ++ temp = chd_dec_alloc_iodata(adp, 0); ++ if (temp) ++ kfree(temp); ++ } while (temp); ++ ++ crystalhd_delete_elem_pool(adp); ++} ++ ++static int chd_pci_reserve_mem(struct crystalhd_adp *pinfo) ++{ ++ int rc; ++ unsigned long bar2 = pci_resource_start(pinfo->pdev, 2); ++ uint32_t mem_len = pci_resource_len(pinfo->pdev, 2); ++ unsigned long bar0 = pci_resource_start(pinfo->pdev, 0); ++ uint32_t i2o_len = pci_resource_len(pinfo->pdev, 0); ++ ++ BCMLOG(BCMLOG_SSTEP, "bar2:0x%lx-0x%08x bar0:0x%lx-0x%08x\n", ++ bar2, mem_len, bar0, i2o_len); ++ ++ rc = check_mem_region(bar2, mem_len); ++ if (rc) { ++ BCMLOG_ERR("No valid mem region...\n"); ++ return -ENOMEM; ++ } ++ ++ pinfo->addr = ioremap_nocache(bar2, mem_len); ++ if (!pinfo->addr) { ++ BCMLOG_ERR("Failed to remap mem region...\n"); ++ return -ENOMEM; ++ } ++ ++ pinfo->pci_mem_start = bar2; ++ pinfo->pci_mem_len = mem_len; ++ ++ rc = check_mem_region(bar0, i2o_len); ++ if (rc) { ++ BCMLOG_ERR("No valid mem region...\n"); ++ return -ENOMEM; ++ } ++ ++ pinfo->i2o_addr = ioremap_nocache(bar0, i2o_len); ++ if (!pinfo->i2o_addr) { ++ BCMLOG_ERR("Failed to remap mem region...\n"); ++ return -ENOMEM; ++ } ++ ++ pinfo->pci_i2o_start = bar0; ++ pinfo->pci_i2o_len = i2o_len; ++ ++ rc = pci_request_regions(pinfo->pdev, pinfo->name); ++ if (rc < 0) { ++ BCMLOG_ERR("Region request failed: %d\n", rc); ++ return rc; ++ } ++ ++ BCMLOG(BCMLOG_SSTEP, "Mapped addr:0x%08lx i2o_addr:0x%08lx\n", ++ (unsigned long)pinfo->addr, (unsigned long)pinfo->i2o_addr); ++ ++ return 0; ++} ++ ++static void chd_pci_release_mem(struct crystalhd_adp *pinfo) ++{ ++ if (!pinfo) ++ return; ++ ++ if (pinfo->addr) ++ iounmap(pinfo->addr); ++ ++ if (pinfo->i2o_addr) ++ iounmap(pinfo->i2o_addr); ++ ++ pci_release_regions(pinfo->pdev); ++} ++ ++ ++static void chd_dec_pci_remove(struct pci_dev *pdev) ++{ ++ struct crystalhd_adp *pinfo; ++ BC_STATUS sts = BC_STS_SUCCESS; ++ ++ BCMLOG_ENTER; ++ ++ pinfo = (struct crystalhd_adp *) pci_get_drvdata(pdev); ++ if (!pinfo) { ++ BCMLOG_ERR("could not get adp\n"); ++ return; ++ } ++ ++ sts = crystalhd_delete_cmd_context(&pinfo->cmds); ++ if (sts != BC_STS_SUCCESS) ++ BCMLOG_ERR("cmd delete :%d \n", sts); ++ ++ chd_dec_release_chdev(pinfo); ++ ++ chd_dec_disable_int(pinfo); ++ ++ chd_pci_release_mem(pinfo); ++ pci_disable_device(pinfo->pdev); ++ ++ kfree(pinfo); ++ g_adp_info = NULL; ++} ++ ++static int chd_dec_pci_probe(struct pci_dev *pdev, ++ const struct pci_device_id *entry) ++{ ++ struct crystalhd_adp *pinfo; ++ int rc; ++ BC_STATUS sts = BC_STS_SUCCESS; ++ ++ BCMLOG(BCMLOG_DBG, "PCI_INFO: Vendor:0x%04x Device:0x%04x " ++ "s_vendor:0x%04x s_device: 0x%04x\n", ++ pdev->vendor, pdev->device, pdev->subsystem_vendor, ++ pdev->subsystem_device); ++ ++ /* FIXME: jarod: why atomic? */ ++ pinfo = kzalloc(sizeof(struct crystalhd_adp), GFP_ATOMIC); ++ if (!pinfo) { ++ BCMLOG_ERR("Failed to allocate memory\n"); ++ return -ENOMEM; ++ } ++ ++ pinfo->pdev = pdev; ++ ++ rc = pci_enable_device(pdev); ++ if (rc) { ++ BCMLOG_ERR("Failed to enable PCI device\n"); ++ return rc; ++ } ++ ++ snprintf(pinfo->name, 31, "crystalhd_pci_e:%d:%d:%d", ++ pdev->bus->number, PCI_SLOT(pdev->devfn), ++ PCI_FUNC(pdev->devfn)); ++ ++ rc = chd_pci_reserve_mem(pinfo); ++ if (rc) { ++ BCMLOG_ERR("Failed to setup memory regions.\n"); ++ return -ENOMEM; ++ } ++ ++ pinfo->present = 1; ++ pinfo->drv_data = entry->driver_data; ++ ++ /* Setup adapter level lock.. */ ++ spin_lock_init(&pinfo->lock); ++ ++ /* setup api stuff.. 
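++	 * Remaining probe steps, in order: create the character device and
++	 * the ioctl/element pools (chd_dec_init_chdev), hook the interrupt,
++	 * pick a 64- or 32-bit DMA mask, set up the command context, then
++	 * enable bus mastering and stash the adapter in drvdata. Note that
++	 * the return value of chd_dec_init_chdev() is not checked here.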
*/ ++ chd_dec_init_chdev(pinfo); ++ rc = chd_dec_enable_int(pinfo); ++ if (rc) { ++ BCMLOG_ERR("_enable_int err:%d \n", rc); ++ pci_disable_device(pdev); ++ return -ENODEV; ++ } ++ ++ /* Set dma mask... */ ++ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { ++ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); ++ pinfo->dmabits = 64; ++ } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { ++ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); ++ pinfo->dmabits = 32; ++ } else { ++ BCMLOG_ERR("Unabled to setup DMA %d\n", rc); ++ pci_disable_device(pdev); ++ return -ENODEV; ++ } ++ ++ sts = crystalhd_setup_cmd_context(&pinfo->cmds, pinfo); ++ if (sts != BC_STS_SUCCESS) { ++ BCMLOG_ERR("cmd setup :%d \n", sts); ++ pci_disable_device(pdev); ++ return -ENODEV; ++ } ++ ++ pci_set_master(pdev); ++ ++ pci_set_drvdata(pdev, pinfo); ++ ++ g_adp_info = pinfo; ++ ++ return 0; ++ ++} ++ ++#ifdef CONFIG_PM ++int chd_dec_pci_suspend(struct pci_dev *pdev, pm_message_t state) ++{ ++ struct crystalhd_adp *adp; ++ crystalhd_ioctl_data *temp; ++ BC_STATUS sts = BC_STS_SUCCESS; ++ ++ adp = (struct crystalhd_adp *)pci_get_drvdata(pdev); ++ if (!adp) { ++ BCMLOG_ERR("could not get adp\n"); ++ return -ENODEV; ++ } ++ ++ temp = chd_dec_alloc_iodata(adp, false); ++ if (!temp) { ++ BCMLOG_ERR("could not get ioctl data\n"); ++ return -ENODEV; ++ } ++ ++ sts = crystalhd_suspend(&adp->cmds, temp); ++ if (sts != BC_STS_SUCCESS) { ++ BCMLOG_ERR("BCM70012 Suspend %d\n", sts); ++ return -ENODEV; ++ } ++ ++ chd_dec_free_iodata(adp, temp, false); ++ chd_dec_disable_int(adp); ++ pci_save_state(pdev); ++ ++ /* Disable IO/bus master/irq router */ ++ pci_disable_device(pdev); ++ pci_set_power_state(pdev, pci_choose_state(pdev, state)); ++ return 0; ++} ++ ++int chd_dec_pci_resume(struct pci_dev *pdev) ++{ ++ struct crystalhd_adp *adp; ++ BC_STATUS sts = BC_STS_SUCCESS; ++ int rc; ++ ++ adp = (struct crystalhd_adp *)pci_get_drvdata(pdev); ++ if (!adp) { ++ BCMLOG_ERR("could not get adp\n"); ++ return -ENODEV; ++ } ++ ++ pci_set_power_state(pdev, PCI_D0); ++ pci_restore_state(pdev); ++ ++ /* device's irq possibly is changed, driver should take care */ ++ if (pci_enable_device(pdev)) { ++ BCMLOG_ERR("Failed to enable PCI device\n"); ++ return 1; ++ } ++ ++ pci_set_master(pdev); ++ ++ rc = chd_dec_enable_int(adp); ++ if (rc) { ++ BCMLOG_ERR("_enable_int err:%d \n", rc); ++ pci_disable_device(pdev); ++ return -ENODEV; ++ } ++ ++ sts = crystalhd_resume(&adp->cmds); ++ if (sts != BC_STS_SUCCESS) { ++ BCMLOG_ERR("BCM70012 Resume %d\n", sts); ++ pci_disable_device(pdev); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++#endif ++ ++static struct pci_device_id chd_dec_pci_id_table[] = { ++/* vendor, device, subvendor, subdevice, class, classmask, driver_data */ ++ { 0x14e4, 0x1612, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, ++ { 0, }, ++}; ++ ++struct pci_driver bc_chd_70012_driver = { ++ .name = "Broadcom 70012 Decoder", ++ .probe = chd_dec_pci_probe, ++ .remove = chd_dec_pci_remove, ++ .id_table = chd_dec_pci_id_table, ++#ifdef CONFIG_PM ++ .suspend = chd_dec_pci_suspend, ++ .resume = chd_dec_pci_resume ++#endif ++}; ++MODULE_DEVICE_TABLE(pci, chd_dec_pci_id_table); ++ ++ ++void chd_set_log_level(struct crystalhd_adp *adp, char *arg) ++{ ++ if ((!arg) || (strlen(arg) < 3)) ++ g_linklog_level = BCMLOG_ERROR | BCMLOG_DATA; ++ else if (!strncmp(arg, "sstep", 5)) ++ g_linklog_level = BCMLOG_INFO | BCMLOG_DATA | BCMLOG_DBG | ++ BCMLOG_SSTEP | BCMLOG_ERROR; ++ else if (!strncmp(arg, "info", 4)) ++ g_linklog_level = BCMLOG_ERROR | BCMLOG_DATA | 
BCMLOG_INFO; ++ else if (!strncmp(arg, "debug", 5)) ++ g_linklog_level = BCMLOG_ERROR | BCMLOG_DATA | BCMLOG_INFO | ++ BCMLOG_DBG; ++ else if (!strncmp(arg, "pball", 5)) ++ g_linklog_level = 0xFFFFFFFF & ~(BCMLOG_SPINLOCK); ++ else if (!strncmp(arg, "silent", 6)) ++ g_linklog_level = 0; ++ else ++ g_linklog_level = 0; ++} ++ ++struct crystalhd_adp *chd_get_adp(void) ++{ ++ return g_adp_info; ++} ++ ++int __init chd_dec_module_init(void) ++{ ++ int rc; ++ ++ chd_set_log_level(NULL, "debug"); ++ BCMLOG(BCMLOG_DATA, "Loading crystalhd %d.%d.%d \n", ++ crystalhd_kmod_major, crystalhd_kmod_minor, crystalhd_kmod_rev); ++ ++ rc = pci_register_driver(&bc_chd_70012_driver); ++ ++ if (rc < 0) ++ BCMLOG_ERR("Could not find any devices. err:%d \n", rc); ++ ++ return rc; ++} ++ ++void __exit chd_dec_module_cleanup(void) ++{ ++ BCMLOG(BCMLOG_DATA, "unloading crystalhd %d.%d.%d \n", ++ crystalhd_kmod_major, crystalhd_kmod_minor, crystalhd_kmod_rev); ++ ++ pci_unregister_driver(&bc_chd_70012_driver); ++} ++ ++ ++MODULE_AUTHOR("Naren Sankar "); ++MODULE_AUTHOR("Prasad Bolisetty "); ++MODULE_DESCRIPTION(CRYSTAL_HD_NAME); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS("bcm70012"); ++ ++module_init(chd_dec_module_init); ++module_exit(chd_dec_module_cleanup); ++ +diff --git a/drivers/staging/crystalhd/crystalhd_lnx.h b/drivers/staging/crystalhd/crystalhd_lnx.h +new file mode 100644 +index 0000000..d338ae9 +--- /dev/null ++++ b/drivers/staging/crystalhd/crystalhd_lnx.h +@@ -0,0 +1,96 @@ ++/*************************************************************************** ++ * Copyright (c) 2005-2009, Broadcom Corporation. ++ * ++ * Name: crystalhd_lnx . c ++ * ++ * Description: ++ * BCM70012 Linux driver ++ * ++ * HISTORY: ++ * ++ ********************************************************************** ++ * This file is part of the crystalhd device driver. ++ * ++ * This driver is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, version 2 of the License. ++ * ++ * This driver is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this driver. If not, see . ++ **********************************************************************/ ++ ++#ifndef _CRYSTALHD_LNX_H_ ++#define _CRYSTALHD_LNX_H_ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "crystalhd_cmds.h" ++ ++#define CRYSTAL_HD_NAME "Broadcom Crystal HD Decoder (BCM70012) Driver" ++ ++ ++/* OS specific PCI information structure and adapter information. 
*/ ++struct crystalhd_adp { ++ /* Hardware borad/PCI specifics */ ++ char name[32]; ++ struct pci_dev *pdev; ++ ++ unsigned long pci_mem_start; ++ uint32_t pci_mem_len; ++ void *addr; ++ ++ unsigned long pci_i2o_start; ++ uint32_t pci_i2o_len; ++ void *i2o_addr; ++ ++ unsigned int drv_data; ++ unsigned int dmabits; /* 32 | 64 */ ++ unsigned int registered; ++ unsigned int present; ++ unsigned int msi; ++ ++ spinlock_t lock; ++ ++ /* API Related */ ++ unsigned int chd_dec_major; ++ unsigned int cfg_users; ++ ++ crystalhd_ioctl_data *idata_free_head; /* ioctl data pool */ ++ crystalhd_elem_t *elem_pool_head; /* Queue element pool */ ++ ++ struct crystalhd_cmd cmds; ++ ++ crystalhd_dio_req *ua_map_free_head; ++ struct pci_pool *fill_byte_pool; ++}; ++ ++ ++struct crystalhd_adp *chd_get_adp(void); ++void chd_set_log_level(struct crystalhd_adp *adp, char *arg); ++ ++#endif ++ +diff --git a/drivers/staging/crystalhd/crystalhd_misc.c b/drivers/staging/crystalhd/crystalhd_misc.c +new file mode 100644 +index 0000000..32e632c +--- /dev/null ++++ b/drivers/staging/crystalhd/crystalhd_misc.c +@@ -0,0 +1,1029 @@ ++/*************************************************************************** ++ * Copyright (c) 2005-2009, Broadcom Corporation. ++ * ++ * Name: crystalhd_misc . c ++ * ++ * Description: ++ * BCM70012 Linux driver misc routines. ++ * ++ * HISTORY: ++ * ++ ********************************************************************** ++ * This file is part of the crystalhd device driver. ++ * ++ * This driver is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, version 2 of the License. ++ * ++ * This driver is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this driver. If not, see . 
++ **********************************************************************/ ++ ++#include "crystalhd_misc.h" ++#include "crystalhd_lnx.h" ++ ++uint32_t g_linklog_level; ++ ++static inline uint32_t crystalhd_dram_rd(struct crystalhd_adp *adp, uint32_t mem_off) ++{ ++ crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (mem_off >> 19)); ++ return bc_dec_reg_rd(adp, (0x00380000 | (mem_off & 0x0007FFFF))); ++} ++ ++static inline void crystalhd_dram_wr(struct crystalhd_adp *adp, uint32_t mem_off, uint32_t val) ++{ ++ crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (mem_off >> 19)); ++ bc_dec_reg_wr(adp, (0x00380000 | (mem_off & 0x0007FFFF)), val); ++} ++ ++static inline BC_STATUS bc_chk_dram_range(struct crystalhd_adp *adp, uint32_t start_off, uint32_t cnt) ++{ ++ return BC_STS_SUCCESS; ++} ++ ++static crystalhd_dio_req *crystalhd_alloc_dio(struct crystalhd_adp *adp) ++{ ++ unsigned long flags = 0; ++ crystalhd_dio_req *temp = NULL; ++ ++ if (!adp) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return temp; ++ } ++ ++ spin_lock_irqsave(&adp->lock, flags); ++ temp = adp->ua_map_free_head; ++ if (temp) ++ adp->ua_map_free_head = adp->ua_map_free_head->next; ++ spin_unlock_irqrestore(&adp->lock, flags); ++ ++ return temp; ++} ++ ++static void crystalhd_free_dio(struct crystalhd_adp *adp, crystalhd_dio_req *dio) ++{ ++ unsigned long flags = 0; ++ ++ if (!adp || !dio) ++ return; ++ spin_lock_irqsave(&adp->lock, flags); ++ dio->sig = crystalhd_dio_inv; ++ dio->page_cnt = 0; ++ dio->fb_size = 0; ++ memset(&dio->uinfo, 0, sizeof(dio->uinfo)); ++ dio->next = adp->ua_map_free_head; ++ adp->ua_map_free_head = dio; ++ spin_unlock_irqrestore(&adp->lock, flags); ++} ++ ++static crystalhd_elem_t *crystalhd_alloc_elem(struct crystalhd_adp *adp) ++{ ++ unsigned long flags = 0; ++ crystalhd_elem_t *temp = NULL; ++ ++ if (!adp) ++ return temp; ++ spin_lock_irqsave(&adp->lock, flags); ++ temp = adp->elem_pool_head; ++ if (temp) { ++ adp->elem_pool_head = adp->elem_pool_head->flink; ++ memset(temp, 0, sizeof(*temp)); ++ } ++ spin_unlock_irqrestore(&adp->lock, flags); ++ ++ return temp; ++} ++static void crystalhd_free_elem(struct crystalhd_adp *adp, crystalhd_elem_t *elem) ++{ ++ unsigned long flags = 0; ++ ++ if (!adp || !elem) ++ return; ++ spin_lock_irqsave(&adp->lock, flags); ++ elem->flink = adp->elem_pool_head; ++ adp->elem_pool_head = elem; ++ spin_unlock_irqrestore(&adp->lock, flags); ++} ++ ++static inline void crystalhd_set_sg(struct scatterlist *sg, struct page *page, ++ unsigned int len, unsigned int offset) ++{ ++ sg_set_page(sg, page, len, offset); ++#ifdef CONFIG_X86_64 ++ sg->dma_length = len; ++#endif ++} ++ ++static inline void crystalhd_init_sg(struct scatterlist *sg, unsigned int entries) ++{ ++ /* http://lkml.org/lkml/2007/11/27/68 */ ++ sg_init_table(sg, entries); ++} ++ ++/*========================== Extern ========================================*/ ++/** ++ * bc_dec_reg_rd - Read 7412's device register. ++ * @adp: Adapter instance ++ * @reg_off: Register offset. ++ * ++ * Return: ++ * 32bit value read ++ * ++ * 7412's device register read routine. This interface use ++ * 7412's device access range mapped from BAR-2 (4M) of PCIe ++ * configuration space. 
++ */ ++uint32_t bc_dec_reg_rd(struct crystalhd_adp *adp, uint32_t reg_off) ++{ ++ if (!adp || (reg_off > adp->pci_mem_len)) { ++ BCMLOG_ERR("dec_rd_reg_off outof range: 0x%08x\n", reg_off); ++ return 0; ++ } ++ ++ return readl(adp->addr + reg_off); ++} ++ ++/** ++ * bc_dec_reg_wr - Write 7412's device register ++ * @adp: Adapter instance ++ * @reg_off: Register offset. ++ * @val: Dword value to be written. ++ * ++ * Return: ++ * none. ++ * ++ * 7412's device register write routine. This interface use ++ * 7412's device access range mapped from BAR-2 (4M) of PCIe ++ * configuration space. ++ */ ++void bc_dec_reg_wr(struct crystalhd_adp *adp, uint32_t reg_off, uint32_t val) ++{ ++ if (!adp || (reg_off > adp->pci_mem_len)) { ++ BCMLOG_ERR("dec_wr_reg_off outof range: 0x%08x\n", reg_off); ++ return; ++ } ++ writel(val, adp->addr + reg_off); ++ udelay(8); ++} ++ ++/** ++ * crystalhd_reg_rd - Read Link's device register. ++ * @adp: Adapter instance ++ * @reg_off: Register offset. ++ * ++ * Return: ++ * 32bit value read ++ * ++ * Link device register read routine. This interface use ++ * Link's device access range mapped from BAR-1 (64K) of PCIe ++ * configuration space. ++ * ++ */ ++uint32_t crystalhd_reg_rd(struct crystalhd_adp *adp, uint32_t reg_off) ++{ ++ if (!adp || (reg_off > adp->pci_i2o_len)) { ++ BCMLOG_ERR("link_rd_reg_off outof range: 0x%08x\n", reg_off); ++ return 0; ++ } ++ return readl(adp->i2o_addr + reg_off); ++} ++ ++/** ++ * crystalhd_reg_wr - Write Link's device register ++ * @adp: Adapter instance ++ * @reg_off: Register offset. ++ * @val: Dword value to be written. ++ * ++ * Return: ++ * none. ++ * ++ * Link device register write routine. This interface use ++ * Link's device access range mapped from BAR-1 (64K) of PCIe ++ * configuration space. ++ * ++ */ ++void crystalhd_reg_wr(struct crystalhd_adp *adp, uint32_t reg_off, uint32_t val) ++{ ++ if (!adp || (reg_off > adp->pci_i2o_len)) { ++ BCMLOG_ERR("link_wr_reg_off outof range: 0x%08x\n", reg_off); ++ return; ++ } ++ writel(val, adp->i2o_addr + reg_off); ++} ++ ++/** ++ * crystalhd_mem_rd - Read data from 7412's DRAM area. ++ * @adp: Adapter instance ++ * @start_off: Start offset. ++ * @dw_cnt: Count in dwords. ++ * @rd_buff: Buffer to copy the data from dram. ++ * ++ * Return: ++ * Status. ++ * ++ * 7412's Dram read routine. ++ */ ++BC_STATUS crystalhd_mem_rd(struct crystalhd_adp *adp, uint32_t start_off, ++ uint32_t dw_cnt, uint32_t *rd_buff) ++{ ++ uint32_t ix = 0; ++ ++ if (!adp || !rd_buff || ++ (bc_chk_dram_range(adp, start_off, dw_cnt) != BC_STS_SUCCESS)) { ++ BCMLOG_ERR("Invalid arg \n"); ++ return BC_STS_INV_ARG; ++ } ++ for (ix = 0; ix < dw_cnt; ix++) ++ rd_buff[ix] = crystalhd_dram_rd(adp, (start_off + (ix * 4))); ++ ++ return BC_STS_SUCCESS; ++} ++ ++/** ++ * crystalhd_mem_wr - Write data to 7412's DRAM area. ++ * @adp: Adapter instance ++ * @start_off: Start offset. ++ * @dw_cnt: Count in dwords. ++ * @wr_buff: Data Buffer to be written. ++ * ++ * Return: ++ * Status. ++ * ++ * 7412's Dram write routine. 
++ */ ++BC_STATUS crystalhd_mem_wr(struct crystalhd_adp *adp, uint32_t start_off, ++ uint32_t dw_cnt, uint32_t *wr_buff) ++{ ++ uint32_t ix = 0; ++ ++ if (!adp || !wr_buff || ++ (bc_chk_dram_range(adp, start_off, dw_cnt) != BC_STS_SUCCESS)) { ++ BCMLOG_ERR("Invalid arg \n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ for (ix = 0; ix < dw_cnt; ix++) ++ crystalhd_dram_wr(adp, (start_off + (ix * 4)), wr_buff[ix]); ++ ++ return BC_STS_SUCCESS; ++} ++/** ++ * crystalhd_pci_cfg_rd - PCIe config read ++ * @adp: Adapter instance ++ * @off: PCI config space offset. ++ * @len: Size -- Byte, Word & dword. ++ * @val: Value read ++ * ++ * Return: ++ * Status. ++ * ++ * Get value from Link's PCIe config space. ++ */ ++BC_STATUS crystalhd_pci_cfg_rd(struct crystalhd_adp *adp, uint32_t off, ++ uint32_t len, uint32_t *val) ++{ ++ BC_STATUS sts = BC_STS_SUCCESS; ++ int rc = 0; ++ ++ if (!adp || !val) { ++ BCMLOG_ERR("Invalid arg \n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ switch (len) { ++ case 1: ++ rc = pci_read_config_byte(adp->pdev, off, (u8 *)val); ++ break; ++ case 2: ++ rc = pci_read_config_word(adp->pdev, off, (u16 *)val); ++ break; ++ case 4: ++ rc = pci_read_config_dword(adp->pdev, off, (u32 *)val); ++ break; ++ default: ++ rc = -EINVAL; ++ sts = BC_STS_INV_ARG; ++ BCMLOG_ERR("Invalid len:%d\n", len); ++ }; ++ ++ if (rc && (sts == BC_STS_SUCCESS)) ++ sts = BC_STS_ERROR; ++ ++ return sts; ++} ++ ++/** ++ * crystalhd_pci_cfg_wr - PCIe config write ++ * @adp: Adapter instance ++ * @off: PCI config space offset. ++ * @len: Size -- Byte, Word & dword. ++ * @val: Value to be written ++ * ++ * Return: ++ * Status. ++ * ++ * Set value to Link's PCIe config space. ++ */ ++BC_STATUS crystalhd_pci_cfg_wr(struct crystalhd_adp *adp, uint32_t off, ++ uint32_t len, uint32_t val) ++{ ++ BC_STATUS sts = BC_STS_SUCCESS; ++ int rc = 0; ++ ++ if (!adp || !val) { ++ BCMLOG_ERR("Invalid arg \n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ switch (len) { ++ case 1: ++ rc = pci_write_config_byte(adp->pdev, off, (u8)val); ++ break; ++ case 2: ++ rc = pci_write_config_word(adp->pdev, off, (u16)val); ++ break; ++ case 4: ++ rc = pci_write_config_dword(adp->pdev, off, val); ++ break; ++ default: ++ rc = -EINVAL; ++ sts = BC_STS_INV_ARG; ++ BCMLOG_ERR("Invalid len:%d\n", len); ++ }; ++ ++ if (rc && (sts == BC_STS_SUCCESS)) ++ sts = BC_STS_ERROR; ++ ++ return sts; ++} ++ ++/** ++ * bc_kern_dma_alloc - Allocate memory for Dma rings ++ * @adp: Adapter instance ++ * @sz: Size of the memory to allocate. ++ * @phy_addr: Physical address of the memory allocated. ++ * Typedef to system's dma_addr_t (u64) ++ * ++ * Return: ++ * Pointer to allocated memory.. ++ * ++ * Wrapper to Linux kernel interface. ++ * ++ */ ++void *bc_kern_dma_alloc(struct crystalhd_adp *adp, uint32_t sz, ++ dma_addr_t *phy_addr) ++{ ++ void *temp = NULL; ++ ++ if (!adp || !sz || !phy_addr) { ++ BCMLOG_ERR("Invalide Arg..\n"); ++ return temp; ++ } ++ ++ temp = pci_alloc_consistent(adp->pdev, sz, phy_addr); ++ if (temp) ++ memset(temp, 0, sz); ++ ++ return temp; ++} ++ ++/** ++ * bc_kern_dma_free - Release Dma ring memory. ++ * @adp: Adapter instance ++ * @sz: Size of the memory to allocate. ++ * @ka: Kernel virtual address returned during _dio_alloc() ++ * @phy_addr: Physical address of the memory allocated. ++ * Typedef to system's dma_addr_t (u64) ++ * ++ * Return: ++ * none. 
++ */ ++void bc_kern_dma_free(struct crystalhd_adp *adp, uint32_t sz, void *ka, ++ dma_addr_t phy_addr) ++{ ++ if (!adp || !ka || !sz || !phy_addr) { ++ BCMLOG_ERR("Invalide Arg..\n"); ++ return; ++ } ++ ++ pci_free_consistent(adp->pdev, sz, ka, phy_addr); ++} ++ ++/** ++ * crystalhd_create_dioq - Create Generic DIO queue ++ * @adp: Adapter instance ++ * @dioq_hnd: Handle to the dio queue created ++ * @cb : Optional - Call back To free the element. ++ * @cbctx: Context to pass to callback. ++ * ++ * Return: ++ * status ++ * ++ * Initialize Generic DIO queue to hold any data. Callback ++ * will be used to free elements while deleting the queue. ++ */ ++BC_STATUS crystalhd_create_dioq(struct crystalhd_adp *adp, ++ crystalhd_dioq_t **dioq_hnd, ++ crystalhd_data_free_cb cb, void *cbctx) ++{ ++ crystalhd_dioq_t *dioq = NULL; ++ ++ if (!adp || !dioq_hnd) { ++ BCMLOG_ERR("Invalid arg!!\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ dioq = kzalloc(sizeof(*dioq), GFP_KERNEL); ++ if (!dioq) ++ return BC_STS_INSUFF_RES; ++ ++ spin_lock_init(&dioq->lock); ++ dioq->sig = BC_LINK_DIOQ_SIG; ++ dioq->head = (crystalhd_elem_t *)&dioq->head; ++ dioq->tail = (crystalhd_elem_t *)&dioq->head; ++ crystalhd_create_event(&dioq->event); ++ dioq->adp = adp; ++ dioq->data_rel_cb = cb; ++ dioq->cb_context = cbctx; ++ *dioq_hnd = dioq; ++ ++ return BC_STS_SUCCESS; ++} ++ ++/** ++ * crystalhd_delete_dioq - Delete Generic DIO queue ++ * @adp: Adapter instance ++ * @dioq: DIOQ instance.. ++ * ++ * Return: ++ * None. ++ * ++ * Release Generic DIO queue. This function will remove ++ * all the entries from the Queue and will release data ++ * by calling the call back provided during creation. ++ * ++ */ ++void crystalhd_delete_dioq(struct crystalhd_adp *adp, crystalhd_dioq_t *dioq) ++{ ++ void *temp; ++ ++ if (!dioq || (dioq->sig != BC_LINK_DIOQ_SIG)) ++ return; ++ ++ do { ++ temp = crystalhd_dioq_fetch(dioq); ++ if (temp && dioq->data_rel_cb) ++ dioq->data_rel_cb(dioq->cb_context, temp); ++ } while (temp); ++ dioq->sig = 0; ++ kfree(dioq); ++} ++ ++/** ++ * crystalhd_dioq_add - Add new DIO request element. ++ * @ioq: DIO queue instance ++ * @t: DIO request to be added. ++ * @wake: True - Wake up suspended process. ++ * @tag: Special tag to assign - For search and get. ++ * ++ * Return: ++ * Status. ++ * ++ * Insert new element to Q tail. ++ */ ++BC_STATUS crystalhd_dioq_add(crystalhd_dioq_t *ioq, void *data, ++ bool wake, uint32_t tag) ++{ ++ unsigned long flags = 0; ++ crystalhd_elem_t *tmp; ++ ++ if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG) || !data) { ++ BCMLOG_ERR("Invalid arg!!\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ tmp = crystalhd_alloc_elem(ioq->adp); ++ if (!tmp) { ++ BCMLOG_ERR("No free elements.\n"); ++ return BC_STS_INSUFF_RES; ++ } ++ ++ tmp->data = data; ++ tmp->tag = tag; ++ spin_lock_irqsave(&ioq->lock, flags); ++ tmp->flink = (crystalhd_elem_t *)&ioq->head; ++ tmp->blink = ioq->tail; ++ tmp->flink->blink = tmp; ++ tmp->blink->flink = tmp; ++ ioq->count++; ++ spin_unlock_irqrestore(&ioq->lock, flags); ++ ++ if (wake) ++ crystalhd_set_event(&ioq->event); ++ ++ return BC_STS_SUCCESS; ++} ++ ++/** ++ * crystalhd_dioq_fetch - Fetch element from head. ++ * @ioq: DIO queue instance ++ * ++ * Return: ++ * data element from the head.. ++ * ++ * Remove an element from Queue. 
++ */ ++void *crystalhd_dioq_fetch(crystalhd_dioq_t *ioq) ++{ ++ unsigned long flags = 0; ++ crystalhd_elem_t *tmp; ++ crystalhd_elem_t *ret = NULL; ++ void *data = NULL; ++ ++ if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG)) { ++ BCMLOG_ERR("Invalid arg!!\n"); ++ return data; ++ } ++ ++ spin_lock_irqsave(&ioq->lock, flags); ++ tmp = ioq->head; ++ if (tmp != (crystalhd_elem_t *)&ioq->head) { ++ ret = tmp; ++ tmp->flink->blink = tmp->blink; ++ tmp->blink->flink = tmp->flink; ++ ioq->count--; ++ } ++ spin_unlock_irqrestore(&ioq->lock, flags); ++ if (ret) { ++ data = ret->data; ++ crystalhd_free_elem(ioq->adp, ret); ++ } ++ ++ return data; ++} ++/** ++ * crystalhd_dioq_find_and_fetch - Search the tag and Fetch element ++ * @ioq: DIO queue instance ++ * @tag: Tag to search for. ++ * ++ * Return: ++ * element from the head.. ++ * ++ * Search TAG and remove the element. ++ */ ++void *crystalhd_dioq_find_and_fetch(crystalhd_dioq_t *ioq, uint32_t tag) ++{ ++ unsigned long flags = 0; ++ crystalhd_elem_t *tmp; ++ crystalhd_elem_t *ret = NULL; ++ void *data = NULL; ++ ++ if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG)) { ++ BCMLOG_ERR("Invalid arg!!\n"); ++ return data; ++ } ++ ++ spin_lock_irqsave(&ioq->lock, flags); ++ tmp = ioq->head; ++ while (tmp != (crystalhd_elem_t *)&ioq->head) { ++ if (tmp->tag == tag) { ++ ret = tmp; ++ tmp->flink->blink = tmp->blink; ++ tmp->blink->flink = tmp->flink; ++ ioq->count--; ++ break; ++ } ++ tmp = tmp->flink; ++ } ++ spin_unlock_irqrestore(&ioq->lock, flags); ++ ++ if (ret) { ++ data = ret->data; ++ crystalhd_free_elem(ioq->adp, ret); ++ } ++ ++ return data; ++} ++ ++/** ++ * crystalhd_dioq_fetch_wait - Fetch element from Head. ++ * @ioq: DIO queue instance ++ * @to_secs: Wait timeout in seconds.. ++ * ++ * Return: ++ * element from the head.. ++ * ++ * Return element from head if Q is not empty. Wait for new element ++ * if Q is empty for Timeout seconds. ++ */ ++void *crystalhd_dioq_fetch_wait(crystalhd_dioq_t *ioq, uint32_t to_secs, ++ uint32_t *sig_pend) ++{ ++ unsigned long flags = 0; ++ int rc = 0, count; ++ void *tmp = NULL; ++ ++ if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG) || !to_secs || !sig_pend) { ++ BCMLOG_ERR("Invalid arg!!\n"); ++ return tmp; ++ } ++ ++ count = to_secs; ++ spin_lock_irqsave(&ioq->lock, flags); ++ while ((ioq->count == 0) && count) { ++ spin_unlock_irqrestore(&ioq->lock, flags); ++ ++ crystalhd_wait_on_event(&ioq->event, (ioq->count > 0), 1000, rc, 0); ++ if (rc == 0) { ++ goto out; ++ } else if (rc == -EINTR) { ++ BCMLOG(BCMLOG_INFO, "Cancelling fetch wait\n"); ++ *sig_pend = 1; ++ return tmp; ++ } ++ spin_lock_irqsave(&ioq->lock, flags); ++ count--; ++ } ++ spin_unlock_irqrestore(&ioq->lock, flags); ++ ++out: ++ return crystalhd_dioq_fetch(ioq); ++} ++ ++/** ++ * crystalhd_map_dio - Map user address for DMA ++ * @adp: Adapter instance ++ * @ubuff: User buffer to map. ++ * @ubuff_sz: User buffer size. ++ * @uv_offset: UV buffer offset. ++ * @en_422mode: TRUE:422 FALSE:420 Capture mode. ++ * @dir_tx: TRUE for Tx (To device from host) ++ * @dio_hnd: Handle to mapped DIO request. ++ * ++ * Return: ++ * Status. ++ * ++ * This routine maps user address and lock pages for DMA. ++ * ++ */ ++BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff, ++ uint32_t ubuff_sz, uint32_t uv_offset, ++ bool en_422mode, bool dir_tx, ++ crystalhd_dio_req **dio_hnd) ++{ ++ crystalhd_dio_req *dio; ++ /* FIXME: jarod: should some of these unsigned longs be uint32_t or uintptr_t? 
*/ ++ unsigned long start = 0, end = 0, uaddr = 0, count = 0; ++ unsigned long spsz = 0, uv_start = 0; ++ int i = 0, rw = 0, res = 0, nr_pages = 0, skip_fb_sg = 0; ++ ++ if (!adp || !ubuff || !ubuff_sz || !dio_hnd) { ++ BCMLOG_ERR("Invalid arg \n"); ++ return BC_STS_INV_ARG; ++ } ++ /* Compute pages */ ++ uaddr = (unsigned long)ubuff; ++ count = (unsigned long)ubuff_sz; ++ end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ start = uaddr >> PAGE_SHIFT; ++ nr_pages = end - start; ++ ++ if (!count || ((uaddr + count) < uaddr)) { ++ BCMLOG_ERR("User addr overflow!!\n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ dio = crystalhd_alloc_dio(adp); ++ if (!dio) { ++ BCMLOG_ERR("dio pool empty..\n"); ++ return BC_STS_INSUFF_RES; ++ } ++ ++ if (dir_tx) { ++ rw = WRITE; ++ dio->direction = DMA_TO_DEVICE; ++ } else { ++ rw = READ; ++ dio->direction = DMA_FROM_DEVICE; ++ } ++ ++ if (nr_pages > dio->max_pages) { ++ BCMLOG_ERR("max_pages(%d) exceeded(%d)!!\n", ++ dio->max_pages, nr_pages); ++ crystalhd_unmap_dio(adp, dio); ++ return BC_STS_INSUFF_RES; ++ } ++ ++ if (uv_offset) { ++ uv_start = (uaddr + (unsigned long)uv_offset) >> PAGE_SHIFT; ++ dio->uinfo.uv_sg_ix = uv_start - start; ++ dio->uinfo.uv_sg_off = ((uaddr + (unsigned long)uv_offset) & ~PAGE_MASK); ++ } ++ ++ dio->fb_size = ubuff_sz & 0x03; ++ if (dio->fb_size) { ++ res = copy_from_user(dio->fb_va, ++ (void *)(uaddr + count - dio->fb_size), ++ dio->fb_size); ++ if (res) { ++ BCMLOG_ERR("failed %d to copy %u fill bytes from %p\n", ++ res, dio->fb_size, ++ (void *)(uaddr + count-dio->fb_size)); ++ crystalhd_unmap_dio(adp, dio); ++ return BC_STS_INSUFF_RES; ++ } ++ } ++ ++ down_read(¤t->mm->mmap_sem); ++ res = get_user_pages(current, current->mm, uaddr, nr_pages, rw == READ, ++ 0, dio->pages, NULL); ++ up_read(¤t->mm->mmap_sem); ++ ++ /* Save for release..*/ ++ dio->sig = crystalhd_dio_locked; ++ if (res < nr_pages) { ++ BCMLOG_ERR("get pages failed: %d-%d\n", nr_pages, res); ++ dio->page_cnt = res; ++ crystalhd_unmap_dio(adp, dio); ++ return BC_STS_ERROR; ++ } ++ ++ dio->page_cnt = nr_pages; ++ /* Get scatter/gather */ ++ crystalhd_init_sg(dio->sg, dio->page_cnt); ++ crystalhd_set_sg(&dio->sg[0], dio->pages[0], 0, uaddr & ~PAGE_MASK); ++ if (nr_pages > 1) { ++ dio->sg[0].length = PAGE_SIZE - dio->sg[0].offset; ++ ++#ifdef CONFIG_X86_64 ++ dio->sg[0].dma_length = dio->sg[0].length; ++#endif ++ count -= dio->sg[0].length; ++ for (i = 1; i < nr_pages; i++) { ++ if (count < 4) { ++ spsz = count; ++ skip_fb_sg = 1; ++ } else { ++ spsz = (count < PAGE_SIZE) ? ++ (count & ~0x03) : PAGE_SIZE; ++ } ++ crystalhd_set_sg(&dio->sg[i], dio->pages[i], spsz, 0); ++ count -= spsz; ++ } ++ } else { ++ if (count < 4) { ++ dio->sg[0].length = count; ++ skip_fb_sg = 1; ++ } else { ++ dio->sg[0].length = count - dio->fb_size; ++ } ++#ifdef CONFIG_X86_64 ++ dio->sg[0].dma_length = dio->sg[0].length; ++#endif ++ } ++ dio->sg_cnt = pci_map_sg(adp->pdev, dio->sg, ++ dio->page_cnt, dio->direction); ++ if (dio->sg_cnt <= 0) { ++ BCMLOG_ERR("sg map %d-%d \n", dio->sg_cnt, dio->page_cnt); ++ crystalhd_unmap_dio(adp, dio); ++ return BC_STS_ERROR; ++ } ++ if (dio->sg_cnt && skip_fb_sg) ++ dio->sg_cnt -= 1; ++ dio->sig = crystalhd_dio_sg_mapped; ++ /* Fill in User info.. 
*/ ++ dio->uinfo.xfr_len = ubuff_sz; ++ dio->uinfo.xfr_buff = ubuff; ++ dio->uinfo.uv_offset = uv_offset; ++ dio->uinfo.b422mode = en_422mode; ++ dio->uinfo.dir_tx = dir_tx; ++ ++ *dio_hnd = dio; ++ ++ return BC_STS_SUCCESS; ++} ++ ++/** ++ * crystalhd_unmap_sgl - Release mapped resources ++ * @adp: Adapter instance ++ * @dio: DIO request instance ++ * ++ * Return: ++ * Status. ++ * ++ * This routine is to unmap the user buffer pages. ++ */ ++BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *adp, crystalhd_dio_req *dio) ++{ ++ struct page *page = NULL; ++ int j = 0; ++ ++ if (!adp || !dio) { ++ BCMLOG_ERR("Invalid arg \n"); ++ return BC_STS_INV_ARG; ++ } ++ ++ if ((dio->page_cnt > 0) && (dio->sig != crystalhd_dio_inv)) { ++ for (j = 0; j < dio->page_cnt; j++) { ++ page = dio->pages[j]; ++ if (page) { ++ if (!PageReserved(page) && ++ (dio->direction == DMA_FROM_DEVICE)) ++ SetPageDirty(page); ++ page_cache_release(page); ++ } ++ } ++ } ++ if (dio->sig == crystalhd_dio_sg_mapped) ++ pci_unmap_sg(adp->pdev, dio->sg, dio->page_cnt, dio->direction); ++ ++ crystalhd_free_dio(adp, dio); ++ ++ return BC_STS_SUCCESS; ++} ++ ++/** ++ * crystalhd_create_dio_pool - Allocate mem pool for DIO management. ++ * @adp: Adapter instance ++ * @max_pages: Max pages for size calculation. ++ * ++ * Return: ++ * system error. ++ * ++ * This routine creates a memory pool to hold dio context for ++ * for HW Direct IO operation. ++ */ ++int crystalhd_create_dio_pool(struct crystalhd_adp *adp, uint32_t max_pages) ++{ ++ uint32_t asz = 0, i = 0; ++ uint8_t *temp; ++ crystalhd_dio_req *dio; ++ ++ if (!adp || !max_pages) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return -EINVAL; ++ } ++ ++ /* Get dma memory for fill byte handling..*/ ++ adp->fill_byte_pool = pci_pool_create("crystalhd_fbyte", ++ adp->pdev, 8, 8, 0); ++ if (!adp->fill_byte_pool) { ++ BCMLOG_ERR("failed to create fill byte pool\n"); ++ return -ENOMEM; ++ } ++ ++ /* Get the max size from user based on 420/422 modes */ ++ asz = (sizeof(*dio->pages) * max_pages) + ++ (sizeof(*dio->sg) * max_pages) + sizeof(*dio); ++ ++ BCMLOG(BCMLOG_DBG, "Initializing Dio pool %d %d %x %p\n", ++ BC_LINK_SG_POOL_SZ, max_pages, asz, adp->fill_byte_pool); ++ ++ for (i = 0; i < BC_LINK_SG_POOL_SZ; i++) { ++ temp = (uint8_t *)kzalloc(asz, GFP_KERNEL); ++ if ((temp) == NULL) { ++ BCMLOG_ERR("Failed to alloc %d mem\n", asz); ++ return -ENOMEM; ++ } ++ ++ dio = (crystalhd_dio_req *)temp; ++ temp += sizeof(*dio); ++ dio->pages = (struct page **)temp; ++ temp += (sizeof(*dio->pages) * max_pages); ++ dio->sg = (struct scatterlist *)temp; ++ dio->max_pages = max_pages; ++ dio->fb_va = pci_pool_alloc(adp->fill_byte_pool, GFP_KERNEL, ++ &dio->fb_pa); ++ if (!dio->fb_va) { ++ BCMLOG_ERR("fill byte alloc failed.\n"); ++ return -ENOMEM; ++ } ++ ++ crystalhd_free_dio(adp, dio); ++ } ++ ++ return 0; ++} ++ ++/** ++ * crystalhd_destroy_dio_pool - Release DIO mem pool. ++ * @adp: Adapter instance ++ * ++ * Return: ++ * none. ++ * ++ * This routine releases dio memory pool during close. 
++ */ ++void crystalhd_destroy_dio_pool(struct crystalhd_adp *adp) ++{ ++ crystalhd_dio_req *dio; ++ int count = 0; ++ ++ if (!adp) { ++ BCMLOG_ERR("Invalid Arg!!\n"); ++ return; ++ } ++ ++ do { ++ dio = crystalhd_alloc_dio(adp); ++ if (dio) { ++ if (dio->fb_va) ++ pci_pool_free(adp->fill_byte_pool, ++ dio->fb_va, dio->fb_pa); ++ count++; ++ kfree(dio); ++ } ++ } while (dio); ++ ++ if (adp->fill_byte_pool) { ++ pci_pool_destroy(adp->fill_byte_pool); ++ adp->fill_byte_pool = NULL; ++ } ++ ++ BCMLOG(BCMLOG_DBG, "Released dio pool %d \n", count); ++} ++ ++/** ++ * crystalhd_create_elem_pool - List element pool creation. ++ * @adp: Adapter instance ++ * @pool_size: Number of elements in the pool. ++ * ++ * Return: ++ * 0 - success, <0 error ++ * ++ * Create general purpose list element pool to hold pending, ++ * and active requests. ++ */ ++int crystalhd_create_elem_pool(struct crystalhd_adp *adp, uint32_t pool_size) ++{ ++ uint32_t i; ++ crystalhd_elem_t *temp; ++ ++ if (!adp || !pool_size) ++ return -EINVAL; ++ ++ for (i = 0; i < pool_size; i++) { ++ temp = kzalloc(sizeof(*temp), GFP_KERNEL); ++ if (!temp) { ++ BCMLOG_ERR("kalloc failed \n"); ++ return -ENOMEM; ++ } ++ crystalhd_free_elem(adp, temp); ++ } ++ BCMLOG(BCMLOG_DBG, "allocated %d elem\n", pool_size); ++ return 0; ++} ++ ++/** ++ * crystalhd_delete_elem_pool - List element pool deletion. ++ * @adp: Adapter instance ++ * ++ * Return: ++ * none ++ * ++ * Delete general purpose list element pool. ++ */ ++void crystalhd_delete_elem_pool(struct crystalhd_adp *adp) ++{ ++ crystalhd_elem_t *temp; ++ int dbg_cnt = 0; ++ ++ if (!adp) ++ return; ++ ++ do { ++ temp = crystalhd_alloc_elem(adp); ++ if (temp) { ++ kfree(temp); ++ dbg_cnt++; ++ } ++ } while (temp); ++ ++ BCMLOG(BCMLOG_DBG, "released %d elem\n", dbg_cnt); ++} ++ ++/*================ Debug support routines.. ================================*/ ++void crystalhd_show_buffer(uint32_t off, uint8_t *buff, uint32_t dwcount) ++{ ++ uint32_t i, k = 1; ++ ++ for (i = 0; i < dwcount; i++) { ++ if (k == 1) ++ BCMLOG(BCMLOG_DATA, "0x%08X : ", off); ++ ++ BCMLOG(BCMLOG_DATA, " 0x%08X ", *((uint32_t *)buff)); ++ ++ buff += sizeof(uint32_t); ++ off += sizeof(uint32_t); ++ k++; ++ if ((i == dwcount - 1) || (k > 4)) { ++ BCMLOG(BCMLOG_DATA, "\n"); ++ k = 1; ++ } ++ } ++} +diff --git a/drivers/staging/crystalhd/crystalhd_misc.h b/drivers/staging/crystalhd/crystalhd_misc.h +new file mode 100644 +index 0000000..a2aa6ad +--- /dev/null ++++ b/drivers/staging/crystalhd/crystalhd_misc.h +@@ -0,0 +1,229 @@ ++/*************************************************************************** ++ * Copyright (c) 2005-2009, Broadcom Corporation. ++ * ++ * Name: crystalhd_misc . h ++ * ++ * Description: ++ * BCM70012 Linux driver general purpose routines. ++ * Includes reg/mem read and write routines. ++ * ++ * HISTORY: ++ * ++ ********************************************************************** ++ * This file is part of the crystalhd device driver. ++ * ++ * This driver is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, version 2 of the License. ++ * ++ * This driver is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this driver. 
If not, see . ++ **********************************************************************/ ++ ++#ifndef _CRYSTALHD_MISC_H_ ++#define _CRYSTALHD_MISC_H_ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "bc_dts_glob_lnx.h" ++ ++/* Global log level variable defined in crystal_misc.c file */ ++extern uint32_t g_linklog_level; ++ ++/* Global element pool for all Queue management. ++ * TX: Active = BC_TX_LIST_CNT, Free = BC_TX_LIST_CNT. ++ * RX: Free = BC_RX_LIST_CNT, Active = 2 ++ * FW-CMD: 4 ++ */ ++#define BC_LINK_ELEM_POOL_SZ ((BC_TX_LIST_CNT * 2) + BC_RX_LIST_CNT + 2 + 4) ++ ++/* Driver's IODATA pool count */ ++#define CHD_IODATA_POOL_SZ (BC_IOCTL_DATA_POOL_SIZE * BC_LINK_MAX_OPENS) ++ ++/* Scatter Gather memory pool size for Tx and Rx */ ++#define BC_LINK_SG_POOL_SZ (BC_TX_LIST_CNT + BC_RX_LIST_CNT) ++ ++enum _crystalhd_dio_sig { ++ crystalhd_dio_inv = 0, ++ crystalhd_dio_locked, ++ crystalhd_dio_sg_mapped, ++}; ++ ++struct crystalhd_dio_user_info { ++ void *xfr_buff; ++ uint32_t xfr_len; ++ uint32_t uv_offset; ++ bool dir_tx; ++ ++ uint32_t uv_sg_ix; ++ uint32_t uv_sg_off; ++ int comp_sts; ++ int ev_sts; ++ uint32_t y_done_sz; ++ uint32_t uv_done_sz; ++ uint32_t comp_flags; ++ bool b422mode; ++}; ++ ++typedef struct _crystalhd_dio_req { ++ uint32_t sig; ++ uint32_t max_pages; ++ struct page **pages; ++ struct scatterlist *sg; ++ int sg_cnt; ++ int page_cnt; ++ int direction; ++ struct crystalhd_dio_user_info uinfo; ++ void *fb_va; ++ uint32_t fb_size; ++ dma_addr_t fb_pa; ++ struct _crystalhd_dio_req *next; ++} crystalhd_dio_req; ++ ++#define BC_LINK_DIOQ_SIG (0x09223280) ++ ++typedef struct _crystalhd_elem_s { ++ struct _crystalhd_elem_s *flink; ++ struct _crystalhd_elem_s *blink; ++ void *data; ++ uint32_t tag; ++} crystalhd_elem_t; ++ ++typedef void (*crystalhd_data_free_cb)(void *context, void *data); ++ ++typedef struct _crystalhd_dioq_s { ++ uint32_t sig; ++ struct crystalhd_adp *adp; ++ crystalhd_elem_t *head; ++ crystalhd_elem_t *tail; ++ uint32_t count; ++ spinlock_t lock; ++ wait_queue_head_t event; ++ crystalhd_data_free_cb data_rel_cb; ++ void *cb_context; ++} crystalhd_dioq_t; ++ ++typedef void (*hw_comp_callback)(crystalhd_dio_req *, ++ wait_queue_head_t *event, BC_STATUS sts); ++ ++/*========= Decoder (7412) register access routines.================= */ ++uint32_t bc_dec_reg_rd(struct crystalhd_adp *, uint32_t); ++void bc_dec_reg_wr(struct crystalhd_adp *, uint32_t, uint32_t); ++ ++/*========= Link (70012) register access routines.. =================*/ ++uint32_t crystalhd_reg_rd(struct crystalhd_adp *, uint32_t); ++void crystalhd_reg_wr(struct crystalhd_adp *, uint32_t, uint32_t); ++ ++/*========= Decoder (7412) memory access routines..=================*/ ++BC_STATUS crystalhd_mem_rd(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t *); ++BC_STATUS crystalhd_mem_wr(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t *); ++ ++/*==========Link (70012) PCIe Config access routines.================*/ ++BC_STATUS crystalhd_pci_cfg_rd(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t *); ++BC_STATUS crystalhd_pci_cfg_wr(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t); ++ ++/*========= Linux Kernel Interface routines. 
======================= */ ++void *bc_kern_dma_alloc(struct crystalhd_adp *, uint32_t, dma_addr_t *); ++void bc_kern_dma_free(struct crystalhd_adp *, uint32_t, ++ void *, dma_addr_t); ++#define crystalhd_create_event(_ev) init_waitqueue_head(_ev) ++#define crystalhd_set_event(_ev) wake_up_interruptible(_ev) ++#define crystalhd_wait_on_event(ev, condition, timeout, ret, nosig) \ ++do { \ ++ DECLARE_WAITQUEUE(entry, current); \ ++ unsigned long end = jiffies + ((timeout * HZ) / 1000); \ ++ ret = 0; \ ++ add_wait_queue(ev, &entry); \ ++ for (;;) { \ ++ __set_current_state(TASK_INTERRUPTIBLE); \ ++ if (condition) { \ ++ break; \ ++ } \ ++ if (time_after_eq(jiffies, end)) { \ ++ ret = -EBUSY; \ ++ break; \ ++ } \ ++ schedule_timeout((HZ / 100 > 1) ? HZ / 100 : 1); \ ++ if (!nosig && signal_pending(current)) { \ ++ ret = -EINTR; \ ++ break; \ ++ } \ ++ } \ ++ __set_current_state(TASK_RUNNING); \ ++ remove_wait_queue(ev, &entry); \ ++} while (0) ++ ++/*================ Direct IO mapping routines ==================*/ ++extern int crystalhd_create_dio_pool(struct crystalhd_adp *, uint32_t); ++extern void crystalhd_destroy_dio_pool(struct crystalhd_adp *); ++extern BC_STATUS crystalhd_map_dio(struct crystalhd_adp *, void *, uint32_t, ++ uint32_t, bool, bool, crystalhd_dio_req**); ++ ++extern BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *, crystalhd_dio_req*); ++#define crystalhd_get_sgle_paddr(_dio, _ix) (cpu_to_le64(sg_dma_address(&_dio->sg[_ix]))) ++#define crystalhd_get_sgle_len(_dio, _ix) (cpu_to_le32(sg_dma_len(&_dio->sg[_ix]))) ++ ++/*================ General Purpose Queues ==================*/ ++extern BC_STATUS crystalhd_create_dioq(struct crystalhd_adp *, crystalhd_dioq_t **, crystalhd_data_free_cb , void *); ++extern void crystalhd_delete_dioq(struct crystalhd_adp *, crystalhd_dioq_t *); ++extern BC_STATUS crystalhd_dioq_add(crystalhd_dioq_t *ioq, void *data, bool wake, uint32_t tag); ++extern void *crystalhd_dioq_fetch(crystalhd_dioq_t *ioq); ++extern void *crystalhd_dioq_find_and_fetch(crystalhd_dioq_t *ioq, uint32_t tag); ++extern void *crystalhd_dioq_fetch_wait(crystalhd_dioq_t *ioq, uint32_t to_secs, uint32_t *sig_pend); ++ ++#define crystalhd_dioq_count(_ioq) ((_ioq) ? _ioq->count : 0) ++ ++extern int crystalhd_create_elem_pool(struct crystalhd_adp *, uint32_t); ++extern void crystalhd_delete_elem_pool(struct crystalhd_adp *); ++ ++ ++/*================ Debug routines/macros .. ================================*/ ++extern void crystalhd_show_buffer(uint32_t off, uint8_t *buff, uint32_t dwcount); ++ ++enum _chd_log_levels { ++ BCMLOG_ERROR = 0x80000000, /* Don't disable this option */ ++ BCMLOG_DATA = 0x40000000, /* Data, enable by default */ ++ BCMLOG_SPINLOCK = 0x20000000, /* Spcial case for Spin locks*/ ++ ++ /* Following are allowed only in debug mode */ ++ BCMLOG_INFO = 0x00000001, /* Generic informational */ ++ BCMLOG_DBG = 0x00000002, /* First level Debug info */ ++ BCMLOG_SSTEP = 0x00000004, /* Stepping information */ ++ BCMLOG_ENTER_LEAVE = 0x00000008, /* stack tracking */ ++}; ++ ++#define BCMLOG_ENTER \ ++if (g_linklog_level & BCMLOG_ENTER_LEAVE) { \ ++ printk("Entered %s\n", __func__); \ ++} ++ ++#define BCMLOG_LEAVE \ ++if (g_linklog_level & BCMLOG_ENTER_LEAVE) { \ ++ printk("Leaving %s\n", __func__); \ ++} ++ ++#define BCMLOG(trace, fmt, args...) \ ++if (g_linklog_level & trace) { \ ++ printk(fmt, ##args); \ ++} ++ ++#define BCMLOG_ERR(fmt, args...) 
\ ++do { \ ++ if (g_linklog_level & BCMLOG_ERROR) { \ ++ printk("*ERR*:%s:%d: "fmt, __FILE__, __LINE__, ##args); \ ++ } \ ++} while (0); ++ ++#endif diff --git a/die-floppy-die.patch b/die-floppy-die.patch index 76db312..26beabf 100644 --- a/die-floppy-die.patch +++ b/die-floppy-die.patch @@ -1,30 +1,18 @@ -From 4ff58b642f80dedb20533978123d89b5ac9b1ed5 Mon Sep 17 00:00:00 2001 -From: Kyle McMartin -Date: Tue, 30 Mar 2010 00:04:29 -0400 -Subject: die-floppy-die - Kill the floppy.ko pnp modalias. We were surviving just fine without autoloading floppy drivers, tyvm. Please feel free to register all complaints in the wastepaper bin. ---- - drivers/block/floppy.c | 3 +-- - 1 files changed, 1 insertions(+), 2 deletions(-) diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c -index 90c4038..f4a0b90 100644 +index 91b7530..2ea84a6 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c -@@ -4619,8 +4619,7 @@ static const struct pnp_device_id floppy_pnpids[] = { - {"PNP0700", 0}, - {} +@@ -4631,7 +4631,7 @@ static const struct pnp_device_id floppy_pnpids[] = { + { "PNP0700", 0 }, + { } }; -- -MODULE_DEVICE_TABLE(pnp, floppy_pnpids); +/* MODULE_DEVICE_TABLE(pnp, floppy_pnpids); */ #else --- -1.7.0.1 - diff --git a/direct-io-move-aio_complete-into-end_io.patch b/direct-io-move-aio_complete-into-end_io.patch deleted file mode 100644 index b404871..0000000 --- a/direct-io-move-aio_complete-into-end_io.patch +++ /dev/null @@ -1,204 +0,0 @@ -From: Christoph Hellwig -Date: Sun, 18 Jul 2010 21:17:09 +0000 (+0000) -Subject: direct-io: move aio_complete into ->end_io -X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=40e2e97316af6e62affab7a392e792494b8d9dde - -direct-io: move aio_complete into ->end_io - -Filesystems with unwritten extent support must not complete an AIO request -until the transaction to convert the extent has been commited. That means -the aio_complete calls needs to be moved into the ->end_io callback so -that the filesystem can control when to call it exactly. - -This makes a bit of a mess out of dio_complete and the ->end_io callback -prototype even more complicated. - -Signed-off-by: Christoph Hellwig -Reviewed-by: Jan Kara -Signed-off-by: Alex Elder ---- - -diff --git a/fs/direct-io.c b/fs/direct-io.c -index 7600aac..a10cb91 100644 ---- a/fs/direct-io.c -+++ b/fs/direct-io.c -@@ -218,7 +218,7 @@ static struct page *dio_get_page(struct dio *dio) - * filesystems can use it to hold additional state between get_block calls and - * dio_complete. 
- */ --static int dio_complete(struct dio *dio, loff_t offset, int ret) -+static int dio_complete(struct dio *dio, loff_t offset, int ret, bool is_async) - { - ssize_t transferred = 0; - -@@ -239,14 +239,6 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret) - transferred = dio->i_size - offset; - } - -- if (dio->end_io && dio->result) -- dio->end_io(dio->iocb, offset, transferred, -- dio->map_bh.b_private); -- -- if (dio->flags & DIO_LOCKING) -- /* lockdep: non-owner release */ -- up_read_non_owner(&dio->inode->i_alloc_sem); -- - if (ret == 0) - ret = dio->page_errors; - if (ret == 0) -@@ -254,6 +246,17 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret) - if (ret == 0) - ret = transferred; - -+ if (dio->end_io && dio->result) { -+ dio->end_io(dio->iocb, offset, transferred, -+ dio->map_bh.b_private, ret, is_async); -+ } else if (is_async) { -+ aio_complete(dio->iocb, ret, 0); -+ } -+ -+ if (dio->flags & DIO_LOCKING) -+ /* lockdep: non-owner release */ -+ up_read_non_owner(&dio->inode->i_alloc_sem); -+ - return ret; - } - -@@ -277,8 +280,7 @@ static void dio_bio_end_aio(struct bio *bio, int error) - spin_unlock_irqrestore(&dio->bio_lock, flags); - - if (remaining == 0) { -- int ret = dio_complete(dio, dio->iocb->ki_pos, 0); -- aio_complete(dio->iocb, ret, 0); -+ dio_complete(dio, dio->iocb->ki_pos, 0, true); - kfree(dio); - } - } -@@ -1126,7 +1128,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, - spin_unlock_irqrestore(&dio->bio_lock, flags); - - if (ret2 == 0) { -- ret = dio_complete(dio, offset, ret); -+ ret = dio_complete(dio, offset, ret, false); - kfree(dio); - } else - BUG_ON(ret != -EIOCBQUEUED); -diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c -index 42272d6..0afc8c1 100644 ---- a/fs/ext4/inode.c -+++ b/fs/ext4/inode.c -@@ -3775,7 +3775,8 @@ static ext4_io_end_t *ext4_init_io_end (struct inode *inode, gfp_t flags) - } - - static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, -- ssize_t size, void *private) -+ ssize_t size, void *private, int ret, -+ bool is_async) - { - ext4_io_end_t *io_end = iocb->private; - struct workqueue_struct *wq; -@@ -3784,7 +3785,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, - - /* if not async direct IO or dio with 0 bytes write, just return */ - if (!io_end || !size) -- return; -+ goto out; - - ext_debug("ext4_end_io_dio(): io_end 0x%p" - "for inode %lu, iocb 0x%p, offset %llu, size %llu\n", -@@ -3795,7 +3796,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, - if (io_end->flag != EXT4_IO_UNWRITTEN){ - ext4_free_io_end(io_end); - iocb->private = NULL; -- return; -+ goto out; - } - - io_end->offset = offset; -@@ -3812,6 +3813,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, - list_add_tail(&io_end->list, &ei->i_completed_io_list); - spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); - iocb->private = NULL; -+out: -+ if (is_async) -+ aio_complete(iocb, ret, 0); - } - - static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate) -diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c -index 356e976..96337a4 100644 ---- a/fs/ocfs2/aops.c -+++ b/fs/ocfs2/aops.c -@@ -578,7 +578,9 @@ bail: - static void ocfs2_dio_end_io(struct kiocb *iocb, - loff_t offset, - ssize_t bytes, -- void *private) -+ void *private, -+ int ret, -+ bool is_async) - { - struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; - int level; -@@ -592,6 +594,9 @@ static void ocfs2_dio_end_io(struct kiocb *iocb, - if (!level) - 
up_read(&inode->i_alloc_sem); - ocfs2_rw_unlock(inode, level); -+ -+ if (is_async) -+ aio_complete(iocb, ret, 0); - } - - /* -diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c -index 8abbf05..95d1e26 100644 ---- a/fs/xfs/linux-2.6/xfs_aops.c -+++ b/fs/xfs/linux-2.6/xfs_aops.c -@@ -1406,7 +1406,9 @@ xfs_end_io_direct( - struct kiocb *iocb, - loff_t offset, - ssize_t size, -- void *private) -+ void *private, -+ int ret, -+ bool is_async) - { - xfs_ioend_t *ioend = iocb->private; - -@@ -1452,6 +1454,9 @@ xfs_end_io_direct( - * against double-freeing. - */ - iocb->private = NULL; -+ -+ if (is_async) -+ aio_complete(iocb, ret, 0); - } - - STATIC ssize_t -diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h -index 319da17..c5057fb 100644 ---- a/fs/xfs/linux-2.6/xfs_aops.h -+++ b/fs/xfs/linux-2.6/xfs_aops.h -@@ -37,6 +37,8 @@ typedef struct xfs_ioend { - size_t io_size; /* size of the extent */ - xfs_off_t io_offset; /* offset in the file */ - struct work_struct io_work; /* xfsdatad work queue */ -+ struct kiocb *io_iocb; -+ int io_result; - } xfs_ioend_t; - - extern const struct address_space_operations xfs_address_space_operations; -diff --git a/include/linux/fs.h b/include/linux/fs.h -index 68ca1b0..f91affb 100644 ---- a/include/linux/fs.h -+++ b/include/linux/fs.h -@@ -415,7 +415,8 @@ struct buffer_head; - typedef int (get_block_t)(struct inode *inode, sector_t iblock, - struct buffer_head *bh_result, int create); - typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, -- ssize_t bytes, void *private); -+ ssize_t bytes, void *private, int ret, -+ bool is_async); - - /* - * Attribute flags. These should be or-ed together to figure out what diff --git a/disable-i8042-check-on-apple-mac.patch b/disable-i8042-check-on-apple-mac.patch index f99d0f9..693875b 100644 --- a/disable-i8042-check-on-apple-mac.patch +++ b/disable-i8042-check-on-apple-mac.patch @@ -1,6 +1,6 @@ -From 2a79554c864ac58fa2ad982f0fcee2cc2aa33eb5 Mon Sep 17 00:00:00 2001 +From d01268ff135052cd40c375c6b7ebadbee3281b4d Mon Sep 17 00:00:00 2001 From: Bastien Nocera -Date: Thu, 20 May 2010 10:30:31 -0400 +Date: Wed, 20 Jan 2010 18:23:13 +0000 Subject: Disable i8042 checks on Intel Apple Macs As those computers never had any i8042 controllers, and the @@ -15,10 +15,10 @@ Signed-off-by: Bastien Nocera 1 files changed, 22 insertions(+), 0 deletions(-) diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c -index 6440a8f..4d7cf98 100644 +index b54aee7..5e1e59c 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c -@@ -1451,6 +1451,22 @@ static struct platform_driver i8042_driver = { +@@ -1446,12 +1446,34 @@ static struct platform_driver i8042_driver = { .shutdown = i8042_shutdown, }; @@ -40,8 +40,7 @@ index 6440a8f..4d7cf98 100644 + static int __init i8042_init(void) { - struct platform_device *pdev; -@@ -1458,6 +1474,12 @@ static int __init i8042_init(void) + int err; dbg_init(); diff --git a/drm-1024x768-85.patch b/drm-1024x768-85.patch new file mode 100644 index 0000000..5dacf72 --- /dev/null +++ b/drm-1024x768-85.patch @@ -0,0 +1,29 @@ +From 65a1d7cab62b4f514eaaf608b2f16e26a0e48042 Mon Sep 17 00:00:00 2001 +From: Adam Jackson +Date: Thu, 13 May 2010 14:55:28 -0400 +Subject: [PATCH] drm/edid: Fix 1024x768@85Hz + +Having hsync both start and end on pixel 1072 ain't gonna work very +well. Matches the X server's list. 
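To see why a zero-width sync pulse is fatal, the standalone sketch below applies the usual mode-timing sanity rule -- hdisplay <= hsync_start < hsync_end <= htotal, and likewise vertically -- to the 1024x768@85 entry before and after this fix, and recomputes the ~85 Hz refresh rate from the pixel clock. This is not kernel code: the struct and function names are invented for illustration and the struct is not the kernel's struct drm_display_mode; only the numeric timings are taken from the DMT table entry the hunk below touches.

/*
 * Standalone sketch (illustrative, not kernel code): minimal mode-timing
 * sanity check showing why hsync_start == hsync_end is broken.
 */
#include <stdio.h>

struct mode_timing {
	unsigned int clock;	/* kHz */
	unsigned int hdisplay, hsync_start, hsync_end, htotal;
	unsigned int vdisplay, vsync_start, vsync_end, vtotal;
};

static int timing_sane(const struct mode_timing *m)
{
	/* sync pulse must have non-zero width and fit inside the blanking */
	return m->hdisplay <= m->hsync_start &&
	       m->hsync_start < m->hsync_end &&
	       m->hsync_end <= m->htotal &&
	       m->vdisplay <= m->vsync_start &&
	       m->vsync_start < m->vsync_end &&
	       m->vsync_end <= m->vtotal;
}

int main(void)
{
	/* 1024x768@85 as it appears before and after the fix */
	struct mode_timing broken = { 94500, 1024, 1072, 1072, 1376,
				      768, 769, 772, 808 };
	struct mode_timing fixed  = { 94500, 1024, 1072, 1168, 1376,
				      768, 769, 772, 808 };

	printf("broken entry sane: %d\n", timing_sane(&broken));	/* 0 */
	printf("fixed  entry sane: %d\n", timing_sane(&fixed));	/* 1 */

	/* vertical refresh = pixel clock / (htotal * vtotal) */
	printf("refresh ~ %.1f Hz\n",
	       (double)fixed.clock * 1000.0 /
	       (fixed.htotal * fixed.vtotal));	/* ~85.0 */
	return 0;
}

The broken entry fails only because hsync_start equals hsync_end; every other field already satisfies the rule, which is why bumping hsync_end to 1168 (the value the commit message says matches the X server's list) is the whole fix.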
+ +Signed-off-by: Adam Jackson +--- + drivers/gpu/drm/drm_edid.c | 2 +- + 1 files changed, 1 insertions(+), 1 deletions(-) + +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c +index 18f41d7..10348d3 100644 +--- a/drivers/gpu/drm/drm_edid.c ++++ b/drivers/gpu/drm/drm_edid.c +@@ -335,7 +335,7 @@ static struct drm_display_mode drm_dmt_modes[] = { + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1024x768@85Hz */ + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, +- 1072, 1376, 0, 768, 769, 772, 808, 0, ++ 1168, 1376, 0, 768, 769, 772, 808, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1152x864@75Hz */ + { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, +-- +1.7.0.1 + diff --git a/drm-connection-cache.patch b/drm-connection-cache.patch new file mode 100644 index 0000000..d0c544d --- /dev/null +++ b/drm-connection-cache.patch @@ -0,0 +1,65 @@ +diff -up linux-2.6.30.noarch/drivers/gpu/drm/drm_crtc_helper.c.jx linux-2.6.30.noarch/drivers/gpu/drm/drm_crtc_helper.c +--- linux-2.6.30.noarch/drivers/gpu/drm/drm_crtc_helper.c.jx 2009-09-09 08:57:39.000000000 -0400 ++++ linux-2.6.30.noarch/drivers/gpu/drm/drm_crtc_helper.c 2009-09-09 09:35:24.000000000 -0400 +@@ -92,6 +92,18 @@ int drm_helper_probe_single_connector_mo + + connector->status = connector->funcs->detect(connector); + ++ /* fast path if the driver tracks disconnection */ ++ if (connector->status == connector_status_cached) { ++ DRM_DEBUG_KMS("%s still connected\n", ++ drm_get_connector_name(connector)); ++ list_for_each_entry_safe(mode, t, &connector->modes, head) { ++ count++; ++ mode->status = MODE_OK; ++ } ++ connector->status = connector_status_connected; ++ return count; ++ } ++ + if (connector->status == connector_status_disconnected) { + DRM_DEBUG_KMS("%s is disconnected\n", + drm_get_connector_name(connector)); +diff -up linux-2.6.30.noarch/drivers/gpu/drm/i915/intel_lvds.c.jx linux-2.6.30.noarch/drivers/gpu/drm/i915/intel_lvds.c +--- linux-2.6.30.noarch/drivers/gpu/drm/i915/intel_lvds.c.jx 2009-09-09 08:57:39.000000000 -0400 ++++ linux-2.6.30.noarch/drivers/gpu/drm/i915/intel_lvds.c 2009-09-09 09:56:18.000000000 -0400 +@@ -593,7 +593,14 @@ static void intel_lvds_mode_set(struct d + */ + static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) + { +- return connector_status_connected; ++ static int done; ++ ++ if (!done) { ++ done = 1; ++ return connector_status_connected; ++ } else { ++ return connector_status_cached; ++ } + } + + /** +diff -up linux-2.6.30.noarch/include/drm/drm_crtc.h.jx linux-2.6.30.noarch/include/drm/drm_crtc.h +--- linux-2.6.30.noarch/include/drm/drm_crtc.h.jx 2009-09-09 08:57:39.000000000 -0400 ++++ linux-2.6.30.noarch/include/drm/drm_crtc.h 2009-09-09 09:35:24.000000000 -0400 +@@ -172,6 +172,7 @@ enum drm_connector_status { + connector_status_connected = 1, + connector_status_disconnected = 2, + connector_status_unknown = 3, ++ connector_status_cached = 4, + }; + + enum subpixel_order { +diff -up linux-2.6.30.x86_64/drivers/gpu/drm/drm_crtc.c.jx linux-2.6.30.x86_64/drivers/gpu/drm/drm_crtc.c +--- linux-2.6.30.x86_64/drivers/gpu/drm/drm_crtc.c.jx 2009-09-09 10:10:44.000000000 -0400 ++++ linux-2.6.30.x86_64/drivers/gpu/drm/drm_crtc.c 2009-09-09 11:23:14.000000000 -0400 +@@ -185,7 +185,8 @@ EXPORT_SYMBOL(drm_get_connector_name); + + char *drm_get_connector_status_name(enum drm_connector_status status) + { +- if (status == connector_status_connected) ++ if (status == connector_status_connected || ++ status == 
connector_status_cached) + return "connected"; + else if (status == connector_status_disconnected) + return "disconnected"; diff --git a/drm-core-next.patch b/drm-core-next.patch new file mode 100644 index 0000000..6920c36 --- /dev/null +++ b/drm-core-next.patch @@ -0,0 +1,3494 @@ +diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c +index d91fb8c..aa24f2f 100644 +--- a/drivers/gpu/drm/drm_crtc.c ++++ b/drivers/gpu/drm/drm_crtc.c +@@ -33,6 +33,7 @@ + #include "drm.h" + #include "drmP.h" + #include "drm_crtc.h" ++#include "drm_edid.h" + + struct drm_prop_enum_list { + int type; +@@ -2349,7 +2350,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector, + struct edid *edid) + { + struct drm_device *dev = connector->dev; +- int ret = 0; ++ int ret = 0, size; + + if (connector->edid_blob_ptr) + drm_property_destroy_blob(dev, connector->edid_blob_ptr); +@@ -2361,7 +2362,9 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector, + return ret; + } + +- connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid); ++ size = EDID_LENGTH * (1 + edid->extensions); ++ connector->edid_blob_ptr = drm_property_create_blob(connector->dev, ++ size, edid); + + ret = drm_connector_property_set_value(connector, + dev->mode_config.edid_property, +diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c +index 7d0f00a..51103aa 100644 +--- a/drivers/gpu/drm/drm_crtc_helper.c ++++ b/drivers/gpu/drm/drm_crtc_helper.c +@@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, + if (connector->status == connector_status_disconnected) { + DRM_DEBUG_KMS("%s is disconnected\n", + drm_get_connector_name(connector)); ++ drm_mode_connector_update_edid_property(connector, NULL); + goto prune; + } + +@@ -836,11 +837,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) + mode_changed = true; + } else if (set->fb == NULL) { + mode_changed = true; +- } else if ((set->fb->bits_per_pixel != +- set->crtc->fb->bits_per_pixel) || +- set->fb->depth != set->crtc->fb->depth) +- fb_changed = true; +- else ++ } else + fb_changed = true; + } + +diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c +index 766c468..f3c58e2 100644 +--- a/drivers/gpu/drm/drm_drv.c ++++ b/drivers/gpu/drm/drm_drv.c +@@ -125,28 +125,28 @@ static struct drm_ioctl_desc drm_ioctls[] = { + + DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + +- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0), +- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH), +- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH), +- +- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW), +- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW), +- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW), +- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), +- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER), +- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER), +- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW), +- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW), +- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, 
DRM_MASTER|DRM_CONTROL_ALLOW), +- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), +- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW), +- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), +- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), +- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW), +- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW), +- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW), +- DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), +- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW) ++ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED) + }; + + #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c +index ab6c973..4ea2721 100644 +--- a/drivers/gpu/drm/drm_edid.c ++++ b/drivers/gpu/drm/drm_edid.c +@@ -2,6 +2,7 @@ + * Copyright (c) 2006 Luc Verhaegen (quirks list) + * Copyright (c) 2007-2008 Intel Corporation + * Jesse Barnes ++ * Copyright 2010 Red Hat, Inc. 
+ * + * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from + * FB layer. +@@ -32,10 +33,9 @@ + #include "drmP.h" + #include "drm_edid.h" + +-/* +- * TODO: +- * - support EDID 1.4 (incl. CE blocks) +- */ ++#define EDID_EST_TIMINGS 16 ++#define EDID_STD_TIMINGS 8 ++#define EDID_DETAILED_TIMINGS 4 + + /* + * EDID blocks out in the wild have a variety of bugs, try to collect +@@ -65,7 +65,8 @@ + + #define LEVEL_DMT 0 + #define LEVEL_GTF 1 +-#define LEVEL_CVT 2 ++#define LEVEL_GTF2 2 ++#define LEVEL_CVT 3 + + static struct edid_quirk { + char *vendor; +@@ -85,6 +86,8 @@ static struct edid_quirk { + + /* Envision Peripherals, Inc. EN-7100e */ + { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH }, ++ /* Envision EN2028 */ ++ { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 }, + + /* Funai Electronics PM36B */ + { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | +@@ -107,36 +110,38 @@ static struct edid_quirk { + { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 }, + }; + ++/*** DDC fetch and block validation ***/ + +-/* Valid EDID header has these bytes */ + static const u8 edid_header[] = { + 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 + }; + +-/** +- * edid_is_valid - sanity check EDID data +- * @edid: EDID data +- * +- * Sanity check the EDID block by looking at the header, the version number +- * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's +- * valid. ++/* ++ * Sanity check the EDID block (base or extension). Return 0 if the block ++ * doesn't check out, or 1 if it's valid. + */ +-static bool edid_is_valid(struct edid *edid) ++static bool ++drm_edid_block_valid(u8 *raw_edid) + { +- int i, score = 0; ++ int i; + u8 csum = 0; +- u8 *raw_edid = (u8 *)edid; ++ struct edid *edid = (struct edid *)raw_edid; + +- for (i = 0; i < sizeof(edid_header); i++) +- if (raw_edid[i] == edid_header[i]) +- score++; ++ if (raw_edid[0] == 0x00) { ++ int score = 0; + +- if (score == 8) ; +- else if (score >= 6) { +- DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); +- memcpy(raw_edid, edid_header, sizeof(edid_header)); +- } else +- goto bad; ++ for (i = 0; i < sizeof(edid_header); i++) ++ if (raw_edid[i] == edid_header[i]) ++ score++; ++ ++ if (score == 8) ; ++ else if (score >= 6) { ++ DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); ++ memcpy(raw_edid, edid_header, sizeof(edid_header)); ++ } else { ++ goto bad; ++ } ++ } + + for (i = 0; i < EDID_LENGTH; i++) + csum += raw_edid[i]; +@@ -145,13 +150,21 @@ static bool edid_is_valid(struct edid *edid) + goto bad; + } + +- if (edid->version != 1) { +- DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); +- goto bad; +- } ++ /* per-block-type checks */ ++ switch (raw_edid[0]) { ++ case 0: /* base */ ++ if (edid->version != 1) { ++ DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); ++ goto bad; ++ } + +- if (edid->revision > 4) +- DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); ++ if (edid->revision > 4) ++ DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); ++ break; ++ ++ default: ++ break; ++ } + + return 1; + +@@ -165,6 +178,157 @@ bad: + } + + /** ++ * drm_edid_is_valid - sanity check EDID data ++ * @edid: EDID data ++ * ++ * Sanity-check an entire EDID record (including extensions) ++ */ ++bool drm_edid_is_valid(struct edid *edid) ++{ ++ int i; ++ u8 *raw = (u8 *)edid; ++ ++ if (!edid) ++ return false; ++ ++ for (i = 0; i <= edid->extensions; i++) ++ if (!drm_edid_block_valid(raw + i * EDID_LENGTH)) ++ return false; ++ ++ return true; ++} 
++EXPORT_SYMBOL(drm_edid_is_valid); ++ ++#define DDC_ADDR 0x50 ++#define DDC_SEGMENT_ADDR 0x30 ++/** ++ * Get EDID information via I2C. ++ * ++ * \param adapter : i2c device adaptor ++ * \param buf : EDID data buffer to be filled ++ * \param len : EDID data buffer length ++ * \return 0 on success or -1 on failure. ++ * ++ * Try to fetch EDID information by calling i2c driver function. ++ */ ++static int ++drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, ++ int block, int len) ++{ ++ unsigned char start = block * EDID_LENGTH; ++ struct i2c_msg msgs[] = { ++ { ++ .addr = DDC_ADDR, ++ .flags = 0, ++ .len = 1, ++ .buf = &start, ++ }, { ++ .addr = DDC_ADDR, ++ .flags = I2C_M_RD, ++ .len = len, ++ .buf = buf + start, ++ } ++ }; ++ ++ if (i2c_transfer(adapter, msgs, 2) == 2) ++ return 0; ++ ++ return -1; ++} ++ ++static u8 * ++drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) ++{ ++ int i, j = 0; ++ u8 *block, *new; ++ ++ if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) ++ return NULL; ++ ++ /* base block fetch */ ++ for (i = 0; i < 4; i++) { ++ if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH)) ++ goto out; ++ if (drm_edid_block_valid(block)) ++ break; ++ } ++ if (i == 4) ++ goto carp; ++ ++ /* if there's no extensions, we're done */ ++ if (block[0x7e] == 0) ++ return block; ++ ++ new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL); ++ if (!new) ++ goto out; ++ block = new; ++ ++ for (j = 1; j <= block[0x7e]; j++) { ++ for (i = 0; i < 4; i++) { ++ if (drm_do_probe_ddc_edid(adapter, block, j, ++ EDID_LENGTH)) ++ goto out; ++ if (drm_edid_block_valid(block + j * EDID_LENGTH)) ++ break; ++ } ++ if (i == 4) ++ goto carp; ++ } ++ ++ return block; ++ ++carp: ++ dev_warn(&connector->dev->pdev->dev, "%s: EDID block %d invalid.\n", ++ drm_get_connector_name(connector), j); ++ ++out: ++ kfree(block); ++ return NULL; ++} ++ ++/** ++ * Probe DDC presence. ++ * ++ * \param adapter : i2c device adaptor ++ * \return 1 on success ++ */ ++static bool ++drm_probe_ddc(struct i2c_adapter *adapter) ++{ ++ unsigned char out; ++ ++ return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0); ++} ++ ++/** ++ * drm_get_edid - get EDID data, if available ++ * @connector: connector we're probing ++ * @adapter: i2c adapter to use for DDC ++ * ++ * Poke the given i2c channel to grab EDID data if possible. If found, ++ * attach it to the connector. ++ * ++ * Return edid data or NULL if we couldn't find any. 
++ */ ++struct edid *drm_get_edid(struct drm_connector *connector, ++ struct i2c_adapter *adapter) ++{ ++ struct edid *edid = NULL; ++ ++ if (drm_probe_ddc(adapter)) ++ edid = (struct edid *)drm_do_get_edid(connector, adapter); ++ ++ connector->display_info.raw_edid = (char *)edid; ++ ++ return edid; ++ ++} ++EXPORT_SYMBOL(drm_get_edid); ++ ++/*** EDID parsing ***/ ++ ++/** + * edid_vendor - match a string against EDID's obfuscated vendor field + * @edid: EDID to match + * @vendor: vendor string +@@ -514,6 +678,110 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev, + return mode; + } + ++typedef void detailed_cb(struct detailed_timing *timing, void *closure); ++ ++static void ++drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure) ++{ ++ int i; ++ struct edid *edid = (struct edid *)raw_edid; ++ ++ if (edid == NULL) ++ return; ++ ++ for (i = 0; i < EDID_DETAILED_TIMINGS; i++) ++ cb(&(edid->detailed_timings[i]), closure); ++ ++ /* XXX extension block walk */ ++} ++ ++static void ++is_rb(struct detailed_timing *t, void *data) ++{ ++ u8 *r = (u8 *)t; ++ if (r[3] == EDID_DETAIL_MONITOR_RANGE) ++ if (r[15] & 0x10) ++ *(bool *)data = true; ++} ++ ++/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */ ++static bool ++drm_monitor_supports_rb(struct edid *edid) ++{ ++ if (edid->revision >= 4) { ++ bool ret; ++ drm_for_each_detailed_block((u8 *)edid, is_rb, &ret); ++ return ret; ++ } ++ ++ return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0); ++} ++ ++static void ++find_gtf2(struct detailed_timing *t, void *data) ++{ ++ u8 *r = (u8 *)t; ++ if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02) ++ *(u8 **)data = r; ++} ++ ++/* Secondary GTF curve kicks in above some break frequency */ ++static int ++drm_gtf2_hbreak(struct edid *edid) ++{ ++ u8 *r = NULL; ++ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); ++ return r ? (r[12] * 2) : 0; ++} ++ ++static int ++drm_gtf2_2c(struct edid *edid) ++{ ++ u8 *r = NULL; ++ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); ++ return r ? r[13] : 0; ++} ++ ++static int ++drm_gtf2_m(struct edid *edid) ++{ ++ u8 *r = NULL; ++ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); ++ return r ? (r[15] << 8) + r[14] : 0; ++} ++ ++static int ++drm_gtf2_k(struct edid *edid) ++{ ++ u8 *r = NULL; ++ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); ++ return r ? r[16] : 0; ++} ++ ++static int ++drm_gtf2_2j(struct edid *edid) ++{ ++ u8 *r = NULL; ++ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); ++ return r ? r[17] : 0; ++} ++ ++/** ++ * standard_timing_level - get std. timing level(CVT/GTF/DMT) ++ * @edid: EDID block to scan ++ */ ++static int standard_timing_level(struct edid *edid) ++{ ++ if (edid->revision >= 2) { ++ if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)) ++ return LEVEL_CVT; ++ if (drm_gtf2_hbreak(edid)) ++ return LEVEL_GTF2; ++ return LEVEL_GTF; ++ } ++ return LEVEL_DMT; ++} ++ + /* + * 0 is reserved. The spec says 0x01 fill for unused timings. Some old + * monitors fill with ascii space (0x20) instead. +@@ -533,22 +801,20 @@ bad_std_timing(u8 a, u8 b) + * + * Take the standard timing params (in this case width, aspect, and refresh) + * and convert them into a real mode using CVT/GTF/DMT. +- * +- * Punts for now, but should eventually use the FB layer's CVT based mode +- * generation code. 
+ */ +-struct drm_display_mode *drm_mode_std(struct drm_device *dev, +- struct std_timing *t, +- int revision, +- int timing_level) ++static struct drm_display_mode * ++drm_mode_std(struct drm_connector *connector, struct edid *edid, ++ struct std_timing *t, int revision) + { +- struct drm_display_mode *mode; ++ struct drm_device *dev = connector->dev; ++ struct drm_display_mode *m, *mode = NULL; + int hsize, vsize; + int vrefresh_rate; + unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK) + >> EDID_TIMING_ASPECT_SHIFT; + unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK) + >> EDID_TIMING_VFREQ_SHIFT; ++ int timing_level = standard_timing_level(edid); + + if (bad_std_timing(t->hsize, t->vfreq_aspect)) + return NULL; +@@ -569,16 +835,36 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev, + vsize = (hsize * 4) / 5; + else + vsize = (hsize * 9) / 16; +- /* HDTV hack */ +- if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) { +- mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0, ++ ++ /* HDTV hack, part 1 */ ++ if (vrefresh_rate == 60 && ++ ((hsize == 1360 && vsize == 765) || ++ (hsize == 1368 && vsize == 769))) { ++ hsize = 1366; ++ vsize = 768; ++ } ++ ++ /* ++ * If this connector already has a mode for this size and refresh ++ * rate (because it came from detailed or CVT info), use that ++ * instead. This way we don't have to guess at interlace or ++ * reduced blanking. ++ */ ++ list_for_each_entry(m, &connector->probed_modes, head) ++ if (m->hdisplay == hsize && m->vdisplay == vsize && ++ drm_mode_vrefresh(m) == vrefresh_rate) ++ return NULL; ++ ++ /* HDTV hack, part 2 */ ++ if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) { ++ mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0, + false); + mode->hdisplay = 1366; + mode->vsync_start = mode->vsync_start - 1; + mode->vsync_end = mode->vsync_end - 1; + return mode; + } +- mode = NULL; ++ + /* check whether it can be found in default mode table */ + mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate); + if (mode) +@@ -590,6 +876,23 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev, + case LEVEL_GTF: + mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); + break; ++ case LEVEL_GTF2: ++ /* ++ * This is potentially wrong if there's ever a monitor with ++ * more than one ranges section, each claiming a different ++ * secondary GTF curve. Please don't do that. ++ */ ++ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); ++ if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) { ++ kfree(mode); ++ mode = drm_gtf_mode_complex(dev, hsize, vsize, ++ vrefresh_rate, 0, 0, ++ drm_gtf2_m(edid), ++ drm_gtf2_2c(edid), ++ drm_gtf2_k(edid), ++ drm_gtf2_2j(edid)); ++ } ++ break; + case LEVEL_CVT: + mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0, + false); +@@ -707,25 +1010,16 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, + mode->vsync_end = mode->vsync_start + vsync_pulse_width; + mode->vtotal = mode->vdisplay + vblank; + +- /* perform the basic check for the detailed timing */ +- if (mode->hsync_end > mode->htotal || +- mode->vsync_end > mode->vtotal) { +- drm_mode_destroy(dev, mode); +- DRM_DEBUG_KMS("Incorrect detailed timing. 
" +- "Sync is beyond the blank.\n"); +- return NULL; +- } +- + /* Some EDIDs have bogus h/vtotal values */ + if (mode->hsync_end > mode->htotal) + mode->htotal = mode->hsync_end + 1; + if (mode->vsync_end > mode->vtotal) + mode->vtotal = mode->vsync_end + 1; + +- drm_mode_set_name(mode); +- + drm_mode_do_interlace_quirk(mode, pt); + ++ drm_mode_set_name(mode); ++ + if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { + pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE; + } +@@ -808,10 +1102,6 @@ static struct drm_display_mode edid_est_modes[] = { + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */ + }; + +-#define EDID_EST_TIMINGS 16 +-#define EDID_STD_TIMINGS 8 +-#define EDID_DETAILED_TIMINGS 4 +- + /** + * add_established_modes - get est. modes from EDID and add them + * @edid: EDID block to scan +@@ -839,19 +1129,6 @@ static int add_established_modes(struct drm_connector *connector, struct edid *e + + return modes; + } +-/** +- * stanard_timing_level - get std. timing level(CVT/GTF/DMT) +- * @edid: EDID block to scan +- */ +-static int standard_timing_level(struct edid *edid) +-{ +- if (edid->revision >= 2) { +- if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)) +- return LEVEL_CVT; +- return LEVEL_GTF; +- } +- return LEVEL_DMT; +-} + + /** + * add_standard_modes - get std. modes from EDID and add them +@@ -862,22 +1139,14 @@ static int standard_timing_level(struct edid *edid) + */ + static int add_standard_modes(struct drm_connector *connector, struct edid *edid) + { +- struct drm_device *dev = connector->dev; + int i, modes = 0; +- int timing_level; +- +- timing_level = standard_timing_level(edid); + + for (i = 0; i < EDID_STD_TIMINGS; i++) { +- struct std_timing *t = &edid->standard_timings[i]; + struct drm_display_mode *newmode; + +- /* If std timings bytes are 1, 1 it's empty */ +- if (t->hsize == 1 && t->vfreq_aspect == 1) +- continue; +- +- newmode = drm_mode_std(dev, &edid->standard_timings[i], +- edid->revision, timing_level); ++ newmode = drm_mode_std(connector, edid, ++ &edid->standard_timings[i], ++ edid->revision); + if (newmode) { + drm_mode_probed_add(connector, newmode); + modes++; +@@ -887,36 +1156,86 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid + return modes; + } + +-/* +- * XXX fix this for: +- * - GTF secondary curve formula +- * - EDID 1.4 range offsets +- * - CVT extended bits +- */ + static bool +-mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing) ++mode_is_rb(struct drm_display_mode *mode) + { +- struct detailed_data_monitor_range *range; +- int hsync, vrefresh; +- +- range = &timing->data.other_data.data.range; ++ return (mode->htotal - mode->hdisplay == 160) && ++ (mode->hsync_end - mode->hdisplay == 80) && ++ (mode->hsync_end - mode->hsync_start == 32) && ++ (mode->vsync_start - mode->vdisplay == 3); ++} + ++static bool ++mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t) ++{ ++ int hsync, hmin, hmax; ++ ++ hmin = t[7]; ++ if (edid->revision >= 4) ++ hmin += ((t[4] & 0x04) ? 255 : 0); ++ hmax = t[8]; ++ if (edid->revision >= 4) ++ hmax += ((t[4] & 0x08) ? 
255 : 0); + hsync = drm_mode_hsync(mode); +- vrefresh = drm_mode_vrefresh(mode); + +- if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz) ++ return (hsync <= hmax && hsync >= hmin); ++} ++ ++static bool ++mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t) ++{ ++ int vsync, vmin, vmax; ++ ++ vmin = t[5]; ++ if (edid->revision >= 4) ++ vmin += ((t[4] & 0x01) ? 255 : 0); ++ vmax = t[6]; ++ if (edid->revision >= 4) ++ vmax += ((t[4] & 0x02) ? 255 : 0); ++ vsync = drm_mode_vrefresh(mode); ++ ++ return (vsync <= vmax && vsync >= vmin); ++} ++ ++static u32 ++range_pixel_clock(struct edid *edid, u8 *t) ++{ ++ /* unspecified */ ++ if (t[9] == 0 || t[9] == 255) ++ return 0; ++ ++ /* 1.4 with CVT support gives us real precision, yay */ ++ if (edid->revision >= 4 && t[10] == 0x04) ++ return (t[9] * 10000) - ((t[12] >> 2) * 250); ++ ++ /* 1.3 is pathetic, so fuzz up a bit */ ++ return t[9] * 10000 + 5001; ++} ++ ++static bool ++mode_in_range(struct drm_display_mode *mode, struct edid *edid, ++ struct detailed_timing *timing) ++{ ++ u32 max_clock; ++ u8 *t = (u8 *)timing; ++ ++ if (!mode_in_hsync_range(mode, edid, t)) + return false; + +- if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq) ++ if (!mode_in_vsync_range(mode, edid, t)) + return false; + +- if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) { +- /* be forgiving since it's in units of 10MHz */ +- int max_clock = range->pixel_clock_mhz * 10 + 9; +- max_clock *= 1000; ++ if ((max_clock = range_pixel_clock(edid, t))) + if (mode->clock > max_clock) + return false; +- } ++ ++ /* 1.4 max horizontal check */ ++ if (edid->revision >= 4 && t[10] == 0x04) ++ if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3)))) ++ return false; ++ ++ if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid)) ++ return false; + + return true; + } +@@ -925,15 +1244,16 @@ mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing) + * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will + * need to account for them. 
+ */ +-static int drm_gtf_modes_for_range(struct drm_connector *connector, +- struct detailed_timing *timing) ++static int ++drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid, ++ struct detailed_timing *timing) + { + int i, modes = 0; + struct drm_display_mode *newmode; + struct drm_device *dev = connector->dev; + + for (i = 0; i < drm_num_dmt_modes; i++) { +- if (mode_in_range(drm_dmt_modes + i, timing)) { ++ if (mode_in_range(drm_dmt_modes + i, edid, timing)) { + newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]); + if (newmode) { + drm_mode_probed_add(connector, newmode); +@@ -994,13 +1314,100 @@ static int drm_cvt_modes(struct drm_connector *connector, + return modes; + } + ++static const struct { ++ short w; ++ short h; ++ short r; ++ short rb; ++} est3_modes[] = { ++ /* byte 6 */ ++ { 640, 350, 85, 0 }, ++ { 640, 400, 85, 0 }, ++ { 720, 400, 85, 0 }, ++ { 640, 480, 85, 0 }, ++ { 848, 480, 60, 0 }, ++ { 800, 600, 85, 0 }, ++ { 1024, 768, 85, 0 }, ++ { 1152, 864, 75, 0 }, ++ /* byte 7 */ ++ { 1280, 768, 60, 1 }, ++ { 1280, 768, 60, 0 }, ++ { 1280, 768, 75, 0 }, ++ { 1280, 768, 85, 0 }, ++ { 1280, 960, 60, 0 }, ++ { 1280, 960, 85, 0 }, ++ { 1280, 1024, 60, 0 }, ++ { 1280, 1024, 85, 0 }, ++ /* byte 8 */ ++ { 1360, 768, 60, 0 }, ++ { 1440, 900, 60, 1 }, ++ { 1440, 900, 60, 0 }, ++ { 1440, 900, 75, 0 }, ++ { 1440, 900, 85, 0 }, ++ { 1400, 1050, 60, 1 }, ++ { 1400, 1050, 60, 0 }, ++ { 1400, 1050, 75, 0 }, ++ /* byte 9 */ ++ { 1400, 1050, 85, 0 }, ++ { 1680, 1050, 60, 1 }, ++ { 1680, 1050, 60, 0 }, ++ { 1680, 1050, 75, 0 }, ++ { 1680, 1050, 85, 0 }, ++ { 1600, 1200, 60, 0 }, ++ { 1600, 1200, 65, 0 }, ++ { 1600, 1200, 70, 0 }, ++ /* byte 10 */ ++ { 1600, 1200, 75, 0 }, ++ { 1600, 1200, 85, 0 }, ++ { 1792, 1344, 60, 0 }, ++ { 1792, 1344, 85, 0 }, ++ { 1856, 1392, 60, 0 }, ++ { 1856, 1392, 75, 0 }, ++ { 1920, 1200, 60, 1 }, ++ { 1920, 1200, 60, 0 }, ++ /* byte 11 */ ++ { 1920, 1200, 75, 0 }, ++ { 1920, 1200, 85, 0 }, ++ { 1920, 1440, 60, 0 }, ++ { 1920, 1440, 75, 0 }, ++}; ++static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]); ++ ++static int ++drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing) ++{ ++ int i, j, m, modes = 0; ++ struct drm_display_mode *mode; ++ u8 *est = ((u8 *)timing) + 5; ++ ++ for (i = 0; i < 6; i++) { ++ for (j = 7; j > 0; j--) { ++ m = (i * 8) + (7 - j); ++ if (m > num_est3_modes) ++ break; ++ if (est[i] & (1 << j)) { ++ mode = drm_find_dmt(connector->dev, ++ est3_modes[m].w, ++ est3_modes[m].h, ++ est3_modes[m].r ++ /*, est3_modes[m].rb */); ++ if (mode) { ++ drm_mode_probed_add(connector, mode); ++ modes++; ++ } ++ } ++ } ++ } ++ ++ return modes; ++} ++ + static int add_detailed_modes(struct drm_connector *connector, + struct detailed_timing *timing, + struct edid *edid, u32 quirks, int preferred) + { + int i, modes = 0; + struct detailed_non_pixel *data = &timing->data.other_data; +- int timing_level = standard_timing_level(edid); + int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF); + struct drm_display_mode *newmode; + struct drm_device *dev = connector->dev; +@@ -1021,7 +1428,8 @@ static int add_detailed_modes(struct drm_connector *connector, + switch (data->type) { + case EDID_DETAIL_MONITOR_RANGE: + if (gtf) +- modes += drm_gtf_modes_for_range(connector, timing); ++ modes += drm_gtf_modes_for_range(connector, edid, ++ timing); + break; + case EDID_DETAIL_STD_MODES: + /* Six modes per detailed section */ +@@ -1030,8 +1438,8 @@ static int add_detailed_modes(struct drm_connector 
*connector, + struct drm_display_mode *newmode; + + std = &data->data.timings[i]; +- newmode = drm_mode_std(dev, std, edid->revision, +- timing_level); ++ newmode = drm_mode_std(connector, edid, std, ++ edid->revision); + if (newmode) { + drm_mode_probed_add(connector, newmode); + modes++; +@@ -1041,6 +1449,9 @@ static int add_detailed_modes(struct drm_connector *connector, + case EDID_DETAIL_CVT_3BYTE: + modes += drm_cvt_modes(connector, timing); + break; ++ case EDID_DETAIL_EST_TIMINGS: ++ modes += drm_est3_modes(connector, timing); ++ break; + default: + break; + } +@@ -1064,7 +1475,10 @@ static int add_detailed_info(struct drm_connector *connector, + + for (i = 0; i < EDID_DETAILED_TIMINGS; i++) { + struct detailed_timing *timing = &edid->detailed_timings[i]; +- int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING); ++ int preferred = (i == 0); ++ ++ if (preferred && edid->version == 1 && edid->revision < 4) ++ preferred = (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING); + + /* In 1.0, only timings are allowed */ + if (!timing->pixel_clock && edid->version == 1 && +@@ -1094,39 +1508,23 @@ static int add_detailed_info_eedid(struct drm_connector *connector, + int i, modes = 0; + char *edid_ext = NULL; + struct detailed_timing *timing; +- int edid_ext_num; + int start_offset, end_offset; + int timing_level; + +- if (edid->version == 1 && edid->revision < 3) { +- /* If the EDID version is less than 1.3, there is no +- * extension EDID. +- */ ++ if (edid->version == 1 && edid->revision < 3) + return 0; +- } +- if (!edid->extensions) { +- /* if there is no extension EDID, it is unnecessary to +- * parse the E-EDID to get detailed info +- */ ++ if (!edid->extensions) + return 0; +- } +- +- /* Chose real EDID extension number */ +- edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ? +- MAX_EDID_EXT_NUM : edid->extensions; + + /* Find CEA extension */ +- for (i = 0; i < edid_ext_num; i++) { ++ for (i = 0; i < edid->extensions; i++) { + edid_ext = (char *)edid + EDID_LENGTH * (i + 1); +- /* This block is CEA extension */ + if (edid_ext[0] == 0x02) + break; + } + +- if (i == edid_ext_num) { +- /* if there is no additional timing EDID block, return */ ++ if (i == edid->extensions) + return 0; +- } + + /* Get the start offset of detailed timing block */ + start_offset = edid_ext[2]; +@@ -1150,123 +1548,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector, + return modes; + } + +-#define DDC_ADDR 0x50 +-/** +- * Get EDID information via I2C. +- * +- * \param adapter : i2c device adaptor +- * \param buf : EDID data buffer to be filled +- * \param len : EDID data buffer length +- * \return 0 on success or -1 on failure. +- * +- * Try to fetch EDID information by calling i2c driver function. 
+- */ +-int drm_do_probe_ddc_edid(struct i2c_adapter *adapter, +- unsigned char *buf, int len) +-{ +- unsigned char start = 0x0; +- struct i2c_msg msgs[] = { +- { +- .addr = DDC_ADDR, +- .flags = 0, +- .len = 1, +- .buf = &start, +- }, { +- .addr = DDC_ADDR, +- .flags = I2C_M_RD, +- .len = len, +- .buf = buf, +- } +- }; +- +- if (i2c_transfer(adapter, msgs, 2) == 2) +- return 0; +- +- return -1; +-} +-EXPORT_SYMBOL(drm_do_probe_ddc_edid); +- +-static int drm_ddc_read_edid(struct drm_connector *connector, +- struct i2c_adapter *adapter, +- char *buf, int len) +-{ +- int i; +- +- for (i = 0; i < 4; i++) { +- if (drm_do_probe_ddc_edid(adapter, buf, len)) +- return -1; +- if (edid_is_valid((struct edid *)buf)) +- return 0; +- } +- +- /* repeated checksum failures; warn, but carry on */ +- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", +- drm_get_connector_name(connector)); +- return -1; +-} +- +-/** +- * drm_get_edid - get EDID data, if available +- * @connector: connector we're probing +- * @adapter: i2c adapter to use for DDC +- * +- * Poke the given connector's i2c channel to grab EDID data if possible. +- * +- * Return edid data or NULL if we couldn't find any. +- */ +-struct edid *drm_get_edid(struct drm_connector *connector, +- struct i2c_adapter *adapter) +-{ +- int ret; +- struct edid *edid; +- +- edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1), +- GFP_KERNEL); +- if (edid == NULL) { +- dev_warn(&connector->dev->pdev->dev, +- "Failed to allocate EDID\n"); +- goto end; +- } +- +- /* Read first EDID block */ +- ret = drm_ddc_read_edid(connector, adapter, +- (unsigned char *)edid, EDID_LENGTH); +- if (ret != 0) +- goto clean_up; +- +- /* There are EDID extensions to be read */ +- if (edid->extensions != 0) { +- int edid_ext_num = edid->extensions; +- +- if (edid_ext_num > MAX_EDID_EXT_NUM) { +- dev_warn(&connector->dev->pdev->dev, +- "The number of extension(%d) is " +- "over max (%d), actually read number (%d)\n", +- edid_ext_num, MAX_EDID_EXT_NUM, +- MAX_EDID_EXT_NUM); +- /* Reset EDID extension number to be read */ +- edid_ext_num = MAX_EDID_EXT_NUM; +- } +- /* Read EDID including extensions too */ +- ret = drm_ddc_read_edid(connector, adapter, (char *)edid, +- EDID_LENGTH * (edid_ext_num + 1)); +- if (ret != 0) +- goto clean_up; +- +- } +- +- connector->display_info.raw_edid = (char *)edid; +- goto end; +- +-clean_up: +- kfree(edid); +- edid = NULL; +-end: +- return edid; +- +-} +-EXPORT_SYMBOL(drm_get_edid); +- + #define HDMI_IDENTIFIER 0x000C03 + #define VENDOR_BLOCK 0x03 + /** +@@ -1279,7 +1560,7 @@ EXPORT_SYMBOL(drm_get_edid); + bool drm_detect_hdmi_monitor(struct edid *edid) + { + char *edid_ext = NULL; +- int i, hdmi_id, edid_ext_num; ++ int i, hdmi_id; + int start_offset, end_offset; + bool is_hdmi = false; + +@@ -1287,19 +1568,15 @@ bool drm_detect_hdmi_monitor(struct edid *edid) + if (edid == NULL || edid->extensions == 0) + goto end; + +- /* Chose real EDID extension number */ +- edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ? 
+- MAX_EDID_EXT_NUM : edid->extensions; +- + /* Find CEA extension */ +- for (i = 0; i < edid_ext_num; i++) { ++ for (i = 0; i < edid->extensions; i++) { + edid_ext = (char *)edid + EDID_LENGTH * (i + 1); + /* This block is CEA extension */ + if (edid_ext[0] == 0x02) + break; + } + +- if (i == edid_ext_num) ++ if (i == edid->extensions) + goto end; + + /* Data block offset in CEA extension block */ +@@ -1346,7 +1623,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) + if (edid == NULL) { + return 0; + } +- if (!edid_is_valid(edid)) { ++ if (!drm_edid_is_valid(edid)) { + dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", + drm_get_connector_name(connector)); + return 0; +@@ -1354,10 +1631,24 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) + + quirks = edid_get_quirks(edid); + +- num_modes += add_established_modes(connector, edid); +- num_modes += add_standard_modes(connector, edid); ++ /* ++ * EDID spec says modes should be preferred in this order: ++ * - preferred detailed mode ++ * - other detailed modes from base block ++ * - detailed modes from extension blocks ++ * - CVT 3-byte code modes ++ * - standard timing codes ++ * - established timing codes ++ * - modes inferred from GTF or CVT range information ++ * ++ * We don't quite implement this yet, but we're close. ++ * ++ * XXX order for additional mode types in extension blocks? ++ */ + num_modes += add_detailed_info(connector, edid, quirks); + num_modes += add_detailed_info_eedid(connector, edid, quirks); ++ num_modes += add_standard_modes(connector, edid); ++ num_modes += add_established_modes(connector, edid); + + if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) + edid_fixup_preferred(connector, quirks); +diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c +index 0f9e905..da258ca 100644 +--- a/drivers/gpu/drm/drm_fb_helper.c ++++ b/drivers/gpu/drm/drm_fb_helper.c +@@ -27,6 +27,7 @@ + * Dave Airlie + * Jesse Barnes + */ ++#include + #include + #include + #include "drmP.h" +@@ -50,21 +51,6 @@ int drm_fb_helper_add_connector(struct drm_connector *connector) + } + EXPORT_SYMBOL(drm_fb_helper_add_connector); + +-static int my_atoi(const char *name) +-{ +- int val = 0; +- +- for (;; name++) { +- switch (*name) { +- case '0' ... 
'9': +- val = 10*val+(*name-'0'); +- break; +- default: +- return val; +- } +- } +-} +- + /** + * drm_fb_helper_connector_parse_command_line - parse command line for connector + * @connector - connector to parse line for +@@ -111,7 +97,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con + namelen = i; + if (!refresh_specified && !bpp_specified && + !yres_specified) { +- refresh = my_atoi(&name[i+1]); ++ refresh = simple_strtol(&name[i+1], NULL, 10); + refresh_specified = 1; + if (cvt || rb) + cvt = 0; +@@ -121,7 +107,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con + case '-': + namelen = i; + if (!bpp_specified && !yres_specified) { +- bpp = my_atoi(&name[i+1]); ++ bpp = simple_strtol(&name[i+1], NULL, 10); + bpp_specified = 1; + if (cvt || rb) + cvt = 0; +@@ -130,7 +116,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con + break; + case 'x': + if (!yres_specified) { +- yres = my_atoi(&name[i+1]); ++ yres = simple_strtol(&name[i+1], NULL, 10); + yres_specified = 1; + } else + goto done; +@@ -170,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con + } + } + if (i < 0 && yres_specified) { +- xres = my_atoi(name); ++ xres = simple_strtol(name, NULL, 10); + res_specified = 1; + } + done: +@@ -297,6 +283,8 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { + .help_msg = "force-fb(V)", + .action_msg = "Restore framebuffer console", + }; ++#else ++static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { }; + #endif + + static void drm_fb_helper_on(struct fb_info *info) +diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c +index 08d14df..4804872 100644 +--- a/drivers/gpu/drm/drm_fops.c ++++ b/drivers/gpu/drm/drm_fops.c +@@ -140,14 +140,16 @@ int drm_open(struct inode *inode, struct file *filp) + spin_unlock(&dev->count_lock); + } + out: +- mutex_lock(&dev->struct_mutex); +- if (minor->type == DRM_MINOR_LEGACY) { +- BUG_ON((dev->dev_mapping != NULL) && +- (dev->dev_mapping != inode->i_mapping)); +- if (dev->dev_mapping == NULL) +- dev->dev_mapping = inode->i_mapping; ++ if (!retcode) { ++ mutex_lock(&dev->struct_mutex); ++ if (minor->type == DRM_MINOR_LEGACY) { ++ if (dev->dev_mapping == NULL) ++ dev->dev_mapping = inode->i_mapping; ++ else if (dev->dev_mapping != inode->i_mapping) ++ retcode = -ENODEV; ++ } ++ mutex_unlock(&dev->struct_mutex); + } +- mutex_unlock(&dev->struct_mutex); + + return retcode; + } +diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c +index 8bf3770..aa89d4b 100644 +--- a/drivers/gpu/drm/drm_gem.c ++++ b/drivers/gpu/drm/drm_gem.c +@@ -192,9 +192,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle) + idr_remove(&filp->object_idr, handle); + spin_unlock(&filp->table_lock); + +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_handle_unreference(obj); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_handle_unreference_unlocked(obj); + + return 0; + } +@@ -325,9 +323,7 @@ again: + } + + err: +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(obj); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(obj); + return ret; + } + +@@ -358,9 +354,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data, + return -ENOENT; + + ret = drm_gem_handle_create(file_priv, obj, &handle); +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(obj); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(obj); + if 
(ret) + return ret; + +@@ -390,7 +384,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) + { + struct drm_gem_object *obj = ptr; + +- drm_gem_object_handle_unreference(obj); ++ drm_gem_object_handle_unreference_unlocked(obj); + + return 0; + } +@@ -403,16 +397,25 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) + void + drm_gem_release(struct drm_device *dev, struct drm_file *file_private) + { +- mutex_lock(&dev->struct_mutex); + idr_for_each(&file_private->object_idr, + &drm_gem_object_release_handle, NULL); + + idr_destroy(&file_private->object_idr); +- mutex_unlock(&dev->struct_mutex); ++} ++ ++static void ++drm_gem_object_free_common(struct drm_gem_object *obj) ++{ ++ struct drm_device *dev = obj->dev; ++ fput(obj->filp); ++ atomic_dec(&dev->object_count); ++ atomic_sub(obj->size, &dev->object_memory); ++ kfree(obj); + } + + /** + * Called after the last reference to the object has been lost. ++ * Must be called holding struct_ mutex + * + * Frees the object + */ +@@ -427,14 +430,40 @@ drm_gem_object_free(struct kref *kref) + if (dev->driver->gem_free_object != NULL) + dev->driver->gem_free_object(obj); + +- fput(obj->filp); +- atomic_dec(&dev->object_count); +- atomic_sub(obj->size, &dev->object_memory); +- kfree(obj); ++ drm_gem_object_free_common(obj); + } + EXPORT_SYMBOL(drm_gem_object_free); + + /** ++ * Called after the last reference to the object has been lost. ++ * Must be called without holding struct_mutex ++ * ++ * Frees the object ++ */ ++void ++drm_gem_object_free_unlocked(struct kref *kref) ++{ ++ struct drm_gem_object *obj = (struct drm_gem_object *) kref; ++ struct drm_device *dev = obj->dev; ++ ++ if (dev->driver->gem_free_object_unlocked != NULL) ++ dev->driver->gem_free_object_unlocked(obj); ++ else if (dev->driver->gem_free_object != NULL) { ++ mutex_lock(&dev->struct_mutex); ++ dev->driver->gem_free_object(obj); ++ mutex_unlock(&dev->struct_mutex); ++ } ++ ++ drm_gem_object_free_common(obj); ++} ++EXPORT_SYMBOL(drm_gem_object_free_unlocked); ++ ++static void drm_gem_object_ref_bug(struct kref *list_kref) ++{ ++ BUG(); ++} ++ ++/** + * Called after the last handle to the object has been closed + * + * Removes any name for the object. Note that this must be +@@ -458,8 +487,10 @@ drm_gem_object_handle_free(struct kref *kref) + /* + * The object name held a reference to this object, drop + * that now. ++ * ++ * This cannot be the last reference, since the handle holds one too. + */ +- drm_gem_object_unreference(obj); ++ kref_put(&obj->refcount, drm_gem_object_ref_bug); + } else + spin_unlock(&dev->object_name_lock); + +@@ -477,11 +508,8 @@ EXPORT_SYMBOL(drm_gem_vm_open); + void drm_gem_vm_close(struct vm_area_struct *vma) + { + struct drm_gem_object *obj = vma->vm_private_data; +- struct drm_device *dev = obj->dev; + +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(obj); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(obj); + } + EXPORT_SYMBOL(drm_gem_vm_close); + +diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c +index 76d6339..f1f473e 100644 +--- a/drivers/gpu/drm/drm_modes.c ++++ b/drivers/gpu/drm/drm_modes.c +@@ -258,8 +258,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, + drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP; + /* 18/16. 
Find actual vertical frame frequency */ + /* ignore - just set the mode flag for interlaced */ +- if (interlaced) ++ if (interlaced) { + drm_mode->vtotal *= 2; ++ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE; ++ } + /* Fill the mode line name */ + drm_mode_set_name(drm_mode); + if (reduced) +@@ -268,43 +270,35 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, + else + drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC | + DRM_MODE_FLAG_NHSYNC); +- if (interlaced) +- drm_mode->flags |= DRM_MODE_FLAG_INTERLACE; + +- return drm_mode; ++ return drm_mode; + } + EXPORT_SYMBOL(drm_cvt_mode); + + /** +- * drm_gtf_mode - create the modeline based on GTF algorithm ++ * drm_gtf_mode_complex - create the modeline based on full GTF algorithm + * + * @dev :drm device + * @hdisplay :hdisplay size + * @vdisplay :vdisplay size + * @vrefresh :vrefresh rate. + * @interlaced :whether the interlace is supported +- * @margins :whether the margin is supported ++ * @margins :desired margin size ++ * @GTF_[MCKJ] :extended GTF formula parameters + * + * LOCKING. + * none. + * +- * return the modeline based on GTF algorithm +- * +- * This function is to create the modeline based on the GTF algorithm. +- * Generalized Timing Formula is derived from: +- * GTF Spreadsheet by Andy Morrish (1/5/97) +- * available at http://www.vesa.org ++ * return the modeline based on full GTF algorithm. + * +- * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c. +- * What I have done is to translate it by using integer calculation. +- * I also refer to the function of fb_get_mode in the file of +- * drivers/video/fbmon.c ++ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them ++ * in here multiplied by two. For a C of 40, pass in 80. + */ +-struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, +- int vdisplay, int vrefresh, +- bool interlaced, int margins) +-{ +- /* 1) top/bottom margin size (% of height) - default: 1.8, */ ++struct drm_display_mode * ++drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay, ++ int vrefresh, bool interlaced, int margins, ++ int GTF_M, int GTF_2C, int GTF_K, int GTF_2J) ++{ /* 1) top/bottom margin size (% of height) - default: 1.8, */ + #define GTF_MARGIN_PERCENTAGE 18 + /* 2) character cell horizontal granularity (pixels) - default 8 */ + #define GTF_CELL_GRAN 8 +@@ -316,17 +310,9 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, + #define H_SYNC_PERCENT 8 + /* min time of vsync + back porch (microsec) */ + #define MIN_VSYNC_PLUS_BP 550 +- /* blanking formula gradient */ +-#define GTF_M 600 +- /* blanking formula offset */ +-#define GTF_C 40 +- /* blanking formula scaling factor */ +-#define GTF_K 128 +- /* blanking formula scaling factor */ +-#define GTF_J 20 + /* C' and M' are part of the Blanking Duty Cycle computation */ +-#define GTF_C_PRIME (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J) +-#define GTF_M_PRIME (GTF_K * GTF_M / 256) ++#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2) ++#define GTF_M_PRIME (GTF_K * GTF_M / 256) + struct drm_display_mode *drm_mode; + unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd; + int top_margin, bottom_margin; +@@ -460,17 +446,61 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, + + drm_mode->clock = pixel_freq; + +- drm_mode_set_name(drm_mode); +- drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC; +- + if (interlaced) { + drm_mode->vtotal *= 2; + drm_mode->flags |= 
DRM_MODE_FLAG_INTERLACE; + } + ++ drm_mode_set_name(drm_mode); ++ if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40) ++ drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC; ++ else ++ drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC; ++ + return drm_mode; + } ++EXPORT_SYMBOL(drm_gtf_mode_complex); ++ ++/** ++ * drm_gtf_mode - create the modeline based on GTF algorithm ++ * ++ * @dev :drm device ++ * @hdisplay :hdisplay size ++ * @vdisplay :vdisplay size ++ * @vrefresh :vrefresh rate. ++ * @interlaced :whether the interlace is supported ++ * @margins :whether the margin is supported ++ * ++ * LOCKING. ++ * none. ++ * ++ * return the modeline based on GTF algorithm ++ * ++ * This function is to create the modeline based on the GTF algorithm. ++ * Generalized Timing Formula is derived from: ++ * GTF Spreadsheet by Andy Morrish (1/5/97) ++ * available at http://www.vesa.org ++ * ++ * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c. ++ * What I have done is to translate it by using integer calculation. ++ * I also refer to the function of fb_get_mode in the file of ++ * drivers/video/fbmon.c ++ * ++ * Standard GTF parameters: ++ * M = 600 ++ * C = 40 ++ * K = 128 ++ * J = 20 ++ */ ++struct drm_display_mode * ++drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, ++ bool lace, int margins) ++{ ++ return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace, ++ margins, 600, 40 * 2, 128, 20 * 2); ++} + EXPORT_SYMBOL(drm_gtf_mode); ++ + /** + * drm_mode_set_name - set the name on a mode + * @mode: name will be set in this mode +@@ -482,8 +512,11 @@ EXPORT_SYMBOL(drm_gtf_mode); + */ + void drm_mode_set_name(struct drm_display_mode *mode) + { +- snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay, +- mode->vdisplay); ++ bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); ++ ++ snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s", ++ mode->hdisplay, mode->vdisplay, ++ interlaced ? 
"i" : ""); + } + EXPORT_SYMBOL(drm_mode_set_name); + +diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c +index 7e42b7e..9721513 100644 +--- a/drivers/gpu/drm/drm_sysfs.c ++++ b/drivers/gpu/drm/drm_sysfs.c +@@ -334,7 +334,7 @@ static struct device_attribute connector_attrs_opt1[] = { + static struct bin_attribute edid_attr = { + .attr.name = "edid", + .attr.mode = 0444, +- .size = 128, ++ .size = 0, + .read = edid_show, + }; + +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index fd099a1..1c0cc9e 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -128,9 +128,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, + return -ENOMEM; + + ret = drm_gem_handle_create(file_priv, obj, &handle); +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_handle_unreference(obj); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_handle_unreference_unlocked(obj); + + if (ret) + return ret; +@@ -488,7 +486,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, + */ + if (args->offset > obj->size || args->size > obj->size || + args->offset + args->size > obj->size) { +- drm_gem_object_unreference(obj); ++ drm_gem_object_unreference_unlocked(obj); + return -EINVAL; + } + +@@ -501,7 +499,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, + file_priv); + } + +- drm_gem_object_unreference(obj); ++ drm_gem_object_unreference_unlocked(obj); + + return ret; + } +@@ -961,7 +959,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, + */ + if (args->offset > obj->size || args->size > obj->size || + args->offset + args->size > obj->size) { +- drm_gem_object_unreference(obj); ++ drm_gem_object_unreference_unlocked(obj); + return -EINVAL; + } + +@@ -995,7 +993,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, + DRM_INFO("pwrite failed %d\n", ret); + #endif + +- drm_gem_object_unreference(obj); ++ drm_gem_object_unreference_unlocked(obj); + + return ret; + } +@@ -1138,9 +1136,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, + PROT_READ | PROT_WRITE, MAP_SHARED, + args->offset); + up_write(¤t->mm->mmap_sem); +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(obj); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(obj); + if (IS_ERR((void *)addr)) + return addr; + +diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c +index df278b2..137e888 100644 +--- a/drivers/gpu/drm/i915/i915_gem_tiling.c ++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c +@@ -438,9 +438,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, + obj_priv = obj->driver_private; + + if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(obj); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(obj); + return -EINVAL; + } + +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +index b27202d..c8fd15f 100644 +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -3553,11 +3553,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, + intel_crtc->cursor_bo = bo; + + return 0; +-fail: +- mutex_lock(&dev->struct_mutex); + fail_locked: +- drm_gem_object_unreference(bo); + mutex_unlock(&dev->struct_mutex); ++fail: ++ drm_gem_object_unreference_unlocked(bo); + return ret; + } + +@@ -4476,9 +4475,7 @@ static void intel_user_framebuffer_destroy(struct 
drm_framebuffer *fb) + intelfb_remove(dev, fb); + + drm_framebuffer_cleanup(fb); +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(intel_fb->obj); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(intel_fb->obj); + + kfree(intel_fb); + } +@@ -4541,9 +4538,7 @@ intel_user_framebuffer_create(struct drm_device *dev, + + ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); + if (ret) { +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(obj); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(obj); + return NULL; + } + +diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c +index 63f569b..f8887f6 100644 +--- a/drivers/gpu/drm/i915/intel_overlay.c ++++ b/drivers/gpu/drm/i915/intel_overlay.c +@@ -1183,8 +1183,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, + out_unlock: + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->mode_config.mutex); +- drm_gem_object_unreference(new_bo); + out_free: ++ drm_gem_object_unreference_unlocked(new_bo); + kfree(params); + + return ret; +diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c +index dfc9439..cf1c5c0 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_display.c ++++ b/drivers/gpu/drm/nouveau/nouveau_display.c +@@ -39,11 +39,8 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) + if (drm_fb->fbdev) + nouveau_fbcon_remove(dev, drm_fb); + +- if (fb->nvbo) { +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(fb->nvbo->gem); +- mutex_unlock(&dev->struct_mutex); +- } ++ if (fb->nvbo) ++ drm_gem_object_unreference_unlocked(fb->nvbo->gem); + + drm_framebuffer_cleanup(drm_fb); + kfree(fb); +diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c +index ea879a2..d48c59c 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c ++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c +@@ -401,10 +401,8 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb) + + unregister_framebuffer(info); + nouveau_bo_unmap(nouveau_fb->nvbo); +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(nouveau_fb->nvbo->gem); ++ drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); + nouveau_fb->nvbo = NULL; +- mutex_unlock(&dev->struct_mutex); + if (par) + drm_fb_helper_free(&par->helper); + framebuffer_release(info); +diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c +index 70cc308..34063c5 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_gem.c ++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c +@@ -167,12 +167,10 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, + + ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); + out: +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_handle_unreference(nvbo->gem); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_handle_unreference_unlocked(nvbo->gem); + + if (ret) +- drm_gem_object_unreference(nvbo->gem); ++ drm_gem_object_unreference_unlocked(nvbo->gem); + return ret; + } + +@@ -865,9 +863,7 @@ nouveau_gem_ioctl_pin(struct drm_device *dev, void *data, + req->domain = NOUVEAU_GEM_DOMAIN_VRAM; + + out: +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(gem); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(gem); + + return ret; + } +@@ -891,9 +887,7 @@ nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data, + + ret = 
nouveau_bo_unpin(nouveau_gem_object(gem)); + +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(gem); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(gem); + + return ret; + } +@@ -935,9 +929,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, + } + + out: +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(gem); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(gem); + return ret; + } + +@@ -965,9 +957,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data, + ret = 0; + + out: +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(gem); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(gem); + return ret; + } + +@@ -986,9 +976,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data, + return -EINVAL; + + ret = nouveau_gem_info(gem, req); +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(gem); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(gem); + return ret; + } + +diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c +index d99dc08..9537f3e 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c ++++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c +@@ -61,11 +61,8 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) + + chan->notifier_bo = ntfy; + out_err: +- if (ret) { +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(ntfy->gem); +- mutex_unlock(&dev->struct_mutex); +- } ++ if (ret) ++ drm_gem_object_unreference_unlocked(ntfy->gem); + + return ret; + } +@@ -81,8 +78,8 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan) + nouveau_bo_unmap(chan->notifier_bo); + mutex_lock(&dev->struct_mutex); + nouveau_bo_unpin(chan->notifier_bo); +- drm_gem_object_unreference(chan->notifier_bo->gem); + mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); + nouveau_mem_takedown(&chan->notifier_heap); + } + +diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c +index d2f143e..a1d1ebb 100644 +--- a/drivers/gpu/drm/nouveau/nv04_crtc.c ++++ b/drivers/gpu/drm/nouveau/nv04_crtc.c +@@ -926,9 +926,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, + nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset); + nv_crtc->cursor.show(nv_crtc, true); + out: +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(gem); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(gem); + return ret; + } + +diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c +index d1a651e..cfabeb9 100644 +--- a/drivers/gpu/drm/nouveau/nv50_crtc.c ++++ b/drivers/gpu/drm/nouveau/nv50_crtc.c +@@ -358,9 +358,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, + nv_crtc->cursor.show(nv_crtc, true); + + out: +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(gem); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(gem); + return ret; + } + +diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c +index e9d0850..70ba02e 100644 +--- a/drivers/gpu/drm/radeon/radeon_cs.c ++++ b/drivers/gpu/drm/radeon/radeon_cs.c +@@ -194,11 +194,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) + radeon_bo_list_unreserve(&parser->validated); + if (parser->relocs != NULL) { + for (i = 0; i < 
parser->nrelocs; i++) { +- if (parser->relocs[i].gobj) { +- mutex_lock(&parser->rdev->ddev->struct_mutex); +- drm_gem_object_unreference(parser->relocs[i].gobj); +- mutex_unlock(&parser->rdev->ddev->struct_mutex); +- } ++ if (parser->relocs[i].gobj) ++ drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); + } + } + kfree(parser->track); +diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c +index 28772a3..6f4a553 100644 +--- a/drivers/gpu/drm/radeon/radeon_cursor.c ++++ b/drivers/gpu/drm/radeon/radeon_cursor.c +@@ -169,17 +169,13 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, + unpin: + if (radeon_crtc->cursor_bo) { + radeon_gem_object_unpin(radeon_crtc->cursor_bo); +- mutex_lock(&crtc->dev->struct_mutex); +- drm_gem_object_unreference(radeon_crtc->cursor_bo); +- mutex_unlock(&crtc->dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo); + } + + radeon_crtc->cursor_bo = obj; + return 0; + fail: +- mutex_lock(&crtc->dev->struct_mutex); +- drm_gem_object_unreference(obj); +- mutex_unlock(&crtc->dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(obj); + + return 0; + } +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c +index 7e17a36..3db8255 100644 +--- a/drivers/gpu/drm/radeon/radeon_display.c ++++ b/drivers/gpu/drm/radeon/radeon_display.c +@@ -679,11 +679,8 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) + if (fb->fbdev) + radeonfb_remove(dev, fb); + +- if (radeon_fb->obj) { +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(radeon_fb->obj); +- mutex_unlock(&dev->struct_mutex); +- } ++ if (radeon_fb->obj) ++ drm_gem_object_unreference_unlocked(radeon_fb->obj); + drm_framebuffer_cleanup(fb); + kfree(radeon_fb); + } +diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c +index db8e9a3..ef92d14 100644 +--- a/drivers/gpu/drm/radeon/radeon_gem.c ++++ b/drivers/gpu/drm/radeon/radeon_gem.c +@@ -69,9 +69,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, + if (r != -ERESTARTSYS) + DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", + size, initial_domain, alignment, r); +- mutex_lock(&rdev->ddev->struct_mutex); +- drm_gem_object_unreference(gobj); +- mutex_unlock(&rdev->ddev->struct_mutex); ++ drm_gem_object_unreference_unlocked(gobj); + return r; + } + gobj->driver_private = robj; +@@ -202,14 +200,10 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, + } + r = drm_gem_handle_create(filp, gobj, &handle); + if (r) { +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(gobj); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(gobj); + return r; + } +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_handle_unreference(gobj); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_handle_unreference_unlocked(gobj); + args->handle = handle; + return 0; + } +@@ -236,9 +230,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, + + r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); + +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(gobj); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(gobj); + return r; + } + +@@ -255,9 +247,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, + } + robj = gobj->driver_private; + args->addr_ptr = radeon_bo_mmap_offset(robj); +- mutex_lock(&dev->struct_mutex); +- 
drm_gem_object_unreference(gobj); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(gobj); + return 0; + } + +@@ -288,9 +278,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, + default: + break; + } +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(gobj); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(gobj); + return r; + } + +@@ -311,9 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, + /* callback hw specific functions if any */ + if (robj->rdev->asic->ioctl_wait_idle) + robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(gobj); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(gobj); + return r; + } + +@@ -331,9 +317,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, + return -EINVAL; + robj = gobj->driver_private; + r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(gobj); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(gobj); + return r; + } + +@@ -356,8 +340,6 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, + radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); + radeon_bo_unreserve(rbo); + out: +- mutex_lock(&dev->struct_mutex); +- drm_gem_object_unreference(gobj); +- mutex_unlock(&dev->struct_mutex); ++ drm_gem_object_unreference_unlocked(gobj); + return r; + } +diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile +index 1e138f5..4256e20 100644 +--- a/drivers/gpu/drm/ttm/Makefile ++++ b/drivers/gpu/drm/ttm/Makefile +@@ -4,6 +4,6 @@ + ccflags-y := -Iinclude/drm + ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ + ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \ +- ttm_object.o ttm_lock.o ttm_execbuf_util.o ++ ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o + + obj-$(CONFIG_DRM_TTM) += ttm.o +diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c +index c7320ce..9db02bb 100644 +--- a/drivers/gpu/drm/ttm/ttm_bo.c ++++ b/drivers/gpu/drm/ttm/ttm_bo.c +@@ -1425,8 +1425,8 @@ int ttm_bo_global_init(struct ttm_global_reference *ref) + + atomic_set(&glob->bo_count, 0); + +- kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type); +- ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects"); ++ ret = kobject_init_and_add( ++ &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects"); + if (unlikely(ret != 0)) + kobject_put(&glob->kobj); + return ret; +diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c +index f5245c0..4057a17 100644 +--- a/drivers/gpu/drm/ttm/ttm_memory.c ++++ b/drivers/gpu/drm/ttm/ttm_memory.c +@@ -27,6 +27,7 @@ + + #include "ttm/ttm_memory.h" + #include "ttm/ttm_module.h" ++#include "ttm/ttm_page_alloc.h" + #include + #include + #include +@@ -260,8 +261,8 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, + zone->used_mem = 0; + zone->glob = glob; + glob->zone_kernel = zone; +- kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); +- ret = kobject_add(&zone->kobj, &glob->kobj, zone->name); ++ ret = kobject_init_and_add( ++ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); + if (unlikely(ret != 0)) { + kobject_put(&zone->kobj); + return ret; +@@ -296,8 +297,8 @@ static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, + zone->used_mem = 0; + zone->glob = glob; 
+ glob->zone_highmem = zone; +- kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); +- ret = kobject_add(&zone->kobj, &glob->kobj, zone->name); ++ ret = kobject_init_and_add( ++ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); + if (unlikely(ret != 0)) { + kobject_put(&zone->kobj); + return ret; +@@ -343,8 +344,8 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob, + zone->used_mem = 0; + zone->glob = glob; + glob->zone_dma32 = zone; +- kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); +- ret = kobject_add(&zone->kobj, &glob->kobj, zone->name); ++ ret = kobject_init_and_add( ++ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); + if (unlikely(ret != 0)) { + kobject_put(&zone->kobj); + return ret; +@@ -365,10 +366,8 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) + glob->swap_queue = create_singlethread_workqueue("ttm_swap"); + INIT_WORK(&glob->work, ttm_shrink_work); + init_waitqueue_head(&glob->queue); +- kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type); +- ret = kobject_add(&glob->kobj, +- ttm_get_kobj(), +- "memory_accounting"); ++ ret = kobject_init_and_add( ++ &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting"); + if (unlikely(ret != 0)) { + kobject_put(&glob->kobj); + return ret; +@@ -394,6 +393,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) + "Zone %7s: Available graphics memory: %llu kiB.\n", + zone->name, (unsigned long long) zone->max_mem >> 10); + } ++ ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); + return 0; + out_no_zone: + ttm_mem_global_release(glob); +@@ -406,6 +406,9 @@ void ttm_mem_global_release(struct ttm_mem_global *glob) + unsigned int i; + struct ttm_mem_zone *zone; + ++ /* let the page allocator first stop the shrink work. */ ++ ttm_page_alloc_fini(); ++ + flush_workqueue(glob->swap_queue); + destroy_workqueue(glob->swap_queue); + glob->swap_queue = NULL; +@@ -413,7 +416,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob) + zone = glob->zones[i]; + kobject_del(&zone->kobj); + kobject_put(&zone->kobj); +- } ++ } + kobject_del(&glob->kobj); + kobject_put(&glob->kobj); + } +diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c +new file mode 100644 +index 0000000..03509f8 +--- /dev/null ++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c +@@ -0,0 +1,855 @@ ++/* ++ * Copyright (c) Red Hat Inc. ++ ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sub license, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: Dave Airlie ++ * Jerome Glisse ++ * Pauli Nieminen ++ */ ++ ++/* simple list based uncached page pool ++ * - Pool collects resently freed pages for reuse ++ * - Use page->lru to keep a free list ++ * - doesn't track currently in use pages ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include /* for seq_printf */ ++#include ++ ++#include ++#include ++ ++#include "ttm/ttm_bo_driver.h" ++#include "ttm/ttm_page_alloc.h" ++ ++ ++#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) ++#define SMALL_ALLOCATION 16 ++#define FREE_ALL_PAGES (~0U) ++/* times are in msecs */ ++#define PAGE_FREE_INTERVAL 1000 ++ ++/** ++ * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages. ++ * ++ * @lock: Protects the shared pool from concurrnet access. Must be used with ++ * irqsave/irqrestore variants because pool allocator maybe called from ++ * delayed work. ++ * @fill_lock: Prevent concurrent calls to fill. ++ * @list: Pool of free uc/wc pages for fast reuse. ++ * @gfp_flags: Flags to pass for alloc_page. ++ * @npages: Number of pages in pool. ++ */ ++struct ttm_page_pool { ++ spinlock_t lock; ++ bool fill_lock; ++ struct list_head list; ++ int gfp_flags; ++ unsigned npages; ++ char *name; ++ unsigned long nfrees; ++ unsigned long nrefills; ++}; ++ ++/** ++ * Limits for the pool. They are handled without locks because only place where ++ * they may change is in sysfs store. They won't have immediate effect anyway ++ * so forcing serialiazation to access them is pointless. ++ */ ++ ++struct ttm_pool_opts { ++ unsigned alloc_size; ++ unsigned max_size; ++ unsigned small; ++}; ++ ++#define NUM_POOLS 4 ++ ++/** ++ * struct ttm_pool_manager - Holds memory pools for fst allocation ++ * ++ * Manager is read only object for pool code so it doesn't need locking. ++ * ++ * @free_interval: minimum number of jiffies between freeing pages from pool. ++ * @page_alloc_inited: reference counting for pool allocation. ++ * @work: Work that is used to shrink the pool. Work is only run when there is ++ * some pages to free. ++ * @small_allocation: Limit in number of pages what is small allocation. ++ * ++ * @pools: All pool objects in use. 
++ **/ ++struct ttm_pool_manager { ++ struct kobject kobj; ++ struct shrinker mm_shrink; ++ atomic_t page_alloc_inited; ++ struct ttm_pool_opts options; ++ ++ union { ++ struct ttm_page_pool pools[NUM_POOLS]; ++ struct { ++ struct ttm_page_pool wc_pool; ++ struct ttm_page_pool uc_pool; ++ struct ttm_page_pool wc_pool_dma32; ++ struct ttm_page_pool uc_pool_dma32; ++ } ; ++ }; ++}; ++ ++static struct attribute ttm_page_pool_max = { ++ .name = "pool_max_size", ++ .mode = S_IRUGO | S_IWUSR ++}; ++static struct attribute ttm_page_pool_small = { ++ .name = "pool_small_allocation", ++ .mode = S_IRUGO | S_IWUSR ++}; ++static struct attribute ttm_page_pool_alloc_size = { ++ .name = "pool_allocation_size", ++ .mode = S_IRUGO | S_IWUSR ++}; ++ ++static struct attribute *ttm_pool_attrs[] = { ++ &ttm_page_pool_max, ++ &ttm_page_pool_small, ++ &ttm_page_pool_alloc_size, ++ NULL ++}; ++ ++static void ttm_pool_kobj_release(struct kobject *kobj) ++{ ++ struct ttm_pool_manager *m = ++ container_of(kobj, struct ttm_pool_manager, kobj); ++ (void)m; ++} ++ ++static ssize_t ttm_pool_store(struct kobject *kobj, ++ struct attribute *attr, const char *buffer, size_t size) ++{ ++ struct ttm_pool_manager *m = ++ container_of(kobj, struct ttm_pool_manager, kobj); ++ int chars; ++ unsigned val; ++ chars = sscanf(buffer, "%u", &val); ++ if (chars == 0) ++ return size; ++ ++ /* Convert kb to number of pages */ ++ val = val / (PAGE_SIZE >> 10); ++ ++ if (attr == &ttm_page_pool_max) ++ m->options.max_size = val; ++ else if (attr == &ttm_page_pool_small) ++ m->options.small = val; ++ else if (attr == &ttm_page_pool_alloc_size) { ++ if (val > NUM_PAGES_TO_ALLOC*8) { ++ printk(KERN_ERR "[ttm] Setting allocation size to %lu " ++ "is not allowed. Recomended size is " ++ "%lu\n", ++ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), ++ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); ++ return size; ++ } else if (val > NUM_PAGES_TO_ALLOC) { ++ printk(KERN_WARNING "[ttm] Setting allocation size to " ++ "larger than %lu is not recomended.\n", ++ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); ++ } ++ m->options.alloc_size = val; ++ } ++ ++ return size; ++} ++ ++static ssize_t ttm_pool_show(struct kobject *kobj, ++ struct attribute *attr, char *buffer) ++{ ++ struct ttm_pool_manager *m = ++ container_of(kobj, struct ttm_pool_manager, kobj); ++ unsigned val = 0; ++ ++ if (attr == &ttm_page_pool_max) ++ val = m->options.max_size; ++ else if (attr == &ttm_page_pool_small) ++ val = m->options.small; ++ else if (attr == &ttm_page_pool_alloc_size) ++ val = m->options.alloc_size; ++ ++ val = val * (PAGE_SIZE >> 10); ++ ++ return snprintf(buffer, PAGE_SIZE, "%u\n", val); ++} ++ ++static const struct sysfs_ops ttm_pool_sysfs_ops = { ++ .show = &ttm_pool_show, ++ .store = &ttm_pool_store, ++}; ++ ++static struct kobj_type ttm_pool_kobj_type = { ++ .release = &ttm_pool_kobj_release, ++ .sysfs_ops = &ttm_pool_sysfs_ops, ++ .default_attrs = ttm_pool_attrs, ++}; ++ ++static struct ttm_pool_manager _manager = { ++ .page_alloc_inited = ATOMIC_INIT(0) ++}; ++ ++#ifdef CONFIG_X86 ++/* TODO: add this to x86 like _uc, this version here is inefficient */ ++static int set_pages_array_wc(struct page **pages, int addrinarray) ++{ ++ int i; ++ ++ for (i = 0; i < addrinarray; i++) ++ set_memory_wc((unsigned long)page_address(pages[i]), 1); ++ return 0; ++} ++#else ++static int set_pages_array_wb(struct page **pages, int addrinarray) ++{ ++#ifdef TTM_HAS_AGP ++ int i; ++ ++ for (i = 0; i < addrinarray; i++) ++ unmap_page_from_agp(pages[i]); ++#endif ++ return 0; ++} ++ ++static int 
set_pages_array_wc(struct page **pages, int addrinarray) ++{ ++#ifdef TTM_HAS_AGP ++ int i; ++ ++ for (i = 0; i < addrinarray; i++) ++ map_page_into_agp(pages[i]); ++#endif ++ return 0; ++} ++ ++static int set_pages_array_uc(struct page **pages, int addrinarray) ++{ ++#ifdef TTM_HAS_AGP ++ int i; ++ ++ for (i = 0; i < addrinarray; i++) ++ map_page_into_agp(pages[i]); ++#endif ++ return 0; ++} ++#endif ++ ++/** ++ * Select the right pool or requested caching state and ttm flags. */ ++static struct ttm_page_pool *ttm_get_pool(int flags, ++ enum ttm_caching_state cstate) ++{ ++ int pool_index; ++ ++ if (cstate == tt_cached) ++ return NULL; ++ ++ if (cstate == tt_wc) ++ pool_index = 0x0; ++ else ++ pool_index = 0x1; ++ ++ if (flags & TTM_PAGE_FLAG_DMA32) ++ pool_index |= 0x2; ++ ++ return &_manager.pools[pool_index]; ++} ++ ++/* set memory back to wb and free the pages. */ ++static void ttm_pages_put(struct page *pages[], unsigned npages) ++{ ++ unsigned i; ++ if (set_pages_array_wb(pages, npages)) ++ printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n", ++ npages); ++ for (i = 0; i < npages; ++i) ++ __free_page(pages[i]); ++} ++ ++static void ttm_pool_update_free_locked(struct ttm_page_pool *pool, ++ unsigned freed_pages) ++{ ++ pool->npages -= freed_pages; ++ pool->nfrees += freed_pages; ++} ++ ++/** ++ * Free pages from pool. ++ * ++ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC ++ * number of pages in one go. ++ * ++ * @pool: to free the pages from ++ * @free_all: If set to true will free all pages in pool ++ **/ ++static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free) ++{ ++ unsigned long irq_flags; ++ struct page *p; ++ struct page **pages_to_free; ++ unsigned freed_pages = 0, ++ npages_to_free = nr_free; ++ ++ if (NUM_PAGES_TO_ALLOC < nr_free) ++ npages_to_free = NUM_PAGES_TO_ALLOC; ++ ++ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), ++ GFP_KERNEL); ++ if (!pages_to_free) { ++ printk(KERN_ERR "Failed to allocate memory for pool free operation.\n"); ++ return 0; ++ } ++ ++restart: ++ spin_lock_irqsave(&pool->lock, irq_flags); ++ ++ list_for_each_entry_reverse(p, &pool->list, lru) { ++ if (freed_pages >= npages_to_free) ++ break; ++ ++ pages_to_free[freed_pages++] = p; ++ /* We can only remove NUM_PAGES_TO_ALLOC at a time. */ ++ if (freed_pages >= NUM_PAGES_TO_ALLOC) { ++ /* remove range of pages from the pool */ ++ __list_del(p->lru.prev, &pool->list); ++ ++ ttm_pool_update_free_locked(pool, freed_pages); ++ /** ++ * Because changing page caching is costly ++ * we unlock the pool to prevent stalling. ++ */ ++ spin_unlock_irqrestore(&pool->lock, irq_flags); ++ ++ ttm_pages_put(pages_to_free, freed_pages); ++ if (likely(nr_free != FREE_ALL_PAGES)) ++ nr_free -= freed_pages; ++ ++ if (NUM_PAGES_TO_ALLOC >= nr_free) ++ npages_to_free = nr_free; ++ else ++ npages_to_free = NUM_PAGES_TO_ALLOC; ++ ++ freed_pages = 0; ++ ++ /* free all so restart the processing */ ++ if (nr_free) ++ goto restart; ++ ++ /* Not allowed to fall tough or break because ++ * following context is inside spinlock while we are ++ * outside here. 
++ */ ++ goto out; ++ ++ } ++ } ++ ++ /* remove range of pages from the pool */ ++ if (freed_pages) { ++ __list_del(&p->lru, &pool->list); ++ ++ ttm_pool_update_free_locked(pool, freed_pages); ++ nr_free -= freed_pages; ++ } ++ ++ spin_unlock_irqrestore(&pool->lock, irq_flags); ++ ++ if (freed_pages) ++ ttm_pages_put(pages_to_free, freed_pages); ++out: ++ kfree(pages_to_free); ++ return nr_free; ++} ++ ++/* Get good estimation how many pages are free in pools */ ++static int ttm_pool_get_num_unused_pages(void) ++{ ++ unsigned i; ++ int total = 0; ++ for (i = 0; i < NUM_POOLS; ++i) ++ total += _manager.pools[i].npages; ++ ++ return total; ++} ++ ++/** ++ * Calback for mm to request pool to reduce number of page held. ++ */ ++static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask) ++{ ++ static atomic_t start_pool = ATOMIC_INIT(0); ++ unsigned i; ++ unsigned pool_offset = atomic_add_return(1, &start_pool); ++ struct ttm_page_pool *pool; ++ ++ pool_offset = pool_offset % NUM_POOLS; ++ /* select start pool in round robin fashion */ ++ for (i = 0; i < NUM_POOLS; ++i) { ++ unsigned nr_free = shrink_pages; ++ if (shrink_pages == 0) ++ break; ++ pool = &_manager.pools[(i + pool_offset)%NUM_POOLS]; ++ shrink_pages = ttm_page_pool_free(pool, nr_free); ++ } ++ /* return estimated number of unused pages in pool */ ++ return ttm_pool_get_num_unused_pages(); ++} ++ ++static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager) ++{ ++ manager->mm_shrink.shrink = &ttm_pool_mm_shrink; ++ manager->mm_shrink.seeks = 1; ++ register_shrinker(&manager->mm_shrink); ++} ++ ++static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager) ++{ ++ unregister_shrinker(&manager->mm_shrink); ++} ++ ++static int ttm_set_pages_caching(struct page **pages, ++ enum ttm_caching_state cstate, unsigned cpages) ++{ ++ int r = 0; ++ /* Set page caching */ ++ switch (cstate) { ++ case tt_uncached: ++ r = set_pages_array_uc(pages, cpages); ++ if (r) ++ printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n", ++ cpages); ++ break; ++ case tt_wc: ++ r = set_pages_array_wc(pages, cpages); ++ if (r) ++ printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n", ++ cpages); ++ break; ++ default: ++ break; ++ } ++ return r; ++} ++ ++/** ++ * Free pages the pages that failed to change the caching state. If there is ++ * any pages that have changed their caching state already put them to the ++ * pool. ++ */ ++static void ttm_handle_caching_state_failure(struct list_head *pages, ++ int ttm_flags, enum ttm_caching_state cstate, ++ struct page **failed_pages, unsigned cpages) ++{ ++ unsigned i; ++ /* Failed pages has to be reed */ ++ for (i = 0; i < cpages; ++i) { ++ list_del(&failed_pages[i]->lru); ++ __free_page(failed_pages[i]); ++ } ++} ++ ++/** ++ * Allocate new pages with correct caching. ++ * ++ * This function is reentrant if caller updates count depending on number of ++ * pages returned in pages array. 
++ */ ++static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, ++ int ttm_flags, enum ttm_caching_state cstate, unsigned count) ++{ ++ struct page **caching_array; ++ struct page *p; ++ int r = 0; ++ unsigned i, cpages; ++ unsigned max_cpages = min(count, ++ (unsigned)(PAGE_SIZE/sizeof(struct page *))); ++ ++ /* allocate array for page caching change */ ++ caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); ++ ++ if (!caching_array) { ++ printk(KERN_ERR "[ttm] unable to allocate table for new pages."); ++ return -ENOMEM; ++ } ++ ++ for (i = 0, cpages = 0; i < count; ++i) { ++ p = alloc_page(gfp_flags); ++ ++ if (!p) { ++ printk(KERN_ERR "[ttm] unable to get page %u\n", i); ++ ++ /* store already allocated pages in the pool after ++ * setting the caching state */ ++ if (cpages) { ++ r = ttm_set_pages_caching(caching_array, cstate, cpages); ++ if (r) ++ ttm_handle_caching_state_failure(pages, ++ ttm_flags, cstate, ++ caching_array, cpages); ++ } ++ r = -ENOMEM; ++ goto out; ++ } ++ ++#ifdef CONFIG_HIGHMEM ++ /* gfp flags of highmem page should never be dma32 so we ++ * we should be fine in such case ++ */ ++ if (!PageHighMem(p)) ++#endif ++ { ++ caching_array[cpages++] = p; ++ if (cpages == max_cpages) { ++ ++ r = ttm_set_pages_caching(caching_array, ++ cstate, cpages); ++ if (r) { ++ ttm_handle_caching_state_failure(pages, ++ ttm_flags, cstate, ++ caching_array, cpages); ++ goto out; ++ } ++ cpages = 0; ++ } ++ } ++ ++ list_add(&p->lru, pages); ++ } ++ ++ if (cpages) { ++ r = ttm_set_pages_caching(caching_array, cstate, cpages); ++ if (r) ++ ttm_handle_caching_state_failure(pages, ++ ttm_flags, cstate, ++ caching_array, cpages); ++ } ++out: ++ kfree(caching_array); ++ ++ return r; ++} ++ ++/** ++ * Fill the given pool if there isn't enough pages and requested number of ++ * pages is small. ++ */ ++static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, ++ int ttm_flags, enum ttm_caching_state cstate, unsigned count, ++ unsigned long *irq_flags) ++{ ++ struct page *p; ++ int r; ++ unsigned cpages = 0; ++ /** ++ * Only allow one pool fill operation at a time. ++ * If pool doesn't have enough pages for the allocation new pages are ++ * allocated from outside of pool. ++ */ ++ if (pool->fill_lock) ++ return; ++ ++ pool->fill_lock = true; ++ ++ /* If allocation request is small and there is not enough ++ * pages in pool we fill the pool first */ ++ if (count < _manager.options.small ++ && count > pool->npages) { ++ struct list_head new_pages; ++ unsigned alloc_size = _manager.options.alloc_size; ++ ++ /** ++ * Can't change page caching if in irqsave context. We have to ++ * drop the pool->lock. ++ */ ++ spin_unlock_irqrestore(&pool->lock, *irq_flags); ++ ++ INIT_LIST_HEAD(&new_pages); ++ r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags, ++ cstate, alloc_size); ++ spin_lock_irqsave(&pool->lock, *irq_flags); ++ ++ if (!r) { ++ list_splice(&new_pages, &pool->list); ++ ++pool->nrefills; ++ pool->npages += alloc_size; ++ } else { ++ printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool); ++ /* If we have any pages left put them to the pool. */ ++ list_for_each_entry(p, &pool->list, lru) { ++ ++cpages; ++ } ++ list_splice(&new_pages, &pool->list); ++ pool->npages += cpages; ++ } ++ ++ } ++ pool->fill_lock = false; ++} ++ ++/** ++ * Cut count nubmer of pages from the pool and put them to return list ++ * ++ * @return count of pages still to allocate to fill the request. 
++ */ ++static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, ++ struct list_head *pages, int ttm_flags, ++ enum ttm_caching_state cstate, unsigned count) ++{ ++ unsigned long irq_flags; ++ struct list_head *p; ++ unsigned i; ++ ++ spin_lock_irqsave(&pool->lock, irq_flags); ++ ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags); ++ ++ if (count >= pool->npages) { ++ /* take all pages from the pool */ ++ list_splice_init(&pool->list, pages); ++ count -= pool->npages; ++ pool->npages = 0; ++ goto out; ++ } ++ /* find the last pages to include for requested number of pages. Split ++ * pool to begin and halves to reduce search space. */ ++ if (count <= pool->npages/2) { ++ i = 0; ++ list_for_each(p, &pool->list) { ++ if (++i == count) ++ break; ++ } ++ } else { ++ i = pool->npages + 1; ++ list_for_each_prev(p, &pool->list) { ++ if (--i == count) ++ break; ++ } ++ } ++ /* Cut count number of pages from pool */ ++ list_cut_position(pages, &pool->list, p); ++ pool->npages -= count; ++ count = 0; ++out: ++ spin_unlock_irqrestore(&pool->lock, irq_flags); ++ return count; ++} ++ ++/* ++ * On success pages list will hold count number of correctly ++ * cached pages. ++ */ ++int ttm_get_pages(struct list_head *pages, int flags, ++ enum ttm_caching_state cstate, unsigned count) ++{ ++ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); ++ struct page *p = NULL; ++ int gfp_flags = 0; ++ int r; ++ ++ /* set zero flag for page allocation if required */ ++ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) ++ gfp_flags |= __GFP_ZERO; ++ ++ /* No pool for cached pages */ ++ if (pool == NULL) { ++ if (flags & TTM_PAGE_FLAG_DMA32) ++ gfp_flags |= GFP_DMA32; ++ else ++ gfp_flags |= __GFP_HIGHMEM; ++ ++ for (r = 0; r < count; ++r) { ++ p = alloc_page(gfp_flags); ++ if (!p) { ++ ++ printk(KERN_ERR "[ttm] unable to allocate page."); ++ return -ENOMEM; ++ } ++ ++ list_add(&p->lru, pages); ++ } ++ return 0; ++ } ++ ++ ++ /* combine zero flag to pool flags */ ++ gfp_flags |= pool->gfp_flags; ++ ++ /* First we take pages from the pool */ ++ count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count); ++ ++ /* clear the pages coming from the pool if requested */ ++ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) { ++ list_for_each_entry(p, pages, lru) { ++ clear_page(page_address(p)); ++ } ++ } ++ ++ /* If pool didn't have enough pages allocate new one. */ ++ if (count > 0) { ++ /* ttm_alloc_new_pages doesn't reference pool so we can run ++ * multiple requests in parallel. ++ **/ ++ r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count); ++ if (r) { ++ /* If there is any pages in the list put them back to ++ * the pool. 
*/ ++ printk(KERN_ERR "[ttm] Failed to allocate extra pages " ++ "for large request."); ++ ttm_put_pages(pages, 0, flags, cstate); ++ return r; ++ } ++ } ++ ++ ++ return 0; ++} ++ ++/* Put all pages in pages list to correct pool to wait for reuse */ ++void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags, ++ enum ttm_caching_state cstate) ++{ ++ unsigned long irq_flags; ++ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); ++ struct page *p, *tmp; ++ ++ if (pool == NULL) { ++ /* No pool for this memory type so free the pages */ ++ ++ list_for_each_entry_safe(p, tmp, pages, lru) { ++ __free_page(p); ++ } ++ /* Make the pages list empty */ ++ INIT_LIST_HEAD(pages); ++ return; ++ } ++ if (page_count == 0) { ++ list_for_each_entry_safe(p, tmp, pages, lru) { ++ ++page_count; ++ } ++ } ++ ++ spin_lock_irqsave(&pool->lock, irq_flags); ++ list_splice_init(pages, &pool->list); ++ pool->npages += page_count; ++ /* Check that we don't go over the pool limit */ ++ page_count = 0; ++ if (pool->npages > _manager.options.max_size) { ++ page_count = pool->npages - _manager.options.max_size; ++ /* free at least NUM_PAGES_TO_ALLOC number of pages ++ * to reduce calls to set_memory_wb */ ++ if (page_count < NUM_PAGES_TO_ALLOC) ++ page_count = NUM_PAGES_TO_ALLOC; ++ } ++ spin_unlock_irqrestore(&pool->lock, irq_flags); ++ if (page_count) ++ ttm_page_pool_free(pool, page_count); ++} ++ ++static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags, ++ char *name) ++{ ++ spin_lock_init(&pool->lock); ++ pool->fill_lock = false; ++ INIT_LIST_HEAD(&pool->list); ++ pool->npages = pool->nfrees = 0; ++ pool->gfp_flags = flags; ++ pool->name = name; ++} ++ ++int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) ++{ ++ int ret; ++ if (atomic_add_return(1, &_manager.page_alloc_inited) > 1) ++ return 0; ++ ++ printk(KERN_INFO "[ttm] Initializing pool allocator.\n"); ++ ++ ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc"); ++ ++ ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc"); ++ ++ ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32, ++ "wc dma"); ++ ++ ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32, ++ "uc dma"); ++ ++ _manager.options.max_size = max_pages; ++ _manager.options.small = SMALL_ALLOCATION; ++ _manager.options.alloc_size = NUM_PAGES_TO_ALLOC; ++ ++ kobject_init(&_manager.kobj, &ttm_pool_kobj_type); ++ ret = kobject_add(&_manager.kobj, &glob->kobj, "pool"); ++ if (unlikely(ret != 0)) { ++ kobject_put(&_manager.kobj); ++ return ret; ++ } ++ ++ ttm_pool_mm_shrink_init(&_manager); ++ ++ return 0; ++} ++ ++void ttm_page_alloc_fini() ++{ ++ int i; ++ ++ if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0) ++ return; ++ ++ printk(KERN_INFO "[ttm] Finilizing pool allocator.\n"); ++ ttm_pool_mm_shrink_fini(&_manager); ++ ++ for (i = 0; i < NUM_POOLS; ++i) ++ ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES); ++ ++ kobject_put(&_manager.kobj); ++} ++ ++int ttm_page_alloc_debugfs(struct seq_file *m, void *data) ++{ ++ struct ttm_page_pool *p; ++ unsigned i; ++ char *h[] = {"pool", "refills", "pages freed", "size"}; ++ if (atomic_read(&_manager.page_alloc_inited) == 0) { ++ seq_printf(m, "No pool allocator running.\n"); ++ return 0; ++ } ++ seq_printf(m, "%6s %12s %13s %8s\n", ++ h[0], h[1], h[2], h[3]); ++ for (i = 0; i < NUM_POOLS; ++i) { ++ p = &_manager.pools[i]; ++ ++ seq_printf(m, "%6s %12ld %13ld %8d\n", ++ p->name, p->nrefills, ++ p->nfrees, p->npages); ++ } ++ 
return 0; ++} ++EXPORT_SYMBOL(ttm_page_alloc_debugfs); +diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c +index a759170..a3269ef 100644 +--- a/drivers/gpu/drm/ttm/ttm_tt.c ++++ b/drivers/gpu/drm/ttm/ttm_tt.c +@@ -28,65 +28,34 @@ + * Authors: Thomas Hellstrom + */ + +-#include + #include + #include + #include + #include + #include + #include "drm_cache.h" ++#include "drm_mem_util.h" + #include "ttm/ttm_module.h" + #include "ttm/ttm_bo_driver.h" + #include "ttm/ttm_placement.h" ++#include "ttm/ttm_page_alloc.h" + + static int ttm_tt_swapin(struct ttm_tt *ttm); + + /** + * Allocates storage for pointers to the pages that back the ttm. +- * +- * Uses kmalloc if possible. Otherwise falls back to vmalloc. + */ + static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) + { +- unsigned long size = ttm->num_pages * sizeof(*ttm->pages); +- ttm->pages = NULL; +- +- if (size <= PAGE_SIZE) +- ttm->pages = kzalloc(size, GFP_KERNEL); +- +- if (!ttm->pages) { +- ttm->pages = vmalloc_user(size); +- if (ttm->pages) +- ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC; +- } ++ ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages)); + } + + static void ttm_tt_free_page_directory(struct ttm_tt *ttm) + { +- if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) { +- vfree(ttm->pages); +- ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC; +- } else { +- kfree(ttm->pages); +- } ++ drm_free_large(ttm->pages); + ttm->pages = NULL; + } + +-static struct page *ttm_tt_alloc_page(unsigned page_flags) +-{ +- gfp_t gfp_flags = GFP_USER; +- +- if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) +- gfp_flags |= __GFP_ZERO; +- +- if (page_flags & TTM_PAGE_FLAG_DMA32) +- gfp_flags |= __GFP_DMA32; +- else +- gfp_flags |= __GFP_HIGHMEM; +- +- return alloc_page(gfp_flags); +-} +- + static void ttm_tt_free_user_pages(struct ttm_tt *ttm) + { + int write; +@@ -127,15 +96,21 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm) + static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index) + { + struct page *p; ++ struct list_head h; + struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; + int ret; + + while (NULL == (p = ttm->pages[index])) { +- p = ttm_tt_alloc_page(ttm->page_flags); + +- if (!p) ++ INIT_LIST_HEAD(&h); ++ ++ ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1); ++ ++ if (ret != 0) + return NULL; + ++ p = list_first_entry(&h, struct page, lru); ++ + ret = ttm_mem_global_alloc_page(mem_glob, p, false, false); + if (unlikely(ret != 0)) + goto out_err; +@@ -244,10 +219,10 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm, + if (ttm->caching_state == c_state) + return 0; + +- if (c_state != tt_cached) { +- ret = ttm_tt_populate(ttm); +- if (unlikely(ret != 0)) +- return ret; ++ if (ttm->state == tt_unpopulated) { ++ /* Change caching but don't populate */ ++ ttm->caching_state = c_state; ++ return 0; + } + + if (ttm->caching_state == tt_cached) +@@ -298,13 +273,17 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching); + static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) + { + int i; ++ unsigned count = 0; ++ struct list_head h; + struct page *cur_page; + struct ttm_backend *be = ttm->be; + ++ INIT_LIST_HEAD(&h); ++ + if (be) + be->func->clear(be); +- (void)ttm_tt_set_caching(ttm, tt_cached); + for (i = 0; i < ttm->num_pages; ++i) { ++ + cur_page = ttm->pages[i]; + ttm->pages[i] = NULL; + if (cur_page) { +@@ -314,9 +293,11 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) + "Leaking pages.\n"); + ttm_mem_global_free_page(ttm->glob->mem_glob, + cur_page); +- 
__free_page(cur_page); ++ list_add(&cur_page->lru, &h); ++ count++; + } + } ++ ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state); + ttm->state = tt_unpopulated; + ttm->first_himem_page = ttm->num_pages; + ttm->last_lomem_page = -1; +diff --git a/include/drm/drmP.h b/include/drm/drmP.h +index ffac157..de2f82e 100644 +--- a/include/drm/drmP.h ++++ b/include/drm/drmP.h +@@ -801,6 +801,7 @@ struct drm_driver { + */ + int (*gem_init_object) (struct drm_gem_object *obj); + void (*gem_free_object) (struct drm_gem_object *obj); ++ void (*gem_free_object_unlocked) (struct drm_gem_object *obj); + + /* vga arb irq handler */ + void (*vgaarb_irq)(struct drm_device *dev, bool state); +@@ -1427,6 +1428,7 @@ extern void drm_sysfs_connector_remove(struct drm_connector *connector); + int drm_gem_init(struct drm_device *dev); + void drm_gem_destroy(struct drm_device *dev); + void drm_gem_object_free(struct kref *kref); ++void drm_gem_object_free_unlocked(struct kref *kref); + struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, + size_t size); + void drm_gem_object_handle_free(struct kref *kref); +@@ -1443,10 +1445,15 @@ drm_gem_object_reference(struct drm_gem_object *obj) + static inline void + drm_gem_object_unreference(struct drm_gem_object *obj) + { +- if (obj == NULL) +- return; ++ if (obj != NULL) ++ kref_put(&obj->refcount, drm_gem_object_free); ++} + +- kref_put(&obj->refcount, drm_gem_object_free); ++static inline void ++drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) ++{ ++ if (obj != NULL) ++ kref_put(&obj->refcount, drm_gem_object_free_unlocked); + } + + int drm_gem_handle_create(struct drm_file *file_priv, +@@ -1475,6 +1482,21 @@ drm_gem_object_handle_unreference(struct drm_gem_object *obj) + drm_gem_object_unreference(obj); + } + ++static inline void ++drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) ++{ ++ if (obj == NULL) ++ return; ++ ++ /* ++ * Must bump handle count first as this may be the last ++ * ref, in which case the object would disappear before we ++ * checked for a name ++ */ ++ kref_put(&obj->handlecount, drm_gem_object_handle_free); ++ drm_gem_object_unreference_unlocked(obj); ++} ++ + struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, + struct drm_file *filp, + u32 handle); +@@ -1523,39 +1545,7 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map) + { + } + +- +-static __inline__ void *drm_calloc_large(size_t nmemb, size_t size) +-{ +- if (size != 0 && nmemb > ULONG_MAX / size) +- return NULL; +- +- if (size * nmemb <= PAGE_SIZE) +- return kcalloc(nmemb, size, GFP_KERNEL); +- +- return __vmalloc(size * nmemb, +- GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); +-} +- +-/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. 
*/ +-static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size) +-{ +- if (size != 0 && nmemb > ULONG_MAX / size) +- return NULL; +- +- if (size * nmemb <= PAGE_SIZE) +- return kmalloc(nmemb * size, GFP_KERNEL); +- +- return __vmalloc(size * nmemb, +- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); +-} +- +-static __inline void drm_free_large(void *ptr) +-{ +- if (!is_vmalloc_addr(ptr)) +- return kfree(ptr); +- +- vfree(ptr); +-} ++#include "drm_mem_util.h" + /*@}*/ + + #endif /* __KERNEL__ */ +diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h +index fdf43ab..f3cc7a6 100644 +--- a/include/drm/drm_crtc.h ++++ b/include/drm/drm_crtc.h +@@ -666,8 +666,6 @@ extern void drm_fb_release(struct drm_file *file_priv); + extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); + extern struct edid *drm_get_edid(struct drm_connector *connector, + struct i2c_adapter *adapter); +-extern int drm_do_probe_ddc_edid(struct i2c_adapter *adapter, +- unsigned char *buf, int len); + extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); + extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); + extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode); +@@ -799,6 +797,10 @@ extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, + extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, + int hdisplay, int vdisplay, int vrefresh, + bool interlaced, int margins); ++extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev, ++ int hdisplay, int vdisplay, int vrefresh, ++ bool interlaced, int margins, int GTF_M, ++ int GTF_2C, int GTF_K, int GTF_2J); + extern int drm_add_modes_noedid(struct drm_connector *connector, + int hdisplay, int vdisplay); + #endif /* __DRM_CRTC_H__ */ +diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h +new file mode 100644 +index 0000000..6bd325f +--- /dev/null ++++ b/include/drm/drm_mem_util.h +@@ -0,0 +1,65 @@ ++/* ++ * Copyright © 2008 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Jesse Barnes ++ * ++ */ ++#ifndef _DRM_MEM_UTIL_H_ ++#define _DRM_MEM_UTIL_H_ ++ ++#include ++ ++static __inline__ void *drm_calloc_large(size_t nmemb, size_t size) ++{ ++ if (size != 0 && nmemb > ULONG_MAX / size) ++ return NULL; ++ ++ if (size * nmemb <= PAGE_SIZE) ++ return kcalloc(nmemb, size, GFP_KERNEL); ++ ++ return __vmalloc(size * nmemb, ++ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); ++} ++ ++/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */ ++static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size) ++{ ++ if (size != 0 && nmemb > ULONG_MAX / size) ++ return NULL; ++ ++ if (size * nmemb <= PAGE_SIZE) ++ return kmalloc(nmemb * size, GFP_KERNEL); ++ ++ return __vmalloc(size * nmemb, ++ GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); ++} ++ ++static __inline void drm_free_large(void *ptr) ++{ ++ if (!is_vmalloc_addr(ptr)) ++ return kfree(ptr); ++ ++ vfree(ptr); ++} ++ ++#endif +diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h +index 4c4e0f8..fd2c122 100644 +--- a/include/drm/ttm/ttm_bo_driver.h ++++ b/include/drm/ttm/ttm_bo_driver.h +@@ -115,7 +115,6 @@ struct ttm_backend { + struct ttm_backend_func *func; + }; + +-#define TTM_PAGE_FLAG_VMALLOC (1 << 0) + #define TTM_PAGE_FLAG_USER (1 << 1) + #define TTM_PAGE_FLAG_USER_DIRTY (1 << 2) + #define TTM_PAGE_FLAG_WRITE (1 << 3) +diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h +new file mode 100644 +index 0000000..8bb4de5 +--- /dev/null ++++ b/include/drm/ttm/ttm_page_alloc.h +@@ -0,0 +1,74 @@ ++/* ++ * Copyright (c) Red Hat Inc. ++ ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sub license, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: Dave Airlie ++ * Jerome Glisse ++ */ ++#ifndef TTM_PAGE_ALLOC ++#define TTM_PAGE_ALLOC ++ ++#include "ttm_bo_driver.h" ++#include "ttm_memory.h" ++ ++/** ++ * Get count number of pages from pool to pages list. ++ * ++ * @pages: heado of empty linked list where pages are filled. ++ * @flags: ttm flags for page allocation. ++ * @cstate: ttm caching state for the page. ++ * @count: number of pages to allocate. ++ */ ++int ttm_get_pages(struct list_head *pages, ++ int flags, ++ enum ttm_caching_state cstate, ++ unsigned count); ++/** ++ * Put linked list of pages to pool. ++ * ++ * @pages: list of pages to free. ++ * @page_count: number of pages in the list. Zero can be passed for unknown ++ * count. ++ * @flags: ttm flags for page allocation. 
++ * @cstate: ttm caching state. ++ */ ++void ttm_put_pages(struct list_head *pages, ++ unsigned page_count, ++ int flags, ++ enum ttm_caching_state cstate); ++/** ++ * Initialize pool allocator. ++ * ++ * Pool allocator is internaly reference counted so it can be initialized ++ * multiple times but ttm_page_alloc_fini has to be called same number of ++ * times. ++ */ ++int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages); ++/** ++ * Free pool allocator. ++ */ ++void ttm_page_alloc_fini(void); ++ ++/** ++ * Output the state of pools to debugfs file ++ */ ++extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data); ++#endif diff --git a/drm-encoder-disable.patch b/drm-encoder-disable.patch deleted file mode 100644 index 8c8c7cb..0000000 --- a/drm-encoder-disable.patch +++ /dev/null @@ -1,74 +0,0 @@ -From 0b91f360956aa7a5aa8900d358d1bff3020182e0 Mon Sep 17 00:00:00 2001 -From: Ben Skeggs -Date: Thu, 1 Jul 2010 12:34:56 +1000 -Subject: [PATCH 1/2] drm: disable encoder rather than dpms off in drm_crtc_prepare_encoders() - -Original behaviour will be preserved for drivers that don't implement -disable() hooks for an encoder. - -Signed-off-by: Ben Skeggs ---- - drivers/gpu/drm/drm_crtc_helper.c | 22 ++++++++++++++-------- - 1 files changed, 14 insertions(+), 8 deletions(-) - -diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c -index b142ac2..32dae0e 100644 ---- a/drivers/gpu/drm/drm_crtc_helper.c -+++ b/drivers/gpu/drm/drm_crtc_helper.c -@@ -201,6 +201,17 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc) - } - EXPORT_SYMBOL(drm_helper_crtc_in_use); - -+static void -+drm_encoder_disable(struct drm_encoder *encoder) -+{ -+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; -+ -+ if (encoder_funcs->disable) -+ (*encoder_funcs->disable)(encoder); -+ else -+ (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); -+} -+ - /** - * drm_helper_disable_unused_functions - disable unused objects - * @dev: DRM device -@@ -215,7 +226,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev) - { - struct drm_encoder *encoder; - struct drm_connector *connector; -- struct drm_encoder_helper_funcs *encoder_funcs; - struct drm_crtc *crtc; - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -@@ -226,12 +236,8 @@ void drm_helper_disable_unused_functions(struct drm_device *dev) - } - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { -- encoder_funcs = encoder->helper_private; - if (!drm_helper_encoder_in_use(encoder)) { -- if (encoder_funcs->disable) -- (*encoder_funcs->disable)(encoder); -- else -- (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); -+ drm_encoder_disable(encoder); - /* disconnector encoder from any connector */ - encoder->crtc = NULL; - } -@@ -292,11 +298,11 @@ drm_crtc_prepare_encoders(struct drm_device *dev) - encoder_funcs = encoder->helper_private; - /* Disable unused encoders */ - if (encoder->crtc == NULL) -- (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); -+ drm_encoder_disable(encoder); - /* Disable encoders whose CRTC is about to change */ - if (encoder_funcs->get_crtc && - encoder->crtc != (*encoder_funcs->get_crtc)(encoder)) -- (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); -+ drm_encoder_disable(encoder); - } - } - --- -1.7.1.1 - diff --git a/drm-i915-add-reclaimable-to-page-allocations.patch b/drm-i915-add-reclaimable-to-page-allocations.patch new file mode 100644 index 0000000..6014f2c --- /dev/null +++ 
b/drm-i915-add-reclaimable-to-page-allocations.patch @@ -0,0 +1,48 @@ +From: Linus Torvalds +Date: Sun, 18 Jul 2010 16:44:37 +0000 (-0700) +Subject: drm/i915: add 'reclaimable' to i915 self-reclaimable page allocations +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=cd9f040df6ce46573760a507cb88192d05d27d86 + +drm/i915: add 'reclaimable' to i915 self-reclaimable page allocations + +The hibernate issues that got fixed in commit 985b823b9192 ("drm/i915: +fix hibernation since i915 self-reclaim fixes") turn out to have been +incomplete. Vefa Bicakci tested lots of hibernate cycles, and without +the __GFP_RECLAIMABLE flag the system eventually fails to resume. + +With the flag added, Vefa can apparently hibernate forever (or until he +gets bored running his automated scripts, whichever comes first). + +The reclaimable flag was there originally, and was one of the flags that +were dropped (unintentionally) by commit 4bdadb978569 ("drm/i915: +Selectively enable self-reclaim") that introduced all these problems, +but I didn't want to just blindly add back all the flags in commit +985b823b9192, and it looked like __GFP_RECLAIM wasn't necessary. It +clearly was. + +I still suspect that there is some subtle reason we're missing that +causes the problems, but __GFP_RECLAIMABLE is certainly not wrong to use +in this context, and is what the code historically used. And we have no +idea what the causes the corruption without it. + +Reported-and-tested-by: M. Vefa Bicakci +Cc: Dave Airlie +Cc: Chris Wilson +Cc: KOSAKI Motohiro +Cc: Hugh Dickins +Cc: stable@kernel.org +Signed-off-by: Linus Torvalds +--- + +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 0743858..8757ecf 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -2241,6 +2241,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, + page = read_cache_page_gfp(mapping, i, + GFP_HIGHUSER | + __GFP_COLD | ++ __GFP_RECLAIMABLE | + gfpmask); + if (IS_ERR(page)) + goto err_pages; diff --git a/drm-i915-fix-edp-panels.patch b/drm-i915-fix-edp-panels.patch index 01d3edd..d576e31 100644 --- a/drm-i915-fix-edp-panels.patch +++ b/drm-i915-fix-edp-panels.patch @@ -1,12 +1,34 @@ diff -up linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_dp.c.dave linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_dp.c ---- linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_dp.c.dave 2010-06-25 16:30:13.000000000 +1000 -+++ linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_dp.c 2010-06-25 16:30:23.000000000 +1000 -@@ -129,7 +129,7 @@ intel_dp_link_required(struct drm_device - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_eDP(intel_encoder)) -- return (pixel_clock * dev_priv->edp_bpp) / 8; -+ return (pixel_clock * ALIGN(dev_priv->edp_bpp, 8)) / 8; - else - return pixel_clock * 3; +--- linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_dp.c.dave 2010-06-28 09:50:36.000000000 +1000 ++++ linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_dp.c 2010-06-28 09:53:24.000000000 +1000 +@@ -135,6 +135,12 @@ intel_dp_link_required(struct drm_device } + + static int ++intel_dp_max_data_rate(int max_link_clock, int max_lanes) ++{ ++ return (max_link_clock * max_lanes * 8) / 10; ++} ++ ++static int + intel_dp_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { +@@ -144,7 +150,7 @@ intel_dp_mode_valid(struct drm_connector + int max_lanes = intel_dp_max_lane_count(intel_encoder); + + if (intel_dp_link_required(connector->dev, 
intel_encoder, mode->clock) +- > max_link_clock * max_lanes) ++ > intel_dp_max_data_rate(max_link_clock, max_lanes)) + return MODE_CLOCK_HIGH; + + if (mode->clock < 10000) +@@ -505,7 +511,7 @@ intel_dp_mode_fixup(struct drm_encoder * + + for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { + for (clock = 0; clock <= max_clock; clock++) { +- int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; ++ int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); + + if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock) + <= link_avail) { diff --git a/drm-i915-fix-hibernate-memory-corruption.patch b/drm-i915-fix-hibernate-memory-corruption.patch new file mode 100644 index 0000000..3e7e860 --- /dev/null +++ b/drm-i915-fix-hibernate-memory-corruption.patch @@ -0,0 +1,36 @@ +From 0121d50088a9e04f3bbbee14043cd89164bdf4e6 Mon Sep 17 00:00:00 2001 +From: Linus Torvalds +Date: Fri, 2 Jul 2010 09:56:19 +1000 +Subject: [PATCH] drm/i915: fix hibernation since 4bdadb9785696439c6e2b3efe34aa76df1149c83 + +Since 4bdadb9785696439c6e2b3efe34aa76df1149c83, we've been passing +GFP_MOVABLE where we weren't before caused hibernate on Intel hardware +to results in a lot of memory corruptions on resume. + +[airlied: linus please enhance commit msg if you commit this] + +http://bugzilla.kernel.org/show_bug.cgi?id=13811 + +Reported-by: Evengi Golov (in bugzilla) +Signed-off-by: Dave Airlie +Tested-by: M. Vefa Bicakci +--- + drivers/gpu/drm/i915/i915_gem.c | 2 +- + 1 files changed, 1 insertions(+), 1 deletions(-) + +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 9ded3da..0743858 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -2239,7 +2239,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, + mapping = inode->i_mapping; + for (i = 0; i < page_count; i++) { + page = read_cache_page_gfp(mapping, i, +- mapping_gfp_mask (mapping) | ++ GFP_HIGHUSER | + __GFP_COLD | + gfpmask); + if (IS_ERR(page)) +-- +1.7.0.1 + diff --git a/drm-i915-fix-non-ironlake-965-class-crashes.patch b/drm-i915-fix-non-ironlake-965-class-crashes.patch new file mode 100644 index 0000000..74b65ee --- /dev/null +++ b/drm-i915-fix-non-ironlake-965-class-crashes.patch @@ -0,0 +1,43 @@ +From 1918ad77f7f908ed67cf37c505c6ad4ac52f1ecf Mon Sep 17 00:00:00 2001 +From: Jesse Barnes +Date: Fri, 23 Apr 2010 09:32:23 -0700 +Subject: drm/i915: fix non-Ironlake 965 class crashes +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Jesse Barnes + +commit 1918ad77f7f908ed67cf37c505c6ad4ac52f1ecf upstream. + +My PIPE_CONTROL fix (just sent via Eric's tree) was buggy; I was +testing a whole set of patches together and missed a conversion to the +new HAS_PIPE_CONTROL macro, which will cause breakage on non-Ironlake +965 class chips. Fortunately, the fix is trivial and has been tested. + +Be sure to use the HAS_PIPE_CONTROL macro in i915_get_gem_seqno, or +we'll end up reading the wrong graphics memory, likely causing hangs, +crashes, or worse. 
+ +Reported-by: Zdenek Kabelac +Reported-by: Toralf Förster +Tested-by: Toralf Förster +Signed-off-by: Jesse Barnes +Signed-off-by: Linus Torvalds +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/i915/i915_gem.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -1785,7 +1785,7 @@ i915_get_gem_seqno(struct drm_device *de + { + drm_i915_private_t *dev_priv = dev->dev_private; + +- if (IS_I965G(dev)) ++ if (HAS_PIPE_CONTROL(dev)) + return ((volatile u32 *)(dev_priv->seqno_page))[0]; + else + return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); diff --git a/drm-i915-make-G4X-style-PLL-search-more-permissive.patch b/drm-i915-make-G4X-style-PLL-search-more-permissive.patch new file mode 100644 index 0000000..b7d8630 --- /dev/null +++ b/drm-i915-make-G4X-style-PLL-search-more-permissive.patch @@ -0,0 +1,51 @@ +drm/i915: Make G4X-style PLL search more permissive + +Fixes an Ironlake laptop with a 68.940MHz 1280x800 panel and 120MHz SSC +reference clock. + +More generally, the 0.488% tolerance used before is just too tight to +reliably find a PLL setting. I extracted the search algorithm and +modified it to find the dot clocks with maximum error over the valid +range for the given output type: + +http://people.freedesktop.org/~ajax/intel_g4x_find_best_pll.c + +This gave: + +Worst dotclock for Ironlake DAC refclk is 350000kHz (error 0.00571) +Worst dotclock for Ironlake SL-LVDS refclk is 102321kHz (error 0.00524) +Worst dotclock for Ironlake DL-LVDS refclk is 219642kHz (error 0.00488) +Worst dotclock for Ironlake SL-LVDS SSC refclk is 84374kHz (error 0.00529) +Worst dotclock for Ironlake DL-LVDS SSC refclk is 183035kHz (error 0.00488) +Worst dotclock for G4X SDVO refclk is 50000kHz (error 0.17332) +Worst dotclock for G4X HDMI refclk is 334400kHz (error 0.00478) +Worst dotclock for G4X SL-LVDS refclk is 95571kHz (error 0.00449) +Worst dotclock for G4X DL-LVDS refclk is 224000kHz (error 0.00510) + +The SDVO number looks a bit suspicious, which I haven't tracked down +yet. But it's clear that the old threshold is too tight. + +Signed-off-by: Adam Jackson +[ RHBZ #572799 ] +--- + drivers/gpu/drm/i915/intel_display.c | 4 ++-- + 1 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +index a8d65b7..4b17722 100644 +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -862,8 +862,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, + intel_clock_t clock; + int max_n; + bool found; +- /* approximately equals target * 0.00488 */ +- int err_most = (target >> 8) + (target >> 10); ++ /* approximately equals target * 0.00585 */ ++ int err_most = (target >> 8) + (target >> 9); + found = false; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { +-- +1.7.1 + diff --git a/drm-i915-use-pipe_control-instruction-on-ironlake-and-sandy-bridge.patch b/drm-i915-use-pipe_control-instruction-on-ironlake-and-sandy-bridge.patch new file mode 100644 index 0000000..20b98e5 --- /dev/null +++ b/drm-i915-use-pipe_control-instruction-on-ironlake-and-sandy-bridge.patch @@ -0,0 +1,333 @@ +From e552eb7038a36d9b18860f525aa02875e313fe16 Mon Sep 17 00:00:00 2001 +From: Jesse Barnes +Date: Wed, 21 Apr 2010 11:39:23 -0700 +Subject: drm/i915: use PIPE_CONTROL instruction on Ironlake and Sandy Bridge + +From: Jesse Barnes + +commit e552eb7038a36d9b18860f525aa02875e313fe16 upstream. 
+ +Since 965, the hardware has supported the PIPE_CONTROL command, which +provides fine grained GPU cache flushing control. On recent chipsets, +this instruction is required for reliable interrupt and sequence number +reporting in the driver. + +So add support for this instruction, including workarounds, on Ironlake +and Sandy Bridge hardware. + +https://bugs.freedesktop.org/show_bug.cgi?id=27108 + +Signed-off-by: Jesse Barnes +Tested-by: Chris Wilson +Signed-off-by: Eric Anholt +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/i915/i915_drv.h | 4 + + drivers/gpu/drm/i915/i915_gem.c | 145 ++++++++++++++++++++++++++++++++++++---- + drivers/gpu/drm/i915/i915_irq.c | 8 +- + drivers/gpu/drm/i915/i915_reg.h | 11 +++ + 4 files changed, 152 insertions(+), 16 deletions(-) + +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -206,11 +206,14 @@ typedef struct drm_i915_private { + + drm_dma_handle_t *status_page_dmah; + void *hw_status_page; ++ void *seqno_page; + dma_addr_t dma_status_page; + uint32_t counter; + unsigned int status_gfx_addr; ++ unsigned int seqno_gfx_addr; + drm_local_map_t hws_map; + struct drm_gem_object *hws_obj; ++ struct drm_gem_object *seqno_obj; + struct drm_gem_object *pwrctx; + + struct resource mch_res; +@@ -1090,6 +1093,7 @@ extern int i915_wait_ring(struct drm_dev + + #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) + #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) ++#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev)) + + #define PRIMARY_RINGBUFFER_SIZE (128*1024) + +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -1559,6 +1559,13 @@ i915_gem_object_move_to_inactive(struct + i915_verify_inactive(dev, __FILE__, __LINE__); + } + ++#define PIPE_CONTROL_FLUSH(addr) \ ++ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ ++ PIPE_CONTROL_DEPTH_STALL); \ ++ OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ ++ OUT_RING(0); \ ++ OUT_RING(0); \ ++ + /** + * Creates a new sequence number, emitting a write of it to the status page + * plus an interrupt, which will trigger i915_user_interrupt_handler. +@@ -1593,13 +1600,47 @@ i915_add_request(struct drm_device *dev, + if (dev_priv->mm.next_gem_seqno == 0) + dev_priv->mm.next_gem_seqno++; + +- BEGIN_LP_RING(4); +- OUT_RING(MI_STORE_DWORD_INDEX); +- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); +- OUT_RING(seqno); ++ if (HAS_PIPE_CONTROL(dev)) { ++ u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; + +- OUT_RING(MI_USER_INTERRUPT); +- ADVANCE_LP_RING(); ++ /* ++ * Workaround qword write incoherence by flushing the ++ * PIPE_NOTIFY buffers out to memory before requesting ++ * an interrupt. 
++ */ ++ BEGIN_LP_RING(32); ++ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | ++ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); ++ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); ++ OUT_RING(seqno); ++ OUT_RING(0); ++ PIPE_CONTROL_FLUSH(scratch_addr); ++ scratch_addr += 128; /* write to separate cachelines */ ++ PIPE_CONTROL_FLUSH(scratch_addr); ++ scratch_addr += 128; ++ PIPE_CONTROL_FLUSH(scratch_addr); ++ scratch_addr += 128; ++ PIPE_CONTROL_FLUSH(scratch_addr); ++ scratch_addr += 128; ++ PIPE_CONTROL_FLUSH(scratch_addr); ++ scratch_addr += 128; ++ PIPE_CONTROL_FLUSH(scratch_addr); ++ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | ++ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | ++ PIPE_CONTROL_NOTIFY); ++ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); ++ OUT_RING(seqno); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ } else { ++ BEGIN_LP_RING(4); ++ OUT_RING(MI_STORE_DWORD_INDEX); ++ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); ++ OUT_RING(seqno); ++ ++ OUT_RING(MI_USER_INTERRUPT); ++ ADVANCE_LP_RING(); ++ } + + DRM_DEBUG_DRIVER("%d\n", seqno); + +@@ -1744,7 +1785,10 @@ i915_get_gem_seqno(struct drm_device *de + { + drm_i915_private_t *dev_priv = dev->dev_private; + +- return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); ++ if (IS_I965G(dev)) ++ return ((volatile u32 *)(dev_priv->seqno_page))[0]; ++ else ++ return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); + } + + /** +@@ -4576,6 +4620,49 @@ i915_gem_idle(struct drm_device *dev) + return 0; + } + ++/* ++ * 965+ support PIPE_CONTROL commands, which provide finer grained control ++ * over cache flushing. ++ */ ++static int ++i915_gem_init_pipe_control(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ int ret; ++ ++ obj = drm_gem_object_alloc(dev, 4096); ++ if (obj == NULL) { ++ DRM_ERROR("Failed to allocate seqno page\n"); ++ ret = -ENOMEM; ++ goto err; ++ } ++ obj_priv = obj->driver_private; ++ obj_priv->agp_type = AGP_USER_CACHED_MEMORY; ++ ++ ret = i915_gem_object_pin(obj, 4096); ++ if (ret) ++ goto err_unref; ++ ++ dev_priv->seqno_gfx_addr = obj_priv->gtt_offset; ++ dev_priv->seqno_page = kmap(obj_priv->pages[0]); ++ if (dev_priv->seqno_page == NULL) ++ goto err_unpin; ++ ++ dev_priv->seqno_obj = obj; ++ memset(dev_priv->seqno_page, 0, PAGE_SIZE); ++ ++ return 0; ++ ++err_unpin: ++ i915_gem_object_unpin(obj); ++err_unref: ++ drm_gem_object_unreference(obj); ++err: ++ return ret; ++} ++ + static int + i915_gem_init_hws(struct drm_device *dev) + { +@@ -4593,7 +4680,8 @@ i915_gem_init_hws(struct drm_device *dev + obj = drm_gem_object_alloc(dev, 4096); + if (obj == NULL) { + DRM_ERROR("Failed to allocate status page\n"); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto err; + } + obj_priv = to_intel_bo(obj); + obj_priv->agp_type = AGP_USER_CACHED_MEMORY; +@@ -4601,7 +4689,7 @@ i915_gem_init_hws(struct drm_device *dev + ret = i915_gem_object_pin(obj, 4096); + if (ret != 0) { + drm_gem_object_unreference(obj); +- return ret; ++ goto err_unref; + } + + dev_priv->status_gfx_addr = obj_priv->gtt_offset; +@@ -4610,10 +4698,16 @@ i915_gem_init_hws(struct drm_device *dev + if (dev_priv->hw_status_page == NULL) { + DRM_ERROR("Failed to map status page.\n"); + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); +- i915_gem_object_unpin(obj); +- drm_gem_object_unreference(obj); +- return -EINVAL; ++ ret = -EINVAL; ++ goto err_unpin; + } ++ ++ if (HAS_PIPE_CONTROL(dev)) { ++ ret = 
i915_gem_init_pipe_control(dev); ++ if (ret) ++ goto err_unpin; ++ } ++ + dev_priv->hws_obj = obj; + memset(dev_priv->hw_status_page, 0, PAGE_SIZE); + I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); +@@ -4621,6 +4715,30 @@ i915_gem_init_hws(struct drm_device *dev + DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); + + return 0; ++ ++err_unpin: ++ i915_gem_object_unpin(obj); ++err_unref: ++ drm_gem_object_unreference(obj); ++err: ++ return 0; ++} ++ ++static void ++i915_gem_cleanup_pipe_control(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ ++ obj = dev_priv->seqno_obj; ++ obj_priv = obj->driver_private; ++ kunmap(obj_priv->pages[0]); ++ i915_gem_object_unpin(obj); ++ drm_gem_object_unreference(obj); ++ dev_priv->seqno_obj = NULL; ++ ++ dev_priv->seqno_page = NULL; + } + + static void +@@ -4644,6 +4762,9 @@ i915_gem_cleanup_hws(struct drm_device * + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); + dev_priv->hw_status_page = NULL; + ++ if (HAS_PIPE_CONTROL(dev)) ++ i915_gem_cleanup_pipe_control(dev); ++ + /* Write high address into HWS_PGA when disabling. */ + I915_WRITE(HWS_PGA, 0x1ffff000); + } +--- a/drivers/gpu/drm/i915/i915_irq.c ++++ b/drivers/gpu/drm/i915/i915_irq.c +@@ -297,7 +297,7 @@ irqreturn_t ironlake_irq_handler(struct + READ_BREADCRUMB(dev_priv); + } + +- if (gt_iir & GT_USER_INTERRUPT) { ++ if (gt_iir & GT_PIPE_NOTIFY) { + u32 seqno = i915_get_gem_seqno(dev); + dev_priv->mm.irq_gem_seqno = seqno; + trace_i915_gem_request_complete(dev, seqno); +@@ -738,7 +738,7 @@ void i915_user_irq_get(struct drm_device + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { + if (HAS_PCH_SPLIT(dev)) +- ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); ++ ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); + else + i915_enable_irq(dev_priv, I915_USER_INTERRUPT); + } +@@ -754,7 +754,7 @@ void i915_user_irq_put(struct drm_device + BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); + if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { + if (HAS_PCH_SPLIT(dev)) +- ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); ++ ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); + else + i915_disable_irq(dev_priv, I915_USER_INTERRUPT); + } +@@ -1034,7 +1034,7 @@ static int ironlake_irq_postinstall(stru + /* enable kind of interrupts always enabled */ + u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | + DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; +- u32 render_mask = GT_USER_INTERRUPT; ++ u32 render_mask = GT_PIPE_NOTIFY; + u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | + SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; + +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -210,6 +210,16 @@ + #define ASYNC_FLIP (1<<22) + #define DISPLAY_PLANE_A (0<<20) + #define DISPLAY_PLANE_B (1<<20) ++#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2) ++#define PIPE_CONTROL_QW_WRITE (1<<14) ++#define PIPE_CONTROL_DEPTH_STALL (1<<13) ++#define PIPE_CONTROL_WC_FLUSH (1<<12) ++#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */ ++#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */ ++#define PIPE_CONTROL_ISP_DIS (1<<9) ++#define PIPE_CONTROL_NOTIFY (1<<8) ++#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ ++#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */ + + /* + * Fence 
registers +@@ -2111,6 +2121,7 @@ + #define DEIER 0x4400c + + /* GT interrupt */ ++#define GT_PIPE_NOTIFY (1 << 4) + #define GT_SYNC_STATUS (1 << 2) + #define GT_USER_INTERRUPT (1 << 0) + diff --git a/drm-intel-945gm-stability-fixes.patch b/drm-intel-945gm-stability-fixes.patch new file mode 100644 index 0000000..4dbc446 --- /dev/null +++ b/drm-intel-945gm-stability-fixes.patch @@ -0,0 +1,99 @@ +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 0d05c6f..b87f65d 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -4967,6 +4967,16 @@ i915_gem_load(struct drm_device *dev) + list_add(&dev_priv->mm.shrink_list, &shrink_list); + spin_unlock(&shrink_list_lock); + ++ /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ ++ if (IS_GEN3(dev)) { ++ u32 tmp = I915_READ(MI_ARB_STATE); ++ if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) { ++ /* arb state is a masked write, so set bit + bit in mask */ ++ tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT); ++ I915_WRITE(MI_ARB_STATE, tmp); ++ } ++ } ++ + /* Old X drivers will take 0-2 for front, back, depth buffers */ + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + dev_priv->fence_reg_start = 3; +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h +index 4cbc521..4543975 100644 +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -357,6 +357,70 @@ + #define LM_BURST_LENGTH 0x00000700 + #define LM_FIFO_WATERMARK 0x0000001F + #define MI_ARB_STATE 0x020e4 /* 915+ only */ ++#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */ ++ ++/* Make render/texture TLB fetches lower priorty than associated data ++ * fetches. This is not turned on by default ++ */ ++#define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15) ++ ++/* Isoch request wait on GTT enable (Display A/B/C streams). ++ * Make isoch requests stall on the TLB update. May cause ++ * display underruns (test mode only) ++ */ ++#define MI_ARB_ISOCH_WAIT_GTT (1 << 14) ++ ++/* Block grant count for isoch requests when block count is ++ * set to a finite value. ++ */ ++#define MI_ARB_BLOCK_GRANT_MASK (3 << 12) ++#define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */ ++#define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */ ++#define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */ ++#define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */ ++ ++/* Enable render writes to complete in C2/C3/C4 power states. ++ * If this isn't enabled, render writes are prevented in low ++ * power states. That seems bad to me. ++ */ ++#define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11) ++ ++/* This acknowledges an async flip immediately instead ++ * of waiting for 2TLB fetches. 
++ */ ++#define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10) ++ ++/* Enables non-sequential data reads through arbiter ++ */ ++#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9) ++ ++/* Disable FSB snooping of cacheable write cycles from binner/render ++ * command stream ++ */ ++#define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8) ++ ++/* Arbiter time slice for non-isoch streams */ ++#define MI_ARB_TIME_SLICE_MASK (7 << 5) ++#define MI_ARB_TIME_SLICE_1 (0 << 5) ++#define MI_ARB_TIME_SLICE_2 (1 << 5) ++#define MI_ARB_TIME_SLICE_4 (2 << 5) ++#define MI_ARB_TIME_SLICE_6 (3 << 5) ++#define MI_ARB_TIME_SLICE_8 (4 << 5) ++#define MI_ARB_TIME_SLICE_10 (5 << 5) ++#define MI_ARB_TIME_SLICE_14 (6 << 5) ++#define MI_ARB_TIME_SLICE_16 (7 << 5) ++ ++/* Low priority grace period page size */ ++#define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */ ++#define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4) ++ ++/* Disable display A/B trickle feed */ ++#define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) ++ ++/* Set display plane priority */ ++#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */ ++#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ ++ + #define CACHE_MODE_0 0x02120 /* 915+ only */ + #define CM0_MASK_SHIFT 16 + #define CM0_IZ_OPT_DISABLE (1<<6) +-- +1.7.1 + diff --git a/drm-intel-gen5-dither.patch b/drm-intel-gen5-dither.patch new file mode 100644 index 0000000..68df94d --- /dev/null +++ b/drm-intel-gen5-dither.patch @@ -0,0 +1,57 @@ +From b44ee8a479c3adb22b818b8b6aff29c6a08c1cb1 Mon Sep 17 00:00:00 2001 +From: Adam Jackson +Date: Mon, 19 Apr 2010 15:52:32 -0400 +Subject: [PATCH] drm/i915: Use spatio-temporal dithering on PCH + +Spatial dither is better than nothing, but ST is even better. + +Signed-off-by: Adam Jackson +--- + drivers/gpu/drm/i915/i915_reg.h | 5 ++++- + drivers/gpu/drm/i915/intel_display.c | 10 ++++++---- + 2 files changed, 10 insertions(+), 5 deletions(-) + +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h +index 527d30a..0bbbb77 100644 +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -1922,7 +1922,10 @@ + /* Display & cursor control */ + + /* dithering flag on Ironlake */ +-#define PIPE_ENABLE_DITHER (1 << 4) ++#define PIPE_ENABLE_DITHER (1 << 4) ++#define PIPE_DITHER_TYPE_MASK (3 << 2) ++#define PIPE_DITHER_TYPE_SPATIAL (0 << 2) ++#define PIPE_DITHER_TYPE_ST01 (1 << 2) + /* Pipe A */ + #define PIPEADSL 0x70000 + #define PIPEACONF 0x70008 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +index 58668c4..36ead0e 100644 +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -3676,14 +3676,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + /* set the dithering flag */ + if (IS_I965G(dev)) { + if (dev_priv->lvds_dither) { +- if (HAS_PCH_SPLIT(dev)) ++ if (HAS_PCH_SPLIT(dev)) { + pipeconf |= PIPE_ENABLE_DITHER; +- else ++ pipeconf |= PIPE_DITHER_TYPE_ST01; ++ } else + lvds |= LVDS_ENABLE_DITHER; + } else { +- if (HAS_PCH_SPLIT(dev)) ++ if (HAS_PCH_SPLIT(dev)) { + pipeconf &= ~PIPE_ENABLE_DITHER; +- else ++ pipeconf &= ~PIPE_DITHER_TYPE_MASK; ++ } else + lvds &= ~LVDS_ENABLE_DITHER; + } + } +-- +1.7.0.1 + diff --git a/drm-intel-make-lvds-work.patch b/drm-intel-make-lvds-work.patch index 5ca0152..d39936a 100644 --- a/drm-intel-make-lvds-work.patch +++ b/drm-intel-make-lvds-work.patch @@ -1,19 +1,19 @@ -diff -up linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c.orig 
linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c ---- linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c.orig 2010-03-31 16:59:39.901995671 -0400 -+++ linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c 2010-03-31 17:01:05.416996744 -0400 -@@ -3757,7 +3757,6 @@ struct drm_crtc *intel_get_load_detect_p - void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode) +diff -up linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c.jx linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c +--- linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c.jx 2010-04-19 17:13:31.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c 2010-04-19 17:13:59.753994103 -0400 +@@ -4141,7 +4141,6 @@ void intel_release_load_detect_pipe(stru + struct drm_connector *connector, int dpms_mode) { struct drm_encoder *encoder = &intel_encoder->enc; - struct drm_device *dev = encoder->dev; struct drm_crtc *crtc = encoder->crtc; struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; -@@ -3767,7 +3766,6 @@ void intel_release_load_detect_pipe(stru - intel_encoder->base.encoder = NULL; +@@ -4151,7 +4150,6 @@ void intel_release_load_detect_pipe(stru + connector->encoder = NULL; intel_encoder->load_detect_temp = false; crtc->enabled = drm_helper_crtc_in_use(crtc); - drm_helper_disable_unused_functions(dev); } - /* Switch crtc and output back off if necessary */ + /* Switch crtc and encoder back off if necessary */ diff --git a/drm-intel-next.patch b/drm-intel-next.patch index c6cac69..4e2ef12 100644 --- a/drm-intel-next.patch +++ b/drm-intel-next.patch @@ -1 +1,13624 @@ -empty +Start from 2.6.33.y, then pull in 2.6.34, then anholt's for-linus, then +anholt's drm-intel-next minus a bogus TV detection patch and the AGP/GTT +splitup. + +commit 61b8a24d45b1c2ccab371b98f077425d0d73ab7e +Author: Chris Wilson +Date: Thu Jan 7 10:39:13 2010 +0000 + + drm/i915: Replace open-coded eviction in i915_gem_idle() + + With the introduction of the hang-check, we can safely expect that + i915_wait_request() will always return even when the GPU hangs, and so + do not need to open code the wait in order to manually check for the + hang. Also we do not need to always evict all buffers, so only flush + the GPU (and wait for it to idle) for KMS, but continue to evict for UMS. + + Signed-off-by: Chris Wilson + Signed-off-by: Eric Anholt + +commit 4b508552539d0a31eb5c62d1bf4847e1fd338eb8 +Author: Zhenyu Wang +Date: Thu Dec 17 14:48:43 2009 +0800 + + drm/i915: Keep MCHBAR always enabled + + As we need more and more controls within MCHBAR for memory config and + power management, this trys to keep MCHBAR enabled from driver load and + only tear down in driver unload. + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit 4dd298cb92eccc685aace852cab33d36085cffcc +Author: Eric Anholt +Date: Tue Jan 26 09:43:10 2010 -0800 + + drm/i915: Don't reserve compatibility fence regs in KMS mode. + + The fence start is for compatibility with UMS X Servers before fence + management. KMS X Servers only started doing tiling after fence + management appeared. + + Signed-off-by: Eric Anholt + +commit dd4031f49bbce4b4ed4ad6f0ecf3e7da8bd240aa +Author: Li Peng +Date: Wed Jan 27 19:01:11 2010 +0800 + + drm/i915: enable memory self refresh on 9xx + + Enabling memory self refresh (SR) on 9xx needs to set additional + register bits. 
On 945, we need bit 31 of FW_BLC_SELF to enable the + write to self refresh bit and bit 16 to enable the write of self + refresh watermark. On 915, bit 12 of INSTPM is used to enable SR. + + SR will take effect when CPU enters C3+ state and its entry/exit + should be automatically controlled by H/W, driver only needs to set + SR enable bits in wm update. But this isn't safe in my test on 945 + because GPU is hung. So this patch explicitly enables SR when GPU + is idle, and disables SR when it is busy. In my test on a netbook of + 945GSE chipset, it saves about 0.8W idle power. + + Signed-off-by: Li Peng + [anholt: rebased against 33c5fd121eabbccc9103daf6cda36941eb3c349f + by adding disable of INSTPM SR bit on 915GM for two pipe setup] + Signed-off-by: Eric Anholt + +commit b71e8e4881b7b8d9d0f622ace6db4d0587198b47 +Author: Jesse Barnes +Date: Fri Jan 29 11:27:07 2010 -0800 + + drm/i915: add dynamic performance control support for Ironlake + + Ironlake (and 965GM, which this patch doesn't support) supports a + hardware performance and power management feature that allows it to + adjust to changes in GPU load over time with software help. The goal + if this is to maximize performance/power for a given workload. + + This patch enables that feature, which is also a requirement for + supporting Intelligent Power Sharing, a feature which allows for + dynamic budgeting of power between the CPU and GPU in Arrandale + platforms. + + Tested-by: ykzhao + [anholt: Resolved against the irq handler loop removal] + Signed-off-by: Jesse Barnes + Signed-off-by: Eric Anholt + +commit bb617ddfd5cfbbd2b657550528a8b9d9827c0c5d +Author: Matthew Garrett +Date: Tue Feb 2 18:30:47 2010 +0000 + + drm/i915: Deobfuscate the render p-state obfuscation + + The ironlake render p-state support includes some rather odd variable + names. Clean them up in order to improve the readability of the code. + + Signed-off-by: Matthew Garrett + Signed-off-by: Eric Anholt + +commit 5b497c826bdd1e2834fa5cf34a1a513f99bde04a +Author: Li Peng +Date: Wed Feb 10 01:54:24 2010 +0800 + + drm/i915: Fix OGLC performance regression on 945 + + He Shuang reported an OGLC performance regression introduced in the patch + "enable memory self refresh on 9xx", In that patch, SR on 945 is disabled + everytime when calling intel_mark_busy(), while too much of such operation + will impact performance. Actually disable SR is necessary only when GPU and + Crtc changing from idle to busy. This patch make such optimization. + + It fixes upstream bug + http://bugs.freedesktop.org/show_bug.cgi?id=26422 + + Signed-off-by: Li Peng + Signed-off-by: Eric Anholt + +commit 8b31f2d2d4c38dd98bc8e1b363984a769b4bcc70 +Author: Jesse Barnes +Date: Thu Feb 4 14:17:47 2010 -0800 + + drm/i915: fix drps disable so unload & re-load works + + At unload time, we need to disable DRPS, but we need to do it correctly + or the GPU will hang and we won't be able to load the module again. So + set the SFCAVM bit so we can properly restore the DRPS config at unload. + + Signed-off-by: Jesse Barnes + Signed-off-by: Eric Anholt + +commit 380119c14abe900e7907ba919cd31935c4bd9b26 +Author: Jesse Barnes +Date: Fri Feb 5 12:42:41 2010 -0800 + + drm/i915: provide FBC status in debugfs + + Tools like powertop want to check the current FBC status and report it + to the user. So add a debugfs file indicating whether FBC is enabled, + and if not, why. 
+ + Signed-off-by: Jesse Barnes + Signed-off-by: Eric Anholt + +commit 1a96157856ccbdad57e0b2c0fdda90533a035d5b +Author: Jesse Barnes +Date: Fri Feb 5 12:47:35 2010 -0800 + + drm/i915: provide self-refresh status in debugfs + + Signed-off-by: Jesse Barnes + Signed-off-by: Eric Anholt + +commit cda4b3ba8d82079de2929f637380a6067f446e08 +Author: Daniel Vetter +Date: Thu Feb 11 14:14:42 2010 +0100 + + drm/i915: overlay: nuke readback to flush wc caches + + I retested this and whatever this papered over, the problem doesn't seem + to exist anymore. + + Signed-off-by: Daniel Vetter + Reviewed-by: Eric Anholt + [anholt: fixed up compile warning] + Signed-off-by: Eric Anholt + +commit 80bd60eafb329be40d6e83a4f0e2f2f2b28b900c +Author: Daniel Vetter +Date: Thu Feb 11 14:14:43 2010 +0100 + + drm/i915: overlay: drop superflous gpu flushes + + Cache-coherency is maintained by gem. Drop these leftover MI_FLUSH + commands from the userspace code. + + Signed-off-by: Daniel Vetter + Acked-by: Chris Wilson + Signed-off-by: Eric Anholt + +commit dd4120e04e07feec4275807656a650448b9fd2ae +Author: Zhenyu Wang +Date: Wed Feb 10 10:39:33 2010 +0800 + + agp/intel: official names for Pineview and Ironlake + + Print official names for Pineview and Ironlake, which is Intel + GMA3150 and Intel HD graphics. + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit 4c0c82c9109dff54cc80493e7b3d39d0ce97b4f0 +Author: Daniel Vetter +Date: Mon Feb 1 13:59:16 2010 +0100 + + drm/i915: move a gtt flush to the correct place + + No functional change, because gtt flushing is a no-op. Still, try + to keep the bookkeeping accurate. The if is still slightly wrong + for with execbuf2 even i915-class hw doesn't always need a fence + reg for gpu access. But that's for somewhen lateron. + + Signed-off-by: Daniel Vetter + Signed-off-by: Eric Anholt + +commit 37da128862c402c8e0f70d42e5f9113dd20ee6e8 +Author: Daniel Vetter +Date: Mon Feb 1 13:59:17 2010 +0100 + + drm/i915: blow away userspace mappings before fence change + + This aligns it with the other user of i915_gem_clear_fence_reg, + which blows away the mapping before changing the fence reg. + + Only affects userspace if it races against itself when changing + tiling parameters, i.e. behaviour is undefined, anyway. + + Signed-off-by: Daniel Vetter + Reviewed-by: Chris Wilson + Signed-off-by: Eric Anholt + +commit 4f0dd6f6fd13ef49a298e1f7d300b3d4f9ca8f16 +Author: Owain Ainsworth +Date: Thu Feb 18 15:33:00 2010 +0000 + + drm/i915: reduce some of the duplication of tiling checking + + i915_gem_object_fenceable was mostly just a repeat of the + i915_gem_object_fence_offset_ok, but also checking the size (which was + checkecd when we allowed that BO to be tiled in the first place). So + instead, export the latter function and use it in place. + + Signed-Off-By: Owain G. Ainsworth + Signed-off-by: Eric Anholt + +commit e627564787847102753d565bd49a47a9fbd2b2ee +Author: Chris Wilson +Date: Thu Feb 18 10:24:56 2010 +0000 + + drm/i915: Record batch buffer following GPU error + + In order to improve our diagnostic capabilities following a GPU hang + and subsequent reset, we need to record the batch buffer that triggered + the error. We assume that the current batch buffer, plus a few details + about what else is on the active list, will be sufficient -- at the very + least an improvement over nothing. + + The extra information is stored in /debug/dri/.../i915_error_state + following an error, and may be decoded using + intel_gpu_tools/tools/intel_error_decode. 
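    (A rough userspace sketch of pulling that error-state file off a machine for
    later decoding; the debugfs mount point and DRI minor used below are
    assumptions for illustration, not taken from the patch.)

    /*
     * Hypothetical helper: copy the i915 error state out of debugfs so it
     * can later be fed to intel_gpu_tools' intel_error_decode.  The source
     * path is an assumption -- it depends on where debugfs is mounted and
     * which DRI minor the GPU was assigned.
     */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *src = "/sys/kernel/debug/dri/0/i915_error_state"; /* assumed path */
        FILE *in = fopen(src, "r");
        FILE *out = fopen("error_state.txt", "w");
        char buf[4096];
        size_t n;

        if (!in || !out) {
            perror("fopen");
            return EXIT_FAILURE;
        }
        while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
            fwrite(buf, 1, n, out);
        fclose(in);
        fclose(out);
        return EXIT_SUCCESS;
    }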
+ + v2: Avoid excessive work under spinlocks. + v3: Include ringbuffer for later analysis. + v4: Use kunmap correctly and record more buffer state. + v5: Search ringbuffer for current batch buffer + v6: Use a work fn for the impossible IRQ error case. + v7: Avoid non-atomic paths whilst in IRQ context. + + Signed-off-by: Chris Wilson + Signed-off-by: Eric Anholt + +commit 132c106ae1af6c5184fca797366efe24a6a5b3ce +Author: Eric Anholt +Date: Thu Oct 22 16:10:52 2009 -0700 + + agp/intel: Add support for Sandybridge. + + Signed-off-by: Eric Anholt + +commit 404fe3988cc0529bee1a7cb6e9e31096ac003e68 +Author: Eric Anholt +Date: Thu Oct 22 16:11:14 2009 -0700 + + drm/i915: Add initial bits for VGA modesetting bringup on Sandybridge. + + Signed-off-by: Eric Anholt + +commit 80e3829ff4b255ab7f05c4baa0a6af3fafd4885c +Author: Eric Anholt +Date: Mon Oct 26 16:44:17 2009 -0700 + + drm/i915: Set up fence registers on sandybridge. + + Signed-off-by: Eric Anholt + +commit fdda47cece044474e38fc234ee86b0ad889b7e25 +Author: Eric Anholt +Date: Mon Nov 2 12:08:22 2009 -0800 + + drm/i915: Fix sandybridge status page setup. + + The register's moved to the same location as the one for the BCS, it seems. + + Signed-off-by: Eric Anholt + +commit 68904b08da632ce82b551bb1c91111343ba6f715 +Author: Eric Anholt +Date: Mon Nov 2 15:33:05 2009 -0800 + + agp/intel: Use a non-reserved value for the cache field of the PTEs. + + I don't know if this is what we'll want to be using long term, we'll see. + + Signed-off-by: Eric Anholt + +commit c6372005734d735fa05442971656a83ba9bc2fe1 +Author: Eric Anholt +Date: Thu Nov 5 15:30:35 2009 -0800 + + drm/i915: Disable the surface tile swizzling on Sandybridge. + + I can't explain this, except that it makes my display correct. + + Signed-off-by: Eric Anholt + +commit e4dc1f5a66bd5e737fe32f8c17c4eef9ed48e2a3 +Author: Eric Anholt +Date: Mon Nov 9 14:57:34 2009 -0800 + + drm/i915: Correct locking in the modesetting failure path, fixing a BUG_ON. + + Signed-off-by: Eric Anholt + +commit 439c85dab263af43eddbf229dfdad7b5763dfc27 +Author: Zhenyu Wang +Date: Wed Nov 11 01:25:25 2009 +0800 + + drm/i915, agp/intel: Fix stolen memory size on Sandybridge + + New memory control config reg at 0x50 should be used for stolen + memory size detection on Sandybridge. + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit fa358e1c80893bf82f1d3d924dbfe10a5a7909c1 +Author: Eric Anholt +Date: Thu Jan 7 16:21:46 2010 -0800 + + agp/intel: Add a new Sandybridge HB/IG PCI ID combo. + + Signed-off-by: Eric Anholt + +commit 3fece8822281b539cc61f86fd8e8b42afed9113c +Author: Eric Anholt +Date: Thu Jan 7 15:08:18 2010 -0800 + + drm/i915: Add a new mobile Sandybridge PCI ID. + + Signed-off-by: Eric Anholt + +commit 0cc29ff88697d208b7244d28bdb508b84b4a6acc +Author: Eric Anholt +Date: Fri Jan 8 14:25:16 2010 -0800 + + drm/i915: Disable the hangcheck reset on Sandybridge until we add support. + + Signed-off-by: Eric Anholt + +commit d37679f2b2661a4a05f260bc6677d8f012ad1d3a +Author: Eric Anholt +Date: Thu Jan 28 16:13:29 2010 -0800 + + drm/i915: Correct the Sandybridge chipset info structs. + + Disables CXSR until it's done, and sets the mobile bit on mobile. + + Signed-off-by: Eric Anholt + +commit cc45bac0967148d193bad87edbf20268d474346a +Author: Eric Anholt +Date: Thu Jan 28 16:45:52 2010 -0800 + + drm/i915: More s/IS_IRONLAKE/HAS_PCH_SPLIT for Sandybridge. + + I think this is pretty much correct. Not really tested. 
+ + Signed-off-by: Eric Anholt + +commit 1035cda8796172825f191fc5a53690a60ae79694 +Author: Zhenyu Wang +Date: Tue Feb 23 14:05:24 2010 +0800 + + drm/i915: Add dependency on the intel agp module + + See http://bugzilla.kernel.org/show_bug.cgi?id=15021 + + Make sure that the appropriate AGP module is loaded and probed before + trying to set up the DRM. The DRM already depends on the AGP core, + but in this case we know the specific AGP driver we need too, and can + help users avoid the trap of loading the AGP driver after the DRM + driver. + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit 749a3068de80d499a4632c20599e18372b150612 +Author: Daniel Vetter +Date: Fri Feb 19 11:51:56 2010 +0100 + + drm/i915: reuse i915_gem_object_put_fence_reg for fence stealing code + + This has a few functional changes against the old code: + + * a few more unnecessary loads and stores to the drm_i915_fence_reg + objects. Also an unnecessary store to the hw fence register. + + * zaps any userspace mappings before doing other flushes. Only changes + anything when userspace does racy stuff against itself. + + * also flush GTT domain. This is a noop, but still try to keep the + bookkeeping correct. + + Signed-off-by: Daniel Vetter + Signed-off-by: Eric Anholt + +commit 8b0f935bf4b29f75849e59a0b51f671bd009b6cb +Author: Daniel Vetter +Date: Fri Feb 19 11:51:57 2010 +0100 + + drm/i915: fixup active list locking in object_unbind + + All other accesses take this spinlock, so do this here, too. + + Signed-off-by: Daniel Vetter + Signed-off-by: Eric Anholt + +commit 4778b6c5ada71ebe9850005d0321a3a1969cef9c +Author: Daniel Vetter +Date: Fri Feb 19 11:51:58 2010 +0100 + + drm/i915: extract fence stealing code + + The spaghetti logic in there tripped up my brain's code parser for a + few secs. Prevent this from happening again by extracting the fence + stealing code into a seperate functions. IMHO this slightly clears up + the code flow. + + v2: Beautified according to ickle's comments. + v3: ickle forgot to flush his comment queue ... Now there's also a + we-are-paranoid BUG_ON in there. + v4: I've forgotten to switch on my brain when doing v3. Now the BUG_ON + actually checks something useful. + v5: Clean up a stale comment as noted by Eric Anholt. + + Signed-off-by: Daniel Vetter + Reviewed-by: Chris Wilson + Signed-off-by: Eric Anholt + +commit 34c761de7a382135d815ea7cb5a979b33d520484 +Author: Daniel Vetter +Date: Fri Feb 19 11:51:59 2010 +0100 + + drm/i915: ensure lru ordering of fence_list + + The fence_list should be lru ordered for otherwise we might try + to steal a fence reg from an active object even though there are + fences from inactive objects available. lru ordering was obeyed + for gpu access everywhere save when moving dirty objects from + flushing_list to active_list. + + Fixing this cause the code to indent way to much, so I've extracted + the flushing_list processing logic into its on function. + + Signed-off-by: Daniel Vetter + Reviewed-by: Chris Wilson + Signed-off-by: Eric Anholt + +commit f85c68a7feb2bb9d59be8d3faee04d0c4a888ef2 +Author: Daniel Vetter +Date: Fri Feb 19 11:52:00 2010 +0100 + + drm/i915: reuse i915_gpu_idle helper + + We have it, so use it. This required moving the function to avoid + a forward declaration. 
+ + Signed-off-by: Daniel Vetter + Signed-off-by: Eric Anholt + +commit 2cbc022b1813cf865a79d051dec0c43f19987168 +Author: Daniel Vetter +Date: Fri Feb 19 11:52:01 2010 +0100 + + drm/i915: clean-up i915_gem_flush_gpu_write_domain + + Now that we have an exact gpu write domain tracking, we don't need + to move objects to the active list ourself. i915_add_request will + take care of that under all circumstances. + + Idea stolen from a patch by Chris Wilson . + + Signed-off-by: Daniel Vetter + Signed-off-by: Chris Wilson + Signed-off-by: Eric Anholt + +commit bee2714db60a22a3e50bedb130bfeb4601eaeaa6 +Author: Daniel Vetter +Date: Fri Feb 19 11:52:02 2010 +0100 + + drm/i915: check for multiple write domains in pin_and_relocate + + The assumption that an object has only ever one write domain is deeply + threaded into gem (it's even encoded the the singular of the variable + name). Don't let userspace screw us over. + + Signed-off-by: Daniel Vetter + Signed-off-by: Eric Anholt + +commit 0a0ab3f65aa8f24d12aada76f46db902b9936762 +Author: Jesse Barnes +Date: Thu Feb 11 12:41:05 2010 -0800 + + drm/i915: enable/disable LVDS port at DPMS time + + It would be good to disable the LVDS port when we shut down the panel + to save power. We haven't done so until now because we had trouble + getting the right LVDS parameters from the BIOS. I think we're past + that now, so enabling and disabling the port should be safe, though it + would probably be made cleaner with some additional changes to the + display code, where we also bang on the LVDS reg to set the pairing + correctly etc. + + Seems to save a bit of power (up to 300mW in my basic wattsup + meter testing). + + Signed-off-by: Jesse Barnes + Signed-off-by: Eric Anholt + +commit 9dab2f83450d574e05d03058dd2c0ce544a47fcd +Author: Eric Anholt +Date: Fri Feb 26 13:32:11 2010 -0800 + + drm/i915: Don't bother with the BKL for GEM ioctls. + + We probably don't need it for most of the other driver ioctls as well, + but we explicitly did locking when doing the GEM pieces. On CPU-bound + graphics tasks, the BKL was showing up as 1-2% of CPU time. + + Signed-off-by: Eric Anholt + +commit eaa175e1942fa941c1047159721f838b3dc56263 +Author: Owain G. Ainsworth +Date: Wed Mar 3 05:34:29 2010 +0000 + + drm/i915: remove an unnecessary wait_request() + + The continue just after this call with loop around and wait for the + request just added just fine. This leads to slightly more compact code. + + Signed-Off-by: Owain G. Ainsworth + Signed-off-by: Eric Anholt + +commit 4ab0ede782d4e161ee75e076344d244847892857 +Author: Priit Laes +Date: Tue Mar 2 11:37:00 2010 +0200 + + drm/i915: Rename FBC_C3_IDLE to FBC_CTL_C3_IDLE to match other registers + + Signed-off-by: Priit Laes + Signed-off-by: Eric Anholt + +commit e213ec072ae610dad353481c654b46904b01f830 +Author: Eric Anholt +Date: Mon Mar 8 23:41:55 2010 -0800 + + drm/i915: Enable VS timer dispatch. + + This could resolve HW deadlocks where a unit downstream of the VS is + waiting for more input, the VS has one vertex queued up but not + dispatched because it hopes to get one more vertex for 2x4 dispatch, + and software isn't handing more vertices down because it's waiting for + rendering to complete. The B-Spec says you should always have this + bit set. 
+ + Signed-off-by: Eric Anholt + +commit ae87627922f7f0f0b16c8b24e120f1d4c5574ed5 +Author: Joe Perches +Date: Thu Mar 11 14:01:38 2010 -0800 + + drivers/gpu/drm/i915/intel_bios.c: fix continuation line formats + + String constants that are continued on subsequent lines with \ will cause + spurious whitespace in the resulting output. + + Signed-off-by: Joe Perches + Cc: Dave Airlie + Cc: Eric Anholt + Cc: Jesse Barnes + Signed-off-by: Andrew Morton + [anholt: whacked it to wrap to 80 columns instead] + Signed-off-by: Eric Anholt + +commit a977b689d5e9163fe9dec0f3c33c4fb76982964e +Author: Zhenyu Wang +Date: Tue Mar 9 23:37:07 2010 +0800 + + drm/i915: Fix check with IS_GEN6 + + IS_GEN6 missed to include SandyBridge mobile chip, which failed in + i915_probe_agp() for memory config detection. Fix it with a device + info flag. + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit bced22270e7a871b27cf6df978fbbda1afe010c2 +Author: Dan Carpenter +Date: Sat Mar 6 14:05:39 2010 +0300 + + drm/i915: fix small leak on overlay error path + + We should free "params" before returning. + + Signed-off-by: Dan Carpenter + Reviewed-by: Daniel Vetter + Cc: stable@kernel.org (for .33) + Signed-off-by: Eric Anholt + +commit 9f3dd99fc14b1344a34132011d189e44ca94faab +Author: Eric Anholt +Date: Thu Mar 18 11:24:06 2010 -0700 + + agp/intel: Respect the GTT size on Sandybridge for scratch page setup. + + This is similar to 14bc490bbdf1b194ad1f5f3d2a0a27edfdf78986 which + respected it for how much of the GTT we would actually use. Now we + won't clear beyond allocated memory when filling the GTT with scratch + page addresses. + + Signed-off-by: Eric Anholt + +commit aa46089ff1f59213cfd0cee248485830b60e2ba6 +Author: Eric Anholt +Date: Thu Mar 18 12:19:37 2010 -0700 + + agp/intel: Don't do the chipset flush on Sandybridge. + + This CPU should be coherent with graphics in this direction, though + flushing graphics caches are still required. Fixes a system reset on + module load on Sandybridge with 4G+ memory. + + Signed-off-by: Eric Anholt + +commit 57cf4aac610bac23c5469d6e65d56004be4ebfab +Author: Eric Anholt +Date: Thu Mar 18 13:21:14 2010 -0700 + + drm/i915: Set up the documented clock gating on Sandybridge and Ironlake. + + Signed-off-by: Eric Anholt + +commit 3b0f299bd7b2ecc2ed71f539aff433eb74a59526 +Author: Daniel Vetter +Date: Thu Mar 18 09:22:12 2010 +0100 + + drm/intel: fix up set_tiling for untiled->tiled transition + + Bug introduced in + + commit 10ae9bd25acf394c8fa2f9d795dfa9cec4d19ed6 + Author: Daniel Vetter + Date: Mon Feb 1 13:59:17 2010 +0100 + + drm/i915: blow away userspace mappings before fence change + + The problem is that when there's no fence reg assigned and the object + is mapped at a fenceable offset in the gtt, the userspace mappings won't + be torn down. Which happens on untiled->tiled transition quite often + on 4th gen and later because there fencing does not have any special + alignment constraints (as opposed to 2nd and 3rd gen on which I've tested + the original commit). + + Bugzilla: http://bugs.freedesktop.org/show_bug.cgi?id=26993 + Signed-off-by: Daniel Vetter + Tested-by: Eric Anholt (fixes OpenArena) + Signed-off-by: Eric Anholt + +commit a84f55fc0dfb2d5d5a71b2662cb6a438955865d8 +Author: Eric Anholt +Date: Wed Mar 17 13:48:06 2010 -0700 + + drm/i915: Stop trying to use ACPI lid status to determine LVDS connection. + + I've been getting more and more quirk reports about this. 
It seems + clear at this point that other OSes are not using this for determining + whether the integrated panel should be turned on, and it is not + reliable for doing so. Better to light up an unintended panel than to + not light up the only usable output on the system. + + Signed-off-by: Eric Anholt + Acked-by: Jesse Barnes + +commit e08daaf2d1c43bf6321fd16ac07a784afcfc3a93 +Author: Robert Hooker +Date: Fri Mar 19 15:13:27 2010 -0400 + + drm/i915: Disable FBC on 915GM and 945GM. + + It is causing hangs after a suspend/resume cycle with the default + powersave=1 module option on these chipsets since 2.6.32-rc. + + BugLink: http://bugs.launchpad.net/bugs/492392 + Signed-off-by: Robert Hooker + Acked-by: Jesse Barnes + Signed-off-by: Eric Anholt + +commit c438e43c09f6c2afe32571a4096e27dbd221a616 +Author: Daniel Vetter +Date: Mon Mar 8 13:35:02 2010 +0100 + + drm/i915: introduce to_intel_bo helper + + This is a purely cosmetic change to make changes in this area easier. + And hey, it's not only clearer and typechecked, but actually shorter, + too! + + [anholt: To clarify, this is a change to let us later make + drm_i915_gem_object subclass drm_gem_object, instead of having + drm_gem_object have a pointer to i915's private data] + + Signed-off-by: Daniel Vetter + Acked-by: Dave Airlie + Signed-off-by: Eric Anholt + +commit 6a891482640b165ac350b83d8f0446f6b09a1a3d +Author: Daniel Vetter +Date: Fri Mar 19 21:46:23 2010 +0100 + + agp/intel: intel_845_driver is an agp driver! + + ... not a GTT driver. So the additional chipset flush introduced in + + commit 2162e6a2b0cd5acbb9bd8a3c94e1c1269b078295 + Author: Dave Airlie + Date: Wed Nov 21 16:36:31 2007 +1000 + + agp/intel: Add chipset flushing support for i8xx chipsets. + + to fix a GTT problem makes absolutely no sense. If this would really be needed + for AGP chipsets, too, we should add it to all i8xx agp drivers, not just one. + + Signed-off-by: Daniel Vetter + Signed-off-by: Eric Anholt + +commit d6a705b18cf0635af7c7b033e46051d9ed62df64 +Author: Eric Anholt +Date: Thu Mar 25 11:11:14 2010 -0700 + + drm/i915: Rename intel_output to intel_encoder. + + The intel_output naming is inherited from the UMS code, which had a + structure of screen -> CRTC -> output. The DRM code has an additional + notion of encoder/connector, so the structure is screen -> CRTC -> + encoder -> connector. This is a useful structure for SDVO encoders + which can support multiple connectors (each of which requires + different programming in the one encoder and could be connected to + different CRTCs), or for DVI-I, where multiple encoders feed into the + connector for whether it's used for digital or analog. Most of our + code is encoder-related, so transition it to talking about encoders + before we start trying to distinguish connectors. + + This patch is produced by sed s/intel_output/intel_encoder/ over the + driver. + + Signed-off-by: Eric Anholt + +commit 9feb424d3e107e74e2ed9dfbce6c34a20d3eff0d +Author: Eric Anholt +Date: Thu Mar 25 11:48:48 2010 -0700 + + drm/i915: Rename many remaining uses of "output" to encoder or connector. + + Signed-off-by: Eric Anholt + +commit fdb1359d8e086da1139424be81dba17596b59c06 +Author: Stefan Bader +Date: Mon Mar 29 17:53:12 2010 +0200 + + drm/i915: Add no_lvds entry for the Clientron U800 + + BugLink: http://bugs.launchpad.net/ubuntu/bugs/544671 + + This system claims to have a LVDS but has not. 
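    (The driver keeps a DMI-keyed table of machines whose VBT wrongly claims an
    LVDS panel; an entry for a board like this typically looks like the sketch
    below. The callback name and match strings are illustrative assumptions,
    not copied from the patch.)

    /*
     * Sketch of a DMI-based "no LVDS" quirk entry.  The callback and the
     * exact DMI match strings are assumptions for illustration; the real
     * patch adds an entry to the driver's existing quirk table.
     */
    #include <linux/kernel.h>
    #include <linux/dmi.h>

    static int no_lvds_dmi_callback(const struct dmi_system_id *id)
    {
        pr_notice("LVDS quirk: %s reports a non-existent panel\n", id->ident);
        return 1;
    }

    static const struct dmi_system_id no_lvds_quirks[] = {
        {
            .callback = no_lvds_dmi_callback,
            .ident = "Clientron U800",
            .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),   /* assumed strings */
                DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
            },
        },
        { }    /* terminator */
    };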
+ + Signed-off-by: Stephane Graber + Signed-off-by: Stefan Bader + CC: stable@kernel.org + Signed-off-by: Eric Anholt + +commit cdff9756ece2333713af35eedefffc58721a0eab +Author: Zhao Yakui +Date: Wed Apr 7 17:11:22 2010 +0800 + + drm/i915: Ignore LVDS EDID when it is unavailabe or invalid + + This trys to shut up complains about invalid LVDS EDID during + mode probe, but uses fixed panel mode directly for panels with + broken EDID. + + https://bugs.freedesktop.org/show_bug.cgi?id=23099 + https://bugs.freedesktop.org/show_bug.cgi?id=26395 + + Signed-off-by: Zhao Yakui + Tested-by: Sitsofe Wheeler + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit 827b98413e1ca8c3657fe83b39671c87503702b8 +Author: Adam Jackson +Date: Fri Apr 16 18:20:57 2010 -0400 + + drm/i915: Attempt to fix watermark setup on 85x (v2) + + IS_MOBILE() catches 85x, so we'd always try to use the 9xx FIFO sizing; + since there's an explicit 85x version, this seems wrong. + + v2: Handle 830m correctly too. + + Signed-off-by: Adam Jackson + Reviewed-by: Eric Anholt + Signed-off-by: Eric Anholt + +commit 5c9713caf3a211c7af399036dbc70defa4df09a4 +Author: Adam Jackson +Date: Thu Apr 15 14:03:30 2010 -0400 + + drm/i915: Fix 82854 PCI ID, and treat it like other 85X + + pci.ids and the datasheet both say it's 358e, not 35e8. + + Signed-off-by: Adam Jackson + Signed-off-by: Eric Anholt + +commit 6a399e79b2e61ca95a8b88a6f49e1382aea23f75 +Author: Zhang Rui +Date: Mon Mar 29 15:12:16 2010 +0800 + + drm/i915: set DIDL using the ACPI video output device _ADR method return. + + we used to set the DIDL in the output device detected order. + But some BIOSes requires it to be initialized in the ACPI device order. + e.g. the value of the first field in DIDL stands for the first + ACPI video output device in ACPI namespace. + + Now we initialize the DIDL using the device id, i.e. _ADR return value, + of each ACPI video device, if it is not 0. + https://bugzilla.kernel.org/show_bug.cgi?id=15054 + + Signed-off-by: Zhang Rui + Signed-off-by: Eric Anholt + +commit fabde8feddccbfc88c48e3e1c3e97d58094988c8 +Author: Daniel Vetter +Date: Sat Apr 17 15:12:03 2010 +0200 + + drm/i915: fix tiling limits for i915 class hw v2 + + Current code is definitely crap: Largest pitch allowed spills into + the TILING_Y bit of the fence registers ... :( + + I've rewritten the limits check under the assumption that 3rd gen hw + has a 3d pitch limit of 8kb (like 2nd gen). This is supported by an + otherwise totally misleading XXX comment. + + This bug mostly resulted in tiling-corrupted pixmaps because the kernel + allowed too wide buffers to be tiled. Bug brought to the light by the + xf86-video-intel 2.11 release because that unconditionally enabled + tiling for pixmaps, relying on the kernel to check things. Tiling for + the framebuffer was not affected because the ddx does some additional + checks there ensure the buffer is within hw-limits. + + v2: Instead of computing the value that would be written into the + hw fence registers and then checking the limits simply check whether + the stride is above the 8kb limit. To better document the hw, add + some WARN_ONs in i915_write_fence_reg like I've done for the i830 + case (using the right limits). 
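    (In isolation, the simplified check described above -- reject tiling when the
    stride exceeds the 8KiB limit -- boils down to something like this sketch;
    the driver's real check also enforces tile-width alignment rules.)

    /*
     * Simplified stand-alone sketch of the stride check described above.
     * Assumes the 8KiB tiled-pitch limit for gen2/gen3 hardware; alignment
     * constraints are deliberately omitted.
     */
    #include <stdbool.h>
    #include <stdint.h>

    #define GEN3_MAX_TILED_PITCH (8u * 1024u) /* 8KiB, per the description above */

    static bool gen3_stride_ok_for_tiling(uint32_t stride)
    {
        return stride != 0 && stride <= GEN3_MAX_TILED_PITCH;
    }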
+ + Signed-off-by: Daniel Vetter + Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=27449 + Tested-by: Alexander Lam + Cc: stable@kernel.org + Signed-off-by: Eric Anholt + +commit d69abbbe43a7697ed7c12a7c57008023e92cffd2 +Author: Li Zefan +Date: Thu Mar 11 16:41:45 2010 +0800 + + drm/i915: Convert some trace events to DEFINE_TRACE + + Use DECLARE_EVENT_CLASS to remove duplicate code: + + text data bss dec hex filename + 14655 2732 15 17402 43fa i915_trace_points.o.orig + 11625 2732 10 14367 381f i915_trace_points.o + + 8 events are converted: + + i915_gem_object: i915_gem_object_{unbind, destroy} + i915_gem_request: i915_gem_request_{complete, retire, wait_begin, wait_end} + i915_ring: i915_ring_{wait_begin, wait_end} + + No functional change. + + Signed-off-by: Li Zefan + Signed-off-by: Eric Anholt + +commit 2f99e2fb0673252dc3559ed69ff3ec96fb579166 +Author: Zhenyu Wang +Date: Tue Mar 30 14:39:26 2010 +0800 + + drm/i915: use encoder_list for hotplug callback + + Instead of walking through drm connector_list uses encoder_list + for calling hotplug functions which is consistent with intel display + hotplug reporting. + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit fefa3dcedc5f630f653bfd6b39ff172a0e5e171e +Author: Zhenyu Wang +Date: Tue Mar 30 14:39:27 2010 +0800 + + drm/i915: more conversion from connector_list walk to encoder_list + + What we really want is encoder info instead of connector, so change + some more list walk in pipeline setup functions from connector_list + to encoder_list. + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit 49c4aec093be9e3a9fa046211a11b88949cf6792 +Author: Zhenyu Wang +Date: Tue Mar 30 14:39:28 2010 +0800 + + drm/i915: Add new 'intel_connector' structure + + This adds new structure of intel_connector to present drm's + connector object, which is used to convert from origin single + output into encoder/connector model. + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit 2100929e476cc423d08478432b04ac887e7b6b3b +Author: Zhenyu Wang +Date: Tue Mar 30 14:39:29 2010 +0800 + + drm/i915: Add new helper to return current attached encoder for connector + + For introducing splitted encoder/connector structure, this helper will return + connector's attached encoder when needed. + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit 0059113e16782b363c65dcf6f3041b37783ba190 +Author: Zhenyu Wang +Date: Tue Mar 30 14:39:30 2010 +0800 + + drm/i915: passing drm connector param for load detection + + In load detection, connector's encoder assignment must be kept + consistent for proper mode setting, and this makes connector as + explicit parameter for load detect function to not require single + data structure to hold both encoder and connector reference, ease + the transition for splitted encoder/connector model. + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit 17b2b7b441b69c8ddaff50332e2c55a004c791cb +Author: Zhenyu Wang +Date: Tue Mar 30 14:39:31 2010 +0800 + + drm/i915: change intel_ddc_get_modes() function parameters + + This one replaces original param for intel_ddc_get_modes() with + DRM connector and i2c bus adapter instead. With explicit params, + we won't require that a single driver structure must hold connector + and DDC bus reference, which ease the conversion to splitted encoder/ + connector model. + + It also clears up for some cases that we would steal other DDC bus + for mode probe, like VGA analog DDC probe for DVI-I. 
Also it fixed + a bug in old DVI-I probe handling, that failed to restore origin + analog GPIO port. + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit 020bcd645be8b7a0837b7a0593430f26ad121844 +Author: Eric Anholt +Date: Fri Apr 2 15:24:27 2010 -0700 + + drm/i915: Remove dead KMS encoder save/restore code. + + This was brought over from UMS, and used for a while until we decided + that drm_helper_resume_force_mode was easier and more reliable, since + it didn't require duplicating all the code deleted here. We just + forgot to delete all that junk for a while. + +commit a51895bee213fc506644305d65f8a7e9bc0a42d7 +Author: Adam Jackson +Date: Wed Mar 31 11:41:51 2010 -0400 + + drm/i915: Allow LVDS on pipe A on gen4+ + + The gen4 docs say it works, so why not. Tested on Ironlake. + + Signed-off-by: Adam Jackson + Signed-off-by: Eric Anholt + +commit f8c7768b50a68991f118aaa9c27039a9f1dfaa16 +Author: Eric Anholt +Date: Fri Apr 9 14:18:55 2010 -0700 + + drm/i915: Clear the LVDS pipe B select bit when moving the LVDS to pipe A. + + Based on a patch by Zhao Yakui. + + Signed-off-by: Eric Anholt + +commit 4b75c3a050bccdd35f4298514571c024bf51c33e +Author: Zhao Yakui +Date: Wed Apr 7 17:11:21 2010 +0800 + + drm/i915: Fix legacy BLC event for pipe A + + OpRegion event on 965G requires legacy BLC event enabled in pipe stat. As LVDS + could be on either pipe now, we should enable BLC event on both pipe. If fail to + do so, we couldn't handle the brightness request triggered from graphics + opregion. + + Signed-off-by: Zhao Yakui + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit 1d3d609b68107aad7076244742ddb3bcdcd282eb +Author: Zhenyu Wang +Date: Wed Apr 7 16:15:52 2010 +0800 + + drm/i915: Sandybridge has no integrated TV + + Integrated TV is deprecated in new chips from Ironlake. + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit c4418d1fcf08d0ffde92c96f235ac6a23864d709 +Author: Zhenyu Wang +Date: Wed Apr 7 16:15:53 2010 +0800 + + drm/i915: Probe for PCH chipset type + + PCH is the new name for south bridge from Ironlake/Sandybridge, + which contains most of the display outputs except eDP. This one + adds a probe function to detect current PCH type, and method to + detect Cougarpoint PCH. + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit 528bcb3d7676df9cb29b11b73fcb53be10f8240e +Author: Zhenyu Wang +Date: Wed Apr 7 16:15:54 2010 +0800 + + drm/i915: Support for Cougarpoint PCH display pipeline + + Cougarpoint is the new PCH for Sandybridge CPU. This one resolves the + chipset change for display pipeline compared to previous Ibexpeak PCH. + + Sandybridge/Cougarpoint has different FDI training parameters, so this also + makes seperate FDI training functions for IBX and CPT. Other change includes + new transcoder DPLL select function to set which DPLL for transcoder to pick + up. + + And with another new transcoder C introduced in Cougarpoint, each connector + has new transcoder select bits. This one adds that change to light up VGA. + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit c19eb88cd3867b0e5850c322ffeeb2b0f95a61fa +Author: Zhenyu Wang +Date: Wed Apr 7 16:15:55 2010 +0800 + + drm/i915: Fix CRT force detect on Cougarpoint + + To make CRT force detect reliable on Cougarpoint, we need to + disable DAC before force detect, and restore back when trigger + is completed. 
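    (The shape of that sequence is simply save, disable, trigger, restore; a
    schematic, compile-only sketch follows. The register accessors and bit
    names are placeholders, not the driver's actual ADPA programming.)

    /*
     * Schematic of the save/disable -> force-detect -> restore sequence
     * described above.  read_reg()/write_reg() and DAC_ENABLE stand in for
     * the driver's real register accessors and bits.
     */
    #include <stdint.h>

    #define DAC_ENABLE (1u << 31)   /* placeholder bit */

    static uint32_t fake_adpa;      /* stands in for the hardware register */

    static uint32_t read_reg(void)        { return fake_adpa; }
    static void write_reg(uint32_t v)     { fake_adpa = v; }
    static void run_force_detect(void)    { /* set trigger bits and poll for completion */ }

    static void crt_force_detect(void)
    {
        uint32_t saved = read_reg();

        write_reg(saved & ~DAC_ENABLE); /* disable the DAC first */
        run_force_detect();             /* issue the trigger and wait */
        write_reg(saved);               /* restore the original DAC state */
    }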
+ + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit bbe5385748d3514f2ec821af6b0cd1e8b8126788 +Author: Zhenyu Wang +Date: Wed Apr 7 16:15:56 2010 +0800 + + drm/i915: enable LVDS on Cougarpoint + + Fix the transcoder select bit for LVDS on CPT. + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit 9397e6409c31333c24f2120bc1242e57409b17b1 +Author: Zhenyu Wang +Date: Wed Apr 7 16:15:57 2010 +0800 + + drm/i915: enable HDMI on Cougarpoint + + Fix transcoder select bit for HDMI on CPT. + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit b0280fea16c1993823e96ea60732471a673930a9 +Author: Zhenyu Wang +Date: Thu Apr 8 09:43:27 2010 +0800 + + drm/i915: enable DP/eDP for Sandybridge/Cougarpoint + + DP on Cougarpoint has new training pattern definitions, and + new transcoder DP control register is used to determine the mapping + for transcoder and DP digital output. And eDP for Sandybridge has + new voltage and pre-emphasis level definitions. + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit 34400e21a2ec1862a6aea1d415e16a8973d08009 +Author: Adam Jackson +Date: Mon Apr 5 17:58:00 2010 -0400 + + drm/i915: Un-magic a DPCD register write + + Signed-off-by: Adam Jackson + Signed-off-by: Eric Anholt + +commit d529f2bd3e73af3f58dacc142c8df96b2d8ac3de +Author: Adam Jackson +Date: Mon Apr 5 17:57:59 2010 -0400 + + drm/i915: Set sync polarity correctly on DisplayPort + + Probably only matters for format-converting dongles, but might as well + get it right all the time. + + Signed-off-by: Adam Jackson + Signed-off-by: Eric Anholt + +commit a0cf638b48c4c19531c02b546dc0a65947512588 +Author: Zhenyu Wang +Date: Mon Mar 29 15:53:23 2010 +0800 + + drm/i915: convert VGA driver to new encoder/connector structure + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit 19dc930333364cee93468b49f96d93411b2dab93 +Author: Zhenyu Wang +Date: Mon Mar 29 16:40:50 2010 +0800 + + drm/i915: convert LVDS driver to new encoder/connector structure + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit fdb747792398be0bc2574b1e5bb0375a6e1ba709 +Author: Zhenyu Wang +Date: Mon Mar 29 15:57:42 2010 +0800 + + drm/i915: convert HDMI driver to new encoder/connector structure + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit 76641985c72921092cb831dd09bc634c2fd89f71 +Author: Zhenyu Wang +Date: Mon Mar 29 16:13:57 2010 +0800 + + drm/i915: convert DP/eDP driver to new encoder/connector structure + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit ce8122f57adb42e80d2c62a6dd2df5660331c769 +Author: Zhenyu Wang +Date: Mon Mar 29 16:17:31 2010 +0800 + + drm/i915: convert DVO driver to new encoder/connector structure + + Also remove old UMS copied code for get_crtc. + + Signed-off-by: Zhenyu Wang + Signed-off-by: Eric Anholt + +commit 6cc8f013f8ebd91eea0667853ed765e1a6532649 +Author: Zhenyu Wang +Date: Mon Mar 29 21:22:55 2010 +0800 + + drm/i915: convert SDVO driver to new encoder/connector structure + + Signed-off-by: Zhenyu Wang + +commit f870c2dc4e9ca7609d9a0948cafb80b5af84158c +Author: Zhenyu Wang +Date: Mon Mar 29 16:38:44 2010 +0800 + + drm/i915: convert TV driver to new encoder/connector structure + + Signed-off-by: Zhenyu Wang + +commit 1be1fd9444588e710ba251400249deda17e6801b +Author: Zhenyu Wang +Date: Mon Mar 29 16:44:15 2010 +0800 + + drm/i915: remove connector object in old output structure + + As all display drivers have been converted, remove the left reference + for connector object in old structure. 
+ + Signed-off-by: Zhenyu Wang + +commit f9949e3847426bd72beb6be9361a0bed32992ac5 +Author: Zhenyu Wang +Date: Mon Mar 29 16:54:31 2010 +0800 + + drm/i915: remove unused intel_pipe_get_connector() + + Added by original eDP support patch, not used any more. + + Signed-off-by: Zhenyu Wang + +commit 50b923cf1150fdf88312fb8616114e3fb5e57e57 +Author: Zhenyu Wang +Date: Tue Mar 30 14:06:33 2010 +0800 + + drm/i915: implement multifunction SDVO device support + + With new intel_encoder/intel_connector structure change, each supported + connector type on SDVO device will be created as a new 'intel_connector', + and all attached to one 'intel_encoder' for its SDVO port. + + The SDVO encoder will handle SDVO protocol stuff, and each connector does + its own part of work now, like detection is only to check if current active + output is itself, etc. + + Update since last submit: + - Fixed SDVO TV property creation failure by incorrect set target output call + + Signed-off-by: Zhenyu Wang + +commit be7b7e1da367289b0998343af837dad531da7fd6 +Author: Zhenyu Wang +Date: Mon Mar 29 23:16:13 2010 +0800 + + Revert "drm/i915: Use a dmi quirk to skip a broken SDVO TV output." + + This reverts commit 6070a4a928f8c92b9fae7d6717ebbb05f425d6b2. + + The quirk for this SDVO device on IBM specific board is just a hack + in old code which showed the broken multifunction SDVO support in + the driver. Multifunction SDVO patch provided the right fix for it. + + Signed-off-by: Zhenyu Wang + +commit fa4cee3074fea74b556864ce25983b79598ba743 +Author: Luca Tettamanti +Date: Thu Apr 8 21:41:59 2010 +0200 + + drm/i915: do not read uninitialized ->dev_private + + ->dev_private at that point is NULL and is initialied only a few lines + later. + + Signed-off-by: Luca Tettamanti + Signed-off-by: Eric Anholt + +commit 52eacfb9897f9a36cc93b587c57b922b8507d900 +Author: Zhao Yakui +Date: Tue Mar 30 15:11:33 2010 +0800 + + drm/i915: Add support of SDVO on Ibexpeak PCH + + SDVO on Ibexpeak PCH with Ironlake is multiplexed with + HDMIB port, and only has SDVOB port. + + Signed-off-by: Zhao Yakui + Signed-off-by: Zhenyu Wang + +commit 9df31d77abdd69427324bd0b4cd7f52eecb4a95a +Author: Zhao Yakui +Date: Tue Mar 30 15:15:02 2010 +0800 + + drm/i915: Fix the incorrect argument for SDVO SET_TV_format command + + Otherwise it will cause that S-video output becomes black/white when + switching to other TV format. + + http://bugs.freedesktop.org/show_bug.cgi?id=23916 + + Signed-off-by: Zhao Yakui + Tested-by: Arnold + Tested-by: Bazin + Tested-by: Nigel + Signed-off-by: Zhenyu Wang + +commit 064e7eadac1c51ce7a675da8b750defd6e82cb4b +Author: Zhao Yakui +Date: Fri Mar 19 17:05:10 2010 +0800 + + drm/i915: Only save/restore FBC on the platform that supports FBC + + Signed-off-by: Zhao Yakui + Signed-off-by: Zhenyu Wang + +commit 8860a05210d493379a9857e10b051f54097d6078 +Author: Zhao Yakui +Date: Mon Mar 22 22:45:36 2010 +0800 + + drm/i915: Move Pineview CxSR and watermark code into update_wm hook. + + Previously, after setting up the Pineview CxSR state, i9xx_update_wm would + get called and overwrite our state. + + BTW: We will disable the self-refresh and never enable it any more if we + can't find the appropriate the latency on pineview plaftorm. In such case + the update_wm callback will be NULL. + + The bitmask macro is also defined to access the corresponding fifo + watermark register. 
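    (Those per-plane FIFO watermark fields are shifted bitfields packed into a
    shared register, so the accessor macro amounts to a mask/shift pair; a
    generic sketch follows, with the field position and width chosen as
    placeholders rather than the actual watermark register layout.)

    /*
     * Generic mask/shift helpers of the kind used to update one FIFO
     * watermark field in a shared register.  Field position and width are
     * placeholders, not real hardware values.
     */
    #include <stdint.h>

    #define WM_FIELD_SHIFT 8
    #define WM_FIELD_MASK  (0x3fu << WM_FIELD_SHIFT)

    static inline uint32_t wm_field_update(uint32_t reg, uint32_t wm)
    {
        return (reg & ~WM_FIELD_MASK) | ((wm << WM_FIELD_SHIFT) & WM_FIELD_MASK);
    }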
+ + Signed-off-by: Zhao Yakui + Signed-off-by: Zhenyu Wang + +commit 8ab60b6bda618cf0ad0cc628bd174a9b9338dd64 +Author: Zhenyu Wang +Date: Thu Apr 1 13:07:53 2010 +0800 + + drm/i915: Add the support of memory self-refresh on Ironlake + + Update the self-refresh watermark for display plane/cursor and enable + the memory self-refresh on Ironlake. The watermark is also updated for + the active display plane. + + More than 1W idle power is saved on one Ironlake laptop after enabling + memory self-refresh. + + Signed-off-by: Zhao Yakui + Signed-off-by: Zhenyu Wang + +commit 55d812048fff0c36c2aed8165dcbd5578adb5807 +Author: Adam Jackson +Date: Mon Apr 12 11:38:44 2010 -0400 + + drm/i915/pch: Use minimal number of FDI lanes (v2) + + This should be a small power savings. Tested on Lenovo T410 (Ironlake), LVDS + VGA and DisplayPort, up to 1920x1200R. + + v2: Add Sandybridge support, fix obvious math error. + + Acked-by: Zhenyu Wang + Signed-off-by: Adam Jackson + Signed-off-by: Eric Anholt + +commit 12f713e6d7a31fb9cad311bbceae592ce1bb77a0 +Author: Karsten Wiese +Date: Sat Mar 27 22:48:33 2010 +0100 + + drm/i915: Don't touch PORT_HOTPLUG_EN in intel_dp_detect() + + PORT_HOTPLUG_EN has allready been setup in i915_driver_irq_postinstall(), + when intel_dp_detect() runs. + + Delete the DP[BCD]_HOTPLUG_INT_EN defines, they are not referenced anymore. + + I found this while searching for a fix for + https://bugzilla.redhat.com/show_bug.cgi?id=528312 + + Signed-off-by: Karsten Wiese + Signed-off-by: Eric Anholt + +diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c +index 3999a5f..64f8397 100644 +--- a/drivers/char/agp/intel-agp.c ++++ b/drivers/char/agp/intel-agp.c +@@ -10,6 +10,9 @@ + #include + #include "agp.h" + ++int intel_agp_enabled; ++EXPORT_SYMBOL(intel_agp_enabled); ++ + /* + * If we have Intel graphics, we're not going to have anything other than + * an Intel IOMMU. 
So make the correct use of the PCI DMA API contingent +@@ -64,6 +67,10 @@ + #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 + #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a + #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 ++#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 ++#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 ++#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 ++#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 + + /* cover 915 and 945 variants */ + #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ +@@ -89,6 +96,9 @@ + #define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) + ++#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) ++ + #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ +@@ -98,7 +108,8 @@ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB) ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ ++ IS_SNB) + + extern int agp_memory_reserved; + +@@ -147,6 +158,29 @@ extern int agp_memory_reserved; + #define INTEL_I7505_AGPCTRL 0x70 + #define INTEL_I7505_MCHCFG 0x50 + ++#define SNB_GMCH_CTRL 0x50 ++#define SNB_GMCH_GMS_STOLEN_MASK 0xF8 ++#define SNB_GMCH_GMS_STOLEN_32M (1 << 3) ++#define SNB_GMCH_GMS_STOLEN_64M (2 << 3) ++#define SNB_GMCH_GMS_STOLEN_96M (3 << 3) ++#define SNB_GMCH_GMS_STOLEN_128M (4 << 3) ++#define SNB_GMCH_GMS_STOLEN_160M (5 << 3) ++#define SNB_GMCH_GMS_STOLEN_192M (6 << 3) ++#define SNB_GMCH_GMS_STOLEN_224M (7 << 3) ++#define SNB_GMCH_GMS_STOLEN_256M (8 << 3) ++#define SNB_GMCH_GMS_STOLEN_288M (9 << 3) ++#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) ++#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) ++#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) ++#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) ++#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) ++#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) ++#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) ++#define SNB_GTT_SIZE_0M (0 << 8) ++#define SNB_GTT_SIZE_1M (1 << 8) ++#define SNB_GTT_SIZE_2M (2 << 8) ++#define SNB_GTT_SIZE_MASK (3 << 8) ++ + static const struct aper_size_info_fixed intel_i810_sizes[] = + { + {64, 16384, 4}, +@@ -293,6 +327,13 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem, + off_t pg_start, int mask_type) + { + int i, j; ++ u32 cache_bits = 0; ++ ++ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) ++ { ++ cache_bits = I830_PTE_SYSTEM_CACHED; ++ } + + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { + writel(agp_bridge->driver->mask_memory(agp_bridge, +@@ -613,7 +654,7 @@ static struct aper_size_info_fixed intel_i830_sizes[] = + static void intel_i830_init_gtt_entries(void) + { + u16 gmch_ctrl; +- int gtt_entries; ++ int gtt_entries = 0; + u8 rdct; + int local = 0; + static const int ddt[4] = { 0, 16, 32, 64 }; +@@ -705,6 +746,63 @@ static void intel_i830_init_gtt_entries(void) + gtt_entries = 0; + break; + } ++ } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 
|| ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) { ++ /* ++ * SandyBridge has new memory control reg at 0x50.w ++ */ ++ u16 snb_gmch_ctl; ++ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); ++ switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) { ++ case SNB_GMCH_GMS_STOLEN_32M: ++ gtt_entries = MB(32) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_64M: ++ gtt_entries = MB(64) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_96M: ++ gtt_entries = MB(96) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_128M: ++ gtt_entries = MB(128) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_160M: ++ gtt_entries = MB(160) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_192M: ++ gtt_entries = MB(192) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_224M: ++ gtt_entries = MB(224) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_256M: ++ gtt_entries = MB(256) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_288M: ++ gtt_entries = MB(288) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_320M: ++ gtt_entries = MB(320) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_352M: ++ gtt_entries = MB(352) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_384M: ++ gtt_entries = MB(384) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_416M: ++ gtt_entries = MB(416) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_448M: ++ gtt_entries = MB(448) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_480M: ++ gtt_entries = MB(480) - KB(size); ++ break; ++ case SNB_GMCH_GMS_STOLEN_512M: ++ gtt_entries = MB(512) - KB(size); ++ break; ++ } + } else { + switch (gmch_ctrl & I855_GMCH_GMS_MASK) { + case I855_GMCH_GMS_STOLEN_1M: +@@ -1115,6 +1213,9 @@ static void intel_i9xx_setup_flush(void) + if (intel_private.ifp_resource.start) + return; + ++ if (IS_SNB) ++ return; ++ + /* setup a resource for this object */ + intel_private.ifp_resource.name = "Intel Flush Page"; + intel_private.ifp_resource.flags = IORESOURCE_MEM; +@@ -1353,6 +1454,8 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, + + static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) + { ++ u16 snb_gmch_ctl; ++ + switch (agp_bridge->dev->device) { + case PCI_DEVICE_ID_INTEL_GM45_HB: + case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB: +@@ -1366,6 +1469,25 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) + case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB: + *gtt_offset = *gtt_size = MB(2); + break; ++ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: ++ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: ++ *gtt_offset = MB(2); ++ ++ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); ++ switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { ++ default: ++ case SNB_GTT_SIZE_0M: ++ printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); ++ *gtt_size = MB(0); ++ break; ++ case SNB_GTT_SIZE_1M: ++ *gtt_size = MB(1); ++ break; ++ case SNB_GTT_SIZE_2M: ++ *gtt_size = MB(2); ++ break; ++ } ++ break; + default: + *gtt_offset = *gtt_size = KB(512); + } +@@ -1701,8 +1823,6 @@ static int intel_845_configure(void) + pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1)); + /* clear any possible error conditions */ + pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c); +- +- intel_i830_setup_flush(); + return 0; + } + +@@ -2072,7 +2192,6 @@ static const struct agp_bridge_driver intel_845_driver = { + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = 
agp_generic_type_to_mask_type, +- .chipset_flush = intel_i830_chipset_flush, + }; + + static const struct agp_bridge_driver intel_850_driver = { +@@ -2345,9 +2464,9 @@ static const struct intel_driver_description { + NULL, &intel_g33_driver }, + { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", + NULL, &intel_g33_driver }, +- { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "Pineview", ++ { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150", + NULL, &intel_g33_driver }, +- { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "Pineview", ++ { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150", + NULL, &intel_g33_driver }, + { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0, + "GM45", NULL, &intel_i965_driver }, +@@ -2362,13 +2481,17 @@ static const struct intel_driver_description { + { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, + "G41", NULL, &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0, +- "Ironlake/D", NULL, &intel_i965_driver }, ++ "HD Graphics", NULL, &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, +- "Ironlake/M", NULL, &intel_i965_driver }, ++ "HD Graphics", NULL, &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, +- "Ironlake/MA", NULL, &intel_i965_driver }, ++ "HD Graphics", NULL, &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, +- "Ironlake/MC2", NULL, &intel_i965_driver }, ++ "HD Graphics", NULL, &intel_i965_driver }, ++ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0, ++ "Sandybridge", NULL, &intel_i965_driver }, ++ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0, ++ "Sandybridge", NULL, &intel_i965_driver }, + { 0, 0, 0, NULL, NULL, NULL } + }; + +@@ -2378,7 +2501,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, + struct agp_bridge_data *bridge; + u8 cap_ptr = 0; + struct resource *r; +- int i; ++ int i, err; + + cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); + +@@ -2470,7 +2593,10 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, + } + + pci_set_drvdata(pdev, bridge); +- return agp_add_bridge(bridge); ++ err = agp_add_bridge(bridge); ++ if (!err) ++ intel_agp_enabled = 1; ++ return err; + } + + static void __devexit agp_intel_remove(struct pci_dev *pdev) +@@ -2575,6 +2701,8 @@ static struct pci_device_id agp_intel_pci_table[] = { + ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), + ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), + ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), ++ ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB), ++ ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB), + { } + }; + +diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h +--- a/drivers/gpu/drm/i915/dvo.h ++++ b/drivers/gpu/drm/i915/dvo.h +@@ -70,16 +70,6 @@ struct intel_dvo_dev_ops { + void (*dpms)(struct intel_dvo_device *dvo, int mode); + + /* +- * Saves the output's state for restoration on VT switch. +- */ +- void (*save)(struct intel_dvo_device *dvo); +- +- /* +- * Restore's the output's state at VT switch. +- */ +- void (*restore)(struct intel_dvo_device *dvo); +- +- /* + * Callback for testing a video mode for a given output. 
+ * + * This function should only check for cases where a mode can't +diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c +--- a/drivers/gpu/drm/i915/dvo_ch7017.c ++++ b/drivers/gpu/drm/i915/dvo_ch7017.c +@@ -159,16 +159,7 @@ + #define CH7017_BANG_LIMIT_CONTROL 0x7f + + struct ch7017_priv { +- uint8_t save_hapi; +- uint8_t save_vali; +- uint8_t save_valo; +- uint8_t save_ailo; +- uint8_t save_lvds_pll_vco; +- uint8_t save_feedback_div; +- uint8_t save_lvds_control_2; +- uint8_t save_outputs_enable; +- uint8_t save_lvds_power_down; +- uint8_t save_power_management; ++ uint8_t dummy; + }; + + static void ch7017_dump_regs(struct intel_dvo_device *dvo); +@@ -401,39 +392,6 @@ do { \ + DUMP(CH7017_LVDS_POWER_DOWN); + } + +-static void ch7017_save(struct intel_dvo_device *dvo) +-{ +- struct ch7017_priv *priv = dvo->dev_priv; +- +- ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi); +- ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo); +- ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo); +- ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco); +- ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div); +- ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2); +- ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable); +- ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down); +- ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management); +-} +- +-static void ch7017_restore(struct intel_dvo_device *dvo) +-{ +- struct ch7017_priv *priv = dvo->dev_priv; +- +- /* Power down before changing mode */ +- ch7017_dpms(dvo, DRM_MODE_DPMS_OFF); +- +- ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi); +- ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo); +- ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo); +- ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco); +- ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div); +- ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2); +- ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable); +- ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down); +- ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management); +-} +- + static void ch7017_destroy(struct intel_dvo_device *dvo) + { + struct ch7017_priv *priv = dvo->dev_priv; +@@ -451,7 +409,5 @@ struct intel_dvo_dev_ops ch7017_ops = { + .mode_set = ch7017_mode_set, + .dpms = ch7017_dpms, + .dump_regs = ch7017_dump_regs, +- .save = ch7017_save, +- .restore = ch7017_restore, + .destroy = ch7017_destroy, + }; +diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c +--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c ++++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c +@@ -92,21 +92,10 @@ static struct ch7xxx_id_struct { + { CH7301_VID, "CH7301" }, + }; + +-struct ch7xxx_reg_state { +- uint8_t regs[CH7xxx_NUM_REGS]; +-}; +- + struct ch7xxx_priv { + bool quiet; +- +- struct ch7xxx_reg_state save_reg; +- struct ch7xxx_reg_state mode_reg; +- uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT; +- uint8_t save_TLPF, save_TCT, save_PM, save_IDF; + }; + +-static void ch7xxx_save(struct intel_dvo_device *dvo); +- + static char *ch7xxx_get_id(uint8_t vid) + { + int i; +@@ -312,42 +301,17 @@ static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode) + + static void ch7xxx_dump_regs(struct 
intel_dvo_device *dvo) + { +- struct ch7xxx_priv *ch7xxx = dvo->dev_priv; + int i; + + for (i = 0; i < CH7xxx_NUM_REGS; i++) { ++ uint8_t val; + if ((i % 8) == 0 ) + DRM_LOG_KMS("\n %02X: ", i); +- DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]); ++ ch7xxx_readb(dvo, i, &val); ++ DRM_LOG_KMS("%02X ", val); + } + } + +-static void ch7xxx_save(struct intel_dvo_device *dvo) +-{ +- struct ch7xxx_priv *ch7xxx= dvo->dev_priv; +- +- ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL); +- ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP); +- ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD); +- ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT); +- ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF); +- ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM); +- ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF); +-} +- +-static void ch7xxx_restore(struct intel_dvo_device *dvo) +-{ +- struct ch7xxx_priv *ch7xxx = dvo->dev_priv; +- +- ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL); +- ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP); +- ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD); +- ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT); +- ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF); +- ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF); +- ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM); +-} +- + static void ch7xxx_destroy(struct intel_dvo_device *dvo) + { + struct ch7xxx_priv *ch7xxx = dvo->dev_priv; +@@ -365,7 +329,5 @@ struct intel_dvo_dev_ops ch7xxx_ops = { + .mode_set = ch7xxx_mode_set, + .dpms = ch7xxx_dpms, + .dump_regs = ch7xxx_dump_regs, +- .save = ch7xxx_save, +- .restore = ch7xxx_restore, + .destroy = ch7xxx_destroy, + }; +diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c +--- a/drivers/gpu/drm/i915/dvo_ivch.c ++++ b/drivers/gpu/drm/i915/dvo_ivch.c +@@ -153,9 +153,6 @@ struct ivch_priv { + bool quiet; + + uint16_t width, height; +- +- uint16_t save_VR01; +- uint16_t save_VR40; + }; + + +@@ -405,22 +402,6 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo) + DRM_LOG_KMS("VR8F: 0x%04x\n", val); + } + +-static void ivch_save(struct intel_dvo_device *dvo) +-{ +- struct ivch_priv *priv = dvo->dev_priv; +- +- ivch_read(dvo, VR01, &priv->save_VR01); +- ivch_read(dvo, VR40, &priv->save_VR40); +-} +- +-static void ivch_restore(struct intel_dvo_device *dvo) +-{ +- struct ivch_priv *priv = dvo->dev_priv; +- +- ivch_write(dvo, VR01, priv->save_VR01); +- ivch_write(dvo, VR40, priv->save_VR40); +-} +- + static void ivch_destroy(struct intel_dvo_device *dvo) + { + struct ivch_priv *priv = dvo->dev_priv; +@@ -434,8 +415,6 @@ static void ivch_destroy(struct intel_dvo_device *dvo) + struct intel_dvo_dev_ops ivch_ops= { + .init = ivch_init, + .dpms = ivch_dpms, +- .save = ivch_save, +- .restore = ivch_restore, + .mode_valid = ivch_mode_valid, + .mode_set = ivch_mode_set, + .detect = ivch_detect, +diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c +--- a/drivers/gpu/drm/i915/dvo_sil164.c ++++ b/drivers/gpu/drm/i915/dvo_sil164.c +@@ -58,17 +58,9 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + #define SIL164_REGC 0x0c + +-struct sil164_save_rec { +- uint8_t reg8; +- uint8_t reg9; +- uint8_t regc; +-}; +- + struct sil164_priv { + //I2CDevRec d; + bool quiet; +- struct sil164_save_rec save_regs; +- struct sil164_save_rec mode_regs; + }; + + #define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr)) +@@ -252,34 +244,6 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo) + DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val); + } + +-static void sil164_save(struct intel_dvo_device *dvo) +-{ +- struct sil164_priv *sil= dvo->dev_priv; +- +- if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8)) +- return; +- +- if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9)) +- return; +- +- if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc)) +- return; +- +- return; +-} +- +-static void sil164_restore(struct intel_dvo_device *dvo) +-{ +- struct sil164_priv *sil = dvo->dev_priv; +- +- /* Restore it powered down initially */ +- sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1); +- +- sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9); +- sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc); +- sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8); +-} +- + static void sil164_destroy(struct intel_dvo_device *dvo) + { + struct sil164_priv *sil = dvo->dev_priv; +@@ -297,7 +261,5 @@ struct intel_dvo_dev_ops sil164_ops = { + .mode_set = sil164_mode_set, + .dpms = sil164_dpms, + .dump_regs = sil164_dump_regs, +- .save = sil164_save, +- .restore = sil164_restore, + .destroy = sil164_destroy, + }; +diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c +--- a/drivers/gpu/drm/i915/dvo_tfp410.c ++++ b/drivers/gpu/drm/i915/dvo_tfp410.c +@@ -86,16 +86,8 @@ + #define TFP410_V_RES_LO 0x3C + #define TFP410_V_RES_HI 0x3D + +-struct tfp410_save_rec { +- uint8_t ctl1; +- uint8_t ctl2; +-}; +- + struct tfp410_priv { + bool quiet; +- +- struct tfp410_save_rec saved_reg; +- struct tfp410_save_rec mode_reg; + }; + + static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) +@@ -293,28 +285,6 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo) + DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val); + } + +-static void tfp410_save(struct intel_dvo_device *dvo) +-{ +- struct tfp410_priv *tfp = dvo->dev_priv; +- +- if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1)) +- return; +- +- if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2)) +- return; +-} +- +-static void tfp410_restore(struct intel_dvo_device *dvo) +-{ +- struct tfp410_priv *tfp = dvo->dev_priv; +- +- /* Restore it powered down initially */ +- tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1); +- +- tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2); +- tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1); +-} +- + static void tfp410_destroy(struct intel_dvo_device *dvo) + { + struct tfp410_priv *tfp = dvo->dev_priv; +@@ -332,7 +302,5 @@ struct intel_dvo_dev_ops tfp410_ops = { + .mode_set = tfp410_mode_set, + .dpms = tfp410_dpms, + .dump_regs = tfp410_dump_regs, +- .save = tfp410_save, +- .restore = tfp410_restore, + .destroy = tfp410_destroy, + }; +diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c +--- a/drivers/gpu/drm/i915/i915_debugfs.c ++++ b/drivers/gpu/drm/i915/i915_debugfs.c +@@ -162,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + +- if (!IS_IRONLAKE(dev)) { ++ if 
(!HAS_PCH_SPLIT(dev)) { + seq_printf(m, "Interrupt enable: %08x\n", + I915_READ(IER)); + seq_printf(m, "Interrupt identity: %08x\n", +@@ -225,7 +225,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) + } else { + struct drm_i915_gem_object *obj_priv; + +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + seq_printf(m, "Fenced object[%2d] = %p: %s " + "%08x %08zx %08x %s %08x %08x %d", + i, obj, get_pin_flag(obj_priv), +@@ -350,6 +350,36 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) + return 0; + } + ++static const char *pin_flag(int pinned) ++{ ++ if (pinned > 0) ++ return " P"; ++ else if (pinned < 0) ++ return " p"; ++ else ++ return ""; ++} ++ ++static const char *tiling_flag(int tiling) ++{ ++ switch (tiling) { ++ default: ++ case I915_TILING_NONE: return ""; ++ case I915_TILING_X: return " X"; ++ case I915_TILING_Y: return " Y"; ++ } ++} ++ ++static const char *dirty_flag(int dirty) ++{ ++ return dirty ? " dirty" : ""; ++} ++ ++static const char *purgeable_flag(int purgeable) ++{ ++ return purgeable ? " purgeable" : ""; ++} ++ + static int i915_error_state(struct seq_file *m, void *unused) + { + struct drm_info_node *node = (struct drm_info_node *) m->private; +@@ -357,6 +387,7 @@ static int i915_error_state(struct seq_file *m, void *unused) + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_error_state *error; + unsigned long flags; ++ int i, page, offset, elt; + + spin_lock_irqsave(&dev_priv->error_lock, flags); + if (!dev_priv->first_error) { +@@ -368,6 +399,7 @@ static int i915_error_state(struct seq_file *m, void *unused) + + seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, + error->time.tv_usec); ++ seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); + seq_printf(m, "EIR: 0x%08x\n", error->eir); + seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er); + seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); +@@ -379,6 +411,59 @@ static int i915_error_state(struct seq_file *m, void *unused) + seq_printf(m, " INSTPS: 0x%08x\n", error->instps); + seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); + } ++ seq_printf(m, "seqno: 0x%08x\n", error->seqno); ++ ++ if (error->active_bo_count) { ++ seq_printf(m, "Buffers [%d]:\n", error->active_bo_count); ++ ++ for (i = 0; i < error->active_bo_count; i++) { ++ seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s", ++ error->active_bo[i].gtt_offset, ++ error->active_bo[i].size, ++ error->active_bo[i].read_domains, ++ error->active_bo[i].write_domain, ++ error->active_bo[i].seqno, ++ pin_flag(error->active_bo[i].pinned), ++ tiling_flag(error->active_bo[i].tiling), ++ dirty_flag(error->active_bo[i].dirty), ++ purgeable_flag(error->active_bo[i].purgeable)); ++ ++ if (error->active_bo[i].name) ++ seq_printf(m, " (name: %d)", error->active_bo[i].name); ++ if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE) ++ seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg); ++ ++ seq_printf(m, "\n"); ++ } ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) { ++ if (error->batchbuffer[i]) { ++ struct drm_i915_error_object *obj = error->batchbuffer[i]; ++ ++ seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset); ++ offset = 0; ++ for (page = 0; page < obj->page_count; page++) { ++ for (elt = 0; elt < PAGE_SIZE/4; elt++) { ++ seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); ++ offset += 4; ++ } ++ } ++ } ++ } ++ ++ if (error->ringbuffer) { ++ struct drm_i915_error_object *obj = error->ringbuffer; ++ ++ seq_printf(m, "--- ringbuffer = 
0x%08x\n", obj->gtt_offset); ++ offset = 0; ++ for (page = 0; page < obj->page_count; page++) { ++ for (elt = 0; elt < PAGE_SIZE/4; elt++) { ++ seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); ++ offset += 4; ++ } ++ } ++ } + + out: + spin_unlock_irqrestore(&dev_priv->error_lock, flags); +@@ -386,6 +471,165 @@ out: + return 0; + } + ++static int i915_rstdby_delays(struct seq_file *m, void *unused) ++{ ++ struct drm_info_node *node = (struct drm_info_node *) m->private; ++ struct drm_device *dev = node->minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ u16 crstanddelay = I915_READ16(CRSTANDVID); ++ ++ seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); ++ ++ return 0; ++} ++ ++static int i915_cur_delayinfo(struct seq_file *m, void *unused) ++{ ++ struct drm_info_node *node = (struct drm_info_node *) m->private; ++ struct drm_device *dev = node->minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ u16 rgvswctl = I915_READ16(MEMSWCTL); ++ ++ seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3); ++ seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1); ++ seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf, ++ rgvswctl & 0x3f); ++ ++ return 0; ++} ++ ++static int i915_delayfreq_table(struct seq_file *m, void *unused) ++{ ++ struct drm_info_node *node = (struct drm_info_node *) m->private; ++ struct drm_device *dev = node->minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ u32 delayfreq; ++ int i; ++ ++ for (i = 0; i < 16; i++) { ++ delayfreq = I915_READ(PXVFREQ_BASE + i * 4); ++ seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq); ++ } ++ ++ return 0; ++} ++ ++static inline int MAP_TO_MV(int map) ++{ ++ return 1250 - (map * 25); ++} ++ ++static int i915_inttoext_table(struct seq_file *m, void *unused) ++{ ++ struct drm_info_node *node = (struct drm_info_node *) m->private; ++ struct drm_device *dev = node->minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ u32 inttoext; ++ int i; ++ ++ for (i = 1; i <= 32; i++) { ++ inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); ++ seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); ++ } ++ ++ return 0; ++} ++ ++static int i915_drpc_info(struct seq_file *m, void *unused) ++{ ++ struct drm_info_node *node = (struct drm_info_node *) m->private; ++ struct drm_device *dev = node->minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ u32 rgvmodectl = I915_READ(MEMMODECTL); ++ ++ seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? ++ "yes" : "no"); ++ seq_printf(m, "Boost freq: %d\n", ++ (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> ++ MEMMODE_BOOST_FREQ_SHIFT); ++ seq_printf(m, "HW control enabled: %s\n", ++ rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); ++ seq_printf(m, "SW control enabled: %s\n", ++ rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); ++ seq_printf(m, "Gated voltage change: %s\n", ++ rgvmodectl & MEMMODE_RCLK_GATE ? 
"yes" : "no"); ++ seq_printf(m, "Starting frequency: P%d\n", ++ (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); ++ seq_printf(m, "Max frequency: P%d\n", ++ (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); ++ seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); ++ ++ return 0; ++} ++ ++static int i915_fbc_status(struct seq_file *m, void *unused) ++{ ++ struct drm_info_node *node = (struct drm_info_node *) m->private; ++ struct drm_device *dev = node->minor->dev; ++ struct drm_crtc *crtc; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ bool fbc_enabled = false; ++ ++ if (!dev_priv->display.fbc_enabled) { ++ seq_printf(m, "FBC unsupported on this chipset\n"); ++ return 0; ++ } ++ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ if (!crtc->enabled) ++ continue; ++ if (dev_priv->display.fbc_enabled(crtc)) ++ fbc_enabled = true; ++ } ++ ++ if (fbc_enabled) { ++ seq_printf(m, "FBC enabled\n"); ++ } else { ++ seq_printf(m, "FBC disabled: "); ++ switch (dev_priv->no_fbc_reason) { ++ case FBC_STOLEN_TOO_SMALL: ++ seq_printf(m, "not enough stolen memory"); ++ break; ++ case FBC_UNSUPPORTED_MODE: ++ seq_printf(m, "mode not supported"); ++ break; ++ case FBC_MODE_TOO_LARGE: ++ seq_printf(m, "mode too large"); ++ break; ++ case FBC_BAD_PLANE: ++ seq_printf(m, "FBC unsupported on plane"); ++ break; ++ case FBC_NOT_TILED: ++ seq_printf(m, "scanout buffer not tiled"); ++ break; ++ default: ++ seq_printf(m, "unknown reason"); ++ } ++ seq_printf(m, "\n"); ++ } ++ return 0; ++} ++ ++static int i915_sr_status(struct seq_file *m, void *unused) ++{ ++ struct drm_info_node *node = (struct drm_info_node *) m->private; ++ struct drm_device *dev = node->minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ bool sr_enabled = false; ++ ++ if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev)) ++ sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; ++ else if (IS_I915GM(dev)) ++ sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; ++ else if (IS_PINEVIEW(dev)) ++ sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; ++ ++ seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" : ++ "disabled"); ++ ++ return 0; ++} ++ + static int + i915_wedged_open(struct inode *inode, + struct file *filp) +@@ -503,6 +747,13 @@ static struct drm_info_list i915_debugfs_list[] = { + {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, + {"i915_batchbuffers", i915_batchbuffer_info, 0}, + {"i915_error_state", i915_error_state, 0}, ++ {"i915_rstdby_delays", i915_rstdby_delays, 0}, ++ {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, ++ {"i915_delayfreq_table", i915_delayfreq_table, 0}, ++ {"i915_inttoext_table", i915_inttoext_table, 0}, ++ {"i915_drpc_info", i915_drpc_info, 0}, ++ {"i915_fbc_status", i915_fbc_status, 0}, ++ {"i915_sr_status", i915_sr_status, 0}, + }; + #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) + +diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c +--- a/drivers/gpu/drm/i915/i915_dma.c ++++ b/drivers/gpu/drm/i915/i915_dma.c +@@ -35,6 +35,8 @@ + #include "i915_drv.h" + #include "i915_trace.h" + #include ++#include ++#include + + /* Really want an OS-independent resettable timer. 
Would like to have + * this loop run for (eg) 3 sec, but have the timer reset every time +@@ -933,6 +935,120 @@ static int i915_get_bridge_dev(struct drm_device *dev) + return 0; + } + ++#define MCHBAR_I915 0x44 ++#define MCHBAR_I965 0x48 ++#define MCHBAR_SIZE (4*4096) ++ ++#define DEVEN_REG 0x54 ++#define DEVEN_MCHBAR_EN (1 << 28) ++ ++/* Allocate space for the MCH regs if needed, return nonzero on error */ ++static int ++intel_alloc_mchbar_resource(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; ++ u32 temp_lo, temp_hi = 0; ++ u64 mchbar_addr; ++ int ret = 0; ++ ++ if (IS_I965G(dev)) ++ pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); ++ pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); ++ mchbar_addr = ((u64)temp_hi << 32) | temp_lo; ++ ++ /* If ACPI doesn't have it, assume we need to allocate it ourselves */ ++#ifdef CONFIG_PNP ++ if (mchbar_addr && ++ pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { ++ ret = 0; ++ goto out; ++ } ++#endif ++ ++ /* Get some space for it */ ++ ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res, ++ MCHBAR_SIZE, MCHBAR_SIZE, ++ PCIBIOS_MIN_MEM, ++ 0, pcibios_align_resource, ++ dev_priv->bridge_dev); ++ if (ret) { ++ DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); ++ dev_priv->mch_res.start = 0; ++ goto out; ++ } ++ ++ if (IS_I965G(dev)) ++ pci_write_config_dword(dev_priv->bridge_dev, reg + 4, ++ upper_32_bits(dev_priv->mch_res.start)); ++ ++ pci_write_config_dword(dev_priv->bridge_dev, reg, ++ lower_32_bits(dev_priv->mch_res.start)); ++out: ++ return ret; ++} ++ ++/* Setup MCHBAR if possible, return true if we should disable it again */ ++static void ++intel_setup_mchbar(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; ++ u32 temp; ++ bool enabled; ++ ++ dev_priv->mchbar_need_disable = false; ++ ++ if (IS_I915G(dev) || IS_I915GM(dev)) { ++ pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); ++ enabled = !!(temp & DEVEN_MCHBAR_EN); ++ } else { ++ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); ++ enabled = temp & 1; ++ } ++ ++ /* If it's already enabled, don't have to do anything */ ++ if (enabled) ++ return; ++ ++ if (intel_alloc_mchbar_resource(dev)) ++ return; ++ ++ dev_priv->mchbar_need_disable = true; ++ ++ /* Space is allocated or reserved, so enable it. */ ++ if (IS_I915G(dev) || IS_I915GM(dev)) { ++ pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, ++ temp | DEVEN_MCHBAR_EN); ++ } else { ++ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); ++ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); ++ } ++} ++ ++static void ++intel_teardown_mchbar(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int mchbar_reg = IS_I965G(dev) ? 
MCHBAR_I965 : MCHBAR_I915; ++ u32 temp; ++ ++ if (dev_priv->mchbar_need_disable) { ++ if (IS_I915G(dev) || IS_I915GM(dev)) { ++ pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); ++ temp &= ~DEVEN_MCHBAR_EN; ++ pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp); ++ } else { ++ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); ++ temp &= ~1; ++ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp); ++ } ++ } ++ ++ if (dev_priv->mch_res.start) ++ release_resource(&dev_priv->mch_res); ++} ++ + /** + * i915_probe_agp - get AGP bootup configuration + * @pdev: PCI device +@@ -978,59 +1094,123 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, + * Some of the preallocated space is taken by the GTT + * and popup. GTT is 1K per MB of aperture size, and popup is 4K. + */ +- if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev)) ++ if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) + overhead = 4096; + else + overhead = (*aperture_size / 1024) + 4096; + +- switch (tmp & INTEL_GMCH_GMS_MASK) { +- case INTEL_855_GMCH_GMS_DISABLED: +- DRM_ERROR("video memory is disabled\n"); +- return -1; +- case INTEL_855_GMCH_GMS_STOLEN_1M: +- stolen = 1 * 1024 * 1024; +- break; +- case INTEL_855_GMCH_GMS_STOLEN_4M: +- stolen = 4 * 1024 * 1024; +- break; +- case INTEL_855_GMCH_GMS_STOLEN_8M: +- stolen = 8 * 1024 * 1024; +- break; +- case INTEL_855_GMCH_GMS_STOLEN_16M: +- stolen = 16 * 1024 * 1024; +- break; +- case INTEL_855_GMCH_GMS_STOLEN_32M: +- stolen = 32 * 1024 * 1024; +- break; +- case INTEL_915G_GMCH_GMS_STOLEN_48M: +- stolen = 48 * 1024 * 1024; +- break; +- case INTEL_915G_GMCH_GMS_STOLEN_64M: +- stolen = 64 * 1024 * 1024; +- break; +- case INTEL_GMCH_GMS_STOLEN_128M: +- stolen = 128 * 1024 * 1024; +- break; +- case INTEL_GMCH_GMS_STOLEN_256M: +- stolen = 256 * 1024 * 1024; +- break; +- case INTEL_GMCH_GMS_STOLEN_96M: +- stolen = 96 * 1024 * 1024; +- break; +- case INTEL_GMCH_GMS_STOLEN_160M: +- stolen = 160 * 1024 * 1024; +- break; +- case INTEL_GMCH_GMS_STOLEN_224M: +- stolen = 224 * 1024 * 1024; +- break; +- case INTEL_GMCH_GMS_STOLEN_352M: +- stolen = 352 * 1024 * 1024; +- break; +- default: +- DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", +- tmp & INTEL_GMCH_GMS_MASK); +- return -1; ++ if (IS_GEN6(dev)) { ++ /* SNB has memory control reg at 0x50.w */ ++ pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp); ++ ++ switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) { ++ case INTEL_855_GMCH_GMS_DISABLED: ++ DRM_ERROR("video memory is disabled\n"); ++ return -1; ++ case SNB_GMCH_GMS_STOLEN_32M: ++ stolen = 32 * 1024 * 1024; ++ break; ++ case SNB_GMCH_GMS_STOLEN_64M: ++ stolen = 64 * 1024 * 1024; ++ break; ++ case SNB_GMCH_GMS_STOLEN_96M: ++ stolen = 96 * 1024 * 1024; ++ break; ++ case SNB_GMCH_GMS_STOLEN_128M: ++ stolen = 128 * 1024 * 1024; ++ break; ++ case SNB_GMCH_GMS_STOLEN_160M: ++ stolen = 160 * 1024 * 1024; ++ break; ++ case SNB_GMCH_GMS_STOLEN_192M: ++ stolen = 192 * 1024 * 1024; ++ break; ++ case SNB_GMCH_GMS_STOLEN_224M: ++ stolen = 224 * 1024 * 1024; ++ break; ++ case SNB_GMCH_GMS_STOLEN_256M: ++ stolen = 256 * 1024 * 1024; ++ break; ++ case SNB_GMCH_GMS_STOLEN_288M: ++ stolen = 288 * 1024 * 1024; ++ break; ++ case SNB_GMCH_GMS_STOLEN_320M: ++ stolen = 320 * 1024 * 1024; ++ break; ++ case SNB_GMCH_GMS_STOLEN_352M: ++ stolen = 352 * 1024 * 1024; ++ break; ++ case SNB_GMCH_GMS_STOLEN_384M: ++ stolen = 384 * 1024 * 1024; ++ break; ++ case SNB_GMCH_GMS_STOLEN_416M: ++ stolen = 416 * 1024 * 1024; ++ break; ++ case 
SNB_GMCH_GMS_STOLEN_448M: ++ stolen = 448 * 1024 * 1024; ++ break; ++ case SNB_GMCH_GMS_STOLEN_480M: ++ stolen = 480 * 1024 * 1024; ++ break; ++ case SNB_GMCH_GMS_STOLEN_512M: ++ stolen = 512 * 1024 * 1024; ++ break; ++ default: ++ DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", ++ tmp & SNB_GMCH_GMS_STOLEN_MASK); ++ return -1; ++ } ++ } else { ++ switch (tmp & INTEL_GMCH_GMS_MASK) { ++ case INTEL_855_GMCH_GMS_DISABLED: ++ DRM_ERROR("video memory is disabled\n"); ++ return -1; ++ case INTEL_855_GMCH_GMS_STOLEN_1M: ++ stolen = 1 * 1024 * 1024; ++ break; ++ case INTEL_855_GMCH_GMS_STOLEN_4M: ++ stolen = 4 * 1024 * 1024; ++ break; ++ case INTEL_855_GMCH_GMS_STOLEN_8M: ++ stolen = 8 * 1024 * 1024; ++ break; ++ case INTEL_855_GMCH_GMS_STOLEN_16M: ++ stolen = 16 * 1024 * 1024; ++ break; ++ case INTEL_855_GMCH_GMS_STOLEN_32M: ++ stolen = 32 * 1024 * 1024; ++ break; ++ case INTEL_915G_GMCH_GMS_STOLEN_48M: ++ stolen = 48 * 1024 * 1024; ++ break; ++ case INTEL_915G_GMCH_GMS_STOLEN_64M: ++ stolen = 64 * 1024 * 1024; ++ break; ++ case INTEL_GMCH_GMS_STOLEN_128M: ++ stolen = 128 * 1024 * 1024; ++ break; ++ case INTEL_GMCH_GMS_STOLEN_256M: ++ stolen = 256 * 1024 * 1024; ++ break; ++ case INTEL_GMCH_GMS_STOLEN_96M: ++ stolen = 96 * 1024 * 1024; ++ break; ++ case INTEL_GMCH_GMS_STOLEN_160M: ++ stolen = 160 * 1024 * 1024; ++ break; ++ case INTEL_GMCH_GMS_STOLEN_224M: ++ stolen = 224 * 1024 * 1024; ++ break; ++ case INTEL_GMCH_GMS_STOLEN_352M: ++ stolen = 352 * 1024 * 1024; ++ break; ++ default: ++ DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", ++ tmp & INTEL_GMCH_GMS_MASK); ++ return -1; ++ } + } ++ + *preallocated_size = stolen - overhead; + *start = overhead; + +@@ -1064,7 +1244,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev, + int gtt_offset, gtt_size; + + if (IS_I965G(dev)) { +- if (IS_G4X(dev) || IS_IRONLAKE(dev)) { ++ if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { + gtt_offset = 2*1024*1024; + gtt_size = 2*1024*1024; + } else { +@@ -1133,6 +1313,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) + /* Leave 1M for line length buffer & misc. 
*/ + compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); + if (!compressed_fb) { ++ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; + i915_warn_stolen(dev); + return; + } +@@ -1140,6 +1321,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) + compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); + if (!compressed_fb) { + i915_warn_stolen(dev); ++ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; + return; + } + +@@ -1281,7 +1463,9 @@ static int i915_load_modeset_init(struct drm_device *dev, + return 0; + + destroy_ringbuffer: ++ mutex_lock(&dev->struct_mutex); + i915_gem_cleanup_ringbuffer(dev); ++ mutex_unlock(&dev->struct_mutex); + out: + return ret; + } +@@ -1361,7 +1545,7 @@ static void i915_get_mem_freq(struct drm_device *dev) + */ + int i915_driver_load(struct drm_device *dev, unsigned long flags) + { +- struct drm_i915_private *dev_priv = dev->dev_private; ++ struct drm_i915_private *dev_priv; + resource_size_t base, size; + int ret = 0, mmio_bar; + uint32_t agp_size, prealloc_size, prealloc_start; +@@ -1445,11 +1629,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) + + dev->driver->get_vblank_counter = i915_get_vblank_counter; + dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ +- if (IS_G4X(dev) || IS_IRONLAKE(dev)) { ++ if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { + dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ + dev->driver->get_vblank_counter = gm45_get_vblank_counter; + } + ++ /* Try to make sure MCHBAR is enabled before poking at it */ ++ intel_setup_mchbar(dev); ++ + i915_gem_load(dev); + + /* Init HWS */ +@@ -1490,6 +1677,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) + /* Start out suspended */ + dev_priv->mm.suspended = 1; + ++ intel_detect_pch(dev); ++ + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = i915_load_modeset_init(dev, prealloc_start, + prealloc_size, agp_size); +@@ -1523,6 +1712,8 @@ int i915_driver_unload(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; + ++ i915_destroy_error_state(dev); ++ + destroy_workqueue(dev_priv->wq); + del_timer_sync(&dev_priv->hangcheck_timer); + +@@ -1569,6 +1760,8 @@ int i915_driver_unload(struct drm_device *dev) + intel_cleanup_overlay(dev); + } + ++ intel_teardown_mchbar(dev); ++ + pci_dev_put(dev_priv->bridge_dev); + kfree(dev->dev_private); + +@@ -1655,29 +1848,29 @@ struct drm_ioctl_desc i915_ioctls[] = { + DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), + DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), +- DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), +- DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), +- DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH), +- DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), +- DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), +- DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH), +- DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH), +- DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), +- DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), +- DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0), +- 
DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0), +- DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0), +- DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0), +- DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0), +- DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0), +- DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0), +- DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0), +- DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), +- DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0), +- DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), +- DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0), +- DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW), +- DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW), ++ DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), ++ DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + }; + + int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); +diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c +--- a/drivers/gpu/drm/i915/i915_drv.c ++++ b/drivers/gpu/drm/i915/i915_drv.c +@@ -49,6 +49,7 @@ unsigned int i915_lvds_downclock = 0; + module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); + + static struct drm_driver driver; ++extern int intel_agp_enabled; + + #define INTEL_VGA_DEVICE(id, info) { \ + .class = PCI_CLASS_DISPLAY_VGA << 8, \ +@@ -68,7 +69,8 @@ const static struct intel_device_info 
intel_845g_info = { + }; + + const static struct intel_device_info intel_i85x_info = { +- .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, ++ .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, ++ .cursor_needs_physical = 1, + }; + + const static struct intel_device_info intel_i865g_info = { +@@ -79,14 +81,14 @@ const static struct intel_device_info intel_i915g_info = { + .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, + }; + const static struct intel_device_info intel_i915gm_info = { +- .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, ++ .is_i9xx = 1, .is_mobile = 1, + .cursor_needs_physical = 1, + }; + const static struct intel_device_info intel_i945g_info = { + .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, + }; + const static struct intel_device_info intel_i945gm_info = { +- .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, ++ .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, + .has_hotplug = 1, .cursor_needs_physical = 1, + }; + +@@ -136,11 +138,21 @@ const static struct intel_device_info intel_ironlake_m_info = { + .has_hotplug = 1, + }; + ++const static struct intel_device_info intel_sandybridge_d_info = { ++ .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, ++ .has_hotplug = 1, .is_gen6 = 1, ++}; ++ ++const static struct intel_device_info intel_sandybridge_m_info = { ++ .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1, ++ .has_hotplug = 1, .is_gen6 = 1, ++}; ++ + const static struct pci_device_id pciidlist[] = { + INTEL_VGA_DEVICE(0x3577, &intel_i830_info), + INTEL_VGA_DEVICE(0x2562, &intel_845g_info), + INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), +- INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info), ++ INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), + INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), + INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), + INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), +@@ -167,6 +179,8 @@ const static struct pci_device_id pciidlist[] = { + INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), + INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), + INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), ++ INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), ++ INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), + {0, 0, 0} + }; + +@@ -174,6 +188,35 @@ const static struct pci_device_id pciidlist[] = { + MODULE_DEVICE_TABLE(pci, pciidlist); + #endif + ++#define INTEL_PCH_DEVICE_ID_MASK 0xff00 ++#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 ++ ++void intel_detect_pch (struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct pci_dev *pch; ++ ++ /* ++ * The reason to probe ISA bridge instead of Dev31:Fun0 is to ++ * make graphics device passthrough work easy for VMM, that only ++ * need to expose ISA bridge to let driver know the real hardware ++ * underneath. This is a requirement from virtualization team. 
++ */ ++ pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); ++ if (pch) { ++ if (pch->vendor == PCI_VENDOR_ID_INTEL) { ++ int id; ++ id = pch->device & INTEL_PCH_DEVICE_ID_MASK; ++ ++ if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { ++ dev_priv->pch_type = PCH_CPT; ++ DRM_DEBUG_KMS("Found CougarPoint PCH\n"); ++ } ++ } ++ pci_dev_put(pch); ++ } ++} ++ + static int i915_drm_freeze(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; +@@ -348,7 +391,7 @@ int i965_reset(struct drm_device *dev, u8 flags) + !dev_priv->mm.suspended) { + drm_i915_ring_buffer_t *ring = &dev_priv->ring; + struct drm_gem_object *obj = ring->ring_obj; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + dev_priv->mm.suspended = 0; + + /* Stop the ring if it's running. */ +@@ -546,6 +589,11 @@ static struct drm_driver driver = { + + static int __init i915_init(void) + { ++ if (!intel_agp_enabled) { ++ DRM_ERROR("drm/i915 can't work without intel_agp module!\n"); ++ return -ENODEV; ++ } ++ + driver.num_ioctls = i915_max_ioctl; + + i915_gem_shrinker_init(); +@@ -571,6 +619,11 @@ static int __init i915_init(void) + driver.driver_features &= ~DRIVER_MODESET; + #endif + ++ if (!(driver.driver_features & DRIVER_MODESET)) { ++ driver.suspend = i915_suspend; ++ driver.resume = i915_resume; ++ } ++ + return drm_init(&driver); + } + +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -150,7 +150,27 @@ struct drm_i915_error_state { + u32 instps; + u32 instdone1; + u32 seqno; ++ u64 bbaddr; + struct timeval time; ++ struct drm_i915_error_object { ++ int page_count; ++ u32 gtt_offset; ++ u32 *pages[0]; ++ } *ringbuffer, *batchbuffer[2]; ++ struct drm_i915_error_buffer { ++ size_t size; ++ u32 name; ++ u32 seqno; ++ u32 gtt_offset; ++ u32 read_domains; ++ u32 write_domain; ++ u32 fence_reg; ++ s32 pinned:2; ++ u32 tiling:2; ++ u32 dirty:1; ++ u32 purgeable:1; ++ } *active_bo; ++ u32 active_bo_count; + }; + + struct drm_i915_display_funcs { +@@ -175,6 +195,7 @@ struct intel_overlay; + struct intel_device_info { + u8 is_mobile : 1; + u8 is_i8xx : 1; ++ u8 is_i85x : 1; + u8 is_i915g : 1; + u8 is_i9xx : 1; + u8 is_i945gm : 1; +@@ -185,6 +206,7 @@ struct intel_device_info { + u8 is_g4x : 1; + u8 is_pineview : 1; + u8 is_ironlake : 1; ++ u8 is_gen6 : 1; + u8 has_fbc : 1; + u8 has_rc6 : 1; + u8 has_pipe_cxsr : 1; +@@ -192,6 +214,19 @@ struct intel_device_info { + u8 cursor_needs_physical : 1; + }; + ++enum no_fbc_reason { ++ FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ ++ FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ ++ FBC_MODE_TOO_LARGE, /* mode too large for compression */ ++ FBC_BAD_PLANE, /* fbc not supported on plane */ ++ FBC_NOT_TILED, /* buffer not tiled */ ++}; ++ ++enum intel_pch { ++ PCH_IBX, /* Ibexpeak PCH */ ++ PCH_CPT, /* Cougarpoint PCH */ ++}; ++ + typedef struct drm_i915_private { + struct drm_device *dev; + +@@ -302,6 +337,9 @@ typedef struct drm_i915_private { + /* Display functions */ + struct drm_i915_display_funcs display; + ++ /* PCH chipset type */ ++ enum intel_pch pch_type; ++ + /* Register state */ + bool modeset_on_lid; + u8 saveLBB; +@@ -452,6 +490,7 @@ typedef struct drm_i915_private { + u32 savePIPEB_DATA_N1; + u32 savePIPEB_LINK_M1; + u32 savePIPEB_LINK_N1; ++ u32 saveMCHBAR_RENDER_STANDBY; + + struct { + struct drm_mm gtt_space; +@@ -581,6 +620,8 @@ typedef 
struct drm_i915_private { + /* Reclocking support */ + bool render_reclock_avail; + bool lvds_downclock_avail; ++ /* indicate whether the LVDS EDID is OK */ ++ bool lvds_edid_good; + /* indicates the reduced downclock for LVDS*/ + int lvds_downclock; + struct work_struct idle_work; +@@ -590,6 +631,14 @@ typedef struct drm_i915_private { + int child_dev_num; + struct child_device_config *child_dev; + struct drm_connector *int_lvds_connector; ++ ++ bool mchbar_need_disable; ++ ++ u8 cur_delay; ++ u8 min_delay; ++ u8 max_delay; ++ ++ enum no_fbc_reason no_fbc_reason; + } drm_i915_private_t; + + /** driver private structure attached to each drm_gem_object */ +@@ -693,6 +742,8 @@ struct drm_i915_gem_object { + atomic_t pending_flip; + }; + ++#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private) ++ + /** + * Request queue structure. + * +@@ -761,6 +812,7 @@ extern int i965_reset(struct drm_device *dev, u8 flags); + + /* i915_irq.c */ + void i915_hangcheck_elapsed(unsigned long data); ++void i915_destroy_error_state(struct drm_device *dev); + extern int i915_irq_emit(struct drm_device *dev, void *data, + struct drm_file *file_priv); + extern int i915_irq_wait(struct drm_device *dev, void *data, +@@ -897,7 +949,8 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); + void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); + bool i915_tiling_ok(struct drm_device *dev, int stride, int size, + int tiling_mode); +-bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj); ++bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, ++ int tiling_mode); + + /* i915_gem_debug.c */ + void i915_gem_dump_object(struct drm_gem_object *obj, int len, +@@ -946,6 +999,9 @@ extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); + extern void i8xx_disable_fbc(struct drm_device *dev); + extern void g4x_disable_fbc(struct drm_device *dev); + ++extern void intel_detect_pch (struct drm_device *dev); ++extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); ++ + /** + * Lock test for when it's just for synchronization of ring access. 
+ * +@@ -1024,9 +1080,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); + + #define IS_I830(dev) ((dev)->pci_device == 0x3577) + #define IS_845G(dev) ((dev)->pci_device == 0x2562) +-#define IS_I85X(dev) ((dev)->pci_device == 0x3582) ++#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) + #define IS_I865G(dev) ((dev)->pci_device == 0x2572) +-#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx) ++#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx) + #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) + #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) + #define IS_I945G(dev) ((dev)->pci_device == 0x2772) +@@ -1043,8 +1099,28 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); + #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) + #define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake) + #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) ++#define IS_GEN6(dev) (INTEL_INFO(dev)->is_gen6) + #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) + ++#define IS_GEN3(dev) (IS_I915G(dev) || \ ++ IS_I915GM(dev) || \ ++ IS_I945G(dev) || \ ++ IS_I945GM(dev) || \ ++ IS_G33(dev) || \ ++ IS_PINEVIEW(dev)) ++#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \ ++ (dev)->pci_device == 0x2982 || \ ++ (dev)->pci_device == 0x2992 || \ ++ (dev)->pci_device == 0x29A2 || \ ++ (dev)->pci_device == 0x2A02 || \ ++ (dev)->pci_device == 0x2A12 || \ ++ (dev)->pci_device == 0x2E02 || \ ++ (dev)->pci_device == 0x2E12 || \ ++ (dev)->pci_device == 0x2E22 || \ ++ (dev)->pci_device == 0x2E32 || \ ++ (dev)->pci_device == 0x2A42 || \ ++ (dev)->pci_device == 0x2E42) ++ + #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) + + /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte +@@ -1057,7 +1133,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); + #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) + #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) + #define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ +- !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev)) ++ !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \ ++ !IS_GEN6(dev)) + #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) + /* dsparb controlled by hw only */ + #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) +@@ -1067,6 +1144,12 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); + #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) + #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) + ++#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ ++ IS_GEN6(dev)) ++ ++#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) ++#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) ++ + #define PRIMARY_RINGBUFFER_SIZE (128*1024) + + #endif +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -162,7 +162,7 @@ fast_shmem_read(struct page **pages, + static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) + { + drm_i915_private_t *dev_priv = obj->dev->dev_private; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && + obj_priv->tiling_mode != I915_TILING_NONE; +@@ -263,7 +263,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pread *args, + struct 
drm_file *file_priv) + { +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + ssize_t remain; + loff_t offset, page_base; + char __user *user_data; +@@ -284,7 +284,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, + if (ret != 0) + goto fail_put_pages; + +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + offset = args->offset; + + while (remain > 0) { +@@ -353,7 +353,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pread *args, + struct drm_file *file_priv) + { +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct mm_struct *mm = current->mm; + struct page **user_pages; + ssize_t remain; +@@ -402,7 +402,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, + if (ret != 0) + goto fail_put_pages; + +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + offset = args->offset; + + while (remain > 0) { +@@ -478,7 +478,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) + return -EBADF; +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + + /* Bounds check source. + * +@@ -580,7 +580,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) + { +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + drm_i915_private_t *dev_priv = dev->dev_private; + ssize_t remain; + loff_t offset, page_base; +@@ -604,7 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, + if (ret) + goto fail; + +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + offset = obj_priv->gtt_offset + args->offset; + + while (remain > 0) { +@@ -654,7 +654,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) + { +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + drm_i915_private_t *dev_priv = dev->dev_private; + ssize_t remain; + loff_t gtt_page_base, offset; +@@ -698,7 +698,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, + if (ret) + goto out_unpin_object; + +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + offset = obj_priv->gtt_offset + args->offset; + + while (remain > 0) { +@@ -760,7 +760,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) + { +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + ssize_t remain; + loff_t offset, page_base; + char __user *user_data; +@@ -780,7 +780,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, + if (ret != 0) + goto fail_put_pages; + +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + offset = args->offset; + obj_priv->dirty = 1; + +@@ -828,7 +828,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) + { +- struct drm_i915_gem_object *obj_priv = 
obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct mm_struct *mm = current->mm; + struct page **user_pages; + ssize_t remain; +@@ -876,7 +876,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, + if (ret != 0) + goto fail_put_pages; + +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + offset = args->offset; + obj_priv->dirty = 1; + +@@ -951,7 +951,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) + return -EBADF; +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + + /* Bounds check destination. + * +@@ -1033,7 +1033,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) + return -EBADF; +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + + mutex_lock(&dev->struct_mutex); + +@@ -1095,7 +1095,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, + DRM_INFO("%s: sw_finish %d (%p %zd)\n", + __func__, args->handle, obj, obj->size); + #endif +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + + /* Pinned buffers may be scanout, so flush the cache */ + if (obj_priv->pin_count) +@@ -1166,7 +1166,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + struct drm_gem_object *obj = vma->vm_private_data; + struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = dev->dev_private; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + pgoff_t page_offset; + unsigned long pfn; + int ret = 0; +@@ -1233,7 +1233,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; + struct drm_gem_mm *mm = dev->mm_private; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_map_list *list; + struct drm_local_map *map; + int ret = 0; +@@ -1304,7 +1304,7 @@ void + i915_gem_release_mmap(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + if (dev->dev_mapping) + unmap_mapping_range(dev->dev_mapping, +@@ -1315,7 +1315,7 @@ static void + i915_gem_free_mmap_offset(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_gem_mm *mm = dev->mm_private; + struct drm_map_list *list; + +@@ -1346,7 +1346,7 @@ static uint32_t + i915_gem_get_gtt_alignment(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int start, i; + + /* +@@ -1405,7 +1405,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, + + mutex_lock(&dev->struct_mutex); + +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + + if (obj_priv->madv != I915_MADV_WILLNEED) { + DRM_ERROR("Attempting to mmap a purgeable buffer\n"); +@@ -1449,7 +1449,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, + void + i915_gem_object_put_pages(struct drm_gem_object *obj) + { +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct 
drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int page_count = obj->size / PAGE_SIZE; + int i; + +@@ -1485,7 +1485,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) + { + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + /* Add a reference if we're newly entering the active list. */ + if (!obj_priv->active) { +@@ -1505,7 +1505,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + BUG_ON(!obj_priv->active); + list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); +@@ -1516,7 +1516,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) + static void + i915_gem_object_truncate(struct drm_gem_object *obj) + { +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct inode *inode; + + inode = obj->filp->f_path.dentry->d_inode; +@@ -1537,7 +1537,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + i915_verify_inactive(dev, __FILE__, __LINE__); + if (obj_priv->pin_count != 0) +@@ -1555,6 +1555,38 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) + i915_verify_inactive(dev, __FILE__, __LINE__); + } + ++static void ++i915_gem_process_flushing_list(struct drm_device *dev, ++ uint32_t flush_domains, uint32_t seqno) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv, *next; ++ ++ list_for_each_entry_safe(obj_priv, next, ++ &dev_priv->mm.gpu_write_list, ++ gpu_write_list) { ++ struct drm_gem_object *obj = obj_priv->obj; ++ ++ if ((obj->write_domain & flush_domains) == ++ obj->write_domain) { ++ uint32_t old_write_domain = obj->write_domain; ++ ++ obj->write_domain = 0; ++ list_del_init(&obj_priv->gpu_write_list); ++ i915_gem_object_move_to_active(obj, seqno); ++ ++ /* update the fence lru list */ ++ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) ++ list_move_tail(&obj_priv->fence_list, ++ &dev_priv->mm.fence_list); ++ ++ trace_i915_gem_object_change_domain(obj, ++ obj->read_domains, ++ old_write_domain); ++ } ++ } ++} ++ + /** + * Creates a new sequence number, emitting a write of it to the status page + * plus an interrupt, which will trigger i915_user_interrupt_handler. +@@ -1613,29 +1645,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, + /* Associate any objects on the flushing list matching the write + * domain we're flushing with our flush. 
+ */ +- if (flush_domains != 0) { +- struct drm_i915_gem_object *obj_priv, *next; +- +- list_for_each_entry_safe(obj_priv, next, +- &dev_priv->mm.gpu_write_list, +- gpu_write_list) { +- struct drm_gem_object *obj = obj_priv->obj; +- +- if ((obj->write_domain & flush_domains) == +- obj->write_domain) { +- uint32_t old_write_domain = obj->write_domain; +- +- obj->write_domain = 0; +- list_del_init(&obj_priv->gpu_write_list); +- i915_gem_object_move_to_active(obj, seqno); +- +- trace_i915_gem_object_change_domain(obj, +- obj->read_domains, +- old_write_domain); +- } +- } +- +- } ++ if (flush_domains != 0) ++ i915_gem_process_flushing_list(dev, flush_domains, seqno); + + if (!dev_priv->mm.suspended) { + mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); +@@ -1815,7 +1826,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) + return -EIO; + + if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + ier = I915_READ(DEIER) | I915_READ(GTIER); + else + ier = I915_READ(IER); +@@ -1953,7 +1964,7 @@ static int + i915_gem_object_wait_rendering(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int ret; + + /* This function only exists to support waiting for existing rendering, +@@ -1984,7 +1995,8 @@ int + i915_gem_object_unbind(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int ret = 0; + + #if WATCH_BUF +@@ -2039,8 +2051,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj) + } + + /* Remove ourselves from the LRU list if present. */ ++ spin_lock(&dev_priv->mm.active_list_lock); + if (!list_empty(&obj_priv->list)) + list_del_init(&obj_priv->list); ++ spin_unlock(&dev_priv->mm.active_list_lock); + + if (i915_gem_object_is_purgeable(obj_priv)) + i915_gem_object_truncate(obj); +@@ -2078,11 +2092,34 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size) + } + + static int ++i915_gpu_idle(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ bool lists_empty; ++ uint32_t seqno; ++ ++ spin_lock(&dev_priv->mm.active_list_lock); ++ lists_empty = list_empty(&dev_priv->mm.flushing_list) && ++ list_empty(&dev_priv->mm.active_list); ++ spin_unlock(&dev_priv->mm.active_list_lock); ++ ++ if (lists_empty) ++ return 0; ++ ++ /* Flush everything onto the inactive list. 
*/ ++ i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); ++ seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); ++ if (seqno == 0) ++ return -ENOMEM; ++ ++ return i915_wait_request(dev, seqno); ++} ++ ++static int + i915_gem_evict_everything(struct drm_device *dev) + { + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; +- uint32_t seqno; + bool lists_empty; + + spin_lock(&dev_priv->mm.active_list_lock); +@@ -2095,12 +2132,7 @@ i915_gem_evict_everything(struct drm_device *dev) + return -ENOSPC; + + /* Flush everything (on to the inactive lists) and evict */ +- i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); +- seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); +- if (seqno == 0) +- return -ENOMEM; +- +- ret = i915_wait_request(dev, seqno); ++ ret = i915_gpu_idle(dev); + if (ret) + return ret; + +@@ -2140,7 +2172,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) + #if WATCH_LRU + DRM_INFO("%s: evicting %p\n", __func__, obj); + #endif +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + BUG_ON(obj_priv->pin_count != 0); + BUG_ON(obj_priv->active); + +@@ -2192,11 +2224,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) + seqno = i915_add_request(dev, NULL, obj->write_domain); + if (seqno == 0) + return -ENOMEM; +- +- ret = i915_wait_request(dev, seqno); +- if (ret) +- return ret; +- + continue; + } + } +@@ -2216,7 +2243,7 @@ int + i915_gem_object_get_pages(struct drm_gem_object *obj, + gfp_t gfpmask) + { +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int page_count, i; + struct address_space *mapping; + struct inode *inode; +@@ -2264,12 +2291,34 @@ err_pages: + return PTR_ERR(page); + } + ++static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) ++{ ++ struct drm_gem_object *obj = reg->obj; ++ struct drm_device *dev = obj->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); ++ int regnum = obj_priv->fence_reg; ++ uint64_t val; ++ ++ val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & ++ 0xfffff000) << 32; ++ val |= obj_priv->gtt_offset & 0xfffff000; ++ val |= (uint64_t)((obj_priv->stride / 128) - 1) << ++ SANDYBRIDGE_FENCE_PITCH_SHIFT; ++ ++ if (obj_priv->tiling_mode == I915_TILING_Y) ++ val |= 1 << I965_FENCE_TILING_Y_SHIFT; ++ val |= I965_FENCE_REG_VALID; ++ ++ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); ++} ++ + static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) + { + struct drm_gem_object *obj = reg->obj; + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int regnum = obj_priv->fence_reg; + uint64_t val; + +@@ -2289,7 +2338,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) + struct drm_gem_object *obj = reg->obj; + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int regnum = obj_priv->fence_reg; + int tile_width; + uint32_t fence_reg, val; +@@ -2312,6 +2361,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) + pitch_val = obj_priv->stride / tile_width; + pitch_val = ffs(pitch_val) - 1; + ++ if (obj_priv->tiling_mode == I915_TILING_Y 
&& ++ HAS_128_BYTE_Y_TILING(dev)) ++ WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); ++ else ++ WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); ++ + val = obj_priv->gtt_offset; + if (obj_priv->tiling_mode == I915_TILING_Y) + val |= 1 << I830_FENCE_TILING_Y_SHIFT; +@@ -2331,7 +2386,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) + struct drm_gem_object *obj = reg->obj; + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int regnum = obj_priv->fence_reg; + uint32_t val; + uint32_t pitch_val; +@@ -2360,6 +2415,58 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) + I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); + } + ++static int i915_find_fence_reg(struct drm_device *dev) ++{ ++ struct drm_i915_fence_reg *reg = NULL; ++ struct drm_i915_gem_object *obj_priv = NULL; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj = NULL; ++ int i, avail, ret; ++ ++ /* First try to find a free reg */ ++ avail = 0; ++ for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { ++ reg = &dev_priv->fence_regs[i]; ++ if (!reg->obj) ++ return i; ++ ++ obj_priv = to_intel_bo(reg->obj); ++ if (!obj_priv->pin_count) ++ avail++; ++ } ++ ++ if (avail == 0) ++ return -ENOSPC; ++ ++ /* None available, try to steal one or wait for a user to finish */ ++ i = I915_FENCE_REG_NONE; ++ list_for_each_entry(obj_priv, &dev_priv->mm.fence_list, ++ fence_list) { ++ obj = obj_priv->obj; ++ ++ if (obj_priv->pin_count) ++ continue; ++ ++ /* found one! */ ++ i = obj_priv->fence_reg; ++ break; ++ } ++ ++ BUG_ON(i == I915_FENCE_REG_NONE); ++ ++ /* We only have a reference on obj from the active list. put_fence_reg ++ * might drop that one, causing a use-after-free in it. So hold a ++ * private reference to obj like the other callers of put_fence_reg ++ * (set_tiling ioctl) do. */ ++ drm_gem_object_reference(obj); ++ ret = i915_gem_object_put_fence_reg(obj); ++ drm_gem_object_unreference(obj); ++ if (ret != 0) ++ return ret; ++ ++ return i; ++} ++ + /** + * i915_gem_object_get_fence_reg - set up a fence reg for an object + * @obj: object to map through a fence reg +@@ -2378,10 +2485,9 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = dev->dev_private; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_i915_fence_reg *reg = NULL; +- struct drm_i915_gem_object *old_obj_priv = NULL; +- int i, ret, avail; ++ int ret; + + /* Just update our place in the LRU if our fence is getting used. 
*/ + if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { +@@ -2409,86 +2515,27 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) + break; + } + +- /* First try to find a free reg */ +- avail = 0; +- for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { +- reg = &dev_priv->fence_regs[i]; +- if (!reg->obj) +- break; +- +- old_obj_priv = reg->obj->driver_private; +- if (!old_obj_priv->pin_count) +- avail++; +- } +- +- /* None available, try to steal one or wait for a user to finish */ +- if (i == dev_priv->num_fence_regs) { +- struct drm_gem_object *old_obj = NULL; +- +- if (avail == 0) +- return -ENOSPC; +- +- list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list, +- fence_list) { +- old_obj = old_obj_priv->obj; +- +- if (old_obj_priv->pin_count) +- continue; +- +- /* Take a reference, as otherwise the wait_rendering +- * below may cause the object to get freed out from +- * under us. +- */ +- drm_gem_object_reference(old_obj); +- +- /* i915 uses fences for GPU access to tiled buffers */ +- if (IS_I965G(dev) || !old_obj_priv->active) +- break; +- +- /* This brings the object to the head of the LRU if it +- * had been written to. The only way this should +- * result in us waiting longer than the expected +- * optimal amount of time is if there was a +- * fence-using buffer later that was read-only. +- */ +- i915_gem_object_flush_gpu_write_domain(old_obj); +- ret = i915_gem_object_wait_rendering(old_obj); +- if (ret != 0) { +- drm_gem_object_unreference(old_obj); +- return ret; +- } +- +- break; +- } +- +- /* +- * Zap this virtual mapping so we can set up a fence again +- * for this object next time we need it. +- */ +- i915_gem_release_mmap(old_obj); +- +- i = old_obj_priv->fence_reg; +- reg = &dev_priv->fence_regs[i]; +- +- old_obj_priv->fence_reg = I915_FENCE_REG_NONE; +- list_del_init(&old_obj_priv->fence_list); +- +- drm_gem_object_unreference(old_obj); +- } ++ ret = i915_find_fence_reg(dev); ++ if (ret < 0) ++ return ret; + +- obj_priv->fence_reg = i; ++ obj_priv->fence_reg = ret; ++ reg = &dev_priv->fence_regs[obj_priv->fence_reg]; + list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list); + + reg->obj = obj; + +- if (IS_I965G(dev)) ++ if (IS_GEN6(dev)) ++ sandybridge_write_fence_reg(reg); ++ else if (IS_I965G(dev)) + i965_write_fence_reg(reg); + else if (IS_I9XX(dev)) + i915_write_fence_reg(reg); + else + i830_write_fence_reg(reg); + +- trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode); ++ trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg, ++ obj_priv->tiling_mode); + + return 0; + } +@@ -2505,11 +2552,14 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + +- if (IS_I965G(dev)) ++ if (IS_GEN6(dev)) { ++ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + ++ (obj_priv->fence_reg * 8), 0); ++ } else if (IS_I965G(dev)) { + I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); +- else { ++ } else { + uint32_t fence_reg; + + if (obj_priv->fence_reg < 8) +@@ -2538,11 +2588,17 @@ int + i915_gem_object_put_fence_reg(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + if (obj_priv->fence_reg == I915_FENCE_REG_NONE) + return 0; + ++ /* If we've changed tiling, GTT-mappings of the 
object ++ * need to re-fault to ensure that the correct fence register ++ * setup is in place. ++ */ ++ i915_gem_release_mmap(obj); ++ + /* On the i915, GPU access to tiled buffers is via a fence, + * therefore we must wait for any outstanding access to complete + * before clearing the fence. +@@ -2551,12 +2607,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj) + int ret; + + i915_gem_object_flush_gpu_write_domain(obj); +- i915_gem_object_flush_gtt_write_domain(obj); + ret = i915_gem_object_wait_rendering(obj); + if (ret != 0) + return ret; + } + ++ i915_gem_object_flush_gtt_write_domain(obj); + i915_gem_clear_fence_reg (obj); + + return 0; +@@ -2570,7 +2626,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) + { + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_mm_node *free_space; + gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; + int ret; +@@ -2677,7 +2733,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) + void + i915_gem_clflush_object(struct drm_gem_object *obj) + { +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + /* If we don't have a page list set up, then we're not pinned + * to GPU, and we can ignore the cache flush because it'll happen +@@ -2696,7 +2752,6 @@ static void + i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; +- uint32_t seqno; + uint32_t old_write_domain; + + if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) +@@ -2705,9 +2760,8 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) + /* Queue the GPU write cache flushing we need. 
*/ + old_write_domain = obj->write_domain; + i915_gem_flush(dev, 0, obj->write_domain); +- seqno = i915_add_request(dev, NULL, obj->write_domain); ++ (void) i915_add_request(dev, NULL, obj->write_domain); + BUG_ON(obj->write_domain); +- i915_gem_object_move_to_active(obj, seqno); + + trace_i915_gem_object_change_domain(obj, + obj->read_domains, +@@ -2780,7 +2834,7 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj) + int + i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) + { +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + uint32_t old_write_domain, old_read_domains; + int ret; + +@@ -2830,7 +2884,7 @@ int + i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + uint32_t old_write_domain, old_read_domains; + int ret; + +@@ -3043,7 +3097,7 @@ static void + i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + uint32_t invalidate_domains = 0; + uint32_t flush_domains = 0; + uint32_t old_read_domains; +@@ -3128,7 +3182,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) + static void + i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) + { +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + if (!obj_priv->page_cpu_valid) + return; +@@ -3168,7 +3222,7 @@ static int + i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, + uint64_t offset, uint64_t size) + { +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + uint32_t old_read_domains; + int i, ret; + +@@ -3237,7 +3291,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, + { + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int i, ret; + void __iomem *reloc_page; + bool need_fence; +@@ -3246,7 +3300,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, + obj_priv->tiling_mode != I915_TILING_NONE; + + /* Check fence reg constraints and rebind if necessary */ +- if (need_fence && !i915_obj_fenceable(dev, obj)) ++ if (need_fence && !i915_gem_object_fence_offset_ok(obj, ++ obj_priv->tiling_mode)) + i915_gem_object_unbind(obj); + + /* Choose the GTT offset for our buffer and put it there. 
*/ +@@ -3287,7 +3342,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, + i915_gem_object_unpin(obj); + return -EBADF; + } +- target_obj_priv = target_obj->driver_private; ++ target_obj_priv = to_intel_bo(target_obj); + + #if WATCH_RELOC + DRM_INFO("%s: obj %p offset %08x target %d " +@@ -3316,6 +3371,16 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, + } + + /* Validate that the target is in a valid r/w GPU domain */ ++ if (reloc->write_domain & (reloc->write_domain - 1)) { ++ DRM_ERROR("reloc with multiple write domains: " ++ "obj %p target %d offset %d " ++ "read %08x write %08x", ++ obj, reloc->target_handle, ++ (int) reloc->offset, ++ reloc->read_domains, ++ reloc->write_domain); ++ return -EINVAL; ++ } + if (reloc->write_domain & I915_GEM_DOMAIN_CPU || + reloc->read_domains & I915_GEM_DOMAIN_CPU) { + DRM_ERROR("reloc with read/write CPU domains: " +@@ -3629,7 +3694,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev, + prepare_to_wait(&dev_priv->pending_flip_queue, + &wait, TASK_INTERRUPTIBLE); + for (i = 0; i < count; i++) { +- obj_priv = object_list[i]->driver_private; ++ obj_priv = to_intel_bo(object_list[i]); + if (atomic_read(&obj_priv->pending_flip) > 0) + break; + } +@@ -3738,7 +3803,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, + goto err; + } + +- obj_priv = object_list[i]->driver_private; ++ obj_priv = to_intel_bo(object_list[i]); + if (obj_priv->in_execbuffer) { + DRM_ERROR("Object %p appears more than once in object list\n", + object_list[i]); +@@ -3864,7 +3929,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, + + for (i = 0; i < args->buffer_count; i++) { + struct drm_gem_object *obj = object_list[i]; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + uint32_t old_write_domain = obj->write_domain; + + obj->write_domain = obj->pending_write_domain; +@@ -3939,7 +4004,7 @@ err: + + for (i = 0; i < args->buffer_count; i++) { + if (object_list[i]) { +- obj_priv = object_list[i]->driver_private; ++ obj_priv = to_intel_bo(object_list[i]); + obj_priv->in_execbuffer = false; + } + drm_gem_object_unreference(object_list[i]); +@@ -4117,7 +4182,7 @@ int + i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) + { + struct drm_device *dev = obj->dev; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int ret; + + i915_verify_inactive(dev, __FILE__, __LINE__); +@@ -4150,7 +4215,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + i915_verify_inactive(dev, __FILE__, __LINE__); + obj_priv->pin_count--; +@@ -4190,7 +4255,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, + mutex_unlock(&dev->struct_mutex); + return -EBADF; + } +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + + if (obj_priv->madv != I915_MADV_WILLNEED) { + DRM_ERROR("Attempting to pin a purgeable buffer\n"); +@@ -4247,7 +4312,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, + return -EBADF; + } + +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + if (obj_priv->pin_filp != file_priv) { + DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", + args->handle); +@@ -4289,7 +4354,7 @@ i915_gem_busy_ioctl(struct 
drm_device *dev, void *data, + */ + i915_gem_retire_requests(dev); + +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + /* Don't count being on the flushing list against the object being + * done. Otherwise, a buffer left on the flushing list but not getting + * flushed (because nobody's flushing that domain) won't ever return +@@ -4335,7 +4400,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, + } + + mutex_lock(&dev->struct_mutex); +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + + if (obj_priv->pin_count) { + drm_gem_object_unreference(obj); +@@ -4396,7 +4461,7 @@ int i915_gem_init_object(struct drm_gem_object *obj) + void i915_gem_free_object(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + trace_i915_gem_object_destroy(obj); + +@@ -4444,8 +4509,7 @@ int + i915_gem_idle(struct drm_device *dev) + { + drm_i915_private_t *dev_priv = dev->dev_private; +- uint32_t seqno, cur_seqno, last_seqno; +- int stuck, ret; ++ int ret; + + mutex_lock(&dev->struct_mutex); + +@@ -4454,115 +4518,36 @@ i915_gem_idle(struct drm_device *dev) + return 0; + } + +- /* Hack! Don't let anybody do execbuf while we don't control the chip. +- * We need to replace this with a semaphore, or something. +- */ +- dev_priv->mm.suspended = 1; +- del_timer(&dev_priv->hangcheck_timer); +- +- /* Cancel the retire work handler, wait for it to finish if running +- */ +- mutex_unlock(&dev->struct_mutex); +- cancel_delayed_work_sync(&dev_priv->mm.retire_work); +- mutex_lock(&dev->struct_mutex); +- +- i915_kernel_lost_context(dev); +- +- /* Flush the GPU along with all non-CPU write domains +- */ +- i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); +- seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); +- +- if (seqno == 0) { ++ ret = i915_gpu_idle(dev); ++ if (ret) { + mutex_unlock(&dev->struct_mutex); +- return -ENOMEM; ++ return ret; + } + +- dev_priv->mm.waiting_gem_seqno = seqno; +- last_seqno = 0; +- stuck = 0; +- for (;;) { +- cur_seqno = i915_get_gem_seqno(dev); +- if (i915_seqno_passed(cur_seqno, seqno)) +- break; +- if (last_seqno == cur_seqno) { +- if (stuck++ > 100) { +- DRM_ERROR("hardware wedged\n"); +- atomic_set(&dev_priv->mm.wedged, 1); +- DRM_WAKEUP(&dev_priv->irq_queue); +- break; +- } ++ /* Under UMS, be paranoid and evict. */ ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) { ++ ret = i915_gem_evict_from_inactive_list(dev); ++ if (ret) { ++ mutex_unlock(&dev->struct_mutex); ++ return ret; + } +- msleep(10); +- last_seqno = cur_seqno; + } +- dev_priv->mm.waiting_gem_seqno = 0; +- +- i915_gem_retire_requests(dev); + +- spin_lock(&dev_priv->mm.active_list_lock); +- if (!atomic_read(&dev_priv->mm.wedged)) { +- /* Active and flushing should now be empty as we've +- * waited for a sequence higher than any pending execbuffer +- */ +- WARN_ON(!list_empty(&dev_priv->mm.active_list)); +- WARN_ON(!list_empty(&dev_priv->mm.flushing_list)); +- /* Request should now be empty as we've also waited +- * for the last request in the list +- */ +- WARN_ON(!list_empty(&dev_priv->mm.request_list)); +- } +- +- /* Empty the active and flushing lists to inactive. If there's +- * anything left at this point, it means that we're wedged and +- * nothing good's going to happen by leaving them there. So strip +- * the GPU domains and just stuff them onto inactive. ++ /* Hack! 
Don't let anybody do execbuf while we don't control the chip. ++ * We need to replace this with a semaphore, or something. ++ * And not confound mm.suspended! + */ +- while (!list_empty(&dev_priv->mm.active_list)) { +- struct drm_gem_object *obj; +- uint32_t old_write_domain; +- +- obj = list_first_entry(&dev_priv->mm.active_list, +- struct drm_i915_gem_object, +- list)->obj; +- old_write_domain = obj->write_domain; +- obj->write_domain &= ~I915_GEM_GPU_DOMAINS; +- i915_gem_object_move_to_inactive(obj); +- +- trace_i915_gem_object_change_domain(obj, +- obj->read_domains, +- old_write_domain); +- } +- spin_unlock(&dev_priv->mm.active_list_lock); +- +- while (!list_empty(&dev_priv->mm.flushing_list)) { +- struct drm_gem_object *obj; +- uint32_t old_write_domain; +- +- obj = list_first_entry(&dev_priv->mm.flushing_list, +- struct drm_i915_gem_object, +- list)->obj; +- old_write_domain = obj->write_domain; +- obj->write_domain &= ~I915_GEM_GPU_DOMAINS; +- i915_gem_object_move_to_inactive(obj); +- +- trace_i915_gem_object_change_domain(obj, +- obj->read_domains, +- old_write_domain); +- } +- +- +- /* Move all inactive buffers out of the GTT. */ +- ret = i915_gem_evict_from_inactive_list(dev); +- WARN_ON(!list_empty(&dev_priv->mm.inactive_list)); +- if (ret) { +- mutex_unlock(&dev->struct_mutex); +- return ret; +- } ++ dev_priv->mm.suspended = 1; ++ del_timer(&dev_priv->hangcheck_timer); + ++ i915_kernel_lost_context(dev); + i915_gem_cleanup_ringbuffer(dev); ++ + mutex_unlock(&dev->struct_mutex); + ++ /* Cancel the retire work handler, which should be idle now. */ ++ cancel_delayed_work_sync(&dev_priv->mm.retire_work); ++ + return 0; + } + +@@ -4585,7 +4570,7 @@ i915_gem_init_hws(struct drm_device *dev) + DRM_ERROR("Failed to allocate status page\n"); + return -ENOMEM; + } +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + obj_priv->agp_type = AGP_USER_CACHED_MEMORY; + + ret = i915_gem_object_pin(obj, 4096); +@@ -4606,8 +4591,13 @@ i915_gem_init_hws(struct drm_device *dev) + } + dev_priv->hws_obj = obj; + memset(dev_priv->hw_status_page, 0, PAGE_SIZE); +- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); +- I915_READ(HWS_PGA); /* posting read */ ++ if (IS_GEN6(dev)) { ++ I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr); ++ I915_READ(HWS_PGA_GEN6); /* posting read */ ++ } else { ++ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); ++ I915_READ(HWS_PGA); /* posting read */ ++ } + DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); + + return 0; +@@ -4624,7 +4614,7 @@ i915_gem_cleanup_hws(struct drm_device *dev) + return; + + obj = dev_priv->hws_obj; +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + + kunmap(obj_priv->pages[0]); + i915_gem_object_unpin(obj); +@@ -4658,7 +4648,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev) + i915_gem_cleanup_hws(dev); + return -ENOMEM; + } +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + + ret = i915_gem_object_pin(obj, 4096); + if (ret != 0) { +@@ -4744,6 +4734,11 @@ i915_gem_init_ringbuffer(struct drm_device *dev) + ring->space += ring->Size; + } + ++ if (IS_I9XX(dev) && !IS_GEN3(dev)) { ++ I915_WRITE(MI_MODE, ++ (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); ++ } ++ + return 0; + } + +@@ -4849,7 +4844,8 @@ i915_gem_load(struct drm_device *dev) + spin_unlock(&shrink_list_lock); + + /* Old X drivers will take 0-2 for front, back, depth buffers */ +- dev_priv->fence_reg_start = 3; ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ dev_priv->fence_reg_start = 3; + + if 
(IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + dev_priv->num_fence_regs = 16; +@@ -4945,7 +4941,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev, + int ret; + int page_count; + +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + if (!obj_priv->phys_obj) + return; + +@@ -4984,7 +4980,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, + if (id > I915_MAX_PHYS_OBJECT) + return -EINVAL; + +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + + if (obj_priv->phys_obj) { + if (obj_priv->phys_obj->id == id) +@@ -5035,7 +5031,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) + { +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + void *obj_addr; + int ret; + char __user *user_data; +diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c +--- a/drivers/gpu/drm/i915/i915_gem_debug.c ++++ b/drivers/gpu/drm/i915/i915_gem_debug.c +@@ -72,7 +72,7 @@ void + i915_gem_dump_object(struct drm_gem_object *obj, int len, + const char *where, uint32_t mark) + { +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int page; + + DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); +@@ -137,7 +137,7 @@ void + i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) + { + struct drm_device *dev = obj->dev; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int page; + uint32_t *gtt_mapping; + uint32_t *backing_map = NULL; +diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c +--- a/drivers/gpu/drm/i915/i915_gem_tiling.c ++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c +@@ -25,8 +25,6 @@ + * + */ + +-#include +-#include + #include "linux/string.h" + #include "linux/bitops.h" + #include "drmP.h" +@@ -83,120 +81,6 @@ + * to match what the GPU expects. + */ + +-#define MCHBAR_I915 0x44 +-#define MCHBAR_I965 0x48 +-#define MCHBAR_SIZE (4*4096) +- +-#define DEVEN_REG 0x54 +-#define DEVEN_MCHBAR_EN (1 << 28) +- +-/* Allocate space for the MCH regs if needed, return nonzero on error */ +-static int +-intel_alloc_mchbar_resource(struct drm_device *dev) +-{ +- drm_i915_private_t *dev_priv = dev->dev_private; +- int reg = IS_I965G(dev) ? 
MCHBAR_I965 : MCHBAR_I915; +- u32 temp_lo, temp_hi = 0; +- u64 mchbar_addr; +- int ret = 0; +- +- if (IS_I965G(dev)) +- pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); +- pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); +- mchbar_addr = ((u64)temp_hi << 32) | temp_lo; +- +- /* If ACPI doesn't have it, assume we need to allocate it ourselves */ +-#ifdef CONFIG_PNP +- if (mchbar_addr && +- pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { +- ret = 0; +- goto out; +- } +-#endif +- +- /* Get some space for it */ +- ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res, +- MCHBAR_SIZE, MCHBAR_SIZE, +- PCIBIOS_MIN_MEM, +- 0, pcibios_align_resource, +- dev_priv->bridge_dev); +- if (ret) { +- DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); +- dev_priv->mch_res.start = 0; +- goto out; +- } +- +- if (IS_I965G(dev)) +- pci_write_config_dword(dev_priv->bridge_dev, reg + 4, +- upper_32_bits(dev_priv->mch_res.start)); +- +- pci_write_config_dword(dev_priv->bridge_dev, reg, +- lower_32_bits(dev_priv->mch_res.start)); +-out: +- return ret; +-} +- +-/* Setup MCHBAR if possible, return true if we should disable it again */ +-static bool +-intel_setup_mchbar(struct drm_device *dev) +-{ +- drm_i915_private_t *dev_priv = dev->dev_private; +- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; +- u32 temp; +- bool need_disable = false, enabled; +- +- if (IS_I915G(dev) || IS_I915GM(dev)) { +- pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); +- enabled = !!(temp & DEVEN_MCHBAR_EN); +- } else { +- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); +- enabled = temp & 1; +- } +- +- /* If it's already enabled, don't have to do anything */ +- if (enabled) +- goto out; +- +- if (intel_alloc_mchbar_resource(dev)) +- goto out; +- +- need_disable = true; +- +- /* Space is allocated or reserved, so enable it. */ +- if (IS_I915G(dev) || IS_I915GM(dev)) { +- pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, +- temp | DEVEN_MCHBAR_EN); +- } else { +- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); +- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); +- } +-out: +- return need_disable; +-} +- +-static void +-intel_teardown_mchbar(struct drm_device *dev, bool disable) +-{ +- drm_i915_private_t *dev_priv = dev->dev_private; +- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; +- u32 temp; +- +- if (disable) { +- if (IS_I915G(dev) || IS_I915GM(dev)) { +- pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); +- temp &= ~DEVEN_MCHBAR_EN; +- pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp); +- } else { +- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); +- temp &= ~1; +- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp); +- } +- } +- +- if (dev_priv->mch_res.start) +- release_resource(&dev_priv->mch_res); +-} +- + /** + * Detects bit 6 swizzling of address lookup between IGD access and CPU + * access through main memory. +@@ -207,9 +91,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) + drm_i915_private_t *dev_priv = dev->dev_private; + uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; + uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; +- bool need_disable; + +- if (IS_IRONLAKE(dev)) { ++ if (IS_IRONLAKE(dev) || IS_GEN6(dev)) { + /* On Ironlake whatever DRAM config, GPU always do + * same swizzling setup. 
+ */ +@@ -224,9 +107,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) + } else if (IS_MOBILE(dev)) { + uint32_t dcc; + +- /* Try to make sure MCHBAR is enabled before poking at it */ +- need_disable = intel_setup_mchbar(dev); +- + /* On mobile 9xx chipsets, channel interleave by the CPU is + * determined by DCC. For single-channel, neither the CPU + * nor the GPU do swizzling. For dual channel interleaved, +@@ -266,8 +146,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) + swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; + swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; + } +- +- intel_teardown_mchbar(dev, need_disable); + } else { + /* The 965, G33, and newer, have a very flexible memory + * configuration. It will enable dual-channel mode +@@ -302,39 +180,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) + dev_priv->mm.bit_6_swizzle_y = swizzle_y; + } + +- +-/** +- * Returns whether an object is currently fenceable. If not, it may need +- * to be unbound and have its pitch adjusted. +- */ +-bool +-i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj) +-{ +- struct drm_i915_gem_object *obj_priv = obj->driver_private; +- +- if (IS_I965G(dev)) { +- /* The 965 can have fences at any page boundary. */ +- if (obj->size & 4095) +- return false; +- return true; +- } else if (IS_I9XX(dev)) { +- if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) +- return false; +- } else { +- if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) +- return false; +- } +- +- /* Power of two sized... */ +- if (obj->size & (obj->size - 1)) +- return false; +- +- /* Objects must be size aligned as well */ +- if (obj_priv->gtt_offset & (obj->size - 1)) +- return false; +- return true; +-} +- + /* Check pitch constriants for all chips & tiling formats */ + bool + i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) +@@ -357,21 +202,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) + * reg, so dont bother to check the size */ + if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) + return false; +- } else if (IS_I9XX(dev)) { +- uint32_t pitch_val = ffs(stride / tile_width) - 1; +- +- /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB) +- * instead of 4 (2KB) on 945s. 
+- */ +- if (pitch_val > I915_FENCE_MAX_PITCH_VAL || +- size > (I830_FENCE_MAX_SIZE_VAL << 20)) ++ } else if (IS_GEN3(dev) || IS_GEN2(dev)) { ++ if (stride > 8192) + return false; +- } else { +- uint32_t pitch_val = ffs(stride / tile_width) - 1; + +- if (pitch_val > I830_FENCE_MAX_PITCH_VAL || +- size > (I830_FENCE_MAX_SIZE_VAL << 19)) +- return false; ++ if (IS_GEN3(dev)) { ++ if (size > I830_FENCE_MAX_SIZE_VAL << 20) ++ return false; ++ } else { ++ if (size > I830_FENCE_MAX_SIZE_VAL << 19) ++ return false; ++ } + } + + /* 965+ just needs multiples of tile width */ +@@ -391,11 +232,11 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) + return true; + } + +-static bool ++bool + i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) + { + struct drm_device *dev = obj->dev; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + if (obj_priv->gtt_space == NULL) + return true; +@@ -435,7 +276,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) + return -EINVAL; +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + + if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { + drm_gem_object_unreference_unlocked(obj); +@@ -480,9 +321,12 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, + * need to ensure that any fence register is cleared. + */ + if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode)) +- ret = i915_gem_object_unbind(obj); ++ ret = i915_gem_object_unbind(obj); ++ else if (obj_priv->fence_reg != I915_FENCE_REG_NONE) ++ ret = i915_gem_object_put_fence_reg(obj); + else +- ret = i915_gem_object_put_fence_reg(obj); ++ i915_gem_release_mmap(obj); ++ + if (ret != 0) { + WARN(ret != -ERESTARTSYS, + "failed to reset object for tiling switch"); +@@ -491,12 +335,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, + goto err; + } + +- /* If we've changed tiling, GTT-mappings of the object +- * need to re-fault to ensure that the correct fence register +- * setup is in place. 
+- */ +- i915_gem_release_mmap(obj); +- + obj_priv->tiling_mode = args->tiling_mode; + obj_priv->stride = args->stride; + } +@@ -522,7 +360,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) + return -EINVAL; +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + + mutex_lock(&dev->struct_mutex); + +@@ -585,7 +423,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int page_count = obj->size >> PAGE_SHIFT; + int i; + +@@ -614,7 +452,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int page_count = obj->size >> PAGE_SHIFT; + int i; + +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c +--- a/drivers/gpu/drm/i915/i915_irq.c ++++ b/drivers/gpu/drm/i915/i915_irq.c +@@ -166,11 +166,15 @@ void intel_enable_asle (struct drm_device *dev) + { + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + ironlake_enable_display_irq(dev_priv, DE_GSE); +- else ++ else { + i915_enable_pipestat(dev_priv, 1, + I915_LEGACY_BLC_EVENT_ENABLE); ++ if (IS_I965G(dev)) ++ i915_enable_pipestat(dev_priv, 0, ++ I915_LEGACY_BLC_EVENT_ENABLE); ++ } + } + + /** +@@ -255,20 +259,71 @@ static void i915_hotplug_work_func(struct work_struct *work) + hotplug_work); + struct drm_device *dev = dev_priv->dev; + struct drm_mode_config *mode_config = &dev->mode_config; +- struct drm_connector *connector; ++ struct drm_encoder *encoder; + +- if (mode_config->num_connector) { +- list_for_each_entry(connector, &mode_config->connector_list, head) { +- struct intel_output *intel_output = to_intel_output(connector); ++ if (mode_config->num_encoder) { ++ list_for_each_entry(encoder, &mode_config->encoder_list, head) { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + +- if (intel_output->hot_plug) +- (*intel_output->hot_plug) (intel_output); ++ if (intel_encoder->hot_plug) ++ (*intel_encoder->hot_plug) (intel_encoder); + } + } + /* Just fire off a uevent and let userspace tell us what to do */ + drm_sysfs_hotplug_event(dev); + } + ++static void i915_handle_rps_change(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ u32 busy_up, busy_down, max_avg, min_avg; ++ u16 rgvswctl; ++ u8 new_delay = dev_priv->cur_delay; ++ ++ I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG); ++ busy_up = I915_READ(RCPREVBSYTUPAVG); ++ busy_down = I915_READ(RCPREVBSYTDNAVG); ++ max_avg = I915_READ(RCBMAXAVG); ++ min_avg = I915_READ(RCBMINAVG); ++ ++ /* Handle RCS change request from hw */ ++ if (busy_up > max_avg) { ++ if (dev_priv->cur_delay != dev_priv->max_delay) ++ new_delay = dev_priv->cur_delay - 1; ++ if (new_delay < dev_priv->max_delay) ++ new_delay = dev_priv->max_delay; ++ } else if (busy_down < min_avg) { ++ if (dev_priv->cur_delay != dev_priv->min_delay) ++ new_delay = dev_priv->cur_delay + 1; ++ if (new_delay > dev_priv->min_delay) ++ new_delay = dev_priv->min_delay; ++ } ++ ++ DRM_DEBUG("rps change requested: %d -> 
%d\n", ++ dev_priv->cur_delay, new_delay); ++ ++ rgvswctl = I915_READ(MEMSWCTL); ++ if (rgvswctl & MEMCTL_CMD_STS) { ++ DRM_ERROR("gpu busy, RCS change rejected\n"); ++ return; /* still busy with another command */ ++ } ++ ++ /* Program the new state */ ++ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | ++ (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; ++ I915_WRITE(MEMSWCTL, rgvswctl); ++ POSTING_READ(MEMSWCTL); ++ ++ rgvswctl |= MEMCTL_CMD_STS; ++ I915_WRITE(MEMSWCTL, rgvswctl); ++ ++ dev_priv->cur_delay = new_delay; ++ ++ DRM_DEBUG("rps changed\n"); ++ ++ return; ++} ++ + irqreturn_t ironlake_irq_handler(struct drm_device *dev) + { + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; +@@ -331,6 +386,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) + queue_work(dev_priv->wq, &dev_priv->hotplug_work); + } + ++ if (de_iir & DE_PCU_EVENT) { ++ I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS)); ++ i915_handle_rps_change(dev); ++ } ++ + /* should clear PCH hotplug event before clear CPU irq */ + I915_WRITE(SDEIIR, pch_iir); + I915_WRITE(GTIIR, gt_iir); +@@ -376,6 +436,121 @@ static void i915_error_work_func(struct work_struct *work) + } + } + ++static struct drm_i915_error_object * ++i915_error_object_create(struct drm_device *dev, ++ struct drm_gem_object *src) ++{ ++ struct drm_i915_error_object *dst; ++ struct drm_i915_gem_object *src_priv; ++ int page, page_count; ++ ++ if (src == NULL) ++ return NULL; ++ ++ src_priv = to_intel_bo(src); ++ if (src_priv->pages == NULL) ++ return NULL; ++ ++ page_count = src->size / PAGE_SIZE; ++ ++ dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC); ++ if (dst == NULL) ++ return NULL; ++ ++ for (page = 0; page < page_count; page++) { ++ void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC); ++ if (d == NULL) ++ goto unwind; ++ s = kmap_atomic(src_priv->pages[page], KM_USER0); ++ memcpy(d, s, PAGE_SIZE); ++ kunmap_atomic(s, KM_USER0); ++ dst->pages[page] = d; ++ } ++ dst->page_count = page_count; ++ dst->gtt_offset = src_priv->gtt_offset; ++ ++ return dst; ++ ++unwind: ++ while (page--) ++ kfree(dst->pages[page]); ++ kfree(dst); ++ return NULL; ++} ++ ++static void ++i915_error_object_free(struct drm_i915_error_object *obj) ++{ ++ int page; ++ ++ if (obj == NULL) ++ return; ++ ++ for (page = 0; page < obj->page_count; page++) ++ kfree(obj->pages[page]); ++ ++ kfree(obj); ++} ++ ++static void ++i915_error_state_free(struct drm_device *dev, ++ struct drm_i915_error_state *error) ++{ ++ i915_error_object_free(error->batchbuffer[0]); ++ i915_error_object_free(error->batchbuffer[1]); ++ i915_error_object_free(error->ringbuffer); ++ kfree(error->active_bo); ++ kfree(error); ++} ++ ++static u32 ++i915_get_bbaddr(struct drm_device *dev, u32 *ring) ++{ ++ u32 cmd; ++ ++ if (IS_I830(dev) || IS_845G(dev)) ++ cmd = MI_BATCH_BUFFER; ++ else if (IS_I965G(dev)) ++ cmd = (MI_BATCH_BUFFER_START | (2 << 6) | ++ MI_BATCH_NON_SECURE_I965); ++ else ++ cmd = (MI_BATCH_BUFFER_START | (2 << 6)); ++ ++ return ring[0] == cmd ? ring[1] : 0; ++} ++ ++static u32 ++i915_ringbuffer_last_batch(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ u32 head, bbaddr; ++ u32 *ring; ++ ++ /* Locate the current position in the ringbuffer and walk back ++ * to find the most recently dispatched batch buffer. 
++ */ ++ bbaddr = 0; ++ head = I915_READ(PRB0_HEAD) & HEAD_ADDR; ++ ring = (u32 *)(dev_priv->ring.virtual_start + head); ++ ++ while (--ring >= (u32 *)dev_priv->ring.virtual_start) { ++ bbaddr = i915_get_bbaddr(dev, ring); ++ if (bbaddr) ++ break; ++ } ++ ++ if (bbaddr == 0) { ++ ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size); ++ while (--ring >= (u32 *)dev_priv->ring.virtual_start) { ++ bbaddr = i915_get_bbaddr(dev, ring); ++ if (bbaddr) ++ break; ++ } ++ } ++ ++ return bbaddr; ++} ++ + /** + * i915_capture_error_state - capture an error record for later analysis + * @dev: drm device +@@ -388,19 +563,26 @@ static void i915_error_work_func(struct work_struct *work) + static void i915_capture_error_state(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv; + struct drm_i915_error_state *error; ++ struct drm_gem_object *batchbuffer[2]; + unsigned long flags; ++ u32 bbaddr; ++ int count; + + spin_lock_irqsave(&dev_priv->error_lock, flags); +- if (dev_priv->first_error) +- goto out; ++ error = dev_priv->first_error; ++ spin_unlock_irqrestore(&dev_priv->error_lock, flags); ++ if (error) ++ return; + + error = kmalloc(sizeof(*error), GFP_ATOMIC); + if (!error) { +- DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n"); +- goto out; ++ DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); ++ return; + } + ++ error->seqno = i915_get_gem_seqno(dev); + error->eir = I915_READ(EIR); + error->pgtbl_er = I915_READ(PGTBL_ER); + error->pipeastat = I915_READ(PIPEASTAT); +@@ -411,6 +593,7 @@ static void i915_capture_error_state(struct drm_device *dev) + error->ipehr = I915_READ(IPEHR); + error->instdone = I915_READ(INSTDONE); + error->acthd = I915_READ(ACTHD); ++ error->bbaddr = 0; + } else { + error->ipeir = I915_READ(IPEIR_I965); + error->ipehr = I915_READ(IPEHR_I965); +@@ -418,14 +601,101 @@ static void i915_capture_error_state(struct drm_device *dev) + error->instps = I915_READ(INSTPS); + error->instdone1 = I915_READ(INSTDONE1); + error->acthd = I915_READ(ACTHD_I965); ++ error->bbaddr = I915_READ64(BB_ADDR); + } + +- do_gettimeofday(&error->time); ++ bbaddr = i915_ringbuffer_last_batch(dev); + +- dev_priv->first_error = error; ++ /* Grab the current batchbuffer, most likely to have crashed. */ ++ batchbuffer[0] = NULL; ++ batchbuffer[1] = NULL; ++ count = 0; ++ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { ++ struct drm_gem_object *obj = obj_priv->obj; + +-out: ++ if (batchbuffer[0] == NULL && ++ bbaddr >= obj_priv->gtt_offset && ++ bbaddr < obj_priv->gtt_offset + obj->size) ++ batchbuffer[0] = obj; ++ ++ if (batchbuffer[1] == NULL && ++ error->acthd >= obj_priv->gtt_offset && ++ error->acthd < obj_priv->gtt_offset + obj->size && ++ batchbuffer[0] != obj) ++ batchbuffer[1] = obj; ++ ++ count++; ++ } ++ ++ /* We need to copy these to an anonymous buffer as the simplest ++ * method to avoid being overwritten by userpace. ++ */ ++ error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); ++ error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); ++ ++ /* Record the ringbuffer */ ++ error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj); ++ ++ /* Record buffers on the active list. 
*/ ++ error->active_bo = NULL; ++ error->active_bo_count = 0; ++ ++ if (count) ++ error->active_bo = kmalloc(sizeof(*error->active_bo)*count, ++ GFP_ATOMIC); ++ ++ if (error->active_bo) { ++ int i = 0; ++ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { ++ struct drm_gem_object *obj = obj_priv->obj; ++ ++ error->active_bo[i].size = obj->size; ++ error->active_bo[i].name = obj->name; ++ error->active_bo[i].seqno = obj_priv->last_rendering_seqno; ++ error->active_bo[i].gtt_offset = obj_priv->gtt_offset; ++ error->active_bo[i].read_domains = obj->read_domains; ++ error->active_bo[i].write_domain = obj->write_domain; ++ error->active_bo[i].fence_reg = obj_priv->fence_reg; ++ error->active_bo[i].pinned = 0; ++ if (obj_priv->pin_count > 0) ++ error->active_bo[i].pinned = 1; ++ if (obj_priv->user_pin_count > 0) ++ error->active_bo[i].pinned = -1; ++ error->active_bo[i].tiling = obj_priv->tiling_mode; ++ error->active_bo[i].dirty = obj_priv->dirty; ++ error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED; ++ ++ if (++i == count) ++ break; ++ } ++ error->active_bo_count = i; ++ } ++ ++ do_gettimeofday(&error->time); ++ ++ spin_lock_irqsave(&dev_priv->error_lock, flags); ++ if (dev_priv->first_error == NULL) { ++ dev_priv->first_error = error; ++ error = NULL; ++ } + spin_unlock_irqrestore(&dev_priv->error_lock, flags); ++ ++ if (error) ++ i915_error_state_free(dev, error); ++} ++ ++void i915_destroy_error_state(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct drm_i915_error_state *error; ++ ++ spin_lock(&dev_priv->error_lock); ++ error = dev_priv->first_error; ++ dev_priv->first_error = NULL; ++ spin_unlock(&dev_priv->error_lock); ++ ++ if (error) ++ i915_error_state_free(dev, error); + } + + /** +@@ -576,7 +846,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) + + atomic_inc(&dev_priv->irq_received); + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + return ironlake_irq_handler(dev); + + iir = I915_READ(IIR); +@@ -679,7 +949,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) + intel_finish_page_flip(dev, 1); + } + +- if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || ++ if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) || ++ (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || + (iir & I915_ASLE_INTERRUPT)) + opregion_asle_intr(dev); + +@@ -737,7 +1008,7 @@ void i915_user_irq_get(struct drm_device *dev) + + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); + else + i915_enable_irq(dev_priv, I915_USER_INTERRUPT); +@@ -753,7 +1024,7 @@ void i915_user_irq_put(struct drm_device *dev) + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); + if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); + else + i915_disable_irq(dev_priv, I915_USER_INTERRUPT); +@@ -861,7 +1132,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) + return -EINVAL; + + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
+ DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); + else if (IS_I965G(dev)) +@@ -883,7 +1154,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + ironlake_disable_display_irq(dev_priv, (pipe == 0) ? + DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); + else +@@ -897,7 +1168,7 @@ void i915_enable_interrupt (struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; + +- if (!IS_IRONLAKE(dev)) ++ if (!HAS_PCH_SPLIT(dev)) + opregion_enable_asle(dev); + dev_priv->irq_enabled = 1; + } +@@ -973,7 +1244,11 @@ void i915_hangcheck_elapsed(unsigned long data) + struct drm_device *dev = (struct drm_device *)data; + drm_i915_private_t *dev_priv = dev->dev_private; + uint32_t acthd; +- ++ ++ /* No reset support on this chip yet. */ ++ if (IS_GEN6(dev)) ++ return; ++ + if (!IS_I965G(dev)) + acthd = I915_READ(ACTHD); + else +@@ -1064,6 +1339,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev) + I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); + (void) I915_READ(SDEIER); + ++ if (IS_IRONLAKE_M(dev)) { ++ /* Clear & enable PCU event interrupts */ ++ I915_WRITE(DEIIR, DE_PCU_EVENT); ++ I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); ++ ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); ++ } ++ + return 0; + } + +@@ -1076,7 +1358,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev) + INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); + INIT_WORK(&dev_priv->error_work, i915_error_work_func); + +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + ironlake_irq_preinstall(dev); + return; + } +@@ -1108,7 +1390,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) + + dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + return ironlake_irq_postinstall(dev); + + /* Unmask the interrupts that we always want on. 
*/ +@@ -1196,7 +1478,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev) + + dev_priv->vblank_pipe = 0; + +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + ironlake_irq_uninstall(dev); + return; + } +diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c +--- a/drivers/gpu/drm/i915/i915_opregion.c ++++ b/drivers/gpu/drm/i915/i915_opregion.c +@@ -382,8 +382,57 @@ static void intel_didl_outputs(struct drm_device *dev) + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_opregion *opregion = &dev_priv->opregion; + struct drm_connector *connector; ++ acpi_handle handle; ++ struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; ++ unsigned long long device_id; ++ acpi_status status; + int i = 0; + ++ handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); ++ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) ++ return; ++ ++ if (acpi_is_video_device(acpi_dev)) ++ acpi_video_bus = acpi_dev; ++ else { ++ list_for_each_entry(acpi_cdev, &acpi_dev->children, node) { ++ if (acpi_is_video_device(acpi_cdev)) { ++ acpi_video_bus = acpi_cdev; ++ break; ++ } ++ } ++ } ++ ++ if (!acpi_video_bus) { ++ printk(KERN_WARNING "No ACPI video bus found\n"); ++ return; ++ } ++ ++ list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { ++ if (i >= 8) { ++ dev_printk (KERN_ERR, &dev->pdev->dev, ++ "More than 8 outputs detected\n"); ++ return; ++ } ++ status = ++ acpi_evaluate_integer(acpi_cdev->handle, "_ADR", ++ NULL, &device_id); ++ if (ACPI_SUCCESS(status)) { ++ if (!device_id) ++ goto blind_set; ++ opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f); ++ i++; ++ } ++ } ++ ++end: ++ /* If fewer than 8 outputs, the list must be null terminated */ ++ if (i < 8) ++ opregion->acpi->didl[i] = 0; ++ return; ++ ++blind_set: ++ i = 0; + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + int output_type = ACPI_OTHER_OUTPUT; + if (i >= 8) { +@@ -416,10 +465,7 @@ static void intel_didl_outputs(struct drm_device *dev) + opregion->acpi->didl[i] |= (1<<31) | output_type | i; + i++; + } +- +- /* If fewer than 8 outputs, the list must be null terminated */ +- if (i < 8) +- opregion->acpi->didl[i] = 0; ++ goto end; + } + + int intel_opregion_init(struct drm_device *dev, int resume) +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -53,6 +53,25 @@ + #define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) + #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) + ++#define SNB_GMCH_CTRL 0x50 ++#define SNB_GMCH_GMS_STOLEN_MASK 0xF8 ++#define SNB_GMCH_GMS_STOLEN_32M (1 << 3) ++#define SNB_GMCH_GMS_STOLEN_64M (2 << 3) ++#define SNB_GMCH_GMS_STOLEN_96M (3 << 3) ++#define SNB_GMCH_GMS_STOLEN_128M (4 << 3) ++#define SNB_GMCH_GMS_STOLEN_160M (5 << 3) ++#define SNB_GMCH_GMS_STOLEN_192M (6 << 3) ++#define SNB_GMCH_GMS_STOLEN_224M (7 << 3) ++#define SNB_GMCH_GMS_STOLEN_256M (8 << 3) ++#define SNB_GMCH_GMS_STOLEN_288M (9 << 3) ++#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) ++#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) ++#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) ++#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) ++#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) ++#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) ++#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) ++ + /* PCI config space */ + + #define HPLLCC 0xc0 /* 855 only */ +@@ -61,6 +80,7 @@ + #define GC_CLOCK_100_200 (1 << 0) + #define GC_CLOCK_100_133 (2 << 0) + #define GC_CLOCK_166_250 (3 << 0) 
++#define GCFGC2 0xda + #define GCFGC 0xf0 /* 915+ only */ + #define GC_LOW_FREQUENCY_ENABLE (1 << 7) + #define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) +@@ -221,7 +241,7 @@ + #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) + #define I830_FENCE_PITCH_SHIFT 4 + #define I830_FENCE_REG_VALID (1<<0) +-#define I915_FENCE_MAX_PITCH_VAL 0x10 ++#define I915_FENCE_MAX_PITCH_VAL 4 + #define I830_FENCE_MAX_PITCH_VAL 6 + #define I830_FENCE_MAX_SIZE_VAL (1<<8) + +@@ -234,6 +254,9 @@ + #define I965_FENCE_REG_VALID (1<<0) + #define I965_FENCE_MAX_PITCH_VAL 0x0400 + ++#define FENCE_REG_SANDYBRIDGE_0 0x100000 ++#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32 ++ + /* + * Instruction and interrupt control regs + */ +@@ -265,6 +288,7 @@ + #define INSTDONE1 0x0207c /* 965+ only */ + #define ACTHD_I965 0x02074 + #define HWS_PGA 0x02080 ++#define HWS_PGA_GEN6 0x04080 + #define HWS_ADDRESS_MASK 0xfffff000 + #define HWS_START_ADDRESS_SHIFT 4 + #define PWRCTXA 0x2088 /* 965GM+ only */ +@@ -274,6 +298,10 @@ + #define INSTDONE 0x02090 + #define NOPID 0x02094 + #define HWSTAM 0x02098 ++ ++#define MI_MODE 0x0209c ++# define VS_TIMER_DISPATCH (1 << 6) ++ + #define SCPD0 0x0209c /* 915+ only */ + #define IER 0x020a0 + #define IIR 0x020a4 +@@ -282,7 +310,7 @@ + #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) + #define I915_DISPLAY_PORT_INTERRUPT (1<<17) + #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) +-#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) ++#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */ + #define I915_HWB_OOM_INTERRUPT (1<<13) + #define I915_SYNC_STATUS_INTERRUPT (1<<12) + #define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) +@@ -306,11 +334,14 @@ + #define I915_ERROR_MEMORY_REFRESH (1<<1) + #define I915_ERROR_INSTRUCTION (1<<0) + #define INSTPM 0x020c0 ++#define INSTPM_SELF_EN (1<<12) /* 915GM only */ + #define ACTHD 0x020c8 + #define FW_BLC 0x020d8 + #define FW_BLC2 0x020dc + #define FW_BLC_SELF 0x020e0 /* 915+ only */ +-#define FW_BLC_SELF_EN (1<<15) ++#define FW_BLC_SELF_EN_MASK (1<<31) ++#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ ++#define FW_BLC_SELF_EN (1<<15) /* 945 only */ + #define MM_BURST_LENGTH 0x00700000 + #define MM_FIFO_WATERMARK 0x0001F000 + #define LM_BURST_LENGTH 0x00000700 +@@ -324,6 +355,7 @@ + #define CM0_COLOR_EVICT_DISABLE (1<<3) + #define CM0_DEPTH_WRITE_DISABLE (1<<1) + #define CM0_RC_OP_FLUSH_DISABLE (1<<0) ++#define BB_ADDR 0x02140 /* 8 bytes */ + #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ + + +@@ -338,7 +370,7 @@ + #define FBC_CTL_PERIODIC (1<<30) + #define FBC_CTL_INTERVAL_SHIFT (16) + #define FBC_CTL_UNCOMPRESSIBLE (1<<14) +-#define FBC_C3_IDLE (1<<13) ++#define FBC_CTL_C3_IDLE (1<<13) + #define FBC_CTL_STRIDE_SHIFT (5) + #define FBC_CTL_FENCENO (1<<0) + #define FBC_COMMAND 0x0320c +@@ -784,10 +816,144 @@ + #define CLKCFG_MEM_800 (3 << 4) + #define CLKCFG_MEM_MASK (7 << 4) + +-/** GM965 GM45 render standby register */ +-#define MCHBAR_RENDER_STANDBY 0x111B8 ++#define CRSTANDVID 0x11100 ++#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ ++#define PXVFREQ_PX_MASK 0x7f000000 ++#define PXVFREQ_PX_SHIFT 24 ++#define VIDFREQ_BASE 0x11110 ++#define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */ ++#define VIDFREQ2 0x11114 ++#define VIDFREQ3 0x11118 ++#define VIDFREQ4 0x1111c ++#define VIDFREQ_P0_MASK 0x1f000000 ++#define VIDFREQ_P0_SHIFT 24 ++#define VIDFREQ_P0_CSCLK_MASK 0x00f00000 ++#define VIDFREQ_P0_CSCLK_SHIFT 20 ++#define VIDFREQ_P0_CRCLK_MASK 0x000f0000 ++#define 
VIDFREQ_P0_CRCLK_SHIFT 16 ++#define VIDFREQ_P1_MASK 0x00001f00 ++#define VIDFREQ_P1_SHIFT 8 ++#define VIDFREQ_P1_CSCLK_MASK 0x000000f0 ++#define VIDFREQ_P1_CSCLK_SHIFT 4 ++#define VIDFREQ_P1_CRCLK_MASK 0x0000000f ++#define INTTOEXT_BASE_ILK 0x11300 ++#define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */ ++#define INTTOEXT_MAP3_SHIFT 24 ++#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT) ++#define INTTOEXT_MAP2_SHIFT 16 ++#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT) ++#define INTTOEXT_MAP1_SHIFT 8 ++#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT) ++#define INTTOEXT_MAP0_SHIFT 0 ++#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT) ++#define MEMSWCTL 0x11170 /* Ironlake only */ ++#define MEMCTL_CMD_MASK 0xe000 ++#define MEMCTL_CMD_SHIFT 13 ++#define MEMCTL_CMD_RCLK_OFF 0 ++#define MEMCTL_CMD_RCLK_ON 1 ++#define MEMCTL_CMD_CHFREQ 2 ++#define MEMCTL_CMD_CHVID 3 ++#define MEMCTL_CMD_VMMOFF 4 ++#define MEMCTL_CMD_VMMON 5 ++#define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears ++ when command complete */ ++#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */ ++#define MEMCTL_FREQ_SHIFT 8 ++#define MEMCTL_SFCAVM (1<<7) ++#define MEMCTL_TGT_VID_MASK 0x007f ++#define MEMIHYST 0x1117c ++#define MEMINTREN 0x11180 /* 16 bits */ ++#define MEMINT_RSEXIT_EN (1<<8) ++#define MEMINT_CX_SUPR_EN (1<<7) ++#define MEMINT_CONT_BUSY_EN (1<<6) ++#define MEMINT_AVG_BUSY_EN (1<<5) ++#define MEMINT_EVAL_CHG_EN (1<<4) ++#define MEMINT_MON_IDLE_EN (1<<3) ++#define MEMINT_UP_EVAL_EN (1<<2) ++#define MEMINT_DOWN_EVAL_EN (1<<1) ++#define MEMINT_SW_CMD_EN (1<<0) ++#define MEMINTRSTR 0x11182 /* 16 bits */ ++#define MEM_RSEXIT_MASK 0xc000 ++#define MEM_RSEXIT_SHIFT 14 ++#define MEM_CONT_BUSY_MASK 0x3000 ++#define MEM_CONT_BUSY_SHIFT 12 ++#define MEM_AVG_BUSY_MASK 0x0c00 ++#define MEM_AVG_BUSY_SHIFT 10 ++#define MEM_EVAL_CHG_MASK 0x0300 ++#define MEM_EVAL_BUSY_SHIFT 8 ++#define MEM_MON_IDLE_MASK 0x00c0 ++#define MEM_MON_IDLE_SHIFT 6 ++#define MEM_UP_EVAL_MASK 0x0030 ++#define MEM_UP_EVAL_SHIFT 4 ++#define MEM_DOWN_EVAL_MASK 0x000c ++#define MEM_DOWN_EVAL_SHIFT 2 ++#define MEM_SW_CMD_MASK 0x0003 ++#define MEM_INT_STEER_GFX 0 ++#define MEM_INT_STEER_CMR 1 ++#define MEM_INT_STEER_SMI 2 ++#define MEM_INT_STEER_SCI 3 ++#define MEMINTRSTS 0x11184 ++#define MEMINT_RSEXIT (1<<7) ++#define MEMINT_CONT_BUSY (1<<6) ++#define MEMINT_AVG_BUSY (1<<5) ++#define MEMINT_EVAL_CHG (1<<4) ++#define MEMINT_MON_IDLE (1<<3) ++#define MEMINT_UP_EVAL (1<<2) ++#define MEMINT_DOWN_EVAL (1<<1) ++#define MEMINT_SW_CMD (1<<0) ++#define MEMMODECTL 0x11190 ++#define MEMMODE_BOOST_EN (1<<31) ++#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */ ++#define MEMMODE_BOOST_FREQ_SHIFT 24 ++#define MEMMODE_IDLE_MODE_MASK 0x00030000 ++#define MEMMODE_IDLE_MODE_SHIFT 16 ++#define MEMMODE_IDLE_MODE_EVAL 0 ++#define MEMMODE_IDLE_MODE_CONT 1 ++#define MEMMODE_HWIDLE_EN (1<<15) ++#define MEMMODE_SWMODE_EN (1<<14) ++#define MEMMODE_RCLK_GATE (1<<13) ++#define MEMMODE_HW_UPDATE (1<<12) ++#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */ ++#define MEMMODE_FSTART_SHIFT 8 ++#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */ ++#define MEMMODE_FMAX_SHIFT 4 ++#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */ ++#define RCBMAXAVG 0x1119c ++#define MEMSWCTL2 0x1119e /* Cantiga only */ ++#define SWMEMCMD_RENDER_OFF (0 << 13) ++#define SWMEMCMD_RENDER_ON (1 << 13) ++#define SWMEMCMD_SWFREQ (2 << 13) ++#define SWMEMCMD_TARVID (3 << 13) ++#define SWMEMCMD_VRM_OFF (4 << 13) 
++#define SWMEMCMD_VRM_ON (5 << 13) ++#define CMDSTS (1<<12) ++#define SFCAVM (1<<11) ++#define SWFREQ_MASK 0x0380 /* P0-7 */ ++#define SWFREQ_SHIFT 7 ++#define TARVID_MASK 0x001f ++#define MEMSTAT_CTG 0x111a0 ++#define RCBMINAVG 0x111a0 ++#define RCUPEI 0x111b0 ++#define RCDNEI 0x111b4 ++#define MCHBAR_RENDER_STANDBY 0x111b8 + #define RCX_SW_EXIT (1<<23) + #define RSX_STATUS_MASK 0x00700000 ++#define VIDCTL 0x111c0 ++#define VIDSTS 0x111c8 ++#define VIDSTART 0x111cc /* 8 bits */ ++#define MEMSTAT_ILK 0x111f8 ++#define MEMSTAT_VID_MASK 0x7f00 ++#define MEMSTAT_VID_SHIFT 8 ++#define MEMSTAT_PSTATE_MASK 0x00f8 ++#define MEMSTAT_PSTATE_SHIFT 3 ++#define MEMSTAT_MON_ACTV (1<<2) ++#define MEMSTAT_SRC_CTL_MASK 0x0003 ++#define MEMSTAT_SRC_CTL_CORE 0 ++#define MEMSTAT_SRC_CTL_TRB 1 ++#define MEMSTAT_SRC_CTL_THM 2 ++#define MEMSTAT_SRC_CTL_STDBY 3 ++#define RCPREVBSYTUPAVG 0x113b8 ++#define RCPREVBSYTDNAVG 0x113bc + #define PEG_BAND_GAP_DATA 0x14d68 + + /* +@@ -1588,6 +1754,14 @@ + #define DP_LINK_TRAIN_MASK (3 << 28) + #define DP_LINK_TRAIN_SHIFT 28 + ++/* CPT Link training mode */ ++#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8) ++#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8) ++#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8) ++#define DP_LINK_TRAIN_OFF_CPT (3 << 8) ++#define DP_LINK_TRAIN_MASK_CPT (7 << 8) ++#define DP_LINK_TRAIN_SHIFT_CPT 8 ++ + /* Signal voltages. These are mostly controlled by the other end */ + #define DP_VOLTAGE_0_4 (0 << 25) + #define DP_VOLTAGE_0_6 (1 << 25) +@@ -1812,15 +1986,24 @@ + + #define DSPFW1 0x70034 + #define DSPFW_SR_SHIFT 23 ++#define DSPFW_SR_MASK (0x1ff<<23) + #define DSPFW_CURSORB_SHIFT 16 ++#define DSPFW_CURSORB_MASK (0x3f<<16) + #define DSPFW_PLANEB_SHIFT 8 ++#define DSPFW_PLANEB_MASK (0x7f<<8) ++#define DSPFW_PLANEA_MASK (0x7f) + #define DSPFW2 0x70038 + #define DSPFW_CURSORA_MASK 0x00003f00 + #define DSPFW_CURSORA_SHIFT 8 ++#define DSPFW_PLANEC_MASK (0x7f) + #define DSPFW3 0x7003c + #define DSPFW_HPLL_SR_EN (1<<31) + #define DSPFW_CURSOR_SR_SHIFT 24 + #define PINEVIEW_SELF_REFRESH_EN (1<<30) ++#define DSPFW_CURSOR_SR_MASK (0x3f<<24) ++#define DSPFW_HPLL_CURSOR_SHIFT 16 ++#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) ++#define DSPFW_HPLL_SR_MASK (0x1ff) + + /* FIFO watermark sizes etc */ + #define G4X_FIFO_LINE_SIZE 64 +@@ -1847,6 +2030,43 @@ + #define PINEVIEW_CURSOR_DFT_WM 0 + #define PINEVIEW_CURSOR_GUARD_WM 5 + ++ ++/* define the Watermark register on Ironlake */ ++#define WM0_PIPEA_ILK 0x45100 ++#define WM0_PIPE_PLANE_MASK (0x7f<<16) ++#define WM0_PIPE_PLANE_SHIFT 16 ++#define WM0_PIPE_SPRITE_MASK (0x3f<<8) ++#define WM0_PIPE_SPRITE_SHIFT 8 ++#define WM0_PIPE_CURSOR_MASK (0x1f) ++ ++#define WM0_PIPEB_ILK 0x45104 ++#define WM1_LP_ILK 0x45108 ++#define WM1_LP_SR_EN (1<<31) ++#define WM1_LP_LATENCY_SHIFT 24 ++#define WM1_LP_LATENCY_MASK (0x7f<<24) ++#define WM1_LP_SR_MASK (0x1ff<<8) ++#define WM1_LP_SR_SHIFT 8 ++#define WM1_LP_CURSOR_MASK (0x3f) ++ ++/* Memory latency timer register */ ++#define MLTR_ILK 0x11222 ++/* the unit of memory self-refresh latency time is 0.5us */ ++#define ILK_SRLT_MASK 0x3f ++ ++/* define the fifo size on Ironlake */ ++#define ILK_DISPLAY_FIFO 128 ++#define ILK_DISPLAY_MAXWM 64 ++#define ILK_DISPLAY_DFTWM 8 ++ ++#define ILK_DISPLAY_SR_FIFO 512 ++#define ILK_DISPLAY_MAX_SRWM 0x1ff ++#define ILK_DISPLAY_DFT_SRWM 0x3f ++#define ILK_CURSOR_SR_FIFO 64 ++#define ILK_CURSOR_MAX_SRWM 0x3f ++#define ILK_CURSOR_DFT_SRWM 8 ++ ++#define ILK_FIFO_LINE_SIZE 64 ++ + /* + * The two pipe frame counter registers are not synchronized, so + * reading a stable 
value is somewhat tricky. The following code +@@ -2010,6 +2230,14 @@ + #define DISPLAY_PORT_PLL_BIOS_1 0x46010 + #define DISPLAY_PORT_PLL_BIOS_2 0x46014 + ++#define PCH_DSPCLK_GATE_D 0x42020 ++# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7) ++# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5) ++ ++#define PCH_3DCGDIS0 0x46020 ++# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) ++# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) ++ + #define FDI_PLL_FREQ_CTL 0x46030 + #define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24) + #define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 +@@ -2119,8 +2347,15 @@ + #define GTIIR 0x44018 + #define GTIER 0x4401c + ++#define ILK_DISPLAY_CHICKEN2 0x42004 ++#define ILK_DPARB_GATE (1<<22) ++#define ILK_VSDPFD_FULL (1<<21) ++#define ILK_DSPCLK_GATE 0x42020 ++#define ILK_DPARB_CLK_GATE (1<<5) ++ + #define DISP_ARB_CTL 0x45000 + #define DISP_TILE_SURFACE_SWIZZLING (1<<13) ++#define DISP_FBC_WM_DIS (1<<15) + + /* PCH */ + +@@ -2131,6 +2366,11 @@ + #define SDE_PORTB_HOTPLUG (1 << 8) + #define SDE_SDVOB_HOTPLUG (1 << 6) + #define SDE_HOTPLUG_MASK (0xf << 8) ++/* CPT */ ++#define SDE_CRT_HOTPLUG_CPT (1 << 19) ++#define SDE_PORTD_HOTPLUG_CPT (1 << 23) ++#define SDE_PORTC_HOTPLUG_CPT (1 << 22) ++#define SDE_PORTB_HOTPLUG_CPT (1 << 21) + + #define SDEISR 0xc4000 + #define SDEIMR 0xc4004 +@@ -2222,6 +2462,17 @@ + #define PCH_SSC4_PARMS 0xc6210 + #define PCH_SSC4_AUX_PARMS 0xc6214 + ++#define PCH_DPLL_SEL 0xc7000 ++#define TRANSA_DPLL_ENABLE (1<<3) ++#define TRANSA_DPLLB_SEL (1<<0) ++#define TRANSA_DPLLA_SEL 0 ++#define TRANSB_DPLL_ENABLE (1<<7) ++#define TRANSB_DPLLB_SEL (1<<4) ++#define TRANSB_DPLLA_SEL (0) ++#define TRANSC_DPLL_ENABLE (1<<11) ++#define TRANSC_DPLLB_SEL (1<<8) ++#define TRANSC_DPLLA_SEL (0) ++ + /* transcoder */ + + #define TRANS_HTOTAL_A 0xe0000 +@@ -2308,6 +2559,19 @@ + #define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22) + #define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22) + #define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22) ++/* ILK always use 400mV 0dB for voltage swing and pre-emphasis level. ++ SNB has different settings. 
*/ ++/* SNB A-stepping */ ++#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22) ++#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22) ++#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) ++#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) ++/* SNB B-stepping */ ++#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22) ++#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22) ++#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22) ++#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) ++#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22) + #define FDI_DP_PORT_WIDTH_X1 (0<<19) + #define FDI_DP_PORT_WIDTH_X2 (1<<19) + #define FDI_DP_PORT_WIDTH_X3 (2<<19) +@@ -2340,6 +2604,13 @@ + #define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6) + #define FDI_SEL_RAWCLK (0<<4) + #define FDI_SEL_PCDCLK (1<<4) ++/* CPT */ ++#define FDI_AUTO_TRAINING (1<<10) ++#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8) ++#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8) ++#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8) ++#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8) ++#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8) + + #define FDI_RXA_MISC 0xf0010 + #define FDI_RXB_MISC 0xf1010 +@@ -2411,6 +2682,9 @@ + #define HSYNC_ACTIVE_HIGH (1 << 3) + #define PORT_DETECTED (1 << 2) + ++/* PCH SDVOB multiplex with HDMIB */ ++#define PCH_SDVOB HDMIB ++ + #define HDMIC 0xe1150 + #define HDMID 0xe1160 + +@@ -2468,4 +2742,42 @@ + #define PCH_DPD_AUX_CH_DATA4 0xe4320 + #define PCH_DPD_AUX_CH_DATA5 0xe4324 + ++/* CPT */ ++#define PORT_TRANS_A_SEL_CPT 0 ++#define PORT_TRANS_B_SEL_CPT (1<<29) ++#define PORT_TRANS_C_SEL_CPT (2<<29) ++#define PORT_TRANS_SEL_MASK (3<<29) ++ ++#define TRANS_DP_CTL_A 0xe0300 ++#define TRANS_DP_CTL_B 0xe1300 ++#define TRANS_DP_CTL_C 0xe2300 ++#define TRANS_DP_OUTPUT_ENABLE (1<<31) ++#define TRANS_DP_PORT_SEL_B (0<<29) ++#define TRANS_DP_PORT_SEL_C (1<<29) ++#define TRANS_DP_PORT_SEL_D (2<<29) ++#define TRANS_DP_PORT_SEL_MASK (3<<29) ++#define TRANS_DP_AUDIO_ONLY (1<<26) ++#define TRANS_DP_ENH_FRAMING (1<<18) ++#define TRANS_DP_8BPC (0<<9) ++#define TRANS_DP_10BPC (1<<9) ++#define TRANS_DP_6BPC (2<<9) ++#define TRANS_DP_12BPC (3<<9) ++#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4) ++#define TRANS_DP_VSYNC_ACTIVE_LOW 0 ++#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) ++#define TRANS_DP_HSYNC_ACTIVE_LOW 0 ++ ++/* SNB eDP training params */ ++/* SNB A-stepping */ ++#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22) ++#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22) ++#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) ++#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) ++/* SNB B-stepping */ ++#define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22) ++#define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22) ++#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22) ++#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) ++#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) ++ + #endif /* _I915_REG_H_ */ +diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c +--- a/drivers/gpu/drm/i915/i915_suspend.c ++++ b/drivers/gpu/drm/i915/i915_suspend.c +@@ -600,14 +600,16 @@ void i915_save_display(struct drm_device *dev) + } + /* FIXME: save TV & SDVO state */ + +- /* FBC state */ +- if (IS_GM45(dev)) { +- dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); +- } else { +- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); +- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); +- dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); +- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); ++ /* Only save FBC state on the platform that supports FBC */ ++ if (I915_HAS_FBC(dev)) { ++ if 
(IS_GM45(dev)) { ++ dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); ++ } else { ++ dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); ++ dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); ++ dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); ++ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); ++ } + } + + /* VGA state */ +@@ -682,6 +684,8 @@ void i915_restore_display(struct drm_device *dev) + I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); + I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); + I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); ++ I915_WRITE(MCHBAR_RENDER_STANDBY, ++ dev_priv->saveMCHBAR_RENDER_STANDBY); + } else { + I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); + I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); +@@ -700,18 +704,19 @@ void i915_restore_display(struct drm_device *dev) + } + /* FIXME: restore TV & SDVO state */ + +- /* FBC info */ +- if (IS_GM45(dev)) { +- g4x_disable_fbc(dev); +- I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); +- } else { +- i8xx_disable_fbc(dev); +- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); +- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); +- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); +- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); ++ /* only restore FBC info on the platform that supports FBC*/ ++ if (I915_HAS_FBC(dev)) { ++ if (IS_GM45(dev)) { ++ g4x_disable_fbc(dev); ++ I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); ++ } else { ++ i8xx_disable_fbc(dev); ++ I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); ++ I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); ++ I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); ++ I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); ++ } + } +- + /* VGA state */ + if (IS_IRONLAKE(dev)) + I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); +@@ -745,11 +750,16 @@ int i915_save_state(struct drm_device *dev) + dev_priv->saveGTIMR = I915_READ(GTIMR); + dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); + dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); ++ dev_priv->saveMCHBAR_RENDER_STANDBY = ++ I915_READ(MCHBAR_RENDER_STANDBY); + } else { + dev_priv->saveIER = I915_READ(IER); + dev_priv->saveIMR = I915_READ(IMR); + } + ++ if (IS_IRONLAKE_M(dev)) ++ ironlake_disable_drps(dev); ++ + /* Cache mode state */ + dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); + +@@ -820,6 +830,9 @@ int i915_restore_state(struct drm_device *dev) + /* Clock gating state */ + intel_init_clock_gating(dev); + ++ if (IS_IRONLAKE_M(dev)) ++ ironlake_enable_drps(dev); ++ + /* Cache mode state */ + I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); + +diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h +--- a/drivers/gpu/drm/i915/i915_trace.h ++++ b/drivers/gpu/drm/i915/i915_trace.h +@@ -115,7 +115,7 @@ TRACE_EVENT(i915_gem_object_get_fence, + __entry->obj, __entry->fence, __entry->tiling_mode) + ); + +-TRACE_EVENT(i915_gem_object_unbind, ++DECLARE_EVENT_CLASS(i915_gem_object, + + TP_PROTO(struct drm_gem_object *obj), + +@@ -132,21 +132,18 @@ TRACE_EVENT(i915_gem_object_unbind, + TP_printk("obj=%p", __entry->obj) + ); + +-TRACE_EVENT(i915_gem_object_destroy, ++DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, + + TP_PROTO(struct drm_gem_object *obj), + +- TP_ARGS(obj), ++ TP_ARGS(obj) ++); + +- TP_STRUCT__entry( +- __field(struct drm_gem_object *, obj) +- ), ++DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, + +- TP_fast_assign( +- __entry->obj = obj; +- ), ++ 
TP_PROTO(struct drm_gem_object *obj), + +- TP_printk("obj=%p", __entry->obj) ++ TP_ARGS(obj) + ); + + /* batch tracing */ +@@ -197,8 +194,7 @@ TRACE_EVENT(i915_gem_request_flush, + __entry->flush_domains, __entry->invalidate_domains) + ); + +- +-TRACE_EVENT(i915_gem_request_complete, ++DECLARE_EVENT_CLASS(i915_gem_request, + + TP_PROTO(struct drm_device *dev, u32 seqno), + +@@ -217,64 +213,35 @@ TRACE_EVENT(i915_gem_request_complete, + TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) + ); + +-TRACE_EVENT(i915_gem_request_retire, ++DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, + + TP_PROTO(struct drm_device *dev, u32 seqno), + +- TP_ARGS(dev, seqno), +- +- TP_STRUCT__entry( +- __field(u32, dev) +- __field(u32, seqno) +- ), +- +- TP_fast_assign( +- __entry->dev = dev->primary->index; +- __entry->seqno = seqno; +- ), +- +- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) ++ TP_ARGS(dev, seqno) + ); + +-TRACE_EVENT(i915_gem_request_wait_begin, ++DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, + + TP_PROTO(struct drm_device *dev, u32 seqno), + +- TP_ARGS(dev, seqno), +- +- TP_STRUCT__entry( +- __field(u32, dev) +- __field(u32, seqno) +- ), +- +- TP_fast_assign( +- __entry->dev = dev->primary->index; +- __entry->seqno = seqno; +- ), +- +- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) ++ TP_ARGS(dev, seqno) + ); + +-TRACE_EVENT(i915_gem_request_wait_end, ++DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin, + + TP_PROTO(struct drm_device *dev, u32 seqno), + +- TP_ARGS(dev, seqno), ++ TP_ARGS(dev, seqno) ++); + +- TP_STRUCT__entry( +- __field(u32, dev) +- __field(u32, seqno) +- ), ++DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, + +- TP_fast_assign( +- __entry->dev = dev->primary->index; +- __entry->seqno = seqno; +- ), ++ TP_PROTO(struct drm_device *dev, u32 seqno), + +- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) ++ TP_ARGS(dev, seqno) + ); + +-TRACE_EVENT(i915_ring_wait_begin, ++DECLARE_EVENT_CLASS(i915_ring, + + TP_PROTO(struct drm_device *dev), + +@@ -291,21 +258,18 @@ TRACE_EVENT(i915_ring_wait_begin, + TP_printk("dev=%u", __entry->dev) + ); + +-TRACE_EVENT(i915_ring_wait_end, ++DEFINE_EVENT(i915_ring, i915_ring_wait_begin, + + TP_PROTO(struct drm_device *dev), + +- TP_ARGS(dev), ++ TP_ARGS(dev) ++); + +- TP_STRUCT__entry( +- __field(u32, dev) +- ), ++DEFINE_EVENT(i915_ring, i915_ring_wait_end, + +- TP_fast_assign( +- __entry->dev = dev->primary->index; +- ), ++ TP_PROTO(struct drm_device *dev), + +- TP_printk("dev=%u", __entry->dev) ++ TP_ARGS(dev) + ); + + #endif /* _I915_TRACE_H_ */ +diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c +--- a/drivers/gpu/drm/i915/intel_bios.c ++++ b/drivers/gpu/drm/i915/intel_bios.c +@@ -247,6 +247,7 @@ static void + parse_general_features(struct drm_i915_private *dev_priv, + struct bdb_header *bdb) + { ++ struct drm_device *dev = dev_priv->dev; + struct bdb_general_features *general; + + /* Set sensible defaults in case we can't find the general block */ +@@ -263,7 +264,7 @@ parse_general_features(struct drm_i915_private *dev_priv, + if (IS_I85X(dev_priv->dev)) + dev_priv->lvds_ssc_freq = + general->ssc_freq ? 66 : 48; +- else if (IS_IRONLAKE(dev_priv->dev)) ++ else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev)) + dev_priv->lvds_ssc_freq = + general->ssc_freq ? 
100 : 120; + else +@@ -416,8 +417,9 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) + edp = find_section(bdb, BDB_EDP); + if (!edp) { + if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) { +- DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported,\ +- assume 18bpp panel color depth.\n"); ++ DRM_DEBUG_KMS("No eDP BDB found but eDP panel " ++ "supported, assume 18bpp panel color " ++ "depth.\n"); + dev_priv->edp_bpp = 18; + } + return; +diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c +--- a/drivers/gpu/drm/i915/intel_crt.c ++++ b/drivers/gpu/drm/i915/intel_crt.c +@@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode) + struct drm_i915_private *dev_priv = dev->dev_private; + u32 temp, reg; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + reg = PCH_ADPA; + else + reg = ADPA; +@@ -113,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, + else + dpll_md_reg = DPLL_B_MD; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + adpa_reg = PCH_ADPA; + else + adpa_reg = ADPA; +@@ -122,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, + * Disable separate mode multiplier used when cloning SDVO to CRT + * XXX this needs to be adjusted when we really are cloning + */ +- if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { ++ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { + dpll_md = I915_READ(dpll_md_reg); + I915_WRITE(dpll_md_reg, + dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); +@@ -135,12 +135,18 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, + adpa |= ADPA_VSYNC_ACTIVE_HIGH; + + if (intel_crtc->pipe == 0) { +- adpa |= ADPA_PIPE_A_SELECT; +- if (!IS_IRONLAKE(dev)) ++ if (HAS_PCH_CPT(dev)) ++ adpa |= PORT_TRANS_A_SEL_CPT; ++ else ++ adpa |= ADPA_PIPE_A_SELECT; ++ if (!HAS_PCH_SPLIT(dev)) + I915_WRITE(BCLRPAT_A, 0); + } else { +- adpa |= ADPA_PIPE_B_SELECT; +- if (!IS_IRONLAKE(dev)) ++ if (HAS_PCH_CPT(dev)) ++ adpa |= PORT_TRANS_B_SEL_CPT; ++ else ++ adpa |= ADPA_PIPE_B_SELECT; ++ if (!HAS_PCH_SPLIT(dev)) + I915_WRITE(BCLRPAT_B, 0); + } + +@@ -151,15 +157,21 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) + { + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; +- u32 adpa; ++ u32 adpa, temp; + bool ret; + +- adpa = I915_READ(PCH_ADPA); ++ temp = adpa = I915_READ(PCH_ADPA); + +- adpa &= ~ADPA_CRT_HOTPLUG_MASK; +- /* disable HPD first */ +- I915_WRITE(PCH_ADPA, adpa); +- (void)I915_READ(PCH_ADPA); ++ if (HAS_PCH_CPT(dev)) { ++ /* Disable DAC before force detect */ ++ I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE); ++ (void)I915_READ(PCH_ADPA); ++ } else { ++ adpa &= ~ADPA_CRT_HOTPLUG_MASK; ++ /* disable HPD first */ ++ I915_WRITE(PCH_ADPA, adpa); ++ (void)I915_READ(PCH_ADPA); ++ } + + adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | + ADPA_CRT_HOTPLUG_WARMUP_10MS | +@@ -175,6 +187,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) + while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) + ; + ++ if (HAS_PCH_CPT(dev)) { ++ I915_WRITE(PCH_ADPA, temp); ++ (void)I915_READ(PCH_ADPA); ++ } ++ + /* Check the status to see if both blue and green are on now */ + adpa = I915_READ(PCH_ADPA); + adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK; +@@ -202,7 +219,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) + u32 hotplug_en; + int i, tries = 0; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + return 
intel_ironlake_crt_detect_hotplug(connector); + + /* +@@ -244,21 +261,21 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) + return false; + } + +-static bool intel_crt_detect_ddc(struct drm_connector *connector) ++static bool intel_crt_detect_ddc(struct drm_encoder *encoder) + { +- struct intel_output *intel_output = to_intel_output(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + + /* CRT should always be at 0, but check anyway */ +- if (intel_output->type != INTEL_OUTPUT_ANALOG) ++ if (intel_encoder->type != INTEL_OUTPUT_ANALOG) + return false; + +- return intel_ddc_probe(intel_output); ++ return intel_ddc_probe(intel_encoder); + } + + static enum drm_connector_status +-intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output) ++intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder) + { +- struct drm_encoder *encoder = &intel_output->enc; ++ struct drm_encoder *encoder = &intel_encoder->enc; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); +@@ -386,8 +403,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output) + static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) + { + struct drm_device *dev = connector->dev; +- struct intel_output *intel_output = to_intel_output(connector); +- struct drm_encoder *encoder = &intel_output->enc; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct drm_crtc *crtc; + int dpms_mode; + enum drm_connector_status status; +@@ -399,18 +416,19 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto + return connector_status_disconnected; + } + +- if (intel_crt_detect_ddc(connector)) ++ if (intel_crt_detect_ddc(encoder)) + return connector_status_connected; + + /* for pre-945g platforms use load detect */ + if (encoder->crtc && encoder->crtc->enabled) { +- status = intel_crt_load_detect(encoder->crtc, intel_output); ++ status = intel_crt_load_detect(encoder->crtc, intel_encoder); + } else { +- crtc = intel_get_load_detect_pipe(intel_output, ++ crtc = intel_get_load_detect_pipe(intel_encoder, connector, + NULL, &dpms_mode); + if (crtc) { +- status = intel_crt_load_detect(crtc, intel_output); +- intel_release_load_detect_pipe(intel_output, dpms_mode); ++ status = intel_crt_load_detect(crtc, intel_encoder); ++ intel_release_load_detect_pipe(intel_encoder, ++ connector, dpms_mode); + } else + status = connector_status_unknown; + } +@@ -420,9 +438,6 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto + + static void intel_crt_destroy(struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); +- +- intel_i2c_destroy(intel_output->ddc_bus); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(connector); +@@ -431,29 +446,27 @@ static void intel_crt_destroy(struct drm_connector *connector) + static int intel_crt_get_modes(struct drm_connector *connector) + { + int ret; +- struct intel_output *intel_output = to_intel_output(connector); +- struct i2c_adapter *ddcbus; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct i2c_adapter *ddc_bus; + struct drm_device *dev = connector->dev; + 
+ +- ret = intel_ddc_get_modes(intel_output); ++ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + if (ret || !IS_G4X(dev)) + goto end; + +- ddcbus = intel_output->ddc_bus; + /* Try to probe digital port for output in DVI-I -> VGA mode. */ +- intel_output->ddc_bus = +- intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); ++ ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); + +- if (!intel_output->ddc_bus) { +- intel_output->ddc_bus = ddcbus; ++ if (!ddc_bus) { + dev_printk(KERN_ERR, &connector->dev->pdev->dev, + "DDC bus registration failed for CRTDDC_D.\n"); + goto end; + } + /* Try to get modes by GPIOD port */ +- ret = intel_ddc_get_modes(intel_output); +- intel_i2c_destroy(ddcbus); ++ ret = intel_ddc_get_modes(connector, ddc_bus); ++ intel_i2c_destroy(ddc_bus); + + end: + return ret; +@@ -490,12 +503,16 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = { + static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { + .mode_valid = intel_crt_mode_valid, + .get_modes = intel_crt_get_modes, +- .best_encoder = intel_best_encoder, ++ .best_encoder = intel_attached_encoder, + }; + + static void intel_crt_enc_destroy(struct drm_encoder *encoder) + { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ ++ intel_i2c_destroy(intel_encoder->ddc_bus); + drm_encoder_cleanup(encoder); ++ kfree(intel_encoder); + } + + static const struct drm_encoder_funcs intel_crt_enc_funcs = { +@@ -505,26 +522,33 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = { + void intel_crt_init(struct drm_device *dev) + { + struct drm_connector *connector; +- struct intel_output *intel_output; ++ struct intel_encoder *intel_encoder; ++ struct intel_connector *intel_connector; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 i2c_reg; + +- intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); +- if (!intel_output) ++ intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL); ++ if (!intel_encoder) + return; + +- connector = &intel_output->base; +- drm_connector_init(dev, &intel_output->base, ++ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); ++ if (!intel_connector) { ++ kfree(intel_encoder); ++ return; ++ } ++ ++ connector = &intel_connector->base; ++ drm_connector_init(dev, &intel_connector->base, + &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); + +- drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs, ++ drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs, + DRM_MODE_ENCODER_DAC); + +- drm_mode_connector_attach_encoder(&intel_output->base, +- &intel_output->enc); ++ drm_mode_connector_attach_encoder(&intel_connector->base, ++ &intel_encoder->enc); + + /* Set up the DDC bus. 
*/ +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + i2c_reg = PCH_GPIOA; + else { + i2c_reg = GPIOA; +@@ -532,22 +556,22 @@ void intel_crt_init(struct drm_device *dev) + if (dev_priv->crt_ddc_bus != 0) + i2c_reg = dev_priv->crt_ddc_bus; + } +- intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); +- if (!intel_output->ddc_bus) { ++ intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); ++ if (!intel_encoder->ddc_bus) { + dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " + "failed.\n"); + return; + } + +- intel_output->type = INTEL_OUTPUT_ANALOG; +- intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | ++ intel_encoder->type = INTEL_OUTPUT_ANALOG; ++ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | + (1 << INTEL_ANALOG_CLONE_BIT) | + (1 << INTEL_SDVO_LVDS_CLONE_BIT); +- intel_output->crtc_mask = (1 << 0) | (1 << 1); ++ intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + +- drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs); ++ drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs); + drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); + + drm_sysfs_connector_add(connector); +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -232,7 +232,7 @@ struct intel_limit { + #define G4X_P2_DISPLAY_PORT_FAST 10 + #define G4X_P2_DISPLAY_PORT_LIMIT 0 + +-/* Ironlake */ ++/* Ironlake / Sandybridge */ + /* as we calculate clock using (register_value + 2) for + N/M1/M2, so here the range value for them is (actual_value-2). + */ +@@ -690,7 +690,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) + struct drm_device *dev = crtc->dev; + const intel_limit_t *limit; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + limit = intel_ironlake_limit(crtc); + else if (IS_G4X(dev)) { + limit = intel_g4x_limit(crtc); +@@ -741,36 +741,18 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) + { + struct drm_device *dev = crtc->dev; + struct drm_mode_config *mode_config = &dev->mode_config; +- struct drm_connector *l_entry; ++ struct drm_encoder *l_entry; + +- list_for_each_entry(l_entry, &mode_config->connector_list, head) { +- if (l_entry->encoder && +- l_entry->encoder->crtc == crtc) { +- struct intel_output *intel_output = to_intel_output(l_entry); +- if (intel_output->type == type) ++ list_for_each_entry(l_entry, &mode_config->encoder_list, head) { ++ if (l_entry && l_entry->crtc == crtc) { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry); ++ if (intel_encoder->type == type) + return true; + } + } + return false; + } + +-struct drm_connector * +-intel_pipe_get_output (struct drm_crtc *crtc) +-{ +- struct drm_device *dev = crtc->dev; +- struct drm_mode_config *mode_config = &dev->mode_config; +- struct drm_connector *l_entry, *ret = NULL; +- +- list_for_each_entry(l_entry, &mode_config->connector_list, head) { +- if (l_entry->encoder && +- l_entry->encoder->crtc == crtc) { +- ret = l_entry; +- break; +- } +- } +- return ret; +-} +- + #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) + /** + * Returns whether the given set of divisors are valid for a given refclk with +@@ -886,7 +868,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + int 
lvds_reg; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + lvds_reg = PCH_LVDS; + else + lvds_reg = LVDS; +@@ -1002,7 +984,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_framebuffer *fb = crtc->fb; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); +- struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int plane, i; + u32 fbc_ctl, fbc_ctl2; +@@ -1032,7 +1014,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) + /* enable it... */ + fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; + if (IS_I945GM(dev)) +- fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */ ++ fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ + fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; + fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; + if (obj_priv->tiling_mode != I915_TILING_NONE) +@@ -1079,7 +1061,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_framebuffer *fb = crtc->fb; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); +- struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : + DPFC_CTL_PLANEB); +@@ -1175,7 +1157,7 @@ static void intel_update_fbc(struct drm_crtc *crtc, + return; + + intel_fb = to_intel_framebuffer(fb); +- obj_priv = intel_fb->obj->driver_private; ++ obj_priv = to_intel_bo(intel_fb->obj); + + /* + * If FBC is already on, we just have to verify that we can +@@ -1188,25 +1170,30 @@ static void intel_update_fbc(struct drm_crtc *crtc, + if (intel_fb->obj->size > dev_priv->cfb_size) { + DRM_DEBUG_KMS("framebuffer too large, disabling " + "compression\n"); ++ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; + goto out_disable; + } + if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || + (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { + DRM_DEBUG_KMS("mode incompatible with compression, " + "disabling\n"); ++ dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; + goto out_disable; + } + if ((mode->hdisplay > 2048) || + (mode->vdisplay > 1536)) { + DRM_DEBUG_KMS("mode too large for compression, disabling\n"); ++ dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; + goto out_disable; + } + if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { + DRM_DEBUG_KMS("plane not 0, disabling compression\n"); ++ dev_priv->no_fbc_reason = FBC_BAD_PLANE; + goto out_disable; + } + if (obj_priv->tiling_mode != I915_TILING_X) { + DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); ++ dev_priv->no_fbc_reason = FBC_NOT_TILED; + goto out_disable; + } + +@@ -1237,7 +1224,7 @@ out_disable: + static int + intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) + { +- struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + u32 alignment; + int ret; + +@@ -1317,7 +1304,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, + + intel_fb = to_intel_framebuffer(crtc->fb); + obj = intel_fb->obj; +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + + mutex_lock(&dev->struct_mutex); + ret = 
intel_pin_and_fence_fb_obj(dev, obj); +@@ -1366,7 +1353,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, + dspcntr &= ~DISPPLANE_TILED; + } + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + /* must disable */ + dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; + +@@ -1395,7 +1382,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, + + if (old_fb) { + intel_fb = to_intel_framebuffer(old_fb); +- obj_priv = intel_fb->obj->driver_private; ++ obj_priv = to_intel_bo(intel_fb->obj); + i915_gem_object_unpin(intel_fb->obj); + } + intel_increase_pllclock(crtc, true); +@@ -1427,7 +1414,7 @@ static void i915_disable_vga (struct drm_device *dev) + u8 sr1; + u32 vga_reg; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + vga_reg = CPU_VGACNTRL; + else + vga_reg = VGACNTRL; +@@ -1504,6 +1491,219 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) + udelay(500); + } + ++/* The FDI link training functions for ILK/Ibexpeak. */ ++static void ironlake_fdi_link_train(struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ int pipe = intel_crtc->pipe; ++ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; ++ int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; ++ int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; ++ int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; ++ u32 temp, tries = 0; ++ ++ /* enable CPU FDI TX and PCH FDI RX */ ++ temp = I915_READ(fdi_tx_reg); ++ temp |= FDI_TX_ENABLE; ++ temp &= ~(7 << 19); ++ temp |= (intel_crtc->fdi_lanes - 1) << 19; ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_PATTERN_1; ++ I915_WRITE(fdi_tx_reg, temp); ++ I915_READ(fdi_tx_reg); ++ ++ temp = I915_READ(fdi_rx_reg); ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_PATTERN_1; ++ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); ++ I915_READ(fdi_rx_reg); ++ udelay(150); ++ ++ /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit ++ for train result */ ++ temp = I915_READ(fdi_rx_imr_reg); ++ temp &= ~FDI_RX_SYMBOL_LOCK; ++ temp &= ~FDI_RX_BIT_LOCK; ++ I915_WRITE(fdi_rx_imr_reg, temp); ++ I915_READ(fdi_rx_imr_reg); ++ udelay(150); ++ ++ for (;;) { ++ temp = I915_READ(fdi_rx_iir_reg); ++ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); ++ ++ if ((temp & FDI_RX_BIT_LOCK)) { ++ DRM_DEBUG_KMS("FDI train 1 done.\n"); ++ I915_WRITE(fdi_rx_iir_reg, ++ temp | FDI_RX_BIT_LOCK); ++ break; ++ } ++ ++ tries++; ++ ++ if (tries > 5) { ++ DRM_DEBUG_KMS("FDI train 1 fail!\n"); ++ break; ++ } ++ } ++ ++ /* Train 2 */ ++ temp = I915_READ(fdi_tx_reg); ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_PATTERN_2; ++ I915_WRITE(fdi_tx_reg, temp); ++ ++ temp = I915_READ(fdi_rx_reg); ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_PATTERN_2; ++ I915_WRITE(fdi_rx_reg, temp); ++ udelay(150); ++ ++ tries = 0; ++ ++ for (;;) { ++ temp = I915_READ(fdi_rx_iir_reg); ++ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); ++ ++ if (temp & FDI_RX_SYMBOL_LOCK) { ++ I915_WRITE(fdi_rx_iir_reg, ++ temp | FDI_RX_SYMBOL_LOCK); ++ DRM_DEBUG_KMS("FDI train 2 done.\n"); ++ break; ++ } ++ ++ tries++; ++ ++ if (tries > 5) { ++ DRM_DEBUG_KMS("FDI train 2 fail!\n"); ++ break; ++ } ++ } ++ ++ DRM_DEBUG_KMS("FDI train done\n"); ++} ++ ++static int snb_b_fdi_train_param [] = { ++ FDI_LINK_TRAIN_400MV_0DB_SNB_B, ++ FDI_LINK_TRAIN_400MV_6DB_SNB_B, ++ FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, ++ FDI_LINK_TRAIN_800MV_0DB_SNB_B, ++}; ++ ++/* The FDI 
link training functions for SNB/Cougarpoint. */ ++static void gen6_fdi_link_train(struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ++ int pipe = intel_crtc->pipe; ++ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; ++ int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; ++ int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; ++ int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; ++ u32 temp, i; ++ ++ /* enable CPU FDI TX and PCH FDI RX */ ++ temp = I915_READ(fdi_tx_reg); ++ temp |= FDI_TX_ENABLE; ++ temp &= ~(7 << 19); ++ temp |= (intel_crtc->fdi_lanes - 1) << 19; ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_PATTERN_1; ++ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; ++ /* SNB-B */ ++ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; ++ I915_WRITE(fdi_tx_reg, temp); ++ I915_READ(fdi_tx_reg); ++ ++ temp = I915_READ(fdi_rx_reg); ++ if (HAS_PCH_CPT(dev)) { ++ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; ++ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; ++ } else { ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_PATTERN_1; ++ } ++ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); ++ I915_READ(fdi_rx_reg); ++ udelay(150); ++ ++ /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit ++ for train result */ ++ temp = I915_READ(fdi_rx_imr_reg); ++ temp &= ~FDI_RX_SYMBOL_LOCK; ++ temp &= ~FDI_RX_BIT_LOCK; ++ I915_WRITE(fdi_rx_imr_reg, temp); ++ I915_READ(fdi_rx_imr_reg); ++ udelay(150); ++ ++ for (i = 0; i < 4; i++ ) { ++ temp = I915_READ(fdi_tx_reg); ++ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; ++ temp |= snb_b_fdi_train_param[i]; ++ I915_WRITE(fdi_tx_reg, temp); ++ udelay(500); ++ ++ temp = I915_READ(fdi_rx_iir_reg); ++ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); ++ ++ if (temp & FDI_RX_BIT_LOCK) { ++ I915_WRITE(fdi_rx_iir_reg, ++ temp | FDI_RX_BIT_LOCK); ++ DRM_DEBUG_KMS("FDI train 1 done.\n"); ++ break; ++ } ++ } ++ if (i == 4) ++ DRM_DEBUG_KMS("FDI train 1 fail!\n"); ++ ++ /* Train 2 */ ++ temp = I915_READ(fdi_tx_reg); ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_PATTERN_2; ++ if (IS_GEN6(dev)) { ++ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; ++ /* SNB-B */ ++ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; ++ } ++ I915_WRITE(fdi_tx_reg, temp); ++ ++ temp = I915_READ(fdi_rx_reg); ++ if (HAS_PCH_CPT(dev)) { ++ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; ++ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; ++ } else { ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_PATTERN_2; ++ } ++ I915_WRITE(fdi_rx_reg, temp); ++ udelay(150); ++ ++ for (i = 0; i < 4; i++ ) { ++ temp = I915_READ(fdi_tx_reg); ++ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; ++ temp |= snb_b_fdi_train_param[i]; ++ I915_WRITE(fdi_tx_reg, temp); ++ udelay(500); ++ ++ temp = I915_READ(fdi_rx_iir_reg); ++ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); ++ ++ if (temp & FDI_RX_SYMBOL_LOCK) { ++ I915_WRITE(fdi_rx_iir_reg, ++ temp | FDI_RX_SYMBOL_LOCK); ++ DRM_DEBUG_KMS("FDI train 2 done.\n"); ++ break; ++ } ++ } ++ if (i == 4) ++ DRM_DEBUG_KMS("FDI train 2 fail!\n"); ++ ++ DRM_DEBUG_KMS("FDI train done.\n"); ++} ++ + static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + { + struct drm_device *dev = crtc->dev; +@@ -1517,8 +1717,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; + int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; + int fdi_rx_reg = (pipe == 0) ? 
FDI_RXA_CTL : FDI_RXB_CTL; +- int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; +- int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; + int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; + int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; + int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; +@@ -1535,8 +1733,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B; + int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; + int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; ++ int trans_dpll_sel = (pipe == 0) ? 0 : 1; + u32 temp; +- int tries = 5, j, n; ++ int n; + u32 pipe_bpc; + + temp = I915_READ(pipeconf_reg); +@@ -1563,12 +1762,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + /* enable eDP PLL */ + ironlake_enable_pll_edp(crtc); + } else { +- /* enable PCH DPLL */ +- temp = I915_READ(pch_dpll_reg); +- if ((temp & DPLL_VCO_ENABLE) == 0) { +- I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); +- I915_READ(pch_dpll_reg); +- } + + /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ + temp = I915_READ(fdi_rx_reg); +@@ -1578,9 +1771,15 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + */ + temp &= ~(0x7 << 16); + temp |= (pipe_bpc << 11); +- I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | +- FDI_SEL_PCDCLK | +- FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ ++ temp &= ~(7 << 19); ++ temp |= (intel_crtc->fdi_lanes - 1) << 19; ++ I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); ++ I915_READ(fdi_rx_reg); ++ udelay(200); ++ ++ /* Switch from Rawclk to PCDclk */ ++ temp = I915_READ(fdi_rx_reg); ++ I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); + I915_READ(fdi_rx_reg); + udelay(200); + +@@ -1623,91 +1822,32 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + } + + if (!HAS_eDP) { +- /* enable CPU FDI TX and PCH FDI RX */ +- temp = I915_READ(fdi_tx_reg); +- temp |= FDI_TX_ENABLE; +- temp |= FDI_DP_PORT_WIDTH_X4; /* default */ +- temp &= ~FDI_LINK_TRAIN_NONE; +- temp |= FDI_LINK_TRAIN_PATTERN_1; +- I915_WRITE(fdi_tx_reg, temp); +- I915_READ(fdi_tx_reg); +- +- temp = I915_READ(fdi_rx_reg); +- temp &= ~FDI_LINK_TRAIN_NONE; +- temp |= FDI_LINK_TRAIN_PATTERN_1; +- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); +- I915_READ(fdi_rx_reg); +- +- udelay(150); +- +- /* Train FDI. 
*/ +- /* umask FDI RX Interrupt symbol_lock and bit_lock bit +- for train result */ +- temp = I915_READ(fdi_rx_imr_reg); +- temp &= ~FDI_RX_SYMBOL_LOCK; +- temp &= ~FDI_RX_BIT_LOCK; +- I915_WRITE(fdi_rx_imr_reg, temp); +- I915_READ(fdi_rx_imr_reg); +- udelay(150); ++ /* For PCH output, training FDI link */ ++ if (IS_GEN6(dev)) ++ gen6_fdi_link_train(crtc); ++ else ++ ironlake_fdi_link_train(crtc); + +- temp = I915_READ(fdi_rx_iir_reg); +- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); +- +- if ((temp & FDI_RX_BIT_LOCK) == 0) { +- for (j = 0; j < tries; j++) { +- temp = I915_READ(fdi_rx_iir_reg); +- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", +- temp); +- if (temp & FDI_RX_BIT_LOCK) +- break; +- udelay(200); +- } +- if (j != tries) +- I915_WRITE(fdi_rx_iir_reg, +- temp | FDI_RX_BIT_LOCK); +- else +- DRM_DEBUG_KMS("train 1 fail\n"); +- } else { +- I915_WRITE(fdi_rx_iir_reg, +- temp | FDI_RX_BIT_LOCK); +- DRM_DEBUG_KMS("train 1 ok 2!\n"); ++ /* enable PCH DPLL */ ++ temp = I915_READ(pch_dpll_reg); ++ if ((temp & DPLL_VCO_ENABLE) == 0) { ++ I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); ++ I915_READ(pch_dpll_reg); + } +- temp = I915_READ(fdi_tx_reg); +- temp &= ~FDI_LINK_TRAIN_NONE; +- temp |= FDI_LINK_TRAIN_PATTERN_2; +- I915_WRITE(fdi_tx_reg, temp); +- +- temp = I915_READ(fdi_rx_reg); +- temp &= ~FDI_LINK_TRAIN_NONE; +- temp |= FDI_LINK_TRAIN_PATTERN_2; +- I915_WRITE(fdi_rx_reg, temp); +- +- udelay(150); ++ udelay(200); + +- temp = I915_READ(fdi_rx_iir_reg); +- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); +- +- if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { +- for (j = 0; j < tries; j++) { +- temp = I915_READ(fdi_rx_iir_reg); +- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", +- temp); +- if (temp & FDI_RX_SYMBOL_LOCK) +- break; +- udelay(200); +- } +- if (j != tries) { +- I915_WRITE(fdi_rx_iir_reg, +- temp | FDI_RX_SYMBOL_LOCK); +- DRM_DEBUG_KMS("train 2 ok 1!\n"); +- } else +- DRM_DEBUG_KMS("train 2 fail\n"); +- } else { +- I915_WRITE(fdi_rx_iir_reg, +- temp | FDI_RX_SYMBOL_LOCK); +- DRM_DEBUG_KMS("train 2 ok 2!\n"); ++ if (HAS_PCH_CPT(dev)) { ++ /* Be sure PCH DPLL SEL is set */ ++ temp = I915_READ(PCH_DPLL_SEL); ++ if (trans_dpll_sel == 0 && ++ (temp & TRANSA_DPLL_ENABLE) == 0) ++ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); ++ else if (trans_dpll_sel == 1 && ++ (temp & TRANSB_DPLL_ENABLE) == 0) ++ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); ++ I915_WRITE(PCH_DPLL_SEL, temp); ++ I915_READ(PCH_DPLL_SEL); + } +- DRM_DEBUG_KMS("train done\n"); + + /* set transcoder timing */ + I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); +@@ -1718,6 +1858,60 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); + I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); + ++ /* enable normal train */ ++ temp = I915_READ(fdi_tx_reg); ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | ++ FDI_TX_ENHANCE_FRAME_ENABLE); ++ I915_READ(fdi_tx_reg); ++ ++ temp = I915_READ(fdi_rx_reg); ++ if (HAS_PCH_CPT(dev)) { ++ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; ++ temp |= FDI_LINK_TRAIN_NORMAL_CPT; ++ } else { ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_NONE; ++ } ++ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); ++ I915_READ(fdi_rx_reg); ++ ++ /* wait one idle pattern time */ ++ udelay(100); ++ ++ /* For PCH DP, enable TRANS_DP_CTL */ ++ if (HAS_PCH_CPT(dev) && ++ intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { ++ int trans_dp_ctl = (pipe == 0) ? 
TRANS_DP_CTL_A : TRANS_DP_CTL_B; ++ int reg; ++ ++ reg = I915_READ(trans_dp_ctl); ++ reg &= ~TRANS_DP_PORT_SEL_MASK; ++ reg = TRANS_DP_OUTPUT_ENABLE | ++ TRANS_DP_ENH_FRAMING | ++ TRANS_DP_VSYNC_ACTIVE_HIGH | ++ TRANS_DP_HSYNC_ACTIVE_HIGH; ++ ++ switch (intel_trans_dp_port_sel(crtc)) { ++ case PCH_DP_B: ++ reg |= TRANS_DP_PORT_SEL_B; ++ break; ++ case PCH_DP_C: ++ reg |= TRANS_DP_PORT_SEL_C; ++ break; ++ case PCH_DP_D: ++ reg |= TRANS_DP_PORT_SEL_D; ++ break; ++ default: ++ DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); ++ reg |= TRANS_DP_PORT_SEL_B; ++ break; ++ } ++ ++ I915_WRITE(trans_dp_ctl, reg); ++ POSTING_READ(trans_dp_ctl); ++ } ++ + /* enable PCH transcoder */ + temp = I915_READ(transconf_reg); + /* +@@ -1732,23 +1926,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) + ; + +- /* enable normal */ +- +- temp = I915_READ(fdi_tx_reg); +- temp &= ~FDI_LINK_TRAIN_NONE; +- I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | +- FDI_TX_ENHANCE_FRAME_ENABLE); +- I915_READ(fdi_tx_reg); +- +- temp = I915_READ(fdi_rx_reg); +- temp &= ~FDI_LINK_TRAIN_NONE; +- I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE | +- FDI_RX_ENHANCE_FRAME_ENABLE); +- I915_READ(fdi_rx_reg); +- +- /* wait one idle pattern time */ +- udelay(100); +- + } + + intel_crtc_load_lut(crtc); +@@ -1799,6 +1976,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + I915_READ(pf_ctl_reg); + } + I915_WRITE(pf_win_size, 0); ++ POSTING_READ(pf_win_size); ++ + + /* disable CPU FDI tx and PCH FDI rx */ + temp = I915_READ(fdi_tx_reg); +@@ -1819,11 +1998,18 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(fdi_tx_reg, temp); ++ POSTING_READ(fdi_tx_reg); + + temp = I915_READ(fdi_rx_reg); +- temp &= ~FDI_LINK_TRAIN_NONE; +- temp |= FDI_LINK_TRAIN_PATTERN_1; ++ if (HAS_PCH_CPT(dev)) { ++ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; ++ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; ++ } else { ++ temp &= ~FDI_LINK_TRAIN_NONE; ++ temp |= FDI_LINK_TRAIN_PATTERN_1; ++ } + I915_WRITE(fdi_rx_reg, temp); ++ POSTING_READ(fdi_rx_reg); + + udelay(100); + +@@ -1853,6 +2039,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + } + } + } ++ + temp = I915_READ(transconf_reg); + /* BPC in transcoder is consistent with that in pipeconf */ + temp &= ~PIPE_BPC_MASK; +@@ -1861,35 +2048,53 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) + I915_READ(transconf_reg); + udelay(100); + ++ if (HAS_PCH_CPT(dev)) { ++ /* disable TRANS_DP_CTL */ ++ int trans_dp_ctl = (pipe == 0) ? 
TRANS_DP_CTL_A : TRANS_DP_CTL_B; ++ int reg; ++ ++ reg = I915_READ(trans_dp_ctl); ++ reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); ++ I915_WRITE(trans_dp_ctl, reg); ++ POSTING_READ(trans_dp_ctl); ++ ++ /* disable DPLL_SEL */ ++ temp = I915_READ(PCH_DPLL_SEL); ++ if (trans_dpll_sel == 0) ++ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); ++ else ++ temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); ++ I915_WRITE(PCH_DPLL_SEL, temp); ++ I915_READ(PCH_DPLL_SEL); ++ ++ } ++ + /* disable PCH DPLL */ + temp = I915_READ(pch_dpll_reg); +- if ((temp & DPLL_VCO_ENABLE) != 0) { +- I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); +- I915_READ(pch_dpll_reg); +- } ++ I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); ++ I915_READ(pch_dpll_reg); + + if (HAS_eDP) { + ironlake_disable_pll_edp(crtc); + } + ++ /* Switch from PCDclk to Rawclk */ + temp = I915_READ(fdi_rx_reg); + temp &= ~FDI_SEL_PCDCLK; + I915_WRITE(fdi_rx_reg, temp); + I915_READ(fdi_rx_reg); + ++ /* Disable CPU FDI TX PLL */ ++ temp = I915_READ(fdi_tx_reg); ++ I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); ++ I915_READ(fdi_tx_reg); ++ udelay(100); ++ + temp = I915_READ(fdi_rx_reg); + temp &= ~FDI_RX_PLL_ENABLE; + I915_WRITE(fdi_rx_reg, temp); + I915_READ(fdi_rx_reg); + +- /* Disable CPU FDI TX PLL */ +- temp = I915_READ(fdi_tx_reg); +- if ((temp & FDI_TX_PLL_ENABLE) != 0) { +- I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); +- I915_READ(fdi_tx_reg); +- udelay(100); +- } +- + /* Wait for the clocks to turn off. */ + udelay(100); + break; +@@ -2111,7 +2316,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, + struct drm_display_mode *adjusted_mode) + { + struct drm_device *dev = crtc->dev; +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + /* FDI link clock is fixed at 2.7G */ + if (mode->clock * 3 > 27000 * 4) + return MODE_CLOCK_HIGH; +@@ -2325,6 +2530,30 @@ static struct intel_watermark_params i830_wm_info = { + I830_FIFO_LINE_SIZE + }; + ++static struct intel_watermark_params ironlake_display_wm_info = { ++ ILK_DISPLAY_FIFO, ++ ILK_DISPLAY_MAXWM, ++ ILK_DISPLAY_DFTWM, ++ 2, ++ ILK_FIFO_LINE_SIZE ++}; ++ ++static struct intel_watermark_params ironlake_display_srwm_info = { ++ ILK_DISPLAY_SR_FIFO, ++ ILK_DISPLAY_MAX_SRWM, ++ ILK_DISPLAY_DFT_SRWM, ++ 2, ++ ILK_FIFO_LINE_SIZE ++}; ++ ++static struct intel_watermark_params ironlake_cursor_srwm_info = { ++ ILK_CURSOR_SR_FIFO, ++ ILK_CURSOR_MAX_SRWM, ++ ILK_CURSOR_DFT_SRWM, ++ 2, ++ ILK_FIFO_LINE_SIZE ++}; ++ + /** + * intel_calculate_wm - calculate watermark level + * @clock_in_khz: pixel clock +@@ -2443,66 +2672,6 @@ static void pineview_disable_cxsr(struct drm_device *dev) + DRM_INFO("Big FIFO is disabled\n"); + } + +-static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock, +- int pixel_size) +-{ +- struct drm_i915_private *dev_priv = dev->dev_private; +- u32 reg; +- unsigned long wm; +- struct cxsr_latency *latency; +- +- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq, +- dev_priv->mem_freq); +- if (!latency) { +- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); +- pineview_disable_cxsr(dev); +- return; +- } +- +- /* Display SR */ +- wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size, +- latency->display_sr); +- reg = I915_READ(DSPFW1); +- reg &= 0x7fffff; +- reg |= wm << 23; +- I915_WRITE(DSPFW1, reg); +- DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); +- +- /* cursor SR */ +- wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size, +- latency->cursor_sr); +- reg = 
I915_READ(DSPFW3); +- reg &= ~(0x3f << 24); +- reg |= (wm & 0x3f) << 24; +- I915_WRITE(DSPFW3, reg); +- +- /* Display HPLL off SR */ +- wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, +- latency->display_hpll_disable, I915_FIFO_LINE_SIZE); +- reg = I915_READ(DSPFW3); +- reg &= 0xfffffe00; +- reg |= wm & 0x1ff; +- I915_WRITE(DSPFW3, reg); +- +- /* cursor HPLL off SR */ +- wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size, +- latency->cursor_hpll_disable); +- reg = I915_READ(DSPFW3); +- reg &= ~(0x3f << 16); +- reg |= (wm & 0x3f) << 16; +- I915_WRITE(DSPFW3, reg); +- DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); +- +- /* activate cxsr */ +- reg = I915_READ(DSPFW3); +- reg |= PINEVIEW_SELF_REFRESH_EN; +- I915_WRITE(DSPFW3, reg); +- +- DRM_INFO("Big FIFO is enabled\n"); +- +- return; +-} +- + /* + * Latency for FIFO fetches is dependent on several factors: + * - memory configuration (speed, channels) +@@ -2587,6 +2756,71 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane) + return size; + } + ++static void pineview_update_wm(struct drm_device *dev, int planea_clock, ++ int planeb_clock, int sr_hdisplay, int pixel_size) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ u32 reg; ++ unsigned long wm; ++ struct cxsr_latency *latency; ++ int sr_clock; ++ ++ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq, ++ dev_priv->mem_freq); ++ if (!latency) { ++ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); ++ pineview_disable_cxsr(dev); ++ return; ++ } ++ ++ if (!planea_clock || !planeb_clock) { ++ sr_clock = planea_clock ? planea_clock : planeb_clock; ++ ++ /* Display SR */ ++ wm = intel_calculate_wm(sr_clock, &pineview_display_wm, ++ pixel_size, latency->display_sr); ++ reg = I915_READ(DSPFW1); ++ reg &= ~DSPFW_SR_MASK; ++ reg |= wm << DSPFW_SR_SHIFT; ++ I915_WRITE(DSPFW1, reg); ++ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); ++ ++ /* cursor SR */ ++ wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm, ++ pixel_size, latency->cursor_sr); ++ reg = I915_READ(DSPFW3); ++ reg &= ~DSPFW_CURSOR_SR_MASK; ++ reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT; ++ I915_WRITE(DSPFW3, reg); ++ ++ /* Display HPLL off SR */ ++ wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm, ++ pixel_size, latency->display_hpll_disable); ++ reg = I915_READ(DSPFW3); ++ reg &= ~DSPFW_HPLL_SR_MASK; ++ reg |= wm & DSPFW_HPLL_SR_MASK; ++ I915_WRITE(DSPFW3, reg); ++ ++ /* cursor HPLL off SR */ ++ wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm, ++ pixel_size, latency->cursor_hpll_disable); ++ reg = I915_READ(DSPFW3); ++ reg &= ~DSPFW_HPLL_CURSOR_MASK; ++ reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT; ++ I915_WRITE(DSPFW3, reg); ++ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); ++ ++ /* activate cxsr */ ++ reg = I915_READ(DSPFW3); ++ reg |= PINEVIEW_SELF_REFRESH_EN; ++ I915_WRITE(DSPFW3, reg); ++ DRM_DEBUG_KMS("Self-refresh is enabled\n"); ++ } else { ++ pineview_disable_cxsr(dev); ++ DRM_DEBUG_KMS("Self-refresh is disabled\n"); ++ } ++} ++ + static void g4x_update_wm(struct drm_device *dev, int planea_clock, + int planeb_clock, int sr_hdisplay, int pixel_size) + { +@@ -2757,11 +2991,22 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, + srwm = total_size - sr_entries; + if (srwm < 0) + srwm = 1; +- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); ++ ++ if (IS_I945G(dev) || IS_I945GM(dev)) ++ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); ++ else if (IS_I915GM(dev)) { ++ /* 
915M has a smaller SRWM field */ ++ I915_WRITE(FW_BLC_SELF, srwm & 0x3f); ++ I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); ++ } + } else { + /* Turn off self refresh if both pipes are enabled */ +- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) +- & ~FW_BLC_SELF_EN); ++ if (IS_I945G(dev) || IS_I945GM(dev)) { ++ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) ++ & ~FW_BLC_SELF_EN); ++ } else if (IS_I915GM(dev)) { ++ I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); ++ } + } + + DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", +@@ -2796,6 +3041,108 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, + I915_WRITE(FW_BLC, fwater_lo); + } + ++#define ILK_LP0_PLANE_LATENCY 700 ++ ++static void ironlake_update_wm(struct drm_device *dev, int planea_clock, ++ int planeb_clock, int sr_hdisplay, int pixel_size) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ int planea_wm, planeb_wm, cursora_wm, cursorb_wm; ++ int sr_wm, cursor_wm; ++ unsigned long line_time_us; ++ int sr_clock, entries_required; ++ u32 reg_value; ++ ++ /* Calculate and update the watermark for plane A */ ++ if (planea_clock) { ++ entries_required = ((planea_clock / 1000) * pixel_size * ++ ILK_LP0_PLANE_LATENCY) / 1000; ++ entries_required = DIV_ROUND_UP(entries_required, ++ ironlake_display_wm_info.cacheline_size); ++ planea_wm = entries_required + ++ ironlake_display_wm_info.guard_size; ++ ++ if (planea_wm > (int)ironlake_display_wm_info.max_wm) ++ planea_wm = ironlake_display_wm_info.max_wm; ++ ++ cursora_wm = 16; ++ reg_value = I915_READ(WM0_PIPEA_ILK); ++ reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); ++ reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) | ++ (cursora_wm & WM0_PIPE_CURSOR_MASK); ++ I915_WRITE(WM0_PIPEA_ILK, reg_value); ++ DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, " ++ "cursor: %d\n", planea_wm, cursora_wm); ++ } ++ /* Calculate and update the watermark for plane B */ ++ if (planeb_clock) { ++ entries_required = ((planeb_clock / 1000) * pixel_size * ++ ILK_LP0_PLANE_LATENCY) / 1000; ++ entries_required = DIV_ROUND_UP(entries_required, ++ ironlake_display_wm_info.cacheline_size); ++ planeb_wm = entries_required + ++ ironlake_display_wm_info.guard_size; ++ ++ if (planeb_wm > (int)ironlake_display_wm_info.max_wm) ++ planeb_wm = ironlake_display_wm_info.max_wm; ++ ++ cursorb_wm = 16; ++ reg_value = I915_READ(WM0_PIPEB_ILK); ++ reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); ++ reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) | ++ (cursorb_wm & WM0_PIPE_CURSOR_MASK); ++ I915_WRITE(WM0_PIPEB_ILK, reg_value); ++ DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, " ++ "cursor: %d\n", planeb_wm, cursorb_wm); ++ } ++ ++ /* ++ * Calculate and update the self-refresh watermark only when one ++ * display plane is used. ++ */ ++ if (!planea_clock || !planeb_clock) { ++ int line_count; ++ /* Read the self-refresh latency. The unit is 0.5us */ ++ int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; ++ ++ sr_clock = planea_clock ? 
planea_clock : planeb_clock; ++ line_time_us = ((sr_hdisplay * 1000) / sr_clock); ++ ++ /* Use ns/us then divide to preserve precision */ ++ line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) ++ / 1000; ++ ++ /* calculate the self-refresh watermark for display plane */ ++ entries_required = line_count * sr_hdisplay * pixel_size; ++ entries_required = DIV_ROUND_UP(entries_required, ++ ironlake_display_srwm_info.cacheline_size); ++ sr_wm = entries_required + ++ ironlake_display_srwm_info.guard_size; ++ ++ /* calculate the self-refresh watermark for display cursor */ ++ entries_required = line_count * pixel_size * 64; ++ entries_required = DIV_ROUND_UP(entries_required, ++ ironlake_cursor_srwm_info.cacheline_size); ++ cursor_wm = entries_required + ++ ironlake_cursor_srwm_info.guard_size; ++ ++ /* configure watermark and enable self-refresh */ ++ reg_value = I915_READ(WM1_LP_ILK); ++ reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | ++ WM1_LP_CURSOR_MASK); ++ reg_value |= WM1_LP_SR_EN | ++ (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | ++ (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; ++ ++ I915_WRITE(WM1_LP_ILK, reg_value); ++ DRM_DEBUG_KMS("self-refresh watermark: display plane %d " ++ "cursor %d\n", sr_wm, cursor_wm); ++ ++ } else { ++ /* Turn off self refresh if both pipes are enabled */ ++ I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); ++ } ++} + /** + * intel_update_watermarks - update FIFO watermark values based on current modes + * +@@ -2865,12 +3212,6 @@ static void intel_update_watermarks(struct drm_device *dev) + if (enabled <= 0) + return; + +- /* Single plane configs can enable self refresh */ +- if (enabled == 1 && IS_PINEVIEW(dev)) +- pineview_enable_cxsr(dev, sr_clock, pixel_size); +- else if (IS_PINEVIEW(dev)) +- pineview_disable_cxsr(dev); +- + dev_priv->display.update_wm(dev, planea_clock, planeb_clock, + sr_hdisplay, pixel_size); + } +@@ -2900,14 +3241,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; + int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; + int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; +- int refclk, num_outputs = 0; ++ int refclk, num_connectors = 0; + intel_clock_t clock, reduced_clock; + u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; + bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; + bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; + bool is_edp = false; + struct drm_mode_config *mode_config = &dev->mode_config; +- struct drm_connector *connector; ++ struct drm_encoder *encoder; ++ struct intel_encoder *intel_encoder = NULL; + const intel_limit_t *limit; + int ret; + struct fdi_m_n m_n = {0}; +@@ -2918,6 +3260,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0; + int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; + int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; ++ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; ++ int trans_dpll_sel = (pipe == 0) ? 
0 : 1; + int lvds_reg = LVDS; + u32 temp; + int sdvo_pixel_multiply; +@@ -2925,20 +3269,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + + drm_vblank_pre_modeset(dev, pipe); + +- list_for_each_entry(connector, &mode_config->connector_list, head) { +- struct intel_output *intel_output = to_intel_output(connector); ++ list_for_each_entry(encoder, &mode_config->encoder_list, head) { + +- if (!connector->encoder || connector->encoder->crtc != crtc) ++ if (!encoder || encoder->crtc != crtc) + continue; + +- switch (intel_output->type) { ++ intel_encoder = enc_to_intel_encoder(encoder); ++ ++ switch (intel_encoder->type) { + case INTEL_OUTPUT_LVDS: + is_lvds = true; + break; + case INTEL_OUTPUT_SDVO: + case INTEL_OUTPUT_HDMI: + is_sdvo = true; +- if (intel_output->needs_tv_clock) ++ if (intel_encoder->needs_tv_clock) + is_tv = true; + break; + case INTEL_OUTPUT_DVO: +@@ -2958,16 +3303,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + break; + } + +- num_outputs++; ++ num_connectors++; + } + +- if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) { ++ if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) { + refclk = dev_priv->lvds_ssc_freq * 1000; + DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", + refclk / 1000); + } else if (IS_I9XX(dev)) { + refclk = 96000; +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + refclk = 120000; /* 120Mhz refclk */ + } else { + refclk = 48000; +@@ -3025,15 +3370,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + } + + /* FDI link */ +- if (IS_IRONLAKE(dev)) { +- int lane, link_bw, bpp; ++ if (HAS_PCH_SPLIT(dev)) { ++ int lane = 0, link_bw, bpp; + /* eDP doesn't require FDI link, so just set DP M/N + according to current link config */ + if (is_edp) { +- struct drm_connector *edp; + target_clock = mode->clock; +- edp = intel_pipe_get_output(crtc); +- intel_edp_link_config(to_intel_output(edp), ++ intel_edp_link_config(intel_encoder, + &lane, &link_bw); + } else { + /* DP over FDI requires target mode clock +@@ -3042,7 +3385,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + target_clock = mode->clock; + else + target_clock = adjusted_mode->clock; +- lane = 4; + link_bw = 270000; + } + +@@ -3094,6 +3436,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + bpp = 24; + } + ++ if (!lane) { ++ /* ++ * Account for spread spectrum to avoid ++ * oversubscribing the link. Max center spread ++ * is 2.5%; use 5% for safety's sake. ++ */ ++ u32 bps = target_clock * bpp * 21 / 20; ++ lane = bps / (link_bw * 8) + 1; ++ } ++ ++ intel_crtc->fdi_lanes = lane; ++ + ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); + } + +@@ -3102,7 +3456,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + * PCH B stepping, previous chipset stepping should be + * ignoring this setting. 
+ */ +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + temp = I915_READ(PCH_DREF_CONTROL); + /* Always enable nonspread source */ + temp &= ~DREF_NONSPREAD_SOURCE_MASK; +@@ -3149,7 +3503,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + reduced_clock.m2; + } + +- if (!IS_IRONLAKE(dev)) ++ if (!HAS_PCH_SPLIT(dev)) + dpll = DPLL_VGA_MODE_DIS; + + if (IS_I9XX(dev)) { +@@ -3162,7 +3516,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; +- else if (IS_IRONLAKE(dev)) ++ else if (HAS_PCH_SPLIT(dev)) + dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; + } + if (is_dp) +@@ -3174,7 +3528,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + else { + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; + /* also FPA1 */ +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; + if (IS_G4X(dev) && has_reduced_clock) + dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; +@@ -3193,7 +3547,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; + break; + } +- if (IS_I965G(dev) && !IS_IRONLAKE(dev)) ++ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) + dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); + } else { + if (is_lvds) { +@@ -3214,7 +3568,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + /* XXX: just matching BIOS for now */ + /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ + dpll |= 3; +- else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) ++ else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) + dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; + else + dpll |= PLL_REF_INPUT_DREFCLK; +@@ -3227,7 +3581,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + + /* Ironlake's plane is forced to pipe, bit 24 is to + enable color space conversion */ +- if (!IS_IRONLAKE(dev)) { ++ if (!HAS_PCH_SPLIT(dev)) { + if (pipe == 0) + dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; + else +@@ -3249,14 +3603,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + } + + /* Disable the panel fitter if it was on our pipe */ +- if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe) ++ if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe) + I915_WRITE(PFIT_CONTROL, 0); + + DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); + drm_mode_debug_printmodeline(mode); + + /* assign to Ironlake registers */ +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + fp_reg = pch_fp_reg; + dpll_reg = pch_dpll_reg; + } +@@ -3275,6 +3629,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + udelay(150); + } + ++ /* enable transcoder DPLL */ ++ if (HAS_PCH_CPT(dev)) { ++ temp = I915_READ(PCH_DPLL_SEL); ++ if (trans_dpll_sel == 0) ++ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); ++ else ++ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); ++ I915_WRITE(PCH_DPLL_SEL, temp); ++ I915_READ(PCH_DPLL_SEL); ++ udelay(150); ++ } ++ + /* The LVDS pin pair needs to be on before the DPLLs are enabled. + * This is an exception to the general rule that mode_set doesn't turn + * things on. 
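The mode-set hunk above stops hard-coding four FDI lanes and instead derives the lane count from the target pixel clock, the bits per pixel, and the fixed FDI link bandwidth, padding the required bandwidth by 5% to cover spread-spectrum clocking (max centre spread 2.5%). A rough standalone sketch of that rule, not taken from the patch itself; clock values are in kHz as elsewhere in the driver, and the helper name and sample mode are illustrative:

#include <stdio.h>

/* Lanes needed for a mode, following the 5%-margin rule in the hunk above. */
static int fdi_lanes_needed(int target_clock_khz, int bpp, int link_bw_khz)
{
	/* pad by 5% for spread spectrum: max centre spread is 2.5% */
	int bps = target_clock_khz * bpp * 21 / 20;

	/* integer divide by the per-lane payload and add one lane of headroom,
	   exactly as the hunk above does */
	return bps / (link_bw_khz * 8) + 1;
}

int main(void)
{
	/* a ~154 MHz pixel clock at 24 bpp over the fixed 2.7G FDI link (link_bw 270000) */
	printf("%d lanes\n", fdi_lanes_needed(154000, 24, 270000));
	return 0;
}

With these sample numbers the result is two lanes rather than the four the old code always requested, which is what intel_crtc->fdi_lanes then feeds into the FDI training paths above.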
+@@ -3282,11 +3648,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + if (is_lvds) { + u32 lvds; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + lvds_reg = PCH_LVDS; + + lvds = I915_READ(lvds_reg); +- lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; ++ lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; ++ if (pipe == 1) { ++ if (HAS_PCH_CPT(dev)) ++ lvds |= PORT_TRANS_B_SEL_CPT; ++ else ++ lvds |= LVDS_PIPEB_SELECT; ++ } else { ++ if (HAS_PCH_CPT(dev)) ++ lvds &= ~PORT_TRANS_SEL_MASK; ++ else ++ lvds &= ~LVDS_PIPEB_SELECT; ++ } + /* set the corresponsding LVDS_BORDER bit */ + lvds |= dev_priv->lvds_border_bits; + /* Set the B0-B3 data pairs corresponding to whether we're going to +@@ -3304,12 +3681,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + /* set the dithering flag */ + if (IS_I965G(dev)) { + if (dev_priv->lvds_dither) { +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + pipeconf |= PIPE_ENABLE_DITHER; + else + lvds |= LVDS_ENABLE_DITHER; + } else { +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + pipeconf &= ~PIPE_ENABLE_DITHER; + else + lvds &= ~LVDS_ENABLE_DITHER; +@@ -3320,6 +3697,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + } + if (is_dp) + intel_dp_set_m_n(crtc, mode, adjusted_mode); ++ else if (HAS_PCH_SPLIT(dev)) { ++ /* For non-DP output, clear any trans DP clock recovery setting.*/ ++ if (pipe == 0) { ++ I915_WRITE(TRANSA_DATA_M1, 0); ++ I915_WRITE(TRANSA_DATA_N1, 0); ++ I915_WRITE(TRANSA_DP_LINK_M1, 0); ++ I915_WRITE(TRANSA_DP_LINK_N1, 0); ++ } else { ++ I915_WRITE(TRANSB_DATA_M1, 0); ++ I915_WRITE(TRANSB_DATA_N1, 0); ++ I915_WRITE(TRANSB_DP_LINK_M1, 0); ++ I915_WRITE(TRANSB_DP_LINK_N1, 0); ++ } ++ } + + if (!is_edp) { + I915_WRITE(fp_reg, fp); +@@ -3328,7 +3719,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + /* Wait for the clocks to stabilize. */ + udelay(150); + +- if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { ++ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { + if (is_sdvo) { + sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; + I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | +@@ -3375,14 +3766,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + /* pipesrc and dspsize control the size that is scaled from, which should + * always be the user's requested size. + */ +- if (!IS_IRONLAKE(dev)) { ++ if (!HAS_PCH_SPLIT(dev)) { + I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | + (mode->hdisplay - 1)); + I915_WRITE(dsppos_reg, 0); + } + I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); + +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); + I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); + I915_WRITE(link_m1_reg, m_n.link_m); +@@ -3394,6 +3785,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, + /* enable FDI RX PLL too */ + temp = I915_READ(fdi_rx_reg); + I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); ++ I915_READ(fdi_rx_reg); ++ udelay(200); ++ ++ /* enable FDI TX PLL too */ ++ temp = I915_READ(fdi_tx_reg); ++ I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); ++ I915_READ(fdi_tx_reg); ++ ++ /* enable FDI RX PCDCLK */ ++ temp = I915_READ(fdi_rx_reg); ++ I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); ++ I915_READ(fdi_rx_reg); + udelay(200); + } + } +@@ -3438,7 +3841,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) + return; + + /* use legacy palette for Ironlake */ +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + palreg = (intel_crtc->pipe == 0) ? 
LGC_PALETTE_A : + LGC_PALETTE_B; + +@@ -3494,7 +3897,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, + if (!bo) + return -ENOENT; + +- obj_priv = bo->driver_private; ++ obj_priv = to_intel_bo(bo); + + if (bo->size < width * height * 4) { + DRM_ERROR("buffer is to small\n"); +@@ -3638,9 +4041,9 @@ static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + * detection. + * + * It will be up to the load-detect code to adjust the pipe as appropriate for +- * its requirements. The pipe will be connected to no other outputs. ++ * its requirements. The pipe will be connected to no other encoders. + * +- * Currently this code will only succeed if there is a pipe with no outputs ++ * Currently this code will only succeed if there is a pipe with no encoders + * configured for it. In the future, it could choose to temporarily disable + * some outputs to free up a pipe for its use. + * +@@ -3653,14 +4056,15 @@ static struct drm_display_mode load_detect_mode = { + 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), + }; + +-struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, ++struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, ++ struct drm_connector *connector, + struct drm_display_mode *mode, + int *dpms_mode) + { + struct intel_crtc *intel_crtc; + struct drm_crtc *possible_crtc; + struct drm_crtc *supported_crtc =NULL; +- struct drm_encoder *encoder = &intel_output->enc; ++ struct drm_encoder *encoder = &intel_encoder->enc; + struct drm_crtc *crtc = NULL; + struct drm_device *dev = encoder->dev; + struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; +@@ -3712,8 +4116,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, + } + + encoder->crtc = crtc; +- intel_output->base.encoder = encoder; +- intel_output->load_detect_temp = true; ++ connector->encoder = encoder; ++ intel_encoder->load_detect_temp = true; + + intel_crtc = to_intel_crtc(crtc); + *dpms_mode = intel_crtc->dpms_mode; +@@ -3738,23 +4142,24 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, + return crtc; + } + +-void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode) ++void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, ++ struct drm_connector *connector, int dpms_mode) + { +- struct drm_encoder *encoder = &intel_output->enc; ++ struct drm_encoder *encoder = &intel_encoder->enc; + struct drm_device *dev = encoder->dev; + struct drm_crtc *crtc = encoder->crtc; + struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + +- if (intel_output->load_detect_temp) { ++ if (intel_encoder->load_detect_temp) { + encoder->crtc = NULL; +- intel_output->base.encoder = NULL; +- intel_output->load_detect_temp = false; ++ connector->encoder = NULL; ++ intel_encoder->load_detect_temp = false; + crtc->enabled = drm_helper_crtc_in_use(crtc); + drm_helper_disable_unused_functions(dev); + } + +- /* Switch crtc and output back off if necessary */ ++ /* Switch crtc and encoder back off if necessary */ + if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { + if (encoder->crtc == crtc) + encoder_funcs->dpms(encoder, dpms_mode); +@@ -3921,7 +4326,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) + int dpll_reg = (pipe == 0) ? 
DPLL_A : DPLL_B; + int dpll = I915_READ(dpll_reg); + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + return; + + if (!dev_priv->lvds_downclock_avail) +@@ -3960,7 +4365,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) + int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; + int dpll = I915_READ(dpll_reg); + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + return; + + if (!dev_priv->lvds_downclock_avail) +@@ -4010,6 +4415,11 @@ static void intel_idle_update(struct work_struct *work) + + mutex_lock(&dev->struct_mutex); + ++ if (IS_I945G(dev) || IS_I945GM(dev)) { ++ DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); ++ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); ++ } ++ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + /* Skip inactive CRTCs */ + if (!crtc->fb) +@@ -4043,9 +4453,17 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return; + +- if (!dev_priv->busy) ++ if (!dev_priv->busy) { ++ if (IS_I945G(dev) || IS_I945GM(dev)) { ++ u32 fw_blc_self; ++ ++ DRM_DEBUG_DRIVER("disable memory self refresh on 945\n"); ++ fw_blc_self = I915_READ(FW_BLC_SELF); ++ fw_blc_self &= ~FW_BLC_SELF_EN; ++ I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); ++ } + dev_priv->busy = true; +- else ++ } else + mod_timer(&dev_priv->idle_timer, jiffies + + msecs_to_jiffies(GPU_IDLE_TIMEOUT)); + +@@ -4057,6 +4475,14 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) + intel_fb = to_intel_framebuffer(crtc->fb); + if (intel_fb->obj == obj) { + if (!intel_crtc->busy) { ++ if (IS_I945G(dev) || IS_I945GM(dev)) { ++ u32 fw_blc_self; ++ ++ DRM_DEBUG_DRIVER("disable memory self refresh on 945\n"); ++ fw_blc_self = I915_READ(FW_BLC_SELF); ++ fw_blc_self &= ~FW_BLC_SELF_EN; ++ I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); ++ } + /* Non-busy -> busy, upclock */ + intel_increase_pllclock(crtc, true); + intel_crtc->busy = true; +@@ -4118,7 +4544,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) + work = intel_crtc->unpin_work; + if (work == NULL || !work->pending) { + if (work && !work->pending) { +- obj_priv = work->pending_flip_obj->driver_private; ++ obj_priv = to_intel_bo(work->pending_flip_obj); + DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", + obj_priv, + atomic_read(&obj_priv->pending_flip)); +@@ -4143,7 +4569,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) + + spin_unlock_irqrestore(&dev->event_lock, flags); + +- obj_priv = work->pending_flip_obj->driver_private; ++ obj_priv = to_intel_bo(work->pending_flip_obj); + + /* Initial scanout buffer will have a 0 pending flip count */ + if ((atomic_read(&obj_priv->pending_flip) == 0) || +@@ -4214,7 +4640,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, + ret = intel_pin_and_fence_fb_obj(dev, obj); + if (ret != 0) { + DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", +- obj->driver_private); ++ to_intel_bo(obj)); + kfree(work); + intel_crtc->unpin_work = NULL; + mutex_unlock(&dev->struct_mutex); +@@ -4228,7 +4654,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, + crtc->fb = fb; + i915_gem_object_flush_write_domain(obj); + drm_vblank_get(dev, intel_crtc->pipe); +- obj_priv = obj->driver_private; ++ obj_priv = to_intel_bo(obj); + atomic_inc(&obj_priv->pending_flip); + work->pending_flip_obj = obj; + +@@ -4354,15 +4780,15 @@ struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) + return crtc; + } + 
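The intel_mark_busy() and intel_idle_update() hunks above gate memory self-refresh on 945-class parts around GPU activity: self-refresh is cleared as soon as the GPU becomes busy and re-enabled from the idle worker. Reduced to that shape in a small sketch, with the register modelled as a plain variable and the bit positions treated as assumptions rather than quoted from i915_reg.h:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FW_BLC_SELF_EN      (1u << 15)	/* self-refresh enable (assumed bit) */
#define FW_BLC_SELF_EN_MASK (1u << 31)	/* write-enable for the EN bit (assumed bit) */

static uint32_t fw_blc_self;		/* stands in for the MMIO register */

static void i945_self_refresh(bool gpu_idle)
{
	if (gpu_idle)
		/* idle path: allow self-refresh, as intel_idle_update() now does */
		fw_blc_self = FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN;
	else
		/* busy path: clear EN but keep the write-enable bit,
		   mirroring intel_mark_busy() */
		fw_blc_self = (fw_blc_self & ~FW_BLC_SELF_EN) | FW_BLC_SELF_EN_MASK;
}

int main(void)
{
	i945_self_refresh(true);
	printf("idle -> 0x%08x\n", (unsigned int)fw_blc_self);
	i945_self_refresh(false);
	printf("busy -> 0x%08x\n", (unsigned int)fw_blc_self);
	return 0;
}

The ordering is the point of the split: the enable is only written once the idle timer fires, while the disable is written in the busy path before the pixel clock is bumped back up.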
+-static int intel_connector_clones(struct drm_device *dev, int type_mask) ++static int intel_encoder_clones(struct drm_device *dev, int type_mask) + { + int index_mask = 0; +- struct drm_connector *connector; ++ struct drm_encoder *encoder; + int entry = 0; + +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- struct intel_output *intel_output = to_intel_output(connector); +- if (type_mask & intel_output->clone_mask) ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ if (type_mask & intel_encoder->clone_mask) + index_mask |= (1 << entry); + entry++; + } +@@ -4373,7 +4799,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask) + static void intel_setup_outputs(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; +- struct drm_connector *connector; ++ struct drm_encoder *encoder; + + intel_crt_init(dev); + +@@ -4381,16 +4807,15 @@ static void intel_setup_outputs(struct drm_device *dev) + if (IS_MOBILE(dev) && !IS_I830(dev)) + intel_lvds_init(dev); + +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + int found; + + if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) + intel_dp_init(dev, DP_A); + + if (I915_READ(HDMIB) & PORT_DETECTED) { +- /* check SDVOB */ +- /* found = intel_sdvo_init(dev, HDMIB); */ +- found = 0; ++ /* PCH SDVOB multiplex with HDMIB */ ++ found = intel_sdvo_init(dev, PCH_SDVOB); + if (!found) + intel_hdmi_init(dev, HDMIB); + if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) +@@ -4450,19 +4875,18 @@ static void intel_setup_outputs(struct drm_device *dev) + DRM_DEBUG_KMS("probing DP_D\n"); + intel_dp_init(dev, DP_D); + } +- } else if (IS_I8XX(dev)) ++ } else if (IS_GEN2(dev)) + intel_dvo_init(dev); + + if (SUPPORTS_TV(dev)) + intel_tv_init(dev); + +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- struct intel_output *intel_output = to_intel_output(connector); +- struct drm_encoder *encoder = &intel_output->enc; ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + +- encoder->possible_crtcs = intel_output->crtc_mask; +- encoder->possible_clones = intel_connector_clones(dev, +- intel_output->clone_mask); ++ encoder->possible_crtcs = intel_encoder->crtc_mask; ++ encoder->possible_clones = intel_encoder_clones(dev, ++ intel_encoder->clone_mask); + } + } + +@@ -4586,6 +5010,91 @@ err_unref: + return NULL; + } + ++void ironlake_enable_drps(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl; ++ u8 fmax, fmin, fstart, vstart; ++ int i = 0; ++ ++ /* 100ms RC evaluation intervals */ ++ I915_WRITE(RCUPEI, 100000); ++ I915_WRITE(RCDNEI, 100000); ++ ++ /* Set max/min thresholds to 90ms and 80ms respectively */ ++ I915_WRITE(RCBMAXAVG, 90000); ++ I915_WRITE(RCBMINAVG, 80000); ++ ++ I915_WRITE(MEMIHYST, 1); ++ ++ /* Set up min, max, and cur for interrupt handling */ ++ fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; ++ fmin = (rgvmodectl & MEMMODE_FMIN_MASK); ++ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> ++ MEMMODE_FSTART_SHIFT; ++ vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> ++ PXVFREQ_PX_SHIFT; ++ ++ dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */ ++ dev_priv->min_delay = fmin; ++ dev_priv->cur_delay = fstart; ++ ++ I915_WRITE(MEMINTREN, 
MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); ++ ++ /* ++ * Interrupts will be enabled in ironlake_irq_postinstall ++ */ ++ ++ I915_WRITE(VIDSTART, vstart); ++ POSTING_READ(VIDSTART); ++ ++ rgvmodectl |= MEMMODE_SWMODE_EN; ++ I915_WRITE(MEMMODECTL, rgvmodectl); ++ ++ while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) { ++ if (i++ > 100) { ++ DRM_ERROR("stuck trying to change perf mode\n"); ++ break; ++ } ++ msleep(1); ++ } ++ msleep(1); ++ ++ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | ++ (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; ++ I915_WRITE(MEMSWCTL, rgvswctl); ++ POSTING_READ(MEMSWCTL); ++ ++ rgvswctl |= MEMCTL_CMD_STS; ++ I915_WRITE(MEMSWCTL, rgvswctl); ++} ++ ++void ironlake_disable_drps(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ u32 rgvswctl; ++ u8 fstart; ++ ++ /* Ack interrupts, disable EFC interrupt */ ++ I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); ++ I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); ++ I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); ++ I915_WRITE(DEIIR, DE_PCU_EVENT); ++ I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); ++ ++ /* Go back to the starting frequency */ ++ fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >> ++ MEMMODE_FSTART_SHIFT; ++ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | ++ (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; ++ I915_WRITE(MEMSWCTL, rgvswctl); ++ msleep(1); ++ rgvswctl |= MEMCTL_CMD_STS; ++ I915_WRITE(MEMSWCTL, rgvswctl); ++ msleep(1); ++ ++} ++ + void intel_init_clock_gating(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; +@@ -4594,7 +5103,40 @@ void intel_init_clock_gating(struct drm_device *dev) + * Disable clock gating reported to work incorrectly according to the + * specs, but enable as much else as we can. 
+ */ +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { ++ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; ++ ++ if (IS_IRONLAKE(dev)) { ++ /* Required for FBC */ ++ dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE; ++ /* Required for CxSR */ ++ dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; ++ ++ I915_WRITE(PCH_3DCGDIS0, ++ MARIUNIT_CLOCK_GATE_DISABLE | ++ SVSMUNIT_CLOCK_GATE_DISABLE); ++ } ++ ++ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); ++ ++ /* ++ * According to the spec the following bits should be set in ++ * order to enable memory self-refresh ++ * The bit 22/21 of 0x42004 ++ * The bit 5 of 0x42020 ++ * The bit 15 of 0x45000 ++ */ ++ if (IS_IRONLAKE(dev)) { ++ I915_WRITE(ILK_DISPLAY_CHICKEN2, ++ (I915_READ(ILK_DISPLAY_CHICKEN2) | ++ ILK_DPARB_GATE | ILK_VSDPFD_FULL)); ++ I915_WRITE(ILK_DSPCLK_GATE, ++ (I915_READ(ILK_DSPCLK_GATE) | ++ ILK_DPARB_CLK_GATE)); ++ I915_WRITE(DISP_ARB_CTL, ++ (I915_READ(DISP_ARB_CTL) | ++ DISP_FBC_WM_DIS)); ++ } + return; + } else if (IS_G4X(dev)) { + uint32_t dspclk_gate; +@@ -4642,14 +5184,14 @@ void intel_init_clock_gating(struct drm_device *dev) + struct drm_i915_gem_object *obj_priv = NULL; + + if (dev_priv->pwrctx) { +- obj_priv = dev_priv->pwrctx->driver_private; ++ obj_priv = to_intel_bo(dev_priv->pwrctx); + } else { + struct drm_gem_object *pwrctx; + + pwrctx = intel_alloc_power_context(dev); + if (pwrctx) { + dev_priv->pwrctx = pwrctx; +- obj_priv = pwrctx->driver_private; ++ obj_priv = to_intel_bo(pwrctx); + } + } + +@@ -4667,7 +5209,7 @@ static void intel_init_display(struct drm_device *dev) + struct drm_i915_private *dev_priv = dev->dev_private; + + /* We always want a DPMS function */ +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + dev_priv->display.dpms = ironlake_crtc_dpms; + else + dev_priv->display.dpms = i9xx_crtc_dpms; +@@ -4678,7 +5220,7 @@ static void intel_init_display(struct drm_device *dev) + dev_priv->display.fbc_enabled = g4x_fbc_enabled; + dev_priv->display.enable_fbc = g4x_enable_fbc; + dev_priv->display.disable_fbc = g4x_disable_fbc; +- } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) { ++ } else if (IS_I965GM(dev)) { + dev_priv->display.fbc_enabled = i8xx_fbc_enabled; + dev_priv->display.enable_fbc = i8xx_enable_fbc; + dev_priv->display.disable_fbc = i8xx_disable_fbc; +@@ -4710,23 +5252,46 @@ static void intel_init_display(struct drm_device *dev) + i830_get_display_clock_speed; + + /* For FIFO watermark updates */ +- if (IS_IRONLAKE(dev)) +- dev_priv->display.update_wm = NULL; +- else if (IS_G4X(dev)) ++ if (HAS_PCH_SPLIT(dev)) { ++ if (IS_IRONLAKE(dev)) { ++ if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) ++ dev_priv->display.update_wm = ironlake_update_wm; ++ else { ++ DRM_DEBUG_KMS("Failed to get proper latency. 
" ++ "Disable CxSR\n"); ++ dev_priv->display.update_wm = NULL; ++ } ++ } else ++ dev_priv->display.update_wm = NULL; ++ } else if (IS_PINEVIEW(dev)) { ++ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), ++ dev_priv->fsb_freq, ++ dev_priv->mem_freq)) { ++ DRM_INFO("failed to find known CxSR latency " ++ "(found fsb freq %d, mem freq %d), " ++ "disabling CxSR\n", ++ dev_priv->fsb_freq, dev_priv->mem_freq); ++ /* Disable CxSR and never update its watermark again */ ++ pineview_disable_cxsr(dev); ++ dev_priv->display.update_wm = NULL; ++ } else ++ dev_priv->display.update_wm = pineview_update_wm; ++ } else if (IS_G4X(dev)) + dev_priv->display.update_wm = g4x_update_wm; + else if (IS_I965G(dev)) + dev_priv->display.update_wm = i965_update_wm; +- else if (IS_I9XX(dev) || IS_MOBILE(dev)) { ++ else if (IS_I9XX(dev)) { + dev_priv->display.update_wm = i9xx_update_wm; + dev_priv->display.get_fifo_size = i9xx_get_fifo_size; ++ } else if (IS_I85X(dev)) { ++ dev_priv->display.update_wm = i9xx_update_wm; ++ dev_priv->display.get_fifo_size = i85x_get_fifo_size; + } else { +- if (IS_I85X(dev)) +- dev_priv->display.get_fifo_size = i85x_get_fifo_size; +- else if (IS_845G(dev)) ++ dev_priv->display.update_wm = i830_update_wm; ++ if (IS_845G(dev)) + dev_priv->display.get_fifo_size = i845_get_fifo_size; + else + dev_priv->display.get_fifo_size = i830_get_fifo_size; +- dev_priv->display.update_wm = i830_update_wm; + } + } + +@@ -4769,11 +5334,6 @@ void intel_modeset_init(struct drm_device *dev) + DRM_DEBUG_KMS("%d display pipe%s available.\n", + num_pipe, num_pipe > 1 ? "s" : ""); + +- if (IS_I85X(dev)) +- pci_read_config_word(dev->pdev, HPLLCC, &dev_priv->orig_clock); +- else if (IS_I9XX(dev) || IS_G4X(dev)) +- pci_read_config_word(dev->pdev, GCFGC, &dev_priv->orig_clock); +- + for (i = 0; i < num_pipe; i++) { + intel_crtc_init(dev, i); + } +@@ -4782,18 +5342,14 @@ void intel_modeset_init(struct drm_device *dev) + + intel_init_clock_gating(dev); + ++ if (IS_IRONLAKE_M(dev)) ++ ironlake_enable_drps(dev); ++ + INIT_WORK(&dev_priv->idle_work, intel_idle_update); + setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, + (unsigned long)dev); + + intel_setup_overlay(dev); +- +- if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev), +- dev_priv->fsb_freq, +- dev_priv->mem_freq)) +- DRM_INFO("failed to find known CxSR latency " +- "(found fsb freq %d, mem freq %d), disabling CxSR\n", +- dev_priv->fsb_freq, dev_priv->mem_freq); + } + + void intel_modeset_cleanup(struct drm_device *dev) +@@ -4822,27 +5378,45 @@ void intel_modeset_cleanup(struct drm_device *dev) + if (dev_priv->pwrctx) { + struct drm_i915_gem_object *obj_priv; + +- obj_priv = dev_priv->pwrctx->driver_private; ++ obj_priv = to_intel_bo(dev_priv->pwrctx); + I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); + I915_READ(PWRCTXA); + i915_gem_object_unpin(dev_priv->pwrctx); + drm_gem_object_unreference(dev_priv->pwrctx); + } + ++ if (IS_IRONLAKE_M(dev)) ++ ironlake_disable_drps(dev); ++ + mutex_unlock(&dev->struct_mutex); + + drm_mode_config_cleanup(dev); + } + + +-/* current intel driver doesn't take advantage of encoders +- always give back the encoder for the connector +-*/ +-struct drm_encoder *intel_best_encoder(struct drm_connector *connector) ++/* ++ * Return which encoder is currently attached for connector. 
++ */ ++struct drm_encoder *intel_attached_encoder (struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); ++ struct drm_mode_object *obj; ++ struct drm_encoder *encoder; ++ int i; + +- return &intel_output->enc; ++ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { ++ if (connector->encoder_ids[i] == 0) ++ break; ++ ++ obj = drm_mode_object_find(connector->dev, ++ connector->encoder_ids[i], ++ DRM_MODE_OBJECT_ENCODER); ++ if (!obj) ++ continue; ++ ++ encoder = obj_to_encoder(obj); ++ return encoder; ++ } ++ return NULL; + } + + /* +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c +--- a/drivers/gpu/drm/i915/intel_dp.c ++++ b/drivers/gpu/drm/i915/intel_dp.c +@@ -47,30 +47,28 @@ struct intel_dp_priv { + uint32_t output_reg; + uint32_t DP; + uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; +- uint32_t save_DP; +- uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE]; + bool has_audio; + int dpms_mode; + uint8_t link_bw; + uint8_t lane_count; + uint8_t dpcd[4]; +- struct intel_output *intel_output; ++ struct intel_encoder *intel_encoder; + struct i2c_adapter adapter; + struct i2c_algo_dp_aux_data algo; + }; + + static void +-intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, ++intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, + uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]); + + static void +-intel_dp_link_down(struct intel_output *intel_output, uint32_t DP); ++intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP); + + void +-intel_edp_link_config (struct intel_output *intel_output, ++intel_edp_link_config (struct intel_encoder *intel_encoder, + int *lane_num, int *link_bw) + { +- struct intel_dp_priv *dp_priv = intel_output->dev_priv; ++ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + + *lane_num = dp_priv->lane_count; + if (dp_priv->link_bw == DP_LINK_BW_1_62) +@@ -80,9 +78,9 @@ intel_edp_link_config (struct intel_output *intel_output, + } + + static int +-intel_dp_max_lane_count(struct intel_output *intel_output) ++intel_dp_max_lane_count(struct intel_encoder *intel_encoder) + { +- struct intel_dp_priv *dp_priv = intel_output->dev_priv; ++ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + int max_lane_count = 4; + + if (dp_priv->dpcd[0] >= 0x11) { +@@ -98,9 +96,9 @@ intel_dp_max_lane_count(struct intel_output *intel_output) + } + + static int +-intel_dp_max_link_bw(struct intel_output *intel_output) ++intel_dp_max_link_bw(struct intel_encoder *intel_encoder) + { +- struct intel_dp_priv *dp_priv = intel_output->dev_priv; ++ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + int max_link_bw = dp_priv->dpcd[1]; + + switch (max_link_bw) { +@@ -126,11 +124,11 @@ intel_dp_link_clock(uint8_t link_bw) + /* I think this is a fiction */ + static int + intel_dp_link_required(struct drm_device *dev, +- struct intel_output *intel_output, int pixel_clock) ++ struct intel_encoder *intel_encoder, int pixel_clock) + { + struct drm_i915_private *dev_priv = dev->dev_private; + +- if (IS_eDP(intel_output)) ++ if (IS_eDP(intel_encoder)) + return (pixel_clock * dev_priv->edp_bpp) / 8; + else + return pixel_clock * 3; +@@ -140,11 +138,12 @@ static int + intel_dp_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { +- struct intel_output *intel_output = to_intel_output(connector); +- int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); +- int max_lanes = 
intel_dp_max_lane_count(intel_output); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); ++ int max_lanes = intel_dp_max_lane_count(intel_encoder); + +- if (intel_dp_link_required(connector->dev, intel_output, mode->clock) ++ if (intel_dp_link_required(connector->dev, intel_encoder, mode->clock) + > max_link_clock * max_lanes) + return MODE_CLOCK_HIGH; + +@@ -208,13 +207,13 @@ intel_hrawclk(struct drm_device *dev) + } + + static int +-intel_dp_aux_ch(struct intel_output *intel_output, ++intel_dp_aux_ch(struct intel_encoder *intel_encoder, + uint8_t *send, int send_bytes, + uint8_t *recv, int recv_size) + { +- struct intel_dp_priv *dp_priv = intel_output->dev_priv; ++ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + uint32_t output_reg = dp_priv->output_reg; +- struct drm_device *dev = intel_output->base.dev; ++ struct drm_device *dev = intel_encoder->enc.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t ch_ctl = output_reg + 0x10; + uint32_t ch_data = ch_ctl + 4; +@@ -223,19 +222,27 @@ intel_dp_aux_ch(struct intel_output *intel_output, + uint32_t ctl; + uint32_t status; + uint32_t aux_clock_divider; +- int try; ++ int try, precharge; + + /* The clock divider is based off the hrawclk, + * and would like to run at 2MHz. So, take the + * hrawclk value and divide by 2 and use that + */ +- if (IS_eDP(intel_output)) +- aux_clock_divider = 225; /* eDP input clock at 450Mhz */ +- else if (IS_IRONLAKE(dev)) ++ if (IS_eDP(intel_encoder)) { ++ if (IS_GEN6(dev)) ++ aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ ++ else ++ aux_clock_divider = 225; /* eDP input clock at 450Mhz */ ++ } else if (HAS_PCH_SPLIT(dev)) + aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */ + else + aux_clock_divider = intel_hrawclk(dev) / 2; + ++ if (IS_GEN6(dev)) ++ precharge = 3; ++ else ++ precharge = 5; ++ + /* Must try at least 3 times according to DP spec */ + for (try = 0; try < 5; try++) { + /* Load the send data into the aux channel data registers */ +@@ -248,7 +255,7 @@ intel_dp_aux_ch(struct intel_output *intel_output, + ctl = (DP_AUX_CH_CTL_SEND_BUSY | + DP_AUX_CH_CTL_TIME_OUT_400us | + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | +- (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | ++ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | + (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | +@@ -312,7 +319,7 @@ intel_dp_aux_ch(struct intel_output *intel_output, + + /* Write data to the aux channel in native mode */ + static int +-intel_dp_aux_native_write(struct intel_output *intel_output, ++intel_dp_aux_native_write(struct intel_encoder *intel_encoder, + uint16_t address, uint8_t *send, int send_bytes) + { + int ret; +@@ -329,7 +336,7 @@ intel_dp_aux_native_write(struct intel_output *intel_output, + memcpy(&msg[4], send, send_bytes); + msg_bytes = send_bytes + 4; + for (;;) { +- ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1); ++ ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1); + if (ret < 0) + return ret; + if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) +@@ -344,15 +351,15 @@ intel_dp_aux_native_write(struct intel_output *intel_output, + + /* Write a single byte to the aux channel in native mode */ + static int +-intel_dp_aux_native_write_1(struct intel_output *intel_output, 
++intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder, + uint16_t address, uint8_t byte) + { +- return intel_dp_aux_native_write(intel_output, address, &byte, 1); ++ return intel_dp_aux_native_write(intel_encoder, address, &byte, 1); + } + + /* read bytes from a native aux channel */ + static int +-intel_dp_aux_native_read(struct intel_output *intel_output, ++intel_dp_aux_native_read(struct intel_encoder *intel_encoder, + uint16_t address, uint8_t *recv, int recv_bytes) + { + uint8_t msg[4]; +@@ -371,7 +378,7 @@ intel_dp_aux_native_read(struct intel_output *intel_output, + reply_bytes = recv_bytes + 1; + + for (;;) { +- ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, ++ ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, + reply, reply_bytes); + if (ret == 0) + return -EPROTO; +@@ -397,7 +404,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, + struct intel_dp_priv *dp_priv = container_of(adapter, + struct intel_dp_priv, + adapter); +- struct intel_output *intel_output = dp_priv->intel_output; ++ struct intel_encoder *intel_encoder = dp_priv->intel_encoder; + uint16_t address = algo_data->address; + uint8_t msg[5]; + uint8_t reply[2]; +@@ -436,7 +443,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, + } + + for (;;) { +- ret = intel_dp_aux_ch(intel_output, ++ ret = intel_dp_aux_ch(intel_encoder, + msg, msg_bytes, + reply, reply_bytes); + if (ret < 0) { +@@ -464,9 +471,10 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, + } + + static int +-intel_dp_i2c_init(struct intel_output *intel_output, const char *name) ++intel_dp_i2c_init(struct intel_encoder *intel_encoder, ++ struct intel_connector *intel_connector, const char *name) + { +- struct intel_dp_priv *dp_priv = intel_output->dev_priv; ++ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + + DRM_DEBUG_KMS("i2c_init %s\n", name); + dp_priv->algo.running = false; +@@ -479,7 +487,7 @@ intel_dp_i2c_init(struct intel_output *intel_output, const char *name) + strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); + dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; + dp_priv->adapter.algo_data = &dp_priv->algo; +- dp_priv->adapter.dev.parent = &intel_output->base.kdev; ++ dp_priv->adapter.dev.parent = &intel_connector->base.kdev; + + return i2c_dp_aux_add_bus(&dp_priv->adapter); + } +@@ -488,18 +496,18 @@ static bool + intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) + { +- struct intel_output *intel_output = enc_to_intel_output(encoder); +- struct intel_dp_priv *dp_priv = intel_output->dev_priv; ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + int lane_count, clock; +- int max_lane_count = intel_dp_max_lane_count(intel_output); +- int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0; ++ int max_lane_count = intel_dp_max_lane_count(intel_encoder); ++ int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 
1 : 0; + static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; + + for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { + for (clock = 0; clock <= max_clock; clock++) { + int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; + +- if (intel_dp_link_required(encoder->dev, intel_output, mode->clock) ++ if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock) + <= link_avail) { + dp_priv->link_bw = bws[clock]; + dp_priv->lane_count = lane_count; +@@ -554,23 +562,26 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, + { + struct drm_device *dev = crtc->dev; + struct drm_mode_config *mode_config = &dev->mode_config; +- struct drm_connector *connector; ++ struct drm_encoder *encoder; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int lane_count = 4; + struct intel_dp_m_n m_n; + + /* +- * Find the lane count in the intel_output private ++ * Find the lane count in the intel_encoder private + */ +- list_for_each_entry(connector, &mode_config->connector_list, head) { +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_dp_priv *dp_priv = intel_output->dev_priv; ++ list_for_each_entry(encoder, &mode_config->encoder_list, head) { ++ struct intel_encoder *intel_encoder; ++ struct intel_dp_priv *dp_priv; + +- if (!connector->encoder || connector->encoder->crtc != crtc) ++ if (!encoder || encoder->crtc != crtc) + continue; + +- if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) { ++ intel_encoder = enc_to_intel_encoder(encoder); ++ dp_priv = intel_encoder->dev_priv; ++ ++ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { + lane_count = dp_priv->lane_count; + break; + } +@@ -584,7 +595,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, + intel_dp_compute_m_n(3, lane_count, + mode->clock, adjusted_mode->clock, &m_n); + +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + if (intel_crtc->pipe == 0) { + I915_WRITE(TRANSA_DATA_M1, + ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | +@@ -625,16 +636,24 @@ static void + intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) + { +- struct intel_output *intel_output = enc_to_intel_output(encoder); +- struct intel_dp_priv *dp_priv = intel_output->dev_priv; +- struct drm_crtc *crtc = intel_output->enc.crtc; ++ struct drm_device *dev = encoder->dev; ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; ++ struct drm_crtc *crtc = intel_encoder->enc.crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + +- dp_priv->DP = (DP_LINK_TRAIN_OFF | +- DP_VOLTAGE_0_4 | +- DP_PRE_EMPHASIS_0 | +- DP_SYNC_VS_HIGH | +- DP_SYNC_HS_HIGH); ++ dp_priv->DP = (DP_VOLTAGE_0_4 | ++ DP_PRE_EMPHASIS_0); ++ ++ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ++ dp_priv->DP |= DP_SYNC_HS_HIGH; ++ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ++ dp_priv->DP |= DP_SYNC_VS_HIGH; ++ ++ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) ++ dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT; ++ else ++ dp_priv->DP |= DP_LINK_TRAIN_OFF; + + switch (dp_priv->lane_count) { + case 1: +@@ -663,10 +682,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + dp_priv->DP |= DP_ENHANCED_FRAMING; + } + +- if (intel_crtc->pipe == 1) ++ /* CPT DP's pipe select is decided in TRANS_DP_CTL */ ++ if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) + dp_priv->DP 
|= DP_PIPEB_SELECT; + +- if (IS_eDP(intel_output)) { ++ if (IS_eDP(intel_encoder)) { + /* don't miss out required setting for eDP */ + dp_priv->DP |= DP_PLL_ENABLE; + if (adjusted_mode->clock < 200000) +@@ -701,22 +721,22 @@ static void ironlake_edp_backlight_off (struct drm_device *dev) + static void + intel_dp_dpms(struct drm_encoder *encoder, int mode) + { +- struct intel_output *intel_output = enc_to_intel_output(encoder); +- struct intel_dp_priv *dp_priv = intel_output->dev_priv; +- struct drm_device *dev = intel_output->base.dev; ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; ++ struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dp_reg = I915_READ(dp_priv->output_reg); + + if (mode != DRM_MODE_DPMS_ON) { + if (dp_reg & DP_PORT_EN) { +- intel_dp_link_down(intel_output, dp_priv->DP); +- if (IS_eDP(intel_output)) ++ intel_dp_link_down(intel_encoder, dp_priv->DP); ++ if (IS_eDP(intel_encoder)) + ironlake_edp_backlight_off(dev); + } + } else { + if (!(dp_reg & DP_PORT_EN)) { +- intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); +- if (IS_eDP(intel_output)) ++ intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); ++ if (IS_eDP(intel_encoder)) + ironlake_edp_backlight_on(dev); + } + } +@@ -728,12 +748,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) + * link status information + */ + static bool +-intel_dp_get_link_status(struct intel_output *intel_output, ++intel_dp_get_link_status(struct intel_encoder *intel_encoder, + uint8_t link_status[DP_LINK_STATUS_SIZE]) + { + int ret; + +- ret = intel_dp_aux_native_read(intel_output, ++ ret = intel_dp_aux_native_read(intel_encoder, + DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret != DP_LINK_STATUS_SIZE) +@@ -748,20 +768,6 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], + return link_status[r - DP_LANE0_1_STATUS]; + } + +-static void +-intel_dp_save(struct drm_connector *connector) +-{ +- struct intel_output *intel_output = to_intel_output(connector); +- struct drm_device *dev = intel_output->base.dev; +- struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_dp_priv *dp_priv = intel_output->dev_priv; +- +- dp_priv->save_DP = I915_READ(dp_priv->output_reg); +- intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET, +- dp_priv->save_link_configuration, +- sizeof (dp_priv->save_link_configuration)); +-} +- + static uint8_t + intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane) +@@ -824,7 +830,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing) + } + + static void +-intel_get_adjust_train(struct intel_output *intel_output, ++intel_get_adjust_train(struct intel_encoder *intel_encoder, + uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane_count, + uint8_t train_set[4]) +@@ -891,6 +897,25 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count) + return signal_levels; + } + ++/* Gen6's DP voltage swing and pre-emphasis control */ ++static uint32_t ++intel_gen6_edp_signal_levels(uint8_t train_set) ++{ ++ switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) { ++ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: ++ return EDP_LINK_TRAIN_400MV_0DB_SNB_B; ++ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: ++ return EDP_LINK_TRAIN_400MV_6DB_SNB_B; ++ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: ++ return 
EDP_LINK_TRAIN_600MV_3_5DB_SNB_B; ++ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: ++ return EDP_LINK_TRAIN_800MV_0DB_SNB_B; ++ default: ++ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n"); ++ return EDP_LINK_TRAIN_400MV_0DB_SNB_B; ++ } ++} ++ + static uint8_t + intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane) +@@ -941,15 +966,15 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) + } + + static bool +-intel_dp_set_link_train(struct intel_output *intel_output, ++intel_dp_set_link_train(struct intel_encoder *intel_encoder, + uint32_t dp_reg_value, + uint8_t dp_train_pat, + uint8_t train_set[4], + bool first) + { +- struct drm_device *dev = intel_output->base.dev; ++ struct drm_device *dev = intel_encoder->enc.dev; + struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_dp_priv *dp_priv = intel_output->dev_priv; ++ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + int ret; + + I915_WRITE(dp_priv->output_reg, dp_reg_value); +@@ -957,11 +982,11 @@ intel_dp_set_link_train(struct intel_output *intel_output, + if (first) + intel_wait_for_vblank(dev); + +- intel_dp_aux_native_write_1(intel_output, ++ intel_dp_aux_native_write_1(intel_encoder, + DP_TRAINING_PATTERN_SET, + dp_train_pat); + +- ret = intel_dp_aux_native_write(intel_output, ++ ret = intel_dp_aux_native_write(intel_encoder, + DP_TRAINING_LANE0_SET, train_set, 4); + if (ret != 4) + return false; +@@ -970,12 +995,12 @@ intel_dp_set_link_train(struct intel_output *intel_output, + } + + static void +-intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, ++intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, + uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) + { +- struct drm_device *dev = intel_output->base.dev; ++ struct drm_device *dev = intel_encoder->enc.dev; + struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_dp_priv *dp_priv = intel_output->dev_priv; ++ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + uint8_t train_set[4]; + uint8_t link_status[DP_LINK_STATUS_SIZE]; + int i; +@@ -984,30 +1009,45 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, + bool channel_eq = false; + bool first = true; + int tries; ++ u32 reg; + + /* Write the link configuration data */ +- intel_dp_aux_native_write(intel_output, 0x100, ++ intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET, + link_configuration, DP_LINK_CONFIGURATION_SIZE); + + DP |= DP_PORT_EN; +- DP &= ~DP_LINK_TRAIN_MASK; ++ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) ++ DP &= ~DP_LINK_TRAIN_MASK_CPT; ++ else ++ DP &= ~DP_LINK_TRAIN_MASK; + memset(train_set, 0, 4); + voltage = 0xff; + tries = 0; + clock_recovery = false; + for (;;) { + /* Use train_set[0] to set the voltage and pre emphasis values */ +- uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); +- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; ++ uint32_t signal_levels; ++ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) { ++ signal_levels = intel_gen6_edp_signal_levels(train_set[0]); ++ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; ++ } else { ++ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); ++ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; ++ } + +- if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1, ++ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) ++ reg = DP | DP_LINK_TRAIN_PAT_1_CPT; ++ else ++ 
reg = DP | DP_LINK_TRAIN_PAT_1; ++ ++ if (!intel_dp_set_link_train(intel_encoder, reg, + DP_TRAINING_PATTERN_1, train_set, first)) + break; + first = false; + /* Set training pattern 1 */ + + udelay(100); +- if (!intel_dp_get_link_status(intel_output, link_status)) ++ if (!intel_dp_get_link_status(intel_encoder, link_status)) + break; + + if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) { +@@ -1032,7 +1072,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, + voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + + /* Compute new train_set as requested by target */ +- intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); ++ intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set); + } + + /* channel equalization */ +@@ -1040,17 +1080,29 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, + channel_eq = false; + for (;;) { + /* Use train_set[0] to set the voltage and pre emphasis values */ +- uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); +- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; ++ uint32_t signal_levels; ++ ++ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) { ++ signal_levels = intel_gen6_edp_signal_levels(train_set[0]); ++ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; ++ } else { ++ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); ++ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; ++ } ++ ++ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) ++ reg = DP | DP_LINK_TRAIN_PAT_2_CPT; ++ else ++ reg = DP | DP_LINK_TRAIN_PAT_2; + + /* channel eq pattern */ +- if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2, ++ if (!intel_dp_set_link_train(intel_encoder, reg, + DP_TRAINING_PATTERN_2, train_set, + false)) + break; + + udelay(400); +- if (!intel_dp_get_link_status(intel_output, link_status)) ++ if (!intel_dp_get_link_status(intel_encoder, link_status)) + break; + + if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) { +@@ -1063,56 +1115,55 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, + break; + + /* Compute new train_set as requested by target */ +- intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); ++ intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set); + ++tries; + } + +- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); ++ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) ++ reg = DP | DP_LINK_TRAIN_OFF_CPT; ++ else ++ reg = DP | DP_LINK_TRAIN_OFF; ++ ++ I915_WRITE(dp_priv->output_reg, reg); + POSTING_READ(dp_priv->output_reg); +- intel_dp_aux_native_write_1(intel_output, ++ intel_dp_aux_native_write_1(intel_encoder, + DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); + } + + static void +-intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) ++intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) + { +- struct drm_device *dev = intel_output->base.dev; ++ struct drm_device *dev = intel_encoder->enc.dev; + struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_dp_priv *dp_priv = intel_output->dev_priv; ++ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + + DRM_DEBUG_KMS("\n"); + +- if (IS_eDP(intel_output)) { ++ if (IS_eDP(intel_encoder)) { + DP &= ~DP_PLL_ENABLE; + I915_WRITE(dp_priv->output_reg, DP); + POSTING_READ(dp_priv->output_reg); + udelay(100); + } + +- DP &= ~DP_LINK_TRAIN_MASK; +- 
I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); +- POSTING_READ(dp_priv->output_reg); ++ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) { ++ DP &= ~DP_LINK_TRAIN_MASK_CPT; ++ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); ++ POSTING_READ(dp_priv->output_reg); ++ } else { ++ DP &= ~DP_LINK_TRAIN_MASK; ++ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); ++ POSTING_READ(dp_priv->output_reg); ++ } + + udelay(17000); + +- if (IS_eDP(intel_output)) ++ if (IS_eDP(intel_encoder)) + DP |= DP_LINK_TRAIN_OFF; + I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); + POSTING_READ(dp_priv->output_reg); + } + +-static void +-intel_dp_restore(struct drm_connector *connector) +-{ +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_dp_priv *dp_priv = intel_output->dev_priv; +- +- if (dp_priv->save_DP & DP_PORT_EN) +- intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration); +- else +- intel_dp_link_down(intel_output, dp_priv->save_DP); +-} +- + /* + * According to DP spec + * 5.1.2: +@@ -1123,32 +1174,33 @@ intel_dp_restore(struct drm_connector *connector) + */ + + static void +-intel_dp_check_link_status(struct intel_output *intel_output) ++intel_dp_check_link_status(struct intel_encoder *intel_encoder) + { +- struct intel_dp_priv *dp_priv = intel_output->dev_priv; ++ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + uint8_t link_status[DP_LINK_STATUS_SIZE]; + +- if (!intel_output->enc.crtc) ++ if (!intel_encoder->enc.crtc) + return; + +- if (!intel_dp_get_link_status(intel_output, link_status)) { +- intel_dp_link_down(intel_output, dp_priv->DP); ++ if (!intel_dp_get_link_status(intel_encoder, link_status)) { ++ intel_dp_link_down(intel_encoder, dp_priv->DP); + return; + } + + if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) +- intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); ++ intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); + } + + static enum drm_connector_status + ironlake_dp_detect(struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_dp_priv *dp_priv = intel_output->dev_priv; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + enum drm_connector_status status; + + status = connector_status_disconnected; +- if (intel_dp_aux_native_read(intel_output, ++ if (intel_dp_aux_native_read(intel_encoder, + 0x000, dp_priv->dpcd, + sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) + { +@@ -1167,28 +1219,19 @@ ironlake_dp_detect(struct drm_connector *connector) + static enum drm_connector_status + intel_dp_detect(struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); +- struct drm_device *dev = intel_output->base.dev; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct drm_device *dev = intel_encoder->enc.dev; + struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_dp_priv *dp_priv = intel_output->dev_priv; ++ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + uint32_t temp, bit; + enum drm_connector_status status; + + dp_priv->has_audio = false; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + return ironlake_dp_detect(connector); + +- temp = 
I915_READ(PORT_HOTPLUG_EN); +- +- I915_WRITE(PORT_HOTPLUG_EN, +- temp | +- DPB_HOTPLUG_INT_EN | +- DPC_HOTPLUG_INT_EN | +- DPD_HOTPLUG_INT_EN); +- +- POSTING_READ(PORT_HOTPLUG_EN); +- + switch (dp_priv->output_reg) { + case DP_B: + bit = DPB_HOTPLUG_INT_STATUS; +@@ -1209,7 +1252,7 @@ intel_dp_detect(struct drm_connector *connector) + return connector_status_disconnected; + + status = connector_status_disconnected; +- if (intel_dp_aux_native_read(intel_output, ++ if (intel_dp_aux_native_read(intel_encoder, + 0x000, dp_priv->dpcd, + sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) + { +@@ -1221,20 +1264,21 @@ intel_dp_detect(struct drm_connector *connector) + + static int intel_dp_get_modes(struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); +- struct drm_device *dev = intel_output->base.dev; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct drm_device *dev = intel_encoder->enc.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + /* We should parse the EDID data and find out if it has an audio sink + */ + +- ret = intel_ddc_get_modes(intel_output); ++ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + if (ret) + return ret; + + /* if eDP has no EDID, try to use fixed panel mode from VBT */ +- if (IS_eDP(intel_output)) { ++ if (IS_eDP(intel_encoder)) { + if (dev_priv->panel_fixed_mode != NULL) { + struct drm_display_mode *mode; + mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); +@@ -1248,13 +1292,9 @@ static int intel_dp_get_modes(struct drm_connector *connector) + static void + intel_dp_destroy (struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); +- +- if (intel_output->i2c_bus) +- intel_i2c_destroy(intel_output->i2c_bus); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); +- kfree(intel_output); ++ kfree(connector); + } + + static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { +@@ -1267,8 +1307,6 @@ static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { + + static const struct drm_connector_funcs intel_dp_connector_funcs = { + .dpms = drm_helper_connector_dpms, +- .save = intel_dp_save, +- .restore = intel_dp_restore, + .detect = intel_dp_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = intel_dp_destroy, +@@ -1277,12 +1315,17 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { + static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { + .get_modes = intel_dp_get_modes, + .mode_valid = intel_dp_mode_valid, +- .best_encoder = intel_best_encoder, ++ .best_encoder = intel_attached_encoder, + }; + + static void intel_dp_enc_destroy(struct drm_encoder *encoder) + { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ ++ if (intel_encoder->i2c_bus) ++ intel_i2c_destroy(intel_encoder->i2c_bus); + drm_encoder_cleanup(encoder); ++ kfree(intel_encoder); + } + + static const struct drm_encoder_funcs intel_dp_enc_funcs = { +@@ -1290,12 +1333,34 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = { + }; + + void +-intel_dp_hot_plug(struct intel_output *intel_output) ++intel_dp_hot_plug(struct intel_encoder *intel_encoder) + { +- struct intel_dp_priv *dp_priv = intel_output->dev_priv; ++ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + + if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) +- 
intel_dp_check_link_status(intel_output); ++ intel_dp_check_link_status(intel_encoder); ++} ++ ++/* Return which DP Port should be selected for Transcoder DP control */ ++int ++intel_trans_dp_port_sel (struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_mode_config *mode_config = &dev->mode_config; ++ struct drm_encoder *encoder; ++ struct intel_encoder *intel_encoder = NULL; ++ ++ list_for_each_entry(encoder, &mode_config->encoder_list, head) { ++ if (!encoder || encoder->crtc != crtc) ++ continue; ++ ++ intel_encoder = enc_to_intel_encoder(encoder); ++ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { ++ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; ++ return dp_priv->output_reg; ++ } ++ } ++ return -1; + } + + void +@@ -1303,53 +1368,60 @@ intel_dp_init(struct drm_device *dev, int output_reg) + { + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_connector *connector; +- struct intel_output *intel_output; ++ struct intel_encoder *intel_encoder; ++ struct intel_connector *intel_connector; + struct intel_dp_priv *dp_priv; + const char *name = NULL; + +- intel_output = kcalloc(sizeof(struct intel_output) + ++ intel_encoder = kcalloc(sizeof(struct intel_encoder) + + sizeof(struct intel_dp_priv), 1, GFP_KERNEL); +- if (!intel_output) ++ if (!intel_encoder) + return; + +- dp_priv = (struct intel_dp_priv *)(intel_output + 1); ++ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); ++ if (!intel_connector) { ++ kfree(intel_encoder); ++ return; ++ } ++ ++ dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); + +- connector = &intel_output->base; ++ connector = &intel_connector->base; + drm_connector_init(dev, connector, &intel_dp_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); + drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); + + if (output_reg == DP_A) +- intel_output->type = INTEL_OUTPUT_EDP; ++ intel_encoder->type = INTEL_OUTPUT_EDP; + else +- intel_output->type = INTEL_OUTPUT_DISPLAYPORT; ++ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + + if (output_reg == DP_B || output_reg == PCH_DP_B) +- intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); ++ intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); + else if (output_reg == DP_C || output_reg == PCH_DP_C) +- intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); ++ intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); + else if (output_reg == DP_D || output_reg == PCH_DP_D) +- intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); ++ intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); + +- if (IS_eDP(intel_output)) +- intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); ++ if (IS_eDP(intel_encoder)) ++ intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); + +- intel_output->crtc_mask = (1 << 0) | (1 << 1); ++ intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + connector->interlace_allowed = true; + connector->doublescan_allowed = 0; + +- dp_priv->intel_output = intel_output; ++ dp_priv->intel_encoder = intel_encoder; + dp_priv->output_reg = output_reg; + dp_priv->has_audio = false; + dp_priv->dpms_mode = DRM_MODE_DPMS_ON; +- intel_output->dev_priv = dp_priv; ++ intel_encoder->dev_priv = dp_priv; + +- drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs, ++ drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs, + DRM_MODE_ENCODER_TMDS); +- drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs); ++ drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs); + +- 
drm_mode_connector_attach_encoder(&intel_output->base, +- &intel_output->enc); ++ drm_mode_connector_attach_encoder(&intel_connector->base, ++ &intel_encoder->enc); + drm_sysfs_connector_add(connector); + + /* Set up the DDC bus. */ +@@ -1377,10 +1449,10 @@ intel_dp_init(struct drm_device *dev, int output_reg) + break; + } + +- intel_dp_i2c_init(intel_output, name); ++ intel_dp_i2c_init(intel_encoder, intel_connector, name); + +- intel_output->ddc_bus = &dp_priv->adapter; +- intel_output->hot_plug = intel_dp_hot_plug; ++ intel_encoder->ddc_bus = &dp_priv->adapter; ++ intel_encoder->hot_plug = intel_dp_hot_plug; + + if (output_reg == DP_A) { + /* initialize panel mode from VBT if available for eDP */ +diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h +--- a/drivers/gpu/drm/i915/intel_drv.h ++++ b/drivers/gpu/drm/i915/intel_drv.h +@@ -95,9 +95,7 @@ struct intel_framebuffer { + }; + + +-struct intel_output { +- struct drm_connector base; +- ++struct intel_encoder { + struct drm_encoder enc; + int type; + struct i2c_adapter *i2c_bus; +@@ -105,11 +103,16 @@ struct intel_output { + bool load_detect_temp; + bool needs_tv_clock; + void *dev_priv; +- void (*hot_plug)(struct intel_output *); ++ void (*hot_plug)(struct intel_encoder *); + int crtc_mask; + int clone_mask; + }; + ++struct intel_connector { ++ struct drm_connector base; ++ void *dev_priv; ++}; ++ + struct intel_crtc; + struct intel_overlay { + struct drm_device *dev; +@@ -149,18 +152,19 @@ struct intel_crtc { + bool lowfreq_avail; + struct intel_overlay *overlay; + struct intel_unpin_work *unpin_work; ++ int fdi_lanes; + }; + + #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) +-#define to_intel_output(x) container_of(x, struct intel_output, base) +-#define enc_to_intel_output(x) container_of(x, struct intel_output, enc) ++#define to_intel_connector(x) container_of(x, struct intel_connector, base) ++#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) + #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) + + struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, + const char *name); + void intel_i2c_destroy(struct i2c_adapter *adapter); +-int intel_ddc_get_modes(struct intel_output *intel_output); +-extern bool intel_ddc_probe(struct intel_output *intel_output); ++int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); ++extern bool intel_ddc_probe(struct intel_encoder *intel_encoder); + void intel_i2c_quirk_set(struct drm_device *dev, bool enable); + void intel_i2c_reset_gmbus(struct drm_device *dev); + +@@ -175,7 +179,7 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg); + void + intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +-extern void intel_edp_link_config (struct intel_output *, int *, int *); ++extern void intel_edp_link_config (struct intel_encoder *, int *, int *); + + + extern int intel_panel_fitter_pipe (struct drm_device *dev); +@@ -183,7 +187,7 @@ extern void intel_crtc_load_lut(struct drm_crtc *crtc); + extern void intel_encoder_prepare (struct drm_encoder *encoder); + extern void intel_encoder_commit (struct drm_encoder *encoder); + +-extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); ++extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector); + + extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, + struct drm_crtc *crtc); 
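The intel_drv.h hunk above is the heart of this refactor: the old struct intel_output, which embedded the drm_connector, is split into struct intel_encoder (wrapping the drm_encoder and carrying dev_priv, the DDC bus and the hot_plug hook) and a thin struct intel_connector, with intel_best_encoder() giving way to intel_attached_encoder(). Every _detect/_get_modes/_mode_valid conversion in the surrounding hunks performs the same three-step lookup. A minimal sketch of that pattern, assuming only the declarations shown above; example_get_modes() and example_priv are illustrative names and are not part of the patch:

#include "drmP.h"
#include "intel_drv.h"

/* Stand-in for the per-output private data (dp_priv, hdmi_priv, dvo, ...). */
struct example_priv {
	struct i2c_adapter *ddc_bus;
};

static int example_get_modes(struct drm_connector *connector)
{
	/* Before the split this was: to_intel_output(connector)->dev_priv. */
	struct drm_encoder *encoder = intel_attached_encoder(connector);
	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
	struct example_priv *priv = intel_encoder->dev_priv;

	/* Device state (DDC bus, link config, ...) now hangs off the encoder. */
	return intel_ddc_get_modes(connector, priv->ddc_bus);
}

Because the connector side keeps only the thin struct intel_connector, the teardown paths in these hunks free the two objects separately: kfree(connector) in intel_dp_destroy/intel_hdmi_destroy/intel_dvo_destroy, and kfree(intel_encoder) in the new *_enc_destroy encoder hooks.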
+@@ -191,10 +195,12 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, + struct drm_file *file_priv); + extern void intel_wait_for_vblank(struct drm_device *dev); + extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); +-extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, ++extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, ++ struct drm_connector *connector, + struct drm_display_mode *mode, + int *dpms_mode); +-extern void intel_release_load_detect_pipe(struct intel_output *intel_output, ++extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, ++ struct drm_connector *connector, + int dpms_mode); + + extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); +@@ -209,6 +215,8 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, + extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, int regno); + extern void intel_init_clock_gating(struct drm_device *dev); ++extern void ironlake_enable_drps(struct drm_device *dev); ++extern void ironlake_disable_drps(struct drm_device *dev); + + extern int intel_framebuffer_create(struct drm_device *dev, + struct drm_mode_fb_cmd *mode_cmd, +diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c +--- a/drivers/gpu/drm/i915/intel_dvo.c ++++ b/drivers/gpu/drm/i915/intel_dvo.c +@@ -79,8 +79,8 @@ static struct intel_dvo_device intel_dvo_devices[] = { + static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) + { + struct drm_i915_private *dev_priv = encoder->dev->dev_private; +- struct intel_output *intel_output = enc_to_intel_output(encoder); +- struct intel_dvo_device *dvo = intel_output->dev_priv; ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_dvo_device *dvo = intel_encoder->dev_priv; + u32 dvo_reg = dvo->dvo_reg; + u32 temp = I915_READ(dvo_reg); + +@@ -95,40 +95,12 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) + } + } + +-static void intel_dvo_save(struct drm_connector *connector) +-{ +- struct drm_i915_private *dev_priv = connector->dev->dev_private; +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_dvo_device *dvo = intel_output->dev_priv; +- +- /* Each output should probably just save the registers it touches, +- * but for now, use more overkill. 
+- */ +- dev_priv->saveDVOA = I915_READ(DVOA); +- dev_priv->saveDVOB = I915_READ(DVOB); +- dev_priv->saveDVOC = I915_READ(DVOC); +- +- dvo->dev_ops->save(dvo); +-} +- +-static void intel_dvo_restore(struct drm_connector *connector) +-{ +- struct drm_i915_private *dev_priv = connector->dev->dev_private; +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_dvo_device *dvo = intel_output->dev_priv; +- +- dvo->dev_ops->restore(dvo); +- +- I915_WRITE(DVOA, dev_priv->saveDVOA); +- I915_WRITE(DVOB, dev_priv->saveDVOB); +- I915_WRITE(DVOC, dev_priv->saveDVOC); +-} +- + static int intel_dvo_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_dvo_device *dvo = intel_output->dev_priv; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_dvo_device *dvo = intel_encoder->dev_priv; + + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; +@@ -149,8 +121,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) + { +- struct intel_output *intel_output = enc_to_intel_output(encoder); +- struct intel_dvo_device *dvo = intel_output->dev_priv; ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_dvo_device *dvo = intel_encoder->dev_priv; + + /* If we have timings from the BIOS for the panel, put them in + * to the adjusted mode. The CRTC will be set up for this mode, +@@ -185,8 +157,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); +- struct intel_output *intel_output = enc_to_intel_output(encoder); +- struct intel_dvo_device *dvo = intel_output->dev_priv; ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_dvo_device *dvo = intel_encoder->dev_priv; + int pipe = intel_crtc->pipe; + u32 dvo_val; + u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg; +@@ -240,23 +212,25 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, + */ + static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_dvo_device *dvo = intel_output->dev_priv; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_dvo_device *dvo = intel_encoder->dev_priv; + + return dvo->dev_ops->detect(dvo); + } + + static int intel_dvo_get_modes(struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_dvo_device *dvo = intel_output->dev_priv; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_dvo_device *dvo = intel_encoder->dev_priv; + + /* We should probably have an i2c driver get_modes function for those + * devices which will have a fixed set of modes determined by the chip + * (TV-out, for example), but for now with just TMDS and LVDS, + * that's not the case. 
+ */ +- intel_ddc_get_modes(intel_output); ++ intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + if (!list_empty(&connector->probed_modes)) + return 1; + +@@ -274,39 +248,11 @@ static int intel_dvo_get_modes(struct drm_connector *connector) + + static void intel_dvo_destroy (struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_dvo_device *dvo = intel_output->dev_priv; +- +- if (dvo) { +- if (dvo->dev_ops->destroy) +- dvo->dev_ops->destroy(dvo); +- if (dvo->panel_fixed_mode) +- kfree(dvo->panel_fixed_mode); +- /* no need, in i830_dvoices[] now */ +- //kfree(dvo); +- } +- if (intel_output->i2c_bus) +- intel_i2c_destroy(intel_output->i2c_bus); +- if (intel_output->ddc_bus) +- intel_i2c_destroy(intel_output->ddc_bus); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); +- kfree(intel_output); ++ kfree(connector); + } + +-#ifdef RANDR_GET_CRTC_INTERFACE +-static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_dvo_device *dvo = intel_output->dev_priv; +- int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT); +- +- return intel_pipe_to_crtc(pScrn, pipe); +-} +-#endif +- + static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { + .dpms = intel_dvo_dpms, + .mode_fixup = intel_dvo_mode_fixup, +@@ -317,8 +263,6 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { + + static const struct drm_connector_funcs intel_dvo_connector_funcs = { + .dpms = drm_helper_connector_dpms, +- .save = intel_dvo_save, +- .restore = intel_dvo_restore, + .detect = intel_dvo_detect, + .destroy = intel_dvo_destroy, + .fill_modes = drm_helper_probe_single_connector_modes, +@@ -327,12 +271,26 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = { + static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { + .mode_valid = intel_dvo_mode_valid, + .get_modes = intel_dvo_get_modes, +- .best_encoder = intel_best_encoder, ++ .best_encoder = intel_attached_encoder, + }; + + static void intel_dvo_enc_destroy(struct drm_encoder *encoder) + { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_dvo_device *dvo = intel_encoder->dev_priv; ++ ++ if (dvo) { ++ if (dvo->dev_ops->destroy) ++ dvo->dev_ops->destroy(dvo); ++ if (dvo->panel_fixed_mode) ++ kfree(dvo->panel_fixed_mode); ++ } ++ if (intel_encoder->i2c_bus) ++ intel_i2c_destroy(intel_encoder->i2c_bus); ++ if (intel_encoder->ddc_bus) ++ intel_i2c_destroy(intel_encoder->ddc_bus); + drm_encoder_cleanup(encoder); ++ kfree(intel_encoder); + } + + static const struct drm_encoder_funcs intel_dvo_enc_funcs = { +@@ -351,8 +309,9 @@ intel_dvo_get_current_mode (struct drm_connector *connector) + { + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_dvo_device *dvo = intel_output->dev_priv; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_dvo_device *dvo = intel_encoder->dev_priv; + uint32_t dvo_reg = dvo->dvo_reg; + uint32_t dvo_val = I915_READ(dvo_reg); + struct drm_display_mode *mode = NULL; +@@ -382,24 +341,31 @@ 
intel_dvo_get_current_mode (struct drm_connector *connector) + + void intel_dvo_init(struct drm_device *dev) + { +- struct intel_output *intel_output; ++ struct intel_encoder *intel_encoder; ++ struct intel_connector *intel_connector; + struct intel_dvo_device *dvo; + struct i2c_adapter *i2cbus = NULL; + int ret = 0; + int i; + int encoder_type = DRM_MODE_ENCODER_NONE; +- intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); +- if (!intel_output) ++ intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL); ++ if (!intel_encoder) ++ return; ++ ++ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); ++ if (!intel_connector) { ++ kfree(intel_encoder); + return; ++ } + + /* Set up the DDC bus */ +- intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); +- if (!intel_output->ddc_bus) ++ intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); ++ if (!intel_encoder->ddc_bus) + goto free_intel; + + /* Now, try to find a controller */ + for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { +- struct drm_connector *connector = &intel_output->base; ++ struct drm_connector *connector = &intel_connector->base; + int gpio; + + dvo = &intel_dvo_devices[i]; +@@ -434,11 +400,11 @@ void intel_dvo_init(struct drm_device *dev) + if (!ret) + continue; + +- intel_output->type = INTEL_OUTPUT_DVO; +- intel_output->crtc_mask = (1 << 0) | (1 << 1); ++ intel_encoder->type = INTEL_OUTPUT_DVO; ++ intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + switch (dvo->type) { + case INTEL_DVO_CHIP_TMDS: +- intel_output->clone_mask = ++ intel_encoder->clone_mask = + (1 << INTEL_DVO_TMDS_CLONE_BIT) | + (1 << INTEL_ANALOG_CLONE_BIT); + drm_connector_init(dev, connector, +@@ -447,7 +413,7 @@ void intel_dvo_init(struct drm_device *dev) + encoder_type = DRM_MODE_ENCODER_TMDS; + break; + case INTEL_DVO_CHIP_LVDS: +- intel_output->clone_mask = ++ intel_encoder->clone_mask = + (1 << INTEL_DVO_LVDS_CLONE_BIT); + drm_connector_init(dev, connector, + &intel_dvo_connector_funcs, +@@ -462,16 +428,16 @@ void intel_dvo_init(struct drm_device *dev) + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + +- intel_output->dev_priv = dvo; +- intel_output->i2c_bus = i2cbus; ++ intel_encoder->dev_priv = dvo; ++ intel_encoder->i2c_bus = i2cbus; + +- drm_encoder_init(dev, &intel_output->enc, ++ drm_encoder_init(dev, &intel_encoder->enc, + &intel_dvo_enc_funcs, encoder_type); +- drm_encoder_helper_add(&intel_output->enc, ++ drm_encoder_helper_add(&intel_encoder->enc, + &intel_dvo_helper_funcs); + +- drm_mode_connector_attach_encoder(&intel_output->base, +- &intel_output->enc); ++ drm_mode_connector_attach_encoder(&intel_connector->base, ++ &intel_encoder->enc); + if (dvo->type == INTEL_DVO_CHIP_LVDS) { + /* For our LVDS chipsets, we should hopefully be able + * to dig the fixed panel mode out of the BIOS data. +@@ -489,10 +455,11 @@ void intel_dvo_init(struct drm_device *dev) + return; + } + +- intel_i2c_destroy(intel_output->ddc_bus); ++ intel_i2c_destroy(intel_encoder->ddc_bus); + /* Didn't find a chip, so tear down. 
*/ + if (i2cbus != NULL) + intel_i2c_destroy(i2cbus); + free_intel: +- kfree(intel_output); ++ kfree(intel_encoder); ++ kfree(intel_connector); + } +diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c +--- a/drivers/gpu/drm/i915/intel_fb.c ++++ b/drivers/gpu/drm/i915/intel_fb.c +@@ -144,7 +144,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, + ret = -ENOMEM; + goto out; + } +- obj_priv = fbo->driver_private; ++ obj_priv = to_intel_bo(fbo); + + mutex_lock(&dev->struct_mutex); + +diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c +--- a/drivers/gpu/drm/i915/intel_hdmi.c ++++ b/drivers/gpu/drm/i915/intel_hdmi.c +@@ -38,7 +38,6 @@ + + struct intel_hdmi_priv { + u32 sdvox_reg; +- u32 save_SDVOX; + bool has_hdmi_sink; + }; + +@@ -50,8 +49,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = encoder->crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); +- struct intel_output *intel_output = enc_to_intel_output(encoder); +- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; + u32 sdvox; + + sdvox = SDVO_ENCODING_HDMI | +@@ -62,8 +61,12 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, + if (hdmi_priv->has_hdmi_sink) + sdvox |= SDVO_AUDIO_ENABLE; + +- if (intel_crtc->pipe == 1) +- sdvox |= SDVO_PIPE_B_SELECT; ++ if (intel_crtc->pipe == 1) { ++ if (HAS_PCH_CPT(dev)) ++ sdvox |= PORT_TRANS_B_SEL_CPT; ++ else ++ sdvox |= SDVO_PIPE_B_SELECT; ++ } + + I915_WRITE(hdmi_priv->sdvox_reg, sdvox); + POSTING_READ(hdmi_priv->sdvox_reg); +@@ -73,8 +76,8 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) + { + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_output *intel_output = enc_to_intel_output(encoder); +- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; + u32 temp; + + temp = I915_READ(hdmi_priv->sdvox_reg); +@@ -82,7 +85,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) + /* HW workaround, need to toggle enable bit off and on for 12bpc, but + * we do this anyway which shows more stable in testing. + */ +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE); + POSTING_READ(hdmi_priv->sdvox_reg); + } +@@ -99,33 +102,12 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) + /* HW workaround, need to write this twice for issue that may result + * in first write getting masked. 
+ */ +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(hdmi_priv->sdvox_reg, temp); + POSTING_READ(hdmi_priv->sdvox_reg); + } + } + +-static void intel_hdmi_save(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; +- +- hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg); +-} +- +-static void intel_hdmi_restore(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; +- +- I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX); +- POSTING_READ(hdmi_priv->sdvox_reg); +-} +- + static int intel_hdmi_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { +@@ -150,21 +132,22 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, + static enum drm_connector_status + intel_hdmi_detect(struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; + struct edid *edid = NULL; + enum drm_connector_status status = connector_status_disconnected; + + hdmi_priv->has_hdmi_sink = false; +- edid = drm_get_edid(&intel_output->base, +- intel_output->ddc_bus); ++ edid = drm_get_edid(connector, ++ intel_encoder->ddc_bus); + + if (edid) { + if (edid->input & DRM_EDID_INPUT_DIGITAL) { + status = connector_status_connected; + hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); + } +- intel_output->base.display_info.raw_edid = NULL; ++ connector->display_info.raw_edid = NULL; + kfree(edid); + } + +@@ -173,24 +156,21 @@ intel_hdmi_detect(struct drm_connector *connector) + + static int intel_hdmi_get_modes(struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + + /* We should parse the EDID data and find out if it's an HDMI sink so + * we can send audio to it. 
+ */ + +- return intel_ddc_get_modes(intel_output); ++ return intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + } + + static void intel_hdmi_destroy(struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); +- +- if (intel_output->i2c_bus) +- intel_i2c_destroy(intel_output->i2c_bus); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); +- kfree(intel_output); ++ kfree(connector); + } + + static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { +@@ -203,8 +183,6 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { + + static const struct drm_connector_funcs intel_hdmi_connector_funcs = { + .dpms = drm_helper_connector_dpms, +- .save = intel_hdmi_save, +- .restore = intel_hdmi_restore, + .detect = intel_hdmi_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = intel_hdmi_destroy, +@@ -213,12 +191,17 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { + static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { + .get_modes = intel_hdmi_get_modes, + .mode_valid = intel_hdmi_mode_valid, +- .best_encoder = intel_best_encoder, ++ .best_encoder = intel_attached_encoder, + }; + + static void intel_hdmi_enc_destroy(struct drm_encoder *encoder) + { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ ++ if (intel_encoder->i2c_bus) ++ intel_i2c_destroy(intel_encoder->i2c_bus); + drm_encoder_cleanup(encoder); ++ kfree(intel_encoder); + } + + static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { +@@ -229,63 +212,71 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) + { + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_connector *connector; +- struct intel_output *intel_output; ++ struct intel_encoder *intel_encoder; ++ struct intel_connector *intel_connector; + struct intel_hdmi_priv *hdmi_priv; + +- intel_output = kcalloc(sizeof(struct intel_output) + ++ intel_encoder = kcalloc(sizeof(struct intel_encoder) + + sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); +- if (!intel_output) ++ if (!intel_encoder) + return; +- hdmi_priv = (struct intel_hdmi_priv *)(intel_output + 1); + +- connector = &intel_output->base; ++ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); ++ if (!intel_connector) { ++ kfree(intel_encoder); ++ return; ++ } ++ ++ hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1); ++ ++ connector = &intel_connector->base; + drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); + +- intel_output->type = INTEL_OUTPUT_HDMI; ++ intel_encoder->type = INTEL_OUTPUT_HDMI; + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; +- intel_output->crtc_mask = (1 << 0) | (1 << 1); ++ intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + + /* Set up the DDC bus. 
*/ + if (sdvox_reg == SDVOB) { +- intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); +- intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); ++ intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); ++ intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); + dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; + } else if (sdvox_reg == SDVOC) { +- intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); +- intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); ++ intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); ++ intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); + dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; + } else if (sdvox_reg == HDMIB) { +- intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); +- intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, ++ intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); ++ intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, + "HDMIB"); + dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; + } else if (sdvox_reg == HDMIC) { +- intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); +- intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, ++ intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); ++ intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, + "HDMIC"); + dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; + } else if (sdvox_reg == HDMID) { +- intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); +- intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, ++ intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); ++ intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, + "HDMID"); + dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; + } +- if (!intel_output->ddc_bus) ++ if (!intel_encoder->ddc_bus) + goto err_connector; + + hdmi_priv->sdvox_reg = sdvox_reg; +- intel_output->dev_priv = hdmi_priv; ++ intel_encoder->dev_priv = hdmi_priv; + +- drm_encoder_init(dev, &intel_output->enc, &intel_hdmi_enc_funcs, ++ drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs, + DRM_MODE_ENCODER_TMDS); +- drm_encoder_helper_add(&intel_output->enc, &intel_hdmi_helper_funcs); ++ drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs); + +- drm_mode_connector_attach_encoder(&intel_output->base, +- &intel_output->enc); ++ drm_mode_connector_attach_encoder(&intel_connector->base, ++ &intel_encoder->enc); + drm_sysfs_connector_add(connector); + + /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written +@@ -301,7 +292,8 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) + + err_connector: + drm_connector_cleanup(connector); +- kfree(intel_output); ++ kfree(intel_encoder); ++ kfree(intel_connector); + + return; + } +diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c +--- a/drivers/gpu/drm/i915/intel_i2c.c ++++ b/drivers/gpu/drm/i915/intel_i2c.c +@@ -128,7 +128,7 @@ intel_i2c_reset_gmbus(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; + +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(PCH_GMBUS0, 0); + } else { + I915_WRITE(GMBUS0, 0); +diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c +--- a/drivers/gpu/drm/i915/intel_lvds.c ++++ b/drivers/gpu/drm/i915/intel_lvds.c +@@ -56,7 +56,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level) + struct drm_i915_private *dev_priv = dev->dev_private; + u32 blc_pwm_ctl, reg; + +- if 
(IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + reg = BLC_PWM_CPU_CTL; + else + reg = BLC_PWM_CTL; +@@ -74,7 +74,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev) + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + reg = BLC_PWM_PCH_CTL2; + else + reg = BLC_PWM_CTL; +@@ -89,17 +89,22 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev) + static void intel_lvds_set_power(struct drm_device *dev, bool on) + { + struct drm_i915_private *dev_priv = dev->dev_private; +- u32 pp_status, ctl_reg, status_reg; ++ u32 pp_status, ctl_reg, status_reg, lvds_reg; + +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + ctl_reg = PCH_PP_CONTROL; + status_reg = PCH_PP_STATUS; ++ lvds_reg = PCH_LVDS; + } else { + ctl_reg = PP_CONTROL; + status_reg = PP_STATUS; ++ lvds_reg = LVDS; + } + + if (on) { ++ I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); ++ POSTING_READ(lvds_reg); ++ + I915_WRITE(ctl_reg, I915_READ(ctl_reg) | + POWER_TARGET_ON); + do { +@@ -115,6 +120,9 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on) + do { + pp_status = I915_READ(status_reg); + } while (pp_status & PP_ON); ++ ++ I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); ++ POSTING_READ(lvds_reg); + } + } + +@@ -130,75 +138,6 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) + /* XXX: We never power down the LVDS pairs. */ + } + +-static void intel_lvds_save(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct drm_i915_private *dev_priv = dev->dev_private; +- u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; +- u32 pwm_ctl_reg; +- +- if (IS_IRONLAKE(dev)) { +- pp_on_reg = PCH_PP_ON_DELAYS; +- pp_off_reg = PCH_PP_OFF_DELAYS; +- pp_ctl_reg = PCH_PP_CONTROL; +- pp_div_reg = PCH_PP_DIVISOR; +- pwm_ctl_reg = BLC_PWM_CPU_CTL; +- } else { +- pp_on_reg = PP_ON_DELAYS; +- pp_off_reg = PP_OFF_DELAYS; +- pp_ctl_reg = PP_CONTROL; +- pp_div_reg = PP_DIVISOR; +- pwm_ctl_reg = BLC_PWM_CTL; +- } +- +- dev_priv->savePP_ON = I915_READ(pp_on_reg); +- dev_priv->savePP_OFF = I915_READ(pp_off_reg); +- dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg); +- dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg); +- dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg); +- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & +- BACKLIGHT_DUTY_CYCLE_MASK); +- +- /* +- * If the light is off at server startup, just make it full brightness +- */ +- if (dev_priv->backlight_duty_cycle == 0) +- dev_priv->backlight_duty_cycle = +- intel_lvds_get_max_backlight(dev); +-} +- +-static void intel_lvds_restore(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct drm_i915_private *dev_priv = dev->dev_private; +- u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; +- u32 pwm_ctl_reg; +- +- if (IS_IRONLAKE(dev)) { +- pp_on_reg = PCH_PP_ON_DELAYS; +- pp_off_reg = PCH_PP_OFF_DELAYS; +- pp_ctl_reg = PCH_PP_CONTROL; +- pp_div_reg = PCH_PP_DIVISOR; +- pwm_ctl_reg = BLC_PWM_CPU_CTL; +- } else { +- pp_on_reg = PP_ON_DELAYS; +- pp_off_reg = PP_OFF_DELAYS; +- pp_ctl_reg = PP_CONTROL; +- pp_div_reg = PP_DIVISOR; +- pwm_ctl_reg = BLC_PWM_CTL; +- } +- +- I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL); +- I915_WRITE(pp_on_reg, dev_priv->savePP_ON); +- I915_WRITE(pp_off_reg, dev_priv->savePP_OFF); +- I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR); +- I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL); +- if (dev_priv->savePP_CONTROL & POWER_TARGET_ON) +- 
intel_lvds_set_power(dev, true); +- else +- intel_lvds_set_power(dev, false); +-} +- + static int intel_lvds_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { +@@ -230,8 +169,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct drm_encoder *tmp_encoder; +- struct intel_output *intel_output = enc_to_intel_output(encoder); +- struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; + u32 pfit_control = 0, pfit_pgm_ratios = 0; + int left_border = 0, right_border = 0, top_border = 0; + int bottom_border = 0; +@@ -297,7 +236,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, + } + + /* full screen scale for now */ +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + goto out; + + /* 965+ wants fuzzy fitting */ +@@ -327,7 +266,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, + * to register description and PRM. + * Change the value here to see the borders for debugging + */ +- if (!IS_IRONLAKE(dev)) { ++ if (!HAS_PCH_SPLIT(dev)) { + I915_WRITE(BCLRPAT_A, 0); + I915_WRITE(BCLRPAT_B, 0); + } +@@ -548,7 +487,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder) + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + reg = BLC_PWM_CPU_CTL; + else + reg = BLC_PWM_CTL; +@@ -578,8 +517,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, + { + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_output *intel_output = enc_to_intel_output(encoder); +- struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; + + /* + * The LVDS pin pair will already have been turned on in the +@@ -587,7 +526,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, + * settings. + */ + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + return; + + /* +@@ -599,53 +538,6 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, + I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); + } + +-/* Some lid devices report incorrect lid status, assume they're connected */ +-static const struct dmi_system_id bad_lid_status[] = { +- { +- .ident = "Compaq nx9020", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), +- DMI_MATCH(DMI_BOARD_NAME, "3084"), +- }, +- }, +- { +- .ident = "Samsung SX20S", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"), +- DMI_MATCH(DMI_BOARD_NAME, "SX20S"), +- }, +- }, +- { +- .ident = "Aspire One", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"), +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), +- }, +- }, +- { +- .ident = "Aspire 1810T", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"), +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"), +- }, +- }, +- { +- .ident = "PC-81005", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), +- DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), +- }, +- }, +- { +- .ident = "Clevo M5x0N", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), +- DMI_MATCH(DMI_BOARD_NAME, "M5x0N"), +- }, +- }, +- { } +-}; +- + /** + * Detect the LVDS connection. 
+ * +@@ -661,12 +553,9 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect + /* ACPI lid methods were generally unreliable in this generation, so + * don't even bother. + */ +- if (IS_I8XX(dev)) ++ if (IS_GEN2(dev) || IS_GEN3(dev)) + return connector_status_connected; + +- if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) +- status = connector_status_disconnected; +- + return status; + } + +@@ -676,14 +565,17 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect + static int intel_lvds_get_modes(struct drm_connector *connector) + { + struct drm_device *dev = connector->dev; +- struct intel_output *intel_output = to_intel_output(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct drm_i915_private *dev_priv = dev->dev_private; + int ret = 0; + +- ret = intel_ddc_get_modes(intel_output); ++ if (dev_priv->lvds_edid_good) { ++ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + +- if (ret) +- return ret; ++ if (ret) ++ return ret; ++ } + + /* Didn't get an EDID, so + * Set wide sync ranges so we get all modes +@@ -756,11 +648,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, + static void intel_lvds_destroy(struct drm_connector *connector) + { + struct drm_device *dev = connector->dev; +- struct intel_output *intel_output = to_intel_output(connector); + struct drm_i915_private *dev_priv = dev->dev_private; + +- if (intel_output->ddc_bus) +- intel_i2c_destroy(intel_output->ddc_bus); + if (dev_priv->lid_notifier.notifier_call) + acpi_lid_notifier_unregister(&dev_priv->lid_notifier); + drm_sysfs_connector_remove(connector); +@@ -773,13 +662,14 @@ static int intel_lvds_set_property(struct drm_connector *connector, + uint64_t value) + { + struct drm_device *dev = connector->dev; +- struct intel_output *intel_output = +- to_intel_output(connector); + + if (property == dev->mode_config.scaling_mode_property && + connector->encoder) { + struct drm_crtc *crtc = connector->encoder->crtc; +- struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; ++ struct drm_encoder *encoder = connector->encoder; ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; ++ + if (value == DRM_MODE_SCALE_NONE) { + DRM_DEBUG_KMS("no scaling not supported\n"); + return 0; +@@ -813,13 +703,11 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { + static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { + .get_modes = intel_lvds_get_modes, + .mode_valid = intel_lvds_mode_valid, +- .best_encoder = intel_best_encoder, ++ .best_encoder = intel_attached_encoder, + }; + + static const struct drm_connector_funcs intel_lvds_connector_funcs = { + .dpms = drm_helper_connector_dpms, +- .save = intel_lvds_save, +- .restore = intel_lvds_restore, + .detect = intel_lvds_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = intel_lvds_set_property, +@@ -829,7 +717,12 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { + + static void intel_lvds_enc_destroy(struct drm_encoder *encoder) + { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ ++ if (intel_encoder->ddc_bus) ++ intel_i2c_destroy(intel_encoder->ddc_bus); + drm_encoder_cleanup(encoder); ++ kfree(intel_encoder); + } + + static const struct drm_encoder_funcs 
intel_lvds_enc_funcs = { +@@ -899,6 +792,14 @@ static const struct dmi_system_id intel_no_lvds[] = { + DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), + }, + }, ++ { ++ .callback = intel_no_lvds_dmi_callback, ++ .ident = "Clientron U800", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "U800"), ++ }, ++ }, + + { } /* terminating entry */ + }; +@@ -1009,7 +910,8 @@ static int lvds_is_present_in_vbt(struct drm_device *dev) + void intel_lvds_init(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_output *intel_output; ++ struct intel_encoder *intel_encoder; ++ struct intel_connector *intel_connector; + struct drm_connector *connector; + struct drm_encoder *encoder; + struct drm_display_mode *scan; /* *modes, *bios_mode; */ +@@ -1027,7 +929,7 @@ void intel_lvds_init(struct drm_device *dev) + return; + } + +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) + return; + if (dev_priv->edp_support) { +@@ -1037,40 +939,48 @@ void intel_lvds_init(struct drm_device *dev) + gpio = PCH_GPIOC; + } + +- intel_output = kzalloc(sizeof(struct intel_output) + ++ intel_encoder = kzalloc(sizeof(struct intel_encoder) + + sizeof(struct intel_lvds_priv), GFP_KERNEL); +- if (!intel_output) { ++ if (!intel_encoder) { ++ return; ++ } ++ ++ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); ++ if (!intel_connector) { ++ kfree(intel_encoder); + return; + } + +- connector = &intel_output->base; +- encoder = &intel_output->enc; +- drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs, ++ connector = &intel_connector->base; ++ encoder = &intel_encoder->enc; ++ drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, + DRM_MODE_CONNECTOR_LVDS); + +- drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs, ++ drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs, + DRM_MODE_ENCODER_LVDS); + +- drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); +- intel_output->type = INTEL_OUTPUT_LVDS; ++ drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); ++ intel_encoder->type = INTEL_OUTPUT_LVDS; + +- intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); +- intel_output->crtc_mask = (1 << 1); ++ intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); ++ intel_encoder->crtc_mask = (1 << 1); ++ if (IS_I965G(dev)) ++ intel_encoder->crtc_mask |= (1 << 0); + drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); + drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); + connector->display_info.subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + +- lvds_priv = (struct intel_lvds_priv *)(intel_output + 1); +- intel_output->dev_priv = lvds_priv; ++ lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1); ++ intel_encoder->dev_priv = lvds_priv; + /* create the scaling mode property */ + drm_mode_create_scaling_mode_property(dev); + /* + * the initial panel fitting mode will be FULL_SCREEN. + */ + +- drm_connector_attach_property(&intel_output->base, ++ drm_connector_attach_property(&intel_connector->base, + dev->mode_config.scaling_mode_property, + DRM_MODE_SCALE_FULLSCREEN); + lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; +@@ -1085,8 +995,8 @@ void intel_lvds_init(struct drm_device *dev) + */ + + /* Set up the DDC bus. 
*/ +- intel_output->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); +- if (!intel_output->ddc_bus) { ++ intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); ++ if (!intel_encoder->ddc_bus) { + dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " + "failed.\n"); + goto failed; +@@ -1096,7 +1006,10 @@ void intel_lvds_init(struct drm_device *dev) + * Attempt to get the fixed panel mode from DDC. Assume that the + * preferred mode is the right one. + */ +- intel_ddc_get_modes(intel_output); ++ dev_priv->lvds_edid_good = true; ++ ++ if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus)) ++ dev_priv->lvds_edid_good = false; + + list_for_each_entry(scan, &connector->probed_modes, head) { + mutex_lock(&dev->mode_config.mutex); +@@ -1130,7 +1043,7 @@ void intel_lvds_init(struct drm_device *dev) + */ + + /* Ironlake: FIXME if still fail, not try pipe mode now */ +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + goto failed; + + lvds = I915_READ(LVDS); +@@ -1151,7 +1064,7 @@ void intel_lvds_init(struct drm_device *dev) + goto failed; + + out: +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + u32 pwm; + /* make sure PWM is enabled */ + pwm = I915_READ(BLC_PWM_CPU_CTL2); +@@ -1174,9 +1087,10 @@ out: + + failed: + DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); +- if (intel_output->ddc_bus) +- intel_i2c_destroy(intel_output->ddc_bus); ++ if (intel_encoder->ddc_bus) ++ intel_i2c_destroy(intel_encoder->ddc_bus); + drm_connector_cleanup(connector); + drm_encoder_cleanup(encoder); +- kfree(intel_output); ++ kfree(intel_encoder); ++ kfree(intel_connector); + } +diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c +--- a/drivers/gpu/drm/i915/intel_modes.c ++++ b/drivers/gpu/drm/i915/intel_modes.c +@@ -33,7 +33,7 @@ + * intel_ddc_probe + * + */ +-bool intel_ddc_probe(struct intel_output *intel_output) ++bool intel_ddc_probe(struct intel_encoder *intel_encoder) + { + u8 out_buf[] = { 0x0, 0x0}; + u8 buf[2]; +@@ -53,9 +53,9 @@ bool intel_ddc_probe(struct intel_output *intel_output) + } + }; + +- intel_i2c_quirk_set(intel_output->base.dev, true); +- ret = i2c_transfer(intel_output->ddc_bus, msgs, 2); +- intel_i2c_quirk_set(intel_output->base.dev, false); ++ intel_i2c_quirk_set(intel_encoder->enc.dev, true); ++ ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2); ++ intel_i2c_quirk_set(intel_encoder->enc.dev, false); + if (ret == 2) + return true; + +@@ -65,22 +65,23 @@ bool intel_ddc_probe(struct intel_output *intel_output) + /** + * intel_ddc_get_modes - get modelist from monitor + * @connector: DRM connector device to use ++ * @adapter: i2c adapter + * + * Fetch the EDID information from @connector using the DDC bus. 
+ */ +-int intel_ddc_get_modes(struct intel_output *intel_output) ++int intel_ddc_get_modes(struct drm_connector *connector, ++ struct i2c_adapter *adapter) + { + struct edid *edid; + int ret = 0; + +- intel_i2c_quirk_set(intel_output->base.dev, true); +- edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus); +- intel_i2c_quirk_set(intel_output->base.dev, false); ++ intel_i2c_quirk_set(connector->dev, true); ++ edid = drm_get_edid(connector, adapter); ++ intel_i2c_quirk_set(connector->dev, false); + if (edid) { +- drm_mode_connector_update_edid_property(&intel_output->base, +- edid); +- ret = drm_add_edid_modes(&intel_output->base, edid); +- intel_output->base.display_info.raw_edid = NULL; ++ drm_mode_connector_update_edid_property(connector, edid); ++ ret = drm_add_edid_modes(connector, edid); ++ connector->display_info.raw_edid = NULL; + kfree(edid); + } + +diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c +--- a/drivers/gpu/drm/i915/intel_overlay.c ++++ b/drivers/gpu/drm/i915/intel_overlay.c +@@ -172,7 +172,7 @@ struct overlay_registers { + #define OFC_UPDATE 0x1 + + #define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev)) +-#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev)) ++#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev)) + + + static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay) +@@ -199,16 +199,11 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over + + static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) + { +- struct drm_device *dev = overlay->dev; +- drm_i915_private_t *dev_priv = dev->dev_private; +- + if (OVERLAY_NONPHYSICAL(overlay->dev)) + io_mapping_unmap_atomic(overlay->virt_addr); + + overlay->virt_addr = NULL; + +- I915_READ(OVADD); /* flush wc cashes */ +- + return; + } + +@@ -225,9 +220,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) + overlay->active = 1; + overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP; + +- BEGIN_LP_RING(6); +- OUT_RING(MI_FLUSH); +- OUT_RING(MI_NOOP); ++ BEGIN_LP_RING(4); + OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); + OUT_RING(overlay->flip_addr | OFC_UPDATE); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); +@@ -267,9 +260,7 @@ static void intel_overlay_continue(struct intel_overlay *overlay, + if (tmp & (1 << 17)) + DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); + +- BEGIN_LP_RING(4); +- OUT_RING(MI_FLUSH); +- OUT_RING(MI_NOOP); ++ BEGIN_LP_RING(2); + OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); + OUT_RING(flip_addr); + ADVANCE_LP_RING(); +@@ -338,9 +329,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) + /* wait for overlay to go idle */ + overlay->hw_wedged = SWITCH_OFF_STAGE_1; + +- BEGIN_LP_RING(6); +- OUT_RING(MI_FLUSH); +- OUT_RING(MI_NOOP); ++ BEGIN_LP_RING(4); + OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); + OUT_RING(flip_addr); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); +@@ -358,9 +347,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) + /* turn overlay off */ + overlay->hw_wedged = SWITCH_OFF_STAGE_2; + +- BEGIN_LP_RING(6); +- OUT_RING(MI_FLUSH); +- OUT_RING(MI_NOOP); ++ BEGIN_LP_RING(4); + OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); + OUT_RING(flip_addr); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); +@@ -435,9 +422,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, + + overlay->hw_wedged = SWITCH_OFF_STAGE_2; + +- BEGIN_LP_RING(6); 
+- OUT_RING(MI_FLUSH); +- OUT_RING(MI_NOOP); ++ BEGIN_LP_RING(4); + OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); + OUT_RING(flip_addr); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); +@@ -739,7 +724,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, + int ret, tmp_width; + struct overlay_registers *regs; + bool scale_changed = false; +- struct drm_i915_gem_object *bo_priv = new_bo->driver_private; ++ struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo); + struct drm_device *dev = overlay->dev; + + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); +@@ -824,7 +809,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, + intel_overlay_continue(overlay, scale_changed); + + overlay->old_vid_bo = overlay->vid_bo; +- overlay->vid_bo = new_bo->driver_private; ++ overlay->vid_bo = to_intel_bo(new_bo); + + return 0; + +@@ -1183,8 +1168,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, + out_unlock: + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->mode_config.mutex); +-out_free: + drm_gem_object_unreference_unlocked(new_bo); ++out_free: + kfree(params); + + return ret; +@@ -1359,7 +1344,7 @@ void intel_setup_overlay(struct drm_device *dev) + reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE); + if (!reg_bo) + goto out_free; +- overlay->reg_bo = reg_bo->driver_private; ++ overlay->reg_bo = to_intel_bo(reg_bo); + + if (OVERLAY_NONPHYSICAL(dev)) { + ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); +diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c +--- a/drivers/gpu/drm/i915/intel_sdvo.c ++++ b/drivers/gpu/drm/i915/intel_sdvo.c +@@ -35,7 +35,18 @@ + #include "i915_drm.h" + #include "i915_drv.h" + #include "intel_sdvo_regs.h" +-#include ++ ++#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1) ++#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1) ++#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1) ++#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0) ++ ++#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\ ++ SDVO_TV_MASK) ++ ++#define IS_TV(c) (c->output_flag & SDVO_TV_MASK) ++#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) ++ + + static char *tv_format_names[] = { + "NTSC_M" , "NTSC_J" , "NTSC_443", +@@ -53,7 +64,7 @@ struct intel_sdvo_priv { + u8 slave_addr; + + /* Register for the SDVO device: SDVOB or SDVOC */ +- int output_device; ++ int sdvo_reg; + + /* Active outputs controlled by this SDVO output */ + uint16_t controlled_output; +@@ -85,12 +96,6 @@ struct intel_sdvo_priv { + /* This is for current tv format name */ + char *tv_format_name; + +- /* This contains all current supported TV format */ +- char *tv_format_supported[TV_FORMAT_NUM]; +- int format_supported_num; +- struct drm_property *tv_format_property; +- struct drm_property *tv_format_name_property[TV_FORMAT_NUM]; +- + /** + * This is set if we treat the device as HDMI, instead of DVI. + */ +@@ -111,29 +116,36 @@ struct intel_sdvo_priv { + */ + struct drm_display_mode *sdvo_lvds_fixed_mode; + +- /** +- * Returned SDTV resolutions allowed for the current format, if the +- * device reported it. 
+- */ +- struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions; +- + /* + * supported encoding mode, used to determine whether HDMI is + * supported + */ + struct intel_sdvo_encode encode; + +- /* DDC bus used by this SDVO output */ ++ /* DDC bus used by this SDVO encoder */ + uint8_t ddc_bus; + + /* Mac mini hack -- use the same DDC as the analog connector */ + struct i2c_adapter *analog_ddc_bus; + +- int save_sdvo_mult; +- u16 save_active_outputs; +- struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; +- struct intel_sdvo_dtd save_output_dtd[16]; +- u32 save_SDVOX; ++}; ++ ++struct intel_sdvo_connector { ++ /* Mark the type of connector */ ++ uint16_t output_flag; ++ ++ /* This contains all current supported TV format */ ++ char *tv_format_supported[TV_FORMAT_NUM]; ++ int format_supported_num; ++ struct drm_property *tv_format_property; ++ struct drm_property *tv_format_name_property[TV_FORMAT_NUM]; ++ ++ /** ++ * Returned SDTV resolutions allowed for the current format, if the ++ * device reported it. ++ */ ++ struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions; ++ + /* add the property for the SDVO-TV */ + struct drm_property *left_property; + struct drm_property *right_property; +@@ -161,22 +173,33 @@ struct intel_sdvo_priv { + }; + + static bool +-intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags); ++intel_sdvo_output_setup(struct intel_encoder *intel_encoder, ++ uint16_t flags); ++static void ++intel_sdvo_tv_create_property(struct drm_connector *connector, int type); ++static void ++intel_sdvo_create_enhance_property(struct drm_connector *connector); + + /** + * Writes the SDVOB or SDVOC with the given value, but always writes both + * SDVOB and SDVOC to work around apparent hardware issues (according to + * comments in the BIOS). 
+ */ +-static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val) ++static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val) + { +- struct drm_device *dev = intel_output->base.dev; ++ struct drm_device *dev = intel_encoder->enc.dev; + struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + u32 bval = val, cval = val; + int i; + +- if (sdvo_priv->output_device == SDVOB) { ++ if (sdvo_priv->sdvo_reg == PCH_SDVOB) { ++ I915_WRITE(sdvo_priv->sdvo_reg, val); ++ I915_READ(sdvo_priv->sdvo_reg); ++ return; ++ } ++ ++ if (sdvo_priv->sdvo_reg == SDVOB) { + cval = I915_READ(SDVOC); + } else { + bval = I915_READ(SDVOB); +@@ -195,10 +218,10 @@ static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val) + } + } + +-static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, ++static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr, + u8 *ch) + { +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + u8 out_buf[2]; + u8 buf[2]; + int ret; +@@ -221,7 +244,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, + out_buf[0] = addr; + out_buf[1] = 0; + +- if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2) ++ if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2) + { + *ch = buf[0]; + return true; +@@ -231,10 +254,10 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, + return false; + } + +-static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, ++static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr, + u8 ch) + { +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + u8 out_buf[2]; + struct i2c_msg msgs[] = { + { +@@ -248,7 +271,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, + out_buf[0] = addr; + out_buf[1] = ch; + +- if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1) ++ if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1) + { + return true; + } +@@ -352,13 +375,14 @@ static const struct _sdvo_cmd_name { + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), + }; + +-#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") +-#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv) ++#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB) ++#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? 
"SDVOB" : "SDVOC") ++#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv) + +-static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, ++static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd, + void *args, int args_len) + { +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + int i; + + DRM_DEBUG_KMS("%s: W: %02X ", +@@ -378,19 +402,19 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, + DRM_LOG_KMS("\n"); + } + +-static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd, ++static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd, + void *args, int args_len) + { + int i; + +- intel_sdvo_debug_write(intel_output, cmd, args, args_len); ++ intel_sdvo_debug_write(intel_encoder, cmd, args, args_len); + + for (i = 0; i < args_len; i++) { +- intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0 - i, ++ intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i, + ((u8*)args)[i]); + } + +- intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd); ++ intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd); + } + + static const char *cmd_status_names[] = { +@@ -403,11 +427,11 @@ static const char *cmd_status_names[] = { + "Scaling not supported" + }; + +-static void intel_sdvo_debug_response(struct intel_output *intel_output, ++static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder, + void *response, int response_len, + u8 status) + { +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + int i; + + DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv)); +@@ -422,7 +446,7 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output, + DRM_LOG_KMS("\n"); + } + +-static u8 intel_sdvo_read_response(struct intel_output *intel_output, ++static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder, + void *response, int response_len) + { + int i; +@@ -432,16 +456,16 @@ static u8 intel_sdvo_read_response(struct intel_output *intel_output, + while (retry--) { + /* Read the command response */ + for (i = 0; i < response_len; i++) { +- intel_sdvo_read_byte(intel_output, ++ intel_sdvo_read_byte(intel_encoder, + SDVO_I2C_RETURN_0 + i, + &((u8 *)response)[i]); + } + + /* read the return status */ +- intel_sdvo_read_byte(intel_output, SDVO_I2C_CMD_STATUS, ++ intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS, + &status); + +- intel_sdvo_debug_response(intel_output, response, response_len, ++ intel_sdvo_debug_response(intel_encoder, response, response_len, + status); + if (status != SDVO_CMD_STATUS_PENDING) + return status; +@@ -469,10 +493,10 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) + * another I2C transaction after issuing the DDC bus switch, it will be + * switched to the internal SDVO register. 
+ */ +-static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, ++static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder, + u8 target) + { +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + u8 out_buf[2], cmd_buf[2], ret_value[2], ret; + struct i2c_msg msgs[] = { + { +@@ -496,10 +520,10 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, + }, + }; + +- intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, ++ intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH, + &target, 1); + /* write the DDC switch command argument */ +- intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target); ++ intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target); + + out_buf[0] = SDVO_I2C_OPCODE; + out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; +@@ -508,7 +532,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, + ret_value[0] = 0; + ret_value[1] = 0; + +- ret = i2c_transfer(intel_output->i2c_bus, msgs, 3); ++ ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3); + if (ret != 3) { + /* failure in I2C transfer */ + DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); +@@ -522,7 +546,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, + return; + } + +-static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) ++static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1) + { + struct intel_sdvo_set_target_input_args targets = {0}; + u8 status; +@@ -533,10 +557,10 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool + if (target_1) + targets.target_1 = 1; + +- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets, ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets, + sizeof(targets)); + +- status = intel_sdvo_read_response(intel_output, NULL, 0); ++ status = intel_sdvo_read_response(intel_encoder, NULL, 0); + + return (status == SDVO_CMD_STATUS_SUCCESS); + } +@@ -547,13 +571,13 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool + * This function is making an assumption about the layout of the response, + * which should be checked against the docs. 
+ */ +-static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, bool *input_1, bool *input_2) ++static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2) + { + struct intel_sdvo_get_trained_inputs_response response; + u8 status; + +- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); +- status = intel_sdvo_read_response(intel_output, &response, sizeof(response)); ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); ++ status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response)); + if (status != SDVO_CMD_STATUS_SUCCESS) + return false; + +@@ -562,29 +586,18 @@ static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, boo + return true; + } + +-static bool intel_sdvo_get_active_outputs(struct intel_output *intel_output, +- u16 *outputs) +-{ +- u8 status; +- +- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); +- status = intel_sdvo_read_response(intel_output, outputs, sizeof(*outputs)); +- +- return (status == SDVO_CMD_STATUS_SUCCESS); +-} +- +-static bool intel_sdvo_set_active_outputs(struct intel_output *intel_output, ++static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder, + u16 outputs) + { + u8 status; + +- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, + sizeof(outputs)); +- status = intel_sdvo_read_response(intel_output, NULL, 0); ++ status = intel_sdvo_read_response(intel_encoder, NULL, 0); + return (status == SDVO_CMD_STATUS_SUCCESS); + } + +-static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output, ++static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder, + int mode) + { + u8 status, state = SDVO_ENCODER_STATE_ON; +@@ -604,24 +617,24 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output + break; + } + +- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, + sizeof(state)); +- status = intel_sdvo_read_response(intel_output, NULL, 0); ++ status = intel_sdvo_read_response(intel_encoder, NULL, 0); + + return (status == SDVO_CMD_STATUS_SUCCESS); + } + +-static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_output, ++static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder, + int *clock_min, + int *clock_max) + { + struct intel_sdvo_pixel_clock_range clocks; + u8 status; + +- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, + NULL, 0); + +- status = intel_sdvo_read_response(intel_output, &clocks, sizeof(clocks)); ++ status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks)); + + if (status != SDVO_CMD_STATUS_SUCCESS) + return false; +@@ -633,92 +646,58 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_ou + return true; + } + +-static bool intel_sdvo_set_target_output(struct intel_output *intel_output, ++static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder, + u16 outputs) + { + u8 status; + +- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, + sizeof(outputs)); + +- status = 
intel_sdvo_read_response(intel_output, NULL, 0); ++ status = intel_sdvo_read_response(intel_encoder, NULL, 0); + return (status == SDVO_CMD_STATUS_SUCCESS); + } + +-static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd, ++static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd, + struct intel_sdvo_dtd *dtd) + { + u8 status; + +- intel_sdvo_write_cmd(intel_output, cmd, NULL, 0); +- status = intel_sdvo_read_response(intel_output, &dtd->part1, +- sizeof(dtd->part1)); ++ intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1)); ++ status = intel_sdvo_read_response(intel_encoder, NULL, 0); + if (status != SDVO_CMD_STATUS_SUCCESS) + return false; + +- intel_sdvo_write_cmd(intel_output, cmd + 1, NULL, 0); +- status = intel_sdvo_read_response(intel_output, &dtd->part2, +- sizeof(dtd->part2)); ++ intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2)); ++ status = intel_sdvo_read_response(intel_encoder, NULL, 0); + if (status != SDVO_CMD_STATUS_SUCCESS) + return false; + + return true; + } + +-static bool intel_sdvo_get_input_timing(struct intel_output *intel_output, ++static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder, + struct intel_sdvo_dtd *dtd) + { +- return intel_sdvo_get_timing(intel_output, +- SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); +-} +- +-static bool intel_sdvo_get_output_timing(struct intel_output *intel_output, +- struct intel_sdvo_dtd *dtd) +-{ +- return intel_sdvo_get_timing(intel_output, +- SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd); +-} +- +-static bool intel_sdvo_set_timing(struct intel_output *intel_output, u8 cmd, +- struct intel_sdvo_dtd *dtd) +-{ +- u8 status; +- +- intel_sdvo_write_cmd(intel_output, cmd, &dtd->part1, sizeof(dtd->part1)); +- status = intel_sdvo_read_response(intel_output, NULL, 0); +- if (status != SDVO_CMD_STATUS_SUCCESS) +- return false; +- +- intel_sdvo_write_cmd(intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2)); +- status = intel_sdvo_read_response(intel_output, NULL, 0); +- if (status != SDVO_CMD_STATUS_SUCCESS) +- return false; +- +- return true; +-} +- +-static bool intel_sdvo_set_input_timing(struct intel_output *intel_output, +- struct intel_sdvo_dtd *dtd) +-{ +- return intel_sdvo_set_timing(intel_output, ++ return intel_sdvo_set_timing(intel_encoder, + SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); + } + +-static bool intel_sdvo_set_output_timing(struct intel_output *intel_output, ++static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder, + struct intel_sdvo_dtd *dtd) + { +- return intel_sdvo_set_timing(intel_output, ++ return intel_sdvo_set_timing(intel_encoder, + SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); + } + + static bool +-intel_sdvo_create_preferred_input_timing(struct intel_output *output, ++intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder, + uint16_t clock, + uint16_t width, + uint16_t height) + { + struct intel_sdvo_preferred_input_timing_args args; +- struct intel_sdvo_priv *sdvo_priv = output->dev_priv; ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + uint8_t status; + + memset(&args, 0, sizeof(args)); +@@ -732,32 +711,33 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output, + sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height)) + args.scaled = 1; + +- intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, ++ intel_sdvo_write_cmd(intel_encoder, ++ SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, + &args, sizeof(args)); +- status = 
intel_sdvo_read_response(output, NULL, 0); ++ status = intel_sdvo_read_response(intel_encoder, NULL, 0); + if (status != SDVO_CMD_STATUS_SUCCESS) + return false; + + return true; + } + +-static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output, ++static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder, + struct intel_sdvo_dtd *dtd) + { + bool status; + +- intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, + NULL, 0); + +- status = intel_sdvo_read_response(output, &dtd->part1, ++ status = intel_sdvo_read_response(intel_encoder, &dtd->part1, + sizeof(dtd->part1)); + if (status != SDVO_CMD_STATUS_SUCCESS) + return false; + +- intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, + NULL, 0); + +- status = intel_sdvo_read_response(output, &dtd->part2, ++ status = intel_sdvo_read_response(intel_encoder, &dtd->part2, + sizeof(dtd->part2)); + if (status != SDVO_CMD_STATUS_SUCCESS) + return false; +@@ -765,29 +745,12 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output, + return false; + } + +-static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) +-{ +- u8 response, status; +- +- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); +- status = intel_sdvo_read_response(intel_output, &response, 1); +- +- if (status != SDVO_CMD_STATUS_SUCCESS) { +- DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n"); +- return SDVO_CLOCK_RATE_MULT_1X; +- } else { +- DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response); +- } +- +- return response; +-} +- +-static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 val) ++static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val) + { + u8 status; + +- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); +- status = intel_sdvo_read_response(intel_output, NULL, 0); ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); ++ status = intel_sdvo_read_response(intel_encoder, NULL, 0); + if (status != SDVO_CMD_STATUS_SUCCESS) + return false; + +@@ -876,13 +839,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, + mode->flags |= DRM_MODE_FLAG_PVSYNC; + } + +-static bool intel_sdvo_get_supp_encode(struct intel_output *output, ++static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder, + struct intel_sdvo_encode *encode) + { + uint8_t status; + +- intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); +- status = intel_sdvo_read_response(output, encode, sizeof(*encode)); ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); ++ status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode)); + if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */ + memset(encode, 0, sizeof(*encode)); + return false; +@@ -891,29 +854,30 @@ static bool intel_sdvo_get_supp_encode(struct intel_output *output, + return true; + } + +-static bool intel_sdvo_set_encode(struct intel_output *output, uint8_t mode) ++static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder, ++ uint8_t mode) + { + uint8_t status; + +- intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODE, &mode, 1); +- status = intel_sdvo_read_response(output, NULL, 0); ++ 
intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1); ++ status = intel_sdvo_read_response(intel_encoder, NULL, 0); + + return (status == SDVO_CMD_STATUS_SUCCESS); + } + +-static bool intel_sdvo_set_colorimetry(struct intel_output *output, ++static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder, + uint8_t mode) + { + uint8_t status; + +- intel_sdvo_write_cmd(output, SDVO_CMD_SET_COLORIMETRY, &mode, 1); +- status = intel_sdvo_read_response(output, NULL, 0); ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1); ++ status = intel_sdvo_read_response(intel_encoder, NULL, 0); + + return (status == SDVO_CMD_STATUS_SUCCESS); + } + + #if 0 +-static void intel_sdvo_dump_hdmi_buf(struct intel_output *output) ++static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder) + { + int i, j; + uint8_t set_buf_index[2]; +@@ -922,43 +886,45 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_output *output) + uint8_t buf[48]; + uint8_t *pos; + +- intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); +- intel_sdvo_read_response(output, &av_split, 1); ++ intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); ++ intel_sdvo_read_response(encoder, &av_split, 1); + + for (i = 0; i <= av_split; i++) { + set_buf_index[0] = i; set_buf_index[1] = 0; +- intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, ++ intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX, + set_buf_index, 2); +- intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_INFO, NULL, 0); +- intel_sdvo_read_response(output, &buf_size, 1); ++ intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0); ++ intel_sdvo_read_response(encoder, &buf_size, 1); + + pos = buf; + for (j = 0; j <= buf_size; j += 8) { +- intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_DATA, ++ intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA, + NULL, 0); +- intel_sdvo_read_response(output, pos, 8); ++ intel_sdvo_read_response(encoder, pos, 8); + pos += 8; + } + } + } + #endif + +-static void intel_sdvo_set_hdmi_buf(struct intel_output *output, int index, +- uint8_t *data, int8_t size, uint8_t tx_rate) ++static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder, ++ int index, ++ uint8_t *data, int8_t size, uint8_t tx_rate) + { + uint8_t set_buf_index[2]; + + set_buf_index[0] = index; + set_buf_index[1] = 0; + +- intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2); ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX, ++ set_buf_index, 2); + + for (; size > 0; size -= 8) { +- intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_DATA, data, 8); ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8); + data += 8; + } + +- intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); + } + + static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) +@@ -1033,7 +999,7 @@ struct dip_infoframe { + } __attribute__ ((packed)) u; + } __attribute__((packed)); + +-static void intel_sdvo_set_avi_infoframe(struct intel_output *output, ++static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder, + struct drm_display_mode * mode) + { + struct dip_infoframe avi_if = { +@@ -1044,15 +1010,16 @@ static void intel_sdvo_set_avi_infoframe(struct intel_output *output, + + avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, + 4 + avi_if.len); +- intel_sdvo_set_hdmi_buf(output, 1, (uint8_t *)&avi_if, 4 + avi_if.len, ++ 
intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if, ++ 4 + avi_if.len, + SDVO_HBUF_TX_VSYNC); + } + +-static void intel_sdvo_set_tv_format(struct intel_output *output) ++static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder) + { + + struct intel_sdvo_tv_format format; +- struct intel_sdvo_priv *sdvo_priv = output->dev_priv; ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + uint32_t format_map, i; + uint8_t status; + +@@ -1065,10 +1032,10 @@ static void intel_sdvo_set_tv_format(struct intel_output *output) + memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? + sizeof(format) : sizeof(format_map)); + +- intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, &format_map, ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format, + sizeof(format)); + +- status = intel_sdvo_read_response(output, NULL, 0); ++ status = intel_sdvo_read_response(intel_encoder, NULL, 0); + if (status != SDVO_CMD_STATUS_SUCCESS) + DRM_DEBUG_KMS("%s: Failed to set TV format\n", + SDVO_NAME(sdvo_priv)); +@@ -1078,8 +1045,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) + { +- struct intel_output *output = enc_to_intel_output(encoder); +- struct intel_sdvo_priv *dev_priv = output->dev_priv; ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv; + + if (dev_priv->is_tv) { + struct intel_sdvo_dtd output_dtd; +@@ -1094,22 +1061,22 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, + + /* Set output timings */ + intel_sdvo_get_dtd_from_mode(&output_dtd, mode); +- intel_sdvo_set_target_output(output, +- dev_priv->controlled_output); +- intel_sdvo_set_output_timing(output, &output_dtd); ++ intel_sdvo_set_target_output(intel_encoder, ++ dev_priv->attached_output); ++ intel_sdvo_set_output_timing(intel_encoder, &output_dtd); + + /* Set the input timing to the screen. Assume always input 0. */ +- intel_sdvo_set_target_input(output, true, false); ++ intel_sdvo_set_target_input(intel_encoder, true, false); + + +- success = intel_sdvo_create_preferred_input_timing(output, ++ success = intel_sdvo_create_preferred_input_timing(intel_encoder, + mode->clock / 10, + mode->hdisplay, + mode->vdisplay); + if (success) { + struct intel_sdvo_dtd input_dtd; + +- intel_sdvo_get_preferred_input_timing(output, ++ intel_sdvo_get_preferred_input_timing(intel_encoder, + &input_dtd); + intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); + dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; +@@ -1132,16 +1099,16 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, + intel_sdvo_get_dtd_from_mode(&output_dtd, + dev_priv->sdvo_lvds_fixed_mode); + +- intel_sdvo_set_target_output(output, +- dev_priv->controlled_output); +- intel_sdvo_set_output_timing(output, &output_dtd); ++ intel_sdvo_set_target_output(intel_encoder, ++ dev_priv->attached_output); ++ intel_sdvo_set_output_timing(intel_encoder, &output_dtd); + + /* Set the input timing to the screen. Assume always input 0. 
*/ +- intel_sdvo_set_target_input(output, true, false); ++ intel_sdvo_set_target_input(intel_encoder, true, false); + + + success = intel_sdvo_create_preferred_input_timing( +- output, ++ intel_encoder, + mode->clock / 10, + mode->hdisplay, + mode->vdisplay); +@@ -1149,7 +1116,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, + if (success) { + struct intel_sdvo_dtd input_dtd; + +- intel_sdvo_get_preferred_input_timing(output, ++ intel_sdvo_get_preferred_input_timing(intel_encoder, + &input_dtd); + intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); + dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; +@@ -1181,8 +1148,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = encoder->crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); +- struct intel_output *output = enc_to_intel_output(encoder); +- struct intel_sdvo_priv *sdvo_priv = output->dev_priv; ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + u32 sdvox = 0; + int sdvo_pixel_multiply; + struct intel_sdvo_in_out_map in_out; +@@ -1198,15 +1165,15 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, + * channel on the motherboard. In a two-input device, the first input + * will be SDVOB and the second SDVOC. + */ +- in_out.in0 = sdvo_priv->controlled_output; ++ in_out.in0 = sdvo_priv->attached_output; + in_out.in1 = 0; + +- intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP, + &in_out, sizeof(in_out)); +- status = intel_sdvo_read_response(output, NULL, 0); ++ status = intel_sdvo_read_response(intel_encoder, NULL, 0); + + if (sdvo_priv->is_hdmi) { +- intel_sdvo_set_avi_infoframe(output, mode); ++ intel_sdvo_set_avi_infoframe(intel_encoder, mode); + sdvox |= SDVO_AUDIO_ENABLE; + } + +@@ -1223,16 +1190,16 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, + */ + if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { + /* Set the output timing to the screen */ +- intel_sdvo_set_target_output(output, +- sdvo_priv->controlled_output); +- intel_sdvo_set_output_timing(output, &input_dtd); ++ intel_sdvo_set_target_output(intel_encoder, ++ sdvo_priv->attached_output); ++ intel_sdvo_set_output_timing(intel_encoder, &input_dtd); + } + + /* Set the input timing to the screen. Assume always input 0. */ +- intel_sdvo_set_target_input(output, true, false); ++ intel_sdvo_set_target_input(intel_encoder, true, false); + + if (sdvo_priv->is_tv) +- intel_sdvo_set_tv_format(output); ++ intel_sdvo_set_tv_format(intel_encoder); + + /* We would like to use intel_sdvo_create_preferred_input_timing() to + * provide the device with a timing it can support, if it supports that +@@ -1240,29 +1207,29 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, + * output the preferred timing, and we don't support that currently. 
+ */ + #if 0 +- success = intel_sdvo_create_preferred_input_timing(output, clock, ++ success = intel_sdvo_create_preferred_input_timing(encoder, clock, + width, height); + if (success) { + struct intel_sdvo_dtd *input_dtd; + +- intel_sdvo_get_preferred_input_timing(output, &input_dtd); +- intel_sdvo_set_input_timing(output, &input_dtd); ++ intel_sdvo_get_preferred_input_timing(encoder, &input_dtd); ++ intel_sdvo_set_input_timing(encoder, &input_dtd); + } + #else +- intel_sdvo_set_input_timing(output, &input_dtd); ++ intel_sdvo_set_input_timing(intel_encoder, &input_dtd); + #endif + + switch (intel_sdvo_get_pixel_multiplier(mode)) { + case 1: +- intel_sdvo_set_clock_rate_mult(output, ++ intel_sdvo_set_clock_rate_mult(intel_encoder, + SDVO_CLOCK_RATE_MULT_1X); + break; + case 2: +- intel_sdvo_set_clock_rate_mult(output, ++ intel_sdvo_set_clock_rate_mult(intel_encoder, + SDVO_CLOCK_RATE_MULT_2X); + break; + case 4: +- intel_sdvo_set_clock_rate_mult(output, ++ intel_sdvo_set_clock_rate_mult(intel_encoder, + SDVO_CLOCK_RATE_MULT_4X); + break; + } +@@ -1273,8 +1240,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, + SDVO_VSYNC_ACTIVE_HIGH | + SDVO_HSYNC_ACTIVE_HIGH; + } else { +- sdvox |= I915_READ(sdvo_priv->output_device); +- switch (sdvo_priv->output_device) { ++ sdvox |= I915_READ(sdvo_priv->sdvo_reg); ++ switch (sdvo_priv->sdvo_reg) { + case SDVOB: + sdvox &= SDVOB_PRESERVE_MASK; + break; +@@ -1298,26 +1265,26 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, + + if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL) + sdvox |= SDVO_STALL_SELECT; +- intel_sdvo_write_sdvox(output, sdvox); ++ intel_sdvo_write_sdvox(intel_encoder, sdvox); + } + + static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) + { + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_output *intel_output = enc_to_intel_output(encoder); +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + u32 temp; + + if (mode != DRM_MODE_DPMS_ON) { +- intel_sdvo_set_active_outputs(intel_output, 0); ++ intel_sdvo_set_active_outputs(intel_encoder, 0); + if (0) +- intel_sdvo_set_encoder_power_state(intel_output, mode); ++ intel_sdvo_set_encoder_power_state(intel_encoder, mode); + + if (mode == DRM_MODE_DPMS_OFF) { +- temp = I915_READ(sdvo_priv->output_device); ++ temp = I915_READ(sdvo_priv->sdvo_reg); + if ((temp & SDVO_ENABLE) != 0) { +- intel_sdvo_write_sdvox(intel_output, temp & ~SDVO_ENABLE); ++ intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE); + } + } + } else { +@@ -1325,13 +1292,13 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) + int i; + u8 status; + +- temp = I915_READ(sdvo_priv->output_device); ++ temp = I915_READ(sdvo_priv->sdvo_reg); + if ((temp & SDVO_ENABLE) == 0) +- intel_sdvo_write_sdvox(intel_output, temp | SDVO_ENABLE); ++ intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE); + for (i = 0; i < 2; i++) + intel_wait_for_vblank(dev); + +- status = intel_sdvo_get_trained_inputs(intel_output, &input1, ++ status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, + &input2); + + +@@ -1345,109 +1312,18 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) + } + + if (0) +- intel_sdvo_set_encoder_power_state(intel_output, mode); +- intel_sdvo_set_active_outputs(intel_output, sdvo_priv->controlled_output); ++ 
intel_sdvo_set_encoder_power_state(intel_encoder, mode); ++ intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output); + } + return; + } + +-static void intel_sdvo_save(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; +- int o; +- +- sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_output); +- intel_sdvo_get_active_outputs(intel_output, &sdvo_priv->save_active_outputs); +- +- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { +- intel_sdvo_set_target_input(intel_output, true, false); +- intel_sdvo_get_input_timing(intel_output, +- &sdvo_priv->save_input_dtd_1); +- } +- +- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { +- intel_sdvo_set_target_input(intel_output, false, true); +- intel_sdvo_get_input_timing(intel_output, +- &sdvo_priv->save_input_dtd_2); +- } +- +- for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) +- { +- u16 this_output = (1 << o); +- if (sdvo_priv->caps.output_flags & this_output) +- { +- intel_sdvo_set_target_output(intel_output, this_output); +- intel_sdvo_get_output_timing(intel_output, +- &sdvo_priv->save_output_dtd[o]); +- } +- } +- if (sdvo_priv->is_tv) { +- /* XXX: Save TV format/enhancements. */ +- } +- +- sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device); +-} +- +-static void intel_sdvo_restore(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; +- int o; +- int i; +- bool input1, input2; +- u8 status; +- +- intel_sdvo_set_active_outputs(intel_output, 0); +- +- for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) +- { +- u16 this_output = (1 << o); +- if (sdvo_priv->caps.output_flags & this_output) { +- intel_sdvo_set_target_output(intel_output, this_output); +- intel_sdvo_set_output_timing(intel_output, &sdvo_priv->save_output_dtd[o]); +- } +- } +- +- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { +- intel_sdvo_set_target_input(intel_output, true, false); +- intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_1); +- } +- +- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { +- intel_sdvo_set_target_input(intel_output, false, true); +- intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_2); +- } +- +- intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult); +- +- if (sdvo_priv->is_tv) { +- /* XXX: Restore TV format/enhancements. 
*/ +- } +- +- intel_sdvo_write_sdvox(intel_output, sdvo_priv->save_SDVOX); +- +- if (sdvo_priv->save_SDVOX & SDVO_ENABLE) +- { +- for (i = 0; i < 2; i++) +- intel_wait_for_vblank(dev); +- status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2); +- if (status == SDVO_CMD_STATUS_SUCCESS && !input1) +- DRM_DEBUG_KMS("First %s output reported failure to " +- "sync\n", SDVO_NAME(sdvo_priv)); +- } +- +- intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs); +-} +- + static int intel_sdvo_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; +@@ -1472,37 +1348,39 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector, + return MODE_OK; + } + +-static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struct intel_sdvo_caps *caps) ++static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps) + { + u8 status; + +- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); +- status = intel_sdvo_read_response(intel_output, caps, sizeof(*caps)); ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); ++ status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps)); + if (status != SDVO_CMD_STATUS_SUCCESS) + return false; + + return true; + } + ++/* No use! */ ++#if 0 + struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) + { + struct drm_connector *connector = NULL; +- struct intel_output *iout = NULL; ++ struct intel_encoder *iout = NULL; + struct intel_sdvo_priv *sdvo; + + /* find the sdvo connector */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- iout = to_intel_output(connector); ++ iout = to_intel_encoder(connector); + + if (iout->type != INTEL_OUTPUT_SDVO) + continue; + + sdvo = iout->dev_priv; + +- if (sdvo->output_device == SDVOB && sdvoB) ++ if (sdvo->sdvo_reg == SDVOB && sdvoB) + return connector; + +- if (sdvo->output_device == SDVOC && !sdvoB) ++ if (sdvo->sdvo_reg == SDVOC && !sdvoB) + return connector; + + } +@@ -1514,16 +1392,16 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector) + { + u8 response[2]; + u8 status; +- struct intel_output *intel_output; ++ struct intel_encoder *intel_encoder; + DRM_DEBUG_KMS("\n"); + + if (!connector) + return 0; + +- intel_output = to_intel_output(connector); ++ intel_encoder = to_intel_encoder(connector); + +- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); +- status = intel_sdvo_read_response(intel_output, &response, 2); ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); ++ status = intel_sdvo_read_response(intel_encoder, &response, 2); + + if (response[0] !=0) + return 1; +@@ -1535,30 +1413,31 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) + { + u8 response[2]; + u8 status; +- struct intel_output *intel_output = to_intel_output(connector); ++ struct intel_encoder *intel_encoder = to_intel_encoder(connector); + +- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); +- intel_sdvo_read_response(intel_output, 
&response, 2); ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); ++ intel_sdvo_read_response(intel_encoder, &response, 2); + + if (on) { +- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); +- status = intel_sdvo_read_response(intel_output, &response, 2); ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); ++ status = intel_sdvo_read_response(intel_encoder, &response, 2); + +- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); + } else { + response[0] = 0; + response[1] = 0; +- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); + } + +- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); +- intel_sdvo_read_response(intel_output, &response, 2); ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); ++ intel_sdvo_read_response(intel_encoder, &response, 2); + } ++#endif + + static bool +-intel_sdvo_multifunc_encoder(struct intel_output *intel_output) ++intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder) + { +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + int caps = 0; + + if (sdvo_priv->caps.output_flags & +@@ -1592,12 +1471,17 @@ static struct drm_connector * + intel_find_analog_connector(struct drm_device *dev) + { + struct drm_connector *connector; +- struct intel_output *intel_output; +- +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- intel_output = to_intel_output(connector); +- if (intel_output->type == INTEL_OUTPUT_ANALOG) +- return connector; ++ struct drm_encoder *encoder; ++ struct intel_encoder *intel_encoder; ++ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ intel_encoder = enc_to_intel_encoder(encoder); ++ if (intel_encoder->type == INTEL_OUTPUT_ANALOG) { ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ if (connector && encoder == intel_attached_encoder(connector)) ++ return connector; ++ } ++ } + } + return NULL; + } +@@ -1621,16 +1505,17 @@ intel_analog_is_connected(struct drm_device *dev) + enum drm_connector_status + intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) + { +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + enum drm_connector_status status = connector_status_connected; + struct edid *edid = NULL; + +- edid = drm_get_edid(&intel_output->base, +- intel_output->ddc_bus); ++ edid = drm_get_edid(connector, ++ intel_encoder->ddc_bus); + + /* This is only applied to SDVO cards with multiple outputs */ +- if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) { ++ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) { + uint8_t saved_ddc, temp_ddc; + saved_ddc = sdvo_priv->ddc_bus; + temp_ddc = sdvo_priv->ddc_bus >> 1; +@@ -1640,8 +1525,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) + */ + while(temp_ddc > 1) { + sdvo_priv->ddc_bus = temp_ddc; +- edid = 
drm_get_edid(&intel_output->base, +- intel_output->ddc_bus); ++ edid = drm_get_edid(connector, ++ intel_encoder->ddc_bus); + if (edid) { + /* + * When we can get the EDID, maybe it is the +@@ -1660,8 +1545,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) + */ + if (edid == NULL && + sdvo_priv->analog_ddc_bus && +- !intel_analog_is_connected(intel_output->base.dev)) +- edid = drm_get_edid(&intel_output->base, ++ !intel_analog_is_connected(connector->dev)) ++ edid = drm_get_edid(connector, + sdvo_priv->analog_ddc_bus); + if (edid != NULL) { + /* Don't report the output as connected if it's a DVI-I +@@ -1676,7 +1561,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) + } + + kfree(edid); +- intel_output->base.display_info.raw_edid = NULL; ++ connector->display_info.raw_edid = NULL; + + } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) + status = connector_status_disconnected; +@@ -1688,16 +1573,20 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect + { + uint16_t response; + u8 status; +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; +- +- intel_sdvo_write_cmd(intel_output, ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_connector *intel_connector = to_intel_connector(connector); ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; ++ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; ++ enum drm_connector_status ret; ++ ++ intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); + if (sdvo_priv->is_tv) { + /* add 30ms delay when the output type is SDVO-TV */ + mdelay(30); + } +- status = intel_sdvo_read_response(intel_output, &response, 2); ++ status = intel_sdvo_read_response(intel_encoder, &response, 2); + + DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); + +@@ -1707,24 +1596,41 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect + if (response == 0) + return connector_status_disconnected; + +- if (intel_sdvo_multifunc_encoder(intel_output) && +- sdvo_priv->attached_output != response) { +- if (sdvo_priv->controlled_output != response && +- intel_sdvo_output_setup(intel_output, response) != true) +- return connector_status_unknown; +- sdvo_priv->attached_output = response; ++ sdvo_priv->attached_output = response; ++ ++ if ((sdvo_connector->output_flag & response) == 0) ++ ret = connector_status_disconnected; ++ else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) ++ ret = intel_sdvo_hdmi_sink_detect(connector, response); ++ else ++ ret = connector_status_connected; ++ ++ /* May update encoder flag for like clock for SDVO TV, etc.*/ ++ if (ret == connector_status_connected) { ++ sdvo_priv->is_tv = false; ++ sdvo_priv->is_lvds = false; ++ intel_encoder->needs_tv_clock = false; ++ ++ if (response & SDVO_TV_MASK) { ++ sdvo_priv->is_tv = true; ++ intel_encoder->needs_tv_clock = true; ++ } ++ if (response & SDVO_LVDS_MASK) ++ sdvo_priv->is_lvds = true; + } +- return intel_sdvo_hdmi_sink_detect(connector, response); ++ ++ return ret; + } + + static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ struct drm_encoder *encoder = 
intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + int num_modes; + + /* set the bus switch and get the modes */ +- num_modes = intel_ddc_get_modes(intel_output); ++ num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + + /* + * Mac mini hack. On this device, the DVI-I connector shares one DDC +@@ -1734,17 +1640,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) + */ + if (num_modes == 0 && + sdvo_priv->analog_ddc_bus && +- !intel_analog_is_connected(intel_output->base.dev)) { +- struct i2c_adapter *digital_ddc_bus; +- ++ !intel_analog_is_connected(connector->dev)) { + /* Switch to the analog ddc bus and try that + */ +- digital_ddc_bus = intel_output->ddc_bus; +- intel_output->ddc_bus = sdvo_priv->analog_ddc_bus; +- +- (void) intel_ddc_get_modes(intel_output); +- +- intel_output->ddc_bus = digital_ddc_bus; ++ (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus); + } + } + +@@ -1815,8 +1714,9 @@ struct drm_display_mode sdvo_tv_modes[] = { + + static void intel_sdvo_get_tv_modes(struct drm_connector *connector) + { +- struct intel_output *output = to_intel_output(connector); +- struct intel_sdvo_priv *sdvo_priv = output->dev_priv; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct intel_sdvo_sdtv_resolution_request tv_res; + uint32_t reply = 0, format_map = 0; + int i; +@@ -1836,11 +1736,11 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) + sizeof(format_map) ? sizeof(format_map) : + sizeof(struct intel_sdvo_sdtv_resolution_request)); + +- intel_sdvo_set_target_output(output, sdvo_priv->controlled_output); ++ intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output); + +- intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, + &tv_res, sizeof(tv_res)); +- status = intel_sdvo_read_response(output, &reply, 3); ++ status = intel_sdvo_read_response(intel_encoder, &reply, 3); + if (status != SDVO_CMD_STATUS_SUCCESS) + return; + +@@ -1857,9 +1757,10 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) + + static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct drm_i915_private *dev_priv = connector->dev->dev_private; +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct drm_display_mode *newmode; + + /* +@@ -1867,7 +1768,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) + * Assume that the preferred modes are + * arranged in priority order. 
+ */ +- intel_ddc_get_modes(intel_output); ++ intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + if (list_empty(&connector->probed_modes) == false) + goto end; + +@@ -1896,12 +1797,12 @@ end: + + static int intel_sdvo_get_modes(struct drm_connector *connector) + { +- struct intel_output *output = to_intel_output(connector); +- struct intel_sdvo_priv *sdvo_priv = output->dev_priv; ++ struct intel_connector *intel_connector = to_intel_connector(connector); ++ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; + +- if (sdvo_priv->is_tv) ++ if (IS_TV(sdvo_connector)) + intel_sdvo_get_tv_modes(connector); +- else if (sdvo_priv->is_lvds == true) ++ else if (IS_LVDS(sdvo_connector)) + intel_sdvo_get_lvds_modes(connector); + else + intel_sdvo_get_ddc_modes(connector); +@@ -1914,11 +1815,11 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) + static + void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ struct intel_connector *intel_connector = to_intel_connector(connector); ++ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv; + struct drm_device *dev = connector->dev; + +- if (sdvo_priv->is_tv) { ++ if (IS_TV(sdvo_priv)) { + if (sdvo_priv->left_property) + drm_property_destroy(dev, sdvo_priv->left_property); + if (sdvo_priv->right_property) +@@ -1931,8 +1832,6 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) + drm_property_destroy(dev, sdvo_priv->hpos_property); + if (sdvo_priv->vpos_property) + drm_property_destroy(dev, sdvo_priv->vpos_property); +- } +- if (sdvo_priv->is_tv) { + if (sdvo_priv->saturation_property) + drm_property_destroy(dev, + sdvo_priv->saturation_property); +@@ -1942,7 +1841,7 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) + if (sdvo_priv->hue_property) + drm_property_destroy(dev, sdvo_priv->hue_property); + } +- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { ++ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) { + if (sdvo_priv->brightness_property) + drm_property_destroy(dev, + sdvo_priv->brightness_property); +@@ -1952,31 +1851,17 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) + + static void intel_sdvo_destroy(struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; +- +- if (intel_output->i2c_bus) +- intel_i2c_destroy(intel_output->i2c_bus); +- if (intel_output->ddc_bus) +- intel_i2c_destroy(intel_output->ddc_bus); +- if (sdvo_priv->analog_ddc_bus) +- intel_i2c_destroy(sdvo_priv->analog_ddc_bus); ++ struct intel_connector *intel_connector = to_intel_connector(connector); ++ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; + +- if (sdvo_priv->sdvo_lvds_fixed_mode != NULL) +- drm_mode_destroy(connector->dev, +- sdvo_priv->sdvo_lvds_fixed_mode); +- +- if (sdvo_priv->tv_format_property) ++ if (sdvo_connector->tv_format_property) + drm_property_destroy(connector->dev, +- sdvo_priv->tv_format_property); +- +- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) +- intel_sdvo_destroy_enhance_property(connector); ++ sdvo_connector->tv_format_property); + ++ intel_sdvo_destroy_enhance_property(connector); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); +- +- kfree(intel_output); ++ kfree(connector); + } + + static int +@@ -1984,9 
+1869,11 @@ intel_sdvo_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) + { +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; +- struct drm_encoder *encoder = &intel_output->enc; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; ++ struct intel_connector *intel_connector = to_intel_connector(connector); ++ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; + struct drm_crtc *crtc = encoder->crtc; + int ret = 0; + bool changed = false; +@@ -1997,105 +1884,105 @@ intel_sdvo_set_property(struct drm_connector *connector, + if (ret < 0) + goto out; + +- if (property == sdvo_priv->tv_format_property) { ++ if (property == sdvo_connector->tv_format_property) { + if (val >= TV_FORMAT_NUM) { + ret = -EINVAL; + goto out; + } + if (sdvo_priv->tv_format_name == +- sdvo_priv->tv_format_supported[val]) ++ sdvo_connector->tv_format_supported[val]) + goto out; + +- sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val]; ++ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val]; + changed = true; + } + +- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { ++ if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) { + cmd = 0; + temp_value = val; +- if (sdvo_priv->left_property == property) { ++ if (sdvo_connector->left_property == property) { + drm_connector_property_set_value(connector, +- sdvo_priv->right_property, val); +- if (sdvo_priv->left_margin == temp_value) ++ sdvo_connector->right_property, val); ++ if (sdvo_connector->left_margin == temp_value) + goto out; + +- sdvo_priv->left_margin = temp_value; +- sdvo_priv->right_margin = temp_value; +- temp_value = sdvo_priv->max_hscan - +- sdvo_priv->left_margin; ++ sdvo_connector->left_margin = temp_value; ++ sdvo_connector->right_margin = temp_value; ++ temp_value = sdvo_connector->max_hscan - ++ sdvo_connector->left_margin; + cmd = SDVO_CMD_SET_OVERSCAN_H; +- } else if (sdvo_priv->right_property == property) { ++ } else if (sdvo_connector->right_property == property) { + drm_connector_property_set_value(connector, +- sdvo_priv->left_property, val); +- if (sdvo_priv->right_margin == temp_value) ++ sdvo_connector->left_property, val); ++ if (sdvo_connector->right_margin == temp_value) + goto out; + +- sdvo_priv->left_margin = temp_value; +- sdvo_priv->right_margin = temp_value; +- temp_value = sdvo_priv->max_hscan - +- sdvo_priv->left_margin; ++ sdvo_connector->left_margin = temp_value; ++ sdvo_connector->right_margin = temp_value; ++ temp_value = sdvo_connector->max_hscan - ++ sdvo_connector->left_margin; + cmd = SDVO_CMD_SET_OVERSCAN_H; +- } else if (sdvo_priv->top_property == property) { ++ } else if (sdvo_connector->top_property == property) { + drm_connector_property_set_value(connector, +- sdvo_priv->bottom_property, val); +- if (sdvo_priv->top_margin == temp_value) ++ sdvo_connector->bottom_property, val); ++ if (sdvo_connector->top_margin == temp_value) + goto out; + +- sdvo_priv->top_margin = temp_value; +- sdvo_priv->bottom_margin = temp_value; +- temp_value = sdvo_priv->max_vscan - +- sdvo_priv->top_margin; ++ sdvo_connector->top_margin = temp_value; ++ sdvo_connector->bottom_margin = temp_value; ++ temp_value = sdvo_connector->max_vscan - ++ sdvo_connector->top_margin; + cmd = SDVO_CMD_SET_OVERSCAN_V; +- } else if 
(sdvo_priv->bottom_property == property) { ++ } else if (sdvo_connector->bottom_property == property) { + drm_connector_property_set_value(connector, +- sdvo_priv->top_property, val); +- if (sdvo_priv->bottom_margin == temp_value) ++ sdvo_connector->top_property, val); ++ if (sdvo_connector->bottom_margin == temp_value) + goto out; +- sdvo_priv->top_margin = temp_value; +- sdvo_priv->bottom_margin = temp_value; +- temp_value = sdvo_priv->max_vscan - +- sdvo_priv->top_margin; ++ sdvo_connector->top_margin = temp_value; ++ sdvo_connector->bottom_margin = temp_value; ++ temp_value = sdvo_connector->max_vscan - ++ sdvo_connector->top_margin; + cmd = SDVO_CMD_SET_OVERSCAN_V; +- } else if (sdvo_priv->hpos_property == property) { +- if (sdvo_priv->cur_hpos == temp_value) ++ } else if (sdvo_connector->hpos_property == property) { ++ if (sdvo_connector->cur_hpos == temp_value) + goto out; + + cmd = SDVO_CMD_SET_POSITION_H; +- sdvo_priv->cur_hpos = temp_value; +- } else if (sdvo_priv->vpos_property == property) { +- if (sdvo_priv->cur_vpos == temp_value) ++ sdvo_connector->cur_hpos = temp_value; ++ } else if (sdvo_connector->vpos_property == property) { ++ if (sdvo_connector->cur_vpos == temp_value) + goto out; + + cmd = SDVO_CMD_SET_POSITION_V; +- sdvo_priv->cur_vpos = temp_value; +- } else if (sdvo_priv->saturation_property == property) { +- if (sdvo_priv->cur_saturation == temp_value) ++ sdvo_connector->cur_vpos = temp_value; ++ } else if (sdvo_connector->saturation_property == property) { ++ if (sdvo_connector->cur_saturation == temp_value) + goto out; + + cmd = SDVO_CMD_SET_SATURATION; +- sdvo_priv->cur_saturation = temp_value; +- } else if (sdvo_priv->contrast_property == property) { +- if (sdvo_priv->cur_contrast == temp_value) ++ sdvo_connector->cur_saturation = temp_value; ++ } else if (sdvo_connector->contrast_property == property) { ++ if (sdvo_connector->cur_contrast == temp_value) + goto out; + + cmd = SDVO_CMD_SET_CONTRAST; +- sdvo_priv->cur_contrast = temp_value; +- } else if (sdvo_priv->hue_property == property) { +- if (sdvo_priv->cur_hue == temp_value) ++ sdvo_connector->cur_contrast = temp_value; ++ } else if (sdvo_connector->hue_property == property) { ++ if (sdvo_connector->cur_hue == temp_value) + goto out; + + cmd = SDVO_CMD_SET_HUE; +- sdvo_priv->cur_hue = temp_value; +- } else if (sdvo_priv->brightness_property == property) { +- if (sdvo_priv->cur_brightness == temp_value) ++ sdvo_connector->cur_hue = temp_value; ++ } else if (sdvo_connector->brightness_property == property) { ++ if (sdvo_connector->cur_brightness == temp_value) + goto out; + + cmd = SDVO_CMD_SET_BRIGHTNESS; +- sdvo_priv->cur_brightness = temp_value; ++ sdvo_connector->cur_brightness = temp_value; + } + if (cmd) { +- intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2); +- status = intel_sdvo_read_response(intel_output, ++ intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2); ++ status = intel_sdvo_read_response(intel_encoder, + NULL, 0); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO command \n"); +@@ -2121,8 +2008,6 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { + + static const struct drm_connector_funcs intel_sdvo_connector_funcs = { + .dpms = drm_helper_connector_dpms, +- .save = intel_sdvo_save, +- .restore = intel_sdvo_restore, + .detect = intel_sdvo_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = intel_sdvo_set_property, +@@ -2132,12 +2017,27 @@ static const struct drm_connector_funcs 
intel_sdvo_connector_funcs = { + static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { + .get_modes = intel_sdvo_get_modes, + .mode_valid = intel_sdvo_mode_valid, +- .best_encoder = intel_best_encoder, ++ .best_encoder = intel_attached_encoder, + }; + + static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) + { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; ++ ++ if (intel_encoder->i2c_bus) ++ intel_i2c_destroy(intel_encoder->i2c_bus); ++ if (intel_encoder->ddc_bus) ++ intel_i2c_destroy(intel_encoder->ddc_bus); ++ if (sdvo_priv->analog_ddc_bus) ++ intel_i2c_destroy(sdvo_priv->analog_ddc_bus); ++ ++ if (sdvo_priv->sdvo_lvds_fixed_mode != NULL) ++ drm_mode_destroy(encoder->dev, ++ sdvo_priv->sdvo_lvds_fixed_mode); ++ + drm_encoder_cleanup(encoder); ++ kfree(intel_encoder); + } + + static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { +@@ -2190,12 +2090,15 @@ intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv) + } + + static bool +-intel_sdvo_get_digital_encoding_mode(struct intel_output *output) ++intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device) + { + struct intel_sdvo_priv *sdvo_priv = output->dev_priv; + uint8_t status; + +- intel_sdvo_set_target_output(output, sdvo_priv->controlled_output); ++ if (device == 0) ++ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0); ++ else ++ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1); + + intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0); + status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1); +@@ -2204,42 +2107,40 @@ intel_sdvo_get_digital_encoding_mode(struct intel_output *output) + return true; + } + +-static struct intel_output * +-intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan) ++static struct intel_encoder * ++intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan) + { + struct drm_device *dev = chan->drm_dev; +- struct drm_connector *connector; +- struct intel_output *intel_output = NULL; ++ struct drm_encoder *encoder; ++ struct intel_encoder *intel_encoder = NULL; + +- list_for_each_entry(connector, +- &dev->mode_config.connector_list, head) { +- if (to_intel_output(connector)->ddc_bus == &chan->adapter) { +- intel_output = to_intel_output(connector); ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ intel_encoder = enc_to_intel_encoder(encoder); ++ if (intel_encoder->ddc_bus == &chan->adapter) + break; +- } + } +- return intel_output; ++ return intel_encoder; + } + + static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg msgs[], int num) + { +- struct intel_output *intel_output; ++ struct intel_encoder *intel_encoder; + struct intel_sdvo_priv *sdvo_priv; + struct i2c_algo_bit_data *algo_data; + const struct i2c_algorithm *algo; + + algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; +- intel_output = +- intel_sdvo_chan_to_intel_output( ++ intel_encoder = ++ intel_sdvo_chan_to_intel_encoder( + (struct intel_i2c_chan *)(algo_data->data)); +- if (intel_output == NULL) ++ if (intel_encoder == NULL) + return -EINVAL; + +- sdvo_priv = intel_output->dev_priv; +- algo = intel_output->i2c_bus->algo; ++ sdvo_priv = intel_encoder->dev_priv; ++ algo = intel_encoder->i2c_bus->algo; + +- intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); ++ intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus); + return algo->master_xfer(i2c_adap, msgs, 
num); + } + +@@ -2248,12 +2149,12 @@ static struct i2c_algorithm intel_sdvo_i2c_bit_algo = { + }; + + static u8 +-intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) ++intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) + { + struct drm_i915_private *dev_priv = dev->dev_private; + struct sdvo_device_mapping *my_mapping, *other_mapping; + +- if (output_device == SDVOB) { ++ if (IS_SDVOB(sdvo_reg)) { + my_mapping = &dev_priv->sdvo_mappings[0]; + other_mapping = &dev_priv->sdvo_mappings[1]; + } else { +@@ -2278,120 +2179,235 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) + /* No SDVO device info is found for another DVO port, + * so use mapping assumption we had before BIOS parsing. + */ +- if (output_device == SDVOB) ++ if (IS_SDVOB(sdvo_reg)) + return 0x70; + else + return 0x72; + } + +-static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id) ++static bool ++intel_sdvo_connector_alloc (struct intel_connector **ret) + { +- DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident); +- return 1; ++ struct intel_connector *intel_connector; ++ struct intel_sdvo_connector *sdvo_connector; ++ ++ *ret = kzalloc(sizeof(*intel_connector) + ++ sizeof(*sdvo_connector), GFP_KERNEL); ++ if (!*ret) ++ return false; ++ ++ intel_connector = *ret; ++ sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1); ++ intel_connector->dev_priv = sdvo_connector; ++ ++ return true; + } + +-static struct dmi_system_id intel_sdvo_bad_tv[] = { +- { +- .callback = intel_sdvo_bad_tv_callback, +- .ident = "IntelG45/ICH10R/DME1737", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"), +- DMI_MATCH(DMI_PRODUCT_NAME, "4800784"), +- }, +- }, ++static void ++intel_sdvo_connector_create (struct drm_encoder *encoder, ++ struct drm_connector *connector) ++{ ++ drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs, ++ connector->connector_type); + +- { } /* terminating entry */ +-}; ++ drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); ++ ++ connector->interlace_allowed = 0; ++ connector->doublescan_allowed = 0; ++ connector->display_info.subpixel_order = SubPixelHorizontalRGB; ++ ++ drm_mode_connector_attach_encoder(connector, encoder); ++ drm_sysfs_connector_add(connector); ++} ++ ++static bool ++intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device) ++{ ++ struct drm_encoder *encoder = &intel_encoder->enc; ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; ++ struct drm_connector *connector; ++ struct intel_connector *intel_connector; ++ struct intel_sdvo_connector *sdvo_connector; ++ ++ if (!intel_sdvo_connector_alloc(&intel_connector)) ++ return false; ++ ++ sdvo_connector = intel_connector->dev_priv; ++ ++ if (device == 0) { ++ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0; ++ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0; ++ } else if (device == 1) { ++ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1; ++ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1; ++ } ++ ++ connector = &intel_connector->base; ++ encoder->encoder_type = DRM_MODE_ENCODER_TMDS; ++ connector->connector_type = DRM_MODE_CONNECTOR_DVID; ++ ++ if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode) ++ && intel_sdvo_get_digital_encoding_mode(intel_encoder, device) ++ && sdvo_priv->is_hdmi) { ++ /* enable hdmi encoding mode if supported */ ++ intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI); ++ intel_sdvo_set_colorimetry(intel_encoder, ++ SDVO_COLORIMETRY_RGB256); ++ 
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; ++ } ++ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | ++ (1 << INTEL_ANALOG_CLONE_BIT); ++ ++ intel_sdvo_connector_create(encoder, connector); ++ ++ return true; ++} ++ ++static bool ++intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type) ++{ ++ struct drm_encoder *encoder = &intel_encoder->enc; ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; ++ struct drm_connector *connector; ++ struct intel_connector *intel_connector; ++ struct intel_sdvo_connector *sdvo_connector; ++ ++ if (!intel_sdvo_connector_alloc(&intel_connector)) ++ return false; ++ ++ connector = &intel_connector->base; ++ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; ++ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; ++ sdvo_connector = intel_connector->dev_priv; ++ ++ sdvo_priv->controlled_output |= type; ++ sdvo_connector->output_flag = type; ++ ++ sdvo_priv->is_tv = true; ++ intel_encoder->needs_tv_clock = true; ++ intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; ++ ++ intel_sdvo_connector_create(encoder, connector); ++ ++ intel_sdvo_tv_create_property(connector, type); ++ ++ intel_sdvo_create_enhance_property(connector); ++ ++ return true; ++} ++ ++static bool ++intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device) ++{ ++ struct drm_encoder *encoder = &intel_encoder->enc; ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; ++ struct drm_connector *connector; ++ struct intel_connector *intel_connector; ++ struct intel_sdvo_connector *sdvo_connector; ++ ++ if (!intel_sdvo_connector_alloc(&intel_connector)) ++ return false; ++ ++ connector = &intel_connector->base; ++ encoder->encoder_type = DRM_MODE_ENCODER_DAC; ++ connector->connector_type = DRM_MODE_CONNECTOR_VGA; ++ sdvo_connector = intel_connector->dev_priv; ++ ++ if (device == 0) { ++ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0; ++ sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; ++ } else if (device == 1) { ++ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1; ++ sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; ++ } ++ ++ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | ++ (1 << INTEL_ANALOG_CLONE_BIT); ++ ++ intel_sdvo_connector_create(encoder, connector); ++ return true; ++} ++ ++static bool ++intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device) ++{ ++ struct drm_encoder *encoder = &intel_encoder->enc; ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; ++ struct drm_connector *connector; ++ struct intel_connector *intel_connector; ++ struct intel_sdvo_connector *sdvo_connector; ++ ++ if (!intel_sdvo_connector_alloc(&intel_connector)) ++ return false; ++ ++ connector = &intel_connector->base; ++ encoder->encoder_type = DRM_MODE_ENCODER_LVDS; ++ connector->connector_type = DRM_MODE_CONNECTOR_LVDS; ++ sdvo_connector = intel_connector->dev_priv; ++ ++ sdvo_priv->is_lvds = true; ++ ++ if (device == 0) { ++ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0; ++ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; ++ } else if (device == 1) { ++ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1; ++ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; ++ } ++ ++ intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | ++ (1 << INTEL_SDVO_LVDS_CLONE_BIT); ++ ++ intel_sdvo_connector_create(encoder, connector); ++ intel_sdvo_create_enhance_property(connector); ++ return true; ++} + + static bool +-intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) 
++intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags) + { +- struct drm_connector *connector = &intel_output->base; +- struct drm_encoder *encoder = &intel_output->enc; +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; +- bool ret = true, registered = false; ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + + sdvo_priv->is_tv = false; +- intel_output->needs_tv_clock = false; ++ intel_encoder->needs_tv_clock = false; + sdvo_priv->is_lvds = false; + +- if (device_is_registered(&connector->kdev)) { +- drm_sysfs_connector_remove(connector); +- registered = true; +- } ++ /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ + +- if (flags & +- (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { +- if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) +- sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0; +- else +- sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; +- +- encoder->encoder_type = DRM_MODE_ENCODER_TMDS; +- connector->connector_type = DRM_MODE_CONNECTOR_DVID; +- +- if (intel_sdvo_get_supp_encode(intel_output, +- &sdvo_priv->encode) && +- intel_sdvo_get_digital_encoding_mode(intel_output) && +- sdvo_priv->is_hdmi) { +- /* enable hdmi encoding mode if supported */ +- intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); +- intel_sdvo_set_colorimetry(intel_output, +- SDVO_COLORIMETRY_RGB256); +- connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; +- intel_output->clone_mask = +- (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | +- (1 << INTEL_ANALOG_CLONE_BIT); +- } +- } else if ((flags & SDVO_OUTPUT_SVID0) && +- !dmi_check_system(intel_sdvo_bad_tv)) { +- +- sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; +- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; +- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; +- sdvo_priv->is_tv = true; +- intel_output->needs_tv_clock = true; +- intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; +- } else if (flags & SDVO_OUTPUT_RGB0) { +- +- sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; +- encoder->encoder_type = DRM_MODE_ENCODER_DAC; +- connector->connector_type = DRM_MODE_CONNECTOR_VGA; +- intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | +- (1 << INTEL_ANALOG_CLONE_BIT); +- } else if (flags & SDVO_OUTPUT_RGB1) { +- +- sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; +- encoder->encoder_type = DRM_MODE_ENCODER_DAC; +- connector->connector_type = DRM_MODE_CONNECTOR_VGA; +- intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | +- (1 << INTEL_ANALOG_CLONE_BIT); +- } else if (flags & SDVO_OUTPUT_CVBS0) { +- +- sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0; +- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; +- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; +- sdvo_priv->is_tv = true; +- intel_output->needs_tv_clock = true; +- intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; +- } else if (flags & SDVO_OUTPUT_LVDS0) { +- +- sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; +- encoder->encoder_type = DRM_MODE_ENCODER_LVDS; +- connector->connector_type = DRM_MODE_CONNECTOR_LVDS; +- sdvo_priv->is_lvds = true; +- intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | +- (1 << INTEL_SDVO_LVDS_CLONE_BIT); +- } else if (flags & SDVO_OUTPUT_LVDS1) { +- +- sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; +- encoder->encoder_type = DRM_MODE_ENCODER_LVDS; +- connector->connector_type = DRM_MODE_CONNECTOR_LVDS; +- sdvo_priv->is_lvds = true; +- intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | +- (1 << INTEL_SDVO_LVDS_CLONE_BIT); +- } else { ++ 
if (flags & SDVO_OUTPUT_TMDS0) ++ if (!intel_sdvo_dvi_init(intel_encoder, 0)) ++ return false; ++ ++ if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK) ++ if (!intel_sdvo_dvi_init(intel_encoder, 1)) ++ return false; ++ ++ /* TV has no XXX1 function block */ ++ if (flags & SDVO_OUTPUT_SVID0) ++ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0)) ++ return false; ++ ++ if (flags & SDVO_OUTPUT_CVBS0) ++ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0)) ++ return false; ++ ++ if (flags & SDVO_OUTPUT_RGB0) ++ if (!intel_sdvo_analog_init(intel_encoder, 0)) ++ return false; ++ ++ if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK) ++ if (!intel_sdvo_analog_init(intel_encoder, 1)) ++ return false; + ++ if (flags & SDVO_OUTPUT_LVDS0) ++ if (!intel_sdvo_lvds_init(intel_encoder, 0)) ++ return false; ++ ++ if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK) ++ if (!intel_sdvo_lvds_init(intel_encoder, 1)) ++ return false; ++ ++ if ((flags & SDVO_OUTPUT_MASK) == 0) { + unsigned char bytes[2]; + + sdvo_priv->controlled_output = 0; +@@ -2399,32 +2415,29 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) + DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", + SDVO_NAME(sdvo_priv), + bytes[0], bytes[1]); +- ret = false; ++ return false; + } +- intel_output->crtc_mask = (1 << 0) | (1 << 1); +- +- if (ret && registered) +- ret = drm_sysfs_connector_add(connector) == 0 ? true : false; +- +- +- return ret; ++ intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + ++ return true; + } + +-static void intel_sdvo_tv_create_property(struct drm_connector *connector) ++static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type) + { +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; ++ struct intel_connector *intel_connector = to_intel_connector(connector); ++ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; + struct intel_sdvo_tv_format format; + uint32_t format_map, i; + uint8_t status; + +- intel_sdvo_set_target_output(intel_output, +- sdvo_priv->controlled_output); ++ intel_sdvo_set_target_output(intel_encoder, type); + +- intel_sdvo_write_cmd(intel_output, ++ intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); +- status = intel_sdvo_read_response(intel_output, ++ status = intel_sdvo_read_response(intel_encoder, + &format, sizeof(format)); + if (status != SDVO_CMD_STATUS_SUCCESS) + return; +@@ -2435,43 +2448,45 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector) + if (format_map == 0) + return; + +- sdvo_priv->format_supported_num = 0; ++ sdvo_connector->format_supported_num = 0; + for (i = 0 ; i < TV_FORMAT_NUM; i++) + if (format_map & (1 << i)) { +- sdvo_priv->tv_format_supported +- [sdvo_priv->format_supported_num++] = ++ sdvo_connector->tv_format_supported ++ [sdvo_connector->format_supported_num++] = + tv_format_names[i]; + } + + +- sdvo_priv->tv_format_property = ++ sdvo_connector->tv_format_property = + drm_property_create( + connector->dev, DRM_MODE_PROP_ENUM, +- "mode", sdvo_priv->format_supported_num); ++ "mode", sdvo_connector->format_supported_num); + +- for (i = 0; i < sdvo_priv->format_supported_num; i++) ++ for (i = 0; i < sdvo_connector->format_supported_num; i++) + 
drm_property_add_enum( +- sdvo_priv->tv_format_property, i, +- i, sdvo_priv->tv_format_supported[i]); ++ sdvo_connector->tv_format_property, i, ++ i, sdvo_connector->tv_format_supported[i]); + +- sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[0]; ++ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0]; + drm_connector_attach_property( +- connector, sdvo_priv->tv_format_property, 0); ++ connector, sdvo_connector->tv_format_property, 0); + + } + + static void intel_sdvo_create_enhance_property(struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_connector *intel_connector = to_intel_connector(connector); ++ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv; + struct intel_sdvo_enhancements_reply sdvo_data; + struct drm_device *dev = connector->dev; + uint8_t status; + uint16_t response, data_value[2]; + +- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, ++ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, + NULL, 0); +- status = intel_sdvo_read_response(intel_output, &sdvo_data, ++ status = intel_sdvo_read_response(intel_encoder, &sdvo_data, + sizeof(sdvo_data)); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS(" incorrect response is returned\n"); +@@ -2482,23 +2497,23 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) + DRM_DEBUG_KMS("No enhancement is supported\n"); + return; + } +- if (sdvo_priv->is_tv) { ++ if (IS_TV(sdvo_priv)) { + /* when horizontal overscan is supported, Add the left/right + * property + */ + if (sdvo_data.overscan_h) { +- intel_sdvo_write_cmd(intel_output, ++ intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); +- status = intel_sdvo_read_response(intel_output, ++ status = intel_sdvo_read_response(intel_encoder, + &data_value, 4); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO max " + "h_overscan\n"); + return; + } +- intel_sdvo_write_cmd(intel_output, ++ intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_OVERSCAN_H, NULL, 0); +- status = intel_sdvo_read_response(intel_output, ++ status = intel_sdvo_read_response(intel_encoder, + &response, 2); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); +@@ -2528,18 +2543,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) + data_value[0], data_value[1], response); + } + if (sdvo_data.overscan_v) { +- intel_sdvo_write_cmd(intel_output, ++ intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); +- status = intel_sdvo_read_response(intel_output, ++ status = intel_sdvo_read_response(intel_encoder, + &data_value, 4); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO max " + "v_overscan\n"); + return; + } +- intel_sdvo_write_cmd(intel_output, ++ intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_OVERSCAN_V, NULL, 0); +- status = intel_sdvo_read_response(intel_output, ++ status = intel_sdvo_read_response(intel_encoder, + &response, 2); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); +@@ -2569,17 +2584,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) + data_value[0], data_value[1], 
response); + } + if (sdvo_data.position_h) { +- intel_sdvo_write_cmd(intel_output, ++ intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); +- status = intel_sdvo_read_response(intel_output, ++ status = intel_sdvo_read_response(intel_encoder, + &data_value, 4); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); + return; + } +- intel_sdvo_write_cmd(intel_output, ++ intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_POSITION_H, NULL, 0); +- status = intel_sdvo_read_response(intel_output, ++ status = intel_sdvo_read_response(intel_encoder, + &response, 2); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n"); +@@ -2600,17 +2615,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) + data_value[0], data_value[1], response); + } + if (sdvo_data.position_v) { +- intel_sdvo_write_cmd(intel_output, ++ intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); +- status = intel_sdvo_read_response(intel_output, ++ status = intel_sdvo_read_response(intel_encoder, + &data_value, 4); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); + return; + } +- intel_sdvo_write_cmd(intel_output, ++ intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_POSITION_V, NULL, 0); +- status = intel_sdvo_read_response(intel_output, ++ status = intel_sdvo_read_response(intel_encoder, + &response, 2); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n"); +@@ -2630,20 +2645,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) + "default %d, current %d\n", + data_value[0], data_value[1], response); + } +- } +- if (sdvo_priv->is_tv) { + if (sdvo_data.saturation) { +- intel_sdvo_write_cmd(intel_output, ++ intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_MAX_SATURATION, NULL, 0); +- status = intel_sdvo_read_response(intel_output, ++ status = intel_sdvo_read_response(intel_encoder, + &data_value, 4); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); + return; + } +- intel_sdvo_write_cmd(intel_output, ++ intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_SATURATION, NULL, 0); +- status = intel_sdvo_read_response(intel_output, ++ status = intel_sdvo_read_response(intel_encoder, + &response, 2); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); +@@ -2665,17 +2678,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) + data_value[0], data_value[1], response); + } + if (sdvo_data.contrast) { +- intel_sdvo_write_cmd(intel_output, ++ intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); +- status = intel_sdvo_read_response(intel_output, ++ status = intel_sdvo_read_response(intel_encoder, + &data_value, 4); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); + return; + } +- intel_sdvo_write_cmd(intel_output, ++ intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_CONTRAST, NULL, 0); +- status = intel_sdvo_read_response(intel_output, ++ status = intel_sdvo_read_response(intel_encoder, + &response, 2); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); +@@ -2696,17 +2709,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) + data_value[0], data_value[1], response); + } + if (sdvo_data.hue) { +- intel_sdvo_write_cmd(intel_output, ++ 
intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_MAX_HUE, NULL, 0); +- status = intel_sdvo_read_response(intel_output, ++ status = intel_sdvo_read_response(intel_encoder, + &data_value, 4); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); + return; + } +- intel_sdvo_write_cmd(intel_output, ++ intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_HUE, NULL, 0); +- status = intel_sdvo_read_response(intel_output, ++ status = intel_sdvo_read_response(intel_encoder, + &response, 2); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); +@@ -2727,19 +2740,19 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) + data_value[0], data_value[1], response); + } + } +- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { ++ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) { + if (sdvo_data.brightness) { +- intel_sdvo_write_cmd(intel_output, ++ intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); +- status = intel_sdvo_read_response(intel_output, ++ status = intel_sdvo_read_response(intel_encoder, + &data_value, 4); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); + return; + } +- intel_sdvo_write_cmd(intel_output, ++ intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_GET_BRIGHTNESS, NULL, 0); +- status = intel_sdvo_read_response(intel_output, ++ status = intel_sdvo_read_response(intel_encoder, + &response, 2); + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("Incorrect SDVO get brigh\n"); +@@ -2764,109 +2777,98 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) + return; + } + +-bool intel_sdvo_init(struct drm_device *dev, int output_device) ++bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) + { + struct drm_i915_private *dev_priv = dev->dev_private; +- struct drm_connector *connector; +- struct intel_output *intel_output; ++ struct intel_encoder *intel_encoder; + struct intel_sdvo_priv *sdvo_priv; +- + u8 ch[0x40]; + int i; ++ u32 i2c_reg, ddc_reg, analog_ddc_reg; + +- intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); +- if (!intel_output) { ++ intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); ++ if (!intel_encoder) { + return false; + } + +- sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); +- sdvo_priv->output_device = output_device; ++ sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1); ++ sdvo_priv->sdvo_reg = sdvo_reg; + +- intel_output->dev_priv = sdvo_priv; +- intel_output->type = INTEL_OUTPUT_SDVO; ++ intel_encoder->dev_priv = sdvo_priv; ++ intel_encoder->type = INTEL_OUTPUT_SDVO; ++ ++ if (HAS_PCH_SPLIT(dev)) { ++ i2c_reg = PCH_GPIOE; ++ ddc_reg = PCH_GPIOE; ++ analog_ddc_reg = PCH_GPIOA; ++ } else { ++ i2c_reg = GPIOE; ++ ddc_reg = GPIOE; ++ analog_ddc_reg = GPIOA; ++ } + + /* setup the DDC bus. 
*/ +- if (output_device == SDVOB) +- intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); ++ if (IS_SDVOB(sdvo_reg)) ++ intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB"); + else +- intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); ++ intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC"); + +- if (!intel_output->i2c_bus) ++ if (!intel_encoder->i2c_bus) + goto err_inteloutput; + +- sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device); ++ sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg); + + /* Save the bit-banging i2c functionality for use by the DDC wrapper */ +- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality; ++ intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality; + + /* Read the regs to test if we can talk to the device */ + for (i = 0; i < 0x40; i++) { +- if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { ++ if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) { + DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", +- output_device == SDVOB ? 'B' : 'C'); ++ IS_SDVOB(sdvo_reg) ? 'B' : 'C'); + goto err_i2c; + } + } + + /* setup the DDC bus. */ +- if (output_device == SDVOB) { +- intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); +- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, ++ if (IS_SDVOB(sdvo_reg)) { ++ intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS"); ++ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, + "SDVOB/VGA DDC BUS"); + dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; + } else { +- intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); +- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, ++ intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS"); ++ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, + "SDVOC/VGA DDC BUS"); + dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; + } + +- if (intel_output->ddc_bus == NULL) ++ if (intel_encoder->ddc_bus == NULL) + goto err_i2c; + + /* Wrap with our custom algo which switches to DDC mode */ +- intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; ++ intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; ++ ++ /* encoder type will be decided later */ ++ drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0); ++ drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); + + /* In default case sdvo lvds is false */ +- intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); ++ intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps); + +- if (intel_sdvo_output_setup(intel_output, ++ if (intel_sdvo_output_setup(intel_encoder, + sdvo_priv->caps.output_flags) != true) { + DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", +- output_device == SDVOB ? 'B' : 'C'); ++ IS_SDVOB(sdvo_reg) ? 
'B' : 'C'); + goto err_i2c; + } + +- +- connector = &intel_output->base; +- drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, +- connector->connector_type); +- +- drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); +- connector->interlace_allowed = 0; +- connector->doublescan_allowed = 0; +- connector->display_info.subpixel_order = SubPixelHorizontalRGB; +- +- drm_encoder_init(dev, &intel_output->enc, +- &intel_sdvo_enc_funcs, intel_output->enc.encoder_type); +- +- drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); +- +- drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); +- if (sdvo_priv->is_tv) +- intel_sdvo_tv_create_property(connector); +- +- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) +- intel_sdvo_create_enhance_property(connector); +- +- drm_sysfs_connector_add(connector); +- + intel_sdvo_select_ddc_bus(sdvo_priv); + + /* Set the input timing to the screen. Assume always input 0. */ +- intel_sdvo_set_target_input(intel_output, true, false); ++ intel_sdvo_set_target_input(intel_encoder, true, false); + +- intel_sdvo_get_input_pixel_clock_range(intel_output, ++ intel_sdvo_get_input_pixel_clock_range(intel_encoder, + &sdvo_priv->pixel_clock_min, + &sdvo_priv->pixel_clock_max); + +@@ -2893,12 +2895,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) + err_i2c: + if (sdvo_priv->analog_ddc_bus != NULL) + intel_i2c_destroy(sdvo_priv->analog_ddc_bus); +- if (intel_output->ddc_bus != NULL) +- intel_i2c_destroy(intel_output->ddc_bus); +- if (intel_output->i2c_bus != NULL) +- intel_i2c_destroy(intel_output->i2c_bus); ++ if (intel_encoder->ddc_bus != NULL) ++ intel_i2c_destroy(intel_encoder->ddc_bus); ++ if (intel_encoder->i2c_bus != NULL) ++ intel_i2c_destroy(intel_encoder->i2c_bus); + err_inteloutput: +- kfree(intel_output); ++ kfree(intel_encoder); + + return false; + } +diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c +--- a/drivers/gpu/drm/i915/intel_tv.c ++++ b/drivers/gpu/drm/i915/intel_tv.c +@@ -916,143 +916,6 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode) + } + } + +-static void +-intel_tv_save(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_tv_priv *tv_priv = intel_output->dev_priv; +- int i; +- +- tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1); +- tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2); +- tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3); +- tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1); +- tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2); +- tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3); +- tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4); +- tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5); +- tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6); +- tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7); +- tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1); +- tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2); +- tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3); +- +- tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y); +- tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2); +- tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U); +- tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2); +- tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V); +- tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2); +- tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS); +- tv_priv->save_TV_CLR_LEVEL = 
I915_READ(TV_CLR_LEVEL); +- tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS); +- tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE); +- tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1); +- tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2); +- tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3); +- +- for (i = 0; i < 60; i++) +- tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i <<2)); +- for (i = 0; i < 60; i++) +- tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i <<2)); +- for (i = 0; i < 43; i++) +- tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i <<2)); +- for (i = 0; i < 43; i++) +- tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i <<2)); +- +- tv_priv->save_TV_DAC = I915_READ(TV_DAC); +- tv_priv->save_TV_CTL = I915_READ(TV_CTL); +-} +- +-static void +-intel_tv_restore(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct drm_i915_private *dev_priv = dev->dev_private; +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_tv_priv *tv_priv = intel_output->dev_priv; +- struct drm_crtc *crtc = connector->encoder->crtc; +- struct intel_crtc *intel_crtc; +- int i; +- +- /* FIXME: No CRTC? */ +- if (!crtc) +- return; +- +- intel_crtc = to_intel_crtc(crtc); +- I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1); +- I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2); +- I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3); +- I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1); +- I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2); +- I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3); +- I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4); +- I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5); +- I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6); +- I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7); +- I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1); +- I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2); +- I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3); +- +- I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y); +- I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2); +- I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U); +- I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2); +- I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V); +- I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2); +- I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS); +- I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL); +- +- { +- int pipeconf_reg = (intel_crtc->pipe == 0) ? +- PIPEACONF : PIPEBCONF; +- int dspcntr_reg = (intel_crtc->plane == 0) ? +- DSPACNTR : DSPBCNTR; +- int pipeconf = I915_READ(pipeconf_reg); +- int dspcntr = I915_READ(dspcntr_reg); +- int dspbase_reg = (intel_crtc->plane == 0) ? +- DSPAADDR : DSPBADDR; +- /* Pipe must be off here */ +- I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); +- /* Flush the plane changes */ +- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); +- +- if (!IS_I9XX(dev)) { +- /* Wait for vblank for the disable to take effect */ +- intel_wait_for_vblank(dev); +- } +- +- I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE); +- /* Wait for vblank for the disable to take effect. 
*/ +- intel_wait_for_vblank(dev); +- +- /* Filter ctl must be set before TV_WIN_SIZE */ +- I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1); +- I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2); +- I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3); +- I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS); +- I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE); +- I915_WRITE(pipeconf_reg, pipeconf); +- I915_WRITE(dspcntr_reg, dspcntr); +- /* Flush the plane changes */ +- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); +- } +- +- for (i = 0; i < 60; i++) +- I915_WRITE(TV_H_LUMA_0 + (i <<2), tv_priv->save_TV_H_LUMA[i]); +- for (i = 0; i < 60; i++) +- I915_WRITE(TV_H_CHROMA_0 + (i <<2), tv_priv->save_TV_H_CHROMA[i]); +- for (i = 0; i < 43; i++) +- I915_WRITE(TV_V_LUMA_0 + (i <<2), tv_priv->save_TV_V_LUMA[i]); +- for (i = 0; i < 43; i++) +- I915_WRITE(TV_V_CHROMA_0 + (i <<2), tv_priv->save_TV_V_CHROMA[i]); +- +- I915_WRITE(TV_DAC, tv_priv->save_TV_DAC); +- I915_WRITE(TV_CTL, tv_priv->save_TV_CTL); +-} +- + static const struct tv_mode * + intel_tv_mode_lookup (char *tv_format) + { +@@ -1068,9 +931,9 @@ intel_tv_mode_lookup (char *tv_format) + } + + static const struct tv_mode * +-intel_tv_mode_find (struct intel_output *intel_output) ++intel_tv_mode_find (struct intel_encoder *intel_encoder) + { +- struct intel_tv_priv *tv_priv = intel_output->dev_priv; ++ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; + + return intel_tv_mode_lookup(tv_priv->tv_format); + } +@@ -1078,8 +941,9 @@ intel_tv_mode_find (struct intel_output *intel_output) + static enum drm_mode_status + intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) + { +- struct intel_output *intel_output = to_intel_output(connector); +- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); + + /* Ensure TV refresh is close to desired refresh */ + if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) +@@ -1095,8 +959,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, + { + struct drm_device *dev = encoder->dev; + struct drm_mode_config *drm_config = &dev->mode_config; +- struct intel_output *intel_output = enc_to_intel_output(encoder); +- const struct tv_mode *tv_mode = intel_tv_mode_find (intel_output); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder); + struct drm_encoder *other_encoder; + + if (!tv_mode) +@@ -1121,9 +985,9 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = encoder->crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); +- struct intel_output *intel_output = enc_to_intel_output(encoder); +- struct intel_tv_priv *tv_priv = intel_output->dev_priv; +- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; ++ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); + u32 tv_ctl; + u32 hctl1, hctl2, hctl3; + u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; +@@ -1360,9 +1224,9 @@ static const struct drm_display_mode reported_modes[] = { + * 
\return false if TV is disconnected. + */ + static int +-intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) ++intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder) + { +- struct drm_encoder *encoder = &intel_output->enc; ++ struct drm_encoder *encoder = &intel_encoder->enc; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long irqflags; +@@ -1441,9 +1305,10 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) + */ + static void intel_tv_find_better_format(struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_tv_priv *tv_priv = intel_output->dev_priv; +- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; ++ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); + int i; + + if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) == +@@ -1475,9 +1340,9 @@ intel_tv_detect(struct drm_connector *connector) + { + struct drm_crtc *crtc; + struct drm_display_mode mode; +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_tv_priv *tv_priv = intel_output->dev_priv; +- struct drm_encoder *encoder = &intel_output->enc; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; + int dpms_mode; + int type = tv_priv->type; + +@@ -1485,12 +1350,14 @@ intel_tv_detect(struct drm_connector *connector) + drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); + + if (encoder->crtc && encoder->crtc->enabled) { +- type = intel_tv_detect_type(encoder->crtc, intel_output); ++ type = intel_tv_detect_type(encoder->crtc, intel_encoder); + } else { +- crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); ++ crtc = intel_get_load_detect_pipe(intel_encoder, connector, ++ &mode, &dpms_mode); + if (crtc) { +- type = intel_tv_detect_type(crtc, intel_output); +- intel_release_load_detect_pipe(intel_output, dpms_mode); ++ type = intel_tv_detect_type(crtc, intel_encoder); ++ intel_release_load_detect_pipe(intel_encoder, connector, ++ dpms_mode); + } else + type = -1; + } +@@ -1525,8 +1392,9 @@ static void + intel_tv_chose_preferred_modes(struct drm_connector *connector, + struct drm_display_mode *mode_ptr) + { +- struct intel_output *intel_output = to_intel_output(connector); +- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); + + if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) + mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; +@@ -1550,8 +1418,9 @@ static int + intel_tv_get_modes(struct drm_connector *connector) + { + struct drm_display_mode *mode_ptr; +- struct intel_output *intel_output = to_intel_output(connector); +- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ const struct tv_mode *tv_mode = 
intel_tv_mode_find(intel_encoder); + int j, count = 0; + u64 tmp; + +@@ -1604,11 +1473,9 @@ intel_tv_get_modes(struct drm_connector *connector) + static void + intel_tv_destroy (struct drm_connector *connector) + { +- struct intel_output *intel_output = to_intel_output(connector); +- + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); +- kfree(intel_output); ++ kfree(connector); + } + + +@@ -1617,9 +1484,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop + uint64_t val) + { + struct drm_device *dev = connector->dev; +- struct intel_output *intel_output = to_intel_output(connector); +- struct intel_tv_priv *tv_priv = intel_output->dev_priv; +- struct drm_encoder *encoder = &intel_output->enc; ++ struct drm_encoder *encoder = intel_attached_encoder(connector); ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; + struct drm_crtc *crtc = encoder->crtc; + int ret = 0; + bool changed = false; +@@ -1676,8 +1543,6 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { + + static const struct drm_connector_funcs intel_tv_connector_funcs = { + .dpms = drm_helper_connector_dpms, +- .save = intel_tv_save, +- .restore = intel_tv_restore, + .detect = intel_tv_detect, + .destroy = intel_tv_destroy, + .set_property = intel_tv_set_property, +@@ -1687,12 +1552,15 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = { + static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { + .mode_valid = intel_tv_mode_valid, + .get_modes = intel_tv_get_modes, +- .best_encoder = intel_best_encoder, ++ .best_encoder = intel_attached_encoder, + }; + + static void intel_tv_enc_destroy(struct drm_encoder *encoder) + { ++ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); ++ + drm_encoder_cleanup(encoder); ++ kfree(intel_encoder); + } + + static const struct drm_encoder_funcs intel_tv_enc_funcs = { +@@ -1740,7 +1608,8 @@ intel_tv_init(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_connector *connector; +- struct intel_output *intel_output; ++ struct intel_encoder *intel_encoder; ++ struct intel_connector *intel_connector; + struct intel_tv_priv *tv_priv; + u32 tv_dac_on, tv_dac_off, save_tv_dac; + char **tv_format_names; +@@ -1780,28 +1649,34 @@ intel_tv_init(struct drm_device *dev) + (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) + return; + +- intel_output = kzalloc(sizeof(struct intel_output) + ++ intel_encoder = kzalloc(sizeof(struct intel_encoder) + + sizeof(struct intel_tv_priv), GFP_KERNEL); +- if (!intel_output) { ++ if (!intel_encoder) { ++ return; ++ } ++ ++ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); ++ if (!intel_connector) { ++ kfree(intel_encoder); + return; + } + +- connector = &intel_output->base; ++ connector = &intel_connector->base; + + drm_connector_init(dev, connector, &intel_tv_connector_funcs, + DRM_MODE_CONNECTOR_SVIDEO); + +- drm_encoder_init(dev, &intel_output->enc, &intel_tv_enc_funcs, ++ drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs, + DRM_MODE_ENCODER_TVDAC); + +- drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); +- tv_priv = (struct intel_tv_priv *)(intel_output + 1); +- intel_output->type = INTEL_OUTPUT_TVOUT; +- intel_output->crtc_mask = (1 << 0) | (1 << 1); +- intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT); +- intel_output->enc.possible_crtcs = ((1 << 0) | (1 
<< 1)); +- intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); +- intel_output->dev_priv = tv_priv; ++ drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); ++ tv_priv = (struct intel_tv_priv *)(intel_encoder + 1); ++ intel_encoder->type = INTEL_OUTPUT_TVOUT; ++ intel_encoder->crtc_mask = (1 << 0) | (1 << 1); ++ intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); ++ intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1)); ++ intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); ++ intel_encoder->dev_priv = tv_priv; + tv_priv->type = DRM_MODE_CONNECTOR_Unknown; + + /* BIOS margin values */ +@@ -1812,7 +1687,7 @@ intel_tv_init(struct drm_device *dev) + + tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); + +- drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs); ++ drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs); + drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); + connector->interlace_allowed = false; + connector->doublescan_allowed = false; +diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h +--- a/include/drm/drm_pciids.h ++++ b/include/drm/drm_pciids.h +@@ -558,4 +558,5 @@ + {0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ + {0x8086, 0x0042, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ + {0x8086, 0x0046, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ ++ {0x8086, 0x0102, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ + {0, 0, 0} diff --git a/drm-intel-sdvo-fix-2.patch b/drm-intel-sdvo-fix-2.patch new file mode 100644 index 0000000..8a27fa1 --- /dev/null +++ b/drm-intel-sdvo-fix-2.patch @@ -0,0 +1,107 @@ +From 4fd1c5ed73fc02d8bcabcb3a457c4c4efaef2099 Mon Sep 17 00:00:00 2001 +From: Adam Jackson +Date: Thu, 29 Apr 2010 14:05:18 -0400 +Subject: [PATCH] drm/i915: Be extra careful about A/D matching for multifunction SDVO + +If we're both RGB and TMDS capable, we'll have set up one connector for +each. When determining connectivity, require analog/digital state in +the EDID block to match analog/digital support in the connector. +Otherwise, both DVI and VGA will appear to be connected. 
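
The hunk further down reduces this to comparing two booleans: the EDID's digital bit against the connector's TMDS capability. A rough standalone sketch of just that matching rule, in plain C rather than the kernel code, with the helper name invented for the illustration:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the check: report "connected" only when the EDID's
 * digital bit agrees with the connector's digital (TMDS) capability. */
static const char *sdvo_match(bool edid_is_digital, bool connector_is_tmds)
{
        return edid_is_digital == connector_is_tmds ? "connected"
                                                    : "disconnected";
}

int main(void)
{
        printf("TMDS + digital EDID: %s\n", sdvo_match(true, true));
        printf("TMDS + analog EDID : %s\n", sdvo_match(false, true));
        printf("RGB  + digital EDID: %s\n", sdvo_match(true, false));
        printf("RGB  + analog EDID : %s\n", sdvo_match(false, false));
        return 0;
}

With that rule in place, whichever connector does not match the attached monitor reports disconnected, instead of both DVI and VGA claiming the same display.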
+ +Signed-off-by: Adam Jackson +--- + drivers/gpu/drm/i915/intel_sdvo.c | 45 +++++++++++++++++------------------- + 1 files changed, 21 insertions(+), 24 deletions(-) + +diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c +index f55c0d7..31ce975 100644 +--- a/drivers/gpu/drm/i915/intel_sdvo.c ++++ b/drivers/gpu/drm/i915/intel_sdvo.c +@@ -1503,16 +1503,17 @@ intel_analog_is_connected(struct drm_device *dev) + } + + enum drm_connector_status +-intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) ++intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) + { + struct drm_encoder *encoder = intel_attached_encoder(connector); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; ++ struct intel_connector *intel_connector = to_intel_connector(connector); ++ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; + enum drm_connector_status status = connector_status_connected; + struct edid *edid = NULL; + +- edid = drm_get_edid(connector, +- intel_encoder->ddc_bus); ++ edid = drm_get_edid(connector, intel_encoder->ddc_bus); + + /* This is only applied to SDVO cards with multiple outputs */ + if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) { +@@ -1525,8 +1526,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) + */ + while(temp_ddc > 1) { + sdvo_priv->ddc_bus = temp_ddc; +- edid = drm_get_edid(connector, +- intel_encoder->ddc_bus); ++ edid = drm_get_edid(connector, intel_encoder->ddc_bus); + if (edid) { + /* + * When we can get the EDID, maybe it is the +@@ -1543,28 +1543,25 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) + /* when there is no edid and no monitor is connected with VGA + * port, try to use the CRT ddc to read the EDID for DVI-connector + */ +- if (edid == NULL && +- sdvo_priv->analog_ddc_bus && ++ if (edid == NULL && sdvo_priv->analog_ddc_bus && + !intel_analog_is_connected(connector->dev)) +- edid = drm_get_edid(connector, +- sdvo_priv->analog_ddc_bus); ++ edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus); ++ + if (edid != NULL) { +- /* Don't report the output as connected if it's a DVI-I +- * connector with a non-digital EDID coming out. 
+- */ +- if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { +- if (edid->input & DRM_EDID_INPUT_DIGITAL) +- sdvo_priv->is_hdmi = +- drm_detect_hdmi_monitor(edid); +- else +- status = connector_status_disconnected; +- } ++ bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); ++ bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK); + +- kfree(edid); +- connector->display_info.raw_edid = NULL; ++ /* DDC bus is shared, match EDID to connector type */ ++ if (is_digital && need_digital) ++ sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); ++ else if (is_digital != need_digital) ++ status = connector_status_disconnected; + +- } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) ++ connector->display_info.raw_edid = NULL; ++ } else + status = connector_status_disconnected; ++ ++ kfree(edid); + + return status; + } +@@ -1600,8 +1597,8 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect + + if ((sdvo_connector->output_flag & response) == 0) + ret = connector_status_disconnected; +- else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) +- ret = intel_sdvo_hdmi_sink_detect(connector, response); ++ else if (response & SDVO_TMDS_MASK) ++ ret = intel_sdvo_hdmi_sink_detect(connector); + else + ret = connector_status_connected; + +-- +1.7.0.1 + diff --git a/drm-intel-sdvo-fix.patch b/drm-intel-sdvo-fix.patch new file mode 100644 index 0000000..98b6a00 --- /dev/null +++ b/drm-intel-sdvo-fix.patch @@ -0,0 +1,114 @@ +From ef59fcdd9d7fe818d36a0072c80770c0d1a3cc9c Mon Sep 17 00:00:00 2001 +From: Adam Jackson +Date: Fri, 23 Apr 2010 16:07:40 -0400 +Subject: [PATCH] drm/i915: Fix DDC bus selection for multifunction SDVO + +Multifunction SDVO cards stopped working after 14571b4, and would report +something that looked remarkably like an ADD2 SPD ROM instead of EDID. +This appears to be because DDC bus selection was utterly horked by that +commit; controlled_output was no longer always a single bit, so +intel_sdvo_select_ddc_bus would pick bus 0, which is (unsurprisingly) +the SPD ROM bus, not a DDC bus. + +So, instead of that, let's just use the DDC bus the child device table +tells us to use. I'm guessing at the bitmask and shifting from VBIOS +dumps, but it can't possibly be worse. + +cf. 
https://bugzilla.redhat.com/584229 + +Signed-off-by: Adam Jackson +--- + drivers/gpu/drm/i915/i915_drv.h | 1 + + drivers/gpu/drm/i915/intel_bios.c | 1 + + drivers/gpu/drm/i915/intel_sdvo.c | 41 ++++++++---------------------------- + 3 files changed, 11 insertions(+), 32 deletions(-) + +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h +index a43a4f5..5d609a8 100644 +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -135,6 +135,7 @@ struct sdvo_device_mapping { + u8 slave_addr; + u8 dvo_wiring; + u8 initialized; ++ u8 ddc_pin; + }; + + struct drm_i915_error_state { +diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c +index f9ba452..4c748d8 100644 +--- a/drivers/gpu/drm/i915/intel_bios.c ++++ b/drivers/gpu/drm/i915/intel_bios.c +@@ -366,6 +366,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, + p_mapping->dvo_port = p_child->dvo_port; + p_mapping->slave_addr = p_child->slave_addr; + p_mapping->dvo_wiring = p_child->dvo_wiring; ++ p_mapping->ddc_pin = p_child->ddc_pin; + p_mapping->initialized = 1; + } else { + DRM_DEBUG_KMS("Maybe one SDVO port is shared by " +diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c +index df9f997..f55c0d7 100644 +--- a/drivers/gpu/drm/i915/intel_sdvo.c ++++ b/drivers/gpu/drm/i915/intel_sdvo.c +@@ -2053,40 +2053,17 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { + * outputs, then LVDS outputs. + */ + static void +-intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv) ++intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, ++ struct intel_sdvo_priv *sdvo, u32 reg) + { +- uint16_t mask = 0; +- unsigned int num_bits; ++ struct sdvo_device_mapping *mapping; + +- /* Make a mask of outputs less than or equal to our own priority in the +- * list. +- */ +- switch (dev_priv->controlled_output) { +- case SDVO_OUTPUT_LVDS1: +- mask |= SDVO_OUTPUT_LVDS1; +- case SDVO_OUTPUT_LVDS0: +- mask |= SDVO_OUTPUT_LVDS0; +- case SDVO_OUTPUT_TMDS1: +- mask |= SDVO_OUTPUT_TMDS1; +- case SDVO_OUTPUT_TMDS0: +- mask |= SDVO_OUTPUT_TMDS0; +- case SDVO_OUTPUT_RGB1: +- mask |= SDVO_OUTPUT_RGB1; +- case SDVO_OUTPUT_RGB0: +- mask |= SDVO_OUTPUT_RGB0; +- break; +- } +- +- /* Count bits to find what number we are in the priority list. */ +- mask &= dev_priv->caps.output_flags; +- num_bits = hweight16(mask); +- if (num_bits > 3) { +- /* if more than 3 outputs, default to DDC bus 3 for now */ +- num_bits = 3; +- } ++ if (IS_SDVOB(reg)) ++ mapping = &(dev_priv->sdvo_mappings[0]); ++ else ++ mapping = &(dev_priv->sdvo_mappings[1]); + +- /* Corresponds to SDVO_CONTROL_BUS_DDCx */ +- dev_priv->ddc_bus = 1 << num_bits; ++ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); + } + + static bool +@@ -2863,7 +2840,7 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) + goto err_i2c; + } + +- intel_sdvo_select_ddc_bus(sdvo_priv); ++ intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg); + + /* Set the input timing to the screen. Assume always input 0. 
*/ + intel_sdvo_set_target_input(intel_encoder, true, false); +-- +1.7.0.1 + diff --git a/drm-next.patch b/drm-next.patch deleted file mode 100644 index b0954d8..0000000 --- a/drm-next.patch +++ /dev/null @@ -1,44478 +0,0 @@ -commit a77889d1b091dd6783db3e1b059cc378d37f9982 -Author: Kyle McMartin -Date: Wed Jun 16 15:06:54 2010 +0100 - - provide a knob to {en,dis}able radeon_pm - -commit ef24e3e0e644621e2c98d38f27f4b25d23875256 -Author: Kyle McMartin -Date: Wed Jun 16 14:51:26 2010 +0100 - - Merge local branch 'drm-since-1067b6c' - - % git log --no-merges --oneline 1067b6c..v2.6.35-rc3 -- drivers/gpu/drm - 8d86dc6 Revert "drm/i915: Don't enable pipe/plane/VCO early (wait for DPMS on)." - b62e948 drm/radeon: don't poll tv dac if crtc2 is in use. - d294ed6 drm/radeon: reset i2c valid to avoid incorrect tv-out polling. - 4eb3033 drm/nv50: fix iommu errors caused by device reading from address 0 - 7504794 drm/nouveau: off by one in init_i2c_device_find() - 55a4c5c nouveau: off by one in nv50_gpio_location() - 6d69630 drm/nouveau: completely fail init if we fail to map the PRAMIN BAR - 1eb3810 drm/nouveau: match U/DP script against SOR link - f712d0c drm/radeon/kms/pm: resurrect printing power states - 0fcbe94 drm/radeon/kms: add trivial debugging for voltage - a081a9d drm/radeon/kms/r600+: use voltage from requested clock mode (v3) - 4d60173 drm/radeon/kms/pm: track current voltage (v2) - aa1df0f drm/radeon/kms/pm: Disable voltage adjust on RS780/RS880 - cbd4623 drm/radeon/kms: fix typo in printing the HPD info - c9e75b2 drm/radeon/kms/pm: add mid profile - f8ed8b4 drm/radeon/kms/pm: Misc fixes - 8de016e drm/radeon/kms/combios: fix typo in voltage fix - 148a03b drm/radeon/kms/evergreen: set accel_enabled - 9b8eb4d drm/vmwgfx: return -EFAULT for copy_to_user errors - e902a35 drm/drm_crtc: return -EFAULT on copy_to_user errors - fc2362a drm/fb: use printk to print out the switching to text mode error. - 9bad145 drm/radeon: fix PM on non-vram cards. - 5a79395 drm: Propagate error from drm_fb_helper_init(). - a3524f1 drm/i915: fix oops on single crtc devices. - e7b526b drm/i915: Move non-phys cursors into the GTT - -commit e1442526a8b1b9a0ffd3f8778d2ff40597ef4662 -Author: Kyle McMartin -Date: Mon May 31 12:38:09 2010 +0100 - - nouveau is not in staging on Fedora - -commit fcd86a22bc88817a417185602e90451a3c5a25b8 -Author: Linus Torvalds -Date: Thu Jun 3 07:19:45 2010 -0700 - - Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6 - - * 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (41 commits) - % git log --oneline --no-merges a652883..1067b6c - d8dcaa1 drm/radeon/kms: make sure display hw is disabled when suspending - d8bd19d drm/vmwgfx: Allow userspace to change default layout. Bump minor. - 991b7b4 drm/vmwgfx: Fix framebuffer modesetting - 7c4f778 drm/vmwgfx: Fix vga save / restore with display topology. 
- c0db9cb vgaarb: use MIT license - 2d6e9b9 vgaarb: convert pr_devel() to pr_debug() - ce04cc0 drm: fix typos in Linux DRM Developer's Guide - 84d88f4 drm/radeon/kms/pm: voltage fixes - 9264587 drm/radeon/kms/pm: radeon_set_power_state fixes - c5e8ce6 drm/radeon/kms/pm: patch default power state with default clocks/voltages on r6xx+ - 9349d5c drm/radeon/kms/pm: enable SetVoltage on r7xx/evergreen - 7ac9aa5 drm/radeon/kms/pm: add support for SetVoltage cmd table (V2) - cb5fcbd drm/radeon/kms/evergreen: add initial CS parser - fbf8176 drm/kms: disable/enable poll around switcheroo on/off - fc5ea29 drm/nouveau: fixup confusion over which handle the DSM is hanging off. - afeb3e1 drm/nouveau: attempt to get bios from ACPI v3 - 8b281db drm/nv50: cast IGP memory location to u64 before shifting - 4abe438 drm/ttm: Fix ttm_page_alloc.c - e8613c0 drm/ttm: Fix cached TTM page allocation. - 1ca14e7 drm/vmwgfx: Remove some leftover debug messages. - 316ab13 drm/vmwgfx: Print warnings in kernel log about bo pinning that fails. - 792778e drm/vmwgfx: Unpause overlay on update. - 259600d drm/vmwgfx: Some modesetting cleanups and fixes. - d451f62 drm/vmwgfx: Don't use SVGA_REG_ENABLE in modesetting code. - bbfad33 drm/vmwgfx: Remove duplicate member from struct vmw_legacy_display_unit. - 22ee861 drm/vmwgfx: Reserve first part of VRAM for framebuffer. - d7e1958 drm/vmwgfx: Support older hardware. - 1ae1ddd drm/vmwgfx: Get connector status from detection function. - 1925d45 drm/vmwgfx: Add kernel throttling support. Bump minor. - 04e9e94 drm/vmwgfx: Make sure to unpin old and pin new framebuffer. - 6a591a9 drm/vmwgfx: Fix single framebuffer detection. - 7e71f8a drm/vmwgfx: Assume larger framebuffer max size. - becd214 drm/nv50: use alternate source of SOR_MODE_CTRL for DP hack - 26099a7 drm/nouveau: fix dual-link displays when plugged into single-link outputs - 2c58077 drm/nv50: obey dcb->duallink_possible - 2348487 drm/nv50: fix duallink_possible calculation for DCB 4.0 cards - 73db4be drm/nouveau: don't execute INIT_GPIO unless we're really running the table - f50c0b9 drm/nv40: allow cold-booting of nv4x chipsets - d13102c drm/nouveau: fix POST detection for certain chipsets - 7fc74f1 drm/nouveau: Add getparam for current PTIMER time. - b334f2b drm/nouveau: allow cursor image and position to survive suspend - -commit 663568ea6a7503a12898c7f1ba8192c8d42a28ac -Author: Linus Torvalds -Date: Tue Jun 1 14:12:27 2010 -0700 - - Merge branch 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel - - * 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel: (41 commits) - % git log --oneline --no-merges 08a6685..709d015 - e3a815f drm/i915: add HAS_BSD check to i915_getparam - 9bc3549 drm/i915: Honor sync polarity from VBT panel timing descriptors - a1786bd drm/i915: Unmask interrupt for render engine on Sandybridge - ca76482 drm/i915: Fix PIPE_CONTROL command on Sandybridge - ab34c22 drm/i915: Fix up address spaces in slow_kernel_write() - 99a03df drm/i915: Use non-atomic kmap for slow copy paths - 9b8c4a0 drm/i915: Avoid moving from CPU domain during pwrite - 68f95ba drm/i915: Cleanup after failed initialization of ringbuffers - 654fc60 drm/i915: Reject bind_to_gtt() early if object > aperture - 85cd461 drm/i915: Check error code whilst moving buffer to GTT domain. - 3d1cc47 drm/i915: Remove spurious warning "Failure to install fence" - ac0c6b5 drm/i915: Rebind bo if currently bound with incorrect alignment. 
- a7faf32 drm/i915: Include pitch in set_base debug statement. - a939406 drm/i915: Only print "nothing to do" debug message as required. - 808b24d drm/i915: Propagate error from unbinding an unfenceable object. - b118c1e drm/i915: Avoid nesting of domain changes when setting display plane - 468f0b4 drm/i915: Hold the spinlock whilst resetting unpin_work along error path - 35aed2e drm/i915: Only print an message if there was an error - e20f9c6 drm/i915: Clean up leftover bits from hws move to ring structure. - 9553426 drm/i915: Add CxSR support on Pineview DDR3 - d8201ab i915: remove unneeded null checks - 90a78e8 i915/intel_sdvo: remove unneeded null check - 467b200 drm/i915: Fix HDMI mode select for Cougarpoint PCH - 778c354 drm/i915: combine all small integers into one single bitfield - a7de64e drm/i915/dp: Add DPCD data to debug output - 9962c92 drm/i915/dp: Only enable enhanced framing if the sink supports it - 9908ff7 drm/i915: Kill dangerous pending-flip debugging - f1befe7 agp/intel: Restrict GTT mapping to valid range on i915 and i945 - 9a7e849 drm/i915: Storage class should be before const qualifier - 7648fa9 drm/i915: add power monitoring support - 7a772c4 drm/i915/gen4: Extra CRT hotplug paranoia - 734b415 drm/i915: Add support for interlaced display. - f953c93 i915: fix lock imbalance on error path... - f41275e drm/i915: Convert more trace events to DEFINE_EVENT - 9517a92 drm/i915: add timeout to FBC disable waits - d1b851f drm/i915: implement BSD ring buffer V2 - 852835f drm/i915: convert some gem structures to per-ring V2 - 8187a2b drm/i915: introduce intel_ring_buffer structure (V2) - d3301d8 drm/i915: Rename dev_priv->ring to dev_priv->render_ring. - 62fdfea drm/i915: Move ringbuffer-related code to intel_ringbuffer.c. - 79a78dd drm/i915: Fail to load driver if KMS request without GEM - -commit 30f0d753b32570886e6b98812d33df30229dcf87 -Author: Linus Torvalds -Date: Fri May 28 16:14:40 2010 -0700 - - Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6 - - * 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (1 commit) - % git log --oneline --no-merges e4f2e5e..24010e4 - cf22f20 drm/radeon: fix the r100/r200 ums block 0 page fix - -commit 5bf8778218d6085190bed41b729f6001e712b057 -Author: Linus Torvalds -Date: Wed May 26 12:30:09 2010 -0700 - - Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6 - - * 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (6 commits) - % git log --oneline --no-merges 91803b4..31f4671 - 2101d6f agp: amd64, fix pci reference leaks - 4a638b4 drm/edid: Allow non-fatal checksum errors in CEA blocks - 921d98b drm/radeon/kms: suppress a build warning (unused variable) - f49d273 drm: Fixes linux-next & linux-2.6 checkstack warnings: - 5797660 nouveau: fix acpi_lid_open undefined - 10b0612 drm/radeon/kms: release AGP bridge at suspend - -commit 019d6c44898a414e7d6ef16fce1950577163cccb -Author: Linus Torvalds -Date: Fri May 21 11:14:52 2010 -0700 - - Merge branch 'drm-for-2.6.35' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6 - - * 'drm-for-2.6.35' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (207 commits) - % git log --oneline --no-merges ac3ee84..59534f7 - b486787 drm/radeon/kms/pm/r600: select the mid clock mode for single head low profile - 5d9b7e2 drm/radeon: fix power supply kconfig interaction. 
- e865275 drm/radeon/kms: record object that have been list reserved - 365048f drm/radeon: AGP memory is only I/O if the aperture can be mapped by the CPU. - 4573744 drm/radeon/kms: don't default display priority to high on rs4xx - c43ae47 drm/edid: fix typo in 1600x1200@75 mode - 893887ed drm/nouveau: fix i2c-related init table handlers - 04f542c drm/nouveau: support init table i2c device identifier 0x81 - f8b0be1 drm/nouveau: ensure we've parsed i2c table entry for INIT_*I2C* handlers - 92b9618 drm/nouveau: display error message for any failed init table opcode - 9170a82 drm/nouveau: fix init table handlers to return proper error codes - e9ebb68 drm/nv50: support fractional feedback divider on newer chips - 7e99a9b drm/nv50: fix monitor detection on certain chipsets - 07fee3d drm/nv50: store full dcb i2c entry from vbios - afa3b4c drm/nv50: fix suspend/resume with DP outputs - 17b96cc drm/nv50: output calculated crtc pll when debugging on - 4c389f0 drm/nouveau: dump pll limits entries when debugging is on - 25908b7 drm/nouveau: bios parser fixes for eDP boards - 90af89b drm/nouveau: fix a nouveau_bo dereference after it's been destroyed - de1f46a drm/nv40: remove some completed ctxprog TODOs - f23d4cf drm/nv04: Implement missing nv04 PGRAPH methods in software. - a0e6544 drm/nouveau: Use 0x5f instead of 0x9f as imageblit on original NV10. - 6698998 drm/radeon: fix hdmi offset bug reported by smatch. - 8e36ed0 drm/radeon/kms: hpd cleanup - 2bfcc0f drm/radeon/kms: reset ddc_bus in object header parsing - 6fd0248 amd64-agp: Probe unknown AGP devices the right way - d831692 sis-agp: Remove SIS 760, handled by amd64-agp - 26481fb drm/radeon/pm: fix device_create_file return value checks. - 4bff517 drm/radeon/kms/pm: fix r6xx+ profile setup - ce8a3eb drm/radeon/kms/pm: make pm spam debug only - ce8f537 drm/radeon/kms/pm: rework power management - d731117 drm/radeon/kms/pm: add support for no display power states - ca2af92 drm/radeon/kms: fix lock ordering in ring, ib handling - 01434b4 radeon: Use fences to gate entry to reclocking on drm handoff - 06415c5 fbmem, drm/nouveau: kick firmware framebuffers as soon as possible - 1471ca9 fbdev: allow passing more than one aperture for handoff - 3da1f33 drm: Prefix info printk about registering panic notifier with 'drm' - bc35afd drm/radeon/kms: add query for crtc hw id from crtc id to get info V2 - 61dd98f drm/edid: Fix 1024x768@85Hz - 6ebc22e drivers/gpu/drm: Use kzalloc - 96525a2 drm_edid: There should be 6 Standard Timings - f405a1a drivers/gpu/drm: Use kmemdup - ca117d6 vga: fix kconfig text typos - 0bcad4c drm/edid: remove an unneeded variable - 68b61a7 drm/radeon/kms/combios: match lvds panel info parsing to ddx - 1ff26a3 drm/radeon/kms/atom: fix typo in LVDS panel info parsing - 8bf3aae drm/radeon/kms: fix copy pasto in disable encoders patch - a7c5427 drm/i915: Fix out of tree builds - 007cc8a drm/i915: move fence lru to struct drm_i915_fence_reg - 31770bd drm/i915: don't allow tiling changes on pinned buffers v2 - 149c36a drm/i915: Be extra careful about A/D matching for multifunction SDVO - b108333 drm/i915: Fix DDC bus selection for multifunction SDVO - aa96139 drm/radeon/kms/atom: disable the encoders in encoder_disable - 3d8620c drm/i915: cleanup mode setting before unmapping registers - ee5382a drm/i915: Make fbc control wrapper functions - 1637ef4 drm/i915: Wait for the GPU whilst shrinking, if truly desperate. 
- 0a31a44 drm/i915: Use spatio-temporal dithering on PCH - 9e51159 drm/ttm: fix, avoid iomapping system memory - a1e9ada drm/radeon/kms: R3XX-R4XX fix GPU reset code - f259493 drm/radeon/kms: HDMI irq support - 58bd086 drm/radeon/kms: rework audio polling timer - 61cf059 agp: use scratch page on memory remove and at GATT creation V4 - 2d2ef82 drm: add initial DRM developer documentation - 10fd883 agp/intel: put back check that we have a driver for the bridge. - d4b74bf Revert "drm/i915: Configure the TV sense state correctly on GM45 to make TV detection reliable" - 6b8b178 drm/radeon/kms: enable use of unmappable VRAM V2 - 0c321c7 drm/ttm: remove io_ field from TTM V6 - 96bf8b8 drm/vmwgfx: add support for new TTM fault callback V5 - f32f02f drm/nouveau/kms: add support for new TTM fault callback V5 - 0a2d50e drm/radeon/kms: add support for new fault callback V7 - 82c5da6 drm/ttm: ttm_fault callback to allow driver to handle bo placement V6 - a8089e8 drm/i915: drop pointer to drm_gem_object - 62b8b21 drm/i915: don't use ->driver_private anymore - c397b90 drm/i915: embed the gem object into drm_i915_gem_object - ac52bc5 drm/i915: introduce i915_gem_alloc_object - fd632aa drm: free core gem object from driver callbacks - 1d39704 drm: extract drm_gem_object_init - e158316 agp/intel-gtt: kill previous_size assignments - 1ca46bd agp/intel-gtt: kill intel_i830_tlbflush - 22dd82a agp/intel: split out gmch/gtt probe, part 1 - 059efc6 agp/intel: kill mutli_gmch_chip - e5a04d5 agp/intel: uncoditionally reconfigure driver on resume - f51b766 agp/intel: split out the GTT support - ff7cdd6 agp/intel: introduce intel-agp.h header file - 6e0032f drm/i915: Don't touch PORT_HOTPLUG_EN in intel_dp_detect() - 77ffb59 drm/i915/pch: Use minimal number of FDI lanes (v2) - 7f8a856 drm/i915: Add the support of memory self-refresh on Ironlake - d429434 drm/i915: Move Pineview CxSR and watermark code into update_wm hook. - a2c459e drm/i915: Only save/restore FBC on the platform that supports FBC - 8a1837c drm/i915: Fix the incorrect argument for SDVO SET_TV_format command - 461ed3c drm/i915: Add support of SDVO on Ibexpeak PCH - cfecde4 drm/i915: Don't enable pipe/plane/VCO early (wait for DPMS on). - ea059a1 drm/i915: do not read uninitialized ->dev_private - a1f4b7f Revert "drm/i915: Use a dmi quirk to skip a broken SDVO TV output." 
- 14571b4 drm/i915: implement multifunction SDVO device support - 409608b drm/i915: remove unused intel_pipe_get_connector() - 1f254ec drm/i915: remove connector object in old output structure - 0c41ee2 drm/i915: convert TV driver to new encoder/connector structure - d2a82a6 drm/i915: convert SDVO driver to new encoder/connector structure - 599be16 drm/i915: convert DVO driver to new encoder/connector structure - 55f78c4 drm/i915: convert DP/eDP driver to new encoder/connector structure - 674e2d0 drm/i915: convert HDMI driver to new encoder/connector structure - bb8a356 drm/i915: convert LVDS driver to new encoder/connector structure - 454c1ca drm/i915: convert VGA driver to new encoder/connector structure - 9c9e792 drm/i915: Set sync polarity correctly on DisplayPort - ab00a9e drm/i915: Un-magic a DPCD register write - e3421a1 drm/i915: enable DP/eDP for Sandybridge/Cougarpoint - 0f22906 drm/i915: enable HDMI on Cougarpoint - b3b095b drm/i915: enable LVDS on Cougarpoint - a4a6b90 drm/i915: Fix CRT force detect on Cougarpoint - 8db9d77 drm/i915: Support for Cougarpoint PCH display pipeline - 3bad078 drm/i915: Probe for PCH chipset type - 7da9f6c drm/i915: Sandybridge has no integrated TV - edcb49c drm/i915: Fix legacy BLC event for pipe A - d275f66 drm/i915: Clear the LVDS pipe B select bit when moving the LVDS to pipe A. - 0f3ee80 drm/i915: Allow LVDS on pipe A on gen4+ - 6443170 drm/i915: Remove dead KMS encoder save/restore code. - 522032d drm/edid: When checking duplicate standard modes, walked the probed list - 335af9a drm/i915: change intel_ddc_get_modes() function parameters - c1c4397 drm/i915: passing drm connector param for load detection - f1c79df drm/i915: Add new helper to return current attached encoder for connector - 5daa55e drm/i915: Add new 'intel_connector' structure - c5e4df3 drm/i915: more conversion from connector_list walk to encoder_list - 5bf4c9c drm/i915: use encoder_list for hotplug callback - 903cf20 drm/i915: Convert some trace events to DEFINE_TRACE - fb8b5a3 drm/i915: Configure the TV sense state correctly on GM45 to make TV detection reliable - a743374 drm/radeon: fix cypress firmware typo. - 0ca2ab5 drm/radeon/kms/evergreen: add hpd support - 45f9a39 drm/radeon/kms/evergreen: implement irq support - fe251e2 drm/radeon/kms/evergreen: setup and enable the CP - 32fcdbf drm/radeon/kms/evergreen: implement gfx init - 747943e drm/radeon/kms/evergreen: add soft reset function - 0fcdb61 drm/radeon/kms/evergreen: add gart support - 49f6598 drm/radeon/kms: add support for evergreen power tables - 08c5c51 drm/radeon/kms: update atombios.h power tables for evergreen - c385e50c drm/edid: Fix sync polarity for secondary GTF curve - 2125b8a drm/ttm: using kmalloc/kfree requires including slab.h - 9d87fa2 drm/ttm: split no_wait argument in 2 GPU or reserve wait - b1f2019 drm/fb: remove drm_fb_helper_setcolreg - 4cdc840 drm/ttm: include linux/seq_file.h for seq_printf - 4abe352 drm/kms/fb: use slow work mechanism for normal hotplug also. - 5c4426a drm/kms/fb: add polling support for when nothing is connected. - 19b4b44 drm/kms/fb: provide a 1024x768 fbcon if no outputs found. - 0b4c0f3 drm/kms/fb: separate fbdev connector list from core drm connectors - 8be48d9 drm/kms/fb: move to using fb helper crtc grouping instead of core crtc list - 3865167 drm/fb: fix fbdev object model + cleanup properly. - c96af79 drm/ttm: Add sysfs interface to control pool allocator. - 975efdb drm/ttm: Use set_pages_array_wc instead of set_memory_wc. 
- 4f64625 arch/x86: Add array variants for setting memory to wc caching. - bf62acd drm/nouveau: Add ttm page pool debugfs file. - 8d7cddc drm/radeon/kms: Add ttm page pool debugfs file. - 0745866 drm/ttm: Add debugfs output entry to pool allocator. - 1403b1a drm/ttm: add pool wc/uc page allocator V3 - 90aca4d drm/radeon/kms: simplify & improve GPU reset V2 - a2d07b7 drm/radeon/kms: rename gpu_reset to asic_reset - 225758d drm/radeon/kms: fence cleanup + more reliable GPU lockup detection V4 - 171fdd8 drm/modes: Fix interlaced mode names - 7a37435 drm/edid: Add secondary GTF curve support - 7ca6adb drm/edid: Strengthen the algorithm for standard mode codes - a0910c8 drm/edid: Fix the HDTV hack. - b17e52e drm/edid: Extend range-based mode addition for EDID 1.4 - d1ff640 drm/edid: Add test for monitor reduced blanking support. - a327f6b drm/edid: Fix preferred mode parse for EDID 1.4 - 59d8aff drm/edid: Remove some silly comments - 7466f4c drm/edid: Remove arbitrary EDID extension limit - 2255be1 drm/edid: Add modes for Established Timings III section - c867df7 drm/edid: Reshuffle mode list construction to closer match the spec - 2b470ab drm/edid: Remove a redundant check - fbcc06b drm/edid: Remove some misleading comments - 61e57a8 drm/edid: Fix secondary block fetch. - - Documentation/DocBook/Makefile | 2 +- - Documentation/DocBook/drm.tmpl | 839 ++++++++++ - arch/x86/include/asm/cacheflush.h | 2 + - arch/x86/mm/pageattr.c | 53 +- - drivers/char/agp/agp.h | 80 - - drivers/char/agp/ali-agp.c | 1 + - drivers/char/agp/amd-k7-agp.c | 9 + - drivers/char/agp/amd64-agp.c | 56 +- - drivers/char/agp/ati-agp.c | 8 + - drivers/char/agp/efficeon-agp.c | 1 + - drivers/char/agp/intel-agp.c | 1883 ++--------------------- - drivers/char/agp/intel-agp.h | 239 +++ - drivers/char/agp/intel-gtt.c | 1548 +++++++++++++++++++ - drivers/char/agp/nvidia-agp.c | 1 + - drivers/char/agp/sis-agp.c | 9 +- - drivers/char/agp/uninorth-agp.c | 16 +- - drivers/char/agp/via-agp.c | 2 + - drivers/gpu/drm/Kconfig | 4 + - drivers/gpu/drm/drm_auth.c | 3 +- - drivers/gpu/drm/drm_crtc.c | 13 +- - drivers/gpu/drm/drm_crtc_helper.c | 506 ++----- - drivers/gpu/drm/drm_dma.c | 4 +- - drivers/gpu/drm/drm_edid.c | 807 +++++++--- - drivers/gpu/drm/drm_fb_helper.c | 910 ++++++++---- - drivers/gpu/drm/drm_fops.c | 3 +- - drivers/gpu/drm/drm_gem.c | 49 +- - drivers/gpu/drm/drm_modes.c | 105 +- - drivers/gpu/drm/drm_sysfs.c | 2 +- - drivers/gpu/drm/i915/Makefile | 3 + - drivers/gpu/drm/i915/dvo.h | 10 - - drivers/gpu/drm/i915/dvo_ch7017.c | 46 +- - drivers/gpu/drm/i915/dvo_ch7xxx.c | 44 +- - drivers/gpu/drm/i915/dvo_ivch.c | 21 - - drivers/gpu/drm/i915/dvo_sil164.c | 38 - - drivers/gpu/drm/i915/dvo_tfp410.c | 32 - - drivers/gpu/drm/i915/i915_debugfs.c | 110 +- - drivers/gpu/drm/i915/i915_dma.c | 745 ++++++++-- - drivers/gpu/drm/i915/i915_drv.c | 99 +- - drivers/gpu/drm/i915/i915_drv.h | 246 ++-- - drivers/gpu/drm/i915/i915_gem.c | 1045 ++++++-------- - drivers/gpu/drm/i915/i915_gem_debug.c | 2 +- - drivers/gpu/drm/i915/i915_gem_tiling.c | 5 + - drivers/gpu/drm/i915/i915_irq.c | 205 ++-- - drivers/gpu/drm/i915/i915_reg.h | 225 +++- - drivers/gpu/drm/i915/i915_suspend.c | 41 +- - drivers/gpu/drm/i915/i915_trace.h | 112 +- - drivers/gpu/drm/i915/intel_bios.c | 11 + - drivers/gpu/drm/i915/intel_crt.c | 116 +- - drivers/gpu/drm/i915/intel_display.c | 1350 ++++++++++++----- - drivers/gpu/drm/i915/intel_dp.c | 263 ++-- - drivers/gpu/drm/i915/intel_drv.h | 31 +- - drivers/gpu/drm/i915/intel_dvo.c | 103 +- - drivers/gpu/drm/i915/intel_fb.c | 223 ++-- 
- drivers/gpu/drm/i915/intel_hdmi.c | 76 +- - drivers/gpu/drm/i915/intel_lvds.c | 111 +- - drivers/gpu/drm/i915/intel_modes.c | 21 +- - drivers/gpu/drm/i915/intel_overlay.c | 60 +- - drivers/gpu/drm/i915/intel_ringbuffer.c | 849 ++++++++++ - drivers/gpu/drm/i915/intel_ringbuffer.h | 124 ++ - drivers/gpu/drm/i915/intel_sdvo.c | 1009 ++++++------- - drivers/gpu/drm/i915/intel_tv.c | 185 +-- - drivers/gpu/drm/nouveau/Makefile | 3 +- - drivers/gpu/drm/nouveau/nouveau_acpi.c | 71 +- - drivers/gpu/drm/nouveau/nouveau_bios.c | 594 +++++--- - drivers/gpu/drm/nouveau/nouveau_bios.h | 1 + - drivers/gpu/drm/nouveau/nouveau_bo.c | 116 +- - drivers/gpu/drm/nouveau/nouveau_connector.c | 49 +- - drivers/gpu/drm/nouveau/nouveau_crtc.h | 2 + - drivers/gpu/drm/nouveau/nouveau_debugfs.c | 3 + - drivers/gpu/drm/nouveau/nouveau_display.c | 42 +- - drivers/gpu/drm/nouveau/nouveau_drv.c | 48 +- - drivers/gpu/drm/nouveau/nouveau_drv.h | 15 + - drivers/gpu/drm/nouveau/nouveau_encoder.h | 2 + - drivers/gpu/drm/nouveau/nouveau_fb.h | 6 +- - drivers/gpu/drm/nouveau/nouveau_fbcon.c | 265 ++-- - drivers/gpu/drm/nouveau/nouveau_fbcon.h | 19 +- - drivers/gpu/drm/nouveau/nouveau_gem.c | 5 +- - drivers/gpu/drm/nouveau/nouveau_grctx.c | 6 +- - drivers/gpu/drm/nouveau/nouveau_i2c.c | 21 +- - drivers/gpu/drm/nouveau/nouveau_irq.c | 10 +- - drivers/gpu/drm/nouveau/nouveau_mem.c | 3 +- - drivers/gpu/drm/nouveau/nouveau_reg.h | 1 + - drivers/gpu/drm/nouveau/nouveau_state.c | 83 +- - drivers/gpu/drm/nouveau/nv04_cursor.c | 1 + - drivers/gpu/drm/nouveau/nv04_fbcon.c | 18 +- - drivers/gpu/drm/nouveau/nv04_graph.c | 566 +++++++- - drivers/gpu/drm/nouveau/nv40_graph.c | 8 +- - drivers/gpu/drm/nouveau/nv40_grctx.c | 5 - - drivers/gpu/drm/nouveau/nv50_calc.c | 87 ++ - drivers/gpu/drm/nouveau/nv50_crtc.c | 46 +- - drivers/gpu/drm/nouveau/nv50_cursor.c | 1 + - drivers/gpu/drm/nouveau/nv50_display.c | 36 + - drivers/gpu/drm/nouveau/nv50_fb.c | 10 +- - drivers/gpu/drm/nouveau/nv50_fbcon.c | 16 +- - drivers/gpu/drm/nouveau/nv50_gpio.c | 2 +- - drivers/gpu/drm/nouveau/nv50_sor.c | 18 +- - drivers/gpu/drm/radeon/Makefile | 7 +- - drivers/gpu/drm/radeon/atombios.h | 76 +- - drivers/gpu/drm/radeon/atombios_crtc.c | 23 +- - drivers/gpu/drm/radeon/atombios_dp.c | 2 +- - drivers/gpu/drm/radeon/evergreen.c | 1562 ++++++++++++++++++- - drivers/gpu/drm/radeon/evergreen_cs.c | 1356 ++++++++++++++++ - drivers/gpu/drm/radeon/evergreen_reg.h | 7 + - drivers/gpu/drm/radeon/evergreend.h | 1020 ++++++++++++ - drivers/gpu/drm/radeon/r100.c | 739 +++++++--- - drivers/gpu/drm/radeon/r100d.h | 164 ++ - drivers/gpu/drm/radeon/r300.c | 151 +- - drivers/gpu/drm/radeon/r300d.h | 47 +- - drivers/gpu/drm/radeon/r420.c | 46 +- - drivers/gpu/drm/radeon/r500_reg.h | 3 + - drivers/gpu/drm/radeon/r520.c | 7 +- - drivers/gpu/drm/radeon/r600.c | 693 ++++++++- - drivers/gpu/drm/radeon/r600_audio.c | 58 +- - drivers/gpu/drm/radeon/r600_blit_kms.c | 3 + - drivers/gpu/drm/radeon/r600_hdmi.c | 65 +- - drivers/gpu/drm/radeon/r600_reg.h | 57 +- - drivers/gpu/drm/radeon/radeon.h | 265 +++- - drivers/gpu/drm/radeon/radeon_agp.c | 5 + - drivers/gpu/drm/radeon/radeon_asic.c | 144 ++- - drivers/gpu/drm/radeon/radeon_asic.h | 45 +- - drivers/gpu/drm/radeon/radeon_atombios.c | 321 +++- - drivers/gpu/drm/radeon/radeon_bios.c | 3 +- - drivers/gpu/drm/radeon/radeon_combios.c | 71 +- - drivers/gpu/drm/radeon/radeon_connectors.c | 63 +- - drivers/gpu/drm/radeon/radeon_cs.c | 4 - - drivers/gpu/drm/radeon/radeon_device.c | 72 +- - drivers/gpu/drm/radeon/radeon_display.c | 135 +- - 
drivers/gpu/drm/radeon/radeon_drv.c | 12 +- - drivers/gpu/drm/radeon/radeon_encoders.c | 44 +- - drivers/gpu/drm/radeon/radeon_fb.c | 364 +++-- - drivers/gpu/drm/radeon/radeon_fence.c | 107 +- - drivers/gpu/drm/radeon/radeon_fixed.h | 67 - - drivers/gpu/drm/radeon/radeon_gart.c | 2 +- - drivers/gpu/drm/radeon/radeon_gem.c | 6 +- - drivers/gpu/drm/radeon/radeon_irq_kms.c | 5 +- - drivers/gpu/drm/radeon/radeon_kms.c | 25 + - drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 14 +- - drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 26 +- - drivers/gpu/drm/radeon/radeon_mode.h | 49 +- - drivers/gpu/drm/radeon/radeon_object.c | 44 +- - drivers/gpu/drm/radeon/radeon_object.h | 2 +- - drivers/gpu/drm/radeon/radeon_pm.c | 834 +++++++---- - drivers/gpu/drm/radeon/radeon_reg.h | 4 +- - drivers/gpu/drm/radeon/radeon_ring.c | 66 +- - drivers/gpu/drm/radeon/radeon_state.c | 5 +- - drivers/gpu/drm/radeon/radeon_ttm.c | 122 +- - drivers/gpu/drm/radeon/reg_srcs/evergreen | 611 ++++++++ - drivers/gpu/drm/radeon/rs400.c | 9 +- - drivers/gpu/drm/radeon/rs600.c | 232 +++- - drivers/gpu/drm/radeon/rs600d.h | 80 + - drivers/gpu/drm/radeon/rs690.c | 289 ++-- - drivers/gpu/drm/radeon/rv515.c | 287 ++--- - drivers/gpu/drm/radeon/rv515d.h | 46 + - drivers/gpu/drm/radeon/rv770.c | 39 +- - drivers/gpu/drm/savage/savage_bci.c | 3 +- - drivers/gpu/drm/ttm/Makefile | 2 +- - drivers/gpu/drm/ttm/ttm_bo.c | 98 +- - drivers/gpu/drm/ttm/ttm_bo_util.c | 122 +- - drivers/gpu/drm/ttm/ttm_bo_vm.c | 41 +- - drivers/gpu/drm/ttm/ttm_memory.c | 7 +- - drivers/gpu/drm/ttm/ttm_page_alloc.c | 855 ++++++++++ - drivers/gpu/drm/ttm/ttm_tt.c | 44 +- - drivers/gpu/drm/vmwgfx/Makefile | 2 +- - drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 50 +- - drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 24 +- - drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 49 +- - drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 14 +- - drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 101 +- - drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 173 +++ - drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 23 +- - drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 17 +- - drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 209 ++- - drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 4 +- - drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 189 ++- - drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 4 +- - drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 8 +- - drivers/gpu/vga/Kconfig | 6 +- - drivers/gpu/vga/vgaarb.c | 61 +- - drivers/staging/Kconfig | 2 - - drivers/video/efifb.c | 11 +- - drivers/video/fbmem.c | 74 +- - drivers/video/fbsysfs.c | 1 + - drivers/video/offb.c | 28 +- - drivers/video/vesafb.c | 11 +- - drivers/video/vga16fb.c | 26 +- - include/drm/drmP.h | 3 + - include/drm/drm_crtc.h | 43 +- - include/drm/drm_crtc_helper.h | 12 +- - include/drm/drm_edid.h | 5 +- - include/drm/drm_fb_helper.h | 67 +- - include/drm/drm_fixed.h | 67 + - include/drm/i915_drm.h | 5 +- - include/drm/nouveau_drm.h | 1 + - include/drm/radeon_drm.h | 2 + - include/drm/ttm/ttm_bo_api.h | 46 +- - include/drm/ttm/ttm_bo_driver.h | 57 +- - include/drm/ttm/ttm_page_alloc.h | 74 + - include/drm/vmwgfx_drm.h | 26 + - include/linux/fb.h | 19 +- - include/linux/vgaarb.h | 21 + - 200 files changed, 21571 insertions(+), 8636 deletions(-) - -diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile -index 325cfd1..c7e5dc7 100644 ---- a/Documentation/DocBook/Makefile -+++ b/Documentation/DocBook/Makefile -@@ -14,7 +14,7 @@ DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \ - genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \ - mac80211.xml debugobjects.xml sh.xml regulator.xml \ - 
alsa-driver-api.xml writing-an-alsa-driver.xml \ -- tracepoint.xml utrace.xml media.xml -+ tracepoint.xml utrace.xml media.xml drm.xml - - ### - # The build process is as follows (targets): -diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl -new file mode 100644 -index 0000000..910c923 ---- /dev/null -+++ b/Documentation/DocBook/drm.tmpl -@@ -0,0 +1,839 @@ -+ -+ -+ -+ -+ -+ Linux DRM Developer's Guide -+ -+ -+ 2008-2009 -+ -+ Intel Corporation (Jesse Barnes <jesse.barnes@intel.com>) -+ -+ -+ -+ -+ -+ The contents of this file may be used under the terms of the GNU -+ General Public License version 2 (the "GPL") as distributed in -+ the kernel source COPYING file. -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ Introduction -+ -+ The Linux DRM layer contains code intended to support the needs -+ of complex graphics devices, usually containing programmable -+ pipelines well suited to 3D graphics acceleration. Graphics -+ drivers in the kernel can make use of DRM functions to make -+ tasks like memory management, interrupt handling and DMA easier, -+ and provide a uniform interface to applications. -+ -+ -+ A note on versions: this guide covers features found in the DRM -+ tree, including the TTM memory manager, output configuration and -+ mode setting, and the new vblank internals, in addition to all -+ the regular features found in current kernels. -+ -+ -+ [Insert diagram of typical DRM stack here] -+ -+ -+ -+ -+ -+ -+ DRM Internals -+ -+ This chapter documents DRM internals relevant to driver authors -+ and developers working to add support for the latest features to -+ existing drivers. -+ -+ -+ First, we'll go over some typical driver initialization -+ requirements, like setting up command buffers, creating an -+ initial output configuration, and initializing core services. -+ Subsequent sections will cover core internals in more detail, -+ providing implementation notes and examples. -+ -+ -+ The DRM layer provides several services to graphics drivers, -+ many of them driven by the application interfaces it provides -+ through libdrm, the library that wraps most of the DRM ioctls. -+ These include vblank event handling, memory -+ management, output management, framebuffer management, command -+ submission & fencing, suspend/resume support, and DMA -+ services. -+ -+ -+ The core of every DRM driver is struct drm_device. Drivers -+ will typically statically initialize a drm_device structure, -+ then pass it to drm_init() at load time. -+ -+ -+ -+ -+ -+ Driver initialization -+ -+ Before calling the DRM initialization routines, the driver must -+ first create and fill out a struct drm_device structure. -+ -+ -+ static struct drm_driver driver = { -+ /* don't use mtrr's here, the Xserver or user space app should -+ * deal with them for intel hardware. 
-+ */ -+ .driver_features = -+ DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | -+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET, -+ .load = i915_driver_load, -+ .unload = i915_driver_unload, -+ .firstopen = i915_driver_firstopen, -+ .lastclose = i915_driver_lastclose, -+ .preclose = i915_driver_preclose, -+ .save = i915_save, -+ .restore = i915_restore, -+ .device_is_agp = i915_driver_device_is_agp, -+ .get_vblank_counter = i915_get_vblank_counter, -+ .enable_vblank = i915_enable_vblank, -+ .disable_vblank = i915_disable_vblank, -+ .irq_preinstall = i915_driver_irq_preinstall, -+ .irq_postinstall = i915_driver_irq_postinstall, -+ .irq_uninstall = i915_driver_irq_uninstall, -+ .irq_handler = i915_driver_irq_handler, -+ .reclaim_buffers = drm_core_reclaim_buffers, -+ .get_map_ofs = drm_core_get_map_ofs, -+ .get_reg_ofs = drm_core_get_reg_ofs, -+ .fb_probe = intelfb_probe, -+ .fb_remove = intelfb_remove, -+ .fb_resize = intelfb_resize, -+ .master_create = i915_master_create, -+ .master_destroy = i915_master_destroy, -+#if defined(CONFIG_DEBUG_FS) -+ .debugfs_init = i915_debugfs_init, -+ .debugfs_cleanup = i915_debugfs_cleanup, -+#endif -+ .gem_init_object = i915_gem_init_object, -+ .gem_free_object = i915_gem_free_object, -+ .gem_vm_ops = &i915_gem_vm_ops, -+ .ioctls = i915_ioctls, -+ .fops = { -+ .owner = THIS_MODULE, -+ .open = drm_open, -+ .release = drm_release, -+ .ioctl = drm_ioctl, -+ .mmap = drm_mmap, -+ .poll = drm_poll, -+ .fasync = drm_fasync, -+#ifdef CONFIG_COMPAT -+ .compat_ioctl = i915_compat_ioctl, -+#endif -+ }, -+ .pci_driver = { -+ .name = DRIVER_NAME, -+ .id_table = pciidlist, -+ .probe = probe, -+ .remove = __devexit_p(drm_cleanup_pci), -+ }, -+ .name = DRIVER_NAME, -+ .desc = DRIVER_DESC, -+ .date = DRIVER_DATE, -+ .major = DRIVER_MAJOR, -+ .minor = DRIVER_MINOR, -+ .patchlevel = DRIVER_PATCHLEVEL, -+ }; -+ -+ -+ In the example above, taken from the i915 DRM driver, the driver -+ sets several flags indicating what core features it supports. -+ We'll go over the individual callbacks in later sections. Since -+ flags indicate which features your driver supports to the DRM -+ core, you need to set most of them prior to calling drm_init(). Some, -+ like DRIVER_MODESET can be set later based on user supplied parameters, -+ but that's the exception rather than the rule. -+ -+ -+ Driver flags -+ -+ DRIVER_USE_AGP -+ -+ Driver uses AGP interface -+ -+ -+ -+ DRIVER_REQUIRE_AGP -+ -+ Driver needs AGP interface to function. -+ -+ -+ -+ DRIVER_USE_MTRR -+ -+ -+ Driver uses MTRR interface for mapping memory. Deprecated. -+ -+ -+ -+ -+ DRIVER_PCI_DMA -+ -+ Driver is capable of PCI DMA. Deprecated. -+ -+ -+ -+ DRIVER_SG -+ -+ Driver can perform scatter/gather DMA. Deprecated. -+ -+ -+ -+ DRIVER_HAVE_DMA -+ Driver supports DMA. Deprecated. -+ -+ -+ DRIVER_HAVE_IRQDRIVER_IRQ_SHARED -+ -+ -+ DRIVER_HAVE_IRQ indicates whether the driver has a IRQ -+ handler, DRIVER_IRQ_SHARED indicates whether the device & -+ handler support shared IRQs (note that this is required of -+ PCI drivers). -+ -+ -+ -+ -+ DRIVER_DMA_QUEUE -+ -+ -+ If the driver queues DMA requests and completes them -+ asynchronously, this flag should be set. Deprecated. -+ -+ -+ -+ -+ DRIVER_FB_DMA -+ -+ -+ Driver supports DMA to/from the framebuffer. Deprecated. -+ -+ -+ -+ -+ DRIVER_MODESET -+ -+ -+ Driver supports mode setting interfaces. -+ -+ -+ -+ -+ -+ In this specific case, the driver requires AGP and supports -+ IRQs. DMA, as we'll see, is handled by device specific ioctls -+ in this case. 
It also supports the kernel mode setting APIs, though -+ unlike in the actual i915 driver source, this example unconditionally -+ exports KMS capability. -+ -+ -+ -+ -+ -+ -+ Driver load -+ -+ In the previous section, we saw what a typical drm_driver -+ structure might look like. One of the more important fields in -+ the structure is the hook for the load function. -+ -+ -+ static struct drm_driver driver = { -+ ... -+ .load = i915_driver_load, -+ ... -+ }; -+ -+ -+ The load function has many responsibilities: allocating a driver -+ private structure, specifying supported performance counters, -+ configuring the device (e.g. mapping registers & command -+ buffers), initializing the memory manager, and setting up the -+ initial output configuration. -+ -+ -+ Note that the tasks performed at driver load time must not -+ conflict with DRM client requirements. For instance, if user -+ level mode setting drivers are in use, it would be problematic -+ to perform output discovery & configuration at load time. -+ Likewise, if pre-memory management aware user level drivers are -+ in use, memory management and command buffer setup may need to -+ be omitted. These requirements are driver specific, and care -+ needs to be taken to keep both old and new applications and -+ libraries working. The i915 driver supports the "modeset" -+ module parameter to control whether advanced features are -+ enabled at load time or in legacy fashion. If compatibility is -+ a concern (e.g. with drivers converted over to the new interfaces -+ from the old ones), care must be taken to prevent incompatible -+ device initialization and control with the currently active -+ userspace drivers. -+ -+ -+ -+ Driver private & performance counters -+ -+ The driver private hangs off the main drm_device structure and -+ can be used for tracking various device specific bits of -+ information, like register offsets, command buffer status, -+ register state for suspend/resume, etc. At load time, a -+ driver can simply allocate one and set drm_device.dev_priv -+ appropriately; at unload the driver can free it and set -+ drm_device.dev_priv to NULL. -+ -+ -+ The DRM supports several counters which can be used for rough -+ performance characterization. Note that the DRM stat counter -+ system is not often used by applications, and supporting -+ additional counters is completely optional. -+ -+ -+ These interfaces are deprecated and should not be used. If performance -+ monitoring is desired, the developer should investigate and -+ potentially enhance the kernel perf and tracing infrastructure to export -+ GPU related performance information to performance monitoring -+ tools and applications. -+ -+ -+ -+ -+ Configuring the device -+ -+ Obviously, device configuration will be device specific. -+ However, there are several common operations: finding a -+ device's PCI resources, mapping them, and potentially setting -+ up an IRQ handler. -+ -+ -+ Finding & mapping resources is fairly straightforward. The -+ DRM wrapper functions, drm_get_resource_start() and -+ drm_get_resource_len() can be used to find BARs on the given -+ drm_device struct. Once those values have been retrieved, the -+ driver load function can call drm_addmap() to create a new -+ mapping for the BAR in question. Note you'll probably want a -+ drm_local_map_t in your driver private structure to track any -+ mappings you create. 
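
A minimal sketch of that pattern, assuming the map API named above (drm_get_resource_start(), drm_get_resource_len(), drm_addmap()); the example_priv type and its mmio member are hypothetical, and exact signatures vary between kernel versions:

#include "drmP.h"	/* 2.6.3x-era DRM core header */

struct example_priv {
        drm_local_map_t *mmio;	/* hypothetical driver-private member */
};

/* Called from the driver's load hook: map BAR 0 as the register
 * aperture and remember the mapping for later use and teardown. */
static int example_map_registers(struct drm_device *dev,
                                 struct example_priv *priv)
{
        resource_size_t base = drm_get_resource_start(dev, 0);
        resource_size_t len = drm_get_resource_len(dev, 0);

        return drm_addmap(dev, base, len, _DRM_REGISTERS,
                          _DRM_KERNEL | _DRM_DRIVER, &priv->mmio);
}

On unload, such a mapping would normally be dropped again with drm_rmmap().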
-+ -+ -+ -+ -+ if compatibility with other operating systems isn't a concern -+ (DRM drivers can run under various BSD variants and OpenSolaris), -+ native Linux calls can be used for the above, e.g. pci_resource_* -+ and iomap*/iounmap. See the Linux device driver book for more -+ info. -+ -+ -+ Once you have a register map, you can use the DRM_READn() and -+ DRM_WRITEn() macros to access the registers on your device, or -+ use driver specific versions to offset into your MMIO space -+ relative to a driver specific base pointer (see I915_READ for -+ example). -+ -+ -+ If your device supports interrupt generation, you may want to -+ setup an interrupt handler at driver load time as well. This -+ is done using the drm_irq_install() function. If your device -+ supports vertical blank interrupts, it should call -+ drm_vblank_init() to initialize the core vblank handling code before -+ enabling interrupts on your device. This ensures the vblank related -+ structures are allocated and allows the core to handle vblank events. -+ -+ -+ -+ Once your interrupt handler is registered (it'll use your -+ drm_driver.irq_handler as the actual interrupt handling -+ function), you can safely enable interrupts on your device, -+ assuming any other state your interrupt handler uses is also -+ initialized. -+ -+ -+ Another task that may be necessary during configuration is -+ mapping the video BIOS. On many devices, the VBIOS describes -+ device configuration, LCD panel timings (if any), and contains -+ flags indicating device state. Mapping the BIOS can be done -+ using the pci_map_rom() call, a convenience function that -+ takes care of mapping the actual ROM, whether it has been -+ shadowed into memory (typically at address 0xc0000) or exists -+ on the PCI device in the ROM BAR. Note that once you've -+ mapped the ROM and extracted any necessary information, be -+ sure to unmap it; on many devices the ROM address decoder is -+ shared with other BARs, so leaving it mapped can cause -+ undesired behavior like hangs or memory corruption. -+ -+ -+ -+ -+ -+ Memory manager initialization -+ -+ In order to allocate command buffers, cursor memory, scanout -+ buffers, etc., as well as support the latest features provided -+ by packages like Mesa and the X.Org X server, your driver -+ should support a memory manager. -+ -+ -+ If your driver supports memory management (it should!), you'll -+ need to set that up at load time as well. How you initialize -+ it depends on which memory manager you're using, TTM or GEM. -+ -+ -+ TTM initialization -+ -+ TTM (for Translation Table Manager) manages video memory and -+ aperture space for graphics devices. TTM supports both UMA devices -+ and devices with dedicated video RAM (VRAM), i.e. most discrete -+ graphics devices. If your device has dedicated RAM, supporting -+ TTM is desirable. TTM also integrates tightly with your -+ driver specific buffer execution function. See the radeon -+ driver for examples. -+ -+ -+ The core TTM structure is the ttm_bo_driver struct. It contains -+ several fields with function pointers for initializing the TTM, -+ allocating and freeing memory, waiting for command completion -+ and fence synchronization, and memory migration. See the -+ radeon_ttm.c file for an example of usage. 
-+ -+ -+ The ttm_global_reference structure is made up of several fields: -+ -+ -+ struct ttm_global_reference { -+ enum ttm_global_types global_type; -+ size_t size; -+ void *object; -+ int (*init) (struct ttm_global_reference *); -+ void (*release) (struct ttm_global_reference *); -+ }; -+ -+ -+ There should be one global reference structure for your memory -+ manager as a whole, and there will be others for each object -+ created by the memory manager at runtime. Your global TTM should -+ have a type of TTM_GLOBAL_TTM_MEM. The size field for the global -+ object should be sizeof(struct ttm_mem_global), and the init and -+ release hooks should point at your driver specific init and -+ release routines, which will probably eventually call -+ ttm_mem_global_init and ttm_mem_global_release respectively. -+ -+ -+ Once your global TTM accounting structure is set up and initialized -+ (done by calling ttm_global_item_ref on the global object you -+ just created), you'll need to create a buffer object TTM to -+ provide a pool for buffer object allocation by clients and the -+ kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO, -+ and its size should be sizeof(struct ttm_bo_global). Again, -+ driver specific init and release functions can be provided, -+ likely eventually calling ttm_bo_global_init and -+ ttm_bo_global_release, respectively. Also like the previous -+ object, ttm_global_item_ref is used to create an initial reference -+ count for the TTM, which will call your initialization function. -+ -+ -+ -+ GEM initialization -+ -+ GEM is an alternative to TTM, designed specifically for UMA -+ devices. It has simpler initialization and execution requirements -+ than TTM, but has no VRAM management capability. Core GEM -+ initialization is comprised of a basic drm_mm_init call to create -+ a GTT DRM MM object, which provides an address space pool for -+ object allocation. In a KMS configuration, the driver will -+ need to allocate and initialize a command ring buffer following -+ basic GEM initialization. Most UMA devices have a so-called -+ "stolen" memory region, which provides space for the initial -+ framebuffer and large, contiguous memory regions required by the -+ device. This space is not typically managed by GEM, and must -+ be initialized separately into its own DRM MM object. -+ -+ -+ Initialization will be driver specific, and will depend on -+ the architecture of the device. In the case of Intel -+ integrated graphics chips like 965GM, GEM initialization can -+ be done by calling the internal GEM init function, -+ i915_gem_do_init(). Since the 965GM is a UMA device -+ (i.e. it doesn't have dedicated VRAM), GEM will manage -+ making regular RAM available for GPU operations. Memory set -+ aside by the BIOS (called "stolen" memory by the i915 -+ driver) will be managed by the DRM memrange allocator; the -+ rest of the aperture will be managed by GEM. -+ -+ /* Basic memrange allocator for stolen space (aka vram) */ -+ drm_memrange_init(&dev_priv->vram, 0, prealloc_size); -+ /* Let GEM Manage from end of prealloc space to end of aperture */ -+ i915_gem_do_init(dev, prealloc_size, agp_size); -+ -+ -+ -+ -+ Once the memory manager has been set up, we can allocate the -+ command buffer. In the i915 case, this is also done with a -+ GEM function, i915_gem_init_ringbuffer(). -+ -+ -+ -+ -+ -+ Output configuration -+ -+ The final initialization task is output configuration. 
This involves -+ finding and initializing the CRTCs, encoders and connectors -+ for your device, creating an initial configuration and -+ registering a framebuffer console driver. -+ -+ -+ Output discovery and initialization -+ -+ Several core functions exist to create CRTCs, encoders and -+ connectors, namely drm_crtc_init(), drm_connector_init() and -+ drm_encoder_init(), along with several "helper" functions to -+ perform common tasks. -+ -+ -+ Connectors should be registered with sysfs once they've been -+ detected and initialized, using the -+ drm_sysfs_connector_add() function. Likewise, when they're -+ removed from the system, they should be destroyed with -+ drm_sysfs_connector_remove(). -+ -+ -+base; -+ drm_connector_init(dev, &intel_output->base, -+ &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); -+ -+ drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs, -+ DRM_MODE_ENCODER_DAC); -+ -+ drm_mode_connector_attach_encoder(&intel_output->base, -+ &intel_output->enc); -+ -+ /* Set up the DDC bus. */ -+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A"); -+ if (!intel_output->ddc_bus) { -+ dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " -+ "failed.\n"); -+ return; -+ } -+ -+ intel_output->type = INTEL_OUTPUT_ANALOG; -+ connector->interlace_allowed = 0; -+ connector->doublescan_allowed = 0; -+ -+ drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs); -+ drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); -+ -+ drm_sysfs_connector_add(connector); -+} -+]]> -+ -+ -+ In the example above (again, taken from the i915 driver), a -+ CRT connector and encoder combination is created. A device -+ specific i2c bus is also created, for fetching EDID data and -+ performing monitor detection. Once the process is complete, -+ the new connector is registered with sysfs, to make its -+ properties available to applications. -+ -+ -+ Helper functions and core functions -+ -+ Since many PC-class graphics devices have similar display output -+ designs, the DRM provides a set of helper functions to make -+ output management easier. The core helper routines handle -+ encoder re-routing and disabling of unused functions following -+ mode set. Using the helpers is optional, but recommended for -+ devices with PC-style architectures (i.e. a set of display planes -+ for feeding pixels to encoders which are in turn routed to -+ connectors). Devices with more complex requirements needing -+ finer grained management can opt to use the core callbacks -+ directly. -+ -+ -+ [Insert typical diagram here.] [Insert OMAP style config here.] -+ -+ -+ -+ For each encoder, CRTC and connector, several functions must -+ be provided, depending on the object type. Encoder objects -+ need to provide a DPMS (basically on/off) function, mode fixup -+ (for converting requested modes into native hardware timings), -+ and prepare, set and commit functions for use by the core DRM -+ helper functions. Connector helpers need to provide mode fetch and -+ validity functions as well as an encoder matching function for -+ returning an ideal encoder for a given connector. The core -+ connector functions include a DPMS callback, (deprecated) -+ save/restore routines, detection, mode probing, property handling, -+ and cleanup functions. -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ VBlank event handling -+ -+ The DRM core exposes two vertical blank related ioctls: -+ DRM_IOCTL_WAIT_VBLANK and DRM_IOCTL_MODESET_CTL. 
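Applications normally reach DRM_IOCTL_WAIT_VBLANK through libdrm's drmWaitVBlank() wrapper rather than issuing the ioctl directly. A rough user space sketch that blocks until the next vblank on the first CRTC might look like this; the device node path is an assumption.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <xf86drm.h>

    int main(void)
    {
            drmVBlank vbl;
            int fd = open("/dev/dri/card0", O_RDWR);  /* path is an assumption */

            if (fd < 0)
                    return 1;

            /* Wait for one more vertical blank on the first CRTC. */
            memset(&vbl, 0, sizeof(vbl));
            vbl.request.type = DRM_VBLANK_RELATIVE;
            vbl.request.sequence = 1;

            if (drmWaitVBlank(fd, &vbl) == 0)
                    printf("vblank counter is now %u\n",
                           (unsigned int)vbl.reply.sequence);

            close(fd);
            return 0;
    }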
-+ -+ -+ -+ DRM_IOCTL_WAIT_VBLANK takes a struct drm_wait_vblank structure -+ as its argument, and is used to block or request a signal when a -+ specified vblank event occurs. -+ -+ -+ DRM_IOCTL_MODESET_CTL should be called by application level -+ drivers before and after mode setting, since on many devices the -+ vertical blank counter will be reset at that time. Internally, -+ the DRM snapshots the last vblank count when the ioctl is called -+ with the _DRM_PRE_MODESET command so that the counter won't go -+ backwards (which is dealt with when _DRM_POST_MODESET is used). -+ -+ -+ To support the functions above, the DRM core provides several -+ helper functions for tracking vertical blank counters, and -+ requires drivers to provide several callbacks: -+ get_vblank_counter(), enable_vblank() and disable_vblank(). The -+ core uses get_vblank_counter() to keep the counter accurate -+ across interrupt disable periods. It should return the current -+ vertical blank event count, which is often tracked in a device -+ register. The enable and disable vblank callbacks should enable -+ and disable vertical blank interrupts, respectively. In the -+ absence of DRM clients waiting on vblank events, the core DRM -+ code will use the disable_vblank() function to disable -+ interrupts, which saves power. They'll be re-enabled again when -+ a client calls the vblank wait ioctl above. -+ -+ -+ Devices that don't provide a count register can simply use an -+ internal atomic counter incremented on every vertical blank -+ interrupt, and can make their enable and disable vblank -+ functions into no-ops. -+ -+ -+ -+ -+ Memory management -+ -+ The memory manager lies at the heart of many DRM operations, and -+ is also required to support advanced client features like OpenGL -+ pbuffers. The DRM currently contains two memory managers, TTM -+ and GEM. -+ -+ -+ -+ The Translation Table Manager (TTM) -+ -+ TTM was developed by Tungsten Graphics, primarily by Thomas -+ Hellström, and is intended to be a flexible, high performance -+ graphics memory manager. -+ -+ -+ Drivers wishing to support TTM must fill out a drm_bo_driver -+ structure. -+ -+ -+ TTM design background and information belongs here. -+ -+ -+ -+ -+ The Graphics Execution Manager (GEM) -+ -+ GEM is an Intel project, authored by Eric Anholt and Keith -+ Packard. It provides simpler interfaces than TTM, and is well -+ suited for UMA devices. -+ -+ -+ GEM-enabled drivers must provide gem_init_object() and -+ gem_free_object() callbacks to support the core memory -+ allocation routines. They should also provide several driver -+ specific ioctls to support command execution, pinning, buffer -+ read & write, mapping, and domain ownership transfers. -+ -+ -+ On a fundamental level, GEM involves several operations: memory -+ allocation and freeing, command execution, and aperture management -+ at command execution time. Buffer object allocation is relatively -+ straightforward and largely provided by Linux's shmem layer, which -+ provides memory to back each object. When mapped into the GTT -+ or used in a command buffer, the backing pages for an object are -+ flushed to memory and marked write combined so as to be coherent -+ with the GPU. Likewise, when the GPU finishes rendering to an object, -+ if the CPU accesses it, it must be made coherent with the CPU's view -+ of memory, usually involving GPU cache flushing of various kinds. 
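From user space, such a domain transition is requested through the driver's GEM set-domain ioctl before the object is read back with the CPU. The sketch below uses the i915 flavour of that interface; the buffer handle and the include path are assumptions, and other GEM drivers expose their own variants.

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <drm/i915_drm.h>   /* include path may differ between libdrm installs */

    /* Move a GEM object, identified by its handle, into the CPU read
     * domain so that the CPU sees coherent data. */
    static int move_to_cpu_domain(int fd, uint32_t handle)
    {
            struct drm_i915_gem_set_domain sd;

            memset(&sd, 0, sizeof(sd));
            sd.handle = handle;
            sd.read_domains = I915_GEM_DOMAIN_CPU;
            sd.write_domain = 0;

            /* Blocks while the object is busy, then flushes as needed. */
            return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
    }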
-+ This core CPU<->GPU coherency management is provided by the GEM -+ set domain function, which evaluates an object's current domain and -+ performs any necessary flushing or synchronization to put the object -+ into the desired coherency domain (note that the object may be busy, -+ i.e. an active render target; in that case the set domain function -+ will block the client and wait for rendering to complete before -+ performing any necessary flushing operations). -+ -+ -+ Perhaps the most important GEM function is providing a command -+ execution interface to clients. Client programs construct command -+ buffers containing references to previously allocated memory objects -+ and submit them to GEM. At that point, GEM will take care to bind -+ all the objects into the GTT, execute the buffer, and provide -+ necessary synchronization between clients accessing the same buffers. -+ This often involves evicting some objects from the GTT and re-binding -+ others (a fairly expensive operation), and providing relocation -+ support which hides fixed GTT offsets from clients. Clients must -+ take care not to submit command buffers that reference more objects -+ than can fit in the GTT or GEM will reject them and no rendering -+ will occur. Similarly, if several objects in the buffer require -+ fence registers to be allocated for correct rendering (e.g. 2D blits -+ on pre-965 chips), care must be taken not to require more fence -+ registers than are available to the client. Such resource management -+ should be abstracted from the client in libdrm. -+ -+ -+ -+ -+ -+ -+ -+ Output management -+ -+ At the core of the DRM output management code is a set of -+ structures representing CRTCs, encoders and connectors. -+ -+ -+ A CRTC is an abstraction representing a part of the chip that -+ contains a pointer to a scanout buffer. Therefore, the number -+ of CRTCs available determines how many independent scanout -+ buffers can be active at any given time. The CRTC structure -+ contains several fields to support this: a pointer to some video -+ memory, a display mode, and an (x, y) offset into the video -+ memory to support panning or configurations where one piece of -+ video memory spans multiple CRTCs. -+ -+ -+ An encoder takes pixel data from a CRTC and converts it to a -+ format suitable for any attached connectors. On some devices, -+ it may be possible to have a CRTC send data to more than one -+ encoder. In that case, both encoders would receive data from -+ the same scanout buffer, resulting in a "cloned" display -+ configuration across the connectors attached to each encoder. -+ -+ -+ A connector is the final destination for pixel data on a device, -+ and usually connects directly to an external display device like -+ a monitor or laptop panel. A connector can only be attached to -+ one encoder at a time. The connector is also the structure -+ where information about the attached display is kept, so it -+ contains fields for display data, EDID data, DPMS & -+ connection status, and information about modes supported on the -+ attached displays. -+ -+ -+ -+ -+ -+ Framebuffer management -+ -+ In order to set a mode on a given CRTC, encoder and connector -+ configuration, clients need to provide a framebuffer object which -+ will provide a source of pixels for the CRTC to deliver to the encoder(s) -+ and ultimately the connector(s) in the configuration. A framebuffer -+ is fundamentally a driver specific memory object, made into an opaque -+ handle by the DRM addfb function. 
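Through libdrm, creating such a framebuffer and scanning it out typically looks something like the following. The buffer handle, pitch, mode and object IDs are placeholders supplied by earlier KMS/GEM calls, and error handling is kept minimal.

    #include <stdint.h>
    #include <xf86drmMode.h>

    /* Wrap a previously allocated driver buffer (bo_handle) in a KMS
     * framebuffer and point one CRTC/connector pair at it. */
    static int show_buffer(int fd, uint32_t bo_handle, uint32_t pitch,
                           uint32_t crtc_id, uint32_t connector_id,
                           drmModeModeInfo *mode)
    {
            uint32_t fb_id;
            int ret;

            ret = drmModeAddFB(fd, mode->hdisplay, mode->vdisplay,
                               24, 32, pitch, bo_handle, &fb_id);
            if (ret)
                    return ret;

            /* Make the new framebuffer the scanout source for this CRTC. */
            return drmModeSetCrtc(fd, crtc_id, fb_id, 0, 0,
                                  &connector_id, 1, mode);
    }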
Once an fb has been created this -+ way it can be passed to the KMS mode setting routines for use in -+ a configuration. -+ -+ -+ -+ -+ Command submission & fencing -+ -+ This should cover a few device specific command submission -+ implementations. -+ -+ -+ -+ -+ Suspend/resume -+ -+ The DRM core provides some suspend/resume code, but drivers -+ wanting full suspend/resume support should provide save() and -+ restore() functions. These will be called at suspend, -+ hibernate, or resume time, and should perform any state save or -+ restore required by your device across suspend or hibernate -+ states. -+ -+ -+ -+ -+ DMA services -+ -+ This should cover how DMA mapping etc. is supported by the core. -+ These functions are deprecated and should not be used. -+ -+ -+ -+ -+ -+ -+ -+ Userland interfaces -+ -+ The DRM core exports several interfaces to applications, -+ generally intended to be used through corresponding libdrm -+ wrapper functions. In addition, drivers export device specific -+ interfaces for use by userspace drivers & device aware -+ applications through ioctls and sysfs files. -+ -+ -+ External interfaces include: memory mapping, context management, -+ DMA operations, AGP management, vblank control, fence -+ management, memory management, and output management. -+ -+ -+ Cover generic ioctls and sysfs layout here. Only need high -+ level info, since man pages will cover the rest. -+ -+ -+ -+ -+ -+ -+ DRM Driver API -+ -+ Include auto-generated API reference here (need to reference it -+ from paragraphs above too). -+ -+ -+ -+ -diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h -index 634c40a..d92d63a 100644 ---- a/arch/x86/include/asm/cacheflush.h -+++ b/arch/x86/include/asm/cacheflush.h -@@ -139,9 +139,11 @@ int set_memory_np(unsigned long addr, int numpages); - int set_memory_4k(unsigned long addr, int numpages); - - int set_memory_array_uc(unsigned long *addr, int addrinarray); -+int set_memory_array_wc(unsigned long *addr, int addrinarray); - int set_memory_array_wb(unsigned long *addr, int addrinarray); - - int set_pages_array_uc(struct page **pages, int addrinarray); -+int set_pages_array_wc(struct page **pages, int addrinarray); - int set_pages_array_wb(struct page **pages, int addrinarray); - - /* -diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c -index 28195c3..532e793 100644 ---- a/arch/x86/mm/pageattr.c -+++ b/arch/x86/mm/pageattr.c -@@ -997,7 +997,8 @@ out_err: - } - EXPORT_SYMBOL(set_memory_uc); - --int set_memory_array_uc(unsigned long *addr, int addrinarray) -+int _set_memory_array(unsigned long *addr, int addrinarray, -+ unsigned long new_type) - { - int i, j; - int ret; -@@ -1007,13 +1008,19 @@ int set_memory_array_uc(unsigned long *addr, int addrinarray) - */ - for (i = 0; i < addrinarray; i++) { - ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE, -- _PAGE_CACHE_UC_MINUS, NULL); -+ new_type, NULL); - if (ret) - goto out_free; - } - - ret = change_page_attr_set(addr, addrinarray, - __pgprot(_PAGE_CACHE_UC_MINUS), 1); -+ -+ if (!ret && new_type == _PAGE_CACHE_WC) -+ ret = change_page_attr_set_clr(addr, addrinarray, -+ __pgprot(_PAGE_CACHE_WC), -+ __pgprot(_PAGE_CACHE_MASK), -+ 0, CPA_ARRAY, NULL); - if (ret) - goto out_free; - -@@ -1025,8 +1032,19 @@ out_free: - - return ret; - } -+ -+int set_memory_array_uc(unsigned long *addr, int addrinarray) -+{ -+ return _set_memory_array(addr, addrinarray, _PAGE_CACHE_UC_MINUS); -+} - EXPORT_SYMBOL(set_memory_array_uc); - -+int set_memory_array_wc(unsigned long 
*addr, int addrinarray) -+{ -+ return _set_memory_array(addr, addrinarray, _PAGE_CACHE_WC); -+} -+EXPORT_SYMBOL(set_memory_array_wc); -+ - int _set_memory_wc(unsigned long addr, int numpages) - { - int ret; -@@ -1153,26 +1171,34 @@ int set_pages_uc(struct page *page, int numpages) - } - EXPORT_SYMBOL(set_pages_uc); - --int set_pages_array_uc(struct page **pages, int addrinarray) -+static int _set_pages_array(struct page **pages, int addrinarray, -+ unsigned long new_type) - { - unsigned long start; - unsigned long end; - int i; - int free_idx; -+ int ret; - - for (i = 0; i < addrinarray; i++) { - if (PageHighMem(pages[i])) - continue; - start = page_to_pfn(pages[i]) << PAGE_SHIFT; - end = start + PAGE_SIZE; -- if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL)) -+ if (reserve_memtype(start, end, new_type, NULL)) - goto err_out; - } - -- if (cpa_set_pages_array(pages, addrinarray, -- __pgprot(_PAGE_CACHE_UC_MINUS)) == 0) { -- return 0; /* Success */ -- } -+ ret = cpa_set_pages_array(pages, addrinarray, -+ __pgprot(_PAGE_CACHE_UC_MINUS)); -+ if (!ret && new_type == _PAGE_CACHE_WC) -+ ret = change_page_attr_set_clr(NULL, addrinarray, -+ __pgprot(_PAGE_CACHE_WC), -+ __pgprot(_PAGE_CACHE_MASK), -+ 0, CPA_PAGES_ARRAY, pages); -+ if (ret) -+ goto err_out; -+ return 0; /* Success */ - err_out: - free_idx = i; - for (i = 0; i < free_idx; i++) { -@@ -1184,8 +1210,19 @@ err_out: - } - return -EINVAL; - } -+ -+int set_pages_array_uc(struct page **pages, int addrinarray) -+{ -+ return _set_pages_array(pages, addrinarray, _PAGE_CACHE_UC_MINUS); -+} - EXPORT_SYMBOL(set_pages_array_uc); - -+int set_pages_array_wc(struct page **pages, int addrinarray) -+{ -+ return _set_pages_array(pages, addrinarray, _PAGE_CACHE_WC); -+} -+EXPORT_SYMBOL(set_pages_array_wc); -+ - int set_pages_wb(struct page *page, int numpages) - { - unsigned long addr = (unsigned long)page_address(page); -diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h -index 870f12c..1204909 100644 ---- a/drivers/char/agp/agp.h -+++ b/drivers/char/agp/agp.h -@@ -178,86 +178,6 @@ struct agp_bridge_data { - #define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page) - - --/* Intel registers */ --#define INTEL_APSIZE 0xb4 --#define INTEL_ATTBASE 0xb8 --#define INTEL_AGPCTRL 0xb0 --#define INTEL_NBXCFG 0x50 --#define INTEL_ERRSTS 0x91 -- --/* Intel i830 registers */ --#define I830_GMCH_CTRL 0x52 --#define I830_GMCH_ENABLED 0x4 --#define I830_GMCH_MEM_MASK 0x1 --#define I830_GMCH_MEM_64M 0x1 --#define I830_GMCH_MEM_128M 0 --#define I830_GMCH_GMS_MASK 0x70 --#define I830_GMCH_GMS_DISABLED 0x00 --#define I830_GMCH_GMS_LOCAL 0x10 --#define I830_GMCH_GMS_STOLEN_512 0x20 --#define I830_GMCH_GMS_STOLEN_1024 0x30 --#define I830_GMCH_GMS_STOLEN_8192 0x40 --#define I830_RDRAM_CHANNEL_TYPE 0x03010 --#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5) --#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3) -- --/* This one is for I830MP w. 
an external graphic card */ --#define INTEL_I830_ERRSTS 0x92 -- --/* Intel 855GM/852GM registers */ --#define I855_GMCH_GMS_MASK 0xF0 --#define I855_GMCH_GMS_STOLEN_0M 0x0 --#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4) --#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4) --#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4) --#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4) --#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4) --#define I85X_CAPID 0x44 --#define I85X_VARIANT_MASK 0x7 --#define I85X_VARIANT_SHIFT 5 --#define I855_GME 0x0 --#define I855_GM 0x4 --#define I852_GME 0x2 --#define I852_GM 0x5 -- --/* Intel i845 registers */ --#define INTEL_I845_AGPM 0x51 --#define INTEL_I845_ERRSTS 0xc8 -- --/* Intel i860 registers */ --#define INTEL_I860_MCHCFG 0x50 --#define INTEL_I860_ERRSTS 0xc8 -- --/* Intel i810 registers */ --#define I810_GMADDR 0x10 --#define I810_MMADDR 0x14 --#define I810_PTE_BASE 0x10000 --#define I810_PTE_MAIN_UNCACHED 0x00000000 --#define I810_PTE_LOCAL 0x00000002 --#define I810_PTE_VALID 0x00000001 --#define I830_PTE_SYSTEM_CACHED 0x00000006 --#define I810_SMRAM_MISCC 0x70 --#define I810_GFX_MEM_WIN_SIZE 0x00010000 --#define I810_GFX_MEM_WIN_32M 0x00010000 --#define I810_GMS 0x000000c0 --#define I810_GMS_DISABLE 0x00000000 --#define I810_PGETBL_CTL 0x2020 --#define I810_PGETBL_ENABLED 0x00000001 --#define I965_PGETBL_SIZE_MASK 0x0000000e --#define I965_PGETBL_SIZE_512KB (0 << 1) --#define I965_PGETBL_SIZE_256KB (1 << 1) --#define I965_PGETBL_SIZE_128KB (2 << 1) --#define I965_PGETBL_SIZE_1MB (3 << 1) --#define I965_PGETBL_SIZE_2MB (4 << 1) --#define I965_PGETBL_SIZE_1_5MB (5 << 1) --#define G33_PGETBL_SIZE_MASK (3 << 8) --#define G33_PGETBL_SIZE_1M (1 << 8) --#define G33_PGETBL_SIZE_2M (2 << 8) -- --#define I810_DRAM_CTL 0x3000 --#define I810_DRAM_ROW_0 0x00000001 --#define I810_DRAM_ROW_0_SDRAM 0x00000001 -- - struct agp_device_ids { - unsigned short device_id; /* first, to make table easier to read */ - enum chipset_type chipset; -diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c -index d2ce68f..fd79351 100644 ---- a/drivers/char/agp/ali-agp.c -+++ b/drivers/char/agp/ali-agp.c -@@ -204,6 +204,7 @@ static const struct agp_bridge_driver ali_generic_bridge = { - .aperture_sizes = ali_generic_sizes, - .size_type = U32_APER_SIZE, - .num_aperture_sizes = 7, -+ .needs_scratch_page = true, - .configure = ali_configure, - .fetch_size = ali_fetch_size, - .cleanup = ali_cleanup, -diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c -index a7637d7..b6b1568 100644 ---- a/drivers/char/agp/amd-k7-agp.c -+++ b/drivers/char/agp/amd-k7-agp.c -@@ -142,6 +142,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge) - { - struct aper_size_info_lvl2 *value; - struct amd_page_map page_dir; -+ unsigned long __iomem *cur_gatt; - unsigned long addr; - int retval; - u32 temp; -@@ -178,6 +179,13 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge) - readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ - } - -+ for (i = 0; i < value->num_entries; i++) { -+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; -+ cur_gatt = GET_GATT(addr); -+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); -+ readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. 
*/ -+ } -+ - return 0; - } - -@@ -375,6 +383,7 @@ static const struct agp_bridge_driver amd_irongate_driver = { - .aperture_sizes = amd_irongate_sizes, - .size_type = LVL2_APER_SIZE, - .num_aperture_sizes = 7, -+ .needs_scratch_page = true, - .configure = amd_irongate_configure, - .fetch_size = amd_irongate_fetch_size, - .cleanup = amd_irongate_cleanup, -diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c -index fd50ead..70312da 100644 ---- a/drivers/char/agp/amd64-agp.c -+++ b/drivers/char/agp/amd64-agp.c -@@ -210,6 +210,7 @@ static const struct agp_bridge_driver amd_8151_driver = { - .aperture_sizes = amd_8151_sizes, - .size_type = U32_APER_SIZE, - .num_aperture_sizes = 7, -+ .needs_scratch_page = true, - .configure = amd_8151_configure, - .fetch_size = amd64_fetch_size, - .cleanup = amd64_cleanup, -@@ -383,7 +384,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) - { - u32 httfea,baseaddr,enuscr; - struct pci_dev *dev1; -- int i; -+ int i, ret; - unsigned size = amd64_fetch_size(); - - dev_info(&pdev->dev, "setting up ULi AGP\n"); -@@ -399,15 +400,18 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) - - if (i == ARRAY_SIZE(uli_sizes)) { - dev_info(&pdev->dev, "no ULi size found for %d\n", size); -- return -ENODEV; -+ ret = -ENODEV; -+ goto put; - } - - /* shadow x86-64 registers into ULi registers */ - pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea); - - /* if x86-64 aperture base is beyond 4G, exit here */ -- if ((httfea & 0x7fff) >> (32 - 25)) -- return -ENODEV; -+ if ((httfea & 0x7fff) >> (32 - 25)) { -+ ret = -ENODEV; -+ goto put; -+ } - - httfea = (httfea& 0x7fff) << 25; - -@@ -419,9 +423,10 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) - enuscr= httfea+ (size * 1024 * 1024) - 1; - pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea); - pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr); -- -+ ret = 0; -+put: - pci_dev_put(dev1); -- return 0; -+ return ret; - } - - -@@ -440,7 +445,7 @@ static int nforce3_agp_init(struct pci_dev *pdev) - { - u32 tmp, apbase, apbar, aplimit; - struct pci_dev *dev1; -- int i; -+ int i, ret; - unsigned size = amd64_fetch_size(); - - dev_info(&pdev->dev, "setting up Nforce3 AGP\n"); -@@ -457,7 +462,8 @@ static int nforce3_agp_init(struct pci_dev *pdev) - - if (i == ARRAY_SIZE(nforce3_sizes)) { - dev_info(&pdev->dev, "no NForce3 size found for %d\n", size); -- return -ENODEV; -+ ret = -ENODEV; -+ goto put; - } - - pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp); -@@ -471,7 +477,8 @@ static int nforce3_agp_init(struct pci_dev *pdev) - /* if x86-64 aperture base is beyond 4G, exit here */ - if ( (apbase & 0x7fff) >> (32 - 25) ) { - dev_info(&pdev->dev, "aperture base > 4G\n"); -- return -ENODEV; -+ ret = -ENODEV; -+ goto put; - } - - apbase = (apbase & 0x7fff) << 25; -@@ -487,9 +494,11 @@ static int nforce3_agp_init(struct pci_dev *pdev) - pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase); - pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit); - -+ ret = 0; -+put: - pci_dev_put(dev1); - -- return 0; -+ return ret; - } - - static int __devinit agp_amd64_probe(struct pci_dev *pdev, -@@ -499,6 +508,10 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev, - u8 cap_ptr; - int err; - -+ /* The Highlander principle */ -+ if (agp_bridges_found) -+ return -ENODEV; -+ - cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); - if (!cap_ptr) - return -ENODEV; -@@ -562,6 +575,8 @@ static void __devexit agp_amd64_remove(struct 
pci_dev *pdev) - amd64_aperture_sizes[bridge->aperture_size_idx].size); - agp_remove_bridge(bridge); - agp_put_bridge(bridge); -+ -+ agp_bridges_found--; - } - - #ifdef CONFIG_PM -@@ -709,6 +724,11 @@ static struct pci_device_id agp_amd64_pci_table[] = { - - MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table); - -+static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = { -+ { PCI_DEVICE_CLASS(0, 0) }, -+ { } -+}; -+ - static struct pci_driver agp_amd64_pci_driver = { - .name = "agpgart-amd64", - .id_table = agp_amd64_pci_table, -@@ -734,7 +754,6 @@ int __init agp_amd64_init(void) - return err; - - if (agp_bridges_found == 0) { -- struct pci_dev *dev; - if (!agp_try_unsupported && !agp_try_unsupported_boot) { - printk(KERN_INFO PFX "No supported AGP bridge found.\n"); - #ifdef MODULE -@@ -750,17 +769,10 @@ int __init agp_amd64_init(void) - return -ENODEV; - - /* Look for any AGP bridge */ -- dev = NULL; -- err = -ENODEV; -- for_each_pci_dev(dev) { -- if (!pci_find_capability(dev, PCI_CAP_ID_AGP)) -- continue; -- /* Only one bridge supported right now */ -- if (agp_amd64_probe(dev, NULL) == 0) { -- err = 0; -- break; -- } -- } -+ agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table; -+ err = driver_attach(&agp_amd64_pci_driver.driver); -+ if (err == 0 && agp_bridges_found == 0) -+ err = -ENODEV; - } - return err; - } -diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c -index 3b2ecbe..dc30e22 100644 ---- a/drivers/char/agp/ati-agp.c -+++ b/drivers/char/agp/ati-agp.c -@@ -341,6 +341,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge) - { - struct aper_size_info_lvl2 *value; - struct ati_page_map page_dir; -+ unsigned long __iomem *cur_gatt; - unsigned long addr; - int retval; - u32 temp; -@@ -395,6 +396,12 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge) - readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ - } - -+ for (i = 0; i < value->num_entries; i++) { -+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr; -+ cur_gatt = GET_GATT(addr); -+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr)); -+ } -+ - return 0; - } - -@@ -415,6 +422,7 @@ static const struct agp_bridge_driver ati_generic_bridge = { - .aperture_sizes = ati_generic_sizes, - .size_type = LVL2_APER_SIZE, - .num_aperture_sizes = 7, -+ .needs_scratch_page = true, - .configure = ati_configure, - .fetch_size = ati_fetch_size, - .cleanup = ati_cleanup, -diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c -index 793f39e..aa109cb 100644 ---- a/drivers/char/agp/efficeon-agp.c -+++ b/drivers/char/agp/efficeon-agp.c -@@ -28,6 +28,7 @@ - #include - #include - #include "agp.h" -+#include "intel-agp.h" - - /* - * The real differences to the generic AGP code is -diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c -index aa4248e..d836a71 100644 ---- a/drivers/char/agp/intel-agp.c -+++ b/drivers/char/agp/intel-agp.c -@@ -11,1531 +11,13 @@ - #include - #include - #include "agp.h" -+#include "intel-agp.h" -+ -+#include "intel-gtt.c" - - int intel_agp_enabled; - EXPORT_SYMBOL(intel_agp_enabled); - --/* -- * If we have Intel graphics, we're not going to have anything other than -- * an Intel IOMMU. So make the correct use of the PCI DMA API contingent -- * on the Intel IOMMU support (CONFIG_DMAR). -- * Only newer chipsets need to bother with this, of course. 
-- */ --#ifdef CONFIG_DMAR --#define USE_PCI_DMA_API 1 --#endif -- --#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588 --#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a --#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970 --#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972 --#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980 --#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982 --#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990 --#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992 --#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0 --#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2 --#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00 --#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02 --#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10 --#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12 --#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC --#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE --#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010 --#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011 --#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000 --#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001 --#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 --#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2 --#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 --#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 --#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 --#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 --#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 --#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 --#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 --#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 --#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 --#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02 --#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10 --#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12 --#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20 --#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22 --#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 --#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 --#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 --#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 --#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 --#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 --#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a --#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 --#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 --#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 --#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 --#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 -- --/* cover 915 and 945 variants */ --#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB) -- --#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB) -- --#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) -- --#define 
IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) -- --#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) -- --#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ -- IS_SNB) -- --extern int agp_memory_reserved; -- -- --/* Intel 815 register */ --#define INTEL_815_APCONT 0x51 --#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF -- --/* Intel i820 registers */ --#define INTEL_I820_RDCR 0x51 --#define INTEL_I820_ERRSTS 0xc8 -- --/* Intel i840 registers */ --#define INTEL_I840_MCHCFG 0x50 --#define INTEL_I840_ERRSTS 0xc8 -- --/* Intel i850 registers */ --#define INTEL_I850_MCHCFG 0x50 --#define INTEL_I850_ERRSTS 0xc8 -- --/* intel 915G registers */ --#define I915_GMADDR 0x18 --#define I915_MMADDR 0x10 --#define I915_PTEADDR 0x1C --#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) --#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) --#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) --#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4) --#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) --#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) --#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) --#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) -- --#define I915_IFPADDR 0x60 -- --/* Intel 965G registers */ --#define I965_MSAC 0x62 --#define I965_IFPADDR 0x70 -- --/* Intel 7505 registers */ --#define INTEL_I7505_APSIZE 0x74 --#define INTEL_I7505_NCAPID 0x60 --#define INTEL_I7505_NISTAT 0x6c --#define INTEL_I7505_ATTBASE 0x78 --#define INTEL_I7505_ERRSTS 0x42 --#define INTEL_I7505_AGPCTRL 0x70 --#define INTEL_I7505_MCHCFG 0x50 -- --#define SNB_GMCH_CTRL 0x50 --#define SNB_GMCH_GMS_STOLEN_MASK 0xF8 --#define SNB_GMCH_GMS_STOLEN_32M (1 << 3) --#define SNB_GMCH_GMS_STOLEN_64M (2 << 3) --#define SNB_GMCH_GMS_STOLEN_96M (3 << 3) --#define SNB_GMCH_GMS_STOLEN_128M (4 << 3) --#define SNB_GMCH_GMS_STOLEN_160M (5 << 3) --#define SNB_GMCH_GMS_STOLEN_192M (6 << 3) --#define SNB_GMCH_GMS_STOLEN_224M (7 << 3) --#define SNB_GMCH_GMS_STOLEN_256M (8 << 3) --#define SNB_GMCH_GMS_STOLEN_288M (9 << 3) --#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) --#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) --#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) --#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) --#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) --#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) --#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) --#define SNB_GTT_SIZE_0M (0 << 8) --#define SNB_GTT_SIZE_1M (1 << 8) --#define SNB_GTT_SIZE_2M (2 << 8) --#define SNB_GTT_SIZE_MASK (3 << 8) -- --static const struct aper_size_info_fixed intel_i810_sizes[] = --{ -- {64, 16384, 4}, -- /* The 32M mode still requires a 64k gatt */ -- {32, 8192, 4} --}; -- --#define AGP_DCACHE_MEMORY 1 --#define AGP_PHYS_MEMORY 2 --#define INTEL_AGP_CACHED_MEMORY 3 -- --static struct gatt_mask intel_i810_masks[] = --{ -- {.mask = 
I810_PTE_VALID, .type = 0}, -- {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY}, -- {.mask = I810_PTE_VALID, .type = 0}, -- {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED, -- .type = INTEL_AGP_CACHED_MEMORY} --}; -- --static struct _intel_private { -- struct pci_dev *pcidev; /* device one */ -- u8 __iomem *registers; -- u32 __iomem *gtt; /* I915G */ -- int num_dcache_entries; -- /* gtt_entries is the number of gtt entries that are already mapped -- * to stolen memory. Stolen memory is larger than the memory mapped -- * through gtt_entries, as it includes some reserved space for the BIOS -- * popup and for the GTT. -- */ -- int gtt_entries; /* i830+ */ -- int gtt_total_size; -- union { -- void __iomem *i9xx_flush_page; -- void *i8xx_flush_page; -- }; -- struct page *i8xx_page; -- struct resource ifp_resource; -- int resource_valid; --} intel_private; -- --#ifdef USE_PCI_DMA_API --static int intel_agp_map_page(struct page *page, dma_addr_t *ret) --{ -- *ret = pci_map_page(intel_private.pcidev, page, 0, -- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); -- if (pci_dma_mapping_error(intel_private.pcidev, *ret)) -- return -EINVAL; -- return 0; --} -- --static void intel_agp_unmap_page(struct page *page, dma_addr_t dma) --{ -- pci_unmap_page(intel_private.pcidev, dma, -- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); --} -- --static void intel_agp_free_sglist(struct agp_memory *mem) --{ -- struct sg_table st; -- -- st.sgl = mem->sg_list; -- st.orig_nents = st.nents = mem->page_count; -- -- sg_free_table(&st); -- -- mem->sg_list = NULL; -- mem->num_sg = 0; --} -- --static int intel_agp_map_memory(struct agp_memory *mem) --{ -- struct sg_table st; -- struct scatterlist *sg; -- int i; -- -- DBG("try mapping %lu pages\n", (unsigned long)mem->page_count); -- -- if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL)) -- return -ENOMEM; -- -- mem->sg_list = sg = st.sgl; -- -- for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg)) -- sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0); -- -- mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list, -- mem->page_count, PCI_DMA_BIDIRECTIONAL); -- if (unlikely(!mem->num_sg)) { -- intel_agp_free_sglist(mem); -- return -ENOMEM; -- } -- return 0; --} -- --static void intel_agp_unmap_memory(struct agp_memory *mem) --{ -- DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); -- -- pci_unmap_sg(intel_private.pcidev, mem->sg_list, -- mem->page_count, PCI_DMA_BIDIRECTIONAL); -- intel_agp_free_sglist(mem); --} -- --static void intel_agp_insert_sg_entries(struct agp_memory *mem, -- off_t pg_start, int mask_type) --{ -- struct scatterlist *sg; -- int i, j; -- -- j = pg_start; -- -- WARN_ON(!mem->num_sg); -- -- if (mem->num_sg == mem->page_count) { -- for_each_sg(mem->sg_list, sg, mem->page_count, i) { -- writel(agp_bridge->driver->mask_memory(agp_bridge, -- sg_dma_address(sg), mask_type), -- intel_private.gtt+j); -- j++; -- } -- } else { -- /* sg may merge pages, but we have to separate -- * per-page addr for GTT */ -- unsigned int len, m; -- -- for_each_sg(mem->sg_list, sg, mem->num_sg, i) { -- len = sg_dma_len(sg) / PAGE_SIZE; -- for (m = 0; m < len; m++) { -- writel(agp_bridge->driver->mask_memory(agp_bridge, -- sg_dma_address(sg) + m * PAGE_SIZE, -- mask_type), -- intel_private.gtt+j); -- j++; -- } -- } -- } -- readl(intel_private.gtt+j-1); --} -- --#else -- --static void intel_agp_insert_sg_entries(struct agp_memory *mem, -- off_t pg_start, int mask_type) --{ -- int i, j; -- u32 cache_bits = 0; -- -- if (agp_bridge->dev->device == 
PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) -- { -- cache_bits = I830_PTE_SYSTEM_CACHED; -- } -- -- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { -- writel(agp_bridge->driver->mask_memory(agp_bridge, -- page_to_phys(mem->pages[i]), mask_type), -- intel_private.gtt+j); -- } -- -- readl(intel_private.gtt+j-1); --} -- --#endif -- --static int intel_i810_fetch_size(void) --{ -- u32 smram_miscc; -- struct aper_size_info_fixed *values; -- -- pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc); -- values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); -- -- if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { -- dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n"); -- return 0; -- } -- if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { -- agp_bridge->previous_size = -- agp_bridge->current_size = (void *) (values + 1); -- agp_bridge->aperture_size_idx = 1; -- return values[1].size; -- } else { -- agp_bridge->previous_size = -- agp_bridge->current_size = (void *) (values); -- agp_bridge->aperture_size_idx = 0; -- return values[0].size; -- } -- -- return 0; --} -- --static int intel_i810_configure(void) --{ -- struct aper_size_info_fixed *current_size; -- u32 temp; -- int i; -- -- current_size = A_SIZE_FIX(agp_bridge->current_size); -- -- if (!intel_private.registers) { -- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); -- temp &= 0xfff80000; -- -- intel_private.registers = ioremap(temp, 128 * 4096); -- if (!intel_private.registers) { -- dev_err(&intel_private.pcidev->dev, -- "can't remap memory\n"); -- return -ENOMEM; -- } -- } -- -- if ((readl(intel_private.registers+I810_DRAM_CTL) -- & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { -- /* This will need to be dynamically assigned */ -- dev_info(&intel_private.pcidev->dev, -- "detected 4MB dedicated video ram\n"); -- intel_private.num_dcache_entries = 1024; -- } -- pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); -- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); -- writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); -- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ -- -- if (agp_bridge->driver->needs_scratch_page) { -- for (i = 0; i < current_size->num_entries; i++) { -- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); -- } -- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */ -- } -- global_cache_flush(); -- return 0; --} -- --static void intel_i810_cleanup(void) --{ -- writel(0, intel_private.registers+I810_PGETBL_CTL); -- readl(intel_private.registers); /* PCI Posting. 
*/ -- iounmap(intel_private.registers); --} -- --static void intel_i810_tlbflush(struct agp_memory *mem) --{ -- return; --} -- --static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode) --{ -- return; --} -- --/* Exists to support ARGB cursors */ --static struct page *i8xx_alloc_pages(void) --{ -- struct page *page; -- -- page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2); -- if (page == NULL) -- return NULL; -- -- if (set_pages_uc(page, 4) < 0) { -- set_pages_wb(page, 4); -- __free_pages(page, 2); -- return NULL; -- } -- get_page(page); -- atomic_inc(&agp_bridge->current_memory_agp); -- return page; --} -- --static void i8xx_destroy_pages(struct page *page) --{ -- if (page == NULL) -- return; -- -- set_pages_wb(page, 4); -- put_page(page); -- __free_pages(page, 2); -- atomic_dec(&agp_bridge->current_memory_agp); --} -- --static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge, -- int type) --{ -- if (type < AGP_USER_TYPES) -- return type; -- else if (type == AGP_USER_CACHED_MEMORY) -- return INTEL_AGP_CACHED_MEMORY; -- else -- return 0; --} -- --static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, -- int type) --{ -- int i, j, num_entries; -- void *temp; -- int ret = -EINVAL; -- int mask_type; -- -- if (mem->page_count == 0) -- goto out; -- -- temp = agp_bridge->current_size; -- num_entries = A_SIZE_FIX(temp)->num_entries; -- -- if ((pg_start + mem->page_count) > num_entries) -- goto out_err; -- -- -- for (j = pg_start; j < (pg_start + mem->page_count); j++) { -- if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) { -- ret = -EBUSY; -- goto out_err; -- } -- } -- -- if (type != mem->type) -- goto out_err; -- -- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); -- -- switch (mask_type) { -- case AGP_DCACHE_MEMORY: -- if (!mem->is_flushed) -- global_cache_flush(); -- for (i = pg_start; i < (pg_start + mem->page_count); i++) { -- writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID, -- intel_private.registers+I810_PTE_BASE+(i*4)); -- } -- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); -- break; -- case AGP_PHYS_MEMORY: -- case AGP_NORMAL_MEMORY: -- if (!mem->is_flushed) -- global_cache_flush(); -- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { -- writel(agp_bridge->driver->mask_memory(agp_bridge, -- page_to_phys(mem->pages[i]), mask_type), -- intel_private.registers+I810_PTE_BASE+(j*4)); -- } -- readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); -- break; -- default: -- goto out_err; -- } -- -- agp_bridge->driver->tlb_flush(mem); --out: -- ret = 0; --out_err: -- mem->is_flushed = true; -- return ret; --} -- --static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start, -- int type) --{ -- int i; -- -- if (mem->page_count == 0) -- return 0; -- -- for (i = pg_start; i < (mem->page_count + pg_start); i++) { -- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); -- } -- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); -- -- agp_bridge->driver->tlb_flush(mem); -- return 0; --} -- --/* -- * The i810/i830 requires a physical address to program its mouse -- * pointer into hardware. -- * However the Xserver still writes to it through the agp aperture. 
-- */ --static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) --{ -- struct agp_memory *new; -- struct page *page; -- -- switch (pg_count) { -- case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge); -- break; -- case 4: -- /* kludge to get 4 physical pages for ARGB cursor */ -- page = i8xx_alloc_pages(); -- break; -- default: -- return NULL; -- } -- -- if (page == NULL) -- return NULL; -- -- new = agp_create_memory(pg_count); -- if (new == NULL) -- return NULL; -- -- new->pages[0] = page; -- if (pg_count == 4) { -- /* kludge to get 4 physical pages for ARGB cursor */ -- new->pages[1] = new->pages[0] + 1; -- new->pages[2] = new->pages[1] + 1; -- new->pages[3] = new->pages[2] + 1; -- } -- new->page_count = pg_count; -- new->num_scratch_pages = pg_count; -- new->type = AGP_PHYS_MEMORY; -- new->physical = page_to_phys(new->pages[0]); -- return new; --} -- --static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type) --{ -- struct agp_memory *new; -- -- if (type == AGP_DCACHE_MEMORY) { -- if (pg_count != intel_private.num_dcache_entries) -- return NULL; -- -- new = agp_create_memory(1); -- if (new == NULL) -- return NULL; -- -- new->type = AGP_DCACHE_MEMORY; -- new->page_count = pg_count; -- new->num_scratch_pages = 0; -- agp_free_page_array(new); -- return new; -- } -- if (type == AGP_PHYS_MEMORY) -- return alloc_agpphysmem_i8xx(pg_count, type); -- return NULL; --} -- --static void intel_i810_free_by_type(struct agp_memory *curr) --{ -- agp_free_key(curr->key); -- if (curr->type == AGP_PHYS_MEMORY) { -- if (curr->page_count == 4) -- i8xx_destroy_pages(curr->pages[0]); -- else { -- agp_bridge->driver->agp_destroy_page(curr->pages[0], -- AGP_PAGE_DESTROY_UNMAP); -- agp_bridge->driver->agp_destroy_page(curr->pages[0], -- AGP_PAGE_DESTROY_FREE); -- } -- agp_free_page_array(curr); -- } -- kfree(curr); --} -- --static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge, -- dma_addr_t addr, int type) --{ -- /* Type checking must be done elsewhere */ -- return addr | bridge->driver->masks[type].mask; --} -- --static struct aper_size_info_fixed intel_i830_sizes[] = --{ -- {128, 32768, 5}, -- /* The 64M mode still requires a 128k gatt */ -- {64, 16384, 5}, -- {256, 65536, 6}, -- {512, 131072, 7}, --}; -- --static void intel_i830_init_gtt_entries(void) --{ -- u16 gmch_ctrl; -- int gtt_entries = 0; -- u8 rdct; -- int local = 0; -- static const int ddt[4] = { 0, 16, 32, 64 }; -- int size; /* reserved space (in kb) at the top of stolen memory */ -- -- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); -- -- if (IS_I965) { -- u32 pgetbl_ctl; -- pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); -- -- /* The 965 has a field telling us the size of the GTT, -- * which may be larger than what is necessary to map the -- * aperture. 
-- */ -- switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { -- case I965_PGETBL_SIZE_128KB: -- size = 128; -- break; -- case I965_PGETBL_SIZE_256KB: -- size = 256; -- break; -- case I965_PGETBL_SIZE_512KB: -- size = 512; -- break; -- case I965_PGETBL_SIZE_1MB: -- size = 1024; -- break; -- case I965_PGETBL_SIZE_2MB: -- size = 2048; -- break; -- case I965_PGETBL_SIZE_1_5MB: -- size = 1024 + 512; -- break; -- default: -- dev_info(&intel_private.pcidev->dev, -- "unknown page table size, assuming 512KB\n"); -- size = 512; -- } -- size += 4; /* add in BIOS popup space */ -- } else if (IS_G33 && !IS_PINEVIEW) { -- /* G33's GTT size defined in gmch_ctrl */ -- switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { -- case G33_PGETBL_SIZE_1M: -- size = 1024; -- break; -- case G33_PGETBL_SIZE_2M: -- size = 2048; -- break; -- default: -- dev_info(&agp_bridge->dev->dev, -- "unknown page table size 0x%x, assuming 512KB\n", -- (gmch_ctrl & G33_PGETBL_SIZE_MASK)); -- size = 512; -- } -- size += 4; -- } else if (IS_G4X || IS_PINEVIEW) { -- /* On 4 series hardware, GTT stolen is separate from graphics -- * stolen, ignore it in stolen gtt entries counting. However, -- * 4KB of the stolen memory doesn't get mapped to the GTT. -- */ -- size = 4; -- } else { -- /* On previous hardware, the GTT size was just what was -- * required to map the aperture. -- */ -- size = agp_bridge->driver->fetch_size() + 4; -- } -- -- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB || -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { -- switch (gmch_ctrl & I830_GMCH_GMS_MASK) { -- case I830_GMCH_GMS_STOLEN_512: -- gtt_entries = KB(512) - KB(size); -- break; -- case I830_GMCH_GMS_STOLEN_1024: -- gtt_entries = MB(1) - KB(size); -- break; -- case I830_GMCH_GMS_STOLEN_8192: -- gtt_entries = MB(8) - KB(size); -- break; -- case I830_GMCH_GMS_LOCAL: -- rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE); -- gtt_entries = (I830_RDRAM_ND(rdct) + 1) * -- MB(ddt[I830_RDRAM_DDT(rdct)]); -- local = 1; -- break; -- default: -- gtt_entries = 0; -- break; -- } -- } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || -- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) { -- /* -- * SandyBridge has new memory control reg at 0x50.w -- */ -- u16 snb_gmch_ctl; -- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); -- switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) { -- case SNB_GMCH_GMS_STOLEN_32M: -- gtt_entries = MB(32) - KB(size); -- break; -- case SNB_GMCH_GMS_STOLEN_64M: -- gtt_entries = MB(64) - KB(size); -- break; -- case SNB_GMCH_GMS_STOLEN_96M: -- gtt_entries = MB(96) - KB(size); -- break; -- case SNB_GMCH_GMS_STOLEN_128M: -- gtt_entries = MB(128) - KB(size); -- break; -- case SNB_GMCH_GMS_STOLEN_160M: -- gtt_entries = MB(160) - KB(size); -- break; -- case SNB_GMCH_GMS_STOLEN_192M: -- gtt_entries = MB(192) - KB(size); -- break; -- case SNB_GMCH_GMS_STOLEN_224M: -- gtt_entries = MB(224) - KB(size); -- break; -- case SNB_GMCH_GMS_STOLEN_256M: -- gtt_entries = MB(256) - KB(size); -- break; -- case SNB_GMCH_GMS_STOLEN_288M: -- gtt_entries = MB(288) - KB(size); -- break; -- case SNB_GMCH_GMS_STOLEN_320M: -- gtt_entries = MB(320) - KB(size); -- break; -- case SNB_GMCH_GMS_STOLEN_352M: -- gtt_entries = MB(352) - KB(size); -- break; -- case SNB_GMCH_GMS_STOLEN_384M: -- gtt_entries = MB(384) - KB(size); -- break; -- case SNB_GMCH_GMS_STOLEN_416M: -- gtt_entries = MB(416) - KB(size); -- break; -- case SNB_GMCH_GMS_STOLEN_448M: -- gtt_entries = MB(448) - KB(size); -- break; -- 
case SNB_GMCH_GMS_STOLEN_480M: -- gtt_entries = MB(480) - KB(size); -- break; -- case SNB_GMCH_GMS_STOLEN_512M: -- gtt_entries = MB(512) - KB(size); -- break; -- } -- } else { -- switch (gmch_ctrl & I855_GMCH_GMS_MASK) { -- case I855_GMCH_GMS_STOLEN_1M: -- gtt_entries = MB(1) - KB(size); -- break; -- case I855_GMCH_GMS_STOLEN_4M: -- gtt_entries = MB(4) - KB(size); -- break; -- case I855_GMCH_GMS_STOLEN_8M: -- gtt_entries = MB(8) - KB(size); -- break; -- case I855_GMCH_GMS_STOLEN_16M: -- gtt_entries = MB(16) - KB(size); -- break; -- case I855_GMCH_GMS_STOLEN_32M: -- gtt_entries = MB(32) - KB(size); -- break; -- case I915_GMCH_GMS_STOLEN_48M: -- /* Check it's really I915G */ -- if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) -- gtt_entries = MB(48) - KB(size); -- else -- gtt_entries = 0; -- break; -- case I915_GMCH_GMS_STOLEN_64M: -- /* Check it's really I915G */ -- if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) -- gtt_entries = MB(64) - KB(size); -- else -- gtt_entries = 0; -- break; -- case G33_GMCH_GMS_STOLEN_128M: -- if (IS_G33 || IS_I965 || IS_G4X) -- gtt_entries = MB(128) - KB(size); -- else -- gtt_entries = 0; -- break; -- case G33_GMCH_GMS_STOLEN_256M: -- if (IS_G33 || IS_I965 || IS_G4X) -- gtt_entries = MB(256) - KB(size); -- else -- gtt_entries = 0; -- break; -- case INTEL_GMCH_GMS_STOLEN_96M: -- if (IS_I965 || IS_G4X) -- gtt_entries = MB(96) - KB(size); -- else -- gtt_entries = 0; -- break; -- case INTEL_GMCH_GMS_STOLEN_160M: -- if (IS_I965 || IS_G4X) -- gtt_entries = MB(160) - KB(size); -- else -- gtt_entries = 0; -- break; -- case INTEL_GMCH_GMS_STOLEN_224M: -- if (IS_I965 || IS_G4X) -- gtt_entries = MB(224) - KB(size); -- else -- gtt_entries = 0; -- break; -- case INTEL_GMCH_GMS_STOLEN_352M: -- if (IS_I965 || IS_G4X) -- gtt_entries = MB(352) - KB(size); -- else -- gtt_entries = 0; -- break; -- default: -- gtt_entries = 0; -- break; -- } -- } -- if (gtt_entries > 0) { -- dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n", -- gtt_entries / KB(1), local ? "local" : "stolen"); -- gtt_entries /= KB(4); -- } else { -- dev_info(&agp_bridge->dev->dev, -- "no pre-allocated video memory detected\n"); -- gtt_entries = 0; -- } -- -- intel_private.gtt_entries = gtt_entries; --} -- --static void intel_i830_fini_flush(void) --{ -- kunmap(intel_private.i8xx_page); -- intel_private.i8xx_flush_page = NULL; -- unmap_page_from_agp(intel_private.i8xx_page); -- -- __free_page(intel_private.i8xx_page); -- intel_private.i8xx_page = NULL; --} -- --static void intel_i830_setup_flush(void) --{ -- /* return if we've already set the flush mechanism up */ -- if (intel_private.i8xx_page) -- return; -- -- intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); -- if (!intel_private.i8xx_page) -- return; -- -- intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page); -- if (!intel_private.i8xx_flush_page) -- intel_i830_fini_flush(); --} -- --/* The chipset_flush interface needs to get data that has already been -- * flushed out of the CPU all the way out to main memory, because the GPU -- * doesn't snoop those buffers. -- * -- * The 8xx series doesn't have the same lovely interface for flushing the -- * chipset write buffers that the later chips do. According to the 865 -- * specs, it's 64 octwords, or 1KB. So, to get those previous things in -- * that buffer out, we just fill 1KB and clflush it out, on the assumption -- * that it'll push whatever was in there out. It appears to work. 
-- */ --static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) --{ -- unsigned int *pg = intel_private.i8xx_flush_page; -- -- memset(pg, 0, 1024); -- -- if (cpu_has_clflush) -- clflush_cache_range(pg, 1024); -- else if (wbinvd_on_all_cpus() != 0) -- printk(KERN_ERR "Timed out waiting for cache flush.\n"); --} -- --/* The intel i830 automatically initializes the agp aperture during POST. -- * Use the memory already set aside for in the GTT. -- */ --static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) --{ -- int page_order; -- struct aper_size_info_fixed *size; -- int num_entries; -- u32 temp; -- -- size = agp_bridge->current_size; -- page_order = size->page_order; -- num_entries = size->num_entries; -- agp_bridge->gatt_table_real = NULL; -- -- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); -- temp &= 0xfff80000; -- -- intel_private.registers = ioremap(temp, 128 * 4096); -- if (!intel_private.registers) -- return -ENOMEM; -- -- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; -- global_cache_flush(); /* FIXME: ?? */ -- -- /* we have to call this as early as possible after the MMIO base address is known */ -- intel_i830_init_gtt_entries(); -- -- agp_bridge->gatt_table = NULL; -- -- agp_bridge->gatt_bus_addr = temp; -- -- return 0; --} -- --/* Return the gatt table to a sane state. Use the top of stolen -- * memory for the GTT. -- */ --static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge) --{ -- return 0; --} -- --static int intel_i830_fetch_size(void) --{ -- u16 gmch_ctrl; -- struct aper_size_info_fixed *values; -- -- values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); -- -- if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB && -- agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) { -- /* 855GM/852GM/865G has 128MB aperture size */ -- agp_bridge->previous_size = agp_bridge->current_size = (void *) values; -- agp_bridge->aperture_size_idx = 0; -- return values[0].size; -- } -- -- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); -- -- if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) { -- agp_bridge->previous_size = agp_bridge->current_size = (void *) values; -- agp_bridge->aperture_size_idx = 0; -- return values[0].size; -- } else { -- agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1); -- agp_bridge->aperture_size_idx = 1; -- return values[1].size; -- } -- -- return 0; --} -- --static int intel_i830_configure(void) --{ -- struct aper_size_info_fixed *current_size; -- u32 temp; -- u16 gmch_ctrl; -- int i; -- -- current_size = A_SIZE_FIX(agp_bridge->current_size); -- -- pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); -- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); -- -- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); -- gmch_ctrl |= I830_GMCH_ENABLED; -- pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); -- -- writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); -- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ -- -- if (agp_bridge->driver->needs_scratch_page) { -- for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) { -- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); -- } -- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. 
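(The readl() just above, tagged "PCI Posting", is the standard idiom for flushing posted MMIO writes: writes issued with writel() may be buffered by the bridge, but a read is never posted, so reading any register in the same block forces the earlier writes to land first. A minimal sketch of the pattern; "regs" and the register offset are placeholders, not symbols from this patch:)

#include <linux/io.h>

static void __iomem *regs;      /* assumed to be ioremap()ed elsewhere */

static void write_and_flush(u32 val)
{
        writel(val, regs + 0x20);       /* posted: may sit in a write buffer */
        readl(regs + 0x20);             /* non-posted read pushes it out */
}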
*/ -- } -- -- global_cache_flush(); -- -- intel_i830_setup_flush(); -- return 0; --} -- --static void intel_i830_cleanup(void) --{ -- iounmap(intel_private.registers); --} -- --static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, -- int type) --{ -- int i, j, num_entries; -- void *temp; -- int ret = -EINVAL; -- int mask_type; -- -- if (mem->page_count == 0) -- goto out; -- -- temp = agp_bridge->current_size; -- num_entries = A_SIZE_FIX(temp)->num_entries; -- -- if (pg_start < intel_private.gtt_entries) { -- dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, -- "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", -- pg_start, intel_private.gtt_entries); -- -- dev_info(&intel_private.pcidev->dev, -- "trying to insert into local/stolen memory\n"); -- goto out_err; -- } -- -- if ((pg_start + mem->page_count) > num_entries) -- goto out_err; -- -- /* The i830 can't check the GTT for entries since its read only, -- * depend on the caller to make the correct offset decisions. -- */ -- -- if (type != mem->type) -- goto out_err; -- -- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); -- -- if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && -- mask_type != INTEL_AGP_CACHED_MEMORY) -- goto out_err; -- -- if (!mem->is_flushed) -- global_cache_flush(); -- -- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { -- writel(agp_bridge->driver->mask_memory(agp_bridge, -- page_to_phys(mem->pages[i]), mask_type), -- intel_private.registers+I810_PTE_BASE+(j*4)); -- } -- readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); -- agp_bridge->driver->tlb_flush(mem); -- --out: -- ret = 0; --out_err: -- mem->is_flushed = true; -- return ret; --} -- --static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, -- int type) --{ -- int i; -- -- if (mem->page_count == 0) -- return 0; -- -- if (pg_start < intel_private.gtt_entries) { -- dev_info(&intel_private.pcidev->dev, -- "trying to disable local/stolen memory\n"); -- return -EINVAL; -- } -- -- for (i = pg_start; i < (mem->page_count + pg_start); i++) { -- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); -- } -- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); -- -- agp_bridge->driver->tlb_flush(mem); -- return 0; --} -- --static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type) --{ -- if (type == AGP_PHYS_MEMORY) -- return alloc_agpphysmem_i8xx(pg_count, type); -- /* always return NULL for other allocation types for now */ -- return NULL; --} -- --static int intel_alloc_chipset_flush_resource(void) --{ -- int ret; -- ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE, -- PAGE_SIZE, PCIBIOS_MIN_MEM, 0, -- pcibios_align_resource, agp_bridge->dev); -- -- return ret; --} -- --static void intel_i915_setup_chipset_flush(void) --{ -- int ret; -- u32 temp; -- -- pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp); -- if (!(temp & 0x1)) { -- intel_alloc_chipset_flush_resource(); -- intel_private.resource_valid = 1; -- pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); -- } else { -- temp &= ~1; -- -- intel_private.resource_valid = 1; -- intel_private.ifp_resource.start = temp; -- intel_private.ifp_resource.end = temp + PAGE_SIZE; -- ret = request_resource(&iomem_resource, &intel_private.ifp_resource); -- /* some BIOSes reserve this area in a pnp some don't */ -- if (ret) -- intel_private.resource_valid = 0; -- } --} -- --static void 
intel_i965_g33_setup_chipset_flush(void) --{ -- u32 temp_hi, temp_lo; -- int ret; -- -- pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi); -- pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo); -- -- if (!(temp_lo & 0x1)) { -- -- intel_alloc_chipset_flush_resource(); -- -- intel_private.resource_valid = 1; -- pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4, -- upper_32_bits(intel_private.ifp_resource.start)); -- pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); -- } else { -- u64 l64; -- -- temp_lo &= ~0x1; -- l64 = ((u64)temp_hi << 32) | temp_lo; -- -- intel_private.resource_valid = 1; -- intel_private.ifp_resource.start = l64; -- intel_private.ifp_resource.end = l64 + PAGE_SIZE; -- ret = request_resource(&iomem_resource, &intel_private.ifp_resource); -- /* some BIOSes reserve this area in a pnp some don't */ -- if (ret) -- intel_private.resource_valid = 0; -- } --} -- --static void intel_i9xx_setup_flush(void) --{ -- /* return if already configured */ -- if (intel_private.ifp_resource.start) -- return; -- -- if (IS_SNB) -- return; -- -- /* setup a resource for this object */ -- intel_private.ifp_resource.name = "Intel Flush Page"; -- intel_private.ifp_resource.flags = IORESOURCE_MEM; -- -- /* Setup chipset flush for 915 */ -- if (IS_I965 || IS_G33 || IS_G4X) { -- intel_i965_g33_setup_chipset_flush(); -- } else { -- intel_i915_setup_chipset_flush(); -- } -- -- if (intel_private.ifp_resource.start) { -- intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); -- if (!intel_private.i9xx_flush_page) -- dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing"); -- } --} -- --static int intel_i915_configure(void) --{ -- struct aper_size_info_fixed *current_size; -- u32 temp; -- u16 gmch_ctrl; -- int i; -- -- current_size = A_SIZE_FIX(agp_bridge->current_size); -- -- pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp); -- -- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); -- -- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); -- gmch_ctrl |= I830_GMCH_ENABLED; -- pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); -- -- writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); -- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ -- -- if (agp_bridge->driver->needs_scratch_page) { -- for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) { -- writel(agp_bridge->scratch_page, intel_private.gtt+i); -- } -- readl(intel_private.gtt+i-1); /* PCI Posting. 
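(The flush page that intel_i9xx_setup_flush() above maps is what intel_i915_chipset_flush(), further down, pokes through the chipset_flush hook. A hedged sketch of how a client outside this file would trigger it, assuming the agp_backend_acquire()/agp_flush_chipset()/agp_backend_release() helpers exported by the AGP core and a "pdev" pointing at the bridge device:)

/* hypothetical client code, not part of this patch */
struct agp_bridge_data *bridge = agp_backend_acquire(pdev);

if (bridge) {
        /* ... CPU writes into GTT-backed memory ... */
        agp_flush_chipset(bridge);      /* lands in the driver's chipset_flush hook */
        agp_backend_release(bridge);
}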
*/ -- } -- -- global_cache_flush(); -- -- intel_i9xx_setup_flush(); -- -- return 0; --} -- --static void intel_i915_cleanup(void) --{ -- if (intel_private.i9xx_flush_page) -- iounmap(intel_private.i9xx_flush_page); -- if (intel_private.resource_valid) -- release_resource(&intel_private.ifp_resource); -- intel_private.ifp_resource.start = 0; -- intel_private.resource_valid = 0; -- iounmap(intel_private.gtt); -- iounmap(intel_private.registers); --} -- --static void intel_i915_chipset_flush(struct agp_bridge_data *bridge) --{ -- if (intel_private.i9xx_flush_page) -- writel(1, intel_private.i9xx_flush_page); --} -- --static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, -- int type) --{ -- int num_entries; -- void *temp; -- int ret = -EINVAL; -- int mask_type; -- -- if (mem->page_count == 0) -- goto out; -- -- temp = agp_bridge->current_size; -- num_entries = A_SIZE_FIX(temp)->num_entries; -- -- if (pg_start < intel_private.gtt_entries) { -- dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, -- "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", -- pg_start, intel_private.gtt_entries); -- -- dev_info(&intel_private.pcidev->dev, -- "trying to insert into local/stolen memory\n"); -- goto out_err; -- } -- -- if ((pg_start + mem->page_count) > num_entries) -- goto out_err; -- -- /* The i915 can't check the GTT for entries since it's read only; -- * depend on the caller to make the correct offset decisions. -- */ -- -- if (type != mem->type) -- goto out_err; -- -- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); -- -- if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && -- mask_type != INTEL_AGP_CACHED_MEMORY) -- goto out_err; -- -- if (!mem->is_flushed) -- global_cache_flush(); -- -- intel_agp_insert_sg_entries(mem, pg_start, mask_type); -- agp_bridge->driver->tlb_flush(mem); -- -- out: -- ret = 0; -- out_err: -- mem->is_flushed = true; -- return ret; --} -- --static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, -- int type) --{ -- int i; -- -- if (mem->page_count == 0) -- return 0; -- -- if (pg_start < intel_private.gtt_entries) { -- dev_info(&intel_private.pcidev->dev, -- "trying to disable local/stolen memory\n"); -- return -EINVAL; -- } -- -- for (i = pg_start; i < (mem->page_count + pg_start); i++) -- writel(agp_bridge->scratch_page, intel_private.gtt+i); -- -- readl(intel_private.gtt+i-1); -- -- agp_bridge->driver->tlb_flush(mem); -- return 0; --} -- --/* Return the aperture size by just checking the resource length. The effect -- * described in the spec of the MSAC registers is just changing of the -- * resource size. -- */ --static int intel_i9xx_fetch_size(void) --{ -- int num_sizes = ARRAY_SIZE(intel_i830_sizes); -- int aper_size; /* size in megabytes */ -- int i; -- -- aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1); -- -- for (i = 0; i < num_sizes; i++) { -- if (aper_size == intel_i830_sizes[i].size) { -- agp_bridge->current_size = intel_i830_sizes + i; -- agp_bridge->previous_size = agp_bridge->current_size; -- return aper_size; -- } -- } -- -- return 0; --} -- --/* The intel i915 automatically initializes the agp aperture during POST. -- * Use the memory already set aside for in the GTT. 
-- */ --static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) --{ -- int page_order; -- struct aper_size_info_fixed *size; -- int num_entries; -- u32 temp, temp2; -- int gtt_map_size = 256 * 1024; -- -- size = agp_bridge->current_size; -- page_order = size->page_order; -- num_entries = size->num_entries; -- agp_bridge->gatt_table_real = NULL; -- -- pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); -- pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2); -- -- if (IS_G33) -- gtt_map_size = 1024 * 1024; /* 1M on G33 */ -- intel_private.gtt = ioremap(temp2, gtt_map_size); -- if (!intel_private.gtt) -- return -ENOMEM; -- -- intel_private.gtt_total_size = gtt_map_size / 4; -- -- temp &= 0xfff80000; -- -- intel_private.registers = ioremap(temp, 128 * 4096); -- if (!intel_private.registers) { -- iounmap(intel_private.gtt); -- return -ENOMEM; -- } -- -- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; -- global_cache_flush(); /* FIXME: ? */ -- -- /* we have to call this as early as possible after the MMIO base address is known */ -- intel_i830_init_gtt_entries(); -- -- agp_bridge->gatt_table = NULL; -- -- agp_bridge->gatt_bus_addr = temp; -- -- return 0; --} -- --/* -- * The i965 supports 36-bit physical addresses, but to keep -- * the format of the GTT the same, the bits that don't fit -- * in a 32-bit word are shifted down to bits 4..7. -- * -- * Gcc is smart enough to notice that "(addr >> 28) & 0xf0" -- * is always zero on 32-bit architectures, so no need to make -- * this conditional. -- */ --static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, -- dma_addr_t addr, int type) --{ -- /* Shift high bits down */ -- addr |= (addr >> 28) & 0xf0; -- -- /* Type checking must be done elsewhere */ -- return addr | bridge->driver->masks[type].mask; --} -- --static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) --{ -- u16 snb_gmch_ctl; -- -- switch (agp_bridge->dev->device) { -- case PCI_DEVICE_ID_INTEL_GM45_HB: -- case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB: -- case PCI_DEVICE_ID_INTEL_Q45_HB: -- case PCI_DEVICE_ID_INTEL_G45_HB: -- case PCI_DEVICE_ID_INTEL_G41_HB: -- case PCI_DEVICE_ID_INTEL_B43_HB: -- case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB: -- case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB: -- case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB: -- case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB: -- *gtt_offset = *gtt_size = MB(2); -- break; -- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: -- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: -- *gtt_offset = MB(2); -- -- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); -- switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { -- default: -- case SNB_GTT_SIZE_0M: -- printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); -- *gtt_size = MB(0); -- break; -- case SNB_GTT_SIZE_1M: -- *gtt_size = MB(1); -- break; -- case SNB_GTT_SIZE_2M: -- *gtt_size = MB(2); -- break; -- } -- break; -- default: -- *gtt_offset = *gtt_size = KB(512); -- } --} -- --/* The intel i965 automatically initializes the agp aperture during POST. -- * Use the memory already set aside for in the GTT. 
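(A worked example of the bit-folding in intel_i965_mask_memory() above; the address value is made up for illustration and the program only mirrors the driver's arithmetic:)

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t addr = 0x812345000ULL;         /* 36-bit page address, bits 32-35 = 0x8 */
        uint64_t pte;

        pte = addr | ((addr >> 28) & 0xf0);     /* fold bits 32-35 down into bits 4-7 */
        pte |= 0x1;                             /* stands in for I810_PTE_VALID */

        /* writel() keeps only the low 32 bits, so the GTT sees 0x12345081 */
        printf("GTT entry: 0x%08x\n", (uint32_t)pte);
        return 0;
}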
-- */ --static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) --{ -- int page_order; -- struct aper_size_info_fixed *size; -- int num_entries; -- u32 temp; -- int gtt_offset, gtt_size; -- -- size = agp_bridge->current_size; -- page_order = size->page_order; -- num_entries = size->num_entries; -- agp_bridge->gatt_table_real = NULL; -- -- pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); -- -- temp &= 0xfff00000; -- -- intel_i965_get_gtt_range(&gtt_offset, &gtt_size); -- -- intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size); -- -- if (!intel_private.gtt) -- return -ENOMEM; -- -- intel_private.gtt_total_size = gtt_size / 4; -- -- intel_private.registers = ioremap(temp, 128 * 4096); -- if (!intel_private.registers) { -- iounmap(intel_private.gtt); -- return -ENOMEM; -- } -- -- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; -- global_cache_flush(); /* FIXME: ? */ -- -- /* we have to call this as early as possible after the MMIO base address is known */ -- intel_i830_init_gtt_entries(); -- -- agp_bridge->gatt_table = NULL; -- -- agp_bridge->gatt_bus_addr = temp; -- -- return 0; --} -- -- - static int intel_fetch_size(void) - { - int i; -@@ -1982,6 +464,7 @@ static const struct agp_bridge_driver intel_generic_driver = { - .aperture_sizes = intel_generic_sizes, - .size_type = U16_APER_SIZE, - .num_aperture_sizes = 7, -+ .needs_scratch_page = true, - .configure = intel_configure, - .fetch_size = intel_fetch_size, - .cleanup = intel_cleanup, -@@ -2003,38 +486,12 @@ static const struct agp_bridge_driver intel_generic_driver = { - .agp_type_to_mask_type = agp_generic_type_to_mask_type, - }; - --static const struct agp_bridge_driver intel_810_driver = { -- .owner = THIS_MODULE, -- .aperture_sizes = intel_i810_sizes, -- .size_type = FIXED_APER_SIZE, -- .num_aperture_sizes = 2, -- .needs_scratch_page = true, -- .configure = intel_i810_configure, -- .fetch_size = intel_i810_fetch_size, -- .cleanup = intel_i810_cleanup, -- .tlb_flush = intel_i810_tlbflush, -- .mask_memory = intel_i810_mask_memory, -- .masks = intel_i810_masks, -- .agp_enable = intel_i810_agp_enable, -- .cache_flush = global_cache_flush, -- .create_gatt_table = agp_generic_create_gatt_table, -- .free_gatt_table = agp_generic_free_gatt_table, -- .insert_memory = intel_i810_insert_entries, -- .remove_memory = intel_i810_remove_entries, -- .alloc_by_type = intel_i810_alloc_by_type, -- .free_by_type = intel_i810_free_by_type, -- .agp_alloc_page = agp_generic_alloc_page, -- .agp_alloc_pages = agp_generic_alloc_pages, -- .agp_destroy_page = agp_generic_destroy_page, -- .agp_destroy_pages = agp_generic_destroy_pages, -- .agp_type_to_mask_type = agp_generic_type_to_mask_type, --}; -- - static const struct agp_bridge_driver intel_815_driver = { - .owner = THIS_MODULE, - .aperture_sizes = intel_815_sizes, - .size_type = U8_APER_SIZE, - .num_aperture_sizes = 2, -+ .needs_scratch_page = true, - .configure = intel_815_configure, - .fetch_size = intel_815_fetch_size, - .cleanup = intel_8xx_cleanup, -@@ -2056,39 +513,12 @@ static const struct agp_bridge_driver intel_815_driver = { - .agp_type_to_mask_type = agp_generic_type_to_mask_type, - }; - --static const struct agp_bridge_driver intel_830_driver = { -- .owner = THIS_MODULE, -- .aperture_sizes = intel_i830_sizes, -- .size_type = FIXED_APER_SIZE, -- .num_aperture_sizes = 4, -- .needs_scratch_page = true, -- .configure = intel_i830_configure, -- .fetch_size = intel_i830_fetch_size, -- .cleanup = intel_i830_cleanup, -- .tlb_flush = 
intel_i810_tlbflush, -- .mask_memory = intel_i810_mask_memory, -- .masks = intel_i810_masks, -- .agp_enable = intel_i810_agp_enable, -- .cache_flush = global_cache_flush, -- .create_gatt_table = intel_i830_create_gatt_table, -- .free_gatt_table = intel_i830_free_gatt_table, -- .insert_memory = intel_i830_insert_entries, -- .remove_memory = intel_i830_remove_entries, -- .alloc_by_type = intel_i830_alloc_by_type, -- .free_by_type = intel_i810_free_by_type, -- .agp_alloc_page = agp_generic_alloc_page, -- .agp_alloc_pages = agp_generic_alloc_pages, -- .agp_destroy_page = agp_generic_destroy_page, -- .agp_destroy_pages = agp_generic_destroy_pages, -- .agp_type_to_mask_type = intel_i830_type_to_mask_type, -- .chipset_flush = intel_i830_chipset_flush, --}; -- - static const struct agp_bridge_driver intel_820_driver = { - .owner = THIS_MODULE, - .aperture_sizes = intel_8xx_sizes, - .size_type = U8_APER_SIZE, - .num_aperture_sizes = 7, -+ .needs_scratch_page = true, - .configure = intel_820_configure, - .fetch_size = intel_8xx_fetch_size, - .cleanup = intel_820_cleanup, -@@ -2115,6 +545,7 @@ static const struct agp_bridge_driver intel_830mp_driver = { - .aperture_sizes = intel_830mp_sizes, - .size_type = U8_APER_SIZE, - .num_aperture_sizes = 4, -+ .needs_scratch_page = true, - .configure = intel_830mp_configure, - .fetch_size = intel_8xx_fetch_size, - .cleanup = intel_8xx_cleanup, -@@ -2141,6 +572,7 @@ static const struct agp_bridge_driver intel_840_driver = { - .aperture_sizes = intel_8xx_sizes, - .size_type = U8_APER_SIZE, - .num_aperture_sizes = 7, -+ .needs_scratch_page = true, - .configure = intel_840_configure, - .fetch_size = intel_8xx_fetch_size, - .cleanup = intel_8xx_cleanup, -@@ -2167,6 +599,7 @@ static const struct agp_bridge_driver intel_845_driver = { - .aperture_sizes = intel_8xx_sizes, - .size_type = U8_APER_SIZE, - .num_aperture_sizes = 7, -+ .needs_scratch_page = true, - .configure = intel_845_configure, - .fetch_size = intel_8xx_fetch_size, - .cleanup = intel_8xx_cleanup, -@@ -2193,6 +626,7 @@ static const struct agp_bridge_driver intel_850_driver = { - .aperture_sizes = intel_8xx_sizes, - .size_type = U8_APER_SIZE, - .num_aperture_sizes = 7, -+ .needs_scratch_page = true, - .configure = intel_850_configure, - .fetch_size = intel_8xx_fetch_size, - .cleanup = intel_8xx_cleanup, -@@ -2219,6 +653,7 @@ static const struct agp_bridge_driver intel_860_driver = { - .aperture_sizes = intel_8xx_sizes, - .size_type = U8_APER_SIZE, - .num_aperture_sizes = 7, -+ .needs_scratch_page = true, - .configure = intel_860_configure, - .fetch_size = intel_8xx_fetch_size, - .cleanup = intel_8xx_cleanup, -@@ -2240,79 +675,12 @@ static const struct agp_bridge_driver intel_860_driver = { - .agp_type_to_mask_type = agp_generic_type_to_mask_type, - }; - --static const struct agp_bridge_driver intel_915_driver = { -- .owner = THIS_MODULE, -- .aperture_sizes = intel_i830_sizes, -- .size_type = FIXED_APER_SIZE, -- .num_aperture_sizes = 4, -- .needs_scratch_page = true, -- .configure = intel_i915_configure, -- .fetch_size = intel_i9xx_fetch_size, -- .cleanup = intel_i915_cleanup, -- .tlb_flush = intel_i810_tlbflush, -- .mask_memory = intel_i810_mask_memory, -- .masks = intel_i810_masks, -- .agp_enable = intel_i810_agp_enable, -- .cache_flush = global_cache_flush, -- .create_gatt_table = intel_i915_create_gatt_table, -- .free_gatt_table = intel_i830_free_gatt_table, -- .insert_memory = intel_i915_insert_entries, -- .remove_memory = intel_i915_remove_entries, -- .alloc_by_type = intel_i830_alloc_by_type, -- 
.free_by_type = intel_i810_free_by_type, -- .agp_alloc_page = agp_generic_alloc_page, -- .agp_alloc_pages = agp_generic_alloc_pages, -- .agp_destroy_page = agp_generic_destroy_page, -- .agp_destroy_pages = agp_generic_destroy_pages, -- .agp_type_to_mask_type = intel_i830_type_to_mask_type, -- .chipset_flush = intel_i915_chipset_flush, --#ifdef USE_PCI_DMA_API -- .agp_map_page = intel_agp_map_page, -- .agp_unmap_page = intel_agp_unmap_page, -- .agp_map_memory = intel_agp_map_memory, -- .agp_unmap_memory = intel_agp_unmap_memory, --#endif --}; -- --static const struct agp_bridge_driver intel_i965_driver = { -- .owner = THIS_MODULE, -- .aperture_sizes = intel_i830_sizes, -- .size_type = FIXED_APER_SIZE, -- .num_aperture_sizes = 4, -- .needs_scratch_page = true, -- .configure = intel_i915_configure, -- .fetch_size = intel_i9xx_fetch_size, -- .cleanup = intel_i915_cleanup, -- .tlb_flush = intel_i810_tlbflush, -- .mask_memory = intel_i965_mask_memory, -- .masks = intel_i810_masks, -- .agp_enable = intel_i810_agp_enable, -- .cache_flush = global_cache_flush, -- .create_gatt_table = intel_i965_create_gatt_table, -- .free_gatt_table = intel_i830_free_gatt_table, -- .insert_memory = intel_i915_insert_entries, -- .remove_memory = intel_i915_remove_entries, -- .alloc_by_type = intel_i830_alloc_by_type, -- .free_by_type = intel_i810_free_by_type, -- .agp_alloc_page = agp_generic_alloc_page, -- .agp_alloc_pages = agp_generic_alloc_pages, -- .agp_destroy_page = agp_generic_destroy_page, -- .agp_destroy_pages = agp_generic_destroy_pages, -- .agp_type_to_mask_type = intel_i830_type_to_mask_type, -- .chipset_flush = intel_i915_chipset_flush, --#ifdef USE_PCI_DMA_API -- .agp_map_page = intel_agp_map_page, -- .agp_unmap_page = intel_agp_unmap_page, -- .agp_map_memory = intel_agp_map_memory, -- .agp_unmap_memory = intel_agp_unmap_memory, --#endif --}; -- - static const struct agp_bridge_driver intel_7505_driver = { - .owner = THIS_MODULE, - .aperture_sizes = intel_8xx_sizes, - .size_type = U8_APER_SIZE, - .num_aperture_sizes = 7, -+ .needs_scratch_page = true, - .configure = intel_7505_configure, - .fetch_size = intel_8xx_fetch_size, - .cleanup = intel_8xx_cleanup, -@@ -2334,40 +702,6 @@ static const struct agp_bridge_driver intel_7505_driver = { - .agp_type_to_mask_type = agp_generic_type_to_mask_type, - }; - --static const struct agp_bridge_driver intel_g33_driver = { -- .owner = THIS_MODULE, -- .aperture_sizes = intel_i830_sizes, -- .size_type = FIXED_APER_SIZE, -- .num_aperture_sizes = 4, -- .needs_scratch_page = true, -- .configure = intel_i915_configure, -- .fetch_size = intel_i9xx_fetch_size, -- .cleanup = intel_i915_cleanup, -- .tlb_flush = intel_i810_tlbflush, -- .mask_memory = intel_i965_mask_memory, -- .masks = intel_i810_masks, -- .agp_enable = intel_i810_agp_enable, -- .cache_flush = global_cache_flush, -- .create_gatt_table = intel_i915_create_gatt_table, -- .free_gatt_table = intel_i830_free_gatt_table, -- .insert_memory = intel_i915_insert_entries, -- .remove_memory = intel_i915_remove_entries, -- .alloc_by_type = intel_i830_alloc_by_type, -- .free_by_type = intel_i810_free_by_type, -- .agp_alloc_page = agp_generic_alloc_page, -- .agp_alloc_pages = agp_generic_alloc_pages, -- .agp_destroy_page = agp_generic_destroy_page, -- .agp_destroy_pages = agp_generic_destroy_pages, -- .agp_type_to_mask_type = intel_i830_type_to_mask_type, -- .chipset_flush = intel_i915_chipset_flush, --#ifdef USE_PCI_DMA_API -- .agp_map_page = intel_agp_map_page, -- .agp_unmap_page = intel_agp_unmap_page, -- 
.agp_map_memory = intel_agp_map_memory, -- .agp_unmap_memory = intel_agp_unmap_memory, --#endif --}; -- - static int find_gmch(u16 device) - { - struct pci_dev *gmch_device; -@@ -2392,103 +726,137 @@ static int find_gmch(u16 device) - static const struct intel_driver_description { - unsigned int chip_id; - unsigned int gmch_chip_id; -- unsigned int multi_gmch_chip; /* if we have more gfx chip type on this HB. */ - char *name; - const struct agp_bridge_driver *driver; - const struct agp_bridge_driver *gmch_driver; - } intel_agp_chipsets[] = { -- { PCI_DEVICE_ID_INTEL_82443LX_0, 0, 0, "440LX", &intel_generic_driver, NULL }, -- { PCI_DEVICE_ID_INTEL_82443BX_0, 0, 0, "440BX", &intel_generic_driver, NULL }, -- { PCI_DEVICE_ID_INTEL_82443GX_0, 0, 0, "440GX", &intel_generic_driver, NULL }, -- { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, 0, "i810", -+ { PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL }, -+ { PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL }, -+ { PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL }, -+ { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810", - NULL, &intel_810_driver }, -- { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, 0, "i810", -+ { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810", - NULL, &intel_810_driver }, -- { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, 0, "i810", -+ { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810", - NULL, &intel_810_driver }, -- { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, 0, "i815", -+ { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815", - &intel_815_driver, &intel_810_driver }, -- { PCI_DEVICE_ID_INTEL_82820_HB, 0, 0, "i820", &intel_820_driver, NULL }, -- { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, 0, "i820", &intel_820_driver, NULL }, -- { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, 0, "830M", -+ { PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL }, -+ { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL }, -+ { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M", - &intel_830mp_driver, &intel_830_driver }, -- { PCI_DEVICE_ID_INTEL_82840_HB, 0, 0, "i840", &intel_840_driver, NULL }, -- { PCI_DEVICE_ID_INTEL_82845_HB, 0, 0, "845G", &intel_845_driver, NULL }, -- { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M", -+ { PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL }, -+ { PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL }, -+ { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M", - &intel_845_driver, &intel_830_driver }, -- { PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL }, -- { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, 0, "854", -+ { PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL }, -+ { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854", - &intel_845_driver, &intel_830_driver }, -- { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL }, -- { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM", -+ { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL }, -+ { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", - &intel_845_driver, &intel_830_driver }, -- { PCI_DEVICE_ID_INTEL_82860_HB, 0, 0, "i860", &intel_860_driver, NULL }, -- { 
PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, 0, "865", -+ { PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL }, -+ { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865", - &intel_845_driver, &intel_830_driver }, -- { PCI_DEVICE_ID_INTEL_82875_HB, 0, 0, "i875", &intel_845_driver, NULL }, -- { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, 0, "E7221 (i915)", -+ { PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL }, -+ { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", - NULL, &intel_915_driver }, -- { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, 0, "915G", -+ { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G", - NULL, &intel_915_driver }, -- { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, 0, "915GM", -+ { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", - NULL, &intel_915_driver }, -- { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G", -+ { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G", - NULL, &intel_915_driver }, -- { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 0, "945GM", -+ { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", - NULL, &intel_915_driver }, -- { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME", -+ { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", - NULL, &intel_915_driver }, -- { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ", -+ { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", - NULL, &intel_i965_driver }, -- { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, 0, "G35", -+ { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35", - NULL, &intel_i965_driver }, -- { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q", -+ { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", - NULL, &intel_i965_driver }, -- { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G", -+ { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G", - NULL, &intel_i965_driver }, -- { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 0, "965GM", -+ { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", - NULL, &intel_i965_driver }, -- { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE", -+ { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", - NULL, &intel_i965_driver }, -- { PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL }, -- { PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL }, -- { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, 0, "G33", -+ { PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL }, -+ { PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL }, -+ { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33", - NULL, &intel_g33_driver }, -- { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, 0, "Q35", -+ { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", - NULL, &intel_g33_driver }, -- { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", -+ { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", - NULL, &intel_g33_driver }, -- { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150", -+ { 
PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", - NULL, &intel_g33_driver }, -- { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150", -+ { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", - NULL, &intel_g33_driver }, -- { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0, -+ { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, - "GM45", NULL, &intel_i965_driver }, -- { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, 0, -+ { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, - "Eaglelake", NULL, &intel_i965_driver }, -- { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0, -+ { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, - "Q45/Q43", NULL, &intel_i965_driver }, -- { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0, -+ { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, - "G45/G43", NULL, &intel_i965_driver }, -- { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0, -+ { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, - "B43", NULL, &intel_i965_driver }, -- { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, -+ { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, - "G41", NULL, &intel_i965_driver }, -- { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0, -+ { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, - "HD Graphics", NULL, &intel_i965_driver }, -- { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, -+ { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, - "HD Graphics", NULL, &intel_i965_driver }, -- { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, -+ { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, - "HD Graphics", NULL, &intel_i965_driver }, -- { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, -+ { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, - "HD Graphics", NULL, &intel_i965_driver }, -- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0, -+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, - "Sandybridge", NULL, &intel_i965_driver }, -- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0, -+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, - "Sandybridge", NULL, &intel_i965_driver }, -- { 0, 0, 0, NULL, NULL, NULL } -+ { 0, 0, NULL, NULL, NULL } - }; - -+static int __devinit intel_gmch_probe(struct pci_dev *pdev, -+ struct agp_bridge_data *bridge) -+{ -+ int i; -+ bridge->driver = NULL; -+ -+ for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { -+ if ((intel_agp_chipsets[i].gmch_chip_id != 0) && -+ find_gmch(intel_agp_chipsets[i].gmch_chip_id)) { -+ bridge->driver = -+ intel_agp_chipsets[i].gmch_driver; -+ break; -+ } -+ } -+ -+ if (!bridge->driver) -+ return 0; -+ -+ bridge->dev_private_data = &intel_private; -+ bridge->dev = pdev; -+ -+ dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); -+ -+ if (bridge->driver->mask_memory == intel_i965_mask_memory) { -+ if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36))) -+ dev_err(&intel_private.pcidev->dev, -+ "set gfx device dma mask 36bit failed!\n"); -+ else -+ pci_set_consistent_dma_mask(intel_private.pcidev, -+ DMA_BIT_MASK(36)); -+ } -+ -+ return 1; -+} -+ - static int __devinit agp_intel_probe(struct pci_dev 
*pdev, - const struct pci_device_id *ent) - { -@@ -2503,22 +871,18 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, - if (!bridge) - return -ENOMEM; - -+ bridge->capndx = cap_ptr; -+ -+ if (intel_gmch_probe(pdev, bridge)) -+ goto found_gmch; -+ - for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { - /* In case that multiple models of gfx chip may - stand on same host bridge type, this can be - sure we detect the right IGD. */ - if (pdev->device == intel_agp_chipsets[i].chip_id) { -- if ((intel_agp_chipsets[i].gmch_chip_id != 0) && -- find_gmch(intel_agp_chipsets[i].gmch_chip_id)) { -- bridge->driver = -- intel_agp_chipsets[i].gmch_driver; -- break; -- } else if (intel_agp_chipsets[i].multi_gmch_chip) { -- continue; -- } else { -- bridge->driver = intel_agp_chipsets[i].driver; -- break; -- } -+ bridge->driver = intel_agp_chipsets[i].driver; -+ break; - } - } - -@@ -2530,18 +894,16 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, - return -ENODEV; - } - -- if (bridge->driver == NULL) { -- /* bridge has no AGP and no IGD detected */ -+ if (!bridge->driver) { - if (cap_ptr) - dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n", -- intel_agp_chipsets[i].gmch_chip_id); -+ intel_agp_chipsets[i].gmch_chip_id); - agp_put_bridge(bridge); - return -ENODEV; - } - - bridge->dev = pdev; -- bridge->capndx = cap_ptr; -- bridge->dev_private_data = &intel_private; -+ bridge->dev_private_data = NULL; - - dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); - -@@ -2577,15 +939,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, - &bridge->mode); - } - -- if (bridge->driver->mask_memory == intel_i965_mask_memory) { -- if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36))) -- dev_err(&intel_private.pcidev->dev, -- "set gfx device dma mask 36bit failed!\n"); -- else -- pci_set_consistent_dma_mask(intel_private.pcidev, -- DMA_BIT_MASK(36)); -- } -- -+found_gmch: - pci_set_drvdata(pdev, bridge); - err = agp_add_bridge(bridge); - if (!err) -@@ -2611,22 +965,7 @@ static int agp_intel_resume(struct pci_dev *pdev) - struct agp_bridge_data *bridge = pci_get_drvdata(pdev); - int ret_val; - -- if (bridge->driver == &intel_generic_driver) -- intel_configure(); -- else if (bridge->driver == &intel_850_driver) -- intel_850_configure(); -- else if (bridge->driver == &intel_845_driver) -- intel_845_configure(); -- else if (bridge->driver == &intel_830mp_driver) -- intel_830mp_configure(); -- else if (bridge->driver == &intel_915_driver) -- intel_i915_configure(); -- else if (bridge->driver == &intel_830_driver) -- intel_i830_configure(); -- else if (bridge->driver == &intel_810_driver) -- intel_i810_configure(); -- else if (bridge->driver == &intel_i965_driver) -- intel_i915_configure(); -+ bridge->driver->configure(); - - ret_val = agp_rebind_memory(); - if (ret_val != 0) -diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h -new file mode 100644 -index 0000000..2547465 ---- /dev/null -+++ b/drivers/char/agp/intel-agp.h -@@ -0,0 +1,239 @@ -+/* -+ * Common Intel AGPGART and GTT definitions. 
-+ */ -+ -+/* Intel registers */ -+#define INTEL_APSIZE 0xb4 -+#define INTEL_ATTBASE 0xb8 -+#define INTEL_AGPCTRL 0xb0 -+#define INTEL_NBXCFG 0x50 -+#define INTEL_ERRSTS 0x91 -+ -+/* Intel i830 registers */ -+#define I830_GMCH_CTRL 0x52 -+#define I830_GMCH_ENABLED 0x4 -+#define I830_GMCH_MEM_MASK 0x1 -+#define I830_GMCH_MEM_64M 0x1 -+#define I830_GMCH_MEM_128M 0 -+#define I830_GMCH_GMS_MASK 0x70 -+#define I830_GMCH_GMS_DISABLED 0x00 -+#define I830_GMCH_GMS_LOCAL 0x10 -+#define I830_GMCH_GMS_STOLEN_512 0x20 -+#define I830_GMCH_GMS_STOLEN_1024 0x30 -+#define I830_GMCH_GMS_STOLEN_8192 0x40 -+#define I830_RDRAM_CHANNEL_TYPE 0x03010 -+#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5) -+#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3) -+ -+/* This one is for I830MP w. an external graphic card */ -+#define INTEL_I830_ERRSTS 0x92 -+ -+/* Intel 855GM/852GM registers */ -+#define I855_GMCH_GMS_MASK 0xF0 -+#define I855_GMCH_GMS_STOLEN_0M 0x0 -+#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4) -+#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4) -+#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4) -+#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4) -+#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4) -+#define I85X_CAPID 0x44 -+#define I85X_VARIANT_MASK 0x7 -+#define I85X_VARIANT_SHIFT 5 -+#define I855_GME 0x0 -+#define I855_GM 0x4 -+#define I852_GME 0x2 -+#define I852_GM 0x5 -+ -+/* Intel i845 registers */ -+#define INTEL_I845_AGPM 0x51 -+#define INTEL_I845_ERRSTS 0xc8 -+ -+/* Intel i860 registers */ -+#define INTEL_I860_MCHCFG 0x50 -+#define INTEL_I860_ERRSTS 0xc8 -+ -+/* Intel i810 registers */ -+#define I810_GMADDR 0x10 -+#define I810_MMADDR 0x14 -+#define I810_PTE_BASE 0x10000 -+#define I810_PTE_MAIN_UNCACHED 0x00000000 -+#define I810_PTE_LOCAL 0x00000002 -+#define I810_PTE_VALID 0x00000001 -+#define I830_PTE_SYSTEM_CACHED 0x00000006 -+#define I810_SMRAM_MISCC 0x70 -+#define I810_GFX_MEM_WIN_SIZE 0x00010000 -+#define I810_GFX_MEM_WIN_32M 0x00010000 -+#define I810_GMS 0x000000c0 -+#define I810_GMS_DISABLE 0x00000000 -+#define I810_PGETBL_CTL 0x2020 -+#define I810_PGETBL_ENABLED 0x00000001 -+#define I965_PGETBL_SIZE_MASK 0x0000000e -+#define I965_PGETBL_SIZE_512KB (0 << 1) -+#define I965_PGETBL_SIZE_256KB (1 << 1) -+#define I965_PGETBL_SIZE_128KB (2 << 1) -+#define I965_PGETBL_SIZE_1MB (3 << 1) -+#define I965_PGETBL_SIZE_2MB (4 << 1) -+#define I965_PGETBL_SIZE_1_5MB (5 << 1) -+#define G33_PGETBL_SIZE_MASK (3 << 8) -+#define G33_PGETBL_SIZE_1M (1 << 8) -+#define G33_PGETBL_SIZE_2M (2 << 8) -+ -+#define I810_DRAM_CTL 0x3000 -+#define I810_DRAM_ROW_0 0x00000001 -+#define I810_DRAM_ROW_0_SDRAM 0x00000001 -+ -+/* Intel 815 register */ -+#define INTEL_815_APCONT 0x51 -+#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF -+ -+/* Intel i820 registers */ -+#define INTEL_I820_RDCR 0x51 -+#define INTEL_I820_ERRSTS 0xc8 -+ -+/* Intel i840 registers */ -+#define INTEL_I840_MCHCFG 0x50 -+#define INTEL_I840_ERRSTS 0xc8 -+ -+/* Intel i850 registers */ -+#define INTEL_I850_MCHCFG 0x50 -+#define INTEL_I850_ERRSTS 0xc8 -+ -+/* intel 915G registers */ -+#define I915_GMADDR 0x18 -+#define I915_MMADDR 0x10 -+#define I915_PTEADDR 0x1C -+#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) -+#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) -+#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) -+#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4) -+#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) -+#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) -+#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) -+#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) -+ -+#define I915_IFPADDR 0x60 -+ -+/* Intel 965G registers */ 
-+#define I965_MSAC 0x62 -+#define I965_IFPADDR 0x70 -+ -+/* Intel 7505 registers */ -+#define INTEL_I7505_APSIZE 0x74 -+#define INTEL_I7505_NCAPID 0x60 -+#define INTEL_I7505_NISTAT 0x6c -+#define INTEL_I7505_ATTBASE 0x78 -+#define INTEL_I7505_ERRSTS 0x42 -+#define INTEL_I7505_AGPCTRL 0x70 -+#define INTEL_I7505_MCHCFG 0x50 -+ -+#define SNB_GMCH_CTRL 0x50 -+#define SNB_GMCH_GMS_STOLEN_MASK 0xF8 -+#define SNB_GMCH_GMS_STOLEN_32M (1 << 3) -+#define SNB_GMCH_GMS_STOLEN_64M (2 << 3) -+#define SNB_GMCH_GMS_STOLEN_96M (3 << 3) -+#define SNB_GMCH_GMS_STOLEN_128M (4 << 3) -+#define SNB_GMCH_GMS_STOLEN_160M (5 << 3) -+#define SNB_GMCH_GMS_STOLEN_192M (6 << 3) -+#define SNB_GMCH_GMS_STOLEN_224M (7 << 3) -+#define SNB_GMCH_GMS_STOLEN_256M (8 << 3) -+#define SNB_GMCH_GMS_STOLEN_288M (9 << 3) -+#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) -+#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) -+#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) -+#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) -+#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) -+#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) -+#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) -+#define SNB_GTT_SIZE_0M (0 << 8) -+#define SNB_GTT_SIZE_1M (1 << 8) -+#define SNB_GTT_SIZE_2M (2 << 8) -+#define SNB_GTT_SIZE_MASK (3 << 8) -+ -+/* pci devices ids */ -+#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588 -+#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a -+#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970 -+#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972 -+#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980 -+#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982 -+#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990 -+#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992 -+#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0 -+#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2 -+#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00 -+#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02 -+#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10 -+#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12 -+#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC -+#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE -+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010 -+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011 -+#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000 -+#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001 -+#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 -+#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2 -+#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 -+#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 -+#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 -+#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 -+#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 -+#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 -+#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 -+#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 -+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 -+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02 -+#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10 -+#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12 -+#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20 -+#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22 -+#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 -+#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 -+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 -+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 -+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 -+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 -+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a -+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 -+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 -+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 -+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 -+#define 
PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 -+ -+/* cover 915 and 945 variants */ -+#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB) -+ -+#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB) -+ -+#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) -+ -+#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) -+ -+#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) -+ -+#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ -+ IS_SNB) -diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c -new file mode 100644 -index 0000000..9344216 ---- /dev/null -+++ b/drivers/char/agp/intel-gtt.c -@@ -0,0 +1,1548 @@ -+/* -+ * Intel GTT (Graphics Translation Table) routines -+ * -+ * Caveat: This driver implements the linux agp interface, but this is far from -+ * a agp driver! GTT support ended up here for purely historical reasons: The -+ * old userspace intel graphics drivers needed an interface to map memory into -+ * the GTT. And the drm provides a default interface for graphic devices sitting -+ * on an agp port. So it made sense to fake the GTT support as an agp port to -+ * avoid having to create a new api. -+ * -+ * With gem this does not make much sense anymore, just needlessly complicates -+ * the code. But as long as the old graphics stack is still support, it's stuck -+ * here. -+ * -+ * /fairy-tale-mode off -+ */ -+ -+/* -+ * If we have Intel graphics, we're not going to have anything other than -+ * an Intel IOMMU. So make the correct use of the PCI DMA API contingent -+ * on the Intel IOMMU support (CONFIG_DMAR). -+ * Only newer chipsets need to bother with this, of course. 
-+ */ -+#ifdef CONFIG_DMAR -+#define USE_PCI_DMA_API 1 -+#endif -+ -+static const struct aper_size_info_fixed intel_i810_sizes[] = -+{ -+ {64, 16384, 4}, -+ /* The 32M mode still requires a 64k gatt */ -+ {32, 8192, 4} -+}; -+ -+#define AGP_DCACHE_MEMORY 1 -+#define AGP_PHYS_MEMORY 2 -+#define INTEL_AGP_CACHED_MEMORY 3 -+ -+static struct gatt_mask intel_i810_masks[] = -+{ -+ {.mask = I810_PTE_VALID, .type = 0}, -+ {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY}, -+ {.mask = I810_PTE_VALID, .type = 0}, -+ {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED, -+ .type = INTEL_AGP_CACHED_MEMORY} -+}; -+ -+static struct _intel_private { -+ struct pci_dev *pcidev; /* device one */ -+ u8 __iomem *registers; -+ u32 __iomem *gtt; /* I915G */ -+ int num_dcache_entries; -+ /* gtt_entries is the number of gtt entries that are already mapped -+ * to stolen memory. Stolen memory is larger than the memory mapped -+ * through gtt_entries, as it includes some reserved space for the BIOS -+ * popup and for the GTT. -+ */ -+ int gtt_entries; /* i830+ */ -+ int gtt_total_size; -+ union { -+ void __iomem *i9xx_flush_page; -+ void *i8xx_flush_page; -+ }; -+ struct page *i8xx_page; -+ struct resource ifp_resource; -+ int resource_valid; -+} intel_private; -+ -+#ifdef USE_PCI_DMA_API -+static int intel_agp_map_page(struct page *page, dma_addr_t *ret) -+{ -+ *ret = pci_map_page(intel_private.pcidev, page, 0, -+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); -+ if (pci_dma_mapping_error(intel_private.pcidev, *ret)) -+ return -EINVAL; -+ return 0; -+} -+ -+static void intel_agp_unmap_page(struct page *page, dma_addr_t dma) -+{ -+ pci_unmap_page(intel_private.pcidev, dma, -+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); -+} -+ -+static void intel_agp_free_sglist(struct agp_memory *mem) -+{ -+ struct sg_table st; -+ -+ st.sgl = mem->sg_list; -+ st.orig_nents = st.nents = mem->page_count; -+ -+ sg_free_table(&st); -+ -+ mem->sg_list = NULL; -+ mem->num_sg = 0; -+} -+ -+static int intel_agp_map_memory(struct agp_memory *mem) -+{ -+ struct sg_table st; -+ struct scatterlist *sg; -+ int i; -+ -+ DBG("try mapping %lu pages\n", (unsigned long)mem->page_count); -+ -+ if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL)) -+ return -ENOMEM; -+ -+ mem->sg_list = sg = st.sgl; -+ -+ for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg)) -+ sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0); -+ -+ mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list, -+ mem->page_count, PCI_DMA_BIDIRECTIONAL); -+ if (unlikely(!mem->num_sg)) { -+ intel_agp_free_sglist(mem); -+ return -ENOMEM; -+ } -+ return 0; -+} -+ -+static void intel_agp_unmap_memory(struct agp_memory *mem) -+{ -+ DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); -+ -+ pci_unmap_sg(intel_private.pcidev, mem->sg_list, -+ mem->page_count, PCI_DMA_BIDIRECTIONAL); -+ intel_agp_free_sglist(mem); -+} -+ -+static void intel_agp_insert_sg_entries(struct agp_memory *mem, -+ off_t pg_start, int mask_type) -+{ -+ struct scatterlist *sg; -+ int i, j; -+ -+ j = pg_start; -+ -+ WARN_ON(!mem->num_sg); -+ -+ if (mem->num_sg == mem->page_count) { -+ for_each_sg(mem->sg_list, sg, mem->page_count, i) { -+ writel(agp_bridge->driver->mask_memory(agp_bridge, -+ sg_dma_address(sg), mask_type), -+ intel_private.gtt+j); -+ j++; -+ } -+ } else { -+ /* sg may merge pages, but we have to separate -+ * per-page addr for GTT */ -+ unsigned int len, m; -+ -+ for_each_sg(mem->sg_list, sg, mem->num_sg, i) { -+ len = sg_dma_len(sg) / PAGE_SIZE; -+ for (m = 0; m < len; m++) { -+ 
writel(agp_bridge->driver->mask_memory(agp_bridge, -+ sg_dma_address(sg) + m * PAGE_SIZE, -+ mask_type), -+ intel_private.gtt+j); -+ j++; -+ } -+ } -+ } -+ readl(intel_private.gtt+j-1); -+} -+ -+#else -+ -+static void intel_agp_insert_sg_entries(struct agp_memory *mem, -+ off_t pg_start, int mask_type) -+{ -+ int i, j; -+ u32 cache_bits = 0; -+ -+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) -+ { -+ cache_bits = I830_PTE_SYSTEM_CACHED; -+ } -+ -+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { -+ writel(agp_bridge->driver->mask_memory(agp_bridge, -+ page_to_phys(mem->pages[i]), mask_type), -+ intel_private.gtt+j); -+ } -+ -+ readl(intel_private.gtt+j-1); -+} -+ -+#endif -+ -+static int intel_i810_fetch_size(void) -+{ -+ u32 smram_miscc; -+ struct aper_size_info_fixed *values; -+ -+ pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc); -+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); -+ -+ if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { -+ dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n"); -+ return 0; -+ } -+ if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { -+ agp_bridge->current_size = (void *) (values + 1); -+ agp_bridge->aperture_size_idx = 1; -+ return values[1].size; -+ } else { -+ agp_bridge->current_size = (void *) (values); -+ agp_bridge->aperture_size_idx = 0; -+ return values[0].size; -+ } -+ -+ return 0; -+} -+ -+static int intel_i810_configure(void) -+{ -+ struct aper_size_info_fixed *current_size; -+ u32 temp; -+ int i; -+ -+ current_size = A_SIZE_FIX(agp_bridge->current_size); -+ -+ if (!intel_private.registers) { -+ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); -+ temp &= 0xfff80000; -+ -+ intel_private.registers = ioremap(temp, 128 * 4096); -+ if (!intel_private.registers) { -+ dev_err(&intel_private.pcidev->dev, -+ "can't remap memory\n"); -+ return -ENOMEM; -+ } -+ } -+ -+ if ((readl(intel_private.registers+I810_DRAM_CTL) -+ & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { -+ /* This will need to be dynamically assigned */ -+ dev_info(&intel_private.pcidev->dev, -+ "detected 4MB dedicated video ram\n"); -+ intel_private.num_dcache_entries = 1024; -+ } -+ pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); -+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); -+ writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); -+ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ -+ -+ if (agp_bridge->driver->needs_scratch_page) { -+ for (i = 0; i < current_size->num_entries; i++) { -+ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); -+ } -+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */ -+ } -+ global_cache_flush(); -+ return 0; -+} -+ -+static void intel_i810_cleanup(void) -+{ -+ writel(0, intel_private.registers+I810_PGETBL_CTL); -+ readl(intel_private.registers); /* PCI Posting. 
*/ -+ iounmap(intel_private.registers); -+} -+ -+static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode) -+{ -+ return; -+} -+ -+/* Exists to support ARGB cursors */ -+static struct page *i8xx_alloc_pages(void) -+{ -+ struct page *page; -+ -+ page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2); -+ if (page == NULL) -+ return NULL; -+ -+ if (set_pages_uc(page, 4) < 0) { -+ set_pages_wb(page, 4); -+ __free_pages(page, 2); -+ return NULL; -+ } -+ get_page(page); -+ atomic_inc(&agp_bridge->current_memory_agp); -+ return page; -+} -+ -+static void i8xx_destroy_pages(struct page *page) -+{ -+ if (page == NULL) -+ return; -+ -+ set_pages_wb(page, 4); -+ put_page(page); -+ __free_pages(page, 2); -+ atomic_dec(&agp_bridge->current_memory_agp); -+} -+ -+static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge, -+ int type) -+{ -+ if (type < AGP_USER_TYPES) -+ return type; -+ else if (type == AGP_USER_CACHED_MEMORY) -+ return INTEL_AGP_CACHED_MEMORY; -+ else -+ return 0; -+} -+ -+static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, -+ int type) -+{ -+ int i, j, num_entries; -+ void *temp; -+ int ret = -EINVAL; -+ int mask_type; -+ -+ if (mem->page_count == 0) -+ goto out; -+ -+ temp = agp_bridge->current_size; -+ num_entries = A_SIZE_FIX(temp)->num_entries; -+ -+ if ((pg_start + mem->page_count) > num_entries) -+ goto out_err; -+ -+ -+ for (j = pg_start; j < (pg_start + mem->page_count); j++) { -+ if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) { -+ ret = -EBUSY; -+ goto out_err; -+ } -+ } -+ -+ if (type != mem->type) -+ goto out_err; -+ -+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); -+ -+ switch (mask_type) { -+ case AGP_DCACHE_MEMORY: -+ if (!mem->is_flushed) -+ global_cache_flush(); -+ for (i = pg_start; i < (pg_start + mem->page_count); i++) { -+ writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID, -+ intel_private.registers+I810_PTE_BASE+(i*4)); -+ } -+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); -+ break; -+ case AGP_PHYS_MEMORY: -+ case AGP_NORMAL_MEMORY: -+ if (!mem->is_flushed) -+ global_cache_flush(); -+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { -+ writel(agp_bridge->driver->mask_memory(agp_bridge, -+ page_to_phys(mem->pages[i]), mask_type), -+ intel_private.registers+I810_PTE_BASE+(j*4)); -+ } -+ readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); -+ break; -+ default: -+ goto out_err; -+ } -+ -+out: -+ ret = 0; -+out_err: -+ mem->is_flushed = true; -+ return ret; -+} -+ -+static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start, -+ int type) -+{ -+ int i; -+ -+ if (mem->page_count == 0) -+ return 0; -+ -+ for (i = pg_start; i < (mem->page_count + pg_start); i++) { -+ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); -+ } -+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); -+ -+ return 0; -+} -+ -+/* -+ * The i810/i830 requires a physical address to program its mouse -+ * pointer into hardware. -+ * However the Xserver still writes to it through the agp aperture. 
-+ */ -+static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) -+{ -+ struct agp_memory *new; -+ struct page *page; -+ -+ switch (pg_count) { -+ case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge); -+ break; -+ case 4: -+ /* kludge to get 4 physical pages for ARGB cursor */ -+ page = i8xx_alloc_pages(); -+ break; -+ default: -+ return NULL; -+ } -+ -+ if (page == NULL) -+ return NULL; -+ -+ new = agp_create_memory(pg_count); -+ if (new == NULL) -+ return NULL; -+ -+ new->pages[0] = page; -+ if (pg_count == 4) { -+ /* kludge to get 4 physical pages for ARGB cursor */ -+ new->pages[1] = new->pages[0] + 1; -+ new->pages[2] = new->pages[1] + 1; -+ new->pages[3] = new->pages[2] + 1; -+ } -+ new->page_count = pg_count; -+ new->num_scratch_pages = pg_count; -+ new->type = AGP_PHYS_MEMORY; -+ new->physical = page_to_phys(new->pages[0]); -+ return new; -+} -+ -+static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type) -+{ -+ struct agp_memory *new; -+ -+ if (type == AGP_DCACHE_MEMORY) { -+ if (pg_count != intel_private.num_dcache_entries) -+ return NULL; -+ -+ new = agp_create_memory(1); -+ if (new == NULL) -+ return NULL; -+ -+ new->type = AGP_DCACHE_MEMORY; -+ new->page_count = pg_count; -+ new->num_scratch_pages = 0; -+ agp_free_page_array(new); -+ return new; -+ } -+ if (type == AGP_PHYS_MEMORY) -+ return alloc_agpphysmem_i8xx(pg_count, type); -+ return NULL; -+} -+ -+static void intel_i810_free_by_type(struct agp_memory *curr) -+{ -+ agp_free_key(curr->key); -+ if (curr->type == AGP_PHYS_MEMORY) { -+ if (curr->page_count == 4) -+ i8xx_destroy_pages(curr->pages[0]); -+ else { -+ agp_bridge->driver->agp_destroy_page(curr->pages[0], -+ AGP_PAGE_DESTROY_UNMAP); -+ agp_bridge->driver->agp_destroy_page(curr->pages[0], -+ AGP_PAGE_DESTROY_FREE); -+ } -+ agp_free_page_array(curr); -+ } -+ kfree(curr); -+} -+ -+static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge, -+ dma_addr_t addr, int type) -+{ -+ /* Type checking must be done elsewhere */ -+ return addr | bridge->driver->masks[type].mask; -+} -+ -+static struct aper_size_info_fixed intel_i830_sizes[] = -+{ -+ {128, 32768, 5}, -+ /* The 64M mode still requires a 128k gatt */ -+ {64, 16384, 5}, -+ {256, 65536, 6}, -+ {512, 131072, 7}, -+}; -+ -+static void intel_i830_init_gtt_entries(void) -+{ -+ u16 gmch_ctrl; -+ int gtt_entries = 0; -+ u8 rdct; -+ int local = 0; -+ static const int ddt[4] = { 0, 16, 32, 64 }; -+ int size; /* reserved space (in kb) at the top of stolen memory */ -+ -+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); -+ -+ if (IS_I965) { -+ u32 pgetbl_ctl; -+ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); -+ -+ /* The 965 has a field telling us the size of the GTT, -+ * which may be larger than what is necessary to map the -+ * aperture. 
-+ */ -+ switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { -+ case I965_PGETBL_SIZE_128KB: -+ size = 128; -+ break; -+ case I965_PGETBL_SIZE_256KB: -+ size = 256; -+ break; -+ case I965_PGETBL_SIZE_512KB: -+ size = 512; -+ break; -+ case I965_PGETBL_SIZE_1MB: -+ size = 1024; -+ break; -+ case I965_PGETBL_SIZE_2MB: -+ size = 2048; -+ break; -+ case I965_PGETBL_SIZE_1_5MB: -+ size = 1024 + 512; -+ break; -+ default: -+ dev_info(&intel_private.pcidev->dev, -+ "unknown page table size, assuming 512KB\n"); -+ size = 512; -+ } -+ size += 4; /* add in BIOS popup space */ -+ } else if (IS_G33 && !IS_PINEVIEW) { -+ /* G33's GTT size defined in gmch_ctrl */ -+ switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { -+ case G33_PGETBL_SIZE_1M: -+ size = 1024; -+ break; -+ case G33_PGETBL_SIZE_2M: -+ size = 2048; -+ break; -+ default: -+ dev_info(&agp_bridge->dev->dev, -+ "unknown page table size 0x%x, assuming 512KB\n", -+ (gmch_ctrl & G33_PGETBL_SIZE_MASK)); -+ size = 512; -+ } -+ size += 4; -+ } else if (IS_G4X || IS_PINEVIEW) { -+ /* On 4 series hardware, GTT stolen is separate from graphics -+ * stolen, ignore it in stolen gtt entries counting. However, -+ * 4KB of the stolen memory doesn't get mapped to the GTT. -+ */ -+ size = 4; -+ } else { -+ /* On previous hardware, the GTT size was just what was -+ * required to map the aperture. -+ */ -+ size = agp_bridge->driver->fetch_size() + 4; -+ } -+ -+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB || -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { -+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) { -+ case I830_GMCH_GMS_STOLEN_512: -+ gtt_entries = KB(512) - KB(size); -+ break; -+ case I830_GMCH_GMS_STOLEN_1024: -+ gtt_entries = MB(1) - KB(size); -+ break; -+ case I830_GMCH_GMS_STOLEN_8192: -+ gtt_entries = MB(8) - KB(size); -+ break; -+ case I830_GMCH_GMS_LOCAL: -+ rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE); -+ gtt_entries = (I830_RDRAM_ND(rdct) + 1) * -+ MB(ddt[I830_RDRAM_DDT(rdct)]); -+ local = 1; -+ break; -+ default: -+ gtt_entries = 0; -+ break; -+ } -+ } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || -+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) { -+ /* -+ * SandyBridge has new memory control reg at 0x50.w -+ */ -+ u16 snb_gmch_ctl; -+ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); -+ switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) { -+ case SNB_GMCH_GMS_STOLEN_32M: -+ gtt_entries = MB(32) - KB(size); -+ break; -+ case SNB_GMCH_GMS_STOLEN_64M: -+ gtt_entries = MB(64) - KB(size); -+ break; -+ case SNB_GMCH_GMS_STOLEN_96M: -+ gtt_entries = MB(96) - KB(size); -+ break; -+ case SNB_GMCH_GMS_STOLEN_128M: -+ gtt_entries = MB(128) - KB(size); -+ break; -+ case SNB_GMCH_GMS_STOLEN_160M: -+ gtt_entries = MB(160) - KB(size); -+ break; -+ case SNB_GMCH_GMS_STOLEN_192M: -+ gtt_entries = MB(192) - KB(size); -+ break; -+ case SNB_GMCH_GMS_STOLEN_224M: -+ gtt_entries = MB(224) - KB(size); -+ break; -+ case SNB_GMCH_GMS_STOLEN_256M: -+ gtt_entries = MB(256) - KB(size); -+ break; -+ case SNB_GMCH_GMS_STOLEN_288M: -+ gtt_entries = MB(288) - KB(size); -+ break; -+ case SNB_GMCH_GMS_STOLEN_320M: -+ gtt_entries = MB(320) - KB(size); -+ break; -+ case SNB_GMCH_GMS_STOLEN_352M: -+ gtt_entries = MB(352) - KB(size); -+ break; -+ case SNB_GMCH_GMS_STOLEN_384M: -+ gtt_entries = MB(384) - KB(size); -+ break; -+ case SNB_GMCH_GMS_STOLEN_416M: -+ gtt_entries = MB(416) - KB(size); -+ break; -+ case SNB_GMCH_GMS_STOLEN_448M: -+ gtt_entries = MB(448) - KB(size); -+ break; -+ 
case SNB_GMCH_GMS_STOLEN_480M: -+ gtt_entries = MB(480) - KB(size); -+ break; -+ case SNB_GMCH_GMS_STOLEN_512M: -+ gtt_entries = MB(512) - KB(size); -+ break; -+ } -+ } else { -+ switch (gmch_ctrl & I855_GMCH_GMS_MASK) { -+ case I855_GMCH_GMS_STOLEN_1M: -+ gtt_entries = MB(1) - KB(size); -+ break; -+ case I855_GMCH_GMS_STOLEN_4M: -+ gtt_entries = MB(4) - KB(size); -+ break; -+ case I855_GMCH_GMS_STOLEN_8M: -+ gtt_entries = MB(8) - KB(size); -+ break; -+ case I855_GMCH_GMS_STOLEN_16M: -+ gtt_entries = MB(16) - KB(size); -+ break; -+ case I855_GMCH_GMS_STOLEN_32M: -+ gtt_entries = MB(32) - KB(size); -+ break; -+ case I915_GMCH_GMS_STOLEN_48M: -+ /* Check it's really I915G */ -+ if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) -+ gtt_entries = MB(48) - KB(size); -+ else -+ gtt_entries = 0; -+ break; -+ case I915_GMCH_GMS_STOLEN_64M: -+ /* Check it's really I915G */ -+ if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) -+ gtt_entries = MB(64) - KB(size); -+ else -+ gtt_entries = 0; -+ break; -+ case G33_GMCH_GMS_STOLEN_128M: -+ if (IS_G33 || IS_I965 || IS_G4X) -+ gtt_entries = MB(128) - KB(size); -+ else -+ gtt_entries = 0; -+ break; -+ case G33_GMCH_GMS_STOLEN_256M: -+ if (IS_G33 || IS_I965 || IS_G4X) -+ gtt_entries = MB(256) - KB(size); -+ else -+ gtt_entries = 0; -+ break; -+ case INTEL_GMCH_GMS_STOLEN_96M: -+ if (IS_I965 || IS_G4X) -+ gtt_entries = MB(96) - KB(size); -+ else -+ gtt_entries = 0; -+ break; -+ case INTEL_GMCH_GMS_STOLEN_160M: -+ if (IS_I965 || IS_G4X) -+ gtt_entries = MB(160) - KB(size); -+ else -+ gtt_entries = 0; -+ break; -+ case INTEL_GMCH_GMS_STOLEN_224M: -+ if (IS_I965 || IS_G4X) -+ gtt_entries = MB(224) - KB(size); -+ else -+ gtt_entries = 0; -+ break; -+ case INTEL_GMCH_GMS_STOLEN_352M: -+ if (IS_I965 || IS_G4X) -+ gtt_entries = MB(352) - KB(size); -+ else -+ gtt_entries = 0; -+ break; -+ default: -+ gtt_entries = 0; -+ break; -+ } -+ } -+ if (gtt_entries > 0) { -+ dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n", -+ gtt_entries / KB(1), local ? "local" : "stolen"); -+ gtt_entries /= KB(4); -+ } else { -+ dev_info(&agp_bridge->dev->dev, -+ "no pre-allocated video memory detected\n"); -+ gtt_entries = 0; -+ } -+ -+ intel_private.gtt_entries = gtt_entries; -+} -+ -+static void intel_i830_fini_flush(void) -+{ -+ kunmap(intel_private.i8xx_page); -+ intel_private.i8xx_flush_page = NULL; -+ unmap_page_from_agp(intel_private.i8xx_page); -+ -+ __free_page(intel_private.i8xx_page); -+ intel_private.i8xx_page = NULL; -+} -+ -+static void intel_i830_setup_flush(void) -+{ -+ /* return if we've already set the flush mechanism up */ -+ if (intel_private.i8xx_page) -+ return; -+ -+ intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); -+ if (!intel_private.i8xx_page) -+ return; -+ -+ intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page); -+ if (!intel_private.i8xx_flush_page) -+ intel_i830_fini_flush(); -+} -+ -+/* The chipset_flush interface needs to get data that has already been -+ * flushed out of the CPU all the way out to main memory, because the GPU -+ * doesn't snoop those buffers. -+ * -+ * The 8xx series doesn't have the same lovely interface for flushing the -+ * chipset write buffers that the later chips do. According to the 865 -+ * specs, it's 64 octwords, or 1KB. So, to get those previous things in -+ * that buffer out, we just fill 1KB and clflush it out, on the assumption -+ * that it'll push whatever was in there out. It appears to work. 
-+ */ -+static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) -+{ -+ unsigned int *pg = intel_private.i8xx_flush_page; -+ -+ memset(pg, 0, 1024); -+ -+ if (cpu_has_clflush) -+ clflush_cache_range(pg, 1024); -+ else if (wbinvd_on_all_cpus() != 0) -+ printk(KERN_ERR "Timed out waiting for cache flush.\n"); -+} -+ -+/* The intel i830 automatically initializes the agp aperture during POST. -+ * Use the memory already set aside for in the GTT. -+ */ -+static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) -+{ -+ int page_order; -+ struct aper_size_info_fixed *size; -+ int num_entries; -+ u32 temp; -+ -+ size = agp_bridge->current_size; -+ page_order = size->page_order; -+ num_entries = size->num_entries; -+ agp_bridge->gatt_table_real = NULL; -+ -+ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); -+ temp &= 0xfff80000; -+ -+ intel_private.registers = ioremap(temp, 128 * 4096); -+ if (!intel_private.registers) -+ return -ENOMEM; -+ -+ temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; -+ global_cache_flush(); /* FIXME: ?? */ -+ -+ /* we have to call this as early as possible after the MMIO base address is known */ -+ intel_i830_init_gtt_entries(); -+ -+ agp_bridge->gatt_table = NULL; -+ -+ agp_bridge->gatt_bus_addr = temp; -+ -+ return 0; -+} -+ -+/* Return the gatt table to a sane state. Use the top of stolen -+ * memory for the GTT. -+ */ -+static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge) -+{ -+ return 0; -+} -+ -+static int intel_i830_fetch_size(void) -+{ -+ u16 gmch_ctrl; -+ struct aper_size_info_fixed *values; -+ -+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); -+ -+ if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB && -+ agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) { -+ /* 855GM/852GM/865G has 128MB aperture size */ -+ agp_bridge->current_size = (void *) values; -+ agp_bridge->aperture_size_idx = 0; -+ return values[0].size; -+ } -+ -+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); -+ -+ if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) { -+ agp_bridge->current_size = (void *) values; -+ agp_bridge->aperture_size_idx = 0; -+ return values[0].size; -+ } else { -+ agp_bridge->current_size = (void *) (values + 1); -+ agp_bridge->aperture_size_idx = 1; -+ return values[1].size; -+ } -+ -+ return 0; -+} -+ -+static int intel_i830_configure(void) -+{ -+ struct aper_size_info_fixed *current_size; -+ u32 temp; -+ u16 gmch_ctrl; -+ int i; -+ -+ current_size = A_SIZE_FIX(agp_bridge->current_size); -+ -+ pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); -+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); -+ -+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); -+ gmch_ctrl |= I830_GMCH_ENABLED; -+ pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); -+ -+ writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); -+ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ -+ -+ if (agp_bridge->driver->needs_scratch_page) { -+ for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) { -+ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); -+ } -+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. 
*/ -+ } -+ -+ global_cache_flush(); -+ -+ intel_i830_setup_flush(); -+ return 0; -+} -+ -+static void intel_i830_cleanup(void) -+{ -+ iounmap(intel_private.registers); -+} -+ -+static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, -+ int type) -+{ -+ int i, j, num_entries; -+ void *temp; -+ int ret = -EINVAL; -+ int mask_type; -+ -+ if (mem->page_count == 0) -+ goto out; -+ -+ temp = agp_bridge->current_size; -+ num_entries = A_SIZE_FIX(temp)->num_entries; -+ -+ if (pg_start < intel_private.gtt_entries) { -+ dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, -+ "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", -+ pg_start, intel_private.gtt_entries); -+ -+ dev_info(&intel_private.pcidev->dev, -+ "trying to insert into local/stolen memory\n"); -+ goto out_err; -+ } -+ -+ if ((pg_start + mem->page_count) > num_entries) -+ goto out_err; -+ -+ /* The i830 can't check the GTT for entries since its read only, -+ * depend on the caller to make the correct offset decisions. -+ */ -+ -+ if (type != mem->type) -+ goto out_err; -+ -+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); -+ -+ if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && -+ mask_type != INTEL_AGP_CACHED_MEMORY) -+ goto out_err; -+ -+ if (!mem->is_flushed) -+ global_cache_flush(); -+ -+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { -+ writel(agp_bridge->driver->mask_memory(agp_bridge, -+ page_to_phys(mem->pages[i]), mask_type), -+ intel_private.registers+I810_PTE_BASE+(j*4)); -+ } -+ readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); -+ -+out: -+ ret = 0; -+out_err: -+ mem->is_flushed = true; -+ return ret; -+} -+ -+static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, -+ int type) -+{ -+ int i; -+ -+ if (mem->page_count == 0) -+ return 0; -+ -+ if (pg_start < intel_private.gtt_entries) { -+ dev_info(&intel_private.pcidev->dev, -+ "trying to disable local/stolen memory\n"); -+ return -EINVAL; -+ } -+ -+ for (i = pg_start; i < (mem->page_count + pg_start); i++) { -+ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); -+ } -+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); -+ -+ return 0; -+} -+ -+static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type) -+{ -+ if (type == AGP_PHYS_MEMORY) -+ return alloc_agpphysmem_i8xx(pg_count, type); -+ /* always return NULL for other allocation types for now */ -+ return NULL; -+} -+ -+static int intel_alloc_chipset_flush_resource(void) -+{ -+ int ret; -+ ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE, -+ PAGE_SIZE, PCIBIOS_MIN_MEM, 0, -+ pcibios_align_resource, agp_bridge->dev); -+ -+ return ret; -+} -+ -+static void intel_i915_setup_chipset_flush(void) -+{ -+ int ret; -+ u32 temp; -+ -+ pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp); -+ if (!(temp & 0x1)) { -+ intel_alloc_chipset_flush_resource(); -+ intel_private.resource_valid = 1; -+ pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); -+ } else { -+ temp &= ~1; -+ -+ intel_private.resource_valid = 1; -+ intel_private.ifp_resource.start = temp; -+ intel_private.ifp_resource.end = temp + PAGE_SIZE; -+ ret = request_resource(&iomem_resource, &intel_private.ifp_resource); -+ /* some BIOSes reserve this area in a pnp some don't */ -+ if (ret) -+ intel_private.resource_valid = 0; -+ } -+} -+ -+static void intel_i965_g33_setup_chipset_flush(void) -+{ -+ u32 temp_hi, temp_lo; -+ int 
ret; -+ -+ pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi); -+ pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo); -+ -+ if (!(temp_lo & 0x1)) { -+ -+ intel_alloc_chipset_flush_resource(); -+ -+ intel_private.resource_valid = 1; -+ pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4, -+ upper_32_bits(intel_private.ifp_resource.start)); -+ pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); -+ } else { -+ u64 l64; -+ -+ temp_lo &= ~0x1; -+ l64 = ((u64)temp_hi << 32) | temp_lo; -+ -+ intel_private.resource_valid = 1; -+ intel_private.ifp_resource.start = l64; -+ intel_private.ifp_resource.end = l64 + PAGE_SIZE; -+ ret = request_resource(&iomem_resource, &intel_private.ifp_resource); -+ /* some BIOSes reserve this area in a pnp some don't */ -+ if (ret) -+ intel_private.resource_valid = 0; -+ } -+} -+ -+static void intel_i9xx_setup_flush(void) -+{ -+ /* return if already configured */ -+ if (intel_private.ifp_resource.start) -+ return; -+ -+ if (IS_SNB) -+ return; -+ -+ /* setup a resource for this object */ -+ intel_private.ifp_resource.name = "Intel Flush Page"; -+ intel_private.ifp_resource.flags = IORESOURCE_MEM; -+ -+ /* Setup chipset flush for 915 */ -+ if (IS_I965 || IS_G33 || IS_G4X) { -+ intel_i965_g33_setup_chipset_flush(); -+ } else { -+ intel_i915_setup_chipset_flush(); -+ } -+ -+ if (intel_private.ifp_resource.start) { -+ intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); -+ if (!intel_private.i9xx_flush_page) -+ dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing"); -+ } -+} -+ -+static int intel_i9xx_configure(void) -+{ -+ struct aper_size_info_fixed *current_size; -+ u32 temp; -+ u16 gmch_ctrl; -+ int i; -+ -+ current_size = A_SIZE_FIX(agp_bridge->current_size); -+ -+ pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp); -+ -+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); -+ -+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); -+ gmch_ctrl |= I830_GMCH_ENABLED; -+ pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); -+ -+ writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); -+ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ -+ -+ if (agp_bridge->driver->needs_scratch_page) { -+ for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) { -+ writel(agp_bridge->scratch_page, intel_private.gtt+i); -+ } -+ readl(intel_private.gtt+i-1); /* PCI Posting. 
*/ -+ } -+ -+ global_cache_flush(); -+ -+ intel_i9xx_setup_flush(); -+ -+ return 0; -+} -+ -+static void intel_i915_cleanup(void) -+{ -+ if (intel_private.i9xx_flush_page) -+ iounmap(intel_private.i9xx_flush_page); -+ if (intel_private.resource_valid) -+ release_resource(&intel_private.ifp_resource); -+ intel_private.ifp_resource.start = 0; -+ intel_private.resource_valid = 0; -+ iounmap(intel_private.gtt); -+ iounmap(intel_private.registers); -+} -+ -+static void intel_i915_chipset_flush(struct agp_bridge_data *bridge) -+{ -+ if (intel_private.i9xx_flush_page) -+ writel(1, intel_private.i9xx_flush_page); -+} -+ -+static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, -+ int type) -+{ -+ int num_entries; -+ void *temp; -+ int ret = -EINVAL; -+ int mask_type; -+ -+ if (mem->page_count == 0) -+ goto out; -+ -+ temp = agp_bridge->current_size; -+ num_entries = A_SIZE_FIX(temp)->num_entries; -+ -+ if (pg_start < intel_private.gtt_entries) { -+ dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, -+ "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", -+ pg_start, intel_private.gtt_entries); -+ -+ dev_info(&intel_private.pcidev->dev, -+ "trying to insert into local/stolen memory\n"); -+ goto out_err; -+ } -+ -+ if ((pg_start + mem->page_count) > num_entries) -+ goto out_err; -+ -+ /* The i915 can't check the GTT for entries since it's read only; -+ * depend on the caller to make the correct offset decisions. -+ */ -+ -+ if (type != mem->type) -+ goto out_err; -+ -+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); -+ -+ if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && -+ mask_type != INTEL_AGP_CACHED_MEMORY) -+ goto out_err; -+ -+ if (!mem->is_flushed) -+ global_cache_flush(); -+ -+ intel_agp_insert_sg_entries(mem, pg_start, mask_type); -+ -+ out: -+ ret = 0; -+ out_err: -+ mem->is_flushed = true; -+ return ret; -+} -+ -+static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, -+ int type) -+{ -+ int i; -+ -+ if (mem->page_count == 0) -+ return 0; -+ -+ if (pg_start < intel_private.gtt_entries) { -+ dev_info(&intel_private.pcidev->dev, -+ "trying to disable local/stolen memory\n"); -+ return -EINVAL; -+ } -+ -+ for (i = pg_start; i < (mem->page_count + pg_start); i++) -+ writel(agp_bridge->scratch_page, intel_private.gtt+i); -+ -+ readl(intel_private.gtt+i-1); -+ -+ return 0; -+} -+ -+/* Return the aperture size by just checking the resource length. The effect -+ * described in the spec of the MSAC registers is just changing of the -+ * resource size. 
-+ */ -+static int intel_i9xx_fetch_size(void) -+{ -+ int num_sizes = ARRAY_SIZE(intel_i830_sizes); -+ int aper_size; /* size in megabytes */ -+ int i; -+ -+ aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1); -+ -+ for (i = 0; i < num_sizes; i++) { -+ if (aper_size == intel_i830_sizes[i].size) { -+ agp_bridge->current_size = intel_i830_sizes + i; -+ return aper_size; -+ } -+ } -+ -+ return 0; -+} -+ -+static int intel_i915_get_gtt_size(void) -+{ -+ int size; -+ -+ if (IS_G33) { -+ u16 gmch_ctrl; -+ -+ /* G33's GTT size defined in gmch_ctrl */ -+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); -+ switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { -+ case G33_PGETBL_SIZE_1M: -+ size = 1024; -+ break; -+ case G33_PGETBL_SIZE_2M: -+ size = 2048; -+ break; -+ default: -+ dev_info(&agp_bridge->dev->dev, -+ "unknown page table size 0x%x, assuming 512KB\n", -+ (gmch_ctrl & G33_PGETBL_SIZE_MASK)); -+ size = 512; -+ } -+ } else { -+ /* On previous hardware, the GTT size was just what was -+ * required to map the aperture. -+ */ -+ size = agp_bridge->driver->fetch_size(); -+ } -+ -+ return KB(size); -+} -+ -+/* The intel i915 automatically initializes the agp aperture during POST. -+ * Use the memory already set aside for in the GTT. -+ */ -+static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) -+{ -+ int page_order; -+ struct aper_size_info_fixed *size; -+ int num_entries; -+ u32 temp, temp2; -+ int gtt_map_size; -+ -+ size = agp_bridge->current_size; -+ page_order = size->page_order; -+ num_entries = size->num_entries; -+ agp_bridge->gatt_table_real = NULL; -+ -+ pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); -+ pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2); -+ -+ gtt_map_size = intel_i915_get_gtt_size(); -+ -+ intel_private.gtt = ioremap(temp2, gtt_map_size); -+ if (!intel_private.gtt) -+ return -ENOMEM; -+ -+ intel_private.gtt_total_size = gtt_map_size / 4; -+ -+ temp &= 0xfff80000; -+ -+ intel_private.registers = ioremap(temp, 128 * 4096); -+ if (!intel_private.registers) { -+ iounmap(intel_private.gtt); -+ return -ENOMEM; -+ } -+ -+ temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; -+ global_cache_flush(); /* FIXME: ? */ -+ -+ /* we have to call this as early as possible after the MMIO base address is known */ -+ intel_i830_init_gtt_entries(); -+ -+ agp_bridge->gatt_table = NULL; -+ -+ agp_bridge->gatt_bus_addr = temp; -+ -+ return 0; -+} -+ -+/* -+ * The i965 supports 36-bit physical addresses, but to keep -+ * the format of the GTT the same, the bits that don't fit -+ * in a 32-bit word are shifted down to bits 4..7. -+ * -+ * Gcc is smart enough to notice that "(addr >> 28) & 0xf0" -+ * is always zero on 32-bit architectures, so no need to make -+ * this conditional. 
-+ */
-+static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
-+ dma_addr_t addr, int type)
-+{
-+ /* Shift high bits down */
-+ addr |= (addr >> 28) & 0xf0;
-+
-+ /* Type checking must be done elsewhere */
-+ return addr | bridge->driver->masks[type].mask;
-+}
-+
-+static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
-+{
-+ u16 snb_gmch_ctl;
-+
-+ switch (agp_bridge->dev->device) {
-+ case PCI_DEVICE_ID_INTEL_GM45_HB:
-+ case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
-+ case PCI_DEVICE_ID_INTEL_Q45_HB:
-+ case PCI_DEVICE_ID_INTEL_G45_HB:
-+ case PCI_DEVICE_ID_INTEL_G41_HB:
-+ case PCI_DEVICE_ID_INTEL_B43_HB:
-+ case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
-+ case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
-+ case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
-+ case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
-+ *gtt_offset = *gtt_size = MB(2);
-+ break;
-+ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
-+ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
-+ *gtt_offset = MB(2);
-+
-+ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-+ switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
-+ default:
-+ case SNB_GTT_SIZE_0M:
-+ printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
-+ *gtt_size = MB(0);
-+ break;
-+ case SNB_GTT_SIZE_1M:
-+ *gtt_size = MB(1);
-+ break;
-+ case SNB_GTT_SIZE_2M:
-+ *gtt_size = MB(2);
-+ break;
-+ }
-+ break;
-+ default:
-+ *gtt_offset = *gtt_size = KB(512);
-+ }
-+}
-+
-+/* The intel i965 automatically initializes the agp aperture during POST.
-+ * Use the memory already set aside for in the GTT.
-+ */
-+static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
-+{
-+ int page_order;
-+ struct aper_size_info_fixed *size;
-+ int num_entries;
-+ u32 temp;
-+ int gtt_offset, gtt_size;
-+
-+ size = agp_bridge->current_size;
-+ page_order = size->page_order;
-+ num_entries = size->num_entries;
-+ agp_bridge->gatt_table_real = NULL;
-+
-+ pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
-+
-+ temp &= 0xfff00000;
-+
-+ intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
-+
-+ intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
-+
-+ if (!intel_private.gtt)
-+ return -ENOMEM;
-+
-+ intel_private.gtt_total_size = gtt_size / 4;
-+
-+ intel_private.registers = ioremap(temp, 128 * 4096);
-+ if (!intel_private.registers) {
-+ iounmap(intel_private.gtt);
-+ return -ENOMEM;
-+ }
-+
-+ temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
-+ global_cache_flush(); /* FIXME: ?
*/ -+ -+ /* we have to call this as early as possible after the MMIO base address is known */ -+ intel_i830_init_gtt_entries(); -+ -+ agp_bridge->gatt_table = NULL; -+ -+ agp_bridge->gatt_bus_addr = temp; -+ -+ return 0; -+} -+ -+static const struct agp_bridge_driver intel_810_driver = { -+ .owner = THIS_MODULE, -+ .aperture_sizes = intel_i810_sizes, -+ .size_type = FIXED_APER_SIZE, -+ .num_aperture_sizes = 2, -+ .needs_scratch_page = true, -+ .configure = intel_i810_configure, -+ .fetch_size = intel_i810_fetch_size, -+ .cleanup = intel_i810_cleanup, -+ .mask_memory = intel_i810_mask_memory, -+ .masks = intel_i810_masks, -+ .agp_enable = intel_i810_agp_enable, -+ .cache_flush = global_cache_flush, -+ .create_gatt_table = agp_generic_create_gatt_table, -+ .free_gatt_table = agp_generic_free_gatt_table, -+ .insert_memory = intel_i810_insert_entries, -+ .remove_memory = intel_i810_remove_entries, -+ .alloc_by_type = intel_i810_alloc_by_type, -+ .free_by_type = intel_i810_free_by_type, -+ .agp_alloc_page = agp_generic_alloc_page, -+ .agp_alloc_pages = agp_generic_alloc_pages, -+ .agp_destroy_page = agp_generic_destroy_page, -+ .agp_destroy_pages = agp_generic_destroy_pages, -+ .agp_type_to_mask_type = agp_generic_type_to_mask_type, -+}; -+ -+static const struct agp_bridge_driver intel_830_driver = { -+ .owner = THIS_MODULE, -+ .aperture_sizes = intel_i830_sizes, -+ .size_type = FIXED_APER_SIZE, -+ .num_aperture_sizes = 4, -+ .needs_scratch_page = true, -+ .configure = intel_i830_configure, -+ .fetch_size = intel_i830_fetch_size, -+ .cleanup = intel_i830_cleanup, -+ .mask_memory = intel_i810_mask_memory, -+ .masks = intel_i810_masks, -+ .agp_enable = intel_i810_agp_enable, -+ .cache_flush = global_cache_flush, -+ .create_gatt_table = intel_i830_create_gatt_table, -+ .free_gatt_table = intel_i830_free_gatt_table, -+ .insert_memory = intel_i830_insert_entries, -+ .remove_memory = intel_i830_remove_entries, -+ .alloc_by_type = intel_i830_alloc_by_type, -+ .free_by_type = intel_i810_free_by_type, -+ .agp_alloc_page = agp_generic_alloc_page, -+ .agp_alloc_pages = agp_generic_alloc_pages, -+ .agp_destroy_page = agp_generic_destroy_page, -+ .agp_destroy_pages = agp_generic_destroy_pages, -+ .agp_type_to_mask_type = intel_i830_type_to_mask_type, -+ .chipset_flush = intel_i830_chipset_flush, -+}; -+ -+static const struct agp_bridge_driver intel_915_driver = { -+ .owner = THIS_MODULE, -+ .aperture_sizes = intel_i830_sizes, -+ .size_type = FIXED_APER_SIZE, -+ .num_aperture_sizes = 4, -+ .needs_scratch_page = true, -+ .configure = intel_i9xx_configure, -+ .fetch_size = intel_i9xx_fetch_size, -+ .cleanup = intel_i915_cleanup, -+ .mask_memory = intel_i810_mask_memory, -+ .masks = intel_i810_masks, -+ .agp_enable = intel_i810_agp_enable, -+ .cache_flush = global_cache_flush, -+ .create_gatt_table = intel_i915_create_gatt_table, -+ .free_gatt_table = intel_i830_free_gatt_table, -+ .insert_memory = intel_i915_insert_entries, -+ .remove_memory = intel_i915_remove_entries, -+ .alloc_by_type = intel_i830_alloc_by_type, -+ .free_by_type = intel_i810_free_by_type, -+ .agp_alloc_page = agp_generic_alloc_page, -+ .agp_alloc_pages = agp_generic_alloc_pages, -+ .agp_destroy_page = agp_generic_destroy_page, -+ .agp_destroy_pages = agp_generic_destroy_pages, -+ .agp_type_to_mask_type = intel_i830_type_to_mask_type, -+ .chipset_flush = intel_i915_chipset_flush, -+#ifdef USE_PCI_DMA_API -+ .agp_map_page = intel_agp_map_page, -+ .agp_unmap_page = intel_agp_unmap_page, -+ .agp_map_memory = intel_agp_map_memory, -+ 
.agp_unmap_memory = intel_agp_unmap_memory, -+#endif -+}; -+ -+static const struct agp_bridge_driver intel_i965_driver = { -+ .owner = THIS_MODULE, -+ .aperture_sizes = intel_i830_sizes, -+ .size_type = FIXED_APER_SIZE, -+ .num_aperture_sizes = 4, -+ .needs_scratch_page = true, -+ .configure = intel_i9xx_configure, -+ .fetch_size = intel_i9xx_fetch_size, -+ .cleanup = intel_i915_cleanup, -+ .mask_memory = intel_i965_mask_memory, -+ .masks = intel_i810_masks, -+ .agp_enable = intel_i810_agp_enable, -+ .cache_flush = global_cache_flush, -+ .create_gatt_table = intel_i965_create_gatt_table, -+ .free_gatt_table = intel_i830_free_gatt_table, -+ .insert_memory = intel_i915_insert_entries, -+ .remove_memory = intel_i915_remove_entries, -+ .alloc_by_type = intel_i830_alloc_by_type, -+ .free_by_type = intel_i810_free_by_type, -+ .agp_alloc_page = agp_generic_alloc_page, -+ .agp_alloc_pages = agp_generic_alloc_pages, -+ .agp_destroy_page = agp_generic_destroy_page, -+ .agp_destroy_pages = agp_generic_destroy_pages, -+ .agp_type_to_mask_type = intel_i830_type_to_mask_type, -+ .chipset_flush = intel_i915_chipset_flush, -+#ifdef USE_PCI_DMA_API -+ .agp_map_page = intel_agp_map_page, -+ .agp_unmap_page = intel_agp_unmap_page, -+ .agp_map_memory = intel_agp_map_memory, -+ .agp_unmap_memory = intel_agp_unmap_memory, -+#endif -+}; -+ -+static const struct agp_bridge_driver intel_g33_driver = { -+ .owner = THIS_MODULE, -+ .aperture_sizes = intel_i830_sizes, -+ .size_type = FIXED_APER_SIZE, -+ .num_aperture_sizes = 4, -+ .needs_scratch_page = true, -+ .configure = intel_i9xx_configure, -+ .fetch_size = intel_i9xx_fetch_size, -+ .cleanup = intel_i915_cleanup, -+ .mask_memory = intel_i965_mask_memory, -+ .masks = intel_i810_masks, -+ .agp_enable = intel_i810_agp_enable, -+ .cache_flush = global_cache_flush, -+ .create_gatt_table = intel_i915_create_gatt_table, -+ .free_gatt_table = intel_i830_free_gatt_table, -+ .insert_memory = intel_i915_insert_entries, -+ .remove_memory = intel_i915_remove_entries, -+ .alloc_by_type = intel_i830_alloc_by_type, -+ .free_by_type = intel_i810_free_by_type, -+ .agp_alloc_page = agp_generic_alloc_page, -+ .agp_alloc_pages = agp_generic_alloc_pages, -+ .agp_destroy_page = agp_generic_destroy_page, -+ .agp_destroy_pages = agp_generic_destroy_pages, -+ .agp_type_to_mask_type = intel_i830_type_to_mask_type, -+ .chipset_flush = intel_i915_chipset_flush, -+#ifdef USE_PCI_DMA_API -+ .agp_map_page = intel_agp_map_page, -+ .agp_unmap_page = intel_agp_unmap_page, -+ .agp_map_memory = intel_agp_map_memory, -+ .agp_unmap_memory = intel_agp_unmap_memory, -+#endif -+}; -diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c -index 10f24e3..b9734a9 100644 ---- a/drivers/char/agp/nvidia-agp.c -+++ b/drivers/char/agp/nvidia-agp.c -@@ -310,6 +310,7 @@ static const struct agp_bridge_driver nvidia_driver = { - .aperture_sizes = nvidia_generic_sizes, - .size_type = U8_APER_SIZE, - .num_aperture_sizes = 5, -+ .needs_scratch_page = true, - .configure = nvidia_configure, - .fetch_size = nvidia_fetch_size, - .cleanup = nvidia_cleanup, -diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c -index 6c3837a..29aacd8 100644 ---- a/drivers/char/agp/sis-agp.c -+++ b/drivers/char/agp/sis-agp.c -@@ -125,6 +125,7 @@ static struct agp_bridge_driver sis_driver = { - .aperture_sizes = sis_generic_sizes, - .size_type = U8_APER_SIZE, - .num_aperture_sizes = 7, -+ .needs_scratch_page = true, - .configure = sis_configure, - .fetch_size = sis_fetch_size, - .cleanup = sis_cleanup, -@@ 
-415,14 +416,6 @@ static struct pci_device_id agp_sis_pci_table[] = { - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - }, -- { -- .class = (PCI_CLASS_BRIDGE_HOST << 8), -- .class_mask = ~0, -- .vendor = PCI_VENDOR_ID_SI, -- .device = PCI_DEVICE_ID_SI_760, -- .subvendor = PCI_ANY_ID, -- .subdevice = PCI_ANY_ID, -- }, - { } - }; - -diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c -index 6f48931..95db713 100644 ---- a/drivers/char/agp/uninorth-agp.c -+++ b/drivers/char/agp/uninorth-agp.c -@@ -28,6 +28,7 @@ - */ - static int uninorth_rev; - static int is_u3; -+static u32 scratch_value; - - #define DEFAULT_APERTURE_SIZE 256 - #define DEFAULT_APERTURE_STRING "256" -@@ -172,7 +173,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty - - gp = (u32 *) &agp_bridge->gatt_table[pg_start]; - for (i = 0; i < mem->page_count; ++i) { -- if (gp[i]) { -+ if (gp[i] != scratch_value) { - dev_info(&agp_bridge->dev->dev, - "uninorth_insert_memory: entry 0x%x occupied (%x)\n", - i, gp[i]); -@@ -214,8 +215,9 @@ int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type) - return 0; - - gp = (u32 *) &agp_bridge->gatt_table[pg_start]; -- for (i = 0; i < mem->page_count; ++i) -- gp[i] = 0; -+ for (i = 0; i < mem->page_count; ++i) { -+ gp[i] = scratch_value; -+ } - mb(); - uninorth_tlbflush(mem); - -@@ -421,8 +423,13 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge) - - bridge->gatt_bus_addr = virt_to_phys(table); - -+ if (is_u3) -+ scratch_value = (page_to_phys(agp_bridge->scratch_page_page) >> PAGE_SHIFT) | 0x80000000UL; -+ else -+ scratch_value = cpu_to_le32((page_to_phys(agp_bridge->scratch_page_page) & 0xFFFFF000UL) | -+ 0x1UL); - for (i = 0; i < num_entries; i++) -- bridge->gatt_table[i] = 0; -+ bridge->gatt_table[i] = scratch_value; - - return 0; - -@@ -519,6 +526,7 @@ const struct agp_bridge_driver uninorth_agp_driver = { - .agp_destroy_pages = agp_generic_destroy_pages, - .agp_type_to_mask_type = agp_generic_type_to_mask_type, - .cant_use_aperture = true, -+ .needs_scratch_page = true, - }; - - const struct agp_bridge_driver u3_agp_driver = { -diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c -index d3bd243..df67e80 100644 ---- a/drivers/char/agp/via-agp.c -+++ b/drivers/char/agp/via-agp.c -@@ -175,6 +175,7 @@ static const struct agp_bridge_driver via_agp3_driver = { - .aperture_sizes = agp3_generic_sizes, - .size_type = U8_APER_SIZE, - .num_aperture_sizes = 10, -+ .needs_scratch_page = true, - .configure = via_configure_agp3, - .fetch_size = via_fetch_size_agp3, - .cleanup = via_cleanup_agp3, -@@ -201,6 +202,7 @@ static const struct agp_bridge_driver via_driver = { - .aperture_sizes = via_generic_sizes, - .size_type = U8_APER_SIZE, - .num_aperture_sizes = 9, -+ .needs_scratch_page = true, - .configure = via_configure, - .fetch_size = via_fetch_size, - .cleanup = via_cleanup, -diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig -index 305c590..c2711c6 100644 ---- a/drivers/gpu/drm/Kconfig -+++ b/drivers/gpu/drm/Kconfig -@@ -9,6 +9,7 @@ menuconfig DRM - depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU - select I2C - select I2C_ALGOBIT -+ select SLOW_WORK - help - Kernel-level support for the Direct Rendering Infrastructure (DRI) - introduced in XFree86 4.0. 
If you say Y here, you need to select -@@ -59,6 +60,7 @@ config DRM_RADEON - select FW_LOADER - select DRM_KMS_HELPER - select DRM_TTM -+ select POWER_SUPPLY - help - Choose this option if you have an ATI Radeon graphics card. There - are both PCI and AGP versions. You don't need to choose this to -@@ -157,3 +159,5 @@ config DRM_SAVAGE - help - Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister - chipset. If M is selected the module will be called savage. -+ -+source "drivers/gpu/drm/nouveau/Kconfig" -diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c -index 932b5aa..3f46772 100644 ---- a/drivers/gpu/drm/drm_auth.c -+++ b/drivers/gpu/drm/drm_auth.c -@@ -79,10 +79,9 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv, - struct drm_device *dev = master->minor->dev; - DRM_DEBUG("%d\n", magic); - -- entry = kmalloc(sizeof(*entry), GFP_KERNEL); -+ entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; -- memset(entry, 0, sizeof(*entry)); - entry->priv = priv; - entry->hash_item.key = (unsigned long)magic; - mutex_lock(&dev->struct_mutex); -diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c -index 61b9bcf..57cea01 100644 ---- a/drivers/gpu/drm/drm_crtc.c -+++ b/drivers/gpu/drm/drm_crtc.c -@@ -34,6 +34,7 @@ - #include "drm.h" - #include "drmP.h" - #include "drm_crtc.h" -+#include "drm_edid.h" - - struct drm_prop_enum_list { - int type; -@@ -494,7 +495,6 @@ void drm_connector_cleanup(struct drm_connector *connector) - list_for_each_entry_safe(mode, t, &connector->user_modes, head) - drm_mode_remove(connector, mode); - -- kfree(connector->fb_helper_private); - mutex_lock(&dev->mode_config.mutex); - drm_mode_object_put(dev, &connector->base); - list_del(&connector->head); -@@ -858,7 +858,6 @@ void drm_mode_config_init(struct drm_device *dev) - mutex_init(&dev->mode_config.mutex); - mutex_init(&dev->mode_config.idr_mutex); - INIT_LIST_HEAD(&dev->mode_config.fb_list); -- INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list); - INIT_LIST_HEAD(&dev->mode_config.crtc_list); - INIT_LIST_HEAD(&dev->mode_config.connector_list); - INIT_LIST_HEAD(&dev->mode_config.encoder_list); -@@ -1841,8 +1840,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, - - ret = copy_from_user(clips, clips_ptr, - num_clips * sizeof(*clips)); -- if (ret) -+ if (ret) { -+ ret = -EFAULT; - goto out_err2; -+ } - } - - if (fb->funcs->dirty) { -@@ -2350,7 +2351,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector, - struct edid *edid) - { - struct drm_device *dev = connector->dev; -- int ret = 0; -+ int ret = 0, size; - - if (connector->edid_blob_ptr) - drm_property_destroy_blob(dev, connector->edid_blob_ptr); -@@ -2362,7 +2363,9 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector, - return ret; - } - -- connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid); -+ size = EDID_LENGTH * (1 + edid->extensions); -+ connector->edid_blob_ptr = drm_property_create_blob(connector->dev, -+ size, edid); - - ret = drm_connector_property_set_value(connector, - dev->mode_config.edid_property, -diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c -index 51103aa..9b2a541 100644 ---- a/drivers/gpu/drm/drm_crtc_helper.c -+++ b/drivers/gpu/drm/drm_crtc_helper.c -@@ -55,7 +55,7 @@ static void drm_mode_validate_flag(struct drm_connector *connector, - } - - /** -- * drm_helper_probe_connector_modes - get complete set of display modes -+ * 
drm_helper_probe_single_connector_modes - get complete set of display modes - * @dev: DRM device - * @maxX: max width for modes - * @maxY: max height for modes -@@ -154,21 +154,6 @@ prune: - } - EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); - --int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX, -- uint32_t maxY) --{ -- struct drm_connector *connector; -- int count = 0; -- -- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -- count += drm_helper_probe_single_connector_modes(connector, -- maxX, maxY); -- } -- -- return count; --} --EXPORT_SYMBOL(drm_helper_probe_connector_modes); -- - /** - * drm_helper_encoder_in_use - check if a given encoder is in use - * @encoder: encoder to check -@@ -263,302 +248,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev) - } - EXPORT_SYMBOL(drm_helper_disable_unused_functions); - --static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height) --{ -- struct drm_display_mode *mode; -- -- list_for_each_entry(mode, &connector->modes, head) { -- if (drm_mode_width(mode) > width || -- drm_mode_height(mode) > height) -- continue; -- if (mode->type & DRM_MODE_TYPE_PREFERRED) -- return mode; -- } -- return NULL; --} -- --static bool drm_has_cmdline_mode(struct drm_connector *connector) --{ -- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; -- struct drm_fb_helper_cmdline_mode *cmdline_mode; -- -- if (!fb_help_conn) -- return false; -- -- cmdline_mode = &fb_help_conn->cmdline_mode; -- return cmdline_mode->specified; --} -- --static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height) --{ -- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; -- struct drm_fb_helper_cmdline_mode *cmdline_mode; -- struct drm_display_mode *mode = NULL; -- -- if (!fb_help_conn) -- return mode; -- -- cmdline_mode = &fb_help_conn->cmdline_mode; -- if (cmdline_mode->specified == false) -- return mode; -- -- /* attempt to find a matching mode in the list of modes -- * we have gotten so far, if not add a CVT mode that conforms -- */ -- if (cmdline_mode->rb || cmdline_mode->margins) -- goto create_mode; -- -- list_for_each_entry(mode, &connector->modes, head) { -- /* check width/height */ -- if (mode->hdisplay != cmdline_mode->xres || -- mode->vdisplay != cmdline_mode->yres) -- continue; -- -- if (cmdline_mode->refresh_specified) { -- if (mode->vrefresh != cmdline_mode->refresh) -- continue; -- } -- -- if (cmdline_mode->interlace) { -- if (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) -- continue; -- } -- return mode; -- } -- --create_mode: -- mode = drm_cvt_mode(connector->dev, cmdline_mode->xres, -- cmdline_mode->yres, -- cmdline_mode->refresh_specified ? 
cmdline_mode->refresh : 60, -- cmdline_mode->rb, cmdline_mode->interlace, -- cmdline_mode->margins); -- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); -- list_add(&mode->head, &connector->modes); -- return mode; --} -- --static bool drm_connector_enabled(struct drm_connector *connector, bool strict) --{ -- bool enable; -- -- if (strict) { -- enable = connector->status == connector_status_connected; -- } else { -- enable = connector->status != connector_status_disconnected; -- } -- return enable; --} -- --static void drm_enable_connectors(struct drm_device *dev, bool *enabled) --{ -- bool any_enabled = false; -- struct drm_connector *connector; -- int i = 0; -- -- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -- enabled[i] = drm_connector_enabled(connector, true); -- DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id, -- enabled[i] ? "yes" : "no"); -- any_enabled |= enabled[i]; -- i++; -- } -- -- if (any_enabled) -- return; -- -- i = 0; -- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -- enabled[i] = drm_connector_enabled(connector, false); -- i++; -- } --} -- --static bool drm_target_preferred(struct drm_device *dev, -- struct drm_display_mode **modes, -- bool *enabled, int width, int height) --{ -- struct drm_connector *connector; -- int i = 0; -- -- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -- -- if (enabled[i] == false) { -- i++; -- continue; -- } -- -- DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n", -- connector->base.id); -- -- /* got for command line mode first */ -- modes[i] = drm_pick_cmdline_mode(connector, width, height); -- if (!modes[i]) { -- DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", -- connector->base.id); -- modes[i] = drm_has_preferred_mode(connector, width, height); -- } -- /* No preferred modes, pick one off the list */ -- if (!modes[i] && !list_empty(&connector->modes)) { -- list_for_each_entry(modes[i], &connector->modes, head) -- break; -- } -- DRM_DEBUG_KMS("found mode %s\n", modes[i] ? 
modes[i]->name : -- "none"); -- i++; -- } -- return true; --} -- --static int drm_pick_crtcs(struct drm_device *dev, -- struct drm_crtc **best_crtcs, -- struct drm_display_mode **modes, -- int n, int width, int height) --{ -- int c, o; -- struct drm_connector *connector; -- struct drm_connector_helper_funcs *connector_funcs; -- struct drm_encoder *encoder; -- struct drm_crtc *best_crtc; -- int my_score, best_score, score; -- struct drm_crtc **crtcs, *crtc; -- -- if (n == dev->mode_config.num_connector) -- return 0; -- c = 0; -- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -- if (c == n) -- break; -- c++; -- } -- -- best_crtcs[n] = NULL; -- best_crtc = NULL; -- best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height); -- if (modes[n] == NULL) -- return best_score; -- -- crtcs = kmalloc(dev->mode_config.num_connector * -- sizeof(struct drm_crtc *), GFP_KERNEL); -- if (!crtcs) -- return best_score; -- -- my_score = 1; -- if (connector->status == connector_status_connected) -- my_score++; -- if (drm_has_cmdline_mode(connector)) -- my_score++; -- if (drm_has_preferred_mode(connector, width, height)) -- my_score++; -- -- connector_funcs = connector->helper_private; -- encoder = connector_funcs->best_encoder(connector); -- if (!encoder) -- goto out; -- -- connector->encoder = encoder; -- -- /* select a crtc for this connector and then attempt to configure -- remaining connectors */ -- c = 0; -- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -- -- if ((encoder->possible_crtcs & (1 << c)) == 0) { -- c++; -- continue; -- } -- -- for (o = 0; o < n; o++) -- if (best_crtcs[o] == crtc) -- break; -- -- if (o < n) { -- /* ignore cloning for now */ -- c++; -- continue; -- } -- -- crtcs[n] = crtc; -- memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *)); -- score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1, -- width, height); -- if (score > best_score) { -- best_crtc = crtc; -- best_score = score; -- memcpy(best_crtcs, crtcs, -- dev->mode_config.num_connector * -- sizeof(struct drm_crtc *)); -- } -- c++; -- } --out: -- kfree(crtcs); -- return best_score; --} -- --static void drm_setup_crtcs(struct drm_device *dev) --{ -- struct drm_crtc **crtcs; -- struct drm_display_mode **modes; -- struct drm_encoder *encoder; -- struct drm_connector *connector; -- bool *enabled; -- int width, height; -- int i, ret; -- -- DRM_DEBUG_KMS("\n"); -- -- width = dev->mode_config.max_width; -- height = dev->mode_config.max_height; -- -- /* clean out all the encoder/crtc combos */ -- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { -- encoder->crtc = NULL; -- } -- -- crtcs = kcalloc(dev->mode_config.num_connector, -- sizeof(struct drm_crtc *), GFP_KERNEL); -- modes = kcalloc(dev->mode_config.num_connector, -- sizeof(struct drm_display_mode *), GFP_KERNEL); -- enabled = kcalloc(dev->mode_config.num_connector, -- sizeof(bool), GFP_KERNEL); -- -- drm_enable_connectors(dev, enabled); -- -- ret = drm_target_preferred(dev, modes, enabled, width, height); -- if (!ret) -- DRM_ERROR("Unable to find initial modes\n"); -- -- DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height); -- -- drm_pick_crtcs(dev, crtcs, modes, 0, width, height); -- -- i = 0; -- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -- struct drm_display_mode *mode = modes[i]; -- struct drm_crtc *crtc = crtcs[i]; -- -- if (connector->encoder == NULL) { -- i++; -- continue; -- } -- -- if (mode && crtc) { -- DRM_DEBUG_KMS("desired mode %s set on crtc 
%d\n", -- mode->name, crtc->base.id); -- crtc->desired_mode = mode; -- connector->encoder->crtc = crtc; -- } else { -- connector->encoder->crtc = NULL; -- connector->encoder = NULL; -- } -- i++; -- } -- -- kfree(crtcs); -- kfree(modes); -- kfree(enabled); --} -- - /** - * drm_encoder_crtc_ok - can a given crtc drive a given encoder? - * @encoder: encoder to test -@@ -936,10 +625,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) - ret = -EINVAL; - goto fail; - } -- /* TODO are these needed? */ -- set->crtc->desired_x = set->x; -- set->crtc->desired_y = set->y; -- set->crtc->desired_mode = set->mode; - } - drm_helper_disable_unused_functions(dev); - } else if (fb_changed) { -@@ -984,63 +669,6 @@ fail: - } - EXPORT_SYMBOL(drm_crtc_helper_set_config); - --bool drm_helper_plugged_event(struct drm_device *dev) --{ -- DRM_DEBUG_KMS("\n"); -- -- drm_helper_probe_connector_modes(dev, dev->mode_config.max_width, -- dev->mode_config.max_height); -- -- drm_setup_crtcs(dev); -- -- /* alert the driver fb layer */ -- dev->mode_config.funcs->fb_changed(dev); -- -- /* FIXME: send hotplug event */ -- return true; --} --/** -- * drm_initial_config - setup a sane initial connector configuration -- * @dev: DRM device -- * -- * LOCKING: -- * Called at init time, must take mode config lock. -- * -- * Scan the CRTCs and connectors and try to put together an initial setup. -- * At the moment, this is a cloned configuration across all heads with -- * a new framebuffer object as the backing store. -- * -- * RETURNS: -- * Zero if everything went ok, nonzero otherwise. -- */ --bool drm_helper_initial_config(struct drm_device *dev) --{ -- int count = 0; -- -- /* disable all the possible outputs/crtcs before entering KMS mode */ -- drm_helper_disable_unused_functions(dev); -- -- drm_fb_helper_parse_command_line(dev); -- -- count = drm_helper_probe_connector_modes(dev, -- dev->mode_config.max_width, -- dev->mode_config.max_height); -- -- /* -- * we shouldn't end up with no modes here. -- */ -- if (count == 0) -- printk(KERN_INFO "No connectors reported connected with modes\n"); -- -- drm_setup_crtcs(dev); -- -- /* alert the driver fb layer */ -- dev->mode_config.funcs->fb_changed(dev); -- -- return 0; --} --EXPORT_SYMBOL(drm_helper_initial_config); -- - static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder) - { - int dpms = DRM_MODE_DPMS_OFF; -@@ -1123,27 +751,6 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode) - } - EXPORT_SYMBOL(drm_helper_connector_dpms); - --/** -- * drm_hotplug_stage_two -- * @dev DRM device -- * @connector hotpluged connector -- * -- * LOCKING. -- * Caller must hold mode config lock, function might grab struct lock. -- * -- * Stage two of a hotplug. -- * -- * RETURNS: -- * Zero on success, errno on failure. 
-- */ --int drm_helper_hotplug_stage_two(struct drm_device *dev) --{ -- drm_helper_plugged_event(dev); -- -- return 0; --} --EXPORT_SYMBOL(drm_helper_hotplug_stage_two); -- - int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, - struct drm_mode_fb_cmd *mode_cmd) - { -@@ -1200,3 +807,114 @@ int drm_helper_resume_force_mode(struct drm_device *dev) - return 0; - } - EXPORT_SYMBOL(drm_helper_resume_force_mode); -+ -+static struct slow_work_ops output_poll_ops; -+ -+#define DRM_OUTPUT_POLL_PERIOD (10*HZ) -+static void output_poll_execute(struct slow_work *work) -+{ -+ struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work); -+ struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work); -+ struct drm_connector *connector; -+ enum drm_connector_status old_status, status; -+ bool repoll = false, changed = false; -+ int ret; -+ -+ mutex_lock(&dev->mode_config.mutex); -+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -+ -+ /* if this is HPD or polled don't check it - -+ TV out for instance */ -+ if (!connector->polled) -+ continue; -+ -+ else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT)) -+ repoll = true; -+ -+ old_status = connector->status; -+ /* if we are connected and don't want to poll for disconnect -+ skip it */ -+ if (old_status == connector_status_connected && -+ !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) && -+ !(connector->polled & DRM_CONNECTOR_POLL_HPD)) -+ continue; -+ -+ status = connector->funcs->detect(connector); -+ if (old_status != status) -+ changed = true; -+ } -+ -+ mutex_unlock(&dev->mode_config.mutex); -+ -+ if (changed) { -+ /* send a uevent + call fbdev */ -+ drm_sysfs_hotplug_event(dev); -+ if (dev->mode_config.funcs->output_poll_changed) -+ dev->mode_config.funcs->output_poll_changed(dev); -+ } -+ -+ if (repoll) { -+ ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD); -+ if (ret) -+ DRM_ERROR("delayed enqueue failed %d\n", ret); -+ } -+} -+ -+void drm_kms_helper_poll_disable(struct drm_device *dev) -+{ -+ if (!dev->mode_config.poll_enabled) -+ return; -+ delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); -+} -+EXPORT_SYMBOL(drm_kms_helper_poll_disable); -+ -+void drm_kms_helper_poll_enable(struct drm_device *dev) -+{ -+ bool poll = false; -+ struct drm_connector *connector; -+ int ret; -+ -+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -+ if (connector->polled) -+ poll = true; -+ } -+ -+ if (poll) { -+ ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD); -+ if (ret) -+ DRM_ERROR("delayed enqueue failed %d\n", ret); -+ } -+} -+EXPORT_SYMBOL(drm_kms_helper_poll_enable); -+ -+void drm_kms_helper_poll_init(struct drm_device *dev) -+{ -+ slow_work_register_user(THIS_MODULE); -+ delayed_slow_work_init(&dev->mode_config.output_poll_slow_work, -+ &output_poll_ops); -+ dev->mode_config.poll_enabled = true; -+ -+ drm_kms_helper_poll_enable(dev); -+} -+EXPORT_SYMBOL(drm_kms_helper_poll_init); -+ -+void drm_kms_helper_poll_fini(struct drm_device *dev) -+{ -+ drm_kms_helper_poll_disable(dev); -+ slow_work_unregister_user(THIS_MODULE); -+} -+EXPORT_SYMBOL(drm_kms_helper_poll_fini); -+ -+void drm_helper_hpd_irq_event(struct drm_device *dev) -+{ -+ if (!dev->mode_config.poll_enabled) -+ return; -+ delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); -+ /* schedule a slow work asap */ -+ 
delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0); -+} -+EXPORT_SYMBOL(drm_helper_hpd_irq_event); -+ -+static struct slow_work_ops output_poll_ops = { -+ .execute = output_poll_execute, -+}; -diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c -index 13f1537..252cbd7 100644 ---- a/drivers/gpu/drm/drm_dma.c -+++ b/drivers/gpu/drm/drm_dma.c -@@ -47,12 +47,10 @@ int drm_dma_setup(struct drm_device *dev) - { - int i; - -- dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL); -+ dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL); - if (!dev->dma) - return -ENOMEM; - -- memset(dev->dma, 0, sizeof(*dev->dma)); -- - for (i = 0; i <= DRM_MAX_ORDER; i++) - memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0])); - -diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c -index 18f41d7..c198186 100644 ---- a/drivers/gpu/drm/drm_edid.c -+++ b/drivers/gpu/drm/drm_edid.c -@@ -2,6 +2,7 @@ - * Copyright (c) 2006 Luc Verhaegen (quirks list) - * Copyright (c) 2007-2008 Intel Corporation - * Jesse Barnes -+ * Copyright 2010 Red Hat, Inc. - * - * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from - * FB layer. -@@ -33,10 +34,9 @@ - #include "drmP.h" - #include "drm_edid.h" - --/* -- * TODO: -- * - support EDID 1.4 (incl. CE blocks) -- */ -+#define EDID_EST_TIMINGS 16 -+#define EDID_STD_TIMINGS 8 -+#define EDID_DETAILED_TIMINGS 4 - - /* - * EDID blocks out in the wild have a variety of bugs, try to collect -@@ -65,7 +65,8 @@ - - #define LEVEL_DMT 0 - #define LEVEL_GTF 1 --#define LEVEL_CVT 2 -+#define LEVEL_GTF2 2 -+#define LEVEL_CVT 3 - - static struct edid_quirk { - char *vendor; -@@ -109,51 +110,64 @@ static struct edid_quirk { - { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 }, - }; - -+/*** DDC fetch and block validation ***/ - --/* Valid EDID header has these bytes */ - static const u8 edid_header[] = { - 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 - }; - --/** -- * drm_edid_is_valid - sanity check EDID data -- * @edid: EDID data -- * -- * Sanity check the EDID block by looking at the header, the version number -- * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's -- * valid. -+/* -+ * Sanity check the EDID block (base or extension). Return 0 if the block -+ * doesn't check out, or 1 if it's valid. 
- */ --bool drm_edid_is_valid(struct edid *edid) -+static bool -+drm_edid_block_valid(u8 *raw_edid) - { -- int i, score = 0; -+ int i; - u8 csum = 0; -- u8 *raw_edid = (u8 *)edid; -+ struct edid *edid = (struct edid *)raw_edid; -+ -+ if (raw_edid[0] == 0x00) { -+ int score = 0; - -- for (i = 0; i < sizeof(edid_header); i++) -- if (raw_edid[i] == edid_header[i]) -- score++; -+ for (i = 0; i < sizeof(edid_header); i++) -+ if (raw_edid[i] == edid_header[i]) -+ score++; - -- if (score == 8) ; -- else if (score >= 6) { -- DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); -- memcpy(raw_edid, edid_header, sizeof(edid_header)); -- } else -- goto bad; -+ if (score == 8) ; -+ else if (score >= 6) { -+ DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); -+ memcpy(raw_edid, edid_header, sizeof(edid_header)); -+ } else { -+ goto bad; -+ } -+ } - - for (i = 0; i < EDID_LENGTH; i++) - csum += raw_edid[i]; - if (csum) { - DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); -- goto bad; -- } - -- if (edid->version != 1) { -- DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); -- goto bad; -+ /* allow CEA to slide through, switches mangle this */ -+ if (raw_edid[0] != 0x02) -+ goto bad; - } - -- if (edid->revision > 4) -- DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); -+ /* per-block-type checks */ -+ switch (raw_edid[0]) { -+ case 0: /* base */ -+ if (edid->version != 1) { -+ DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); -+ goto bad; -+ } -+ -+ if (edid->revision > 4) -+ DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); -+ break; -+ -+ default: -+ break; -+ } - - return 1; - -@@ -165,8 +179,158 @@ bad: - } - return 0; - } -+ -+/** -+ * drm_edid_is_valid - sanity check EDID data -+ * @edid: EDID data -+ * -+ * Sanity-check an entire EDID record (including extensions) -+ */ -+bool drm_edid_is_valid(struct edid *edid) -+{ -+ int i; -+ u8 *raw = (u8 *)edid; -+ -+ if (!edid) -+ return false; -+ -+ for (i = 0; i <= edid->extensions; i++) -+ if (!drm_edid_block_valid(raw + i * EDID_LENGTH)) -+ return false; -+ -+ return true; -+} - EXPORT_SYMBOL(drm_edid_is_valid); - -+#define DDC_ADDR 0x50 -+#define DDC_SEGMENT_ADDR 0x30 -+/** -+ * Get EDID information via I2C. -+ * -+ * \param adapter : i2c device adaptor -+ * \param buf : EDID data buffer to be filled -+ * \param len : EDID data buffer length -+ * \return 0 on success or -1 on failure. -+ * -+ * Try to fetch EDID information by calling i2c driver function. 
-+ */ -+static int -+drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, -+ int block, int len) -+{ -+ unsigned char start = block * EDID_LENGTH; -+ struct i2c_msg msgs[] = { -+ { -+ .addr = DDC_ADDR, -+ .flags = 0, -+ .len = 1, -+ .buf = &start, -+ }, { -+ .addr = DDC_ADDR, -+ .flags = I2C_M_RD, -+ .len = len, -+ .buf = buf + start, -+ } -+ }; -+ -+ if (i2c_transfer(adapter, msgs, 2) == 2) -+ return 0; -+ -+ return -1; -+} -+ -+static u8 * -+drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) -+{ -+ int i, j = 0; -+ u8 *block, *new; -+ -+ if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) -+ return NULL; -+ -+ /* base block fetch */ -+ for (i = 0; i < 4; i++) { -+ if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH)) -+ goto out; -+ if (drm_edid_block_valid(block)) -+ break; -+ } -+ if (i == 4) -+ goto carp; -+ -+ /* if there's no extensions, we're done */ -+ if (block[0x7e] == 0) -+ return block; -+ -+ new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL); -+ if (!new) -+ goto out; -+ block = new; -+ -+ for (j = 1; j <= block[0x7e]; j++) { -+ for (i = 0; i < 4; i++) { -+ if (drm_do_probe_ddc_edid(adapter, block, j, -+ EDID_LENGTH)) -+ goto out; -+ if (drm_edid_block_valid(block + j * EDID_LENGTH)) -+ break; -+ } -+ if (i == 4) -+ goto carp; -+ } -+ -+ return block; -+ -+carp: -+ dev_warn(&connector->dev->pdev->dev, "%s: EDID block %d invalid.\n", -+ drm_get_connector_name(connector), j); -+ -+out: -+ kfree(block); -+ return NULL; -+} -+ -+/** -+ * Probe DDC presence. -+ * -+ * \param adapter : i2c device adaptor -+ * \return 1 on success -+ */ -+static bool -+drm_probe_ddc(struct i2c_adapter *adapter) -+{ -+ unsigned char out; -+ -+ return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0); -+} -+ -+/** -+ * drm_get_edid - get EDID data, if available -+ * @connector: connector we're probing -+ * @adapter: i2c adapter to use for DDC -+ * -+ * Poke the given i2c channel to grab EDID data if possible. If found, -+ * attach it to the connector. -+ * -+ * Return edid data or NULL if we couldn't find any. 
-+ */ -+struct edid *drm_get_edid(struct drm_connector *connector, -+ struct i2c_adapter *adapter) -+{ -+ struct edid *edid = NULL; -+ -+ if (drm_probe_ddc(adapter)) -+ edid = (struct edid *)drm_do_get_edid(connector, adapter); -+ -+ connector->display_info.raw_edid = (char *)edid; -+ -+ return edid; -+ -+} -+EXPORT_SYMBOL(drm_get_edid); -+ -+/*** EDID parsing ***/ -+ - /** - * edid_vendor - match a string against EDID's obfuscated vendor field - * @edid: EDID to match -@@ -335,7 +499,7 @@ static struct drm_display_mode drm_dmt_modes[] = { - DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1024x768@85Hz */ - { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, -- 1072, 1376, 0, 768, 769, 772, 808, 0, -+ 1168, 1376, 0, 768, 769, 772, 808, 0, - DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1152x864@75Hz */ - { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, -@@ -426,7 +590,7 @@ static struct drm_display_mode drm_dmt_modes[] = { - 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, - DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1600x1200@75Hz */ -- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664, -+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664, - 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, - DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1600x1200@85Hz */ -@@ -497,8 +661,8 @@ static struct drm_display_mode drm_dmt_modes[] = { - static const int drm_num_dmt_modes = - sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); - --static struct drm_display_mode *drm_find_dmt(struct drm_device *dev, -- int hsize, int vsize, int fresh) -+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, -+ int hsize, int vsize, int fresh) - { - int i; - struct drm_display_mode *ptr, *mode; -@@ -516,6 +680,111 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev, - } - return mode; - } -+EXPORT_SYMBOL(drm_mode_find_dmt); -+ -+typedef void detailed_cb(struct detailed_timing *timing, void *closure); -+ -+static void -+drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure) -+{ -+ int i; -+ struct edid *edid = (struct edid *)raw_edid; -+ -+ if (edid == NULL) -+ return; -+ -+ for (i = 0; i < EDID_DETAILED_TIMINGS; i++) -+ cb(&(edid->detailed_timings[i]), closure); -+ -+ /* XXX extension block walk */ -+} -+ -+static void -+is_rb(struct detailed_timing *t, void *data) -+{ -+ u8 *r = (u8 *)t; -+ if (r[3] == EDID_DETAIL_MONITOR_RANGE) -+ if (r[15] & 0x10) -+ *(bool *)data = true; -+} -+ -+/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */ -+static bool -+drm_monitor_supports_rb(struct edid *edid) -+{ -+ if (edid->revision >= 4) { -+ bool ret; -+ drm_for_each_detailed_block((u8 *)edid, is_rb, &ret); -+ return ret; -+ } -+ -+ return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0); -+} -+ -+static void -+find_gtf2(struct detailed_timing *t, void *data) -+{ -+ u8 *r = (u8 *)t; -+ if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02) -+ *(u8 **)data = r; -+} -+ -+/* Secondary GTF curve kicks in above some break frequency */ -+static int -+drm_gtf2_hbreak(struct edid *edid) -+{ -+ u8 *r = NULL; -+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); -+ return r ? (r[12] * 2) : 0; -+} -+ -+static int -+drm_gtf2_2c(struct edid *edid) -+{ -+ u8 *r = NULL; -+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); -+ return r ? r[13] : 0; -+} -+ -+static int -+drm_gtf2_m(struct edid *edid) -+{ -+ u8 *r = NULL; -+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); -+ return r ? 
(r[15] << 8) + r[14] : 0; -+} -+ -+static int -+drm_gtf2_k(struct edid *edid) -+{ -+ u8 *r = NULL; -+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); -+ return r ? r[16] : 0; -+} -+ -+static int -+drm_gtf2_2j(struct edid *edid) -+{ -+ u8 *r = NULL; -+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); -+ return r ? r[17] : 0; -+} -+ -+/** -+ * standard_timing_level - get std. timing level(CVT/GTF/DMT) -+ * @edid: EDID block to scan -+ */ -+static int standard_timing_level(struct edid *edid) -+{ -+ if (edid->revision >= 2) { -+ if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)) -+ return LEVEL_CVT; -+ if (drm_gtf2_hbreak(edid)) -+ return LEVEL_GTF2; -+ return LEVEL_GTF; -+ } -+ return LEVEL_DMT; -+} - - /* - * 0 is reserved. The spec says 0x01 fill for unused timings. Some old -@@ -536,22 +805,20 @@ bad_std_timing(u8 a, u8 b) - * - * Take the standard timing params (in this case width, aspect, and refresh) - * and convert them into a real mode using CVT/GTF/DMT. -- * -- * Punts for now, but should eventually use the FB layer's CVT based mode -- * generation code. - */ --struct drm_display_mode *drm_mode_std(struct drm_device *dev, -- struct std_timing *t, -- int revision, -- int timing_level) -+static struct drm_display_mode * -+drm_mode_std(struct drm_connector *connector, struct edid *edid, -+ struct std_timing *t, int revision) - { -- struct drm_display_mode *mode; -+ struct drm_device *dev = connector->dev; -+ struct drm_display_mode *m, *mode = NULL; - int hsize, vsize; - int vrefresh_rate; - unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK) - >> EDID_TIMING_ASPECT_SHIFT; - unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK) - >> EDID_TIMING_VFREQ_SHIFT; -+ int timing_level = standard_timing_level(edid); - - if (bad_std_timing(t->hsize, t->vfreq_aspect)) - return NULL; -@@ -572,18 +839,38 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev, - vsize = (hsize * 4) / 5; - else - vsize = (hsize * 9) / 16; -- /* HDTV hack */ -- if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) { -- mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0, -+ -+ /* HDTV hack, part 1 */ -+ if (vrefresh_rate == 60 && -+ ((hsize == 1360 && vsize == 765) || -+ (hsize == 1368 && vsize == 769))) { -+ hsize = 1366; -+ vsize = 768; -+ } -+ -+ /* -+ * If this connector already has a mode for this size and refresh -+ * rate (because it came from detailed or CVT info), use that -+ * instead. This way we don't have to guess at interlace or -+ * reduced blanking. 
-+ */ -+ list_for_each_entry(m, &connector->probed_modes, head) -+ if (m->hdisplay == hsize && m->vdisplay == vsize && -+ drm_mode_vrefresh(m) == vrefresh_rate) -+ return NULL; -+ -+ /* HDTV hack, part 2 */ -+ if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) { -+ mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0, - false); - mode->hdisplay = 1366; - mode->hsync_start = mode->hsync_start - 1; - mode->hsync_end = mode->hsync_end - 1; - return mode; - } -- mode = NULL; -+ - /* check whether it can be found in default mode table */ -- mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate); -+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate); - if (mode) - return mode; - -@@ -593,6 +880,23 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev, - case LEVEL_GTF: - mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); - break; -+ case LEVEL_GTF2: -+ /* -+ * This is potentially wrong if there's ever a monitor with -+ * more than one ranges section, each claiming a different -+ * secondary GTF curve. Please don't do that. -+ */ -+ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); -+ if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) { -+ kfree(mode); -+ mode = drm_gtf_mode_complex(dev, hsize, vsize, -+ vrefresh_rate, 0, 0, -+ drm_gtf2_m(edid), -+ drm_gtf2_2c(edid), -+ drm_gtf2_k(edid), -+ drm_gtf2_2j(edid)); -+ } -+ break; - case LEVEL_CVT: - mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0, - false); -@@ -716,10 +1020,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, - if (mode->vsync_end > mode->vtotal) - mode->vtotal = mode->vsync_end + 1; - -- drm_mode_set_name(mode); -- - drm_mode_do_interlace_quirk(mode, pt); - -+ drm_mode_set_name(mode); -+ - if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { - pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE; - } -@@ -802,10 +1106,6 @@ static struct drm_display_mode edid_est_modes[] = { - DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */ - }; - --#define EDID_EST_TIMINGS 16 --#define EDID_STD_TIMINGS 8 --#define EDID_DETAILED_TIMINGS 4 -- - /** - * add_established_modes - get est. modes from EDID and add them - * @edid: EDID block to scan -@@ -833,19 +1133,6 @@ static int add_established_modes(struct drm_connector *connector, struct edid *e - - return modes; - } --/** -- * stanard_timing_level - get std. timing level(CVT/GTF/DMT) -- * @edid: EDID block to scan -- */ --static int standard_timing_level(struct edid *edid) --{ -- if (edid->revision >= 2) { -- if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)) -- return LEVEL_CVT; -- return LEVEL_GTF; -- } -- return LEVEL_DMT; --} - - /** - * add_standard_modes - get std. 
modes from EDID and add them -@@ -856,22 +1143,14 @@ static int standard_timing_level(struct edid *edid) - */ - static int add_standard_modes(struct drm_connector *connector, struct edid *edid) - { -- struct drm_device *dev = connector->dev; - int i, modes = 0; -- int timing_level; -- -- timing_level = standard_timing_level(edid); - - for (i = 0; i < EDID_STD_TIMINGS; i++) { -- struct std_timing *t = &edid->standard_timings[i]; - struct drm_display_mode *newmode; - -- /* If std timings bytes are 1, 1 it's empty */ -- if (t->hsize == 1 && t->vfreq_aspect == 1) -- continue; -- -- newmode = drm_mode_std(dev, &edid->standard_timings[i], -- edid->revision, timing_level); -+ newmode = drm_mode_std(connector, edid, -+ &edid->standard_timings[i], -+ edid->revision); - if (newmode) { - drm_mode_probed_add(connector, newmode); - modes++; -@@ -881,36 +1160,86 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid - return modes; - } - --/* -- * XXX fix this for: -- * - GTF secondary curve formula -- * - EDID 1.4 range offsets -- * - CVT extended bits -- */ - static bool --mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing) -+mode_is_rb(struct drm_display_mode *mode) - { -- struct detailed_data_monitor_range *range; -- int hsync, vrefresh; -- -- range = &timing->data.other_data.data.range; -+ return (mode->htotal - mode->hdisplay == 160) && -+ (mode->hsync_end - mode->hdisplay == 80) && -+ (mode->hsync_end - mode->hsync_start == 32) && -+ (mode->vsync_start - mode->vdisplay == 3); -+} - -+static bool -+mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t) -+{ -+ int hsync, hmin, hmax; -+ -+ hmin = t[7]; -+ if (edid->revision >= 4) -+ hmin += ((t[4] & 0x04) ? 255 : 0); -+ hmax = t[8]; -+ if (edid->revision >= 4) -+ hmax += ((t[4] & 0x08) ? 255 : 0); - hsync = drm_mode_hsync(mode); -- vrefresh = drm_mode_vrefresh(mode); - -- if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz) -+ return (hsync <= hmax && hsync >= hmin); -+} -+ -+static bool -+mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t) -+{ -+ int vsync, vmin, vmax; -+ -+ vmin = t[5]; -+ if (edid->revision >= 4) -+ vmin += ((t[4] & 0x01) ? 255 : 0); -+ vmax = t[6]; -+ if (edid->revision >= 4) -+ vmax += ((t[4] & 0x02) ? 
255 : 0); -+ vsync = drm_mode_vrefresh(mode); -+ -+ return (vsync <= vmax && vsync >= vmin); -+} -+ -+static u32 -+range_pixel_clock(struct edid *edid, u8 *t) -+{ -+ /* unspecified */ -+ if (t[9] == 0 || t[9] == 255) -+ return 0; -+ -+ /* 1.4 with CVT support gives us real precision, yay */ -+ if (edid->revision >= 4 && t[10] == 0x04) -+ return (t[9] * 10000) - ((t[12] >> 2) * 250); -+ -+ /* 1.3 is pathetic, so fuzz up a bit */ -+ return t[9] * 10000 + 5001; -+} -+ -+static bool -+mode_in_range(struct drm_display_mode *mode, struct edid *edid, -+ struct detailed_timing *timing) -+{ -+ u32 max_clock; -+ u8 *t = (u8 *)timing; -+ -+ if (!mode_in_hsync_range(mode, edid, t)) - return false; - -- if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq) -+ if (!mode_in_vsync_range(mode, edid, t)) - return false; - -- if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) { -- /* be forgiving since it's in units of 10MHz */ -- int max_clock = range->pixel_clock_mhz * 10 + 9; -- max_clock *= 1000; -+ if ((max_clock = range_pixel_clock(edid, t))) - if (mode->clock > max_clock) - return false; -- } -+ -+ /* 1.4 max horizontal check */ -+ if (edid->revision >= 4 && t[10] == 0x04) -+ if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3)))) -+ return false; -+ -+ if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid)) -+ return false; - - return true; - } -@@ -919,15 +1248,16 @@ mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing) - * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will - * need to account for them. - */ --static int drm_gtf_modes_for_range(struct drm_connector *connector, -- struct detailed_timing *timing) -+static int -+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid, -+ struct detailed_timing *timing) - { - int i, modes = 0; - struct drm_display_mode *newmode; - struct drm_device *dev = connector->dev; - - for (i = 0; i < drm_num_dmt_modes; i++) { -- if (mode_in_range(drm_dmt_modes + i, timing)) { -+ if (mode_in_range(drm_dmt_modes + i, edid, timing)) { - newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]); - if (newmode) { - drm_mode_probed_add(connector, newmode); -@@ -988,13 +1318,100 @@ static int drm_cvt_modes(struct drm_connector *connector, - return modes; - } - -+static const struct { -+ short w; -+ short h; -+ short r; -+ short rb; -+} est3_modes[] = { -+ /* byte 6 */ -+ { 640, 350, 85, 0 }, -+ { 640, 400, 85, 0 }, -+ { 720, 400, 85, 0 }, -+ { 640, 480, 85, 0 }, -+ { 848, 480, 60, 0 }, -+ { 800, 600, 85, 0 }, -+ { 1024, 768, 85, 0 }, -+ { 1152, 864, 75, 0 }, -+ /* byte 7 */ -+ { 1280, 768, 60, 1 }, -+ { 1280, 768, 60, 0 }, -+ { 1280, 768, 75, 0 }, -+ { 1280, 768, 85, 0 }, -+ { 1280, 960, 60, 0 }, -+ { 1280, 960, 85, 0 }, -+ { 1280, 1024, 60, 0 }, -+ { 1280, 1024, 85, 0 }, -+ /* byte 8 */ -+ { 1360, 768, 60, 0 }, -+ { 1440, 900, 60, 1 }, -+ { 1440, 900, 60, 0 }, -+ { 1440, 900, 75, 0 }, -+ { 1440, 900, 85, 0 }, -+ { 1400, 1050, 60, 1 }, -+ { 1400, 1050, 60, 0 }, -+ { 1400, 1050, 75, 0 }, -+ /* byte 9 */ -+ { 1400, 1050, 85, 0 }, -+ { 1680, 1050, 60, 1 }, -+ { 1680, 1050, 60, 0 }, -+ { 1680, 1050, 75, 0 }, -+ { 1680, 1050, 85, 0 }, -+ { 1600, 1200, 60, 0 }, -+ { 1600, 1200, 65, 0 }, -+ { 1600, 1200, 70, 0 }, -+ /* byte 10 */ -+ { 1600, 1200, 75, 0 }, -+ { 1600, 1200, 85, 0 }, -+ { 1792, 1344, 60, 0 }, -+ { 1792, 1344, 85, 0 }, -+ { 1856, 1392, 60, 0 }, -+ { 1856, 1392, 75, 0 }, -+ { 1920, 1200, 60, 1 }, -+ { 1920, 1200, 60, 0 }, -+ /* byte 11 */ -+ { 1920, 1200, 75, 0 }, -+ { 1920, 
1200, 85, 0 }, -+ { 1920, 1440, 60, 0 }, -+ { 1920, 1440, 75, 0 }, -+}; -+static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]); -+ -+static int -+drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing) -+{ -+ int i, j, m, modes = 0; -+ struct drm_display_mode *mode; -+ u8 *est = ((u8 *)timing) + 5; -+ -+ for (i = 0; i < 6; i++) { -+ for (j = 7; j > 0; j--) { -+ m = (i * 8) + (7 - j); -+ if (m >= num_est3_modes) -+ break; -+ if (est[i] & (1 << j)) { -+ mode = drm_mode_find_dmt(connector->dev, -+ est3_modes[m].w, -+ est3_modes[m].h, -+ est3_modes[m].r -+ /*, est3_modes[m].rb */); -+ if (mode) { -+ drm_mode_probed_add(connector, mode); -+ modes++; -+ } -+ } -+ } -+ } -+ -+ return modes; -+} -+ - static int add_detailed_modes(struct drm_connector *connector, - struct detailed_timing *timing, - struct edid *edid, u32 quirks, int preferred) - { - int i, modes = 0; - struct detailed_non_pixel *data = &timing->data.other_data; -- int timing_level = standard_timing_level(edid); - int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF); - struct drm_display_mode *newmode; - struct drm_device *dev = connector->dev; -@@ -1015,7 +1432,8 @@ static int add_detailed_modes(struct drm_connector *connector, - switch (data->type) { - case EDID_DETAIL_MONITOR_RANGE: - if (gtf) -- modes += drm_gtf_modes_for_range(connector, timing); -+ modes += drm_gtf_modes_for_range(connector, edid, -+ timing); - break; - case EDID_DETAIL_STD_MODES: - /* Six modes per detailed section */ -@@ -1024,8 +1442,8 @@ static int add_detailed_modes(struct drm_connector *connector, - struct drm_display_mode *newmode; - - std = &data->data.timings[i]; -- newmode = drm_mode_std(dev, std, edid->revision, -- timing_level); -+ newmode = drm_mode_std(connector, edid, std, -+ edid->revision); - if (newmode) { - drm_mode_probed_add(connector, newmode); - modes++; -@@ -1035,6 +1453,9 @@ static int add_detailed_modes(struct drm_connector *connector, - case EDID_DETAIL_CVT_3BYTE: - modes += drm_cvt_modes(connector, timing); - break; -+ case EDID_DETAIL_EST_TIMINGS: -+ modes += drm_est3_modes(connector, timing); -+ break; - default: - break; - } -@@ -1058,7 +1479,10 @@ static int add_detailed_info(struct drm_connector *connector, - - for (i = 0; i < EDID_DETAILED_TIMINGS; i++) { - struct detailed_timing *timing = &edid->detailed_timings[i]; -- int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING); -+ int preferred = (i == 0); -+ -+ if (preferred && edid->version == 1 && edid->revision < 4) -+ preferred = (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING); - - /* In 1.0, only timings are allowed */ - if (!timing->pixel_clock && edid->version == 1 && -@@ -1088,39 +1512,22 @@ static int add_detailed_info_eedid(struct drm_connector *connector, - int i, modes = 0; - char *edid_ext = NULL; - struct detailed_timing *timing; -- int edid_ext_num; - int start_offset, end_offset; -- int timing_level; - -- if (edid->version == 1 && edid->revision < 3) { -- /* If the EDID version is less than 1.3, there is no -- * extension EDID. -- */ -+ if (edid->version == 1 && edid->revision < 3) - return 0; -- } -- if (!edid->extensions) { -- /* if there is no extension EDID, it is unnecessary to -- * parse the E-EDID to get detailed info -- */ -+ if (!edid->extensions) - return 0; -- } -- -- /* Chose real EDID extension number */ -- edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ? 
-- DRM_MAX_EDID_EXT_NUM : edid->extensions; - - /* Find CEA extension */ -- for (i = 0; i < edid_ext_num; i++) { -+ for (i = 0; i < edid->extensions; i++) { - edid_ext = (char *)edid + EDID_LENGTH * (i + 1); -- /* This block is CEA extension */ - if (edid_ext[0] == 0x02) - break; - } - -- if (i == edid_ext_num) { -- /* if there is no additional timing EDID block, return */ -+ if (i == edid->extensions) - return 0; -- } - - /* Get the start offset of detailed timing block */ - start_offset = edid_ext[2]; -@@ -1132,7 +1539,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector, - return 0; - } - -- timing_level = standard_timing_level(edid); - end_offset = EDID_LENGTH; - end_offset -= sizeof(struct detailed_timing); - for (i = start_offset; i < end_offset; -@@ -1144,123 +1550,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector, - return modes; - } - --#define DDC_ADDR 0x50 --/** -- * Get EDID information via I2C. -- * -- * \param adapter : i2c device adaptor -- * \param buf : EDID data buffer to be filled -- * \param len : EDID data buffer length -- * \return 0 on success or -1 on failure. -- * -- * Try to fetch EDID information by calling i2c driver function. -- */ --int drm_do_probe_ddc_edid(struct i2c_adapter *adapter, -- unsigned char *buf, int len) --{ -- unsigned char start = 0x0; -- struct i2c_msg msgs[] = { -- { -- .addr = DDC_ADDR, -- .flags = 0, -- .len = 1, -- .buf = &start, -- }, { -- .addr = DDC_ADDR, -- .flags = I2C_M_RD, -- .len = len, -- .buf = buf, -- } -- }; -- -- if (i2c_transfer(adapter, msgs, 2) == 2) -- return 0; -- -- return -1; --} --EXPORT_SYMBOL(drm_do_probe_ddc_edid); -- --static int drm_ddc_read_edid(struct drm_connector *connector, -- struct i2c_adapter *adapter, -- char *buf, int len) --{ -- int i; -- -- for (i = 0; i < 4; i++) { -- if (drm_do_probe_ddc_edid(adapter, buf, len)) -- return -1; -- if (drm_edid_is_valid((struct edid *)buf)) -- return 0; -- } -- -- /* repeated checksum failures; warn, but carry on */ -- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", -- drm_get_connector_name(connector)); -- return -1; --} -- --/** -- * drm_get_edid - get EDID data, if available -- * @connector: connector we're probing -- * @adapter: i2c adapter to use for DDC -- * -- * Poke the given connector's i2c channel to grab EDID data if possible. -- * -- * Return edid data or NULL if we couldn't find any. 
-- */ --struct edid *drm_get_edid(struct drm_connector *connector, -- struct i2c_adapter *adapter) --{ -- int ret; -- struct edid *edid; -- -- edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1), -- GFP_KERNEL); -- if (edid == NULL) { -- dev_warn(&connector->dev->pdev->dev, -- "Failed to allocate EDID\n"); -- goto end; -- } -- -- /* Read first EDID block */ -- ret = drm_ddc_read_edid(connector, adapter, -- (unsigned char *)edid, EDID_LENGTH); -- if (ret != 0) -- goto clean_up; -- -- /* There are EDID extensions to be read */ -- if (edid->extensions != 0) { -- int edid_ext_num = edid->extensions; -- -- if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) { -- dev_warn(&connector->dev->pdev->dev, -- "The number of extension(%d) is " -- "over max (%d), actually read number (%d)\n", -- edid_ext_num, DRM_MAX_EDID_EXT_NUM, -- DRM_MAX_EDID_EXT_NUM); -- /* Reset EDID extension number to be read */ -- edid_ext_num = DRM_MAX_EDID_EXT_NUM; -- } -- /* Read EDID including extensions too */ -- ret = drm_ddc_read_edid(connector, adapter, (char *)edid, -- EDID_LENGTH * (edid_ext_num + 1)); -- if (ret != 0) -- goto clean_up; -- -- } -- -- connector->display_info.raw_edid = (char *)edid; -- goto end; -- --clean_up: -- kfree(edid); -- edid = NULL; --end: -- return edid; -- --} --EXPORT_SYMBOL(drm_get_edid); -- - #define HDMI_IDENTIFIER 0x000C03 - #define VENDOR_BLOCK 0x03 - /** -@@ -1273,7 +1562,7 @@ EXPORT_SYMBOL(drm_get_edid); - bool drm_detect_hdmi_monitor(struct edid *edid) - { - char *edid_ext = NULL; -- int i, hdmi_id, edid_ext_num; -+ int i, hdmi_id; - int start_offset, end_offset; - bool is_hdmi = false; - -@@ -1281,19 +1570,15 @@ bool drm_detect_hdmi_monitor(struct edid *edid) - if (edid == NULL || edid->extensions == 0) - goto end; - -- /* Chose real EDID extension number */ -- edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ? -- DRM_MAX_EDID_EXT_NUM : edid->extensions; -- - /* Find CEA extension */ -- for (i = 0; i < edid_ext_num; i++) { -+ for (i = 0; i < edid->extensions; i++) { - edid_ext = (char *)edid + EDID_LENGTH * (i + 1); - /* This block is CEA extension */ - if (edid_ext[0] == 0x02) - break; - } - -- if (i == edid_ext_num) -+ if (i == edid->extensions) - goto end; - - /* Data block offset in CEA extension block */ -@@ -1348,10 +1633,24 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) - - quirks = edid_get_quirks(edid); - -- num_modes += add_established_modes(connector, edid); -- num_modes += add_standard_modes(connector, edid); -+ /* -+ * EDID spec says modes should be preferred in this order: -+ * - preferred detailed mode -+ * - other detailed modes from base block -+ * - detailed modes from extension blocks -+ * - CVT 3-byte code modes -+ * - standard timing codes -+ * - established timing codes -+ * - modes inferred from GTF or CVT range information -+ * -+ * We don't quite implement this yet, but we're close. -+ * -+ * XXX order for additional mode types in extension blocks? 
-+ */ - num_modes += add_detailed_info(connector, edid, quirks); - num_modes += add_detailed_info_eedid(connector, edid, quirks); -+ num_modes += add_standard_modes(connector, edid); -+ num_modes += add_established_modes(connector, edid); - - if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) - edid_fixup_preferred(connector, quirks); -diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c -index 288ea2f..08c4c92 100644 ---- a/drivers/gpu/drm/drm_fb_helper.c -+++ b/drivers/gpu/drm/drm_fb_helper.c -@@ -42,15 +42,33 @@ MODULE_LICENSE("GPL and additional rights"); - - static LIST_HEAD(kernel_fb_helper_list); - --int drm_fb_helper_add_connector(struct drm_connector *connector) -+/* simple single crtc case helper function */ -+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) - { -- connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL); -- if (!connector->fb_helper_private) -- return -ENOMEM; -+ struct drm_device *dev = fb_helper->dev; -+ struct drm_connector *connector; -+ int i; -+ -+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -+ struct drm_fb_helper_connector *fb_helper_connector; -+ -+ fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL); -+ if (!fb_helper_connector) -+ goto fail; - -+ fb_helper_connector->connector = connector; -+ fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector; -+ } - return 0; -+fail: -+ for (i = 0; i < fb_helper->connector_count; i++) { -+ kfree(fb_helper->connector_info[i]); -+ fb_helper->connector_info[i] = NULL; -+ } -+ fb_helper->connector_count = 0; -+ return -ENOMEM; - } --EXPORT_SYMBOL(drm_fb_helper_add_connector); -+EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors); - - /** - * drm_fb_helper_connector_parse_command_line - parse command line for connector -@@ -65,7 +83,7 @@ EXPORT_SYMBOL(drm_fb_helper_add_connector); - * - * enable/enable Digital/disable bit at the end - */ --static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector, -+static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn, - const char *mode_option) - { - const char *name; -@@ -75,13 +93,13 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con - int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0; - int i; - enum drm_connector_force force = DRM_FORCE_UNSPECIFIED; -- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; - struct drm_fb_helper_cmdline_mode *cmdline_mode; -+ struct drm_connector *connector = fb_helper_conn->connector; - -- if (!fb_help_conn) -+ if (!fb_helper_conn) - return false; - -- cmdline_mode = &fb_help_conn->cmdline_mode; -+ cmdline_mode = &fb_helper_conn->cmdline_mode; - if (!mode_option) - mode_option = fb_mode_option; - -@@ -204,18 +222,21 @@ done: - return true; - } - --int drm_fb_helper_parse_command_line(struct drm_device *dev) -+static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper) - { -- struct drm_connector *connector; -+ struct drm_fb_helper_connector *fb_helper_conn; -+ int i; - -- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -+ for (i = 0; i < fb_helper->connector_count; i++) { - char *option = NULL; - -+ fb_helper_conn = fb_helper->connector_info[i]; -+ - /* do something on return - turn off connector maybe */ -- if (fb_get_options(drm_get_connector_name(connector), 
&option)) -+ if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option)) - continue; - -- drm_fb_helper_connector_parse_command_line(connector, option); -+ drm_fb_helper_connector_parse_command_line(fb_helper_conn, option); - } - return 0; - } -@@ -243,7 +264,7 @@ bool drm_fb_helper_force_kernel_mode(void) - int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed, - void *panic_str) - { -- DRM_ERROR("panic occurred, switching back to text console\n"); -+ printk(KERN_ERR "panic occurred, switching back to text console\n"); - return drm_fb_helper_force_kernel_mode(); - return 0; - } -@@ -293,6 +314,7 @@ static void drm_fb_helper_on(struct fb_info *info) - struct drm_fb_helper *fb_helper = info->par; - struct drm_device *dev = fb_helper->dev; - struct drm_crtc *crtc; -+ struct drm_crtc_helper_funcs *crtc_funcs; - struct drm_encoder *encoder; - int i; - -@@ -300,33 +322,28 @@ static void drm_fb_helper_on(struct fb_info *info) - * For each CRTC in this fb, turn the crtc on then, - * find all associated encoders and turn them on. - */ -+ mutex_lock(&dev->mode_config.mutex); - for (i = 0; i < fb_helper->crtc_count; i++) { -- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -- struct drm_crtc_helper_funcs *crtc_funcs = -- crtc->helper_private; -+ crtc = fb_helper->crtc_info[i].mode_set.crtc; -+ crtc_funcs = crtc->helper_private; - -- /* Only mess with CRTCs in this fb */ -- if (crtc->base.id != fb_helper->crtc_info[i].crtc_id || -- !crtc->enabled) -- continue; -+ if (!crtc->enabled) -+ continue; - -- mutex_lock(&dev->mode_config.mutex); -- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); -- mutex_unlock(&dev->mode_config.mutex); -+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); - -- /* Found a CRTC on this fb, now find encoders */ -- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { -- if (encoder->crtc == crtc) { -- struct drm_encoder_helper_funcs *encoder_funcs; - -- encoder_funcs = encoder->helper_private; -- mutex_lock(&dev->mode_config.mutex); -- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); -- mutex_unlock(&dev->mode_config.mutex); -- } -+ /* Found a CRTC on this fb, now find encoders */ -+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { -+ if (encoder->crtc == crtc) { -+ struct drm_encoder_helper_funcs *encoder_funcs; -+ -+ encoder_funcs = encoder->helper_private; -+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); - } - } - } -+ mutex_unlock(&dev->mode_config.mutex); - } - - static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) -@@ -334,6 +351,7 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) - struct drm_fb_helper *fb_helper = info->par; - struct drm_device *dev = fb_helper->dev; - struct drm_crtc *crtc; -+ struct drm_crtc_helper_funcs *crtc_funcs; - struct drm_encoder *encoder; - int i; - -@@ -341,32 +359,26 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) - * For each CRTC in this fb, find all associated encoders - * and turn them off, then turn off the CRTC. 
- */ -+ mutex_lock(&dev->mode_config.mutex); - for (i = 0; i < fb_helper->crtc_count; i++) { -- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -- struct drm_crtc_helper_funcs *crtc_funcs = -- crtc->helper_private; -+ crtc = fb_helper->crtc_info[i].mode_set.crtc; -+ crtc_funcs = crtc->helper_private; - -- /* Only mess with CRTCs in this fb */ -- if (crtc->base.id != fb_helper->crtc_info[i].crtc_id || -- !crtc->enabled) -- continue; -+ if (!crtc->enabled) -+ continue; - -- /* Found a CRTC on this fb, now find encoders */ -- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { -- if (encoder->crtc == crtc) { -- struct drm_encoder_helper_funcs *encoder_funcs; -+ /* Found a CRTC on this fb, now find encoders */ -+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { -+ if (encoder->crtc == crtc) { -+ struct drm_encoder_helper_funcs *encoder_funcs; - -- encoder_funcs = encoder->helper_private; -- mutex_lock(&dev->mode_config.mutex); -- encoder_funcs->dpms(encoder, dpms_mode); -- mutex_unlock(&dev->mode_config.mutex); -- } -+ encoder_funcs = encoder->helper_private; -+ encoder_funcs->dpms(encoder, dpms_mode); - } -- mutex_lock(&dev->mode_config.mutex); -- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); -- mutex_unlock(&dev->mode_config.mutex); - } -+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); - } -+ mutex_unlock(&dev->mode_config.mutex); - } - - int drm_fb_helper_blank(int blank, struct fb_info *info) -@@ -401,50 +413,81 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) - { - int i; - -+ for (i = 0; i < helper->connector_count; i++) -+ kfree(helper->connector_info[i]); -+ kfree(helper->connector_info); - for (i = 0; i < helper->crtc_count; i++) - kfree(helper->crtc_info[i].mode_set.connectors); - kfree(helper->crtc_info); - } - --int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, int max_conn_count) -+int drm_fb_helper_init(struct drm_device *dev, -+ struct drm_fb_helper *fb_helper, -+ int crtc_count, int max_conn_count) - { -- struct drm_device *dev = helper->dev; - struct drm_crtc *crtc; - int ret = 0; - int i; - -- helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL); -- if (!helper->crtc_info) -+ fb_helper->dev = dev; -+ -+ INIT_LIST_HEAD(&fb_helper->kernel_fb_list); -+ -+ fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL); -+ if (!fb_helper->crtc_info) - return -ENOMEM; - -- helper->crtc_count = crtc_count; -+ fb_helper->crtc_count = crtc_count; -+ fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL); -+ if (!fb_helper->connector_info) { -+ kfree(fb_helper->crtc_info); -+ return -ENOMEM; -+ } -+ fb_helper->connector_count = 0; - - for (i = 0; i < crtc_count; i++) { -- helper->crtc_info[i].mode_set.connectors = -+ fb_helper->crtc_info[i].mode_set.connectors = - kcalloc(max_conn_count, - sizeof(struct drm_connector *), - GFP_KERNEL); - -- if (!helper->crtc_info[i].mode_set.connectors) { -+ if (!fb_helper->crtc_info[i].mode_set.connectors) { - ret = -ENOMEM; - goto out_free; - } -- helper->crtc_info[i].mode_set.num_connectors = 0; -+ fb_helper->crtc_info[i].mode_set.num_connectors = 0; - } - - i = 0; - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -- helper->crtc_info[i].crtc_id = crtc->base.id; -- helper->crtc_info[i].mode_set.crtc = crtc; -+ fb_helper->crtc_info[i].crtc_id = crtc->base.id; -+ fb_helper->crtc_info[i].mode_set.crtc = crtc; - i++; - 
} -- helper->conn_limit = max_conn_count; -+ fb_helper->conn_limit = max_conn_count; - return 0; - out_free: -- drm_fb_helper_crtc_free(helper); -+ drm_fb_helper_crtc_free(fb_helper); - return -ENOMEM; - } --EXPORT_SYMBOL(drm_fb_helper_init_crtc_count); -+EXPORT_SYMBOL(drm_fb_helper_init); -+ -+void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) -+{ -+ if (!list_empty(&fb_helper->kernel_fb_list)) { -+ list_del(&fb_helper->kernel_fb_list); -+ if (list_empty(&kernel_fb_helper_list)) { -+ printk(KERN_INFO "drm: unregistered panic notifier\n"); -+ atomic_notifier_chain_unregister(&panic_notifier_list, -+ &paniced); -+ unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); -+ } -+ } -+ -+ drm_fb_helper_crtc_free(fb_helper); -+ -+} -+EXPORT_SYMBOL(drm_fb_helper_fini); - - static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, - u16 blue, u16 regno, struct fb_info *info) -@@ -508,20 +551,15 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, - int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) - { - struct drm_fb_helper *fb_helper = info->par; -- struct drm_device *dev = fb_helper->dev; -+ struct drm_crtc_helper_funcs *crtc_funcs; - u16 *red, *green, *blue, *transp; - struct drm_crtc *crtc; - int i, rc = 0; - int start; - -- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; -- for (i = 0; i < fb_helper->crtc_count; i++) { -- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) -- break; -- } -- if (i == fb_helper->crtc_count) -- continue; -+ for (i = 0; i < fb_helper->crtc_count; i++) { -+ crtc = fb_helper->crtc_info[i].mode_set.crtc; -+ crtc_funcs = crtc->helper_private; - - red = cmap->red; - green = cmap->green; -@@ -549,41 +587,6 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) - } - EXPORT_SYMBOL(drm_fb_helper_setcmap); - --int drm_fb_helper_setcolreg(unsigned regno, -- unsigned red, -- unsigned green, -- unsigned blue, -- unsigned transp, -- struct fb_info *info) --{ -- struct drm_fb_helper *fb_helper = info->par; -- struct drm_device *dev = fb_helper->dev; -- struct drm_crtc *crtc; -- int i; -- int ret; -- -- if (regno > 255) -- return 1; -- -- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; -- for (i = 0; i < fb_helper->crtc_count; i++) { -- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) -- break; -- } -- if (i == fb_helper->crtc_count) -- continue; -- -- ret = setcolreg(crtc, red, green, blue, regno, info); -- if (ret) -- return ret; -- -- crtc_funcs->load_lut(crtc); -- } -- return 0; --} --EXPORT_SYMBOL(drm_fb_helper_setcolreg); -- - int drm_fb_helper_check_var(struct fb_var_screeninfo *var, - struct fb_info *info) - { -@@ -687,23 +690,21 @@ int drm_fb_helper_set_par(struct fb_info *info) - return -EINVAL; - } - -- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -- -- for (i = 0; i < fb_helper->crtc_count; i++) { -- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) -- break; -- } -- if (i == fb_helper->crtc_count) -- continue; -- -- if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) { -- mutex_lock(&dev->mode_config.mutex); -- ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set); -+ mutex_lock(&dev->mode_config.mutex); -+ for (i = 0; i < fb_helper->crtc_count; i++) { -+ crtc = fb_helper->crtc_info[i].mode_set.crtc; -+ ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set); -+ if (ret) { - 
mutex_unlock(&dev->mode_config.mutex); -- if (ret) -- return ret; -+ return ret; - } - } -+ mutex_unlock(&dev->mode_config.mutex); -+ -+ if (fb_helper->delayed_hotplug) { -+ fb_helper->delayed_hotplug = false; -+ drm_fb_helper_hotplug_event(fb_helper); -+ } - return 0; - } - EXPORT_SYMBOL(drm_fb_helper_set_par); -@@ -718,14 +719,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, - int ret = 0; - int i; - -- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -- for (i = 0; i < fb_helper->crtc_count; i++) { -- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) -- break; -- } -- -- if (i == fb_helper->crtc_count) -- continue; -+ mutex_lock(&dev->mode_config.mutex); -+ for (i = 0; i < fb_helper->crtc_count; i++) { -+ crtc = fb_helper->crtc_info[i].mode_set.crtc; - - modeset = &fb_helper->crtc_info[i].mode_set; - -@@ -733,209 +729,138 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, - modeset->y = var->yoffset; - - if (modeset->num_connectors) { -- mutex_lock(&dev->mode_config.mutex); - ret = crtc->funcs->set_config(modeset); -- mutex_unlock(&dev->mode_config.mutex); - if (!ret) { - info->var.xoffset = var->xoffset; - info->var.yoffset = var->yoffset; - } - } - } -+ mutex_unlock(&dev->mode_config.mutex); - return ret; - } - EXPORT_SYMBOL(drm_fb_helper_pan_display); - --int drm_fb_helper_single_fb_probe(struct drm_device *dev, -- int preferred_bpp, -- int (*fb_create)(struct drm_device *dev, -- uint32_t fb_width, -- uint32_t fb_height, -- uint32_t surface_width, -- uint32_t surface_height, -- uint32_t surface_depth, -- uint32_t surface_bpp, -- struct drm_framebuffer **fb_ptr)) -+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, -+ int preferred_bpp) - { -- struct drm_crtc *crtc; -- struct drm_connector *connector; -- unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1; -- unsigned int surface_width = 0, surface_height = 0; - int new_fb = 0; - int crtc_count = 0; -- int ret, i, conn_count = 0; -+ int i; - struct fb_info *info; -- struct drm_framebuffer *fb; -- struct drm_mode_set *modeset = NULL; -- struct drm_fb_helper *fb_helper; -- uint32_t surface_depth = 24, surface_bpp = 32; -+ struct drm_fb_helper_surface_size sizes; -+ int gamma_size = 0; -+ -+ memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size)); -+ sizes.surface_depth = 24; -+ sizes.surface_bpp = 32; -+ sizes.fb_width = (unsigned)-1; -+ sizes.fb_height = (unsigned)-1; - - /* if driver picks 8 or 16 by default use that - for both depth/bpp */ -- if (preferred_bpp != surface_bpp) { -- surface_depth = surface_bpp = preferred_bpp; -+ if (preferred_bpp != sizes.surface_bpp) { -+ sizes.surface_depth = sizes.surface_bpp = preferred_bpp; - } - /* first up get a count of crtcs now in use and new min/maxes width/heights */ -- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; -- -+ for (i = 0; i < fb_helper->connector_count; i++) { -+ struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i]; - struct drm_fb_helper_cmdline_mode *cmdline_mode; - -- if (!fb_help_conn) -- continue; -- -- cmdline_mode = &fb_help_conn->cmdline_mode; -+ cmdline_mode = &fb_helper_conn->cmdline_mode; - - if (cmdline_mode->bpp_specified) { - switch (cmdline_mode->bpp) { - case 8: -- surface_depth = surface_bpp = 8; -+ sizes.surface_depth = sizes.surface_bpp = 8; - break; - case 15: -- surface_depth = 15; -- surface_bpp = 16; -+ sizes.surface_depth = 15; -+ 
sizes.surface_bpp = 16; - break; - case 16: -- surface_depth = surface_bpp = 16; -+ sizes.surface_depth = sizes.surface_bpp = 16; - break; - case 24: -- surface_depth = surface_bpp = 24; -+ sizes.surface_depth = sizes.surface_bpp = 24; - break; - case 32: -- surface_depth = 24; -- surface_bpp = 32; -+ sizes.surface_depth = 24; -+ sizes.surface_bpp = 32; - break; - } - break; - } - } - -- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -- if (drm_helper_crtc_in_use(crtc)) { -- if (crtc->desired_mode) { -- if (crtc->desired_mode->hdisplay < fb_width) -- fb_width = crtc->desired_mode->hdisplay; -- -- if (crtc->desired_mode->vdisplay < fb_height) -- fb_height = crtc->desired_mode->vdisplay; -- -- if (crtc->desired_mode->hdisplay > surface_width) -- surface_width = crtc->desired_mode->hdisplay; -- -- if (crtc->desired_mode->vdisplay > surface_height) -- surface_height = crtc->desired_mode->vdisplay; -- } -+ crtc_count = 0; -+ for (i = 0; i < fb_helper->crtc_count; i++) { -+ struct drm_display_mode *desired_mode; -+ desired_mode = fb_helper->crtc_info[i].desired_mode; -+ -+ if (desired_mode) { -+ if (gamma_size == 0) -+ gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size; -+ if (desired_mode->hdisplay < sizes.fb_width) -+ sizes.fb_width = desired_mode->hdisplay; -+ if (desired_mode->vdisplay < sizes.fb_height) -+ sizes.fb_height = desired_mode->vdisplay; -+ if (desired_mode->hdisplay > sizes.surface_width) -+ sizes.surface_width = desired_mode->hdisplay; -+ if (desired_mode->vdisplay > sizes.surface_height) -+ sizes.surface_height = desired_mode->vdisplay; - crtc_count++; - } - } - -- if (crtc_count == 0 || fb_width == -1 || fb_height == -1) { -+ if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { - /* hmm everyone went away - assume VGA cable just fell out - and will come back later. */ -- return 0; -+ DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n"); -+ sizes.fb_width = sizes.surface_width = 1024; -+ sizes.fb_height = sizes.surface_height = 768; - } - -- /* do we have an fb already? */ -- if (list_empty(&dev->mode_config.fb_kernel_list)) { -- ret = (*fb_create)(dev, fb_width, fb_height, surface_width, -- surface_height, surface_depth, surface_bpp, -- &fb); -- if (ret) -- return -EINVAL; -- new_fb = 1; -- } else { -- fb = list_first_entry(&dev->mode_config.fb_kernel_list, -- struct drm_framebuffer, filp_head); -- -- /* if someone hotplugs something bigger than we have already allocated, we are pwned. -- As really we can't resize an fbdev that is in the wild currently due to fbdev -- not really being designed for the lower layers moving stuff around under it. -- - so in the grand style of things - punt. 
*/ -- if ((fb->width < surface_width) || -- (fb->height < surface_height)) { -- DRM_ERROR("Framebuffer not large enough to scale console onto.\n"); -- return -EINVAL; -- } -- } -- -- info = fb->fbdev; -- fb_helper = info->par; -- -- crtc_count = 0; -- /* okay we need to setup new connector sets in the crtcs */ -- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -- modeset = &fb_helper->crtc_info[crtc_count].mode_set; -- modeset->fb = fb; -- conn_count = 0; -- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -- if (connector->encoder) -- if (connector->encoder->crtc == modeset->crtc) { -- modeset->connectors[conn_count] = connector; -- conn_count++; -- if (conn_count > fb_helper->conn_limit) -- BUG(); -- } -- } -- -- for (i = conn_count; i < fb_helper->conn_limit; i++) -- modeset->connectors[i] = NULL; -+ /* push down into drivers */ -+ new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); -+ if (new_fb < 0) -+ return new_fb; - -- modeset->crtc = crtc; -- crtc_count++; -+ info = fb_helper->fbdev; - -- modeset->num_connectors = conn_count; -- if (modeset->crtc->desired_mode) { -- if (modeset->mode) -- drm_mode_destroy(dev, modeset->mode); -- modeset->mode = drm_mode_duplicate(dev, -- modeset->crtc->desired_mode); -- } -+ /* set the fb pointer */ -+ for (i = 0; i < fb_helper->crtc_count; i++) { -+ fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb; - } -- fb_helper->crtc_count = crtc_count; -- fb_helper->fb = fb; - - if (new_fb) { - info->var.pixclock = 0; -- ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0); -- if (ret) -- return ret; - if (register_framebuffer(info) < 0) { -- fb_dealloc_cmap(&info->cmap); - return -EINVAL; - } -+ -+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, -+ info->fix.id); -+ - } else { - drm_fb_helper_set_par(info); - } -- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, -- info->fix.id); - - /* Switch back to kernel console on panic */ - /* multi card linked list maybe */ - if (list_empty(&kernel_fb_helper_list)) { -- printk(KERN_INFO "registered panic notifier\n"); -+ printk(KERN_INFO "drm: registered panic notifier\n"); - atomic_notifier_chain_register(&panic_notifier_list, - &paniced); - register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); - } -- list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); -+ if (new_fb) -+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); -+ - return 0; - } - EXPORT_SYMBOL(drm_fb_helper_single_fb_probe); - --void drm_fb_helper_free(struct drm_fb_helper *helper) --{ -- list_del(&helper->kernel_fb_list); -- if (list_empty(&kernel_fb_helper_list)) { -- printk(KERN_INFO "unregistered panic notifier\n"); -- atomic_notifier_chain_unregister(&panic_notifier_list, -- &paniced); -- unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); -- } -- drm_fb_helper_crtc_free(helper); -- fb_dealloc_cmap(&helper->fb->fbdev->cmap); --} --EXPORT_SYMBOL(drm_fb_helper_free); -- - void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, - uint32_t depth) - { -@@ -954,10 +879,11 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, - } - EXPORT_SYMBOL(drm_fb_helper_fill_fix); - --void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb, -+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, - uint32_t fb_width, uint32_t fb_height) - { -- info->pseudo_palette = fb->pseudo_palette; -+ struct drm_framebuffer *fb = fb_helper->fb; -+ info->pseudo_palette = 
fb_helper->pseudo_palette; - info->var.xres_virtual = fb->width; - info->var.yres_virtual = fb->height; - info->var.bits_per_pixel = fb->bits_per_pixel; -@@ -1025,3 +951,457 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb, - info->var.yres = fb_height; - } - EXPORT_SYMBOL(drm_fb_helper_fill_var); -+ -+static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper, -+ uint32_t maxX, -+ uint32_t maxY) -+{ -+ struct drm_connector *connector; -+ int count = 0; -+ int i; -+ -+ for (i = 0; i < fb_helper->connector_count; i++) { -+ connector = fb_helper->connector_info[i]->connector; -+ count += connector->funcs->fill_modes(connector, maxX, maxY); -+ } -+ -+ return count; -+} -+ -+static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height) -+{ -+ struct drm_display_mode *mode; -+ -+ list_for_each_entry(mode, &fb_connector->connector->modes, head) { -+ if (drm_mode_width(mode) > width || -+ drm_mode_height(mode) > height) -+ continue; -+ if (mode->type & DRM_MODE_TYPE_PREFERRED) -+ return mode; -+ } -+ return NULL; -+} -+ -+static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector) -+{ -+ struct drm_fb_helper_cmdline_mode *cmdline_mode; -+ cmdline_mode = &fb_connector->cmdline_mode; -+ return cmdline_mode->specified; -+} -+ -+static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, -+ int width, int height) -+{ -+ struct drm_fb_helper_cmdline_mode *cmdline_mode; -+ struct drm_display_mode *mode = NULL; -+ -+ cmdline_mode = &fb_helper_conn->cmdline_mode; -+ if (cmdline_mode->specified == false) -+ return mode; -+ -+ /* attempt to find a matching mode in the list of modes -+ * we have gotten so far, if not add a CVT mode that conforms -+ */ -+ if (cmdline_mode->rb || cmdline_mode->margins) -+ goto create_mode; -+ -+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { -+ /* check width/height */ -+ if (mode->hdisplay != cmdline_mode->xres || -+ mode->vdisplay != cmdline_mode->yres) -+ continue; -+ -+ if (cmdline_mode->refresh_specified) { -+ if (mode->vrefresh != cmdline_mode->refresh) -+ continue; -+ } -+ -+ if (cmdline_mode->interlace) { -+ if (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) -+ continue; -+ } -+ return mode; -+ } -+ -+create_mode: -+ mode = drm_cvt_mode(fb_helper_conn->connector->dev, cmdline_mode->xres, -+ cmdline_mode->yres, -+ cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, -+ cmdline_mode->rb, cmdline_mode->interlace, -+ cmdline_mode->margins); -+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); -+ list_add(&mode->head, &fb_helper_conn->connector->modes); -+ return mode; -+} -+ -+static bool drm_connector_enabled(struct drm_connector *connector, bool strict) -+{ -+ bool enable; -+ -+ if (strict) { -+ enable = connector->status == connector_status_connected; -+ } else { -+ enable = connector->status != connector_status_disconnected; -+ } -+ return enable; -+} -+ -+static void drm_enable_connectors(struct drm_fb_helper *fb_helper, -+ bool *enabled) -+{ -+ bool any_enabled = false; -+ struct drm_connector *connector; -+ int i = 0; -+ -+ for (i = 0; i < fb_helper->connector_count; i++) { -+ connector = fb_helper->connector_info[i]->connector; -+ enabled[i] = drm_connector_enabled(connector, true); -+ DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id, -+ enabled[i] ? 
"yes" : "no"); -+ any_enabled |= enabled[i]; -+ } -+ -+ if (any_enabled) -+ return; -+ -+ for (i = 0; i < fb_helper->connector_count; i++) { -+ connector = fb_helper->connector_info[i]->connector; -+ enabled[i] = drm_connector_enabled(connector, false); -+ } -+} -+ -+static bool drm_target_cloned(struct drm_fb_helper *fb_helper, -+ struct drm_display_mode **modes, -+ bool *enabled, int width, int height) -+{ -+ int count, i, j; -+ bool can_clone = false; -+ struct drm_fb_helper_connector *fb_helper_conn; -+ struct drm_display_mode *dmt_mode, *mode; -+ -+ /* only contemplate cloning in the single crtc case */ -+ if (fb_helper->crtc_count > 1) -+ return false; -+ -+ count = 0; -+ for (i = 0; i < fb_helper->connector_count; i++) { -+ if (enabled[i]) -+ count++; -+ } -+ -+ /* only contemplate cloning if more than one connector is enabled */ -+ if (count <= 1) -+ return false; -+ -+ /* check the command line or if nothing common pick 1024x768 */ -+ can_clone = true; -+ for (i = 0; i < fb_helper->connector_count; i++) { -+ if (!enabled[i]) -+ continue; -+ fb_helper_conn = fb_helper->connector_info[i]; -+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height); -+ if (!modes[i]) { -+ can_clone = false; -+ break; -+ } -+ for (j = 0; j < i; j++) { -+ if (!enabled[j]) -+ continue; -+ if (!drm_mode_equal(modes[j], modes[i])) -+ can_clone = false; -+ } -+ } -+ -+ if (can_clone) { -+ DRM_DEBUG_KMS("can clone using command line\n"); -+ return true; -+ } -+ -+ /* try and find a 1024x768 mode on each connector */ -+ can_clone = true; -+ dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60); -+ -+ for (i = 0; i < fb_helper->connector_count; i++) { -+ -+ if (!enabled[i]) -+ continue; -+ -+ fb_helper_conn = fb_helper->connector_info[i]; -+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { -+ if (drm_mode_equal(mode, dmt_mode)) -+ modes[i] = mode; -+ } -+ if (!modes[i]) -+ can_clone = false; -+ } -+ -+ if (can_clone) { -+ DRM_DEBUG_KMS("can clone using 1024x768\n"); -+ return true; -+ } -+ DRM_INFO("kms: can't enable cloning when we probably wanted to.\n"); -+ return false; -+} -+ -+static bool drm_target_preferred(struct drm_fb_helper *fb_helper, -+ struct drm_display_mode **modes, -+ bool *enabled, int width, int height) -+{ -+ struct drm_fb_helper_connector *fb_helper_conn; -+ int i; -+ -+ for (i = 0; i < fb_helper->connector_count; i++) { -+ fb_helper_conn = fb_helper->connector_info[i]; -+ -+ if (enabled[i] == false) -+ continue; -+ -+ DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n", -+ fb_helper_conn->connector->base.id); -+ -+ /* got for command line mode first */ -+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height); -+ if (!modes[i]) { -+ DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", -+ fb_helper_conn->connector->base.id); -+ modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height); -+ } -+ /* No preferred modes, pick one off the list */ -+ if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) { -+ list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head) -+ break; -+ } -+ DRM_DEBUG_KMS("found mode %s\n", modes[i] ? 
modes[i]->name : -+ "none"); -+ } -+ return true; -+} -+ -+static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, -+ struct drm_fb_helper_crtc **best_crtcs, -+ struct drm_display_mode **modes, -+ int n, int width, int height) -+{ -+ int c, o; -+ struct drm_device *dev = fb_helper->dev; -+ struct drm_connector *connector; -+ struct drm_connector_helper_funcs *connector_funcs; -+ struct drm_encoder *encoder; -+ struct drm_fb_helper_crtc *best_crtc; -+ int my_score, best_score, score; -+ struct drm_fb_helper_crtc **crtcs, *crtc; -+ struct drm_fb_helper_connector *fb_helper_conn; -+ -+ if (n == fb_helper->connector_count) -+ return 0; -+ -+ fb_helper_conn = fb_helper->connector_info[n]; -+ connector = fb_helper_conn->connector; -+ -+ best_crtcs[n] = NULL; -+ best_crtc = NULL; -+ best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height); -+ if (modes[n] == NULL) -+ return best_score; -+ -+ crtcs = kzalloc(dev->mode_config.num_connector * -+ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); -+ if (!crtcs) -+ return best_score; -+ -+ my_score = 1; -+ if (connector->status == connector_status_connected) -+ my_score++; -+ if (drm_has_cmdline_mode(fb_helper_conn)) -+ my_score++; -+ if (drm_has_preferred_mode(fb_helper_conn, width, height)) -+ my_score++; -+ -+ connector_funcs = connector->helper_private; -+ encoder = connector_funcs->best_encoder(connector); -+ if (!encoder) -+ goto out; -+ -+ /* select a crtc for this connector and then attempt to configure -+ remaining connectors */ -+ for (c = 0; c < fb_helper->crtc_count; c++) { -+ crtc = &fb_helper->crtc_info[c]; -+ -+ if ((encoder->possible_crtcs & (1 << c)) == 0) { -+ continue; -+ } -+ -+ for (o = 0; o < n; o++) -+ if (best_crtcs[o] == crtc) -+ break; -+ -+ if (o < n) { -+ /* ignore cloning unless only a single crtc */ -+ if (fb_helper->crtc_count > 1) -+ continue; -+ -+ if (!drm_mode_equal(modes[o], modes[n])) -+ continue; -+ } -+ -+ crtcs[n] = crtc; -+ memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *)); -+ score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1, -+ width, height); -+ if (score > best_score) { -+ best_crtc = crtc; -+ best_score = score; -+ memcpy(best_crtcs, crtcs, -+ dev->mode_config.num_connector * -+ sizeof(struct drm_fb_helper_crtc *)); -+ } -+ } -+out: -+ kfree(crtcs); -+ return best_score; -+} -+ -+static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) -+{ -+ struct drm_device *dev = fb_helper->dev; -+ struct drm_fb_helper_crtc **crtcs; -+ struct drm_display_mode **modes; -+ struct drm_encoder *encoder; -+ struct drm_mode_set *modeset; -+ bool *enabled; -+ int width, height; -+ int i, ret; -+ -+ DRM_DEBUG_KMS("\n"); -+ -+ width = dev->mode_config.max_width; -+ height = dev->mode_config.max_height; -+ -+ /* clean out all the encoder/crtc combos */ -+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { -+ encoder->crtc = NULL; -+ } -+ -+ crtcs = kcalloc(dev->mode_config.num_connector, -+ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); -+ modes = kcalloc(dev->mode_config.num_connector, -+ sizeof(struct drm_display_mode *), GFP_KERNEL); -+ enabled = kcalloc(dev->mode_config.num_connector, -+ sizeof(bool), GFP_KERNEL); -+ -+ drm_enable_connectors(fb_helper, enabled); -+ -+ ret = drm_target_cloned(fb_helper, modes, enabled, width, height); -+ if (!ret) { -+ ret = drm_target_preferred(fb_helper, modes, enabled, width, height); -+ if (!ret) -+ DRM_ERROR("Unable to find initial modes\n"); -+ } -+ -+ DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, 
height); -+ -+ drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height); -+ -+ /* need to set the modesets up here for use later */ -+ /* fill out the connector<->crtc mappings into the modesets */ -+ for (i = 0; i < fb_helper->crtc_count; i++) { -+ modeset = &fb_helper->crtc_info[i].mode_set; -+ modeset->num_connectors = 0; -+ } -+ -+ for (i = 0; i < fb_helper->connector_count; i++) { -+ struct drm_display_mode *mode = modes[i]; -+ struct drm_fb_helper_crtc *fb_crtc = crtcs[i]; -+ modeset = &fb_crtc->mode_set; -+ -+ if (mode && fb_crtc) { -+ DRM_DEBUG_KMS("desired mode %s set on crtc %d\n", -+ mode->name, fb_crtc->mode_set.crtc->base.id); -+ fb_crtc->desired_mode = mode; -+ if (modeset->mode) -+ drm_mode_destroy(dev, modeset->mode); -+ modeset->mode = drm_mode_duplicate(dev, -+ fb_crtc->desired_mode); -+ modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector; -+ } -+ } -+ -+ kfree(crtcs); -+ kfree(modes); -+ kfree(enabled); -+} -+ -+/** -+ * drm_helper_initial_config - setup a sane initial connector configuration -+ * @dev: DRM device -+ * -+ * LOCKING: -+ * Called at init time, must take mode config lock. -+ * -+ * Scan the CRTCs and connectors and try to put together an initial setup. -+ * At the moment, this is a cloned configuration across all heads with -+ * a new framebuffer object as the backing store. -+ * -+ * RETURNS: -+ * Zero if everything went ok, nonzero otherwise. -+ */ -+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) -+{ -+ struct drm_device *dev = fb_helper->dev; -+ int count = 0; -+ -+ /* disable all the possible outputs/crtcs before entering KMS mode */ -+ drm_helper_disable_unused_functions(fb_helper->dev); -+ -+ drm_fb_helper_parse_command_line(fb_helper); -+ -+ count = drm_fb_helper_probe_connector_modes(fb_helper, -+ dev->mode_config.max_width, -+ dev->mode_config.max_height); -+ /* -+ * we shouldn't end up with no modes here. 
-+ */ -+ if (count == 0) { -+ printk(KERN_INFO "No connectors reported connected with modes\n"); -+ } -+ drm_setup_crtcs(fb_helper); -+ -+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); -+} -+EXPORT_SYMBOL(drm_fb_helper_initial_config); -+ -+bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) -+{ -+ int count = 0; -+ u32 max_width, max_height, bpp_sel; -+ bool bound = false, crtcs_bound = false; -+ struct drm_crtc *crtc; -+ -+ if (!fb_helper->fb) -+ return false; -+ -+ list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) { -+ if (crtc->fb) -+ crtcs_bound = true; -+ if (crtc->fb == fb_helper->fb) -+ bound = true; -+ } -+ -+ if (!bound && crtcs_bound) { -+ fb_helper->delayed_hotplug = true; -+ return false; -+ } -+ DRM_DEBUG_KMS("\n"); -+ -+ max_width = fb_helper->fb->width; -+ max_height = fb_helper->fb->height; -+ bpp_sel = fb_helper->fb->bits_per_pixel; -+ -+ count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, -+ max_height); -+ drm_setup_crtcs(fb_helper); -+ -+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); -+} -+EXPORT_SYMBOL(drm_fb_helper_hotplug_event); -+ -diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c -index 9d532d7..e7aace2 100644 ---- a/drivers/gpu/drm/drm_fops.c -+++ b/drivers/gpu/drm/drm_fops.c -@@ -243,11 +243,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp, - - DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); - -- priv = kmalloc(sizeof(*priv), GFP_KERNEL); -+ priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - -- memset(priv, 0, sizeof(*priv)); - filp->private_data = priv; - priv->filp = filp; - priv->uid = current_euid(); -diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c -index aa89d4b..33dad3f 100644 ---- a/drivers/gpu/drm/drm_gem.c -+++ b/drivers/gpu/drm/drm_gem.c -@@ -124,6 +124,31 @@ drm_gem_destroy(struct drm_device *dev) - } - - /** -+ * Initialize an already allocate GEM object of the specified size with -+ * shmfs backing store. -+ */ -+int drm_gem_object_init(struct drm_device *dev, -+ struct drm_gem_object *obj, size_t size) -+{ -+ BUG_ON((size & (PAGE_SIZE - 1)) != 0); -+ -+ obj->dev = dev; -+ obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); -+ if (IS_ERR(obj->filp)) -+ return -ENOMEM; -+ -+ kref_init(&obj->refcount); -+ kref_init(&obj->handlecount); -+ obj->size = size; -+ -+ atomic_inc(&dev->object_count); -+ atomic_add(obj->size, &dev->object_memory); -+ -+ return 0; -+} -+EXPORT_SYMBOL(drm_gem_object_init); -+ -+/** - * Allocate a GEM object of the specified size with shmfs backing store - */ - struct drm_gem_object * -@@ -131,28 +156,22 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size) - { - struct drm_gem_object *obj; - -- BUG_ON((size & (PAGE_SIZE - 1)) != 0); -- - obj = kzalloc(sizeof(*obj), GFP_KERNEL); - if (!obj) - goto free; - -- obj->dev = dev; -- obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); -- if (IS_ERR(obj->filp)) -+ if (drm_gem_object_init(dev, obj, size) != 0) - goto free; - -- kref_init(&obj->refcount); -- kref_init(&obj->handlecount); -- obj->size = size; - if (dev->driver->gem_init_object != NULL && - dev->driver->gem_init_object(obj) != 0) { - goto fput; - } -- atomic_inc(&dev->object_count); -- atomic_add(obj->size, &dev->object_memory); - return obj; - fput: -+ /* Object_init mangles the global counters - readjust them. 
*/ -+ atomic_dec(&dev->object_count); -+ atomic_sub(obj->size, &dev->object_memory); - fput(obj->filp); - free: - kfree(obj); -@@ -403,15 +422,15 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private) - idr_destroy(&file_private->object_idr); - } - --static void --drm_gem_object_free_common(struct drm_gem_object *obj) -+void -+drm_gem_object_release(struct drm_gem_object *obj) - { - struct drm_device *dev = obj->dev; - fput(obj->filp); - atomic_dec(&dev->object_count); - atomic_sub(obj->size, &dev->object_memory); -- kfree(obj); - } -+EXPORT_SYMBOL(drm_gem_object_release); - - /** - * Called after the last reference to the object has been lost. -@@ -429,8 +448,6 @@ drm_gem_object_free(struct kref *kref) - - if (dev->driver->gem_free_object != NULL) - dev->driver->gem_free_object(obj); -- -- drm_gem_object_free_common(obj); - } - EXPORT_SYMBOL(drm_gem_object_free); - -@@ -453,8 +470,6 @@ drm_gem_object_free_unlocked(struct kref *kref) - dev->driver->gem_free_object(obj); - mutex_unlock(&dev->struct_mutex); - } -- -- drm_gem_object_free_common(obj); - } - EXPORT_SYMBOL(drm_gem_object_free_unlocked); - -diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c -index 76d6339..f1f473e 100644 ---- a/drivers/gpu/drm/drm_modes.c -+++ b/drivers/gpu/drm/drm_modes.c -@@ -258,8 +258,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, - drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP; - /* 18/16. Find actual vertical frame frequency */ - /* ignore - just set the mode flag for interlaced */ -- if (interlaced) -+ if (interlaced) { - drm_mode->vtotal *= 2; -+ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE; -+ } - /* Fill the mode line name */ - drm_mode_set_name(drm_mode); - if (reduced) -@@ -268,43 +270,35 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, - else - drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC | - DRM_MODE_FLAG_NHSYNC); -- if (interlaced) -- drm_mode->flags |= DRM_MODE_FLAG_INTERLACE; - -- return drm_mode; -+ return drm_mode; - } - EXPORT_SYMBOL(drm_cvt_mode); - - /** -- * drm_gtf_mode - create the modeline based on GTF algorithm -+ * drm_gtf_mode_complex - create the modeline based on full GTF algorithm - * - * @dev :drm device - * @hdisplay :hdisplay size - * @vdisplay :vdisplay size - * @vrefresh :vrefresh rate. - * @interlaced :whether the interlace is supported -- * @margins :whether the margin is supported -+ * @margins :desired margin size -+ * @GTF_[MCKJ] :extended GTF formula parameters - * - * LOCKING. - * none. - * -- * return the modeline based on GTF algorithm -- * -- * This function is to create the modeline based on the GTF algorithm. -- * Generalized Timing Formula is derived from: -- * GTF Spreadsheet by Andy Morrish (1/5/97) -- * available at http://www.vesa.org -+ * return the modeline based on full GTF algorithm. - * -- * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c. -- * What I have done is to translate it by using integer calculation. -- * I also refer to the function of fb_get_mode in the file of -- * drivers/video/fbmon.c -+ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them -+ * in here multiplied by two. For a C of 40, pass in 80. 
- */ --struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, -- int vdisplay, int vrefresh, -- bool interlaced, int margins) --{ -- /* 1) top/bottom margin size (% of height) - default: 1.8, */ -+struct drm_display_mode * -+drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay, -+ int vrefresh, bool interlaced, int margins, -+ int GTF_M, int GTF_2C, int GTF_K, int GTF_2J) -+{ /* 1) top/bottom margin size (% of height) - default: 1.8, */ - #define GTF_MARGIN_PERCENTAGE 18 - /* 2) character cell horizontal granularity (pixels) - default 8 */ - #define GTF_CELL_GRAN 8 -@@ -316,17 +310,9 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, - #define H_SYNC_PERCENT 8 - /* min time of vsync + back porch (microsec) */ - #define MIN_VSYNC_PLUS_BP 550 -- /* blanking formula gradient */ --#define GTF_M 600 -- /* blanking formula offset */ --#define GTF_C 40 -- /* blanking formula scaling factor */ --#define GTF_K 128 -- /* blanking formula scaling factor */ --#define GTF_J 20 - /* C' and M' are part of the Blanking Duty Cycle computation */ --#define GTF_C_PRIME (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J) --#define GTF_M_PRIME (GTF_K * GTF_M / 256) -+#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2) -+#define GTF_M_PRIME (GTF_K * GTF_M / 256) - struct drm_display_mode *drm_mode; - unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd; - int top_margin, bottom_margin; -@@ -460,17 +446,61 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, - - drm_mode->clock = pixel_freq; - -- drm_mode_set_name(drm_mode); -- drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC; -- - if (interlaced) { - drm_mode->vtotal *= 2; - drm_mode->flags |= DRM_MODE_FLAG_INTERLACE; - } - -+ drm_mode_set_name(drm_mode); -+ if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40) -+ drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC; -+ else -+ drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC; -+ - return drm_mode; - } -+EXPORT_SYMBOL(drm_gtf_mode_complex); -+ -+/** -+ * drm_gtf_mode - create the modeline based on GTF algorithm -+ * -+ * @dev :drm device -+ * @hdisplay :hdisplay size -+ * @vdisplay :vdisplay size -+ * @vrefresh :vrefresh rate. -+ * @interlaced :whether the interlace is supported -+ * @margins :whether the margin is supported -+ * -+ * LOCKING. -+ * none. -+ * -+ * return the modeline based on GTF algorithm -+ * -+ * This function is to create the modeline based on the GTF algorithm. -+ * Generalized Timing Formula is derived from: -+ * GTF Spreadsheet by Andy Morrish (1/5/97) -+ * available at http://www.vesa.org -+ * -+ * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c. -+ * What I have done is to translate it by using integer calculation. 
-+ * I also refer to the function of fb_get_mode in the file of -+ * drivers/video/fbmon.c -+ * -+ * Standard GTF parameters: -+ * M = 600 -+ * C = 40 -+ * K = 128 -+ * J = 20 -+ */ -+struct drm_display_mode * -+drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, -+ bool lace, int margins) -+{ -+ return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace, -+ margins, 600, 40 * 2, 128, 20 * 2); -+} - EXPORT_SYMBOL(drm_gtf_mode); -+ - /** - * drm_mode_set_name - set the name on a mode - * @mode: name will be set in this mode -@@ -482,8 +512,11 @@ EXPORT_SYMBOL(drm_gtf_mode); - */ - void drm_mode_set_name(struct drm_display_mode *mode) - { -- snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay, -- mode->vdisplay); -+ bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); -+ -+ snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s", -+ mode->hdisplay, mode->vdisplay, -+ interlaced ? "i" : ""); - } - EXPORT_SYMBOL(drm_mode_set_name); - -diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c -index 25bbd30..3a3a451 100644 ---- a/drivers/gpu/drm/drm_sysfs.c -+++ b/drivers/gpu/drm/drm_sysfs.c -@@ -333,7 +333,7 @@ static struct device_attribute connector_attrs_opt1[] = { - static struct bin_attribute edid_attr = { - .attr.name = "edid", - .attr.mode = 0444, -- .size = 128, -+ .size = 0, - .read = edid_show, - }; - -diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile -index 9929f84..da78f2c 100644 ---- a/drivers/gpu/drm/i915/Makefile -+++ b/drivers/gpu/drm/i915/Makefile -@@ -22,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ - intel_fb.o \ - intel_tv.o \ - intel_dvo.o \ -+ intel_ringbuffer.o \ - intel_overlay.o \ - dvo_ch7xxx.o \ - dvo_ch7017.o \ -@@ -33,3 +34,5 @@ i915-$(CONFIG_ACPI) += i915_opregion.o - i915-$(CONFIG_COMPAT) += i915_ioc32.o - - obj-$(CONFIG_DRM_I915) += i915.o -+ -+CFLAGS_i915_trace_points.o := -I$(src) -diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h -index 288fc50..0d6ff64 100644 ---- a/drivers/gpu/drm/i915/dvo.h -+++ b/drivers/gpu/drm/i915/dvo.h -@@ -70,16 +70,6 @@ struct intel_dvo_dev_ops { - void (*dpms)(struct intel_dvo_device *dvo, int mode); - - /* -- * Saves the output's state for restoration on VT switch. -- */ -- void (*save)(struct intel_dvo_device *dvo); -- -- /* -- * Restore's the output's state at VT switch. -- */ -- void (*restore)(struct intel_dvo_device *dvo); -- -- /* - * Callback for testing a video mode for a given output. 
- * - * This function should only check for cases where a mode can't -diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c -index 1184c14..14d5980 100644 ---- a/drivers/gpu/drm/i915/dvo_ch7017.c -+++ b/drivers/gpu/drm/i915/dvo_ch7017.c -@@ -159,16 +159,7 @@ - #define CH7017_BANG_LIMIT_CONTROL 0x7f - - struct ch7017_priv { -- uint8_t save_hapi; -- uint8_t save_vali; -- uint8_t save_valo; -- uint8_t save_ailo; -- uint8_t save_lvds_pll_vco; -- uint8_t save_feedback_div; -- uint8_t save_lvds_control_2; -- uint8_t save_outputs_enable; -- uint8_t save_lvds_power_down; -- uint8_t save_power_management; -+ uint8_t dummy; - }; - - static void ch7017_dump_regs(struct intel_dvo_device *dvo); -@@ -401,39 +392,6 @@ do { \ - DUMP(CH7017_LVDS_POWER_DOWN); - } - --static void ch7017_save(struct intel_dvo_device *dvo) --{ -- struct ch7017_priv *priv = dvo->dev_priv; -- -- ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi); -- ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo); -- ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo); -- ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco); -- ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div); -- ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2); -- ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable); -- ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down); -- ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management); --} -- --static void ch7017_restore(struct intel_dvo_device *dvo) --{ -- struct ch7017_priv *priv = dvo->dev_priv; -- -- /* Power down before changing mode */ -- ch7017_dpms(dvo, DRM_MODE_DPMS_OFF); -- -- ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi); -- ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo); -- ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo); -- ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco); -- ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div); -- ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2); -- ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable); -- ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down); -- ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management); --} -- - static void ch7017_destroy(struct intel_dvo_device *dvo) - { - struct ch7017_priv *priv = dvo->dev_priv; -@@ -451,7 +409,5 @@ struct intel_dvo_dev_ops ch7017_ops = { - .mode_set = ch7017_mode_set, - .dpms = ch7017_dpms, - .dump_regs = ch7017_dump_regs, -- .save = ch7017_save, -- .restore = ch7017_restore, - .destroy = ch7017_destroy, - }; -diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c -index d56ff5c..6f1944b 100644 ---- a/drivers/gpu/drm/i915/dvo_ch7xxx.c -+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c -@@ -92,21 +92,10 @@ static struct ch7xxx_id_struct { - { CH7301_VID, "CH7301" }, - }; - --struct ch7xxx_reg_state { -- uint8_t regs[CH7xxx_NUM_REGS]; --}; -- - struct ch7xxx_priv { - bool quiet; -- -- struct ch7xxx_reg_state save_reg; -- struct ch7xxx_reg_state mode_reg; -- uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT; -- uint8_t save_TLPF, save_TCT, save_PM, save_IDF; - }; - --static void ch7xxx_save(struct intel_dvo_device *dvo); -- - static char *ch7xxx_get_id(uint8_t vid) - { - int i; -@@ -312,42 +301,17 @@ static void ch7xxx_dpms(struct 
intel_dvo_device *dvo, int mode) - - static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) - { -- struct ch7xxx_priv *ch7xxx = dvo->dev_priv; - int i; - - for (i = 0; i < CH7xxx_NUM_REGS; i++) { -+ uint8_t val; - if ((i % 8) == 0 ) - DRM_LOG_KMS("\n %02X: ", i); -- DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]); -+ ch7xxx_readb(dvo, i, &val); -+ DRM_LOG_KMS("%02X ", val); - } - } - --static void ch7xxx_save(struct intel_dvo_device *dvo) --{ -- struct ch7xxx_priv *ch7xxx= dvo->dev_priv; -- -- ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL); -- ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP); -- ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD); -- ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT); -- ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF); -- ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM); -- ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF); --} -- --static void ch7xxx_restore(struct intel_dvo_device *dvo) --{ -- struct ch7xxx_priv *ch7xxx = dvo->dev_priv; -- -- ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL); -- ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP); -- ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD); -- ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT); -- ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF); -- ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF); -- ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM); --} -- - static void ch7xxx_destroy(struct intel_dvo_device *dvo) - { - struct ch7xxx_priv *ch7xxx = dvo->dev_priv; -@@ -365,7 +329,5 @@ struct intel_dvo_dev_ops ch7xxx_ops = { - .mode_set = ch7xxx_mode_set, - .dpms = ch7xxx_dpms, - .dump_regs = ch7xxx_dump_regs, -- .save = ch7xxx_save, -- .restore = ch7xxx_restore, - .destroy = ch7xxx_destroy, - }; -diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c -index 24169e5..a2ec3f4 100644 ---- a/drivers/gpu/drm/i915/dvo_ivch.c -+++ b/drivers/gpu/drm/i915/dvo_ivch.c -@@ -153,9 +153,6 @@ struct ivch_priv { - bool quiet; - - uint16_t width, height; -- -- uint16_t save_VR01; -- uint16_t save_VR40; - }; - - -@@ -405,22 +402,6 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo) - DRM_LOG_KMS("VR8F: 0x%04x\n", val); - } - --static void ivch_save(struct intel_dvo_device *dvo) --{ -- struct ivch_priv *priv = dvo->dev_priv; -- -- ivch_read(dvo, VR01, &priv->save_VR01); -- ivch_read(dvo, VR40, &priv->save_VR40); --} -- --static void ivch_restore(struct intel_dvo_device *dvo) --{ -- struct ivch_priv *priv = dvo->dev_priv; -- -- ivch_write(dvo, VR01, priv->save_VR01); -- ivch_write(dvo, VR40, priv->save_VR40); --} -- - static void ivch_destroy(struct intel_dvo_device *dvo) - { - struct ivch_priv *priv = dvo->dev_priv; -@@ -434,8 +415,6 @@ static void ivch_destroy(struct intel_dvo_device *dvo) - struct intel_dvo_dev_ops ivch_ops= { - .init = ivch_init, - .dpms = ivch_dpms, -- .save = ivch_save, -- .restore = ivch_restore, - .mode_valid = ivch_mode_valid, - .mode_set = ivch_mode_set, - .detect = ivch_detect, -diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c -index 0001c13..9b8e676 100644 ---- a/drivers/gpu/drm/i915/dvo_sil164.c -+++ b/drivers/gpu/drm/i915/dvo_sil164.c -@@ -58,17 +58,9 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - #define SIL164_REGC 0x0c - --struct sil164_save_rec { -- uint8_t reg8; -- uint8_t reg9; -- uint8_t regc; --}; -- - struct sil164_priv { - //I2CDevRec d; - bool quiet; -- struct sil164_save_rec save_regs; -- struct sil164_save_rec mode_regs; - }; - - #define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr)) -@@ -252,34 +244,6 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo) - DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val); - } - --static void sil164_save(struct intel_dvo_device *dvo) --{ -- struct sil164_priv *sil= dvo->dev_priv; -- -- if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8)) -- return; -- -- if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9)) -- return; -- -- if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc)) -- return; -- -- return; --} -- --static void sil164_restore(struct intel_dvo_device *dvo) --{ -- struct sil164_priv *sil = dvo->dev_priv; -- -- /* Restore it powered down initially */ -- sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1); -- -- sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9); -- sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc); -- sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8); --} -- - static void sil164_destroy(struct intel_dvo_device *dvo) - { - struct sil164_priv *sil = dvo->dev_priv; -@@ -297,7 +261,5 @@ struct intel_dvo_dev_ops sil164_ops = { - .mode_set = sil164_mode_set, - .dpms = sil164_dpms, - .dump_regs = sil164_dump_regs, -- .save = sil164_save, -- .restore = sil164_restore, - .destroy = sil164_destroy, - }; -diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c -index c7c391b..66c697b 100644 ---- a/drivers/gpu/drm/i915/dvo_tfp410.c -+++ b/drivers/gpu/drm/i915/dvo_tfp410.c -@@ -86,16 +86,8 @@ - #define TFP410_V_RES_LO 0x3C - #define TFP410_V_RES_HI 0x3D - --struct tfp410_save_rec { -- uint8_t ctl1; -- uint8_t ctl2; --}; -- - struct tfp410_priv { - bool quiet; -- -- struct tfp410_save_rec saved_reg; -- struct tfp410_save_rec mode_reg; - }; - - static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) -@@ -293,28 +285,6 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo) - DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val); - } - --static void tfp410_save(struct intel_dvo_device *dvo) --{ -- struct tfp410_priv *tfp = dvo->dev_priv; -- -- if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1)) -- return; -- -- if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2)) -- return; --} -- --static void tfp410_restore(struct intel_dvo_device *dvo) --{ -- struct tfp410_priv *tfp = dvo->dev_priv; -- -- /* Restore it powered down initially */ -- tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1); -- -- tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2); -- tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1); --} -- - static void tfp410_destroy(struct intel_dvo_device *dvo) - { - struct tfp410_priv *tfp = dvo->dev_priv; -@@ -332,7 +302,5 @@ struct intel_dvo_dev_ops tfp410_ops = { - .mode_set = tfp410_mode_set, - .dpms = tfp410_dpms, - .dump_regs = tfp410_dump_regs, -- .save = tfp410_save, -- .restore = tfp410_restore, - .destroy = tfp410_destroy, - }; -diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c -index a0b8447..52510ad 100644 ---- a/drivers/gpu/drm/i915/i915_debugfs.c -+++ b/drivers/gpu/drm/i915/i915_debugfs.c -@@ -77,7 +77,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) - case ACTIVE_LIST: - seq_printf(m, "Active:\n"); - lock = 
&dev_priv->mm.active_list_lock; -- head = &dev_priv->mm.active_list; -+ head = &dev_priv->render_ring.active_list; - break; - case INACTIVE_LIST: - seq_printf(m, "Inactive:\n"); -@@ -96,19 +96,18 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) - spin_lock(lock); - list_for_each_entry(obj_priv, head, list) - { -- struct drm_gem_object *obj = obj_priv->obj; -- - seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s", -- obj, -+ &obj_priv->base, - get_pin_flag(obj_priv), -- obj->size, -- obj->read_domains, obj->write_domain, -+ obj_priv->base.size, -+ obj_priv->base.read_domains, -+ obj_priv->base.write_domain, - obj_priv->last_rendering_seqno, - obj_priv->dirty ? " dirty" : "", - obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : ""); - -- if (obj->name) -- seq_printf(m, " (name: %d)", obj->name); -+ if (obj_priv->base.name) -+ seq_printf(m, " (name: %d)", obj_priv->base.name); - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) - seq_printf(m, " (fence: %d)", obj_priv->fence_reg); - if (obj_priv->gtt_space != NULL) -@@ -130,7 +129,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data) - struct drm_i915_gem_request *gem_request; - - seq_printf(m, "Request:\n"); -- list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) { -+ list_for_each_entry(gem_request, &dev_priv->render_ring.request_list, -+ list) { - seq_printf(m, " %d @ %d\n", - gem_request->seqno, - (int) (jiffies - gem_request->emitted_jiffies)); -@@ -144,9 +144,9 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) - struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - -- if (dev_priv->hw_status_page != NULL) { -+ if (dev_priv->render_ring.status_page.page_addr != NULL) { - seq_printf(m, "Current sequence: %d\n", -- i915_get_gem_seqno(dev)); -+ i915_get_gem_seqno(dev, &dev_priv->render_ring)); - } else { - seq_printf(m, "Current sequence: hws uninitialized\n"); - } -@@ -196,9 +196,9 @@ static int i915_interrupt_info(struct seq_file *m, void *data) - } - seq_printf(m, "Interrupts received: %d\n", - atomic_read(&dev_priv->irq_received)); -- if (dev_priv->hw_status_page != NULL) { -+ if (dev_priv->render_ring.status_page.page_addr != NULL) { - seq_printf(m, "Current sequence: %d\n", -- i915_get_gem_seqno(dev)); -+ i915_get_gem_seqno(dev, &dev_priv->render_ring)); - } else { - seq_printf(m, "Current sequence: hws uninitialized\n"); - } -@@ -252,7 +252,7 @@ static int i915_hws_info(struct seq_file *m, void *data) - int i; - volatile u32 *hws; - -- hws = (volatile u32 *)dev_priv->hw_status_page; -+ hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr; - if (hws == NULL) - return 0; - -@@ -288,8 +288,9 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data) - - spin_lock(&dev_priv->mm.active_list_lock); - -- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { -- obj = obj_priv->obj; -+ list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list, -+ list) { -+ obj = &obj_priv->base; - if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { - ret = i915_gem_object_get_pages(obj, 0); - if (ret) { -@@ -318,14 +319,14 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) - u8 *virt; - uint32_t *ptr, off; - -- if (!dev_priv->ring.ring_obj) { -+ if (!dev_priv->render_ring.gem_object) { - seq_printf(m, "No ringbuffer setup\n"); - return 0; - } - -- virt = dev_priv->ring.virtual_start; -+ virt = dev_priv->render_ring.virtual_start; - -- for (off = 0; off < dev_priv->ring.Size; off += 4) { -+ for (off = 
0; off < dev_priv->render_ring.size; off += 4) { - ptr = (uint32_t *)(virt + off); - seq_printf(m, "%08x : %08x\n", off, *ptr); - } -@@ -345,7 +346,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) - - seq_printf(m, "RingHead : %08x\n", head); - seq_printf(m, "RingTail : %08x\n", tail); -- seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size); -+ seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size); - seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD)); - - return 0; -@@ -490,11 +491,14 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) - struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - u16 rgvswctl = I915_READ16(MEMSWCTL); -+ u16 rgvstat = I915_READ16(MEMSTAT_ILK); - -- seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3); -- seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1); -- seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf, -- rgvswctl & 0x3f); -+ seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); -+ seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); -+ seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> -+ MEMSTAT_VID_SHIFT); -+ seq_printf(m, "Current P-state: %d\n", -+ (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); - - return 0; - } -@@ -509,7 +513,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused) - - for (i = 0; i < 16; i++) { - delayfreq = I915_READ(PXVFREQ_BASE + i * 4); -- seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq); -+ seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, -+ (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); - } - - return 0; -@@ -542,6 +547,8 @@ static int i915_drpc_info(struct seq_file *m, void *unused) - struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - u32 rgvmodectl = I915_READ(MEMMODECTL); -+ u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY); -+ u16 crstandvid = I915_READ16(CRSTANDVID); - - seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? - "yes" : "no"); -@@ -556,9 +563,13 @@ static int i915_drpc_info(struct seq_file *m, void *unused) - rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); - seq_printf(m, "Starting frequency: P%d\n", - (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); -- seq_printf(m, "Max frequency: P%d\n", -+ seq_printf(m, "Max P-state: P%d\n", - (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); -- seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); -+ seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); -+ seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); -+ seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); -+ seq_printf(m, "Render standby enabled: %s\n", -+ (rstdbyctl & RCX_SW_EXIT) ? 
"no" : "yes"); - - return 0; - } -@@ -567,23 +578,14 @@ static int i915_fbc_status(struct seq_file *m, void *unused) - { - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; -- struct drm_crtc *crtc; - drm_i915_private_t *dev_priv = dev->dev_private; -- bool fbc_enabled = false; - -- if (!dev_priv->display.fbc_enabled) { -+ if (!I915_HAS_FBC(dev)) { - seq_printf(m, "FBC unsupported on this chipset\n"); - return 0; - } - -- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -- if (!crtc->enabled) -- continue; -- if (dev_priv->display.fbc_enabled(crtc)) -- fbc_enabled = true; -- } -- -- if (fbc_enabled) { -+ if (intel_fbc_enabled(dev)) { - seq_printf(m, "FBC enabled\n"); - } else { - seq_printf(m, "FBC disabled: "); -@@ -631,6 +633,36 @@ static int i915_sr_status(struct seq_file *m, void *unused) - return 0; - } - -+static int i915_emon_status(struct seq_file *m, void *unused) -+{ -+ struct drm_info_node *node = (struct drm_info_node *) m->private; -+ struct drm_device *dev = node->minor->dev; -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ unsigned long temp, chipset, gfx; -+ -+ temp = i915_mch_val(dev_priv); -+ chipset = i915_chipset_val(dev_priv); -+ gfx = i915_gfx_val(dev_priv); -+ -+ seq_printf(m, "GMCH temp: %ld\n", temp); -+ seq_printf(m, "Chipset power: %ld\n", chipset); -+ seq_printf(m, "GFX power: %ld\n", gfx); -+ seq_printf(m, "Total power: %ld\n", chipset + gfx); -+ -+ return 0; -+} -+ -+static int i915_gfxec(struct seq_file *m, void *unused) -+{ -+ struct drm_info_node *node = (struct drm_info_node *) m->private; -+ struct drm_device *dev = node->minor->dev; -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ -+ seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); -+ -+ return 0; -+} -+ - static int - i915_wedged_open(struct inode *inode, - struct file *filp) -@@ -753,6 +785,8 @@ static struct drm_info_list i915_debugfs_list[] = { - {"i915_delayfreq_table", i915_delayfreq_table, 0}, - {"i915_inttoext_table", i915_inttoext_table, 0}, - {"i915_drpc_info", i915_drpc_info, 0}, -+ {"i915_emon_status", i915_emon_status, 0}, -+ {"i915_gfxec", i915_gfxec, 0}, - {"i915_fbc_status", i915_fbc_status, 0}, - {"i915_sr_status", i915_sr_status, 0}, - }; -diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c -index c3cfafc..59a2bf8 100644 ---- a/drivers/gpu/drm/i915/i915_dma.c -+++ b/drivers/gpu/drm/i915/i915_dma.c -@@ -40,84 +40,6 @@ - #include - #include - --/* Really want an OS-independent resettable timer. Would like to have -- * this loop run for (eg) 3 sec, but have the timer reset every time -- * the head pointer changes, so that EBUSY only happens if the ring -- * actually stalls for (eg) 3 seconds. -- */ --int i915_wait_ring(struct drm_device * dev, int n, const char *caller) --{ -- drm_i915_private_t *dev_priv = dev->dev_private; -- drm_i915_ring_buffer_t *ring = &(dev_priv->ring); -- u32 acthd_reg = IS_I965G(dev) ? 
ACTHD_I965 : ACTHD; -- u32 last_acthd = I915_READ(acthd_reg); -- u32 acthd; -- u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR; -- int i; -- -- trace_i915_ring_wait_begin (dev); -- -- for (i = 0; i < 100000; i++) { -- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; -- acthd = I915_READ(acthd_reg); -- ring->space = ring->head - (ring->tail + 8); -- if (ring->space < 0) -- ring->space += ring->Size; -- if (ring->space >= n) { -- trace_i915_ring_wait_end (dev); -- return 0; -- } -- -- if (dev->primary->master) { -- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; -- if (master_priv->sarea_priv) -- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; -- } -- -- -- if (ring->head != last_head) -- i = 0; -- if (acthd != last_acthd) -- i = 0; -- -- last_head = ring->head; -- last_acthd = acthd; -- msleep_interruptible(10); -- -- } -- -- trace_i915_ring_wait_end (dev); -- return -EBUSY; --} -- --/* As a ringbuffer is only allowed to wrap between instructions, fill -- * the tail with NOOPs. -- */ --int i915_wrap_ring(struct drm_device *dev) --{ -- drm_i915_private_t *dev_priv = dev->dev_private; -- volatile unsigned int *virt; -- int rem; -- -- rem = dev_priv->ring.Size - dev_priv->ring.tail; -- if (dev_priv->ring.space < rem) { -- int ret = i915_wait_ring(dev, rem, __func__); -- if (ret) -- return ret; -- } -- dev_priv->ring.space -= rem; -- -- virt = (unsigned int *) -- (dev_priv->ring.virtual_start + dev_priv->ring.tail); -- rem /= 4; -- while (rem--) -- *virt++ = MI_NOOP; -- -- dev_priv->ring.tail = 0; -- -- return 0; --} -- - /** - * Sets up the hardware status page for devices that need a physical address - * in the register. -@@ -133,10 +55,11 @@ static int i915_init_phys_hws(struct drm_device *dev) - DRM_ERROR("Can not allocate hardware status page\n"); - return -ENOMEM; - } -- dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; -+ dev_priv->render_ring.status_page.page_addr -+ = dev_priv->status_page_dmah->vaddr; - dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; - -- memset(dev_priv->hw_status_page, 0, PAGE_SIZE); -+ memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE); - - if (IS_I965G(dev)) - dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & -@@ -159,8 +82,8 @@ static void i915_free_hws(struct drm_device *dev) - dev_priv->status_page_dmah = NULL; - } - -- if (dev_priv->status_gfx_addr) { -- dev_priv->status_gfx_addr = 0; -+ if (dev_priv->render_ring.status_page.gfx_addr) { -+ dev_priv->render_ring.status_page.gfx_addr = 0; - drm_core_ioremapfree(&dev_priv->hws_map, dev); - } - -@@ -172,7 +95,7 @@ void i915_kernel_lost_context(struct drm_device * dev) - { - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_master_private *master_priv; -- drm_i915_ring_buffer_t *ring = &(dev_priv->ring); -+ struct intel_ring_buffer *ring = &dev_priv->render_ring; - - /* - * We should never lose context on the ring with modesetting -@@ -185,7 +108,7 @@ void i915_kernel_lost_context(struct drm_device * dev) - ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; - ring->space = ring->head - (ring->tail + 8); - if (ring->space < 0) -- ring->space += ring->Size; -+ ring->space += ring->size; - - if (!dev->primary->master) - return; -@@ -205,12 +128,9 @@ static int i915_dma_cleanup(struct drm_device * dev) - if (dev->irq_enabled) - drm_irq_uninstall(dev); - -- if (dev_priv->ring.virtual_start) { -- drm_core_ioremapfree(&dev_priv->ring.map, dev); -- dev_priv->ring.virtual_start = NULL; -- dev_priv->ring.map.handle = 
NULL; -- dev_priv->ring.map.size = 0; -- } -+ intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); -+ if (HAS_BSD(dev)) -+ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); - - /* Clear the HWS virtual address at teardown */ - if (I915_NEED_GFX_HWS(dev)) -@@ -233,24 +153,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) - } - - if (init->ring_size != 0) { -- if (dev_priv->ring.ring_obj != NULL) { -+ if (dev_priv->render_ring.gem_object != NULL) { - i915_dma_cleanup(dev); - DRM_ERROR("Client tried to initialize ringbuffer in " - "GEM mode\n"); - return -EINVAL; - } - -- dev_priv->ring.Size = init->ring_size; -+ dev_priv->render_ring.size = init->ring_size; - -- dev_priv->ring.map.offset = init->ring_start; -- dev_priv->ring.map.size = init->ring_size; -- dev_priv->ring.map.type = 0; -- dev_priv->ring.map.flags = 0; -- dev_priv->ring.map.mtrr = 0; -+ dev_priv->render_ring.map.offset = init->ring_start; -+ dev_priv->render_ring.map.size = init->ring_size; -+ dev_priv->render_ring.map.type = 0; -+ dev_priv->render_ring.map.flags = 0; -+ dev_priv->render_ring.map.mtrr = 0; - -- drm_core_ioremap_wc(&dev_priv->ring.map, dev); -+ drm_core_ioremap_wc(&dev_priv->render_ring.map, dev); - -- if (dev_priv->ring.map.handle == NULL) { -+ if (dev_priv->render_ring.map.handle == NULL) { - i915_dma_cleanup(dev); - DRM_ERROR("can not ioremap virtual address for" - " ring buffer\n"); -@@ -258,7 +178,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) - } - } - -- dev_priv->ring.virtual_start = dev_priv->ring.map.handle; -+ dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle; - - dev_priv->cpp = init->cpp; - dev_priv->back_offset = init->back_offset; -@@ -278,26 +198,29 @@ static int i915_dma_resume(struct drm_device * dev) - { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - -+ struct intel_ring_buffer *ring; - DRM_DEBUG_DRIVER("%s\n", __func__); - -- if (dev_priv->ring.map.handle == NULL) { -+ ring = &dev_priv->render_ring; -+ -+ if (ring->map.handle == NULL) { - DRM_ERROR("can not ioremap virtual address for" - " ring buffer\n"); - return -ENOMEM; - } - - /* Program Hardware Status Page */ -- if (!dev_priv->hw_status_page) { -+ if (!ring->status_page.page_addr) { - DRM_ERROR("Can not find hardware status page\n"); - return -EINVAL; - } - DRM_DEBUG_DRIVER("hw status page @ %p\n", -- dev_priv->hw_status_page); -- -- if (dev_priv->status_gfx_addr != 0) -- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); -+ ring->status_page.page_addr); -+ if (ring->status_page.gfx_addr != 0) -+ ring->setup_status_page(dev, ring); - else - I915_WRITE(HWS_PGA, dev_priv->dma_status_page); -+ - DRM_DEBUG_DRIVER("Enabled hardware status page\n"); - - return 0; -@@ -407,9 +330,8 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) - { - drm_i915_private_t *dev_priv = dev->dev_private; - int i; -- RING_LOCALS; - -- if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8) -+ if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8) - return -EINVAL; - - BEGIN_LP_RING((dwords+1)&~1); -@@ -442,9 +364,7 @@ i915_emit_box(struct drm_device *dev, - struct drm_clip_rect *boxes, - int i, int DR1, int DR4) - { -- drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_clip_rect box = boxes[i]; -- RING_LOCALS; - - if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { - DRM_ERROR("Bad box %d,%d..%d,%d\n", -@@ -481,7 +401,6 @@ static void i915_emit_breadcrumb(struct drm_device 
*dev) - { - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; -- RING_LOCALS; - - dev_priv->counter++; - if (dev_priv->counter > 0x7FFFFFFFUL) -@@ -535,10 +454,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, - drm_i915_batchbuffer_t * batch, - struct drm_clip_rect *cliprects) - { -- drm_i915_private_t *dev_priv = dev->dev_private; - int nbox = batch->num_cliprects; - int i = 0, count; -- RING_LOCALS; - - if ((batch->start | batch->used) & 0x7) { - DRM_ERROR("alignment"); -@@ -587,7 +504,6 @@ static int i915_dispatch_flip(struct drm_device * dev) - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_master_private *master_priv = - dev->primary->master->driver_priv; -- RING_LOCALS; - - if (!master_priv->sarea_priv) - return -EINVAL; -@@ -640,7 +556,8 @@ static int i915_quiescent(struct drm_device * dev) - drm_i915_private_t *dev_priv = dev->dev_private; - - i915_kernel_lost_context(dev); -- return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__); -+ return intel_wait_ring_buffer(dev, &dev_priv->render_ring, -+ dev_priv->render_ring.size - 8); - } - - static int i915_flush_ioctl(struct drm_device *dev, void *data, -@@ -827,6 +744,9 @@ static int i915_getparam(struct drm_device *dev, void *data, - /* depends on GEM */ - value = dev_priv->has_gem; - break; -+ case I915_PARAM_HAS_BSD: -+ value = HAS_BSD(dev); -+ break; - default: - DRM_DEBUG_DRIVER("Unknown parameter %d\n", - param->param); -@@ -882,6 +802,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, - { - drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_hws_addr_t *hws = data; -+ struct intel_ring_buffer *ring = &dev_priv->render_ring; - - if (!I915_NEED_GFX_HWS(dev)) - return -EINVAL; -@@ -898,7 +819,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, - - DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); - -- dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); -+ ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); - - dev_priv->hws_map.offset = dev->agp->base + hws->addr; - dev_priv->hws_map.size = 4*1024; -@@ -909,19 +830,19 @@ static int i915_set_status_page(struct drm_device *dev, void *data, - drm_core_ioremap_wc(&dev_priv->hws_map, dev); - if (dev_priv->hws_map.handle == NULL) { - i915_dma_cleanup(dev); -- dev_priv->status_gfx_addr = 0; -+ ring->status_page.gfx_addr = 0; - DRM_ERROR("can not ioremap virtual address for" - " G33 hw status page\n"); - return -ENOMEM; - } -- dev_priv->hw_status_page = dev_priv->hws_map.handle; -+ ring->status_page.page_addr = dev_priv->hws_map.handle; -+ memset(ring->status_page.page_addr, 0, PAGE_SIZE); -+ I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); - -- memset(dev_priv->hw_status_page, 0, PAGE_SIZE); -- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); - DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", -- dev_priv->status_gfx_addr); -+ ring->status_page.gfx_addr); - DRM_DEBUG_DRIVER("load hws at %p\n", -- dev_priv->hw_status_page); -+ ring->status_page.page_addr); - return 0; - } - -@@ -1357,13 +1278,12 @@ static void i915_setup_compression(struct drm_device *dev, int size) - - dev_priv->cfb_size = size; - -+ intel_disable_fbc(dev); - dev_priv->compressed_fb = compressed_fb; - - if (IS_GM45(dev)) { -- g4x_disable_fbc(dev); - I915_WRITE(DPFC_CB_BASE, compressed_fb->start); - } else { -- i8xx_disable_fbc(dev); - I915_WRITE(FBC_CFB_BASE, cfb_base); - I915_WRITE(FBC_LL_BASE, ll_base); - 
dev_priv->compressed_llb = compressed_llb; -@@ -1400,12 +1320,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ - struct drm_device *dev = pci_get_drvdata(pdev); - pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; - if (state == VGA_SWITCHEROO_ON) { -- printk(KERN_INFO "i915: switched off\n"); -+ printk(KERN_INFO "i915: switched on\n"); - /* i915 resume handler doesn't set to D0 */ - pci_set_power_state(dev->pdev, PCI_D0); - i915_resume(dev); -+ drm_kms_helper_poll_enable(dev); - } else { - printk(KERN_ERR "i915: switched off\n"); -+ drm_kms_helper_poll_disable(dev); - i915_suspend(dev, pmm); - } - } -@@ -1480,23 +1402,23 @@ static int i915_load_modeset_init(struct drm_device *dev, - /* if we have > 1 VGA cards, then disable the radeon VGA resources */ - ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); - if (ret) -- goto destroy_ringbuffer; -+ goto cleanup_ringbuffer; - - ret = vga_switcheroo_register_client(dev->pdev, - i915_switcheroo_set_state, - i915_switcheroo_can_switch); - if (ret) -- goto destroy_ringbuffer; -+ goto cleanup_vga_client; - - /* IIR "flip pending" bit means done if this bit is set */ - if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE)) - dev_priv->flip_pending_is_done = true; - - intel_modeset_init(dev); - - ret = drm_irq_install(dev); - if (ret) -- goto destroy_ringbuffer; -+ goto cleanup_vga_switcheroo; - - /* Always safe in the mode setting case. */ - /* FIXME: do pre/post-mode set stuff in core KMS code */ -@@ -1504,11 +1426,20 @@ static int i915_load_modeset_init(struct drm_device *dev, - - I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); - -- drm_helper_initial_config(dev); -+ ret = intel_fbdev_init(dev); -+ if (ret) -+ goto cleanup_irq; - -+ drm_kms_helper_poll_init(dev); - return 0; - --destroy_ringbuffer: -+cleanup_irq: -+ drm_irq_uninstall(dev); -+cleanup_vga_switcheroo: -+ vga_switcheroo_unregister_client(dev->pdev); -+cleanup_vga_client: -+ vga_client_register(dev->pdev, NULL, NULL, NULL); -+cleanup_ringbuffer: - mutex_lock(&dev->struct_mutex); - i915_gem_cleanup_ringbuffer(dev); - mutex_unlock(&dev->struct_mutex); -@@ -1540,14 +1471,11 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master) - master->driver_priv = NULL; - } - --static void i915_get_mem_freq(struct drm_device *dev) -+static void i915_pineview_get_mem_freq(struct drm_device *dev) - { - drm_i915_private_t *dev_priv = dev->dev_private; - u32 tmp; - -- if (!IS_PINEVIEW(dev)) -- return; -- - tmp = I915_READ(CLKCFG); - - switch (tmp & CLKCFG_FSB_MASK) { -@@ -1576,8 +1504,525 @@ static void i915_get_mem_freq(struct drm_device *dev) - dev_priv->mem_freq = 800; - break; - } -+ -+ /* detect pineview DDR3 setting */ -+ tmp = I915_READ(CSHRDDR3CTL); -+ dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 
1 : 0; -+} -+ -+static void i915_ironlake_get_mem_freq(struct drm_device *dev) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ u16 ddrpll, csipll; -+ -+ ddrpll = I915_READ16(DDRMPLL1); -+ csipll = I915_READ16(CSIPLL0); -+ -+ switch (ddrpll & 0xff) { -+ case 0xc: -+ dev_priv->mem_freq = 800; -+ break; -+ case 0x10: -+ dev_priv->mem_freq = 1066; -+ break; -+ case 0x14: -+ dev_priv->mem_freq = 1333; -+ break; -+ case 0x18: -+ dev_priv->mem_freq = 1600; -+ break; -+ default: -+ DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n", -+ ddrpll & 0xff); -+ dev_priv->mem_freq = 0; -+ break; -+ } -+ -+ dev_priv->r_t = dev_priv->mem_freq; -+ -+ switch (csipll & 0x3ff) { -+ case 0x00c: -+ dev_priv->fsb_freq = 3200; -+ break; -+ case 0x00e: -+ dev_priv->fsb_freq = 3733; -+ break; -+ case 0x010: -+ dev_priv->fsb_freq = 4266; -+ break; -+ case 0x012: -+ dev_priv->fsb_freq = 4800; -+ break; -+ case 0x014: -+ dev_priv->fsb_freq = 5333; -+ break; -+ case 0x016: -+ dev_priv->fsb_freq = 5866; -+ break; -+ case 0x018: -+ dev_priv->fsb_freq = 6400; -+ break; -+ default: -+ DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n", -+ csipll & 0x3ff); -+ dev_priv->fsb_freq = 0; -+ break; -+ } -+ -+ if (dev_priv->fsb_freq == 3200) { -+ dev_priv->c_m = 0; -+ } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { -+ dev_priv->c_m = 1; -+ } else { -+ dev_priv->c_m = 2; -+ } -+} -+ -+struct v_table { -+ u8 vid; -+ unsigned long vd; /* in .1 mil */ -+ unsigned long vm; /* in .1 mil */ -+ u8 pvid; -+}; -+ -+static struct v_table v_table[] = { -+ { 0, 16125, 15000, 0x7f, }, -+ { 1, 16000, 14875, 0x7e, }, -+ { 2, 15875, 14750, 0x7d, }, -+ { 3, 15750, 14625, 0x7c, }, -+ { 4, 15625, 14500, 0x7b, }, -+ { 5, 15500, 14375, 0x7a, }, -+ { 6, 15375, 14250, 0x79, }, -+ { 7, 15250, 14125, 0x78, }, -+ { 8, 15125, 14000, 0x77, }, -+ { 9, 15000, 13875, 0x76, }, -+ { 10, 14875, 13750, 0x75, }, -+ { 11, 14750, 13625, 0x74, }, -+ { 12, 14625, 13500, 0x73, }, -+ { 13, 14500, 13375, 0x72, }, -+ { 14, 14375, 13250, 0x71, }, -+ { 15, 14250, 13125, 0x70, }, -+ { 16, 14125, 13000, 0x6f, }, -+ { 17, 14000, 12875, 0x6e, }, -+ { 18, 13875, 12750, 0x6d, }, -+ { 19, 13750, 12625, 0x6c, }, -+ { 20, 13625, 12500, 0x6b, }, -+ { 21, 13500, 12375, 0x6a, }, -+ { 22, 13375, 12250, 0x69, }, -+ { 23, 13250, 12125, 0x68, }, -+ { 24, 13125, 12000, 0x67, }, -+ { 25, 13000, 11875, 0x66, }, -+ { 26, 12875, 11750, 0x65, }, -+ { 27, 12750, 11625, 0x64, }, -+ { 28, 12625, 11500, 0x63, }, -+ { 29, 12500, 11375, 0x62, }, -+ { 30, 12375, 11250, 0x61, }, -+ { 31, 12250, 11125, 0x60, }, -+ { 32, 12125, 11000, 0x5f, }, -+ { 33, 12000, 10875, 0x5e, }, -+ { 34, 11875, 10750, 0x5d, }, -+ { 35, 11750, 10625, 0x5c, }, -+ { 36, 11625, 10500, 0x5b, }, -+ { 37, 11500, 10375, 0x5a, }, -+ { 38, 11375, 10250, 0x59, }, -+ { 39, 11250, 10125, 0x58, }, -+ { 40, 11125, 10000, 0x57, }, -+ { 41, 11000, 9875, 0x56, }, -+ { 42, 10875, 9750, 0x55, }, -+ { 43, 10750, 9625, 0x54, }, -+ { 44, 10625, 9500, 0x53, }, -+ { 45, 10500, 9375, 0x52, }, -+ { 46, 10375, 9250, 0x51, }, -+ { 47, 10250, 9125, 0x50, }, -+ { 48, 10125, 9000, 0x4f, }, -+ { 49, 10000, 8875, 0x4e, }, -+ { 50, 9875, 8750, 0x4d, }, -+ { 51, 9750, 8625, 0x4c, }, -+ { 52, 9625, 8500, 0x4b, }, -+ { 53, 9500, 8375, 0x4a, }, -+ { 54, 9375, 8250, 0x49, }, -+ { 55, 9250, 8125, 0x48, }, -+ { 56, 9125, 8000, 0x47, }, -+ { 57, 9000, 7875, 0x46, }, -+ { 58, 8875, 7750, 0x45, }, -+ { 59, 8750, 7625, 0x44, }, -+ { 60, 8625, 7500, 0x43, }, -+ { 61, 8500, 7375, 0x42, }, -+ { 62, 8375, 7250, 0x41, }, -+ { 63, 8250, 7125, 0x40, 
}, -+ { 64, 8125, 7000, 0x3f, }, -+ { 65, 8000, 6875, 0x3e, }, -+ { 66, 7875, 6750, 0x3d, }, -+ { 67, 7750, 6625, 0x3c, }, -+ { 68, 7625, 6500, 0x3b, }, -+ { 69, 7500, 6375, 0x3a, }, -+ { 70, 7375, 6250, 0x39, }, -+ { 71, 7250, 6125, 0x38, }, -+ { 72, 7125, 6000, 0x37, }, -+ { 73, 7000, 5875, 0x36, }, -+ { 74, 6875, 5750, 0x35, }, -+ { 75, 6750, 5625, 0x34, }, -+ { 76, 6625, 5500, 0x33, }, -+ { 77, 6500, 5375, 0x32, }, -+ { 78, 6375, 5250, 0x31, }, -+ { 79, 6250, 5125, 0x30, }, -+ { 80, 6125, 5000, 0x2f, }, -+ { 81, 6000, 4875, 0x2e, }, -+ { 82, 5875, 4750, 0x2d, }, -+ { 83, 5750, 4625, 0x2c, }, -+ { 84, 5625, 4500, 0x2b, }, -+ { 85, 5500, 4375, 0x2a, }, -+ { 86, 5375, 4250, 0x29, }, -+ { 87, 5250, 4125, 0x28, }, -+ { 88, 5125, 4000, 0x27, }, -+ { 89, 5000, 3875, 0x26, }, -+ { 90, 4875, 3750, 0x25, }, -+ { 91, 4750, 3625, 0x24, }, -+ { 92, 4625, 3500, 0x23, }, -+ { 93, 4500, 3375, 0x22, }, -+ { 94, 4375, 3250, 0x21, }, -+ { 95, 4250, 3125, 0x20, }, -+ { 96, 4125, 3000, 0x1f, }, -+ { 97, 4125, 3000, 0x1e, }, -+ { 98, 4125, 3000, 0x1d, }, -+ { 99, 4125, 3000, 0x1c, }, -+ { 100, 4125, 3000, 0x1b, }, -+ { 101, 4125, 3000, 0x1a, }, -+ { 102, 4125, 3000, 0x19, }, -+ { 103, 4125, 3000, 0x18, }, -+ { 104, 4125, 3000, 0x17, }, -+ { 105, 4125, 3000, 0x16, }, -+ { 106, 4125, 3000, 0x15, }, -+ { 107, 4125, 3000, 0x14, }, -+ { 108, 4125, 3000, 0x13, }, -+ { 109, 4125, 3000, 0x12, }, -+ { 110, 4125, 3000, 0x11, }, -+ { 111, 4125, 3000, 0x10, }, -+ { 112, 4125, 3000, 0x0f, }, -+ { 113, 4125, 3000, 0x0e, }, -+ { 114, 4125, 3000, 0x0d, }, -+ { 115, 4125, 3000, 0x0c, }, -+ { 116, 4125, 3000, 0x0b, }, -+ { 117, 4125, 3000, 0x0a, }, -+ { 118, 4125, 3000, 0x09, }, -+ { 119, 4125, 3000, 0x08, }, -+ { 120, 1125, 0, 0x07, }, -+ { 121, 1000, 0, 0x06, }, -+ { 122, 875, 0, 0x05, }, -+ { 123, 750, 0, 0x04, }, -+ { 124, 625, 0, 0x03, }, -+ { 125, 500, 0, 0x02, }, -+ { 126, 375, 0, 0x01, }, -+ { 127, 0, 0, 0x00, }, -+}; -+ -+struct cparams { -+ int i; -+ int t; -+ int m; -+ int c; -+}; -+ -+static struct cparams cparams[] = { -+ { 1, 1333, 301, 28664 }, -+ { 1, 1066, 294, 24460 }, -+ { 1, 800, 294, 25192 }, -+ { 0, 1333, 276, 27605 }, -+ { 0, 1066, 276, 27605 }, -+ { 0, 800, 231, 23784 }, -+}; -+ -+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) -+{ -+ u64 total_count, diff, ret; -+ u32 count1, count2, count3, m = 0, c = 0; -+ unsigned long now = jiffies_to_msecs(jiffies), diff1; -+ int i; -+ -+ diff1 = now - dev_priv->last_time1; -+ -+ count1 = I915_READ(DMIEC); -+ count2 = I915_READ(DDREC); -+ count3 = I915_READ(CSIEC); -+ -+ total_count = count1 + count2 + count3; -+ -+ /* FIXME: handle per-counter overflow */ -+ if (total_count < dev_priv->last_count1) { -+ diff = ~0UL - dev_priv->last_count1; -+ diff += total_count; -+ } else { -+ diff = total_count - dev_priv->last_count1; -+ } -+ -+ for (i = 0; i < ARRAY_SIZE(cparams); i++) { -+ if (cparams[i].i == dev_priv->c_m && -+ cparams[i].t == dev_priv->r_t) { -+ m = cparams[i].m; -+ c = cparams[i].c; -+ break; -+ } -+ } -+ -+ div_u64(diff, diff1); -+ ret = ((m * diff) + c); -+ div_u64(ret, 10); -+ -+ dev_priv->last_count1 = total_count; -+ dev_priv->last_time1 = now; -+ -+ return ret; -+} -+ -+unsigned long i915_mch_val(struct drm_i915_private *dev_priv) -+{ -+ unsigned long m, x, b; -+ u32 tsfs; -+ -+ tsfs = I915_READ(TSFS); -+ -+ m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); -+ x = I915_READ8(TR1); -+ -+ b = tsfs & TSFS_INTR_MASK; -+ -+ return ((m * x) / 127) - b; - } - -+static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 
pxvid) -+{ -+ unsigned long val = 0; -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(v_table); i++) { -+ if (v_table[i].pvid == pxvid) { -+ if (IS_MOBILE(dev_priv->dev)) -+ val = v_table[i].vm; -+ else -+ val = v_table[i].vd; -+ } -+ } -+ -+ return val; -+} -+ -+void i915_update_gfx_val(struct drm_i915_private *dev_priv) -+{ -+ struct timespec now, diff1; -+ u64 diff; -+ unsigned long diffms; -+ u32 count; -+ -+ getrawmonotonic(&now); -+ diff1 = timespec_sub(now, dev_priv->last_time2); -+ -+ /* Don't divide by 0 */ -+ diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; -+ if (!diffms) -+ return; -+ -+ count = I915_READ(GFXEC); -+ -+ if (count < dev_priv->last_count2) { -+ diff = ~0UL - dev_priv->last_count2; -+ diff += count; -+ } else { -+ diff = count - dev_priv->last_count2; -+ } -+ -+ dev_priv->last_count2 = count; -+ dev_priv->last_time2 = now; -+ -+ /* More magic constants... */ -+ diff = diff * 1181; -+ div_u64(diff, diffms * 10); -+ dev_priv->gfx_power = diff; -+} -+ -+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) -+{ -+ unsigned long t, corr, state1, corr2, state2; -+ u32 pxvid, ext_v; -+ -+ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); -+ pxvid = (pxvid >> 24) & 0x7f; -+ ext_v = pvid_to_extvid(dev_priv, pxvid); -+ -+ state1 = ext_v; -+ -+ t = i915_mch_val(dev_priv); -+ -+ /* Revel in the empirically derived constants */ -+ -+ /* Correction factor in 1/100000 units */ -+ if (t > 80) -+ corr = ((t * 2349) + 135940); -+ else if (t >= 50) -+ corr = ((t * 964) + 29317); -+ else /* < 50 */ -+ corr = ((t * 301) + 1004); -+ -+ corr = corr * ((150142 * state1) / 10000 - 78642); -+ corr /= 100000; -+ corr2 = (corr * dev_priv->corr); -+ -+ state2 = (corr2 * state1) / 10000; -+ state2 /= 100; /* convert to mW */ -+ -+ i915_update_gfx_val(dev_priv); -+ -+ return dev_priv->gfx_power + state2; -+} -+ -+/* Global for IPS driver to get at the current i915 device */ -+static struct drm_i915_private *i915_mch_dev; -+/* -+ * Lock protecting IPS related data structures -+ * - i915_mch_dev -+ * - dev_priv->max_delay -+ * - dev_priv->min_delay -+ * - dev_priv->fmax -+ * - dev_priv->gpu_busy -+ */ -+DEFINE_SPINLOCK(mchdev_lock); -+ -+/** -+ * i915_read_mch_val - return value for IPS use -+ * -+ * Calculate and return a value for the IPS driver to use when deciding whether -+ * we have thermal and power headroom to increase CPU or GPU power budget. -+ */ -+unsigned long i915_read_mch_val(void) -+{ -+ struct drm_i915_private *dev_priv; -+ unsigned long chipset_val, graphics_val, ret = 0; -+ -+ spin_lock(&mchdev_lock); -+ if (!i915_mch_dev) -+ goto out_unlock; -+ dev_priv = i915_mch_dev; -+ -+ chipset_val = i915_chipset_val(dev_priv); -+ graphics_val = i915_gfx_val(dev_priv); -+ -+ ret = chipset_val + graphics_val; -+ -+out_unlock: -+ spin_unlock(&mchdev_lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(i915_read_mch_val); -+ -+/** -+ * i915_gpu_raise - raise GPU frequency limit -+ * -+ * Raise the limit; IPS indicates we have thermal headroom. 
-+ */ -+bool i915_gpu_raise(void) -+{ -+ struct drm_i915_private *dev_priv; -+ bool ret = true; -+ -+ spin_lock(&mchdev_lock); -+ if (!i915_mch_dev) { -+ ret = false; -+ goto out_unlock; -+ } -+ dev_priv = i915_mch_dev; -+ -+ if (dev_priv->max_delay > dev_priv->fmax) -+ dev_priv->max_delay--; -+ -+out_unlock: -+ spin_unlock(&mchdev_lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(i915_gpu_raise); -+ -+/** -+ * i915_gpu_lower - lower GPU frequency limit -+ * -+ * IPS indicates we're close to a thermal limit, so throttle back the GPU -+ * frequency maximum. -+ */ -+bool i915_gpu_lower(void) -+{ -+ struct drm_i915_private *dev_priv; -+ bool ret = true; -+ -+ spin_lock(&mchdev_lock); -+ if (!i915_mch_dev) { -+ ret = false; -+ goto out_unlock; -+ } -+ dev_priv = i915_mch_dev; -+ -+ if (dev_priv->max_delay < dev_priv->min_delay) -+ dev_priv->max_delay++; -+ -+out_unlock: -+ spin_unlock(&mchdev_lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(i915_gpu_lower); -+ -+/** -+ * i915_gpu_busy - indicate GPU business to IPS -+ * -+ * Tell the IPS driver whether or not the GPU is busy. -+ */ -+bool i915_gpu_busy(void) -+{ -+ struct drm_i915_private *dev_priv; -+ bool ret = false; -+ -+ spin_lock(&mchdev_lock); -+ if (!i915_mch_dev) -+ goto out_unlock; -+ dev_priv = i915_mch_dev; -+ -+ ret = dev_priv->busy; -+ -+out_unlock: -+ spin_unlock(&mchdev_lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(i915_gpu_busy); -+ -+/** -+ * i915_gpu_turbo_disable - disable graphics turbo -+ * -+ * Disable graphics turbo by resetting the max frequency and setting the -+ * current frequency to the default. -+ */ -+bool i915_gpu_turbo_disable(void) -+{ -+ struct drm_i915_private *dev_priv; -+ bool ret = true; -+ -+ spin_lock(&mchdev_lock); -+ if (!i915_mch_dev) { -+ ret = false; -+ goto out_unlock; -+ } -+ dev_priv = i915_mch_dev; -+ -+ dev_priv->max_delay = dev_priv->fstart; -+ -+ if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) -+ ret = false; -+ -+out_unlock: -+ spin_unlock(&mchdev_lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); -+ - /** - * i915_driver_load - setup chip and create an initial config - * @dev: DRM device -@@ -1591,11 +2036,10 @@ static void i915_get_mem_freq(struct drm_device *dev) - */ - int i915_driver_load(struct drm_device *dev, unsigned long flags) - { -- struct drm_i915_private *dev_priv = dev->dev_private; -+ struct drm_i915_private *dev_priv; - resource_size_t base, size; - int ret = 0, mmio_bar; - uint32_t agp_size, prealloc_size, prealloc_start; -- - /* i915 has 4 more counters */ - dev->counters += 4; - dev->types[6] = _DRM_STAT_IRQ; -@@ -1673,6 +2117,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) - dev_priv->has_gem = 0; - } - -+ if (dev_priv->has_gem == 0 && -+ drm_core_check_feature(dev, DRIVER_MODESET)) { -+ DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n"); -+ ret = -ENODEV; -+ goto out_iomapfree; -+ } -+ - dev->driver->get_vblank_counter = i915_get_vblank_counter; - dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ - if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { -@@ -1692,7 +2143,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) - goto out_workqueue_free; - } - -- i915_get_mem_freq(dev); -+ if (IS_PINEVIEW(dev)) -+ i915_pineview_get_mem_freq(dev); -+ else if (IS_IRONLAKE(dev)) -+ i915_ironlake_get_mem_freq(dev); - - /* On the 945G/GM, the chipset reports the MSI capability on the - * integrated graphics even though the support isn't actually there -@@ -1710,7 +2164,6 
@@ int i915_driver_load(struct drm_device *dev, unsigned long flags) - - spin_lock_init(&dev_priv->user_irq_lock); - spin_lock_init(&dev_priv->error_lock); -- dev_priv->user_irq_refcount = 0; - dev_priv->trace_irq_seqno = 0; - - ret = drm_vblank_init(dev, I915_NUM_PIPE); -@@ -1723,6 +2176,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) - /* Start out suspended */ - dev_priv->mm.suspended = 1; - -+ intel_detect_pch(dev); -+ - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = i915_load_modeset_init(dev, prealloc_start, - prealloc_size, agp_size); -@@ -1737,6 +2192,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) - - setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, - (unsigned long) dev); -+ -+ spin_lock(&mchdev_lock); -+ i915_mch_dev = dev_priv; -+ dev_priv->mchdev_lock = &mchdev_lock; -+ spin_unlock(&mchdev_lock); -+ - return 0; - - out_workqueue_free: -@@ -1758,6 +2219,10 @@ int i915_driver_unload(struct drm_device *dev) - - i915_destroy_error_state(dev); - -+ spin_lock(&mchdev_lock); -+ i915_mch_dev = NULL; -+ spin_unlock(&mchdev_lock); -+ - destroy_workqueue(dev_priv->wq); - del_timer_sync(&dev_priv->hangcheck_timer); - -@@ -1769,6 +2234,8 @@ int i915_driver_unload(struct drm_device *dev) - } - - if (drm_core_check_feature(dev, DRIVER_MODESET)) { -+ intel_modeset_cleanup(dev); -+ - /* - * free the memory space allocated for the child device - * config parsed from VBT -@@ -1792,8 +2259,6 @@ int i915_driver_unload(struct drm_device *dev) - intel_opregion_free(dev, 0); - - if (drm_core_check_feature(dev, DRIVER_MODESET)) { -- intel_modeset_cleanup(dev); -- - i915_gem_free_all_phys_object(dev); - - mutex_lock(&dev->struct_mutex); -diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c -index cc03537..423dc90 100644 ---- a/drivers/gpu/drm/i915/i915_drv.c -+++ b/drivers/gpu/drm/i915/i915_drv.c -@@ -60,95 +60,95 @@ extern int intel_agp_enabled; - .subdevice = PCI_ANY_ID, \ - .driver_data = (unsigned long) info } - --const static struct intel_device_info intel_i830_info = { -+static const struct intel_device_info intel_i830_info = { - .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, - }; - --const static struct intel_device_info intel_845g_info = { -+static const struct intel_device_info intel_845g_info = { - .is_i8xx = 1, - }; - --const static struct intel_device_info intel_i85x_info = { -+static const struct intel_device_info intel_i85x_info = { - .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, - .cursor_needs_physical = 1, - }; - --const static struct intel_device_info intel_i865g_info = { -+static const struct intel_device_info intel_i865g_info = { - .is_i8xx = 1, - }; - --const static struct intel_device_info intel_i915g_info = { -+static const struct intel_device_info intel_i915g_info = { - .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, - }; --const static struct intel_device_info intel_i915gm_info = { -+static const struct intel_device_info intel_i915gm_info = { - .is_i9xx = 1, .is_mobile = 1, - .cursor_needs_physical = 1, - }; --const static struct intel_device_info intel_i945g_info = { -+static const struct intel_device_info intel_i945g_info = { - .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, - }; --const static struct intel_device_info intel_i945gm_info = { -+static const struct intel_device_info intel_i945gm_info = { - .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, - .has_hotplug = 1, .cursor_needs_physical = 1, - }; - --const static struct intel_device_info 
intel_i965g_info = { -+static const struct intel_device_info intel_i965g_info = { - .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, - }; - --const static struct intel_device_info intel_i965gm_info = { -+static const struct intel_device_info intel_i965gm_info = { - .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1, - .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, - .has_hotplug = 1, - }; - --const static struct intel_device_info intel_g33_info = { -+static const struct intel_device_info intel_g33_info = { - .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1, - .has_hotplug = 1, - }; - --const static struct intel_device_info intel_g45_info = { -+static const struct intel_device_info intel_g45_info = { - .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, - .has_pipe_cxsr = 1, - .has_hotplug = 1, - }; - --const static struct intel_device_info intel_gm45_info = { -+static const struct intel_device_info intel_gm45_info = { - .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1, - .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, - .has_pipe_cxsr = 1, - .has_hotplug = 1, - }; - --const static struct intel_device_info intel_pineview_info = { -+static const struct intel_device_info intel_pineview_info = { - .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, - .need_gfx_hws = 1, - .has_hotplug = 1, - }; - --const static struct intel_device_info intel_ironlake_d_info = { -+static const struct intel_device_info intel_ironlake_d_info = { - .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, - .has_pipe_cxsr = 1, - .has_hotplug = 1, - }; - --const static struct intel_device_info intel_ironlake_m_info = { -+static const struct intel_device_info intel_ironlake_m_info = { - .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, - .need_gfx_hws = 1, .has_rc6 = 1, - .has_hotplug = 1, - }; - --const static struct intel_device_info intel_sandybridge_d_info = { -+static const struct intel_device_info intel_sandybridge_d_info = { - .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, - .has_hotplug = 1, .is_gen6 = 1, - }; - --const static struct intel_device_info intel_sandybridge_m_info = { -+static const struct intel_device_info intel_sandybridge_m_info = { - .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1, - .has_hotplug = 1, .is_gen6 = 1, - }; - --const static struct pci_device_id pciidlist[] = { -+static const struct pci_device_id pciidlist[] = { - INTEL_VGA_DEVICE(0x3577, &intel_i830_info), - INTEL_VGA_DEVICE(0x2562, &intel_845g_info), - INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), -@@ -188,6 +188,35 @@ const static struct pci_device_id pciidlist[] = { - MODULE_DEVICE_TABLE(pci, pciidlist); - #endif - -+#define INTEL_PCH_DEVICE_ID_MASK 0xff00 -+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 -+ -+void intel_detect_pch (struct drm_device *dev) -+{ -+ struct drm_i915_private *dev_priv = dev->dev_private; -+ struct pci_dev *pch; -+ -+ /* -+ * The reason to probe ISA bridge instead of Dev31:Fun0 is to -+ * make graphics device passthrough work easy for VMM, that only -+ * need to expose ISA bridge to let driver know the real hardware -+ * underneath. This is a requirement from virtualization team. 
-+ */ -+ pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); -+ if (pch) { -+ if (pch->vendor == PCI_VENDOR_ID_INTEL) { -+ int id; -+ id = pch->device & INTEL_PCH_DEVICE_ID_MASK; -+ -+ if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { -+ dev_priv->pch_type = PCH_CPT; -+ DRM_DEBUG_KMS("Found CougarPoint PCH\n"); -+ } -+ } -+ pci_dev_put(pch); -+ } -+} -+ - static int i915_drm_freeze(struct drm_device *dev) - { - struct drm_i915_private *dev_priv = dev->dev_private; -@@ -311,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags) - /* - * Clear request list - */ -- i915_gem_retire_requests(dev); -+ i915_gem_retire_requests(dev, &dev_priv->render_ring); - - if (need_display) - i915_save_display(dev); -@@ -341,6 +370,7 @@ int i965_reset(struct drm_device *dev, u8 flags) - } - } else { - DRM_ERROR("Error occurred. Don't know how to reset this chip.\n"); -+ mutex_unlock(&dev->struct_mutex); - return -ENODEV; - } - -@@ -359,33 +389,10 @@ int i965_reset(struct drm_device *dev, u8 flags) - * switched away). - */ - if (drm_core_check_feature(dev, DRIVER_MODESET) || -- !dev_priv->mm.suspended) { -- drm_i915_ring_buffer_t *ring = &dev_priv->ring; -- struct drm_gem_object *obj = ring->ring_obj; -- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); -+ !dev_priv->mm.suspended) { -+ struct intel_ring_buffer *ring = &dev_priv->render_ring; - dev_priv->mm.suspended = 0; -- -- /* Stop the ring if it's running. */ -- I915_WRITE(PRB0_CTL, 0); -- I915_WRITE(PRB0_TAIL, 0); -- I915_WRITE(PRB0_HEAD, 0); -- -- /* Initialize the ring. */ -- I915_WRITE(PRB0_START, obj_priv->gtt_offset); -- I915_WRITE(PRB0_CTL, -- ((obj->size - 4096) & RING_NR_PAGES) | -- RING_NO_REPORT | -- RING_VALID); -- if (!drm_core_check_feature(dev, DRIVER_MODESET)) -- i915_kernel_lost_context(dev); -- else { -- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; -- ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; -- ring->space = ring->head - (ring->tail + 8); -- if (ring->space < 0) -- ring->space += ring->Size; -- } -- -+ ring->init(dev, ring); - mutex_unlock(&dev->struct_mutex); - drm_irq_uninstall(dev); - drm_irq_install(dev); -diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h -index 6e47900..2765831 100644 ---- a/drivers/gpu/drm/i915/i915_drv.h -+++ b/drivers/gpu/drm/i915/i915_drv.h -@@ -32,6 +32,7 @@ - - #include "i915_reg.h" - #include "intel_bios.h" -+#include "intel_ringbuffer.h" - #include - - /* General customization: -@@ -55,6 +56,8 @@ enum plane { - - #define I915_NUM_PIPE 2 - -+#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) -+ - /* Interface history: - * - * 1.1: Original. 
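The intel_detect_pch() hunk above keys the PCH type off the upper byte of the ISA bridge's PCI device ID, using INTEL_PCH_DEVICE_ID_MASK (0xff00) and INTEL_PCH_CPT_DEVICE_ID_TYPE (0x1c00) and leaving everything else at the enum's zero value, PCH_IBX. The short stand-alone C sketch below only replays that mask-and-compare step in userspace; classify_pch() and the sample device ID 0x1c44 are illustrative assumptions, not driver code.

#include <stdio.h>

#define INTEL_PCH_DEVICE_ID_MASK     0xff00  /* only the upper byte matters */
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00  /* CougarPoint family */

enum intel_pch { PCH_IBX, PCH_CPT };

static enum intel_pch classify_pch(unsigned short isa_bridge_device_id)
{
	/* Same mask-and-compare the patch applies to pch->device. */
	if ((isa_bridge_device_id & INTEL_PCH_DEVICE_ID_MASK) ==
	    INTEL_PCH_CPT_DEVICE_ID_TYPE)
		return PCH_CPT;
	/* Anything else stays at the enum's zero value, Ibex Peak. */
	return PCH_IBX;
}

int main(void)
{
	unsigned short id = 0x1c44;	/* assumed example ID, upper byte 0x1c */

	printf("PCH type: %s\n",
	       classify_pch(id) == PCH_CPT ? "CougarPoint" : "Ibex Peak");
	return 0;
}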
-@@ -89,16 +92,6 @@ struct drm_i915_gem_phys_object { - struct drm_gem_object *cur_obj; - }; - --typedef struct _drm_i915_ring_buffer { -- unsigned long Size; -- u8 *virtual_start; -- int head; -- int tail; -- int space; -- drm_local_map_t map; -- struct drm_gem_object *ring_obj; --} drm_i915_ring_buffer_t; -- - struct mem_block { - struct mem_block *next; - struct mem_block *prev; -@@ -128,6 +121,7 @@ struct drm_i915_master_private { - - struct drm_i915_fence_reg { - struct drm_gem_object *obj; -+ struct list_head lru_list; - }; - - struct sdvo_device_mapping { -@@ -135,6 +129,7 @@ struct sdvo_device_mapping { - u8 slave_addr; - u8 dvo_wiring; - u8 initialized; -+ u8 ddc_pin; - }; - - struct drm_i915_error_state { -@@ -175,7 +170,7 @@ struct drm_i915_error_state { - - struct drm_i915_display_funcs { - void (*dpms)(struct drm_crtc *crtc, int mode); -- bool (*fbc_enabled)(struct drm_crtc *crtc); -+ bool (*fbc_enabled)(struct drm_device *dev); - void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); - void (*disable_fbc)(struct drm_device *dev); - int (*get_display_clock_speed)(struct drm_device *dev); -@@ -222,6 +217,13 @@ enum no_fbc_reason { - FBC_NOT_TILED, /* buffer not tiled */ - }; - -+enum intel_pch { -+ PCH_IBX, /* Ibexpeak PCH */ -+ PCH_CPT, /* Cougarpoint PCH */ -+}; -+ -+struct intel_fbdev; -+ - typedef struct drm_i915_private { - struct drm_device *dev; - -@@ -232,17 +234,15 @@ typedef struct drm_i915_private { - void __iomem *regs; - - struct pci_dev *bridge_dev; -- drm_i915_ring_buffer_t ring; -+ struct intel_ring_buffer render_ring; -+ struct intel_ring_buffer bsd_ring; - - drm_dma_handle_t *status_page_dmah; -- void *hw_status_page; - void *seqno_page; - dma_addr_t dma_status_page; - uint32_t counter; -- unsigned int status_gfx_addr; - unsigned int seqno_gfx_addr; - drm_local_map_t hws_map; -- struct drm_gem_object *hws_obj; - struct drm_gem_object *seqno_obj; - struct drm_gem_object *pwrctx; - -@@ -258,8 +258,6 @@ typedef struct drm_i915_private { - atomic_t irq_received; - /** Protects user_irq_refcount and irq_mask_reg */ - spinlock_t user_irq_lock; -- /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */ -- int user_irq_refcount; - u32 trace_irq_seqno; - /** Cached value of IMR to avoid reads in updating the bitfield */ - u32 irq_mask_reg; -@@ -280,6 +278,7 @@ typedef struct drm_i915_private { - struct mem_block *agp_heap; - unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; - int vblank_pipe; -+ int num_pipe; - - /* For hangcheck timer */ - #define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */ -@@ -325,7 +324,7 @@ typedef struct drm_i915_private { - int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ - int num_fence_regs; /* 8 on pre-965, 16 otherwise */ - -- unsigned int fsb_freq, mem_freq; -+ unsigned int fsb_freq, mem_freq, is_ddr3; - - spinlock_t error_lock; - struct drm_i915_error_state *first_error; -@@ -335,6 +334,9 @@ typedef struct drm_i915_private { - /* Display functions */ - struct drm_i915_display_funcs display; - -+ /* PCH chipset type */ -+ enum intel_pch pch_type; -+ - /* Register state */ - bool modeset_on_lid; - u8 saveLBB; -@@ -502,18 +504,7 @@ typedef struct drm_i915_private { - */ - struct list_head shrink_list; - -- /** -- * List of objects currently involved in rendering from the -- * ringbuffer. -- * -- * Includes buffers having the contents of their GPU caches -- * flushed, not necessarily primitives. last_rendering_seqno -- * represents when the rendering involved will be completed. 
-- * -- * A reference is held on the buffer while on this list. -- */ - spinlock_t active_list_lock; -- struct list_head active_list; - - /** - * List of objects which are not in the ringbuffer but which -@@ -551,12 +542,6 @@ typedef struct drm_i915_private { - struct list_head fence_list; - - /** -- * List of breadcrumbs associated with GPU requests currently -- * outstanding. -- */ -- struct list_head request_list; -- -- /** - * We leave the user IRQ off as much as possible, - * but this means that requests will finish and never - * be retired once the system goes idle. Set a timer to -@@ -632,16 +617,31 @@ typedef struct drm_i915_private { - u8 cur_delay; - u8 min_delay; - u8 max_delay; -+ u8 fmax; -+ u8 fstart; -+ -+ u64 last_count1; -+ unsigned long last_time1; -+ u64 last_count2; -+ struct timespec last_time2; -+ unsigned long gfx_power; -+ int c_m; -+ int r_t; -+ u8 corr; -+ spinlock_t *mchdev_lock; - - enum no_fbc_reason no_fbc_reason; - - struct drm_mm_node *compressed_fb; - struct drm_mm_node *compressed_llb; -+ -+ /* list of fbdev register on this device */ -+ struct intel_fbdev *fbdev; - } drm_i915_private_t; - - /** driver private structure attached to each drm_gem_object */ - struct drm_i915_gem_object { -- struct drm_gem_object *obj; -+ struct drm_gem_object base; - - /** Current space allocated to this object in the GTT, if any. */ - struct drm_mm_node *gtt_space; -@@ -651,27 +651,69 @@ struct drm_i915_gem_object { - /** This object's place on GPU write list */ - struct list_head gpu_write_list; - -- /** This object's place on the fenced object LRU */ -- struct list_head fence_list; -- - /** - * This is set if the object is on the active or flushing lists - * (has pending rendering), and is not set if it's on inactive (ready - * to be unbound). - */ -- int active; -+ unsigned int active : 1; - - /** - * This is set if the object has been written to since last bound - * to the GTT - */ -- int dirty; -+ unsigned int dirty : 1; -+ -+ /** -+ * Fence register bits (if any) for this object. Will be set -+ * as needed when mapped into the GTT. -+ * Protected by dev->struct_mutex. -+ * -+ * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE) -+ */ -+ int fence_reg : 5; -+ -+ /** -+ * Used for checking the object doesn't appear more than once -+ * in an execbuffer object list. -+ */ -+ unsigned int in_execbuffer : 1; -+ -+ /** -+ * Advice: are the backing pages purgeable? -+ */ -+ unsigned int madv : 2; -+ -+ /** -+ * Refcount for the pages array. With the current locking scheme, there -+ * are at most two concurrent users: Binding a bo to the gtt and -+ * pwrite/pread using physical addresses. So two bits for a maximum -+ * of two users are enough. -+ */ -+ unsigned int pages_refcount : 2; -+#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3 -+ -+ /** -+ * Current tiling mode for the object. -+ */ -+ unsigned int tiling_mode : 2; -+ -+ /** How many users have pinned this object in GTT space. The following -+ * users can each hold at most one reference: pwrite/pread, pin_ioctl -+ * (via user_pin_count), execbuffer (objects are not allowed multiple -+ * times for the same batchbuffer), and the framebuffer code. When -+ * switching/pageflipping, the framebuffer code has at most two buffers -+ * pinned per crtc. -+ * -+ * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 -+ * bits with absolutely no headroom. So use 4 bits. */ -+ int pin_count : 4; -+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf - - /** AGP memory structure for our GTT binding. 
*/ - DRM_AGP_MEM *agp_mem; - - struct page **pages; -- int pages_refcount; - - /** - * Current offset of the object in GTT space. -@@ -680,26 +722,18 @@ struct drm_i915_gem_object { - */ - uint32_t gtt_offset; - -+ /* Which ring is refering to is this object */ -+ struct intel_ring_buffer *ring; -+ - /** - * Fake offset for use by mmap(2) - */ - uint64_t mmap_offset; - -- /** -- * Fence register bits (if any) for this object. Will be set -- * as needed when mapped into the GTT. -- * Protected by dev->struct_mutex. -- */ -- int fence_reg; -- -- /** How many users have pinned this object in GTT space */ -- int pin_count; -- - /** Breadcrumb of last rendering to the buffer. */ - uint32_t last_rendering_seqno; - -- /** Current tiling mode for the object. */ -- uint32_t tiling_mode; -+ /** Current tiling stride for the object, if it's tiled. */ - uint32_t stride; - - /** Record of address bit 17 of each page at last unbind. */ -@@ -722,17 +756,6 @@ struct drm_i915_gem_object { - struct drm_i915_gem_phys_object *phys_obj; - - /** -- * Used for checking the object doesn't appear more than once -- * in an execbuffer object list. -- */ -- int in_execbuffer; -- -- /** -- * Advice: are the backing pages purgeable? -- */ -- int madv; -- -- /** - * Number of crtcs where this object is currently the fb, but - * will be page flipped away on the next vblank. When it - * reaches 0, dev_priv->pending_flip_queue will be woken up. -@@ -740,7 +763,7 @@ struct drm_i915_gem_object { - atomic_t pending_flip; - }; - --#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private) -+#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) - - /** - * Request queue structure. -@@ -753,6 +776,9 @@ struct drm_i915_gem_object { - * an emission time with seqnos for tracking how far ahead of the GPU we are. - */ - struct drm_i915_gem_request { -+ /** On Which ring this request was generated */ -+ struct intel_ring_buffer *ring; -+ - /** GEM sequence number associated with this request. 
*/ - uint32_t seqno; - -@@ -809,6 +835,11 @@ extern int i915_emit_box(struct drm_device *dev, - struct drm_clip_rect *boxes, - int i, int DR1, int DR4); - extern int i965_reset(struct drm_device *dev, u8 flags); -+extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); -+extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); -+extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); -+extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); -+ - - /* i915_irq.c */ - void i915_hangcheck_elapsed(unsigned long data); -@@ -817,9 +848,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data, - struct drm_file *file_priv); - extern int i915_irq_wait(struct drm_device *dev, void *data, - struct drm_file *file_priv); --void i915_user_irq_get(struct drm_device *dev); - void i915_trace_irq_get(struct drm_device *dev, u32 seqno); --void i915_user_irq_put(struct drm_device *dev); - extern void i915_enable_interrupt (struct drm_device *dev); - - extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); -@@ -837,6 +866,11 @@ extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc); - extern int i915_vblank_swap(struct drm_device *dev, void *data, - struct drm_file *file_priv); - extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); -+extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask); -+extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, -+ u32 mask); -+extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, -+ u32 mask); - - void - i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); -@@ -902,17 +936,21 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); - void i915_gem_load(struct drm_device *dev); - int i915_gem_init_object(struct drm_gem_object *obj); -+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, -+ size_t size); - void i915_gem_free_object(struct drm_gem_object *obj); - int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); - void i915_gem_object_unpin(struct drm_gem_object *obj); - int i915_gem_object_unbind(struct drm_gem_object *obj); - void i915_gem_release_mmap(struct drm_gem_object *obj); - void i915_gem_lastclose(struct drm_device *dev); --uint32_t i915_get_gem_seqno(struct drm_device *dev); -+uint32_t i915_get_gem_seqno(struct drm_device *dev, -+ struct intel_ring_buffer *ring); - bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); - int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); - int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); --void i915_gem_retire_requests(struct drm_device *dev); -+void i915_gem_retire_requests(struct drm_device *dev, -+ struct intel_ring_buffer *ring); - void i915_gem_retire_work_handler(struct work_struct *work); - void i915_gem_clflush_object(struct drm_gem_object *obj); - int i915_gem_object_set_domain(struct drm_gem_object *obj, -@@ -923,9 +961,13 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev); - int i915_gem_do_init(struct drm_device *dev, unsigned long start, - unsigned long end); - int i915_gem_idle(struct drm_device *dev); --uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv, -- uint32_t flush_domains); --int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible); -+uint32_t i915_add_request(struct drm_device *dev, -+ struct drm_file *file_priv, -+ uint32_t flush_domains, -+ struct intel_ring_buffer *ring); 
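The header changes around this point thread an intel_ring_buffer pointer through i915_add_request(), i915_do_wait_request() and i915_gem_retire_requests(), so each request is queued on, and retired from, the list of the ring (render or BSD) that emitted it. Below is a rough stand-alone sketch of that per-ring bookkeeping; the toy_* names are invented for illustration, and a plain compare stands in for the driver's wrap-safe i915_seqno_passed().

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the per-ring request tracking; not driver structures. */
struct toy_request {
	unsigned int seqno;
	bool         pending;
};

struct toy_ring {
	const char         *name;
	unsigned int        completed_seqno;   /* what the hardware reports */
	struct toy_request  requests[8];
	unsigned int        count;
};

/* Queue a request on the ring it was emitted to, as i915_add_request()
 * now does with ring->request_list. */
static void toy_add_request(struct toy_ring *ring, unsigned int seqno)
{
	if (ring->count < 8) {
		ring->requests[ring->count].seqno = seqno;
		ring->requests[ring->count].pending = true;
		ring->count++;
	}
}

/* Retire everything this ring has completed, mirroring
 * i915_gem_retire_requests(dev, ring) walking one ring's list.
 * (The real code compares seqnos with the wrap-safe i915_seqno_passed().) */
static void toy_retire_requests(struct toy_ring *ring)
{
	for (unsigned int i = 0; i < ring->count; i++) {
		struct toy_request *req = &ring->requests[i];

		if (req->pending && req->seqno <= ring->completed_seqno) {
			req->pending = false;
			printf("%s: retired seqno %u\n", ring->name, req->seqno);
		}
	}
}

int main(void)
{
	struct toy_ring render = { .name = "render" };
	struct toy_ring bsd    = { .name = "bsd" };

	toy_add_request(&render, 1);
	toy_add_request(&bsd, 1);          /* seqnos are tracked per ring */
	toy_add_request(&render, 2);

	render.completed_seqno = 1;        /* pretend the render ring finished #1 */
	toy_retire_requests(&render);
	toy_retire_requests(&bsd);         /* nothing completed on the BSD ring yet */
	return 0;
}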
-+int i915_do_wait_request(struct drm_device *dev, -+ uint32_t seqno, int interruptible, -+ struct intel_ring_buffer *ring); - int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); - int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, - int write); -@@ -998,6 +1040,12 @@ extern void intel_modeset_cleanup(struct drm_device *dev); - extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); - extern void i8xx_disable_fbc(struct drm_device *dev); - extern void g4x_disable_fbc(struct drm_device *dev); -+extern void intel_disable_fbc(struct drm_device *dev); -+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); -+extern bool intel_fbc_enabled(struct drm_device *dev); -+extern bool ironlake_set_drps(struct drm_device *dev, u8 val); -+extern void intel_detect_pch (struct drm_device *dev); -+extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); - - /** - * Lock test for when it's just for synchronization of ring access. -@@ -1006,7 +1054,8 @@ extern void g4x_disable_fbc(struct drm_device *dev); - * has access to the ring. - */ - #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ -- if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \ -+ if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \ -+ == NULL) \ - LOCK_TEST_WITH_RETURN(dev, file_priv); \ - } while (0) - -@@ -1019,35 +1068,31 @@ extern void g4x_disable_fbc(struct drm_device *dev); - #define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg)) - #define I915_READ64(reg) readq(dev_priv->regs + (reg)) - #define POSTING_READ(reg) (void)I915_READ(reg) -+#define POSTING_READ16(reg) (void)I915_READ16(reg) - - #define I915_VERBOSE 0 - --#define RING_LOCALS volatile unsigned int *ring_virt__; -- --#define BEGIN_LP_RING(n) do { \ -- int bytes__ = 4*(n); \ -- if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \ -- /* a wrap must occur between instructions so pad beforehand */ \ -- if (unlikely (dev_priv->ring.tail + bytes__ > dev_priv->ring.Size)) \ -- i915_wrap_ring(dev); \ -- if (unlikely (dev_priv->ring.space < bytes__)) \ -- i915_wait_ring(dev, bytes__, __func__); \ -- ring_virt__ = (unsigned int *) \ -- (dev_priv->ring.virtual_start + dev_priv->ring.tail); \ -- dev_priv->ring.tail += bytes__; \ -- dev_priv->ring.tail &= dev_priv->ring.Size - 1; \ -- dev_priv->ring.space -= bytes__; \ -+#define BEGIN_LP_RING(n) do { \ -+ drm_i915_private_t *dev_priv = dev->dev_private; \ -+ if (I915_VERBOSE) \ -+ DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ -+ intel_ring_begin(dev, &dev_priv->render_ring, 4*(n)); \ - } while (0) - --#define OUT_RING(n) do { \ -- if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ -- *ring_virt__++ = (n); \ -+ -+#define OUT_RING(x) do { \ -+ drm_i915_private_t *dev_priv = dev->dev_private; \ -+ if (I915_VERBOSE) \ -+ DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \ -+ intel_ring_emit(dev, &dev_priv->render_ring, x); \ - } while (0) - - #define ADVANCE_LP_RING() do { \ -+ drm_i915_private_t *dev_priv = dev->dev_private; \ - if (I915_VERBOSE) \ -- DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->ring.tail); \ -- I915_WRITE(PRB0_TAIL, dev_priv->ring.tail); \ -+ DRM_DEBUG("ADVANCE_LP_RING %x\n", \ -+ dev_priv->render_ring.tail); \ -+ intel_ring_advance(dev, &dev_priv->render_ring); \ - } while(0) - - /** -@@ -1065,14 +1110,12 @@ extern void g4x_disable_fbc(struct drm_device *dev); - * - * The area from dword 0x20 to 0x3ff is available for driver usage. 
- */ --#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) -+#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\ -+ (dev_priv->render_ring.status_page.page_addr))[reg]) - #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) - #define I915_GEM_HWS_INDEX 0x20 - #define I915_BREADCRUMB_INDEX 0x21 - --extern int i915_wrap_ring(struct drm_device * dev); --extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); -- - #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) - - #define IS_I830(dev) ((dev)->pci_device == 0x3577) -@@ -1118,6 +1161,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); - (dev)->pci_device == 0x2A42 || \ - (dev)->pci_device == 0x2E42) - -+#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev)) - #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) - - /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte -@@ -1130,7 +1174,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); - #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) - #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) - #define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ -- !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev)) -+ !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \ -+ !IS_GEN6(dev)) - #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) - /* dsparb controlled by hw only */ - #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) -@@ -1144,6 +1189,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); - IS_GEN6(dev)) - #define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev)) - -+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) -+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) -+ - #define PRIMARY_RINGBUFFER_SIZE (128*1024) - - #endif -diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c -index ef3d91d..9ded3da 100644 ---- a/drivers/gpu/drm/i915/i915_gem.c -+++ b/drivers/gpu/drm/i915/i915_gem.c -@@ -35,8 +35,6 @@ - #include - #include - --#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) -- - static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); - static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); - static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); -@@ -124,7 +122,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, - args->size = roundup(args->size, PAGE_SIZE); - - /* Allocate the new object */ -- obj = drm_gem_object_alloc(dev, args->size); -+ obj = i915_gem_alloc_object(dev, args->size); - if (obj == NULL) - return -ENOMEM; - -@@ -169,7 +167,7 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) - obj_priv->tiling_mode != I915_TILING_NONE; - } - --static inline int -+static inline void - slow_shmem_copy(struct page *dst_page, - int dst_offset, - struct page *src_page, -@@ -178,25 +176,16 @@ slow_shmem_copy(struct page *dst_page, - { - char *dst_vaddr, *src_vaddr; - -- dst_vaddr = kmap_atomic(dst_page, KM_USER0); -- if (dst_vaddr == NULL) -- return -ENOMEM; -- -- src_vaddr = kmap_atomic(src_page, KM_USER1); -- if (src_vaddr == NULL) { -- kunmap_atomic(dst_vaddr, KM_USER0); -- return -ENOMEM; -- } -+ dst_vaddr = kmap(dst_page); -+ src_vaddr = kmap(src_page); - - memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length); - -- 
kunmap_atomic(src_vaddr, KM_USER1); -- kunmap_atomic(dst_vaddr, KM_USER0); -- -- return 0; -+ kunmap(src_page); -+ kunmap(dst_page); - } - --static inline int -+static inline void - slow_shmem_bit17_copy(struct page *gpu_page, - int gpu_offset, - struct page *cpu_page, -@@ -216,15 +205,8 @@ slow_shmem_bit17_copy(struct page *gpu_page, - cpu_page, cpu_offset, length); - } - -- gpu_vaddr = kmap_atomic(gpu_page, KM_USER0); -- if (gpu_vaddr == NULL) -- return -ENOMEM; -- -- cpu_vaddr = kmap_atomic(cpu_page, KM_USER1); -- if (cpu_vaddr == NULL) { -- kunmap_atomic(gpu_vaddr, KM_USER0); -- return -ENOMEM; -- } -+ gpu_vaddr = kmap(gpu_page); -+ cpu_vaddr = kmap(cpu_page); - - /* Copy the data, XORing A6 with A17 (1). The user already knows he's - * XORing with the other bits (A9 for Y, A9 and A10 for X) -@@ -248,10 +230,8 @@ slow_shmem_bit17_copy(struct page *gpu_page, - length -= this_length; - } - -- kunmap_atomic(cpu_vaddr, KM_USER1); -- kunmap_atomic(gpu_vaddr, KM_USER0); -- -- return 0; -+ kunmap(cpu_page); -+ kunmap(gpu_page); - } - - /** -@@ -427,21 +407,19 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, - page_length = PAGE_SIZE - data_page_offset; - - if (do_bit17_swizzling) { -- ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], -- shmem_page_offset, -- user_pages[data_page_index], -- data_page_offset, -- page_length, -- 1); -- } else { -- ret = slow_shmem_copy(user_pages[data_page_index], -- data_page_offset, -- obj_priv->pages[shmem_page_index], -+ slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], - shmem_page_offset, -- page_length); -+ user_pages[data_page_index], -+ data_page_offset, -+ page_length, -+ 1); -+ } else { -+ slow_shmem_copy(user_pages[data_page_index], -+ data_page_offset, -+ obj_priv->pages[shmem_page_index], -+ shmem_page_offset, -+ page_length); - } -- if (ret) -- goto fail_put_pages; - - remain -= page_length; - data_ptr += page_length; -@@ -531,25 +509,24 @@ fast_user_write(struct io_mapping *mapping, - * page faults - */ - --static inline int -+static inline void - slow_kernel_write(struct io_mapping *mapping, - loff_t gtt_base, int gtt_offset, - struct page *user_page, int user_offset, - int length) - { -- char *src_vaddr, *dst_vaddr; -- unsigned long unwritten; -+ char __iomem *dst_vaddr; -+ char *src_vaddr; - -- dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base); -- src_vaddr = kmap_atomic(user_page, KM_USER1); -- unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset, -- src_vaddr + user_offset, -- length); -- kunmap_atomic(src_vaddr, KM_USER1); -- io_mapping_unmap_atomic(dst_vaddr); -- if (unwritten) -- return -EFAULT; -- return 0; -+ dst_vaddr = io_mapping_map_wc(mapping, gtt_base); -+ src_vaddr = kmap(user_page); -+ -+ memcpy_toio(dst_vaddr + gtt_offset, -+ src_vaddr + user_offset, -+ length); -+ -+ kunmap(user_page); -+ io_mapping_unmap(dst_vaddr); - } - - static inline int -@@ -722,18 +699,11 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, - if ((data_page_offset + page_length) > PAGE_SIZE) - page_length = PAGE_SIZE - data_page_offset; - -- ret = slow_kernel_write(dev_priv->mm.gtt_mapping, -- gtt_page_base, gtt_page_offset, -- user_pages[data_page_index], -- data_page_offset, -- page_length); -- -- /* If we get a fault while copying data, then (presumably) our -- * source page isn't available. Return the error and we'll -- * retry in the slow path. 
-- */ -- if (ret) -- goto out_unpin_object; -+ slow_kernel_write(dev_priv->mm.gtt_mapping, -+ gtt_page_base, gtt_page_offset, -+ user_pages[data_page_index], -+ data_page_offset, -+ page_length); - - remain -= page_length; - offset += page_length; -@@ -902,21 +872,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, - page_length = PAGE_SIZE - data_page_offset; - - if (do_bit17_swizzling) { -- ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], -- shmem_page_offset, -- user_pages[data_page_index], -- data_page_offset, -- page_length, -- 0); -- } else { -- ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], -+ slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], - shmem_page_offset, - user_pages[data_page_index], - data_page_offset, -- page_length); -+ page_length, -+ 0); -+ } else { -+ slow_shmem_copy(obj_priv->pages[shmem_page_index], -+ shmem_page_offset, -+ user_pages[data_page_index], -+ data_page_offset, -+ page_length); - } -- if (ret) -- goto fail_put_pages; - - remain -= page_length; - data_ptr += page_length; -@@ -973,7 +941,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, - if (obj_priv->phys_obj) - ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); - else if (obj_priv->tiling_mode == I915_TILING_NONE && -- dev->gtt_total != 0) { -+ dev->gtt_total != 0 && -+ obj->write_domain != I915_GEM_DOMAIN_CPU) { - ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); - if (ret == -EFAULT) { - ret = i915_gem_gtt_pwrite_slow(dev, obj, args, -@@ -1051,7 +1020,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, - * about to occur. - */ - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { -- list_move_tail(&obj_priv->fence_list, -+ struct drm_i915_fence_reg *reg = -+ &dev_priv->fence_regs[obj_priv->fence_reg]; -+ list_move_tail(®->lru_list, - &dev_priv->mm.fence_list); - } - -@@ -1482,11 +1453,14 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) - } - - static void --i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) -+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno, -+ struct intel_ring_buffer *ring) - { - struct drm_device *dev = obj->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); -+ BUG_ON(ring == NULL); -+ obj_priv->ring = ring; - - /* Add a reference if we're newly entering the active list. */ - if (!obj_priv->active) { -@@ -1495,8 +1469,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) - } - /* Move from whatever list we were on to the tail of execution. 
*/ - spin_lock(&dev_priv->mm.active_list_lock); -- list_move_tail(&obj_priv->list, -- &dev_priv->mm.active_list); -+ list_move_tail(&obj_priv->list, &ring->active_list); - spin_unlock(&dev_priv->mm.active_list_lock); - obj_priv->last_rendering_seqno = seqno; - } -@@ -1549,6 +1522,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) - BUG_ON(!list_empty(&obj_priv->gpu_write_list)); - - obj_priv->last_rendering_seqno = 0; -+ obj_priv->ring = NULL; - if (obj_priv->active) { - obj_priv->active = 0; - drm_gem_object_unreference(obj); -@@ -1558,7 +1532,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) - - static void - i915_gem_process_flushing_list(struct drm_device *dev, -- uint32_t flush_domains, uint32_t seqno) -+ uint32_t flush_domains, uint32_t seqno, -+ struct intel_ring_buffer *ring) - { - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv, *next; -@@ -1566,20 +1541,24 @@ i915_gem_process_flushing_list(struct drm_device *dev, - list_for_each_entry_safe(obj_priv, next, - &dev_priv->mm.gpu_write_list, - gpu_write_list) { -- struct drm_gem_object *obj = obj_priv->obj; -+ struct drm_gem_object *obj = &obj_priv->base; - - if ((obj->write_domain & flush_domains) == -- obj->write_domain) { -+ obj->write_domain && -+ obj_priv->ring->ring_flag == ring->ring_flag) { - uint32_t old_write_domain = obj->write_domain; - - obj->write_domain = 0; - list_del_init(&obj_priv->gpu_write_list); -- i915_gem_object_move_to_active(obj, seqno); -+ i915_gem_object_move_to_active(obj, seqno, ring); - - /* update the fence lru list */ -- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) -- list_move_tail(&obj_priv->fence_list, -+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { -+ struct drm_i915_fence_reg *reg = -+ &dev_priv->fence_regs[obj_priv->fence_reg]; -+ list_move_tail(®->lru_list, - &dev_priv->mm.fence_list); -+ } - - trace_i915_gem_object_change_domain(obj, - obj->read_domains, -@@ -1588,31 +1567,15 @@ i915_gem_process_flushing_list(struct drm_device *dev, - } - } - --#define PIPE_CONTROL_FLUSH(addr) \ -- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ -- PIPE_CONTROL_DEPTH_STALL); \ -- OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ -- OUT_RING(0); \ -- OUT_RING(0); \ -- --/** -- * Creates a new sequence number, emitting a write of it to the status page -- * plus an interrupt, which will trigger i915_user_interrupt_handler. -- * -- * Must be called with struct_lock held. -- * -- * Returned sequence numbers are nonzero on success. -- */ - uint32_t - i915_add_request(struct drm_device *dev, struct drm_file *file_priv, -- uint32_t flush_domains) -+ uint32_t flush_domains, struct intel_ring_buffer *ring) - { - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_file_private *i915_file_priv = NULL; - struct drm_i915_gem_request *request; - uint32_t seqno; - int was_empty; -- RING_LOCALS; - - if (file_priv != NULL) - i915_file_priv = file_priv->driver_priv; -@@ -1621,62 +1584,14 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, - if (request == NULL) - return 0; - -- /* Grab the seqno we're going to make this request be, and bump the -- * next (skipping 0 so it can be the reserved no-seqno value). 
-- */ -- seqno = dev_priv->mm.next_gem_seqno; -- dev_priv->mm.next_gem_seqno++; -- if (dev_priv->mm.next_gem_seqno == 0) -- dev_priv->mm.next_gem_seqno++; -- -- if (HAS_PIPE_CONTROL(dev)) { -- u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; -- -- /* -- * Workaround qword write incoherence by flushing the -- * PIPE_NOTIFY buffers out to memory before requesting -- * an interrupt. -- */ -- BEGIN_LP_RING(32); -- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | -- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); -- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); -- OUT_RING(seqno); -- OUT_RING(0); -- PIPE_CONTROL_FLUSH(scratch_addr); -- scratch_addr += 128; /* write to separate cachelines */ -- PIPE_CONTROL_FLUSH(scratch_addr); -- scratch_addr += 128; -- PIPE_CONTROL_FLUSH(scratch_addr); -- scratch_addr += 128; -- PIPE_CONTROL_FLUSH(scratch_addr); -- scratch_addr += 128; -- PIPE_CONTROL_FLUSH(scratch_addr); -- scratch_addr += 128; -- PIPE_CONTROL_FLUSH(scratch_addr); -- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | -- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | -- PIPE_CONTROL_NOTIFY); -- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); -- OUT_RING(seqno); -- OUT_RING(0); -- ADVANCE_LP_RING(); -- } else { -- BEGIN_LP_RING(4); -- OUT_RING(MI_STORE_DWORD_INDEX); -- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); -- OUT_RING(seqno); -- -- OUT_RING(MI_USER_INTERRUPT); -- ADVANCE_LP_RING(); -- } -- -- DRM_DEBUG_DRIVER("%d\n", seqno); -+ seqno = ring->add_request(dev, ring, file_priv, flush_domains); - - request->seqno = seqno; -+ request->ring = ring; - request->emitted_jiffies = jiffies; -- was_empty = list_empty(&dev_priv->mm.request_list); -- list_add_tail(&request->list, &dev_priv->mm.request_list); -+ was_empty = list_empty(&ring->request_list); -+ list_add_tail(&request->list, &ring->request_list); -+ - if (i915_file_priv) { - list_add_tail(&request->client_list, - &i915_file_priv->mm.request_list); -@@ -1688,7 +1603,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, - * domain we're flushing with our flush. - */ - if (flush_domains != 0) -- i915_gem_process_flushing_list(dev, flush_domains, seqno); -+ i915_gem_process_flushing_list(dev, flush_domains, seqno, ring); - - if (!dev_priv->mm.suspended) { - mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); -@@ -1705,20 +1620,16 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, - * before signalling the CPU - */ - static uint32_t --i915_retire_commands(struct drm_device *dev) -+i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) - { -- drm_i915_private_t *dev_priv = dev->dev_private; -- uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; - uint32_t flush_domains = 0; -- RING_LOCALS; - - /* The sampler always gets flushed on i965 (sigh) */ - if (IS_I965G(dev)) - flush_domains |= I915_GEM_DOMAIN_SAMPLER; -- BEGIN_LP_RING(2); -- OUT_RING(cmd); -- OUT_RING(0); /* noop */ -- ADVANCE_LP_RING(); -+ -+ ring->flush(dev, ring, -+ I915_GEM_DOMAIN_COMMAND, flush_domains); - return flush_domains; - } - -@@ -1738,14 +1649,14 @@ i915_gem_retire_request(struct drm_device *dev, - * by the ringbuffer to the flushing/inactive lists as appropriate. 
- */ - spin_lock(&dev_priv->mm.active_list_lock); -- while (!list_empty(&dev_priv->mm.active_list)) { -+ while (!list_empty(&request->ring->active_list)) { - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; - -- obj_priv = list_first_entry(&dev_priv->mm.active_list, -+ obj_priv = list_first_entry(&request->ring->active_list, - struct drm_i915_gem_object, - list); -- obj = obj_priv->obj; -+ obj = &obj_priv->base; - - /* If the seqno being retired doesn't match the oldest in the - * list, then the oldest in the list must still be newer than -@@ -1789,35 +1700,33 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) - } - - uint32_t --i915_get_gem_seqno(struct drm_device *dev) -+i915_get_gem_seqno(struct drm_device *dev, -+ struct intel_ring_buffer *ring) - { -- drm_i915_private_t *dev_priv = dev->dev_private; -- -- if (HAS_PIPE_CONTROL(dev)) -- return ((volatile u32 *)(dev_priv->seqno_page))[0]; -- else -- return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); -+ return ring->get_gem_seqno(dev, ring); - } - - /** - * This function clears the request list as sequence numbers are passed. - */ - void --i915_gem_retire_requests(struct drm_device *dev) -+i915_gem_retire_requests(struct drm_device *dev, -+ struct intel_ring_buffer *ring) - { - drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t seqno; - -- if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list)) -+ if (!ring->status_page.page_addr -+ || list_empty(&ring->request_list)) - return; - -- seqno = i915_get_gem_seqno(dev); -+ seqno = i915_get_gem_seqno(dev, ring); - -- while (!list_empty(&dev_priv->mm.request_list)) { -+ while (!list_empty(&ring->request_list)) { - struct drm_i915_gem_request *request; - uint32_t retiring_seqno; - -- request = list_first_entry(&dev_priv->mm.request_list, -+ request = list_first_entry(&ring->request_list, - struct drm_i915_gem_request, - list); - retiring_seqno = request->seqno; -@@ -1835,7 +1744,8 @@ i915_gem_retire_requests(struct drm_device *dev) - - if (unlikely (dev_priv->trace_irq_seqno && - i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { -- i915_user_irq_put(dev); -+ -+ ring->user_irq_put(dev, ring); - dev_priv->trace_irq_seqno = 0; - } - } -@@ -1851,15 +1761,22 @@ i915_gem_retire_work_handler(struct work_struct *work) - dev = dev_priv->dev; - - mutex_lock(&dev->struct_mutex); -- i915_gem_retire_requests(dev); -+ i915_gem_retire_requests(dev, &dev_priv->render_ring); -+ -+ if (HAS_BSD(dev)) -+ i915_gem_retire_requests(dev, &dev_priv->bsd_ring); -+ - if (!dev_priv->mm.suspended && -- !list_empty(&dev_priv->mm.request_list)) -+ (!list_empty(&dev_priv->render_ring.request_list) || -+ (HAS_BSD(dev) && -+ !list_empty(&dev_priv->bsd_ring.request_list)))) - queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); - mutex_unlock(&dev->struct_mutex); - } - - int --i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) -+i915_do_wait_request(struct drm_device *dev, uint32_t seqno, -+ int interruptible, struct intel_ring_buffer *ring) - { - drm_i915_private_t *dev_priv = dev->dev_private; - u32 ier; -@@ -1870,7 +1787,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) - if (atomic_read(&dev_priv->mm.wedged)) - return -EIO; - -- if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { -+ if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) { - if (HAS_PCH_SPLIT(dev)) - ier = I915_READ(DEIER) | I915_READ(GTIER); - else -@@ -1884,19 +1801,21 @@ i915_do_wait_request(struct drm_device *dev, 
uint32_t seqno, int interruptible) - - trace_i915_gem_request_wait_begin(dev, seqno); - -- dev_priv->mm.waiting_gem_seqno = seqno; -- i915_user_irq_get(dev); -+ ring->waiting_gem_seqno = seqno; -+ ring->user_irq_get(dev, ring); - if (interruptible) -- ret = wait_event_interruptible(dev_priv->irq_queue, -- i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || -- atomic_read(&dev_priv->mm.wedged)); -+ ret = wait_event_interruptible(ring->irq_queue, -+ i915_seqno_passed( -+ ring->get_gem_seqno(dev, ring), seqno) -+ || atomic_read(&dev_priv->mm.wedged)); - else -- wait_event(dev_priv->irq_queue, -- i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || -- atomic_read(&dev_priv->mm.wedged)); -+ wait_event(ring->irq_queue, -+ i915_seqno_passed( -+ ring->get_gem_seqno(dev, ring), seqno) -+ || atomic_read(&dev_priv->mm.wedged)); - -- i915_user_irq_put(dev); -- dev_priv->mm.waiting_gem_seqno = 0; -+ ring->user_irq_put(dev, ring); -+ ring->waiting_gem_seqno = 0; - - trace_i915_gem_request_wait_end(dev, seqno); - } -@@ -1905,7 +1824,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) - - if (ret && ret != -ERESTARTSYS) - DRM_ERROR("%s returns %d (awaiting %d at %d)\n", -- __func__, ret, seqno, i915_get_gem_seqno(dev)); -+ __func__, ret, seqno, ring->get_gem_seqno(dev, ring)); - - /* Directly dispatch request retiring. While we have the work queue - * to handle this, the waiter on a request often wants an associated -@@ -1913,7 +1832,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) - * a separate wait queue to handle that. - */ - if (ret == 0) -- i915_gem_retire_requests(dev); -+ i915_gem_retire_requests(dev, ring); - - return ret; - } -@@ -1923,9 +1842,10 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) - * request and object lists appropriately for that event. - */ - static int --i915_wait_request(struct drm_device *dev, uint32_t seqno) -+i915_wait_request(struct drm_device *dev, uint32_t seqno, -+ struct intel_ring_buffer *ring) - { -- return i915_do_wait_request(dev, seqno, 1); -+ return i915_do_wait_request(dev, seqno, 1, ring); - } - - static void -@@ -1934,71 +1854,29 @@ i915_gem_flush(struct drm_device *dev, - uint32_t flush_domains) - { - drm_i915_private_t *dev_priv = dev->dev_private; -- uint32_t cmd; -- RING_LOCALS; -- --#if WATCH_EXEC -- DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, -- invalidate_domains, flush_domains); --#endif -- trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno, -- invalidate_domains, flush_domains); -- - if (flush_domains & I915_GEM_DOMAIN_CPU) - drm_agp_chipset_flush(dev); -+ dev_priv->render_ring.flush(dev, &dev_priv->render_ring, -+ invalidate_domains, -+ flush_domains); -+ -+ if (HAS_BSD(dev)) -+ dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring, -+ invalidate_domains, -+ flush_domains); -+} - -- if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { -- /* -- * read/write caches: -- * -- * I915_GEM_DOMAIN_RENDER is always invalidated, but is -- * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is -- * also flushed at 2d versus 3d pipeline switches. -- * -- * read-only caches: -- * -- * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if -- * MI_READ_FLUSH is set, and is always flushed on 965. -- * -- * I915_GEM_DOMAIN_COMMAND may not exist? -- * -- * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is -- * invalidated when MI_EXE_FLUSH is set. 
-- * -- * I915_GEM_DOMAIN_VERTEX, which exists on 965, is -- * invalidated with every MI_FLUSH. -- * -- * TLBs: -- * -- * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND -- * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and -- * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER -- * are flushed at any MI_FLUSH. -- */ -- -- cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; -- if ((invalidate_domains|flush_domains) & -- I915_GEM_DOMAIN_RENDER) -- cmd &= ~MI_NO_WRITE_FLUSH; -- if (!IS_I965G(dev)) { -- /* -- * On the 965, the sampler cache always gets flushed -- * and this bit is reserved. -- */ -- if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) -- cmd |= MI_READ_FLUSH; -- } -- if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) -- cmd |= MI_EXE_FLUSH; -- --#if WATCH_EXEC -- DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); --#endif -- BEGIN_LP_RING(2); -- OUT_RING(cmd); -- OUT_RING(MI_NOOP); -- ADVANCE_LP_RING(); -- } -+static void -+i915_gem_flush_ring(struct drm_device *dev, -+ uint32_t invalidate_domains, -+ uint32_t flush_domains, -+ struct intel_ring_buffer *ring) -+{ -+ if (flush_domains & I915_GEM_DOMAIN_CPU) -+ drm_agp_chipset_flush(dev); -+ ring->flush(dev, ring, -+ invalidate_domains, -+ flush_domains); - } - - /** -@@ -2025,7 +1903,8 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj) - DRM_INFO("%s: object %p wait for seqno %08x\n", - __func__, obj, obj_priv->last_rendering_seqno); - #endif -- ret = i915_wait_request(dev, obj_priv->last_rendering_seqno); -+ ret = i915_wait_request(dev, -+ obj_priv->last_rendering_seqno, obj_priv->ring); - if (ret != 0) - return ret; - } -@@ -2119,7 +1998,7 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size) - - /* Try to find the smallest clean object */ - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { -- struct drm_gem_object *obj = obj_priv->obj; -+ struct drm_gem_object *obj = &obj_priv->base; - if (obj->size >= min_size) { - if ((!obj_priv->dirty || - i915_gem_object_is_purgeable(obj_priv)) && -@@ -2141,11 +2020,14 @@ i915_gpu_idle(struct drm_device *dev) - { - drm_i915_private_t *dev_priv = dev->dev_private; - bool lists_empty; -- uint32_t seqno; -+ uint32_t seqno1, seqno2; -+ int ret; - - spin_lock(&dev_priv->mm.active_list_lock); -- lists_empty = list_empty(&dev_priv->mm.flushing_list) && -- list_empty(&dev_priv->mm.active_list); -+ lists_empty = (list_empty(&dev_priv->mm.flushing_list) && -+ list_empty(&dev_priv->render_ring.active_list) && -+ (!HAS_BSD(dev) || -+ list_empty(&dev_priv->bsd_ring.active_list))); - spin_unlock(&dev_priv->mm.active_list_lock); - - if (lists_empty) -@@ -2153,11 +2035,25 @@ i915_gpu_idle(struct drm_device *dev) - - /* Flush everything onto the inactive list. 
*/ - i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); -- seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); -- if (seqno == 0) -+ seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, -+ &dev_priv->render_ring); -+ if (seqno1 == 0) - return -ENOMEM; -+ ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring); - -- return i915_wait_request(dev, seqno); -+ if (HAS_BSD(dev)) { -+ seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, -+ &dev_priv->bsd_ring); -+ if (seqno2 == 0) -+ return -ENOMEM; -+ -+ ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring); -+ if (ret) -+ return ret; -+ } -+ -+ -+ return ret; - } - - static int -@@ -2170,7 +2066,9 @@ i915_gem_evict_everything(struct drm_device *dev) - spin_lock(&dev_priv->mm.active_list_lock); - lists_empty = (list_empty(&dev_priv->mm.inactive_list) && - list_empty(&dev_priv->mm.flushing_list) && -- list_empty(&dev_priv->mm.active_list)); -+ list_empty(&dev_priv->render_ring.active_list) && -+ (!HAS_BSD(dev) -+ || list_empty(&dev_priv->bsd_ring.active_list))); - spin_unlock(&dev_priv->mm.active_list_lock); - - if (lists_empty) -@@ -2190,7 +2088,9 @@ i915_gem_evict_everything(struct drm_device *dev) - spin_lock(&dev_priv->mm.active_list_lock); - lists_empty = (list_empty(&dev_priv->mm.inactive_list) && - list_empty(&dev_priv->mm.flushing_list) && -- list_empty(&dev_priv->mm.active_list)); -+ list_empty(&dev_priv->render_ring.active_list) && -+ (!HAS_BSD(dev) -+ || list_empty(&dev_priv->bsd_ring.active_list))); - spin_unlock(&dev_priv->mm.active_list_lock); - BUG_ON(!lists_empty); - -@@ -2204,8 +2104,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) - struct drm_gem_object *obj; - int ret; - -+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring; -+ struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring; - for (;;) { -- i915_gem_retire_requests(dev); -+ i915_gem_retire_requests(dev, render_ring); -+ -+ if (HAS_BSD(dev)) -+ i915_gem_retire_requests(dev, bsd_ring); - - /* If there's an inactive buffer available now, grab it - * and be done. -@@ -2229,14 +2134,30 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) - * things, wait for the next to finish and hopefully leave us - * a buffer to evict. 
- */ -- if (!list_empty(&dev_priv->mm.request_list)) { -+ if (!list_empty(&render_ring->request_list)) { -+ struct drm_i915_gem_request *request; -+ -+ request = list_first_entry(&render_ring->request_list, -+ struct drm_i915_gem_request, -+ list); -+ -+ ret = i915_wait_request(dev, -+ request->seqno, request->ring); -+ if (ret) -+ return ret; -+ -+ continue; -+ } -+ -+ if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) { - struct drm_i915_gem_request *request; - -- request = list_first_entry(&dev_priv->mm.request_list, -+ request = list_first_entry(&bsd_ring->request_list, - struct drm_i915_gem_request, - list); - -- ret = i915_wait_request(dev, request->seqno); -+ ret = i915_wait_request(dev, -+ request->seqno, request->ring); - if (ret) - return ret; - -@@ -2253,7 +2174,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) - - /* Find an object that we can immediately reuse */ - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { -- obj = obj_priv->obj; -+ obj = &obj_priv->base; - if (obj->size >= min_size) - break; - -@@ -2263,10 +2184,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) - if (obj != NULL) { - uint32_t seqno; - -- i915_gem_flush(dev, -+ i915_gem_flush_ring(dev, - obj->write_domain, -- obj->write_domain); -- seqno = i915_add_request(dev, NULL, obj->write_domain); -+ obj->write_domain, -+ obj_priv->ring); -+ seqno = i915_add_request(dev, NULL, -+ obj->write_domain, -+ obj_priv->ring); - if (seqno == 0) - return -ENOMEM; - continue; -@@ -2294,6 +2218,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, - struct inode *inode; - struct page *page; - -+ BUG_ON(obj_priv->pages_refcount -+ == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT); -+ - if (obj_priv->pages_refcount++ != 0) - return 0; - -@@ -2485,9 +2412,10 @@ static int i915_find_fence_reg(struct drm_device *dev) - - /* None available, try to steal one or wait for a user to finish */ - i = I915_FENCE_REG_NONE; -- list_for_each_entry(obj_priv, &dev_priv->mm.fence_list, -- fence_list) { -- obj = obj_priv->obj; -+ list_for_each_entry(reg, &dev_priv->mm.fence_list, -+ lru_list) { -+ obj = reg->obj; -+ obj_priv = to_intel_bo(obj); - - if (obj_priv->pin_count) - continue; -@@ -2536,7 +2464,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) - - /* Just update our place in the LRU if our fence is getting used. 
*/ - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { -- list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list); -+ reg = &dev_priv->fence_regs[obj_priv->fence_reg]; -+ list_move_tail(®->lru_list, &dev_priv->mm.fence_list); - return 0; - } - -@@ -2566,7 +2495,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) - - obj_priv->fence_reg = ret; - reg = &dev_priv->fence_regs[obj_priv->fence_reg]; -- list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list); -+ list_add_tail(®->lru_list, &dev_priv->mm.fence_list); - - reg->obj = obj; - -@@ -2598,6 +2527,8 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) - struct drm_device *dev = obj->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); -+ struct drm_i915_fence_reg *reg = -+ &dev_priv->fence_regs[obj_priv->fence_reg]; - - if (IS_GEN6(dev)) { - I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + -@@ -2616,9 +2547,9 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) - I915_WRITE(fence_reg, 0); - } - -- dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL; -+ reg->obj = NULL; - obj_priv->fence_reg = I915_FENCE_REG_NONE; -- list_del_init(&obj_priv->fence_list); -+ list_del_init(®->lru_list); - } - - /** -@@ -2688,6 +2619,14 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) - return -EINVAL; - } - -+ /* If the object is bigger than the entire aperture, reject it early -+ * before evicting everything in a vain attempt to find space. -+ */ -+ if (obj->size > dev->gtt_total) { -+ DRM_ERROR("Attempting to bind an object larger than the aperture\n"); -+ return -E2BIG; -+ } -+ - search_free: - free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, - obj->size, alignment, 0); -@@ -2798,6 +2737,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) - { - struct drm_device *dev = obj->dev; - uint32_t old_write_domain; -+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - - if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) - return; -@@ -2805,7 +2745,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) - /* Queue the GPU write cache flushing we need. */ - old_write_domain = obj->write_domain; - i915_gem_flush(dev, 0, obj->write_domain); -- (void) i915_add_request(dev, NULL, obj->write_domain); -+ (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring); - BUG_ON(obj->write_domain); - - trace_i915_gem_object_change_domain(obj, -@@ -2945,23 +2885,24 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) - DRM_INFO("%s: object %p wait for seqno %08x\n", - __func__, obj, obj_priv->last_rendering_seqno); - #endif -- ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0); -+ ret = i915_do_wait_request(dev, -+ obj_priv->last_rendering_seqno, -+ 0, -+ obj_priv->ring); - if (ret != 0) - return ret; - } - -+ i915_gem_object_flush_cpu_write_domain(obj); -+ - old_write_domain = obj->write_domain; - old_read_domains = obj->read_domains; - -- obj->read_domains &= I915_GEM_DOMAIN_GTT; -- -- i915_gem_object_flush_cpu_write_domain(obj); -- - /* It should now be out of any other write domains, and we can update - * the domain values for our changes. 
- */ - BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); -- obj->read_domains |= I915_GEM_DOMAIN_GTT; -+ obj->read_domains = I915_GEM_DOMAIN_GTT; - obj->write_domain = I915_GEM_DOMAIN_GTT; - obj_priv->dirty = 1; - -@@ -3345,9 +3286,13 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, - obj_priv->tiling_mode != I915_TILING_NONE; - - /* Check fence reg constraints and rebind if necessary */ -- if (need_fence && !i915_gem_object_fence_offset_ok(obj, -- obj_priv->tiling_mode)) -- i915_gem_object_unbind(obj); -+ if (need_fence && -+ !i915_gem_object_fence_offset_ok(obj, -+ obj_priv->tiling_mode)) { -+ ret = i915_gem_object_unbind(obj); -+ if (ret) -+ return ret; -+ } - - /* Choose the GTT offset for our buffer and put it there. */ - ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); -@@ -3361,9 +3306,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, - if (need_fence) { - ret = i915_gem_object_get_fence_reg(obj); - if (ret != 0) { -- if (ret != -EBUSY && ret != -ERESTARTSYS) -- DRM_ERROR("Failure to install fence: %d\n", -- ret); - i915_gem_object_unpin(obj); - return ret; - } -@@ -3536,62 +3478,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, - return 0; - } - --/** Dispatch a batchbuffer to the ring -- */ --static int --i915_dispatch_gem_execbuffer(struct drm_device *dev, -- struct drm_i915_gem_execbuffer2 *exec, -- struct drm_clip_rect *cliprects, -- uint64_t exec_offset) --{ -- drm_i915_private_t *dev_priv = dev->dev_private; -- int nbox = exec->num_cliprects; -- int i = 0, count; -- uint32_t exec_start, exec_len; -- RING_LOCALS; -- -- exec_start = (uint32_t) exec_offset + exec->batch_start_offset; -- exec_len = (uint32_t) exec->batch_len; -- -- trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1); -- -- count = nbox ? nbox : 1; -- -- for (i = 0; i < count; i++) { -- if (i < nbox) { -- int ret = i915_emit_box(dev, cliprects, i, -- exec->DR1, exec->DR4); -- if (ret) -- return ret; -- } -- -- if (IS_I830(dev) || IS_845G(dev)) { -- BEGIN_LP_RING(4); -- OUT_RING(MI_BATCH_BUFFER); -- OUT_RING(exec_start | MI_BATCH_NON_SECURE); -- OUT_RING(exec_start + exec_len - 4); -- OUT_RING(0); -- ADVANCE_LP_RING(); -- } else { -- BEGIN_LP_RING(2); -- if (IS_I965G(dev)) { -- OUT_RING(MI_BATCH_BUFFER_START | -- (2 << 6) | -- MI_BATCH_NON_SECURE_I965); -- OUT_RING(exec_start); -- } else { -- OUT_RING(MI_BATCH_BUFFER_START | -- (2 << 6)); -- OUT_RING(exec_start | MI_BATCH_NON_SECURE); -- } -- ADVANCE_LP_RING(); -- } -- } -- -- /* XXX breadcrumb */ -- return 0; --} -- - /* Throttle our rendering by waiting until the ring has completed our requests - * emitted over 20 msec ago. 
- * -@@ -3620,7 +3506,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) - if (time_after_eq(request->emitted_jiffies, recent_enough)) - break; - -- ret = i915_wait_request(dev, request->seqno); -+ ret = i915_wait_request(dev, request->seqno, request->ring); - if (ret != 0) - break; - } -@@ -3777,10 +3663,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, - uint32_t seqno, flush_domains, reloc_index; - int pin_tries, flips; - -+ struct intel_ring_buffer *ring = NULL; -+ - #if WATCH_EXEC - DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", - (int) args->buffers_ptr, args->buffer_count, args->batch_len); - #endif -+ if (args->flags & I915_EXEC_BSD) { -+ if (!HAS_BSD(dev)) { -+ DRM_ERROR("execbuf with wrong flag\n"); -+ return -EINVAL; -+ } -+ ring = &dev_priv->bsd_ring; -+ } else { -+ ring = &dev_priv->render_ring; -+ } -+ - - if (args->buffer_count < 1) { - DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); -@@ -3893,11 +3791,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, - if (ret != -ENOSPC || pin_tries >= 1) { - if (ret != -ERESTARTSYS) { - unsigned long long total_size = 0; -- for (i = 0; i < args->buffer_count; i++) -+ int num_fences = 0; -+ for (i = 0; i < args->buffer_count; i++) { -+ obj_priv = object_list[i]->driver_private; -+ - total_size += object_list[i]->size; -- DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n", -+ num_fences += -+ exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE && -+ obj_priv->tiling_mode != I915_TILING_NONE; -+ } -+ DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n", - pinned+1, args->buffer_count, -- total_size, ret); -+ total_size, num_fences, -+ ret); - DRM_ERROR("%d objects [%d pinned], " - "%d object bytes [%d pinned], " - "%d/%d gtt bytes\n", -@@ -3967,9 +3873,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, - i915_gem_flush(dev, - dev->invalidate_domains, - dev->flush_domains); -- if (dev->flush_domains & I915_GEM_GPU_DOMAINS) -+ if (dev->flush_domains & I915_GEM_GPU_DOMAINS) { - (void)i915_add_request(dev, file_priv, -- dev->flush_domains); -+ dev->flush_domains, -+ &dev_priv->render_ring); -+ -+ if (HAS_BSD(dev)) -+ (void)i915_add_request(dev, file_priv, -+ dev->flush_domains, -+ &dev_priv->bsd_ring); -+ } - } - - for (i = 0; i < args->buffer_count; i++) { -@@ -4006,7 +3919,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, - #endif - - /* Exec the batchbuffer */ -- ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset); -+ ret = ring->dispatch_gem_execbuffer(dev, ring, args, -+ cliprects, exec_offset); - if (ret) { - DRM_ERROR("dispatch failed %d\n", ret); - goto err; -@@ -4016,7 +3930,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, - * Ensure that the commands in the batch buffer are - * finished before the interrupt fires - */ -- flush_domains = i915_retire_commands(dev); -+ flush_domains = i915_retire_commands(dev, ring); - - i915_verify_inactive(dev, __FILE__, __LINE__); - -@@ -4027,12 +3941,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, - * *some* interrupts representing completion of buffers that we can - * wait on when trying to clear up gtt space). 
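The execbuffer hunk above selects the BSD ring when userspace passes I915_EXEC_BSD and then submits only through the chosen ring's hooks (flush, dispatch_gem_execbuffer, and so on). A minimal sketch of that dispatch-table pattern, using invented names (demo_ring, DEMO_EXEC_BSD) and no hardware access, might look like this:

#include <stdio.h>

/* Hypothetical, cut-down per-ring operations table; the real
 * intel_ring_buffer carries many more hooks plus hardware state. */
struct demo_ring {
    const char *name;
    void (*flush)(struct demo_ring *ring);
    int  (*dispatch)(struct demo_ring *ring, unsigned int batch_start);
};

static void generic_flush(struct demo_ring *ring)
{
    printf("%s: flush caches\n", ring->name);
}

static int generic_dispatch(struct demo_ring *ring, unsigned int batch_start)
{
    printf("%s: start batch at 0x%x\n", ring->name, batch_start);
    return 0;
}

#define DEMO_EXEC_BSD 0x1   /* stand-in for the I915_EXEC_BSD flag */

int main(void)
{
    struct demo_ring render = { "render", generic_flush, generic_dispatch };
    struct demo_ring bsd    = { "bsd",    generic_flush, generic_dispatch };

    unsigned int flags = DEMO_EXEC_BSD;
    /* Pick a ring the same way the patched execbuffer path does,
     * then go through that ring's function pointers only. */
    struct demo_ring *ring = (flags & DEMO_EXEC_BSD) ? &bsd : &render;

    ring->flush(ring);
    return ring->dispatch(ring, 0x1000);
}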
- */ -- seqno = i915_add_request(dev, file_priv, flush_domains); -+ seqno = i915_add_request(dev, file_priv, flush_domains, ring); - BUG_ON(seqno == 0); - for (i = 0; i < args->buffer_count; i++) { - struct drm_gem_object *obj = object_list[i]; -+ obj_priv = to_intel_bo(obj); - -- i915_gem_object_move_to_active(obj, seqno); -+ i915_gem_object_move_to_active(obj, seqno, ring); - #if WATCH_LRU - DRM_INFO("%s: move to exec list %p\n", __func__, obj); - #endif -@@ -4144,7 +4059,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, - exec2.DR4 = args->DR4; - exec2.num_cliprects = args->num_cliprects; - exec2.cliprects_ptr = args->cliprects_ptr; -- exec2.flags = 0; -+ exec2.flags = I915_EXEC_RENDER; - - ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); - if (!ret) { -@@ -4230,7 +4145,20 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int ret; - -+ BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); -+ - i915_verify_inactive(dev, __FILE__, __LINE__); -+ -+ if (obj_priv->gtt_space != NULL) { -+ if (alignment == 0) -+ alignment = i915_gem_get_gtt_alignment(obj); -+ if (obj_priv->gtt_offset & (alignment - 1)) { -+ ret = i915_gem_object_unbind(obj); -+ if (ret) -+ return ret; -+ } -+ } -+ - if (obj_priv->gtt_space == NULL) { - ret = i915_gem_object_bind_to_gtt(obj, alignment); - if (ret) -@@ -4383,6 +4311,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, - struct drm_i915_gem_busy *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; -+ drm_i915_private_t *dev_priv = dev->dev_private; - - obj = drm_gem_object_lookup(dev, file_priv, args->handle); - if (obj == NULL) { -@@ -4397,7 +4326,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, - * actually unmasked, and our working set ends up being larger than - * required. - */ -- i915_gem_retire_requests(dev); -+ i915_gem_retire_requests(dev, &dev_priv->render_ring); -+ -+ if (HAS_BSD(dev)) -+ i915_gem_retire_requests(dev, &dev_priv->bsd_ring); - - obj_priv = to_intel_bo(obj); - /* Don't count being on the flushing list against the object being -@@ -4471,34 +4403,38 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, - return 0; - } - --int i915_gem_init_object(struct drm_gem_object *obj) -+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, -+ size_t size) - { -- struct drm_i915_gem_object *obj_priv; -+ struct drm_i915_gem_object *obj; - -- obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL); -- if (obj_priv == NULL) -- return -ENOMEM; -+ obj = kzalloc(sizeof(*obj), GFP_KERNEL); -+ if (obj == NULL) -+ return NULL; - -- /* -- * We've just allocated pages from the kernel, -- * so they've just been written by the CPU with -- * zeros. They'll need to be clflushed before we -- * use them with the GPU. 
-- */ -- obj->write_domain = I915_GEM_DOMAIN_CPU; -- obj->read_domains = I915_GEM_DOMAIN_CPU; -+ if (drm_gem_object_init(dev, &obj->base, size) != 0) { -+ kfree(obj); -+ return NULL; -+ } - -- obj_priv->agp_type = AGP_USER_MEMORY; -+ obj->base.write_domain = I915_GEM_DOMAIN_CPU; -+ obj->base.read_domains = I915_GEM_DOMAIN_CPU; - -- obj->driver_private = obj_priv; -- obj_priv->obj = obj; -- obj_priv->fence_reg = I915_FENCE_REG_NONE; -- INIT_LIST_HEAD(&obj_priv->list); -- INIT_LIST_HEAD(&obj_priv->gpu_write_list); -- INIT_LIST_HEAD(&obj_priv->fence_list); -- obj_priv->madv = I915_MADV_WILLNEED; -+ obj->agp_type = AGP_USER_MEMORY; -+ obj->base.driver_private = NULL; -+ obj->fence_reg = I915_FENCE_REG_NONE; -+ INIT_LIST_HEAD(&obj->list); -+ INIT_LIST_HEAD(&obj->gpu_write_list); -+ obj->madv = I915_MADV_WILLNEED; -+ -+ trace_i915_gem_object_create(&obj->base); - -- trace_i915_gem_object_create(obj); -+ return &obj->base; -+} -+ -+int i915_gem_init_object(struct drm_gem_object *obj) -+{ -+ BUG(); - - return 0; - } -@@ -4521,9 +4457,11 @@ void i915_gem_free_object(struct drm_gem_object *obj) - if (obj_priv->mmap_offset) - i915_gem_free_mmap_offset(obj); - -+ drm_gem_object_release(obj); -+ - kfree(obj_priv->page_cpu_valid); - kfree(obj_priv->bit_17); -- kfree(obj->driver_private); -+ kfree(obj_priv); - } - - /** Unbinds all inactive objects. */ -@@ -4536,9 +4474,9 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev) - struct drm_gem_object *obj; - int ret; - -- obj = list_first_entry(&dev_priv->mm.inactive_list, -- struct drm_i915_gem_object, -- list)->obj; -+ obj = &list_first_entry(&dev_priv->mm.inactive_list, -+ struct drm_i915_gem_object, -+ list)->base; - - ret = i915_gem_object_unbind(obj); - if (ret != 0) { -@@ -4558,7 +4496,10 @@ i915_gem_idle(struct drm_device *dev) - - mutex_lock(&dev->struct_mutex); - -- if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) { -+ if (dev_priv->mm.suspended || -+ (dev_priv->render_ring.gem_object == NULL) || -+ (HAS_BSD(dev) && -+ dev_priv->bsd_ring.gem_object == NULL)) { - mutex_unlock(&dev->struct_mutex); - return 0; - } -@@ -4608,7 +4549,7 @@ i915_gem_init_pipe_control(struct drm_device *dev) - struct drm_i915_gem_object *obj_priv; - int ret; - -- obj = drm_gem_object_alloc(dev, 4096); -+ obj = i915_gem_alloc_object(dev, 4096); - if (obj == NULL) { - DRM_ERROR("Failed to allocate seqno page\n"); - ret = -ENOMEM; -@@ -4639,71 +4580,6 @@ err: - return ret; - } - --static int --i915_gem_init_hws(struct drm_device *dev) --{ -- drm_i915_private_t *dev_priv = dev->dev_private; -- struct drm_gem_object *obj; -- struct drm_i915_gem_object *obj_priv; -- int ret; -- -- /* If we need a physical address for the status page, it's already -- * initialized at driver load time. 
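The allocation hunk above replaces the old driver_private allocation with i915_gem_alloc_object(), which embeds the drm_gem_object inside drm_i915_gem_object as ->base and recovers the container with to_intel_bo(); that is why obj_priv->obj becomes &obj_priv->base throughout. A small stand-alone sketch of that embed-plus-container_of pattern, with invented names (demo_base, demo_obj, to_demo_obj) rather than the DRM types:

#include <stddef.h>
#include <stdio.h>

/* Generic base object, loosely in the role of struct drm_gem_object. */
struct demo_base {
    size_t size;
};

/* Driver object embedding the base, as drm_i915_gem_object now
 * embeds its drm_gem_object as ->base. */
struct demo_obj {
    struct demo_base base;
    int dirty;
};

/* Recover the containing driver object from a base pointer,
 * in the role of to_intel_bo(). */
#define to_demo_obj(b) \
    ((struct demo_obj *)((char *)(b) - offsetof(struct demo_obj, base)))

int main(void)
{
    struct demo_obj obj = { .base = { .size = 4096 }, .dirty = 1 };
    struct demo_base *base = &obj.base;   /* what core code sees */

    struct demo_obj *back = to_demo_obj(base);
    printf("size=%zu dirty=%d\n", back->base.size, back->dirty);
    return 0;
}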
-- */ -- if (!I915_NEED_GFX_HWS(dev)) -- return 0; -- -- obj = drm_gem_object_alloc(dev, 4096); -- if (obj == NULL) { -- DRM_ERROR("Failed to allocate status page\n"); -- ret = -ENOMEM; -- goto err; -- } -- obj_priv = to_intel_bo(obj); -- obj_priv->agp_type = AGP_USER_CACHED_MEMORY; -- -- ret = i915_gem_object_pin(obj, 4096); -- if (ret != 0) { -- drm_gem_object_unreference(obj); -- goto err_unref; -- } -- -- dev_priv->status_gfx_addr = obj_priv->gtt_offset; -- -- dev_priv->hw_status_page = kmap(obj_priv->pages[0]); -- if (dev_priv->hw_status_page == NULL) { -- DRM_ERROR("Failed to map status page.\n"); -- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); -- ret = -EINVAL; -- goto err_unpin; -- } -- -- if (HAS_PIPE_CONTROL(dev)) { -- ret = i915_gem_init_pipe_control(dev); -- if (ret) -- goto err_unpin; -- } -- -- dev_priv->hws_obj = obj; -- memset(dev_priv->hw_status_page, 0, PAGE_SIZE); -- if (IS_GEN6(dev)) { -- I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr); -- I915_READ(HWS_PGA_GEN6); /* posting read */ -- } else { -- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); -- I915_READ(HWS_PGA); /* posting read */ -- } -- DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); -- -- return 0; -- --err_unpin: -- i915_gem_object_unpin(obj); --err_unref: -- drm_gem_object_unreference(obj); --err: -- return 0; --} - - static void - i915_gem_cleanup_pipe_control(struct drm_device *dev) -@@ -4722,146 +4598,46 @@ i915_gem_cleanup_pipe_control(struct drm_device *dev) - dev_priv->seqno_page = NULL; - } - --static void --i915_gem_cleanup_hws(struct drm_device *dev) --{ -- drm_i915_private_t *dev_priv = dev->dev_private; -- struct drm_gem_object *obj; -- struct drm_i915_gem_object *obj_priv; -- -- if (dev_priv->hws_obj == NULL) -- return; -- -- obj = dev_priv->hws_obj; -- obj_priv = to_intel_bo(obj); -- -- kunmap(obj_priv->pages[0]); -- i915_gem_object_unpin(obj); -- drm_gem_object_unreference(obj); -- dev_priv->hws_obj = NULL; -- -- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); -- dev_priv->hw_status_page = NULL; -- -- if (HAS_PIPE_CONTROL(dev)) -- i915_gem_cleanup_pipe_control(dev); -- -- /* Write high address into HWS_PGA when disabling. */ -- I915_WRITE(HWS_PGA, 0x1ffff000); --} -- - int - i915_gem_init_ringbuffer(struct drm_device *dev) - { - drm_i915_private_t *dev_priv = dev->dev_private; -- struct drm_gem_object *obj; -- struct drm_i915_gem_object *obj_priv; -- drm_i915_ring_buffer_t *ring = &dev_priv->ring; - int ret; -- u32 head; - -- ret = i915_gem_init_hws(dev); -- if (ret != 0) -- return ret; -+ dev_priv->render_ring = render_ring; - -- obj = drm_gem_object_alloc(dev, 128 * 1024); -- if (obj == NULL) { -- DRM_ERROR("Failed to allocate ringbuffer\n"); -- i915_gem_cleanup_hws(dev); -- return -ENOMEM; -- } -- obj_priv = to_intel_bo(obj); -- -- ret = i915_gem_object_pin(obj, 4096); -- if (ret != 0) { -- drm_gem_object_unreference(obj); -- i915_gem_cleanup_hws(dev); -- return ret; -+ if (!I915_NEED_GFX_HWS(dev)) { -+ dev_priv->render_ring.status_page.page_addr -+ = dev_priv->status_page_dmah->vaddr; -+ memset(dev_priv->render_ring.status_page.page_addr, -+ 0, PAGE_SIZE); - } - -- /* Set up the kernel mapping for the ring. 
*/ -- ring->Size = obj->size; -- -- ring->map.offset = dev->agp->base + obj_priv->gtt_offset; -- ring->map.size = obj->size; -- ring->map.type = 0; -- ring->map.flags = 0; -- ring->map.mtrr = 0; -- -- drm_core_ioremap_wc(&ring->map, dev); -- if (ring->map.handle == NULL) { -- DRM_ERROR("Failed to map ringbuffer.\n"); -- memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); -- i915_gem_object_unpin(obj); -- drm_gem_object_unreference(obj); -- i915_gem_cleanup_hws(dev); -- return -EINVAL; -- } -- ring->ring_obj = obj; -- ring->virtual_start = ring->map.handle; -- -- /* Stop the ring if it's running. */ -- I915_WRITE(PRB0_CTL, 0); -- I915_WRITE(PRB0_TAIL, 0); -- I915_WRITE(PRB0_HEAD, 0); -- -- /* Initialize the ring. */ -- I915_WRITE(PRB0_START, obj_priv->gtt_offset); -- head = I915_READ(PRB0_HEAD) & HEAD_ADDR; -- -- /* G45 ring initialization fails to reset head to zero */ -- if (head != 0) { -- DRM_ERROR("Ring head not reset to zero " -- "ctl %08x head %08x tail %08x start %08x\n", -- I915_READ(PRB0_CTL), -- I915_READ(PRB0_HEAD), -- I915_READ(PRB0_TAIL), -- I915_READ(PRB0_START)); -- I915_WRITE(PRB0_HEAD, 0); -- -- DRM_ERROR("Ring head forced to zero " -- "ctl %08x head %08x tail %08x start %08x\n", -- I915_READ(PRB0_CTL), -- I915_READ(PRB0_HEAD), -- I915_READ(PRB0_TAIL), -- I915_READ(PRB0_START)); -- } -- -- I915_WRITE(PRB0_CTL, -- ((obj->size - 4096) & RING_NR_PAGES) | -- RING_NO_REPORT | -- RING_VALID); -- -- head = I915_READ(PRB0_HEAD) & HEAD_ADDR; -- -- /* If the head is still not zero, the ring is dead */ -- if (head != 0) { -- DRM_ERROR("Ring initialization failed " -- "ctl %08x head %08x tail %08x start %08x\n", -- I915_READ(PRB0_CTL), -- I915_READ(PRB0_HEAD), -- I915_READ(PRB0_TAIL), -- I915_READ(PRB0_START)); -- return -EIO; -+ if (HAS_PIPE_CONTROL(dev)) { -+ ret = i915_gem_init_pipe_control(dev); -+ if (ret) -+ return ret; - } - -- /* Update our cache of the ring state */ -- if (!drm_core_check_feature(dev, DRIVER_MODESET)) -- i915_kernel_lost_context(dev); -- else { -- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; -- ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; -- ring->space = ring->head - (ring->tail + 8); -- if (ring->space < 0) -- ring->space += ring->Size; -- } -+ ret = intel_init_ring_buffer(dev, &dev_priv->render_ring); -+ if (ret) -+ goto cleanup_pipe_control; - -- if (IS_I9XX(dev) && !IS_GEN3(dev)) { -- I915_WRITE(MI_MODE, -- (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); -+ if (HAS_BSD(dev)) { -+ dev_priv->bsd_ring = bsd_ring; -+ ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring); -+ if (ret) -+ goto cleanup_render_ring; - } - - return 0; -+ -+cleanup_render_ring: -+ intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); -+cleanup_pipe_control: -+ if (HAS_PIPE_CONTROL(dev)) -+ i915_gem_cleanup_pipe_control(dev); -+ return ret; - } - - void -@@ -4869,17 +4645,11 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) - { - drm_i915_private_t *dev_priv = dev->dev_private; - -- if (dev_priv->ring.ring_obj == NULL) -- return; -- -- drm_core_ioremapfree(&dev_priv->ring.map, dev); -- -- i915_gem_object_unpin(dev_priv->ring.ring_obj); -- drm_gem_object_unreference(dev_priv->ring.ring_obj); -- dev_priv->ring.ring_obj = NULL; -- memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); -- -- i915_gem_cleanup_hws(dev); -+ intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); -+ if (HAS_BSD(dev)) -+ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); -+ if (HAS_PIPE_CONTROL(dev)) -+ i915_gem_cleanup_pipe_control(dev); - } - - int -@@ -4907,12 +4677,14 @@ 
i915_gem_entervt_ioctl(struct drm_device *dev, void *data, - } - - spin_lock(&dev_priv->mm.active_list_lock); -- BUG_ON(!list_empty(&dev_priv->mm.active_list)); -+ BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); -+ BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list)); - spin_unlock(&dev_priv->mm.active_list_lock); - - BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); - BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); -- BUG_ON(!list_empty(&dev_priv->mm.request_list)); -+ BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); -+ BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list)); - mutex_unlock(&dev->struct_mutex); - - drm_irq_install(dev); -@@ -4951,16 +4723,20 @@ i915_gem_load(struct drm_device *dev) - drm_i915_private_t *dev_priv = dev->dev_private; - - spin_lock_init(&dev_priv->mm.active_list_lock); -- INIT_LIST_HEAD(&dev_priv->mm.active_list); - INIT_LIST_HEAD(&dev_priv->mm.flushing_list); - INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); - INIT_LIST_HEAD(&dev_priv->mm.inactive_list); -- INIT_LIST_HEAD(&dev_priv->mm.request_list); - INIT_LIST_HEAD(&dev_priv->mm.fence_list); -+ INIT_LIST_HEAD(&dev_priv->render_ring.active_list); -+ INIT_LIST_HEAD(&dev_priv->render_ring.request_list); -+ if (HAS_BSD(dev)) { -+ INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list); -+ INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list); -+ } -+ for (i = 0; i < 16; i++) -+ INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); - INIT_DELAYED_WORK(&dev_priv->mm.retire_work, - i915_gem_retire_work_handler); -- dev_priv->mm.next_gem_seqno = 1; -- - spin_lock(&shrink_list_lock); - list_add(&dev_priv->mm.shrink_list, &shrink_list); - spin_unlock(&shrink_list_lock); -@@ -5185,6 +4961,22 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv) - } - - static int -+i915_gpu_is_active(struct drm_device *dev) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ int lists_empty; -+ -+ spin_lock(&dev_priv->mm.active_list_lock); -+ lists_empty = list_empty(&dev_priv->mm.flushing_list) && -+ list_empty(&dev_priv->render_ring.active_list); -+ if (HAS_BSD(dev)) -+ lists_empty &= list_empty(&dev_priv->bsd_ring.active_list); -+ spin_unlock(&dev_priv->mm.active_list_lock); -+ -+ return !lists_empty; -+} -+ -+static int - i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) - { - drm_i915_private_t *dev_priv, *next_dev; -@@ -5213,6 +5005,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) - - spin_lock(&shrink_list_lock); - -+rescan: - /* first scan for clean buffers */ - list_for_each_entry_safe(dev_priv, next_dev, - &shrink_list, mm.shrink_list) { -@@ -5222,14 +5015,16 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) - continue; - - spin_unlock(&shrink_list_lock); -+ i915_gem_retire_requests(dev, &dev_priv->render_ring); - -- i915_gem_retire_requests(dev); -+ if (HAS_BSD(dev)) -+ i915_gem_retire_requests(dev, &dev_priv->bsd_ring); - - list_for_each_entry_safe(obj_priv, next_obj, - &dev_priv->mm.inactive_list, - list) { - if (i915_gem_object_is_purgeable(obj_priv)) { -- i915_gem_object_unbind(obj_priv->obj); -+ i915_gem_object_unbind(&obj_priv->base); - if (--nr_to_scan <= 0) - break; - } -@@ -5258,7 +5053,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) - &dev_priv->mm.inactive_list, - list) { - if (nr_to_scan > 0) { -- i915_gem_object_unbind(obj_priv->obj); -+ i915_gem_object_unbind(&obj_priv->base); - nr_to_scan--; - } else - cnt++; -@@ -5270,6 +5065,36 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) - would_deadlock = 0; - } - -+ if (nr_to_scan) { -+ int 
active = 0; -+ -+ /* -+ * We are desperate for pages, so as a last resort, wait -+ * for the GPU to finish and discard whatever we can. -+ * This has a dramatic impact to reduce the number of -+ * OOM-killer events whilst running the GPU aggressively. -+ */ -+ list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { -+ struct drm_device *dev = dev_priv->dev; -+ -+ if (!mutex_trylock(&dev->struct_mutex)) -+ continue; -+ -+ spin_unlock(&shrink_list_lock); -+ -+ if (i915_gpu_is_active(dev)) { -+ i915_gpu_idle(dev); -+ active++; -+ } -+ -+ spin_lock(&shrink_list_lock); -+ mutex_unlock(&dev->struct_mutex); -+ } -+ -+ if (active) -+ goto rescan; -+ } -+ - spin_unlock(&shrink_list_lock); - - if (would_deadlock) -diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c -index 35507cf..80f380b 100644 ---- a/drivers/gpu/drm/i915/i915_gem_debug.c -+++ b/drivers/gpu/drm/i915/i915_gem_debug.c -@@ -39,7 +39,7 @@ i915_verify_inactive(struct drm_device *dev, char *file, int line) - struct drm_i915_gem_object *obj_priv; - - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { -- obj = obj_priv->obj; -+ obj = &obj_priv->base; - if (obj_priv->pin_count || obj_priv->active || - (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | - I915_GEM_DOMAIN_GTT))) -diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c -index 4bdccef..4b7c49d 100644 ---- a/drivers/gpu/drm/i915/i915_gem_tiling.c -+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c -@@ -283,6 +283,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, - return -EINVAL; - } - -+ if (obj_priv->pin_count) { -+ drm_gem_object_unreference_unlocked(obj); -+ return -EBUSY; -+ } -+ - if (args->tiling_mode == I915_TILING_NONE) { - args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; - args->stride = 0; -diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c -index df6a9cd..2479be0 100644 ---- a/drivers/gpu/drm/i915/i915_irq.c -+++ b/drivers/gpu/drm/i915/i915_irq.c -@@ -53,7 +53,7 @@ - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - - /** Interrupts that we mask and unmask at runtime. 
*/ --#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) -+#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT) - - #define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\ - PIPE_VBLANK_INTERRUPT_STATUS) -@@ -74,7 +74,7 @@ ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) - } - } - --static inline void -+void - ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) - { - if ((dev_priv->gt_irq_mask_reg & mask) != mask) { -@@ -115,7 +115,7 @@ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) - } - } - --static inline void -+void - i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) - { - if ((dev_priv->irq_mask_reg & mask) != mask) { -@@ -169,9 +169,13 @@ void intel_enable_asle (struct drm_device *dev) - - if (HAS_PCH_SPLIT(dev)) - ironlake_enable_display_irq(dev_priv, DE_GSE); -- else -+ else { - i915_enable_pipestat(dev_priv, 1, - I915_LEGACY_BLC_EVENT_ENABLE); -+ if (IS_I965G(dev)) -+ i915_enable_pipestat(dev_priv, 0, -+ I915_LEGACY_BLC_EVENT_ENABLE); -+ } - } - - /** -@@ -256,28 +260,27 @@ static void i915_hotplug_work_func(struct work_struct *work) - hotplug_work); - struct drm_device *dev = dev_priv->dev; - struct drm_mode_config *mode_config = &dev->mode_config; -- struct drm_connector *connector; -+ struct drm_encoder *encoder; - -- if (mode_config->num_connector) { -- list_for_each_entry(connector, &mode_config->connector_list, head) { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ if (mode_config->num_encoder) { -+ list_for_each_entry(encoder, &mode_config->encoder_list, head) { -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - - if (intel_encoder->hot_plug) - (*intel_encoder->hot_plug) (intel_encoder); - } - } - /* Just fire off a uevent and let userspace tell us what to do */ -- drm_sysfs_hotplug_event(dev); -+ drm_helper_hpd_irq_event(dev); - } - - static void i915_handle_rps_change(struct drm_device *dev) - { - drm_i915_private_t *dev_priv = dev->dev_private; - u32 busy_up, busy_down, max_avg, min_avg; -- u16 rgvswctl; - u8 new_delay = dev_priv->cur_delay; - -- I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG); -+ I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); - busy_up = I915_READ(RCPREVBSYTUPAVG); - busy_down = I915_READ(RCPREVBSYTDNAVG); - max_avg = I915_READ(RCBMAXAVG); -@@ -296,27 +299,8 @@ static void i915_handle_rps_change(struct drm_device *dev) - new_delay = dev_priv->min_delay; - } - -- DRM_DEBUG("rps change requested: %d -> %d\n", -- dev_priv->cur_delay, new_delay); -- -- rgvswctl = I915_READ(MEMSWCTL); -- if (rgvswctl & MEMCTL_CMD_STS) { -- DRM_ERROR("gpu busy, RCS change rejected\n"); -- return; /* still busy with another command */ -- } -- -- /* Program the new state */ -- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | -- (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; -- I915_WRITE(MEMSWCTL, rgvswctl); -- POSTING_READ(MEMSWCTL); -- -- rgvswctl |= MEMCTL_CMD_STS; -- I915_WRITE(MEMSWCTL, rgvswctl); -- -- dev_priv->cur_delay = new_delay; -- -- DRM_DEBUG("rps changed\n"); -+ if (ironlake_set_drps(dev, new_delay)) -+ dev_priv->cur_delay = new_delay; - - return; - } -@@ -327,6 +311,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) - int ret = IRQ_NONE; - u32 de_iir, gt_iir, de_ier, pch_iir; - struct drm_i915_master_private *master_priv; -+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring; - - /* disable master interrupt before clearing iir */ - de_ier = I915_READ(DEIER); -@@ -350,13 
+335,16 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) - } - - if (gt_iir & GT_PIPE_NOTIFY) { -- u32 seqno = i915_get_gem_seqno(dev); -- dev_priv->mm.irq_gem_seqno = seqno; -+ u32 seqno = render_ring->get_gem_seqno(dev, render_ring); -+ render_ring->irq_gem_seqno = seqno; - trace_i915_gem_request_complete(dev, seqno); -- DRM_WAKEUP(&dev_priv->irq_queue); -+ DRM_WAKEUP(&dev_priv->render_ring.irq_queue); - dev_priv->hangcheck_count = 0; - mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); - } -+ if (gt_iir & GT_BSD_USER_INTERRUPT) -+ DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); -+ - - if (de_iir & DE_GSE) - ironlake_opregion_gse_intr(dev); -@@ -384,7 +372,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) - } - - if (de_iir & DE_PCU_EVENT) { -- I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS)); -+ I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); - i915_handle_rps_change(dev); - } - -@@ -532,17 +520,18 @@ i915_ringbuffer_last_batch(struct drm_device *dev) - */ - bbaddr = 0; - head = I915_READ(PRB0_HEAD) & HEAD_ADDR; -- ring = (u32 *)(dev_priv->ring.virtual_start + head); -+ ring = (u32 *)(dev_priv->render_ring.virtual_start + head); - -- while (--ring >= (u32 *)dev_priv->ring.virtual_start) { -+ while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { - bbaddr = i915_get_bbaddr(dev, ring); - if (bbaddr) - break; - } - - if (bbaddr == 0) { -- ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size); -- while (--ring >= (u32 *)dev_priv->ring.virtual_start) { -+ ring = (u32 *)(dev_priv->render_ring.virtual_start -+ + dev_priv->render_ring.size); -+ while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { - bbaddr = i915_get_bbaddr(dev, ring); - if (bbaddr) - break; -@@ -583,7 +572,7 @@ static void i915_capture_error_state(struct drm_device *dev) - return; - } - -- error->seqno = i915_get_gem_seqno(dev); -+ error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring); - error->eir = I915_READ(EIR); - error->pgtbl_er = I915_READ(PGTBL_ER); - error->pipeastat = I915_READ(PIPEASTAT); -@@ -611,8 +600,10 @@ static void i915_capture_error_state(struct drm_device *dev) - batchbuffer[0] = NULL; - batchbuffer[1] = NULL; - count = 0; -- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { -- struct drm_gem_object *obj = obj_priv->obj; -+ list_for_each_entry(obj_priv, -+ &dev_priv->render_ring.active_list, list) { -+ -+ struct drm_gem_object *obj = &obj_priv->base; - - if (batchbuffer[0] == NULL && - bbaddr >= obj_priv->gtt_offset && -@@ -635,7 +626,8 @@ static void i915_capture_error_state(struct drm_device *dev) - error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); - - /* Record the ringbuffer */ -- error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj); -+ error->ringbuffer = i915_error_object_create(dev, -+ dev_priv->render_ring.gem_object); - - /* Record buffers on the active list. 
*/ - error->active_bo = NULL; -@@ -647,8 +639,9 @@ static void i915_capture_error_state(struct drm_device *dev) - - if (error->active_bo) { - int i = 0; -- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { -- struct drm_gem_object *obj = obj_priv->obj; -+ list_for_each_entry(obj_priv, -+ &dev_priv->render_ring.active_list, list) { -+ struct drm_gem_object *obj = &obj_priv->base; - - error->active_bo[i].size = obj->size; - error->active_bo[i].name = obj->name; -@@ -699,24 +692,13 @@ void i915_destroy_error_state(struct drm_device *dev) - i915_error_state_free(dev, error); - } - --/** -- * i915_handle_error - handle an error interrupt -- * @dev: drm device -- * -- * Do some basic checking of regsiter state at error interrupt time and -- * dump it to the syslog. Also call i915_capture_error_state() to make -- * sure we get a record and make it available in debugfs. Fire a uevent -- * so userspace knows something bad happened (should trigger collection -- * of a ring dump etc.). -- */ --static void i915_handle_error(struct drm_device *dev, bool wedged) -+static void i915_report_and_clear_eir(struct drm_device *dev) - { - struct drm_i915_private *dev_priv = dev->dev_private; - u32 eir = I915_READ(EIR); -- u32 pipea_stats = I915_READ(PIPEASTAT); -- u32 pipeb_stats = I915_READ(PIPEBSTAT); - -- i915_capture_error_state(dev); -+ if (!eir) -+ return; - - printk(KERN_ERR "render error detected, EIR: 0x%08x\n", - eir); -@@ -762,6 +744,9 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) - } - - if (eir & I915_ERROR_MEMORY_REFRESH) { -+ u32 pipea_stats = I915_READ(PIPEASTAT); -+ u32 pipeb_stats = I915_READ(PIPEBSTAT); -+ - printk(KERN_ERR "memory refresh error\n"); - printk(KERN_ERR "PIPEASTAT: 0x%08x\n", - pipea_stats); -@@ -818,6 +803,24 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) - I915_WRITE(EMR, I915_READ(EMR) | eir); - I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); - } -+} -+ -+/** -+ * i915_handle_error - handle an error interrupt -+ * @dev: drm device -+ * -+ * Do some basic checking of regsiter state at error interrupt time and -+ * dump it to the syslog. Also call i915_capture_error_state() to make -+ * sure we get a record and make it available in debugfs. Fire a uevent -+ * so userspace knows something bad happened (should trigger collection -+ * of a ring dump etc.). 
-+ */ -+static void i915_handle_error(struct drm_device *dev, bool wedged) -+{ -+ struct drm_i915_private *dev_priv = dev->dev_private; -+ -+ i915_capture_error_state(dev); -+ i915_report_and_clear_eir(dev); - - if (wedged) { - atomic_set(&dev_priv->mm.wedged, 1); -@@ -825,7 +828,7 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) - /* - * Wakeup waiting processes so they don't hang - */ -- DRM_WAKEUP(&dev_priv->irq_queue); -+ DRM_WAKEUP(&dev_priv->render_ring.irq_queue); - } - - queue_work(dev_priv->wq, &dev_priv->error_work); -@@ -844,6 +847,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) - unsigned long irqflags; - int irq_received; - int ret = IRQ_NONE; -+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring; - - atomic_inc(&dev_priv->irq_received); - -@@ -924,14 +928,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) - } - - if (iir & I915_USER_INTERRUPT) { -- u32 seqno = i915_get_gem_seqno(dev); -- dev_priv->mm.irq_gem_seqno = seqno; -+ u32 seqno = -+ render_ring->get_gem_seqno(dev, render_ring); -+ render_ring->irq_gem_seqno = seqno; - trace_i915_gem_request_complete(dev, seqno); -- DRM_WAKEUP(&dev_priv->irq_queue); -+ DRM_WAKEUP(&dev_priv->render_ring.irq_queue); - dev_priv->hangcheck_count = 0; - mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); - } - -+ if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) -+ DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); -+ - if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { - intel_prepare_page_flip(dev, 0); - if (dev_priv->flip_pending_is_done) -@@ -950,7 +958,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) - intel_finish_page_flip(dev, 1); - } - -- if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || -+ if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) || -+ (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || - (iir & I915_ASLE_INTERRUPT)) - opregion_asle_intr(dev); - -@@ -979,7 +988,6 @@ static int i915_emit_irq(struct drm_device * dev) - { - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; -- RING_LOCALS; - - i915_kernel_lost_context(dev); - -@@ -1001,43 +1009,13 @@ static int i915_emit_irq(struct drm_device * dev) - return dev_priv->counter; - } - --void i915_user_irq_get(struct drm_device *dev) --{ -- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; -- unsigned long irqflags; -- -- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); -- if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { -- if (HAS_PCH_SPLIT(dev)) -- ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); -- else -- i915_enable_irq(dev_priv, I915_USER_INTERRUPT); -- } -- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); --} -- --void i915_user_irq_put(struct drm_device *dev) --{ -- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; -- unsigned long irqflags; -- -- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); -- BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); -- if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { -- if (HAS_PCH_SPLIT(dev)) -- ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); -- else -- i915_disable_irq(dev_priv, I915_USER_INTERRUPT); -- } -- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); --} -- - void i915_trace_irq_get(struct drm_device *dev, u32 seqno) - { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; -+ struct intel_ring_buffer *render_ring = 
&dev_priv->render_ring; - - if (dev_priv->trace_irq_seqno == 0) -- i915_user_irq_get(dev); -+ render_ring->user_irq_get(dev, render_ring); - - dev_priv->trace_irq_seqno = seqno; - } -@@ -1047,6 +1025,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; - int ret = 0; -+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring; - - DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, - READ_BREADCRUMB(dev_priv)); -@@ -1060,10 +1039,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) - if (master_priv->sarea_priv) - master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; - -- i915_user_irq_get(dev); -- DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, -+ render_ring->user_irq_get(dev, render_ring); -+ DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ, - READ_BREADCRUMB(dev_priv) >= irq_nr); -- i915_user_irq_put(dev); -+ render_ring->user_irq_put(dev, render_ring); - - if (ret == -EBUSY) { - DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", -@@ -1082,7 +1061,7 @@ int i915_irq_emit(struct drm_device *dev, void *data, - drm_i915_irq_emit_t *emit = data; - int result; - -- if (!dev_priv || !dev_priv->ring.virtual_start) { -+ if (!dev_priv || !dev_priv->render_ring.virtual_start) { - DRM_ERROR("called with no initialization\n"); - return -EINVAL; - } -@@ -1228,9 +1207,12 @@ int i915_vblank_swap(struct drm_device *dev, void *data, - return -EINVAL; - } - --struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) { -+struct drm_i915_gem_request * -+i915_get_tail_request(struct drm_device *dev) -+{ - drm_i915_private_t *dev_priv = dev->dev_private; -- return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list); -+ return list_entry(dev_priv->render_ring.request_list.prev, -+ struct drm_i915_gem_request, list); - } - - /** -@@ -1255,8 +1237,10 @@ void i915_hangcheck_elapsed(unsigned long data) - acthd = I915_READ(ACTHD_I965); - - /* If all work is done then ACTHD clearly hasn't advanced. 
*/ -- if (list_empty(&dev_priv->mm.request_list) || -- i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) { -+ if (list_empty(&dev_priv->render_ring.request_list) || -+ i915_seqno_passed(i915_get_gem_seqno(dev, -+ &dev_priv->render_ring), -+ i915_get_tail_request(dev)->seqno)) { - dev_priv->hangcheck_count = 0; - return; - } -@@ -1309,7 +1293,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) - /* enable kind of interrupts always enabled */ - u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | - DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; -- u32 render_mask = GT_PIPE_NOTIFY; -+ u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; - u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | - SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; - -@@ -1323,7 +1307,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) - (void) I915_READ(DEIER); - - /* user interrupt should be enabled, but masked initial */ -- dev_priv->gt_irq_mask_reg = 0xffffffff; -+ dev_priv->gt_irq_mask_reg = ~render_mask; - dev_priv->gt_irq_enable_reg = render_mask; - - I915_WRITE(GTIIR, I915_READ(GTIIR)); -@@ -1386,7 +1370,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev) - u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; - u32 error_mask; - -- DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); -+ DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); -+ -+ if (HAS_BSD(dev)) -+ DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); - - dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; - -diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h -index 4cbc521..64b0a3a 100644 ---- a/drivers/gpu/drm/i915/i915_reg.h -+++ b/drivers/gpu/drm/i915/i915_reg.h -@@ -334,6 +334,7 @@ - #define I915_DEBUG_INTERRUPT (1<<2) - #define I915_USER_INTERRUPT (1<<1) - #define I915_ASLE_INTERRUPT (1<<0) -+#define I915_BSD_USER_INTERRUPT (1<<25) - #define EIR 0x020b0 - #define EMR 0x020b4 - #define ESR 0x020b8 -@@ -368,6 +369,36 @@ - #define ECO_GATING_CX_ONLY (1<<3) - #define ECO_FLIP_DONE (1<<0) - -+/* GEN6 interrupt control */ -+#define GEN6_RENDER_HWSTAM 0x2098 -+#define GEN6_RENDER_IMR 0x20a8 -+#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8) -+#define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7) -+#define GEN6_RENDER TIMEOUT_COUNTER_EXPIRED (1 << 6) -+#define GEN6_RENDER_L3_PARITY_ERROR (1 << 5) -+#define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4) -+#define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3) -+#define GEN6_RENDER_SYNC_STATUS (1 << 2) -+#define GEN6_RENDER_DEBUG_INTERRUPT (1 << 1) -+#define GEN6_RENDER_USER_INTERRUPT (1 << 0) -+ -+#define GEN6_BLITTER_HWSTAM 0x22098 -+#define GEN6_BLITTER_IMR 0x220a8 -+#define GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT (1 << 26) -+#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25) -+#define GEN6_BLITTER_SYNC_STATUS (1 << 24) -+#define GEN6_BLITTER_USER_INTERRUPT (1 << 22) -+/* -+ * BSD (bit stream decoder instruction and interrupt control register defines -+ * (G4X and Ironlake only) -+ */ -+ -+#define BSD_RING_TAIL 0x04030 -+#define BSD_RING_HEAD 0x04034 -+#define BSD_RING_START 0x04038 -+#define BSD_RING_CTL 0x0403c -+#define BSD_RING_ACTHD 0x04074 -+#define BSD_HWS_PGA 0x04080 - - /* - * Framebuffer compression (915+ only) -@@ -805,6 +836,10 @@ - #define DCC_CHANNEL_XOR_DISABLE (1 << 10) - #define DCC_CHANNEL_XOR_BIT_17 (1 << 9) - -+/** Pineview MCH register contains DDR3 setting */ -+#define CSHRDDR3CTL 0x101a8 -+#define CSHRDDR3CTL_DDR3 (1 << 2) -+ - 
/** 965 MCH register controlling DRAM channel configuration */ - #define C0DRB3 0x10206 - #define C1DRB3 0x10606 -@@ -826,6 +861,12 @@ - #define CLKCFG_MEM_800 (3 << 4) - #define CLKCFG_MEM_MASK (7 << 4) - -+#define TR1 0x11006 -+#define TSFS 0x11020 -+#define TSFS_SLOPE_MASK 0x0000ff00 -+#define TSFS_SLOPE_SHIFT 8 -+#define TSFS_INTR_MASK 0x000000ff -+ - #define CRSTANDVID 0x11100 - #define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ - #define PXVFREQ_PX_MASK 0x7f000000 -@@ -964,6 +1005,41 @@ - #define MEMSTAT_SRC_CTL_STDBY 3 - #define RCPREVBSYTUPAVG 0x113b8 - #define RCPREVBSYTDNAVG 0x113bc -+#define SDEW 0x1124c -+#define CSIEW0 0x11250 -+#define CSIEW1 0x11254 -+#define CSIEW2 0x11258 -+#define PEW 0x1125c -+#define DEW 0x11270 -+#define MCHAFE 0x112c0 -+#define CSIEC 0x112e0 -+#define DMIEC 0x112e4 -+#define DDREC 0x112e8 -+#define PEG0EC 0x112ec -+#define PEG1EC 0x112f0 -+#define GFXEC 0x112f4 -+#define RPPREVBSYTUPAVG 0x113b8 -+#define RPPREVBSYTDNAVG 0x113bc -+#define ECR 0x11600 -+#define ECR_GPFE (1<<31) -+#define ECR_IMONE (1<<30) -+#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */ -+#define OGW0 0x11608 -+#define OGW1 0x1160c -+#define EG0 0x11610 -+#define EG1 0x11614 -+#define EG2 0x11618 -+#define EG3 0x1161c -+#define EG4 0x11620 -+#define EG5 0x11624 -+#define EG6 0x11628 -+#define EG7 0x1162c -+#define PXW 0x11664 -+#define PXWL 0x11680 -+#define LCFUSE02 0x116c0 -+#define LCFUSE_HIV_MASK 0x000000ff -+#define CSIPLL0 0x12c10 -+#define DDRMPLL1 0X12c20 - #define PEG_BAND_GAP_DATA 0x14d68 - - /* -@@ -1055,7 +1131,6 @@ - #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) - #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) - #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ --#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f - - #define PORT_HOTPLUG_STAT 0x61114 - #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) -@@ -1764,6 +1839,14 @@ - #define DP_LINK_TRAIN_MASK (3 << 28) - #define DP_LINK_TRAIN_SHIFT 28 - -+/* CPT Link training mode */ -+#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8) -+#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8) -+#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8) -+#define DP_LINK_TRAIN_OFF_CPT (3 << 8) -+#define DP_LINK_TRAIN_MASK_CPT (7 << 8) -+#define DP_LINK_TRAIN_SHIFT_CPT 8 -+ - /* Signal voltages. 
These are mostly controlled by the other end */ - #define DP_VOLTAGE_0_4 (0 << 25) - #define DP_VOLTAGE_0_6 (1 << 25) -@@ -1924,7 +2007,10 @@ - /* Display & cursor control */ - - /* dithering flag on Ironlake */ --#define PIPE_ENABLE_DITHER (1 << 4) -+#define PIPE_ENABLE_DITHER (1 << 4) -+#define PIPE_DITHER_TYPE_MASK (3 << 2) -+#define PIPE_DITHER_TYPE_SPATIAL (0 << 2) -+#define PIPE_DITHER_TYPE_ST01 (1 << 2) - /* Pipe A */ - #define PIPEADSL 0x70000 - #define PIPEACONF 0x70008 -@@ -1988,15 +2074,24 @@ - - #define DSPFW1 0x70034 - #define DSPFW_SR_SHIFT 23 -+#define DSPFW_SR_MASK (0x1ff<<23) - #define DSPFW_CURSORB_SHIFT 16 -+#define DSPFW_CURSORB_MASK (0x3f<<16) - #define DSPFW_PLANEB_SHIFT 8 -+#define DSPFW_PLANEB_MASK (0x7f<<8) -+#define DSPFW_PLANEA_MASK (0x7f) - #define DSPFW2 0x70038 - #define DSPFW_CURSORA_MASK 0x00003f00 - #define DSPFW_CURSORA_SHIFT 8 -+#define DSPFW_PLANEC_MASK (0x7f) - #define DSPFW3 0x7003c - #define DSPFW_HPLL_SR_EN (1<<31) - #define DSPFW_CURSOR_SR_SHIFT 24 - #define PINEVIEW_SELF_REFRESH_EN (1<<30) -+#define DSPFW_CURSOR_SR_MASK (0x3f<<24) -+#define DSPFW_HPLL_CURSOR_SHIFT 16 -+#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) -+#define DSPFW_HPLL_SR_MASK (0x1ff) - - /* FIFO watermark sizes etc */ - #define G4X_FIFO_LINE_SIZE 64 -@@ -2023,6 +2118,43 @@ - #define PINEVIEW_CURSOR_DFT_WM 0 - #define PINEVIEW_CURSOR_GUARD_WM 5 - -+ -+/* define the Watermark register on Ironlake */ -+#define WM0_PIPEA_ILK 0x45100 -+#define WM0_PIPE_PLANE_MASK (0x7f<<16) -+#define WM0_PIPE_PLANE_SHIFT 16 -+#define WM0_PIPE_SPRITE_MASK (0x3f<<8) -+#define WM0_PIPE_SPRITE_SHIFT 8 -+#define WM0_PIPE_CURSOR_MASK (0x1f) -+ -+#define WM0_PIPEB_ILK 0x45104 -+#define WM1_LP_ILK 0x45108 -+#define WM1_LP_SR_EN (1<<31) -+#define WM1_LP_LATENCY_SHIFT 24 -+#define WM1_LP_LATENCY_MASK (0x7f<<24) -+#define WM1_LP_SR_MASK (0x1ff<<8) -+#define WM1_LP_SR_SHIFT 8 -+#define WM1_LP_CURSOR_MASK (0x3f) -+ -+/* Memory latency timer register */ -+#define MLTR_ILK 0x11222 -+/* the unit of memory self-refresh latency time is 0.5us */ -+#define ILK_SRLT_MASK 0x3f -+ -+/* define the fifo size on Ironlake */ -+#define ILK_DISPLAY_FIFO 128 -+#define ILK_DISPLAY_MAXWM 64 -+#define ILK_DISPLAY_DFTWM 8 -+ -+#define ILK_DISPLAY_SR_FIFO 512 -+#define ILK_DISPLAY_MAX_SRWM 0x1ff -+#define ILK_DISPLAY_DFT_SRWM 0x3f -+#define ILK_CURSOR_SR_FIFO 64 -+#define ILK_CURSOR_MAX_SRWM 0x3f -+#define ILK_CURSOR_DFT_SRWM 8 -+ -+#define ILK_FIFO_LINE_SIZE 64 -+ - /* - * The two pipe frame counter registers are not synchronized, so - * reading a stable value is somewhat tricky. 
The following code -@@ -2298,14 +2430,23 @@ - #define GT_PIPE_NOTIFY (1 << 4) - #define GT_SYNC_STATUS (1 << 2) - #define GT_USER_INTERRUPT (1 << 0) -+#define GT_BSD_USER_INTERRUPT (1 << 5) -+ - - #define GTISR 0x44010 - #define GTIMR 0x44014 - #define GTIIR 0x44018 - #define GTIER 0x4401c - -+#define ILK_DISPLAY_CHICKEN2 0x42004 -+#define ILK_DPARB_GATE (1<<22) -+#define ILK_VSDPFD_FULL (1<<21) -+#define ILK_DSPCLK_GATE 0x42020 -+#define ILK_DPARB_CLK_GATE (1<<5) -+ - #define DISP_ARB_CTL 0x45000 - #define DISP_TILE_SURFACE_SWIZZLING (1<<13) -+#define DISP_FBC_WM_DIS (1<<15) - - /* PCH */ - -@@ -2316,6 +2457,11 @@ - #define SDE_PORTB_HOTPLUG (1 << 8) - #define SDE_SDVOB_HOTPLUG (1 << 6) - #define SDE_HOTPLUG_MASK (0xf << 8) -+/* CPT */ -+#define SDE_CRT_HOTPLUG_CPT (1 << 19) -+#define SDE_PORTD_HOTPLUG_CPT (1 << 23) -+#define SDE_PORTC_HOTPLUG_CPT (1 << 22) -+#define SDE_PORTB_HOTPLUG_CPT (1 << 21) - - #define SDEISR 0xc4000 - #define SDEIMR 0xc4004 -@@ -2407,6 +2553,17 @@ - #define PCH_SSC4_PARMS 0xc6210 - #define PCH_SSC4_AUX_PARMS 0xc6214 - -+#define PCH_DPLL_SEL 0xc7000 -+#define TRANSA_DPLL_ENABLE (1<<3) -+#define TRANSA_DPLLB_SEL (1<<0) -+#define TRANSA_DPLLA_SEL 0 -+#define TRANSB_DPLL_ENABLE (1<<7) -+#define TRANSB_DPLLB_SEL (1<<4) -+#define TRANSB_DPLLA_SEL (0) -+#define TRANSC_DPLL_ENABLE (1<<11) -+#define TRANSC_DPLLB_SEL (1<<8) -+#define TRANSC_DPLLA_SEL (0) -+ - /* transcoder */ - - #define TRANS_HTOTAL_A 0xe0000 -@@ -2493,6 +2650,19 @@ - #define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22) - #define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22) - #define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22) -+/* ILK always use 400mV 0dB for voltage swing and pre-emphasis level. -+ SNB has different settings. */ -+/* SNB A-stepping */ -+#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22) -+#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22) -+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) -+#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) -+/* SNB B-stepping */ -+#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22) -+#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22) -+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22) -+#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) -+#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22) - #define FDI_DP_PORT_WIDTH_X1 (0<<19) - #define FDI_DP_PORT_WIDTH_X2 (1<<19) - #define FDI_DP_PORT_WIDTH_X3 (2<<19) -@@ -2525,6 +2695,13 @@ - #define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6) - #define FDI_SEL_RAWCLK (0<<4) - #define FDI_SEL_PCDCLK (1<<4) -+/* CPT */ -+#define FDI_AUTO_TRAINING (1<<10) -+#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8) -+#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8) -+#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8) -+#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8) -+#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8) - - #define FDI_RXA_MISC 0xf0010 - #define FDI_RXB_MISC 0xf1010 -@@ -2590,12 +2767,18 @@ - #define SDVO_ENCODING (0) - #define TMDS_ENCODING (2 << 10) - #define NULL_PACKET_VSYNC_ENABLE (1 << 9) -+/* CPT */ -+#define HDMI_MODE_SELECT (1 << 9) -+#define DVI_MODE_SELECT (0) - #define SDVOB_BORDER_ENABLE (1 << 7) - #define AUDIO_ENABLE (1 << 6) - #define VSYNC_ACTIVE_HIGH (1 << 4) - #define HSYNC_ACTIVE_HIGH (1 << 3) - #define PORT_DETECTED (1 << 2) - -+/* PCH SDVOB multiplex with HDMIB */ -+#define PCH_SDVOB HDMIB -+ - #define HDMIC 0xe1150 - #define HDMID 0xe1160 - -@@ -2653,4 +2836,42 @@ - #define PCH_DPD_AUX_CH_DATA4 0xe4320 - #define PCH_DPD_AUX_CH_DATA5 0xe4324 - -+/* CPT */ -+#define PORT_TRANS_A_SEL_CPT 0 -+#define PORT_TRANS_B_SEL_CPT (1<<29) -+#define 
PORT_TRANS_C_SEL_CPT (2<<29) -+#define PORT_TRANS_SEL_MASK (3<<29) -+ -+#define TRANS_DP_CTL_A 0xe0300 -+#define TRANS_DP_CTL_B 0xe1300 -+#define TRANS_DP_CTL_C 0xe2300 -+#define TRANS_DP_OUTPUT_ENABLE (1<<31) -+#define TRANS_DP_PORT_SEL_B (0<<29) -+#define TRANS_DP_PORT_SEL_C (1<<29) -+#define TRANS_DP_PORT_SEL_D (2<<29) -+#define TRANS_DP_PORT_SEL_MASK (3<<29) -+#define TRANS_DP_AUDIO_ONLY (1<<26) -+#define TRANS_DP_ENH_FRAMING (1<<18) -+#define TRANS_DP_8BPC (0<<9) -+#define TRANS_DP_10BPC (1<<9) -+#define TRANS_DP_6BPC (2<<9) -+#define TRANS_DP_12BPC (3<<9) -+#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4) -+#define TRANS_DP_VSYNC_ACTIVE_LOW 0 -+#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) -+#define TRANS_DP_HSYNC_ACTIVE_LOW 0 -+ -+/* SNB eDP training params */ -+/* SNB A-stepping */ -+#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22) -+#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22) -+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) -+#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) -+/* SNB B-stepping */ -+#define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22) -+#define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22) -+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22) -+#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) -+#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) -+ - #endif /* _I915_REG_H_ */ -diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c -index ac0d1a7..60a5800 100644 ---- a/drivers/gpu/drm/i915/i915_suspend.c -+++ b/drivers/gpu/drm/i915/i915_suspend.c -@@ -600,14 +600,16 @@ void i915_save_display(struct drm_device *dev) - } - /* FIXME: save TV & SDVO state */ - -- /* FBC state */ -- if (IS_GM45(dev)) { -- dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); -- } else { -- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); -- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); -- dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); -- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); -+ /* Only save FBC state on the platform that supports FBC */ -+ if (I915_HAS_FBC(dev)) { -+ if (IS_GM45(dev)) { -+ dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); -+ } else { -+ dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); -+ dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); -+ dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); -+ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); -+ } - } - - /* VGA state */ -@@ -702,18 +704,19 @@ void i915_restore_display(struct drm_device *dev) - } - /* FIXME: restore TV & SDVO state */ - -- /* FBC info */ -- if (IS_GM45(dev)) { -- g4x_disable_fbc(dev); -- I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); -- } else { -- i8xx_disable_fbc(dev); -- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); -- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); -- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); -- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); -+ /* only restore FBC info on the platform that supports FBC*/ -+ if (I915_HAS_FBC(dev)) { -+ if (IS_GM45(dev)) { -+ g4x_disable_fbc(dev); -+ I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); -+ } else { -+ i8xx_disable_fbc(dev); -+ I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); -+ I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); -+ I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); -+ I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); -+ } - } -- - /* VGA state */ - if (IS_IRONLAKE(dev)) - I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); -diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h 
-index 01840d9..fab2176 100644 ---- a/drivers/gpu/drm/i915/i915_trace.h -+++ b/drivers/gpu/drm/i915/i915_trace.h -@@ -53,23 +53,6 @@ TRACE_EVENT(i915_gem_object_bind, - __entry->obj, __entry->gtt_offset) - ); - --TRACE_EVENT(i915_gem_object_clflush, -- -- TP_PROTO(struct drm_gem_object *obj), -- -- TP_ARGS(obj), -- -- TP_STRUCT__entry( -- __field(struct drm_gem_object *, obj) -- ), -- -- TP_fast_assign( -- __entry->obj = obj; -- ), -- -- TP_printk("obj=%p", __entry->obj) --); -- - TRACE_EVENT(i915_gem_object_change_domain, - - TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), -@@ -115,7 +98,7 @@ TRACE_EVENT(i915_gem_object_get_fence, - __entry->obj, __entry->fence, __entry->tiling_mode) - ); - --TRACE_EVENT(i915_gem_object_unbind, -+DECLARE_EVENT_CLASS(i915_gem_object, - - TP_PROTO(struct drm_gem_object *obj), - -@@ -132,21 +115,25 @@ TRACE_EVENT(i915_gem_object_unbind, - TP_printk("obj=%p", __entry->obj) - ); - --TRACE_EVENT(i915_gem_object_destroy, -+DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, - - TP_PROTO(struct drm_gem_object *obj), - -- TP_ARGS(obj), -+ TP_ARGS(obj) -+); - -- TP_STRUCT__entry( -- __field(struct drm_gem_object *, obj) -- ), -+DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, - -- TP_fast_assign( -- __entry->obj = obj; -- ), -+ TP_PROTO(struct drm_gem_object *obj), - -- TP_printk("obj=%p", __entry->obj) -+ TP_ARGS(obj) -+); -+ -+DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, -+ -+ TP_PROTO(struct drm_gem_object *obj), -+ -+ TP_ARGS(obj) - ); - - /* batch tracing */ -@@ -197,8 +184,7 @@ TRACE_EVENT(i915_gem_request_flush, - __entry->flush_domains, __entry->invalidate_domains) - ); - -- --TRACE_EVENT(i915_gem_request_complete, -+DECLARE_EVENT_CLASS(i915_gem_request, - - TP_PROTO(struct drm_device *dev, u32 seqno), - -@@ -217,64 +203,35 @@ TRACE_EVENT(i915_gem_request_complete, - TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) - ); - --TRACE_EVENT(i915_gem_request_retire, -+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, - - TP_PROTO(struct drm_device *dev, u32 seqno), - -- TP_ARGS(dev, seqno), -- -- TP_STRUCT__entry( -- __field(u32, dev) -- __field(u32, seqno) -- ), -- -- TP_fast_assign( -- __entry->dev = dev->primary->index; -- __entry->seqno = seqno; -- ), -- -- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) -+ TP_ARGS(dev, seqno) - ); - --TRACE_EVENT(i915_gem_request_wait_begin, -+DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, - - TP_PROTO(struct drm_device *dev, u32 seqno), - -- TP_ARGS(dev, seqno), -- -- TP_STRUCT__entry( -- __field(u32, dev) -- __field(u32, seqno) -- ), -- -- TP_fast_assign( -- __entry->dev = dev->primary->index; -- __entry->seqno = seqno; -- ), -- -- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) -+ TP_ARGS(dev, seqno) - ); - --TRACE_EVENT(i915_gem_request_wait_end, -+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin, - - TP_PROTO(struct drm_device *dev, u32 seqno), - -- TP_ARGS(dev, seqno), -+ TP_ARGS(dev, seqno) -+); - -- TP_STRUCT__entry( -- __field(u32, dev) -- __field(u32, seqno) -- ), -+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, - -- TP_fast_assign( -- __entry->dev = dev->primary->index; -- __entry->seqno = seqno; -- ), -+ TP_PROTO(struct drm_device *dev, u32 seqno), - -- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) -+ TP_ARGS(dev, seqno) - ); - --TRACE_EVENT(i915_ring_wait_begin, -+DECLARE_EVENT_CLASS(i915_ring, - - TP_PROTO(struct drm_device *dev), - -@@ -291,26 +248,23 @@ 
TRACE_EVENT(i915_ring_wait_begin, - TP_printk("dev=%u", __entry->dev) - ); - --TRACE_EVENT(i915_ring_wait_end, -+DEFINE_EVENT(i915_ring, i915_ring_wait_begin, - - TP_PROTO(struct drm_device *dev), - -- TP_ARGS(dev), -+ TP_ARGS(dev) -+); - -- TP_STRUCT__entry( -- __field(u32, dev) -- ), -+DEFINE_EVENT(i915_ring, i915_ring_wait_end, - -- TP_fast_assign( -- __entry->dev = dev->primary->index; -- ), -+ TP_PROTO(struct drm_device *dev), - -- TP_printk("dev=%u", __entry->dev) -+ TP_ARGS(dev) - ); - - #endif /* _I915_TRACE_H_ */ - - /* This part must be outside protection */ - #undef TRACE_INCLUDE_PATH --#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915 -+#define TRACE_INCLUDE_PATH . - #include -diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c -index f9ba452..96f75d7 100644 ---- a/drivers/gpu/drm/i915/intel_bios.c -+++ b/drivers/gpu/drm/i915/intel_bios.c -@@ -95,6 +95,16 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, - panel_fixed_mode->clock = dvo_timing->clock * 10; - panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; - -+ if (dvo_timing->hsync_positive) -+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC; -+ else -+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC; -+ -+ if (dvo_timing->vsync_positive) -+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC; -+ else -+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; -+ - /* Some VBTs have bogus h/vtotal values */ - if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) - panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; -@@ -366,6 +376,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, - p_mapping->dvo_port = p_child->dvo_port; - p_mapping->slave_addr = p_child->slave_addr; - p_mapping->dvo_wiring = p_child->dvo_wiring; -+ p_mapping->ddc_pin = p_child->ddc_pin; - p_mapping->initialized = 1; - } else { - DRM_DEBUG_KMS("Maybe one SDVO port is shared by " -diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c -index 759c2ef..22ff384 100644 ---- a/drivers/gpu/drm/i915/intel_crt.c -+++ b/drivers/gpu/drm/i915/intel_crt.c -@@ -136,11 +136,17 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, - adpa |= ADPA_VSYNC_ACTIVE_HIGH; - - if (intel_crtc->pipe == 0) { -- adpa |= ADPA_PIPE_A_SELECT; -+ if (HAS_PCH_CPT(dev)) -+ adpa |= PORT_TRANS_A_SEL_CPT; -+ else -+ adpa |= ADPA_PIPE_A_SELECT; - if (!HAS_PCH_SPLIT(dev)) - I915_WRITE(BCLRPAT_A, 0); - } else { -- adpa |= ADPA_PIPE_B_SELECT; -+ if (HAS_PCH_CPT(dev)) -+ adpa |= PORT_TRANS_B_SEL_CPT; -+ else -+ adpa |= ADPA_PIPE_B_SELECT; - if (!HAS_PCH_SPLIT(dev)) - I915_WRITE(BCLRPAT_B, 0); - } -@@ -152,15 +158,21 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) - { - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; -- u32 adpa; -+ u32 adpa, temp; - bool ret; - -- adpa = I915_READ(PCH_ADPA); -+ temp = adpa = I915_READ(PCH_ADPA); - -- adpa &= ~ADPA_CRT_HOTPLUG_MASK; -- /* disable HPD first */ -- I915_WRITE(PCH_ADPA, adpa); -- (void)I915_READ(PCH_ADPA); -+ if (HAS_PCH_CPT(dev)) { -+ /* Disable DAC before force detect */ -+ I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE); -+ (void)I915_READ(PCH_ADPA); -+ } else { -+ adpa &= ~ADPA_CRT_HOTPLUG_MASK; -+ /* disable HPD first */ -+ I915_WRITE(PCH_ADPA, adpa); -+ (void)I915_READ(PCH_ADPA); -+ } - - adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | - ADPA_CRT_HOTPLUG_WARMUP_10MS | -@@ -176,6 +188,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) - 
while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) - ; - -+ if (HAS_PCH_CPT(dev)) { -+ I915_WRITE(PCH_ADPA, temp); -+ (void)I915_READ(PCH_ADPA); -+ } -+ - /* Check the status to see if both blue and green are on now */ - adpa = I915_READ(PCH_ADPA); - adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK; -@@ -200,7 +217,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) - { - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; -- u32 hotplug_en; -+ u32 hotplug_en, orig, stat; -+ bool ret = false; - int i, tries = 0; - - if (HAS_PCH_SPLIT(dev)) -@@ -215,8 +233,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) - tries = 2; - else - tries = 1; -- hotplug_en = I915_READ(PORT_HOTPLUG_EN); -- hotplug_en &= CRT_FORCE_HOTPLUG_MASK; -+ hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN); -+ hotplug_en &= CRT_HOTPLUG_MASK; - hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; - - if (IS_G4X(dev)) -@@ -238,16 +256,22 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) - } while (time_after(timeout, jiffies)); - } - -- if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) != -- CRT_HOTPLUG_MONITOR_NONE) -- return true; -+ stat = I915_READ(PORT_HOTPLUG_STAT); -+ if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE) -+ ret = true; -+ -+ /* clear the interrupt we just generated, if any */ -+ I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS); -+ -+ /* and put the bits back */ -+ I915_WRITE(PORT_HOTPLUG_EN, orig); - -- return false; -+ return ret; - } - --static bool intel_crt_detect_ddc(struct drm_connector *connector) -+static bool intel_crt_detect_ddc(struct drm_encoder *encoder) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - - /* CRT should always be at 0, but check anyway */ - if (intel_encoder->type != INTEL_OUTPUT_ANALOG) -@@ -387,8 +411,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder - static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) - { - struct drm_device *dev = connector->dev; -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct drm_encoder *encoder = &intel_encoder->enc; -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct drm_crtc *crtc; - int dpms_mode; - enum drm_connector_status status; -@@ -400,18 +424,19 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto - return connector_status_disconnected; - } - -- if (intel_crt_detect_ddc(connector)) -+ if (intel_crt_detect_ddc(encoder)) - return connector_status_connected; - - /* for pre-945g platforms use load detect */ - if (encoder->crtc && encoder->crtc->enabled) { - status = intel_crt_load_detect(encoder->crtc, intel_encoder); - } else { -- crtc = intel_get_load_detect_pipe(intel_encoder, -+ crtc = intel_get_load_detect_pipe(intel_encoder, connector, - NULL, &dpms_mode); - if (crtc) { - status = intel_crt_load_detect(crtc, intel_encoder); -- intel_release_load_detect_pipe(intel_encoder, dpms_mode); -+ intel_release_load_detect_pipe(intel_encoder, -+ connector, dpms_mode); - } else - status = connector_status_unknown; - } -@@ -421,9 +446,6 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto - - static void intel_crt_destroy(struct drm_connector *connector) - { -- struct 
intel_encoder *intel_encoder = to_intel_encoder(connector); -- -- intel_i2c_destroy(intel_encoder->ddc_bus); - drm_sysfs_connector_remove(connector); - drm_connector_cleanup(connector); - kfree(connector); -@@ -432,29 +454,27 @@ static void intel_crt_destroy(struct drm_connector *connector) - static int intel_crt_get_modes(struct drm_connector *connector) - { - int ret; -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct i2c_adapter *ddcbus; -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); -+ struct i2c_adapter *ddc_bus; - struct drm_device *dev = connector->dev; - - -- ret = intel_ddc_get_modes(intel_encoder); -+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); - if (ret || !IS_G4X(dev)) - goto end; - -- ddcbus = intel_encoder->ddc_bus; - /* Try to probe digital port for output in DVI-I -> VGA mode. */ -- intel_encoder->ddc_bus = -- intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); -+ ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); - -- if (!intel_encoder->ddc_bus) { -- intel_encoder->ddc_bus = ddcbus; -+ if (!ddc_bus) { - dev_printk(KERN_ERR, &connector->dev->pdev->dev, - "DDC bus registration failed for CRTDDC_D.\n"); - goto end; - } - /* Try to get modes by GPIOD port */ -- ret = intel_ddc_get_modes(intel_encoder); -- intel_i2c_destroy(ddcbus); -+ ret = intel_ddc_get_modes(connector, ddc_bus); -+ intel_i2c_destroy(ddc_bus); - - end: - return ret; -@@ -491,12 +511,16 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = { - static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { - .mode_valid = intel_crt_mode_valid, - .get_modes = intel_crt_get_modes, -- .best_encoder = intel_best_encoder, -+ .best_encoder = intel_attached_encoder, - }; - - static void intel_crt_enc_destroy(struct drm_encoder *encoder) - { -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); -+ -+ intel_i2c_destroy(intel_encoder->ddc_bus); - drm_encoder_cleanup(encoder); -+ kfree(intel_encoder); - } - - static const struct drm_encoder_funcs intel_crt_enc_funcs = { -@@ -507,6 +531,7 @@ void intel_crt_init(struct drm_device *dev) - { - struct drm_connector *connector; - struct intel_encoder *intel_encoder; -+ struct intel_connector *intel_connector; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 i2c_reg; - -@@ -514,14 +539,20 @@ void intel_crt_init(struct drm_device *dev) - if (!intel_encoder) - return; - -- connector = &intel_encoder->base; -- drm_connector_init(dev, &intel_encoder->base, -+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); -+ if (!intel_connector) { -+ kfree(intel_encoder); -+ return; -+ } -+ -+ connector = &intel_connector->base; -+ drm_connector_init(dev, &intel_connector->base, - &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); - - drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs, - DRM_MODE_ENCODER_DAC); - -- drm_mode_connector_attach_encoder(&intel_encoder->base, -+ drm_mode_connector_attach_encoder(&intel_connector->base, - &intel_encoder->enc); - - /* Set up the DDC bus. 
*/ -@@ -545,7 +576,7 @@ void intel_crt_init(struct drm_device *dev) - (1 << INTEL_ANALOG_CLONE_BIT) | - (1 << INTEL_SDVO_LVDS_CLONE_BIT); - intel_encoder->crtc_mask = (1 << 0) | (1 << 1); -- connector->interlace_allowed = 0; -+ connector->interlace_allowed = 1; - connector->doublescan_allowed = 0; - - drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs); -@@ -553,5 +584,10 @@ void intel_crt_init(struct drm_device *dev) - - drm_sysfs_connector_add(connector); - -+ if (I915_HAS_HOTPLUG(dev)) -+ connector->polled = DRM_CONNECTOR_POLL_HPD; -+ else -+ connector->polled = DRM_CONNECTOR_POLL_CONNECT; -+ - dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; - } -diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c -index c7502b6..d753257 100644 ---- a/drivers/gpu/drm/i915/intel_display.c -+++ b/drivers/gpu/drm/i915/intel_display.c -@@ -742,12 +742,11 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) - { - struct drm_device *dev = crtc->dev; - struct drm_mode_config *mode_config = &dev->mode_config; -- struct drm_connector *l_entry; -+ struct drm_encoder *l_entry; - -- list_for_each_entry(l_entry, &mode_config->connector_list, head) { -- if (l_entry->encoder && -- l_entry->encoder->crtc == crtc) { -- struct intel_encoder *intel_encoder = to_intel_encoder(l_entry); -+ list_for_each_entry(l_entry, &mode_config->encoder_list, head) { -+ if (l_entry && l_entry->crtc == crtc) { -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry); - if (intel_encoder->type == type) - return true; - } -@@ -755,23 +754,6 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) - return false; - } - --static struct drm_connector * --intel_pipe_get_connector (struct drm_crtc *crtc) --{ -- struct drm_device *dev = crtc->dev; -- struct drm_mode_config *mode_config = &dev->mode_config; -- struct drm_connector *l_entry, *ret = NULL; -- -- list_for_each_entry(l_entry, &mode_config->connector_list, head) { -- if (l_entry->encoder && -- l_entry->encoder->crtc == crtc) { -- ret = l_entry; -- break; -- } -- } -- return ret; --} -- - #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) - /** - * Returns whether the given set of divisors are valid for a given refclk with -@@ -1047,28 +1029,36 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) - void i8xx_disable_fbc(struct drm_device *dev) - { - struct drm_i915_private *dev_priv = dev->dev_private; -+ unsigned long timeout = jiffies + msecs_to_jiffies(1); - u32 fbc_ctl; - - if (!I915_HAS_FBC(dev)) - return; - -+ if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN)) -+ return; /* Already off, just return */ -+ - /* Disable compression */ - fbc_ctl = I915_READ(FBC_CONTROL); - fbc_ctl &= ~FBC_CTL_EN; - I915_WRITE(FBC_CONTROL, fbc_ctl); - - /* Wait for compressing bit to clear */ -- while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) -- ; /* nothing */ -+ while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) { -+ if (time_after(jiffies, timeout)) { -+ DRM_DEBUG_DRIVER("FBC idle timed out\n"); -+ break; -+ } -+ ; /* do nothing */ -+ } - - intel_wait_for_vblank(dev); - - DRM_DEBUG_KMS("disabled FBC\n"); - } - --static bool i8xx_fbc_enabled(struct drm_crtc *crtc) -+static bool i8xx_fbc_enabled(struct drm_device *dev) - { -- struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - return I915_READ(FBC_CONTROL) & FBC_CTL_EN; -@@ -1125,14 +1115,43 @@ void g4x_disable_fbc(struct drm_device *dev) - DRM_DEBUG_KMS("disabled 
FBC\n"); - } - --static bool g4x_fbc_enabled(struct drm_crtc *crtc) -+static bool g4x_fbc_enabled(struct drm_device *dev) - { -- struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; - } - -+bool intel_fbc_enabled(struct drm_device *dev) -+{ -+ struct drm_i915_private *dev_priv = dev->dev_private; -+ -+ if (!dev_priv->display.fbc_enabled) -+ return false; -+ -+ return dev_priv->display.fbc_enabled(dev); -+} -+ -+void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) -+{ -+ struct drm_i915_private *dev_priv = crtc->dev->dev_private; -+ -+ if (!dev_priv->display.enable_fbc) -+ return; -+ -+ dev_priv->display.enable_fbc(crtc, interval); -+} -+ -+void intel_disable_fbc(struct drm_device *dev) -+{ -+ struct drm_i915_private *dev_priv = dev->dev_private; -+ -+ if (!dev_priv->display.disable_fbc) -+ return; -+ -+ dev_priv->display.disable_fbc(dev); -+} -+ - /** - * intel_update_fbc - enable/disable FBC as needed - * @crtc: CRTC to point the compressor at -@@ -1167,9 +1186,7 @@ static void intel_update_fbc(struct drm_crtc *crtc, - if (!i915_powersave) - return; - -- if (!dev_priv->display.fbc_enabled || -- !dev_priv->display.enable_fbc || -- !dev_priv->display.disable_fbc) -+ if (!I915_HAS_FBC(dev)) - return; - - if (!crtc->fb) -@@ -1216,28 +1233,26 @@ static void intel_update_fbc(struct drm_crtc *crtc, - goto out_disable; - } - -- if (dev_priv->display.fbc_enabled(crtc)) { -+ if (intel_fbc_enabled(dev)) { - /* We can re-enable it in this case, but need to update pitch */ -- if (fb->pitch > dev_priv->cfb_pitch) -- dev_priv->display.disable_fbc(dev); -- if (obj_priv->fence_reg != dev_priv->cfb_fence) -- dev_priv->display.disable_fbc(dev); -- if (plane != dev_priv->cfb_plane) -- dev_priv->display.disable_fbc(dev); -+ if ((fb->pitch > dev_priv->cfb_pitch) || -+ (obj_priv->fence_reg != dev_priv->cfb_fence) || -+ (plane != dev_priv->cfb_plane)) -+ intel_disable_fbc(dev); - } - -- if (!dev_priv->display.fbc_enabled(crtc)) { -- /* Now try to turn it back on if possible */ -- dev_priv->display.enable_fbc(crtc, 500); -- } -+ /* Now try to turn it back on if possible */ -+ if (!intel_fbc_enabled(dev)) -+ intel_enable_fbc(crtc, 500); - - return; - - out_disable: -- DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); - /* Multiple disables should be harmless */ -- if (dev_priv->display.fbc_enabled(crtc)) -- dev_priv->display.disable_fbc(dev); -+ if (intel_fbc_enabled(dev)) { -+ DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); -+ intel_disable_fbc(dev); -+ } - } - - static int -@@ -1381,7 +1396,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, - Start = obj_priv->gtt_offset; - Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); - -- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); -+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", -+ Start, Offset, x, y, crtc->fb->pitch); - I915_WRITE(dspstride, crtc->fb->pitch); - if (IS_I965G(dev)) { - I915_WRITE(dspbase, Offset); -@@ -1510,6 +1526,219 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) - udelay(500); - } - -+/* The FDI link training functions for ILK/Ibexpeak. */ -+static void ironlake_fdi_link_train(struct drm_crtc *crtc) -+{ -+ struct drm_device *dev = crtc->dev; -+ struct drm_i915_private *dev_priv = dev->dev_private; -+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); -+ int pipe = intel_crtc->pipe; -+ int fdi_tx_reg = (pipe == 0) ? 
FDI_TXA_CTL : FDI_TXB_CTL; -+ int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; -+ int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; -+ int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; -+ u32 temp, tries = 0; -+ -+ /* enable CPU FDI TX and PCH FDI RX */ -+ temp = I915_READ(fdi_tx_reg); -+ temp |= FDI_TX_ENABLE; -+ temp &= ~(7 << 19); -+ temp |= (intel_crtc->fdi_lanes - 1) << 19; -+ temp &= ~FDI_LINK_TRAIN_NONE; -+ temp |= FDI_LINK_TRAIN_PATTERN_1; -+ I915_WRITE(fdi_tx_reg, temp); -+ I915_READ(fdi_tx_reg); -+ -+ temp = I915_READ(fdi_rx_reg); -+ temp &= ~FDI_LINK_TRAIN_NONE; -+ temp |= FDI_LINK_TRAIN_PATTERN_1; -+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); -+ I915_READ(fdi_rx_reg); -+ udelay(150); -+ -+ /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit -+ for train result */ -+ temp = I915_READ(fdi_rx_imr_reg); -+ temp &= ~FDI_RX_SYMBOL_LOCK; -+ temp &= ~FDI_RX_BIT_LOCK; -+ I915_WRITE(fdi_rx_imr_reg, temp); -+ I915_READ(fdi_rx_imr_reg); -+ udelay(150); -+ -+ for (;;) { -+ temp = I915_READ(fdi_rx_iir_reg); -+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); -+ -+ if ((temp & FDI_RX_BIT_LOCK)) { -+ DRM_DEBUG_KMS("FDI train 1 done.\n"); -+ I915_WRITE(fdi_rx_iir_reg, -+ temp | FDI_RX_BIT_LOCK); -+ break; -+ } -+ -+ tries++; -+ -+ if (tries > 5) { -+ DRM_DEBUG_KMS("FDI train 1 fail!\n"); -+ break; -+ } -+ } -+ -+ /* Train 2 */ -+ temp = I915_READ(fdi_tx_reg); -+ temp &= ~FDI_LINK_TRAIN_NONE; -+ temp |= FDI_LINK_TRAIN_PATTERN_2; -+ I915_WRITE(fdi_tx_reg, temp); -+ -+ temp = I915_READ(fdi_rx_reg); -+ temp &= ~FDI_LINK_TRAIN_NONE; -+ temp |= FDI_LINK_TRAIN_PATTERN_2; -+ I915_WRITE(fdi_rx_reg, temp); -+ udelay(150); -+ -+ tries = 0; -+ -+ for (;;) { -+ temp = I915_READ(fdi_rx_iir_reg); -+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); -+ -+ if (temp & FDI_RX_SYMBOL_LOCK) { -+ I915_WRITE(fdi_rx_iir_reg, -+ temp | FDI_RX_SYMBOL_LOCK); -+ DRM_DEBUG_KMS("FDI train 2 done.\n"); -+ break; -+ } -+ -+ tries++; -+ -+ if (tries > 5) { -+ DRM_DEBUG_KMS("FDI train 2 fail!\n"); -+ break; -+ } -+ } -+ -+ DRM_DEBUG_KMS("FDI train done\n"); -+} -+ -+static int snb_b_fdi_train_param [] = { -+ FDI_LINK_TRAIN_400MV_0DB_SNB_B, -+ FDI_LINK_TRAIN_400MV_6DB_SNB_B, -+ FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, -+ FDI_LINK_TRAIN_800MV_0DB_SNB_B, -+}; -+ -+/* The FDI link training functions for SNB/Cougarpoint. */ -+static void gen6_fdi_link_train(struct drm_crtc *crtc) -+{ -+ struct drm_device *dev = crtc->dev; -+ struct drm_i915_private *dev_priv = dev->dev_private; -+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); -+ int pipe = intel_crtc->pipe; -+ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; -+ int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; -+ int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; -+ int fdi_rx_imr_reg = (pipe == 0) ? 
FDI_RXA_IMR : FDI_RXB_IMR; -+ u32 temp, i; -+ -+ /* enable CPU FDI TX and PCH FDI RX */ -+ temp = I915_READ(fdi_tx_reg); -+ temp |= FDI_TX_ENABLE; -+ temp &= ~(7 << 19); -+ temp |= (intel_crtc->fdi_lanes - 1) << 19; -+ temp &= ~FDI_LINK_TRAIN_NONE; -+ temp |= FDI_LINK_TRAIN_PATTERN_1; -+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; -+ /* SNB-B */ -+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; -+ I915_WRITE(fdi_tx_reg, temp); -+ I915_READ(fdi_tx_reg); -+ -+ temp = I915_READ(fdi_rx_reg); -+ if (HAS_PCH_CPT(dev)) { -+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; -+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; -+ } else { -+ temp &= ~FDI_LINK_TRAIN_NONE; -+ temp |= FDI_LINK_TRAIN_PATTERN_1; -+ } -+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); -+ I915_READ(fdi_rx_reg); -+ udelay(150); -+ -+ /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit -+ for train result */ -+ temp = I915_READ(fdi_rx_imr_reg); -+ temp &= ~FDI_RX_SYMBOL_LOCK; -+ temp &= ~FDI_RX_BIT_LOCK; -+ I915_WRITE(fdi_rx_imr_reg, temp); -+ I915_READ(fdi_rx_imr_reg); -+ udelay(150); -+ -+ for (i = 0; i < 4; i++ ) { -+ temp = I915_READ(fdi_tx_reg); -+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; -+ temp |= snb_b_fdi_train_param[i]; -+ I915_WRITE(fdi_tx_reg, temp); -+ udelay(500); -+ -+ temp = I915_READ(fdi_rx_iir_reg); -+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); -+ -+ if (temp & FDI_RX_BIT_LOCK) { -+ I915_WRITE(fdi_rx_iir_reg, -+ temp | FDI_RX_BIT_LOCK); -+ DRM_DEBUG_KMS("FDI train 1 done.\n"); -+ break; -+ } -+ } -+ if (i == 4) -+ DRM_DEBUG_KMS("FDI train 1 fail!\n"); -+ -+ /* Train 2 */ -+ temp = I915_READ(fdi_tx_reg); -+ temp &= ~FDI_LINK_TRAIN_NONE; -+ temp |= FDI_LINK_TRAIN_PATTERN_2; -+ if (IS_GEN6(dev)) { -+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; -+ /* SNB-B */ -+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; -+ } -+ I915_WRITE(fdi_tx_reg, temp); -+ -+ temp = I915_READ(fdi_rx_reg); -+ if (HAS_PCH_CPT(dev)) { -+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; -+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; -+ } else { -+ temp &= ~FDI_LINK_TRAIN_NONE; -+ temp |= FDI_LINK_TRAIN_PATTERN_2; -+ } -+ I915_WRITE(fdi_rx_reg, temp); -+ udelay(150); -+ -+ for (i = 0; i < 4; i++ ) { -+ temp = I915_READ(fdi_tx_reg); -+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; -+ temp |= snb_b_fdi_train_param[i]; -+ I915_WRITE(fdi_tx_reg, temp); -+ udelay(500); -+ -+ temp = I915_READ(fdi_rx_iir_reg); -+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); -+ -+ if (temp & FDI_RX_SYMBOL_LOCK) { -+ I915_WRITE(fdi_rx_iir_reg, -+ temp | FDI_RX_SYMBOL_LOCK); -+ DRM_DEBUG_KMS("FDI train 2 done.\n"); -+ break; -+ } -+ } -+ if (i == 4) -+ DRM_DEBUG_KMS("FDI train 2 fail!\n"); -+ -+ DRM_DEBUG_KMS("FDI train done.\n"); -+} -+ - static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) - { - struct drm_device *dev = crtc->dev; -@@ -1523,8 +1752,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) - int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; - int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; - int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; -- int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; -- int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; - int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; - int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; - int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; -@@ -1541,8 +1768,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) - int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B; - int trans_vblank_reg = (pipe == 0) ? 
TRANS_VBLANK_A : TRANS_VBLANK_B; - int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; -+ int trans_dpll_sel = (pipe == 0) ? 0 : 1; - u32 temp; -- int tries = 5, j, n; -+ int n; - u32 pipe_bpc; - - temp = I915_READ(pipeconf_reg); -@@ -1569,12 +1797,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) - /* enable eDP PLL */ - ironlake_enable_pll_edp(crtc); - } else { -- /* enable PCH DPLL */ -- temp = I915_READ(pch_dpll_reg); -- if ((temp & DPLL_VCO_ENABLE) == 0) { -- I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); -- I915_READ(pch_dpll_reg); -- } - - /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ - temp = I915_READ(fdi_rx_reg); -@@ -1584,9 +1806,15 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) - */ - temp &= ~(0x7 << 16); - temp |= (pipe_bpc << 11); -- I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | -- FDI_SEL_PCDCLK | -- FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ -+ temp &= ~(7 << 19); -+ temp |= (intel_crtc->fdi_lanes - 1) << 19; -+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); -+ I915_READ(fdi_rx_reg); -+ udelay(200); -+ -+ /* Switch from Rawclk to PCDclk */ -+ temp = I915_READ(fdi_rx_reg); -+ I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); - I915_READ(fdi_rx_reg); - udelay(200); - -@@ -1629,91 +1857,32 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) - } - - if (!HAS_eDP) { -- /* enable CPU FDI TX and PCH FDI RX */ -- temp = I915_READ(fdi_tx_reg); -- temp |= FDI_TX_ENABLE; -- temp |= FDI_DP_PORT_WIDTH_X4; /* default */ -- temp &= ~FDI_LINK_TRAIN_NONE; -- temp |= FDI_LINK_TRAIN_PATTERN_1; -- I915_WRITE(fdi_tx_reg, temp); -- I915_READ(fdi_tx_reg); -- -- temp = I915_READ(fdi_rx_reg); -- temp &= ~FDI_LINK_TRAIN_NONE; -- temp |= FDI_LINK_TRAIN_PATTERN_1; -- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); -- I915_READ(fdi_rx_reg); -- -- udelay(150); -- -- /* Train FDI. 
*/ -- /* umask FDI RX Interrupt symbol_lock and bit_lock bit -- for train result */ -- temp = I915_READ(fdi_rx_imr_reg); -- temp &= ~FDI_RX_SYMBOL_LOCK; -- temp &= ~FDI_RX_BIT_LOCK; -- I915_WRITE(fdi_rx_imr_reg, temp); -- I915_READ(fdi_rx_imr_reg); -- udelay(150); -+ /* For PCH output, training FDI link */ -+ if (IS_GEN6(dev)) -+ gen6_fdi_link_train(crtc); -+ else -+ ironlake_fdi_link_train(crtc); - -- temp = I915_READ(fdi_rx_iir_reg); -- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); -- -- if ((temp & FDI_RX_BIT_LOCK) == 0) { -- for (j = 0; j < tries; j++) { -- temp = I915_READ(fdi_rx_iir_reg); -- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", -- temp); -- if (temp & FDI_RX_BIT_LOCK) -- break; -- udelay(200); -- } -- if (j != tries) -- I915_WRITE(fdi_rx_iir_reg, -- temp | FDI_RX_BIT_LOCK); -- else -- DRM_DEBUG_KMS("train 1 fail\n"); -- } else { -- I915_WRITE(fdi_rx_iir_reg, -- temp | FDI_RX_BIT_LOCK); -- DRM_DEBUG_KMS("train 1 ok 2!\n"); -+ /* enable PCH DPLL */ -+ temp = I915_READ(pch_dpll_reg); -+ if ((temp & DPLL_VCO_ENABLE) == 0) { -+ I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); -+ I915_READ(pch_dpll_reg); - } -- temp = I915_READ(fdi_tx_reg); -- temp &= ~FDI_LINK_TRAIN_NONE; -- temp |= FDI_LINK_TRAIN_PATTERN_2; -- I915_WRITE(fdi_tx_reg, temp); -- -- temp = I915_READ(fdi_rx_reg); -- temp &= ~FDI_LINK_TRAIN_NONE; -- temp |= FDI_LINK_TRAIN_PATTERN_2; -- I915_WRITE(fdi_rx_reg, temp); -- -- udelay(150); -+ udelay(200); - -- temp = I915_READ(fdi_rx_iir_reg); -- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); -- -- if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { -- for (j = 0; j < tries; j++) { -- temp = I915_READ(fdi_rx_iir_reg); -- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", -- temp); -- if (temp & FDI_RX_SYMBOL_LOCK) -- break; -- udelay(200); -- } -- if (j != tries) { -- I915_WRITE(fdi_rx_iir_reg, -- temp | FDI_RX_SYMBOL_LOCK); -- DRM_DEBUG_KMS("train 2 ok 1!\n"); -- } else -- DRM_DEBUG_KMS("train 2 fail\n"); -- } else { -- I915_WRITE(fdi_rx_iir_reg, -- temp | FDI_RX_SYMBOL_LOCK); -- DRM_DEBUG_KMS("train 2 ok 2!\n"); -+ if (HAS_PCH_CPT(dev)) { -+ /* Be sure PCH DPLL SEL is set */ -+ temp = I915_READ(PCH_DPLL_SEL); -+ if (trans_dpll_sel == 0 && -+ (temp & TRANSA_DPLL_ENABLE) == 0) -+ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); -+ else if (trans_dpll_sel == 1 && -+ (temp & TRANSB_DPLL_ENABLE) == 0) -+ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); -+ I915_WRITE(PCH_DPLL_SEL, temp); -+ I915_READ(PCH_DPLL_SEL); - } -- DRM_DEBUG_KMS("train done\n"); - - /* set transcoder timing */ - I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); -@@ -1724,6 +1893,60 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) - I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); - I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); - -+ /* enable normal train */ -+ temp = I915_READ(fdi_tx_reg); -+ temp &= ~FDI_LINK_TRAIN_NONE; -+ I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | -+ FDI_TX_ENHANCE_FRAME_ENABLE); -+ I915_READ(fdi_tx_reg); -+ -+ temp = I915_READ(fdi_rx_reg); -+ if (HAS_PCH_CPT(dev)) { -+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; -+ temp |= FDI_LINK_TRAIN_NORMAL_CPT; -+ } else { -+ temp &= ~FDI_LINK_TRAIN_NONE; -+ temp |= FDI_LINK_TRAIN_NONE; -+ } -+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); -+ I915_READ(fdi_rx_reg); -+ -+ /* wait one idle pattern time */ -+ udelay(100); -+ -+ /* For PCH DP, enable TRANS_DP_CTL */ -+ if (HAS_PCH_CPT(dev) && -+ intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { -+ int trans_dp_ctl = (pipe == 0) ? 
TRANS_DP_CTL_A : TRANS_DP_CTL_B; -+ int reg; -+ -+ reg = I915_READ(trans_dp_ctl); -+ reg &= ~TRANS_DP_PORT_SEL_MASK; -+ reg = TRANS_DP_OUTPUT_ENABLE | -+ TRANS_DP_ENH_FRAMING | -+ TRANS_DP_VSYNC_ACTIVE_HIGH | -+ TRANS_DP_HSYNC_ACTIVE_HIGH; -+ -+ switch (intel_trans_dp_port_sel(crtc)) { -+ case PCH_DP_B: -+ reg |= TRANS_DP_PORT_SEL_B; -+ break; -+ case PCH_DP_C: -+ reg |= TRANS_DP_PORT_SEL_C; -+ break; -+ case PCH_DP_D: -+ reg |= TRANS_DP_PORT_SEL_D; -+ break; -+ default: -+ DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); -+ reg |= TRANS_DP_PORT_SEL_B; -+ break; -+ } -+ -+ I915_WRITE(trans_dp_ctl, reg); -+ POSTING_READ(trans_dp_ctl); -+ } -+ - /* enable PCH transcoder */ - temp = I915_READ(transconf_reg); - /* -@@ -1738,23 +1961,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) - while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) - ; - -- /* enable normal */ -- -- temp = I915_READ(fdi_tx_reg); -- temp &= ~FDI_LINK_TRAIN_NONE; -- I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | -- FDI_TX_ENHANCE_FRAME_ENABLE); -- I915_READ(fdi_tx_reg); -- -- temp = I915_READ(fdi_rx_reg); -- temp &= ~FDI_LINK_TRAIN_NONE; -- I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE | -- FDI_RX_ENHANCE_FRAME_ENABLE); -- I915_READ(fdi_rx_reg); -- -- /* wait one idle pattern time */ -- udelay(100); -- - } - - intel_crtc_load_lut(crtc); -@@ -1805,6 +2011,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) - I915_READ(pf_ctl_reg); - } - I915_WRITE(pf_win_size, 0); -+ POSTING_READ(pf_win_size); -+ - - /* disable CPU FDI tx and PCH FDI rx */ - temp = I915_READ(fdi_tx_reg); -@@ -1825,11 +2033,18 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_1; - I915_WRITE(fdi_tx_reg, temp); -+ POSTING_READ(fdi_tx_reg); - - temp = I915_READ(fdi_rx_reg); -- temp &= ~FDI_LINK_TRAIN_NONE; -- temp |= FDI_LINK_TRAIN_PATTERN_1; -+ if (HAS_PCH_CPT(dev)) { -+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; -+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; -+ } else { -+ temp &= ~FDI_LINK_TRAIN_NONE; -+ temp |= FDI_LINK_TRAIN_PATTERN_1; -+ } - I915_WRITE(fdi_rx_reg, temp); -+ POSTING_READ(fdi_rx_reg); - - udelay(100); - -@@ -1859,6 +2074,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) - } - } - } -+ - temp = I915_READ(transconf_reg); - /* BPC in transcoder is consistent with that in pipeconf */ - temp &= ~PIPE_BPC_MASK; -@@ -1867,35 +2083,53 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) - I915_READ(transconf_reg); - udelay(100); - -+ if (HAS_PCH_CPT(dev)) { -+ /* disable TRANS_DP_CTL */ -+ int trans_dp_ctl = (pipe == 0) ? 
TRANS_DP_CTL_A : TRANS_DP_CTL_B; -+ int reg; -+ -+ reg = I915_READ(trans_dp_ctl); -+ reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); -+ I915_WRITE(trans_dp_ctl, reg); -+ POSTING_READ(trans_dp_ctl); -+ -+ /* disable DPLL_SEL */ -+ temp = I915_READ(PCH_DPLL_SEL); -+ if (trans_dpll_sel == 0) -+ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); -+ else -+ temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); -+ I915_WRITE(PCH_DPLL_SEL, temp); -+ I915_READ(PCH_DPLL_SEL); -+ -+ } -+ - /* disable PCH DPLL */ - temp = I915_READ(pch_dpll_reg); -- if ((temp & DPLL_VCO_ENABLE) != 0) { -- I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); -- I915_READ(pch_dpll_reg); -- } -+ I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); -+ I915_READ(pch_dpll_reg); - - if (HAS_eDP) { - ironlake_disable_pll_edp(crtc); - } - -+ /* Switch from PCDclk to Rawclk */ - temp = I915_READ(fdi_rx_reg); - temp &= ~FDI_SEL_PCDCLK; - I915_WRITE(fdi_rx_reg, temp); - I915_READ(fdi_rx_reg); - -+ /* Disable CPU FDI TX PLL */ -+ temp = I915_READ(fdi_tx_reg); -+ I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); -+ I915_READ(fdi_tx_reg); -+ udelay(100); -+ - temp = I915_READ(fdi_rx_reg); - temp &= ~FDI_RX_PLL_ENABLE; - I915_WRITE(fdi_rx_reg, temp); - I915_READ(fdi_rx_reg); - -- /* Disable CPU FDI TX PLL */ -- temp = I915_READ(fdi_tx_reg); -- if ((temp & FDI_TX_PLL_ENABLE) != 0) { -- I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); -- I915_READ(fdi_tx_reg); -- udelay(100); -- } -- - /* Wait for the clocks to turn off. */ - udelay(100); - break; -@@ -2122,6 +2356,8 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, - if (mode->clock * 3 > 27000 * 4) - return MODE_CLOCK_HIGH; - } -+ -+ drm_mode_set_crtcinfo(adjusted_mode, 0); - return true; - } - -@@ -2331,6 +2567,30 @@ static struct intel_watermark_params i830_wm_info = { - I830_FIFO_LINE_SIZE - }; - -+static struct intel_watermark_params ironlake_display_wm_info = { -+ ILK_DISPLAY_FIFO, -+ ILK_DISPLAY_MAXWM, -+ ILK_DISPLAY_DFTWM, -+ 2, -+ ILK_FIFO_LINE_SIZE -+}; -+ -+static struct intel_watermark_params ironlake_display_srwm_info = { -+ ILK_DISPLAY_SR_FIFO, -+ ILK_DISPLAY_MAX_SRWM, -+ ILK_DISPLAY_DFT_SRWM, -+ 2, -+ ILK_FIFO_LINE_SIZE -+}; -+ -+static struct intel_watermark_params ironlake_cursor_srwm_info = { -+ ILK_CURSOR_SR_FIFO, -+ ILK_CURSOR_MAX_SRWM, -+ ILK_CURSOR_DFT_SRWM, -+ 2, -+ ILK_FIFO_LINE_SIZE -+}; -+ - /** - * intel_calculate_wm - calculate watermark level - * @clock_in_khz: pixel clock -@@ -2382,6 +2642,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, - - struct cxsr_latency { - int is_desktop; -+ int is_ddr3; - unsigned long fsb_freq; - unsigned long mem_freq; - unsigned long display_sr; -@@ -2391,33 +2652,45 @@ struct cxsr_latency { - }; - - static struct cxsr_latency cxsr_latency_table[] = { -- {1, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ -- {1, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ -- {1, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ -- -- {1, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ -- {1, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ -- {1, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ -- -- {1, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ -- {1, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ -- {1, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ -- -- {0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ -- {0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ -- {0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC 
*/ -- -- {0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ -- {0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ -- {0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ -- -- {0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ -- {0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ -- {0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ -+ {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ -+ {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ -+ {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ -+ {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ -+ {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ -+ -+ {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ -+ {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ -+ {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ -+ {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ -+ {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ -+ -+ {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ -+ {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ -+ {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ -+ {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ -+ {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ -+ -+ {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ -+ {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ -+ {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ -+ {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ -+ {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ -+ -+ {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ -+ {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ -+ {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ -+ {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ -+ {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ -+ -+ {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ -+ {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ -+ {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ -+ {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ -+ {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ - }; - --static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, -- int mem) -+static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3, -+ int fsb, int mem) - { - int i; - struct cxsr_latency *latency; -@@ -2428,6 +2701,7 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, - for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { - latency = &cxsr_latency_table[i]; - if (is_desktop == latency->is_desktop && -+ is_ddr3 == latency->is_ddr3 && - fsb == latency->fsb_freq && mem == latency->mem_freq) - return latency; - } -@@ -2449,66 +2723,6 @@ static void pineview_disable_cxsr(struct drm_device *dev) - DRM_INFO("Big FIFO is disabled\n"); - } - --static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock, -- int pixel_size) --{ -- struct drm_i915_private *dev_priv = dev->dev_private; -- u32 reg; -- unsigned long wm; -- struct cxsr_latency *latency; -- -- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq, -- dev_priv->mem_freq); -- if (!latency) { -- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); -- pineview_disable_cxsr(dev); -- return; -- } -- -- /* Display SR */ -- wm = 
intel_calculate_wm(clock, &pineview_display_wm, pixel_size, -- latency->display_sr); -- reg = I915_READ(DSPFW1); -- reg &= 0x7fffff; -- reg |= wm << 23; -- I915_WRITE(DSPFW1, reg); -- DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); -- -- /* cursor SR */ -- wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size, -- latency->cursor_sr); -- reg = I915_READ(DSPFW3); -- reg &= ~(0x3f << 24); -- reg |= (wm & 0x3f) << 24; -- I915_WRITE(DSPFW3, reg); -- -- /* Display HPLL off SR */ -- wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, -- latency->display_hpll_disable, I915_FIFO_LINE_SIZE); -- reg = I915_READ(DSPFW3); -- reg &= 0xfffffe00; -- reg |= wm & 0x1ff; -- I915_WRITE(DSPFW3, reg); -- -- /* cursor HPLL off SR */ -- wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size, -- latency->cursor_hpll_disable); -- reg = I915_READ(DSPFW3); -- reg &= ~(0x3f << 16); -- reg |= (wm & 0x3f) << 16; -- I915_WRITE(DSPFW3, reg); -- DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); -- -- /* activate cxsr */ -- reg = I915_READ(DSPFW3); -- reg |= PINEVIEW_SELF_REFRESH_EN; -- I915_WRITE(DSPFW3, reg); -- -- DRM_INFO("Big FIFO is enabled\n"); -- -- return; --} -- - /* - * Latency for FIFO fetches is dependent on several factors: - * - memory configuration (speed, channels) -@@ -2593,6 +2807,71 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane) - return size; - } - -+static void pineview_update_wm(struct drm_device *dev, int planea_clock, -+ int planeb_clock, int sr_hdisplay, int pixel_size) -+{ -+ struct drm_i915_private *dev_priv = dev->dev_private; -+ u32 reg; -+ unsigned long wm; -+ struct cxsr_latency *latency; -+ int sr_clock; -+ -+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, -+ dev_priv->fsb_freq, dev_priv->mem_freq); -+ if (!latency) { -+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); -+ pineview_disable_cxsr(dev); -+ return; -+ } -+ -+ if (!planea_clock || !planeb_clock) { -+ sr_clock = planea_clock ? 
planea_clock : planeb_clock; -+ -+ /* Display SR */ -+ wm = intel_calculate_wm(sr_clock, &pineview_display_wm, -+ pixel_size, latency->display_sr); -+ reg = I915_READ(DSPFW1); -+ reg &= ~DSPFW_SR_MASK; -+ reg |= wm << DSPFW_SR_SHIFT; -+ I915_WRITE(DSPFW1, reg); -+ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); -+ -+ /* cursor SR */ -+ wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm, -+ pixel_size, latency->cursor_sr); -+ reg = I915_READ(DSPFW3); -+ reg &= ~DSPFW_CURSOR_SR_MASK; -+ reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT; -+ I915_WRITE(DSPFW3, reg); -+ -+ /* Display HPLL off SR */ -+ wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm, -+ pixel_size, latency->display_hpll_disable); -+ reg = I915_READ(DSPFW3); -+ reg &= ~DSPFW_HPLL_SR_MASK; -+ reg |= wm & DSPFW_HPLL_SR_MASK; -+ I915_WRITE(DSPFW3, reg); -+ -+ /* cursor HPLL off SR */ -+ wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm, -+ pixel_size, latency->cursor_hpll_disable); -+ reg = I915_READ(DSPFW3); -+ reg &= ~DSPFW_HPLL_CURSOR_MASK; -+ reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT; -+ I915_WRITE(DSPFW3, reg); -+ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); -+ -+ /* activate cxsr */ -+ reg = I915_READ(DSPFW3); -+ reg |= PINEVIEW_SELF_REFRESH_EN; -+ I915_WRITE(DSPFW3, reg); -+ DRM_DEBUG_KMS("Self-refresh is enabled\n"); -+ } else { -+ pineview_disable_cxsr(dev); -+ DRM_DEBUG_KMS("Self-refresh is disabled\n"); -+ } -+} -+ - static void g4x_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int pixel_size) - { -@@ -2813,6 +3092,108 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, - I915_WRITE(FW_BLC, fwater_lo); - } - -+#define ILK_LP0_PLANE_LATENCY 700 -+ -+static void ironlake_update_wm(struct drm_device *dev, int planea_clock, -+ int planeb_clock, int sr_hdisplay, int pixel_size) -+{ -+ struct drm_i915_private *dev_priv = dev->dev_private; -+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm; -+ int sr_wm, cursor_wm; -+ unsigned long line_time_us; -+ int sr_clock, entries_required; -+ u32 reg_value; -+ -+ /* Calculate and update the watermark for plane A */ -+ if (planea_clock) { -+ entries_required = ((planea_clock / 1000) * pixel_size * -+ ILK_LP0_PLANE_LATENCY) / 1000; -+ entries_required = DIV_ROUND_UP(entries_required, -+ ironlake_display_wm_info.cacheline_size); -+ planea_wm = entries_required + -+ ironlake_display_wm_info.guard_size; -+ -+ if (planea_wm > (int)ironlake_display_wm_info.max_wm) -+ planea_wm = ironlake_display_wm_info.max_wm; -+ -+ cursora_wm = 16; -+ reg_value = I915_READ(WM0_PIPEA_ILK); -+ reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); -+ reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) | -+ (cursora_wm & WM0_PIPE_CURSOR_MASK); -+ I915_WRITE(WM0_PIPEA_ILK, reg_value); -+ DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, " -+ "cursor: %d\n", planea_wm, cursora_wm); -+ } -+ /* Calculate and update the watermark for plane B */ -+ if (planeb_clock) { -+ entries_required = ((planeb_clock / 1000) * pixel_size * -+ ILK_LP0_PLANE_LATENCY) / 1000; -+ entries_required = DIV_ROUND_UP(entries_required, -+ ironlake_display_wm_info.cacheline_size); -+ planeb_wm = entries_required + -+ ironlake_display_wm_info.guard_size; -+ -+ if (planeb_wm > (int)ironlake_display_wm_info.max_wm) -+ planeb_wm = ironlake_display_wm_info.max_wm; -+ -+ cursorb_wm = 16; -+ reg_value = I915_READ(WM0_PIPEB_ILK); -+ reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); -+ reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) 
| -+ (cursorb_wm & WM0_PIPE_CURSOR_MASK); -+ I915_WRITE(WM0_PIPEB_ILK, reg_value); -+ DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, " -+ "cursor: %d\n", planeb_wm, cursorb_wm); -+ } -+ -+ /* -+ * Calculate and update the self-refresh watermark only when one -+ * display plane is used. -+ */ -+ if (!planea_clock || !planeb_clock) { -+ int line_count; -+ /* Read the self-refresh latency. The unit is 0.5us */ -+ int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; -+ -+ sr_clock = planea_clock ? planea_clock : planeb_clock; -+ line_time_us = ((sr_hdisplay * 1000) / sr_clock); -+ -+ /* Use ns/us then divide to preserve precision */ -+ line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) -+ / 1000; -+ -+ /* calculate the self-refresh watermark for display plane */ -+ entries_required = line_count * sr_hdisplay * pixel_size; -+ entries_required = DIV_ROUND_UP(entries_required, -+ ironlake_display_srwm_info.cacheline_size); -+ sr_wm = entries_required + -+ ironlake_display_srwm_info.guard_size; -+ -+ /* calculate the self-refresh watermark for display cursor */ -+ entries_required = line_count * pixel_size * 64; -+ entries_required = DIV_ROUND_UP(entries_required, -+ ironlake_cursor_srwm_info.cacheline_size); -+ cursor_wm = entries_required + -+ ironlake_cursor_srwm_info.guard_size; -+ -+ /* configure watermark and enable self-refresh */ -+ reg_value = I915_READ(WM1_LP_ILK); -+ reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | -+ WM1_LP_CURSOR_MASK); -+ reg_value |= WM1_LP_SR_EN | -+ (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | -+ (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; -+ -+ I915_WRITE(WM1_LP_ILK, reg_value); -+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d " -+ "cursor %d\n", sr_wm, cursor_wm); -+ -+ } else { -+ /* Turn off self refresh if both pipes are enabled */ -+ I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); -+ } -+} - /** - * intel_update_watermarks - update FIFO watermark values based on current modes - * -@@ -2882,12 +3263,6 @@ static void intel_update_watermarks(struct drm_device *dev) - if (enabled <= 0) - return; - -- /* Single plane configs can enable self refresh */ -- if (enabled == 1 && IS_PINEVIEW(dev)) -- pineview_enable_cxsr(dev, sr_clock, pixel_size); -- else if (IS_PINEVIEW(dev)) -- pineview_disable_cxsr(dev); -- - dev_priv->display.update_wm(dev, planea_clock, planeb_clock, - sr_hdisplay, pixel_size); - } -@@ -2924,7 +3299,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; - bool is_edp = false; - struct drm_mode_config *mode_config = &dev->mode_config; -- struct drm_connector *connector; -+ struct drm_encoder *encoder; -+ struct intel_encoder *intel_encoder = NULL; - const intel_limit_t *limit; - int ret; - struct fdi_m_n m_n = {0}; -@@ -2935,6 +3311,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0; - int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; - int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; -+ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; -+ int trans_dpll_sel = (pipe == 0) ? 
0 : 1; - int lvds_reg = LVDS; - u32 temp; - int sdvo_pixel_multiply; -@@ -2942,12 +3320,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - - drm_vblank_pre_modeset(dev, pipe); - -- list_for_each_entry(connector, &mode_config->connector_list, head) { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ list_for_each_entry(encoder, &mode_config->encoder_list, head) { - -- if (!connector->encoder || connector->encoder->crtc != crtc) -+ if (!encoder || encoder->crtc != crtc) - continue; - -+ intel_encoder = enc_to_intel_encoder(encoder); -+ - switch (intel_encoder->type) { - case INTEL_OUTPUT_LVDS: - is_lvds = true; -@@ -3043,14 +3422,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - - /* FDI link */ - if (HAS_PCH_SPLIT(dev)) { -- int lane, link_bw, bpp; -+ int lane = 0, link_bw, bpp; - /* eDP doesn't require FDI link, so just set DP M/N - according to current link config */ - if (is_edp) { -- struct drm_connector *edp; - target_clock = mode->clock; -- edp = intel_pipe_get_connector(crtc); -- intel_edp_link_config(to_intel_encoder(edp), -+ intel_edp_link_config(intel_encoder, - &lane, &link_bw); - } else { - /* DP over FDI requires target mode clock -@@ -3059,7 +3436,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - target_clock = mode->clock; - else - target_clock = adjusted_mode->clock; -- lane = 4; - link_bw = 270000; - } - -@@ -3111,6 +3487,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - bpp = 24; - } - -+ if (!lane) { -+ /* -+ * Account for spread spectrum to avoid -+ * oversubscribing the link. Max center spread -+ * is 2.5%; use 5% for safety's sake. -+ */ -+ u32 bps = target_clock * bpp * 21 / 20; -+ lane = bps / (link_bw * 8) + 1; -+ } -+ -+ intel_crtc->fdi_lanes = lane; -+ - ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); - } - -@@ -3292,6 +3680,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - udelay(150); - } - -+ /* enable transcoder DPLL */ -+ if (HAS_PCH_CPT(dev)) { -+ temp = I915_READ(PCH_DPLL_SEL); -+ if (trans_dpll_sel == 0) -+ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); -+ else -+ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); -+ I915_WRITE(PCH_DPLL_SEL, temp); -+ I915_READ(PCH_DPLL_SEL); -+ udelay(150); -+ } -+ - /* The LVDS pin pair needs to be on before the DPLLs are enabled. - * This is an exception to the general rule that mode_set doesn't turn - * things on. 
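The arithmetic the ironlake_update_wm() and FDI-lane hunks above introduce is compact enough to restate as a stand-alone sketch. This is not the driver code: the helper names are made up, the reading of ILK_LP0_PLANE_LATENCY (700) as nanoseconds with the pixel clock in kHz is inferred from the units, and the cacheline/guard/max values below are illustrative stand-ins for the ironlake_display_wm_info fields, which are defined elsewhere in the driver.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* LP0 plane watermark: bytes the plane fetches during the wake-up latency,
 * rounded up to FIFO cachelines, plus a guard band, clamped to the maximum. */
static int lp0_plane_wm(int clock_khz, int bytes_per_pixel, int latency_ns,
			int cacheline, int guard, int max_wm)
{
	int entries = ((clock_khz / 1000) * bytes_per_pixel * latency_ns) / 1000;
	int wm = DIV_ROUND_UP(entries, cacheline) + guard;

	return wm > max_wm ? max_wm : wm;
}

/* FDI lane count: pad the required bandwidth by 5% (the comment's 2.5%
 * maximum centre spread, doubled for safety) and divide by the per-lane
 * capacity, link_bw * 8. */
static int fdi_lane_count(int target_clock_khz, int bpp, int link_bw_khz)
{
	int bps = target_clock_khz * bpp * 21 / 20;

	return bps / (link_bw_khz * 8) + 1;
}

int main(void)
{
	/* e.g. a 154 MHz, 24 bpp mode with link_bw = 270000 as in the hunk above */
	printf("FDI lanes: %d\n", fdi_lane_count(154000, 24, 270000));
	/* illustrative watermark with assumed cacheline 64, guard 2, max 128 */
	printf("LP0 plane wm: %d\n", lp0_plane_wm(154000, 4, 700, 64, 2, 128));
	return 0;
}

For that example mode the lane count works out to 2 and the LP0 watermark to 9 cachelines; the real values depend on the ironlake_display_wm_info numbers the driver actually uses.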
-@@ -3303,7 +3703,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - lvds_reg = PCH_LVDS; - - lvds = I915_READ(lvds_reg); -- lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; -+ lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; -+ if (pipe == 1) { -+ if (HAS_PCH_CPT(dev)) -+ lvds |= PORT_TRANS_B_SEL_CPT; -+ else -+ lvds |= LVDS_PIPEB_SELECT; -+ } else { -+ if (HAS_PCH_CPT(dev)) -+ lvds &= ~PORT_TRANS_SEL_MASK; -+ else -+ lvds &= ~LVDS_PIPEB_SELECT; -+ } - /* set the corresponsding LVDS_BORDER bit */ - lvds |= dev_priv->lvds_border_bits; - /* Set the B0-B3 data pairs corresponding to whether we're going to -@@ -3321,14 +3732,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - /* set the dithering flag */ - if (IS_I965G(dev)) { - if (dev_priv->lvds_dither) { -- if (HAS_PCH_SPLIT(dev)) -+ if (HAS_PCH_SPLIT(dev)) { - pipeconf |= PIPE_ENABLE_DITHER; -- else -+ pipeconf |= PIPE_DITHER_TYPE_ST01; -+ } else - lvds |= LVDS_ENABLE_DITHER; - } else { -- if (HAS_PCH_SPLIT(dev)) -+ if (HAS_PCH_SPLIT(dev)) { - pipeconf &= ~PIPE_ENABLE_DITHER; -- else -+ pipeconf &= ~PIPE_DITHER_TYPE_MASK; -+ } else - lvds &= ~LVDS_ENABLE_DITHER; - } - } -@@ -3337,6 +3750,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - } - if (is_dp) - intel_dp_set_m_n(crtc, mode, adjusted_mode); -+ else if (HAS_PCH_SPLIT(dev)) { -+ /* For non-DP output, clear any trans DP clock recovery setting.*/ -+ if (pipe == 0) { -+ I915_WRITE(TRANSA_DATA_M1, 0); -+ I915_WRITE(TRANSA_DATA_N1, 0); -+ I915_WRITE(TRANSA_DP_LINK_M1, 0); -+ I915_WRITE(TRANSA_DP_LINK_N1, 0); -+ } else { -+ I915_WRITE(TRANSB_DATA_M1, 0); -+ I915_WRITE(TRANSB_DATA_N1, 0); -+ I915_WRITE(TRANSB_DP_LINK_M1, 0); -+ I915_WRITE(TRANSB_DP_LINK_N1, 0); -+ } -+ } - - if (!is_edp) { - I915_WRITE(fp_reg, fp); -@@ -3377,6 +3804,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - } - } - -+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { -+ pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; -+ /* the chip adds 2 halflines automatically */ -+ adjusted_mode->crtc_vdisplay -= 1; -+ adjusted_mode->crtc_vtotal -= 1; -+ adjusted_mode->crtc_vblank_start -= 1; -+ adjusted_mode->crtc_vblank_end -= 1; -+ adjusted_mode->crtc_vsync_end -= 1; -+ adjusted_mode->crtc_vsync_start -= 1; -+ } else -+ pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ -+ - I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | - ((adjusted_mode->crtc_htotal - 1) << 16)); - I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | -@@ -3411,6 +3850,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, - /* enable FDI RX PLL too */ - temp = I915_READ(fdi_rx_reg); - I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); -+ I915_READ(fdi_rx_reg); -+ udelay(200); -+ -+ /* enable FDI TX PLL too */ -+ temp = I915_READ(fdi_tx_reg); -+ I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); -+ I915_READ(fdi_tx_reg); -+ -+ /* enable FDI RX PCDCLK */ -+ temp = I915_READ(fdi_rx_reg); -+ I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); -+ I915_READ(fdi_rx_reg); - udelay(200); - } - } -@@ -3527,6 +3978,13 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, - DRM_ERROR("failed to pin cursor bo\n"); - goto fail_locked; - } -+ -+ ret = i915_gem_object_set_to_gtt_domain(bo, 0); -+ if (ret) { -+ DRM_ERROR("failed to move cursor bo into the GTT\n"); -+ goto fail_unpin; -+ } -+ - addr = obj_priv->gtt_offset; - } else { - ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? 
I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); -@@ -3570,6 +4028,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, - intel_crtc->cursor_bo = bo; - - return 0; -+fail_unpin: -+ i915_gem_object_unpin(bo); - fail_locked: - mutex_unlock(&dev->struct_mutex); - fail: -@@ -3671,6 +4131,7 @@ static struct drm_display_mode load_detect_mode = { - }; - - struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, -+ struct drm_connector *connector, - struct drm_display_mode *mode, - int *dpms_mode) - { -@@ -3729,7 +4190,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, - } - - encoder->crtc = crtc; -- intel_encoder->base.encoder = encoder; -+ connector->encoder = encoder; - intel_encoder->load_detect_temp = true; - - intel_crtc = to_intel_crtc(crtc); -@@ -3755,7 +4216,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, - return crtc; - } - --void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode) -+void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, -+ struct drm_connector *connector, int dpms_mode) - { - struct drm_encoder *encoder = &intel_encoder->enc; - struct drm_device *dev = encoder->dev; -@@ -3765,7 +4227,7 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpm - - if (intel_encoder->load_detect_temp) { - encoder->crtc = NULL; -- intel_encoder->base.encoder = NULL; -+ connector->encoder = NULL; - intel_encoder->load_detect_temp = false; - crtc->enabled = drm_helper_crtc_in_use(crtc); - drm_helper_disable_unused_functions(dev); -@@ -4027,6 +4489,8 @@ static void intel_idle_update(struct work_struct *work) - - mutex_lock(&dev->struct_mutex); - -+ i915_update_gfx_val(dev_priv); -+ - if (IS_I945G(dev) || IS_I945GM(dev)) { - DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); -@@ -4155,12 +4619,6 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) - spin_lock_irqsave(&dev->event_lock, flags); - work = intel_crtc->unpin_work; - if (work == NULL || !work->pending) { -- if (work && !work->pending) { -- obj_priv = to_intel_bo(work->pending_flip_obj); -- DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", -- obj_priv, -- atomic_read(&obj_priv->pending_flip)); -- } - spin_unlock_irqrestore(&dev->event_lock, flags); - return; - } -@@ -4220,14 +4678,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, - int pipesrc_reg = (intel_crtc->pipe == 0) ? 
PIPEASRC : PIPEBSRC; - int ret, pipesrc; - u32 flip_mask; -- RING_LOCALS; - - work = kzalloc(sizeof *work, GFP_KERNEL); - if (work == NULL) - return -ENOMEM; - -- mutex_lock(&dev->struct_mutex); -- - work->event = event; - work->dev = crtc->dev; - intel_fb = to_intel_framebuffer(crtc->fb); -@@ -4237,10 +4692,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, - /* We borrow the event spin lock for protecting unpin_work */ - spin_lock_irqsave(&dev->event_lock, flags); - if (intel_crtc->unpin_work) { -- DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); - spin_unlock_irqrestore(&dev->event_lock, flags); - kfree(work); -- mutex_unlock(&dev->struct_mutex); -+ -+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); - return -EBUSY; - } - intel_crtc->unpin_work = work; -@@ -4249,13 +4704,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, - intel_fb = to_intel_framebuffer(fb); - obj = intel_fb->obj; - -+ mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(dev, obj); - if (ret != 0) { -- DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", -- to_intel_bo(obj)); -- kfree(work); -- intel_crtc->unpin_work = NULL; - mutex_unlock(&dev->struct_mutex); -+ -+ spin_lock_irqsave(&dev->event_lock, flags); -+ intel_crtc->unpin_work = NULL; -+ spin_unlock_irqrestore(&dev->event_lock, flags); -+ -+ kfree(work); -+ -+ DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", -+ to_intel_bo(obj)); - return ret; - } - -@@ -4392,14 +4853,14 @@ struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) - return crtc; - } - --static int intel_connector_clones(struct drm_device *dev, int type_mask) -+static int intel_encoder_clones(struct drm_device *dev, int type_mask) - { - int index_mask = 0; -- struct drm_connector *connector; -+ struct drm_encoder *encoder; - int entry = 0; - -- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - if (type_mask & intel_encoder->clone_mask) - index_mask |= (1 << entry); - entry++; -@@ -4411,7 +4872,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask) - static void intel_setup_outputs(struct drm_device *dev) - { - struct drm_i915_private *dev_priv = dev->dev_private; -- struct drm_connector *connector; -+ struct drm_encoder *encoder; - - intel_crt_init(dev); - -@@ -4426,9 +4887,8 @@ static void intel_setup_outputs(struct drm_device *dev) - intel_dp_init(dev, DP_A); - - if (I915_READ(HDMIB) & PORT_DETECTED) { -- /* check SDVOB */ -- /* found = intel_sdvo_init(dev, HDMIB); */ -- found = 0; -+ /* PCH SDVOB multiplex with HDMIB */ -+ found = intel_sdvo_init(dev, PCH_SDVOB); - if (!found) - intel_hdmi_init(dev, HDMIB); - if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) -@@ -4494,12 +4954,11 @@ static void intel_setup_outputs(struct drm_device *dev) - if (SUPPORTS_TV(dev)) - intel_tv_init(dev); - -- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct drm_encoder *encoder = &intel_encoder->enc; -+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - - encoder->possible_crtcs = intel_encoder->crtc_mask; -- encoder->possible_clones = intel_connector_clones(dev, -+ encoder->possible_clones = 
intel_encoder_clones(dev, - intel_encoder->clone_mask); - } - } -@@ -4507,10 +4966,6 @@ static void intel_setup_outputs(struct drm_device *dev) - static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) - { - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); -- struct drm_device *dev = fb->dev; -- -- if (fb->fbdev) -- intelfb_remove(dev, fb); - - drm_framebuffer_cleanup(fb); - drm_gem_object_unreference_unlocked(intel_fb->obj); -@@ -4533,18 +4988,13 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { - .create_handle = intel_user_framebuffer_create_handle, - }; - --int intel_framebuffer_create(struct drm_device *dev, -- struct drm_mode_fb_cmd *mode_cmd, -- struct drm_framebuffer **fb, -- struct drm_gem_object *obj) -+int intel_framebuffer_init(struct drm_device *dev, -+ struct intel_framebuffer *intel_fb, -+ struct drm_mode_fb_cmd *mode_cmd, -+ struct drm_gem_object *obj) - { -- struct intel_framebuffer *intel_fb; - int ret; - -- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); -- if (!intel_fb) -- return -ENOMEM; -- - ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); - if (ret) { - DRM_ERROR("framebuffer init failed %d\n", ret); -@@ -4552,40 +5002,41 @@ int intel_framebuffer_create(struct drm_device *dev, - } - - drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); -- - intel_fb->obj = obj; -- -- *fb = &intel_fb->base; -- - return 0; - } - -- - static struct drm_framebuffer * - intel_user_framebuffer_create(struct drm_device *dev, - struct drm_file *filp, - struct drm_mode_fb_cmd *mode_cmd) - { - struct drm_gem_object *obj; -- struct drm_framebuffer *fb; -+ struct intel_framebuffer *intel_fb; - int ret; - - obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); - if (!obj) - return NULL; - -- ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); -+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); -+ if (!intel_fb) -+ return NULL; -+ -+ ret = intel_framebuffer_init(dev, intel_fb, -+ mode_cmd, obj); - if (ret) { - drm_gem_object_unreference_unlocked(obj); -+ kfree(intel_fb); - return NULL; - } - -- return fb; -+ return &intel_fb->base; - } - - static const struct drm_mode_config_funcs intel_mode_funcs = { - .fb_create = intel_user_framebuffer_create, -- .fb_changed = intelfb_probe, -+ .output_poll_changed = intel_fb_output_poll_changed, - }; - - static struct drm_gem_object * -@@ -4594,7 +5045,7 @@ intel_alloc_power_context(struct drm_device *dev) - struct drm_gem_object *pwrctx; - int ret; - -- pwrctx = drm_gem_object_alloc(dev, 4096); -+ pwrctx = i915_gem_alloc_object(dev, 4096); - if (!pwrctx) { - DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); - return NULL; -@@ -4624,10 +5075,32 @@ err_unref: - return NULL; - } - -+bool ironlake_set_drps(struct drm_device *dev, u8 val) -+{ -+ struct drm_i915_private *dev_priv = dev->dev_private; -+ u16 rgvswctl; -+ -+ rgvswctl = I915_READ16(MEMSWCTL); -+ if (rgvswctl & MEMCTL_CMD_STS) { -+ DRM_DEBUG("gpu busy, RCS change rejected\n"); -+ return false; /* still busy with another command */ -+ } -+ -+ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | -+ (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; -+ I915_WRITE16(MEMSWCTL, rgvswctl); -+ POSTING_READ16(MEMSWCTL); -+ -+ rgvswctl |= MEMCTL_CMD_STS; -+ I915_WRITE16(MEMSWCTL, rgvswctl); -+ -+ return true; -+} -+ - void ironlake_enable_drps(struct drm_device *dev) - { - struct drm_i915_private *dev_priv = dev->dev_private; -- u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl; -+ u32 rgvmodectl = I915_READ(MEMMODECTL); - u8 
fmax, fmin, fstart, vstart; - int i = 0; - -@@ -4646,13 +5119,21 @@ void ironlake_enable_drps(struct drm_device *dev) - fmin = (rgvmodectl & MEMMODE_FMIN_MASK); - fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> - MEMMODE_FSTART_SHIFT; -+ fstart = fmax; -+ - vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> - PXVFREQ_PX_SHIFT; - -- dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */ -+ dev_priv->fmax = fstart; /* IPS callback will increase this */ -+ dev_priv->fstart = fstart; -+ -+ dev_priv->max_delay = fmax; - dev_priv->min_delay = fmin; - dev_priv->cur_delay = fstart; - -+ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin, -+ fstart); -+ - I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); - - /* -@@ -4674,20 +5155,19 @@ void ironlake_enable_drps(struct drm_device *dev) - } - msleep(1); - -- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | -- (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; -- I915_WRITE(MEMSWCTL, rgvswctl); -- POSTING_READ(MEMSWCTL); -+ ironlake_set_drps(dev, fstart); - -- rgvswctl |= MEMCTL_CMD_STS; -- I915_WRITE(MEMSWCTL, rgvswctl); -+ dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + -+ I915_READ(0x112e0); -+ dev_priv->last_time1 = jiffies_to_msecs(jiffies); -+ dev_priv->last_count2 = I915_READ(0x112f4); -+ getrawmonotonic(&dev_priv->last_time2); - } - - void ironlake_disable_drps(struct drm_device *dev) - { - struct drm_i915_private *dev_priv = dev->dev_private; -- u32 rgvswctl; -- u8 fstart; -+ u16 rgvswctl = I915_READ16(MEMSWCTL); - - /* Ack interrupts, disable EFC interrupt */ - I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); -@@ -4697,11 +5177,7 @@ void ironlake_disable_drps(struct drm_device *dev) - I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); - - /* Go back to the starting frequency */ -- fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >> -- MEMMODE_FSTART_SHIFT; -- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | -- (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; -- I915_WRITE(MEMSWCTL, rgvswctl); -+ ironlake_set_drps(dev, dev_priv->fstart); - msleep(1); - rgvswctl |= MEMCTL_CMD_STS; - I915_WRITE(MEMSWCTL, rgvswctl); -@@ -4709,6 +5185,92 @@ void ironlake_disable_drps(struct drm_device *dev) - - } - -+static unsigned long intel_pxfreq(u32 vidfreq) -+{ -+ unsigned long freq; -+ int div = (vidfreq & 0x3f0000) >> 16; -+ int post = (vidfreq & 0x3000) >> 12; -+ int pre = (vidfreq & 0x7); -+ -+ if (!pre) -+ return 0; -+ -+ freq = ((div * 133333) / ((1<dev_private; -+ u32 lcfuse; -+ u8 pxw[16]; -+ int i; -+ -+ /* Disable to program */ -+ I915_WRITE(ECR, 0); -+ POSTING_READ(ECR); -+ -+ /* Program energy weights for various events */ -+ I915_WRITE(SDEW, 0x15040d00); -+ I915_WRITE(CSIEW0, 0x007f0000); -+ I915_WRITE(CSIEW1, 0x1e220004); -+ I915_WRITE(CSIEW2, 0x04000004); -+ -+ for (i = 0; i < 5; i++) -+ I915_WRITE(PEW + (i * 4), 0); -+ for (i = 0; i < 3; i++) -+ I915_WRITE(DEW + (i * 4), 0); -+ -+ /* Program P-state weights to account for frequency power adjustment */ -+ for (i = 0; i < 16; i++) { -+ u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); -+ unsigned long freq = intel_pxfreq(pxvidfreq); -+ unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> -+ PXVFREQ_PX_SHIFT; -+ unsigned long val; -+ -+ val = vid * vid; -+ val *= (freq / 1000); -+ val *= 255; -+ val /= (127*127*900); -+ if (val > 0xff) -+ DRM_ERROR("bad pxval: %ld\n", val); -+ pxw[i] = val; -+ } -+ /* Render standby states get 0 weight */ -+ pxw[14] = 0; -+ pxw[15] = 0; -+ -+ for (i = 0; i < 4; i++) 
{ -+ u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | -+ (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); -+ I915_WRITE(PXW + (i * 4), val); -+ } -+ -+ /* Adjust magic regs to magic values (more experimental results) */ -+ I915_WRITE(OGW0, 0); -+ I915_WRITE(OGW1, 0); -+ I915_WRITE(EG0, 0x00007f00); -+ I915_WRITE(EG1, 0x0000000e); -+ I915_WRITE(EG2, 0x000e0000); -+ I915_WRITE(EG3, 0x68000300); -+ I915_WRITE(EG4, 0x42000000); -+ I915_WRITE(EG5, 0x00140031); -+ I915_WRITE(EG6, 0); -+ I915_WRITE(EG7, 0); -+ -+ for (i = 0; i < 8; i++) -+ I915_WRITE(PXWL + (i * 4), 0); -+ -+ /* Enable PMON + select events */ -+ I915_WRITE(ECR, 0x80000019); -+ -+ lcfuse = I915_READ(LCFUSE02); -+ -+ dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); -+} -+ - void intel_init_clock_gating(struct drm_device *dev) - { - struct drm_i915_private *dev_priv = dev->dev_private; -@@ -4732,6 +5294,25 @@ void intel_init_clock_gating(struct drm_device *dev) - } - - I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); -+ -+ /* -+ * According to the spec the following bits should be set in -+ * order to enable memory self-refresh -+ * The bit 22/21 of 0x42004 -+ * The bit 5 of 0x42020 -+ * The bit 15 of 0x45000 -+ */ -+ if (IS_IRONLAKE(dev)) { -+ I915_WRITE(ILK_DISPLAY_CHICKEN2, -+ (I915_READ(ILK_DISPLAY_CHICKEN2) | -+ ILK_DPARB_GATE | ILK_VSDPFD_FULL)); -+ I915_WRITE(ILK_DSPCLK_GATE, -+ (I915_READ(ILK_DSPCLK_GATE) | -+ ILK_DPARB_CLK_GATE)); -+ I915_WRITE(DISP_ARB_CTL, -+ (I915_READ(DISP_ARB_CTL) | -+ DISP_FBC_WM_DIS)); -+ } - return; - } else if (IS_G4X(dev)) { - uint32_t dspclk_gate; -@@ -4809,8 +5390,7 @@ static void intel_init_display(struct drm_device *dev) - else - dev_priv->display.dpms = i9xx_crtc_dpms; - -- /* Only mobile has FBC, leave pointers NULL for other chips */ -- if (IS_MOBILE(dev)) { -+ if (I915_HAS_FBC(dev)) { - if (IS_GM45(dev)) { - dev_priv->display.fbc_enabled = g4x_fbc_enabled; - dev_priv->display.enable_fbc = g4x_enable_fbc; -@@ -4847,9 +5427,33 @@ static void intel_init_display(struct drm_device *dev) - i830_get_display_clock_speed; - - /* For FIFO watermark updates */ -- if (HAS_PCH_SPLIT(dev)) -- dev_priv->display.update_wm = NULL; -- else if (IS_G4X(dev)) -+ if (HAS_PCH_SPLIT(dev)) { -+ if (IS_IRONLAKE(dev)) { -+ if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) -+ dev_priv->display.update_wm = ironlake_update_wm; -+ else { -+ DRM_DEBUG_KMS("Failed to get proper latency. " -+ "Disable CxSR\n"); -+ dev_priv->display.update_wm = NULL; -+ } -+ } else -+ dev_priv->display.update_wm = NULL; -+ } else if (IS_PINEVIEW(dev)) { -+ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), -+ dev_priv->is_ddr3, -+ dev_priv->fsb_freq, -+ dev_priv->mem_freq)) { -+ DRM_INFO("failed to find known CxSR latency " -+ "(found ddr%s fsb freq %d, mem freq %d), " -+ "disabling CxSR\n", -+ (dev_priv->is_ddr3 == 1) ? 
"3": "2", -+ dev_priv->fsb_freq, dev_priv->mem_freq); -+ /* Disable CxSR and never update its watermark again */ -+ pineview_disable_cxsr(dev); -+ dev_priv->display.update_wm = NULL; -+ } else -+ dev_priv->display.update_wm = pineview_update_wm; -+ } else if (IS_G4X(dev)) - dev_priv->display.update_wm = g4x_update_wm; - else if (IS_I965G(dev)) - dev_priv->display.update_wm = i965_update_wm; -@@ -4871,7 +5475,6 @@ static void intel_init_display(struct drm_device *dev) - void intel_modeset_init(struct drm_device *dev) - { - struct drm_i915_private *dev_priv = dev->dev_private; -- int num_pipe; - int i; - - drm_mode_config_init(dev); -@@ -4901,13 +5504,13 @@ void intel_modeset_init(struct drm_device *dev) - dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0); - - if (IS_MOBILE(dev) || IS_I9XX(dev)) -- num_pipe = 2; -+ dev_priv->num_pipe = 2; - else -- num_pipe = 1; -+ dev_priv->num_pipe = 1; - DRM_DEBUG_KMS("%d display pipe%s available.\n", -- num_pipe, num_pipe > 1 ? "s" : ""); -+ dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); - -- for (i = 0; i < num_pipe; i++) { -+ for (i = 0; i < dev_priv->num_pipe; i++) { - intel_crtc_init(dev, i); - } - -@@ -4915,21 +5518,16 @@ void intel_modeset_init(struct drm_device *dev) - - intel_init_clock_gating(dev); - -- if (IS_IRONLAKE_M(dev)) -+ if (IS_IRONLAKE_M(dev)) { - ironlake_enable_drps(dev); -+ intel_init_emon(dev); -+ } - - INIT_WORK(&dev_priv->idle_work, intel_idle_update); - setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, - (unsigned long)dev); - - intel_setup_overlay(dev); -- -- if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev), -- dev_priv->fsb_freq, -- dev_priv->mem_freq)) -- DRM_INFO("failed to find known CxSR latency " -- "(found fsb freq %d, mem freq %d), disabling CxSR\n", -- dev_priv->fsb_freq, dev_priv->mem_freq); - } - - void intel_modeset_cleanup(struct drm_device *dev) -@@ -4940,6 +5538,9 @@ void intel_modeset_cleanup(struct drm_device *dev) - - mutex_lock(&dev->struct_mutex); - -+ drm_kms_helper_poll_fini(dev); -+ intel_fbdev_fini(dev); -+ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - /* Skip inactive CRTCs */ - if (!crtc->fb) -@@ -4974,14 +5575,29 @@ void intel_modeset_cleanup(struct drm_device *dev) - } - - --/* current intel driver doesn't take advantage of encoders -- always give back the encoder for the connector --*/ --struct drm_encoder *intel_best_encoder(struct drm_connector *connector) -+/* -+ * Return which encoder is currently attached for connector. 
-+ */ -+struct drm_encoder *intel_attached_encoder (struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_mode_object *obj; -+ struct drm_encoder *encoder; -+ int i; - -- return &intel_encoder->enc; -+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { -+ if (connector->encoder_ids[i] == 0) -+ break; -+ -+ obj = drm_mode_object_find(connector->dev, -+ connector->encoder_ids[i], -+ DRM_MODE_OBJECT_ENCODER); -+ if (!obj) -+ continue; -+ -+ encoder = obj_to_encoder(obj); -+ return encoder; -+ } -+ return NULL; - } - - /* -diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c -index 77e40cf..49b54f0 100644 ---- a/drivers/gpu/drm/i915/intel_dp.c -+++ b/drivers/gpu/drm/i915/intel_dp.c -@@ -48,8 +48,6 @@ struct intel_dp_priv { - uint32_t output_reg; - uint32_t DP; - uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; -- uint32_t save_DP; -- uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE]; - bool has_audio; - int dpms_mode; - uint8_t link_bw; -@@ -141,7 +139,8 @@ static int - intel_dp_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); - int max_lanes = intel_dp_max_lane_count(intel_encoder); - -@@ -215,7 +214,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder, - { - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; - uint32_t output_reg = dp_priv->output_reg; -- struct drm_device *dev = intel_encoder->base.dev; -+ struct drm_device *dev = intel_encoder->enc.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t ch_ctl = output_reg + 0x10; - uint32_t ch_data = ch_ctl + 4; -@@ -224,19 +223,27 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder, - uint32_t ctl; - uint32_t status; - uint32_t aux_clock_divider; -- int try; -+ int try, precharge; - - /* The clock divider is based off the hrawclk, - * and would like to run at 2MHz. 
So, take the - * hrawclk value and divide by 2 and use that - */ -- if (IS_eDP(intel_encoder)) -- aux_clock_divider = 225; /* eDP input clock at 450Mhz */ -- else if (HAS_PCH_SPLIT(dev)) -+ if (IS_eDP(intel_encoder)) { -+ if (IS_GEN6(dev)) -+ aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ -+ else -+ aux_clock_divider = 225; /* eDP input clock at 450Mhz */ -+ } else if (HAS_PCH_SPLIT(dev)) - aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */ - else - aux_clock_divider = intel_hrawclk(dev) / 2; - -+ if (IS_GEN6(dev)) -+ precharge = 3; -+ else -+ precharge = 5; -+ - /* Must try at least 3 times according to DP spec */ - for (try = 0; try < 5; try++) { - /* Load the send data into the aux channel data registers */ -@@ -249,7 +256,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder, - ctl = (DP_AUX_CH_CTL_SEND_BUSY | - DP_AUX_CH_CTL_TIME_OUT_400us | - (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | -- (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | -+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | - (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | - DP_AUX_CH_CTL_DONE | - DP_AUX_CH_CTL_TIME_OUT_ERROR | -@@ -465,7 +472,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, - } - - static int --intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name) -+intel_dp_i2c_init(struct intel_encoder *intel_encoder, -+ struct intel_connector *intel_connector, const char *name) - { - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; - -@@ -480,7 +488,7 @@ intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name) - strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); - dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; - dp_priv->adapter.algo_data = &dp_priv->algo; -- dp_priv->adapter.dev.parent = &intel_encoder->base.kdev; -+ dp_priv->adapter.dev.parent = &intel_connector->base.kdev; - - return i2c_dp_aux_add_bus(&dp_priv->adapter); - } -@@ -555,7 +563,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, - { - struct drm_device *dev = crtc->dev; - struct drm_mode_config *mode_config = &dev->mode_config; -- struct drm_connector *connector; -+ struct drm_encoder *encoder; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int lane_count = 4; -@@ -564,13 +572,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, - /* - * Find the lane count in the intel_encoder private - */ -- list_for_each_entry(connector, &mode_config->connector_list, head) { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; -+ list_for_each_entry(encoder, &mode_config->encoder_list, head) { -+ struct intel_encoder *intel_encoder; -+ struct intel_dp_priv *dp_priv; - -- if (!connector->encoder || connector->encoder->crtc != crtc) -+ if (encoder->crtc != crtc) - continue; - -+ intel_encoder = enc_to_intel_encoder(encoder); -+ dp_priv = intel_encoder->dev_priv; -+ - if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { - lane_count = dp_priv->lane_count; - break; -@@ -626,16 +637,24 @@ static void - intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) - { -+ struct drm_device *dev = encoder->dev; - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; - struct drm_crtc *crtc = 
intel_encoder->enc.crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - -- dp_priv->DP = (DP_LINK_TRAIN_OFF | -- DP_VOLTAGE_0_4 | -- DP_PRE_EMPHASIS_0 | -- DP_SYNC_VS_HIGH | -- DP_SYNC_HS_HIGH); -+ dp_priv->DP = (DP_VOLTAGE_0_4 | -+ DP_PRE_EMPHASIS_0); -+ -+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) -+ dp_priv->DP |= DP_SYNC_HS_HIGH; -+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) -+ dp_priv->DP |= DP_SYNC_VS_HIGH; -+ -+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) -+ dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT; -+ else -+ dp_priv->DP |= DP_LINK_TRAIN_OFF; - - switch (dp_priv->lane_count) { - case 1: -@@ -656,15 +675,15 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, - dp_priv->link_configuration[1] = dp_priv->lane_count; - - /* -- * Check for DPCD version > 1.1, -- * enable enahanced frame stuff in that case -+ * Check for DPCD version > 1.1 and enhanced framing support - */ -- if (dp_priv->dpcd[0] >= 0x11) { -+ if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) { - dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; - dp_priv->DP |= DP_ENHANCED_FRAMING; - } - -- if (intel_crtc->pipe == 1) -+ /* CPT DP's pipe select is decided in TRANS_DP_CTL */ -+ if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) - dp_priv->DP |= DP_PIPEB_SELECT; - - if (IS_eDP(intel_encoder)) { -@@ -704,7 +723,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) - { - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; -- struct drm_device *dev = intel_encoder->base.dev; -+ struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dp_reg = I915_READ(dp_priv->output_reg); - -@@ -749,20 +768,6 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], - return link_status[r - DP_LANE0_1_STATUS]; - } - --static void --intel_dp_save(struct drm_connector *connector) --{ -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct drm_device *dev = intel_encoder->base.dev; -- struct drm_i915_private *dev_priv = dev->dev_private; -- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; -- -- dp_priv->save_DP = I915_READ(dp_priv->output_reg); -- intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET, -- dp_priv->save_link_configuration, -- sizeof (dp_priv->save_link_configuration)); --} -- - static uint8_t - intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], - int lane) -@@ -892,6 +897,25 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count) - return signal_levels; - } - -+/* Gen6's DP voltage swing and pre-emphasis control */ -+static uint32_t -+intel_gen6_edp_signal_levels(uint8_t train_set) -+{ -+ switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) { -+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: -+ return EDP_LINK_TRAIN_400MV_0DB_SNB_B; -+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: -+ return EDP_LINK_TRAIN_400MV_6DB_SNB_B; -+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: -+ return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B; -+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: -+ return EDP_LINK_TRAIN_800MV_0DB_SNB_B; -+ default: -+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n"); -+ return EDP_LINK_TRAIN_400MV_0DB_SNB_B; -+ } -+} -+ - static uint8_t - intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], - int lane) -@@ -948,7 +972,7 @@ 
intel_dp_set_link_train(struct intel_encoder *intel_encoder, - uint8_t train_set[4], - bool first) - { -- struct drm_device *dev = intel_encoder->base.dev; -+ struct drm_device *dev = intel_encoder->enc.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; - int ret; -@@ -974,7 +998,7 @@ static void - intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, - uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) - { -- struct drm_device *dev = intel_encoder->base.dev; -+ struct drm_device *dev = intel_encoder->enc.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; - uint8_t train_set[4]; -@@ -985,23 +1009,38 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, - bool channel_eq = false; - bool first = true; - int tries; -+ u32 reg; - - /* Write the link configuration data */ -- intel_dp_aux_native_write(intel_encoder, 0x100, -+ intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET, - link_configuration, DP_LINK_CONFIGURATION_SIZE); - - DP |= DP_PORT_EN; -- DP &= ~DP_LINK_TRAIN_MASK; -+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) -+ DP &= ~DP_LINK_TRAIN_MASK_CPT; -+ else -+ DP &= ~DP_LINK_TRAIN_MASK; - memset(train_set, 0, 4); - voltage = 0xff; - tries = 0; - clock_recovery = false; - for (;;) { - /* Use train_set[0] to set the voltage and pre emphasis values */ -- uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); -- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; -+ uint32_t signal_levels; -+ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) { -+ signal_levels = intel_gen6_edp_signal_levels(train_set[0]); -+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; -+ } else { -+ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); -+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; -+ } -+ -+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) -+ reg = DP | DP_LINK_TRAIN_PAT_1_CPT; -+ else -+ reg = DP | DP_LINK_TRAIN_PAT_1; - -- if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1, -+ if (!intel_dp_set_link_train(intel_encoder, reg, - DP_TRAINING_PATTERN_1, train_set, first)) - break; - first = false; -@@ -1041,11 +1080,23 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, - channel_eq = false; - for (;;) { - /* Use train_set[0] to set the voltage and pre emphasis values */ -- uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); -- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; -+ uint32_t signal_levels; -+ -+ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) { -+ signal_levels = intel_gen6_edp_signal_levels(train_set[0]); -+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; -+ } else { -+ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); -+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; -+ } -+ -+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) -+ reg = DP | DP_LINK_TRAIN_PAT_2_CPT; -+ else -+ reg = DP | DP_LINK_TRAIN_PAT_2; - - /* channel eq pattern */ -- if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2, -+ if (!intel_dp_set_link_train(intel_encoder, reg, - DP_TRAINING_PATTERN_2, train_set, - false)) - break; -@@ -1068,7 +1119,12 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, - ++tries; - } - -- I915_WRITE(dp_priv->output_reg, DP | 
DP_LINK_TRAIN_OFF); -+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) -+ reg = DP | DP_LINK_TRAIN_OFF_CPT; -+ else -+ reg = DP | DP_LINK_TRAIN_OFF; -+ -+ I915_WRITE(dp_priv->output_reg, reg); - POSTING_READ(dp_priv->output_reg); - intel_dp_aux_native_write_1(intel_encoder, - DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); -@@ -1077,7 +1133,7 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, - static void - intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) - { -- struct drm_device *dev = intel_encoder->base.dev; -+ struct drm_device *dev = intel_encoder->enc.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; - -@@ -1090,9 +1146,15 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) - udelay(100); - } - -- DP &= ~DP_LINK_TRAIN_MASK; -- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); -- POSTING_READ(dp_priv->output_reg); -+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) { -+ DP &= ~DP_LINK_TRAIN_MASK_CPT; -+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); -+ POSTING_READ(dp_priv->output_reg); -+ } else { -+ DP &= ~DP_LINK_TRAIN_MASK; -+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); -+ POSTING_READ(dp_priv->output_reg); -+ } - - udelay(17000); - -@@ -1102,18 +1164,6 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) - POSTING_READ(dp_priv->output_reg); - } - --static void --intel_dp_restore(struct drm_connector *connector) --{ -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; -- -- if (dp_priv->save_DP & DP_PORT_EN) -- intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration); -- else -- intel_dp_link_down(intel_encoder, dp_priv->save_DP); --} -- - /* - * According to DP spec - * 5.1.2: -@@ -1144,7 +1194,8 @@ intel_dp_check_link_status(struct intel_encoder *intel_encoder) - static enum drm_connector_status - ironlake_dp_detect(struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; - enum drm_connector_status status; - -@@ -1156,6 +1207,8 @@ ironlake_dp_detect(struct drm_connector *connector) - if (dp_priv->dpcd[0] != 0) - status = connector_status_connected; - } -+ DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0], -+ dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]); - return status; - } - -@@ -1168,8 +1221,9 @@ ironlake_dp_detect(struct drm_connector *connector) - static enum drm_connector_status - intel_dp_detect(struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct drm_device *dev = intel_encoder->base.dev; -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); -+ struct drm_device *dev = intel_encoder->enc.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; - uint32_t temp, bit; -@@ -1180,16 +1234,6 @@ intel_dp_detect(struct drm_connector *connector) - if (HAS_PCH_SPLIT(dev)) - return ironlake_dp_detect(connector); - -- temp = I915_READ(PORT_HOTPLUG_EN); -- -- I915_WRITE(PORT_HOTPLUG_EN, -- temp | -- 
DPB_HOTPLUG_INT_EN | -- DPC_HOTPLUG_INT_EN | -- DPD_HOTPLUG_INT_EN); -- -- POSTING_READ(PORT_HOTPLUG_EN); -- - switch (dp_priv->output_reg) { - case DP_B: - bit = DPB_HOTPLUG_INT_STATUS; -@@ -1222,15 +1266,16 @@ intel_dp_detect(struct drm_connector *connector) - - static int intel_dp_get_modes(struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct drm_device *dev = intel_encoder->base.dev; -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); -+ struct drm_device *dev = intel_encoder->enc.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - /* We should parse the EDID data and find out if it has an audio sink - */ - -- ret = intel_ddc_get_modes(intel_encoder); -+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); - if (ret) - return ret; - -@@ -1249,13 +1294,9 @@ static int intel_dp_get_modes(struct drm_connector *connector) - static void - intel_dp_destroy (struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- -- if (intel_encoder->i2c_bus) -- intel_i2c_destroy(intel_encoder->i2c_bus); - drm_sysfs_connector_remove(connector); - drm_connector_cleanup(connector); -- kfree(intel_encoder); -+ kfree(connector); - } - - static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { -@@ -1268,8 +1309,6 @@ static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { - - static const struct drm_connector_funcs intel_dp_connector_funcs = { - .dpms = drm_helper_connector_dpms, -- .save = intel_dp_save, -- .restore = intel_dp_restore, - .detect = intel_dp_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = intel_dp_destroy, -@@ -1278,12 +1317,17 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { - static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { - .get_modes = intel_dp_get_modes, - .mode_valid = intel_dp_mode_valid, -- .best_encoder = intel_best_encoder, -+ .best_encoder = intel_attached_encoder, - }; - - static void intel_dp_enc_destroy(struct drm_encoder *encoder) - { -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); -+ -+ if (intel_encoder->i2c_bus) -+ intel_i2c_destroy(intel_encoder->i2c_bus); - drm_encoder_cleanup(encoder); -+ kfree(intel_encoder); - } - - static const struct drm_encoder_funcs intel_dp_enc_funcs = { -@@ -1299,12 +1343,35 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder) - intel_dp_check_link_status(intel_encoder); - } - -+/* Return which DP Port should be selected for Transcoder DP control */ -+int -+intel_trans_dp_port_sel (struct drm_crtc *crtc) -+{ -+ struct drm_device *dev = crtc->dev; -+ struct drm_mode_config *mode_config = &dev->mode_config; -+ struct drm_encoder *encoder; -+ struct intel_encoder *intel_encoder = NULL; -+ -+ list_for_each_entry(encoder, &mode_config->encoder_list, head) { -+ if (encoder->crtc != crtc) -+ continue; -+ -+ intel_encoder = enc_to_intel_encoder(encoder); -+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { -+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; -+ return dp_priv->output_reg; -+ } -+ } -+ return -1; -+} -+ - void - intel_dp_init(struct drm_device *dev, int output_reg) - { - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_connector *connector; - struct intel_encoder *intel_encoder; -+ struct intel_connector *intel_connector; - struct 
intel_dp_priv *dp_priv; - const char *name = NULL; - -@@ -1313,13 +1380,21 @@ intel_dp_init(struct drm_device *dev, int output_reg) - if (!intel_encoder) - return; - -+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); -+ if (!intel_connector) { -+ kfree(intel_encoder); -+ return; -+ } -+ - dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); - -- connector = &intel_encoder->base; -+ connector = &intel_connector->base; - drm_connector_init(dev, connector, &intel_dp_connector_funcs, - DRM_MODE_CONNECTOR_DisplayPort); - drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); - -+ connector->polled = DRM_CONNECTOR_POLL_HPD; -+ - if (output_reg == DP_A) - intel_encoder->type = INTEL_OUTPUT_EDP; - else -@@ -1349,7 +1424,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) - DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs); - -- drm_mode_connector_attach_encoder(&intel_encoder->base, -+ drm_mode_connector_attach_encoder(&intel_connector->base, - &intel_encoder->enc); - drm_sysfs_connector_add(connector); - -@@ -1378,7 +1453,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) - break; - } - -- intel_dp_i2c_init(intel_encoder, name); -+ intel_dp_i2c_init(intel_encoder, intel_connector, name); - - intel_encoder->ddc_bus = &dp_priv->adapter; - intel_encoder->hot_plug = intel_dp_hot_plug; -diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h -index e302537..df931f7 100644 ---- a/drivers/gpu/drm/i915/intel_drv.h -+++ b/drivers/gpu/drm/i915/intel_drv.h -@@ -96,8 +96,6 @@ struct intel_framebuffer { - - - struct intel_encoder { -- struct drm_connector base; -- - struct drm_encoder enc; - int type; - struct i2c_adapter *i2c_bus; -@@ -110,6 +108,11 @@ struct intel_encoder { - int clone_mask; - }; - -+struct intel_connector { -+ struct drm_connector base; -+ void *dev_priv; -+}; -+ - struct intel_crtc; - struct intel_overlay { - struct drm_device *dev; -@@ -149,17 +152,18 @@ struct intel_crtc { - bool lowfreq_avail; - struct intel_overlay *overlay; - struct intel_unpin_work *unpin_work; -+ int fdi_lanes; - }; - - #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) --#define to_intel_encoder(x) container_of(x, struct intel_encoder, base) -+#define to_intel_connector(x) container_of(x, struct intel_connector, base) - #define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) - #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) - - struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, - const char *name); - void intel_i2c_destroy(struct i2c_adapter *adapter); --int intel_ddc_get_modes(struct intel_encoder *intel_encoder); -+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); - extern bool intel_ddc_probe(struct intel_encoder *intel_encoder); - void intel_i2c_quirk_set(struct drm_device *dev, bool enable); - void intel_i2c_reset_gmbus(struct drm_device *dev); -@@ -183,7 +187,7 @@ extern void intel_crtc_load_lut(struct drm_crtc *crtc); - extern void intel_encoder_prepare (struct drm_encoder *encoder); - extern void intel_encoder_commit (struct drm_encoder *encoder); - --extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); -+extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector); - - extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, - struct drm_crtc *crtc); -@@ -192,17 +196,16 @@ int 
intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, - extern void intel_wait_for_vblank(struct drm_device *dev); - extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); - extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, -+ struct drm_connector *connector, - struct drm_display_mode *mode, - int *dpms_mode); - extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, -+ struct drm_connector *connector, - int dpms_mode); - - extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); - extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); - extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable); --extern int intelfb_probe(struct drm_device *dev); --extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); --extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc); - extern void intelfb_restore(void); - extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, - u16 blue, int regno); -@@ -212,10 +215,12 @@ extern void intel_init_clock_gating(struct drm_device *dev); - extern void ironlake_enable_drps(struct drm_device *dev); - extern void ironlake_disable_drps(struct drm_device *dev); - --extern int intel_framebuffer_create(struct drm_device *dev, -- struct drm_mode_fb_cmd *mode_cmd, -- struct drm_framebuffer **fb, -- struct drm_gem_object *obj); -+extern int intel_framebuffer_init(struct drm_device *dev, -+ struct intel_framebuffer *ifb, -+ struct drm_mode_fb_cmd *mode_cmd, -+ struct drm_gem_object *obj); -+extern int intel_fbdev_init(struct drm_device *dev); -+extern void intel_fbdev_fini(struct drm_device *dev); - - extern void intel_prepare_page_flip(struct drm_device *dev, int plane); - extern void intel_finish_page_flip(struct drm_device *dev, int pipe); -@@ -229,4 +234,6 @@ extern int intel_overlay_put_image(struct drm_device *dev, void *data, - struct drm_file *file_priv); - extern int intel_overlay_attrs(struct drm_device *dev, void *data, - struct drm_file *file_priv); -+ -+extern void intel_fb_output_poll_changed(struct drm_device *dev); - #endif /* __INTEL_DRV_H__ */ -diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c -index ebf213c..227feca 100644 ---- a/drivers/gpu/drm/i915/intel_dvo.c -+++ b/drivers/gpu/drm/i915/intel_dvo.c -@@ -96,39 +96,11 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) - } - } - --static void intel_dvo_save(struct drm_connector *connector) --{ -- struct drm_i915_private *dev_priv = connector->dev->dev_private; -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct intel_dvo_device *dvo = intel_encoder->dev_priv; -- -- /* Each output should probably just save the registers it touches, -- * but for now, use more overkill. 
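The intel_drv.h hunk above replaces the old combined object with two wrappers: struct intel_encoder keeps the embedded drm_encoder, and the new struct intel_connector wraps drm_connector, with container_of() recovering the wrapper from a pointer to the embedded base. The stand-alone sketch below only illustrates that pattern; the DRM types are reduced to stubs and the wrapper fields are trimmed, so it is not the driver's actual definition.

#include <stddef.h>
#include <stdio.h>

/* local stand-in for the kernel macro */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* stub DRM base types; only the embedding matters for the illustration */
struct drm_connector { int id; };
struct drm_encoder { int id; };

struct intel_connector {
	struct drm_connector base;
	void *dev_priv;
};

struct intel_encoder {
	struct drm_encoder enc;
	int type;
};

#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)

int main(void)
{
	struct intel_connector connector = { .base = { .id = 1 } };
	struct intel_encoder encoder = { .enc = { .id = 2 }, .type = 3 };

	/* given only the embedded base pointers, recover the wrappers */
	struct drm_connector *c = &connector.base;
	struct drm_encoder *e = &encoder.enc;

	printf("connector id %d, encoder type %d\n",
	       to_intel_connector(c)->base.id, enc_to_intel_encoder(e)->type);
	return 0;
}

This is the same recovery the converted detect/get_modes callbacks rely on, going from the connector through intel_attached_encoder() and then enc_to_intel_encoder().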
-- */ -- dev_priv->saveDVOA = I915_READ(DVOA); -- dev_priv->saveDVOB = I915_READ(DVOB); -- dev_priv->saveDVOC = I915_READ(DVOC); -- -- dvo->dev_ops->save(dvo); --} -- --static void intel_dvo_restore(struct drm_connector *connector) --{ -- struct drm_i915_private *dev_priv = connector->dev->dev_private; -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct intel_dvo_device *dvo = intel_encoder->dev_priv; -- -- dvo->dev_ops->restore(dvo); -- -- I915_WRITE(DVOA, dev_priv->saveDVOA); -- I915_WRITE(DVOB, dev_priv->saveDVOB); -- I915_WRITE(DVOC, dev_priv->saveDVOC); --} -- - static int intel_dvo_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dvo_device *dvo = intel_encoder->dev_priv; - - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) -@@ -241,7 +213,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, - */ - static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dvo_device *dvo = intel_encoder->dev_priv; - - return dvo->dev_ops->detect(dvo); -@@ -249,7 +222,8 @@ static enum drm_connector_status intel_dvo_detect(struct drm_connector *connecto - - static int intel_dvo_get_modes(struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dvo_device *dvo = intel_encoder->dev_priv; - - /* We should probably have an i2c driver get_modes function for those -@@ -257,7 +231,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector) - * (TV-out, for example), but for now with just TMDS and LVDS, - * that's not the case. 
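Two formulas from the intel_pxfreq()/intel_init_emon() hunk earlier in this patch are worth restating, partly because the patch text is garbled right after the "(1<" in intel_pxfreq(). The sketch below is hypothetical, not the driver code: the frequency decode is assumed to finish as (1 << post) * pre, which is what the surrounding field extraction suggests, and the per-P-state energy weight scales as voltage squared times frequency, normalised so that vid 127 at 900 MHz comes out to 255. The driver merely warns when the weight overflows a byte; the sketch clamps instead.

#include <stdint.h>
#include <stdio.h>

/* assumed completion of the PXVFREQ decode:
 * freq = div * 133333 / ((1 << post) * pre) */
static unsigned long pxfreq_khz(uint32_t vidfreq)
{
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;
	return (div * 133333UL) / ((1UL << post) * pre);
}

/* energy weight per P-state: vid^2 * MHz, scaled so vid 127 at 900 MHz is 255 */
static unsigned int pstate_weight(unsigned long vid, unsigned long freq_khz)
{
	unsigned long val = vid * vid;

	val *= freq_khz / 1000;		/* kHz -> MHz */
	val *= 255;
	val /= 127 * 127 * 900;
	return val > 0xff ? 0xff : (unsigned int)val;	/* driver only warns; clamp here */
}

int main(void)
{
	/* arbitrary field values, just to exercise the decode: div 20, post 0, pre 1 */
	printf("pxfreq = %lu kHz\n", pxfreq_khz((20 << 16) | 1));
	/* the normalisation point itself */
	printf("weight(127, 900 MHz) = %u\n", pstate_weight(127, 900000));
	return 0;
}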
- */ -- intel_ddc_get_modes(intel_encoder); -+ intel_ddc_get_modes(connector, intel_encoder->ddc_bus); - if (!list_empty(&connector->probed_modes)) - return 1; - -@@ -275,38 +249,10 @@ static int intel_dvo_get_modes(struct drm_connector *connector) - - static void intel_dvo_destroy (struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct intel_dvo_device *dvo = intel_encoder->dev_priv; -- -- if (dvo) { -- if (dvo->dev_ops->destroy) -- dvo->dev_ops->destroy(dvo); -- if (dvo->panel_fixed_mode) -- kfree(dvo->panel_fixed_mode); -- /* no need, in i830_dvoices[] now */ -- //kfree(dvo); -- } -- if (intel_encoder->i2c_bus) -- intel_i2c_destroy(intel_encoder->i2c_bus); -- if (intel_encoder->ddc_bus) -- intel_i2c_destroy(intel_encoder->ddc_bus); - drm_sysfs_connector_remove(connector); - drm_connector_cleanup(connector); -- kfree(intel_encoder); --} -- --#ifdef RANDR_GET_CRTC_INTERFACE --static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector) --{ -- struct drm_device *dev = connector->dev; -- struct drm_i915_private *dev_priv = dev->dev_private; -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct intel_dvo_device *dvo = intel_encoder->dev_priv; -- int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT); -- -- return intel_pipe_to_crtc(pScrn, pipe); -+ kfree(connector); - } --#endif - - static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { - .dpms = intel_dvo_dpms, -@@ -318,8 +264,6 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { - - static const struct drm_connector_funcs intel_dvo_connector_funcs = { - .dpms = drm_helper_connector_dpms, -- .save = intel_dvo_save, -- .restore = intel_dvo_restore, - .detect = intel_dvo_detect, - .destroy = intel_dvo_destroy, - .fill_modes = drm_helper_probe_single_connector_modes, -@@ -328,12 +272,26 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = { - static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { - .mode_valid = intel_dvo_mode_valid, - .get_modes = intel_dvo_get_modes, -- .best_encoder = intel_best_encoder, -+ .best_encoder = intel_attached_encoder, - }; - - static void intel_dvo_enc_destroy(struct drm_encoder *encoder) - { -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); -+ struct intel_dvo_device *dvo = intel_encoder->dev_priv; -+ -+ if (dvo) { -+ if (dvo->dev_ops->destroy) -+ dvo->dev_ops->destroy(dvo); -+ if (dvo->panel_fixed_mode) -+ kfree(dvo->panel_fixed_mode); -+ } -+ if (intel_encoder->i2c_bus) -+ intel_i2c_destroy(intel_encoder->i2c_bus); -+ if (intel_encoder->ddc_bus) -+ intel_i2c_destroy(intel_encoder->ddc_bus); - drm_encoder_cleanup(encoder); -+ kfree(intel_encoder); - } - - static const struct drm_encoder_funcs intel_dvo_enc_funcs = { -@@ -352,7 +310,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector) - { - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dvo_device *dvo = intel_encoder->dev_priv; - uint32_t dvo_reg = dvo->dvo_reg; - uint32_t dvo_val = I915_READ(dvo_reg); -@@ -384,6 +343,7 @@ intel_dvo_get_current_mode (struct drm_connector *connector) - void intel_dvo_init(struct drm_device *dev) - { - struct intel_encoder 
*intel_encoder; -+ struct intel_connector *intel_connector; - struct intel_dvo_device *dvo; - struct i2c_adapter *i2cbus = NULL; - int ret = 0; -@@ -393,6 +353,12 @@ void intel_dvo_init(struct drm_device *dev) - if (!intel_encoder) - return; - -+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); -+ if (!intel_connector) { -+ kfree(intel_encoder); -+ return; -+ } -+ - /* Set up the DDC bus */ - intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); - if (!intel_encoder->ddc_bus) -@@ -400,7 +366,7 @@ void intel_dvo_init(struct drm_device *dev) - - /* Now, try to find a controller */ - for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { -- struct drm_connector *connector = &intel_encoder->base; -+ struct drm_connector *connector = &intel_connector->base; - int gpio; - - dvo = &intel_dvo_devices[i]; -@@ -471,7 +437,7 @@ void intel_dvo_init(struct drm_device *dev) - drm_encoder_helper_add(&intel_encoder->enc, - &intel_dvo_helper_funcs); - -- drm_mode_connector_attach_encoder(&intel_encoder->base, -+ drm_mode_connector_attach_encoder(&intel_connector->base, - &intel_encoder->enc); - if (dvo->type == INTEL_DVO_CHIP_LVDS) { - /* For our LVDS chipsets, we should hopefully be able -@@ -496,4 +462,5 @@ void intel_dvo_init(struct drm_device *dev) - intel_i2c_destroy(i2cbus); - free_intel: - kfree(intel_encoder); -+ kfree(intel_connector); - } -diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c -index 8a0b3bc..c3c5052 100644 ---- a/drivers/gpu/drm/i915/intel_fb.c -+++ b/drivers/gpu/drm/i915/intel_fb.c -@@ -44,9 +44,10 @@ - #include "i915_drm.h" - #include "i915_drv.h" - --struct intelfb_par { -+struct intel_fbdev { - struct drm_fb_helper helper; -- struct intel_framebuffer *intel_fb; -+ struct intel_framebuffer ifb; -+ struct list_head fbdev_list; - struct drm_display_mode *our_mode; - }; - -@@ -54,7 +55,6 @@ static struct fb_ops intelfb_ops = { - .owner = THIS_MODULE, - .fb_check_var = drm_fb_helper_check_var, - .fb_set_par = drm_fb_helper_set_par, -- .fb_setcolreg = drm_fb_helper_setcolreg, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, -@@ -63,62 +63,12 @@ static struct fb_ops intelfb_ops = { - .fb_setcmap = drm_fb_helper_setcmap, - }; - --static struct drm_fb_helper_funcs intel_fb_helper_funcs = { -- .gamma_set = intel_crtc_fb_gamma_set, -- .gamma_get = intel_crtc_fb_gamma_get, --}; -- -- --/** -- * Currently it is assumed that the old framebuffer is reused. -- * -- * LOCKING -- * caller should hold the mode config lock. 
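The divider constants in the intel_dp_aux_ch() hunk earlier all come from one relation: the comment there says the AUX bit clock should run at about 2 MHz, so each divider is simply the input clock over 2 MHz. The helper below is hypothetical (the driver hard-codes the three results and otherwise uses intel_hrawclk(dev) / 2), but it shows where 225, 200 and 62 come from.

#include <stdio.h>

/* number of input-clock periods per 2 MHz AUX bit-clock period */
static int aux_clock_divider(int input_clock_khz)
{
	return input_clock_khz / 2000;
}

int main(void)
{
	printf("450 MHz eDP input  -> %d\n", aux_clock_divider(450000));	/* 225 */
	printf("400 MHz SNB eDP    -> %d\n", aux_clock_divider(400000));	/* 200 */
	printf("125 MHz PCH rawclk -> %d\n", aux_clock_divider(125000));	/* 62 */
	return 0;
}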
-- * -- */ --int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc) --{ -- struct fb_info *info; -- struct drm_framebuffer *fb; -- struct drm_display_mode *mode = crtc->desired_mode; -- -- fb = crtc->fb; -- if (!fb) -- return 1; -- -- info = fb->fbdev; -- if (!info) -- return 1; -- -- if (!mode) -- return 1; -- -- info->var.xres = mode->hdisplay; -- info->var.right_margin = mode->hsync_start - mode->hdisplay; -- info->var.hsync_len = mode->hsync_end - mode->hsync_start; -- info->var.left_margin = mode->htotal - mode->hsync_end; -- info->var.yres = mode->vdisplay; -- info->var.lower_margin = mode->vsync_start - mode->vdisplay; -- info->var.vsync_len = mode->vsync_end - mode->vsync_start; -- info->var.upper_margin = mode->vtotal - mode->vsync_end; -- info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100; -- /* avoid overflow */ -- info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh; -- -- return 0; --} --EXPORT_SYMBOL(intelfb_resize); -- --static int intelfb_create(struct drm_device *dev, uint32_t fb_width, -- uint32_t fb_height, uint32_t surface_width, -- uint32_t surface_height, -- uint32_t surface_depth, uint32_t surface_bpp, -- struct drm_framebuffer **fb_p) -+static int intelfb_create(struct intel_fbdev *ifbdev, -+ struct drm_fb_helper_surface_size *sizes) - { -+ struct drm_device *dev = ifbdev->helper.dev; - struct fb_info *info; -- struct intelfb_par *par; - struct drm_framebuffer *fb; -- struct intel_framebuffer *intel_fb; - struct drm_mode_fb_cmd mode_cmd; - struct drm_gem_object *fbo = NULL; - struct drm_i915_gem_object *obj_priv; -@@ -126,19 +76,19 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, - int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; - - /* we don't do packed 24bpp */ -- if (surface_bpp == 24) -- surface_bpp = 32; -+ if (sizes->surface_bpp == 24) -+ sizes->surface_bpp = 32; - -- mode_cmd.width = surface_width; -- mode_cmd.height = surface_height; -+ mode_cmd.width = sizes->surface_width; -+ mode_cmd.height = sizes->surface_height; - -- mode_cmd.bpp = surface_bpp; -+ mode_cmd.bpp = sizes->surface_bpp; - mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64); -- mode_cmd.depth = surface_depth; -+ mode_cmd.depth = sizes->surface_depth; - - size = mode_cmd.pitch * mode_cmd.height; - size = ALIGN(size, PAGE_SIZE); -- fbo = drm_gem_object_alloc(dev, size); -+ fbo = i915_gem_alloc_object(dev, size); - if (!fbo) { - DRM_ERROR("failed to allocate framebuffer\n"); - ret = -ENOMEM; -@@ -155,47 +105,43 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, - } - - /* Flush everything out, we'll be doing GTT only from now on */ -- i915_gem_object_set_to_gtt_domain(fbo, 1); -- -- ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo); -+ ret = i915_gem_object_set_to_gtt_domain(fbo, 1); - if (ret) { -- DRM_ERROR("failed to allocate fb.\n"); -+ DRM_ERROR("failed to bind fb: %d.\n", ret); - goto out_unpin; - } - -- list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); -- -- intel_fb = to_intel_framebuffer(fb); -- *fb_p = fb; -- -- info = framebuffer_alloc(sizeof(struct intelfb_par), device); -+ info = framebuffer_alloc(0, device); - if (!info) { - ret = -ENOMEM; - goto out_unpin; - } - -- par = info->par; -+ info->par = ifbdev; - -- par->helper.funcs = &intel_fb_helper_funcs; -- par->helper.dev = dev; -- ret = drm_fb_helper_init_crtc_count(&par->helper, 2, -- INTELFB_CONN_LIMIT); -- if (ret) -- goto out_unref; -+ intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo); -+ -+ fb = 
&ifbdev->ifb.base; -+ -+ ifbdev->helper.fb = fb; -+ ifbdev->helper.fbdev = info; - - strcpy(info->fix.id, "inteldrmfb"); - - info->flags = FBINFO_DEFAULT; -- - info->fbops = &intelfb_ops; - -- - /* setup aperture base/size for vesafb takeover */ -- info->aperture_base = dev->mode_config.fb_base; -+ info->apertures = alloc_apertures(1); -+ if (!info->apertures) { -+ ret = -ENOMEM; -+ goto out_unpin; -+ } -+ info->apertures->ranges[0].base = dev->mode_config.fb_base; - if (IS_I9XX(dev)) -- info->aperture_size = pci_resource_len(dev->pdev, 2); -+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2); - else -- info->aperture_size = pci_resource_len(dev->pdev, 0); -+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); - - info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; - info->fix.smem_len = size; -@@ -208,12 +154,18 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, - ret = -ENOSPC; - goto out_unpin; - } -+ -+ ret = fb_alloc_cmap(&info->cmap, 256, 0); -+ if (ret) { -+ ret = -ENOMEM; -+ goto out_unpin; -+ } - info->screen_size = size; - - // memset(info->screen_base, 0, size); - - drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); -- drm_fb_helper_fill_var(info, fb, fb_width, fb_height); -+ drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); - - /* FIXME: we really shouldn't expose mmio space at all */ - info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar); -@@ -225,14 +177,10 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, - info->pixmap.flags = FB_PIXMAP_SYSTEM; - info->pixmap.scan_align = 1; - -- fb->fbdev = info; -- -- par->intel_fb = intel_fb; -- -- /* To allow resizeing without swapping buffers */ - DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", -- intel_fb->base.width, intel_fb->base.height, -- obj_priv->gtt_offset, fbo); -+ fb->width, fb->height, -+ obj_priv->gtt_offset, fbo); -+ - - mutex_unlock(&dev->struct_mutex); - vga_switcheroo_client_fb_set(dev->pdev, info); -@@ -247,35 +195,92 @@ out: - return ret; - } - --int intelfb_probe(struct drm_device *dev) -+static int intel_fb_find_or_create_single(struct drm_fb_helper *helper, -+ struct drm_fb_helper_surface_size *sizes) - { -+ struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper; -+ int new_fb = 0; - int ret; - -- DRM_DEBUG_KMS("\n"); -- ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create); -- return ret; -+ if (!helper->fb) { -+ ret = intelfb_create(ifbdev, sizes); -+ if (ret) -+ return ret; -+ new_fb = 1; -+ } -+ return new_fb; - } --EXPORT_SYMBOL(intelfb_probe); - --int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) -+static struct drm_fb_helper_funcs intel_fb_helper_funcs = { -+ .gamma_set = intel_crtc_fb_gamma_set, -+ .gamma_get = intel_crtc_fb_gamma_get, -+ .fb_probe = intel_fb_find_or_create_single, -+}; -+ -+int intel_fbdev_destroy(struct drm_device *dev, -+ struct intel_fbdev *ifbdev) - { - struct fb_info *info; -+ struct intel_framebuffer *ifb = &ifbdev->ifb; - -- if (!fb) -- return -EINVAL; -- -- info = fb->fbdev; -- -- if (info) { -- struct intelfb_par *par = info->par; -+ if (ifbdev->helper.fbdev) { -+ info = ifbdev->helper.fbdev; - unregister_framebuffer(info); - iounmap(info->screen_base); -- if (info->par) -- drm_fb_helper_free(&par->helper); -+ if (info->cmap.len) -+ fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); - } - -+ drm_fb_helper_fini(&ifbdev->helper); -+ -+ drm_framebuffer_cleanup(&ifb->base); -+ if (ifb->obj) -+ 
drm_gem_object_unreference_unlocked(ifb->obj); -+ -+ return 0; -+} -+ -+int intel_fbdev_init(struct drm_device *dev) -+{ -+ struct intel_fbdev *ifbdev; -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ int ret; -+ -+ ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); -+ if (!ifbdev) -+ return -ENOMEM; -+ -+ dev_priv->fbdev = ifbdev; -+ ifbdev->helper.funcs = &intel_fb_helper_funcs; -+ -+ ret = drm_fb_helper_init(dev, &ifbdev->helper, -+ dev_priv->num_pipe, -+ INTELFB_CONN_LIMIT); -+ if (ret) { -+ kfree(ifbdev); -+ return ret; -+ } -+ -+ drm_fb_helper_single_add_all_connectors(&ifbdev->helper); -+ drm_fb_helper_initial_config(&ifbdev->helper, 32); - return 0; - } --EXPORT_SYMBOL(intelfb_remove); -+ -+void intel_fbdev_fini(struct drm_device *dev) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ if (!dev_priv->fbdev) -+ return; -+ -+ intel_fbdev_destroy(dev, dev_priv->fbdev); -+ kfree(dev_priv->fbdev); -+ dev_priv->fbdev = NULL; -+} - MODULE_LICENSE("GPL and additional rights"); -+ -+void intel_fb_output_poll_changed(struct drm_device *dev) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); -+} -diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c -index 48cade0..83bd764 100644 ---- a/drivers/gpu/drm/i915/intel_hdmi.c -+++ b/drivers/gpu/drm/i915/intel_hdmi.c -@@ -39,7 +39,6 @@ - - struct intel_hdmi_priv { - u32 sdvox_reg; -- u32 save_SDVOX; - bool has_hdmi_sink; - }; - -@@ -60,11 +59,18 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, - SDVO_VSYNC_ACTIVE_HIGH | - SDVO_HSYNC_ACTIVE_HIGH; - -- if (hdmi_priv->has_hdmi_sink) -+ if (hdmi_priv->has_hdmi_sink) { - sdvox |= SDVO_AUDIO_ENABLE; -+ if (HAS_PCH_CPT(dev)) -+ sdvox |= HDMI_MODE_SELECT; -+ } - -- if (intel_crtc->pipe == 1) -- sdvox |= SDVO_PIPE_B_SELECT; -+ if (intel_crtc->pipe == 1) { -+ if (HAS_PCH_CPT(dev)) -+ sdvox |= PORT_TRANS_B_SEL_CPT; -+ else -+ sdvox |= SDVO_PIPE_B_SELECT; -+ } - - I915_WRITE(hdmi_priv->sdvox_reg, sdvox); - POSTING_READ(hdmi_priv->sdvox_reg); -@@ -106,27 +112,6 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) - } - } - --static void intel_hdmi_save(struct drm_connector *connector) --{ -- struct drm_device *dev = connector->dev; -- struct drm_i915_private *dev_priv = dev->dev_private; -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; -- -- hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg); --} -- --static void intel_hdmi_restore(struct drm_connector *connector) --{ -- struct drm_device *dev = connector->dev; -- struct drm_i915_private *dev_priv = dev->dev_private; -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; -- -- I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX); -- POSTING_READ(hdmi_priv->sdvox_reg); --} -- - static int intel_hdmi_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) - { -@@ -151,13 +136,14 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, - static enum drm_connector_status - intel_hdmi_detect(struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; - struct edid *edid = NULL; - 
enum drm_connector_status status = connector_status_disconnected; - - hdmi_priv->has_hdmi_sink = false; -- edid = drm_get_edid(&intel_encoder->base, -+ edid = drm_get_edid(connector, - intel_encoder->ddc_bus); - - if (edid) { -@@ -165,7 +151,7 @@ intel_hdmi_detect(struct drm_connector *connector) - status = connector_status_connected; - hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); - } -- intel_encoder->base.display_info.raw_edid = NULL; -+ connector->display_info.raw_edid = NULL; - kfree(edid); - } - -@@ -174,24 +160,21 @@ intel_hdmi_detect(struct drm_connector *connector) - - static int intel_hdmi_get_modes(struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - - /* We should parse the EDID data and find out if it's an HDMI sink so - * we can send audio to it. - */ - -- return intel_ddc_get_modes(intel_encoder); -+ return intel_ddc_get_modes(connector, intel_encoder->ddc_bus); - } - - static void intel_hdmi_destroy(struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- -- if (intel_encoder->i2c_bus) -- intel_i2c_destroy(intel_encoder->i2c_bus); - drm_sysfs_connector_remove(connector); - drm_connector_cleanup(connector); -- kfree(intel_encoder); -+ kfree(connector); - } - - static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { -@@ -204,8 +187,6 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { - - static const struct drm_connector_funcs intel_hdmi_connector_funcs = { - .dpms = drm_helper_connector_dpms, -- .save = intel_hdmi_save, -- .restore = intel_hdmi_restore, - .detect = intel_hdmi_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = intel_hdmi_destroy, -@@ -214,12 +195,17 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { - static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { - .get_modes = intel_hdmi_get_modes, - .mode_valid = intel_hdmi_mode_valid, -- .best_encoder = intel_best_encoder, -+ .best_encoder = intel_attached_encoder, - }; - - static void intel_hdmi_enc_destroy(struct drm_encoder *encoder) - { -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); -+ -+ if (intel_encoder->i2c_bus) -+ intel_i2c_destroy(intel_encoder->i2c_bus); - drm_encoder_cleanup(encoder); -+ kfree(intel_encoder); - } - - static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { -@@ -231,21 +217,30 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_connector *connector; - struct intel_encoder *intel_encoder; -+ struct intel_connector *intel_connector; - struct intel_hdmi_priv *hdmi_priv; - - intel_encoder = kcalloc(sizeof(struct intel_encoder) + - sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); - if (!intel_encoder) - return; -+ -+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); -+ if (!intel_connector) { -+ kfree(intel_encoder); -+ return; -+ } -+ - hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1); - -- connector = &intel_encoder->base; -+ connector = &intel_connector->base; - drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, - DRM_MODE_CONNECTOR_HDMIA); - drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); - - intel_encoder->type = INTEL_OUTPUT_HDMI; - -+ 
connector->polled = DRM_CONNECTOR_POLL_HPD; - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - intel_encoder->crtc_mask = (1 << 0) | (1 << 1); -@@ -285,7 +280,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) - DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs); - -- drm_mode_connector_attach_encoder(&intel_encoder->base, -+ drm_mode_connector_attach_encoder(&intel_connector->base, - &intel_encoder->enc); - drm_sysfs_connector_add(connector); - -@@ -303,6 +298,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) - err_connector: - drm_connector_cleanup(connector); - kfree(intel_encoder); -+ kfree(intel_connector); - - return; - } -diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c -index b66806a..6a1accd 100644 ---- a/drivers/gpu/drm/i915/intel_lvds.c -+++ b/drivers/gpu/drm/i915/intel_lvds.c -@@ -139,75 +139,6 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) - /* XXX: We never power down the LVDS pairs. */ - } - --static void intel_lvds_save(struct drm_connector *connector) --{ -- struct drm_device *dev = connector->dev; -- struct drm_i915_private *dev_priv = dev->dev_private; -- u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; -- u32 pwm_ctl_reg; -- -- if (HAS_PCH_SPLIT(dev)) { -- pp_on_reg = PCH_PP_ON_DELAYS; -- pp_off_reg = PCH_PP_OFF_DELAYS; -- pp_ctl_reg = PCH_PP_CONTROL; -- pp_div_reg = PCH_PP_DIVISOR; -- pwm_ctl_reg = BLC_PWM_CPU_CTL; -- } else { -- pp_on_reg = PP_ON_DELAYS; -- pp_off_reg = PP_OFF_DELAYS; -- pp_ctl_reg = PP_CONTROL; -- pp_div_reg = PP_DIVISOR; -- pwm_ctl_reg = BLC_PWM_CTL; -- } -- -- dev_priv->savePP_ON = I915_READ(pp_on_reg); -- dev_priv->savePP_OFF = I915_READ(pp_off_reg); -- dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg); -- dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg); -- dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg); -- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & -- BACKLIGHT_DUTY_CYCLE_MASK); -- -- /* -- * If the light is off at server startup, just make it full brightness -- */ -- if (dev_priv->backlight_duty_cycle == 0) -- dev_priv->backlight_duty_cycle = -- intel_lvds_get_max_backlight(dev); --} -- --static void intel_lvds_restore(struct drm_connector *connector) --{ -- struct drm_device *dev = connector->dev; -- struct drm_i915_private *dev_priv = dev->dev_private; -- u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; -- u32 pwm_ctl_reg; -- -- if (HAS_PCH_SPLIT(dev)) { -- pp_on_reg = PCH_PP_ON_DELAYS; -- pp_off_reg = PCH_PP_OFF_DELAYS; -- pp_ctl_reg = PCH_PP_CONTROL; -- pp_div_reg = PCH_PP_DIVISOR; -- pwm_ctl_reg = BLC_PWM_CPU_CTL; -- } else { -- pp_on_reg = PP_ON_DELAYS; -- pp_off_reg = PP_OFF_DELAYS; -- pp_ctl_reg = PP_CONTROL; -- pp_div_reg = PP_DIVISOR; -- pwm_ctl_reg = BLC_PWM_CTL; -- } -- -- I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL); -- I915_WRITE(pp_on_reg, dev_priv->savePP_ON); -- I915_WRITE(pp_off_reg, dev_priv->savePP_OFF); -- I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR); -- I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL); -- if (dev_priv->savePP_CONTROL & POWER_TARGET_ON) -- intel_lvds_set_power(dev, true); -- else -- intel_lvds_set_power(dev, false); --} -- - static int intel_lvds_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) - { -@@ -635,12 +566,13 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect - static int intel_lvds_get_modes(struct drm_connector *connector) - { - struct drm_device 
*dev = connector->dev; -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct drm_i915_private *dev_priv = dev->dev_private; - int ret = 0; - - if (dev_priv->lvds_edid_good) { -- ret = intel_ddc_get_modes(intel_encoder); -+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); - - if (ret) - return ret; -@@ -717,11 +649,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, - static void intel_lvds_destroy(struct drm_connector *connector) - { - struct drm_device *dev = connector->dev; -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); - struct drm_i915_private *dev_priv = dev->dev_private; - -- if (intel_encoder->ddc_bus) -- intel_i2c_destroy(intel_encoder->ddc_bus); - if (dev_priv->lid_notifier.notifier_call) - acpi_lid_notifier_unregister(&dev_priv->lid_notifier); - drm_sysfs_connector_remove(connector); -@@ -734,13 +663,14 @@ static int intel_lvds_set_property(struct drm_connector *connector, - uint64_t value) - { - struct drm_device *dev = connector->dev; -- struct intel_encoder *intel_encoder = -- to_intel_encoder(connector); - - if (property == dev->mode_config.scaling_mode_property && - connector->encoder) { - struct drm_crtc *crtc = connector->encoder->crtc; -+ struct drm_encoder *encoder = connector->encoder; -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; -+ - if (value == DRM_MODE_SCALE_NONE) { - DRM_DEBUG_KMS("no scaling not supported\n"); - return 0; -@@ -774,13 +704,11 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { - static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { - .get_modes = intel_lvds_get_modes, - .mode_valid = intel_lvds_mode_valid, -- .best_encoder = intel_best_encoder, -+ .best_encoder = intel_attached_encoder, - }; - - static const struct drm_connector_funcs intel_lvds_connector_funcs = { - .dpms = drm_helper_connector_dpms, -- .save = intel_lvds_save, -- .restore = intel_lvds_restore, - .detect = intel_lvds_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .set_property = intel_lvds_set_property, -@@ -790,7 +718,12 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { - - static void intel_lvds_enc_destroy(struct drm_encoder *encoder) - { -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); -+ -+ if (intel_encoder->ddc_bus) -+ intel_i2c_destroy(intel_encoder->ddc_bus); - drm_encoder_cleanup(encoder); -+ kfree(intel_encoder); - } - - static const struct drm_encoder_funcs intel_lvds_enc_funcs = { -@@ -979,6 +912,7 @@ void intel_lvds_init(struct drm_device *dev) - { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *intel_encoder; -+ struct intel_connector *intel_connector; - struct drm_connector *connector; - struct drm_encoder *encoder; - struct drm_display_mode *scan; /* *modes, *bios_mode; */ -@@ -1012,19 +946,27 @@ void intel_lvds_init(struct drm_device *dev) - return; - } - -- connector = &intel_encoder->base; -+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); -+ if (!intel_connector) { -+ kfree(intel_encoder); -+ return; -+ } -+ -+ connector = &intel_connector->base; - encoder = &intel_encoder->enc; -- drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs, -+ 
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, - DRM_MODE_CONNECTOR_LVDS); - - drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs, - DRM_MODE_ENCODER_LVDS); - -- drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); -+ drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); - intel_encoder->type = INTEL_OUTPUT_LVDS; - - intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); - intel_encoder->crtc_mask = (1 << 1); -+ if (IS_I965G(dev)) -+ intel_encoder->crtc_mask |= (1 << 0); - drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); - drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); - connector->display_info.subpixel_order = SubPixelHorizontalRGB; -@@ -1039,7 +981,7 @@ void intel_lvds_init(struct drm_device *dev) - * the initial panel fitting mode will be FULL_SCREEN. - */ - -- drm_connector_attach_property(&intel_encoder->base, -+ drm_connector_attach_property(&intel_connector->base, - dev->mode_config.scaling_mode_property, - DRM_MODE_SCALE_FULLSCREEN); - lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; -@@ -1067,7 +1009,7 @@ void intel_lvds_init(struct drm_device *dev) - */ - dev_priv->lvds_edid_good = true; - -- if (!intel_ddc_get_modes(intel_encoder)) -+ if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus)) - dev_priv->lvds_edid_good = false; - - list_for_each_entry(scan, &connector->probed_modes, head) { -@@ -1151,4 +1093,5 @@ failed: - drm_connector_cleanup(connector); - drm_encoder_cleanup(encoder); - kfree(intel_encoder); -+ kfree(intel_connector); - } -diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c -index 8e5c83b..4b1fd3d 100644 ---- a/drivers/gpu/drm/i915/intel_modes.c -+++ b/drivers/gpu/drm/i915/intel_modes.c -@@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder) - } - }; - -- intel_i2c_quirk_set(intel_encoder->base.dev, true); -+ intel_i2c_quirk_set(intel_encoder->enc.dev, true); - ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2); -- intel_i2c_quirk_set(intel_encoder->base.dev, false); -+ intel_i2c_quirk_set(intel_encoder->enc.dev, false); - if (ret == 2) - return true; - -@@ -66,22 +66,23 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder) - /** - * intel_ddc_get_modes - get modelist from monitor - * @connector: DRM connector device to use -+ * @adapter: i2c adapter - * - * Fetch the EDID information from @connector using the DDC bus. 
- */ --int intel_ddc_get_modes(struct intel_encoder *intel_encoder) -+int intel_ddc_get_modes(struct drm_connector *connector, -+ struct i2c_adapter *adapter) - { - struct edid *edid; - int ret = 0; - -- intel_i2c_quirk_set(intel_encoder->base.dev, true); -- edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus); -- intel_i2c_quirk_set(intel_encoder->base.dev, false); -+ intel_i2c_quirk_set(connector->dev, true); -+ edid = drm_get_edid(connector, adapter); -+ intel_i2c_quirk_set(connector->dev, false); - if (edid) { -- drm_mode_connector_update_edid_property(&intel_encoder->base, -- edid); -- ret = drm_add_edid_modes(&intel_encoder->base, edid); -- intel_encoder->base.display_info.raw_edid = NULL; -+ drm_mode_connector_update_edid_property(connector, edid); -+ ret = drm_add_edid_modes(connector, edid); -+ connector->display_info.raw_edid = NULL; - kfree(edid); - } - -diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c -index 6d524a1..d7ad513 100644 ---- a/drivers/gpu/drm/i915/intel_overlay.c -+++ b/drivers/gpu/drm/i915/intel_overlay.c -@@ -211,9 +211,8 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) - static int intel_overlay_on(struct intel_overlay *overlay) - { - struct drm_device *dev = overlay->dev; -- drm_i915_private_t *dev_priv = dev->dev_private; - int ret; -- RING_LOCALS; -+ drm_i915_private_t *dev_priv = dev->dev_private; - - BUG_ON(overlay->active); - -@@ -227,11 +226,13 @@ static int intel_overlay_on(struct intel_overlay *overlay) - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); - -- overlay->last_flip_req = i915_add_request(dev, NULL, 0); -+ overlay->last_flip_req = -+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - -- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); -+ ret = i915_do_wait_request(dev, -+ overlay->last_flip_req, 1, &dev_priv->render_ring); - if (ret != 0) - return ret; - -@@ -248,7 +249,6 @@ static void intel_overlay_continue(struct intel_overlay *overlay, - drm_i915_private_t *dev_priv = dev->dev_private; - u32 flip_addr = overlay->flip_addr; - u32 tmp; -- RING_LOCALS; - - BUG_ON(!overlay->active); - -@@ -265,7 +265,8 @@ static void intel_overlay_continue(struct intel_overlay *overlay, - OUT_RING(flip_addr); - ADVANCE_LP_RING(); - -- overlay->last_flip_req = i915_add_request(dev, NULL, 0); -+ overlay->last_flip_req = -+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring); - } - - static int intel_overlay_wait_flip(struct intel_overlay *overlay) -@@ -274,10 +275,10 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay) - drm_i915_private_t *dev_priv = dev->dev_private; - int ret; - u32 tmp; -- RING_LOCALS; - - if (overlay->last_flip_req != 0) { -- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); -+ ret = i915_do_wait_request(dev, overlay->last_flip_req, -+ 1, &dev_priv->render_ring); - if (ret == 0) { - overlay->last_flip_req = 0; - -@@ -296,11 +297,13 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay) - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); - -- overlay->last_flip_req = i915_add_request(dev, NULL, 0); -+ overlay->last_flip_req = -+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - -- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); -+ ret = i915_do_wait_request(dev, overlay->last_flip_req, -+ 1, &dev_priv->render_ring); - if (ret != 0) - return ret; - -@@ -314,9 +317,8 @@ static int 
intel_overlay_off(struct intel_overlay *overlay) - { - u32 flip_addr = overlay->flip_addr; - struct drm_device *dev = overlay->dev; -- drm_i915_private_t *dev_priv = dev->dev_private; -+ drm_i915_private_t *dev_priv = dev->dev_private; - int ret; -- RING_LOCALS; - - BUG_ON(!overlay->active); - -@@ -336,11 +338,13 @@ static int intel_overlay_off(struct intel_overlay *overlay) - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); - -- overlay->last_flip_req = i915_add_request(dev, NULL, 0); -+ overlay->last_flip_req = -+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - -- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); -+ ret = i915_do_wait_request(dev, overlay->last_flip_req, -+ 1, &dev_priv->render_ring); - if (ret != 0) - return ret; - -@@ -354,11 +358,13 @@ static int intel_overlay_off(struct intel_overlay *overlay) - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); - -- overlay->last_flip_req = i915_add_request(dev, NULL, 0); -+ overlay->last_flip_req = -+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - -- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); -+ ret = i915_do_wait_request(dev, overlay->last_flip_req, -+ 1, &dev_priv->render_ring); - if (ret != 0) - return ret; - -@@ -373,7 +379,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay) - - /* never have the overlay hw on without showing a frame */ - BUG_ON(!overlay->vid_bo); -- obj = overlay->vid_bo->obj; -+ obj = &overlay->vid_bo->base; - - i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); -@@ -390,28 +396,29 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, - int interruptible) - { - struct drm_device *dev = overlay->dev; -- drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; -+ drm_i915_private_t *dev_priv = dev->dev_private; - u32 flip_addr; - int ret; -- RING_LOCALS; - - if (overlay->hw_wedged == HW_WEDGED) - return -EIO; - - if (overlay->last_flip_req == 0) { -- overlay->last_flip_req = i915_add_request(dev, NULL, 0); -+ overlay->last_flip_req = -+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - } - -- ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible); -+ ret = i915_do_wait_request(dev, overlay->last_flip_req, -+ interruptible, &dev_priv->render_ring); - if (ret != 0) - return ret; - - switch (overlay->hw_wedged) { - case RELEASE_OLD_VID: -- obj = overlay->old_vid_bo->obj; -+ obj = &overlay->old_vid_bo->base; - i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); - overlay->old_vid_bo = NULL; -@@ -429,12 +436,13 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); - -- overlay->last_flip_req = i915_add_request(dev, NULL, 0); -+ overlay->last_flip_req = i915_add_request(dev, NULL, -+ 0, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - - ret = i915_do_wait_request(dev, overlay->last_flip_req, -- interruptible); -+ interruptible, &dev_priv->render_ring); - if (ret != 0) - return ret; - -@@ -467,7 +475,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) - if (ret != 0) - return ret; - -- obj = overlay->old_vid_bo->obj; -+ obj = &overlay->old_vid_bo->base; - i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); - overlay->old_vid_bo = NULL; -@@ -1341,7 +1349,7 @@ void intel_setup_overlay(struct drm_device 
*dev) - return; - overlay->dev = dev; - -- reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE); -+ reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); - if (!reg_bo) - goto out_free; - overlay->reg_bo = to_intel_bo(reg_bo); -diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c -new file mode 100644 -index 0000000..cea4f1a ---- /dev/null -+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c -@@ -0,0 +1,849 @@ -+/* -+ * Copyright © 2008-2010 Intel Corporation -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the next -+ * paragraph) shall be included in all copies or substantial portions of the -+ * Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -+ * IN THE SOFTWARE. -+ * -+ * Authors: -+ * Eric Anholt -+ * Zou Nan hai -+ * Xiang Hai hao -+ * -+ */ -+ -+#include "drmP.h" -+#include "drm.h" -+#include "i915_drv.h" -+#include "i915_drm.h" -+#include "i915_trace.h" -+ -+static void -+render_ring_flush(struct drm_device *dev, -+ struct intel_ring_buffer *ring, -+ u32 invalidate_domains, -+ u32 flush_domains) -+{ -+#if WATCH_EXEC -+ DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, -+ invalidate_domains, flush_domains); -+#endif -+ u32 cmd; -+ trace_i915_gem_request_flush(dev, ring->next_seqno, -+ invalidate_domains, flush_domains); -+ -+ if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { -+ /* -+ * read/write caches: -+ * -+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is -+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is -+ * also flushed at 2d versus 3d pipeline switches. -+ * -+ * read-only caches: -+ * -+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if -+ * MI_READ_FLUSH is set, and is always flushed on 965. -+ * -+ * I915_GEM_DOMAIN_COMMAND may not exist? -+ * -+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is -+ * invalidated when MI_EXE_FLUSH is set. -+ * -+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is -+ * invalidated with every MI_FLUSH. -+ * -+ * TLBs: -+ * -+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND -+ * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and -+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER -+ * are flushed at any MI_FLUSH. -+ */ -+ -+ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; -+ if ((invalidate_domains|flush_domains) & -+ I915_GEM_DOMAIN_RENDER) -+ cmd &= ~MI_NO_WRITE_FLUSH; -+ if (!IS_I965G(dev)) { -+ /* -+ * On the 965, the sampler cache always gets flushed -+ * and this bit is reserved. 
-+ */ -+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) -+ cmd |= MI_READ_FLUSH; -+ } -+ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) -+ cmd |= MI_EXE_FLUSH; -+ -+#if WATCH_EXEC -+ DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); -+#endif -+ intel_ring_begin(dev, ring, 8); -+ intel_ring_emit(dev, ring, cmd); -+ intel_ring_emit(dev, ring, MI_NOOP); -+ intel_ring_advance(dev, ring); -+ } -+} -+ -+static unsigned int render_ring_get_head(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ return I915_READ(PRB0_HEAD) & HEAD_ADDR; -+} -+ -+static unsigned int render_ring_get_tail(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ return I915_READ(PRB0_TAIL) & TAIL_ADDR; -+} -+ -+static unsigned int render_ring_get_active_head(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; -+ -+ return I915_READ(acthd_reg); -+} -+ -+static void render_ring_advance_ring(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ I915_WRITE(PRB0_TAIL, ring->tail); -+} -+ -+static int init_ring_common(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ u32 head; -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ struct drm_i915_gem_object *obj_priv; -+ obj_priv = to_intel_bo(ring->gem_object); -+ -+ /* Stop the ring if it's running. */ -+ I915_WRITE(ring->regs.ctl, 0); -+ I915_WRITE(ring->regs.head, 0); -+ I915_WRITE(ring->regs.tail, 0); -+ -+ /* Initialize the ring. */ -+ I915_WRITE(ring->regs.start, obj_priv->gtt_offset); -+ head = ring->get_head(dev, ring); -+ -+ /* G45 ring initialization fails to reset head to zero */ -+ if (head != 0) { -+ DRM_ERROR("%s head not reset to zero " -+ "ctl %08x head %08x tail %08x start %08x\n", -+ ring->name, -+ I915_READ(ring->regs.ctl), -+ I915_READ(ring->regs.head), -+ I915_READ(ring->regs.tail), -+ I915_READ(ring->regs.start)); -+ -+ I915_WRITE(ring->regs.head, 0); -+ -+ DRM_ERROR("%s head forced to zero " -+ "ctl %08x head %08x tail %08x start %08x\n", -+ ring->name, -+ I915_READ(ring->regs.ctl), -+ I915_READ(ring->regs.head), -+ I915_READ(ring->regs.tail), -+ I915_READ(ring->regs.start)); -+ } -+ -+ I915_WRITE(ring->regs.ctl, -+ ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) -+ | RING_NO_REPORT | RING_VALID); -+ -+ head = I915_READ(ring->regs.head) & HEAD_ADDR; -+ /* If the head is still not zero, the ring is dead */ -+ if (head != 0) { -+ DRM_ERROR("%s initialization failed " -+ "ctl %08x head %08x tail %08x start %08x\n", -+ ring->name, -+ I915_READ(ring->regs.ctl), -+ I915_READ(ring->regs.head), -+ I915_READ(ring->regs.tail), -+ I915_READ(ring->regs.start)); -+ return -EIO; -+ } -+ -+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) -+ i915_kernel_lost_context(dev); -+ else { -+ ring->head = ring->get_head(dev, ring); -+ ring->tail = ring->get_tail(dev, ring); -+ ring->space = ring->head - (ring->tail + 8); -+ if (ring->space < 0) -+ ring->space += ring->size; -+ } -+ return 0; -+} -+ -+static int init_render_ring(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ int ret = init_ring_common(dev, ring); -+ if (IS_I9XX(dev) && !IS_GEN3(dev)) { -+ I915_WRITE(MI_MODE, -+ (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); -+ } -+ return ret; -+} -+ 
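[Aside, not part of the patch above.] init_ring_common() derives the free space in the ring from the head and tail pointers with the pattern "space = head - (tail + 8); if (space < 0) space += size;", and intel_wait_ring_buffer() further down repeats the same arithmetic while polling. A minimal standalone sketch of that calculation (user-space C, hypothetical values; the 8 bytes of slack are assumed here to keep a completely full ring distinguishable from an empty one):

/* Illustrative sketch only -- mirrors the "head - (tail + 8)"
 * free-space bookkeeping used by init_ring_common() and
 * intel_wait_ring_buffer() in the patch; not driver code.
 */
#include <stdio.h>

static int ring_free_space(unsigned int head, unsigned int tail,
			   unsigned int size)
{
	int space = (int)head - (int)(tail + 8);

	if (space < 0)
		space += size;	/* wrap around the ring */
	return space;
}

int main(void)
{
	/* hypothetical 32-page ring (32 * 4096 bytes), tail ahead of head */
	printf("free: %d bytes\n",
	       ring_free_space(0x100, 0x1f000, 32 * 4096));
	return 0;
}

With these made-up head/tail values the helper reports 4344 bytes free, the same figure the driver's inline computation would produce before deciding whether a caller of intel_ring_begin() has to wait for the ring to drain.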
-+#define PIPE_CONTROL_FLUSH(addr) \ -+do { \ -+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ -+ PIPE_CONTROL_DEPTH_STALL | 2); \ -+ OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ -+ OUT_RING(0); \ -+ OUT_RING(0); \ -+} while (0) -+ -+/** -+ * Creates a new sequence number, emitting a write of it to the status page -+ * plus an interrupt, which will trigger i915_user_interrupt_handler. -+ * -+ * Must be called with struct_lock held. -+ * -+ * Returned sequence numbers are nonzero on success. -+ */ -+static u32 -+render_ring_add_request(struct drm_device *dev, -+ struct intel_ring_buffer *ring, -+ struct drm_file *file_priv, -+ u32 flush_domains) -+{ -+ u32 seqno; -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ seqno = intel_ring_get_seqno(dev, ring); -+ -+ if (IS_GEN6(dev)) { -+ BEGIN_LP_RING(6); -+ OUT_RING(GFX_OP_PIPE_CONTROL | 3); -+ OUT_RING(PIPE_CONTROL_QW_WRITE | -+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | -+ PIPE_CONTROL_NOTIFY); -+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); -+ OUT_RING(seqno); -+ OUT_RING(0); -+ OUT_RING(0); -+ ADVANCE_LP_RING(); -+ } else if (HAS_PIPE_CONTROL(dev)) { -+ u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; -+ -+ /* -+ * Workaround qword write incoherence by flushing the -+ * PIPE_NOTIFY buffers out to memory before requesting -+ * an interrupt. -+ */ -+ BEGIN_LP_RING(32); -+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | -+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); -+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); -+ OUT_RING(seqno); -+ OUT_RING(0); -+ PIPE_CONTROL_FLUSH(scratch_addr); -+ scratch_addr += 128; /* write to separate cachelines */ -+ PIPE_CONTROL_FLUSH(scratch_addr); -+ scratch_addr += 128; -+ PIPE_CONTROL_FLUSH(scratch_addr); -+ scratch_addr += 128; -+ PIPE_CONTROL_FLUSH(scratch_addr); -+ scratch_addr += 128; -+ PIPE_CONTROL_FLUSH(scratch_addr); -+ scratch_addr += 128; -+ PIPE_CONTROL_FLUSH(scratch_addr); -+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | -+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | -+ PIPE_CONTROL_NOTIFY); -+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); -+ OUT_RING(seqno); -+ OUT_RING(0); -+ ADVANCE_LP_RING(); -+ } else { -+ BEGIN_LP_RING(4); -+ OUT_RING(MI_STORE_DWORD_INDEX); -+ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); -+ OUT_RING(seqno); -+ -+ OUT_RING(MI_USER_INTERRUPT); -+ ADVANCE_LP_RING(); -+ } -+ return seqno; -+} -+ -+static u32 -+render_ring_get_gem_seqno(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; -+ if (HAS_PIPE_CONTROL(dev)) -+ return ((volatile u32 *)(dev_priv->seqno_page))[0]; -+ else -+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX); -+} -+ -+static void -+render_ring_get_user_irq(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; -+ unsigned long irqflags; -+ -+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); -+ if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) { -+ if (HAS_PCH_SPLIT(dev)) -+ ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); -+ else -+ i915_enable_irq(dev_priv, I915_USER_INTERRUPT); -+ } -+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); -+} -+ -+static void -+render_ring_put_user_irq(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; -+ unsigned long irqflags; -+ 
-+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); -+ BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0); -+ if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) { -+ if (HAS_PCH_SPLIT(dev)) -+ ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); -+ else -+ i915_disable_irq(dev_priv, I915_USER_INTERRUPT); -+ } -+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); -+} -+ -+static void render_setup_status_page(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ if (IS_GEN6(dev)) { -+ I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr); -+ I915_READ(HWS_PGA_GEN6); /* posting read */ -+ } else { -+ I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); -+ I915_READ(HWS_PGA); /* posting read */ -+ } -+ -+} -+ -+void -+bsd_ring_flush(struct drm_device *dev, -+ struct intel_ring_buffer *ring, -+ u32 invalidate_domains, -+ u32 flush_domains) -+{ -+ intel_ring_begin(dev, ring, 8); -+ intel_ring_emit(dev, ring, MI_FLUSH); -+ intel_ring_emit(dev, ring, MI_NOOP); -+ intel_ring_advance(dev, ring); -+} -+ -+static inline unsigned int bsd_ring_get_head(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ return I915_READ(BSD_RING_HEAD) & HEAD_ADDR; -+} -+ -+static inline unsigned int bsd_ring_get_tail(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ return I915_READ(BSD_RING_TAIL) & TAIL_ADDR; -+} -+ -+static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ return I915_READ(BSD_RING_ACTHD); -+} -+ -+static inline void bsd_ring_advance_ring(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ I915_WRITE(BSD_RING_TAIL, ring->tail); -+} -+ -+static int init_bsd_ring(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ return init_ring_common(dev, ring); -+} -+ -+static u32 -+bsd_ring_add_request(struct drm_device *dev, -+ struct intel_ring_buffer *ring, -+ struct drm_file *file_priv, -+ u32 flush_domains) -+{ -+ u32 seqno; -+ seqno = intel_ring_get_seqno(dev, ring); -+ intel_ring_begin(dev, ring, 4); -+ intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); -+ intel_ring_emit(dev, ring, -+ I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); -+ intel_ring_emit(dev, ring, seqno); -+ intel_ring_emit(dev, ring, MI_USER_INTERRUPT); -+ intel_ring_advance(dev, ring); -+ -+ DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); -+ -+ return seqno; -+} -+ -+static void bsd_setup_status_page(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr); -+ I915_READ(BSD_HWS_PGA); -+} -+ -+static void -+bsd_ring_get_user_irq(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ /* do nothing */ -+} -+static void -+bsd_ring_put_user_irq(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ /* do nothing */ -+} -+ -+static u32 -+bsd_ring_get_gem_seqno(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX); -+} -+ -+static int -+bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, -+ struct intel_ring_buffer *ring, -+ struct drm_i915_gem_execbuffer2 *exec, -+ struct drm_clip_rect *cliprects, -+ uint64_t exec_offset) -+{ -+ 
uint32_t exec_start; -+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset; -+ intel_ring_begin(dev, ring, 2); -+ intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | -+ (2 << 6) | MI_BATCH_NON_SECURE_I965); -+ intel_ring_emit(dev, ring, exec_start); -+ intel_ring_advance(dev, ring); -+ return 0; -+} -+ -+ -+static int -+render_ring_dispatch_gem_execbuffer(struct drm_device *dev, -+ struct intel_ring_buffer *ring, -+ struct drm_i915_gem_execbuffer2 *exec, -+ struct drm_clip_rect *cliprects, -+ uint64_t exec_offset) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ int nbox = exec->num_cliprects; -+ int i = 0, count; -+ uint32_t exec_start, exec_len; -+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset; -+ exec_len = (uint32_t) exec->batch_len; -+ -+ trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1); -+ -+ count = nbox ? nbox : 1; -+ -+ for (i = 0; i < count; i++) { -+ if (i < nbox) { -+ int ret = i915_emit_box(dev, cliprects, i, -+ exec->DR1, exec->DR4); -+ if (ret) -+ return ret; -+ } -+ -+ if (IS_I830(dev) || IS_845G(dev)) { -+ intel_ring_begin(dev, ring, 4); -+ intel_ring_emit(dev, ring, MI_BATCH_BUFFER); -+ intel_ring_emit(dev, ring, -+ exec_start | MI_BATCH_NON_SECURE); -+ intel_ring_emit(dev, ring, exec_start + exec_len - 4); -+ intel_ring_emit(dev, ring, 0); -+ } else { -+ intel_ring_begin(dev, ring, 4); -+ if (IS_I965G(dev)) { -+ intel_ring_emit(dev, ring, -+ MI_BATCH_BUFFER_START | (2 << 6) -+ | MI_BATCH_NON_SECURE_I965); -+ intel_ring_emit(dev, ring, exec_start); -+ } else { -+ intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START -+ | (2 << 6)); -+ intel_ring_emit(dev, ring, exec_start | -+ MI_BATCH_NON_SECURE); -+ } -+ } -+ intel_ring_advance(dev, ring); -+ } -+ -+ /* XXX breadcrumb */ -+ return 0; -+} -+ -+static void cleanup_status_page(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ struct drm_gem_object *obj; -+ struct drm_i915_gem_object *obj_priv; -+ -+ obj = ring->status_page.obj; -+ if (obj == NULL) -+ return; -+ obj_priv = to_intel_bo(obj); -+ -+ kunmap(obj_priv->pages[0]); -+ i915_gem_object_unpin(obj); -+ drm_gem_object_unreference(obj); -+ ring->status_page.obj = NULL; -+ -+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); -+} -+ -+static int init_status_page(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ struct drm_gem_object *obj; -+ struct drm_i915_gem_object *obj_priv; -+ int ret; -+ -+ obj = i915_gem_alloc_object(dev, 4096); -+ if (obj == NULL) { -+ DRM_ERROR("Failed to allocate status page\n"); -+ ret = -ENOMEM; -+ goto err; -+ } -+ obj_priv = to_intel_bo(obj); -+ obj_priv->agp_type = AGP_USER_CACHED_MEMORY; -+ -+ ret = i915_gem_object_pin(obj, 4096); -+ if (ret != 0) { -+ goto err_unref; -+ } -+ -+ ring->status_page.gfx_addr = obj_priv->gtt_offset; -+ ring->status_page.page_addr = kmap(obj_priv->pages[0]); -+ if (ring->status_page.page_addr == NULL) { -+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); -+ goto err_unpin; -+ } -+ ring->status_page.obj = obj; -+ memset(ring->status_page.page_addr, 0, PAGE_SIZE); -+ -+ ring->setup_status_page(dev, ring); -+ DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", -+ ring->name, ring->status_page.gfx_addr); -+ -+ return 0; -+ -+err_unpin: -+ i915_gem_object_unpin(obj); -+err_unref: -+ drm_gem_object_unreference(obj); -+err: -+ return ret; -+} -+ -+ -+int intel_init_ring_buffer(struct drm_device *dev, -+ struct 
intel_ring_buffer *ring) -+{ -+ int ret; -+ struct drm_i915_gem_object *obj_priv; -+ struct drm_gem_object *obj; -+ ring->dev = dev; -+ -+ if (I915_NEED_GFX_HWS(dev)) { -+ ret = init_status_page(dev, ring); -+ if (ret) -+ return ret; -+ } -+ -+ obj = i915_gem_alloc_object(dev, ring->size); -+ if (obj == NULL) { -+ DRM_ERROR("Failed to allocate ringbuffer\n"); -+ ret = -ENOMEM; -+ goto cleanup; -+ } -+ -+ ring->gem_object = obj; -+ -+ ret = i915_gem_object_pin(obj, ring->alignment); -+ if (ret != 0) { -+ drm_gem_object_unreference(obj); -+ goto cleanup; -+ } -+ -+ obj_priv = to_intel_bo(obj); -+ ring->map.size = ring->size; -+ ring->map.offset = dev->agp->base + obj_priv->gtt_offset; -+ ring->map.type = 0; -+ ring->map.flags = 0; -+ ring->map.mtrr = 0; -+ -+ drm_core_ioremap_wc(&ring->map, dev); -+ if (ring->map.handle == NULL) { -+ DRM_ERROR("Failed to map ringbuffer.\n"); -+ i915_gem_object_unpin(obj); -+ drm_gem_object_unreference(obj); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ -+ ring->virtual_start = ring->map.handle; -+ ret = ring->init(dev, ring); -+ if (ret != 0) { -+ intel_cleanup_ring_buffer(dev, ring); -+ return ret; -+ } -+ -+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) -+ i915_kernel_lost_context(dev); -+ else { -+ ring->head = ring->get_head(dev, ring); -+ ring->tail = ring->get_tail(dev, ring); -+ ring->space = ring->head - (ring->tail + 8); -+ if (ring->space < 0) -+ ring->space += ring->size; -+ } -+ INIT_LIST_HEAD(&ring->active_list); -+ INIT_LIST_HEAD(&ring->request_list); -+ return ret; -+cleanup: -+ cleanup_status_page(dev, ring); -+ return ret; -+} -+ -+void intel_cleanup_ring_buffer(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ if (ring->gem_object == NULL) -+ return; -+ -+ drm_core_ioremapfree(&ring->map, dev); -+ -+ i915_gem_object_unpin(ring->gem_object); -+ drm_gem_object_unreference(ring->gem_object); -+ ring->gem_object = NULL; -+ cleanup_status_page(dev, ring); -+} -+ -+int intel_wrap_ring_buffer(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ unsigned int *virt; -+ int rem; -+ rem = ring->size - ring->tail; -+ -+ if (ring->space < rem) { -+ int ret = intel_wait_ring_buffer(dev, ring, rem); -+ if (ret) -+ return ret; -+ } -+ -+ virt = (unsigned int *)(ring->virtual_start + ring->tail); -+ rem /= 4; -+ while (rem--) -+ *virt++ = MI_NOOP; -+ -+ ring->tail = 0; -+ -+ return 0; -+} -+ -+int intel_wait_ring_buffer(struct drm_device *dev, -+ struct intel_ring_buffer *ring, int n) -+{ -+ unsigned long end; -+ -+ trace_i915_ring_wait_begin (dev); -+ end = jiffies + 3 * HZ; -+ do { -+ ring->head = ring->get_head(dev, ring); -+ ring->space = ring->head - (ring->tail + 8); -+ if (ring->space < 0) -+ ring->space += ring->size; -+ if (ring->space >= n) { -+ trace_i915_ring_wait_end (dev); -+ return 0; -+ } -+ -+ if (dev->primary->master) { -+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; -+ if (master_priv->sarea_priv) -+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; -+ } -+ -+ yield(); -+ } while (!time_after(jiffies, end)); -+ trace_i915_ring_wait_end (dev); -+ return -EBUSY; -+} -+ -+void intel_ring_begin(struct drm_device *dev, -+ struct intel_ring_buffer *ring, int n) -+{ -+ if (unlikely(ring->tail + n > ring->size)) -+ intel_wrap_ring_buffer(dev, ring); -+ if (unlikely(ring->space < n)) -+ intel_wait_ring_buffer(dev, ring, n); -+} -+ -+void intel_ring_emit(struct drm_device *dev, -+ struct intel_ring_buffer *ring, unsigned int data) -+{ -+ unsigned int *virt = ring->virtual_start + 
ring->tail; -+ *virt = data; -+ ring->tail += 4; -+ ring->tail &= ring->size - 1; -+ ring->space -= 4; -+} -+ -+void intel_ring_advance(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ ring->advance_ring(dev, ring); -+} -+ -+void intel_fill_struct(struct drm_device *dev, -+ struct intel_ring_buffer *ring, -+ void *data, -+ unsigned int len) -+{ -+ unsigned int *virt = ring->virtual_start + ring->tail; -+ BUG_ON((len&~(4-1)) != 0); -+ intel_ring_begin(dev, ring, len); -+ memcpy(virt, data, len); -+ ring->tail += len; -+ ring->tail &= ring->size - 1; -+ ring->space -= len; -+ intel_ring_advance(dev, ring); -+} -+ -+u32 intel_ring_get_seqno(struct drm_device *dev, -+ struct intel_ring_buffer *ring) -+{ -+ u32 seqno; -+ seqno = ring->next_seqno; -+ -+ /* reserve 0 for non-seqno */ -+ if (++ring->next_seqno == 0) -+ ring->next_seqno = 1; -+ return seqno; -+} -+ -+struct intel_ring_buffer render_ring = { -+ .name = "render ring", -+ .regs = { -+ .ctl = PRB0_CTL, -+ .head = PRB0_HEAD, -+ .tail = PRB0_TAIL, -+ .start = PRB0_START -+ }, -+ .ring_flag = I915_EXEC_RENDER, -+ .size = 32 * PAGE_SIZE, -+ .alignment = PAGE_SIZE, -+ .virtual_start = NULL, -+ .dev = NULL, -+ .gem_object = NULL, -+ .head = 0, -+ .tail = 0, -+ .space = 0, -+ .next_seqno = 1, -+ .user_irq_refcount = 0, -+ .irq_gem_seqno = 0, -+ .waiting_gem_seqno = 0, -+ .setup_status_page = render_setup_status_page, -+ .init = init_render_ring, -+ .get_head = render_ring_get_head, -+ .get_tail = render_ring_get_tail, -+ .get_active_head = render_ring_get_active_head, -+ .advance_ring = render_ring_advance_ring, -+ .flush = render_ring_flush, -+ .add_request = render_ring_add_request, -+ .get_gem_seqno = render_ring_get_gem_seqno, -+ .user_irq_get = render_ring_get_user_irq, -+ .user_irq_put = render_ring_put_user_irq, -+ .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer, -+ .status_page = {NULL, 0, NULL}, -+ .map = {0,} -+}; -+ -+/* ring buffer for bit-stream decoder */ -+ -+struct intel_ring_buffer bsd_ring = { -+ .name = "bsd ring", -+ .regs = { -+ .ctl = BSD_RING_CTL, -+ .head = BSD_RING_HEAD, -+ .tail = BSD_RING_TAIL, -+ .start = BSD_RING_START -+ }, -+ .ring_flag = I915_EXEC_BSD, -+ .size = 32 * PAGE_SIZE, -+ .alignment = PAGE_SIZE, -+ .virtual_start = NULL, -+ .dev = NULL, -+ .gem_object = NULL, -+ .head = 0, -+ .tail = 0, -+ .space = 0, -+ .next_seqno = 1, -+ .user_irq_refcount = 0, -+ .irq_gem_seqno = 0, -+ .waiting_gem_seqno = 0, -+ .setup_status_page = bsd_setup_status_page, -+ .init = init_bsd_ring, -+ .get_head = bsd_ring_get_head, -+ .get_tail = bsd_ring_get_tail, -+ .get_active_head = bsd_ring_get_active_head, -+ .advance_ring = bsd_ring_advance_ring, -+ .flush = bsd_ring_flush, -+ .add_request = bsd_ring_add_request, -+ .get_gem_seqno = bsd_ring_get_gem_seqno, -+ .user_irq_get = bsd_ring_get_user_irq, -+ .user_irq_put = bsd_ring_put_user_irq, -+ .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer, -+ .status_page = {NULL, 0, NULL}, -+ .map = {0,} -+}; -diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h -new file mode 100644 -index 0000000..d5568d3 ---- /dev/null -+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h -@@ -0,0 +1,124 @@ -+#ifndef _INTEL_RINGBUFFER_H_ -+#define _INTEL_RINGBUFFER_H_ -+ -+struct intel_hw_status_page { -+ void *page_addr; -+ unsigned int gfx_addr; -+ struct drm_gem_object *obj; -+}; -+ -+struct drm_i915_gem_execbuffer2; -+struct intel_ring_buffer { -+ const char *name; -+ struct ring_regs { -+ u32 ctl; -+ u32 head; -+ u32 
tail; -+ u32 start; -+ } regs; -+ unsigned int ring_flag; -+ unsigned long size; -+ unsigned int alignment; -+ void *virtual_start; -+ struct drm_device *dev; -+ struct drm_gem_object *gem_object; -+ -+ unsigned int head; -+ unsigned int tail; -+ unsigned int space; -+ u32 next_seqno; -+ struct intel_hw_status_page status_page; -+ -+ u32 irq_gem_seqno; /* last seq seem at irq time */ -+ u32 waiting_gem_seqno; -+ int user_irq_refcount; -+ void (*user_irq_get)(struct drm_device *dev, -+ struct intel_ring_buffer *ring); -+ void (*user_irq_put)(struct drm_device *dev, -+ struct intel_ring_buffer *ring); -+ void (*setup_status_page)(struct drm_device *dev, -+ struct intel_ring_buffer *ring); -+ -+ int (*init)(struct drm_device *dev, -+ struct intel_ring_buffer *ring); -+ -+ unsigned int (*get_head)(struct drm_device *dev, -+ struct intel_ring_buffer *ring); -+ unsigned int (*get_tail)(struct drm_device *dev, -+ struct intel_ring_buffer *ring); -+ unsigned int (*get_active_head)(struct drm_device *dev, -+ struct intel_ring_buffer *ring); -+ void (*advance_ring)(struct drm_device *dev, -+ struct intel_ring_buffer *ring); -+ void (*flush)(struct drm_device *dev, -+ struct intel_ring_buffer *ring, -+ u32 invalidate_domains, -+ u32 flush_domains); -+ u32 (*add_request)(struct drm_device *dev, -+ struct intel_ring_buffer *ring, -+ struct drm_file *file_priv, -+ u32 flush_domains); -+ u32 (*get_gem_seqno)(struct drm_device *dev, -+ struct intel_ring_buffer *ring); -+ int (*dispatch_gem_execbuffer)(struct drm_device *dev, -+ struct intel_ring_buffer *ring, -+ struct drm_i915_gem_execbuffer2 *exec, -+ struct drm_clip_rect *cliprects, -+ uint64_t exec_offset); -+ -+ /** -+ * List of objects currently involved in rendering from the -+ * ringbuffer. -+ * -+ * Includes buffers having the contents of their GPU caches -+ * flushed, not necessarily primitives. last_rendering_seqno -+ * represents when the rendering involved will be completed. -+ * -+ * A reference is held on the buffer while on this list. -+ */ -+ struct list_head active_list; -+ -+ /** -+ * List of breadcrumbs associated with GPU requests currently -+ * outstanding. 
-+ */ -+ struct list_head request_list; -+ -+ wait_queue_head_t irq_queue; -+ drm_local_map_t map; -+}; -+ -+static inline u32 -+intel_read_status_page(struct intel_ring_buffer *ring, -+ int reg) -+{ -+ u32 *regs = ring->status_page.page_addr; -+ return regs[reg]; -+} -+ -+int intel_init_ring_buffer(struct drm_device *dev, -+ struct intel_ring_buffer *ring); -+void intel_cleanup_ring_buffer(struct drm_device *dev, -+ struct intel_ring_buffer *ring); -+int intel_wait_ring_buffer(struct drm_device *dev, -+ struct intel_ring_buffer *ring, int n); -+int intel_wrap_ring_buffer(struct drm_device *dev, -+ struct intel_ring_buffer *ring); -+void intel_ring_begin(struct drm_device *dev, -+ struct intel_ring_buffer *ring, int n); -+void intel_ring_emit(struct drm_device *dev, -+ struct intel_ring_buffer *ring, u32 data); -+void intel_fill_struct(struct drm_device *dev, -+ struct intel_ring_buffer *ring, -+ void *data, -+ unsigned int len); -+void intel_ring_advance(struct drm_device *dev, -+ struct intel_ring_buffer *ring); -+ -+u32 intel_ring_get_seqno(struct drm_device *dev, -+ struct intel_ring_buffer *ring); -+ -+extern struct intel_ring_buffer render_ring; -+extern struct intel_ring_buffer bsd_ring; -+ -+#endif /* _INTEL_RINGBUFFER_H_ */ -diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c -index 87d9536..76993ac 100644 ---- a/drivers/gpu/drm/i915/intel_sdvo.c -+++ b/drivers/gpu/drm/i915/intel_sdvo.c -@@ -36,7 +36,18 @@ - #include "i915_drm.h" - #include "i915_drv.h" - #include "intel_sdvo_regs.h" --#include -+ -+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1) -+#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1) -+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1) -+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0) -+ -+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\ -+ SDVO_TV_MASK) -+ -+#define IS_TV(c) (c->output_flag & SDVO_TV_MASK) -+#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) -+ - - static char *tv_format_names[] = { - "NTSC_M" , "NTSC_J" , "NTSC_443", -@@ -86,12 +97,6 @@ struct intel_sdvo_priv { - /* This is for current tv format name */ - char *tv_format_name; - -- /* This contains all current supported TV format */ -- char *tv_format_supported[TV_FORMAT_NUM]; -- int format_supported_num; -- struct drm_property *tv_format_property; -- struct drm_property *tv_format_name_property[TV_FORMAT_NUM]; -- - /** - * This is set if we treat the device as HDMI, instead of DVI. - */ -@@ -112,12 +117,6 @@ struct intel_sdvo_priv { - */ - struct drm_display_mode *sdvo_lvds_fixed_mode; - -- /** -- * Returned SDTV resolutions allowed for the current format, if the -- * device reported it. 
-- */ -- struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions; -- - /* - * supported encoding mode, used to determine whether HDMI is - * supported -@@ -130,11 +129,24 @@ struct intel_sdvo_priv { - /* Mac mini hack -- use the same DDC as the analog connector */ - struct i2c_adapter *analog_ddc_bus; - -- int save_sdvo_mult; -- u16 save_active_outputs; -- struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; -- struct intel_sdvo_dtd save_output_dtd[16]; -- u32 save_SDVOX; -+}; -+ -+struct intel_sdvo_connector { -+ /* Mark the type of connector */ -+ uint16_t output_flag; -+ -+ /* This contains all current supported TV format */ -+ char *tv_format_supported[TV_FORMAT_NUM]; -+ int format_supported_num; -+ struct drm_property *tv_format_property; -+ struct drm_property *tv_format_name_property[TV_FORMAT_NUM]; -+ -+ /** -+ * Returned SDTV resolutions allowed for the current format, if the -+ * device reported it. -+ */ -+ struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions; -+ - /* add the property for the SDVO-TV */ - struct drm_property *left_property; - struct drm_property *right_property; -@@ -162,7 +174,12 @@ struct intel_sdvo_priv { - }; - - static bool --intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags); -+intel_sdvo_output_setup(struct intel_encoder *intel_encoder, -+ uint16_t flags); -+static void -+intel_sdvo_tv_create_property(struct drm_connector *connector, int type); -+static void -+intel_sdvo_create_enhance_property(struct drm_connector *connector); - - /** - * Writes the SDVOB or SDVOC with the given value, but always writes both -@@ -171,12 +188,18 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags); - */ - static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val) - { -- struct drm_device *dev = intel_encoder->base.dev; -+ struct drm_device *dev = intel_encoder->enc.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; - u32 bval = val, cval = val; - int i; - -+ if (sdvo_priv->sdvo_reg == PCH_SDVOB) { -+ I915_WRITE(sdvo_priv->sdvo_reg, val); -+ I915_READ(sdvo_priv->sdvo_reg); -+ return; -+ } -+ - if (sdvo_priv->sdvo_reg == SDVOB) { - cval = I915_READ(SDVOC); - } else { -@@ -353,7 +376,8 @@ static const struct _sdvo_cmd_name { - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), - }; - --#define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC") -+#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB) -+#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? 
"SDVOB" : "SDVOC") - #define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv) - - static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd, -@@ -563,17 +587,6 @@ static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, b - return true; - } - --static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder, -- u16 *outputs) --{ -- u8 status; -- -- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); -- status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs)); -- -- return (status == SDVO_CMD_STATUS_SUCCESS); --} -- - static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder, - u16 outputs) - { -@@ -646,40 +659,6 @@ static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder, - return (status == SDVO_CMD_STATUS_SUCCESS); - } - --static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd, -- struct intel_sdvo_dtd *dtd) --{ -- u8 status; -- -- intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0); -- status = intel_sdvo_read_response(intel_encoder, &dtd->part1, -- sizeof(dtd->part1)); -- if (status != SDVO_CMD_STATUS_SUCCESS) -- return false; -- -- intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0); -- status = intel_sdvo_read_response(intel_encoder, &dtd->part2, -- sizeof(dtd->part2)); -- if (status != SDVO_CMD_STATUS_SUCCESS) -- return false; -- -- return true; --} -- --static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder, -- struct intel_sdvo_dtd *dtd) --{ -- return intel_sdvo_get_timing(intel_encoder, -- SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); --} -- --static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder, -- struct intel_sdvo_dtd *dtd) --{ -- return intel_sdvo_get_timing(intel_encoder, -- SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd); --} -- - static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd, - struct intel_sdvo_dtd *dtd) - { -@@ -767,23 +746,6 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_en - return false; - } - --static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder) --{ -- u8 response, status; -- -- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); -- status = intel_sdvo_read_response(intel_encoder, &response, 1); -- -- if (status != SDVO_CMD_STATUS_SUCCESS) { -- DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n"); -- return SDVO_CLOCK_RATE_MULT_1X; -- } else { -- DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response); -- } -- -- return response; --} -- - static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val) - { - u8 status; -@@ -1071,7 +1033,7 @@ static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder) - memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? 
- sizeof(format) : sizeof(format_map)); - -- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map, -+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format, - sizeof(format)); - - status = intel_sdvo_read_response(intel_encoder, NULL, 0); -@@ -1101,7 +1063,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, - /* Set output timings */ - intel_sdvo_get_dtd_from_mode(&output_dtd, mode); - intel_sdvo_set_target_output(intel_encoder, -- dev_priv->controlled_output); -+ dev_priv->attached_output); - intel_sdvo_set_output_timing(intel_encoder, &output_dtd); - - /* Set the input timing to the screen. Assume always input 0. */ -@@ -1139,7 +1101,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, - dev_priv->sdvo_lvds_fixed_mode); - - intel_sdvo_set_target_output(intel_encoder, -- dev_priv->controlled_output); -+ dev_priv->attached_output); - intel_sdvo_set_output_timing(intel_encoder, &output_dtd); - - /* Set the input timing to the screen. Assume always input 0. */ -@@ -1204,7 +1166,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, - * channel on the motherboard. In a two-input device, the first input - * will be SDVOB and the second SDVOC. - */ -- in_out.in0 = sdvo_priv->controlled_output; -+ in_out.in0 = sdvo_priv->attached_output; - in_out.in1 = 0; - - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP, -@@ -1230,7 +1192,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, - if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { - /* Set the output timing to the screen */ - intel_sdvo_set_target_output(intel_encoder, -- sdvo_priv->controlled_output); -+ sdvo_priv->attached_output); - intel_sdvo_set_output_timing(intel_encoder, &input_dtd); - } - -@@ -1352,107 +1314,16 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) - - if (0) - intel_sdvo_set_encoder_power_state(intel_encoder, mode); -- intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output); -+ intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output); - } - return; - } - --static void intel_sdvo_save(struct drm_connector *connector) --{ -- struct drm_device *dev = connector->dev; -- struct drm_i915_private *dev_priv = dev->dev_private; -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; -- int o; -- -- sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder); -- intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs); -- -- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { -- intel_sdvo_set_target_input(intel_encoder, true, false); -- intel_sdvo_get_input_timing(intel_encoder, -- &sdvo_priv->save_input_dtd_1); -- } -- -- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { -- intel_sdvo_set_target_input(intel_encoder, false, true); -- intel_sdvo_get_input_timing(intel_encoder, -- &sdvo_priv->save_input_dtd_2); -- } -- -- for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) -- { -- u16 this_output = (1 << o); -- if (sdvo_priv->caps.output_flags & this_output) -- { -- intel_sdvo_set_target_output(intel_encoder, this_output); -- intel_sdvo_get_output_timing(intel_encoder, -- &sdvo_priv->save_output_dtd[o]); -- } -- } -- if (sdvo_priv->is_tv) { -- /* XXX: Save TV format/enhancements. 
*/ -- } -- -- sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg); --} -- --static void intel_sdvo_restore(struct drm_connector *connector) --{ -- struct drm_device *dev = connector->dev; -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; -- int o; -- int i; -- bool input1, input2; -- u8 status; -- -- intel_sdvo_set_active_outputs(intel_encoder, 0); -- -- for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) -- { -- u16 this_output = (1 << o); -- if (sdvo_priv->caps.output_flags & this_output) { -- intel_sdvo_set_target_output(intel_encoder, this_output); -- intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]); -- } -- } -- -- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { -- intel_sdvo_set_target_input(intel_encoder, true, false); -- intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1); -- } -- -- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { -- intel_sdvo_set_target_input(intel_encoder, false, true); -- intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2); -- } -- -- intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult); -- -- if (sdvo_priv->is_tv) { -- /* XXX: Restore TV format/enhancements. */ -- } -- -- intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX); -- -- if (sdvo_priv->save_SDVOX & SDVO_ENABLE) -- { -- for (i = 0; i < 2; i++) -- intel_wait_for_vblank(dev); -- status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2); -- if (status == SDVO_CMD_STATUS_SUCCESS && !input1) -- DRM_DEBUG_KMS("First %s output reported failure to " -- "sync\n", SDVO_NAME(sdvo_priv)); -- } -- -- intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs); --} -- - static int intel_sdvo_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; - - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) -@@ -1490,6 +1361,8 @@ static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, str - return true; - } - -+/* No use! 
*/ -+#if 0 - struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) - { - struct drm_connector *connector = NULL; -@@ -1560,6 +1433,7 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); - intel_sdvo_read_response(intel_encoder, &response, 2); - } -+#endif - - static bool - intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder) -@@ -1598,12 +1472,17 @@ static struct drm_connector * - intel_find_analog_connector(struct drm_device *dev) - { - struct drm_connector *connector; -+ struct drm_encoder *encoder; - struct intel_encoder *intel_encoder; - -- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -- intel_encoder = to_intel_encoder(connector); -- if (intel_encoder->type == INTEL_OUTPUT_ANALOG) -- return connector; -+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { -+ intel_encoder = enc_to_intel_encoder(encoder); -+ if (intel_encoder->type == INTEL_OUTPUT_ANALOG) { -+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -+ if (encoder == intel_attached_encoder(connector)) -+ return connector; -+ } -+ } - } - return NULL; - } -@@ -1625,15 +1504,17 @@ intel_analog_is_connected(struct drm_device *dev) - } - - enum drm_connector_status --intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) -+intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; -+ struct intel_connector *intel_connector = to_intel_connector(connector); -+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; - enum drm_connector_status status = connector_status_connected; - struct edid *edid = NULL; - -- edid = drm_get_edid(&intel_encoder->base, -- intel_encoder->ddc_bus); -+ edid = drm_get_edid(connector, intel_encoder->ddc_bus); - - /* This is only applied to SDVO cards with multiple outputs */ - if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) { -@@ -1646,8 +1527,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) - */ - while(temp_ddc > 1) { - sdvo_priv->ddc_bus = temp_ddc; -- edid = drm_get_edid(&intel_encoder->base, -- intel_encoder->ddc_bus); -+ edid = drm_get_edid(connector, intel_encoder->ddc_bus); - if (edid) { - /* - * When we can get the EDID, maybe it is the -@@ -1664,28 +1544,25 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) - /* when there is no edid and no monitor is connected with VGA - * port, try to use the CRT ddc to read the EDID for DVI-connector - */ -- if (edid == NULL && -- sdvo_priv->analog_ddc_bus && -- !intel_analog_is_connected(intel_encoder->base.dev)) -- edid = drm_get_edid(&intel_encoder->base, -- sdvo_priv->analog_ddc_bus); -+ if (edid == NULL && sdvo_priv->analog_ddc_bus && -+ !intel_analog_is_connected(connector->dev)) -+ edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus); -+ - if (edid != NULL) { -- /* Don't report the output as connected if it's a DVI-I -- * connector with a non-digital EDID coming out. 
-- */ -- if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { -- if (edid->input & DRM_EDID_INPUT_DIGITAL) -- sdvo_priv->is_hdmi = -- drm_detect_hdmi_monitor(edid); -- else -- status = connector_status_disconnected; -- } -+ bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); -+ bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK); - -- kfree(edid); -- intel_encoder->base.display_info.raw_edid = NULL; -+ /* DDC bus is shared, match EDID to connector type */ -+ if (is_digital && need_digital) -+ sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); -+ else if (is_digital != need_digital) -+ status = connector_status_disconnected; - -- } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) -+ connector->display_info.raw_edid = NULL; -+ } else - status = connector_status_disconnected; -+ -+ kfree(edid); - - return status; - } -@@ -1694,8 +1571,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect - { - uint16_t response; - u8 status; -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); -+ struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; -+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; -+ enum drm_connector_status ret; - - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); -@@ -1713,24 +1594,41 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect - if (response == 0) - return connector_status_disconnected; - -- if (intel_sdvo_multifunc_encoder(intel_encoder) && -- sdvo_priv->attached_output != response) { -- if (sdvo_priv->controlled_output != response && -- intel_sdvo_output_setup(intel_encoder, response) != true) -- return connector_status_unknown; -- sdvo_priv->attached_output = response; -+ sdvo_priv->attached_output = response; -+ -+ if ((sdvo_connector->output_flag & response) == 0) -+ ret = connector_status_disconnected; -+ else if (response & SDVO_TMDS_MASK) -+ ret = intel_sdvo_hdmi_sink_detect(connector); -+ else -+ ret = connector_status_connected; -+ -+ /* May update encoder flag for like clock for SDVO TV, etc.*/ -+ if (ret == connector_status_connected) { -+ sdvo_priv->is_tv = false; -+ sdvo_priv->is_lvds = false; -+ intel_encoder->needs_tv_clock = false; -+ -+ if (response & SDVO_TV_MASK) { -+ sdvo_priv->is_tv = true; -+ intel_encoder->needs_tv_clock = true; -+ } -+ if (response & SDVO_LVDS_MASK) -+ sdvo_priv->is_lvds = true; - } -- return intel_sdvo_hdmi_sink_detect(connector, response); -+ -+ return ret; - } - - static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; - int num_modes; - - /* set the bus switch and get the modes */ -- num_modes = intel_ddc_get_modes(intel_encoder); -+ num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); - - /* - * Mac mini hack. 
On this device, the DVI-I connector shares one DDC -@@ -1740,17 +1638,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) - */ - if (num_modes == 0 && - sdvo_priv->analog_ddc_bus && -- !intel_analog_is_connected(intel_encoder->base.dev)) { -- struct i2c_adapter *digital_ddc_bus; -- -+ !intel_analog_is_connected(connector->dev)) { - /* Switch to the analog ddc bus and try that - */ -- digital_ddc_bus = intel_encoder->ddc_bus; -- intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus; -- -- (void) intel_ddc_get_modes(intel_encoder); -- -- intel_encoder->ddc_bus = digital_ddc_bus; -+ (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus); - } - } - -@@ -1821,8 +1712,9 @@ struct drm_display_mode sdvo_tv_modes[] = { - - static void intel_sdvo_get_tv_modes(struct drm_connector *connector) - { -- struct intel_encoder *output = to_intel_encoder(connector); -- struct intel_sdvo_priv *sdvo_priv = output->dev_priv; -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); -+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; - struct intel_sdvo_sdtv_resolution_request tv_res; - uint32_t reply = 0, format_map = 0; - int i; -@@ -1842,11 +1734,11 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) - sizeof(format_map) ? sizeof(format_map) : - sizeof(struct intel_sdvo_sdtv_resolution_request)); - -- intel_sdvo_set_target_output(output, sdvo_priv->controlled_output); -+ intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output); - -- intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, -+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, - &tv_res, sizeof(tv_res)); -- status = intel_sdvo_read_response(output, &reply, 3); -+ status = intel_sdvo_read_response(intel_encoder, &reply, 3); - if (status != SDVO_CMD_STATUS_SUCCESS) - return; - -@@ -1863,7 +1755,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) - - static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct drm_i915_private *dev_priv = connector->dev->dev_private; - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; - struct drm_display_mode *newmode; -@@ -1873,7 +1766,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) - * Assume that the preferred modes are - * arranged in priority order. 
- */ -- intel_ddc_get_modes(intel_encoder); -+ intel_ddc_get_modes(connector, intel_encoder->ddc_bus); - if (list_empty(&connector->probed_modes) == false) - goto end; - -@@ -1902,12 +1795,12 @@ end: - - static int intel_sdvo_get_modes(struct drm_connector *connector) - { -- struct intel_encoder *output = to_intel_encoder(connector); -- struct intel_sdvo_priv *sdvo_priv = output->dev_priv; -+ struct intel_connector *intel_connector = to_intel_connector(connector); -+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; - -- if (sdvo_priv->is_tv) -+ if (IS_TV(sdvo_connector)) - intel_sdvo_get_tv_modes(connector); -- else if (sdvo_priv->is_lvds == true) -+ else if (IS_LVDS(sdvo_connector)) - intel_sdvo_get_lvds_modes(connector); - else - intel_sdvo_get_ddc_modes(connector); -@@ -1920,11 +1813,11 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) - static - void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; -+ struct intel_connector *intel_connector = to_intel_connector(connector); -+ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv; - struct drm_device *dev = connector->dev; - -- if (sdvo_priv->is_tv) { -+ if (IS_TV(sdvo_priv)) { - if (sdvo_priv->left_property) - drm_property_destroy(dev, sdvo_priv->left_property); - if (sdvo_priv->right_property) -@@ -1937,8 +1830,6 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) - drm_property_destroy(dev, sdvo_priv->hpos_property); - if (sdvo_priv->vpos_property) - drm_property_destroy(dev, sdvo_priv->vpos_property); -- } -- if (sdvo_priv->is_tv) { - if (sdvo_priv->saturation_property) - drm_property_destroy(dev, - sdvo_priv->saturation_property); -@@ -1948,7 +1839,7 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) - if (sdvo_priv->hue_property) - drm_property_destroy(dev, sdvo_priv->hue_property); - } -- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { -+ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) { - if (sdvo_priv->brightness_property) - drm_property_destroy(dev, - sdvo_priv->brightness_property); -@@ -1958,31 +1849,17 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) - - static void intel_sdvo_destroy(struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; -- -- if (intel_encoder->i2c_bus) -- intel_i2c_destroy(intel_encoder->i2c_bus); -- if (intel_encoder->ddc_bus) -- intel_i2c_destroy(intel_encoder->ddc_bus); -- if (sdvo_priv->analog_ddc_bus) -- intel_i2c_destroy(sdvo_priv->analog_ddc_bus); -- -- if (sdvo_priv->sdvo_lvds_fixed_mode != NULL) -- drm_mode_destroy(connector->dev, -- sdvo_priv->sdvo_lvds_fixed_mode); -+ struct intel_connector *intel_connector = to_intel_connector(connector); -+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; - -- if (sdvo_priv->tv_format_property) -+ if (sdvo_connector->tv_format_property) - drm_property_destroy(connector->dev, -- sdvo_priv->tv_format_property); -- -- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) -- intel_sdvo_destroy_enhance_property(connector); -+ sdvo_connector->tv_format_property); - -+ intel_sdvo_destroy_enhance_property(connector); - drm_sysfs_connector_remove(connector); - drm_connector_cleanup(connector); -- -- kfree(intel_encoder); -+ kfree(connector); - } - - static 
int -@@ -1990,9 +1867,11 @@ intel_sdvo_set_property(struct drm_connector *connector, - struct drm_property *property, - uint64_t val) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; -- struct drm_encoder *encoder = &intel_encoder->enc; -+ struct intel_connector *intel_connector = to_intel_connector(connector); -+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; - struct drm_crtc *crtc = encoder->crtc; - int ret = 0; - bool changed = false; -@@ -2003,101 +1882,101 @@ intel_sdvo_set_property(struct drm_connector *connector, - if (ret < 0) - goto out; - -- if (property == sdvo_priv->tv_format_property) { -+ if (property == sdvo_connector->tv_format_property) { - if (val >= TV_FORMAT_NUM) { - ret = -EINVAL; - goto out; - } - if (sdvo_priv->tv_format_name == -- sdvo_priv->tv_format_supported[val]) -+ sdvo_connector->tv_format_supported[val]) - goto out; - -- sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val]; -+ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val]; - changed = true; - } - -- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { -+ if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) { - cmd = 0; - temp_value = val; -- if (sdvo_priv->left_property == property) { -+ if (sdvo_connector->left_property == property) { - drm_connector_property_set_value(connector, -- sdvo_priv->right_property, val); -- if (sdvo_priv->left_margin == temp_value) -+ sdvo_connector->right_property, val); -+ if (sdvo_connector->left_margin == temp_value) - goto out; - -- sdvo_priv->left_margin = temp_value; -- sdvo_priv->right_margin = temp_value; -- temp_value = sdvo_priv->max_hscan - -- sdvo_priv->left_margin; -+ sdvo_connector->left_margin = temp_value; -+ sdvo_connector->right_margin = temp_value; -+ temp_value = sdvo_connector->max_hscan - -+ sdvo_connector->left_margin; - cmd = SDVO_CMD_SET_OVERSCAN_H; -- } else if (sdvo_priv->right_property == property) { -+ } else if (sdvo_connector->right_property == property) { - drm_connector_property_set_value(connector, -- sdvo_priv->left_property, val); -- if (sdvo_priv->right_margin == temp_value) -+ sdvo_connector->left_property, val); -+ if (sdvo_connector->right_margin == temp_value) - goto out; - -- sdvo_priv->left_margin = temp_value; -- sdvo_priv->right_margin = temp_value; -- temp_value = sdvo_priv->max_hscan - -- sdvo_priv->left_margin; -+ sdvo_connector->left_margin = temp_value; -+ sdvo_connector->right_margin = temp_value; -+ temp_value = sdvo_connector->max_hscan - -+ sdvo_connector->left_margin; - cmd = SDVO_CMD_SET_OVERSCAN_H; -- } else if (sdvo_priv->top_property == property) { -+ } else if (sdvo_connector->top_property == property) { - drm_connector_property_set_value(connector, -- sdvo_priv->bottom_property, val); -- if (sdvo_priv->top_margin == temp_value) -+ sdvo_connector->bottom_property, val); -+ if (sdvo_connector->top_margin == temp_value) - goto out; - -- sdvo_priv->top_margin = temp_value; -- sdvo_priv->bottom_margin = temp_value; -- temp_value = sdvo_priv->max_vscan - -- sdvo_priv->top_margin; -+ sdvo_connector->top_margin = temp_value; -+ sdvo_connector->bottom_margin = temp_value; -+ temp_value = sdvo_connector->max_vscan - -+ sdvo_connector->top_margin; - cmd = SDVO_CMD_SET_OVERSCAN_V; -- } else if (sdvo_priv->bottom_property == property) { -+ } else if 
(sdvo_connector->bottom_property == property) { - drm_connector_property_set_value(connector, -- sdvo_priv->top_property, val); -- if (sdvo_priv->bottom_margin == temp_value) -+ sdvo_connector->top_property, val); -+ if (sdvo_connector->bottom_margin == temp_value) - goto out; -- sdvo_priv->top_margin = temp_value; -- sdvo_priv->bottom_margin = temp_value; -- temp_value = sdvo_priv->max_vscan - -- sdvo_priv->top_margin; -+ sdvo_connector->top_margin = temp_value; -+ sdvo_connector->bottom_margin = temp_value; -+ temp_value = sdvo_connector->max_vscan - -+ sdvo_connector->top_margin; - cmd = SDVO_CMD_SET_OVERSCAN_V; -- } else if (sdvo_priv->hpos_property == property) { -- if (sdvo_priv->cur_hpos == temp_value) -+ } else if (sdvo_connector->hpos_property == property) { -+ if (sdvo_connector->cur_hpos == temp_value) - goto out; - - cmd = SDVO_CMD_SET_POSITION_H; -- sdvo_priv->cur_hpos = temp_value; -- } else if (sdvo_priv->vpos_property == property) { -- if (sdvo_priv->cur_vpos == temp_value) -+ sdvo_connector->cur_hpos = temp_value; -+ } else if (sdvo_connector->vpos_property == property) { -+ if (sdvo_connector->cur_vpos == temp_value) - goto out; - - cmd = SDVO_CMD_SET_POSITION_V; -- sdvo_priv->cur_vpos = temp_value; -- } else if (sdvo_priv->saturation_property == property) { -- if (sdvo_priv->cur_saturation == temp_value) -+ sdvo_connector->cur_vpos = temp_value; -+ } else if (sdvo_connector->saturation_property == property) { -+ if (sdvo_connector->cur_saturation == temp_value) - goto out; - - cmd = SDVO_CMD_SET_SATURATION; -- sdvo_priv->cur_saturation = temp_value; -- } else if (sdvo_priv->contrast_property == property) { -- if (sdvo_priv->cur_contrast == temp_value) -+ sdvo_connector->cur_saturation = temp_value; -+ } else if (sdvo_connector->contrast_property == property) { -+ if (sdvo_connector->cur_contrast == temp_value) - goto out; - - cmd = SDVO_CMD_SET_CONTRAST; -- sdvo_priv->cur_contrast = temp_value; -- } else if (sdvo_priv->hue_property == property) { -- if (sdvo_priv->cur_hue == temp_value) -+ sdvo_connector->cur_contrast = temp_value; -+ } else if (sdvo_connector->hue_property == property) { -+ if (sdvo_connector->cur_hue == temp_value) - goto out; - - cmd = SDVO_CMD_SET_HUE; -- sdvo_priv->cur_hue = temp_value; -- } else if (sdvo_priv->brightness_property == property) { -- if (sdvo_priv->cur_brightness == temp_value) -+ sdvo_connector->cur_hue = temp_value; -+ } else if (sdvo_connector->brightness_property == property) { -+ if (sdvo_connector->cur_brightness == temp_value) - goto out; - - cmd = SDVO_CMD_SET_BRIGHTNESS; -- sdvo_priv->cur_brightness = temp_value; -+ sdvo_connector->cur_brightness = temp_value; - } - if (cmd) { - intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2); -@@ -2127,8 +2006,6 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { - - static const struct drm_connector_funcs intel_sdvo_connector_funcs = { - .dpms = drm_helper_connector_dpms, -- .save = intel_sdvo_save, -- .restore = intel_sdvo_restore, - .detect = intel_sdvo_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .set_property = intel_sdvo_set_property, -@@ -2138,12 +2015,27 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = { - static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { - .get_modes = intel_sdvo_get_modes, - .mode_valid = intel_sdvo_mode_valid, -- .best_encoder = intel_best_encoder, -+ .best_encoder = intel_attached_encoder, - }; - - static void intel_sdvo_enc_destroy(struct drm_encoder 
*encoder) - { -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); -+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; -+ -+ if (intel_encoder->i2c_bus) -+ intel_i2c_destroy(intel_encoder->i2c_bus); -+ if (intel_encoder->ddc_bus) -+ intel_i2c_destroy(intel_encoder->ddc_bus); -+ if (sdvo_priv->analog_ddc_bus) -+ intel_i2c_destroy(sdvo_priv->analog_ddc_bus); -+ -+ if (sdvo_priv->sdvo_lvds_fixed_mode != NULL) -+ drm_mode_destroy(encoder->dev, -+ sdvo_priv->sdvo_lvds_fixed_mode); -+ - drm_encoder_cleanup(encoder); -+ kfree(intel_encoder); - } - - static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { -@@ -2159,49 +2051,29 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { - * outputs, then LVDS outputs. - */ - static void --intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv) -+intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, -+ struct intel_sdvo_priv *sdvo, u32 reg) - { -- uint16_t mask = 0; -- unsigned int num_bits; -- -- /* Make a mask of outputs less than or equal to our own priority in the -- * list. -- */ -- switch (dev_priv->controlled_output) { -- case SDVO_OUTPUT_LVDS1: -- mask |= SDVO_OUTPUT_LVDS1; -- case SDVO_OUTPUT_LVDS0: -- mask |= SDVO_OUTPUT_LVDS0; -- case SDVO_OUTPUT_TMDS1: -- mask |= SDVO_OUTPUT_TMDS1; -- case SDVO_OUTPUT_TMDS0: -- mask |= SDVO_OUTPUT_TMDS0; -- case SDVO_OUTPUT_RGB1: -- mask |= SDVO_OUTPUT_RGB1; -- case SDVO_OUTPUT_RGB0: -- mask |= SDVO_OUTPUT_RGB0; -- break; -- } -+ struct sdvo_device_mapping *mapping; - -- /* Count bits to find what number we are in the priority list. */ -- mask &= dev_priv->caps.output_flags; -- num_bits = hweight16(mask); -- if (num_bits > 3) { -- /* if more than 3 outputs, default to DDC bus 3 for now */ -- num_bits = 3; -- } -+ if (IS_SDVOB(reg)) -+ mapping = &(dev_priv->sdvo_mappings[0]); -+ else -+ mapping = &(dev_priv->sdvo_mappings[1]); - -- /* Corresponds to SDVO_CONTROL_BUS_DDCx */ -- dev_priv->ddc_bus = 1 << num_bits; -+ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); - } - - static bool --intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output) -+intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device) - { - struct intel_sdvo_priv *sdvo_priv = output->dev_priv; - uint8_t status; - -- intel_sdvo_set_target_output(output, sdvo_priv->controlled_output); -+ if (device == 0) -+ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0); -+ else -+ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1); - - intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0); - status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1); -@@ -2214,15 +2086,13 @@ static struct intel_encoder * - intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan) - { - struct drm_device *dev = chan->drm_dev; -- struct drm_connector *connector; -+ struct drm_encoder *encoder; - struct intel_encoder *intel_encoder = NULL; - -- list_for_each_entry(connector, -- &dev->mode_config.connector_list, head) { -- if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) { -- intel_encoder = to_intel_encoder(connector); -+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { -+ intel_encoder = enc_to_intel_encoder(encoder); -+ if (intel_encoder->ddc_bus == &chan->adapter) - break; -- } - } - return intel_encoder; - } -@@ -2259,7 +2129,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) - struct drm_i915_private *dev_priv = dev->dev_private; - struct sdvo_device_mapping *my_mapping, *other_mapping; - -- if (sdvo_reg 
== SDVOB) { -+ if (IS_SDVOB(sdvo_reg)) { - my_mapping = &dev_priv->sdvo_mappings[0]; - other_mapping = &dev_priv->sdvo_mappings[1]; - } else { -@@ -2284,120 +2154,237 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) - /* No SDVO device info is found for another DVO port, - * so use mapping assumption we had before BIOS parsing. - */ -- if (sdvo_reg == SDVOB) -+ if (IS_SDVOB(sdvo_reg)) - return 0x70; - else - return 0x72; - } - --static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id) -+static bool -+intel_sdvo_connector_alloc (struct intel_connector **ret) - { -- DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident); -- return 1; -+ struct intel_connector *intel_connector; -+ struct intel_sdvo_connector *sdvo_connector; -+ -+ *ret = kzalloc(sizeof(*intel_connector) + -+ sizeof(*sdvo_connector), GFP_KERNEL); -+ if (!*ret) -+ return false; -+ -+ intel_connector = *ret; -+ sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1); -+ intel_connector->dev_priv = sdvo_connector; -+ -+ return true; - } - --static struct dmi_system_id intel_sdvo_bad_tv[] = { -- { -- .callback = intel_sdvo_bad_tv_callback, -- .ident = "IntelG45/ICH10R/DME1737", -- .matches = { -- DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"), -- DMI_MATCH(DMI_PRODUCT_NAME, "4800784"), -- }, -- }, -+static void -+intel_sdvo_connector_create (struct drm_encoder *encoder, -+ struct drm_connector *connector) -+{ -+ drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs, -+ connector->connector_type); - -- { } /* terminating entry */ --}; -+ drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); -+ -+ connector->interlace_allowed = 0; -+ connector->doublescan_allowed = 0; -+ connector->display_info.subpixel_order = SubPixelHorizontalRGB; -+ -+ drm_mode_connector_attach_encoder(connector, encoder); -+ drm_sysfs_connector_add(connector); -+} - - static bool --intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags) -+intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device) - { -- struct drm_connector *connector = &intel_encoder->base; - struct drm_encoder *encoder = &intel_encoder->enc; - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; -- bool ret = true, registered = false; -+ struct drm_connector *connector; -+ struct intel_connector *intel_connector; -+ struct intel_sdvo_connector *sdvo_connector; -+ -+ if (!intel_sdvo_connector_alloc(&intel_connector)) -+ return false; -+ -+ sdvo_connector = intel_connector->dev_priv; -+ -+ if (device == 0) { -+ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0; -+ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0; -+ } else if (device == 1) { -+ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1; -+ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1; -+ } -+ -+ connector = &intel_connector->base; -+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; -+ encoder->encoder_type = DRM_MODE_ENCODER_TMDS; -+ connector->connector_type = DRM_MODE_CONNECTOR_DVID; -+ -+ if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode) -+ && intel_sdvo_get_digital_encoding_mode(intel_encoder, device) -+ && sdvo_priv->is_hdmi) { -+ /* enable hdmi encoding mode if supported */ -+ intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI); -+ intel_sdvo_set_colorimetry(intel_encoder, -+ SDVO_COLORIMETRY_RGB256); -+ connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; -+ } -+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | -+ (1 << 
INTEL_ANALOG_CLONE_BIT); -+ -+ intel_sdvo_connector_create(encoder, connector); -+ -+ return true; -+} -+ -+static bool -+intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type) -+{ -+ struct drm_encoder *encoder = &intel_encoder->enc; -+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; -+ struct drm_connector *connector; -+ struct intel_connector *intel_connector; -+ struct intel_sdvo_connector *sdvo_connector; -+ -+ if (!intel_sdvo_connector_alloc(&intel_connector)) -+ return false; -+ -+ connector = &intel_connector->base; -+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; -+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; -+ sdvo_connector = intel_connector->dev_priv; -+ -+ sdvo_priv->controlled_output |= type; -+ sdvo_connector->output_flag = type; -+ -+ sdvo_priv->is_tv = true; -+ intel_encoder->needs_tv_clock = true; -+ intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; -+ -+ intel_sdvo_connector_create(encoder, connector); -+ -+ intel_sdvo_tv_create_property(connector, type); -+ -+ intel_sdvo_create_enhance_property(connector); -+ -+ return true; -+} -+ -+static bool -+intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device) -+{ -+ struct drm_encoder *encoder = &intel_encoder->enc; -+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; -+ struct drm_connector *connector; -+ struct intel_connector *intel_connector; -+ struct intel_sdvo_connector *sdvo_connector; -+ -+ if (!intel_sdvo_connector_alloc(&intel_connector)) -+ return false; -+ -+ connector = &intel_connector->base; -+ connector->polled = DRM_CONNECTOR_POLL_CONNECT; -+ encoder->encoder_type = DRM_MODE_ENCODER_DAC; -+ connector->connector_type = DRM_MODE_CONNECTOR_VGA; -+ sdvo_connector = intel_connector->dev_priv; -+ -+ if (device == 0) { -+ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0; -+ sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; -+ } else if (device == 1) { -+ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1; -+ sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; -+ } -+ -+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | -+ (1 << INTEL_ANALOG_CLONE_BIT); -+ -+ intel_sdvo_connector_create(encoder, connector); -+ return true; -+} -+ -+static bool -+intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device) -+{ -+ struct drm_encoder *encoder = &intel_encoder->enc; -+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; -+ struct drm_connector *connector; -+ struct intel_connector *intel_connector; -+ struct intel_sdvo_connector *sdvo_connector; -+ -+ if (!intel_sdvo_connector_alloc(&intel_connector)) -+ return false; -+ -+ connector = &intel_connector->base; -+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS; -+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS; -+ sdvo_connector = intel_connector->dev_priv; -+ -+ sdvo_priv->is_lvds = true; -+ -+ if (device == 0) { -+ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0; -+ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; -+ } else if (device == 1) { -+ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1; -+ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; -+ } -+ -+ intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | -+ (1 << INTEL_SDVO_LVDS_CLONE_BIT); -+ -+ intel_sdvo_connector_create(encoder, connector); -+ intel_sdvo_create_enhance_property(connector); -+ return true; -+} -+ -+static bool -+intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags) -+{ -+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; - - sdvo_priv->is_tv = false; - 
intel_encoder->needs_tv_clock = false; - sdvo_priv->is_lvds = false; - -- if (device_is_registered(&connector->kdev)) { -- drm_sysfs_connector_remove(connector); -- registered = true; -- } -+ /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ - -- if (flags & -- (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { -- if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) -- sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0; -- else -- sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; -- -- encoder->encoder_type = DRM_MODE_ENCODER_TMDS; -- connector->connector_type = DRM_MODE_CONNECTOR_DVID; -- -- if (intel_sdvo_get_supp_encode(intel_encoder, -- &sdvo_priv->encode) && -- intel_sdvo_get_digital_encoding_mode(intel_encoder) && -- sdvo_priv->is_hdmi) { -- /* enable hdmi encoding mode if supported */ -- intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI); -- intel_sdvo_set_colorimetry(intel_encoder, -- SDVO_COLORIMETRY_RGB256); -- connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; -- intel_encoder->clone_mask = -- (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | -- (1 << INTEL_ANALOG_CLONE_BIT); -- } -- } else if ((flags & SDVO_OUTPUT_SVID0) && -- !dmi_check_system(intel_sdvo_bad_tv)) { -- -- sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; -- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; -- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; -- sdvo_priv->is_tv = true; -- intel_encoder->needs_tv_clock = true; -- intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; -- } else if (flags & SDVO_OUTPUT_RGB0) { -- -- sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; -- encoder->encoder_type = DRM_MODE_ENCODER_DAC; -- connector->connector_type = DRM_MODE_CONNECTOR_VGA; -- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | -- (1 << INTEL_ANALOG_CLONE_BIT); -- } else if (flags & SDVO_OUTPUT_RGB1) { -- -- sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; -- encoder->encoder_type = DRM_MODE_ENCODER_DAC; -- connector->connector_type = DRM_MODE_CONNECTOR_VGA; -- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | -- (1 << INTEL_ANALOG_CLONE_BIT); -- } else if (flags & SDVO_OUTPUT_CVBS0) { -- -- sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0; -- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; -- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; -- sdvo_priv->is_tv = true; -- intel_encoder->needs_tv_clock = true; -- intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; -- } else if (flags & SDVO_OUTPUT_LVDS0) { -- -- sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; -- encoder->encoder_type = DRM_MODE_ENCODER_LVDS; -- connector->connector_type = DRM_MODE_CONNECTOR_LVDS; -- sdvo_priv->is_lvds = true; -- intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | -- (1 << INTEL_SDVO_LVDS_CLONE_BIT); -- } else if (flags & SDVO_OUTPUT_LVDS1) { -- -- sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; -- encoder->encoder_type = DRM_MODE_ENCODER_LVDS; -- connector->connector_type = DRM_MODE_CONNECTOR_LVDS; -- sdvo_priv->is_lvds = true; -- intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | -- (1 << INTEL_SDVO_LVDS_CLONE_BIT); -- } else { -+ if (flags & SDVO_OUTPUT_TMDS0) -+ if (!intel_sdvo_dvi_init(intel_encoder, 0)) -+ return false; -+ -+ if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK) -+ if (!intel_sdvo_dvi_init(intel_encoder, 1)) -+ return false; -+ -+ /* TV has no XXX1 function block */ -+ if (flags & SDVO_OUTPUT_SVID0) -+ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0)) -+ return false; -+ -+ if (flags & SDVO_OUTPUT_CVBS0) -+ if 
(!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0)) -+ return false; -+ -+ if (flags & SDVO_OUTPUT_RGB0) -+ if (!intel_sdvo_analog_init(intel_encoder, 0)) -+ return false; -+ -+ if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK) -+ if (!intel_sdvo_analog_init(intel_encoder, 1)) -+ return false; -+ -+ if (flags & SDVO_OUTPUT_LVDS0) -+ if (!intel_sdvo_lvds_init(intel_encoder, 0)) -+ return false; - -+ if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK) -+ if (!intel_sdvo_lvds_init(intel_encoder, 1)) -+ return false; -+ -+ if ((flags & SDVO_OUTPUT_MASK) == 0) { - unsigned char bytes[2]; - - sdvo_priv->controlled_output = 0; -@@ -2405,28 +2392,25 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags) - DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", - SDVO_NAME(sdvo_priv), - bytes[0], bytes[1]); -- ret = false; -+ return false; - } - intel_encoder->crtc_mask = (1 << 0) | (1 << 1); - -- if (ret && registered) -- ret = drm_sysfs_connector_add(connector) == 0 ? true : false; -- -- -- return ret; -- -+ return true; - } - --static void intel_sdvo_tv_create_property(struct drm_connector *connector) -+static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; -+ struct intel_connector *intel_connector = to_intel_connector(connector); -+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; - struct intel_sdvo_tv_format format; - uint32_t format_map, i; - uint8_t status; - -- intel_sdvo_set_target_output(intel_encoder, -- sdvo_priv->controlled_output); -+ intel_sdvo_set_target_output(intel_encoder, type); - - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); -@@ -2441,35 +2425,37 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector) - if (format_map == 0) - return; - -- sdvo_priv->format_supported_num = 0; -+ sdvo_connector->format_supported_num = 0; - for (i = 0 ; i < TV_FORMAT_NUM; i++) - if (format_map & (1 << i)) { -- sdvo_priv->tv_format_supported -- [sdvo_priv->format_supported_num++] = -+ sdvo_connector->tv_format_supported -+ [sdvo_connector->format_supported_num++] = - tv_format_names[i]; - } - - -- sdvo_priv->tv_format_property = -+ sdvo_connector->tv_format_property = - drm_property_create( - connector->dev, DRM_MODE_PROP_ENUM, -- "mode", sdvo_priv->format_supported_num); -+ "mode", sdvo_connector->format_supported_num); - -- for (i = 0; i < sdvo_priv->format_supported_num; i++) -+ for (i = 0; i < sdvo_connector->format_supported_num; i++) - drm_property_add_enum( -- sdvo_priv->tv_format_property, i, -- i, sdvo_priv->tv_format_supported[i]); -+ sdvo_connector->tv_format_property, i, -+ i, sdvo_connector->tv_format_supported[i]); - -- sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[0]; -+ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0]; - drm_connector_attach_property( -- connector, sdvo_priv->tv_format_property, 0); -+ connector, sdvo_connector->tv_format_property, 0); - - } - - static void intel_sdvo_create_enhance_property(struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; -+ struct drm_encoder *encoder = 
intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); -+ struct intel_connector *intel_connector = to_intel_connector(connector); -+ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv; - struct intel_sdvo_enhancements_reply sdvo_data; - struct drm_device *dev = connector->dev; - uint8_t status; -@@ -2488,7 +2474,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) - DRM_DEBUG_KMS("No enhancement is supported\n"); - return; - } -- if (sdvo_priv->is_tv) { -+ if (IS_TV(sdvo_priv)) { - /* when horizontal overscan is supported, Add the left/right - * property - */ -@@ -2636,8 +2622,6 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) - "default %d, current %d\n", - data_value[0], data_value[1], response); - } -- } -- if (sdvo_priv->is_tv) { - if (sdvo_data.saturation) { - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_MAX_SATURATION, NULL, 0); -@@ -2733,7 +2717,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) - data_value[0], data_value[1], response); - } - } -- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { -+ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) { - if (sdvo_data.brightness) { - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); -@@ -2773,12 +2757,11 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) - bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) - { - struct drm_i915_private *dev_priv = dev->dev_private; -- struct drm_connector *connector; - struct intel_encoder *intel_encoder; - struct intel_sdvo_priv *sdvo_priv; -- - u8 ch[0x40]; - int i; -+ u32 i2c_reg, ddc_reg, analog_ddc_reg; - - intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); - if (!intel_encoder) { -@@ -2791,11 +2774,21 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) - intel_encoder->dev_priv = sdvo_priv; - intel_encoder->type = INTEL_OUTPUT_SDVO; - -+ if (HAS_PCH_SPLIT(dev)) { -+ i2c_reg = PCH_GPIOE; -+ ddc_reg = PCH_GPIOE; -+ analog_ddc_reg = PCH_GPIOA; -+ } else { -+ i2c_reg = GPIOE; -+ ddc_reg = GPIOE; -+ analog_ddc_reg = GPIOA; -+ } -+ - /* setup the DDC bus. */ -- if (sdvo_reg == SDVOB) -- intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); -+ if (IS_SDVOB(sdvo_reg)) -+ intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB"); - else -- intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); -+ intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC"); - - if (!intel_encoder->i2c_bus) - goto err_inteloutput; -@@ -2809,20 +2802,20 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) - for (i = 0; i < 0x40; i++) { - if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) { - DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", -- sdvo_reg == SDVOB ? 'B' : 'C'); -+ IS_SDVOB(sdvo_reg) ? 'B' : 'C'); - goto err_i2c; - } - } - - /* setup the DDC bus. 
*/ -- if (sdvo_reg == SDVOB) { -- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); -- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, -+ if (IS_SDVOB(sdvo_reg)) { -+ intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS"); -+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, - "SDVOB/VGA DDC BUS"); - dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; - } else { -- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); -- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, -+ intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS"); -+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, - "SDVOC/VGA DDC BUS"); - dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; - } -@@ -2833,41 +2826,21 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) - /* Wrap with our custom algo which switches to DDC mode */ - intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; - -+ /* encoder type will be decided later */ -+ drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0); -+ drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); -+ - /* In default case sdvo lvds is false */ - intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps); - - if (intel_sdvo_output_setup(intel_encoder, - sdvo_priv->caps.output_flags) != true) { - DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", -- sdvo_reg == SDVOB ? 'B' : 'C'); -+ IS_SDVOB(sdvo_reg) ? 'B' : 'C'); - goto err_i2c; - } - -- -- connector = &intel_encoder->base; -- drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, -- connector->connector_type); -- -- drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); -- connector->interlace_allowed = 0; -- connector->doublescan_allowed = 0; -- connector->display_info.subpixel_order = SubPixelHorizontalRGB; -- -- drm_encoder_init(dev, &intel_encoder->enc, -- &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type); -- -- drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); -- -- drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); -- if (sdvo_priv->is_tv) -- intel_sdvo_tv_create_property(connector); -- -- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) -- intel_sdvo_create_enhance_property(connector); -- -- drm_sysfs_connector_add(connector); -- -- intel_sdvo_select_ddc_bus(sdvo_priv); -+ intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg); - - /* Set the input timing to the screen. Assume always input 0. 
*/ - intel_sdvo_set_target_input(intel_encoder, true, false); -diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c -index d7d39b2..6d553c2 100644 ---- a/drivers/gpu/drm/i915/intel_tv.c -+++ b/drivers/gpu/drm/i915/intel_tv.c -@@ -916,143 +916,6 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode) - } - } - --static void --intel_tv_save(struct drm_connector *connector) --{ -- struct drm_device *dev = connector->dev; -- struct drm_i915_private *dev_priv = dev->dev_private; -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; -- int i; -- -- tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1); -- tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2); -- tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3); -- tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1); -- tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2); -- tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3); -- tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4); -- tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5); -- tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6); -- tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7); -- tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1); -- tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2); -- tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3); -- -- tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y); -- tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2); -- tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U); -- tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2); -- tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V); -- tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2); -- tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS); -- tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL); -- tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS); -- tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE); -- tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1); -- tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2); -- tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3); -- -- for (i = 0; i < 60; i++) -- tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i <<2)); -- for (i = 0; i < 60; i++) -- tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i <<2)); -- for (i = 0; i < 43; i++) -- tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i <<2)); -- for (i = 0; i < 43; i++) -- tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i <<2)); -- -- tv_priv->save_TV_DAC = I915_READ(TV_DAC); -- tv_priv->save_TV_CTL = I915_READ(TV_CTL); --} -- --static void --intel_tv_restore(struct drm_connector *connector) --{ -- struct drm_device *dev = connector->dev; -- struct drm_i915_private *dev_priv = dev->dev_private; -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; -- struct drm_crtc *crtc = connector->encoder->crtc; -- struct intel_crtc *intel_crtc; -- int i; -- -- /* FIXME: No CRTC? 
*/ -- if (!crtc) -- return; -- -- intel_crtc = to_intel_crtc(crtc); -- I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1); -- I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2); -- I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3); -- I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1); -- I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2); -- I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3); -- I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4); -- I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5); -- I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6); -- I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7); -- I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1); -- I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2); -- I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3); -- -- I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y); -- I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2); -- I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U); -- I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2); -- I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V); -- I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2); -- I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS); -- I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL); -- -- { -- int pipeconf_reg = (intel_crtc->pipe == 0) ? -- PIPEACONF : PIPEBCONF; -- int dspcntr_reg = (intel_crtc->plane == 0) ? -- DSPACNTR : DSPBCNTR; -- int pipeconf = I915_READ(pipeconf_reg); -- int dspcntr = I915_READ(dspcntr_reg); -- int dspbase_reg = (intel_crtc->plane == 0) ? -- DSPAADDR : DSPBADDR; -- /* Pipe must be off here */ -- I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); -- /* Flush the plane changes */ -- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); -- -- if (!IS_I9XX(dev)) { -- /* Wait for vblank for the disable to take effect */ -- intel_wait_for_vblank(dev); -- } -- -- I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE); -- /* Wait for vblank for the disable to take effect. 
*/ -- intel_wait_for_vblank(dev); -- -- /* Filter ctl must be set before TV_WIN_SIZE */ -- I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1); -- I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2); -- I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3); -- I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS); -- I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE); -- I915_WRITE(pipeconf_reg, pipeconf); -- I915_WRITE(dspcntr_reg, dspcntr); -- /* Flush the plane changes */ -- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); -- } -- -- for (i = 0; i < 60; i++) -- I915_WRITE(TV_H_LUMA_0 + (i <<2), tv_priv->save_TV_H_LUMA[i]); -- for (i = 0; i < 60; i++) -- I915_WRITE(TV_H_CHROMA_0 + (i <<2), tv_priv->save_TV_H_CHROMA[i]); -- for (i = 0; i < 43; i++) -- I915_WRITE(TV_V_LUMA_0 + (i <<2), tv_priv->save_TV_V_LUMA[i]); -- for (i = 0; i < 43; i++) -- I915_WRITE(TV_V_CHROMA_0 + (i <<2), tv_priv->save_TV_V_CHROMA[i]); -- -- I915_WRITE(TV_DAC, tv_priv->save_TV_DAC); -- I915_WRITE(TV_CTL, tv_priv->save_TV_CTL); --} -- - static const struct tv_mode * - intel_tv_mode_lookup (char *tv_format) - { -@@ -1078,7 +941,8 @@ intel_tv_mode_find (struct intel_encoder *intel_encoder) - static enum drm_mode_status - intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); - - /* Ensure TV refresh is close to desired refresh */ -@@ -1441,7 +1305,8 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder - */ - static void intel_tv_find_better_format(struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); - int i; -@@ -1475,9 +1340,9 @@ intel_tv_detect(struct drm_connector *connector) - { - struct drm_crtc *crtc; - struct drm_display_mode mode; -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; -- struct drm_encoder *encoder = &intel_encoder->enc; - int dpms_mode; - int type = tv_priv->type; - -@@ -1487,10 +1352,12 @@ intel_tv_detect(struct drm_connector *connector) - if (encoder->crtc && encoder->crtc->enabled) { - type = intel_tv_detect_type(encoder->crtc, intel_encoder); - } else { -- crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode); -+ crtc = intel_get_load_detect_pipe(intel_encoder, connector, -+ &mode, &dpms_mode); - if (crtc) { - type = intel_tv_detect_type(crtc, intel_encoder); -- intel_release_load_detect_pipe(intel_encoder, dpms_mode); -+ intel_release_load_detect_pipe(intel_encoder, connector, -+ dpms_mode); - } else - type = -1; - } -@@ -1525,7 +1392,8 @@ static void - intel_tv_chose_preferred_modes(struct drm_connector *connector, - struct drm_display_mode *mode_ptr) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ 
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); - - if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) -@@ -1550,7 +1418,8 @@ static int - intel_tv_get_modes(struct drm_connector *connector) - { - struct drm_display_mode *mode_ptr; -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); - int j, count = 0; - u64 tmp; -@@ -1604,11 +1473,9 @@ intel_tv_get_modes(struct drm_connector *connector) - static void - intel_tv_destroy (struct drm_connector *connector) - { -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -- - drm_sysfs_connector_remove(connector); - drm_connector_cleanup(connector); -- kfree(intel_encoder); -+ kfree(connector); - } - - -@@ -1617,9 +1484,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop - uint64_t val) - { - struct drm_device *dev = connector->dev; -- struct intel_encoder *intel_encoder = to_intel_encoder(connector); -+ struct drm_encoder *encoder = intel_attached_encoder(connector); -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; -- struct drm_encoder *encoder = &intel_encoder->enc; - struct drm_crtc *crtc = encoder->crtc; - int ret = 0; - bool changed = false; -@@ -1676,8 +1543,6 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { - - static const struct drm_connector_funcs intel_tv_connector_funcs = { - .dpms = drm_helper_connector_dpms, -- .save = intel_tv_save, -- .restore = intel_tv_restore, - .detect = intel_tv_detect, - .destroy = intel_tv_destroy, - .set_property = intel_tv_set_property, -@@ -1687,12 +1552,15 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = { - static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { - .mode_valid = intel_tv_mode_valid, - .get_modes = intel_tv_get_modes, -- .best_encoder = intel_best_encoder, -+ .best_encoder = intel_attached_encoder, - }; - - static void intel_tv_enc_destroy(struct drm_encoder *encoder) - { -+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); -+ - drm_encoder_cleanup(encoder); -+ kfree(intel_encoder); - } - - static const struct drm_encoder_funcs intel_tv_enc_funcs = { -@@ -1741,6 +1609,7 @@ intel_tv_init(struct drm_device *dev) - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_connector *connector; - struct intel_encoder *intel_encoder; -+ struct intel_connector *intel_connector; - struct intel_tv_priv *tv_priv; - u32 tv_dac_on, tv_dac_off, save_tv_dac; - char **tv_format_names; -@@ -1786,7 +1655,13 @@ intel_tv_init(struct drm_device *dev) - return; - } - -- connector = &intel_encoder->base; -+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); -+ if (!intel_connector) { -+ kfree(intel_encoder); -+ return; -+ } -+ -+ connector = &intel_connector->base; - - drm_connector_init(dev, connector, &intel_tv_connector_funcs, - DRM_MODE_CONNECTOR_SVIDEO); -@@ -1794,7 +1669,7 @@ intel_tv_init(struct drm_device *dev) - drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs, - DRM_MODE_ENCODER_TVDAC); - -- drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); -+ 
drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); - tv_priv = (struct intel_tv_priv *)(intel_encoder + 1); - intel_encoder->type = INTEL_OUTPUT_TVOUT; - intel_encoder->crtc_mask = (1 << 0) | (1 << 1); -diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile -index 453df3f..acd31ed 100644 ---- a/drivers/gpu/drm/nouveau/Makefile -+++ b/drivers/gpu/drm/nouveau/Makefile -@@ -22,7 +22,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ - nv50_cursor.o nv50_display.o nv50_fbcon.o \ - nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ - nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ -- nv17_gpio.o nv50_gpio.o -+ nv17_gpio.o nv50_gpio.o \ -+ nv50_calc.o - - nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o - nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o -diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c -index e13f6af..d4bcca8 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_acpi.c -+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c -@@ -34,7 +34,7 @@ - static struct nouveau_dsm_priv { - bool dsm_detected; - acpi_handle dhandle; -- acpi_handle dsm_handle; -+ acpi_handle rom_handle; - } nouveau_dsm_priv; - - static const char nouveau_dsm_muid[] = { -@@ -107,9 +107,9 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero - static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id) - { - if (id == VGA_SWITCHEROO_IGD) -- return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA); -+ return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA); - else -- return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED); -+ return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED); - } - - static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, -@@ -118,7 +118,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, - if (id == VGA_SWITCHEROO_IGD) - return 0; - -- return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state); -+ return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); - } - - static int nouveau_dsm_init(void) -@@ -151,18 +151,18 @@ static bool nouveau_dsm_pci_probe(struct pci_dev *pdev) - dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); - if (!dhandle) - return false; -+ - status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle); - if (ACPI_FAILURE(status)) { - return false; - } - -- ret= nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED, -- NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result); -+ ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED, -+ NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result); - if (ret < 0) - return false; - - nouveau_dsm_priv.dhandle = dhandle; -- nouveau_dsm_priv.dsm_handle = nvidia_handle; - return true; - } - -@@ -173,6 +173,7 @@ static bool nouveau_dsm_detect(void) - struct pci_dev *pdev = NULL; - int has_dsm = 0; - int vga_count = 0; -+ - while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { - vga_count++; - -@@ -180,7 +181,7 @@ static bool nouveau_dsm_detect(void) - } - - if (vga_count == 2 && has_dsm) { -- acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer); -+ acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); - printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", - acpi_method_name); - nouveau_dsm_priv.dsm_detected = true; -@@ -204,3 +205,57 @@ void 
nouveau_unregister_dsm_handler(void) - { - vga_switcheroo_unregister_handler(); - } -+ -+/* retrieve the ROM in 4k blocks */ -+static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios, -+ int offset, int len) -+{ -+ acpi_status status; -+ union acpi_object rom_arg_elements[2], *obj; -+ struct acpi_object_list rom_arg; -+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; -+ -+ rom_arg.count = 2; -+ rom_arg.pointer = &rom_arg_elements[0]; -+ -+ rom_arg_elements[0].type = ACPI_TYPE_INTEGER; -+ rom_arg_elements[0].integer.value = offset; -+ -+ rom_arg_elements[1].type = ACPI_TYPE_INTEGER; -+ rom_arg_elements[1].integer.value = len; -+ -+ status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer); -+ if (ACPI_FAILURE(status)) { -+ printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status)); -+ return -ENODEV; -+ } -+ obj = (union acpi_object *)buffer.pointer; -+ memcpy(bios+offset, obj->buffer.pointer, len); -+ kfree(buffer.pointer); -+ return len; -+} -+ -+bool nouveau_acpi_rom_supported(struct pci_dev *pdev) -+{ -+ acpi_status status; -+ acpi_handle dhandle, rom_handle; -+ -+ if (!nouveau_dsm_priv.dsm_detected) -+ return false; -+ -+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); -+ if (!dhandle) -+ return false; -+ -+ status = acpi_get_handle(dhandle, "_ROM", &rom_handle); -+ if (ACPI_FAILURE(status)) -+ return false; -+ -+ nouveau_dsm_priv.rom_handle = rom_handle; -+ return true; -+} -+ -+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) -+{ -+ return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len); -+} -diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c -index abc382a..fc924b6 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_bios.c -+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c -@@ -26,6 +26,7 @@ - #define NV_DEBUG_NOTRACE - #include "nouveau_drv.h" - #include "nouveau_hw.h" -+#include "nouveau_encoder.h" - - /* these defines are made up */ - #define NV_CIO_CRE_44_HEADA 0x0 -@@ -177,6 +178,25 @@ out: - pci_disable_rom(dev->pdev); - } - -+static void load_vbios_acpi(struct drm_device *dev, uint8_t *data) -+{ -+ int i; -+ int ret; -+ int size = 64 * 1024; -+ -+ if (!nouveau_acpi_rom_supported(dev->pdev)) -+ return; -+ -+ for (i = 0; i < (size / ROM_BIOS_PAGE); i++) { -+ ret = nouveau_acpi_get_bios_chunk(data, -+ (i * ROM_BIOS_PAGE), -+ ROM_BIOS_PAGE); -+ if (ret <= 0) -+ break; -+ } -+ return; -+} -+ - struct methods { - const char desc[8]; - void (*loadbios)(struct drm_device *, uint8_t *); -@@ -190,6 +210,7 @@ static struct methods nv04_methods[] = { - }; - - static struct methods nv50_methods[] = { -+ { "ACPI", load_vbios_acpi, true }, - { "PRAMIN", load_vbios_pramin, true }, - { "PROM", load_vbios_prom, false }, - { "PCIROM", load_vbios_pci, true }, -@@ -256,6 +277,11 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) - struct init_tbl_entry { - char *name; - uint8_t id; -+ /* Return: -+ * > 0: success, length of opcode -+ * 0: success, but abort further parsing of table (INIT_DONE etc) -+ * < 0: failure, table parsing will be aborted -+ */ - int (*handler)(struct nvbios *, uint16_t, struct init_exec *); - }; - -@@ -709,6 +735,83 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev) - return dcb_entry; - } - -+static int -+read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c) -+{ -+ uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4; -+ int i2c_entries = 
DCB_MAX_NUM_I2C_ENTRIES; -+ int recordoffset = 0, rdofs = 1, wrofs = 0; -+ uint8_t port_type = 0; -+ -+ if (!i2ctable) -+ return -EINVAL; -+ -+ if (dcb_version >= 0x30) { -+ if (i2ctable[0] != dcb_version) /* necessary? */ -+ NV_WARN(dev, -+ "DCB I2C table version mismatch (%02X vs %02X)\n", -+ i2ctable[0], dcb_version); -+ dcb_i2c_ver = i2ctable[0]; -+ headerlen = i2ctable[1]; -+ if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES) -+ i2c_entries = i2ctable[2]; -+ else -+ NV_WARN(dev, -+ "DCB I2C table has more entries than indexable " -+ "(%d entries, max %d)\n", i2ctable[2], -+ DCB_MAX_NUM_I2C_ENTRIES); -+ entry_len = i2ctable[3]; -+ /* [4] is i2c_default_indices, read in parse_dcb_table() */ -+ } -+ /* -+ * It's your own fault if you call this function on a DCB 1.1 BIOS -- -+ * the test below is for DCB 1.2 -+ */ -+ if (dcb_version < 0x14) { -+ recordoffset = 2; -+ rdofs = 0; -+ wrofs = 1; -+ } -+ -+ if (index == 0xf) -+ return 0; -+ if (index >= i2c_entries) { -+ NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n", -+ index, i2ctable[2]); -+ return -ENOENT; -+ } -+ if (i2ctable[headerlen + entry_len * index + 3] == 0xff) { -+ NV_ERROR(dev, "DCB I2C entry invalid\n"); -+ return -EINVAL; -+ } -+ -+ if (dcb_i2c_ver >= 0x30) { -+ port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index]; -+ -+ /* -+ * Fixup for chips using same address offset for read and -+ * write. -+ */ -+ if (port_type == 4) /* seen on C51 */ -+ rdofs = wrofs = 1; -+ if (port_type >= 5) /* G80+ */ -+ rdofs = wrofs = 0; -+ } -+ -+ if (dcb_i2c_ver >= 0x40) { -+ if (port_type != 5 && port_type != 6) -+ NV_WARN(dev, "DCB I2C table has port type %d\n", port_type); -+ -+ i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]); -+ } -+ -+ i2c->port_type = port_type; -+ i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index]; -+ i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index]; -+ -+ return 0; -+} -+ - static struct nouveau_i2c_chan * - init_i2c_device_find(struct drm_device *dev, int i2c_index) - { -@@ -727,6 +830,20 @@ init_i2c_device_find(struct drm_device *dev, int i2c_index) - } - if (i2c_index == 0x80) /* g80+ */ - i2c_index = dcb->i2c_default_indices & 0xf; -+ else -+ if (i2c_index == 0x81) -+ i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4; -+ -+ if (i2c_index >= DCB_MAX_NUM_I2C_ENTRIES) { -+ NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index); -+ return NULL; -+ } -+ -+ /* Make sure i2c table entry has been parsed, it may not -+ * have been if this is a bus not referenced by a DCB encoder -+ */ -+ read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table, -+ i2c_index, &dcb->i2c[i2c_index]); - - return nouveau_i2c_find(dev, i2c_index); - } -@@ -818,7 +935,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset, - NV_ERROR(bios->dev, - "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", - offset, config, count); -- return 0; -+ return -EINVAL; - } - - configval = ROM32(bios->data[offset + 11 + config * 4]); -@@ -920,7 +1037,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset, - NV_ERROR(bios->dev, - "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", - offset, config, count); -- return 0; -+ return -EINVAL; - } - - freq = ROM16(bios->data[offset + 12 + config * 2]); -@@ -1067,6 +1184,126 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset, - } - - static int -+init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) -+{ -+ /* -+ * INIT_DP_CONDITION opcode: 0x3A ('') -+ * -+ * offset (8 bit): 
opcode -+ * offset + 1 (8 bit): "sub" opcode -+ * offset + 2 (8 bit): unknown -+ * -+ */ -+ -+ struct bit_displayport_encoder_table *dpe = NULL; -+ struct dcb_entry *dcb = bios->display.output; -+ struct drm_device *dev = bios->dev; -+ uint8_t cond = bios->data[offset + 1]; -+ int dummy; -+ -+ BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond); -+ -+ if (!iexec->execute) -+ return 3; -+ -+ dpe = nouveau_bios_dp_table(dev, dcb, &dummy); -+ if (!dpe) { -+ NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset); -+ return -EINVAL; -+ } -+ -+ switch (cond) { -+ case 0: -+ { -+ struct dcb_connector_table_entry *ent = -+ &bios->dcb.connector.entry[dcb->connector]; -+ -+ if (ent->type != DCB_CONNECTOR_eDP) -+ iexec->execute = false; -+ } -+ break; -+ case 1: -+ case 2: -+ if (!(dpe->unknown & cond)) -+ iexec->execute = false; -+ break; -+ case 5: -+ { -+ struct nouveau_i2c_chan *auxch; -+ int ret; -+ -+ auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index); -+ if (!auxch) -+ return -ENODEV; -+ -+ ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1); -+ if (ret) -+ return ret; -+ -+ if (cond & 1) -+ iexec->execute = false; -+ } -+ break; -+ default: -+ NV_WARN(dev, "0x%04X: unknown INIT_3A op: %d\n", offset, cond); -+ break; -+ } -+ -+ if (iexec->execute) -+ BIOSLOG(bios, "0x%04X: continuing to execute\n", offset); -+ else -+ BIOSLOG(bios, "0x%04X: skipping following commands\n", offset); -+ -+ return 3; -+} -+ -+static int -+init_op_3b(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) -+{ -+ /* -+ * INIT_3B opcode: 0x3B ('') -+ * -+ * offset (8 bit): opcode -+ * offset + 1 (8 bit): crtc index -+ * -+ */ -+ -+ uint8_t or = ffs(bios->display.output->or) - 1; -+ uint8_t index = bios->data[offset + 1]; -+ uint8_t data; -+ -+ if (!iexec->execute) -+ return 2; -+ -+ data = bios_idxprt_rd(bios, 0x3d4, index); -+ bios_idxprt_wr(bios, 0x3d4, index, data & ~(1 << or)); -+ return 2; -+} -+ -+static int -+init_op_3c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) -+{ -+ /* -+ * INIT_3C opcode: 0x3C ('') -+ * -+ * offset (8 bit): opcode -+ * offset + 1 (8 bit): crtc index -+ * -+ */ -+ -+ uint8_t or = ffs(bios->display.output->or) - 1; -+ uint8_t index = bios->data[offset + 1]; -+ uint8_t data; -+ -+ if (!iexec->execute) -+ return 2; -+ -+ data = bios_idxprt_rd(bios, 0x3d4, index); -+ bios_idxprt_wr(bios, 0x3d4, index, data | (1 << or)); -+ return 2; -+} -+ -+static int - init_idx_addr_latched(struct nvbios *bios, uint16_t offset, - struct init_exec *iexec) - { -@@ -1170,7 +1407,7 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset, - NV_ERROR(bios->dev, - "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", - offset, config, count); -- return 0; -+ return -EINVAL; - } - - freq = ROM32(bios->data[offset + 11 + config * 4]); -@@ -1231,12 +1468,11 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - */ - - uint8_t i2c_index = bios->data[offset + 1]; -- uint8_t i2c_address = bios->data[offset + 2]; -+ uint8_t i2c_address = bios->data[offset + 2] >> 1; - uint8_t count = bios->data[offset + 3]; -- int len = 4 + count * 3; - struct nouveau_i2c_chan *chan; -- struct i2c_msg msg; -- int i; -+ int len = 4 + count * 3; -+ int ret, i; - - if (!iexec->execute) - return len; -@@ -1247,35 +1483,34 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - - chan = init_i2c_device_find(bios->dev, i2c_index); - if (!chan) -- return 0; -+ return -ENODEV; - - for (i = 0; i < count; i++) { -- uint8_t i2c_reg = 
bios->data[offset + 4 + i * 3]; -+ uint8_t reg = bios->data[offset + 4 + i * 3]; - uint8_t mask = bios->data[offset + 5 + i * 3]; - uint8_t data = bios->data[offset + 6 + i * 3]; -- uint8_t value; -+ union i2c_smbus_data val; - -- msg.addr = i2c_address; -- msg.flags = I2C_M_RD; -- msg.len = 1; -- msg.buf = &value; -- if (i2c_transfer(&chan->adapter, &msg, 1) != 1) -- return 0; -+ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, -+ I2C_SMBUS_READ, reg, -+ I2C_SMBUS_BYTE_DATA, &val); -+ if (ret < 0) -+ return ret; - - BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, " - "Mask: 0x%02X, Data: 0x%02X\n", -- offset, i2c_reg, value, mask, data); -+ offset, reg, val.byte, mask, data); - -- value = (value & mask) | data; -+ if (!bios->execute) -+ continue; - -- if (bios->execute) { -- msg.addr = i2c_address; -- msg.flags = 0; -- msg.len = 1; -- msg.buf = &value; -- if (i2c_transfer(&chan->adapter, &msg, 1) != 1) -- return 0; -- } -+ val.byte &= mask; -+ val.byte |= data; -+ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, -+ I2C_SMBUS_WRITE, reg, -+ I2C_SMBUS_BYTE_DATA, &val); -+ if (ret < 0) -+ return ret; - } - - return len; -@@ -1301,12 +1536,11 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - */ - - uint8_t i2c_index = bios->data[offset + 1]; -- uint8_t i2c_address = bios->data[offset + 2]; -+ uint8_t i2c_address = bios->data[offset + 2] >> 1; - uint8_t count = bios->data[offset + 3]; -- int len = 4 + count * 2; - struct nouveau_i2c_chan *chan; -- struct i2c_msg msg; -- int i; -+ int len = 4 + count * 2; -+ int ret, i; - - if (!iexec->execute) - return len; -@@ -1317,23 +1551,25 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - - chan = init_i2c_device_find(bios->dev, i2c_index); - if (!chan) -- return 0; -+ return -ENODEV; - - for (i = 0; i < count; i++) { -- uint8_t i2c_reg = bios->data[offset + 4 + i * 2]; -- uint8_t data = bios->data[offset + 5 + i * 2]; -+ uint8_t reg = bios->data[offset + 4 + i * 2]; -+ union i2c_smbus_data val; -+ -+ val.byte = bios->data[offset + 5 + i * 2]; - - BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n", -- offset, i2c_reg, data); -- -- if (bios->execute) { -- msg.addr = i2c_address; -- msg.flags = 0; -- msg.len = 1; -- msg.buf = &data; -- if (i2c_transfer(&chan->adapter, &msg, 1) != 1) -- return 0; -- } -+ offset, reg, val.byte); -+ -+ if (!bios->execute) -+ continue; -+ -+ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, -+ I2C_SMBUS_WRITE, reg, -+ I2C_SMBUS_BYTE_DATA, &val); -+ if (ret < 0) -+ return ret; - } - - return len; -@@ -1357,7 +1593,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - */ - - uint8_t i2c_index = bios->data[offset + 1]; -- uint8_t i2c_address = bios->data[offset + 2]; -+ uint8_t i2c_address = bios->data[offset + 2] >> 1; - uint8_t count = bios->data[offset + 3]; - int len = 4 + count; - struct nouveau_i2c_chan *chan; -@@ -1374,7 +1610,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - - chan = init_i2c_device_find(bios->dev, i2c_index); - if (!chan) -- return 0; -+ return -ENODEV; - - for (i = 0; i < count; i++) { - data[i] = bios->data[offset + 4 + i]; -@@ -1388,7 +1624,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - msg.len = count; - msg.buf = data; - if (i2c_transfer(&chan->adapter, &msg, 1) != 1) -- return 0; -+ return -EIO; - } - - return len; -@@ -1427,7 +1663,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - - 
reg = get_tmds_index_reg(bios->dev, mlv); - if (!reg) -- return 0; -+ return -EINVAL; - - bios_wr32(bios, reg, - tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE); -@@ -1471,7 +1707,7 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset, - - reg = get_tmds_index_reg(bios->dev, mlv); - if (!reg) -- return 0; -+ return -EINVAL; - - for (i = 0; i < count; i++) { - uint8_t tmdsaddr = bios->data[offset + 3 + i * 2]; -@@ -1946,7 +2182,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset, - uint32_t reg, data; - - if (bios->major_version > 2) -- return 0; -+ return -ENODEV; - - bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd( - bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20); -@@ -2001,7 +2237,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset, - int clock; - - if (bios->major_version > 2) -- return 0; -+ return -ENODEV; - - clock = ROM16(bios->data[meminitoffs + 4]) * 10; - setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock); -@@ -2034,7 +2270,7 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset, - uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6)); - - if (bios->major_version > 2) -- return 0; -+ return -ENODEV; - - bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, - NV_CIO_CRE_SCRATCH4__INDEX, cr3c); -@@ -2591,7 +2827,10 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - - BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry); - -- nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default); -+ BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n", -+ offset, gpio->tag, gpio->state_default); -+ if (bios->execute) -+ nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default); - - /* The NVIDIA binary driver doesn't appear to actually do - * any of this, my VBIOS does however. 
-@@ -2656,7 +2895,7 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset, - NV_ERROR(bios->dev, - "0x%04X: Zero block length - has the M table " - "been parsed?\n", offset); -- return 0; -+ return -EINVAL; - } - - strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf; -@@ -2840,14 +3079,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - - if (!bios->display.output) { - NV_ERROR(dev, "INIT_AUXCH: no active output\n"); -- return 0; -+ return -EINVAL; - } - - auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); - if (!auxch) { - NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n", - bios->display.output->i2c_index); -- return 0; -+ return -ENODEV; - } - - if (!iexec->execute) -@@ -2860,7 +3099,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1); - if (ret) { - NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret); -- return 0; -+ return ret; - } - - data &= bios->data[offset + 0]; -@@ -2869,7 +3108,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1); - if (ret) { - NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret); -- return 0; -+ return ret; - } - } - -@@ -2899,14 +3138,14 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - - if (!bios->display.output) { - NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n"); -- return 0; -+ return -EINVAL; - } - - auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); - if (!auxch) { - NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n", - bios->display.output->i2c_index); -- return 0; -+ return -ENODEV; - } - - if (!iexec->execute) -@@ -2917,7 +3156,7 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1); - if (ret) { - NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret); -- return 0; -+ return ret; - } - } - -@@ -2934,6 +3173,9 @@ static struct init_tbl_entry itbl_entry[] = { - { "INIT_COPY" , 0x37, init_copy }, - { "INIT_NOT" , 0x38, init_not }, - { "INIT_IO_FLAG_CONDITION" , 0x39, init_io_flag_condition }, -+ { "INIT_DP_CONDITION" , 0x3A, init_dp_condition }, -+ { "INIT_OP_3B" , 0x3B, init_op_3b }, -+ { "INIT_OP_3C" , 0x3C, init_op_3c }, - { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, init_idx_addr_latched }, - { "INIT_IO_RESTRICT_PLL2" , 0x4A, init_io_restrict_pll2 }, - { "INIT_PLL2" , 0x4B, init_pll2 }, -@@ -3001,7 +3243,7 @@ parse_init_table(struct nvbios *bios, unsigned int offset, - * is changed back to EXECUTE. - */ - -- int count = 0, i, res; -+ int count = 0, i, ret; - uint8_t id; - - /* -@@ -3016,26 +3258,33 @@ parse_init_table(struct nvbios *bios, unsigned int offset, - for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++) - ; - -- if (itbl_entry[i].name) { -- BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", -- offset, itbl_entry[i].id, itbl_entry[i].name); -- -- /* execute eventual command handler */ -- res = (*itbl_entry[i].handler)(bios, offset, iexec); -- if (!res) -- break; -- /* -- * Add the offset of the current command including all data -- * of that command. The offset will then be pointing on the -- * next op code. 
-- */ -- offset += res; -- } else { -+ if (!itbl_entry[i].name) { - NV_ERROR(bios->dev, - "0x%04X: Init table command not found: " - "0x%02X\n", offset, id); - return -ENOENT; - } -+ -+ BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", offset, -+ itbl_entry[i].id, itbl_entry[i].name); -+ -+ /* execute eventual command handler */ -+ ret = (*itbl_entry[i].handler)(bios, offset, iexec); -+ if (ret < 0) { -+ NV_ERROR(bios->dev, "0x%04X: Failed parsing init " -+ "table opcode: %s %d\n", offset, -+ itbl_entry[i].name, ret); -+ } -+ -+ if (ret <= 0) -+ break; -+ -+ /* -+ * Add the offset of the current command including all data -+ * of that command. The offset will then be pointing on the -+ * next op code. -+ */ -+ offset += ret; - } - - if (offset >= bios->length) -@@ -3671,7 +3920,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b - - static uint8_t * - bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent, -- uint16_t record, int record_len, int record_nr) -+ uint16_t record, int record_len, int record_nr, -+ bool match_link) - { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->vbios; -@@ -3679,12 +3929,28 @@ bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent, - uint16_t table; - int i, v; - -+ switch (dcbent->type) { -+ case OUTPUT_TMDS: -+ case OUTPUT_LVDS: -+ case OUTPUT_DP: -+ break; -+ default: -+ match_link = false; -+ break; -+ } -+ - for (i = 0; i < record_nr; i++, record += record_len) { - table = ROM16(bios->data[record]); - if (!table) - continue; - entry = ROM32(bios->data[table]); - -+ if (match_link) { -+ v = (entry & 0x00c00000) >> 22; -+ if (!(v & dcbent->sorconf.link)) -+ continue; -+ } -+ - v = (entry & 0x000f0000) >> 16; - if (!(v & dcbent->or)) - continue; -@@ -3726,7 +3992,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent, - *length = table[4]; - return bios_output_config_match(dev, dcbent, - bios->display.dp_table_ptr + table[1], -- table[2], table[3]); -+ table[2], table[3], table[0] >= 0x21); - } - - int -@@ -3815,7 +4081,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, - dcbent->type, dcbent->location, dcbent->or); - otable = bios_output_config_match(dev, dcbent, table[1] + - bios->display.script_table_ptr, -- table[2], table[3]); -+ table[2], table[3], table[0] >= 0x21); - if (!otable) { - NV_ERROR(dev, "Couldn't find matching output script table\n"); - return 1; -@@ -4285,31 +4551,32 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims - break; - } - --#if 0 /* for easy debugging */ -- ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq); -- ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq); -- ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq); -- ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq); -- -- ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq); -- ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq); -- ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq); -- ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq); -- -- ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n); -- ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n); -- ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m); -- ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m); -- ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n); -- ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n); -- 
ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m); -- ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m); -- -- ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p); -- ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias); -- -- ErrorF("pll.refclk: %d\n", pll_lim->refclk); --#endif -+ NV_DEBUG(dev, "pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq); -+ NV_DEBUG(dev, "pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq); -+ NV_DEBUG(dev, "pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq); -+ NV_DEBUG(dev, "pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq); -+ NV_DEBUG(dev, "pll.vco1.min_n: %d\n", pll_lim->vco1.min_n); -+ NV_DEBUG(dev, "pll.vco1.max_n: %d\n", pll_lim->vco1.max_n); -+ NV_DEBUG(dev, "pll.vco1.min_m: %d\n", pll_lim->vco1.min_m); -+ NV_DEBUG(dev, "pll.vco1.max_m: %d\n", pll_lim->vco1.max_m); -+ if (pll_lim->vco2.maxfreq) { -+ NV_DEBUG(dev, "pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq); -+ NV_DEBUG(dev, "pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq); -+ NV_DEBUG(dev, "pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq); -+ NV_DEBUG(dev, "pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq); -+ NV_DEBUG(dev, "pll.vco2.min_n: %d\n", pll_lim->vco2.min_n); -+ NV_DEBUG(dev, "pll.vco2.max_n: %d\n", pll_lim->vco2.max_n); -+ NV_DEBUG(dev, "pll.vco2.min_m: %d\n", pll_lim->vco2.min_m); -+ NV_DEBUG(dev, "pll.vco2.max_m: %d\n", pll_lim->vco2.max_m); -+ } -+ if (!pll_lim->max_p) { -+ NV_DEBUG(dev, "pll.max_log2p: %d\n", pll_lim->max_log2p); -+ NV_DEBUG(dev, "pll.log2p_bias: %d\n", pll_lim->log2p_bias); -+ } else { -+ NV_DEBUG(dev, "pll.min_p: %d\n", pll_lim->min_p); -+ NV_DEBUG(dev, "pll.max_p: %d\n", pll_lim->max_p); -+ } -+ NV_DEBUG(dev, "pll.refclk: %d\n", pll_lim->refclk); - - return 0; - } -@@ -4953,79 +5220,6 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len) - return 0; - } - --static int --read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c) --{ -- uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4; -- int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES; -- int recordoffset = 0, rdofs = 1, wrofs = 0; -- uint8_t port_type = 0; -- -- if (!i2ctable) -- return -EINVAL; -- -- if (dcb_version >= 0x30) { -- if (i2ctable[0] != dcb_version) /* necessary? */ -- NV_WARN(dev, -- "DCB I2C table version mismatch (%02X vs %02X)\n", -- i2ctable[0], dcb_version); -- dcb_i2c_ver = i2ctable[0]; -- headerlen = i2ctable[1]; -- if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES) -- i2c_entries = i2ctable[2]; -- else -- NV_WARN(dev, -- "DCB I2C table has more entries than indexable " -- "(%d entries, max %d)\n", i2ctable[2], -- DCB_MAX_NUM_I2C_ENTRIES); -- entry_len = i2ctable[3]; -- /* [4] is i2c_default_indices, read in parse_dcb_table() */ -- } -- /* -- * It's your own fault if you call this function on a DCB 1.1 BIOS -- -- * the test below is for DCB 1.2 -- */ -- if (dcb_version < 0x14) { -- recordoffset = 2; -- rdofs = 0; -- wrofs = 1; -- } -- -- if (index == 0xf) -- return 0; -- if (index >= i2c_entries) { -- NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n", -- index, i2ctable[2]); -- return -ENOENT; -- } -- if (i2ctable[headerlen + entry_len * index + 3] == 0xff) { -- NV_ERROR(dev, "DCB I2C entry invalid\n"); -- return -EINVAL; -- } -- -- if (dcb_i2c_ver >= 0x30) { -- port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index]; -- -- /* -- * Fixup for chips using same address offset for read and -- * write. 
-- */ -- if (port_type == 4) /* seen on C51 */ -- rdofs = wrofs = 1; -- if (port_type >= 5) /* G80+ */ -- rdofs = wrofs = 0; -- } -- -- if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6) -- NV_WARN(dev, "DCB I2C table has port type %d\n", port_type); -- -- i2c->port_type = port_type; -- i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index]; -- i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index]; -- -- return 0; --} -- - static struct dcb_gpio_entry * - new_gpio_entry(struct nvbios *bios) - { -@@ -5379,12 +5573,6 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, - entry->bus = (conn >> 16) & 0xf; - entry->location = (conn >> 20) & 0x3; - entry->or = (conn >> 24) & 0xf; -- /* -- * Normal entries consist of a single bit, but dual link has the -- * next most significant bit set too -- */ -- entry->duallink_possible = -- ((1 << (ffs(entry->or) - 1)) * 3 == entry->or); - - switch (entry->type) { - case OUTPUT_ANALOG: -@@ -5468,6 +5656,16 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, - break; - } - -+ if (dcb->version < 0x40) { -+ /* Normal entries consist of a single bit, but dual link has -+ * the next most significant bit set too -+ */ -+ entry->duallink_possible = -+ ((1 << (ffs(entry->or) - 1)) * 3 == entry->or); -+ } else { -+ entry->duallink_possible = (entry->sorconf.link == 3); -+ } -+ - /* unsure what DCB version introduces this, 3.0? */ - if (conf & 0x100000) - entry->i2c_upper_default = true; -@@ -6051,6 +6249,30 @@ nouveau_bios_i2c_devices_takedown(struct drm_device *dev) - nouveau_i2c_fini(dev, entry); - } - -+static bool -+nouveau_bios_posted(struct drm_device *dev) -+{ -+ struct drm_nouveau_private *dev_priv = dev->dev_private; -+ bool was_locked; -+ unsigned htotal; -+ -+ if (dev_priv->chipset >= NV_50) { -+ if (NVReadVgaCrtc(dev, 0, 0x00) == 0 && -+ NVReadVgaCrtc(dev, 0, 0x1a) == 0) -+ return false; -+ return true; -+ } -+ -+ was_locked = NVLockVgaCrtcs(dev, false); -+ htotal = NVReadVgaCrtc(dev, 0, 0x06); -+ htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8; -+ htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4; -+ htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10; -+ htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11; -+ NVLockVgaCrtcs(dev, was_locked); -+ return (htotal != 0); -+} -+ - int - nouveau_bios_init(struct drm_device *dev) - { -@@ -6085,11 +6307,9 @@ nouveau_bios_init(struct drm_device *dev) - bios->execute = false; - - /* ... 
unless card isn't POSTed already */ -- if (dev_priv->card_type >= NV_10 && -- NVReadVgaCrtc(dev, 0, 0x00) == 0 && -- NVReadVgaCrtc(dev, 0, 0x1a) == 0) { -+ if (!nouveau_bios_posted(dev)) { - NV_INFO(dev, "Adaptor not initialised\n"); -- if (dev_priv->card_type < NV_50) { -+ if (dev_priv->card_type < NV_40) { - NV_ERROR(dev, "Unable to POST this chipset\n"); - return -ENODEV; - } -diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h -index c0d7b0a..adf4ec2 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_bios.h -+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h -@@ -35,6 +35,7 @@ - #define DCB_LOC_ON_CHIP 0 - - struct dcb_i2c_entry { -+ uint32_t entry; - uint8_t port_type; - uint8_t read, write; - struct nouveau_i2c_chan *chan; -diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c -index 957d176..6f3c195 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_bo.c -+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c -@@ -160,11 +160,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, - ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, - ttm_bo_type_device, &nvbo->placement, align, 0, - false, NULL, size, nouveau_bo_del_ttm); -- nvbo->channel = NULL; - if (ret) { - /* ttm will call nouveau_bo_del_ttm if it fails.. */ - return ret; - } -+ nvbo->channel = NULL; - - spin_lock(&dev_priv->ttm.bo_list_lock); - list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list); -@@ -225,7 +225,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) - - nouveau_bo_placement_set(nvbo, memtype, 0); - -- ret = ttm_bo_validate(bo, &nvbo->placement, false, false); -+ ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false); - if (ret == 0) { - switch (bo->mem.mem_type) { - case TTM_PL_VRAM: -@@ -261,7 +261,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) - - nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); - -- ret = ttm_bo_validate(bo, &nvbo->placement, false, false); -+ ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false); - if (ret == 0) { - switch (bo->mem.mem_type) { - case TTM_PL_VRAM: -@@ -391,25 +391,16 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, - break; - case TTM_PL_VRAM: - man->flags = TTM_MEMTYPE_FLAG_FIXED | -- TTM_MEMTYPE_FLAG_MAPPABLE | -- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP; -+ TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_WC; - man->default_caching = TTM_PL_FLAG_WC; -- -- man->io_addr = NULL; -- man->io_offset = drm_get_resource_start(dev, 1); -- man->io_size = drm_get_resource_len(dev, 1); -- if (man->io_size > dev_priv->vram_size) -- man->io_size = dev_priv->vram_size; -- - man->gpu_offset = dev_priv->vm_vram_base; - break; - case TTM_PL_TT: - switch (dev_priv->gart_info.type) { - case NOUVEAU_GART_AGP: -- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | -- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP; -+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_FLAG_UNCACHED; - man->default_caching = TTM_PL_FLAG_UNCACHED; - break; -@@ -424,10 +415,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, - dev_priv->gart_info.type); - return -EINVAL; - } -- -- man->io_offset = dev_priv->gart_info.aper_base; -- man->io_size = dev_priv->gart_info.aper_size; -- man->io_addr = NULL; - man->gpu_offset = dev_priv->vm_gart_base; - break; - default: -@@ -462,7 +449,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) - - static int - nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, -- 
struct nouveau_bo *nvbo, bool evict, bool no_wait, -+ struct nouveau_bo *nvbo, bool evict, -+ bool no_wait_reserve, bool no_wait_gpu, - struct ttm_mem_reg *new_mem) - { - struct nouveau_fence *fence = NULL; -@@ -473,7 +461,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, - return ret; - - ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, -- evict, no_wait, new_mem); -+ evict, no_wait_reserve, no_wait_gpu, new_mem); - if (nvbo->channel && nvbo->channel != chan) - ret = nouveau_fence_wait(fence, NULL, false, false); - nouveau_fence_unref((void *)&fence); -@@ -497,7 +485,8 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan, - - static int - nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, -- int no_wait, struct ttm_mem_reg *new_mem) -+ bool no_wait_reserve, bool no_wait_gpu, -+ struct ttm_mem_reg *new_mem) - { - struct nouveau_bo *nvbo = nouveau_bo(bo); - struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); -@@ -575,12 +564,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, - dst_offset += (PAGE_SIZE * line_count); - } - -- return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem); -+ return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem); - } - - static int - nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, -- bool no_wait, struct ttm_mem_reg *new_mem) -+ bool no_wait_reserve, bool no_wait_gpu, -+ struct ttm_mem_reg *new_mem) - { - u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; - struct ttm_placement placement; -@@ -593,7 +583,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, - - tmp_mem = *new_mem; - tmp_mem.mm_node = NULL; -- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait); -+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu); - if (ret) - return ret; - -@@ -601,11 +591,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, - if (ret) - goto out; - -- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem); -+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem); - if (ret) - goto out; - -- ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem); -+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); - out: - if (tmp_mem.mm_node) { - spin_lock(&bo->bdev->glob->lru_lock); -@@ -618,7 +608,8 @@ out: - - static int - nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, -- bool no_wait, struct ttm_mem_reg *new_mem) -+ bool no_wait_reserve, bool no_wait_gpu, -+ struct ttm_mem_reg *new_mem) - { - u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; - struct ttm_placement placement; -@@ -631,15 +622,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, - - tmp_mem = *new_mem; - tmp_mem.mm_node = NULL; -- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait); -+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu); - if (ret) - return ret; - -- ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem); -+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem); - if (ret) - goto out; - -- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem); -+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); - if (ret) - goto out; - -@@ -706,7 +697,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object 
*bo, - - static int - nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, -- bool no_wait, struct ttm_mem_reg *new_mem) -+ bool no_wait_reserve, bool no_wait_gpu, -+ struct ttm_mem_reg *new_mem) - { - struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); - struct nouveau_bo *nvbo = nouveau_bo(bo); -@@ -721,7 +713,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, - /* Software copy if the card isn't up and running yet. */ - if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE || - !dev_priv->channel) { -- ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); -+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); - goto out; - } - -@@ -735,17 +727,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, - - /* Hardware assisted copy. */ - if (new_mem->mem_type == TTM_PL_SYSTEM) -- ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem); -+ ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); - else if (old_mem->mem_type == TTM_PL_SYSTEM) -- ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem); -+ ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); - else -- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem); -+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); - - if (!ret) - goto out; - - /* Fallback to software copy. */ -- ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); -+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); - - out: - if (ret) -@@ -762,6 +754,55 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) - return 0; - } - -+static int -+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) -+{ -+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; -+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); -+ struct drm_device *dev = dev_priv->dev; -+ -+ mem->bus.addr = NULL; -+ mem->bus.offset = 0; -+ mem->bus.size = mem->num_pages << PAGE_SHIFT; -+ mem->bus.base = 0; -+ mem->bus.is_iomem = false; -+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) -+ return -EINVAL; -+ switch (mem->mem_type) { -+ case TTM_PL_SYSTEM: -+ /* System memory */ -+ return 0; -+ case TTM_PL_TT: -+#if __OS_HAS_AGP -+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { -+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; -+ mem->bus.base = dev_priv->gart_info.aper_base; -+ mem->bus.is_iomem = true; -+ } -+#endif -+ break; -+ case TTM_PL_VRAM: -+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; -+ mem->bus.base = drm_get_resource_start(dev, 1); -+ mem->bus.is_iomem = true; -+ break; -+ default: -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+static void -+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) -+{ -+} -+ -+static int -+nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) -+{ -+ return 0; -+} -+ - struct ttm_bo_driver nouveau_bo_driver = { - .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, - .invalidate_caches = nouveau_bo_invalidate_caches, -@@ -774,5 +815,8 @@ struct ttm_bo_driver nouveau_bo_driver = { - .sync_obj_flush = nouveau_fence_flush, - .sync_obj_unref = nouveau_fence_unref, - .sync_obj_ref = nouveau_fence_ref, -+ .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, -+ .io_mem_reserve = &nouveau_ttm_io_mem_reserve, -+ .io_mem_free = &nouveau_ttm_io_mem_free, - }; - -diff --git 
a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c -index 14afe1e..149ed22 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_connector.c -+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c -@@ -241,7 +241,8 @@ nouveau_connector_detect(struct drm_connector *connector) - if (nv_encoder && nv_connector->native_mode) { - unsigned status = connector_status_connected; - --#ifdef CONFIG_ACPI -+#if defined(CONFIG_ACPI_BUTTON) || \ -+ (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE)) - if (!nouveau_ignorelid && !acpi_lid_open()) - status = connector_status_unknown; - #endif -@@ -431,24 +432,27 @@ nouveau_connector_set_property(struct drm_connector *connector, - } - - static struct drm_display_mode * --nouveau_connector_native_mode(struct nouveau_connector *connector) -+nouveau_connector_native_mode(struct drm_connector *connector) - { -- struct drm_device *dev = connector->base.dev; -+ struct drm_connector_helper_funcs *helper = connector->helper_private; -+ struct nouveau_connector *nv_connector = nouveau_connector(connector); -+ struct drm_device *dev = connector->dev; - struct drm_display_mode *mode, *largest = NULL; - int high_w = 0, high_h = 0, high_v = 0; - -- /* Use preferred mode if there is one.. */ -- list_for_each_entry(mode, &connector->base.probed_modes, head) { -+ list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { -+ if (helper->mode_valid(connector, mode) != MODE_OK) -+ continue; -+ -+ /* Use preferred mode if there is one.. */ - if (mode->type & DRM_MODE_TYPE_PREFERRED) { - NV_DEBUG_KMS(dev, "native mode from preferred\n"); - return drm_mode_duplicate(dev, mode); - } -- } - -- /* Otherwise, take the resolution with the largest width, then height, -- * then vertical refresh -- */ -- list_for_each_entry(mode, &connector->base.probed_modes, head) { -+ /* Otherwise, take the resolution with the largest width, then -+ * height, then vertical refresh -+ */ - if (mode->hdisplay < high_w) - continue; - -@@ -552,7 +556,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) - */ - if (!nv_connector->native_mode) - nv_connector->native_mode = -- nouveau_connector_native_mode(nv_connector); -+ nouveau_connector_native_mode(connector); - if (ret == 0 && nv_connector->native_mode) { - struct drm_display_mode *mode; - -@@ -583,9 +587,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector, - - switch (nv_encoder->dcb->type) { - case OUTPUT_LVDS: -- BUG_ON(!nv_connector->native_mode); -- if (mode->hdisplay > nv_connector->native_mode->hdisplay || -- mode->vdisplay > nv_connector->native_mode->vdisplay) -+ if (nv_connector->native_mode && -+ (mode->hdisplay > nv_connector->native_mode->hdisplay || -+ mode->vdisplay > nv_connector->native_mode->vdisplay)) - return MODE_PANEL; - - min_clock = 0; -@@ -593,8 +597,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector, - break; - case OUTPUT_TMDS: - if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) || -- (dev_priv->card_type < NV_50 && -- !nv_encoder->dcb->duallink_possible)) -+ !nv_encoder->dcb->duallink_possible) - max_clock = 165000; - else - max_clock = 330000; -@@ -728,7 +731,7 @@ nouveau_connector_create_lvds(struct drm_device *dev, - if (ret == 0) - goto out; - nv_connector->detected_encoder = nv_encoder; -- nv_connector->native_mode = nouveau_connector_native_mode(nv_connector); -+ nv_connector->native_mode = nouveau_connector_native_mode(connector); - list_for_each_entry_safe(mode, temp, &connector->probed_modes, head) - drm_mode_remove(connector, 
mode); - -@@ -843,6 +846,7 @@ nouveau_connector_create(struct drm_device *dev, - - switch (dcb->type) { - case DCB_CONNECTOR_VGA: -+ connector->polled = DRM_CONNECTOR_POLL_CONNECT; - if (dev_priv->card_type >= NV_50) { - drm_connector_attach_property(connector, - dev->mode_config.scaling_mode_property, -@@ -854,6 +858,17 @@ nouveau_connector_create(struct drm_device *dev, - case DCB_CONNECTOR_TV_3: - nv_connector->scaling_mode = DRM_MODE_SCALE_NONE; - break; -+ case DCB_CONNECTOR_DP: -+ case DCB_CONNECTOR_eDP: -+ case DCB_CONNECTOR_HDMI_0: -+ case DCB_CONNECTOR_HDMI_1: -+ case DCB_CONNECTOR_DVI_I: -+ case DCB_CONNECTOR_DVI_D: -+ if (dev_priv->card_type >= NV_50) -+ connector->polled = DRM_CONNECTOR_POLL_HPD; -+ else -+ connector->polled = DRM_CONNECTOR_POLL_CONNECT; -+ /* fall-through */ - default: - nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; - -diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h -index 49fa7b2..cb1ce2a 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_crtc.h -+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h -@@ -40,6 +40,8 @@ struct nouveau_crtc { - int sharpness; - int last_dpms; - -+ int cursor_saved_x, cursor_saved_y; -+ - struct { - int cpp; - bool blanked; -diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c -index a251886..7933de4 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c -+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c -@@ -33,6 +33,8 @@ - #include "drmP.h" - #include "nouveau_drv.h" - -+#include <ttm/ttm_page_alloc.h> -+ - static int - nouveau_debugfs_channel_info(struct seq_file *m, void *data) - { -@@ -159,6 +161,7 @@ static struct drm_info_list nouveau_debugfs_list[] = { - { "chipset", nouveau_debugfs_chipset_info, 0, NULL }, - { "memory", nouveau_debugfs_memory_info, 0, NULL }, - { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL }, -+ { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL }, - }; - #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list) - -diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c -index cf1c5c0..74e6b4e 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_display.c -+++ b/drivers/gpu/drm/nouveau/nouveau_display.c -@@ -34,10 +34,6 @@ static void - nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) - { - struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); -- struct drm_device *dev = drm_fb->dev; -- -- if (drm_fb->fbdev) -- nouveau_fbcon_remove(dev, drm_fb); - - if (fb->nvbo) - drm_gem_object_unreference_unlocked(fb->nvbo->gem); -@@ -61,27 +57,20 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = { - .create_handle = nouveau_user_framebuffer_create_handle, - }; - --struct drm_framebuffer * --nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo, -- struct drm_mode_fb_cmd *mode_cmd) -+int -+nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb, -+ struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo) - { -- struct nouveau_framebuffer *fb; - int ret; - -- fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); -- if (!fb) -- return NULL; -- -- ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs); -+ ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs); - if (ret) { -- kfree(fb); -- return NULL; -+ return ret; - } - -- drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd); -- -- fb->nvbo = nvbo; -- return &fb->base; -+ 
drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd); -+ nouveau_fb->nvbo = nvbo; -+ return 0; - } - - static struct drm_framebuffer * -@@ -89,24 +78,29 @@ nouveau_user_framebuffer_create(struct drm_device *dev, - struct drm_file *file_priv, - struct drm_mode_fb_cmd *mode_cmd) - { -- struct drm_framebuffer *fb; -+ struct nouveau_framebuffer *nouveau_fb; - struct drm_gem_object *gem; -+ int ret; - - gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); - if (!gem) - return NULL; - -- fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd); -- if (!fb) { -+ nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); -+ if (!nouveau_fb) -+ return NULL; -+ -+ ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem)); -+ if (ret) { - drm_gem_object_unreference(gem); - return NULL; - } - -- return fb; -+ return &nouveau_fb->base; - } - - const struct drm_mode_config_funcs nouveau_mode_config_funcs = { - .fb_create = nouveau_user_framebuffer_create, -- .fb_changed = nouveau_fbcon_probe, -+ .output_poll_changed = nouveau_fbcon_output_poll_changed, - }; - -diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c -index 1de974a..2737704 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_drv.c -+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c -@@ -153,7 +153,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) - struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; - struct nouveau_channel *chan; - struct drm_crtc *crtc; -- uint32_t fbdev_flags; - int ret, i; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) -@@ -163,8 +162,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) - return 0; - - NV_INFO(dev, "Disabling fbcon acceleration...\n"); -- fbdev_flags = dev_priv->fbdev_info->flags; -- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; -+ nouveau_fbcon_save_disable_accel(dev); - - NV_INFO(dev, "Unpinning framebuffer(s)...\n"); - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -@@ -177,6 +175,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) - nouveau_bo_unpin(nouveau_fb->nvbo); - } - -+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); -+ -+ nouveau_bo_unmap(nv_crtc->cursor.nvbo); -+ nouveau_bo_unpin(nv_crtc->cursor.nvbo); -+ } -+ - NV_INFO(dev, "Evicting buffers...\n"); - ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); - -@@ -230,9 +235,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) - } - - acquire_console_sem(); -- fb_set_suspend(dev_priv->fbdev_info, 1); -+ nouveau_fbcon_set_suspend(dev, 1); - release_console_sem(); -- dev_priv->fbdev_info->flags = fbdev_flags; -+ nouveau_fbcon_restore_accel(dev); - return 0; - - out_abort: -@@ -250,14 +255,12 @@ nouveau_pci_resume(struct pci_dev *pdev) - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine *engine = &dev_priv->engine; - struct drm_crtc *crtc; -- uint32_t fbdev_flags; - int ret, i; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -ENODEV; - -- fbdev_flags = dev_priv->fbdev_info->flags; -- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; -+ nouveau_fbcon_save_disable_accel(dev); - - NV_INFO(dev, "We're back, enabling device...\n"); - pci_set_power_state(pdev, PCI_D0); -@@ -318,12 +321,34 @@ nouveau_pci_resume(struct pci_dev *pdev) - nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM); - } - -+ list_for_each_entry(crtc, 
&dev->mode_config.crtc_list, head) { -+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); -+ int ret; -+ -+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); -+ if (!ret) -+ ret = nouveau_bo_map(nv_crtc->cursor.nvbo); -+ if (ret) -+ NV_ERROR(dev, "Could not pin/map cursor.\n"); -+ } -+ - if (dev_priv->card_type < NV_50) { - nv04_display_restore(dev); - NVLockVgaCrtcs(dev, false); - } else - nv50_display_init(dev); - -+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); -+ -+ nv_crtc->cursor.set_offset(nv_crtc, -+ nv_crtc->cursor.nvbo->bo.offset - -+ dev_priv->vm_vram_base); -+ -+ nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, -+ nv_crtc->cursor_saved_y); -+ } -+ - /* Force CLUT to get re-loaded during modeset */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); -@@ -332,13 +357,14 @@ nouveau_pci_resume(struct pci_dev *pdev) - } - - acquire_console_sem(); -- fb_set_suspend(dev_priv->fbdev_info, 0); -+ nouveau_fbcon_set_suspend(dev, 0); - release_console_sem(); - -- nouveau_fbcon_zfill(dev); -+ nouveau_fbcon_zfill_all(dev); - - drm_helper_resume_force_mode(dev); -- dev_priv->fbdev_info->flags = fbdev_flags; -+ -+ nouveau_fbcon_restore_accel(dev); - return 0; - } - -diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h -index ace630a..c697191 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_drv.h -+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h -@@ -535,6 +535,7 @@ struct drm_nouveau_private { - - struct fb_info *fbdev_info; - -+ int fifo_alloc_count; - struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; - - struct nouveau_engine engine; -@@ -621,6 +622,9 @@ struct drm_nouveau_private { - struct { - struct dentry *channel_root; - } debugfs; -+ -+ struct nouveau_fbdev *nfbdev; -+ struct apertures_struct *apertures; - }; - - static inline struct drm_nouveau_private * -@@ -847,12 +851,17 @@ extern int nouveau_dma_init(struct nouveau_channel *); - extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size); - - /* nouveau_acpi.c */ -+#define ROM_BIOS_PAGE 4096 - #if defined(CONFIG_ACPI) - void nouveau_register_dsm_handler(void); - void nouveau_unregister_dsm_handler(void); -+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len); -+bool nouveau_acpi_rom_supported(struct pci_dev *pdev); - #else - static inline void nouveau_register_dsm_handler(void) {} - static inline void nouveau_unregister_dsm_handler(void) {} -+static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; } -+static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; } - #endif - - /* nouveau_backlight.c */ -@@ -1166,6 +1175,12 @@ int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); - int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); - int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); - -+/* nv50_calc. 
*/ -+int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk, -+ int *N1, int *M1, int *N2, int *M2, int *P); -+int nv50_calc_pll2(struct drm_device *, struct pll_lims *, -+ int clk, int *N, int *fN, int *M, int *P); -+ - #ifndef ioread32_native - #ifdef __BIG_ENDIAN - #define ioread16_native ioread16be -diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h -index 9f28b94..e1df820 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_encoder.h -+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h -@@ -48,6 +48,8 @@ struct nouveau_encoder { - union { - struct { - int mc_unknown; -+ uint32_t unk0; -+ uint32_t unk1; - int dpcd_version; - int link_nr; - int link_bw; -diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h -index 4a3f31a..d432134 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_fb.h -+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h -@@ -40,8 +40,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb) - - extern const struct drm_mode_config_funcs nouveau_mode_config_funcs; - --struct drm_framebuffer * --nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *, -- struct drm_mode_fb_cmd *); -- -+int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb, -+ struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo); - #endif /* __NOUVEAU_FB_H__ */ -diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c -index 8e7dc1d..c9a4a0d 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c -+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c -@@ -52,8 +52,8 @@ - static int - nouveau_fbcon_sync(struct fb_info *info) - { -- struct nouveau_fbcon_par *par = info->par; -- struct drm_device *dev = par->dev; -+ struct nouveau_fbdev *nfbdev = info->par; -+ struct drm_device *dev = nfbdev->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan = dev_priv->channel; - int ret, i; -@@ -97,7 +97,6 @@ static struct fb_ops nouveau_fbcon_ops = { - .owner = THIS_MODULE, - .fb_check_var = drm_fb_helper_check_var, - .fb_set_par = drm_fb_helper_set_par, -- .fb_setcolreg = drm_fb_helper_setcolreg, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, -@@ -111,7 +110,6 @@ static struct fb_ops nv04_fbcon_ops = { - .owner = THIS_MODULE, - .fb_check_var = drm_fb_helper_check_var, - .fb_set_par = drm_fb_helper_set_par, -- .fb_setcolreg = drm_fb_helper_setcolreg, - .fb_fillrect = nv04_fbcon_fillrect, - .fb_copyarea = nv04_fbcon_copyarea, - .fb_imageblit = nv04_fbcon_imageblit, -@@ -125,7 +123,6 @@ static struct fb_ops nv50_fbcon_ops = { - .owner = THIS_MODULE, - .fb_check_var = drm_fb_helper_check_var, - .fb_set_par = drm_fb_helper_set_par, -- .fb_setcolreg = drm_fb_helper_setcolreg, - .fb_fillrect = nv50_fbcon_fillrect, - .fb_copyarea = nv50_fbcon_copyarea, - .fb_imageblit = nv50_fbcon_imageblit, -@@ -155,54 +152,10 @@ static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, - *blue = nv_crtc->lut.b[regno]; - } - --static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { -- .gamma_set = nouveau_fbcon_gamma_set, -- .gamma_get = nouveau_fbcon_gamma_get --}; -- --#if defined(__i386__) || defined(__x86_64__) --static bool --nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev) -+static void -+nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev) - { -- struct pci_dev *pdev = dev->pdev; -- int ramin; -- -- if (screen_info.orig_video_isVGA != 
VIDEO_TYPE_VLFB && -- screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) -- return false; -- -- if (screen_info.lfb_base < pci_resource_start(pdev, 1)) -- goto not_fb; -- -- if (screen_info.lfb_base + screen_info.lfb_size >= -- pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1)) -- goto not_fb; -- -- return true; --not_fb: -- ramin = 2; -- if (pci_resource_len(pdev, ramin) == 0) { -- ramin = 3; -- if (pci_resource_len(pdev, ramin) == 0) -- return false; -- } -- -- if (screen_info.lfb_base < pci_resource_start(pdev, ramin)) -- return false; -- -- if (screen_info.lfb_base + screen_info.lfb_size >= -- pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin)) -- return false; -- -- return true; --} --#endif -- --void --nouveau_fbcon_zfill(struct drm_device *dev) --{ -- struct drm_nouveau_private *dev_priv = dev->dev_private; -- struct fb_info *info = dev_priv->fbdev_info; -+ struct fb_info *info = nfbdev->helper.fbdev; - struct fb_fillrect rect; - - /* Clear the entire fbcon. The drm will program every connector -@@ -218,28 +171,27 @@ nouveau_fbcon_zfill(struct drm_device *dev) - } - - static int --nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, -- uint32_t fb_height, uint32_t surface_width, -- uint32_t surface_height, uint32_t surface_depth, -- uint32_t surface_bpp, struct drm_framebuffer **pfb) -+nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, -+ struct drm_fb_helper_surface_size *sizes) - { -+ struct drm_device *dev = nfbdev->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct fb_info *info; -- struct nouveau_fbcon_par *par; - struct drm_framebuffer *fb; - struct nouveau_framebuffer *nouveau_fb; - struct nouveau_bo *nvbo; - struct drm_mode_fb_cmd mode_cmd; -- struct device *device = &dev->pdev->dev; -+ struct pci_dev *pdev = dev->pdev; -+ struct device *device = &pdev->dev; - int size, ret; - -- mode_cmd.width = surface_width; -- mode_cmd.height = surface_height; -+ mode_cmd.width = sizes->surface_width; -+ mode_cmd.height = sizes->surface_height; - -- mode_cmd.bpp = surface_bpp; -+ mode_cmd.bpp = sizes->surface_bpp; - mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3); - mode_cmd.pitch = roundup(mode_cmd.pitch, 256); -- mode_cmd.depth = surface_depth; -+ mode_cmd.depth = sizes->surface_depth; - - size = mode_cmd.pitch * mode_cmd.height; - size = roundup(size, PAGE_SIZE); -@@ -268,31 +220,28 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, - - mutex_lock(&dev->struct_mutex); - -- fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd); -- if (!fb) { -+ info = framebuffer_alloc(0, device); -+ if (!info) { - ret = -ENOMEM; -- NV_ERROR(dev, "failed to allocate fb.\n"); - goto out_unref; - } - -- list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); -- -- nouveau_fb = nouveau_framebuffer(fb); -- *pfb = fb; -- -- info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device); -- if (!info) { -+ ret = fb_alloc_cmap(&info->cmap, 256, 0); -+ if (ret) { - ret = -ENOMEM; - goto out_unref; - } - -- par = info->par; -- par->helper.funcs = &nouveau_fbcon_helper_funcs; -- par->helper.dev = dev; -- ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4); -- if (ret) -- goto out_unref; -- dev_priv->fbdev_info = info; -+ info->par = nfbdev; -+ -+ nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo); -+ -+ nouveau_fb = &nfbdev->nouveau_fb; -+ fb = &nouveau_fb->base; -+ -+ /* setup helper */ -+ nfbdev->helper.fb = fb; -+ nfbdev->helper.fbdev = info; - - strcpy(info->fix.id, "nouveaufb"); - if (nouveau_nofbaccel) -@@ 
-310,31 +259,17 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, - info->screen_size = size; - - drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); -- drm_fb_helper_fill_var(info, fb, fb_width, fb_height); -+ drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height); - - /* FIXME: we really shouldn't expose mmio space at all */ -- info->fix.mmio_start = pci_resource_start(dev->pdev, 1); -- info->fix.mmio_len = pci_resource_len(dev->pdev, 1); -+ info->fix.mmio_start = pci_resource_start(pdev, 1); -+ info->fix.mmio_len = pci_resource_len(pdev, 1); - - /* Set aperture base/size for vesafb takeover */ --#if defined(__i386__) || defined(__x86_64__) -- if (nouveau_fbcon_has_vesafb_or_efifb(dev)) { -- /* Some NVIDIA VBIOS' are stupid and decide to put the -- * framebuffer in the middle of the PRAMIN BAR for -- * whatever reason. We need to know the exact lfb_base -- * to get vesafb kicked off, and the only reliable way -- * we have left is to find out lfb_base the same way -- * vesafb did. -- */ -- info->aperture_base = screen_info.lfb_base; -- info->aperture_size = screen_info.lfb_size; -- if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB) -- info->aperture_size *= 65536; -- } else --#endif -- { -- info->aperture_base = info->fix.mmio_start; -- info->aperture_size = info->fix.mmio_len; -+ info->apertures = dev_priv->apertures; -+ if (!info->apertures) { -+ ret = -ENOMEM; -+ goto out_unref; - } - - info->pixmap.size = 64*1024; -@@ -343,11 +278,6 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, - info->pixmap.flags = FB_PIXMAP_SYSTEM; - info->pixmap.scan_align = 1; - -- fb->fbdev = info; -- -- par->nouveau_fb = nouveau_fb; -- par->dev = dev; -- - if (dev_priv->channel && !nouveau_nofbaccel) { - switch (dev_priv->card_type) { - case NV_50: -@@ -361,7 +291,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, - }; - } - -- nouveau_fbcon_zfill(dev); -+ nouveau_fbcon_zfill(dev, nfbdev); - - /* To allow resizeing without swapping buffers */ - NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n", -@@ -379,44 +309,129 @@ out: - return ret; - } - --int --nouveau_fbcon_probe(struct drm_device *dev) -+static int -+nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper, -+ struct drm_fb_helper_surface_size *sizes) - { -- NV_DEBUG_KMS(dev, "\n"); -+ struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper; -+ int new_fb = 0; -+ int ret; -+ -+ if (!helper->fb) { -+ ret = nouveau_fbcon_create(nfbdev, sizes); -+ if (ret) -+ return ret; -+ new_fb = 1; -+ } -+ return new_fb; -+} - -- return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create); -+void -+nouveau_fbcon_output_poll_changed(struct drm_device *dev) -+{ -+ struct drm_nouveau_private *dev_priv = dev->dev_private; -+ drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper); - } - - int --nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb) -+nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev) - { -- struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb); -+ struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb; - struct fb_info *info; - -- if (!fb) -- return -EINVAL; -- -- info = fb->fbdev; -- if (info) { -- struct nouveau_fbcon_par *par = info->par; -- -+ if (nfbdev->helper.fbdev) { -+ info = nfbdev->helper.fbdev; - unregister_framebuffer(info); -+ if (info->cmap.len) -+ fb_dealloc_cmap(&info->cmap); -+ framebuffer_release(info); -+ } -+ -+ if (nouveau_fb->nvbo) { - nouveau_bo_unmap(nouveau_fb->nvbo); 
- drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); - nouveau_fb->nvbo = NULL; -- if (par) -- drm_fb_helper_free(&par->helper); -- framebuffer_release(info); - } -- -+ drm_fb_helper_fini(&nfbdev->helper); -+ drm_framebuffer_cleanup(&nouveau_fb->base); - return 0; - } - - void nouveau_fbcon_gpu_lockup(struct fb_info *info) - { -- struct nouveau_fbcon_par *par = info->par; -- struct drm_device *dev = par->dev; -+ struct nouveau_fbdev *nfbdev = info->par; -+ struct drm_device *dev = nfbdev->dev; - - NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); - info->flags |= FBINFO_HWACCEL_DISABLED; - } -+ -+static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { -+ .gamma_set = nouveau_fbcon_gamma_set, -+ .gamma_get = nouveau_fbcon_gamma_get, -+ .fb_probe = nouveau_fbcon_find_or_create_single, -+}; -+ -+ -+int nouveau_fbcon_init(struct drm_device *dev) -+{ -+ struct drm_nouveau_private *dev_priv = dev->dev_private; -+ struct nouveau_fbdev *nfbdev; -+ int ret; -+ -+ nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); -+ if (!nfbdev) -+ return -ENOMEM; -+ -+ nfbdev->dev = dev; -+ dev_priv->nfbdev = nfbdev; -+ nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs; -+ -+ ret = drm_fb_helper_init(dev, &nfbdev->helper, 2, 4); -+ if (ret) { -+ kfree(nfbdev); -+ return ret; -+ } -+ -+ drm_fb_helper_single_add_all_connectors(&nfbdev->helper); -+ drm_fb_helper_initial_config(&nfbdev->helper, 32); -+ return 0; -+} -+ -+void nouveau_fbcon_fini(struct drm_device *dev) -+{ -+ struct drm_nouveau_private *dev_priv = dev->dev_private; -+ -+ if (!dev_priv->nfbdev) -+ return; -+ -+ nouveau_fbcon_destroy(dev, dev_priv->nfbdev); -+ kfree(dev_priv->nfbdev); -+ dev_priv->nfbdev = NULL; -+} -+ -+void nouveau_fbcon_save_disable_accel(struct drm_device *dev) -+{ -+ struct drm_nouveau_private *dev_priv = dev->dev_private; -+ -+ dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags; -+ dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; -+} -+ -+void nouveau_fbcon_restore_accel(struct drm_device *dev) -+{ -+ struct drm_nouveau_private *dev_priv = dev->dev_private; -+ dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags; -+} -+ -+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state) -+{ -+ struct drm_nouveau_private *dev_priv = dev->dev_private; -+ fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state); -+} -+ -+void nouveau_fbcon_zfill_all(struct drm_device *dev) -+{ -+ struct drm_nouveau_private *dev_priv = dev->dev_private; -+ nouveau_fbcon_zfill(dev, dev_priv->nfbdev); -+} -diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h -index f9c34e1..e7e1268 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h -+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h -@@ -29,16 +29,16 @@ - - #include "drm_fb_helper.h" - --struct nouveau_fbcon_par { -+#include "nouveau_fb.h" -+struct nouveau_fbdev { - struct drm_fb_helper helper; -+ struct nouveau_framebuffer nouveau_fb; -+ struct list_head fbdev_list; - struct drm_device *dev; -- struct nouveau_framebuffer *nouveau_fb; -+ unsigned int saved_flags; - }; - --int nouveau_fbcon_probe(struct drm_device *dev); --int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb); - void nouveau_fbcon_restore(void); --void nouveau_fbcon_zfill(struct drm_device *dev); - - void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); - void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); -@@ -50,5 +50,14 @@ void 
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); - int nv50_fbcon_accel_init(struct fb_info *info); - - void nouveau_fbcon_gpu_lockup(struct fb_info *info); -+ -+int nouveau_fbcon_init(struct drm_device *dev); -+void nouveau_fbcon_fini(struct drm_device *dev); -+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state); -+void nouveau_fbcon_zfill_all(struct drm_device *dev); -+void nouveau_fbcon_save_disable_accel(struct drm_device *dev); -+void nouveau_fbcon_restore_accel(struct drm_device *dev); -+ -+void nouveau_fbcon_output_poll_changed(struct drm_device *dev); - #endif /* __NV50_FBCON_H__ */ - -diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c -index 1bc0b38..69c76cf 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_gem.c -+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c -@@ -57,6 +57,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem) - } - - ttm_bo_unref(&bo); -+ -+ drm_gem_object_release(gem); -+ kfree(gem); - } - - int -@@ -382,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, - - nvbo->channel = chan; - ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, -- false, false); -+ false, false, false); - nvbo->channel = NULL; - if (unlikely(ret)) { - NV_ERROR(dev, "fail ttm_validate\n"); -diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c -index 32f0e49..f731c5f 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_grctx.c -+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c -@@ -68,13 +68,12 @@ nouveau_grctx_prog_load(struct drm_device *dev) - return ret; - } - -- pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL); -+ pgraph->ctxprog = kmemdup(fw->data, fw->size, GFP_KERNEL); - if (!pgraph->ctxprog) { - NV_ERROR(dev, "OOM copying ctxprog\n"); - release_firmware(fw); - return -ENOMEM; - } -- memcpy(pgraph->ctxprog, fw->data, fw->size); - - cp = pgraph->ctxprog; - if (le32_to_cpu(cp->signature) != 0x5043564e || -@@ -97,14 +96,13 @@ nouveau_grctx_prog_load(struct drm_device *dev) - return ret; - } - -- pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); -+ pgraph->ctxvals = kmemdup(fw->data, fw->size, GFP_KERNEL); - if (!pgraph->ctxvals) { - NV_ERROR(dev, "OOM copying ctxvals\n"); - release_firmware(fw); - nouveau_grctx_fini(dev); - return -ENOMEM; - } -- memcpy(pgraph->ctxvals, fw->data, fw->size); - - cv = (void *)pgraph->ctxvals; - if (le32_to_cpu(cv->signature) != 0x5643564e || -diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c -index 88583e7..316a3c7 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_i2c.c -+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c -@@ -254,16 +254,27 @@ struct nouveau_i2c_chan * - nouveau_i2c_find(struct drm_device *dev, int index) - { - struct drm_nouveau_private *dev_priv = dev->dev_private; -- struct nvbios *bios = &dev_priv->vbios; -+ struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index]; - - if (index >= DCB_MAX_NUM_I2C_ENTRIES) - return NULL; - -- if (!bios->dcb.i2c[index].chan) { -- if (nouveau_i2c_init(dev, &bios->dcb.i2c[index], index)) -- return NULL; -+ if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) { -+ uint32_t reg = 0xe500, val; -+ -+ if (i2c->port_type == 6) { -+ reg += i2c->read * 0x50; -+ val = 0x2002; -+ } else { -+ reg += ((i2c->entry & 0x1e00) >> 9) * 0x50; -+ val = 0xe001; -+ } -+ -+ nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val); - } - -- return bios->dcb.i2c[index].chan; -+ if (!i2c->chan && nouveau_i2c_init(dev, i2c, index)) -+ return NULL; -+ return 
i2c->chan; - } - -diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c -index 13e73ce..53360f1 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_irq.c -+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c -@@ -1204,7 +1204,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS) - { - struct drm_device *dev = (struct drm_device *)arg; - struct drm_nouveau_private *dev_priv = dev->dev_private; -- uint32_t status, fbdev_flags = 0; -+ uint32_t status; - unsigned long flags; - - status = nv_rd32(dev, NV03_PMC_INTR_0); -@@ -1213,11 +1213,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS) - - spin_lock_irqsave(&dev_priv->context_switch_lock, flags); - -- if (dev_priv->fbdev_info) { -- fbdev_flags = dev_priv->fbdev_info->flags; -- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; -- } -- - if (status & NV_PMC_INTR_0_PFIFO_PENDING) { - nouveau_fifo_irq_handler(dev); - status &= ~NV_PMC_INTR_0_PFIFO_PENDING; -@@ -1247,9 +1242,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS) - if (status) - NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status); - -- if (dev_priv->fbdev_info) -- dev_priv->fbdev_info->flags = fbdev_flags; -- - spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); - - return IRQ_HANDLED; -diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c -index 775a701..c1fd42b 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_mem.c -+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c -@@ -540,7 +540,8 @@ nouveau_mem_detect(struct drm_device *dev) - dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); - dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK; - if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) -- dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; -+ dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10); -+ dev_priv->vram_sys_base <<= 12; - } - - NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); -diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h -index aa9b310..6ca80a3 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_reg.h -+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h -@@ -826,6 +826,7 @@ - #define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000 - #define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80) - #define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80) -+#define NV50_SOR_DP_UNK128(i,l) (0x0061c128 + (i) * 0x800 + (l) * 0x80) - #define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80) - - #define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000) -diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c -index e171064..b02a231 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_state.c -+++ b/drivers/gpu/drm/nouveau/nouveau_state.c -@@ -34,6 +34,7 @@ - - #include "nouveau_drv.h" - #include "nouveau_drm.h" -+#include "nouveau_fbcon.h" - #include "nv50_display.h" - - static void nouveau_stub_takedown(struct drm_device *dev) {} -@@ -375,12 +376,15 @@ out_err: - static void nouveau_switcheroo_set_state(struct pci_dev *pdev, - enum vga_switcheroo_state state) - { -+ struct drm_device *dev = pci_get_drvdata(pdev); - pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; - if (state == VGA_SWITCHEROO_ON) { - printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); - nouveau_pci_resume(pdev); -+ drm_kms_helper_poll_enable(dev); - } else { - printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); -+ drm_kms_helper_poll_disable(dev); - nouveau_pci_suspend(pdev, pmm); - } - } -@@ -515,8 +519,10 @@ 
nouveau_card_init(struct drm_device *dev) - - dev_priv->init_state = NOUVEAU_CARD_INIT_DONE; - -- if (drm_core_check_feature(dev, DRIVER_MODESET)) -- drm_helper_initial_config(dev); -+ if (drm_core_check_feature(dev, DRIVER_MODESET)) { -+ nouveau_fbcon_init(dev); -+ drm_kms_helper_poll_init(dev); -+ } - - return 0; - -@@ -563,6 +569,7 @@ static void nouveau_card_takedown(struct drm_device *dev) - NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state); - - if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) { -+ - nouveau_backlight_exit(dev); - - if (dev_priv->channel) { -@@ -637,6 +644,48 @@ static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev) - #endif - } - -+static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev) -+{ -+ struct pci_dev *pdev = dev->pdev; -+ struct apertures_struct *aper = alloc_apertures(3); -+ if (!aper) -+ return NULL; -+ -+ aper->ranges[0].base = pci_resource_start(pdev, 1); -+ aper->ranges[0].size = pci_resource_len(pdev, 1); -+ aper->count = 1; -+ -+ if (pci_resource_len(pdev, 2)) { -+ aper->ranges[aper->count].base = pci_resource_start(pdev, 2); -+ aper->ranges[aper->count].size = pci_resource_len(pdev, 2); -+ aper->count++; -+ } -+ -+ if (pci_resource_len(pdev, 3)) { -+ aper->ranges[aper->count].base = pci_resource_start(pdev, 3); -+ aper->ranges[aper->count].size = pci_resource_len(pdev, 3); -+ aper->count++; -+ } -+ -+ return aper; -+} -+ -+static int nouveau_remove_conflicting_drivers(struct drm_device *dev) -+{ -+ struct drm_nouveau_private *dev_priv = dev->dev_private; -+ bool primary = false; -+ dev_priv->apertures = nouveau_get_apertures(dev); -+ if (!dev_priv->apertures) -+ return -ENOMEM; -+ -+#ifdef CONFIG_X86 -+ primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; -+#endif -+ -+ remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary); -+ return 0; -+} -+ - int nouveau_load(struct drm_device *dev, unsigned long flags) - { - struct drm_nouveau_private *dev_priv; -@@ -724,29 +773,30 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) - NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", - dev_priv->card_type, reg0); - -- /* map larger RAMIN aperture on NV40 cards */ -- dev_priv->ramin = NULL; -+ if (drm_core_check_feature(dev, DRIVER_MODESET)) { -+ int ret = nouveau_remove_conflicting_drivers(dev); -+ if (ret) -+ return ret; -+ } -+ -+ /* Map PRAMIN BAR, or on older cards, the aperture withing BAR0 */ - if (dev_priv->card_type >= NV_40) { - int ramin_bar = 2; - if (pci_resource_len(dev->pdev, ramin_bar) == 0) - ramin_bar = 3; - - dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar); -- dev_priv->ramin = ioremap( -- pci_resource_start(dev->pdev, ramin_bar), -+ dev_priv->ramin = -+ ioremap(pci_resource_start(dev->pdev, ramin_bar), - dev_priv->ramin_size); - if (!dev_priv->ramin) { -- NV_ERROR(dev, "Failed to init RAMIN mapping, " -- "limited instance memory available\n"); -+ NV_ERROR(dev, "Failed to PRAMIN BAR"); -+ return -ENOMEM; - } -- } -- -- /* On older cards (or if the above failed), create a map covering -- * the BAR0 PRAMIN aperture */ -- if (!dev_priv->ramin) { -+ } else { - dev_priv->ramin_size = 1 * 1024 * 1024; - dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN, -- dev_priv->ramin_size); -+ dev_priv->ramin_size); - if (!dev_priv->ramin) { - NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n"); - return -ENOMEM; -@@ -794,6 +844,8 @@ int nouveau_unload(struct drm_device *dev) - struct drm_nouveau_private *dev_priv = dev->dev_private; - - if 
(drm_core_check_feature(dev, DRIVER_MODESET)) { -+ drm_kms_helper_poll_fini(dev); -+ nouveau_fbcon_fini(dev); - if (dev_priv->card_type >= NV_50) - nv50_display_destroy(dev); - else -@@ -859,6 +911,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, - case NOUVEAU_GETPARAM_VM_VRAM_BASE: - getparam->value = dev_priv->vm_vram_base; - break; -+ case NOUVEAU_GETPARAM_PTIMER_TIME: -+ getparam->value = dev_priv->engine.timer.read(dev); -+ break; - case NOUVEAU_GETPARAM_GRAPH_UNITS: - /* NV40 and NV50 versions are quite different, but register - * address is the same. User is supposed to know the card -diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c -index 89a91b9..aaf3de3 100644 ---- a/drivers/gpu/drm/nouveau/nv04_cursor.c -+++ b/drivers/gpu/drm/nouveau/nv04_cursor.c -@@ -20,6 +20,7 @@ nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update) - static void - nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) - { -+ nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y; - NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index, - NV_PRAMDAC_CU_START_POS, - XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) | -diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c -index 813b25c..1eeac4f 100644 ---- a/drivers/gpu/drm/nouveau/nv04_fbcon.c -+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c -@@ -30,8 +30,8 @@ - void - nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) - { -- struct nouveau_fbcon_par *par = info->par; -- struct drm_device *dev = par->dev; -+ struct nouveau_fbdev *nfbdev = info->par; -+ struct drm_device *dev = nfbdev->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan = dev_priv->channel; - -@@ -57,8 +57,8 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) - void - nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) - { -- struct nouveau_fbcon_par *par = info->par; -- struct drm_device *dev = par->dev; -+ struct nouveau_fbdev *nfbdev = info->par; -+ struct drm_device *dev = nfbdev->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan = dev_priv->channel; - -@@ -91,8 +91,8 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) - void - nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) - { -- struct nouveau_fbcon_par *par = info->par; -- struct drm_device *dev = par->dev; -+ struct nouveau_fbdev *nfbdev = info->par; -+ struct drm_device *dev = nfbdev->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan = dev_priv->channel; - uint32_t fg; -@@ -179,8 +179,8 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle) - int - nv04_fbcon_accel_init(struct fb_info *info) - { -- struct nouveau_fbcon_par *par = info->par; -- struct drm_device *dev = par->dev; -+ struct nouveau_fbdev *nfbdev = info->par; -+ struct drm_device *dev = nfbdev->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan = dev_priv->channel; - const int sub = NvSubCtxSurf2D; -@@ -236,7 +236,7 @@ nv04_fbcon_accel_init(struct fb_info *info) - if (ret) - return ret; - -- ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ? -+ ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ? 
- 0x009f : 0x005f, NvImageBlit); - if (ret) - return ret; -diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c -index e260986..618355e 100644 ---- a/drivers/gpu/drm/nouveau/nv04_graph.c -+++ b/drivers/gpu/drm/nouveau/nv04_graph.c -@@ -532,9 +532,82 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass, - return 0; - } - --static int --nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, -- int mthd, uint32_t data) -+/* -+ * Software methods, why they are needed, and how they all work: -+ * -+ * NV04 and NV05 keep most of the state in PGRAPH context itself, but some -+ * 2d engine settings are kept inside the grobjs themselves. The grobjs are -+ * 3 words long on both. grobj format on NV04 is: -+ * -+ * word 0: -+ * - bits 0-7: class -+ * - bit 12: color key active -+ * - bit 13: clip rect active -+ * - bit 14: if set, destination surface is swizzled and taken from buffer 5 -+ * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken -+ * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or -+ * NV03_CONTEXT_SURFACE_DST]. -+ * - bits 15-17: 2d operation [aka patch config] -+ * - bit 24: patch valid [enables rendering using this object] -+ * - bit 25: surf3d valid [for tex_tri and multitex_tri only] -+ * word 1: -+ * - bits 0-1: mono format -+ * - bits 8-13: color format -+ * - bits 16-31: DMA_NOTIFY instance -+ * word 2: -+ * - bits 0-15: DMA_A instance -+ * - bits 16-31: DMA_B instance -+ * -+ * On NV05 it's: -+ * -+ * word 0: -+ * - bits 0-7: class -+ * - bit 12: color key active -+ * - bit 13: clip rect active -+ * - bit 14: if set, destination surface is swizzled and taken from buffer 5 -+ * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken -+ * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or -+ * NV03_CONTEXT_SURFACE_DST]. -+ * - bits 15-17: 2d operation [aka patch config] -+ * - bits 20-22: dither mode -+ * - bit 24: patch valid [enables rendering using this object] -+ * - bit 25: surface_dst/surface_color/surf2d/surf3d valid -+ * - bit 26: surface_src/surface_zeta valid -+ * - bit 27: pattern valid -+ * - bit 28: rop valid -+ * - bit 29: beta1 valid -+ * - bit 30: beta4 valid -+ * word 1: -+ * - bits 0-1: mono format -+ * - bits 8-13: color format -+ * - bits 16-31: DMA_NOTIFY instance -+ * word 2: -+ * - bits 0-15: DMA_A instance -+ * - bits 16-31: DMA_B instance -+ * -+ * NV05 will set/unset the relevant valid bits when you poke the relevant -+ * object-binding methods with object of the proper type, or with the NULL -+ * type. It'll only allow rendering using the grobj if all needed objects -+ * are bound. The needed set of objects depends on selected operation: for -+ * example rop object is needed by ROP_AND, but not by SRCCOPY_AND. -+ * -+ * NV04 doesn't have these methods implemented at all, and doesn't have the -+ * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24 -+ * is set. So we have to emulate them in software, internally keeping the -+ * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04, -+ * but the last word isn't actually used for anything, we abuse it for this -+ * purpose. -+ * -+ * Actually, NV05 can optionally check bit 24 too, but we disable this since -+ * there's no use for it. -+ * -+ * For unknown reasons, NV04 implements surf3d binding in hardware as an -+ * exception. Also for unknown reasons, NV04 doesn't implement the clipping -+ * methods on the surf3d object, so we have to emulate them too. 
-+ */ -+ -+static void -+nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value) - { - struct drm_device *dev = chan->dev; - uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; -@@ -542,42 +615,509 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, - uint32_t tmp; - - tmp = nv_ri32(dev, instance); -- tmp &= ~0x00038000; -- tmp |= ((data & 7) << 15); -+ tmp &= ~mask; -+ tmp |= value; - - nv_wi32(dev, instance, tmp); - nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp); - nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp); -+} -+ -+static void -+nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value) -+{ -+ struct drm_device *dev = chan->dev; -+ uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; -+ uint32_t tmp, ctx1; -+ int class, op, valid = 1; -+ -+ ctx1 = nv_ri32(dev, instance); -+ class = ctx1 & 0xff; -+ op = (ctx1 >> 15) & 7; -+ tmp = nv_ri32(dev, instance + 0xc); -+ tmp &= ~mask; -+ tmp |= value; -+ nv_wi32(dev, instance + 0xc, tmp); -+ -+ /* check for valid surf2d/surf_dst/surf_color */ -+ if (!(tmp & 0x02000000)) -+ valid = 0; -+ /* check for valid surf_src/surf_zeta */ -+ if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000)) -+ valid = 0; -+ -+ switch (op) { -+ /* SRCCOPY_AND, SRCCOPY: no extra objects required */ -+ case 0: -+ case 3: -+ break; -+ /* ROP_AND: requires pattern and rop */ -+ case 1: -+ if (!(tmp & 0x18000000)) -+ valid = 0; -+ break; -+ /* BLEND_AND: requires beta1 */ -+ case 2: -+ if (!(tmp & 0x20000000)) -+ valid = 0; -+ break; -+ /* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */ -+ case 4: -+ case 5: -+ if (!(tmp & 0x40000000)) -+ valid = 0; -+ break; -+ } -+ -+ nv04_graph_set_ctx1(chan, 0x01000000, valid << 24); -+} -+ -+static int -+nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, -+ int mthd, uint32_t data) -+{ -+ if (data > 5) -+ return 1; -+ /* Old versions of the objects only accept first three operations. */ -+ if (data > 2 && grclass < 0x40) -+ return 1; -+ nv04_graph_set_ctx1(chan, 0x00038000, data << 15); -+ /* changing operation changes set of objects needed for validation */ -+ nv04_graph_set_ctx_val(chan, 0, 0); -+ return 0; -+} -+ -+static int -+nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass, -+ int mthd, uint32_t data) -+{ -+ uint32_t min = data & 0xffff, max; -+ uint32_t w = data >> 16; -+ if (min & 0x8000) -+ /* too large */ -+ return 1; -+ if (w & 0x8000) -+ /* yes, it accepts negative for some reason. */ -+ w |= 0xffff0000; -+ max = min + w; -+ max &= 0x3ffff; -+ nv_wr32(chan->dev, 0x40053c, min); -+ nv_wr32(chan->dev, 0x400544, max); -+ return 0; -+} -+ -+static int -+nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass, -+ int mthd, uint32_t data) -+{ -+ uint32_t min = data & 0xffff, max; -+ uint32_t w = data >> 16; -+ if (min & 0x8000) -+ /* too large */ -+ return 1; -+ if (w & 0x8000) -+ /* yes, it accepts negative for some reason. 
*/ -+ w |= 0xffff0000; -+ max = min + w; -+ max &= 0x3ffff; -+ nv_wr32(chan->dev, 0x400540, min); -+ nv_wr32(chan->dev, 0x400548, max); - return 0; - } - -+static int -+nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass, -+ int mthd, uint32_t data) -+{ -+ switch (nv_ri32(chan->dev, data << 4) & 0xff) { -+ case 0x30: -+ nv04_graph_set_ctx1(chan, 0x00004000, 0); -+ nv04_graph_set_ctx_val(chan, 0x02000000, 0); -+ return 0; -+ case 0x42: -+ nv04_graph_set_ctx1(chan, 0x00004000, 0); -+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); -+ return 0; -+ } -+ return 1; -+} -+ -+static int -+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass, -+ int mthd, uint32_t data) -+{ -+ switch (nv_ri32(chan->dev, data << 4) & 0xff) { -+ case 0x30: -+ nv04_graph_set_ctx1(chan, 0x00004000, 0); -+ nv04_graph_set_ctx_val(chan, 0x02000000, 0); -+ return 0; -+ case 0x42: -+ nv04_graph_set_ctx1(chan, 0x00004000, 0); -+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); -+ return 0; -+ case 0x52: -+ nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000); -+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); -+ return 0; -+ } -+ return 1; -+} -+ -+static int -+nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass, -+ int mthd, uint32_t data) -+{ -+ switch (nv_ri32(chan->dev, data << 4) & 0xff) { -+ case 0x30: -+ nv04_graph_set_ctx_val(chan, 0x08000000, 0); -+ return 0; -+ case 0x18: -+ nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000); -+ return 0; -+ } -+ return 1; -+} -+ -+static int -+nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass, -+ int mthd, uint32_t data) -+{ -+ switch (nv_ri32(chan->dev, data << 4) & 0xff) { -+ case 0x30: -+ nv04_graph_set_ctx_val(chan, 0x08000000, 0); -+ return 0; -+ case 0x44: -+ nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000); -+ return 0; -+ } -+ return 1; -+} -+ -+static int -+nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass, -+ int mthd, uint32_t data) -+{ -+ switch (nv_ri32(chan->dev, data << 4) & 0xff) { -+ case 0x30: -+ nv04_graph_set_ctx_val(chan, 0x10000000, 0); -+ return 0; -+ case 0x43: -+ nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000); -+ return 0; -+ } -+ return 1; -+} -+ -+static int -+nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass, -+ int mthd, uint32_t data) -+{ -+ switch (nv_ri32(chan->dev, data << 4) & 0xff) { -+ case 0x30: -+ nv04_graph_set_ctx_val(chan, 0x20000000, 0); -+ return 0; -+ case 0x12: -+ nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000); -+ return 0; -+ } -+ return 1; -+} -+ -+static int -+nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass, -+ int mthd, uint32_t data) -+{ -+ switch (nv_ri32(chan->dev, data << 4) & 0xff) { -+ case 0x30: -+ nv04_graph_set_ctx_val(chan, 0x40000000, 0); -+ return 0; -+ case 0x72: -+ nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000); -+ return 0; -+ } -+ return 1; -+} -+ -+static int -+nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass, -+ int mthd, uint32_t data) -+{ -+ switch (nv_ri32(chan->dev, data << 4) & 0xff) { -+ case 0x30: -+ nv04_graph_set_ctx_val(chan, 0x02000000, 0); -+ return 0; -+ case 0x58: -+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); -+ return 0; -+ } -+ return 1; -+} -+ -+static int -+nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass, -+ int mthd, uint32_t data) -+{ -+ switch (nv_ri32(chan->dev, data << 4) & 0xff) { -+ case 0x30: -+ nv04_graph_set_ctx_val(chan, 0x04000000, 0); -+ return 0; 
-+ case 0x59: -+ nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000); -+ return 0; -+ } -+ return 1; -+} -+ -+static int -+nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass, -+ int mthd, uint32_t data) -+{ -+ switch (nv_ri32(chan->dev, data << 4) & 0xff) { -+ case 0x30: -+ nv04_graph_set_ctx_val(chan, 0x02000000, 0); -+ return 0; -+ case 0x5a: -+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000); -+ return 0; -+ } -+ return 1; -+} -+ -+static int -+nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass, -+ int mthd, uint32_t data) -+{ -+ switch (nv_ri32(chan->dev, data << 4) & 0xff) { -+ case 0x30: -+ nv04_graph_set_ctx_val(chan, 0x04000000, 0); -+ return 0; -+ case 0x5b: -+ nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000); -+ return 0; -+ } -+ return 1; -+} -+ -+static int -+nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass, -+ int mthd, uint32_t data) -+{ -+ switch (nv_ri32(chan->dev, data << 4) & 0xff) { -+ case 0x30: -+ nv04_graph_set_ctx1(chan, 0x2000, 0); -+ return 0; -+ case 0x19: -+ nv04_graph_set_ctx1(chan, 0x2000, 0x2000); -+ return 0; -+ } -+ return 1; -+} -+ -+static int -+nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass, -+ int mthd, uint32_t data) -+{ -+ switch (nv_ri32(chan->dev, data << 4) & 0xff) { -+ case 0x30: -+ nv04_graph_set_ctx1(chan, 0x1000, 0); -+ return 0; -+ /* Yes, for some reason even the old versions of objects -+ * accept 0x57 and not 0x17. Consistency be damned. -+ */ -+ case 0x57: -+ nv04_graph_set_ctx1(chan, 0x1000, 0x1000); -+ return 0; -+ } -+ return 1; -+} -+ - static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = { - { 0x0150, nv04_graph_mthd_set_ref }, - {} - }; - --static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = { -+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = { -+ { 0x0184, nv04_graph_mthd_bind_nv01_patt }, -+ { 0x0188, nv04_graph_mthd_bind_rop }, -+ { 0x018c, nv04_graph_mthd_bind_beta1 }, -+ { 0x0190, nv04_graph_mthd_bind_surf_dst }, -+ { 0x02fc, nv04_graph_mthd_set_operation }, -+ {}, -+}; -+ -+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = { -+ { 0x0188, nv04_graph_mthd_bind_nv04_patt }, -+ { 0x018c, nv04_graph_mthd_bind_rop }, -+ { 0x0190, nv04_graph_mthd_bind_beta1 }, -+ { 0x0194, nv04_graph_mthd_bind_beta4 }, -+ { 0x0198, nv04_graph_mthd_bind_surf2d }, -+ { 0x02fc, nv04_graph_mthd_set_operation }, -+ {}, -+}; -+ -+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = { -+ { 0x0184, nv04_graph_mthd_bind_chroma }, -+ { 0x0188, nv04_graph_mthd_bind_clip }, -+ { 0x018c, nv04_graph_mthd_bind_nv01_patt }, -+ { 0x0190, nv04_graph_mthd_bind_rop }, -+ { 0x0194, nv04_graph_mthd_bind_beta1 }, -+ { 0x0198, nv04_graph_mthd_bind_surf_dst }, -+ { 0x019c, nv04_graph_mthd_bind_surf_src }, -+ { 0x02fc, nv04_graph_mthd_set_operation }, -+ {}, -+}; -+ -+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = { -+ { 0x0184, nv04_graph_mthd_bind_chroma }, -+ { 0x0188, nv04_graph_mthd_bind_clip }, -+ { 0x018c, nv04_graph_mthd_bind_nv04_patt }, -+ { 0x0190, nv04_graph_mthd_bind_rop }, -+ { 0x0194, nv04_graph_mthd_bind_beta1 }, -+ { 0x0198, nv04_graph_mthd_bind_beta4 }, -+ { 0x019c, nv04_graph_mthd_bind_surf2d }, -+ { 0x02fc, nv04_graph_mthd_set_operation }, -+ {}, -+}; -+ -+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = { -+ { 0x0188, nv04_graph_mthd_bind_chroma }, -+ { 0x018c, 
nv04_graph_mthd_bind_clip }, -+ { 0x0190, nv04_graph_mthd_bind_nv04_patt }, -+ { 0x0194, nv04_graph_mthd_bind_rop }, -+ { 0x0198, nv04_graph_mthd_bind_beta1 }, -+ { 0x019c, nv04_graph_mthd_bind_beta4 }, -+ { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf }, -+ { 0x03e4, nv04_graph_mthd_set_operation }, -+ {}, -+}; -+ -+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = { -+ { 0x0184, nv04_graph_mthd_bind_chroma }, -+ { 0x0188, nv04_graph_mthd_bind_clip }, -+ { 0x018c, nv04_graph_mthd_bind_nv01_patt }, -+ { 0x0190, nv04_graph_mthd_bind_rop }, -+ { 0x0194, nv04_graph_mthd_bind_beta1 }, -+ { 0x0198, nv04_graph_mthd_bind_surf_dst }, -+ { 0x02fc, nv04_graph_mthd_set_operation }, -+ {}, -+}; -+ -+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = { -+ { 0x0184, nv04_graph_mthd_bind_chroma }, -+ { 0x0188, nv04_graph_mthd_bind_nv01_patt }, -+ { 0x018c, nv04_graph_mthd_bind_rop }, -+ { 0x0190, nv04_graph_mthd_bind_beta1 }, -+ { 0x0194, nv04_graph_mthd_bind_surf_dst }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, - }; - -+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = { -+ { 0x0184, nv04_graph_mthd_bind_chroma }, -+ { 0x0188, nv04_graph_mthd_bind_nv04_patt }, -+ { 0x018c, nv04_graph_mthd_bind_rop }, -+ { 0x0190, nv04_graph_mthd_bind_beta1 }, -+ { 0x0194, nv04_graph_mthd_bind_beta4 }, -+ { 0x0198, nv04_graph_mthd_bind_surf2d }, -+ { 0x02fc, nv04_graph_mthd_set_operation }, -+ {}, -+}; -+ -+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = { -+ { 0x0188, nv04_graph_mthd_bind_nv01_patt }, -+ { 0x018c, nv04_graph_mthd_bind_rop }, -+ { 0x0190, nv04_graph_mthd_bind_beta1 }, -+ { 0x0194, nv04_graph_mthd_bind_surf_dst }, -+ { 0x0304, nv04_graph_mthd_set_operation }, -+ {}, -+}; -+ -+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = { -+ { 0x0188, nv04_graph_mthd_bind_nv04_patt }, -+ { 0x018c, nv04_graph_mthd_bind_rop }, -+ { 0x0190, nv04_graph_mthd_bind_beta1 }, -+ { 0x0194, nv04_graph_mthd_bind_beta4 }, -+ { 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf }, -+ { 0x0304, nv04_graph_mthd_set_operation }, -+ {}, -+}; -+ -+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = { -+ { 0x0184, nv04_graph_mthd_bind_clip }, -+ { 0x0188, nv04_graph_mthd_bind_nv01_patt }, -+ { 0x018c, nv04_graph_mthd_bind_rop }, -+ { 0x0190, nv04_graph_mthd_bind_beta1 }, -+ { 0x0194, nv04_graph_mthd_bind_surf_dst }, -+ { 0x02fc, nv04_graph_mthd_set_operation }, -+ {}, -+}; -+ -+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = { -+ { 0x0184, nv04_graph_mthd_bind_clip }, -+ { 0x0188, nv04_graph_mthd_bind_nv04_patt }, -+ { 0x018c, nv04_graph_mthd_bind_rop }, -+ { 0x0190, nv04_graph_mthd_bind_beta1 }, -+ { 0x0194, nv04_graph_mthd_bind_beta4 }, -+ { 0x0198, nv04_graph_mthd_bind_surf2d }, -+ { 0x02fc, nv04_graph_mthd_set_operation }, -+ {}, -+}; -+ -+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = { -+ { 0x0188, nv04_graph_mthd_bind_clip }, -+ { 0x018c, nv04_graph_mthd_bind_surf_color }, -+ { 0x0190, nv04_graph_mthd_bind_surf_zeta }, -+ {}, -+}; -+ -+static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = { -+ { 0x02f8, nv04_graph_mthd_surf3d_clip_h }, -+ { 0x02fc, nv04_graph_mthd_surf3d_clip_v }, -+ {}, -+}; -+ - struct nouveau_pgraph_object_class nv04_graph_grclass[] = { -- { 0x0039, false, NULL }, -- { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */ -- { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit 
*/ -- { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */ -- { 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */ -+ { 0x0038, false, NULL }, /* dvd subpicture */ -+ { 0x0039, false, NULL }, /* m2mf */ -+ { 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */ -+ { 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */ -+ { 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */ -+ { 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */ -+ { 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */ -+ { 0x0064, false, NULL }, /* nv05 iifc */ -+ { 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */ -+ { 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */ -+ { 0x0065, false, NULL }, /* nv05 ifc */ -+ { 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */ -+ { 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */ -+ { 0x0066, false, NULL }, /* nv05 sifc */ -+ { 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */ -+ { 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */ - { 0x0030, false, NULL }, /* null */ - { 0x0042, false, NULL }, /* surf2d */ - { 0x0043, false, NULL }, /* rop */ - { 0x0012, false, NULL }, /* beta1 */ - { 0x0072, false, NULL }, /* beta4 */ - { 0x0019, false, NULL }, /* cliprect */ -- { 0x0044, false, NULL }, /* pattern */ -+ { 0x0018, false, NULL }, /* nv01 pattern */ -+ { 0x0044, false, NULL }, /* nv04 pattern */ - { 0x0052, false, NULL }, /* swzsurf */ -- { 0x0053, false, NULL }, /* surf3d */ -+ { 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */ -+ { 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */ - { 0x0054, false, NULL }, /* tex_tri */ - { 0x0055, false, NULL }, /* multitex_tri */ -+ { 0x0017, false, NULL }, /* nv01 chroma */ -+ { 0x0057, false, NULL }, /* nv04 chroma */ -+ { 0x0058, false, NULL }, /* surf_dst */ -+ { 0x0059, false, NULL }, /* surf_src */ -+ { 0x005a, false, NULL }, /* surf_color */ -+ { 0x005b, false, NULL }, /* surf_zeta */ -+ { 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */ -+ { 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */ -+ { 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */ -+ { 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */ -+ { 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */ -+ { 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */ - { 0x506e, true, nv04_graph_mthds_sw }, - {} - }; -diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c -index 0616c96..704a25d 100644 ---- a/drivers/gpu/drm/nouveau/nv40_graph.c -+++ b/drivers/gpu/drm/nouveau/nv40_graph.c -@@ -253,7 +253,11 @@ nv40_graph_init(struct drm_device *dev) - - if (!dev_priv->engine.graph.ctxprog) { - struct nouveau_grctx ctx = {}; -- uint32_t cp[256]; -+ uint32_t *cp; -+ -+ cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL); -+ if (!cp) -+ return -ENOMEM; - - ctx.dev = dev; - ctx.mode = NOUVEAU_GRCTX_PROG; -@@ -265,6 +269,8 @@ nv40_graph_init(struct drm_device *dev) - nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); - for (i = 0; i < ctx.ctxprog_len; i++) - nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); -+ -+ kfree(cp); - } - - /* No context present currently */ -diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c -index 11b11c3..9b5c974 100644 ---- a/drivers/gpu/drm/nouveau/nv40_grctx.c -+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c -@@ -115,11 +115,6 @@ - - /* TODO: - * - get vs 
count from 0x1540 -- * - document unimplemented bits compared to nvidia -- * - nsource handling -- * - R0 & 0x0200 handling -- * - single-vs handling -- * - 400314 bit 0 - */ - - static int -diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c -new file mode 100644 -index 0000000..2cdc2bf ---- /dev/null -+++ b/drivers/gpu/drm/nouveau/nv50_calc.c -@@ -0,0 +1,87 @@ -+/* -+ * Copyright 2010 Red Hat Inc. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -+ * OTHER DEALINGS IN THE SOFTWARE. -+ * -+ * Authors: Ben Skeggs -+ */ -+ -+#include "drmP.h" -+#include "drm_fixed.h" -+#include "nouveau_drv.h" -+#include "nouveau_hw.h" -+ -+int -+nv50_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk, -+ int *N1, int *M1, int *N2, int *M2, int *P) -+{ -+ struct nouveau_pll_vals pll_vals; -+ int ret; -+ -+ ret = nouveau_calc_pll_mnp(dev, pll, clk, &pll_vals); -+ if (ret <= 0) -+ return ret; -+ -+ *N1 = pll_vals.N1; -+ *M1 = pll_vals.M1; -+ *N2 = pll_vals.N2; -+ *M2 = pll_vals.M2; -+ *P = pll_vals.log2P; -+ return ret; -+} -+ -+int -+nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk, -+ int *N, int *fN, int *M, int *P) -+{ -+ fixed20_12 fb_div, a, b; -+ -+ *P = pll->vco1.maxfreq / clk; -+ if (*P > pll->max_p) -+ *P = pll->max_p; -+ if (*P < pll->min_p) -+ *P = pll->min_p; -+ -+ /* *M = ceil(refclk / pll->vco.max_inputfreq); */ -+ a.full = dfixed_const(pll->refclk); -+ b.full = dfixed_const(pll->vco1.max_inputfreq); -+ a.full = dfixed_div(a, b); -+ a.full = dfixed_ceil(a); -+ *M = dfixed_trunc(a); -+ -+ /* fb_div = (vco * *M) / refclk; */ -+ fb_div.full = dfixed_const(clk * *P); -+ fb_div.full = dfixed_mul(fb_div, a); -+ a.full = dfixed_const(pll->refclk); -+ fb_div.full = dfixed_div(fb_div, a); -+ -+ /* *N = floor(fb_div); */ -+ a.full = dfixed_floor(fb_div); -+ *N = dfixed_trunc(fb_div); -+ -+ /* *fN = (fmod(fb_div, 1.0) * 8192) - 4096; */ -+ b.full = dfixed_const(8192); -+ a.full = dfixed_mul(a, b); -+ fb_div.full = dfixed_mul(fb_div, b); -+ fb_div.full = fb_div.full - a.full; -+ *fN = dfixed_trunc(fb_div) - 4096; -+ *fN &= 0xffff; -+ -+ return clk; -+} -diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c -index cfabeb9..b4e4a3b 100644 ---- a/drivers/gpu/drm/nouveau/nv50_crtc.c -+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c -@@ -264,32 +264,40 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update) - int - nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) - { 
-- uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head); -- struct nouveau_pll_vals pll; -- struct pll_lims limits; -+ uint32_t reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head); -+ struct pll_lims pll; - uint32_t reg1, reg2; -- int ret; -+ int ret, N1, M1, N2, M2, P; - -- ret = get_pll_limits(dev, pll_reg, &limits); -+ ret = get_pll_limits(dev, reg, &pll); - if (ret) - return ret; - -- ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll); -- if (ret <= 0) -- return ret; -+ if (pll.vco2.maxfreq) { -+ ret = nv50_calc_pll(dev, &pll, pclk, &N1, &M1, &N2, &M2, &P); -+ if (ret <= 0) -+ return 0; -+ -+ NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n", -+ pclk, ret, N1, M1, N2, M2, P); - -- if (limits.vco2.maxfreq) { -- reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00; -- reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00; -- nv_wr32(dev, pll_reg, 0x10000611); -- nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1); -- nv_wr32(dev, pll_reg + 8, -- reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2); -+ reg1 = nv_rd32(dev, reg + 4) & 0xff00ff00; -+ reg2 = nv_rd32(dev, reg + 8) & 0x8000ff00; -+ nv_wr32(dev, reg, 0x10000611); -+ nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1); -+ nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2); - } else { -- reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000; -- nv_wr32(dev, pll_reg, 0x50000610); -- nv_wr32(dev, pll_reg + 4, reg1 | -- (pll.log2P << 16) | (pll.M1 << 8) | pll.N1); -+ ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P); -+ if (ret <= 0) -+ return 0; -+ -+ NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n", -+ pclk, ret, N1, N2, M1, P); -+ -+ reg1 = nv_rd32(dev, reg + 4) & 0xffc00000; -+ nv_wr32(dev, reg, 0x50000610); -+ nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1); -+ nv_wr32(dev, reg + 8, N2); - } - - return 0; -diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c -index 753e723..03ad7ab 100644 ---- a/drivers/gpu/drm/nouveau/nv50_cursor.c -+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c -@@ -107,6 +107,7 @@ nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) - { - struct drm_device *dev = nv_crtc->base.dev; - -+ nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y; - nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index), - ((y & 0xFFFF) << 16) | (x & 0xFFFF)); - /* Needed to make the cursor move. 
*/ -diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c -index 649db4c..580a5d1 100644 ---- a/drivers/gpu/drm/nouveau/nv50_display.c -+++ b/drivers/gpu/drm/nouveau/nv50_display.c -@@ -29,6 +29,7 @@ - #include "nouveau_encoder.h" - #include "nouveau_connector.h" - #include "nouveau_fb.h" -+#include "nouveau_fbcon.h" - #include "drm_crtc_helper.h" - - static void -@@ -783,6 +784,37 @@ ack: - } - - static void -+nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb) -+{ -+ int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1); -+ struct drm_encoder *encoder; -+ uint32_t tmp, unk0 = 0, unk1 = 0; -+ -+ if (dcb->type != OUTPUT_DP) -+ return; -+ -+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { -+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); -+ -+ if (nv_encoder->dcb == dcb) { -+ unk0 = nv_encoder->dp.unk0; -+ unk1 = nv_encoder->dp.unk1; -+ break; -+ } -+ } -+ -+ if (unk0 || unk1) { -+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); -+ tmp &= 0xfffffe03; -+ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0); -+ -+ tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link)); -+ tmp &= 0xfef080c0; -+ nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1); -+ } -+} -+ -+static void - nv50_display_unk20_handler(struct drm_device *dev) - { - struct dcb_entry *dcbent; -@@ -805,6 +837,8 @@ nv50_display_unk20_handler(struct drm_device *dev) - - nouveau_bios_run_display_table(dev, dcbent, script, pclk); - -+ nv50_display_unk20_dp_hack(dev, dcbent); -+ - tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head)); - tmp &= ~0x000000f; - nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp); -@@ -945,6 +979,8 @@ nv50_display_irq_hotplug_bh(struct work_struct *work) - nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054)); - if (dev_priv->chipset >= 0x90) - nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074)); -+ -+ drm_helper_hpd_irq_event(dev); - } - - void -diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c -index a95e694..32611bd 100644 ---- a/drivers/gpu/drm/nouveau/nv50_fb.c -+++ b/drivers/gpu/drm/nouveau/nv50_fb.c -@@ -6,10 +6,16 @@ - int - nv50_fb_init(struct drm_device *dev) - { -- /* This is needed to get meaningful information from 100c90 -- * on traps. No idea what these values mean exactly. */ - struct drm_nouveau_private *dev_priv = dev->dev_private; - -+ /* Not a clue what this is exactly. Without pointing it at a -+ * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) -+ * cause IOMMU "read from address 0" errors (rh#561267) -+ */ -+ nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8); -+ -+ /* This is needed to get meaningful information from 100c90 -+ * on traps. No idea what these values mean exactly. 
*/ - switch (dev_priv->chipset) { - case 0x50: - nv_wr32(dev, 0x100c90, 0x0707ff); -diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c -index a8c70e7..6bf025c 100644 ---- a/drivers/gpu/drm/nouveau/nv50_fbcon.c -+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c -@@ -6,8 +6,8 @@ - void - nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) - { -- struct nouveau_fbcon_par *par = info->par; -- struct drm_device *dev = par->dev; -+ struct nouveau_fbdev *nfbdev = info->par; -+ struct drm_device *dev = nfbdev->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan = dev_priv->channel; - -@@ -49,8 +49,8 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) - void - nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) - { -- struct nouveau_fbcon_par *par = info->par; -- struct drm_device *dev = par->dev; -+ struct nouveau_fbdev *nfbdev = info->par; -+ struct drm_device *dev = nfbdev->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan = dev_priv->channel; - -@@ -84,8 +84,8 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) - void - nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) - { -- struct nouveau_fbcon_par *par = info->par; -- struct drm_device *dev = par->dev; -+ struct nouveau_fbdev *nfbdev = info->par; -+ struct drm_device *dev = nfbdev->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan = dev_priv->channel; - uint32_t width, dwords, *data = (uint32_t *)image->data; -@@ -152,8 +152,8 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) - int - nv50_fbcon_accel_init(struct fb_info *info) - { -- struct nouveau_fbcon_par *par = info->par; -- struct drm_device *dev = par->dev; -+ struct nouveau_fbdev *nfbdev = info->par; -+ struct drm_device *dev = nfbdev->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan = dev_priv->channel; - struct nouveau_gpuobj *eng2d = NULL; -diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c -index c61782b..bb47ad7 100644 ---- a/drivers/gpu/drm/nouveau/nv50_gpio.c -+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c -@@ -31,7 +31,7 @@ nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift) - { - const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; - -- if (gpio->line > 32) -+ if (gpio->line >= 32) - return -EINVAL; - - *reg = nv50_gpio_reg[gpio->line >> 3]; -diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c -index 0c68698..812778d 100644 ---- a/drivers/gpu/drm/nouveau/nv50_sor.c -+++ b/drivers/gpu/drm/nouveau/nv50_sor.c -@@ -274,7 +274,6 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = { - int - nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) - { -- struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_encoder *nv_encoder = NULL; - struct drm_encoder *encoder; - bool dum; -@@ -321,18 +320,19 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) - encoder->possible_clones = 0; - - if (nv_encoder->dcb->type == OUTPUT_DP) { -- uint32_t mc, or = nv_encoder->or; -+ int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1); -+ uint32_t tmp; - -- if (dev_priv->chipset < 0x90 || -- dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0) -- mc = nv_rd32(dev, 
NV50_PDISPLAY_SOR_MODE_CTRL_C(or)); -- else -- mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or)); -+ tmp = nv_rd32(dev, 0x61c700 + (or * 0x800)); - -- switch ((mc & 0x00000f00) >> 8) { -+ switch ((tmp & 0x00000f00) >> 8) { - case 8: - case 9: -- nv_encoder->dp.mc_unknown = (mc & 0x000f0000) >> 16; -+ nv_encoder->dp.mc_unknown = (tmp & 0x000f0000) >> 16; -+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); -+ nv_encoder->dp.unk0 = tmp & 0x000001fc; -+ tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link)); -+ nv_encoder->dp.unk1 = tmp & 0x010f7f3f; - break; - default: - break; -diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile -index 3c91312..84b1f27 100644 ---- a/drivers/gpu/drm/radeon/Makefile -+++ b/drivers/gpu/drm/radeon/Makefile -@@ -33,6 +33,9 @@ $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable - $(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable - $(call if_changed,mkregtable) - -+$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable -+ $(call if_changed,mkregtable) -+ - $(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h - - $(obj)/r200.o: $(obj)/r200_reg_safe.h -@@ -47,6 +50,8 @@ $(obj)/rs600.o: $(obj)/rs600_reg_safe.h - - $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h - -+$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h -+ - radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ - radeon_irq.o r300_cmdbuf.o r600_cp.o - # add KMS driver -@@ -60,7 +65,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ - rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ - r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ - r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ -- evergreen.o -+ evergreen.o evergreen_cs.o - - radeon-$(CONFIG_COMPAT) += radeon_ioc32.o - radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o -diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h -index 27e2c71..2ebcb97 100644 ---- a/drivers/gpu/drm/radeon/atombios.h -+++ b/drivers/gpu/drm/radeon/atombios.h -@@ -5742,6 +5742,9 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER - #define ATOM_PP_THERMALCONTROLLER_RV6xx 7 - #define ATOM_PP_THERMALCONTROLLER_RV770 8 - #define ATOM_PP_THERMALCONTROLLER_ADT7473 9 -+#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11 -+#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12 -+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller - - typedef struct _ATOM_PPLIB_STATE - { -@@ -5749,6 +5752,26 @@ typedef struct _ATOM_PPLIB_STATE - UCHAR ucClockStateIndices[1]; // variable-sized - } ATOM_PPLIB_STATE; - -+typedef struct _ATOM_PPLIB_FANTABLE -+{ -+ UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same. -+ UCHAR ucTHyst; // Temperature hysteresis. Integer. -+ USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM. -+ USHORT usTMed; // The middle temperature where we change slopes. -+ USHORT usTHigh; // The high point above TMed for adjusting the second slope. -+ USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments). -+ USHORT usPWMMed; // The PWM value (in percent) at TMed. -+ USHORT usPWMHigh; // The PWM value at THigh. -+} ATOM_PPLIB_FANTABLE; -+ -+typedef struct _ATOM_PPLIB_EXTENDEDHEADER -+{ -+ USHORT usSize; -+ ULONG ulMaxEngineClock; // For Overdrive. -+ ULONG ulMaxMemoryClock; // For Overdrive. 
-+ // Add extra system parameters here, always adjust size to include all fields. -+} ATOM_PPLIB_EXTENDEDHEADER; -+ - //// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps - #define ATOM_PP_PLATFORM_CAP_BACKBIAS 1 - #define ATOM_PP_PLATFORM_CAP_POWERPLAY 2 -@@ -5762,6 +5785,12 @@ typedef struct _ATOM_PPLIB_STATE - #define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512 - #define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024 - #define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048 -+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096 -+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition. -+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition). -+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC. -+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature. -+#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver supports BACO state. - - typedef struct _ATOM_PPLIB_POWERPLAYTABLE - { -@@ -5797,6 +5826,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE - - } ATOM_PPLIB_POWERPLAYTABLE; - -+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2 -+{ -+ ATOM_PPLIB_POWERPLAYTABLE basicTable; -+ UCHAR ucNumCustomThermalPolicy; -+ USHORT usCustomThermalPolicyArrayOffset; -+}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2; -+ -+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 -+{ -+ ATOM_PPLIB_POWERPLAYTABLE2 basicTable2; -+ USHORT usFormatID; // To be used ONLY by PPGen. -+ USHORT usFanTableOffset; -+ USHORT usExtendendedHeaderOffset; -+} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3; -+ - //// ATOM_PPLIB_NONCLOCK_INFO::usClassification - #define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 - #define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 -@@ -5816,7 +5860,9 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE - #define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400 - #define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800 - #define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000 --// remaining 3 bits are reserved -+#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000 -+#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000 -+#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000 - - //// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings - #define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 -@@ -5840,9 +5886,15 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE - - #define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000 - #define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000 -+#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000 - #define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000 - --#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000 -+//memory related flags -+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000 -+ -+//M3 Arb //2bits, current 3 sets of parameters in total -+#define ATOM_PPLIB_M3ARB_MASK 0x00060000 -+#define ATOM_PPLIB_M3ARB_SHIFT 17 - - // Contained in an array starting at the offset - // in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset. -@@ -5860,6 +5912,9 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO - // Contained in an array starting at the offset - // in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset. 
- // referenced from ATOM_PPLIB_STATE::ucClockStateIndices -+#define ATOM_PPLIB_NONCLOCKINFO_VER1 12 -+#define ATOM_PPLIB_NONCLOCKINFO_VER2 24 -+ - typedef struct _ATOM_PPLIB_R600_CLOCK_INFO - { - USHORT usEngineClockLow; -@@ -5882,6 +5937,23 @@ typedef struct _ATOM_PPLIB_R600_CLOCK_INFO - #define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4 - #define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8 - #define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16 -+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0). -+ -+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO -+{ -+ USHORT usEngineClockLow; -+ UCHAR ucEngineClockHigh; -+ -+ USHORT usMemoryClockLow; -+ UCHAR ucMemoryClockHigh; -+ -+ USHORT usVDDC; -+ USHORT usVDDCI; -+ USHORT usUnused; -+ -+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_* -+ -+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO; - - typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO - -diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c -index a87990b..f3f2827 100644 ---- a/drivers/gpu/drm/radeon/atombios_crtc.c -+++ b/drivers/gpu/drm/radeon/atombios_crtc.c -@@ -26,7 +26,7 @@ - #include - #include - #include --#include "radeon_fixed.h" -+#include - #include "radeon.h" - #include "atom.h" - #include "atom-bits.h" -@@ -245,25 +245,27 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) - - switch (mode) { - case DRM_MODE_DPMS_ON: -+ radeon_crtc->enabled = true; -+ /* adjust pm to dpms changes BEFORE enabling crtcs */ -+ radeon_pm_compute_clocks(rdev); - atombios_enable_crtc(crtc, ATOM_ENABLE); - if (ASIC_IS_DCE3(rdev)) - atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); - atombios_blank_crtc(crtc, ATOM_DISABLE); -- /* XXX re-enable when interrupt support is added */ -- if (!ASIC_IS_DCE4(rdev)) -- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); -+ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); - radeon_crtc_load_lut(crtc); - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: -- /* XXX re-enable when interrupt support is added */ -- if (!ASIC_IS_DCE4(rdev)) -- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); -+ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); - atombios_blank_crtc(crtc, ATOM_ENABLE); - if (ASIC_IS_DCE3(rdev)) - atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); - atombios_enable_crtc(crtc, ATOM_DISABLE); -+ radeon_crtc->enabled = false; -+ /* adjust pm to dpms changes AFTER disabling crtcs */ -+ radeon_pm_compute_clocks(rdev); - break; - } - } -@@ -705,6 +707,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode - break; - case ATOM_DCPLL: - case ATOM_PPLL_INVALID: -+ default: - pll = &rdev->clock.dcpll; - break; - } -@@ -1160,6 +1163,12 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) - { -+ struct drm_device *dev = crtc->dev; -+ struct radeon_device *rdev = dev->dev_private; -+ -+ /* adjust pm to upcoming mode change */ -+ radeon_pm_compute_clocks(rdev); -+ - if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) - return false; - return true; -diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c -index 28b31c6..abffb14 100644 ---- a/drivers/gpu/drm/radeon/atombios_dp.c -+++ b/drivers/gpu/drm/radeon/atombios_dp.c -@@ -351,7 +351,7 @@ retry: - args.v1.ucChannelID = chan->rec.i2c_id; - args.v1.ucDelay = delay / 10; - if (ASIC_IS_DCE4(rdev)) -- args.v2.ucHPD_ID = chan->rec.hpd_id; -+ args.v2.ucHPD_ID = 
chan->rec.hpd; - - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - -diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c -index e8f447e..4b6623d 100644 ---- a/drivers/gpu/drm/radeon/evergreen.c -+++ b/drivers/gpu/drm/radeon/evergreen.c -@@ -28,39 +28,246 @@ - #include "radeon.h" - #include "radeon_asic.h" - #include "radeon_drm.h" --#include "rv770d.h" -+#include "evergreend.h" - #include "atom.h" - #include "avivod.h" - #include "evergreen_reg.h" - -+#define EVERGREEN_PFP_UCODE_SIZE 1120 -+#define EVERGREEN_PM4_UCODE_SIZE 1376 -+ - static void evergreen_gpu_init(struct radeon_device *rdev); - void evergreen_fini(struct radeon_device *rdev); - -+void evergreen_pm_misc(struct radeon_device *rdev) -+{ -+ int req_ps_idx = rdev->pm.requested_power_state_index; -+ int req_cm_idx = rdev->pm.requested_clock_mode_index; -+ struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; -+ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; -+ -+ if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { -+ if (voltage->voltage != rdev->pm.current_vddc) { -+ radeon_atom_set_voltage(rdev, voltage->voltage); -+ rdev->pm.current_vddc = voltage->voltage; -+ DRM_DEBUG("Setting: v: %d\n", voltage->voltage); -+ } -+ } -+} -+ -+void evergreen_pm_prepare(struct radeon_device *rdev) -+{ -+ struct drm_device *ddev = rdev->ddev; -+ struct drm_crtc *crtc; -+ struct radeon_crtc *radeon_crtc; -+ u32 tmp; -+ -+ /* disable any active CRTCs */ -+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { -+ radeon_crtc = to_radeon_crtc(crtc); -+ if (radeon_crtc->enabled) { -+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset); -+ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; -+ WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); -+ } -+ } -+} -+ -+void evergreen_pm_finish(struct radeon_device *rdev) -+{ -+ struct drm_device *ddev = rdev->ddev; -+ struct drm_crtc *crtc; -+ struct radeon_crtc *radeon_crtc; -+ u32 tmp; -+ -+ /* enable any active CRTCs */ -+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { -+ radeon_crtc = to_radeon_crtc(crtc); -+ if (radeon_crtc->enabled) { -+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset); -+ tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; -+ WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); -+ } -+ } -+} -+ - bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) - { - bool connected = false; -- /* XXX */ -+ -+ switch (hpd) { -+ case RADEON_HPD_1: -+ if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE) -+ connected = true; -+ break; -+ case RADEON_HPD_2: -+ if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE) -+ connected = true; -+ break; -+ case RADEON_HPD_3: -+ if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE) -+ connected = true; -+ break; -+ case RADEON_HPD_4: -+ if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE) -+ connected = true; -+ break; -+ case RADEON_HPD_5: -+ if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE) -+ connected = true; -+ break; -+ case RADEON_HPD_6: -+ if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) -+ connected = true; -+ break; -+ default: -+ break; -+ } -+ - return connected; - } - - void evergreen_hpd_set_polarity(struct radeon_device *rdev, - enum radeon_hpd_id hpd) - { -- /* XXX */ -+ u32 tmp; -+ bool connected = evergreen_hpd_sense(rdev, hpd); -+ -+ switch (hpd) { -+ case RADEON_HPD_1: -+ tmp = RREG32(DC_HPD1_INT_CONTROL); -+ if (connected) -+ tmp &= ~DC_HPDx_INT_POLARITY; -+ else -+ tmp 
|= DC_HPDx_INT_POLARITY; -+ WREG32(DC_HPD1_INT_CONTROL, tmp); -+ break; -+ case RADEON_HPD_2: -+ tmp = RREG32(DC_HPD2_INT_CONTROL); -+ if (connected) -+ tmp &= ~DC_HPDx_INT_POLARITY; -+ else -+ tmp |= DC_HPDx_INT_POLARITY; -+ WREG32(DC_HPD2_INT_CONTROL, tmp); -+ break; -+ case RADEON_HPD_3: -+ tmp = RREG32(DC_HPD3_INT_CONTROL); -+ if (connected) -+ tmp &= ~DC_HPDx_INT_POLARITY; -+ else -+ tmp |= DC_HPDx_INT_POLARITY; -+ WREG32(DC_HPD3_INT_CONTROL, tmp); -+ break; -+ case RADEON_HPD_4: -+ tmp = RREG32(DC_HPD4_INT_CONTROL); -+ if (connected) -+ tmp &= ~DC_HPDx_INT_POLARITY; -+ else -+ tmp |= DC_HPDx_INT_POLARITY; -+ WREG32(DC_HPD4_INT_CONTROL, tmp); -+ break; -+ case RADEON_HPD_5: -+ tmp = RREG32(DC_HPD5_INT_CONTROL); -+ if (connected) -+ tmp &= ~DC_HPDx_INT_POLARITY; -+ else -+ tmp |= DC_HPDx_INT_POLARITY; -+ WREG32(DC_HPD5_INT_CONTROL, tmp); -+ break; -+ case RADEON_HPD_6: -+ tmp = RREG32(DC_HPD6_INT_CONTROL); -+ if (connected) -+ tmp &= ~DC_HPDx_INT_POLARITY; -+ else -+ tmp |= DC_HPDx_INT_POLARITY; -+ WREG32(DC_HPD6_INT_CONTROL, tmp); -+ break; -+ default: -+ break; -+ } - } - - void evergreen_hpd_init(struct radeon_device *rdev) - { -- /* XXX */ -+ struct drm_device *dev = rdev->ddev; -+ struct drm_connector *connector; -+ u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | -+ DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN; -+ -+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -+ struct radeon_connector *radeon_connector = to_radeon_connector(connector); -+ switch (radeon_connector->hpd.hpd) { -+ case RADEON_HPD_1: -+ WREG32(DC_HPD1_CONTROL, tmp); -+ rdev->irq.hpd[0] = true; -+ break; -+ case RADEON_HPD_2: -+ WREG32(DC_HPD2_CONTROL, tmp); -+ rdev->irq.hpd[1] = true; -+ break; -+ case RADEON_HPD_3: -+ WREG32(DC_HPD3_CONTROL, tmp); -+ rdev->irq.hpd[2] = true; -+ break; -+ case RADEON_HPD_4: -+ WREG32(DC_HPD4_CONTROL, tmp); -+ rdev->irq.hpd[3] = true; -+ break; -+ case RADEON_HPD_5: -+ WREG32(DC_HPD5_CONTROL, tmp); -+ rdev->irq.hpd[4] = true; -+ break; -+ case RADEON_HPD_6: -+ WREG32(DC_HPD6_CONTROL, tmp); -+ rdev->irq.hpd[5] = true; -+ break; -+ default: -+ break; -+ } -+ } -+ if (rdev->irq.installed) -+ evergreen_irq_set(rdev); - } - -- --void evergreen_bandwidth_update(struct radeon_device *rdev) -+void evergreen_hpd_fini(struct radeon_device *rdev) - { -- /* XXX */ -+ struct drm_device *dev = rdev->ddev; -+ struct drm_connector *connector; -+ -+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -+ struct radeon_connector *radeon_connector = to_radeon_connector(connector); -+ switch (radeon_connector->hpd.hpd) { -+ case RADEON_HPD_1: -+ WREG32(DC_HPD1_CONTROL, 0); -+ rdev->irq.hpd[0] = false; -+ break; -+ case RADEON_HPD_2: -+ WREG32(DC_HPD2_CONTROL, 0); -+ rdev->irq.hpd[1] = false; -+ break; -+ case RADEON_HPD_3: -+ WREG32(DC_HPD3_CONTROL, 0); -+ rdev->irq.hpd[2] = false; -+ break; -+ case RADEON_HPD_4: -+ WREG32(DC_HPD4_CONTROL, 0); -+ rdev->irq.hpd[3] = false; -+ break; -+ case RADEON_HPD_5: -+ WREG32(DC_HPD5_CONTROL, 0); -+ rdev->irq.hpd[4] = false; -+ break; -+ case RADEON_HPD_6: -+ WREG32(DC_HPD6_CONTROL, 0); -+ rdev->irq.hpd[5] = false; -+ break; -+ default: -+ break; -+ } -+ } - } - --void evergreen_hpd_fini(struct radeon_device *rdev) -+void evergreen_bandwidth_update(struct radeon_device *rdev) - { - /* XXX */ - } -@@ -83,10 +290,31 @@ static int evergreen_mc_wait_for_idle(struct radeon_device *rdev) - /* - * GART - */ -+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev) -+{ -+ unsigned i; -+ u32 tmp; -+ -+ 
WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1)); -+ for (i = 0; i < rdev->usec_timeout; i++) { -+ /* read MC_STATUS */ -+ tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE); -+ tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT; -+ if (tmp == 2) { -+ printk(KERN_WARNING "[drm] r600 flush TLB failed\n"); -+ return; -+ } -+ if (tmp) { -+ return; -+ } -+ udelay(1); -+ } -+} -+ - int evergreen_pcie_gart_enable(struct radeon_device *rdev) - { - u32 tmp; -- int r, i; -+ int r; - - if (rdev->gart.table.vram.robj == NULL) { - dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); -@@ -121,10 +349,9 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) - RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); - WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, - (u32)(rdev->dummy_page.addr >> 12)); -- for (i = 1; i < 7; i++) -- WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); -+ WREG32(VM_CONTEXT1_CNTL, 0); - -- r600_pcie_gart_tlb_flush(rdev); -+ evergreen_pcie_gart_tlb_flush(rdev); - rdev->gart.ready = true; - return 0; - } -@@ -132,11 +359,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) - void evergreen_pcie_gart_disable(struct radeon_device *rdev) - { - u32 tmp; -- int i, r; -+ int r; - - /* Disable all tables */ -- for (i = 0; i < 7; i++) -- WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); -+ WREG32(VM_CONTEXT0_CNTL, 0); -+ WREG32(VM_CONTEXT1_CNTL, 0); - - /* Setup L2 cache */ - WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING | -@@ -173,7 +400,6 @@ void evergreen_pcie_gart_fini(struct radeon_device *rdev) - void evergreen_agp_enable(struct radeon_device *rdev) - { - u32 tmp; -- int i; - - /* Setup L2 cache */ - WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | -@@ -193,8 +419,8 @@ void evergreen_agp_enable(struct radeon_device *rdev) - WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); - WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); - WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); -- for (i = 0; i < 7; i++) -- WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); -+ WREG32(VM_CONTEXT0_CNTL, 0); -+ WREG32(VM_CONTEXT1_CNTL, 0); - } - - static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) -@@ -400,40 +626,656 @@ static void evergreen_mc_program(struct radeon_device *rdev) - rv515_vga_render_disable(rdev); - } - --#if 0 - /* - * CP. 
- */ --static void evergreen_cp_stop(struct radeon_device *rdev) --{ -- /* XXX */ --} -- - - static int evergreen_cp_load_microcode(struct radeon_device *rdev) - { -- /* XXX */ -+ const __be32 *fw_data; -+ int i; - -+ if (!rdev->me_fw || !rdev->pfp_fw) -+ return -EINVAL; -+ -+ r700_cp_stop(rdev); -+ WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0)); -+ -+ fw_data = (const __be32 *)rdev->pfp_fw->data; -+ WREG32(CP_PFP_UCODE_ADDR, 0); -+ for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++) -+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); -+ WREG32(CP_PFP_UCODE_ADDR, 0); -+ -+ fw_data = (const __be32 *)rdev->me_fw->data; -+ WREG32(CP_ME_RAM_WADDR, 0); -+ for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++) -+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); -+ -+ WREG32(CP_PFP_UCODE_ADDR, 0); -+ WREG32(CP_ME_RAM_WADDR, 0); -+ WREG32(CP_ME_RAM_RADDR, 0); - return 0; - } - -+int evergreen_cp_resume(struct radeon_device *rdev) -+{ -+ u32 tmp; -+ u32 rb_bufsz; -+ int r; -+ -+ /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */ -+ WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP | -+ SOFT_RESET_PA | -+ SOFT_RESET_SH | -+ SOFT_RESET_VGT | -+ SOFT_RESET_SX)); -+ RREG32(GRBM_SOFT_RESET); -+ mdelay(15); -+ WREG32(GRBM_SOFT_RESET, 0); -+ RREG32(GRBM_SOFT_RESET); -+ -+ /* Set ring buffer size */ -+ rb_bufsz = drm_order(rdev->cp.ring_size / 8); -+ tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; -+#ifdef __BIG_ENDIAN -+ tmp |= BUF_SWAP_32BIT; -+#endif -+ WREG32(CP_RB_CNTL, tmp); -+ WREG32(CP_SEM_WAIT_TIMER, 0x4); -+ -+ /* Set the write pointer delay */ -+ WREG32(CP_RB_WPTR_DELAY, 0); -+ -+ /* Initialize the ring buffer's read and write pointers */ -+ WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); -+ WREG32(CP_RB_RPTR_WR, 0); -+ WREG32(CP_RB_WPTR, 0); -+ WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF); -+ WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr)); -+ mdelay(1); -+ WREG32(CP_RB_CNTL, tmp); -+ -+ WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8); -+ WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); -+ -+ rdev->cp.rptr = RREG32(CP_RB_RPTR); -+ rdev->cp.wptr = RREG32(CP_RB_WPTR); -+ -+ r600_cp_start(rdev); -+ rdev->cp.ready = true; -+ r = radeon_ring_test(rdev); -+ if (r) { -+ rdev->cp.ready = false; -+ return r; -+ } -+ return 0; -+} - - /* - * Core functions - */ --static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes, -+static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, -+ u32 num_tile_pipes, - u32 num_backends, - u32 backend_disable_mask) - { - u32 backend_map = 0; -+ u32 enabled_backends_mask = 0; -+ u32 enabled_backends_count = 0; -+ u32 cur_pipe; -+ u32 swizzle_pipe[EVERGREEN_MAX_PIPES]; -+ u32 cur_backend = 0; -+ u32 i; -+ bool force_no_swizzle; -+ -+ if (num_tile_pipes > EVERGREEN_MAX_PIPES) -+ num_tile_pipes = EVERGREEN_MAX_PIPES; -+ if (num_tile_pipes < 1) -+ num_tile_pipes = 1; -+ if (num_backends > EVERGREEN_MAX_BACKENDS) -+ num_backends = EVERGREEN_MAX_BACKENDS; -+ if (num_backends < 1) -+ num_backends = 1; -+ -+ for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) { -+ if (((backend_disable_mask >> i) & 1) == 0) { -+ enabled_backends_mask |= (1 << i); -+ ++enabled_backends_count; -+ } -+ if (enabled_backends_count == num_backends) -+ break; -+ } -+ -+ if (enabled_backends_count == 0) { -+ enabled_backends_mask = 1; -+ enabled_backends_count = 1; -+ } -+ -+ if (enabled_backends_count != num_backends) -+ num_backends = enabled_backends_count; -+ -+ memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES); 
-+ switch (rdev->family) { -+ case CHIP_CEDAR: -+ case CHIP_REDWOOD: -+ force_no_swizzle = false; -+ break; -+ case CHIP_CYPRESS: -+ case CHIP_HEMLOCK: -+ case CHIP_JUNIPER: -+ default: -+ force_no_swizzle = true; -+ break; -+ } -+ if (force_no_swizzle) { -+ bool last_backend_enabled = false; -+ -+ force_no_swizzle = false; -+ for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) { -+ if (((enabled_backends_mask >> i) & 1) == 1) { -+ if (last_backend_enabled) -+ force_no_swizzle = true; -+ last_backend_enabled = true; -+ } else -+ last_backend_enabled = false; -+ } -+ } -+ -+ switch (num_tile_pipes) { -+ case 1: -+ case 3: -+ case 5: -+ case 7: -+ DRM_ERROR("odd number of pipes!\n"); -+ break; -+ case 2: -+ swizzle_pipe[0] = 0; -+ swizzle_pipe[1] = 1; -+ break; -+ case 4: -+ if (force_no_swizzle) { -+ swizzle_pipe[0] = 0; -+ swizzle_pipe[1] = 1; -+ swizzle_pipe[2] = 2; -+ swizzle_pipe[3] = 3; -+ } else { -+ swizzle_pipe[0] = 0; -+ swizzle_pipe[1] = 2; -+ swizzle_pipe[2] = 1; -+ swizzle_pipe[3] = 3; -+ } -+ break; -+ case 6: -+ if (force_no_swizzle) { -+ swizzle_pipe[0] = 0; -+ swizzle_pipe[1] = 1; -+ swizzle_pipe[2] = 2; -+ swizzle_pipe[3] = 3; -+ swizzle_pipe[4] = 4; -+ swizzle_pipe[5] = 5; -+ } else { -+ swizzle_pipe[0] = 0; -+ swizzle_pipe[1] = 2; -+ swizzle_pipe[2] = 4; -+ swizzle_pipe[3] = 1; -+ swizzle_pipe[4] = 3; -+ swizzle_pipe[5] = 5; -+ } -+ break; -+ case 8: -+ if (force_no_swizzle) { -+ swizzle_pipe[0] = 0; -+ swizzle_pipe[1] = 1; -+ swizzle_pipe[2] = 2; -+ swizzle_pipe[3] = 3; -+ swizzle_pipe[4] = 4; -+ swizzle_pipe[5] = 5; -+ swizzle_pipe[6] = 6; -+ swizzle_pipe[7] = 7; -+ } else { -+ swizzle_pipe[0] = 0; -+ swizzle_pipe[1] = 2; -+ swizzle_pipe[2] = 4; -+ swizzle_pipe[3] = 6; -+ swizzle_pipe[4] = 1; -+ swizzle_pipe[5] = 3; -+ swizzle_pipe[6] = 5; -+ swizzle_pipe[7] = 7; -+ } -+ break; -+ } -+ -+ for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { -+ while (((1 << cur_backend) & enabled_backends_mask) == 0) -+ cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS; -+ -+ backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4))); -+ -+ cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS; -+ } - - return backend_map; - } --#endif - - static void evergreen_gpu_init(struct radeon_device *rdev) - { -- /* XXX */ -+ u32 cc_rb_backend_disable = 0; -+ u32 cc_gc_shader_pipe_config; -+ u32 gb_addr_config = 0; -+ u32 mc_shared_chmap, mc_arb_ramcfg; -+ u32 gb_backend_map; -+ u32 grbm_gfx_index; -+ u32 sx_debug_1; -+ u32 smx_dc_ctl0; -+ u32 sq_config; -+ u32 sq_lds_resource_mgmt; -+ u32 sq_gpr_resource_mgmt_1; -+ u32 sq_gpr_resource_mgmt_2; -+ u32 sq_gpr_resource_mgmt_3; -+ u32 sq_thread_resource_mgmt; -+ u32 sq_thread_resource_mgmt_2; -+ u32 sq_stack_resource_mgmt_1; -+ u32 sq_stack_resource_mgmt_2; -+ u32 sq_stack_resource_mgmt_3; -+ u32 vgt_cache_invalidation; -+ u32 hdp_host_path_cntl; -+ int i, j, num_shader_engines, ps_thread_count; -+ -+ switch (rdev->family) { -+ case CHIP_CYPRESS: -+ case CHIP_HEMLOCK: -+ rdev->config.evergreen.num_ses = 2; -+ rdev->config.evergreen.max_pipes = 4; -+ rdev->config.evergreen.max_tile_pipes = 8; -+ rdev->config.evergreen.max_simds = 10; -+ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses; -+ rdev->config.evergreen.max_gprs = 256; -+ rdev->config.evergreen.max_threads = 248; -+ rdev->config.evergreen.max_gs_threads = 32; -+ rdev->config.evergreen.max_stack_entries = 512; -+ rdev->config.evergreen.sx_num_of_sets = 4; -+ rdev->config.evergreen.sx_max_export_size = 256; -+ 
rdev->config.evergreen.sx_max_export_pos_size = 64; -+ rdev->config.evergreen.sx_max_export_smx_size = 192; -+ rdev->config.evergreen.max_hw_contexts = 8; -+ rdev->config.evergreen.sq_num_cf_insts = 2; -+ -+ rdev->config.evergreen.sc_prim_fifo_size = 0x100; -+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; -+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; -+ break; -+ case CHIP_JUNIPER: -+ rdev->config.evergreen.num_ses = 1; -+ rdev->config.evergreen.max_pipes = 4; -+ rdev->config.evergreen.max_tile_pipes = 4; -+ rdev->config.evergreen.max_simds = 10; -+ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses; -+ rdev->config.evergreen.max_gprs = 256; -+ rdev->config.evergreen.max_threads = 248; -+ rdev->config.evergreen.max_gs_threads = 32; -+ rdev->config.evergreen.max_stack_entries = 512; -+ rdev->config.evergreen.sx_num_of_sets = 4; -+ rdev->config.evergreen.sx_max_export_size = 256; -+ rdev->config.evergreen.sx_max_export_pos_size = 64; -+ rdev->config.evergreen.sx_max_export_smx_size = 192; -+ rdev->config.evergreen.max_hw_contexts = 8; -+ rdev->config.evergreen.sq_num_cf_insts = 2; -+ -+ rdev->config.evergreen.sc_prim_fifo_size = 0x100; -+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; -+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; -+ break; -+ case CHIP_REDWOOD: -+ rdev->config.evergreen.num_ses = 1; -+ rdev->config.evergreen.max_pipes = 4; -+ rdev->config.evergreen.max_tile_pipes = 4; -+ rdev->config.evergreen.max_simds = 5; -+ rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses; -+ rdev->config.evergreen.max_gprs = 256; -+ rdev->config.evergreen.max_threads = 248; -+ rdev->config.evergreen.max_gs_threads = 32; -+ rdev->config.evergreen.max_stack_entries = 256; -+ rdev->config.evergreen.sx_num_of_sets = 4; -+ rdev->config.evergreen.sx_max_export_size = 256; -+ rdev->config.evergreen.sx_max_export_pos_size = 64; -+ rdev->config.evergreen.sx_max_export_smx_size = 192; -+ rdev->config.evergreen.max_hw_contexts = 8; -+ rdev->config.evergreen.sq_num_cf_insts = 2; -+ -+ rdev->config.evergreen.sc_prim_fifo_size = 0x100; -+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; -+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; -+ break; -+ case CHIP_CEDAR: -+ default: -+ rdev->config.evergreen.num_ses = 1; -+ rdev->config.evergreen.max_pipes = 2; -+ rdev->config.evergreen.max_tile_pipes = 2; -+ rdev->config.evergreen.max_simds = 2; -+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; -+ rdev->config.evergreen.max_gprs = 256; -+ rdev->config.evergreen.max_threads = 192; -+ rdev->config.evergreen.max_gs_threads = 16; -+ rdev->config.evergreen.max_stack_entries = 256; -+ rdev->config.evergreen.sx_num_of_sets = 4; -+ rdev->config.evergreen.sx_max_export_size = 128; -+ rdev->config.evergreen.sx_max_export_pos_size = 32; -+ rdev->config.evergreen.sx_max_export_smx_size = 96; -+ rdev->config.evergreen.max_hw_contexts = 4; -+ rdev->config.evergreen.sq_num_cf_insts = 1; -+ -+ rdev->config.evergreen.sc_prim_fifo_size = 0x40; -+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; -+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; -+ break; -+ } -+ -+ /* Initialize HDP */ -+ for (i = 0, j = 0; i < 32; i++, j += 0x18) { -+ WREG32((0x2c14 + j), 0x00000000); -+ WREG32((0x2c18 + j), 0x00000000); -+ WREG32((0x2c1c + j), 0x00000000); -+ WREG32((0x2c20 + j), 0x00000000); -+ WREG32((0x2c24 + j), 0x00000000); -+ } -+ -+ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); -+ -+ cc_gc_shader_pipe_config = 
RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2; -+ -+ cc_gc_shader_pipe_config |= -+ INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes) -+ & EVERGREEN_MAX_PIPES_MASK); -+ cc_gc_shader_pipe_config |= -+ INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds) -+ & EVERGREEN_MAX_SIMDS_MASK); -+ -+ cc_rb_backend_disable = -+ BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends) -+ & EVERGREEN_MAX_BACKENDS_MASK); -+ -+ -+ mc_shared_chmap = RREG32(MC_SHARED_CHMAP); -+ mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); -+ -+ switch (rdev->config.evergreen.max_tile_pipes) { -+ case 1: -+ default: -+ gb_addr_config |= NUM_PIPES(0); -+ break; -+ case 2: -+ gb_addr_config |= NUM_PIPES(1); -+ break; -+ case 4: -+ gb_addr_config |= NUM_PIPES(2); -+ break; -+ case 8: -+ gb_addr_config |= NUM_PIPES(3); -+ break; -+ } -+ -+ gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); -+ gb_addr_config |= BANK_INTERLEAVE_SIZE(0); -+ gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1); -+ gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1); -+ gb_addr_config |= NUM_GPUS(0); /* Hemlock? */ -+ gb_addr_config |= MULTI_GPU_TILE_SIZE(2); -+ -+ if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2) -+ gb_addr_config |= ROW_SIZE(2); -+ else -+ gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT); -+ -+ if (rdev->ddev->pdev->device == 0x689e) { -+ u32 efuse_straps_4; -+ u32 efuse_straps_3; -+ u8 efuse_box_bit_131_124; -+ -+ WREG32(RCU_IND_INDEX, 0x204); -+ efuse_straps_4 = RREG32(RCU_IND_DATA); -+ WREG32(RCU_IND_INDEX, 0x203); -+ efuse_straps_3 = RREG32(RCU_IND_DATA); -+ efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28)); -+ -+ switch(efuse_box_bit_131_124) { -+ case 0x00: -+ gb_backend_map = 0x76543210; -+ break; -+ case 0x55: -+ gb_backend_map = 0x77553311; -+ break; -+ case 0x56: -+ gb_backend_map = 0x77553300; -+ break; -+ case 0x59: -+ gb_backend_map = 0x77552211; -+ break; -+ case 0x66: -+ gb_backend_map = 0x77443300; -+ break; -+ case 0x99: -+ gb_backend_map = 0x66552211; -+ break; -+ case 0x5a: -+ gb_backend_map = 0x77552200; -+ break; -+ case 0xaa: -+ gb_backend_map = 0x66442200; -+ break; -+ case 0x95: -+ gb_backend_map = 0x66553311; -+ break; -+ default: -+ DRM_ERROR("bad backend map, using default\n"); -+ gb_backend_map = -+ evergreen_get_tile_pipe_to_backend_map(rdev, -+ rdev->config.evergreen.max_tile_pipes, -+ rdev->config.evergreen.max_backends, -+ ((EVERGREEN_MAX_BACKENDS_MASK << -+ rdev->config.evergreen.max_backends) & -+ EVERGREEN_MAX_BACKENDS_MASK)); -+ break; -+ } -+ } else if (rdev->ddev->pdev->device == 0x68b9) { -+ u32 efuse_straps_3; -+ u8 efuse_box_bit_127_124; -+ -+ WREG32(RCU_IND_INDEX, 0x203); -+ efuse_straps_3 = RREG32(RCU_IND_DATA); -+ efuse_box_bit_127_124 = (u8)(efuse_straps_3 & 0xF0000000) >> 28; -+ -+ switch(efuse_box_bit_127_124) { -+ case 0x0: -+ gb_backend_map = 0x00003210; -+ break; -+ case 0x5: -+ case 0x6: -+ case 0x9: -+ case 0xa: -+ gb_backend_map = 0x00003311; -+ break; -+ default: -+ DRM_ERROR("bad backend map, using default\n"); -+ gb_backend_map = -+ evergreen_get_tile_pipe_to_backend_map(rdev, -+ rdev->config.evergreen.max_tile_pipes, -+ rdev->config.evergreen.max_backends, -+ ((EVERGREEN_MAX_BACKENDS_MASK << -+ rdev->config.evergreen.max_backends) & -+ EVERGREEN_MAX_BACKENDS_MASK)); -+ break; -+ } -+ } else -+ gb_backend_map = -+ evergreen_get_tile_pipe_to_backend_map(rdev, -+ 
rdev->config.evergreen.max_tile_pipes, -+ rdev->config.evergreen.max_backends, -+ ((EVERGREEN_MAX_BACKENDS_MASK << -+ rdev->config.evergreen.max_backends) & -+ EVERGREEN_MAX_BACKENDS_MASK)); -+ -+ WREG32(GB_BACKEND_MAP, gb_backend_map); -+ WREG32(GB_ADDR_CONFIG, gb_addr_config); -+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config); -+ WREG32(HDP_ADDR_CONFIG, gb_addr_config); -+ -+ num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; -+ grbm_gfx_index = INSTANCE_BROADCAST_WRITES; -+ -+ for (i = 0; i < rdev->config.evergreen.num_ses; i++) { -+ u32 rb = cc_rb_backend_disable | (0xf0 << 16); -+ u32 sp = cc_gc_shader_pipe_config; -+ u32 gfx = grbm_gfx_index | SE_INDEX(i); -+ -+ if (i == num_shader_engines) { -+ rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK); -+ sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK); -+ } -+ -+ WREG32(GRBM_GFX_INDEX, gfx); -+ WREG32(RLC_GFX_INDEX, gfx); -+ -+ WREG32(CC_RB_BACKEND_DISABLE, rb); -+ WREG32(CC_SYS_RB_BACKEND_DISABLE, rb); -+ WREG32(GC_USER_RB_BACKEND_DISABLE, rb); -+ WREG32(CC_GC_SHADER_PIPE_CONFIG, sp); -+ } -+ -+ grbm_gfx_index |= SE_BROADCAST_WRITES; -+ WREG32(GRBM_GFX_INDEX, grbm_gfx_index); -+ WREG32(RLC_GFX_INDEX, grbm_gfx_index); -+ -+ WREG32(CGTS_SYS_TCC_DISABLE, 0); -+ WREG32(CGTS_TCC_DISABLE, 0); -+ WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); -+ WREG32(CGTS_USER_TCC_DISABLE, 0); -+ -+ /* set HW defaults for 3D engine */ -+ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | -+ ROQ_IB2_START(0x2b))); -+ -+ WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); -+ -+ WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | -+ SYNC_GRADIENT | -+ SYNC_WALKER | -+ SYNC_ALIGNER)); -+ -+ sx_debug_1 = RREG32(SX_DEBUG_1); -+ sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; -+ WREG32(SX_DEBUG_1, sx_debug_1); -+ -+ -+ smx_dc_ctl0 = RREG32(SMX_DC_CTL0); -+ smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); -+ smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); -+ WREG32(SMX_DC_CTL0, smx_dc_ctl0); -+ -+ WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | -+ POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | -+ SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); -+ -+ WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) | -+ SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) | -+ SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size))); -+ -+ WREG32(VGT_NUM_INSTANCES, 1); -+ WREG32(SPI_CONFIG_CNTL, 0); -+ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); -+ WREG32(CP_PERFMON_CNTL, 0); -+ -+ WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) | -+ FETCH_FIFO_HIWATER(0x4) | -+ DONE_FIFO_HIWATER(0xe0) | -+ ALU_UPDATE_FIFO_HIWATER(0x8))); -+ -+ sq_config = RREG32(SQ_CONFIG); -+ sq_config &= ~(PS_PRIO(3) | -+ VS_PRIO(3) | -+ GS_PRIO(3) | -+ ES_PRIO(3)); -+ sq_config |= (VC_ENABLE | -+ EXPORT_SRC_C | -+ PS_PRIO(0) | -+ VS_PRIO(1) | -+ GS_PRIO(2) | -+ ES_PRIO(3)); -+ -+ if (rdev->family == CHIP_CEDAR) -+ /* no vertex cache */ -+ sq_config &= ~VC_ENABLE; -+ -+ sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT); -+ -+ sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32); -+ sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32); -+ sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4); -+ sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32); -+ sq_gpr_resource_mgmt_2 |= 
NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32); -+ sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); -+ sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); -+ -+ if (rdev->family == CHIP_CEDAR) -+ ps_thread_count = 96; -+ else -+ ps_thread_count = 128; -+ -+ sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); -+ sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; -+ sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; -+ sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; -+ sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; -+ sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; -+ -+ sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); -+ sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); -+ sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); -+ sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); -+ sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); -+ sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); -+ -+ WREG32(SQ_CONFIG, sq_config); -+ WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1); -+ WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2); -+ WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3); -+ WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt); -+ WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2); -+ WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1); -+ WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2); -+ WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3); -+ WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0); -+ WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt); -+ -+ WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | -+ FORCE_EOV_MAX_REZ_CNT(255))); -+ -+ if (rdev->family == CHIP_CEDAR) -+ vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); -+ else -+ vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC); -+ vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO); -+ WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); -+ -+ WREG32(VGT_GS_VERTEX_REUSE, 16); -+ WREG32(PA_SC_LINE_STIPPLE_STATE, 0); -+ -+ WREG32(CB_PERF_CTR0_SEL_0, 0); -+ WREG32(CB_PERF_CTR0_SEL_1, 0); -+ WREG32(CB_PERF_CTR1_SEL_0, 0); -+ WREG32(CB_PERF_CTR1_SEL_1, 0); -+ WREG32(CB_PERF_CTR2_SEL_0, 0); -+ WREG32(CB_PERF_CTR2_SEL_1, 0); -+ WREG32(CB_PERF_CTR3_SEL_0, 0); -+ WREG32(CB_PERF_CTR3_SEL_1, 0); -+ -+ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); -+ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); -+ -+ WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); -+ -+ udelay(50); -+ - } - - int evergreen_mc_init(struct radeon_device *rdev) -@@ -476,26 +1318,627 @@ int evergreen_mc_init(struct radeon_device *rdev) - rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; - rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; - rdev->mc.visible_vram_size = rdev->mc.aper_size; -- /* FIXME remove this once we support 
unmappable VRAM */ -- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) { -- rdev->mc.mc_vram_size = rdev->mc.aper_size; -- rdev->mc.real_vram_size = rdev->mc.aper_size; -- } - r600_vram_gtt_location(rdev, &rdev->mc); - radeon_update_bandwidth_info(rdev); - - return 0; - } - --int evergreen_gpu_reset(struct radeon_device *rdev) -+bool evergreen_gpu_is_lockup(struct radeon_device *rdev) - { - /* FIXME: implement for evergreen */ -+ return false; -+} -+ -+static int evergreen_gpu_soft_reset(struct radeon_device *rdev) -+{ -+ struct evergreen_mc_save save; -+ u32 srbm_reset = 0; -+ u32 grbm_reset = 0; -+ -+ dev_info(rdev->dev, "GPU softreset \n"); -+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", -+ RREG32(GRBM_STATUS)); -+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", -+ RREG32(GRBM_STATUS_SE0)); -+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", -+ RREG32(GRBM_STATUS_SE1)); -+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", -+ RREG32(SRBM_STATUS)); -+ evergreen_mc_stop(rdev, &save); -+ if (evergreen_mc_wait_for_idle(rdev)) { -+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); -+ } -+ /* Disable CP parsing/prefetching */ -+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); -+ -+ /* reset all the gfx blocks */ -+ grbm_reset = (SOFT_RESET_CP | -+ SOFT_RESET_CB | -+ SOFT_RESET_DB | -+ SOFT_RESET_PA | -+ SOFT_RESET_SC | -+ SOFT_RESET_SPI | -+ SOFT_RESET_SH | -+ SOFT_RESET_SX | -+ SOFT_RESET_TC | -+ SOFT_RESET_TA | -+ SOFT_RESET_VC | -+ SOFT_RESET_VGT); -+ -+ dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset); -+ WREG32(GRBM_SOFT_RESET, grbm_reset); -+ (void)RREG32(GRBM_SOFT_RESET); -+ udelay(50); -+ WREG32(GRBM_SOFT_RESET, 0); -+ (void)RREG32(GRBM_SOFT_RESET); -+ -+ /* reset all the system blocks */ -+ srbm_reset = SRBM_SOFT_RESET_ALL_MASK; -+ -+ dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset); -+ WREG32(SRBM_SOFT_RESET, srbm_reset); -+ (void)RREG32(SRBM_SOFT_RESET); -+ udelay(50); -+ WREG32(SRBM_SOFT_RESET, 0); -+ (void)RREG32(SRBM_SOFT_RESET); -+ /* Wait a little for things to settle down */ -+ udelay(50); -+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", -+ RREG32(GRBM_STATUS)); -+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", -+ RREG32(GRBM_STATUS_SE0)); -+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", -+ RREG32(GRBM_STATUS_SE1)); -+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", -+ RREG32(SRBM_STATUS)); -+ /* After reset we need to reinit the asic as GPU often endup in an -+ * incoherent state. 
-+ */ -+ atom_asic_init(rdev->mode_info.atom_context); -+ evergreen_mc_resume(rdev, &save); -+ return 0; -+} -+ -+int evergreen_asic_reset(struct radeon_device *rdev) -+{ -+ return evergreen_gpu_soft_reset(rdev); -+} -+ -+/* Interrupts */ -+ -+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc) -+{ -+ switch (crtc) { -+ case 0: -+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET); -+ case 1: -+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET); -+ case 2: -+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET); -+ case 3: -+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET); -+ case 4: -+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET); -+ case 5: -+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET); -+ default: -+ return 0; -+ } -+} -+ -+void evergreen_disable_interrupt_state(struct radeon_device *rdev) -+{ -+ u32 tmp; -+ -+ WREG32(CP_INT_CNTL, 0); -+ WREG32(GRBM_INT_CNTL, 0); -+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); -+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); -+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); -+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); -+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); -+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); -+ -+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); -+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); -+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); -+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); -+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); -+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); -+ -+ WREG32(DACA_AUTODETECT_INT_CONTROL, 0); -+ WREG32(DACB_AUTODETECT_INT_CONTROL, 0); -+ -+ tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; -+ WREG32(DC_HPD1_INT_CONTROL, tmp); -+ tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; -+ WREG32(DC_HPD2_INT_CONTROL, tmp); -+ tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; -+ WREG32(DC_HPD3_INT_CONTROL, tmp); -+ tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; -+ WREG32(DC_HPD4_INT_CONTROL, tmp); -+ tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; -+ WREG32(DC_HPD5_INT_CONTROL, tmp); -+ tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; -+ WREG32(DC_HPD6_INT_CONTROL, tmp); -+ -+} -+ -+int evergreen_irq_set(struct radeon_device *rdev) -+{ -+ u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; -+ u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; -+ u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; -+ u32 grbm_int_cntl = 0; -+ -+ if (!rdev->irq.installed) { -+ WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); -+ return -EINVAL; -+ } -+ /* don't enable anything if the ih is disabled */ -+ if (!rdev->ih.enabled) { -+ r600_disable_interrupts(rdev); -+ /* force the active interrupt state to all disabled */ -+ evergreen_disable_interrupt_state(rdev); -+ return 0; -+ } -+ -+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; -+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; -+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; -+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; -+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; -+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; -+ -+ if (rdev->irq.sw_int) { -+ DRM_DEBUG("evergreen_irq_set: sw int\n"); -+ cp_int_cntl |= 
RB_INT_ENABLE; -+ } -+ if (rdev->irq.crtc_vblank_int[0]) { -+ DRM_DEBUG("evergreen_irq_set: vblank 0\n"); -+ crtc1 |= VBLANK_INT_MASK; -+ } -+ if (rdev->irq.crtc_vblank_int[1]) { -+ DRM_DEBUG("evergreen_irq_set: vblank 1\n"); -+ crtc2 |= VBLANK_INT_MASK; -+ } -+ if (rdev->irq.crtc_vblank_int[2]) { -+ DRM_DEBUG("evergreen_irq_set: vblank 2\n"); -+ crtc3 |= VBLANK_INT_MASK; -+ } -+ if (rdev->irq.crtc_vblank_int[3]) { -+ DRM_DEBUG("evergreen_irq_set: vblank 3\n"); -+ crtc4 |= VBLANK_INT_MASK; -+ } -+ if (rdev->irq.crtc_vblank_int[4]) { -+ DRM_DEBUG("evergreen_irq_set: vblank 4\n"); -+ crtc5 |= VBLANK_INT_MASK; -+ } -+ if (rdev->irq.crtc_vblank_int[5]) { -+ DRM_DEBUG("evergreen_irq_set: vblank 5\n"); -+ crtc6 |= VBLANK_INT_MASK; -+ } -+ if (rdev->irq.hpd[0]) { -+ DRM_DEBUG("evergreen_irq_set: hpd 1\n"); -+ hpd1 |= DC_HPDx_INT_EN; -+ } -+ if (rdev->irq.hpd[1]) { -+ DRM_DEBUG("evergreen_irq_set: hpd 2\n"); -+ hpd2 |= DC_HPDx_INT_EN; -+ } -+ if (rdev->irq.hpd[2]) { -+ DRM_DEBUG("evergreen_irq_set: hpd 3\n"); -+ hpd3 |= DC_HPDx_INT_EN; -+ } -+ if (rdev->irq.hpd[3]) { -+ DRM_DEBUG("evergreen_irq_set: hpd 4\n"); -+ hpd4 |= DC_HPDx_INT_EN; -+ } -+ if (rdev->irq.hpd[4]) { -+ DRM_DEBUG("evergreen_irq_set: hpd 5\n"); -+ hpd5 |= DC_HPDx_INT_EN; -+ } -+ if (rdev->irq.hpd[5]) { -+ DRM_DEBUG("evergreen_irq_set: hpd 6\n"); -+ hpd6 |= DC_HPDx_INT_EN; -+ } -+ if (rdev->irq.gui_idle) { -+ DRM_DEBUG("gui idle\n"); -+ grbm_int_cntl |= GUI_IDLE_INT_ENABLE; -+ } -+ -+ WREG32(CP_INT_CNTL, cp_int_cntl); -+ WREG32(GRBM_INT_CNTL, grbm_int_cntl); -+ -+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); -+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); -+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); -+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); -+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); -+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); -+ -+ WREG32(DC_HPD1_INT_CONTROL, hpd1); -+ WREG32(DC_HPD2_INT_CONTROL, hpd2); -+ WREG32(DC_HPD3_INT_CONTROL, hpd3); -+ WREG32(DC_HPD4_INT_CONTROL, hpd4); -+ WREG32(DC_HPD5_INT_CONTROL, hpd5); -+ WREG32(DC_HPD6_INT_CONTROL, hpd6); -+ - return 0; - } - -+static inline void evergreen_irq_ack(struct radeon_device *rdev, -+ u32 *disp_int, -+ u32 *disp_int_cont, -+ u32 *disp_int_cont2, -+ u32 *disp_int_cont3, -+ u32 *disp_int_cont4, -+ u32 *disp_int_cont5) -+{ -+ u32 tmp; -+ -+ *disp_int = RREG32(DISP_INTERRUPT_STATUS); -+ *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); -+ *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); -+ *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); -+ *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); -+ *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); -+ -+ if (*disp_int & LB_D1_VBLANK_INTERRUPT) -+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); -+ if (*disp_int & LB_D1_VLINE_INTERRUPT) -+ WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK); -+ -+ if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT) -+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK); -+ if (*disp_int_cont & LB_D2_VLINE_INTERRUPT) -+ WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); -+ -+ if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) -+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); -+ if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT) -+ WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK); -+ -+ if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) -+ WREG32(VBLANK_STATUS + 
EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK); -+ if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT) -+ WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK); -+ -+ if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) -+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); -+ if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT) -+ WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK); -+ -+ if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) -+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK); -+ if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT) -+ WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK); -+ -+ if (*disp_int & DC_HPD1_INTERRUPT) { -+ tmp = RREG32(DC_HPD1_INT_CONTROL); -+ tmp |= DC_HPDx_INT_ACK; -+ WREG32(DC_HPD1_INT_CONTROL, tmp); -+ } -+ if (*disp_int_cont & DC_HPD2_INTERRUPT) { -+ tmp = RREG32(DC_HPD2_INT_CONTROL); -+ tmp |= DC_HPDx_INT_ACK; -+ WREG32(DC_HPD2_INT_CONTROL, tmp); -+ } -+ if (*disp_int_cont2 & DC_HPD3_INTERRUPT) { -+ tmp = RREG32(DC_HPD3_INT_CONTROL); -+ tmp |= DC_HPDx_INT_ACK; -+ WREG32(DC_HPD3_INT_CONTROL, tmp); -+ } -+ if (*disp_int_cont3 & DC_HPD4_INTERRUPT) { -+ tmp = RREG32(DC_HPD4_INT_CONTROL); -+ tmp |= DC_HPDx_INT_ACK; -+ WREG32(DC_HPD4_INT_CONTROL, tmp); -+ } -+ if (*disp_int_cont4 & DC_HPD5_INTERRUPT) { -+ tmp = RREG32(DC_HPD5_INT_CONTROL); -+ tmp |= DC_HPDx_INT_ACK; -+ WREG32(DC_HPD5_INT_CONTROL, tmp); -+ } -+ if (*disp_int_cont5 & DC_HPD6_INTERRUPT) { -+ tmp = RREG32(DC_HPD5_INT_CONTROL); -+ tmp |= DC_HPDx_INT_ACK; -+ WREG32(DC_HPD6_INT_CONTROL, tmp); -+ } -+} -+ -+void evergreen_irq_disable(struct radeon_device *rdev) -+{ -+ u32 disp_int, disp_int_cont, disp_int_cont2; -+ u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; -+ -+ r600_disable_interrupts(rdev); -+ /* Wait and acknowledge irq */ -+ mdelay(1); -+ evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, -+ &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); -+ evergreen_disable_interrupt_state(rdev); -+} -+ -+static void evergreen_irq_suspend(struct radeon_device *rdev) -+{ -+ evergreen_irq_disable(rdev); -+ r600_rlc_stop(rdev); -+} -+ -+static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev) -+{ -+ u32 wptr, tmp; -+ -+ /* XXX use writeback */ -+ wptr = RREG32(IH_RB_WPTR); -+ -+ if (wptr & RB_OVERFLOW) { -+ /* When a ring buffer overflow happen start parsing interrupt -+ * from the last not overwritten vector (wptr + 16). Hopefully -+ * this should allow us to catchup. 
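
The overflow handling above relies on the IH ring size being a power of two, so byte offsets can be wrapped with a single AND against ptr_mask; on overflow the read pointer is resynchronized to (wptr + 16) & ptr_mask, the oldest entry that has not yet been overwritten. A standalone sketch of consuming 16-byte (4-dword) entries from such a ring, mirroring the loop in the interrupt handler below; the struct and function names are stand-ins, not driver types:

#include <stdint.h>

struct fake_ih_ring {
	uint32_t *ring;		/* mapped ring buffer, size is a power of two */
	uint32_t ptr_mask;	/* ring size in bytes minus one */
	uint32_t rptr;		/* read pointer, in bytes */
};

/* consume entries up to wptr; each entry is 4 dwords (16 bytes) */
static void drain_ring(struct fake_ih_ring *ih, uint32_t wptr,
		       void (*handle)(uint32_t src_id, uint32_t src_data))
{
	uint32_t rptr = ih->rptr;

	while (rptr != wptr) {
		uint32_t idx = rptr / 4;			/* dword index */
		uint32_t src_id = ih->ring[idx] & 0xff;
		uint32_t src_data = ih->ring[idx + 1] & 0x0fffffff;

		handle(src_id, src_data);

		rptr = (rptr + 16) & ih->ptr_mask;		/* wrap with the mask */
	}
	ih->rptr = rptr;
}
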
-+ */ -+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", -+ wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); -+ rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; -+ tmp = RREG32(IH_RB_CNTL); -+ tmp |= IH_WPTR_OVERFLOW_CLEAR; -+ WREG32(IH_RB_CNTL, tmp); -+ } -+ return (wptr & rdev->ih.ptr_mask); -+} -+ -+int evergreen_irq_process(struct radeon_device *rdev) -+{ -+ u32 wptr = evergreen_get_ih_wptr(rdev); -+ u32 rptr = rdev->ih.rptr; -+ u32 src_id, src_data; -+ u32 ring_index; -+ u32 disp_int, disp_int_cont, disp_int_cont2; -+ u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; -+ unsigned long flags; -+ bool queue_hotplug = false; -+ -+ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); -+ if (!rdev->ih.enabled) -+ return IRQ_NONE; -+ -+ spin_lock_irqsave(&rdev->ih.lock, flags); -+ -+ if (rptr == wptr) { -+ spin_unlock_irqrestore(&rdev->ih.lock, flags); -+ return IRQ_NONE; -+ } -+ if (rdev->shutdown) { -+ spin_unlock_irqrestore(&rdev->ih.lock, flags); -+ return IRQ_NONE; -+ } -+ -+restart_ih: -+ /* display interrupts */ -+ evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, -+ &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); -+ -+ rdev->ih.wptr = wptr; -+ while (rptr != wptr) { -+ /* wptr/rptr are in bytes! */ -+ ring_index = rptr / 4; -+ src_id = rdev->ih.ring[ring_index] & 0xff; -+ src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; -+ -+ switch (src_id) { -+ case 1: /* D1 vblank/vline */ -+ switch (src_data) { -+ case 0: /* D1 vblank */ -+ if (disp_int & LB_D1_VBLANK_INTERRUPT) { -+ drm_handle_vblank(rdev->ddev, 0); -+ wake_up(&rdev->irq.vblank_queue); -+ disp_int &= ~LB_D1_VBLANK_INTERRUPT; -+ DRM_DEBUG("IH: D1 vblank\n"); -+ } -+ break; -+ case 1: /* D1 vline */ -+ if (disp_int & LB_D1_VLINE_INTERRUPT) { -+ disp_int &= ~LB_D1_VLINE_INTERRUPT; -+ DRM_DEBUG("IH: D1 vline\n"); -+ } -+ break; -+ default: -+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); -+ break; -+ } -+ break; -+ case 2: /* D2 vblank/vline */ -+ switch (src_data) { -+ case 0: /* D2 vblank */ -+ if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { -+ drm_handle_vblank(rdev->ddev, 1); -+ wake_up(&rdev->irq.vblank_queue); -+ disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; -+ DRM_DEBUG("IH: D2 vblank\n"); -+ } -+ break; -+ case 1: /* D2 vline */ -+ if (disp_int_cont & LB_D2_VLINE_INTERRUPT) { -+ disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; -+ DRM_DEBUG("IH: D2 vline\n"); -+ } -+ break; -+ default: -+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); -+ break; -+ } -+ break; -+ case 3: /* D3 vblank/vline */ -+ switch (src_data) { -+ case 0: /* D3 vblank */ -+ if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { -+ drm_handle_vblank(rdev->ddev, 2); -+ wake_up(&rdev->irq.vblank_queue); -+ disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; -+ DRM_DEBUG("IH: D3 vblank\n"); -+ } -+ break; -+ case 1: /* D3 vline */ -+ if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { -+ disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; -+ DRM_DEBUG("IH: D3 vline\n"); -+ } -+ break; -+ default: -+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); -+ break; -+ } -+ break; -+ case 4: /* D4 vblank/vline */ -+ switch (src_data) { -+ case 0: /* D4 vblank */ -+ if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { -+ drm_handle_vblank(rdev->ddev, 3); -+ wake_up(&rdev->irq.vblank_queue); -+ disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; -+ DRM_DEBUG("IH: D4 vblank\n"); -+ } -+ break; -+ case 1: /* D4 vline */ -+ if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { -+ disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; -+ 
DRM_DEBUG("IH: D4 vline\n"); -+ } -+ break; -+ default: -+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); -+ break; -+ } -+ break; -+ case 5: /* D5 vblank/vline */ -+ switch (src_data) { -+ case 0: /* D5 vblank */ -+ if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { -+ drm_handle_vblank(rdev->ddev, 4); -+ wake_up(&rdev->irq.vblank_queue); -+ disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; -+ DRM_DEBUG("IH: D5 vblank\n"); -+ } -+ break; -+ case 1: /* D5 vline */ -+ if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { -+ disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; -+ DRM_DEBUG("IH: D5 vline\n"); -+ } -+ break; -+ default: -+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); -+ break; -+ } -+ break; -+ case 6: /* D6 vblank/vline */ -+ switch (src_data) { -+ case 0: /* D6 vblank */ -+ if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { -+ drm_handle_vblank(rdev->ddev, 5); -+ wake_up(&rdev->irq.vblank_queue); -+ disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; -+ DRM_DEBUG("IH: D6 vblank\n"); -+ } -+ break; -+ case 1: /* D6 vline */ -+ if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { -+ disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; -+ DRM_DEBUG("IH: D6 vline\n"); -+ } -+ break; -+ default: -+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); -+ break; -+ } -+ break; -+ case 42: /* HPD hotplug */ -+ switch (src_data) { -+ case 0: -+ if (disp_int & DC_HPD1_INTERRUPT) { -+ disp_int &= ~DC_HPD1_INTERRUPT; -+ queue_hotplug = true; -+ DRM_DEBUG("IH: HPD1\n"); -+ } -+ break; -+ case 1: -+ if (disp_int_cont & DC_HPD2_INTERRUPT) { -+ disp_int_cont &= ~DC_HPD2_INTERRUPT; -+ queue_hotplug = true; -+ DRM_DEBUG("IH: HPD2\n"); -+ } -+ break; -+ case 2: -+ if (disp_int_cont2 & DC_HPD3_INTERRUPT) { -+ disp_int_cont2 &= ~DC_HPD3_INTERRUPT; -+ queue_hotplug = true; -+ DRM_DEBUG("IH: HPD3\n"); -+ } -+ break; -+ case 3: -+ if (disp_int_cont3 & DC_HPD4_INTERRUPT) { -+ disp_int_cont3 &= ~DC_HPD4_INTERRUPT; -+ queue_hotplug = true; -+ DRM_DEBUG("IH: HPD4\n"); -+ } -+ break; -+ case 4: -+ if (disp_int_cont4 & DC_HPD5_INTERRUPT) { -+ disp_int_cont4 &= ~DC_HPD5_INTERRUPT; -+ queue_hotplug = true; -+ DRM_DEBUG("IH: HPD5\n"); -+ } -+ break; -+ case 5: -+ if (disp_int_cont5 & DC_HPD6_INTERRUPT) { -+ disp_int_cont5 &= ~DC_HPD6_INTERRUPT; -+ queue_hotplug = true; -+ DRM_DEBUG("IH: HPD6\n"); -+ } -+ break; -+ default: -+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); -+ break; -+ } -+ break; -+ case 176: /* CP_INT in ring buffer */ -+ case 177: /* CP_INT in IB1 */ -+ case 178: /* CP_INT in IB2 */ -+ DRM_DEBUG("IH: CP int: 0x%08x\n", src_data); -+ radeon_fence_process(rdev); -+ break; -+ case 181: /* CP EOP event */ -+ DRM_DEBUG("IH: CP EOP\n"); -+ break; -+ case 233: /* GUI IDLE */ -+ DRM_DEBUG("IH: CP EOP\n"); -+ rdev->pm.gui_idle = true; -+ wake_up(&rdev->irq.idle_queue); -+ break; -+ default: -+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); -+ break; -+ } -+ -+ /* wptr/rptr are in bytes! 
*/ -+ rptr += 16; -+ rptr &= rdev->ih.ptr_mask; -+ } -+ /* make sure wptr hasn't changed while processing */ -+ wptr = evergreen_get_ih_wptr(rdev); -+ if (wptr != rdev->ih.wptr) -+ goto restart_ih; -+ if (queue_hotplug) -+ queue_work(rdev->wq, &rdev->hotplug_work); -+ rdev->ih.rptr = rptr; -+ WREG32(IH_RB_RPTR, rdev->ih.rptr); -+ spin_unlock_irqrestore(&rdev->ih.lock, flags); -+ return IRQ_HANDLED; -+} -+ - static int evergreen_startup(struct radeon_device *rdev) - { --#if 0 - int r; - - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { -@@ -505,17 +1948,15 @@ static int evergreen_startup(struct radeon_device *rdev) - return r; - } - } --#endif -+ - evergreen_mc_program(rdev); --#if 0 - if (rdev->flags & RADEON_IS_AGP) { -- evergreem_agp_enable(rdev); -+ evergreen_agp_enable(rdev); - } else { - r = evergreen_pcie_gart_enable(rdev); - if (r) - return r; - } --#endif - evergreen_gpu_init(rdev); - #if 0 - if (!rdev->r600_blit.shader_obj) { -@@ -536,6 +1977,7 @@ static int evergreen_startup(struct radeon_device *rdev) - DRM_ERROR("failed to pin blit object %d\n", r); - return r; - } -+#endif - - /* Enable IRQ */ - r = r600_irq_init(rdev); -@@ -544,7 +1986,7 @@ static int evergreen_startup(struct radeon_device *rdev) - radeon_irq_kms_fini(rdev); - return r; - } -- r600_irq_set(rdev); -+ evergreen_irq_set(rdev); - - r = radeon_ring_init(rdev, rdev->cp.ring_size); - if (r) -@@ -552,12 +1994,12 @@ static int evergreen_startup(struct radeon_device *rdev) - r = evergreen_cp_load_microcode(rdev); - if (r) - return r; -- r = r600_cp_resume(rdev); -+ r = evergreen_cp_resume(rdev); - if (r) - return r; - /* write back buffer are not vital so don't worry about failure */ - r600_wb_enable(rdev); --#endif -+ - return 0; - } - -@@ -582,13 +2024,13 @@ int evergreen_resume(struct radeon_device *rdev) - DRM_ERROR("r600 startup failed on resume\n"); - return r; - } --#if 0 -+ - r = r600_ib_test(rdev); - if (r) { - DRM_ERROR("radeon: failled testing IB (%d).\n", r); - return r; - } --#endif -+ - return r; - - } -@@ -597,12 +2039,14 @@ int evergreen_suspend(struct radeon_device *rdev) - { - #if 0 - int r; -- -+#endif - /* FIXME: we should wait for ring to be empty */ - r700_cp_stop(rdev); - rdev->cp.ready = false; -+ evergreen_irq_suspend(rdev); - r600_wb_disable(rdev); - evergreen_pcie_gart_disable(rdev); -+#if 0 - /* unpin shaders bo */ - r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); - if (likely(r == 0)) { -@@ -682,8 +2126,6 @@ int evergreen_init(struct radeon_device *rdev) - r = radeon_clocks_init(rdev); - if (r) - return r; -- /* Initialize power management */ -- radeon_pm_init(rdev); - /* Fence driver */ - r = radeon_fence_driver_init(rdev); - if (r) -@@ -702,7 +2144,7 @@ int evergreen_init(struct radeon_device *rdev) - r = radeon_bo_init(rdev); - if (r) - return r; --#if 0 -+ - r = radeon_irq_kms_init(rdev); - if (r) - return r; -@@ -716,14 +2158,16 @@ int evergreen_init(struct radeon_device *rdev) - r = r600_pcie_gart_init(rdev); - if (r) - return r; --#endif -- rdev->accel_working = false; -+ -+ rdev->accel_working = true; - r = evergreen_startup(rdev); - if (r) { -- evergreen_suspend(rdev); -- /*r600_wb_fini(rdev);*/ -- /*radeon_ring_fini(rdev);*/ -- /*evergreen_pcie_gart_fini(rdev);*/ -+ dev_err(rdev->dev, "disabling GPU acceleration\n"); -+ r700_cp_fini(rdev); -+ r600_wb_fini(rdev); -+ r600_irq_fini(rdev); -+ radeon_irq_kms_fini(rdev); -+ evergreen_pcie_gart_fini(rdev); - rdev->accel_working = false; - } - if (rdev->accel_working) { -@@ -743,16 +2187,12 @@ int evergreen_init(struct 
radeon_device *rdev) - - void evergreen_fini(struct radeon_device *rdev) - { -- radeon_pm_fini(rdev); -- evergreen_suspend(rdev); --#if 0 -- r600_blit_fini(rdev); -+ /*r600_blit_fini(rdev);*/ -+ r700_cp_fini(rdev); -+ r600_wb_fini(rdev); - r600_irq_fini(rdev); - radeon_irq_kms_fini(rdev); -- radeon_ring_fini(rdev); -- r600_wb_fini(rdev); - evergreen_pcie_gart_fini(rdev); --#endif - radeon_gem_fini(rdev); - radeon_fence_driver_fini(rdev); - radeon_clocks_fini(rdev); -diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c -new file mode 100644 -index 0000000..64516b9 ---- /dev/null -+++ b/drivers/gpu/drm/radeon/evergreen_cs.c -@@ -0,0 +1,1356 @@ -+/* -+ * Copyright 2010 Advanced Micro Devices, Inc. -+ * Copyright 2008 Red Hat Inc. -+ * Copyright 2009 Jerome Glisse. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -+ * OTHER DEALINGS IN THE SOFTWARE. 
-+ * -+ * Authors: Dave Airlie -+ * Alex Deucher -+ * Jerome Glisse -+ */ -+#include "drmP.h" -+#include "radeon.h" -+#include "evergreend.h" -+#include "evergreen_reg_safe.h" -+ -+static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, -+ struct radeon_cs_reloc **cs_reloc); -+ -+struct evergreen_cs_track { -+ u32 group_size; -+ u32 nbanks; -+ u32 npipes; -+ /* value we track */ -+ u32 nsamples; -+ u32 cb_color_base_last[12]; -+ struct radeon_bo *cb_color_bo[12]; -+ u32 cb_color_bo_offset[12]; -+ struct radeon_bo *cb_color_fmask_bo[8]; -+ struct radeon_bo *cb_color_cmask_bo[8]; -+ u32 cb_color_info[12]; -+ u32 cb_color_view[12]; -+ u32 cb_color_pitch_idx[12]; -+ u32 cb_color_slice_idx[12]; -+ u32 cb_color_dim_idx[12]; -+ u32 cb_color_dim[12]; -+ u32 cb_color_pitch[12]; -+ u32 cb_color_slice[12]; -+ u32 cb_color_cmask_slice[8]; -+ u32 cb_color_fmask_slice[8]; -+ u32 cb_target_mask; -+ u32 cb_shader_mask; -+ u32 vgt_strmout_config; -+ u32 vgt_strmout_buffer_config; -+ u32 db_depth_control; -+ u32 db_depth_view; -+ u32 db_depth_size; -+ u32 db_depth_size_idx; -+ u32 db_z_info; -+ u32 db_z_idx; -+ u32 db_z_read_offset; -+ u32 db_z_write_offset; -+ struct radeon_bo *db_z_read_bo; -+ struct radeon_bo *db_z_write_bo; -+ u32 db_s_info; -+ u32 db_s_idx; -+ u32 db_s_read_offset; -+ u32 db_s_write_offset; -+ struct radeon_bo *db_s_read_bo; -+ struct radeon_bo *db_s_write_bo; -+}; -+ -+static void evergreen_cs_track_init(struct evergreen_cs_track *track) -+{ -+ int i; -+ -+ for (i = 0; i < 8; i++) { -+ track->cb_color_fmask_bo[i] = NULL; -+ track->cb_color_cmask_bo[i] = NULL; -+ track->cb_color_cmask_slice[i] = 0; -+ track->cb_color_fmask_slice[i] = 0; -+ } -+ -+ for (i = 0; i < 12; i++) { -+ track->cb_color_base_last[i] = 0; -+ track->cb_color_bo[i] = NULL; -+ track->cb_color_bo_offset[i] = 0xFFFFFFFF; -+ track->cb_color_info[i] = 0; -+ track->cb_color_view[i] = 0; -+ track->cb_color_pitch_idx[i] = 0; -+ track->cb_color_slice_idx[i] = 0; -+ track->cb_color_dim[i] = 0; -+ track->cb_color_pitch[i] = 0; -+ track->cb_color_slice[i] = 0; -+ track->cb_color_dim[i] = 0; -+ } -+ track->cb_target_mask = 0xFFFFFFFF; -+ track->cb_shader_mask = 0xFFFFFFFF; -+ -+ track->db_depth_view = 0xFFFFC000; -+ track->db_depth_size = 0xFFFFFFFF; -+ track->db_depth_size_idx = 0; -+ track->db_depth_control = 0xFFFFFFFF; -+ track->db_z_info = 0xFFFFFFFF; -+ track->db_z_idx = 0xFFFFFFFF; -+ track->db_z_read_offset = 0xFFFFFFFF; -+ track->db_z_write_offset = 0xFFFFFFFF; -+ track->db_z_read_bo = NULL; -+ track->db_z_write_bo = NULL; -+ track->db_s_info = 0xFFFFFFFF; -+ track->db_s_idx = 0xFFFFFFFF; -+ track->db_s_read_offset = 0xFFFFFFFF; -+ track->db_s_write_offset = 0xFFFFFFFF; -+ track->db_s_read_bo = NULL; -+ track->db_s_write_bo = NULL; -+} -+ -+static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i) -+{ -+ /* XXX fill in */ -+ return 0; -+} -+ -+static int evergreen_cs_track_check(struct radeon_cs_parser *p) -+{ -+ struct evergreen_cs_track *track = p->track; -+ -+ /* we don't support stream out buffer yet */ -+ if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) { -+ dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n"); -+ return -EINVAL; -+ } -+ -+ /* XXX fill in */ -+ return 0; -+} -+ -+/** -+ * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet -+ * @parser: parser structure holding parsing context. -+ * @pkt: where to store packet informations -+ * -+ * Assume that chunk_ib_index is properly set. 
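
The parser below walks the indirect buffer one packet at a time: each packet begins with a 32-bit header encoding the packet type, a payload count, and, depending on the type, a starting register offset (type 0) or an opcode (type 3). A rough standalone decode sketch; the field positions follow the usual PM4 layout and should be checked against the driver's CP_PACKET_* macros, so treat the exact masks here as assumptions:

#include <stdint.h>

struct pm4_pkt {
	unsigned int type;	/* 0, 2 or 3 */
	unsigned int count;	/* payload dwords minus one */
	unsigned int reg;	/* type 0: starting register offset, in bytes */
	unsigned int opcode;	/* type 3: opcode */
};

/* assumed PM4 header layout: type in bits 31:30, count in 29:16,
 * type-0 register dword offset in 15:0, type-3 opcode in 15:8 */
static struct pm4_pkt decode_header(uint32_t header)
{
	struct pm4_pkt p = { 0 };

	p.type = (header >> 30) & 0x3;
	p.count = (header >> 16) & 0x3fff;
	if (p.type == 0)
		p.reg = (header & 0xffff) << 2;		/* dwords -> bytes */
	else if (p.type == 3)
		p.opcode = (header >> 8) & 0xff;
	return p;
}

After decoding, the parser advances by count + 2 dwords (the header plus count + 1 payload dwords), which is exactly how evergreen_cs_parse steps p->idx through the chunk.
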
Will return -EINVAL -+ * if packet is bigger than remaining ib size. or if packets is unknown. -+ **/ -+int evergreen_cs_packet_parse(struct radeon_cs_parser *p, -+ struct radeon_cs_packet *pkt, -+ unsigned idx) -+{ -+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; -+ uint32_t header; -+ -+ if (idx >= ib_chunk->length_dw) { -+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n", -+ idx, ib_chunk->length_dw); -+ return -EINVAL; -+ } -+ header = radeon_get_ib_value(p, idx); -+ pkt->idx = idx; -+ pkt->type = CP_PACKET_GET_TYPE(header); -+ pkt->count = CP_PACKET_GET_COUNT(header); -+ pkt->one_reg_wr = 0; -+ switch (pkt->type) { -+ case PACKET_TYPE0: -+ pkt->reg = CP_PACKET0_GET_REG(header); -+ break; -+ case PACKET_TYPE3: -+ pkt->opcode = CP_PACKET3_GET_OPCODE(header); -+ break; -+ case PACKET_TYPE2: -+ pkt->count = -1; -+ break; -+ default: -+ DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); -+ return -EINVAL; -+ } -+ if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { -+ DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", -+ pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+/** -+ * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3 -+ * @parser: parser structure holding parsing context. -+ * @data: pointer to relocation data -+ * @offset_start: starting offset -+ * @offset_mask: offset mask (to align start offset on) -+ * @reloc: reloc informations -+ * -+ * Check next packet is relocation packet3, do bo validation and compute -+ * GPU offset using the provided start. -+ **/ -+static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, -+ struct radeon_cs_reloc **cs_reloc) -+{ -+ struct radeon_cs_chunk *relocs_chunk; -+ struct radeon_cs_packet p3reloc; -+ unsigned idx; -+ int r; -+ -+ if (p->chunk_relocs_idx == -1) { -+ DRM_ERROR("No relocation chunk !\n"); -+ return -EINVAL; -+ } -+ *cs_reloc = NULL; -+ relocs_chunk = &p->chunks[p->chunk_relocs_idx]; -+ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx); -+ if (r) { -+ return r; -+ } -+ p->idx += p3reloc.count + 2; -+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { -+ DRM_ERROR("No packet3 for relocation for packet at %d.\n", -+ p3reloc.idx); -+ return -EINVAL; -+ } -+ idx = radeon_get_ib_value(p, p3reloc.idx + 1); -+ if (idx >= relocs_chunk->length_dw) { -+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", -+ idx, relocs_chunk->length_dw); -+ return -EINVAL; -+ } -+ /* FIXME: we assume reloc size is 4 dwords */ -+ *cs_reloc = p->relocs_ptr[(idx / 4)]; -+ return 0; -+} -+ -+/** -+ * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc -+ * @parser: parser structure holding parsing context. -+ * -+ * Check next packet is relocation packet3, do bo validation and compute -+ * GPU offset using the provided start. -+ **/ -+static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) -+{ -+ struct radeon_cs_packet p3reloc; -+ int r; -+ -+ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx); -+ if (r) { -+ return 0; -+ } -+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { -+ return 0; -+ } -+ return 1; -+} -+ -+/** -+ * evergreen_cs_packet_next_vline() - parse userspace VLINE packet -+ * @parser: parser structure holding parsing context. -+ * -+ * Userspace sends a special sequence for VLINE waits. 
-+ * PACKET0 - VLINE_START_END + value -+ * PACKET3 - WAIT_REG_MEM poll vline status reg -+ * RELOC (P3) - crtc_id in reloc. -+ * -+ * This function parses this and relocates the VLINE START END -+ * and WAIT_REG_MEM packets to the correct crtc. -+ * It also detects a switched off crtc and nulls out the -+ * wait in that case. -+ */ -+static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p) -+{ -+ struct drm_mode_object *obj; -+ struct drm_crtc *crtc; -+ struct radeon_crtc *radeon_crtc; -+ struct radeon_cs_packet p3reloc, wait_reg_mem; -+ int crtc_id; -+ int r; -+ uint32_t header, h_idx, reg, wait_reg_mem_info; -+ volatile uint32_t *ib; -+ -+ ib = p->ib->ptr; -+ -+ /* parse the WAIT_REG_MEM */ -+ r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx); -+ if (r) -+ return r; -+ -+ /* check its a WAIT_REG_MEM */ -+ if (wait_reg_mem.type != PACKET_TYPE3 || -+ wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { -+ DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); -+ r = -EINVAL; -+ return r; -+ } -+ -+ wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); -+ /* bit 4 is reg (0) or mem (1) */ -+ if (wait_reg_mem_info & 0x10) { -+ DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); -+ r = -EINVAL; -+ return r; -+ } -+ /* waiting for value to be equal */ -+ if ((wait_reg_mem_info & 0x7) != 0x3) { -+ DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); -+ r = -EINVAL; -+ return r; -+ } -+ if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) { -+ DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); -+ r = -EINVAL; -+ return r; -+ } -+ -+ if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) { -+ DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); -+ r = -EINVAL; -+ return r; -+ } -+ -+ /* jump over the NOP */ -+ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2); -+ if (r) -+ return r; -+ -+ h_idx = p->idx - 2; -+ p->idx += wait_reg_mem.count + 2; -+ p->idx += p3reloc.count + 2; -+ -+ header = radeon_get_ib_value(p, h_idx); -+ crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); -+ reg = CP_PACKET0_GET_REG(header); -+ mutex_lock(&p->rdev->ddev->mode_config.mutex); -+ obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); -+ if (!obj) { -+ DRM_ERROR("cannot find crtc %d\n", crtc_id); -+ r = -EINVAL; -+ goto out; -+ } -+ crtc = obj_to_crtc(obj); -+ radeon_crtc = to_radeon_crtc(crtc); -+ crtc_id = radeon_crtc->crtc_id; -+ -+ if (!crtc->enabled) { -+ /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */ -+ ib[h_idx + 2] = PACKET2(0); -+ ib[h_idx + 3] = PACKET2(0); -+ ib[h_idx + 4] = PACKET2(0); -+ ib[h_idx + 5] = PACKET2(0); -+ ib[h_idx + 6] = PACKET2(0); -+ ib[h_idx + 7] = PACKET2(0); -+ ib[h_idx + 8] = PACKET2(0); -+ } else { -+ switch (reg) { -+ case EVERGREEN_VLINE_START_END: -+ header &= ~R600_CP_PACKET0_REG_MASK; -+ header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2; -+ ib[h_idx] = header; -+ ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2; -+ break; -+ default: -+ DRM_ERROR("unknown crtc reloc\n"); -+ r = -EINVAL; -+ goto out; -+ } -+ } -+out: -+ mutex_unlock(&p->rdev->ddev->mode_config.mutex); -+ return r; -+} -+ -+static int evergreen_packet0_check(struct radeon_cs_parser *p, -+ struct radeon_cs_packet *pkt, -+ unsigned idx, unsigned reg) -+{ -+ int r; -+ -+ switch (reg) { -+ case EVERGREEN_VLINE_START_END: -+ r = evergreen_cs_packet_parse_vline(p); -+ if (r) { -+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n", -+ idx, 
reg); -+ return r; -+ } -+ break; -+ default: -+ printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", -+ reg, idx); -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p, -+ struct radeon_cs_packet *pkt) -+{ -+ unsigned reg, i; -+ unsigned idx; -+ int r; -+ -+ idx = pkt->idx + 1; -+ reg = pkt->reg; -+ for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { -+ r = evergreen_packet0_check(p, pkt, idx, reg); -+ if (r) { -+ return r; -+ } -+ } -+ return 0; -+} -+ -+/** -+ * evergreen_cs_check_reg() - check if register is authorized or not -+ * @parser: parser structure holding parsing context -+ * @reg: register we are testing -+ * @idx: index into the cs buffer -+ * -+ * This function will test against evergreen_reg_safe_bm and return 0 -+ * if register is safe. If register is not flag as safe this function -+ * will test it against a list of register needind special handling. -+ */ -+static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) -+{ -+ struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track; -+ struct radeon_cs_reloc *reloc; -+ u32 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm); -+ u32 m, i, tmp, *ib; -+ int r; -+ -+ i = (reg >> 7); -+ if (i > last_reg) { -+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); -+ return -EINVAL; -+ } -+ m = 1 << ((reg >> 2) & 31); -+ if (!(evergreen_reg_safe_bm[i] & m)) -+ return 0; -+ ib = p->ib->ptr; -+ switch (reg) { -+ /* force following reg to 0 in an attemp to disable out buffer -+ * which will need us to better understand how it works to perform -+ * security check on it (Jerome) -+ */ -+ case SQ_ESGS_RING_SIZE: -+ case SQ_GSVS_RING_SIZE: -+ case SQ_ESTMP_RING_SIZE: -+ case SQ_GSTMP_RING_SIZE: -+ case SQ_HSTMP_RING_SIZE: -+ case SQ_LSTMP_RING_SIZE: -+ case SQ_PSTMP_RING_SIZE: -+ case SQ_VSTMP_RING_SIZE: -+ case SQ_ESGS_RING_ITEMSIZE: -+ case SQ_ESTMP_RING_ITEMSIZE: -+ case SQ_GSTMP_RING_ITEMSIZE: -+ case SQ_GSVS_RING_ITEMSIZE: -+ case SQ_GS_VERT_ITEMSIZE: -+ case SQ_GS_VERT_ITEMSIZE_1: -+ case SQ_GS_VERT_ITEMSIZE_2: -+ case SQ_GS_VERT_ITEMSIZE_3: -+ case SQ_GSVS_RING_OFFSET_1: -+ case SQ_GSVS_RING_OFFSET_2: -+ case SQ_GSVS_RING_OFFSET_3: -+ case SQ_HSTMP_RING_ITEMSIZE: -+ case SQ_LSTMP_RING_ITEMSIZE: -+ case SQ_PSTMP_RING_ITEMSIZE: -+ case SQ_VSTMP_RING_ITEMSIZE: -+ case VGT_TF_RING_SIZE: -+ /* get value to populate the IB don't remove */ -+ tmp =radeon_get_ib_value(p, idx); -+ ib[idx] = 0; -+ break; -+ case DB_DEPTH_CONTROL: -+ track->db_depth_control = radeon_get_ib_value(p, idx); -+ break; -+ case DB_Z_INFO: -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ dev_warn(p->dev, "bad SET_CONTEXT_REG " -+ "0x%04X\n", reg); -+ return -EINVAL; -+ } -+ track->db_z_info = radeon_get_ib_value(p, idx); -+ ib[idx] &= ~Z_ARRAY_MODE(0xf); -+ track->db_z_info &= ~Z_ARRAY_MODE(0xf); -+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { -+ ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); -+ track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); -+ } else { -+ ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); -+ track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); -+ } -+ break; -+ case DB_STENCIL_INFO: -+ track->db_s_info = radeon_get_ib_value(p, idx); -+ break; -+ case DB_DEPTH_VIEW: -+ track->db_depth_view = radeon_get_ib_value(p, idx); -+ break; -+ case DB_DEPTH_SIZE: -+ track->db_depth_size = radeon_get_ib_value(p, idx); -+ track->db_depth_size_idx = idx; -+ break; -+ case DB_Z_READ_BASE: -+ r = 
evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ dev_warn(p->dev, "bad SET_CONTEXT_REG " -+ "0x%04X\n", reg); -+ return -EINVAL; -+ } -+ track->db_z_read_offset = radeon_get_ib_value(p, idx); -+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); -+ track->db_z_read_bo = reloc->robj; -+ break; -+ case DB_Z_WRITE_BASE: -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ dev_warn(p->dev, "bad SET_CONTEXT_REG " -+ "0x%04X\n", reg); -+ return -EINVAL; -+ } -+ track->db_z_write_offset = radeon_get_ib_value(p, idx); -+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); -+ track->db_z_write_bo = reloc->robj; -+ break; -+ case DB_STENCIL_READ_BASE: -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ dev_warn(p->dev, "bad SET_CONTEXT_REG " -+ "0x%04X\n", reg); -+ return -EINVAL; -+ } -+ track->db_s_read_offset = radeon_get_ib_value(p, idx); -+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); -+ track->db_s_read_bo = reloc->robj; -+ break; -+ case DB_STENCIL_WRITE_BASE: -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ dev_warn(p->dev, "bad SET_CONTEXT_REG " -+ "0x%04X\n", reg); -+ return -EINVAL; -+ } -+ track->db_s_write_offset = radeon_get_ib_value(p, idx); -+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); -+ track->db_s_write_bo = reloc->robj; -+ break; -+ case VGT_STRMOUT_CONFIG: -+ track->vgt_strmout_config = radeon_get_ib_value(p, idx); -+ break; -+ case VGT_STRMOUT_BUFFER_CONFIG: -+ track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx); -+ break; -+ case CB_TARGET_MASK: -+ track->cb_target_mask = radeon_get_ib_value(p, idx); -+ break; -+ case CB_SHADER_MASK: -+ track->cb_shader_mask = radeon_get_ib_value(p, idx); -+ break; -+ case PA_SC_AA_CONFIG: -+ tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK; -+ track->nsamples = 1 << tmp; -+ break; -+ case CB_COLOR0_VIEW: -+ case CB_COLOR1_VIEW: -+ case CB_COLOR2_VIEW: -+ case CB_COLOR3_VIEW: -+ case CB_COLOR4_VIEW: -+ case CB_COLOR5_VIEW: -+ case CB_COLOR6_VIEW: -+ case CB_COLOR7_VIEW: -+ tmp = (reg - CB_COLOR0_VIEW) / 0x3c; -+ track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); -+ break; -+ case CB_COLOR8_VIEW: -+ case CB_COLOR9_VIEW: -+ case CB_COLOR10_VIEW: -+ case CB_COLOR11_VIEW: -+ tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8; -+ track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); -+ break; -+ case CB_COLOR0_INFO: -+ case CB_COLOR1_INFO: -+ case CB_COLOR2_INFO: -+ case CB_COLOR3_INFO: -+ case CB_COLOR4_INFO: -+ case CB_COLOR5_INFO: -+ case CB_COLOR6_INFO: -+ case CB_COLOR7_INFO: -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ dev_warn(p->dev, "bad SET_CONTEXT_REG " -+ "0x%04X\n", reg); -+ return -EINVAL; -+ } -+ tmp = (reg - CB_COLOR0_INFO) / 0x3c; -+ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); -+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { -+ ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); -+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); -+ } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { -+ ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); -+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); -+ } -+ break; -+ case CB_COLOR8_INFO: -+ case CB_COLOR9_INFO: -+ case CB_COLOR10_INFO: -+ case CB_COLOR11_INFO: -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ dev_warn(p->dev, "bad SET_CONTEXT_REG " -+ "0x%04X\n", reg); -+ return -EINVAL; -+ } -+ tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8; -+ track->cb_color_info[tmp] = 
radeon_get_ib_value(p, idx); -+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { -+ ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); -+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); -+ } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { -+ ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); -+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); -+ } -+ break; -+ case CB_COLOR0_PITCH: -+ case CB_COLOR1_PITCH: -+ case CB_COLOR2_PITCH: -+ case CB_COLOR3_PITCH: -+ case CB_COLOR4_PITCH: -+ case CB_COLOR5_PITCH: -+ case CB_COLOR6_PITCH: -+ case CB_COLOR7_PITCH: -+ tmp = (reg - CB_COLOR0_PITCH) / 0x3c; -+ track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx); -+ track->cb_color_pitch_idx[tmp] = idx; -+ break; -+ case CB_COLOR8_PITCH: -+ case CB_COLOR9_PITCH: -+ case CB_COLOR10_PITCH: -+ case CB_COLOR11_PITCH: -+ tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8; -+ track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx); -+ track->cb_color_pitch_idx[tmp] = idx; -+ break; -+ case CB_COLOR0_SLICE: -+ case CB_COLOR1_SLICE: -+ case CB_COLOR2_SLICE: -+ case CB_COLOR3_SLICE: -+ case CB_COLOR4_SLICE: -+ case CB_COLOR5_SLICE: -+ case CB_COLOR6_SLICE: -+ case CB_COLOR7_SLICE: -+ tmp = (reg - CB_COLOR0_SLICE) / 0x3c; -+ track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); -+ track->cb_color_slice_idx[tmp] = idx; -+ break; -+ case CB_COLOR8_SLICE: -+ case CB_COLOR9_SLICE: -+ case CB_COLOR10_SLICE: -+ case CB_COLOR11_SLICE: -+ tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; -+ track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); -+ track->cb_color_slice_idx[tmp] = idx; -+ break; -+ case CB_COLOR0_ATTRIB: -+ case CB_COLOR1_ATTRIB: -+ case CB_COLOR2_ATTRIB: -+ case CB_COLOR3_ATTRIB: -+ case CB_COLOR4_ATTRIB: -+ case CB_COLOR5_ATTRIB: -+ case CB_COLOR6_ATTRIB: -+ case CB_COLOR7_ATTRIB: -+ case CB_COLOR8_ATTRIB: -+ case CB_COLOR9_ATTRIB: -+ case CB_COLOR10_ATTRIB: -+ case CB_COLOR11_ATTRIB: -+ break; -+ case CB_COLOR0_DIM: -+ case CB_COLOR1_DIM: -+ case CB_COLOR2_DIM: -+ case CB_COLOR3_DIM: -+ case CB_COLOR4_DIM: -+ case CB_COLOR5_DIM: -+ case CB_COLOR6_DIM: -+ case CB_COLOR7_DIM: -+ tmp = (reg - CB_COLOR0_DIM) / 0x3c; -+ track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx); -+ track->cb_color_dim_idx[tmp] = idx; -+ break; -+ case CB_COLOR8_DIM: -+ case CB_COLOR9_DIM: -+ case CB_COLOR10_DIM: -+ case CB_COLOR11_DIM: -+ tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8; -+ track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx); -+ track->cb_color_dim_idx[tmp] = idx; -+ break; -+ case CB_COLOR0_FMASK: -+ case CB_COLOR1_FMASK: -+ case CB_COLOR2_FMASK: -+ case CB_COLOR3_FMASK: -+ case CB_COLOR4_FMASK: -+ case CB_COLOR5_FMASK: -+ case CB_COLOR6_FMASK: -+ case CB_COLOR7_FMASK: -+ tmp = (reg - CB_COLOR0_FMASK) / 0x3c; -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); -+ return -EINVAL; -+ } -+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); -+ track->cb_color_fmask_bo[tmp] = reloc->robj; -+ break; -+ case CB_COLOR0_CMASK: -+ case CB_COLOR1_CMASK: -+ case CB_COLOR2_CMASK: -+ case CB_COLOR3_CMASK: -+ case CB_COLOR4_CMASK: -+ case CB_COLOR5_CMASK: -+ case CB_COLOR6_CMASK: -+ case CB_COLOR7_CMASK: -+ tmp = (reg - CB_COLOR0_CMASK) / 0x3c; -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); -+ return -EINVAL; -+ } -+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); -+ track->cb_color_cmask_bo[tmp] = reloc->robj; -+ break; -+ 
case CB_COLOR0_FMASK_SLICE: -+ case CB_COLOR1_FMASK_SLICE: -+ case CB_COLOR2_FMASK_SLICE: -+ case CB_COLOR3_FMASK_SLICE: -+ case CB_COLOR4_FMASK_SLICE: -+ case CB_COLOR5_FMASK_SLICE: -+ case CB_COLOR6_FMASK_SLICE: -+ case CB_COLOR7_FMASK_SLICE: -+ tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c; -+ track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx); -+ break; -+ case CB_COLOR0_CMASK_SLICE: -+ case CB_COLOR1_CMASK_SLICE: -+ case CB_COLOR2_CMASK_SLICE: -+ case CB_COLOR3_CMASK_SLICE: -+ case CB_COLOR4_CMASK_SLICE: -+ case CB_COLOR5_CMASK_SLICE: -+ case CB_COLOR6_CMASK_SLICE: -+ case CB_COLOR7_CMASK_SLICE: -+ tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c; -+ track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx); -+ break; -+ case CB_COLOR0_BASE: -+ case CB_COLOR1_BASE: -+ case CB_COLOR2_BASE: -+ case CB_COLOR3_BASE: -+ case CB_COLOR4_BASE: -+ case CB_COLOR5_BASE: -+ case CB_COLOR6_BASE: -+ case CB_COLOR7_BASE: -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ dev_warn(p->dev, "bad SET_CONTEXT_REG " -+ "0x%04X\n", reg); -+ return -EINVAL; -+ } -+ tmp = (reg - CB_COLOR0_BASE) / 0x3c; -+ track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); -+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); -+ track->cb_color_base_last[tmp] = ib[idx]; -+ track->cb_color_bo[tmp] = reloc->robj; -+ break; -+ case CB_COLOR8_BASE: -+ case CB_COLOR9_BASE: -+ case CB_COLOR10_BASE: -+ case CB_COLOR11_BASE: -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ dev_warn(p->dev, "bad SET_CONTEXT_REG " -+ "0x%04X\n", reg); -+ return -EINVAL; -+ } -+ tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8; -+ track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); -+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); -+ track->cb_color_base_last[tmp] = ib[idx]; -+ track->cb_color_bo[tmp] = reloc->robj; -+ break; -+ case CB_IMMED0_BASE: -+ case CB_IMMED1_BASE: -+ case CB_IMMED2_BASE: -+ case CB_IMMED3_BASE: -+ case CB_IMMED4_BASE: -+ case CB_IMMED5_BASE: -+ case CB_IMMED6_BASE: -+ case CB_IMMED7_BASE: -+ case CB_IMMED8_BASE: -+ case CB_IMMED9_BASE: -+ case CB_IMMED10_BASE: -+ case CB_IMMED11_BASE: -+ case DB_HTILE_DATA_BASE: -+ case SQ_PGM_START_FS: -+ case SQ_PGM_START_ES: -+ case SQ_PGM_START_VS: -+ case SQ_PGM_START_GS: -+ case SQ_PGM_START_PS: -+ case SQ_PGM_START_HS: -+ case SQ_PGM_START_LS: -+ case GDS_ADDR_BASE: -+ case SQ_CONST_MEM_BASE: -+ case SQ_ALU_CONST_CACHE_GS_0: -+ case SQ_ALU_CONST_CACHE_GS_1: -+ case SQ_ALU_CONST_CACHE_GS_2: -+ case SQ_ALU_CONST_CACHE_GS_3: -+ case SQ_ALU_CONST_CACHE_GS_4: -+ case SQ_ALU_CONST_CACHE_GS_5: -+ case SQ_ALU_CONST_CACHE_GS_6: -+ case SQ_ALU_CONST_CACHE_GS_7: -+ case SQ_ALU_CONST_CACHE_GS_8: -+ case SQ_ALU_CONST_CACHE_GS_9: -+ case SQ_ALU_CONST_CACHE_GS_10: -+ case SQ_ALU_CONST_CACHE_GS_11: -+ case SQ_ALU_CONST_CACHE_GS_12: -+ case SQ_ALU_CONST_CACHE_GS_13: -+ case SQ_ALU_CONST_CACHE_GS_14: -+ case SQ_ALU_CONST_CACHE_GS_15: -+ case SQ_ALU_CONST_CACHE_PS_0: -+ case SQ_ALU_CONST_CACHE_PS_1: -+ case SQ_ALU_CONST_CACHE_PS_2: -+ case SQ_ALU_CONST_CACHE_PS_3: -+ case SQ_ALU_CONST_CACHE_PS_4: -+ case SQ_ALU_CONST_CACHE_PS_5: -+ case SQ_ALU_CONST_CACHE_PS_6: -+ case SQ_ALU_CONST_CACHE_PS_7: -+ case SQ_ALU_CONST_CACHE_PS_8: -+ case SQ_ALU_CONST_CACHE_PS_9: -+ case SQ_ALU_CONST_CACHE_PS_10: -+ case SQ_ALU_CONST_CACHE_PS_11: -+ case SQ_ALU_CONST_CACHE_PS_12: -+ case SQ_ALU_CONST_CACHE_PS_13: -+ case SQ_ALU_CONST_CACHE_PS_14: -+ case SQ_ALU_CONST_CACHE_PS_15: -+ case SQ_ALU_CONST_CACHE_VS_0: -+ case SQ_ALU_CONST_CACHE_VS_1: -+ 
case SQ_ALU_CONST_CACHE_VS_2: -+ case SQ_ALU_CONST_CACHE_VS_3: -+ case SQ_ALU_CONST_CACHE_VS_4: -+ case SQ_ALU_CONST_CACHE_VS_5: -+ case SQ_ALU_CONST_CACHE_VS_6: -+ case SQ_ALU_CONST_CACHE_VS_7: -+ case SQ_ALU_CONST_CACHE_VS_8: -+ case SQ_ALU_CONST_CACHE_VS_9: -+ case SQ_ALU_CONST_CACHE_VS_10: -+ case SQ_ALU_CONST_CACHE_VS_11: -+ case SQ_ALU_CONST_CACHE_VS_12: -+ case SQ_ALU_CONST_CACHE_VS_13: -+ case SQ_ALU_CONST_CACHE_VS_14: -+ case SQ_ALU_CONST_CACHE_VS_15: -+ case SQ_ALU_CONST_CACHE_HS_0: -+ case SQ_ALU_CONST_CACHE_HS_1: -+ case SQ_ALU_CONST_CACHE_HS_2: -+ case SQ_ALU_CONST_CACHE_HS_3: -+ case SQ_ALU_CONST_CACHE_HS_4: -+ case SQ_ALU_CONST_CACHE_HS_5: -+ case SQ_ALU_CONST_CACHE_HS_6: -+ case SQ_ALU_CONST_CACHE_HS_7: -+ case SQ_ALU_CONST_CACHE_HS_8: -+ case SQ_ALU_CONST_CACHE_HS_9: -+ case SQ_ALU_CONST_CACHE_HS_10: -+ case SQ_ALU_CONST_CACHE_HS_11: -+ case SQ_ALU_CONST_CACHE_HS_12: -+ case SQ_ALU_CONST_CACHE_HS_13: -+ case SQ_ALU_CONST_CACHE_HS_14: -+ case SQ_ALU_CONST_CACHE_HS_15: -+ case SQ_ALU_CONST_CACHE_LS_0: -+ case SQ_ALU_CONST_CACHE_LS_1: -+ case SQ_ALU_CONST_CACHE_LS_2: -+ case SQ_ALU_CONST_CACHE_LS_3: -+ case SQ_ALU_CONST_CACHE_LS_4: -+ case SQ_ALU_CONST_CACHE_LS_5: -+ case SQ_ALU_CONST_CACHE_LS_6: -+ case SQ_ALU_CONST_CACHE_LS_7: -+ case SQ_ALU_CONST_CACHE_LS_8: -+ case SQ_ALU_CONST_CACHE_LS_9: -+ case SQ_ALU_CONST_CACHE_LS_10: -+ case SQ_ALU_CONST_CACHE_LS_11: -+ case SQ_ALU_CONST_CACHE_LS_12: -+ case SQ_ALU_CONST_CACHE_LS_13: -+ case SQ_ALU_CONST_CACHE_LS_14: -+ case SQ_ALU_CONST_CACHE_LS_15: -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ dev_warn(p->dev, "bad SET_CONTEXT_REG " -+ "0x%04X\n", reg); -+ return -EINVAL; -+ } -+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); -+ break; -+ default: -+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+/** -+ * evergreen_check_texture_resource() - check if register is authorized or not -+ * @p: parser structure holding parsing context -+ * @idx: index into the cs buffer -+ * @texture: texture's bo structure -+ * @mipmap: mipmap's bo structure -+ * -+ * This function will check that the resource has valid field and that -+ * the texture and mipmap bo object are big enough to cover this resource. 
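
The helper this comment documents is still a stub in this patch ("XXX fill in" below); the general shape of such a check is to derive the surface size from the decoded width, height and format, then refuse the command stream if either buffer object is smaller than the offset plus that size. A hypothetical standalone sketch of that idea only; none of the names or the size math below come from the driver:

#include <stdint.h>
#include <stdbool.h>

/* hypothetical decoded texture state */
struct tex_desc {
	uint32_t width, height;
	uint32_t bytes_per_pixel;
	uint64_t offset;	/* start offset inside the buffer object */
};

/* reject the resource if the backing object cannot hold it */
static bool tex_fits_bo(const struct tex_desc *t, uint64_t bo_size)
{
	uint64_t surface = (uint64_t)t->width * t->height * t->bytes_per_pixel;

	return t->offset + surface <= bo_size;
}
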
-+ */ -+static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx, -+ struct radeon_bo *texture, -+ struct radeon_bo *mipmap) -+{ -+ /* XXX fill in */ -+ return 0; -+} -+ -+static int evergreen_packet3_check(struct radeon_cs_parser *p, -+ struct radeon_cs_packet *pkt) -+{ -+ struct radeon_cs_reloc *reloc; -+ struct evergreen_cs_track *track; -+ volatile u32 *ib; -+ unsigned idx; -+ unsigned i; -+ unsigned start_reg, end_reg, reg; -+ int r; -+ u32 idx_value; -+ -+ track = (struct evergreen_cs_track *)p->track; -+ ib = p->ib->ptr; -+ idx = pkt->idx + 1; -+ idx_value = radeon_get_ib_value(p, idx); -+ -+ switch (pkt->opcode) { -+ case PACKET3_CONTEXT_CONTROL: -+ if (pkt->count != 1) { -+ DRM_ERROR("bad CONTEXT_CONTROL\n"); -+ return -EINVAL; -+ } -+ break; -+ case PACKET3_INDEX_TYPE: -+ case PACKET3_NUM_INSTANCES: -+ case PACKET3_CLEAR_STATE: -+ if (pkt->count) { -+ DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); -+ return -EINVAL; -+ } -+ break; -+ case PACKET3_INDEX_BASE: -+ if (pkt->count != 1) { -+ DRM_ERROR("bad INDEX_BASE\n"); -+ return -EINVAL; -+ } -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ DRM_ERROR("bad INDEX_BASE\n"); -+ return -EINVAL; -+ } -+ ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); -+ ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; -+ r = evergreen_cs_track_check(p); -+ if (r) { -+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); -+ return r; -+ } -+ break; -+ case PACKET3_DRAW_INDEX: -+ if (pkt->count != 3) { -+ DRM_ERROR("bad DRAW_INDEX\n"); -+ return -EINVAL; -+ } -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ DRM_ERROR("bad DRAW_INDEX\n"); -+ return -EINVAL; -+ } -+ ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); -+ ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; -+ r = evergreen_cs_track_check(p); -+ if (r) { -+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); -+ return r; -+ } -+ break; -+ case PACKET3_DRAW_INDEX_2: -+ if (pkt->count != 4) { -+ DRM_ERROR("bad DRAW_INDEX_2\n"); -+ return -EINVAL; -+ } -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ DRM_ERROR("bad DRAW_INDEX_2\n"); -+ return -EINVAL; -+ } -+ ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); -+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; -+ r = evergreen_cs_track_check(p); -+ if (r) { -+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); -+ return r; -+ } -+ break; -+ case PACKET3_DRAW_INDEX_AUTO: -+ if (pkt->count != 1) { -+ DRM_ERROR("bad DRAW_INDEX_AUTO\n"); -+ return -EINVAL; -+ } -+ r = evergreen_cs_track_check(p); -+ if (r) { -+ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); -+ return r; -+ } -+ break; -+ case PACKET3_DRAW_INDEX_MULTI_AUTO: -+ if (pkt->count != 2) { -+ DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n"); -+ return -EINVAL; -+ } -+ r = evergreen_cs_track_check(p); -+ if (r) { -+ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); -+ return r; -+ } -+ break; -+ case PACKET3_DRAW_INDEX_IMMD: -+ if (pkt->count < 2) { -+ DRM_ERROR("bad DRAW_INDEX_IMMD\n"); -+ return -EINVAL; -+ } -+ r = evergreen_cs_track_check(p); -+ if (r) { -+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); -+ return r; -+ } -+ break; -+ case PACKET3_DRAW_INDEX_OFFSET: -+ if (pkt->count != 2) { -+ DRM_ERROR("bad DRAW_INDEX_OFFSET\n"); -+ return -EINVAL; -+ } -+ r = evergreen_cs_track_check(p); -+ if (r) { -+ 
dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); -+ return r; -+ } -+ break; -+ case PACKET3_DRAW_INDEX_OFFSET_2: -+ if (pkt->count != 3) { -+ DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n"); -+ return -EINVAL; -+ } -+ r = evergreen_cs_track_check(p); -+ if (r) { -+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); -+ return r; -+ } -+ break; -+ case PACKET3_WAIT_REG_MEM: -+ if (pkt->count != 5) { -+ DRM_ERROR("bad WAIT_REG_MEM\n"); -+ return -EINVAL; -+ } -+ /* bit 4 is reg (0) or mem (1) */ -+ if (idx_value & 0x10) { -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ DRM_ERROR("bad WAIT_REG_MEM\n"); -+ return -EINVAL; -+ } -+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); -+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; -+ } -+ break; -+ case PACKET3_SURFACE_SYNC: -+ if (pkt->count != 3) { -+ DRM_ERROR("bad SURFACE_SYNC\n"); -+ return -EINVAL; -+ } -+ /* 0xffffffff/0x0 is flush all cache flag */ -+ if (radeon_get_ib_value(p, idx + 1) != 0xffffffff || -+ radeon_get_ib_value(p, idx + 2) != 0) { -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ DRM_ERROR("bad SURFACE_SYNC\n"); -+ return -EINVAL; -+ } -+ ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); -+ } -+ break; -+ case PACKET3_EVENT_WRITE: -+ if (pkt->count != 2 && pkt->count != 0) { -+ DRM_ERROR("bad EVENT_WRITE\n"); -+ return -EINVAL; -+ } -+ if (pkt->count) { -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ DRM_ERROR("bad EVENT_WRITE\n"); -+ return -EINVAL; -+ } -+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); -+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; -+ } -+ break; -+ case PACKET3_EVENT_WRITE_EOP: -+ if (pkt->count != 4) { -+ DRM_ERROR("bad EVENT_WRITE_EOP\n"); -+ return -EINVAL; -+ } -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ DRM_ERROR("bad EVENT_WRITE_EOP\n"); -+ return -EINVAL; -+ } -+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); -+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; -+ break; -+ case PACKET3_EVENT_WRITE_EOS: -+ if (pkt->count != 3) { -+ DRM_ERROR("bad EVENT_WRITE_EOS\n"); -+ return -EINVAL; -+ } -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ DRM_ERROR("bad EVENT_WRITE_EOS\n"); -+ return -EINVAL; -+ } -+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); -+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; -+ break; -+ case PACKET3_SET_CONFIG_REG: -+ start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START; -+ end_reg = 4 * pkt->count + start_reg - 4; -+ if ((start_reg < PACKET3_SET_CONFIG_REG_START) || -+ (start_reg >= PACKET3_SET_CONFIG_REG_END) || -+ (end_reg >= PACKET3_SET_CONFIG_REG_END)) { -+ DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); -+ return -EINVAL; -+ } -+ for (i = 0; i < pkt->count; i++) { -+ reg = start_reg + (4 * i); -+ r = evergreen_cs_check_reg(p, reg, idx+1+i); -+ if (r) -+ return r; -+ } -+ break; -+ case PACKET3_SET_CONTEXT_REG: -+ start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START; -+ end_reg = 4 * pkt->count + start_reg - 4; -+ if ((start_reg < PACKET3_SET_CONTEXT_REG_START) || -+ (start_reg >= PACKET3_SET_CONTEXT_REG_END) || -+ (end_reg >= PACKET3_SET_CONTEXT_REG_END)) { -+ DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n"); -+ return -EINVAL; -+ } -+ for (i = 0; i < pkt->count; i++) { -+ reg = start_reg + (4 * i); -+ r = evergreen_cs_check_reg(p, reg, idx+1+i); -+ if (r) -+ return r; -+ } -+ break; -+ case PACKET3_SET_RESOURCE: -+ if (pkt->count % 8) { -+ DRM_ERROR("bad 
SET_RESOURCE\n"); -+ return -EINVAL; -+ } -+ start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START; -+ end_reg = 4 * pkt->count + start_reg - 4; -+ if ((start_reg < PACKET3_SET_RESOURCE_START) || -+ (start_reg >= PACKET3_SET_RESOURCE_END) || -+ (end_reg >= PACKET3_SET_RESOURCE_END)) { -+ DRM_ERROR("bad SET_RESOURCE\n"); -+ return -EINVAL; -+ } -+ for (i = 0; i < (pkt->count / 8); i++) { -+ struct radeon_bo *texture, *mipmap; -+ u32 size, offset; -+ -+ switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) { -+ case SQ_TEX_VTX_VALID_TEXTURE: -+ /* tex base */ -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ DRM_ERROR("bad SET_RESOURCE (tex)\n"); -+ return -EINVAL; -+ } -+ ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); -+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) -+ ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); -+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) -+ ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); -+ texture = reloc->robj; -+ /* tex mip base */ -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ DRM_ERROR("bad SET_RESOURCE (tex)\n"); -+ return -EINVAL; -+ } -+ ib[idx+1+(i*8)+4] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); -+ mipmap = reloc->robj; -+ r = evergreen_check_texture_resource(p, idx+1+(i*8), -+ texture, mipmap); -+ if (r) -+ return r; -+ break; -+ case SQ_TEX_VTX_VALID_BUFFER: -+ /* vtx base */ -+ r = evergreen_cs_packet_next_reloc(p, &reloc); -+ if (r) { -+ DRM_ERROR("bad SET_RESOURCE (vtx)\n"); -+ return -EINVAL; -+ } -+ offset = radeon_get_ib_value(p, idx+1+(i*8)+0); -+ size = radeon_get_ib_value(p, idx+1+(i*8)+1); -+ if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) { -+ /* force size to size of the buffer */ -+ dev_warn(p->dev, "vbo resource seems too big for the bo\n"); -+ ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj); -+ } -+ ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff); -+ ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; -+ break; -+ case SQ_TEX_VTX_INVALID_TEXTURE: -+ case SQ_TEX_VTX_INVALID_BUFFER: -+ default: -+ DRM_ERROR("bad SET_RESOURCE\n"); -+ return -EINVAL; -+ } -+ } -+ break; -+ case PACKET3_SET_ALU_CONST: -+ /* XXX fix me ALU const buffers only */ -+ break; -+ case PACKET3_SET_BOOL_CONST: -+ start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START; -+ end_reg = 4 * pkt->count + start_reg - 4; -+ if ((start_reg < PACKET3_SET_BOOL_CONST_START) || -+ (start_reg >= PACKET3_SET_BOOL_CONST_END) || -+ (end_reg >= PACKET3_SET_BOOL_CONST_END)) { -+ DRM_ERROR("bad SET_BOOL_CONST\n"); -+ return -EINVAL; -+ } -+ break; -+ case PACKET3_SET_LOOP_CONST: -+ start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START; -+ end_reg = 4 * pkt->count + start_reg - 4; -+ if ((start_reg < PACKET3_SET_LOOP_CONST_START) || -+ (start_reg >= PACKET3_SET_LOOP_CONST_END) || -+ (end_reg >= PACKET3_SET_LOOP_CONST_END)) { -+ DRM_ERROR("bad SET_LOOP_CONST\n"); -+ return -EINVAL; -+ } -+ break; -+ case PACKET3_SET_CTL_CONST: -+ start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START; -+ end_reg = 4 * pkt->count + start_reg - 4; -+ if ((start_reg < PACKET3_SET_CTL_CONST_START) || -+ (start_reg >= PACKET3_SET_CTL_CONST_END) || -+ (end_reg >= PACKET3_SET_CTL_CONST_END)) { -+ DRM_ERROR("bad SET_CTL_CONST\n"); -+ return -EINVAL; -+ } -+ break; -+ case PACKET3_SET_SAMPLER: -+ if (pkt->count % 3) { -+ DRM_ERROR("bad SET_SAMPLER\n"); -+ return -EINVAL; -+ } -+ start_reg = (idx_value << 2) + 
PACKET3_SET_SAMPLER_START; -+ end_reg = 4 * pkt->count + start_reg - 4; -+ if ((start_reg < PACKET3_SET_SAMPLER_START) || -+ (start_reg >= PACKET3_SET_SAMPLER_END) || -+ (end_reg >= PACKET3_SET_SAMPLER_END)) { -+ DRM_ERROR("bad SET_SAMPLER\n"); -+ return -EINVAL; -+ } -+ break; -+ case PACKET3_NOP: -+ break; -+ default: -+ DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+int evergreen_cs_parse(struct radeon_cs_parser *p) -+{ -+ struct radeon_cs_packet pkt; -+ struct evergreen_cs_track *track; -+ int r; -+ -+ if (p->track == NULL) { -+ /* initialize tracker, we are in kms */ -+ track = kzalloc(sizeof(*track), GFP_KERNEL); -+ if (track == NULL) -+ return -ENOMEM; -+ evergreen_cs_track_init(track); -+ track->npipes = p->rdev->config.evergreen.tiling_npipes; -+ track->nbanks = p->rdev->config.evergreen.tiling_nbanks; -+ track->group_size = p->rdev->config.evergreen.tiling_group_size; -+ p->track = track; -+ } -+ do { -+ r = evergreen_cs_packet_parse(p, &pkt, p->idx); -+ if (r) { -+ kfree(p->track); -+ p->track = NULL; -+ return r; -+ } -+ p->idx += pkt.count + 2; -+ switch (pkt.type) { -+ case PACKET_TYPE0: -+ r = evergreen_cs_parse_packet0(p, &pkt); -+ break; -+ case PACKET_TYPE2: -+ break; -+ case PACKET_TYPE3: -+ r = evergreen_packet3_check(p, &pkt); -+ break; -+ default: -+ DRM_ERROR("Unknown packet type %d !\n", pkt.type); -+ kfree(p->track); -+ p->track = NULL; -+ return -EINVAL; -+ } -+ if (r) { -+ kfree(p->track); -+ p->track = NULL; -+ return r; -+ } -+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); -+#if 0 -+ for (r = 0; r < p->ib->length_dw; r++) { -+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]); -+ mdelay(1); -+ } -+#endif -+ kfree(p->track); -+ p->track = NULL; -+ return 0; -+} -+ -diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h -index f7c7c96..e028c1c 100644 ---- a/drivers/gpu/drm/radeon/evergreen_reg.h -+++ b/drivers/gpu/drm/radeon/evergreen_reg.h -@@ -151,6 +151,9 @@ - #define EVERGREEN_DATA_FORMAT 0x6b00 - # define EVERGREEN_INTERLEAVE_EN (1 << 0) - #define EVERGREEN_DESKTOP_HEIGHT 0x6b04 -+#define EVERGREEN_VLINE_START_END 0x6b08 -+#define EVERGREEN_VLINE_STATUS 0x6bb8 -+# define EVERGREEN_VLINE_STAT (1 << 12) - - #define EVERGREEN_VIEWPORT_START 0x6d70 - #define EVERGREEN_VIEWPORT_SIZE 0x6d74 -@@ -164,8 +167,12 @@ - #define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0) - - /* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */ -+#define EVERGREEN_CRTC_V_BLANK_START_END 0x6e34 - #define EVERGREEN_CRTC_CONTROL 0x6e70 - # define EVERGREEN_CRTC_MASTER_EN (1 << 0) -+# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) -+#define EVERGREEN_CRTC_STATUS 0x6e8c -+#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 - #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 - - #define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 -diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h -new file mode 100644 -index 0000000..79683f6 ---- /dev/null -+++ b/drivers/gpu/drm/radeon/evergreend.h -@@ -0,0 +1,1020 @@ -+/* -+ * Copyright 2010 Advanced Micro Devices, Inc. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -+ * OTHER DEALINGS IN THE SOFTWARE. -+ * -+ * Authors: Alex Deucher -+ */ -+#ifndef EVERGREEND_H -+#define EVERGREEND_H -+ -+#define EVERGREEN_MAX_SH_GPRS 256 -+#define EVERGREEN_MAX_TEMP_GPRS 16 -+#define EVERGREEN_MAX_SH_THREADS 256 -+#define EVERGREEN_MAX_SH_STACK_ENTRIES 4096 -+#define EVERGREEN_MAX_FRC_EOV_CNT 16384 -+#define EVERGREEN_MAX_BACKENDS 8 -+#define EVERGREEN_MAX_BACKENDS_MASK 0xFF -+#define EVERGREEN_MAX_SIMDS 16 -+#define EVERGREEN_MAX_SIMDS_MASK 0xFFFF -+#define EVERGREEN_MAX_PIPES 8 -+#define EVERGREEN_MAX_PIPES_MASK 0xFF -+#define EVERGREEN_MAX_LDS_NUM 0xFFFF -+ -+/* Registers */ -+ -+#define RCU_IND_INDEX 0x100 -+#define RCU_IND_DATA 0x104 -+ -+#define GRBM_GFX_INDEX 0x802C -+#define INSTANCE_INDEX(x) ((x) << 0) -+#define SE_INDEX(x) ((x) << 16) -+#define INSTANCE_BROADCAST_WRITES (1 << 30) -+#define SE_BROADCAST_WRITES (1 << 31) -+#define RLC_GFX_INDEX 0x3fC4 -+#define CC_GC_SHADER_PIPE_CONFIG 0x8950 -+#define WRITE_DIS (1 << 0) -+#define CC_RB_BACKEND_DISABLE 0x98F4 -+#define BACKEND_DISABLE(x) ((x) << 16) -+#define GB_ADDR_CONFIG 0x98F8 -+#define NUM_PIPES(x) ((x) << 0) -+#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4) -+#define BANK_INTERLEAVE_SIZE(x) ((x) << 8) -+#define NUM_SHADER_ENGINES(x) ((x) << 12) -+#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16) -+#define NUM_GPUS(x) ((x) << 20) -+#define MULTI_GPU_TILE_SIZE(x) ((x) << 24) -+#define ROW_SIZE(x) ((x) << 28) -+#define GB_BACKEND_MAP 0x98FC -+#define DMIF_ADDR_CONFIG 0xBD4 -+#define HDP_ADDR_CONFIG 0x2F48 -+ -+#define CC_SYS_RB_BACKEND_DISABLE 0x3F88 -+#define GC_USER_RB_BACKEND_DISABLE 0x9B7C -+ -+#define CGTS_SYS_TCC_DISABLE 0x3F90 -+#define CGTS_TCC_DISABLE 0x9148 -+#define CGTS_USER_SYS_TCC_DISABLE 0x3F94 -+#define CGTS_USER_TCC_DISABLE 0x914C -+ -+#define CONFIG_MEMSIZE 0x5428 -+ -+#define CP_ME_CNTL 0x86D8 -+#define CP_ME_HALT (1 << 28) -+#define CP_PFP_HALT (1 << 26) -+#define CP_ME_RAM_DATA 0xC160 -+#define CP_ME_RAM_RADDR 0xC158 -+#define CP_ME_RAM_WADDR 0xC15C -+#define CP_MEQ_THRESHOLDS 0x8764 -+#define STQ_SPLIT(x) ((x) << 0) -+#define CP_PERFMON_CNTL 0x87FC -+#define CP_PFP_UCODE_ADDR 0xC150 -+#define CP_PFP_UCODE_DATA 0xC154 -+#define CP_QUEUE_THRESHOLDS 0x8760 -+#define ROQ_IB1_START(x) ((x) << 0) -+#define ROQ_IB2_START(x) ((x) << 8) -+#define CP_RB_BASE 0xC100 -+#define CP_RB_CNTL 0xC104 -+#define RB_BUFSZ(x) ((x) << 0) -+#define RB_BLKSZ(x) ((x) << 8) -+#define RB_NO_UPDATE (1 << 27) -+#define RB_RPTR_WR_ENA (1 << 31) -+#define 
BUF_SWAP_32BIT (2 << 16) -+#define CP_RB_RPTR 0x8700 -+#define CP_RB_RPTR_ADDR 0xC10C -+#define CP_RB_RPTR_ADDR_HI 0xC110 -+#define CP_RB_RPTR_WR 0xC108 -+#define CP_RB_WPTR 0xC114 -+#define CP_RB_WPTR_ADDR 0xC118 -+#define CP_RB_WPTR_ADDR_HI 0xC11C -+#define CP_RB_WPTR_DELAY 0x8704 -+#define CP_SEM_WAIT_TIMER 0x85BC -+#define CP_DEBUG 0xC1FC -+ -+ -+#define GC_USER_SHADER_PIPE_CONFIG 0x8954 -+#define INACTIVE_QD_PIPES(x) ((x) << 8) -+#define INACTIVE_QD_PIPES_MASK 0x0000FF00 -+#define INACTIVE_SIMDS(x) ((x) << 16) -+#define INACTIVE_SIMDS_MASK 0x00FF0000 -+ -+#define GRBM_CNTL 0x8000 -+#define GRBM_READ_TIMEOUT(x) ((x) << 0) -+#define GRBM_SOFT_RESET 0x8020 -+#define SOFT_RESET_CP (1 << 0) -+#define SOFT_RESET_CB (1 << 1) -+#define SOFT_RESET_DB (1 << 3) -+#define SOFT_RESET_PA (1 << 5) -+#define SOFT_RESET_SC (1 << 6) -+#define SOFT_RESET_SPI (1 << 8) -+#define SOFT_RESET_SH (1 << 9) -+#define SOFT_RESET_SX (1 << 10) -+#define SOFT_RESET_TC (1 << 11) -+#define SOFT_RESET_TA (1 << 12) -+#define SOFT_RESET_VC (1 << 13) -+#define SOFT_RESET_VGT (1 << 14) -+ -+#define GRBM_STATUS 0x8010 -+#define CMDFIFO_AVAIL_MASK 0x0000000F -+#define SRBM_RQ_PENDING (1 << 5) -+#define CF_RQ_PENDING (1 << 7) -+#define PF_RQ_PENDING (1 << 8) -+#define GRBM_EE_BUSY (1 << 10) -+#define SX_CLEAN (1 << 11) -+#define DB_CLEAN (1 << 12) -+#define CB_CLEAN (1 << 13) -+#define TA_BUSY (1 << 14) -+#define VGT_BUSY_NO_DMA (1 << 16) -+#define VGT_BUSY (1 << 17) -+#define SX_BUSY (1 << 20) -+#define SH_BUSY (1 << 21) -+#define SPI_BUSY (1 << 22) -+#define SC_BUSY (1 << 24) -+#define PA_BUSY (1 << 25) -+#define DB_BUSY (1 << 26) -+#define CP_COHERENCY_BUSY (1 << 28) -+#define CP_BUSY (1 << 29) -+#define CB_BUSY (1 << 30) -+#define GUI_ACTIVE (1 << 31) -+#define GRBM_STATUS_SE0 0x8014 -+#define GRBM_STATUS_SE1 0x8018 -+#define SE_SX_CLEAN (1 << 0) -+#define SE_DB_CLEAN (1 << 1) -+#define SE_CB_CLEAN (1 << 2) -+#define SE_TA_BUSY (1 << 25) -+#define SE_SX_BUSY (1 << 26) -+#define SE_SPI_BUSY (1 << 27) -+#define SE_SH_BUSY (1 << 28) -+#define SE_SC_BUSY (1 << 29) -+#define SE_DB_BUSY (1 << 30) -+#define SE_CB_BUSY (1 << 31) -+ -+#define HDP_HOST_PATH_CNTL 0x2C00 -+#define HDP_NONSURFACE_BASE 0x2C04 -+#define HDP_NONSURFACE_INFO 0x2C08 -+#define HDP_NONSURFACE_SIZE 0x2C0C -+#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 -+#define HDP_TILING_CONFIG 0x2F3C -+ -+#define MC_SHARED_CHMAP 0x2004 -+#define NOOFCHAN_SHIFT 12 -+#define NOOFCHAN_MASK 0x00003000 -+ -+#define MC_ARB_RAMCFG 0x2760 -+#define NOOFBANK_SHIFT 0 -+#define NOOFBANK_MASK 0x00000003 -+#define NOOFRANK_SHIFT 2 -+#define NOOFRANK_MASK 0x00000004 -+#define NOOFROWS_SHIFT 3 -+#define NOOFROWS_MASK 0x00000038 -+#define NOOFCOLS_SHIFT 6 -+#define NOOFCOLS_MASK 0x000000C0 -+#define CHANSIZE_SHIFT 8 -+#define CHANSIZE_MASK 0x00000100 -+#define BURSTLENGTH_SHIFT 9 -+#define BURSTLENGTH_MASK 0x00000200 -+#define CHANSIZE_OVERRIDE (1 << 11) -+#define MC_VM_AGP_TOP 0x2028 -+#define MC_VM_AGP_BOT 0x202C -+#define MC_VM_AGP_BASE 0x2030 -+#define MC_VM_FB_LOCATION 0x2024 -+#define MC_VM_MB_L1_TLB0_CNTL 0x2234 -+#define MC_VM_MB_L1_TLB1_CNTL 0x2238 -+#define MC_VM_MB_L1_TLB2_CNTL 0x223C -+#define MC_VM_MB_L1_TLB3_CNTL 0x2240 -+#define ENABLE_L1_TLB (1 << 0) -+#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1) -+#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3) -+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3) -+#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3) -+#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3) -+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5) -+#define 
EFFECTIVE_L1_TLB_SIZE(x) ((x)<<15) -+#define EFFECTIVE_L1_QUEUE_SIZE(x) ((x)<<18) -+#define MC_VM_MD_L1_TLB0_CNTL 0x2654 -+#define MC_VM_MD_L1_TLB1_CNTL 0x2658 -+#define MC_VM_MD_L1_TLB2_CNTL 0x265C -+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C -+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 -+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 -+ -+#define PA_CL_ENHANCE 0x8A14 -+#define CLIP_VTX_REORDER_ENA (1 << 0) -+#define NUM_CLIP_SEQ(x) ((x) << 1) -+#define PA_SC_AA_CONFIG 0x28C04 -+#define MSAA_NUM_SAMPLES_SHIFT 0 -+#define MSAA_NUM_SAMPLES_MASK 0x3 -+#define PA_SC_CLIPRECT_RULE 0x2820C -+#define PA_SC_EDGERULE 0x28230 -+#define PA_SC_FIFO_SIZE 0x8BCC -+#define SC_PRIM_FIFO_SIZE(x) ((x) << 0) -+#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12) -+#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20) -+#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24 -+#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) -+#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) -+#define PA_SC_LINE_STIPPLE 0x28A0C -+#define PA_SC_LINE_STIPPLE_STATE 0x8B10 -+ -+#define SCRATCH_REG0 0x8500 -+#define SCRATCH_REG1 0x8504 -+#define SCRATCH_REG2 0x8508 -+#define SCRATCH_REG3 0x850C -+#define SCRATCH_REG4 0x8510 -+#define SCRATCH_REG5 0x8514 -+#define SCRATCH_REG6 0x8518 -+#define SCRATCH_REG7 0x851C -+#define SCRATCH_UMSK 0x8540 -+#define SCRATCH_ADDR 0x8544 -+ -+#define SMX_DC_CTL0 0xA020 -+#define USE_HASH_FUNCTION (1 << 0) -+#define NUMBER_OF_SETS(x) ((x) << 1) -+#define FLUSH_ALL_ON_EVENT (1 << 10) -+#define STALL_ON_EVENT (1 << 11) -+#define SMX_EVENT_CTL 0xA02C -+#define ES_FLUSH_CTL(x) ((x) << 0) -+#define GS_FLUSH_CTL(x) ((x) << 3) -+#define ACK_FLUSH_CTL(x) ((x) << 6) -+#define SYNC_FLUSH_CTL (1 << 8) -+ -+#define SPI_CONFIG_CNTL 0x9100 -+#define GPR_WRITE_PRIORITY(x) ((x) << 0) -+#define SPI_CONFIG_CNTL_1 0x913C -+#define VTX_DONE_DELAY(x) ((x) << 0) -+#define INTERP_ONE_PRIM_PER_ROW (1 << 4) -+#define SPI_INPUT_Z 0x286D8 -+#define SPI_PS_IN_CONTROL_0 0x286CC -+#define NUM_INTERP(x) ((x)<<0) -+#define POSITION_ENA (1<<8) -+#define POSITION_CENTROID (1<<9) -+#define POSITION_ADDR(x) ((x)<<10) -+#define PARAM_GEN(x) ((x)<<15) -+#define PARAM_GEN_ADDR(x) ((x)<<19) -+#define BARYC_SAMPLE_CNTL(x) ((x)<<26) -+#define PERSP_GRADIENT_ENA (1<<28) -+#define LINEAR_GRADIENT_ENA (1<<29) -+#define POSITION_SAMPLE (1<<30) -+#define BARYC_AT_SAMPLE_ENA (1<<31) -+ -+#define SQ_CONFIG 0x8C00 -+#define VC_ENABLE (1 << 0) -+#define EXPORT_SRC_C (1 << 1) -+#define CS_PRIO(x) ((x) << 18) -+#define LS_PRIO(x) ((x) << 20) -+#define HS_PRIO(x) ((x) << 22) -+#define PS_PRIO(x) ((x) << 24) -+#define VS_PRIO(x) ((x) << 26) -+#define GS_PRIO(x) ((x) << 28) -+#define ES_PRIO(x) ((x) << 30) -+#define SQ_GPR_RESOURCE_MGMT_1 0x8C04 -+#define NUM_PS_GPRS(x) ((x) << 0) -+#define NUM_VS_GPRS(x) ((x) << 16) -+#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28) -+#define SQ_GPR_RESOURCE_MGMT_2 0x8C08 -+#define NUM_GS_GPRS(x) ((x) << 0) -+#define NUM_ES_GPRS(x) ((x) << 16) -+#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C -+#define NUM_HS_GPRS(x) ((x) << 0) -+#define NUM_LS_GPRS(x) ((x) << 16) -+#define SQ_THREAD_RESOURCE_MGMT 0x8C18 -+#define NUM_PS_THREADS(x) ((x) << 0) -+#define NUM_VS_THREADS(x) ((x) << 8) -+#define NUM_GS_THREADS(x) ((x) << 16) -+#define NUM_ES_THREADS(x) ((x) << 24) -+#define SQ_THREAD_RESOURCE_MGMT_2 0x8C1C -+#define NUM_HS_THREADS(x) ((x) << 0) -+#define NUM_LS_THREADS(x) ((x) << 8) -+#define SQ_STACK_RESOURCE_MGMT_1 0x8C20 -+#define NUM_PS_STACK_ENTRIES(x) ((x) << 0) -+#define NUM_VS_STACK_ENTRIES(x) ((x) << 16) -+#define SQ_STACK_RESOURCE_MGMT_2 0x8C24 -+#define 
NUM_GS_STACK_ENTRIES(x) ((x) << 0) -+#define NUM_ES_STACK_ENTRIES(x) ((x) << 16) -+#define SQ_STACK_RESOURCE_MGMT_3 0x8C28 -+#define NUM_HS_STACK_ENTRIES(x) ((x) << 0) -+#define NUM_LS_STACK_ENTRIES(x) ((x) << 16) -+#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C -+#define SQ_LDS_RESOURCE_MGMT 0x8E2C -+ -+#define SQ_MS_FIFO_SIZES 0x8CF0 -+#define CACHE_FIFO_SIZE(x) ((x) << 0) -+#define FETCH_FIFO_HIWATER(x) ((x) << 8) -+#define DONE_FIFO_HIWATER(x) ((x) << 16) -+#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24) -+ -+#define SX_DEBUG_1 0x9058 -+#define ENABLE_NEW_SMX_ADDRESS (1 << 16) -+#define SX_EXPORT_BUFFER_SIZES 0x900C -+#define COLOR_BUFFER_SIZE(x) ((x) << 0) -+#define POSITION_BUFFER_SIZE(x) ((x) << 8) -+#define SMX_BUFFER_SIZE(x) ((x) << 16) -+#define SX_MISC 0x28350 -+ -+#define CB_PERF_CTR0_SEL_0 0x9A20 -+#define CB_PERF_CTR0_SEL_1 0x9A24 -+#define CB_PERF_CTR1_SEL_0 0x9A28 -+#define CB_PERF_CTR1_SEL_1 0x9A2C -+#define CB_PERF_CTR2_SEL_0 0x9A30 -+#define CB_PERF_CTR2_SEL_1 0x9A34 -+#define CB_PERF_CTR3_SEL_0 0x9A38 -+#define CB_PERF_CTR3_SEL_1 0x9A3C -+ -+#define TA_CNTL_AUX 0x9508 -+#define DISABLE_CUBE_WRAP (1 << 0) -+#define DISABLE_CUBE_ANISO (1 << 1) -+#define SYNC_GRADIENT (1 << 24) -+#define SYNC_WALKER (1 << 25) -+#define SYNC_ALIGNER (1 << 26) -+ -+#define VGT_CACHE_INVALIDATION 0x88C4 -+#define CACHE_INVALIDATION(x) ((x) << 0) -+#define VC_ONLY 0 -+#define TC_ONLY 1 -+#define VC_AND_TC 2 -+#define AUTO_INVLD_EN(x) ((x) << 6) -+#define NO_AUTO 0 -+#define ES_AUTO 1 -+#define GS_AUTO 2 -+#define ES_AND_GS_AUTO 3 -+#define VGT_GS_VERTEX_REUSE 0x88D4 -+#define VGT_NUM_INSTANCES 0x8974 -+#define VGT_OUT_DEALLOC_CNTL 0x28C5C -+#define DEALLOC_DIST_MASK 0x0000007F -+#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58 -+#define VTX_REUSE_DEPTH_MASK 0x000000FF -+ -+#define VM_CONTEXT0_CNTL 0x1410 -+#define ENABLE_CONTEXT (1 << 0) -+#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1) -+#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4) -+#define VM_CONTEXT1_CNTL 0x1414 -+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C -+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C -+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C -+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518 -+#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470 -+#define REQUEST_TYPE(x) (((x) & 0xf) << 0) -+#define RESPONSE_TYPE_MASK 0x000000F0 -+#define RESPONSE_TYPE_SHIFT 4 -+#define VM_L2_CNTL 0x1400 -+#define ENABLE_L2_CACHE (1 << 0) -+#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1) -+#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9) -+#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14) -+#define VM_L2_CNTL2 0x1404 -+#define INVALIDATE_ALL_L1_TLBS (1 << 0) -+#define INVALIDATE_L2_CACHE (1 << 1) -+#define VM_L2_CNTL3 0x1408 -+#define BANK_SELECT(x) ((x) << 0) -+#define CACHE_UPDATE_MODE(x) ((x) << 6) -+#define VM_L2_STATUS 0x140C -+#define L2_BUSY (1 << 0) -+ -+#define WAIT_UNTIL 0x8040 -+ -+#define SRBM_STATUS 0x0E50 -+#define SRBM_SOFT_RESET 0x0E60 -+#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6 -+#define SOFT_RESET_BIF (1 << 1) -+#define SOFT_RESET_CG (1 << 2) -+#define SOFT_RESET_DC (1 << 5) -+#define SOFT_RESET_GRBM (1 << 8) -+#define SOFT_RESET_HDP (1 << 9) -+#define SOFT_RESET_IH (1 << 10) -+#define SOFT_RESET_MC (1 << 11) -+#define SOFT_RESET_RLC (1 << 13) -+#define SOFT_RESET_ROM (1 << 14) -+#define SOFT_RESET_SEM (1 << 15) -+#define SOFT_RESET_VMC (1 << 17) -+#define SOFT_RESET_TST (1 << 21) -+#define SOFT_RESET_REGBB (1 << 22) -+#define SOFT_RESET_ORB (1 << 23) -+ -+#define IH_RB_CNTL 0x3e00 -+# define IH_RB_ENABLE (1 << 
0) -+# define IH_IB_SIZE(x) ((x) << 1) /* log2 */ -+# define IH_RB_FULL_DRAIN_ENABLE (1 << 6) -+# define IH_WPTR_WRITEBACK_ENABLE (1 << 8) -+# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */ -+# define IH_WPTR_OVERFLOW_ENABLE (1 << 16) -+# define IH_WPTR_OVERFLOW_CLEAR (1 << 31) -+#define IH_RB_BASE 0x3e04 -+#define IH_RB_RPTR 0x3e08 -+#define IH_RB_WPTR 0x3e0c -+# define RB_OVERFLOW (1 << 0) -+# define WPTR_OFFSET_MASK 0x3fffc -+#define IH_RB_WPTR_ADDR_HI 0x3e10 -+#define IH_RB_WPTR_ADDR_LO 0x3e14 -+#define IH_CNTL 0x3e18 -+# define ENABLE_INTR (1 << 0) -+# define IH_MC_SWAP(x) ((x) << 2) -+# define IH_MC_SWAP_NONE 0 -+# define IH_MC_SWAP_16BIT 1 -+# define IH_MC_SWAP_32BIT 2 -+# define IH_MC_SWAP_64BIT 3 -+# define RPTR_REARM (1 << 4) -+# define MC_WRREQ_CREDIT(x) ((x) << 15) -+# define MC_WR_CLEAN_CNT(x) ((x) << 20) -+ -+#define CP_INT_CNTL 0xc124 -+# define CNTX_BUSY_INT_ENABLE (1 << 19) -+# define CNTX_EMPTY_INT_ENABLE (1 << 20) -+# define SCRATCH_INT_ENABLE (1 << 25) -+# define TIME_STAMP_INT_ENABLE (1 << 26) -+# define IB2_INT_ENABLE (1 << 29) -+# define IB1_INT_ENABLE (1 << 30) -+# define RB_INT_ENABLE (1 << 31) -+#define CP_INT_STATUS 0xc128 -+# define SCRATCH_INT_STAT (1 << 25) -+# define TIME_STAMP_INT_STAT (1 << 26) -+# define IB2_INT_STAT (1 << 29) -+# define IB1_INT_STAT (1 << 30) -+# define RB_INT_STAT (1 << 31) -+ -+#define GRBM_INT_CNTL 0x8060 -+# define RDERR_INT_ENABLE (1 << 0) -+# define GUI_IDLE_INT_ENABLE (1 << 19) -+ -+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */ -+#define CRTC_STATUS_FRAME_COUNT 0x6e98 -+ -+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */ -+#define VLINE_STATUS 0x6bb8 -+# define VLINE_OCCURRED (1 << 0) -+# define VLINE_ACK (1 << 4) -+# define VLINE_STAT (1 << 12) -+# define VLINE_INTERRUPT (1 << 16) -+# define VLINE_INTERRUPT_TYPE (1 << 17) -+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */ -+#define VBLANK_STATUS 0x6bbc -+# define VBLANK_OCCURRED (1 << 0) -+# define VBLANK_ACK (1 << 4) -+# define VBLANK_STAT (1 << 12) -+# define VBLANK_INTERRUPT (1 << 16) -+# define VBLANK_INTERRUPT_TYPE (1 << 17) -+ -+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */ -+#define INT_MASK 0x6b40 -+# define VBLANK_INT_MASK (1 << 0) -+# define VLINE_INT_MASK (1 << 4) -+ -+#define DISP_INTERRUPT_STATUS 0x60f4 -+# define LB_D1_VLINE_INTERRUPT (1 << 2) -+# define LB_D1_VBLANK_INTERRUPT (1 << 3) -+# define DC_HPD1_INTERRUPT (1 << 17) -+# define DC_HPD1_RX_INTERRUPT (1 << 18) -+# define DACA_AUTODETECT_INTERRUPT (1 << 22) -+# define DACB_AUTODETECT_INTERRUPT (1 << 23) -+# define DC_I2C_SW_DONE_INTERRUPT (1 << 24) -+# define DC_I2C_HW_DONE_INTERRUPT (1 << 25) -+#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8 -+# define LB_D2_VLINE_INTERRUPT (1 << 2) -+# define LB_D2_VBLANK_INTERRUPT (1 << 3) -+# define DC_HPD2_INTERRUPT (1 << 17) -+# define DC_HPD2_RX_INTERRUPT (1 << 18) -+# define DISP_TIMER_INTERRUPT (1 << 24) -+#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc -+# define LB_D3_VLINE_INTERRUPT (1 << 2) -+# define LB_D3_VBLANK_INTERRUPT (1 << 3) -+# define DC_HPD3_INTERRUPT (1 << 17) -+# define DC_HPD3_RX_INTERRUPT (1 << 18) -+#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100 -+# define LB_D4_VLINE_INTERRUPT (1 << 2) -+# define LB_D4_VBLANK_INTERRUPT (1 << 3) -+# define DC_HPD4_INTERRUPT (1 << 17) -+# define DC_HPD4_RX_INTERRUPT (1 << 18) -+#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c -+# define LB_D5_VLINE_INTERRUPT (1 << 2) -+# define LB_D5_VBLANK_INTERRUPT (1 << 3) -+# define DC_HPD5_INTERRUPT (1 << 17) -+# define DC_HPD5_RX_INTERRUPT 
(1 << 18) -+#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6050 -+# define LB_D6_VLINE_INTERRUPT (1 << 2) -+# define LB_D6_VBLANK_INTERRUPT (1 << 3) -+# define DC_HPD6_INTERRUPT (1 << 17) -+# define DC_HPD6_RX_INTERRUPT (1 << 18) -+ -+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */ -+#define GRPH_INT_STATUS 0x6858 -+# define GRPH_PFLIP_INT_OCCURRED (1 << 0) -+# define GRPH_PFLIP_INT_CLEAR (1 << 8) -+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */ -+#define GRPH_INT_CONTROL 0x685c -+# define GRPH_PFLIP_INT_MASK (1 << 0) -+# define GRPH_PFLIP_INT_TYPE (1 << 8) -+ -+#define DACA_AUTODETECT_INT_CONTROL 0x66c8 -+#define DACB_AUTODETECT_INT_CONTROL 0x67c8 -+ -+#define DC_HPD1_INT_STATUS 0x601c -+#define DC_HPD2_INT_STATUS 0x6028 -+#define DC_HPD3_INT_STATUS 0x6034 -+#define DC_HPD4_INT_STATUS 0x6040 -+#define DC_HPD5_INT_STATUS 0x604c -+#define DC_HPD6_INT_STATUS 0x6058 -+# define DC_HPDx_INT_STATUS (1 << 0) -+# define DC_HPDx_SENSE (1 << 1) -+# define DC_HPDx_RX_INT_STATUS (1 << 8) -+ -+#define DC_HPD1_INT_CONTROL 0x6020 -+#define DC_HPD2_INT_CONTROL 0x602c -+#define DC_HPD3_INT_CONTROL 0x6038 -+#define DC_HPD4_INT_CONTROL 0x6044 -+#define DC_HPD5_INT_CONTROL 0x6050 -+#define DC_HPD6_INT_CONTROL 0x605c -+# define DC_HPDx_INT_ACK (1 << 0) -+# define DC_HPDx_INT_POLARITY (1 << 8) -+# define DC_HPDx_INT_EN (1 << 16) -+# define DC_HPDx_RX_INT_ACK (1 << 20) -+# define DC_HPDx_RX_INT_EN (1 << 24) -+ -+#define DC_HPD1_CONTROL 0x6024 -+#define DC_HPD2_CONTROL 0x6030 -+#define DC_HPD3_CONTROL 0x603c -+#define DC_HPD4_CONTROL 0x6048 -+#define DC_HPD5_CONTROL 0x6054 -+#define DC_HPD6_CONTROL 0x6060 -+# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0) -+# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) -+# define DC_HPDx_EN (1 << 28) -+ -+/* -+ * PM4 -+ */ -+#define PACKET_TYPE0 0 -+#define PACKET_TYPE1 1 -+#define PACKET_TYPE2 2 -+#define PACKET_TYPE3 3 -+ -+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) -+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) -+#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2) -+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) -+#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ -+ (((reg) >> 2) & 0xFFFF) | \ -+ ((n) & 0x3FFF) << 16) -+#define CP_PACKET2 0x80000000 -+#define PACKET2_PAD_SHIFT 0 -+#define PACKET2_PAD_MASK (0x3fffffff << 0) -+ -+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v))) -+ -+#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ -+ (((op) & 0xFF) << 8) | \ -+ ((n) & 0x3FFF) << 16) -+ -+/* Packet 3 types */ -+#define PACKET3_NOP 0x10 -+#define PACKET3_SET_BASE 0x11 -+#define PACKET3_CLEAR_STATE 0x12 -+#define PACKET3_INDIRECT_BUFFER_SIZE 0x13 -+#define PACKET3_DISPATCH_DIRECT 0x15 -+#define PACKET3_DISPATCH_INDIRECT 0x16 -+#define PACKET3_INDIRECT_BUFFER_END 0x17 -+#define PACKET3_SET_PREDICATION 0x20 -+#define PACKET3_REG_RMW 0x21 -+#define PACKET3_COND_EXEC 0x22 -+#define PACKET3_PRED_EXEC 0x23 -+#define PACKET3_DRAW_INDIRECT 0x24 -+#define PACKET3_DRAW_INDEX_INDIRECT 0x25 -+#define PACKET3_INDEX_BASE 0x26 -+#define PACKET3_DRAW_INDEX_2 0x27 -+#define PACKET3_CONTEXT_CONTROL 0x28 -+#define PACKET3_DRAW_INDEX_OFFSET 0x29 -+#define PACKET3_INDEX_TYPE 0x2A -+#define PACKET3_DRAW_INDEX 0x2B -+#define PACKET3_DRAW_INDEX_AUTO 0x2D -+#define PACKET3_DRAW_INDEX_IMMD 0x2E -+#define PACKET3_NUM_INSTANCES 0x2F -+#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30 -+#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34 -+#define PACKET3_DRAW_INDEX_OFFSET_2 0x35 -+#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36 -+#define PACKET3_MEM_SEMAPHORE 0x39 -+#define 
PACKET3_MPEG_INDEX 0x3A -+#define PACKET3_WAIT_REG_MEM 0x3C -+#define PACKET3_MEM_WRITE 0x3D -+#define PACKET3_INDIRECT_BUFFER 0x32 -+#define PACKET3_SURFACE_SYNC 0x43 -+# define PACKET3_CB0_DEST_BASE_ENA (1 << 6) -+# define PACKET3_CB1_DEST_BASE_ENA (1 << 7) -+# define PACKET3_CB2_DEST_BASE_ENA (1 << 8) -+# define PACKET3_CB3_DEST_BASE_ENA (1 << 9) -+# define PACKET3_CB4_DEST_BASE_ENA (1 << 10) -+# define PACKET3_CB5_DEST_BASE_ENA (1 << 11) -+# define PACKET3_CB6_DEST_BASE_ENA (1 << 12) -+# define PACKET3_CB7_DEST_BASE_ENA (1 << 13) -+# define PACKET3_DB_DEST_BASE_ENA (1 << 14) -+# define PACKET3_CB8_DEST_BASE_ENA (1 << 15) -+# define PACKET3_CB9_DEST_BASE_ENA (1 << 16) -+# define PACKET3_CB10_DEST_BASE_ENA (1 << 17) -+# define PACKET3_CB11_DEST_BASE_ENA (1 << 17) -+# define PACKET3_FULL_CACHE_ENA (1 << 20) -+# define PACKET3_TC_ACTION_ENA (1 << 23) -+# define PACKET3_VC_ACTION_ENA (1 << 24) -+# define PACKET3_CB_ACTION_ENA (1 << 25) -+# define PACKET3_DB_ACTION_ENA (1 << 26) -+# define PACKET3_SH_ACTION_ENA (1 << 27) -+# define PACKET3_SMX_ACTION_ENA (1 << 28) -+#define PACKET3_ME_INITIALIZE 0x44 -+#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) -+#define PACKET3_COND_WRITE 0x45 -+#define PACKET3_EVENT_WRITE 0x46 -+#define PACKET3_EVENT_WRITE_EOP 0x47 -+#define PACKET3_EVENT_WRITE_EOS 0x48 -+#define PACKET3_PREAMBLE_CNTL 0x4A -+#define PACKET3_RB_OFFSET 0x4B -+#define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C -+#define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D -+#define PACKET3_ALU_PS_CONST_UPDATE 0x4E -+#define PACKET3_ALU_VS_CONST_UPDATE 0x4F -+#define PACKET3_ONE_REG_WRITE 0x57 -+#define PACKET3_SET_CONFIG_REG 0x68 -+#define PACKET3_SET_CONFIG_REG_START 0x00008000 -+#define PACKET3_SET_CONFIG_REG_END 0x0000ac00 -+#define PACKET3_SET_CONTEXT_REG 0x69 -+#define PACKET3_SET_CONTEXT_REG_START 0x00028000 -+#define PACKET3_SET_CONTEXT_REG_END 0x00029000 -+#define PACKET3_SET_ALU_CONST 0x6A -+/* alu const buffers only; no reg file */ -+#define PACKET3_SET_BOOL_CONST 0x6B -+#define PACKET3_SET_BOOL_CONST_START 0x0003a500 -+#define PACKET3_SET_BOOL_CONST_END 0x0003a518 -+#define PACKET3_SET_LOOP_CONST 0x6C -+#define PACKET3_SET_LOOP_CONST_START 0x0003a200 -+#define PACKET3_SET_LOOP_CONST_END 0x0003a500 -+#define PACKET3_SET_RESOURCE 0x6D -+#define PACKET3_SET_RESOURCE_START 0x00030000 -+#define PACKET3_SET_RESOURCE_END 0x00038000 -+#define PACKET3_SET_SAMPLER 0x6E -+#define PACKET3_SET_SAMPLER_START 0x0003c000 -+#define PACKET3_SET_SAMPLER_END 0x0003c600 -+#define PACKET3_SET_CTL_CONST 0x6F -+#define PACKET3_SET_CTL_CONST_START 0x0003cff0 -+#define PACKET3_SET_CTL_CONST_END 0x0003ff0c -+#define PACKET3_SET_RESOURCE_OFFSET 0x70 -+#define PACKET3_SET_ALU_CONST_VS 0x71 -+#define PACKET3_SET_ALU_CONST_DI 0x72 -+#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73 -+#define PACKET3_SET_RESOURCE_INDIRECT 0x74 -+#define PACKET3_SET_APPEND_CNT 0x75 -+ -+#define SQ_RESOURCE_CONSTANT_WORD7_0 0x3001c -+#define S__SQ_CONSTANT_TYPE(x) (((x) & 3) << 30) -+#define G__SQ_CONSTANT_TYPE(x) (((x) >> 30) & 3) -+#define SQ_TEX_VTX_INVALID_TEXTURE 0x0 -+#define SQ_TEX_VTX_INVALID_BUFFER 0x1 -+#define SQ_TEX_VTX_VALID_TEXTURE 0x2 -+#define SQ_TEX_VTX_VALID_BUFFER 0x3 -+ -+#define SQ_CONST_MEM_BASE 0x8df8 -+ -+#define SQ_ESGS_RING_SIZE 0x8c44 -+#define SQ_GSVS_RING_SIZE 0x8c4c -+#define SQ_ESTMP_RING_SIZE 0x8c54 -+#define SQ_GSTMP_RING_SIZE 0x8c5c -+#define SQ_VSTMP_RING_SIZE 0x8c64 -+#define SQ_PSTMP_RING_SIZE 0x8c6c -+#define SQ_LSTMP_RING_SIZE 0x8e14 -+#define SQ_HSTMP_RING_SIZE 0x8e1c -+#define VGT_TF_RING_SIZE 0x8988 -+ 
-+#define SQ_ESGS_RING_ITEMSIZE 0x28900 -+#define SQ_GSVS_RING_ITEMSIZE 0x28904 -+#define SQ_ESTMP_RING_ITEMSIZE 0x28908 -+#define SQ_GSTMP_RING_ITEMSIZE 0x2890c -+#define SQ_VSTMP_RING_ITEMSIZE 0x28910 -+#define SQ_PSTMP_RING_ITEMSIZE 0x28914 -+#define SQ_LSTMP_RING_ITEMSIZE 0x28830 -+#define SQ_HSTMP_RING_ITEMSIZE 0x28834 -+ -+#define SQ_GS_VERT_ITEMSIZE 0x2891c -+#define SQ_GS_VERT_ITEMSIZE_1 0x28920 -+#define SQ_GS_VERT_ITEMSIZE_2 0x28924 -+#define SQ_GS_VERT_ITEMSIZE_3 0x28928 -+#define SQ_GSVS_RING_OFFSET_1 0x2892c -+#define SQ_GSVS_RING_OFFSET_2 0x28930 -+#define SQ_GSVS_RING_OFFSET_3 0x28934 -+ -+#define SQ_ALU_CONST_CACHE_PS_0 0x28940 -+#define SQ_ALU_CONST_CACHE_PS_1 0x28944 -+#define SQ_ALU_CONST_CACHE_PS_2 0x28948 -+#define SQ_ALU_CONST_CACHE_PS_3 0x2894c -+#define SQ_ALU_CONST_CACHE_PS_4 0x28950 -+#define SQ_ALU_CONST_CACHE_PS_5 0x28954 -+#define SQ_ALU_CONST_CACHE_PS_6 0x28958 -+#define SQ_ALU_CONST_CACHE_PS_7 0x2895c -+#define SQ_ALU_CONST_CACHE_PS_8 0x28960 -+#define SQ_ALU_CONST_CACHE_PS_9 0x28964 -+#define SQ_ALU_CONST_CACHE_PS_10 0x28968 -+#define SQ_ALU_CONST_CACHE_PS_11 0x2896c -+#define SQ_ALU_CONST_CACHE_PS_12 0x28970 -+#define SQ_ALU_CONST_CACHE_PS_13 0x28974 -+#define SQ_ALU_CONST_CACHE_PS_14 0x28978 -+#define SQ_ALU_CONST_CACHE_PS_15 0x2897c -+#define SQ_ALU_CONST_CACHE_VS_0 0x28980 -+#define SQ_ALU_CONST_CACHE_VS_1 0x28984 -+#define SQ_ALU_CONST_CACHE_VS_2 0x28988 -+#define SQ_ALU_CONST_CACHE_VS_3 0x2898c -+#define SQ_ALU_CONST_CACHE_VS_4 0x28990 -+#define SQ_ALU_CONST_CACHE_VS_5 0x28994 -+#define SQ_ALU_CONST_CACHE_VS_6 0x28998 -+#define SQ_ALU_CONST_CACHE_VS_7 0x2899c -+#define SQ_ALU_CONST_CACHE_VS_8 0x289a0 -+#define SQ_ALU_CONST_CACHE_VS_9 0x289a4 -+#define SQ_ALU_CONST_CACHE_VS_10 0x289a8 -+#define SQ_ALU_CONST_CACHE_VS_11 0x289ac -+#define SQ_ALU_CONST_CACHE_VS_12 0x289b0 -+#define SQ_ALU_CONST_CACHE_VS_13 0x289b4 -+#define SQ_ALU_CONST_CACHE_VS_14 0x289b8 -+#define SQ_ALU_CONST_CACHE_VS_15 0x289bc -+#define SQ_ALU_CONST_CACHE_GS_0 0x289c0 -+#define SQ_ALU_CONST_CACHE_GS_1 0x289c4 -+#define SQ_ALU_CONST_CACHE_GS_2 0x289c8 -+#define SQ_ALU_CONST_CACHE_GS_3 0x289cc -+#define SQ_ALU_CONST_CACHE_GS_4 0x289d0 -+#define SQ_ALU_CONST_CACHE_GS_5 0x289d4 -+#define SQ_ALU_CONST_CACHE_GS_6 0x289d8 -+#define SQ_ALU_CONST_CACHE_GS_7 0x289dc -+#define SQ_ALU_CONST_CACHE_GS_8 0x289e0 -+#define SQ_ALU_CONST_CACHE_GS_9 0x289e4 -+#define SQ_ALU_CONST_CACHE_GS_10 0x289e8 -+#define SQ_ALU_CONST_CACHE_GS_11 0x289ec -+#define SQ_ALU_CONST_CACHE_GS_12 0x289f0 -+#define SQ_ALU_CONST_CACHE_GS_13 0x289f4 -+#define SQ_ALU_CONST_CACHE_GS_14 0x289f8 -+#define SQ_ALU_CONST_CACHE_GS_15 0x289fc -+#define SQ_ALU_CONST_CACHE_HS_0 0x28f00 -+#define SQ_ALU_CONST_CACHE_HS_1 0x28f04 -+#define SQ_ALU_CONST_CACHE_HS_2 0x28f08 -+#define SQ_ALU_CONST_CACHE_HS_3 0x28f0c -+#define SQ_ALU_CONST_CACHE_HS_4 0x28f10 -+#define SQ_ALU_CONST_CACHE_HS_5 0x28f14 -+#define SQ_ALU_CONST_CACHE_HS_6 0x28f18 -+#define SQ_ALU_CONST_CACHE_HS_7 0x28f1c -+#define SQ_ALU_CONST_CACHE_HS_8 0x28f20 -+#define SQ_ALU_CONST_CACHE_HS_9 0x28f24 -+#define SQ_ALU_CONST_CACHE_HS_10 0x28f28 -+#define SQ_ALU_CONST_CACHE_HS_11 0x28f2c -+#define SQ_ALU_CONST_CACHE_HS_12 0x28f30 -+#define SQ_ALU_CONST_CACHE_HS_13 0x28f34 -+#define SQ_ALU_CONST_CACHE_HS_14 0x28f38 -+#define SQ_ALU_CONST_CACHE_HS_15 0x28f3c -+#define SQ_ALU_CONST_CACHE_LS_0 0x28f40 -+#define SQ_ALU_CONST_CACHE_LS_1 0x28f44 -+#define SQ_ALU_CONST_CACHE_LS_2 0x28f48 -+#define SQ_ALU_CONST_CACHE_LS_3 0x28f4c -+#define SQ_ALU_CONST_CACHE_LS_4 0x28f50 -+#define 
SQ_ALU_CONST_CACHE_LS_5 0x28f54 -+#define SQ_ALU_CONST_CACHE_LS_6 0x28f58 -+#define SQ_ALU_CONST_CACHE_LS_7 0x28f5c -+#define SQ_ALU_CONST_CACHE_LS_8 0x28f60 -+#define SQ_ALU_CONST_CACHE_LS_9 0x28f64 -+#define SQ_ALU_CONST_CACHE_LS_10 0x28f68 -+#define SQ_ALU_CONST_CACHE_LS_11 0x28f6c -+#define SQ_ALU_CONST_CACHE_LS_12 0x28f70 -+#define SQ_ALU_CONST_CACHE_LS_13 0x28f74 -+#define SQ_ALU_CONST_CACHE_LS_14 0x28f78 -+#define SQ_ALU_CONST_CACHE_LS_15 0x28f7c -+ -+#define DB_DEPTH_CONTROL 0x28800 -+#define DB_DEPTH_VIEW 0x28008 -+#define DB_HTILE_DATA_BASE 0x28014 -+#define DB_Z_INFO 0x28040 -+# define Z_ARRAY_MODE(x) ((x) << 4) -+#define DB_STENCIL_INFO 0x28044 -+#define DB_Z_READ_BASE 0x28048 -+#define DB_STENCIL_READ_BASE 0x2804c -+#define DB_Z_WRITE_BASE 0x28050 -+#define DB_STENCIL_WRITE_BASE 0x28054 -+#define DB_DEPTH_SIZE 0x28058 -+ -+#define SQ_PGM_START_PS 0x28840 -+#define SQ_PGM_START_VS 0x2885c -+#define SQ_PGM_START_GS 0x28874 -+#define SQ_PGM_START_ES 0x2888c -+#define SQ_PGM_START_FS 0x288a4 -+#define SQ_PGM_START_HS 0x288b8 -+#define SQ_PGM_START_LS 0x288d0 -+ -+#define VGT_STRMOUT_CONFIG 0x28b94 -+#define VGT_STRMOUT_BUFFER_CONFIG 0x28b98 -+ -+#define CB_TARGET_MASK 0x28238 -+#define CB_SHADER_MASK 0x2823c -+ -+#define GDS_ADDR_BASE 0x28720 -+ -+#define CB_IMMED0_BASE 0x28b9c -+#define CB_IMMED1_BASE 0x28ba0 -+#define CB_IMMED2_BASE 0x28ba4 -+#define CB_IMMED3_BASE 0x28ba8 -+#define CB_IMMED4_BASE 0x28bac -+#define CB_IMMED5_BASE 0x28bb0 -+#define CB_IMMED6_BASE 0x28bb4 -+#define CB_IMMED7_BASE 0x28bb8 -+#define CB_IMMED8_BASE 0x28bbc -+#define CB_IMMED9_BASE 0x28bc0 -+#define CB_IMMED10_BASE 0x28bc4 -+#define CB_IMMED11_BASE 0x28bc8 -+ -+/* all 12 CB blocks have these regs */ -+#define CB_COLOR0_BASE 0x28c60 -+#define CB_COLOR0_PITCH 0x28c64 -+#define CB_COLOR0_SLICE 0x28c68 -+#define CB_COLOR0_VIEW 0x28c6c -+#define CB_COLOR0_INFO 0x28c70 -+# define CB_ARRAY_MODE(x) ((x) << 8) -+# define ARRAY_LINEAR_GENERAL 0 -+# define ARRAY_LINEAR_ALIGNED 1 -+# define ARRAY_1D_TILED_THIN1 2 -+# define ARRAY_2D_TILED_THIN1 4 -+#define CB_COLOR0_ATTRIB 0x28c74 -+#define CB_COLOR0_DIM 0x28c78 -+/* only CB0-7 blocks have these regs */ -+#define CB_COLOR0_CMASK 0x28c7c -+#define CB_COLOR0_CMASK_SLICE 0x28c80 -+#define CB_COLOR0_FMASK 0x28c84 -+#define CB_COLOR0_FMASK_SLICE 0x28c88 -+#define CB_COLOR0_CLEAR_WORD0 0x28c8c -+#define CB_COLOR0_CLEAR_WORD1 0x28c90 -+#define CB_COLOR0_CLEAR_WORD2 0x28c94 -+#define CB_COLOR0_CLEAR_WORD3 0x28c98 -+ -+#define CB_COLOR1_BASE 0x28c9c -+#define CB_COLOR2_BASE 0x28cd8 -+#define CB_COLOR3_BASE 0x28d14 -+#define CB_COLOR4_BASE 0x28d50 -+#define CB_COLOR5_BASE 0x28d8c -+#define CB_COLOR6_BASE 0x28dc8 -+#define CB_COLOR7_BASE 0x28e04 -+#define CB_COLOR8_BASE 0x28e40 -+#define CB_COLOR9_BASE 0x28e5c -+#define CB_COLOR10_BASE 0x28e78 -+#define CB_COLOR11_BASE 0x28e94 -+ -+#define CB_COLOR1_PITCH 0x28ca0 -+#define CB_COLOR2_PITCH 0x28cdc -+#define CB_COLOR3_PITCH 0x28d18 -+#define CB_COLOR4_PITCH 0x28d54 -+#define CB_COLOR5_PITCH 0x28d90 -+#define CB_COLOR6_PITCH 0x28dcc -+#define CB_COLOR7_PITCH 0x28e08 -+#define CB_COLOR8_PITCH 0x28e44 -+#define CB_COLOR9_PITCH 0x28e60 -+#define CB_COLOR10_PITCH 0x28e7c -+#define CB_COLOR11_PITCH 0x28e98 -+ -+#define CB_COLOR1_SLICE 0x28ca4 -+#define CB_COLOR2_SLICE 0x28ce0 -+#define CB_COLOR3_SLICE 0x28d1c -+#define CB_COLOR4_SLICE 0x28d58 -+#define CB_COLOR5_SLICE 0x28d94 -+#define CB_COLOR6_SLICE 0x28dd0 -+#define CB_COLOR7_SLICE 0x28e0c -+#define CB_COLOR8_SLICE 0x28e48 -+#define CB_COLOR9_SLICE 0x28e64 -+#define 
CB_COLOR10_SLICE 0x28e80 -+#define CB_COLOR11_SLICE 0x28e9c -+ -+#define CB_COLOR1_VIEW 0x28ca8 -+#define CB_COLOR2_VIEW 0x28ce4 -+#define CB_COLOR3_VIEW 0x28d20 -+#define CB_COLOR4_VIEW 0x28d5c -+#define CB_COLOR5_VIEW 0x28d98 -+#define CB_COLOR6_VIEW 0x28dd4 -+#define CB_COLOR7_VIEW 0x28e10 -+#define CB_COLOR8_VIEW 0x28e4c -+#define CB_COLOR9_VIEW 0x28e68 -+#define CB_COLOR10_VIEW 0x28e84 -+#define CB_COLOR11_VIEW 0x28ea0 -+ -+#define CB_COLOR1_INFO 0x28cac -+#define CB_COLOR2_INFO 0x28ce8 -+#define CB_COLOR3_INFO 0x28d24 -+#define CB_COLOR4_INFO 0x28d60 -+#define CB_COLOR5_INFO 0x28d9c -+#define CB_COLOR6_INFO 0x28dd8 -+#define CB_COLOR7_INFO 0x28e14 -+#define CB_COLOR8_INFO 0x28e50 -+#define CB_COLOR9_INFO 0x28e6c -+#define CB_COLOR10_INFO 0x28e88 -+#define CB_COLOR11_INFO 0x28ea4 -+ -+#define CB_COLOR1_ATTRIB 0x28cb0 -+#define CB_COLOR2_ATTRIB 0x28cec -+#define CB_COLOR3_ATTRIB 0x28d28 -+#define CB_COLOR4_ATTRIB 0x28d64 -+#define CB_COLOR5_ATTRIB 0x28da0 -+#define CB_COLOR6_ATTRIB 0x28ddc -+#define CB_COLOR7_ATTRIB 0x28e18 -+#define CB_COLOR8_ATTRIB 0x28e54 -+#define CB_COLOR9_ATTRIB 0x28e70 -+#define CB_COLOR10_ATTRIB 0x28e8c -+#define CB_COLOR11_ATTRIB 0x28ea8 -+ -+#define CB_COLOR1_DIM 0x28cb4 -+#define CB_COLOR2_DIM 0x28cf0 -+#define CB_COLOR3_DIM 0x28d2c -+#define CB_COLOR4_DIM 0x28d68 -+#define CB_COLOR5_DIM 0x28da4 -+#define CB_COLOR6_DIM 0x28de0 -+#define CB_COLOR7_DIM 0x28e1c -+#define CB_COLOR8_DIM 0x28e58 -+#define CB_COLOR9_DIM 0x28e74 -+#define CB_COLOR10_DIM 0x28e90 -+#define CB_COLOR11_DIM 0x28eac -+ -+#define CB_COLOR1_CMASK 0x28cb8 -+#define CB_COLOR2_CMASK 0x28cf4 -+#define CB_COLOR3_CMASK 0x28d30 -+#define CB_COLOR4_CMASK 0x28d6c -+#define CB_COLOR5_CMASK 0x28da8 -+#define CB_COLOR6_CMASK 0x28de4 -+#define CB_COLOR7_CMASK 0x28e20 -+ -+#define CB_COLOR1_CMASK_SLICE 0x28cbc -+#define CB_COLOR2_CMASK_SLICE 0x28cf8 -+#define CB_COLOR3_CMASK_SLICE 0x28d34 -+#define CB_COLOR4_CMASK_SLICE 0x28d70 -+#define CB_COLOR5_CMASK_SLICE 0x28dac -+#define CB_COLOR6_CMASK_SLICE 0x28de8 -+#define CB_COLOR7_CMASK_SLICE 0x28e24 -+ -+#define CB_COLOR1_FMASK 0x28cc0 -+#define CB_COLOR2_FMASK 0x28cfc -+#define CB_COLOR3_FMASK 0x28d38 -+#define CB_COLOR4_FMASK 0x28d74 -+#define CB_COLOR5_FMASK 0x28db0 -+#define CB_COLOR6_FMASK 0x28dec -+#define CB_COLOR7_FMASK 0x28e28 -+ -+#define CB_COLOR1_FMASK_SLICE 0x28cc4 -+#define CB_COLOR2_FMASK_SLICE 0x28d00 -+#define CB_COLOR3_FMASK_SLICE 0x28d3c -+#define CB_COLOR4_FMASK_SLICE 0x28d78 -+#define CB_COLOR5_FMASK_SLICE 0x28db4 -+#define CB_COLOR6_FMASK_SLICE 0x28df0 -+#define CB_COLOR7_FMASK_SLICE 0x28e2c -+ -+#define CB_COLOR1_CLEAR_WORD0 0x28cc8 -+#define CB_COLOR2_CLEAR_WORD0 0x28d04 -+#define CB_COLOR3_CLEAR_WORD0 0x28d40 -+#define CB_COLOR4_CLEAR_WORD0 0x28d7c -+#define CB_COLOR5_CLEAR_WORD0 0x28db8 -+#define CB_COLOR6_CLEAR_WORD0 0x28df4 -+#define CB_COLOR7_CLEAR_WORD0 0x28e30 -+ -+#define CB_COLOR1_CLEAR_WORD1 0x28ccc -+#define CB_COLOR2_CLEAR_WORD1 0x28d08 -+#define CB_COLOR3_CLEAR_WORD1 0x28d44 -+#define CB_COLOR4_CLEAR_WORD1 0x28d80 -+#define CB_COLOR5_CLEAR_WORD1 0x28dbc -+#define CB_COLOR6_CLEAR_WORD1 0x28df8 -+#define CB_COLOR7_CLEAR_WORD1 0x28e34 -+ -+#define CB_COLOR1_CLEAR_WORD2 0x28cd0 -+#define CB_COLOR2_CLEAR_WORD2 0x28d0c -+#define CB_COLOR3_CLEAR_WORD2 0x28d48 -+#define CB_COLOR4_CLEAR_WORD2 0x28d84 -+#define CB_COLOR5_CLEAR_WORD2 0x28dc0 -+#define CB_COLOR6_CLEAR_WORD2 0x28dfc -+#define CB_COLOR7_CLEAR_WORD2 0x28e38 -+ -+#define CB_COLOR1_CLEAR_WORD3 0x28cd4 -+#define CB_COLOR2_CLEAR_WORD3 0x28d10 -+#define CB_COLOR3_CLEAR_WORD3 
0x28d4c -+#define CB_COLOR4_CLEAR_WORD3 0x28d88 -+#define CB_COLOR5_CLEAR_WORD3 0x28dc4 -+#define CB_COLOR6_CLEAR_WORD3 0x28e00 -+#define CB_COLOR7_CLEAR_WORD3 0x28e3c -+ -+#define SQ_TEX_RESOURCE_WORD0_0 0x30000 -+#define SQ_TEX_RESOURCE_WORD1_0 0x30004 -+# define TEX_ARRAY_MODE(x) ((x) << 28) -+#define SQ_TEX_RESOURCE_WORD2_0 0x30008 -+#define SQ_TEX_RESOURCE_WORD3_0 0x3000C -+#define SQ_TEX_RESOURCE_WORD4_0 0x30010 -+#define SQ_TEX_RESOURCE_WORD5_0 0x30014 -+#define SQ_TEX_RESOURCE_WORD6_0 0x30018 -+#define SQ_TEX_RESOURCE_WORD7_0 0x3001c -+ -+ -+#endif -diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c -index cf60c0b..cf89aa2 100644 ---- a/drivers/gpu/drm/radeon/r100.c -+++ b/drivers/gpu/drm/radeon/r100.c -@@ -37,6 +37,7 @@ - #include "rs100d.h" - #include "rv200d.h" - #include "rv250d.h" -+#include "atom.h" - - #include - #include -@@ -67,6 +68,274 @@ MODULE_FIRMWARE(FIRMWARE_R520); - * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 - */ - -+void r100_pm_get_dynpm_state(struct radeon_device *rdev) -+{ -+ int i; -+ rdev->pm.dynpm_can_upclock = true; -+ rdev->pm.dynpm_can_downclock = true; -+ -+ switch (rdev->pm.dynpm_planned_action) { -+ case DYNPM_ACTION_MINIMUM: -+ rdev->pm.requested_power_state_index = 0; -+ rdev->pm.dynpm_can_downclock = false; -+ break; -+ case DYNPM_ACTION_DOWNCLOCK: -+ if (rdev->pm.current_power_state_index == 0) { -+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; -+ rdev->pm.dynpm_can_downclock = false; -+ } else { -+ if (rdev->pm.active_crtc_count > 1) { -+ for (i = 0; i < rdev->pm.num_power_states; i++) { -+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) -+ continue; -+ else if (i >= rdev->pm.current_power_state_index) { -+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; -+ break; -+ } else { -+ rdev->pm.requested_power_state_index = i; -+ break; -+ } -+ } -+ } else -+ rdev->pm.requested_power_state_index = -+ rdev->pm.current_power_state_index - 1; -+ } -+ /* don't use the power state if crtcs are active and no display flag is set */ -+ if ((rdev->pm.active_crtc_count > 0) && -+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags & -+ RADEON_PM_MODE_NO_DISPLAY)) { -+ rdev->pm.requested_power_state_index++; -+ } -+ break; -+ case DYNPM_ACTION_UPCLOCK: -+ if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) { -+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; -+ rdev->pm.dynpm_can_upclock = false; -+ } else { -+ if (rdev->pm.active_crtc_count > 1) { -+ for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) { -+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) -+ continue; -+ else if (i <= rdev->pm.current_power_state_index) { -+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; -+ break; -+ } else { -+ rdev->pm.requested_power_state_index = i; -+ break; -+ } -+ } -+ } else -+ rdev->pm.requested_power_state_index = -+ rdev->pm.current_power_state_index + 1; -+ } -+ break; -+ case DYNPM_ACTION_DEFAULT: -+ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; -+ rdev->pm.dynpm_can_upclock = false; -+ break; -+ case DYNPM_ACTION_NONE: -+ default: -+ DRM_ERROR("Requested mode for not defined action\n"); -+ return; -+ } -+ /* only one clock mode per power state */ -+ rdev->pm.requested_clock_mode_index = 0; -+ -+ DRM_DEBUG("Requested: e: %d m: %d p: %d\n", -+ 
rdev->pm.power_state[rdev->pm.requested_power_state_index]. -+ clock_info[rdev->pm.requested_clock_mode_index].sclk, -+ rdev->pm.power_state[rdev->pm.requested_power_state_index]. -+ clock_info[rdev->pm.requested_clock_mode_index].mclk, -+ rdev->pm.power_state[rdev->pm.requested_power_state_index]. -+ pcie_lanes); -+} -+ -+void r100_pm_init_profile(struct radeon_device *rdev) -+{ -+ /* default */ -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; -+ /* low sh */ -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; -+ /* mid sh */ -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; -+ /* high sh */ -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; -+ /* low mh */ -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; -+ /* mid mh */ -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; -+ /* high mh */ -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; -+} -+ -+void r100_pm_misc(struct radeon_device *rdev) -+{ -+ int requested_index = rdev->pm.requested_power_state_index; -+ struct radeon_power_state *ps = &rdev->pm.power_state[requested_index]; -+ struct radeon_voltage *voltage = &ps->clock_info[0].voltage; -+ u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl; -+ -+ if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) { -+ if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { -+ tmp = RREG32(voltage->gpio.reg); -+ if (voltage->active_high) -+ tmp |= voltage->gpio.mask; -+ else -+ tmp &= ~(voltage->gpio.mask); -+ WREG32(voltage->gpio.reg, tmp); -+ if (voltage->delay) -+ udelay(voltage->delay); -+ } else { -+ tmp = RREG32(voltage->gpio.reg); -+ if (voltage->active_high) -+ tmp &= ~voltage->gpio.mask; -+ else -+ tmp |= voltage->gpio.mask; -+ WREG32(voltage->gpio.reg, tmp); -+ if (voltage->delay) -+ udelay(voltage->delay); -+ } -+ } -+ -+ sclk_cntl = RREG32_PLL(SCLK_CNTL); -+ sclk_cntl2 = RREG32_PLL(SCLK_CNTL2); -+ sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3); -+ 
sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL); -+ sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3); -+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) { -+ sclk_more_cntl |= REDUCED_SPEED_SCLK_EN; -+ if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE) -+ sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE; -+ else -+ sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE; -+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) -+ sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0); -+ else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) -+ sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2); -+ } else -+ sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN; -+ -+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) { -+ sclk_more_cntl |= IO_CG_VOLTAGE_DROP; -+ if (voltage->delay) { -+ sclk_more_cntl |= VOLTAGE_DROP_SYNC; -+ switch (voltage->delay) { -+ case 33: -+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(0); -+ break; -+ case 66: -+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(1); -+ break; -+ case 99: -+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(2); -+ break; -+ case 132: -+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(3); -+ break; -+ } -+ } else -+ sclk_more_cntl &= ~VOLTAGE_DROP_SYNC; -+ } else -+ sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP; -+ -+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN) -+ sclk_cntl &= ~FORCE_HDP; -+ else -+ sclk_cntl |= FORCE_HDP; -+ -+ WREG32_PLL(SCLK_CNTL, sclk_cntl); -+ WREG32_PLL(SCLK_CNTL2, sclk_cntl2); -+ WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl); -+ -+ /* set pcie lanes */ -+ if ((rdev->flags & RADEON_IS_PCIE) && -+ !(rdev->flags & RADEON_IS_IGP) && -+ rdev->asic->set_pcie_lanes && -+ (ps->pcie_lanes != -+ rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) { -+ radeon_set_pcie_lanes(rdev, -+ ps->pcie_lanes); -+ DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes); -+ } -+} -+ -+void r100_pm_prepare(struct radeon_device *rdev) -+{ -+ struct drm_device *ddev = rdev->ddev; -+ struct drm_crtc *crtc; -+ struct radeon_crtc *radeon_crtc; -+ u32 tmp; -+ -+ /* disable any active CRTCs */ -+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { -+ radeon_crtc = to_radeon_crtc(crtc); -+ if (radeon_crtc->enabled) { -+ if (radeon_crtc->crtc_id) { -+ tmp = RREG32(RADEON_CRTC2_GEN_CNTL); -+ tmp |= RADEON_CRTC2_DISP_REQ_EN_B; -+ WREG32(RADEON_CRTC2_GEN_CNTL, tmp); -+ } else { -+ tmp = RREG32(RADEON_CRTC_GEN_CNTL); -+ tmp |= RADEON_CRTC_DISP_REQ_EN_B; -+ WREG32(RADEON_CRTC_GEN_CNTL, tmp); -+ } -+ } -+ } -+} -+ -+void r100_pm_finish(struct radeon_device *rdev) -+{ -+ struct drm_device *ddev = rdev->ddev; -+ struct drm_crtc *crtc; -+ struct radeon_crtc *radeon_crtc; -+ u32 tmp; -+ -+ /* enable any active CRTCs */ -+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { -+ radeon_crtc = to_radeon_crtc(crtc); -+ if (radeon_crtc->enabled) { -+ if (radeon_crtc->crtc_id) { -+ tmp = RREG32(RADEON_CRTC2_GEN_CNTL); -+ tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B; -+ WREG32(RADEON_CRTC2_GEN_CNTL, tmp); -+ } else { -+ tmp = RREG32(RADEON_CRTC_GEN_CNTL); -+ tmp &= ~RADEON_CRTC_DISP_REQ_EN_B; -+ WREG32(RADEON_CRTC_GEN_CNTL, tmp); -+ } -+ } -+ } -+} -+ -+bool r100_gui_idle(struct radeon_device *rdev) -+{ -+ if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE) -+ return false; -+ else -+ return true; -+} -+ - /* hpd for digital panel detect/disconnect */ - bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) - { -@@ -254,6 +523,9 @@ int r100_irq_set(struct radeon_device *rdev) - if (rdev->irq.sw_int) { - tmp |= RADEON_SW_INT_ENABLE; - } -+ if (rdev->irq.gui_idle) { -+ tmp |= RADEON_GUI_IDLE_MASK; -+ } - if 
(rdev->irq.crtc_vblank_int[0]) { - tmp |= RADEON_CRTC_VBLANK_MASK; - } -@@ -288,6 +560,12 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev) - RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT | - RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT; - -+ /* the interrupt works, but the status bit is permanently asserted */ -+ if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) { -+ if (!rdev->irq.gui_idle_acked) -+ irq_mask |= RADEON_GUI_IDLE_STAT; -+ } -+ - if (irqs) { - WREG32(RADEON_GEN_INT_STATUS, irqs); - } -@@ -299,6 +577,9 @@ int r100_irq_process(struct radeon_device *rdev) - uint32_t status, msi_rearm; - bool queue_hotplug = false; - -+ /* reset gui idle ack. the status bit is broken */ -+ rdev->irq.gui_idle_acked = false; -+ - status = r100_irq_ack(rdev); - if (!status) { - return IRQ_NONE; -@@ -311,6 +592,12 @@ int r100_irq_process(struct radeon_device *rdev) - if (status & RADEON_SW_INT_TEST) { - radeon_fence_process(rdev); - } -+ /* gui idle interrupt */ -+ if (status & RADEON_GUI_IDLE_STAT) { -+ rdev->irq.gui_idle_acked = true; -+ rdev->pm.gui_idle = true; -+ wake_up(&rdev->irq.idle_queue); -+ } - /* Vertical blank interrupts */ - if (status & RADEON_CRTC_VBLANK_STAT) { - drm_handle_vblank(rdev->ddev, 0); -@@ -332,6 +619,8 @@ int r100_irq_process(struct radeon_device *rdev) - } - status = r100_irq_ack(rdev); - } -+ /* reset gui idle ack. the status bit is broken */ -+ rdev->irq.gui_idle_acked = false; - if (queue_hotplug) - queue_work(rdev->wq, &rdev->hotplug_work); - if (rdev->msi_enabled) { -@@ -663,26 +952,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) - if (r100_debugfs_cp_init(rdev)) { - DRM_ERROR("Failed to register debugfs file for CP !\n"); - } -- /* Reset CP */ -- tmp = RREG32(RADEON_CP_CSQ_STAT); -- if ((tmp & (1 << 31))) { -- DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp); -- WREG32(RADEON_CP_CSQ_MODE, 0); -- WREG32(RADEON_CP_CSQ_CNTL, 0); -- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP); -- tmp = RREG32(RADEON_RBBM_SOFT_RESET); -- mdelay(2); -- WREG32(RADEON_RBBM_SOFT_RESET, 0); -- tmp = RREG32(RADEON_RBBM_SOFT_RESET); -- mdelay(2); -- tmp = RREG32(RADEON_CP_CSQ_STAT); -- if ((tmp & (1 << 31))) { -- DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp); -- } -- } else { -- DRM_INFO("radeon: cp idle (0x%08X)\n", tmp); -- } -- - if (!rdev->me_fw) { - r = r100_cp_init_microcode(rdev); - if (r) { -@@ -787,39 +1056,6 @@ void r100_cp_disable(struct radeon_device *rdev) - } - } - --int r100_cp_reset(struct radeon_device *rdev) --{ -- uint32_t tmp; -- bool reinit_cp; -- int i; -- -- reinit_cp = rdev->cp.ready; -- rdev->cp.ready = false; -- WREG32(RADEON_CP_CSQ_MODE, 0); -- WREG32(RADEON_CP_CSQ_CNTL, 0); -- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP); -- (void)RREG32(RADEON_RBBM_SOFT_RESET); -- udelay(200); -- WREG32(RADEON_RBBM_SOFT_RESET, 0); -- /* Wait to prevent race in RBBM_STATUS */ -- mdelay(1); -- for (i = 0; i < rdev->usec_timeout; i++) { -- tmp = RREG32(RADEON_RBBM_STATUS); -- if (!(tmp & (1 << 16))) { -- DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n", -- tmp); -- if (reinit_cp) { -- return r100_cp_init(rdev, rdev->cp.ring_size); -- } -- return 0; -- } -- DRM_UDELAY(1); -- } -- tmp = RREG32(RADEON_RBBM_STATUS); -- DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp); -- return -1; --} -- - void r100_cp_commit(struct radeon_device *rdev) - { - WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr); -@@ -1733,76 +1969,163 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev) - return -1; - } - --void 
r100_gpu_init(struct radeon_device *rdev) -+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp) - { -- /* TODO: anythings to do here ? pipes ? */ -- r100_hdp_reset(rdev); -+ lockup->last_cp_rptr = cp->rptr; -+ lockup->last_jiffies = jiffies; -+} -+ -+/** -+ * r100_gpu_cp_is_lockup() - check if the CP is locked up by recording information -+ * @rdev: radeon device structure -+ * @lockup: r100_gpu_lockup structure holding CP lockup tracking information -+ * @cp: radeon_cp structure holding CP information -+ * -+ * We don't need to initialize the lockup tracking information as we will either -+ * have the CP rptr at a different value or a jiffies wrap around, which will force -+ * initialization of the lockup tracking information. -+ * -+ * A possible false positive is if we get called after a while and last_cp_rptr == -+ * the current CP rptr; even if it's unlikely, it might happen. To avoid this, -+ * if the elapsed time since the last call is bigger than 2 seconds we return -+ * false and update the tracking information. Due to this, the caller must call -+ * r100_gpu_cp_is_lockup several times in less than 2sec for a lockup to be reported; -+ * the fencing code should be cautious about that. -+ * -+ * The caller should write to the ring to force the CP to do something so we don't get -+ * a false positive when the CP is just given nothing to do. -+ * -+ **/ -+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp) -+{ -+ unsigned long cjiffies, elapsed; -+ -+ cjiffies = jiffies; -+ if (!time_after(cjiffies, lockup->last_jiffies)) { -+ /* likely a wrap around */ -+ lockup->last_cp_rptr = cp->rptr; -+ lockup->last_jiffies = jiffies; -+ return false; -+ } -+ if (cp->rptr != lockup->last_cp_rptr) { -+ /* CP is still working, no lockup */ -+ lockup->last_cp_rptr = cp->rptr; -+ lockup->last_jiffies = jiffies; -+ return false; -+ } -+ elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies); -+ if (elapsed >= 3000) { -+ /* very likely the improbable case where the current -+ * rptr is equal to the rptr recorded a while ago; -+ * this is more likely a false positive, so update the tracking -+ * information, which should force us to be recalled at -+ * a later point -+ */ -+ lockup->last_cp_rptr = cp->rptr; -+ lockup->last_jiffies = jiffies; -+ return false; -+ } -+ if (elapsed >= 1000) { -+ dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed); -+ return true; -+ } -+ /* give a chance to the GPU ... 
*/ -+ return false; - } - --void r100_hdp_reset(struct radeon_device *rdev) -+bool r100_gpu_is_lockup(struct radeon_device *rdev) - { -- uint32_t tmp; -+ u32 rbbm_status; -+ int r; - -- tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL; -- tmp |= (7 << 28); -- WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE); -- (void)RREG32(RADEON_HOST_PATH_CNTL); -- udelay(200); -- WREG32(RADEON_RBBM_SOFT_RESET, 0); -- WREG32(RADEON_HOST_PATH_CNTL, tmp); -- (void)RREG32(RADEON_HOST_PATH_CNTL); -+ rbbm_status = RREG32(R_000E40_RBBM_STATUS); -+ if (!G_000E40_GUI_ACTIVE(rbbm_status)) { -+ r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp); -+ return false; -+ } -+ /* force CP activities */ -+ r = radeon_ring_lock(rdev, 2); -+ if (!r) { -+ /* PACKET2 NOP */ -+ radeon_ring_write(rdev, 0x80000000); -+ radeon_ring_write(rdev, 0x80000000); -+ radeon_ring_unlock_commit(rdev); -+ } -+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); -+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp); - } - --int r100_rb2d_reset(struct radeon_device *rdev) -+void r100_bm_disable(struct radeon_device *rdev) - { -- uint32_t tmp; -- int i; -+ u32 tmp; - -- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2); -- (void)RREG32(RADEON_RBBM_SOFT_RESET); -- udelay(200); -- WREG32(RADEON_RBBM_SOFT_RESET, 0); -- /* Wait to prevent race in RBBM_STATUS */ -+ /* disable bus mastering */ -+ tmp = RREG32(R_000030_BUS_CNTL); -+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044); -+ mdelay(1); -+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042); -+ mdelay(1); -+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040); -+ tmp = RREG32(RADEON_BUS_CNTL); -+ mdelay(1); -+ pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp); -+ pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB); - mdelay(1); -- for (i = 0; i < rdev->usec_timeout; i++) { -- tmp = RREG32(RADEON_RBBM_STATUS); -- if (!(tmp & (1 << 26))) { -- DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n", -- tmp); -- return 0; -- } -- DRM_UDELAY(1); -- } -- tmp = RREG32(RADEON_RBBM_STATUS); -- DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp); -- return -1; - } - --int r100_gpu_reset(struct radeon_device *rdev) -+int r100_asic_reset(struct radeon_device *rdev) - { -- uint32_t status; -+ struct r100_mc_save save; -+ u32 status, tmp; - -- /* reset order likely matter */ -- status = RREG32(RADEON_RBBM_STATUS); -- /* reset HDP */ -- r100_hdp_reset(rdev); -- /* reset rb2d */ -- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) { -- r100_rb2d_reset(rdev); -+ r100_mc_stop(rdev, &save); -+ status = RREG32(R_000E40_RBBM_STATUS); -+ if (!G_000E40_GUI_ACTIVE(status)) { -+ return 0; - } -- /* TODO: reset 3D engine */ -+ status = RREG32(R_000E40_RBBM_STATUS); -+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); -+ /* stop CP */ -+ WREG32(RADEON_CP_CSQ_CNTL, 0); -+ tmp = RREG32(RADEON_CP_RB_CNTL); -+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); -+ WREG32(RADEON_CP_RB_RPTR_WR, 0); -+ WREG32(RADEON_CP_RB_WPTR, 0); -+ WREG32(RADEON_CP_RB_CNTL, tmp); -+ /* save PCI state */ -+ pci_save_state(rdev->pdev); -+ /* disable bus mastering */ -+ r100_bm_disable(rdev); -+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) | -+ S_0000F0_SOFT_RESET_RE(1) | -+ S_0000F0_SOFT_RESET_PP(1) | -+ S_0000F0_SOFT_RESET_RB(1)); -+ RREG32(R_0000F0_RBBM_SOFT_RESET); -+ mdelay(500); -+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0); -+ mdelay(1); -+ status = RREG32(R_000E40_RBBM_STATUS); 
-+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); - /* reset CP */ -- status = RREG32(RADEON_RBBM_STATUS); -- if (status & (1 << 16)) { -- r100_cp_reset(rdev); -- } -+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); -+ RREG32(R_0000F0_RBBM_SOFT_RESET); -+ mdelay(500); -+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0); -+ mdelay(1); -+ status = RREG32(R_000E40_RBBM_STATUS); -+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); -+ /* restore PCI & busmastering */ -+ pci_restore_state(rdev->pdev); -+ r100_enable_bm(rdev); - /* Check if GPU is idle */ -- status = RREG32(RADEON_RBBM_STATUS); -- if (status & RADEON_RBBM_ACTIVE) { -- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); -+ if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) || -+ G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { -+ dev_err(rdev->dev, "failed to reset GPU\n"); -+ rdev->gpu_lockup = true; - return -1; - } -- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status); -+ r100_mc_resume(rdev, &save); -+ dev_info(rdev->dev, "GPU reset succeed\n"); - return 0; - } - -@@ -2002,11 +2325,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev) - else - rdev->mc.mc_vram_size = rdev->mc.real_vram_size; - } -- /* FIXME remove this once we support unmappable VRAM */ -- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) { -- rdev->mc.mc_vram_size = rdev->mc.aper_size; -- rdev->mc.real_vram_size = rdev->mc.aper_size; -- } - } - - void r100_vga_set_state(struct radeon_device *rdev, bool state) -@@ -2335,53 +2653,53 @@ void r100_bandwidth_update(struct radeon_device *rdev) - fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; - uint32_t temp, data, mem_trcd, mem_trp, mem_tras; - fixed20_12 memtcas_ff[8] = { -- fixed_init(1), -- fixed_init(2), -- fixed_init(3), -- fixed_init(0), -- fixed_init_half(1), -- fixed_init_half(2), -- fixed_init(0), -+ dfixed_init(1), -+ dfixed_init(2), -+ dfixed_init(3), -+ dfixed_init(0), -+ dfixed_init_half(1), -+ dfixed_init_half(2), -+ dfixed_init(0), - }; - fixed20_12 memtcas_rs480_ff[8] = { -- fixed_init(0), -- fixed_init(1), -- fixed_init(2), -- fixed_init(3), -- fixed_init(0), -- fixed_init_half(1), -- fixed_init_half(2), -- fixed_init_half(3), -+ dfixed_init(0), -+ dfixed_init(1), -+ dfixed_init(2), -+ dfixed_init(3), -+ dfixed_init(0), -+ dfixed_init_half(1), -+ dfixed_init_half(2), -+ dfixed_init_half(3), - }; - fixed20_12 memtcas2_ff[8] = { -- fixed_init(0), -- fixed_init(1), -- fixed_init(2), -- fixed_init(3), -- fixed_init(4), -- fixed_init(5), -- fixed_init(6), -- fixed_init(7), -+ dfixed_init(0), -+ dfixed_init(1), -+ dfixed_init(2), -+ dfixed_init(3), -+ dfixed_init(4), -+ dfixed_init(5), -+ dfixed_init(6), -+ dfixed_init(7), - }; - fixed20_12 memtrbs[8] = { -- fixed_init(1), -- fixed_init_half(1), -- fixed_init(2), -- fixed_init_half(2), -- fixed_init(3), -- fixed_init_half(3), -- fixed_init(4), -- fixed_init_half(4) -+ dfixed_init(1), -+ dfixed_init_half(1), -+ dfixed_init(2), -+ dfixed_init_half(2), -+ dfixed_init(3), -+ dfixed_init_half(3), -+ dfixed_init(4), -+ dfixed_init_half(4) - }; - fixed20_12 memtrbs_r4xx[8] = { -- fixed_init(4), -- fixed_init(5), -- fixed_init(6), -- fixed_init(7), -- fixed_init(8), -- fixed_init(9), -- fixed_init(10), -- fixed_init(11) -+ dfixed_init(4), -+ dfixed_init(5), -+ dfixed_init(6), -+ dfixed_init(7), -+ dfixed_init(8), -+ dfixed_init(9), -+ dfixed_init(10), -+ dfixed_init(11) - }; - fixed20_12 min_mem_eff; - fixed20_12 mc_latency_sclk, 
mc_latency_mclk, k1; -@@ -2412,7 +2730,7 @@ void r100_bandwidth_update(struct radeon_device *rdev) - } - } - -- min_mem_eff.full = rfixed_const_8(0); -+ min_mem_eff.full = dfixed_const_8(0); - /* get modes */ - if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { - uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); -@@ -2433,28 +2751,28 @@ void r100_bandwidth_update(struct radeon_device *rdev) - mclk_ff = rdev->pm.mclk; - - temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); -- temp_ff.full = rfixed_const(temp); -- mem_bw.full = rfixed_mul(mclk_ff, temp_ff); -+ temp_ff.full = dfixed_const(temp); -+ mem_bw.full = dfixed_mul(mclk_ff, temp_ff); - - pix_clk.full = 0; - pix_clk2.full = 0; - peak_disp_bw.full = 0; - if (mode1) { -- temp_ff.full = rfixed_const(1000); -- pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ -- pix_clk.full = rfixed_div(pix_clk, temp_ff); -- temp_ff.full = rfixed_const(pixel_bytes1); -- peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); -+ temp_ff.full = dfixed_const(1000); -+ pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */ -+ pix_clk.full = dfixed_div(pix_clk, temp_ff); -+ temp_ff.full = dfixed_const(pixel_bytes1); -+ peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff); - } - if (mode2) { -- temp_ff.full = rfixed_const(1000); -- pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ -- pix_clk2.full = rfixed_div(pix_clk2, temp_ff); -- temp_ff.full = rfixed_const(pixel_bytes2); -- peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); -+ temp_ff.full = dfixed_const(1000); -+ pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */ -+ pix_clk2.full = dfixed_div(pix_clk2, temp_ff); -+ temp_ff.full = dfixed_const(pixel_bytes2); -+ peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff); - } - -- mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); -+ mem_bw.full = dfixed_mul(mem_bw, min_mem_eff); - if (peak_disp_bw.full >= mem_bw.full) { - DRM_ERROR("You may not have enough display bandwidth for current mode\n" - "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); -@@ -2496,9 +2814,9 @@ void r100_bandwidth_update(struct radeon_device *rdev) - mem_tras = ((temp >> 12) & 0xf) + 4; - } - /* convert to FF */ -- trcd_ff.full = rfixed_const(mem_trcd); -- trp_ff.full = rfixed_const(mem_trp); -- tras_ff.full = rfixed_const(mem_tras); -+ trcd_ff.full = dfixed_const(mem_trcd); -+ trp_ff.full = dfixed_const(mem_trp); -+ tras_ff.full = dfixed_const(mem_tras); - - /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ - temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); -@@ -2516,7 +2834,7 @@ void r100_bandwidth_update(struct radeon_device *rdev) - /* extra cas latency stored in bits 23-25 0-4 clocks */ - data = (temp >> 23) & 0x7; - if (data < 5) -- tcas_ff.full += rfixed_const(data); -+ tcas_ff.full += dfixed_const(data); - } - - if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { -@@ -2553,72 +2871,72 @@ void r100_bandwidth_update(struct radeon_device *rdev) - - if (rdev->flags & RADEON_IS_AGP) { - fixed20_12 agpmode_ff; -- agpmode_ff.full = rfixed_const(radeon_agpmode); -- temp_ff.full = rfixed_const_666(16); -- sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); -+ agpmode_ff.full = dfixed_const(radeon_agpmode); -+ temp_ff.full = dfixed_const_666(16); -+ sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff); - } - /* TODO PCIE lanes may affect this - agpmode == 16?? 
*/ - - if (ASIC_IS_R300(rdev)) { -- sclk_delay_ff.full = rfixed_const(250); -+ sclk_delay_ff.full = dfixed_const(250); - } else { - if ((rdev->family == CHIP_RV100) || - rdev->flags & RADEON_IS_IGP) { - if (rdev->mc.vram_is_ddr) -- sclk_delay_ff.full = rfixed_const(41); -+ sclk_delay_ff.full = dfixed_const(41); - else -- sclk_delay_ff.full = rfixed_const(33); -+ sclk_delay_ff.full = dfixed_const(33); - } else { - if (rdev->mc.vram_width == 128) -- sclk_delay_ff.full = rfixed_const(57); -+ sclk_delay_ff.full = dfixed_const(57); - else -- sclk_delay_ff.full = rfixed_const(41); -+ sclk_delay_ff.full = dfixed_const(41); - } - } - -- mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); -+ mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff); - - if (rdev->mc.vram_is_ddr) { - if (rdev->mc.vram_width == 32) { -- k1.full = rfixed_const(40); -+ k1.full = dfixed_const(40); - c = 3; - } else { -- k1.full = rfixed_const(20); -+ k1.full = dfixed_const(20); - c = 1; - } - } else { -- k1.full = rfixed_const(40); -+ k1.full = dfixed_const(40); - c = 3; - } - -- temp_ff.full = rfixed_const(2); -- mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); -- temp_ff.full = rfixed_const(c); -- mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); -- temp_ff.full = rfixed_const(4); -- mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); -- mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); -+ temp_ff.full = dfixed_const(2); -+ mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff); -+ temp_ff.full = dfixed_const(c); -+ mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff); -+ temp_ff.full = dfixed_const(4); -+ mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff); -+ mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff); - mc_latency_mclk.full += k1.full; - -- mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); -- mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); -+ mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff); -+ mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff); - - /* - HW cursor time assuming worst case of full size colour cursor. - */ -- temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); -+ temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); - temp_ff.full += trcd_ff.full; - if (temp_ff.full < tras_ff.full) - temp_ff.full = tras_ff.full; -- cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); -+ cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff); - -- temp_ff.full = rfixed_const(cur_size); -- cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); -+ temp_ff.full = dfixed_const(cur_size); -+ cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff); - /* - Find the total latency for the display data. - */ -- disp_latency_overhead.full = rfixed_const(8); -- disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); -+ disp_latency_overhead.full = dfixed_const(8); -+ disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff); - mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; - mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; - -@@ -2646,16 +2964,16 @@ void r100_bandwidth_update(struct radeon_device *rdev) - /* - Find the drain rate of the display buffer. - */ -- temp_ff.full = rfixed_const((16/pixel_bytes1)); -- disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); -+ temp_ff.full = dfixed_const((16/pixel_bytes1)); -+ disp_drain_rate.full = dfixed_div(pix_clk, temp_ff); - - /* - Find the critical point of the display buffer. 
- */ -- crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); -- crit_point_ff.full += rfixed_const_half(0); -+ crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency); -+ crit_point_ff.full += dfixed_const_half(0); - -- critical_point = rfixed_trunc(crit_point_ff); -+ critical_point = dfixed_trunc(crit_point_ff); - - if (rdev->disp_priority == 2) { - critical_point = 0; -@@ -2726,8 +3044,8 @@ void r100_bandwidth_update(struct radeon_device *rdev) - /* - Find the drain rate of the display buffer. - */ -- temp_ff.full = rfixed_const((16/pixel_bytes2)); -- disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); -+ temp_ff.full = dfixed_const((16/pixel_bytes2)); -+ disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff); - - grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); - grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); -@@ -2748,8 +3066,8 @@ void r100_bandwidth_update(struct radeon_device *rdev) - critical_point2 = 0; - else { - temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; -- temp_ff.full = rfixed_const(temp); -- temp_ff.full = rfixed_mul(mclk_ff, temp_ff); -+ temp_ff.full = dfixed_const(temp); -+ temp_ff.full = dfixed_mul(mclk_ff, temp_ff); - if (sclk_ff.full < temp_ff.full) - temp_ff.full = sclk_ff.full; - -@@ -2757,15 +3075,15 @@ void r100_bandwidth_update(struct radeon_device *rdev) - - if (mode1) { - temp_ff.full = read_return_rate.full - disp_drain_rate.full; -- time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); -+ time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff); - } else { - time_disp1_drop_priority.full = 0; - } - crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; -- crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); -- crit_point_ff.full += rfixed_const_half(0); -+ crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2); -+ crit_point_ff.full += dfixed_const_half(0); - -- critical_point2 = rfixed_trunc(crit_point_ff); -+ critical_point2 = dfixed_trunc(crit_point_ff); - - if (rdev->disp_priority == 2) { - critical_point2 = 0; -@@ -3399,7 +3717,7 @@ static int r100_startup(struct radeon_device *rdev) - /* Resume clock */ - r100_clock_startup(rdev); - /* Initialize GPU configuration (# pipes, ...) */ -- r100_gpu_init(rdev); -+// r100_gpu_init(rdev); - /* Initialize GART (initialize after TTM so we can allocate - * memory through TTM but finalize after TTM) */ - r100_enable_bm(rdev); -@@ -3436,7 +3754,7 @@ int r100_resume(struct radeon_device *rdev) - /* Resume clock before doing reset */ - r100_clock_startup(rdev); - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ -- if (radeon_gpu_reset(rdev)) { -+ if (radeon_asic_reset(rdev)) { - dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), - RREG32(R_0007C0_CP_STAT)); -@@ -3462,7 +3780,6 @@ int r100_suspend(struct radeon_device *rdev) - - void r100_fini(struct radeon_device *rdev) - { -- radeon_pm_fini(rdev); - r100_cp_fini(rdev); - r100_wb_fini(rdev); - r100_ib_fini(rdev); -@@ -3505,7 +3822,7 @@ int r100_init(struct radeon_device *rdev) - return r; - } - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ -- if (radeon_gpu_reset(rdev)) { -+ if (radeon_asic_reset(rdev)) { - dev_warn(rdev->dev, - "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), -@@ -3518,8 +3835,6 @@ int r100_init(struct radeon_device *rdev) - r100_errata(rdev); - /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); -- /* Initialize power management */ -- radeon_pm_init(rdev); - /* initialize AGP */ - if (rdev->flags & RADEON_IS_AGP) { - r = radeon_agp_init(rdev); -diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h -index df29a63..d016b16 100644 ---- a/drivers/gpu/drm/radeon/r100d.h -+++ b/drivers/gpu/drm/radeon/r100d.h -@@ -74,6 +74,134 @@ - #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) - - /* Registers */ -+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0 -+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0) -+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1) -+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE -+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1) -+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1) -+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD -+#define S_0000F0_SOFT_RESET_SE(x) (((x) & 0x1) << 2) -+#define G_0000F0_SOFT_RESET_SE(x) (((x) >> 2) & 0x1) -+#define C_0000F0_SOFT_RESET_SE 0xFFFFFFFB -+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3) -+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1) -+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7 -+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4) -+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1) -+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF -+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5) -+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1) -+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF -+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6) -+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1) -+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF -+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7) -+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1) -+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F -+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8) -+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1) -+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF -+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9) -+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1) -+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF -+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10) -+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1) -+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF -+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11) -+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1) -+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF -+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12) -+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1) -+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF -+#define R_000030_BUS_CNTL 0x000030 -+#define S_000030_BUS_DBL_RESYNC(x) (((x) & 0x1) << 0) -+#define G_000030_BUS_DBL_RESYNC(x) (((x) >> 0) & 0x1) -+#define C_000030_BUS_DBL_RESYNC 0xFFFFFFFE -+#define S_000030_BUS_MSTR_RESET(x) (((x) & 0x1) << 1) -+#define G_000030_BUS_MSTR_RESET(x) (((x) >> 1) & 0x1) -+#define C_000030_BUS_MSTR_RESET 0xFFFFFFFD -+#define S_000030_BUS_FLUSH_BUF(x) (((x) & 0x1) << 2) -+#define G_000030_BUS_FLUSH_BUF(x) (((x) >> 2) & 0x1) -+#define C_000030_BUS_FLUSH_BUF 0xFFFFFFFB -+#define S_000030_BUS_STOP_REQ_DIS(x) (((x) & 0x1) << 3) -+#define G_000030_BUS_STOP_REQ_DIS(x) (((x) >> 3) & 0x1) -+#define C_000030_BUS_STOP_REQ_DIS 0xFFFFFFF7 -+#define S_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 4) -+#define G_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) >> 4) & 0x1) -+#define C_000030_BUS_PM4_READ_COMBINE_EN 
0xFFFFFFEF -+#define S_000030_BUS_WRT_COMBINE_EN(x) (((x) & 0x1) << 5) -+#define G_000030_BUS_WRT_COMBINE_EN(x) (((x) >> 5) & 0x1) -+#define C_000030_BUS_WRT_COMBINE_EN 0xFFFFFFDF -+#define S_000030_BUS_MASTER_DIS(x) (((x) & 0x1) << 6) -+#define G_000030_BUS_MASTER_DIS(x) (((x) >> 6) & 0x1) -+#define C_000030_BUS_MASTER_DIS 0xFFFFFFBF -+#define S_000030_BIOS_ROM_WRT_EN(x) (((x) & 0x1) << 7) -+#define G_000030_BIOS_ROM_WRT_EN(x) (((x) >> 7) & 0x1) -+#define C_000030_BIOS_ROM_WRT_EN 0xFFFFFF7F -+#define S_000030_BM_DAC_CRIPPLE(x) (((x) & 0x1) << 8) -+#define G_000030_BM_DAC_CRIPPLE(x) (((x) >> 8) & 0x1) -+#define C_000030_BM_DAC_CRIPPLE 0xFFFFFEFF -+#define S_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 9) -+#define G_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) >> 9) & 0x1) -+#define C_000030_BUS_NON_PM4_READ_COMBINE_EN 0xFFFFFDFF -+#define S_000030_BUS_XFERD_DISCARD_EN(x) (((x) & 0x1) << 10) -+#define G_000030_BUS_XFERD_DISCARD_EN(x) (((x) >> 10) & 0x1) -+#define C_000030_BUS_XFERD_DISCARD_EN 0xFFFFFBFF -+#define S_000030_BUS_SGL_READ_DISABLE(x) (((x) & 0x1) << 11) -+#define G_000030_BUS_SGL_READ_DISABLE(x) (((x) >> 11) & 0x1) -+#define C_000030_BUS_SGL_READ_DISABLE 0xFFFFF7FF -+#define S_000030_BIOS_DIS_ROM(x) (((x) & 0x1) << 12) -+#define G_000030_BIOS_DIS_ROM(x) (((x) >> 12) & 0x1) -+#define C_000030_BIOS_DIS_ROM 0xFFFFEFFF -+#define S_000030_BUS_PCI_READ_RETRY_EN(x) (((x) & 0x1) << 13) -+#define G_000030_BUS_PCI_READ_RETRY_EN(x) (((x) >> 13) & 0x1) -+#define C_000030_BUS_PCI_READ_RETRY_EN 0xFFFFDFFF -+#define S_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) & 0x1) << 14) -+#define G_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) >> 14) & 0x1) -+#define C_000030_BUS_AGP_AD_STEPPING_EN 0xFFFFBFFF -+#define S_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) & 0x1) << 15) -+#define G_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) >> 15) & 0x1) -+#define C_000030_BUS_PCI_WRT_RETRY_EN 0xFFFF7FFF -+#define S_000030_BUS_RETRY_WS(x) (((x) & 0xF) << 16) -+#define G_000030_BUS_RETRY_WS(x) (((x) >> 16) & 0xF) -+#define C_000030_BUS_RETRY_WS 0xFFF0FFFF -+#define S_000030_BUS_MSTR_RD_MULT(x) (((x) & 0x1) << 20) -+#define G_000030_BUS_MSTR_RD_MULT(x) (((x) >> 20) & 0x1) -+#define C_000030_BUS_MSTR_RD_MULT 0xFFEFFFFF -+#define S_000030_BUS_MSTR_RD_LINE(x) (((x) & 0x1) << 21) -+#define G_000030_BUS_MSTR_RD_LINE(x) (((x) >> 21) & 0x1) -+#define C_000030_BUS_MSTR_RD_LINE 0xFFDFFFFF -+#define S_000030_BUS_SUSPEND(x) (((x) & 0x1) << 22) -+#define G_000030_BUS_SUSPEND(x) (((x) >> 22) & 0x1) -+#define C_000030_BUS_SUSPEND 0xFFBFFFFF -+#define S_000030_LAT_16X(x) (((x) & 0x1) << 23) -+#define G_000030_LAT_16X(x) (((x) >> 23) & 0x1) -+#define C_000030_LAT_16X 0xFF7FFFFF -+#define S_000030_BUS_RD_DISCARD_EN(x) (((x) & 0x1) << 24) -+#define G_000030_BUS_RD_DISCARD_EN(x) (((x) >> 24) & 0x1) -+#define C_000030_BUS_RD_DISCARD_EN 0xFEFFFFFF -+#define S_000030_ENFRCWRDY(x) (((x) & 0x1) << 25) -+#define G_000030_ENFRCWRDY(x) (((x) >> 25) & 0x1) -+#define C_000030_ENFRCWRDY 0xFDFFFFFF -+#define S_000030_BUS_MSTR_WS(x) (((x) & 0x1) << 26) -+#define G_000030_BUS_MSTR_WS(x) (((x) >> 26) & 0x1) -+#define C_000030_BUS_MSTR_WS 0xFBFFFFFF -+#define S_000030_BUS_PARKING_DIS(x) (((x) & 0x1) << 27) -+#define G_000030_BUS_PARKING_DIS(x) (((x) >> 27) & 0x1) -+#define C_000030_BUS_PARKING_DIS 0xF7FFFFFF -+#define S_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) & 0x1) << 28) -+#define G_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) >> 28) & 0x1) -+#define C_000030_BUS_MSTR_DISCONNECT_EN 0xEFFFFFFF -+#define S_000030_SERR_EN(x) (((x) & 0x1) << 29) -+#define G_000030_SERR_EN(x) 
(((x) >> 29) & 0x1) -+#define C_000030_SERR_EN 0xDFFFFFFF -+#define S_000030_BUS_READ_BURST(x) (((x) & 0x1) << 30) -+#define G_000030_BUS_READ_BURST(x) (((x) >> 30) & 0x1) -+#define C_000030_BUS_READ_BURST 0xBFFFFFFF -+#define S_000030_BUS_RDY_READ_DLY(x) (((x) & 0x1) << 31) -+#define G_000030_BUS_RDY_READ_DLY(x) (((x) >> 31) & 0x1) -+#define C_000030_BUS_RDY_READ_DLY 0x7FFFFFFF - #define R_000040_GEN_INT_CNTL 0x000040 - #define S_000040_CRTC_VBLANK(x) (((x) & 0x1) << 0) - #define G_000040_CRTC_VBLANK(x) (((x) >> 0) & 0x1) -@@ -710,5 +838,41 @@ - #define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) - #define C_00000D_FORCE_RB 0xEFFFFFFF - -+/* PLL regs */ -+#define SCLK_CNTL 0xd -+#define FORCE_HDP (1 << 17) -+#define CLK_PWRMGT_CNTL 0x14 -+#define GLOBAL_PMAN_EN (1 << 10) -+#define DISP_PM (1 << 20) -+#define PLL_PWRMGT_CNTL 0x15 -+#define MPLL_TURNOFF (1 << 0) -+#define SPLL_TURNOFF (1 << 1) -+#define PPLL_TURNOFF (1 << 2) -+#define P2PLL_TURNOFF (1 << 3) -+#define TVPLL_TURNOFF (1 << 4) -+#define MOBILE_SU (1 << 16) -+#define SU_SCLK_USE_BCLK (1 << 17) -+#define SCLK_CNTL2 0x1e -+#define REDUCED_SPEED_SCLK_MODE (1 << 16) -+#define REDUCED_SPEED_SCLK_SEL(x) ((x) << 17) -+#define MCLK_MISC 0x1f -+#define EN_MCLK_TRISTATE_IN_SUSPEND (1 << 18) -+#define SCLK_MORE_CNTL 0x35 -+#define REDUCED_SPEED_SCLK_EN (1 << 16) -+#define IO_CG_VOLTAGE_DROP (1 << 17) -+#define VOLTAGE_DELAY_SEL(x) ((x) << 20) -+#define VOLTAGE_DROP_SYNC (1 << 19) -+ -+/* mmreg */ -+#define DISP_PWR_MAN 0xd08 -+#define DISP_D3_GRPH_RST (1 << 18) -+#define DISP_D3_SUBPIC_RST (1 << 19) -+#define DISP_D3_OV0_RST (1 << 20) -+#define DISP_D1D2_GRPH_RST (1 << 21) -+#define DISP_D1D2_SUBPIC_RST (1 << 22) -+#define DISP_D1D2_OV0_RST (1 << 23) -+#define DISP_DVO_ENABLE_RST (1 << 24) -+#define TV_ENABLE_RST (1 << 25) -+#define AUTO_PWRUP_EN (1 << 26) - - #endif -diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c -index a5ff807..b2f9efe 100644 ---- a/drivers/gpu/drm/radeon/r300.c -+++ b/drivers/gpu/drm/radeon/r300.c -@@ -27,8 +27,9 @@ - */ - #include - #include --#include "drmP.h" --#include "drm.h" -+#include -+#include -+#include - #include "radeon_reg.h" - #include "radeon.h" - #include "radeon_asic.h" -@@ -151,6 +152,10 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev) - u32 tmp; - int r; - -+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0); -+ WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0); -+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0); -+ WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0); - tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); - tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; - WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); -@@ -323,7 +328,6 @@ void r300_gpu_init(struct radeon_device *rdev) - { - uint32_t gb_tile_config, tmp; - -- r100_hdp_reset(rdev); - if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) || - (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) { - /* r300,r350 */ -@@ -375,89 +379,85 @@ void r300_gpu_init(struct radeon_device *rdev) - rdev->num_gb_pipes, rdev->num_z_pipes); - } - --int r300_ga_reset(struct radeon_device *rdev) -+bool r300_gpu_is_lockup(struct radeon_device *rdev) - { -- uint32_t tmp; -- bool reinit_cp; -- int i; -+ u32 rbbm_status; -+ int r; - -- reinit_cp = rdev->cp.ready; -- rdev->cp.ready = false; -- for (i = 0; i < rdev->usec_timeout; i++) { -- WREG32(RADEON_CP_CSQ_MODE, 0); -- WREG32(RADEON_CP_CSQ_CNTL, 0); -- WREG32(RADEON_RBBM_SOFT_RESET, 0x32005); -- (void)RREG32(RADEON_RBBM_SOFT_RESET); -- udelay(200); -- 
WREG32(RADEON_RBBM_SOFT_RESET, 0); -- /* Wait to prevent race in RBBM_STATUS */ -- mdelay(1); -- tmp = RREG32(RADEON_RBBM_STATUS); -- if (tmp & ((1 << 20) | (1 << 26))) { -- DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp); -- /* GA still busy soft reset it */ -- WREG32(0x429C, 0x200); -- WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0); -- WREG32(R300_RE_SCISSORS_TL, 0); -- WREG32(R300_RE_SCISSORS_BR, 0); -- WREG32(0x24AC, 0); -- } -- /* Wait to prevent race in RBBM_STATUS */ -- mdelay(1); -- tmp = RREG32(RADEON_RBBM_STATUS); -- if (!(tmp & ((1 << 20) | (1 << 26)))) { -- break; -- } -+ rbbm_status = RREG32(R_000E40_RBBM_STATUS); -+ if (!G_000E40_GUI_ACTIVE(rbbm_status)) { -+ r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp); -+ return false; - } -- for (i = 0; i < rdev->usec_timeout; i++) { -- tmp = RREG32(RADEON_RBBM_STATUS); -- if (!(tmp & ((1 << 20) | (1 << 26)))) { -- DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n", -- tmp); -- if (reinit_cp) { -- return r100_cp_init(rdev, rdev->cp.ring_size); -- } -- return 0; -- } -- DRM_UDELAY(1); -+ /* force CP activities */ -+ r = radeon_ring_lock(rdev, 2); -+ if (!r) { -+ /* PACKET2 NOP */ -+ radeon_ring_write(rdev, 0x80000000); -+ radeon_ring_write(rdev, 0x80000000); -+ radeon_ring_unlock_commit(rdev); - } -- tmp = RREG32(RADEON_RBBM_STATUS); -- DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp); -- return -1; -+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); -+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp); - } - --int r300_gpu_reset(struct radeon_device *rdev) -+int r300_asic_reset(struct radeon_device *rdev) - { -- uint32_t status; -- -- /* reset order likely matter */ -- status = RREG32(RADEON_RBBM_STATUS); -- /* reset HDP */ -- r100_hdp_reset(rdev); -- /* reset rb2d */ -- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) { -- r100_rb2d_reset(rdev); -- } -- /* reset GA */ -- if (status & ((1 << 20) | (1 << 26))) { -- r300_ga_reset(rdev); -- } -- /* reset CP */ -- status = RREG32(RADEON_RBBM_STATUS); -- if (status & (1 << 16)) { -- r100_cp_reset(rdev); -+ struct r100_mc_save save; -+ u32 status, tmp; -+ -+ r100_mc_stop(rdev, &save); -+ status = RREG32(R_000E40_RBBM_STATUS); -+ if (!G_000E40_GUI_ACTIVE(status)) { -+ return 0; - } -+ status = RREG32(R_000E40_RBBM_STATUS); -+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); -+ /* stop CP */ -+ WREG32(RADEON_CP_CSQ_CNTL, 0); -+ tmp = RREG32(RADEON_CP_RB_CNTL); -+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); -+ WREG32(RADEON_CP_RB_RPTR_WR, 0); -+ WREG32(RADEON_CP_RB_WPTR, 0); -+ WREG32(RADEON_CP_RB_CNTL, tmp); -+ /* save PCI state */ -+ pci_save_state(rdev->pdev); -+ /* disable bus mastering */ -+ r100_bm_disable(rdev); -+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) | -+ S_0000F0_SOFT_RESET_GA(1)); -+ RREG32(R_0000F0_RBBM_SOFT_RESET); -+ mdelay(500); -+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0); -+ mdelay(1); -+ status = RREG32(R_000E40_RBBM_STATUS); -+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); -+ /* resetting the CP seems to be problematic sometimes it end up -+ * hard locking the computer, but it's necessary for successfull -+ * reset more test & playing is needed on R3XX/R4XX to find a -+ * reliable (if any solution) -+ */ -+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); -+ RREG32(R_0000F0_RBBM_SOFT_RESET); -+ mdelay(500); -+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0); -+ mdelay(1); -+ status = RREG32(R_000E40_RBBM_STATUS); -+ dev_info(rdev->dev, 
"(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); -+ /* restore PCI & busmastering */ -+ pci_restore_state(rdev->pdev); -+ r100_enable_bm(rdev); - /* Check if GPU is idle */ -- status = RREG32(RADEON_RBBM_STATUS); -- if (status & RADEON_RBBM_ACTIVE) { -- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); -+ if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { -+ dev_err(rdev->dev, "failed to reset GPU\n"); -+ rdev->gpu_lockup = true; - return -1; - } -- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status); -+ r100_mc_resume(rdev, &save); -+ dev_info(rdev->dev, "GPU reset succeed\n"); - return 0; - } - -- - /* - * r300,r350,rv350,rv380 VRAM info - */ -@@ -1316,7 +1316,7 @@ int r300_resume(struct radeon_device *rdev) - /* Resume clock before doing reset */ - r300_clock_startup(rdev); - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ -- if (radeon_gpu_reset(rdev)) { -+ if (radeon_asic_reset(rdev)) { - dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), - RREG32(R_0007C0_CP_STAT)); -@@ -1344,7 +1344,6 @@ int r300_suspend(struct radeon_device *rdev) - - void r300_fini(struct radeon_device *rdev) - { -- radeon_pm_fini(rdev); - r100_cp_fini(rdev); - r100_wb_fini(rdev); - r100_ib_fini(rdev); -@@ -1387,7 +1386,7 @@ int r300_init(struct radeon_device *rdev) - return r; - } - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ -- if (radeon_gpu_reset(rdev)) { -+ if (radeon_asic_reset(rdev)) { - dev_warn(rdev->dev, - "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), -@@ -1400,8 +1399,6 @@ int r300_init(struct radeon_device *rdev) - r300_errata(rdev); - /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); -- /* Initialize power management */ -- radeon_pm_init(rdev); - /* initialize AGP */ - if (rdev->flags & RADEON_IS_AGP) { - r = radeon_agp_init(rdev); -diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h -index 4c73114..968a333 100644 ---- a/drivers/gpu/drm/radeon/r300d.h -+++ b/drivers/gpu/drm/radeon/r300d.h -@@ -209,7 +209,52 @@ - #define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) - #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) - #define C_000E40_GUI_ACTIVE 0x7FFFFFFF -- -+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0 -+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0) -+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1) -+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE -+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1) -+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1) -+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD -+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2) -+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1) -+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB -+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3) -+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1) -+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7 -+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4) -+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1) -+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF -+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5) -+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1) -+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF -+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6) -+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1) -+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF -+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7) -+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1) -+#define 
C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F -+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8) -+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1) -+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF -+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9) -+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1) -+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF -+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10) -+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1) -+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF -+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11) -+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1) -+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF -+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12) -+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1) -+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF -+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13) -+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1) -+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF -+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14) -+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1) -+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF - - #define R_00000D_SCLK_CNTL 0x00000D - #define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) -diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c -index c2bda4a..e6c8914 100644 ---- a/drivers/gpu/drm/radeon/r420.c -+++ b/drivers/gpu/drm/radeon/r420.c -@@ -36,6 +36,45 @@ - #include "r420d.h" - #include "r420_reg_safe.h" - -+void r420_pm_init_profile(struct radeon_device *rdev) -+{ -+ /* default */ -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; -+ /* low sh */ -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; -+ /* mid sh */ -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; -+ /* high sh */ -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; -+ /* low mh */ -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; -+ /* mid mh */ -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; -+ /* high mh */ -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ 
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; -+} -+ - static void r420_set_reg_safe(struct radeon_device *rdev) - { - rdev->config.r300.reg_safe_bm = r420_reg_safe_bm; -@@ -241,7 +280,7 @@ int r420_resume(struct radeon_device *rdev) - /* Resume clock before doing reset */ - r420_clock_resume(rdev); - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ -- if (radeon_gpu_reset(rdev)) { -+ if (radeon_asic_reset(rdev)) { - dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), - RREG32(R_0007C0_CP_STAT)); -@@ -274,7 +313,6 @@ int r420_suspend(struct radeon_device *rdev) - - void r420_fini(struct radeon_device *rdev) - { -- radeon_pm_fini(rdev); - r100_cp_fini(rdev); - r100_wb_fini(rdev); - r100_ib_fini(rdev); -@@ -322,7 +360,7 @@ int r420_init(struct radeon_device *rdev) - } - } - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ -- if (radeon_gpu_reset(rdev)) { -+ if (radeon_asic_reset(rdev)) { - dev_warn(rdev->dev, - "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), -@@ -334,8 +372,6 @@ int r420_init(struct radeon_device *rdev) - - /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); -- /* Initialize power management */ -- radeon_pm_init(rdev); - /* initialize AGP */ - if (rdev->flags & RADEON_IS_AGP) { - r = radeon_agp_init(rdev); -diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h -index 0cf2ad2..93c9a2b 100644 ---- a/drivers/gpu/drm/radeon/r500_reg.h -+++ b/drivers/gpu/drm/radeon/r500_reg.h -@@ -347,9 +347,11 @@ - - #define AVIVO_D1CRTC_CONTROL 0x6080 - # define AVIVO_CRTC_EN (1 << 0) -+# define AVIVO_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) - #define AVIVO_D1CRTC_BLANK_CONTROL 0x6084 - #define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088 - #define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c -+#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0 - #define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 - #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 - -@@ -488,6 +490,7 @@ - #define AVIVO_D2CRTC_BLANK_CONTROL 0x6884 - #define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888 - #define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c -+#define AVIVO_D2CRTC_STATUS_POSITION 0x68a0 - #define AVIVO_D2CRTC_FRAME_COUNT 0x68a4 - #define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4 - -diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c -index 3c44b8d..34330df 100644 ---- a/drivers/gpu/drm/radeon/r520.c -+++ b/drivers/gpu/drm/radeon/r520.c -@@ -53,7 +53,6 @@ static void r520_gpu_init(struct radeon_device *rdev) - { - unsigned pipe_select_current, gb_pipe_select, tmp; - -- r100_hdp_reset(rdev); - rv515_vga_render_disable(rdev); - /* - * DST_PIPE_CONFIG 0x170C -@@ -209,7 +208,7 @@ int r520_resume(struct radeon_device *rdev) - /* Resume clock before doing reset */ - rv515_clock_startup(rdev); - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ -- if (radeon_gpu_reset(rdev)) { -+ if (radeon_asic_reset(rdev)) { - dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), - RREG32(R_0007C0_CP_STAT)); -@@ -246,7 +245,7 @@ int r520_init(struct radeon_device *rdev) - return -EINVAL; - } - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ -- if (radeon_gpu_reset(rdev)) { -+ if (radeon_asic_reset(rdev)) { - dev_warn(rdev->dev, - "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), -@@ -262,8 +261,6 @@ int r520_init(struct radeon_device *rdev) - } - /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); -- /* Initialize power management */ -- radeon_pm_init(rdev); - /* initialize AGP */ - if (rdev->flags & RADEON_IS_AGP) { - r = radeon_agp_init(rdev); -diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c -index 8f3454e..0e91871 100644 ---- a/drivers/gpu/drm/radeon/r600.c -+++ b/drivers/gpu/drm/radeon/r600.c -@@ -44,6 +44,9 @@ - #define R700_PFP_UCODE_SIZE 848 - #define R700_PM4_UCODE_SIZE 1360 - #define R700_RLC_UCODE_SIZE 1024 -+#define EVERGREEN_PFP_UCODE_SIZE 1120 -+#define EVERGREEN_PM4_UCODE_SIZE 1376 -+#define EVERGREEN_RLC_UCODE_SIZE 768 - - /* Firmware Names */ - MODULE_FIRMWARE("radeon/R600_pfp.bin"); -@@ -68,6 +71,18 @@ MODULE_FIRMWARE("radeon/RV710_pfp.bin"); - MODULE_FIRMWARE("radeon/RV710_me.bin"); - MODULE_FIRMWARE("radeon/R600_rlc.bin"); - MODULE_FIRMWARE("radeon/R700_rlc.bin"); -+MODULE_FIRMWARE("radeon/CEDAR_pfp.bin"); -+MODULE_FIRMWARE("radeon/CEDAR_me.bin"); -+MODULE_FIRMWARE("radeon/CEDAR_rlc.bin"); -+MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin"); -+MODULE_FIRMWARE("radeon/REDWOOD_me.bin"); -+MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin"); -+MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin"); -+MODULE_FIRMWARE("radeon/JUNIPER_me.bin"); -+MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin"); -+MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin"); -+MODULE_FIRMWARE("radeon/CYPRESS_me.bin"); -+MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin"); - - int r600_debugfs_mc_info_init(struct radeon_device *rdev); - -@@ -75,6 +90,494 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev); - int r600_mc_wait_for_idle(struct radeon_device *rdev); - void r600_gpu_init(struct radeon_device *rdev); - void r600_fini(struct radeon_device *rdev); -+void r600_irq_disable(struct radeon_device *rdev); -+ -+void r600_pm_get_dynpm_state(struct radeon_device *rdev) -+{ -+ int i; -+ -+ rdev->pm.dynpm_can_upclock = true; -+ rdev->pm.dynpm_can_downclock = true; -+ -+ /* power state array is low to high, default is first */ -+ if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) { -+ int min_power_state_index = 0; -+ -+ if (rdev->pm.num_power_states > 2) -+ min_power_state_index = 1; -+ -+ switch (rdev->pm.dynpm_planned_action) { -+ case DYNPM_ACTION_MINIMUM: -+ rdev->pm.requested_power_state_index = min_power_state_index; -+ rdev->pm.requested_clock_mode_index = 0; -+ rdev->pm.dynpm_can_downclock = false; -+ break; -+ case DYNPM_ACTION_DOWNCLOCK: -+ if (rdev->pm.current_power_state_index == min_power_state_index) { -+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; -+ rdev->pm.dynpm_can_downclock = false; -+ } else { -+ if (rdev->pm.active_crtc_count > 1) { -+ for (i = 0; i < rdev->pm.num_power_states; i++) { -+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) -+ continue; -+ else if (i >= rdev->pm.current_power_state_index) { -+ rdev->pm.requested_power_state_index = -+ rdev->pm.current_power_state_index; -+ break; -+ } else { -+ rdev->pm.requested_power_state_index = i; -+ break; -+ } -+ } -+ } else -+ rdev->pm.requested_power_state_index = -+ rdev->pm.current_power_state_index - 1; -+ } -+ rdev->pm.requested_clock_mode_index = 0; -+ /* don't use the power state if crtcs are active and no display flag is set */ -+ if ((rdev->pm.active_crtc_count > 0) && -+ (rdev->pm.power_state[rdev->pm.requested_power_state_index]. 
-+ clock_info[rdev->pm.requested_clock_mode_index].flags & -+ RADEON_PM_MODE_NO_DISPLAY)) { -+ rdev->pm.requested_power_state_index++; -+ } -+ break; -+ case DYNPM_ACTION_UPCLOCK: -+ if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) { -+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; -+ rdev->pm.dynpm_can_upclock = false; -+ } else { -+ if (rdev->pm.active_crtc_count > 1) { -+ for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) { -+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) -+ continue; -+ else if (i <= rdev->pm.current_power_state_index) { -+ rdev->pm.requested_power_state_index = -+ rdev->pm.current_power_state_index; -+ break; -+ } else { -+ rdev->pm.requested_power_state_index = i; -+ break; -+ } -+ } -+ } else -+ rdev->pm.requested_power_state_index = -+ rdev->pm.current_power_state_index + 1; -+ } -+ rdev->pm.requested_clock_mode_index = 0; -+ break; -+ case DYNPM_ACTION_DEFAULT: -+ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; -+ rdev->pm.requested_clock_mode_index = 0; -+ rdev->pm.dynpm_can_upclock = false; -+ break; -+ case DYNPM_ACTION_NONE: -+ default: -+ DRM_ERROR("Requested mode for not defined action\n"); -+ return; -+ } -+ } else { -+ /* XXX select a power state based on AC/DC, single/dualhead, etc. */ -+ /* for now just select the first power state and switch between clock modes */ -+ /* power state array is low to high, default is first (0) */ -+ if (rdev->pm.active_crtc_count > 1) { -+ rdev->pm.requested_power_state_index = -1; -+ /* start at 1 as we don't want the default mode */ -+ for (i = 1; i < rdev->pm.num_power_states; i++) { -+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) -+ continue; -+ else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) || -+ (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) { -+ rdev->pm.requested_power_state_index = i; -+ break; -+ } -+ } -+ /* if nothing selected, grab the default state. */ -+ if (rdev->pm.requested_power_state_index == -1) -+ rdev->pm.requested_power_state_index = 0; -+ } else -+ rdev->pm.requested_power_state_index = 1; -+ -+ switch (rdev->pm.dynpm_planned_action) { -+ case DYNPM_ACTION_MINIMUM: -+ rdev->pm.requested_clock_mode_index = 0; -+ rdev->pm.dynpm_can_downclock = false; -+ break; -+ case DYNPM_ACTION_DOWNCLOCK: -+ if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) { -+ if (rdev->pm.current_clock_mode_index == 0) { -+ rdev->pm.requested_clock_mode_index = 0; -+ rdev->pm.dynpm_can_downclock = false; -+ } else -+ rdev->pm.requested_clock_mode_index = -+ rdev->pm.current_clock_mode_index - 1; -+ } else { -+ rdev->pm.requested_clock_mode_index = 0; -+ rdev->pm.dynpm_can_downclock = false; -+ } -+ /* don't use the power state if crtcs are active and no display flag is set */ -+ if ((rdev->pm.active_crtc_count > 0) && -+ (rdev->pm.power_state[rdev->pm.requested_power_state_index]. 
-+ clock_info[rdev->pm.requested_clock_mode_index].flags & -+ RADEON_PM_MODE_NO_DISPLAY)) { -+ rdev->pm.requested_clock_mode_index++; -+ } -+ break; -+ case DYNPM_ACTION_UPCLOCK: -+ if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) { -+ if (rdev->pm.current_clock_mode_index == -+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) { -+ rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index; -+ rdev->pm.dynpm_can_upclock = false; -+ } else -+ rdev->pm.requested_clock_mode_index = -+ rdev->pm.current_clock_mode_index + 1; -+ } else { -+ rdev->pm.requested_clock_mode_index = -+ rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1; -+ rdev->pm.dynpm_can_upclock = false; -+ } -+ break; -+ case DYNPM_ACTION_DEFAULT: -+ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; -+ rdev->pm.requested_clock_mode_index = 0; -+ rdev->pm.dynpm_can_upclock = false; -+ break; -+ case DYNPM_ACTION_NONE: -+ default: -+ DRM_ERROR("Requested mode for not defined action\n"); -+ return; -+ } -+ } -+ -+ DRM_DEBUG("Requested: e: %d m: %d p: %d\n", -+ rdev->pm.power_state[rdev->pm.requested_power_state_index]. -+ clock_info[rdev->pm.requested_clock_mode_index].sclk, -+ rdev->pm.power_state[rdev->pm.requested_power_state_index]. -+ clock_info[rdev->pm.requested_clock_mode_index].mclk, -+ rdev->pm.power_state[rdev->pm.requested_power_state_index]. -+ pcie_lanes); -+} -+ -+static int r600_pm_get_type_index(struct radeon_device *rdev, -+ enum radeon_pm_state_type ps_type, -+ int instance) -+{ -+ int i; -+ int found_instance = -1; -+ -+ for (i = 0; i < rdev->pm.num_power_states; i++) { -+ if (rdev->pm.power_state[i].type == ps_type) { -+ found_instance++; -+ if (found_instance == instance) -+ return i; -+ } -+ } -+ /* return default if no match */ -+ return rdev->pm.default_power_state_index; -+} -+ -+void rs780_pm_init_profile(struct radeon_device *rdev) -+{ -+ if (rdev->pm.num_power_states == 2) { -+ /* default */ -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; -+ /* low sh */ -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; -+ /* mid sh */ -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; -+ /* high sh */ -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; -+ /* low mh */ -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; -+ /* mid mh */ -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; -+ 
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; -+ /* high mh */ -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; -+ } else if (rdev->pm.num_power_states == 3) { -+ /* default */ -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; -+ /* low sh */ -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; -+ /* mid sh */ -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; -+ /* high sh */ -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; -+ /* low mh */ -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; -+ /* mid mh */ -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; -+ /* high mh */ -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; -+ } else { -+ /* default */ -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; -+ /* low sh */ -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; -+ /* mid sh */ -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; -+ /* high sh */ -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2; -+ 
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; -+ /* low mh */ -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; -+ /* mid mh */ -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; -+ /* high mh */ -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; -+ } -+} -+ -+void r600_pm_init_profile(struct radeon_device *rdev) -+{ -+ if (rdev->family == CHIP_R600) { -+ /* XXX */ -+ /* default */ -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; -+ /* low sh */ -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; -+ /* mid sh */ -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; -+ /* high sh */ -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; -+ /* low mh */ -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; -+ /* mid mh */ -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; -+ /* high mh */ -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; -+ } else { -+ if 
(rdev->pm.num_power_states < 4) { -+ /* default */ -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; -+ /* low sh */ -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; -+ /* mid sh */ -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; -+ /* high sh */ -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; -+ /* low mh */ -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; -+ /* low mh */ -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; -+ /* high mh */ -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; -+ } else { -+ /* default */ -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; -+ /* low sh */ -+ if (rdev->flags & RADEON_IS_MOBILITY) { -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; -+ } else { -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; -+ } -+ /* mid sh */ -+ if (rdev->flags & RADEON_IS_MOBILITY) { -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); -+ 
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; -+ } else { -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; -+ } -+ /* high sh */ -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; -+ /* low mh */ -+ if (rdev->flags & RADEON_IS_MOBILITY) { -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; -+ } else { -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; -+ } -+ /* mid mh */ -+ if (rdev->flags & RADEON_IS_MOBILITY) { -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; -+ } else { -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; -+ } -+ /* high mh */ -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = -+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; -+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; -+ } -+ } -+} -+ -+void r600_pm_misc(struct radeon_device *rdev) -+{ -+ int req_ps_idx = rdev->pm.requested_power_state_index; -+ int req_cm_idx = rdev->pm.requested_clock_mode_index; -+ struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; -+ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; -+ -+ if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { -+ if (voltage->voltage != rdev->pm.current_vddc) { -+ radeon_atom_set_voltage(rdev, voltage->voltage); -+ rdev->pm.current_vddc = voltage->voltage; -+ 
DRM_DEBUG("Setting: v: %d\n", voltage->voltage); -+ } -+ } -+} -+ -+bool r600_gui_idle(struct radeon_device *rdev) -+{ -+ if (RREG32(GRBM_STATUS) & GUI_ACTIVE) -+ return false; -+ else -+ return true; -+} - - /* hpd for digital panel detect/disconnect */ - bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) -@@ -714,11 +1217,6 @@ int r600_mc_init(struct radeon_device *rdev) - rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); - rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); - rdev->mc.visible_vram_size = rdev->mc.aper_size; -- /* FIXME remove this once we support unmappable VRAM */ -- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) { -- rdev->mc.mc_vram_size = rdev->mc.aper_size; -- rdev->mc.real_vram_size = rdev->mc.aper_size; -- } - r600_vram_gtt_location(rdev, &rdev->mc); - - if (rdev->flags & RADEON_IS_IGP) -@@ -750,7 +1248,6 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) - S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) | - S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) | - S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); -- u32 srbm_reset = 0; - u32 tmp; - - dev_info(rdev->dev, "GPU softreset \n"); -@@ -765,7 +1262,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) - dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); - } - /* Disable CP parsing/prefetching */ -- WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff)); -+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); - /* Check if any of the rendering block is busy and reset it */ - if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || - (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { -@@ -784,72 +1281,56 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) - S_008020_SOFT_RESET_VGT(1); - dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(R_008020_GRBM_SOFT_RESET, tmp); -- (void)RREG32(R_008020_GRBM_SOFT_RESET); -- udelay(50); -+ RREG32(R_008020_GRBM_SOFT_RESET); -+ mdelay(15); - WREG32(R_008020_GRBM_SOFT_RESET, 0); -- (void)RREG32(R_008020_GRBM_SOFT_RESET); - } - /* Reset CP (we always reset CP) */ - tmp = S_008020_SOFT_RESET_CP(1); - dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(R_008020_GRBM_SOFT_RESET, tmp); -- (void)RREG32(R_008020_GRBM_SOFT_RESET); -- udelay(50); -+ RREG32(R_008020_GRBM_SOFT_RESET); -+ mdelay(15); - WREG32(R_008020_GRBM_SOFT_RESET, 0); -- (void)RREG32(R_008020_GRBM_SOFT_RESET); -- /* Reset others GPU block if necessary */ -- if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS))) -- srbm_reset |= S_000E60_SOFT_RESET_RLC(1); -- if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS))) -- srbm_reset |= S_000E60_SOFT_RESET_GRBM(1); -- if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS))) -- srbm_reset |= S_000E60_SOFT_RESET_IH(1); -- if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS))) -- srbm_reset |= S_000E60_SOFT_RESET_VMC(1); -- if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS))) -- srbm_reset |= S_000E60_SOFT_RESET_MC(1); -- if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS))) -- srbm_reset |= S_000E60_SOFT_RESET_MC(1); -- if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS))) -- srbm_reset |= S_000E60_SOFT_RESET_MC(1); -- if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS))) -- srbm_reset |= S_000E60_SOFT_RESET_MC(1); -- if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS))) -- srbm_reset |= S_000E60_SOFT_RESET_MC(1); -- if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS))) -- srbm_reset |= S_000E60_SOFT_RESET_RLC(1); -- if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS))) -- srbm_reset |= S_000E60_SOFT_RESET_SEM(1); -- 
--	if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
--		srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
--	dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
--	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
--	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
--	udelay(50);
--	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
--	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
--	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
--	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
--	udelay(50);
--	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
--	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
- 	/* Wait a little for things to settle down */
--	udelay(50);
-+	mdelay(1);
- 	dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
- 		RREG32(R_008010_GRBM_STATUS));
- 	dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
- 		RREG32(R_008014_GRBM_STATUS2));
- 	dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
- 		RREG32(R_000E50_SRBM_STATUS));
--	/* After reset we need to reinit the asic as GPU often endup in an
--	 * incoherent state.
--	 */
--	atom_asic_init(rdev->mode_info.atom_context);
- 	rv515_mc_resume(rdev, &save);
- 	return 0;
- }
- 
--int r600_gpu_reset(struct radeon_device *rdev)
-+bool r600_gpu_is_lockup(struct radeon_device *rdev)
-+{
-+	u32 srbm_status;
-+	u32 grbm_status;
-+	u32 grbm_status2;
-+	int r;
-+
-+	srbm_status = RREG32(R_000E50_SRBM_STATUS);
-+	grbm_status = RREG32(R_008010_GRBM_STATUS);
-+	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
-+	if (!G_008010_GUI_ACTIVE(grbm_status)) {
-+		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
-+		return false;
-+	}
-+	/* force CP activities */
-+	r = radeon_ring_lock(rdev, 2);
-+	if (!r) {
-+		/* PACKET2 NOP */
-+		radeon_ring_write(rdev, 0x80000000);
-+		radeon_ring_write(rdev, 0x80000000);
-+		radeon_ring_unlock_commit(rdev);
-+	}
-+	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
-+	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
-+}
-+
-+int r600_asic_reset(struct radeon_device *rdev)
- {
- 	return r600_gpu_soft_reset(rdev);
- }
-@@ -1467,10 +1948,31 @@ int r600_init_microcode(struct radeon_device *rdev)
- 		chip_name = "RV710";
- 		rlc_chip_name = "R700";
- 		break;
-+	case CHIP_CEDAR:
-+		chip_name = "CEDAR";
-+		rlc_chip_name = "CEDAR";
-+		break;
-+	case CHIP_REDWOOD:
-+		chip_name = "REDWOOD";
-+		rlc_chip_name = "REDWOOD";
-+		break;
-+	case CHIP_JUNIPER:
-+		chip_name = "JUNIPER";
-+		rlc_chip_name = "JUNIPER";
-+		break;
-+	case CHIP_CYPRESS:
-+	case CHIP_HEMLOCK:
-+		chip_name = "CYPRESS";
-+		rlc_chip_name = "CYPRESS";
-+		break;
- 	default: BUG();
- 	}
- 
--	if (rdev->family >= CHIP_RV770) {
-+	if (rdev->family >= CHIP_CEDAR) {
-+		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
-+		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
-+		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
-+	} else if (rdev->family >= CHIP_RV770) {
- 		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
- 		me_req_size = R700_PM4_UCODE_SIZE * 4;
- 		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
-@@ -1584,12 +2086,15 @@ int r600_cp_start(struct radeon_device *rdev)
- 	}
- 	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
- 	radeon_ring_write(rdev, 0x1);
--	if (rdev->family < CHIP_RV770) {
--		radeon_ring_write(rdev, 0x3);
--		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
--	} else {
-+	if (rdev->family >= CHIP_CEDAR) {
-+		radeon_ring_write(rdev, 0x0);
-+		radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
-+	} else if (rdev->family >= CHIP_RV770) {
- 		radeon_ring_write(rdev, 0x0);
- 		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
-+	} else {
-+		radeon_ring_write(rdev, 0x3);
-+		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
- 	}
- 	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
- 	radeon_ring_write(rdev, 0);
-@@ -2051,8 +2556,6 @@ int r600_init(struct radeon_device *rdev)
- 	r = radeon_clocks_init(rdev);
- 	if (r)
- 		return r;
--	/* Initialize power management */
--	radeon_pm_init(rdev);
- 	/* Fence driver */
- 	r = radeon_fence_driver_init(rdev);
- 	if (r)
-@@ -2117,7 +2620,6 @@ int r600_init(struct radeon_device *rdev)
- 
- void r600_fini(struct radeon_device *rdev)
- {
--	radeon_pm_fini(rdev);
- 	r600_audio_fini(rdev);
- 	r600_blit_fini(rdev);
- 	r600_cp_fini(rdev);
-@@ -2290,10 +2792,11 @@ static void r600_ih_ring_fini(struct radeon_device *rdev)
- 	}
- }
- 
--static void r600_rlc_stop(struct radeon_device *rdev)
-+void r600_rlc_stop(struct radeon_device *rdev)
- {
- 
--	if (rdev->family >= CHIP_RV770) {
-+	if ((rdev->family >= CHIP_RV770) &&
-+	    (rdev->family <= CHIP_RV740)) {
- 		/* r7xx asics need to soft reset RLC before halting */
- 		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
- 		RREG32(SRBM_SOFT_RESET);
-@@ -2330,7 +2833,12 @@ static int r600_rlc_init(struct radeon_device *rdev)
- 	WREG32(RLC_UCODE_CNTL, 0);
- 
- 	fw_data = (const __be32 *)rdev->rlc_fw->data;
--	if (rdev->family >= CHIP_RV770) {
-+	if (rdev->family >= CHIP_CEDAR) {
-+		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
-+			WREG32(RLC_UCODE_ADDR, i);
-+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
-+		}
-+	} else if (rdev->family >= CHIP_RV770) {
- 		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
- 			WREG32(RLC_UCODE_ADDR, i);
- 			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
-@@ -2360,7 +2868,7 @@ static void r600_enable_interrupts(struct radeon_device *rdev)
- 	rdev->ih.enabled = true;
- }
- 
--static void r600_disable_interrupts(struct radeon_device *rdev)
-+void r600_disable_interrupts(struct radeon_device *rdev)
- {
- 	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
- 	u32 ih_cntl = RREG32(IH_CNTL);
-@@ -2475,7 +2983,10 @@ int r600_irq_init(struct radeon_device *rdev)
- 	WREG32(IH_CNTL, ih_cntl);
- 
- 	/* force the active interrupt state to all disabled */
--	r600_disable_interrupt_state(rdev);
-+	if (rdev->family >= CHIP_CEDAR)
-+		evergreen_disable_interrupt_state(rdev);
-+	else
-+		r600_disable_interrupt_state(rdev);
- 
- 	/* enable irqs */
- 	r600_enable_interrupts(rdev);
-@@ -2485,7 +2996,7 @@ int r600_irq_init(struct radeon_device *rdev)
- 
- void r600_irq_suspend(struct radeon_device *rdev)
- {
--	r600_disable_interrupts(rdev);
-+	r600_irq_disable(rdev);
- 	r600_rlc_stop(rdev);
- }
- 
-@@ -2500,6 +3011,8 @@ int r600_irq_set(struct radeon_device *rdev)
- 	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
- 	u32 mode_int = 0;
- 	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
-+	u32 grbm_int_cntl = 0;
-+	u32 hdmi1, hdmi2;
- 
- 	if (!rdev->irq.installed) {
- 		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
-@@ -2513,7 +3026,9 @@ int r600_irq_set(struct radeon_device *rdev)
- 		return 0;
- 	}
- 
-+	hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
- 	if (ASIC_IS_DCE3(rdev)) {
-+		hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
- 		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
- 		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
- 		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
-@@ -2523,6 +3038,7 @@ int r600_irq_set(struct radeon_device *rdev)
- 			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
- 		}
- 	} else {
-+		hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
- 		hpd1 =
RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN; -@@ -2564,10 +3080,25 @@ int r600_irq_set(struct radeon_device *rdev) - DRM_DEBUG("r600_irq_set: hpd 6\n"); - hpd6 |= DC_HPDx_INT_EN; - } -+ if (rdev->irq.hdmi[0]) { -+ DRM_DEBUG("r600_irq_set: hdmi 1\n"); -+ hdmi1 |= R600_HDMI_INT_EN; -+ } -+ if (rdev->irq.hdmi[1]) { -+ DRM_DEBUG("r600_irq_set: hdmi 2\n"); -+ hdmi2 |= R600_HDMI_INT_EN; -+ } -+ if (rdev->irq.gui_idle) { -+ DRM_DEBUG("gui idle\n"); -+ grbm_int_cntl |= GUI_IDLE_INT_ENABLE; -+ } - - WREG32(CP_INT_CNTL, cp_int_cntl); - WREG32(DxMODE_INT_MASK, mode_int); -+ WREG32(GRBM_INT_CNTL, grbm_int_cntl); -+ WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1); - if (ASIC_IS_DCE3(rdev)) { -+ WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2); - WREG32(DC_HPD1_INT_CONTROL, hpd1); - WREG32(DC_HPD2_INT_CONTROL, hpd2); - WREG32(DC_HPD3_INT_CONTROL, hpd3); -@@ -2577,6 +3108,7 @@ int r600_irq_set(struct radeon_device *rdev) - WREG32(DC_HPD6_INT_CONTROL, hpd6); - } - } else { -+ WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2); - WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1); - WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); - WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3); -@@ -2660,6 +3192,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev, - WREG32(DC_HPD6_INT_CONTROL, tmp); - } - } -+ if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) { -+ WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK); -+ } -+ if (ASIC_IS_DCE3(rdev)) { -+ if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) { -+ WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK); -+ } -+ } else { -+ if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) { -+ WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK); -+ } -+ } - } - - void r600_irq_disable(struct radeon_device *rdev) -@@ -2713,6 +3257,8 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev) - * 19 1 FP Hot plug detection B - * 19 2 DAC A auto-detection - * 19 3 DAC B auto-detection -+ * 21 4 HDMI block A -+ * 21 5 HDMI block B - * 176 - CP_INT RB - * 177 - CP_INT IB1 - * 178 - CP_INT IB2 -@@ -2852,6 +3398,10 @@ restart_ih: - break; - } - break; -+ case 21: /* HDMI */ -+ DRM_DEBUG("IH: HDMI: 0x%x\n", src_data); -+ r600_audio_schedule_polling(rdev); -+ break; - case 176: /* CP_INT in ring buffer */ - case 177: /* CP_INT in IB1 */ - case 178: /* CP_INT in IB2 */ -@@ -2861,6 +3411,11 @@ restart_ih: - case 181: /* CP EOP event */ - DRM_DEBUG("IH: CP EOP\n"); - break; -+ case 233: /* GUI IDLE */ -+ DRM_DEBUG("IH: CP EOP\n"); -+ rdev->pm.gui_idle = true; -+ wake_up(&rdev->irq.idle_queue); -+ break; - default: - DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); - break; -diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c -index 1d89805..2b26553 100644 ---- a/drivers/gpu/drm/radeon/r600_audio.c -+++ b/drivers/gpu/drm/radeon/r600_audio.c -@@ -44,7 +44,7 @@ static int r600_audio_chipset_supported(struct radeon_device *rdev) - /* - * current number of channels - */ --static int r600_audio_channels(struct radeon_device *rdev) -+int r600_audio_channels(struct radeon_device *rdev) - { - return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1; - } -@@ -52,7 +52,7 @@ static int r600_audio_channels(struct radeon_device *rdev) - /* - * 
current bits per sample - */ --static int r600_audio_bits_per_sample(struct radeon_device *rdev) -+int r600_audio_bits_per_sample(struct radeon_device *rdev) - { - uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4; - switch (value) { -@@ -71,7 +71,7 @@ static int r600_audio_bits_per_sample(struct radeon_device *rdev) - /* - * current sampling rate in HZ - */ --static int r600_audio_rate(struct radeon_device *rdev) -+int r600_audio_rate(struct radeon_device *rdev) - { - uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL); - uint32_t result; -@@ -90,7 +90,7 @@ static int r600_audio_rate(struct radeon_device *rdev) - /* - * iec 60958 status bits - */ --static uint8_t r600_audio_status_bits(struct radeon_device *rdev) -+uint8_t r600_audio_status_bits(struct radeon_device *rdev) - { - return RREG32(R600_AUDIO_STATUS_BITS) & 0xff; - } -@@ -98,12 +98,21 @@ static uint8_t r600_audio_status_bits(struct radeon_device *rdev) - /* - * iec 60958 category code - */ --static uint8_t r600_audio_category_code(struct radeon_device *rdev) -+uint8_t r600_audio_category_code(struct radeon_device *rdev) - { - return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff; - } - - /* -+ * schedule next audio update event -+ */ -+void r600_audio_schedule_polling(struct radeon_device *rdev) -+{ -+ mod_timer(&rdev->audio_timer, -+ jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL)); -+} -+ -+/* - * update all hdmi interfaces with current audio parameters - */ - static void r600_audio_update_hdmi(unsigned long param) -@@ -118,7 +127,7 @@ static void r600_audio_update_hdmi(unsigned long param) - uint8_t category_code = r600_audio_category_code(rdev); - - struct drm_encoder *encoder; -- int changes = 0; -+ int changes = 0, still_going = 0; - - changes |= channels != rdev->audio_channels; - changes |= rate != rdev->audio_rate; -@@ -135,15 +144,13 @@ static void r600_audio_update_hdmi(unsigned long param) - } - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { -+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); -+ still_going |= radeon_encoder->audio_polling_active; - if (changes || r600_hdmi_buffer_status_changed(encoder)) -- r600_hdmi_update_audio_settings( -- encoder, channels, -- rate, bps, status_bits, -- category_code); -+ r600_hdmi_update_audio_settings(encoder); - } - -- mod_timer(&rdev->audio_timer, -- jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL)); -+ if(still_going) r600_audio_schedule_polling(rdev); - } - - /* -@@ -176,9 +183,34 @@ int r600_audio_init(struct radeon_device *rdev) - r600_audio_update_hdmi, - (unsigned long)rdev); - -+ return 0; -+} -+ -+/* -+ * enable the polling timer, to check for status changes -+ */ -+void r600_audio_enable_polling(struct drm_encoder *encoder) -+{ -+ struct drm_device *dev = encoder->dev; -+ struct radeon_device *rdev = dev->dev_private; -+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); -+ -+ DRM_DEBUG("r600_audio_enable_polling: %d", radeon_encoder->audio_polling_active); -+ if (radeon_encoder->audio_polling_active) -+ return; -+ -+ radeon_encoder->audio_polling_active = 1; - mod_timer(&rdev->audio_timer, jiffies + 1); -+} - -- return 0; -+/* -+ * disable the polling timer, so we get no more status updates -+ */ -+void r600_audio_disable_polling(struct drm_encoder *encoder) -+{ -+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); -+ DRM_DEBUG("r600_audio_disable_polling: %d", radeon_encoder->audio_polling_active); -+ radeon_encoder->audio_polling_active = 0; - } - - /* -diff --git 
a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c -index f6c6c77..d13622a 100644 ---- a/drivers/gpu/drm/radeon/r600_blit_kms.c -+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c -@@ -447,6 +447,9 @@ int r600_blit_init(struct radeon_device *rdev) - u32 packet2s[16]; - int num_packet2s = 0; - -+ /* don't reinitialize blit */ -+ if (rdev->r600_blit.shader_obj) -+ return 0; - mutex_init(&rdev->r600_blit.mutex); - rdev->r600_blit.state_offset = 0; - -diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c -index 2616b82..26b4bc9 100644 ---- a/drivers/gpu/drm/radeon/r600_hdmi.c -+++ b/drivers/gpu/drm/radeon/r600_hdmi.c -@@ -290,17 +290,15 @@ void r600_hdmi_audio_workaround(struct drm_encoder *encoder) - if (!offset) - return; - -- if (r600_hdmi_is_audio_buffer_filled(encoder)) { -- /* disable audio workaround and start delivering of audio frames */ -- WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001); -+ if (!radeon_encoder->hdmi_audio_workaround || -+ r600_hdmi_is_audio_buffer_filled(encoder)) { - -- } else if (radeon_encoder->hdmi_audio_workaround) { -- /* enable audio workaround and start delivering of audio frames */ -- WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001); -+ /* disable audio workaround */ -+ WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001); - - } else { -- /* disable audio workaround and stop delivering of audio frames */ -- WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001); -+ /* enable audio workaround */ -+ WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001); - } - } - -@@ -345,25 +343,23 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod - - /* audio packets per line, does anyone know how to calc this ? */ - WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000); -- -- /* update? reset? don't realy know */ -- WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000); - } - - /* - * update settings with current parameters from audio engine - */ --void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, -- int channels, -- int rate, -- int bps, -- uint8_t status_bits, -- uint8_t category_code) -+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder) - { - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; - -+ int channels = r600_audio_channels(rdev); -+ int rate = r600_audio_rate(rdev); -+ int bps = r600_audio_bits_per_sample(rdev); -+ uint8_t status_bits = r600_audio_status_bits(rdev); -+ uint8_t category_code = r600_audio_category_code(rdev); -+ - uint32_t iec; - - if (!offset) -@@ -415,9 +411,6 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, - r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0); - - r600_hdmi_audio_workaround(encoder); -- -- /* update? reset? 
don't realy know */ -- WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000); - } - - static int r600_hdmi_find_free_block(struct drm_device *dev) -@@ -486,6 +479,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder) - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); -+ uint32_t offset; - - if (ASIC_IS_DCE4(rdev)) - return; -@@ -499,10 +493,10 @@ void r600_hdmi_enable(struct drm_encoder *encoder) - } - } - -+ offset = radeon_encoder->hdmi_offset; - if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) { - WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1); - } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { -- int offset = radeon_encoder->hdmi_offset; - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: - WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4); -@@ -518,6 +512,21 @@ void r600_hdmi_enable(struct drm_encoder *encoder) - } - } - -+ if (rdev->irq.installed -+ && rdev->family != CHIP_RS600 -+ && rdev->family != CHIP_RS690 -+ && rdev->family != CHIP_RS740) { -+ -+ /* if irq is available use it */ -+ rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true; -+ radeon_irq_set(rdev); -+ -+ r600_audio_disable_polling(encoder); -+ } else { -+ /* if not fallback to polling */ -+ r600_audio_enable_polling(encoder); -+ } -+ - DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n", - radeon_encoder->hdmi_offset, radeon_encoder->encoder_id); - } -@@ -530,22 +539,30 @@ void r600_hdmi_disable(struct drm_encoder *encoder) - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); -+ uint32_t offset; - - if (ASIC_IS_DCE4(rdev)) - return; - -- if (!radeon_encoder->hdmi_offset) { -+ offset = radeon_encoder->hdmi_offset; -+ if (!offset) { - dev_err(rdev->dev, "Disabling not enabled HDMI\n"); - return; - } - - DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n", -- radeon_encoder->hdmi_offset, radeon_encoder->encoder_id); -+ offset, radeon_encoder->encoder_id); -+ -+ /* disable irq */ -+ rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 
0 : 1] = false; -+ radeon_irq_set(rdev); -+ -+ /* disable polling */ -+ r600_audio_disable_polling(encoder); - - if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) { - WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1); - } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { -- int offset = radeon_encoder->hdmi_offset; - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: - WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4); -diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h -index 7b1d223..d84612a 100644 ---- a/drivers/gpu/drm/radeon/r600_reg.h -+++ b/drivers/gpu/drm/radeon/r600_reg.h -@@ -157,33 +157,36 @@ - #define R600_HDMI_BLOCK3 0x7800 - - /* HDMI registers */ --#define R600_HDMI_ENABLE 0x00 --#define R600_HDMI_STATUS 0x04 --#define R600_HDMI_CNTL 0x08 --#define R600_HDMI_UNKNOWN_0 0x0C --#define R600_HDMI_AUDIOCNTL 0x10 --#define R600_HDMI_VIDEOCNTL 0x14 --#define R600_HDMI_VERSION 0x18 --#define R600_HDMI_UNKNOWN_1 0x28 --#define R600_HDMI_VIDEOINFOFRAME_0 0x54 --#define R600_HDMI_VIDEOINFOFRAME_1 0x58 --#define R600_HDMI_VIDEOINFOFRAME_2 0x5c --#define R600_HDMI_VIDEOINFOFRAME_3 0x60 --#define R600_HDMI_32kHz_CTS 0xac --#define R600_HDMI_32kHz_N 0xb0 --#define R600_HDMI_44_1kHz_CTS 0xb4 --#define R600_HDMI_44_1kHz_N 0xb8 --#define R600_HDMI_48kHz_CTS 0xbc --#define R600_HDMI_48kHz_N 0xc0 --#define R600_HDMI_AUDIOINFOFRAME_0 0xcc --#define R600_HDMI_AUDIOINFOFRAME_1 0xd0 --#define R600_HDMI_IEC60958_1 0xd4 --#define R600_HDMI_IEC60958_2 0xd8 --#define R600_HDMI_UNKNOWN_2 0xdc --#define R600_HDMI_AUDIO_DEBUG_0 0xe0 --#define R600_HDMI_AUDIO_DEBUG_1 0xe4 --#define R600_HDMI_AUDIO_DEBUG_2 0xe8 --#define R600_HDMI_AUDIO_DEBUG_3 0xec -+#define R600_HDMI_ENABLE 0x00 -+#define R600_HDMI_STATUS 0x04 -+# define R600_HDMI_INT_PENDING (1 << 29) -+#define R600_HDMI_CNTL 0x08 -+# define R600_HDMI_INT_EN (1 << 28) -+# define R600_HDMI_INT_ACK (1 << 29) -+#define R600_HDMI_UNKNOWN_0 0x0C -+#define R600_HDMI_AUDIOCNTL 0x10 -+#define R600_HDMI_VIDEOCNTL 0x14 -+#define R600_HDMI_VERSION 0x18 -+#define R600_HDMI_UNKNOWN_1 0x28 -+#define R600_HDMI_VIDEOINFOFRAME_0 0x54 -+#define R600_HDMI_VIDEOINFOFRAME_1 0x58 -+#define R600_HDMI_VIDEOINFOFRAME_2 0x5c -+#define R600_HDMI_VIDEOINFOFRAME_3 0x60 -+#define R600_HDMI_32kHz_CTS 0xac -+#define R600_HDMI_32kHz_N 0xb0 -+#define R600_HDMI_44_1kHz_CTS 0xb4 -+#define R600_HDMI_44_1kHz_N 0xb8 -+#define R600_HDMI_48kHz_CTS 0xbc -+#define R600_HDMI_48kHz_N 0xc0 -+#define R600_HDMI_AUDIOINFOFRAME_0 0xcc -+#define R600_HDMI_AUDIOINFOFRAME_1 0xd0 -+#define R600_HDMI_IEC60958_1 0xd4 -+#define R600_HDMI_IEC60958_2 0xd8 -+#define R600_HDMI_UNKNOWN_2 0xdc -+#define R600_HDMI_AUDIO_DEBUG_0 0xe0 -+#define R600_HDMI_AUDIO_DEBUG_1 0xe4 -+#define R600_HDMI_AUDIO_DEBUG_2 0xe8 -+#define R600_HDMI_AUDIO_DEBUG_3 0xec - - /* HDMI additional config base register addresses */ - #define R600_HDMI_CONFIG1 0x7600 -diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h -index 034218c..9c8af5f 100644 ---- a/drivers/gpu/drm/radeon/radeon.h -+++ b/drivers/gpu/drm/radeon/radeon.h -@@ -89,16 +89,17 @@ extern int radeon_testing; - extern int radeon_connector_table; - extern int radeon_tv; - extern int radeon_new_pll; --extern int radeon_dynpm; - extern int radeon_audio; - extern int radeon_disp_priority; - extern int radeon_hw_i2c; -+extern int radeon_pm; - - /* - * Copy from radeon_drv.h so we don't have to include both and have conflicting - * symbol; - */ - #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 
ms */ -+#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2) - /* RADEON_IB_POOL_SIZE must be a power of 2 */ - #define RADEON_IB_POOL_SIZE 16 - #define RADEON_DEBUGFS_MAX_NUM_FILES 32 -@@ -172,8 +173,11 @@ struct radeon_clock { - int radeon_pm_init(struct radeon_device *rdev); - void radeon_pm_fini(struct radeon_device *rdev); - void radeon_pm_compute_clocks(struct radeon_device *rdev); -+void radeon_pm_suspend(struct radeon_device *rdev); -+void radeon_pm_resume(struct radeon_device *rdev); - void radeon_combios_get_power_modes(struct radeon_device *rdev); - void radeon_atombios_get_power_modes(struct radeon_device *rdev); -+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); - - /* - * Fences. -@@ -182,7 +186,8 @@ struct radeon_fence_driver { - uint32_t scratch_reg; - atomic_t seq; - uint32_t last_seq; -- unsigned long count_timeout; -+ unsigned long last_jiffies; -+ unsigned long last_timeout; - wait_queue_head_t queue; - rwlock_t lock; - struct list_head created; -@@ -197,7 +202,6 @@ struct radeon_fence { - struct list_head list; - /* protected by radeon_fence.lock */ - uint32_t seq; -- unsigned long timeout; - bool emited; - bool signaled; - }; -@@ -259,6 +263,7 @@ struct radeon_bo_list { - unsigned rdomain; - unsigned wdomain; - u32 tiling_flags; -+ bool reserved; - }; - - /* -@@ -371,10 +376,15 @@ struct radeon_irq { - bool installed; - bool sw_int; - /* FIXME: use a define max crtc rather than hardcode it */ -- bool crtc_vblank_int[2]; -+ bool crtc_vblank_int[6]; - wait_queue_head_t vblank_queue; - /* FIXME: use defines for max hpd/dacs */ - bool hpd[6]; -+ bool gui_idle; -+ bool gui_idle_acked; -+ wait_queue_head_t idle_queue; -+ /* FIXME: use defines for max HDMI blocks */ -+ bool hdmi[2]; - spinlock_t sw_lock; - int sw_refcount; - }; -@@ -462,7 +472,9 @@ int radeon_ib_test(struct radeon_device *rdev); - extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib); - /* Ring access between begin & end cannot sleep */ - void radeon_ring_free_size(struct radeon_device *rdev); -+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw); - int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw); -+void radeon_ring_commit(struct radeon_device *rdev); - void radeon_ring_unlock_commit(struct radeon_device *rdev); - void radeon_ring_unlock_undo(struct radeon_device *rdev); - int radeon_ring_test(struct radeon_device *rdev); -@@ -566,6 +578,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p, - */ - int radeon_agp_init(struct radeon_device *rdev); - void radeon_agp_resume(struct radeon_device *rdev); -+void radeon_agp_suspend(struct radeon_device *rdev); - void radeon_agp_fini(struct radeon_device *rdev); - - -@@ -597,17 +610,24 @@ struct radeon_wb { - * Equation between gpu/memory clock and available bandwidth is hw dependent - * (type of memory, bus size, efficiency, ...) 
- */ --enum radeon_pm_state { -- PM_STATE_DISABLED, -- PM_STATE_MINIMUM, -- PM_STATE_PAUSED, -- PM_STATE_ACTIVE -+ -+enum radeon_pm_method { -+ PM_METHOD_PROFILE, -+ PM_METHOD_DYNPM, -+}; -+ -+enum radeon_dynpm_state { -+ DYNPM_STATE_DISABLED, -+ DYNPM_STATE_MINIMUM, -+ DYNPM_STATE_PAUSED, -+ DYNPM_STATE_ACTIVE - }; --enum radeon_pm_action { -- PM_ACTION_NONE, -- PM_ACTION_MINIMUM, -- PM_ACTION_DOWNCLOCK, -- PM_ACTION_UPCLOCK -+enum radeon_dynpm_action { -+ DYNPM_ACTION_NONE, -+ DYNPM_ACTION_MINIMUM, -+ DYNPM_ACTION_DOWNCLOCK, -+ DYNPM_ACTION_UPCLOCK, -+ DYNPM_ACTION_DEFAULT - }; - - enum radeon_voltage_type { -@@ -625,11 +645,28 @@ enum radeon_pm_state_type { - POWER_STATE_TYPE_PERFORMANCE, - }; - --enum radeon_pm_clock_mode_type { -- POWER_MODE_TYPE_DEFAULT, -- POWER_MODE_TYPE_LOW, -- POWER_MODE_TYPE_MID, -- POWER_MODE_TYPE_HIGH, -+enum radeon_pm_profile_type { -+ PM_PROFILE_DEFAULT, -+ PM_PROFILE_AUTO, -+ PM_PROFILE_LOW, -+ PM_PROFILE_MID, -+ PM_PROFILE_HIGH, -+}; -+ -+#define PM_PROFILE_DEFAULT_IDX 0 -+#define PM_PROFILE_LOW_SH_IDX 1 -+#define PM_PROFILE_MID_SH_IDX 2 -+#define PM_PROFILE_HIGH_SH_IDX 3 -+#define PM_PROFILE_LOW_MH_IDX 4 -+#define PM_PROFILE_MID_MH_IDX 5 -+#define PM_PROFILE_HIGH_MH_IDX 6 -+#define PM_PROFILE_MAX 7 -+ -+struct radeon_pm_profile { -+ int dpms_off_ps_idx; -+ int dpms_on_ps_idx; -+ int dpms_off_cm_idx; -+ int dpms_on_cm_idx; - }; - - struct radeon_voltage { -@@ -646,12 +683,8 @@ struct radeon_voltage { - u32 voltage; - }; - --struct radeon_pm_non_clock_info { -- /* pcie lanes */ -- int pcie_lanes; -- /* standardized non-clock flags */ -- u32 flags; --}; -+/* clock mode flags */ -+#define RADEON_PM_MODE_NO_DISPLAY (1 << 0) - - struct radeon_pm_clock_info { - /* memory clock */ -@@ -660,10 +693,13 @@ struct radeon_pm_clock_info { - u32 sclk; - /* voltage info */ - struct radeon_voltage voltage; -- /* standardized clock flags - not sure we'll need these */ -+ /* standardized clock flags */ - u32 flags; - }; - -+/* state flags */ -+#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0) -+ - struct radeon_power_state { - enum radeon_pm_state_type type; - /* XXX: use a define for num clock modes */ -@@ -671,9 +707,11 @@ struct radeon_power_state { - /* number of valid clock modes in this power state */ - int num_clock_modes; - struct radeon_pm_clock_info *default_clock_mode; -- /* non clock info about this state */ -- struct radeon_pm_non_clock_info non_clock_info; -- bool voltage_drop_active; -+ /* standardized state flags */ -+ u32 flags; -+ u32 misc; /* vbios specific flags */ -+ u32 misc2; /* vbios specific flags */ -+ int pcie_lanes; /* pcie lanes */ - }; - - /* -@@ -683,14 +721,11 @@ struct radeon_power_state { - - struct radeon_pm { - struct mutex mutex; -- struct delayed_work idle_work; -- enum radeon_pm_state state; -- enum radeon_pm_action planned_action; -- unsigned long action_timeout; -- bool downclocked; -- int active_crtcs; -+ u32 active_crtcs; -+ int active_crtc_count; - int req_vblank; - bool vblank_sync; -+ bool gui_idle; - fixed20_12 max_bandwidth; - fixed20_12 igp_sideport_mclk; - fixed20_12 igp_system_mclk; -@@ -707,12 +742,28 @@ struct radeon_pm { - struct radeon_power_state power_state[8]; - /* number of valid power states */ - int num_power_states; -- struct radeon_power_state *current_power_state; -- struct radeon_pm_clock_info *current_clock_mode; -- struct radeon_power_state *requested_power_state; -- struct radeon_pm_clock_info *requested_clock_mode; -- struct radeon_power_state *default_power_state; -+ int current_power_state_index; -+ 
int current_clock_mode_index; -+ int requested_power_state_index; -+ int requested_clock_mode_index; -+ int default_power_state_index; -+ u32 current_sclk; -+ u32 current_mclk; -+ u32 current_vddc; - struct radeon_i2c_chan *i2c_bus; -+ /* selected pm method */ -+ enum radeon_pm_method pm_method; -+ /* dynpm power management */ -+ struct delayed_work dynpm_idle_work; -+ enum radeon_dynpm_state dynpm_state; -+ enum radeon_dynpm_action dynpm_planned_action; -+ unsigned long dynpm_action_timeout; -+ bool dynpm_can_upclock; -+ bool dynpm_can_downclock; -+ /* profile-based power management */ -+ enum radeon_pm_profile_type profile; -+ int profile_index; -+ struct radeon_pm_profile profiles[PM_PROFILE_MAX]; - }; - - -@@ -746,7 +797,8 @@ struct radeon_asic { - int (*resume)(struct radeon_device *rdev); - int (*suspend)(struct radeon_device *rdev); - void (*vga_set_state)(struct radeon_device *rdev, bool state); -- int (*gpu_reset)(struct radeon_device *rdev); -+ bool (*gpu_is_lockup)(struct radeon_device *rdev); -+ int (*asic_reset)(struct radeon_device *rdev); - void (*gart_tlb_flush)(struct radeon_device *rdev); - int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); - int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); -@@ -799,44 +851,84 @@ struct radeon_asic { - * through ring. - */ - void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); -+ bool (*gui_idle)(struct radeon_device *rdev); -+ /* power management */ -+ void (*pm_misc)(struct radeon_device *rdev); -+ void (*pm_prepare)(struct radeon_device *rdev); -+ void (*pm_finish)(struct radeon_device *rdev); -+ void (*pm_init_profile)(struct radeon_device *rdev); -+ void (*pm_get_dynpm_state)(struct radeon_device *rdev); - }; - - /* - * Asic structures - */ -+struct r100_gpu_lockup { -+ unsigned long last_jiffies; -+ u32 last_cp_rptr; -+}; -+ - struct r100_asic { -- const unsigned *reg_safe_bm; -- unsigned reg_safe_bm_size; -- u32 hdp_cntl; -+ const unsigned *reg_safe_bm; -+ unsigned reg_safe_bm_size; -+ u32 hdp_cntl; -+ struct r100_gpu_lockup lockup; - }; - - struct r300_asic { -- const unsigned *reg_safe_bm; -- unsigned reg_safe_bm_size; -- u32 resync_scratch; -- u32 hdp_cntl; -+ const unsigned *reg_safe_bm; -+ unsigned reg_safe_bm_size; -+ u32 resync_scratch; -+ u32 hdp_cntl; -+ struct r100_gpu_lockup lockup; - }; - - struct r600_asic { -- unsigned max_pipes; -- unsigned max_tile_pipes; -- unsigned max_simds; -- unsigned max_backends; -- unsigned max_gprs; -- unsigned max_threads; -- unsigned max_stack_entries; -- unsigned max_hw_contexts; -- unsigned max_gs_threads; -- unsigned sx_max_export_size; -- unsigned sx_max_export_pos_size; -- unsigned sx_max_export_smx_size; -- unsigned sq_num_cf_insts; -- unsigned tiling_nbanks; -- unsigned tiling_npipes; -- unsigned tiling_group_size; -+ unsigned max_pipes; -+ unsigned max_tile_pipes; -+ unsigned max_simds; -+ unsigned max_backends; -+ unsigned max_gprs; -+ unsigned max_threads; -+ unsigned max_stack_entries; -+ unsigned max_hw_contexts; -+ unsigned max_gs_threads; -+ unsigned sx_max_export_size; -+ unsigned sx_max_export_pos_size; -+ unsigned sx_max_export_smx_size; -+ unsigned sq_num_cf_insts; -+ unsigned tiling_nbanks; -+ unsigned tiling_npipes; -+ unsigned tiling_group_size; -+ struct r100_gpu_lockup lockup; - }; - - struct rv770_asic { -+ unsigned max_pipes; -+ unsigned max_tile_pipes; -+ unsigned max_simds; -+ unsigned max_backends; -+ unsigned max_gprs; -+ unsigned max_threads; -+ unsigned max_stack_entries; -+ unsigned max_hw_contexts; 
-+ unsigned max_gs_threads; -+ unsigned sx_max_export_size; -+ unsigned sx_max_export_pos_size; -+ unsigned sx_max_export_smx_size; -+ unsigned sq_num_cf_insts; -+ unsigned sx_num_of_sets; -+ unsigned sc_prim_fifo_size; -+ unsigned sc_hiz_tile_fifo_size; -+ unsigned sc_earlyz_tile_fifo_fize; -+ unsigned tiling_nbanks; -+ unsigned tiling_npipes; -+ unsigned tiling_group_size; -+ struct r100_gpu_lockup lockup; -+}; -+ -+struct evergreen_asic { -+ unsigned num_ses; - unsigned max_pipes; - unsigned max_tile_pipes; - unsigned max_simds; -@@ -853,7 +945,7 @@ struct rv770_asic { - unsigned sx_num_of_sets; - unsigned sc_prim_fifo_size; - unsigned sc_hiz_tile_fifo_size; -- unsigned sc_earlyz_tile_fifo_fize; -+ unsigned sc_earlyz_tile_fifo_size; - unsigned tiling_nbanks; - unsigned tiling_npipes; - unsigned tiling_group_size; -@@ -864,6 +956,7 @@ union radeon_asic_config { - struct r100_asic r100; - struct r600_asic r600; - struct rv770_asic rv770; -+ struct evergreen_asic evergreen; - }; - - /* -@@ -927,9 +1020,6 @@ struct radeon_device { - bool is_atom_bios; - uint16_t bios_header_start; - struct radeon_bo *stollen_vga_memory; -- struct fb_info *fbdev_info; -- struct radeon_bo *fbdev_rbo; -- struct radeon_framebuffer *fbdev_rfb; - /* Register mmio */ - resource_size_t rmmio_base; - resource_size_t rmmio_size; -@@ -974,6 +1064,7 @@ struct radeon_device { - struct work_struct hotplug_work; - int num_crtc; /* number of crtcs */ - struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ -+ struct mutex vram_mutex; - - /* audio stuff */ - struct timer_list audio_timer; -@@ -984,6 +1075,7 @@ struct radeon_device { - uint8_t audio_category_code; - - bool powered_down; -+ struct notifier_block acpi_nb; - }; - - int radeon_device_init(struct radeon_device *rdev, -@@ -1145,7 +1237,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) - #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) - #define radeon_cs_parse(p) rdev->asic->cs_parse((p)) - #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) --#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) -+#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev)) -+#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) - #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) - #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) - #define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) -@@ -1173,9 +1266,16 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) - #define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev)) - #define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd)) - #define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) -+#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev)) -+#define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev)) -+#define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev)) -+#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev)) -+#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev)) -+#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev)) - - /* Common functions */ - /* AGP */ -+extern int radeon_gpu_reset(struct radeon_device *rdev); - extern void radeon_agp_disable(struct radeon_device *rdev); - extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); - extern void radeon_gart_restore(struct 
radeon_device *rdev); -@@ -1200,6 +1300,8 @@ extern int radeon_resume_kms(struct drm_device *dev); - extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); - - /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ -+extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp); -+extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp); - - /* rv200,rv250,rv280 */ - extern void r200_set_safe_registers(struct radeon_device *rdev); -@@ -1260,6 +1362,7 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev, - extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); - extern bool r600_card_posted(struct radeon_device *rdev); - extern void r600_cp_stop(struct radeon_device *rdev); -+extern int r600_cp_start(struct radeon_device *rdev); - extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); - extern int r600_cp_resume(struct radeon_device *rdev); - extern void r600_cp_fini(struct radeon_device *rdev); -@@ -1276,29 +1379,39 @@ extern void r600_scratch_init(struct radeon_device *rdev); - extern int r600_blit_init(struct radeon_device *rdev); - extern void r600_blit_fini(struct radeon_device *rdev); - extern int r600_init_microcode(struct radeon_device *rdev); --extern int r600_gpu_reset(struct radeon_device *rdev); -+extern int r600_asic_reset(struct radeon_device *rdev); - /* r600 irq */ - extern int r600_irq_init(struct radeon_device *rdev); - extern void r600_irq_fini(struct radeon_device *rdev); - extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); - extern int r600_irq_set(struct radeon_device *rdev); - extern void r600_irq_suspend(struct radeon_device *rdev); -+extern void r600_disable_interrupts(struct radeon_device *rdev); -+extern void r600_rlc_stop(struct radeon_device *rdev); - /* r600 audio */ - extern int r600_audio_init(struct radeon_device *rdev); - extern int r600_audio_tmds_index(struct drm_encoder *encoder); - extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); -+extern int r600_audio_channels(struct radeon_device *rdev); -+extern int r600_audio_bits_per_sample(struct radeon_device *rdev); -+extern int r600_audio_rate(struct radeon_device *rdev); -+extern uint8_t r600_audio_status_bits(struct radeon_device *rdev); -+extern uint8_t r600_audio_category_code(struct radeon_device *rdev); -+extern void r600_audio_schedule_polling(struct radeon_device *rdev); -+extern void r600_audio_enable_polling(struct drm_encoder *encoder); -+extern void r600_audio_disable_polling(struct drm_encoder *encoder); - extern void r600_audio_fini(struct radeon_device *rdev); - extern void r600_hdmi_init(struct drm_encoder *encoder); - extern void r600_hdmi_enable(struct drm_encoder *encoder); - extern void r600_hdmi_disable(struct drm_encoder *encoder); - extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); - extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); --extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, -- int channels, -- int rate, -- int bps, -- uint8_t status_bits, -- uint8_t category_code); -+extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); -+ -+extern void r700_cp_stop(struct radeon_device *rdev); -+extern void r700_cp_fini(struct radeon_device *rdev); -+extern void evergreen_disable_interrupt_state(struct radeon_device *rdev); -+extern int evergreen_irq_set(struct 
radeon_device *rdev); - - /* evergreen */ - struct evergreen_mc_save { -diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c -index 28e473f..f40dfb7 100644 ---- a/drivers/gpu/drm/radeon/radeon_agp.c -+++ b/drivers/gpu/drm/radeon/radeon_agp.c -@@ -270,3 +270,8 @@ void radeon_agp_fini(struct radeon_device *rdev) - } - #endif - } -+ -+void radeon_agp_suspend(struct radeon_device *rdev) -+{ -+ radeon_agp_fini(rdev); -+} -diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c -index a4b4bc9..87f7e2c 100644 ---- a/drivers/gpu/drm/radeon/radeon_asic.c -+++ b/drivers/gpu/drm/radeon/radeon_asic.c -@@ -134,7 +134,8 @@ static struct radeon_asic r100_asic = { - .suspend = &r100_suspend, - .resume = &r100_resume, - .vga_set_state = &r100_vga_set_state, -- .gpu_reset = &r100_gpu_reset, -+ .gpu_is_lockup = &r100_gpu_is_lockup, -+ .asic_reset = &r100_asic_reset, - .gart_tlb_flush = &r100_pci_gart_tlb_flush, - .gart_set_page = &r100_pci_gart_set_page, - .cp_commit = &r100_cp_commit, -@@ -164,6 +165,12 @@ static struct radeon_asic r100_asic = { - .hpd_sense = &r100_hpd_sense, - .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, -+ .gui_idle = &r100_gui_idle, -+ .pm_misc = &r100_pm_misc, -+ .pm_prepare = &r100_pm_prepare, -+ .pm_finish = &r100_pm_finish, -+ .pm_init_profile = &r100_pm_init_profile, -+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, - }; - - static struct radeon_asic r200_asic = { -@@ -172,7 +179,8 @@ static struct radeon_asic r200_asic = { - .suspend = &r100_suspend, - .resume = &r100_resume, - .vga_set_state = &r100_vga_set_state, -- .gpu_reset = &r100_gpu_reset, -+ .gpu_is_lockup = &r100_gpu_is_lockup, -+ .asic_reset = &r100_asic_reset, - .gart_tlb_flush = &r100_pci_gart_tlb_flush, - .gart_set_page = &r100_pci_gart_set_page, - .cp_commit = &r100_cp_commit, -@@ -201,6 +209,12 @@ static struct radeon_asic r200_asic = { - .hpd_sense = &r100_hpd_sense, - .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, -+ .gui_idle = &r100_gui_idle, -+ .pm_misc = &r100_pm_misc, -+ .pm_prepare = &r100_pm_prepare, -+ .pm_finish = &r100_pm_finish, -+ .pm_init_profile = &r100_pm_init_profile, -+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, - }; - - static struct radeon_asic r300_asic = { -@@ -209,7 +223,8 @@ static struct radeon_asic r300_asic = { - .suspend = &r300_suspend, - .resume = &r300_resume, - .vga_set_state = &r100_vga_set_state, -- .gpu_reset = &r300_gpu_reset, -+ .gpu_is_lockup = &r300_gpu_is_lockup, -+ .asic_reset = &r300_asic_reset, - .gart_tlb_flush = &r100_pci_gart_tlb_flush, - .gart_set_page = &r100_pci_gart_set_page, - .cp_commit = &r100_cp_commit, -@@ -239,6 +254,12 @@ static struct radeon_asic r300_asic = { - .hpd_sense = &r100_hpd_sense, - .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, -+ .gui_idle = &r100_gui_idle, -+ .pm_misc = &r100_pm_misc, -+ .pm_prepare = &r100_pm_prepare, -+ .pm_finish = &r100_pm_finish, -+ .pm_init_profile = &r100_pm_init_profile, -+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, - }; - - static struct radeon_asic r300_asic_pcie = { -@@ -247,7 +268,8 @@ static struct radeon_asic r300_asic_pcie = { - .suspend = &r300_suspend, - .resume = &r300_resume, - .vga_set_state = &r100_vga_set_state, -- .gpu_reset = &r300_gpu_reset, -+ .gpu_is_lockup = &r300_gpu_is_lockup, -+ .asic_reset = &r300_asic_reset, - .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, - .gart_set_page = &rv370_pcie_gart_set_page, - .cp_commit = &r100_cp_commit, -@@ -276,6 
+298,12 @@ static struct radeon_asic r300_asic_pcie = { - .hpd_sense = &r100_hpd_sense, - .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, -+ .gui_idle = &r100_gui_idle, -+ .pm_misc = &r100_pm_misc, -+ .pm_prepare = &r100_pm_prepare, -+ .pm_finish = &r100_pm_finish, -+ .pm_init_profile = &r100_pm_init_profile, -+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, - }; - - static struct radeon_asic r420_asic = { -@@ -284,7 +312,8 @@ static struct radeon_asic r420_asic = { - .suspend = &r420_suspend, - .resume = &r420_resume, - .vga_set_state = &r100_vga_set_state, -- .gpu_reset = &r300_gpu_reset, -+ .gpu_is_lockup = &r300_gpu_is_lockup, -+ .asic_reset = &r300_asic_reset, - .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, - .gart_set_page = &rv370_pcie_gart_set_page, - .cp_commit = &r100_cp_commit, -@@ -314,6 +343,12 @@ static struct radeon_asic r420_asic = { - .hpd_sense = &r100_hpd_sense, - .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, -+ .gui_idle = &r100_gui_idle, -+ .pm_misc = &r100_pm_misc, -+ .pm_prepare = &r100_pm_prepare, -+ .pm_finish = &r100_pm_finish, -+ .pm_init_profile = &r420_pm_init_profile, -+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, - }; - - static struct radeon_asic rs400_asic = { -@@ -322,7 +357,8 @@ static struct radeon_asic rs400_asic = { - .suspend = &rs400_suspend, - .resume = &rs400_resume, - .vga_set_state = &r100_vga_set_state, -- .gpu_reset = &r300_gpu_reset, -+ .gpu_is_lockup = &r300_gpu_is_lockup, -+ .asic_reset = &r300_asic_reset, - .gart_tlb_flush = &rs400_gart_tlb_flush, - .gart_set_page = &rs400_gart_set_page, - .cp_commit = &r100_cp_commit, -@@ -352,6 +388,12 @@ static struct radeon_asic rs400_asic = { - .hpd_sense = &r100_hpd_sense, - .hpd_set_polarity = &r100_hpd_set_polarity, - .ioctl_wait_idle = NULL, -+ .gui_idle = &r100_gui_idle, -+ .pm_misc = &r100_pm_misc, -+ .pm_prepare = &r100_pm_prepare, -+ .pm_finish = &r100_pm_finish, -+ .pm_init_profile = &r100_pm_init_profile, -+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, - }; - - static struct radeon_asic rs600_asic = { -@@ -360,7 +402,8 @@ static struct radeon_asic rs600_asic = { - .suspend = &rs600_suspend, - .resume = &rs600_resume, - .vga_set_state = &r100_vga_set_state, -- .gpu_reset = &r300_gpu_reset, -+ .gpu_is_lockup = &r300_gpu_is_lockup, -+ .asic_reset = &rs600_asic_reset, - .gart_tlb_flush = &rs600_gart_tlb_flush, - .gart_set_page = &rs600_gart_set_page, - .cp_commit = &r100_cp_commit, -@@ -390,6 +433,12 @@ static struct radeon_asic rs600_asic = { - .hpd_sense = &rs600_hpd_sense, - .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, -+ .gui_idle = &r100_gui_idle, -+ .pm_misc = &rs600_pm_misc, -+ .pm_prepare = &rs600_pm_prepare, -+ .pm_finish = &rs600_pm_finish, -+ .pm_init_profile = &r420_pm_init_profile, -+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, - }; - - static struct radeon_asic rs690_asic = { -@@ -398,7 +447,8 @@ static struct radeon_asic rs690_asic = { - .suspend = &rs690_suspend, - .resume = &rs690_resume, - .vga_set_state = &r100_vga_set_state, -- .gpu_reset = &r300_gpu_reset, -+ .gpu_is_lockup = &r300_gpu_is_lockup, -+ .asic_reset = &rs600_asic_reset, - .gart_tlb_flush = &rs400_gart_tlb_flush, - .gart_set_page = &rs400_gart_set_page, - .cp_commit = &r100_cp_commit, -@@ -428,6 +478,12 @@ static struct radeon_asic rs690_asic = { - .hpd_sense = &rs600_hpd_sense, - .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, -+ .gui_idle = &r100_gui_idle, -+ .pm_misc = &rs600_pm_misc, -+ .pm_prepare = 
&rs600_pm_prepare, -+ .pm_finish = &rs600_pm_finish, -+ .pm_init_profile = &r420_pm_init_profile, -+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, - }; - - static struct radeon_asic rv515_asic = { -@@ -436,7 +492,8 @@ static struct radeon_asic rv515_asic = { - .suspend = &rv515_suspend, - .resume = &rv515_resume, - .vga_set_state = &r100_vga_set_state, -- .gpu_reset = &rv515_gpu_reset, -+ .gpu_is_lockup = &r300_gpu_is_lockup, -+ .asic_reset = &rs600_asic_reset, - .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, - .gart_set_page = &rv370_pcie_gart_set_page, - .cp_commit = &r100_cp_commit, -@@ -466,6 +523,12 @@ static struct radeon_asic rv515_asic = { - .hpd_sense = &rs600_hpd_sense, - .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, -+ .gui_idle = &r100_gui_idle, -+ .pm_misc = &rs600_pm_misc, -+ .pm_prepare = &rs600_pm_prepare, -+ .pm_finish = &rs600_pm_finish, -+ .pm_init_profile = &r420_pm_init_profile, -+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, - }; - - static struct radeon_asic r520_asic = { -@@ -474,7 +537,8 @@ static struct radeon_asic r520_asic = { - .suspend = &rv515_suspend, - .resume = &r520_resume, - .vga_set_state = &r100_vga_set_state, -- .gpu_reset = &rv515_gpu_reset, -+ .gpu_is_lockup = &r300_gpu_is_lockup, -+ .asic_reset = &rs600_asic_reset, - .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, - .gart_set_page = &rv370_pcie_gart_set_page, - .cp_commit = &r100_cp_commit, -@@ -504,6 +568,12 @@ static struct radeon_asic r520_asic = { - .hpd_sense = &rs600_hpd_sense, - .hpd_set_polarity = &rs600_hpd_set_polarity, - .ioctl_wait_idle = NULL, -+ .gui_idle = &r100_gui_idle, -+ .pm_misc = &rs600_pm_misc, -+ .pm_prepare = &rs600_pm_prepare, -+ .pm_finish = &rs600_pm_finish, -+ .pm_init_profile = &r420_pm_init_profile, -+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state, - }; - - static struct radeon_asic r600_asic = { -@@ -513,7 +583,8 @@ static struct radeon_asic r600_asic = { - .resume = &r600_resume, - .cp_commit = &r600_cp_commit, - .vga_set_state = &r600_vga_set_state, -- .gpu_reset = &r600_gpu_reset, -+ .gpu_is_lockup = &r600_gpu_is_lockup, -+ .asic_reset = &r600_asic_reset, - .gart_tlb_flush = &r600_pcie_gart_tlb_flush, - .gart_set_page = &rs600_gart_set_page, - .ring_test = &r600_ring_test, -@@ -541,6 +612,12 @@ static struct radeon_asic r600_asic = { - .hpd_sense = &r600_hpd_sense, - .hpd_set_polarity = &r600_hpd_set_polarity, - .ioctl_wait_idle = r600_ioctl_wait_idle, -+ .gui_idle = &r600_gui_idle, -+ .pm_misc = &r600_pm_misc, -+ .pm_prepare = &rs600_pm_prepare, -+ .pm_finish = &rs600_pm_finish, -+ .pm_init_profile = &r600_pm_init_profile, -+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state, - }; - - static struct radeon_asic rs780_asic = { -@@ -549,8 +626,9 @@ static struct radeon_asic rs780_asic = { - .suspend = &r600_suspend, - .resume = &r600_resume, - .cp_commit = &r600_cp_commit, -+ .gpu_is_lockup = &r600_gpu_is_lockup, - .vga_set_state = &r600_vga_set_state, -- .gpu_reset = &r600_gpu_reset, -+ .asic_reset = &r600_asic_reset, - .gart_tlb_flush = &r600_pcie_gart_tlb_flush, - .gart_set_page = &rs600_gart_set_page, - .ring_test = &r600_ring_test, -@@ -578,6 +656,12 @@ static struct radeon_asic rs780_asic = { - .hpd_sense = &r600_hpd_sense, - .hpd_set_polarity = &r600_hpd_set_polarity, - .ioctl_wait_idle = r600_ioctl_wait_idle, -+ .gui_idle = &r600_gui_idle, -+ .pm_misc = &r600_pm_misc, -+ .pm_prepare = &rs600_pm_prepare, -+ .pm_finish = &rs600_pm_finish, -+ .pm_init_profile = &rs780_pm_init_profile, -+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state, - 
}; - - static struct radeon_asic rv770_asic = { -@@ -586,7 +670,8 @@ static struct radeon_asic rv770_asic = { - .suspend = &rv770_suspend, - .resume = &rv770_resume, - .cp_commit = &r600_cp_commit, -- .gpu_reset = &rv770_gpu_reset, -+ .asic_reset = &r600_asic_reset, -+ .gpu_is_lockup = &r600_gpu_is_lockup, - .vga_set_state = &r600_vga_set_state, - .gart_tlb_flush = &r600_pcie_gart_tlb_flush, - .gart_set_page = &rs600_gart_set_page, -@@ -615,6 +700,12 @@ static struct radeon_asic rv770_asic = { - .hpd_sense = &r600_hpd_sense, - .hpd_set_polarity = &r600_hpd_set_polarity, - .ioctl_wait_idle = r600_ioctl_wait_idle, -+ .gui_idle = &r600_gui_idle, -+ .pm_misc = &rv770_pm_misc, -+ .pm_prepare = &rs600_pm_prepare, -+ .pm_finish = &rs600_pm_finish, -+ .pm_init_profile = &r600_pm_init_profile, -+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state, - }; - - static struct radeon_asic evergreen_asic = { -@@ -622,18 +713,19 @@ static struct radeon_asic evergreen_asic = { - .fini = &evergreen_fini, - .suspend = &evergreen_suspend, - .resume = &evergreen_resume, -- .cp_commit = NULL, -- .gpu_reset = &evergreen_gpu_reset, -+ .cp_commit = &r600_cp_commit, -+ .gpu_is_lockup = &evergreen_gpu_is_lockup, -+ .asic_reset = &evergreen_asic_reset, - .vga_set_state = &r600_vga_set_state, -- .gart_tlb_flush = &r600_pcie_gart_tlb_flush, -+ .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, - .gart_set_page = &rs600_gart_set_page, -- .ring_test = NULL, -- .ring_ib_execute = NULL, -- .irq_set = NULL, -- .irq_process = NULL, -- .get_vblank_counter = NULL, -- .fence_ring_emit = NULL, -- .cs_parse = NULL, -+ .ring_test = &r600_ring_test, -+ .ring_ib_execute = &r600_ring_ib_execute, -+ .irq_set = &evergreen_irq_set, -+ .irq_process = &evergreen_irq_process, -+ .get_vblank_counter = &evergreen_get_vblank_counter, -+ .fence_ring_emit = &r600_fence_ring_emit, -+ .cs_parse = &evergreen_cs_parse, - .copy_blit = NULL, - .copy_dma = NULL, - .copy = NULL, -@@ -650,6 +742,12 @@ static struct radeon_asic evergreen_asic = { - .hpd_fini = &evergreen_hpd_fini, - .hpd_sense = &evergreen_hpd_sense, - .hpd_set_polarity = &evergreen_hpd_set_polarity, -+ .gui_idle = &r600_gui_idle, -+ .pm_misc = &evergreen_pm_misc, -+ .pm_prepare = &evergreen_pm_prepare, -+ .pm_finish = &evergreen_pm_finish, -+ .pm_init_profile = &r600_pm_init_profile, -+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state, - }; - - int radeon_asic_init(struct radeon_device *rdev) -diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h -index a0b8280..c0bbaa6 100644 ---- a/drivers/gpu/drm/radeon/radeon_asic.h -+++ b/drivers/gpu/drm/radeon/radeon_asic.h -@@ -60,7 +60,8 @@ int r100_resume(struct radeon_device *rdev); - uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); - void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); - void r100_vga_set_state(struct radeon_device *rdev, bool state); --int r100_gpu_reset(struct radeon_device *rdev); -+bool r100_gpu_is_lockup(struct radeon_device *rdev); -+int r100_asic_reset(struct radeon_device *rdev); - u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); - void r100_pci_gart_tlb_flush(struct radeon_device *rdev); - int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); -@@ -110,8 +111,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev); - void r100_wb_disable(struct radeon_device *rdev); - void r100_wb_fini(struct radeon_device *rdev); - int r100_wb_init(struct radeon_device *rdev); --void r100_hdp_reset(struct radeon_device *rdev); 
--int r100_rb2d_reset(struct radeon_device *rdev); - int r100_cp_reset(struct radeon_device *rdev); - void r100_vga_render_disable(struct radeon_device *rdev); - int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, -@@ -126,6 +125,13 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p, - unsigned idx); - void r100_enable_bm(struct radeon_device *rdev); - void r100_set_common_regs(struct radeon_device *rdev); -+void r100_bm_disable(struct radeon_device *rdev); -+extern bool r100_gui_idle(struct radeon_device *rdev); -+extern void r100_pm_misc(struct radeon_device *rdev); -+extern void r100_pm_prepare(struct radeon_device *rdev); -+extern void r100_pm_finish(struct radeon_device *rdev); -+extern void r100_pm_init_profile(struct radeon_device *rdev); -+extern void r100_pm_get_dynpm_state(struct radeon_device *rdev); - - /* - * r200,rv250,rs300,rv280 -@@ -134,7 +140,7 @@ extern int r200_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_pages, -- struct radeon_fence *fence); -+ struct radeon_fence *fence); - - /* - * r300,r350,rv350,rv380 -@@ -143,7 +149,8 @@ extern int r300_init(struct radeon_device *rdev); - extern void r300_fini(struct radeon_device *rdev); - extern int r300_suspend(struct radeon_device *rdev); - extern int r300_resume(struct radeon_device *rdev); --extern int r300_gpu_reset(struct radeon_device *rdev); -+extern bool r300_gpu_is_lockup(struct radeon_device *rdev); -+extern int r300_asic_reset(struct radeon_device *rdev); - extern void r300_ring_start(struct radeon_device *rdev); - extern void r300_fence_ring_emit(struct radeon_device *rdev, - struct radeon_fence *fence); -@@ -162,6 +169,7 @@ extern int r420_init(struct radeon_device *rdev); - extern void r420_fini(struct radeon_device *rdev); - extern int r420_suspend(struct radeon_device *rdev); - extern int r420_resume(struct radeon_device *rdev); -+extern void r420_pm_init_profile(struct radeon_device *rdev); - - /* - * rs400,rs480 -@@ -178,6 +186,7 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); - /* - * rs600. 
- */ -+extern int rs600_asic_reset(struct radeon_device *rdev); - extern int rs600_init(struct radeon_device *rdev); - extern void rs600_fini(struct radeon_device *rdev); - extern int rs600_suspend(struct radeon_device *rdev); -@@ -195,6 +204,9 @@ void rs600_hpd_fini(struct radeon_device *rdev); - bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); - void rs600_hpd_set_polarity(struct radeon_device *rdev, - enum radeon_hpd_id hpd); -+extern void rs600_pm_misc(struct radeon_device *rdev); -+extern void rs600_pm_prepare(struct radeon_device *rdev); -+extern void rs600_pm_finish(struct radeon_device *rdev); - - /* - * rs690,rs740 -@@ -212,7 +224,6 @@ void rs690_bandwidth_update(struct radeon_device *rdev); - */ - int rv515_init(struct radeon_device *rdev); - void rv515_fini(struct radeon_device *rdev); --int rv515_gpu_reset(struct radeon_device *rdev); - uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); - void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); - void rv515_ring_start(struct radeon_device *rdev); -@@ -252,7 +263,8 @@ int r600_copy_dma(struct radeon_device *rdev, - struct radeon_fence *fence); - int r600_irq_process(struct radeon_device *rdev); - int r600_irq_set(struct radeon_device *rdev); --int r600_gpu_reset(struct radeon_device *rdev); -+bool r600_gpu_is_lockup(struct radeon_device *rdev); -+int r600_asic_reset(struct radeon_device *rdev); - int r600_set_surface_reg(struct radeon_device *rdev, int reg, - uint32_t tiling_flags, uint32_t pitch, - uint32_t offset, uint32_t obj_size); -@@ -268,6 +280,11 @@ bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); - void r600_hpd_set_polarity(struct radeon_device *rdev, - enum radeon_hpd_id hpd); - extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); -+extern bool r600_gui_idle(struct radeon_device *rdev); -+extern void r600_pm_misc(struct radeon_device *rdev); -+extern void r600_pm_init_profile(struct radeon_device *rdev); -+extern void rs780_pm_init_profile(struct radeon_device *rdev); -+extern void r600_pm_get_dynpm_state(struct radeon_device *rdev); - - /* - * rv770,rv730,rv710,rv740 -@@ -276,20 +293,30 @@ int rv770_init(struct radeon_device *rdev); - void rv770_fini(struct radeon_device *rdev); - int rv770_suspend(struct radeon_device *rdev); - int rv770_resume(struct radeon_device *rdev); --int rv770_gpu_reset(struct radeon_device *rdev); -+extern void rv770_pm_misc(struct radeon_device *rdev); - - /* - * evergreen - */ -+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); - int evergreen_init(struct radeon_device *rdev); - void evergreen_fini(struct radeon_device *rdev); - int evergreen_suspend(struct radeon_device *rdev); - int evergreen_resume(struct radeon_device *rdev); --int evergreen_gpu_reset(struct radeon_device *rdev); -+bool evergreen_gpu_is_lockup(struct radeon_device *rdev); -+int evergreen_asic_reset(struct radeon_device *rdev); - void evergreen_bandwidth_update(struct radeon_device *rdev); - void evergreen_hpd_init(struct radeon_device *rdev); - void evergreen_hpd_fini(struct radeon_device *rdev); - bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); - void evergreen_hpd_set_polarity(struct radeon_device *rdev, - enum radeon_hpd_id hpd); -+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc); -+int evergreen_irq_set(struct radeon_device *rdev); -+int evergreen_irq_process(struct radeon_device *rdev); -+extern int evergreen_cs_parse(struct 
radeon_cs_parser *p); -+extern void evergreen_pm_misc(struct radeon_device *rdev); -+extern void evergreen_pm_prepare(struct radeon_device *rdev); -+extern void evergreen_pm_finish(struct radeon_device *rdev); -+ - #endif -diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c -index 9916d82..99bd8a9 100644 ---- a/drivers/gpu/drm/radeon/radeon_atombios.c -+++ b/drivers/gpu/drm/radeon/radeon_atombios.c -@@ -530,6 +530,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) - } - - /* look up gpio for ddc, hpd */ -+ ddc_bus.valid = false; -+ hpd.hpd = RADEON_HPD_NONE; - if ((le16_to_cpu(path->usDeviceTag) & - (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) { - for (j = 0; j < con_obj->ucNumberOfObjects; j++) { -@@ -547,7 +549,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) - ATOM_I2C_RECORD *i2c_record; - ATOM_HPD_INT_RECORD *hpd_record; - ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; -- hpd.hpd = RADEON_HPD_NONE; - - while (record->ucRecordType > 0 - && record-> -@@ -585,13 +586,10 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) - break; - } - } -- } else { -- hpd.hpd = RADEON_HPD_NONE; -- ddc_bus.valid = false; - } - - /* needed for aux chan transactions */ -- ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0; -+ ddc_bus.hpd = hpd.hpd; - - conn_id = le16_to_cpu(path->usConnObjectId); - -@@ -682,10 +680,18 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct - uint8_t dac; - union atom_supported_devices *supported_devices; - int i, j, max_device; -- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; -+ struct bios_connector *bios_connectors; -+ size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE; - -- if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) -+ bios_connectors = kzalloc(bc_size, GFP_KERNEL); -+ if (!bios_connectors) -+ return false; -+ -+ if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, -+ &data_offset)) { -+ kfree(bios_connectors); - return false; -+ } - - supported_devices = - (union atom_supported_devices *)(ctx->bios + data_offset); -@@ -853,6 +859,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct - - radeon_link_encoder_connector(dev); - -+ kfree(bios_connectors); - return true; - } - -@@ -1174,7 +1181,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct - lvds->native_mode.vtotal = lvds->native_mode.vdisplay + - le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time); - lvds->native_mode.vsync_start = lvds->native_mode.vdisplay + -- le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); -+ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset); - lvds->native_mode.vsync_end = lvds->native_mode.vsync_start + - le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); - lvds->panel_pwr_delay = -@@ -1442,26 +1449,30 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) - - static const char *thermal_controller_names[] = { - "NONE", -- "LM63", -- "ADM1032", -- "ADM1030", -- "MUA6649", -- "LM64", -- "F75375", -- "ASC7512", -+ "lm63", -+ "adm1032", -+ "adm1030", -+ "max6649", -+ "lm64", -+ "f75375", -+ "asc7xxx", - }; - - static const char *pp_lib_thermal_controller_names[] = { - "NONE", -- "LM63", -- "ADM1032", -- "ADM1030", -- "MUA6649", -- "LM64", -- "F75375", -+ "lm63", -+ "adm1032", -+ "adm1030", -+ "max6649", -+ "lm64", -+ "f75375", - "RV6xx", - "RV770", -- "ADT7473", -+ "adt7473", -+ "External 
GPIO", -+ "Evergreen", -+ "adt7473 with internal", -+ - }; - - union power_info { -@@ -1485,7 +1496,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) - int state_index = 0, mode_index = 0; - struct radeon_i2c_bus_rec i2c_bus; - -- rdev->pm.default_power_state = NULL; -+ rdev->pm.default_power_state_index = -1; - - if (atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) { -@@ -1498,10 +1509,19 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) - power_info->info.ucOverdriveControllerAddress >> 1); - i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine); - rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal"); -+ if (rdev->pm.i2c_bus) { -+ struct i2c_board_info info = { }; -+ const char *name = thermal_controller_names[power_info->info. -+ ucOverdriveThermalController]; -+ info.addr = power_info->info.ucOverdriveControllerAddress >> 1; -+ strlcpy(info.type, name, sizeof(info.type)); -+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); -+ } - } - num_modes = power_info->info.ucNumOfPowerModeEntries; - if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) - num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; -+ /* last mode is usually default, array is low to high */ - for (i = 0; i < num_modes; i++) { - rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; - switch (frev) { -@@ -1515,16 +1535,11 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) - if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || - (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) - continue; -- /* skip overclock modes for now */ -- if ((rdev->pm.power_state[state_index].clock_info[0].mclk > -- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || -- (rdev->pm.power_state[state_index].clock_info[0].sclk > -- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) -- continue; -- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = -+ rdev->pm.power_state[state_index].pcie_lanes = - power_info->info.asPowerPlayInfo[i].ucNumPciELanes; - misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); -- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { -+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || -+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { - rdev->pm.power_state[state_index].clock_info[0].voltage.type = - VOLTAGE_GPIO; - rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = -@@ -1542,6 +1557,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) - rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = - power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex; - } -+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; -+ rdev->pm.power_state[state_index].misc = misc; - /* order matters! 
*/ - if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) - rdev->pm.power_state[state_index].type = -@@ -1555,15 +1572,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) - if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BALANCED; -- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) -+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_PERFORMANCE; -+ rdev->pm.power_state[state_index].flags &= -+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; -+ } - if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_DEFAULT; -- rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; -+ rdev->pm.default_power_state_index = state_index; - rdev->pm.power_state[state_index].default_clock_mode = - &rdev->pm.power_state[state_index].clock_info[0]; -+ rdev->pm.power_state[state_index].flags &= -+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; -+ } else if (state_index == 0) { -+ rdev->pm.power_state[state_index].clock_info[0].flags |= -+ RADEON_PM_MODE_NO_DISPLAY; - } - state_index++; - break; -@@ -1577,17 +1602,12 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) - if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || - (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) - continue; -- /* skip overclock modes for now */ -- if ((rdev->pm.power_state[state_index].clock_info[0].mclk > -- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || -- (rdev->pm.power_state[state_index].clock_info[0].sclk > -- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) -- continue; -- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = -+ rdev->pm.power_state[state_index].pcie_lanes = - power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; - misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); - misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); -- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { -+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || -+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { - rdev->pm.power_state[state_index].clock_info[0].voltage.type = - VOLTAGE_GPIO; - rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = -@@ -1605,6 +1625,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) - rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = - power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex; - } -+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; -+ rdev->pm.power_state[state_index].misc = misc; -+ rdev->pm.power_state[state_index].misc2 = misc2; - /* order matters! 
*/ - if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) - rdev->pm.power_state[state_index].type = -@@ -1618,18 +1641,29 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) - if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BALANCED; -- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) -+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_PERFORMANCE; -+ rdev->pm.power_state[state_index].flags &= -+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; -+ } - if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BALANCED; -+ if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT) -+ rdev->pm.power_state[state_index].flags &= -+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_DEFAULT; -- rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; -+ rdev->pm.default_power_state_index = state_index; - rdev->pm.power_state[state_index].default_clock_mode = - &rdev->pm.power_state[state_index].clock_info[0]; -+ rdev->pm.power_state[state_index].flags &= -+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; -+ } else if (state_index == 0) { -+ rdev->pm.power_state[state_index].clock_info[0].flags |= -+ RADEON_PM_MODE_NO_DISPLAY; - } - state_index++; - break; -@@ -1643,17 +1677,12 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) - if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || - (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) - continue; -- /* skip overclock modes for now */ -- if ((rdev->pm.power_state[state_index].clock_info[0].mclk > -- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || -- (rdev->pm.power_state[state_index].clock_info[0].sclk > -- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) -- continue; -- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = -+ rdev->pm.power_state[state_index].pcie_lanes = - power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; - misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); - misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); -- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { -+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || -+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { - rdev->pm.power_state[state_index].clock_info[0].voltage.type = - VOLTAGE_GPIO; - rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = -@@ -1677,6 +1706,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) - power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex; - } - } -+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; -+ rdev->pm.power_state[state_index].misc = misc; -+ rdev->pm.power_state[state_index].misc2 = misc2; - /* order matters! 
*/ - if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) - rdev->pm.power_state[state_index].type = -@@ -1690,42 +1722,89 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) - if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BALANCED; -- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) -+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_PERFORMANCE; -+ rdev->pm.power_state[state_index].flags &= -+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; -+ } - if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BALANCED; - if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_DEFAULT; -- rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; -+ rdev->pm.default_power_state_index = state_index; - rdev->pm.power_state[state_index].default_clock_mode = - &rdev->pm.power_state[state_index].clock_info[0]; -+ } else if (state_index == 0) { -+ rdev->pm.power_state[state_index].clock_info[0].flags |= -+ RADEON_PM_MODE_NO_DISPLAY; - } - state_index++; - break; - } - } -- } else if (frev == 4) { -+ /* last mode is usually default */ -+ if (rdev->pm.default_power_state_index == -1) { -+ rdev->pm.power_state[state_index - 1].type = -+ POWER_STATE_TYPE_DEFAULT; -+ rdev->pm.default_power_state_index = state_index - 1; -+ rdev->pm.power_state[state_index - 1].default_clock_mode = -+ &rdev->pm.power_state[state_index - 1].clock_info[0]; -+ rdev->pm.power_state[state_index].flags &= -+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; -+ rdev->pm.power_state[state_index].misc = 0; -+ rdev->pm.power_state[state_index].misc2 = 0; -+ } -+ } else { -+ int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo); -+ uint8_t fw_frev, fw_crev; -+ uint16_t fw_data_offset, vddc = 0; -+ union firmware_info *firmware_info; -+ ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController; -+ -+ if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL, -+ &fw_frev, &fw_crev, &fw_data_offset)) { -+ firmware_info = -+ (union firmware_info *)(mode_info->atom_context->bios + -+ fw_data_offset); -+ vddc = firmware_info->info_14.usBootUpVDDCVoltage; -+ } -+ - /* add the i2c bus for thermal/fan chip */ - /* no support for internal controller yet */ -- if (power_info->info_4.sThermalController.ucType > 0) { -- if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) || -- (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770)) { -+ if (controller->ucType > 0) { -+ if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) || -+ (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) || -+ (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN)) { - DRM_INFO("Internal thermal controller %s fan control\n", -- (power_info->info_4.sThermalController.ucFanParameters & -+ (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); -+ } else if ((controller->ucType == -+ ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || -+ (controller->ucType == -+ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) { -+ DRM_INFO("Special thermal controller config\n"); - } else { - DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", -- pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType], -- power_info->info_4.sThermalController.ucI2cAddress >> 1, -- (power_info->info_4.sThermalController.ucFanParameters & -+ pp_lib_thermal_controller_names[controller->ucType], -+ controller->ucI2cAddress >> 1, -+ (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); -- i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info_4.sThermalController.ucI2cLine); -+ i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); - rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal"); -+ if (rdev->pm.i2c_bus) { -+ struct i2c_board_info info = { }; -+ const char *name = pp_lib_thermal_controller_names[controller->ucType]; -+ info.addr = controller->ucI2cAddress >> 1; -+ strlcpy(info.type, name, sizeof(info.type)); -+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); -+ } -+ - } - } -+ /* first mode is usually default, followed by low to high */ - for (i = 0; i < power_info->info_4.ucNumStates; i++) { - mode_index = 0; - power_state = (struct _ATOM_PPLIB_STATE *) -@@ -1754,14 +1833,31 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) - /* skip invalid modes */ - if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) - continue; -- /* skip overclock modes for now */ -- if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk > -- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN) -+ /* voltage works differently on IGPs */ -+ mode_index++; -+ } else if (ASIC_IS_DCE4(rdev)) { -+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info = -+ (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *) -+ (mode_info->atom_context->bios + -+ data_offset + -+ le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + -+ (power_state->ucClockStateIndices[j] * -+ power_info->info_4.ucClockInfoSize)); -+ sclk = le16_to_cpu(clock_info->usEngineClockLow); -+ sclk |= clock_info->ucEngineClockHigh << 16; -+ mclk = le16_to_cpu(clock_info->usMemoryClockLow); -+ mclk |= clock_info->ucMemoryClockHigh << 16; -+ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; -+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; -+ /* skip invalid modes */ -+ if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || -+ (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) - continue; - rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = - VOLTAGE_SW; - rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = - clock_info->usVDDC; -+ /* XXX usVDDCI */ - mode_index++; - } else { - struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info = -@@ -1781,12 +1877,6 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) - if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || - (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) - continue; -- /* skip overclock modes for now */ -- if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk > -- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || -- (rdev->pm.power_state[state_index].clock_info[mode_index].sclk > -- rdev->clock.default_sclk + 
RADEON_MODE_OVERCLOCK_MARGIN)) -- continue; - rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = - VOLTAGE_SW; - rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = -@@ -1798,7 +1888,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) - if (mode_index) { - misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); - misc2 = le16_to_cpu(non_clock_info->usClassification); -- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = -+ rdev->pm.power_state[state_index].misc = misc; -+ rdev->pm.power_state[state_index].misc2 = misc2; -+ rdev->pm.power_state[state_index].pcie_lanes = - ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> - ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; - switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { -@@ -1815,22 +1907,46 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) - POWER_STATE_TYPE_PERFORMANCE; - break; - } -+ rdev->pm.power_state[state_index].flags = 0; -+ if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) -+ rdev->pm.power_state[state_index].flags |= -+ RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_DEFAULT; -- rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; -+ rdev->pm.default_power_state_index = state_index; - rdev->pm.power_state[state_index].default_clock_mode = - &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; -+ /* patch the table values with the default slck/mclk from firmware info */ -+ for (j = 0; j < mode_index; j++) { -+ rdev->pm.power_state[state_index].clock_info[j].mclk = -+ rdev->clock.default_mclk; -+ rdev->pm.power_state[state_index].clock_info[j].sclk = -+ rdev->clock.default_sclk; -+ if (vddc) -+ rdev->pm.power_state[state_index].clock_info[j].voltage.voltage = -+ vddc; -+ } - } - state_index++; - } - } -+ /* if multiple clock modes, mark the lowest as no display */ -+ for (i = 0; i < state_index; i++) { -+ if (rdev->pm.power_state[i].num_clock_modes > 1) -+ rdev->pm.power_state[i].clock_info[0].flags |= -+ RADEON_PM_MODE_NO_DISPLAY; -+ } -+ /* first mode is usually default */ -+ if (rdev->pm.default_power_state_index == -1) { -+ rdev->pm.power_state[0].type = -+ POWER_STATE_TYPE_DEFAULT; -+ rdev->pm.default_power_state_index = 0; -+ rdev->pm.power_state[0].default_clock_mode = -+ &rdev->pm.power_state[0].clock_info[0]; -+ } - } - } else { -- /* XXX figure out some good default low power mode for cards w/out power tables */ -- } -- -- if (rdev->pm.default_power_state == NULL) { - /* add the default mode */ - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_DEFAULT; -@@ -1840,18 +1956,17 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) - rdev->pm.power_state[state_index].default_clock_mode = - &rdev->pm.power_state[state_index].clock_info[0]; - rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; -- if (rdev->asic->get_pcie_lanes) -- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev); -- else -- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16; -- rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; -+ rdev->pm.power_state[state_index].pcie_lanes = 16; -+ rdev->pm.default_power_state_index = state_index; -+ rdev->pm.power_state[state_index].flags = 0; - state_index++; - } -+ - rdev->pm.num_power_states = state_index; - -- rdev->pm.current_power_state = rdev->pm.default_power_state; -- rdev->pm.current_clock_mode = -- 
rdev->pm.default_power_state->default_clock_mode; -+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; -+ rdev->pm.current_clock_mode_index = 0; -+ rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; - } - - void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) -@@ -1907,6 +2022,42 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - } - -+union set_voltage { -+ struct _SET_VOLTAGE_PS_ALLOCATION alloc; -+ struct _SET_VOLTAGE_PARAMETERS v1; -+ struct _SET_VOLTAGE_PARAMETERS_V2 v2; -+}; -+ -+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level) -+{ -+ union set_voltage args; -+ int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); -+ u8 frev, crev, volt_index = level; -+ -+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) -+ return; -+ -+ switch (crev) { -+ case 1: -+ args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; -+ args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE; -+ args.v1.ucVoltageIndex = volt_index; -+ break; -+ case 2: -+ args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; -+ args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE; -+ args.v2.usVoltageLevel = cpu_to_le16(level); -+ break; -+ default: -+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev); -+ return; -+ } -+ -+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); -+} -+ -+ -+ - void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) - { - struct radeon_device *rdev = dev->dev_private; -diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c -index 8ad71f7..fbba938 100644 ---- a/drivers/gpu/drm/radeon/radeon_bios.c -+++ b/drivers/gpu/drm/radeon/radeon_bios.c -@@ -85,12 +85,11 @@ static bool radeon_read_bios(struct radeon_device *rdev) - pci_unmap_rom(rdev->pdev, bios); - return false; - } -- rdev->bios = kmalloc(size, GFP_KERNEL); -+ rdev->bios = kmemdup(bios, size, GFP_KERNEL); - if (rdev->bios == NULL) { - pci_unmap_rom(rdev->pdev, bios); - return false; - } -- memcpy(rdev->bios, bios, size); - pci_unmap_rom(rdev->pdev, bios); - return true; - } -diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c -index 37db8ad..1bee2f9 100644 ---- a/drivers/gpu/drm/radeon/radeon_combios.c -+++ b/drivers/gpu/drm/radeon/radeon_combios.c -@@ -450,17 +450,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) - { - int edid_info; - struct edid *edid; -+ unsigned char *raw; - edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE); - if (!edid_info) - return false; - -- edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1), -- GFP_KERNEL); -+ raw = rdev->bios + edid_info; -+ edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL); - if (edid == NULL) - return false; - -- memcpy((unsigned char *)edid, -- (unsigned char *)(rdev->bios + edid_info), EDID_LENGTH); -+ memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1)); - - if (!drm_edid_is_valid(edid)) { - kfree(edid); -@@ -600,7 +600,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde - } - i2c.mm_i2c = false; - i2c.i2c_id = 0; -- i2c.hpd_id = 0; -+ i2c.hpd = RADEON_HPD_NONE; - - if (ddc_line) - i2c.valid = true; -@@ -1113,18 +1113,20 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder - break; - - if 
((RBIOS16(tmp) == lvds->native_mode.hdisplay) && -- (RBIOS16(tmp + 2) == -- lvds->native_mode.vdisplay)) { -- lvds->native_mode.htotal = RBIOS16(tmp + 17) * 8; -- lvds->native_mode.hsync_start = RBIOS16(tmp + 21) * 8; -- lvds->native_mode.hsync_end = (RBIOS8(tmp + 23) + -- RBIOS16(tmp + 21)) * 8; -- -- lvds->native_mode.vtotal = RBIOS16(tmp + 24); -- lvds->native_mode.vsync_start = RBIOS16(tmp + 28) & 0x7ff; -- lvds->native_mode.vsync_end = -- ((RBIOS16(tmp + 28) & 0xf800) >> 11) + -- (RBIOS16(tmp + 28) & 0x7ff); -+ (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) { -+ lvds->native_mode.htotal = lvds->native_mode.hdisplay + -+ (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8; -+ lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + -+ (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; -+ lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + -+ (RBIOS8(tmp + 23) * 8); -+ -+ lvds->native_mode.vtotal = lvds->native_mode.vdisplay + -+ (RBIOS16(tmp + 24) - RBIOS16(tmp + 26)); -+ lvds->native_mode.vsync_start = lvds->native_mode.vdisplay + -+ ((RBIOS16(tmp + 28) & 0x7ff) - RBIOS16(tmp + 26)); -+ lvds->native_mode.vsync_end = lvds->native_mode.vsync_start + -+ ((RBIOS16(tmp + 28) & 0xf800) >> 11); - - lvds->native_mode.clock = RBIOS16(tmp + 9) * 10; - lvds->native_mode.flags = 0; -@@ -2024,6 +2026,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) - combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); - break; - default: -+ ddc_i2c.valid = false; - break; - } - -@@ -2196,7 +2199,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) - ATOM_DEVICE_DFP1_SUPPORT); - - ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); -- hpd.hpd = RADEON_HPD_NONE; -+ hpd.hpd = RADEON_HPD_1; - radeon_add_legacy_connector(dev, - 0, - ATOM_DEVICE_CRT1_SUPPORT | -@@ -2337,6 +2340,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) - if (RBIOS8(tv_info + 6) == 'T') { - if (radeon_apply_legacy_tv_quirks(dev)) { - hpd.hpd = RADEON_HPD_NONE; -+ ddc_i2c.valid = false; - radeon_add_legacy_encoder(dev, - radeon_get_encoder_id - (dev, -@@ -2366,7 +2370,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) - u8 rev, blocks, tmp; - int state_index = 0; - -- rdev->pm.default_power_state = NULL; -+ rdev->pm.default_power_state_index = -1; - - if (rdev->flags & RADEON_IS_MOBILITY) { - offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); -@@ -2380,17 +2384,13 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) - if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || - (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) - goto default_mode; -- /* skip overclock modes for now */ -- if ((rdev->pm.power_state[state_index].clock_info[0].mclk > -- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || -- (rdev->pm.power_state[state_index].clock_info[0].sclk > -- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) -- goto default_mode; - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BATTERY; - misc = RBIOS16(offset + 0x5 + 0x0); - if (rev > 4) - misc2 = RBIOS16(offset + 0x5 + 0xe); -+ rdev->pm.power_state[state_index].misc = misc; -+ rdev->pm.power_state[state_index].misc2 = misc2; - if (misc & 0x4) { - rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO; - if (misc & 0x8) -@@ -2437,8 +2437,9 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) - } else - rdev->pm.power_state[state_index].clock_info[0].voltage.type = 
VOLTAGE_NONE; - if (rev > 6) -- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = -+ rdev->pm.power_state[state_index].pcie_lanes = - RBIOS8(offset + 0x5 + 0x10); -+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - state_index++; - } else { - /* XXX figure out some good default low power mode for mobility cards w/out power tables */ -@@ -2455,17 +2456,19 @@ default_mode: - rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; - rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; - rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; -- rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; -- if (rdev->asic->get_pcie_lanes) -- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev); -+ if ((state_index > 0) && -+ (rdev->pm.power_state[0].clock_info[0].voltage.type == VOLTAGE_GPIO)) -+ rdev->pm.power_state[state_index].clock_info[0].voltage = -+ rdev->pm.power_state[0].clock_info[0].voltage; - else -- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16; -- rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; -+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; -+ rdev->pm.power_state[state_index].pcie_lanes = 16; -+ rdev->pm.power_state[state_index].flags = 0; -+ rdev->pm.default_power_state_index = state_index; - rdev->pm.num_power_states = state_index + 1; - -- rdev->pm.current_power_state = rdev->pm.default_power_state; -- rdev->pm.current_clock_mode = -- rdev->pm.default_power_state->default_clock_mode; -+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; -+ rdev->pm.current_clock_mode_index = 0; - } - - void radeon_external_tmds_setup(struct drm_encoder *encoder) -diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c -index 4559a53..0c7ccc6 100644 ---- a/drivers/gpu/drm/radeon/radeon_connectors.c -+++ b/drivers/gpu/drm/radeon/radeon_connectors.c -@@ -1041,7 +1041,6 @@ radeon_add_atom_connector(struct drm_device *dev, - struct radeon_connector_atom_dig *radeon_dig_connector; - uint32_t subpixel_order = SubPixelNone; - bool shared_ddc = false; -- int ret; - - /* fixme - tv/cv/din */ - if (connector_type == DRM_MODE_CONNECTOR_Unknown) -@@ -1076,9 +1075,7 @@ radeon_add_atom_connector(struct drm_device *dev, - switch (connector_type) { - case DRM_MODE_CONNECTOR_VGA: - drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); -- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); -- if (ret) -- goto failed; -+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); - if (i2c_bus->valid) { - radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); - if (!radeon_connector->ddc_bus) -@@ -1088,12 +1085,11 @@ radeon_add_atom_connector(struct drm_device *dev, - drm_connector_attach_property(&radeon_connector->base, - rdev->mode_info.load_detect_property, - 1); -+ connector->polled = DRM_CONNECTOR_POLL_CONNECT; - break; - case DRM_MODE_CONNECTOR_DVIA: - drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); -- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); -- if (ret) -- goto failed; -+ drm_connector_helper_add(&radeon_connector->base, 
&radeon_vga_connector_helper_funcs); - if (i2c_bus->valid) { - radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); - if (!radeon_connector->ddc_bus) -@@ -1113,9 +1109,7 @@ radeon_add_atom_connector(struct drm_device *dev, - radeon_dig_connector->igp_lane_info = igp_lane_info; - radeon_connector->con_priv = radeon_dig_connector; - drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); -- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); -- if (ret) -- goto failed; -+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); - if (i2c_bus->valid) { - radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); - if (!radeon_connector->ddc_bus) -@@ -1141,9 +1135,7 @@ radeon_add_atom_connector(struct drm_device *dev, - radeon_dig_connector->igp_lane_info = igp_lane_info; - radeon_connector->con_priv = radeon_dig_connector; - drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); -- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); -- if (ret) -- goto failed; -+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); - if (i2c_bus->valid) { - radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI"); - if (!radeon_connector->ddc_bus) -@@ -1163,9 +1155,7 @@ radeon_add_atom_connector(struct drm_device *dev, - radeon_dig_connector->igp_lane_info = igp_lane_info; - radeon_connector->con_priv = radeon_dig_connector; - drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); -- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); -- if (ret) -- goto failed; -+ drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); - if (i2c_bus->valid) { - /* add DP i2c bus */ - if (connector_type == DRM_MODE_CONNECTOR_eDP) -@@ -1191,9 +1181,7 @@ radeon_add_atom_connector(struct drm_device *dev, - case DRM_MODE_CONNECTOR_9PinDIN: - if (radeon_tv == 1) { - drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); -- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); -- if (ret) -- goto failed; -+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); - radeon_connector->dac_load_detect = true; - drm_connector_attach_property(&radeon_connector->base, - rdev->mode_info.load_detect_property, -@@ -1211,9 +1199,7 @@ radeon_add_atom_connector(struct drm_device *dev, - radeon_dig_connector->igp_lane_info = igp_lane_info; - radeon_connector->con_priv = radeon_dig_connector; - drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); -- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); -- if (ret) -- goto failed; -+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); - if (i2c_bus->valid) { - radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); - if (!radeon_connector->ddc_bus) -@@ -1226,6 +1212,12 @@ radeon_add_atom_connector(struct drm_device *dev, - break; - } - -+ if (hpd->hpd == RADEON_HPD_NONE) { -+ if (i2c_bus->valid) -+ connector->polled = DRM_CONNECTOR_POLL_CONNECT; -+ } else -+ connector->polled = DRM_CONNECTOR_POLL_HPD; -+ - connector->display_info.subpixel_order = subpixel_order; - drm_sysfs_connector_add(connector); - 
return; -@@ -1250,7 +1242,6 @@ radeon_add_legacy_connector(struct drm_device *dev, - struct drm_connector *connector; - struct radeon_connector *radeon_connector; - uint32_t subpixel_order = SubPixelNone; -- int ret; - - /* fixme - tv/cv/din */ - if (connector_type == DRM_MODE_CONNECTOR_Unknown) -@@ -1278,9 +1269,7 @@ radeon_add_legacy_connector(struct drm_device *dev, - switch (connector_type) { - case DRM_MODE_CONNECTOR_VGA: - drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); -- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); -- if (ret) -- goto failed; -+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); - if (i2c_bus->valid) { - radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); - if (!radeon_connector->ddc_bus) -@@ -1290,12 +1279,11 @@ radeon_add_legacy_connector(struct drm_device *dev, - drm_connector_attach_property(&radeon_connector->base, - rdev->mode_info.load_detect_property, - 1); -+ connector->polled = DRM_CONNECTOR_POLL_CONNECT; - break; - case DRM_MODE_CONNECTOR_DVIA: - drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); -- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); -- if (ret) -- goto failed; -+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); - if (i2c_bus->valid) { - radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); - if (!radeon_connector->ddc_bus) -@@ -1309,9 +1297,7 @@ radeon_add_legacy_connector(struct drm_device *dev, - case DRM_MODE_CONNECTOR_DVII: - case DRM_MODE_CONNECTOR_DVID: - drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); -- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); -- if (ret) -- goto failed; -+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); - if (i2c_bus->valid) { - radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); - if (!radeon_connector->ddc_bus) -@@ -1330,9 +1316,7 @@ radeon_add_legacy_connector(struct drm_device *dev, - case DRM_MODE_CONNECTOR_9PinDIN: - if (radeon_tv == 1) { - drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); -- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); -- if (ret) -- goto failed; -+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); - radeon_connector->dac_load_detect = true; - /* RS400,RC410,RS480 chipset seems to report a lot - * of false positive on load detect, we haven't yet -@@ -1351,9 +1335,7 @@ radeon_add_legacy_connector(struct drm_device *dev, - break; - case DRM_MODE_CONNECTOR_LVDS: - drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); -- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); -- if (ret) -- goto failed; -+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); - if (i2c_bus->valid) { - radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); - if (!radeon_connector->ddc_bus) -@@ -1366,6 +1348,11 @@ radeon_add_legacy_connector(struct drm_device *dev, - break; - } - -+ if (hpd->hpd == RADEON_HPD_NONE) { -+ if (i2c_bus->valid) -+ connector->polled = DRM_CONNECTOR_POLL_CONNECT; -+ } else -+ connector->polled = 
DRM_CONNECTOR_POLL_HPD; - connector->display_info.subpixel_order = subpixel_order; - drm_sysfs_connector_add(connector); - return; -diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c -index f9b0fe0..ae0fb73 100644 ---- a/drivers/gpu/drm/radeon/radeon_cs.c -+++ b/drivers/gpu/drm/radeon/radeon_cs.c -@@ -220,10 +220,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) - int r; - - mutex_lock(&rdev->cs_mutex); -- if (rdev->gpu_lockup) { -- mutex_unlock(&rdev->cs_mutex); -- return -EINVAL; -- } - /* initialize parser */ - memset(&parser, 0, sizeof(struct radeon_cs_parser)); - parser.filp = filp; -diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c -index 7b629e3..f10faed 100644 ---- a/drivers/gpu/drm/radeon/radeon_device.c -+++ b/drivers/gpu/drm/radeon/radeon_device.c -@@ -299,24 +299,24 @@ void radeon_update_bandwidth_info(struct radeon_device *rdev) - sclk = radeon_get_engine_clock(rdev); - mclk = rdev->clock.default_mclk; - -- a.full = rfixed_const(100); -- rdev->pm.sclk.full = rfixed_const(sclk); -- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); -- rdev->pm.mclk.full = rfixed_const(mclk); -- rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a); -+ a.full = dfixed_const(100); -+ rdev->pm.sclk.full = dfixed_const(sclk); -+ rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); -+ rdev->pm.mclk.full = dfixed_const(mclk); -+ rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); - -- a.full = rfixed_const(16); -+ a.full = dfixed_const(16); - /* core_bandwidth = sclk(Mhz) * 16 */ -- rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); -+ rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); - } else { - sclk = radeon_get_engine_clock(rdev); - mclk = radeon_get_memory_clock(rdev); - -- a.full = rfixed_const(100); -- rdev->pm.sclk.full = rfixed_const(sclk); -- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); -- rdev->pm.mclk.full = rfixed_const(mclk); -- rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a); -+ a.full = dfixed_const(100); -+ rdev->pm.sclk.full = dfixed_const(sclk); -+ rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); -+ rdev->pm.mclk.full = dfixed_const(mclk); -+ rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); - } - } - -@@ -546,8 +546,10 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero - /* don't suspend or resume card normally */ - rdev->powered_down = false; - radeon_resume_kms(dev); -+ drm_kms_helper_poll_enable(dev); - } else { - printk(KERN_INFO "radeon: switched off\n"); -+ drm_kms_helper_poll_disable(dev); - radeon_suspend_kms(dev, pmm); - /* don't suspend or resume card normally */ - rdev->powered_down = true; -@@ -599,9 +601,11 @@ int radeon_device_init(struct radeon_device *rdev, - spin_lock_init(&rdev->ih.lock); - mutex_init(&rdev->gem.mutex); - mutex_init(&rdev->pm.mutex); -+ mutex_init(&rdev->vram_mutex); - rwlock_init(&rdev->fence_drv.lock); - INIT_LIST_HEAD(&rdev->gem.objects); - init_waitqueue_head(&rdev->irq.vblank_queue); -+ init_waitqueue_head(&rdev->irq.idle_queue); - - /* setup workqueue */ - rdev->wq = create_workqueue("radeon"); -@@ -671,7 +675,7 @@ int radeon_device_init(struct radeon_device *rdev, - /* Acceleration not working on AGP card try again - * with fallback to PCI or PCIE GART - */ -- radeon_gpu_reset(rdev); -+ radeon_asic_reset(rdev); - radeon_fini(rdev); - radeon_agp_disable(rdev); - r = radeon_init(rdev); -@@ -691,6 +695,8 @@ void radeon_device_fini(struct radeon_device *rdev) - { - 
DRM_INFO("radeon: finishing device.\n"); - rdev->shutdown = true; -+ /* evict vram memory */ -+ radeon_bo_evict_vram(rdev); - radeon_fini(rdev); - destroy_workqueue(rdev->wq); - vga_switcheroo_unregister_client(rdev->pdev); -@@ -707,6 +713,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) - { - struct radeon_device *rdev; - struct drm_crtc *crtc; -+ struct drm_connector *connector; - int r; - - if (dev == NULL || dev->dev_private == NULL) { -@@ -719,6 +726,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) - - if (rdev->powered_down) - return 0; -+ -+ /* turn off display hw */ -+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); -+ } -+ - /* unpin the front buffers */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); -@@ -728,9 +741,10 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) - continue; - } - robj = rfb->obj->driver_private; -- if (robj != rdev->fbdev_rbo) { -+ /* don't unpin kernel fb objects */ -+ if (!radeon_fbdev_robj_is_fb(rdev, robj)) { - r = radeon_bo_reserve(robj, false); -- if (unlikely(r == 0)) { -+ if (r == 0) { - radeon_bo_unpin(robj); - radeon_bo_unreserve(robj); - } -@@ -743,11 +757,14 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) - - radeon_save_bios_scratch_regs(rdev); - -+ radeon_pm_suspend(rdev); - radeon_suspend(rdev); - radeon_hpd_fini(rdev); - /* evict remaining vram memory */ - radeon_bo_evict_vram(rdev); - -+ radeon_agp_suspend(rdev); -+ - pci_save_state(dev->pdev); - if (state.event == PM_EVENT_SUSPEND) { - /* Shut down the device */ -@@ -755,7 +772,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) - pci_set_power_state(dev->pdev, PCI_D3hot); - } - acquire_console_sem(); -- fb_set_suspend(rdev->fbdev_info, 1); -+ radeon_fbdev_set_suspend(rdev, 1); - release_console_sem(); - return 0; - } -@@ -778,8 +795,9 @@ int radeon_resume_kms(struct drm_device *dev) - /* resume AGP if in use */ - radeon_agp_resume(rdev); - radeon_resume(rdev); -+ radeon_pm_resume(rdev); - radeon_restore_bios_scratch_regs(rdev); -- fb_set_suspend(rdev->fbdev_info, 0); -+ radeon_fbdev_set_suspend(rdev, 0); - release_console_sem(); - - /* reset hpd state */ -@@ -789,6 +807,26 @@ int radeon_resume_kms(struct drm_device *dev) - return 0; - } - -+int radeon_gpu_reset(struct radeon_device *rdev) -+{ -+ int r; -+ -+ radeon_save_bios_scratch_regs(rdev); -+ radeon_suspend(rdev); -+ -+ r = radeon_asic_reset(rdev); -+ if (!r) { -+ dev_info(rdev->dev, "GPU reset succeed\n"); -+ radeon_resume(rdev); -+ radeon_restore_bios_scratch_regs(rdev); -+ drm_helper_resume_force_mode(rdev->ddev); -+ return 0; -+ } -+ /* bad news, how to tell it to userspace ? 
*/ -+ dev_info(rdev->dev, "GPU reset failed\n"); -+ return r; -+} -+ - - /* - * Debugfs -diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c -index bb1c122..c73444a 100644 ---- a/drivers/gpu/drm/radeon/radeon_display.c -+++ b/drivers/gpu/drm/radeon/radeon_display.c -@@ -284,8 +284,7 @@ static const char *connector_names[15] = { - "eDP", - }; - --static const char *hpd_names[7] = { -- "NONE", -+static const char *hpd_names[6] = { - "HPD1", - "HPD2", - "HPD3", -@@ -633,37 +632,37 @@ calc_fb_div(struct radeon_pll *pll, - - vco_freq = freq * post_div; - /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */ -- a.full = rfixed_const(pll->reference_freq); -- feedback_divider.full = rfixed_const(vco_freq); -- feedback_divider.full = rfixed_div(feedback_divider, a); -- a.full = rfixed_const(ref_div); -- feedback_divider.full = rfixed_mul(feedback_divider, a); -+ a.full = dfixed_const(pll->reference_freq); -+ feedback_divider.full = dfixed_const(vco_freq); -+ feedback_divider.full = dfixed_div(feedback_divider, a); -+ a.full = dfixed_const(ref_div); -+ feedback_divider.full = dfixed_mul(feedback_divider, a); - - if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { - /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */ -- a.full = rfixed_const(10); -- feedback_divider.full = rfixed_mul(feedback_divider, a); -- feedback_divider.full += rfixed_const_half(0); -- feedback_divider.full = rfixed_floor(feedback_divider); -- feedback_divider.full = rfixed_div(feedback_divider, a); -+ a.full = dfixed_const(10); -+ feedback_divider.full = dfixed_mul(feedback_divider, a); -+ feedback_divider.full += dfixed_const_half(0); -+ feedback_divider.full = dfixed_floor(feedback_divider); -+ feedback_divider.full = dfixed_div(feedback_divider, a); - - /* *fb_div = floor(feedback_divider); */ -- a.full = rfixed_floor(feedback_divider); -- *fb_div = rfixed_trunc(a); -+ a.full = dfixed_floor(feedback_divider); -+ *fb_div = dfixed_trunc(a); - /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */ -- a.full = rfixed_const(10); -- b.full = rfixed_mul(feedback_divider, a); -+ a.full = dfixed_const(10); -+ b.full = dfixed_mul(feedback_divider, a); - -- feedback_divider.full = rfixed_floor(feedback_divider); -- feedback_divider.full = rfixed_mul(feedback_divider, a); -+ feedback_divider.full = dfixed_floor(feedback_divider); -+ feedback_divider.full = dfixed_mul(feedback_divider, a); - feedback_divider.full = b.full - feedback_divider.full; -- *fb_div_frac = rfixed_trunc(feedback_divider); -+ *fb_div_frac = dfixed_trunc(feedback_divider); - } else { - /* *fb_div = floor(feedback_divider + 0.5); */ -- feedback_divider.full += rfixed_const_half(0); -- feedback_divider.full = rfixed_floor(feedback_divider); -+ feedback_divider.full += dfixed_const_half(0); -+ feedback_divider.full = dfixed_floor(feedback_divider); - -- *fb_div = rfixed_trunc(feedback_divider); -+ *fb_div = dfixed_trunc(feedback_divider); - *fb_div_frac = 0; - } - -@@ -693,10 +692,10 @@ calc_fb_ref_div(struct radeon_pll *pll, - pll_out_max = pll->pll_out_max; - } - -- ffreq.full = rfixed_const(freq); -+ ffreq.full = dfixed_const(freq); - /* max_error = ffreq * 0.0025; */ -- a.full = rfixed_const(400); -- max_error.full = rfixed_div(ffreq, a); -+ a.full = dfixed_const(400); -+ max_error.full = dfixed_div(ffreq, a); - - for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) { - if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) { -@@ -707,9 +706,9 @@ 
calc_fb_ref_div(struct radeon_pll *pll, - continue; - - /* pll_out = vco / post_div; */ -- a.full = rfixed_const(post_div); -- pll_out.full = rfixed_const(vco); -- pll_out.full = rfixed_div(pll_out, a); -+ a.full = dfixed_const(post_div); -+ pll_out.full = dfixed_const(vco); -+ pll_out.full = dfixed_div(pll_out, a); - - if (pll_out.full >= ffreq.full) { - error.full = pll_out.full - ffreq.full; -@@ -831,10 +830,6 @@ void radeon_compute_pll(struct radeon_pll *pll, - static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) - { - struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); -- struct drm_device *dev = fb->dev; -- -- if (fb->fbdev) -- radeonfb_remove(dev, fb); - - if (radeon_fb->obj) - drm_gem_object_unreference_unlocked(radeon_fb->obj); -@@ -856,21 +851,15 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = { - .create_handle = radeon_user_framebuffer_create_handle, - }; - --struct drm_framebuffer * --radeon_framebuffer_create(struct drm_device *dev, -- struct drm_mode_fb_cmd *mode_cmd, -- struct drm_gem_object *obj) -+void -+radeon_framebuffer_init(struct drm_device *dev, -+ struct radeon_framebuffer *rfb, -+ struct drm_mode_fb_cmd *mode_cmd, -+ struct drm_gem_object *obj) - { -- struct radeon_framebuffer *radeon_fb; -- -- radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); -- if (radeon_fb == NULL) { -- return NULL; -- } -- drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs); -- drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd); -- radeon_fb->obj = obj; -- return &radeon_fb->base; -+ rfb->obj = obj; -+ drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs); -+ drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd); - } - - static struct drm_framebuffer * -@@ -879,6 +868,7 @@ radeon_user_framebuffer_create(struct drm_device *dev, - struct drm_mode_fb_cmd *mode_cmd) - { - struct drm_gem_object *obj; -+ struct radeon_framebuffer *radeon_fb; - - obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); - if (obj == NULL) { -@@ -886,12 +876,26 @@ radeon_user_framebuffer_create(struct drm_device *dev, - "can't create framebuffer\n", mode_cmd->handle); - return NULL; - } -- return radeon_framebuffer_create(dev, mode_cmd, obj); -+ -+ radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); -+ if (radeon_fb == NULL) { -+ return NULL; -+ } -+ -+ radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj); -+ -+ return &radeon_fb->base; -+} -+ -+static void radeon_output_poll_changed(struct drm_device *dev) -+{ -+ struct radeon_device *rdev = dev->dev_private; -+ radeon_fb_output_poll_changed(rdev); - } - - static const struct drm_mode_config_funcs radeon_mode_funcs = { - .fb_create = radeon_user_framebuffer_create, -- .fb_changed = radeonfb_probe, -+ .output_poll_changed = radeon_output_poll_changed - }; - - struct drm_prop_enum_list { -@@ -978,8 +982,11 @@ void radeon_update_display_priority(struct radeon_device *rdev) - /* set display priority to high for r3xx, rv515 chips - * this avoids flickering due to underflow to the - * display controllers during heavy acceleration. -+ * Don't force high on rs4xx igp chips as it seems to -+ * affect the sound card. See kernel bug 15982. 
- */ -- if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) -+ if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) && -+ !(rdev->flags & RADEON_IS_IGP)) - rdev->disp_priority = 2; - else - rdev->disp_priority = 0; -@@ -1031,15 +1038,27 @@ int radeon_modeset_init(struct radeon_device *rdev) - } - /* initialize hpd */ - radeon_hpd_init(rdev); -- drm_helper_initial_config(rdev->ddev); -+ -+ /* Initialize power management */ -+ if (radeon_pm) -+ radeon_pm_init(rdev); -+ -+ radeon_fbdev_init(rdev); -+ drm_kms_helper_poll_init(rdev->ddev); -+ - return 0; - } - - void radeon_modeset_fini(struct radeon_device *rdev) - { -+ radeon_fbdev_fini(rdev); - kfree(rdev->mode_info.bios_hardcoded_edid); - -+ if (radeon_pm) -+ radeon_pm_fini(rdev); -+ - if (rdev->mode_info.mode_config_initialized) { -+ drm_kms_helper_poll_fini(rdev->ddev); - radeon_hpd_fini(rdev); - drm_mode_config_cleanup(rdev->ddev); - rdev->mode_info.mode_config_initialized = false; -@@ -1089,15 +1108,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, - } - if (radeon_crtc->rmx_type != RMX_OFF) { - fixed20_12 a, b; -- a.full = rfixed_const(crtc->mode.vdisplay); -- b.full = rfixed_const(radeon_crtc->native_mode.hdisplay); -- radeon_crtc->vsc.full = rfixed_div(a, b); -- a.full = rfixed_const(crtc->mode.hdisplay); -- b.full = rfixed_const(radeon_crtc->native_mode.vdisplay); -- radeon_crtc->hsc.full = rfixed_div(a, b); -+ a.full = dfixed_const(crtc->mode.vdisplay); -+ b.full = dfixed_const(radeon_crtc->native_mode.hdisplay); -+ radeon_crtc->vsc.full = dfixed_div(a, b); -+ a.full = dfixed_const(crtc->mode.hdisplay); -+ b.full = dfixed_const(radeon_crtc->native_mode.vdisplay); -+ radeon_crtc->hsc.full = dfixed_div(a, b); - } else { -- radeon_crtc->vsc.full = rfixed_const(1); -- radeon_crtc->hsc.full = rfixed_const(1); -+ radeon_crtc->vsc.full = dfixed_const(1); -+ radeon_crtc->hsc.full = dfixed_const(1); - } - return true; - } -diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c -index b3749d4..7ed94d2 100644 ---- a/drivers/gpu/drm/radeon/radeon_drv.c -+++ b/drivers/gpu/drm/radeon/radeon_drv.c -@@ -44,9 +44,11 @@ - * - 2.1.0 - add square tiling interface - * - 2.2.0 - add r6xx/r7xx const buffer support - * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs -+ * - 2.4.0 - add crtc id query -+ * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen - */ - #define KMS_DRIVER_MAJOR 2 --#define KMS_DRIVER_MINOR 3 -+#define KMS_DRIVER_MINOR 5 - #define KMS_DRIVER_PATCHLEVEL 0 - int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); - int radeon_driver_unload_kms(struct drm_device *dev); -@@ -91,10 +93,10 @@ int radeon_testing = 0; - int radeon_connector_table = 0; - int radeon_tv = 1; - int radeon_new_pll = -1; --int radeon_dynpm = -1; - int radeon_audio = 1; - int radeon_disp_priority = 0; - int radeon_hw_i2c = 0; -+int radeon_pm = 0; - - MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); - module_param_named(no_wb, radeon_no_wb, int, 0444); -@@ -132,9 +134,6 @@ module_param_named(tv, radeon_tv, int, 0444); - MODULE_PARM_DESC(new_pll, "Select new PLL code"); - module_param_named(new_pll, radeon_new_pll, int, 0444); - --MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)"); --module_param_named(dynpm, radeon_dynpm, int, 0444); -- - MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); - module_param_named(audio, radeon_audio, int, 0444); - -@@ -144,6 +143,9 @@ module_param_named(disp_priority, radeon_disp_priority, 
int, 0444); - MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)"); - module_param_named(hw_i2c, radeon_hw_i2c, int, 0444); - -+MODULE_PARM_DESC(pm, "enable power management (0 = disable)"); -+module_param_named(pm, radeon_pm, int, 0444); -+ - static int radeon_suspend(struct drm_device *dev, pm_message_t state) - { - drm_radeon_private_t *dev_priv = dev->dev_private; -diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c -index c5ddaf5..1ebb100 100644 ---- a/drivers/gpu/drm/radeon/radeon_encoders.c -+++ b/drivers/gpu/drm/radeon/radeon_encoders.c -@@ -309,9 +309,6 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - -- /* adjust pm to upcoming mode change */ -- radeon_pm_compute_clocks(rdev); -- - /* set the active encoder to connector routing */ - radeon_encoder_set_active_device(encoder); - drm_mode_set_crtcinfo(adjusted_mode, 0); -@@ -1111,8 +1108,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) - } - radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); - -- /* adjust pm to dpms change */ -- radeon_pm_compute_clocks(rdev); - } - - union crtc_source_param { -@@ -1546,10 +1541,49 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder) - - static void radeon_atom_encoder_disable(struct drm_encoder *encoder) - { -+ struct drm_device *dev = encoder->dev; -+ struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_encoder_atom_dig *dig; - radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); - -+ switch (radeon_encoder->encoder_id) { -+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1: -+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: -+ case ENCODER_OBJECT_ID_INTERNAL_LVDS: -+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1: -+ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE); -+ break; -+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: -+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: -+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: -+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: -+ if (ASIC_IS_DCE4(rdev)) -+ /* disable the transmitter */ -+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); -+ else { -+ /* disable the encoder and transmitter */ -+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); -+ atombios_dig_encoder_setup(encoder, ATOM_DISABLE); -+ } -+ break; -+ case ENCODER_OBJECT_ID_INTERNAL_DDI: -+ atombios_ddia_setup(encoder, ATOM_DISABLE); -+ break; -+ case ENCODER_OBJECT_ID_INTERNAL_DVO1: -+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: -+ atombios_external_tmds_setup(encoder, ATOM_DISABLE); -+ break; -+ case ENCODER_OBJECT_ID_INTERNAL_DAC1: -+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: -+ case ENCODER_OBJECT_ID_INTERNAL_DAC2: -+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: -+ atombios_dac_setup(encoder, ATOM_DISABLE); -+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) -+ atombios_tv_setup(encoder, ATOM_DISABLE); -+ break; -+ } -+ - if (radeon_encoder_is_digital(encoder)) { - if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) - r600_hdmi_disable(encoder); -diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c -index 9ac57a0..dc1634b 100644 ---- a/drivers/gpu/drm/radeon/radeon_fb.c -+++ b/drivers/gpu/drm/radeon/radeon_fb.c -@@ -23,10 +23,6 @@ - * Authors: - * 
David Airlie - */ -- /* -- * Modularization -- */ -- - #include - #include - #include -@@ -42,17 +38,21 @@ - - #include - --struct radeon_fb_device { -+/* object hierarchy - -+ this contains a helper + a radeon fb -+ the helper contains a pointer to radeon framebuffer baseclass. -+*/ -+struct radeon_fbdev { - struct drm_fb_helper helper; -- struct radeon_framebuffer *rfb; -- struct radeon_device *rdev; -+ struct radeon_framebuffer rfb; -+ struct list_head fbdev_list; -+ struct radeon_device *rdev; - }; - - static struct fb_ops radeonfb_ops = { - .owner = THIS_MODULE, - .fb_check_var = drm_fb_helper_check_var, - .fb_set_par = drm_fb_helper_set_par, -- .fb_setcolreg = drm_fb_helper_setcolreg, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, -@@ -61,45 +61,6 @@ static struct fb_ops radeonfb_ops = { - .fb_setcmap = drm_fb_helper_setcmap, - }; - --/** -- * Currently it is assumed that the old framebuffer is reused. -- * -- * LOCKING -- * caller should hold the mode config lock. -- * -- */ --int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc) --{ -- struct fb_info *info; -- struct drm_framebuffer *fb; -- struct drm_display_mode *mode = crtc->desired_mode; -- -- fb = crtc->fb; -- if (fb == NULL) { -- return 1; -- } -- info = fb->fbdev; -- if (info == NULL) { -- return 1; -- } -- if (mode == NULL) { -- return 1; -- } -- info->var.xres = mode->hdisplay; -- info->var.right_margin = mode->hsync_start - mode->hdisplay; -- info->var.hsync_len = mode->hsync_end - mode->hsync_start; -- info->var.left_margin = mode->htotal - mode->hsync_end; -- info->var.yres = mode->vdisplay; -- info->var.lower_margin = mode->vsync_start - mode->vdisplay; -- info->var.vsync_len = mode->vsync_end - mode->vsync_start; -- info->var.upper_margin = mode->vtotal - mode->vsync_end; -- info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100; -- /* avoid overflow */ -- info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh; -- -- return 0; --} --EXPORT_SYMBOL(radeonfb_resize); - - static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) - { -@@ -125,57 +86,44 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo - return aligned; - } - --static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { -- .gamma_set = radeon_crtc_fb_gamma_set, -- .gamma_get = radeon_crtc_fb_gamma_get, --}; -+static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) -+{ -+ struct radeon_bo *rbo = gobj->driver_private; -+ int ret; -+ -+ ret = radeon_bo_reserve(rbo, false); -+ if (likely(ret == 0)) { -+ radeon_bo_kunmap(rbo); -+ radeon_bo_unreserve(rbo); -+ } -+ drm_gem_object_unreference_unlocked(gobj); -+} - --int radeonfb_create(struct drm_device *dev, -- uint32_t fb_width, uint32_t fb_height, -- uint32_t surface_width, uint32_t surface_height, -- uint32_t surface_depth, uint32_t surface_bpp, -- struct drm_framebuffer **fb_p) -+static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, -+ struct drm_mode_fb_cmd *mode_cmd, -+ struct drm_gem_object **gobj_p) - { -- struct radeon_device *rdev = dev->dev_private; -- struct fb_info *info; -- struct radeon_fb_device *rfbdev; -- struct drm_framebuffer *fb = NULL; -- struct radeon_framebuffer *rfb; -- struct drm_mode_fb_cmd mode_cmd; -+ struct radeon_device *rdev = rfbdev->rdev; - struct drm_gem_object *gobj = NULL; - struct radeon_bo *rbo = NULL; -- struct device *device = &rdev->pdev->dev; -- int size, aligned_size, ret; -- u64 
fb_gpuaddr; -- void *fbptr = NULL; -- unsigned long tmp; - bool fb_tiled = false; /* useful for testing */ - u32 tiling_flags = 0; -+ int ret; -+ int aligned_size, size; - -- mode_cmd.width = surface_width; -- mode_cmd.height = surface_height; -- -- /* avivo can't scanout real 24bpp */ -- if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) -- surface_bpp = 32; -- -- mode_cmd.bpp = surface_bpp; - /* need to align pitch with crtc limits */ -- mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); -- mode_cmd.depth = surface_depth; -+ mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); - -- size = mode_cmd.pitch * mode_cmd.height; -+ size = mode_cmd->pitch * mode_cmd->height; - aligned_size = ALIGN(size, PAGE_SIZE); -- - ret = radeon_gem_object_create(rdev, aligned_size, 0, -- RADEON_GEM_DOMAIN_VRAM, -- false, ttm_bo_type_kernel, -- &gobj); -+ RADEON_GEM_DOMAIN_VRAM, -+ false, ttm_bo_type_kernel, -+ &gobj); - if (ret) { -- printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", -- surface_width, surface_height); -- ret = -ENOMEM; -- goto out; -+ printk(KERN_ERR "failed to allocate framebuffer (%d)\n", -+ aligned_size); -+ return -ENOMEM; - } - rbo = gobj->driver_private; - -@@ -183,7 +131,7 @@ int radeonfb_create(struct drm_device *dev, - tiling_flags = RADEON_TILING_MACRO; - - #ifdef __BIG_ENDIAN -- switch (mode_cmd.bpp) { -+ switch (mode_cmd->bpp) { - case 32: - tiling_flags |= RADEON_TILING_SWAP_32BIT; - break; -@@ -196,57 +144,81 @@ int radeonfb_create(struct drm_device *dev, - - if (tiling_flags) { - ret = radeon_bo_set_tiling_flags(rbo, -- tiling_flags | RADEON_TILING_SURFACE, -- mode_cmd.pitch); -+ tiling_flags | RADEON_TILING_SURFACE, -+ mode_cmd->pitch); - if (ret) - dev_err(rdev->dev, "FB failed to set tiling flags\n"); - } -- mutex_lock(&rdev->ddev->struct_mutex); -- fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); -- if (fb == NULL) { -- DRM_ERROR("failed to allocate fb.\n"); -- ret = -ENOMEM; -- goto out_unref; -- } -+ -+ - ret = radeon_bo_reserve(rbo, false); - if (unlikely(ret != 0)) - goto out_unref; -- ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); -+ ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL); - if (ret) { - radeon_bo_unreserve(rbo); - goto out_unref; - } - if (fb_tiled) - radeon_bo_check_tiling(rbo, 0, 0); -- ret = radeon_bo_kmap(rbo, &fbptr); -+ ret = radeon_bo_kmap(rbo, NULL); - radeon_bo_unreserve(rbo); - if (ret) { - goto out_unref; - } - -- list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list); -+ *gobj_p = gobj; -+ return 0; -+out_unref: -+ radeonfb_destroy_pinned_object(gobj); -+ *gobj_p = NULL; -+ return ret; -+} - -- *fb_p = fb; -- rfb = to_radeon_framebuffer(fb); -- rdev->fbdev_rfb = rfb; -- rdev->fbdev_rbo = rbo; -+static int radeonfb_create(struct radeon_fbdev *rfbdev, -+ struct drm_fb_helper_surface_size *sizes) -+{ -+ struct radeon_device *rdev = rfbdev->rdev; -+ struct fb_info *info; -+ struct drm_framebuffer *fb = NULL; -+ struct drm_mode_fb_cmd mode_cmd; -+ struct drm_gem_object *gobj = NULL; -+ struct radeon_bo *rbo = NULL; -+ struct device *device = &rdev->pdev->dev; -+ int ret; -+ unsigned long tmp; -+ -+ mode_cmd.width = sizes->surface_width; -+ mode_cmd.height = sizes->surface_height; -+ -+ /* avivo can't scanout real 24bpp */ -+ if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) -+ sizes->surface_bpp = 32; - -- info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); -+ 
mode_cmd.bpp = sizes->surface_bpp; -+ mode_cmd.depth = sizes->surface_depth; -+ -+ ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj); -+ rbo = gobj->driver_private; -+ -+ /* okay we have an object now allocate the framebuffer */ -+ info = framebuffer_alloc(0, device); - if (info == NULL) { - ret = -ENOMEM; - goto out_unref; - } - -- rdev->fbdev_info = info; -- rfbdev = info->par; -- rfbdev->helper.funcs = &radeon_fb_helper_funcs; -- rfbdev->helper.dev = dev; -- ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc, -- RADEONFB_CONN_LIMIT); -- if (ret) -- goto out_unref; -+ info->par = rfbdev; -+ -+ radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); -+ -+ fb = &rfbdev->rfb.base; -+ -+ /* setup helper */ -+ rfbdev->helper.fb = fb; -+ rfbdev->helper.fbdev = info; - -- memset_io(fbptr, 0x0, aligned_size); -+ memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo)); - - strcpy(info->fix.id, "radeondrmfb"); - -@@ -255,17 +227,22 @@ int radeonfb_create(struct drm_device *dev, - info->flags = FBINFO_DEFAULT; - info->fbops = &radeonfb_ops; - -- tmp = fb_gpuaddr - rdev->mc.vram_start; -+ tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start; - info->fix.smem_start = rdev->mc.aper_base + tmp; -- info->fix.smem_len = size; -- info->screen_base = fbptr; -- info->screen_size = size; -+ info->fix.smem_len = radeon_bo_size(rbo); -+ info->screen_base = rbo->kptr; -+ info->screen_size = radeon_bo_size(rbo); - -- drm_fb_helper_fill_var(info, fb, fb_width, fb_height); -+ drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); - - /* setup aperture base/size for vesafb takeover */ -- info->aperture_base = rdev->ddev->mode_config.fb_base; -- info->aperture_size = rdev->mc.real_vram_size; -+ info->apertures = alloc_apertures(1); -+ if (!info->apertures) { -+ ret = -ENOMEM; -+ goto out_unref; -+ } -+ info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; -+ info->apertures->ranges[0].size = rdev->mc.real_vram_size; - - info->fix.mmio_start = 0; - info->fix.mmio_len = 0; -@@ -274,44 +251,55 @@ int radeonfb_create(struct drm_device *dev, - info->pixmap.access_align = 32; - info->pixmap.flags = FB_PIXMAP_SYSTEM; - info->pixmap.scan_align = 1; -+ - if (info->screen_base == NULL) { - ret = -ENOSPC; - goto out_unref; - } -+ -+ ret = fb_alloc_cmap(&info->cmap, 256, 0); -+ if (ret) { -+ ret = -ENOMEM; -+ goto out_unref; -+ } -+ - DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); - DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base); -- DRM_INFO("size %lu\n", (unsigned long)size); -+ DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo)); - DRM_INFO("fb depth is %d\n", fb->depth); - DRM_INFO(" pitch is %d\n", fb->pitch); - -- fb->fbdev = info; -- rfbdev->rfb = rfb; -- rfbdev->rdev = rdev; -- -- mutex_unlock(&rdev->ddev->struct_mutex); - vga_switcheroo_client_fb_set(rdev->ddev->pdev, info); - return 0; - - out_unref: - if (rbo) { -- ret = radeon_bo_reserve(rbo, false); -- if (likely(ret == 0)) { -- radeon_bo_kunmap(rbo); -- radeon_bo_unreserve(rbo); -- } -+ - } - if (fb && ret) { -- list_del(&fb->filp_head); - drm_gem_object_unreference(gobj); - drm_framebuffer_cleanup(fb); - kfree(fb); - } -- drm_gem_object_unreference(gobj); -- mutex_unlock(&rdev->ddev->struct_mutex); --out: - return ret; - } - -+static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper, -+ struct drm_fb_helper_surface_size *sizes) -+{ -+ struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper; -+ int new_fb = 0; -+ int ret; -+ -+ if 
(!helper->fb) { -+ ret = radeonfb_create(rfbdev, sizes); -+ if (ret) -+ return ret; -+ new_fb = 1; -+ } -+ return new_fb; -+} -+ - static char *mode_option; - int radeon_parse_options(char *options) - { -@@ -328,46 +316,108 @@ int radeon_parse_options(char *options) - return 0; - } - --int radeonfb_probe(struct drm_device *dev) -+void radeon_fb_output_poll_changed(struct radeon_device *rdev) - { -- struct radeon_device *rdev = dev->dev_private; -- int bpp_sel = 32; -- -- /* select 8 bpp console on RN50 or 16MB cards */ -- if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) -- bpp_sel = 8; -- -- return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create); -+ drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper); - } - --int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) -+static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev) - { - struct fb_info *info; -- struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb); -+ struct radeon_framebuffer *rfb = &rfbdev->rfb; - struct radeon_bo *rbo; - int r; - -- if (!fb) { -- return -EINVAL; -+ if (rfbdev->helper.fbdev) { -+ info = rfbdev->helper.fbdev; -+ -+ unregister_framebuffer(info); -+ if (info->cmap.len) -+ fb_dealloc_cmap(&info->cmap); -+ framebuffer_release(info); - } -- info = fb->fbdev; -- if (info) { -- struct radeon_fb_device *rfbdev = info->par; -+ -+ if (rfb->obj) { - rbo = rfb->obj->driver_private; -- unregister_framebuffer(info); - r = radeon_bo_reserve(rbo, false); - if (likely(r == 0)) { - radeon_bo_kunmap(rbo); - radeon_bo_unpin(rbo); - radeon_bo_unreserve(rbo); - } -- drm_fb_helper_free(&rfbdev->helper); -- framebuffer_release(info); -+ drm_gem_object_unreference_unlocked(rfb->obj); - } -+ drm_fb_helper_fini(&rfbdev->helper); -+ drm_framebuffer_cleanup(&rfb->base); - -- printk(KERN_INFO "unregistered panic notifier\n"); -+ return 0; -+} - -+static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { -+ .gamma_set = radeon_crtc_fb_gamma_set, -+ .gamma_get = radeon_crtc_fb_gamma_get, -+ .fb_probe = radeon_fb_find_or_create_single, -+}; -+ -+int radeon_fbdev_init(struct radeon_device *rdev) -+{ -+ struct radeon_fbdev *rfbdev; -+ int bpp_sel = 32; -+ int ret; -+ -+ /* select 8 bpp console on RN50 or 16MB cards */ -+ if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) -+ bpp_sel = 8; -+ -+ rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL); -+ if (!rfbdev) -+ return -ENOMEM; -+ -+ rfbdev->rdev = rdev; -+ rdev->mode_info.rfbdev = rfbdev; -+ rfbdev->helper.funcs = &radeon_fb_helper_funcs; -+ -+ ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper, -+ rdev->num_crtc, -+ RADEONFB_CONN_LIMIT); -+ if (ret) { -+ kfree(rfbdev); -+ return ret; -+ } -+ -+ drm_fb_helper_single_add_all_connectors(&rfbdev->helper); -+ drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); - return 0; - } --EXPORT_SYMBOL(radeonfb_remove); --MODULE_LICENSE("GPL"); -+ -+void radeon_fbdev_fini(struct radeon_device *rdev) -+{ -+ if (!rdev->mode_info.rfbdev) -+ return; -+ -+ radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev); -+ kfree(rdev->mode_info.rfbdev); -+ rdev->mode_info.rfbdev = NULL; -+} -+ -+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state) -+{ -+ fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state); -+} -+ -+int radeon_fbdev_total_size(struct radeon_device *rdev) -+{ -+ struct radeon_bo *robj; -+ int size = 0; -+ -+ robj = rdev->mode_info.rfbdev->rfb.obj->driver_private; -+ size += radeon_bo_size(robj); -+ return size; 
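/*
 * Illustrative sketch only, not part of the patch: it repeats, as a
 * standalone program, the size arithmetic radeonfb_create_pinned_object()
 * uses above.  bpp is turned into bytes per pixel with (bpp + 1) / 8, the
 * pitch is the CRTC-aligned width times that, and the object pinned in
 * VRAM is pitch * height rounded up to a whole page.  radeon_align_pitch()
 * itself is not shown in this hunk, so the sketch assumes the width is
 * already aligned; fb_object_size() and the constants are local to the
 * sketch.
 */
#include <stdio.h>

#define FB_PAGE_SIZE 4096UL                      /* assumed page size */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

static unsigned long fb_object_size(unsigned int width, unsigned int height,
                                    unsigned int bpp)
{
    unsigned int cpp = (bpp + 1) / 8;            /* 32 -> 4, 24 -> 3, 16 -> 2, 8 -> 1 */
    unsigned long pitch = (unsigned long)width * cpp;

    return ALIGN_UP(pitch * height, FB_PAGE_SIZE);
}

int main(void)
{
    /* e.g. a 1280x800 console surface at 32 bpp */
    printf("pinned fb object: %lu bytes\n", fb_object_size(1280, 800, 32));
    return 0;
}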
-+} -+ -+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) -+{ -+ if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private) -+ return true; -+ return false; -+} -diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c -index d90f95b..b1f9a81 100644 ---- a/drivers/gpu/drm/radeon/radeon_fence.c -+++ b/drivers/gpu/drm/radeon/radeon_fence.c -@@ -58,7 +58,6 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) - radeon_fence_ring_emit(rdev, fence); - - fence->emited = true; -- fence->timeout = jiffies + ((2000 * HZ) / 1000); - list_del(&fence->list); - list_add_tail(&fence->list, &rdev->fence_drv.emited); - write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); -@@ -71,15 +70,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev) - struct list_head *i, *n; - uint32_t seq; - bool wake = false; -+ unsigned long cjiffies; - -- if (rdev == NULL) { -- return true; -- } -- if (rdev->shutdown) { -- return true; -- } - seq = RREG32(rdev->fence_drv.scratch_reg); -- rdev->fence_drv.last_seq = seq; -+ if (seq != rdev->fence_drv.last_seq) { -+ rdev->fence_drv.last_seq = seq; -+ rdev->fence_drv.last_jiffies = jiffies; -+ rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT; -+ } else { -+ cjiffies = jiffies; -+ if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) { -+ cjiffies -= rdev->fence_drv.last_jiffies; -+ if (time_after(rdev->fence_drv.last_timeout, cjiffies)) { -+ /* update the timeout */ -+ rdev->fence_drv.last_timeout -= cjiffies; -+ } else { -+ /* the 500ms timeout is elapsed we should test -+ * for GPU lockup -+ */ -+ rdev->fence_drv.last_timeout = 1; -+ } -+ } else { -+ /* wrap around update last jiffies, we will just wait -+ * a little longer -+ */ -+ rdev->fence_drv.last_jiffies = cjiffies; -+ } -+ return false; -+ } - n = NULL; - list_for_each(i, &rdev->fence_drv.emited) { - fence = list_entry(i, struct radeon_fence, list); -@@ -171,9 +189,8 @@ bool radeon_fence_signaled(struct radeon_fence *fence) - int radeon_fence_wait(struct radeon_fence *fence, bool intr) - { - struct radeon_device *rdev; -- unsigned long cur_jiffies; -- unsigned long timeout; -- bool expired = false; -+ unsigned long irq_flags, timeout; -+ u32 seq; - int r; - - if (fence == NULL) { -@@ -184,21 +201,18 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr) - if (radeon_fence_signaled(fence)) { - return 0; - } -- -+ timeout = rdev->fence_drv.last_timeout; - retry: -- cur_jiffies = jiffies; -- timeout = HZ / 100; -- if (time_after(fence->timeout, cur_jiffies)) { -- timeout = fence->timeout - cur_jiffies; -- } -- -+ /* save current sequence used to check for GPU lockup */ -+ seq = rdev->fence_drv.last_seq; - if (intr) { - radeon_irq_kms_sw_irq_get(rdev); - r = wait_event_interruptible_timeout(rdev->fence_drv.queue, - radeon_fence_signaled(fence), timeout); - radeon_irq_kms_sw_irq_put(rdev); -- if (unlikely(r < 0)) -+ if (unlikely(r < 0)) { - return r; -+ } - } else { - radeon_irq_kms_sw_irq_get(rdev); - r = wait_event_timeout(rdev->fence_drv.queue, -@@ -206,38 +220,36 @@ retry: - radeon_irq_kms_sw_irq_put(rdev); - } - if (unlikely(!radeon_fence_signaled(fence))) { -- if (unlikely(r == 0)) { -- expired = true; -+ /* we were interrupted for some reason and fence isn't -+ * isn't signaled yet, resume wait -+ */ -+ if (r) { -+ timeout = r; -+ goto retry; - } -- if (unlikely(expired)) { -- timeout = 1; -- if (time_after(cur_jiffies, fence->timeout)) { -- timeout = cur_jiffies - fence->timeout; 
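/*
 * Illustrative sketch only, not part of the patch: it models, in plain
 * userspace C, the bookkeeping the reworked radeon_fence_poll_locked()
 * above relies on.  The driver remembers the last fence sequence number it
 * read and a remaining timeout budget; while the sequence keeps advancing
 * the budget is refilled, and only once it has stopped advancing for the
 * whole budget (about 500ms per the comment above) does the waiter go on
 * to test for a GPU lockup.  struct fence_drv, fence_poll() and
 * FENCE_BUDGET are stand-ins local to this sketch; the real code keys off
 * jiffies and RADEON_FENCE_JIFFIES_TIMEOUT and is more involved.
 */
#include <stdbool.h>
#include <stdio.h>

#define FENCE_BUDGET 500UL   /* stand-in for RADEON_FENCE_JIFFIES_TIMEOUT */

struct fence_drv {
    unsigned int last_seq;       /* last sequence number read back */
    unsigned long last_stamp;    /* when it last changed */
    unsigned long last_timeout;  /* remaining budget */
};

/* returns true once the budget is exhausted and a lockup check is due */
static bool fence_poll(struct fence_drv *drv, unsigned int hw_seq, unsigned long now)
{
    if (hw_seq != drv->last_seq) {
        /* progress: remember it and refill the budget */
        drv->last_seq = hw_seq;
        drv->last_stamp = now;
        drv->last_timeout = FENCE_BUDGET;
        return false;
    }
    if (now > drv->last_stamp) {
        unsigned long elapsed = now - drv->last_stamp;
        if (drv->last_timeout > elapsed)
            drv->last_timeout -= elapsed;
        else
            drv->last_timeout = 1;   /* budget gone */
    } else {
        drv->last_stamp = now;       /* counter wrapped, just wait a bit longer */
    }
    return drv->last_timeout == 1;
}

int main(void)
{
    struct fence_drv drv = { 0, 0, FENCE_BUDGET };
    unsigned long now;

    fence_poll(&drv, 1, 0);                    /* a fence retires: budget refilled */
    for (now = 100; now <= 800; now += 100) {  /* then the hardware stops */
        if (fence_poll(&drv, 1, now)) {
            printf("no progress for %lu ticks: run the GPU lockup check\n", now);
            break;
        }
    }
    return 0;
}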
-- } -- timeout = jiffies_to_msecs(timeout); -- if (timeout > 500) { -- DRM_ERROR("fence(%p:0x%08X) %lums timeout " -- "going to reset GPU\n", -- fence, fence->seq, timeout); -- radeon_gpu_reset(rdev); -- WREG32(rdev->fence_drv.scratch_reg, fence->seq); -- } -+ /* don't protect read access to rdev->fence_drv.last_seq -+ * if we experiencing a lockup the value doesn't change -+ */ -+ if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) { -+ /* good news we believe it's a lockup */ -+ WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq); -+ /* FIXME: what should we do ? marking everyone -+ * as signaled for now -+ */ -+ rdev->gpu_lockup = true; -+ r = radeon_gpu_reset(rdev); -+ if (r) -+ return r; -+ WREG32(rdev->fence_drv.scratch_reg, fence->seq); -+ rdev->gpu_lockup = false; - } -+ timeout = RADEON_FENCE_JIFFIES_TIMEOUT; -+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags); -+ rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT; -+ rdev->fence_drv.last_jiffies = jiffies; -+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); - goto retry; - } -- if (unlikely(expired)) { -- rdev->fence_drv.count_timeout++; -- cur_jiffies = jiffies; -- timeout = 1; -- if (time_after(cur_jiffies, fence->timeout)) { -- timeout = cur_jiffies - fence->timeout; -- } -- timeout = jiffies_to_msecs(timeout); -- DRM_ERROR("fence(%p:0x%08X) %lums timeout\n", -- fence, fence->seq, timeout); -- DRM_ERROR("last signaled fence(0x%08X)\n", -- rdev->fence_drv.last_seq); -- } - return 0; - } - -@@ -333,7 +345,6 @@ int radeon_fence_driver_init(struct radeon_device *rdev) - INIT_LIST_HEAD(&rdev->fence_drv.created); - INIT_LIST_HEAD(&rdev->fence_drv.emited); - INIT_LIST_HEAD(&rdev->fence_drv.signaled); -- rdev->fence_drv.count_timeout = 0; - init_waitqueue_head(&rdev->fence_drv.queue); - rdev->fence_drv.initialized = true; - write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); -diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h -deleted file mode 100644 -index 3d4d84e..0000000 ---- a/drivers/gpu/drm/radeon/radeon_fixed.h -+++ /dev/null -@@ -1,67 +0,0 @@ --/* -- * Copyright 2009 Red Hat Inc. -- * -- * Permission is hereby granted, free of charge, to any person obtaining a -- * copy of this software and associated documentation files (the "Software"), -- * to deal in the Software without restriction, including without limitation -- * the rights to use, copy, modify, merge, publish, distribute, sublicense, -- * and/or sell copies of the Software, and to permit persons to whom the -- * Software is furnished to do so, subject to the following conditions: -- * -- * The above copyright notice and this permission notice shall be included in -- * all copies or substantial portions of the Software. -- * -- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -- * OTHER DEALINGS IN THE SOFTWARE. 
-- * -- * Authors: Dave Airlie -- */ --#ifndef RADEON_FIXED_H --#define RADEON_FIXED_H -- --typedef union rfixed { -- u32 full; --} fixed20_12; -- -- --#define rfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */ --#define rfixed_const_half(A) (u32)(((A) << 12) + 2048) --#define rfixed_const_666(A) (u32)(((A) << 12) + 2731) --#define rfixed_const_8(A) (u32)(((A) << 12) + 3277) --#define rfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12) --#define fixed_init(A) { .full = rfixed_const((A)) } --#define fixed_init_half(A) { .full = rfixed_const_half((A)) } --#define rfixed_trunc(A) ((A).full >> 12) -- --static inline u32 rfixed_floor(fixed20_12 A) --{ -- u32 non_frac = rfixed_trunc(A); -- -- return rfixed_const(non_frac); --} -- --static inline u32 rfixed_ceil(fixed20_12 A) --{ -- u32 non_frac = rfixed_trunc(A); -- -- if (A.full > rfixed_const(non_frac)) -- return rfixed_const(non_frac + 1); -- else -- return rfixed_const(non_frac); --} -- --static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B) --{ -- u64 tmp = ((u64)A.full << 13); -- -- do_div(tmp, B.full); -- tmp += 1; -- tmp /= 2; -- return lower_32_bits(tmp); --} --#endif -diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c -index 1770d3c..e65b903 100644 ---- a/drivers/gpu/drm/radeon/radeon_gart.c -+++ b/drivers/gpu/drm/radeon/radeon_gart.c -@@ -173,7 +173,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, - int i, j; - - if (!rdev->gart.ready) { -- DRM_ERROR("trying to bind memory to unitialized GART !\n"); -+ WARN(1, "trying to bind memory to unitialized GART !\n"); - return -EINVAL; - } - t = offset / RADEON_GPU_PAGE_SIZE; -diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c -index ef92d14..a72a3ee 100644 ---- a/drivers/gpu/drm/radeon/radeon_gem.c -+++ b/drivers/gpu/drm/radeon/radeon_gem.c -@@ -44,6 +44,9 @@ void radeon_gem_object_free(struct drm_gem_object *gobj) - if (robj) { - radeon_bo_unref(&robj); - } -+ -+ drm_gem_object_release(gobj); -+ kfree(gobj); - } - - int radeon_gem_object_create(struct radeon_device *rdev, int size, -@@ -158,8 +161,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, - args->vram_visible = rdev->mc.real_vram_size; - if (rdev->stollen_vga_memory) - args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); -- if (rdev->fbdev_rbo) -- args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo); -+ args->vram_visible -= radeon_fbdev_total_size(rdev); - args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 - - RADEON_IB_POOL_SIZE*64*1024; - return 0; -diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c -index a212041..059bfa4 100644 ---- a/drivers/gpu/drm/radeon/radeon_irq_kms.c -+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c -@@ -26,6 +26,7 @@ - * Jerome Glisse - */ - #include "drmP.h" -+#include "drm_crtc_helper.h" - #include "radeon_drm.h" - #include "radeon_reg.h" - #include "radeon.h" -@@ -55,7 +56,7 @@ static void radeon_hotplug_work_func(struct work_struct *work) - radeon_connector_hotplug(connector); - } - /* Just fire off a uevent and let userspace tell us what to do */ -- drm_sysfs_hotplug_event(dev); -+ drm_helper_hpd_irq_event(dev); - } - - void radeon_driver_irq_preinstall_kms(struct drm_device *dev) -@@ -67,6 +68,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) - - /* Disable *all* interrupts */ - rdev->irq.sw_int = false; -+ rdev->irq.gui_idle = false; - for (i = 0; i < rdev->num_crtc; i++) - 
rdev->irq.crtc_vblank_int[i] = false; - for (i = 0; i < 6; i++) -@@ -96,6 +98,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) - } - /* Disable *all* interrupts */ - rdev->irq.sw_int = false; -+ rdev->irq.gui_idle = false; - for (i = 0; i < rdev->num_crtc; i++) - rdev->irq.crtc_vblank_int[i] = false; - for (i = 0; i < 6; i++) -diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c -index c633319..6a70c0d 100644 ---- a/drivers/gpu/drm/radeon/radeon_kms.c -+++ b/drivers/gpu/drm/radeon/radeon_kms.c -@@ -98,11 +98,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) - { - struct radeon_device *rdev = dev->dev_private; - struct drm_radeon_info *info; -+ struct radeon_mode_info *minfo = &rdev->mode_info; - uint32_t *value_ptr; - uint32_t value; -+ struct drm_crtc *crtc; -+ int i, found; - - info = data; - value_ptr = (uint32_t *)((unsigned long)info->value); -+ value = *value_ptr; - switch (info->request) { - case RADEON_INFO_DEVICE_ID: - value = dev->pci_device; -@@ -114,6 +118,27 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) - value = rdev->num_z_pipes; - break; - case RADEON_INFO_ACCEL_WORKING: -+ /* xf86-video-ati 6.13.0 relies on this being false for evergreen */ -+ if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) -+ value = false; -+ else -+ value = rdev->accel_working; -+ break; -+ case RADEON_INFO_CRTC_FROM_ID: -+ for (i = 0, found = 0; i < rdev->num_crtc; i++) { -+ crtc = (struct drm_crtc *)minfo->crtcs[i]; -+ if (crtc && crtc->base.id == value) { -+ value = i; -+ found = 1; -+ break; -+ } -+ } -+ if (!found) { -+ DRM_DEBUG("unknown crtc id %d\n", value); -+ return -EINVAL; -+ } -+ break; -+ case RADEON_INFO_ACCEL_WORKING2: - value = rdev->accel_working; - break; - default: -diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c -index 88865e3..e1e5255 100644 ---- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c -+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c -@@ -26,7 +26,7 @@ - #include - #include - #include --#include "radeon_fixed.h" -+#include - #include "radeon.h" - #include "atom.h" - -@@ -314,6 +314,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) - - switch (mode) { - case DRM_MODE_DPMS_ON: -+ radeon_crtc->enabled = true; -+ /* adjust pm to dpms changes BEFORE enabling crtcs */ -+ radeon_pm_compute_clocks(rdev); - if (radeon_crtc->crtc_id) - WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask)); - else { -@@ -335,6 +338,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) - RADEON_CRTC_DISP_REQ_EN_B)); - WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask); - } -+ radeon_crtc->enabled = false; -+ /* adjust pm to dpms changes AFTER disabling crtcs */ -+ radeon_pm_compute_clocks(rdev); - break; - } - } -@@ -966,6 +972,12 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) - { -+ struct drm_device *dev = crtc->dev; -+ struct radeon_device *rdev = dev->dev_private; -+ -+ /* adjust pm to upcoming mode change */ -+ radeon_pm_compute_clocks(rdev); -+ - if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) - return false; - return true; -diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c -index 0274abe..5b07b88 100644 ---- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c -+++ 
b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c -@@ -116,8 +116,6 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) - else - radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); - -- /* adjust pm to dpms change */ -- radeon_pm_compute_clocks(rdev); - } - - static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) -@@ -217,11 +215,6 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *adjusted_mode) - { - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); -- struct drm_device *dev = encoder->dev; -- struct radeon_device *rdev = dev->dev_private; -- -- /* adjust pm to upcoming mode change */ -- radeon_pm_compute_clocks(rdev); - - /* set the active encoder to connector routing */ - radeon_encoder_set_active_device(encoder); -@@ -286,8 +279,6 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode - else - radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); - -- /* adjust pm to dpms change */ -- radeon_pm_compute_clocks(rdev); - } - - static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder) -@@ -474,8 +465,6 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) - else - radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); - -- /* adjust pm to dpms change */ -- radeon_pm_compute_clocks(rdev); - } - - static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder) -@@ -642,8 +631,6 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) - else - radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); - -- /* adjust pm to dpms change */ -- radeon_pm_compute_clocks(rdev); - } - - static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder) -@@ -852,8 +839,6 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) - else - radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); - -- /* adjust pm to dpms change */ -- radeon_pm_compute_clocks(rdev); - } - - static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder) -@@ -1183,6 +1168,17 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; - bool color = true; -+ struct drm_crtc *crtc; -+ -+ /* find out if crtc2 is in use or if this encoder is using it */ -+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); -+ if ((radeon_crtc->crtc_id == 1) && crtc->enabled) { -+ if (encoder->crtc != crtc) { -+ return connector_status_disconnected; -+ } -+ } -+ } - - if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO || - connector->connector_type == DRM_MODE_CONNECTOR_Composite || -diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h -index 5413fcd..67358ba 100644 ---- a/drivers/gpu/drm/radeon/radeon_mode.h -+++ b/drivers/gpu/drm/radeon/radeon_mode.h -@@ -34,11 +34,12 @@ - #include - #include - #include -+#include - #include - #include - #include --#include "radeon_fixed.h" - -+struct radeon_bo; - struct radeon_device; - - #define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) -@@ -65,6 +66,16 @@ enum radeon_tv_std { - TV_STD_PAL_N, - }; - -+enum radeon_hpd_id { -+ RADEON_HPD_1 = 0, -+ RADEON_HPD_2, -+ RADEON_HPD_3, -+ RADEON_HPD_4, -+ RADEON_HPD_5, -+ RADEON_HPD_6, -+ RADEON_HPD_NONE = 0xff, -+}; -+ - /* radeon gpio-based i2c - * 1. "mask" reg and bits - * grabs the gpio pins for software use -@@ -84,7 +95,7 @@ struct radeon_i2c_bus_rec { - /* id used by atom */ - uint8_t i2c_id; - /* id used by atom */ -- uint8_t hpd_id; -+ enum radeon_hpd_id hpd; - /* can be used with hw i2c engine */ - bool hw_capable; - /* uses multi-media i2c engine */ -@@ -202,6 +213,8 @@ enum radeon_dvo_chip { - DVO_SIL1178, - }; - -+struct radeon_fbdev; -+ - struct radeon_mode_info { - struct atom_context *atom_context; - struct card_info *atom_card_info; -@@ -218,6 +231,9 @@ struct radeon_mode_info { - struct drm_property *tmds_pll_property; - /* hardcoded DFP edid from BIOS */ - struct edid *bios_hardcoded_edid; -+ -+ /* pointer to fbdev info structure */ -+ struct radeon_fbdev *rfbdev; - }; - - #define MAX_H_CODE_TIMING_LEN 32 -@@ -339,6 +355,7 @@ struct radeon_encoder { - enum radeon_rmx_type rmx_type; - struct drm_display_mode native_mode; - void *enc_priv; -+ int audio_polling_active; - int hdmi_offset; - int hdmi_config_offset; - int hdmi_audio_workaround; -@@ -363,16 +380,6 @@ struct radeon_gpio_rec { - u32 mask; - }; - --enum radeon_hpd_id { -- RADEON_HPD_NONE = 0, -- RADEON_HPD_1, -- RADEON_HPD_2, -- RADEON_HPD_3, -- RADEON_HPD_4, -- RADEON_HPD_5, -- RADEON_HPD_6, --}; -- - struct radeon_hpd { - enum radeon_hpd_id hpd; - u8 plugged_state; -@@ -532,11 +539,10 @@ extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, - u16 blue, int regno); - extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, int regno); --struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev, -- struct drm_mode_fb_cmd *mode_cmd, -- struct drm_gem_object *obj); -- --int radeonfb_probe(struct drm_device *dev); -+void radeon_framebuffer_init(struct drm_device *dev, -+ struct radeon_framebuffer *rfb, -+ struct drm_mode_fb_cmd *mode_cmd, -+ struct drm_gem_object *obj); - - int 
radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); - bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev); -@@ -575,4 +581,13 @@ void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder, - void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); -+ -+/* fbdev layer */ -+int radeon_fbdev_init(struct radeon_device *rdev); -+void radeon_fbdev_fini(struct radeon_device *rdev); -+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state); -+int radeon_fbdev_total_size(struct radeon_device *rdev); -+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); -+ -+void radeon_fb_output_poll_changed(struct radeon_device *rdev); - #endif -diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c -index 1227747..d5b9373 100644 ---- a/drivers/gpu/drm/radeon/radeon_object.c -+++ b/drivers/gpu/drm/radeon/radeon_object.c -@@ -112,9 +112,11 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, - - radeon_ttm_placement_from_domain(bo, domain); - /* Kernel allocation are uninterruptible */ -+ mutex_lock(&rdev->vram_mutex); - r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, - &bo->placement, 0, 0, !kernel, NULL, size, - &radeon_ttm_bo_destroy); -+ mutex_unlock(&rdev->vram_mutex); - if (unlikely(r != 0)) { - if (r != -ERESTARTSYS) - dev_err(rdev->dev, -@@ -166,11 +168,15 @@ void radeon_bo_kunmap(struct radeon_bo *bo) - void radeon_bo_unref(struct radeon_bo **bo) - { - struct ttm_buffer_object *tbo; -+ struct radeon_device *rdev; - - if ((*bo) == NULL) - return; -+ rdev = (*bo)->rdev; - tbo = &((*bo)->tbo); -+ mutex_lock(&rdev->vram_mutex); - ttm_bo_unref(&tbo); -+ mutex_unlock(&rdev->vram_mutex); - if (tbo == NULL) - *bo = NULL; - } -@@ -192,7 +198,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) - } - for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; -- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); -+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); - if (likely(r == 0)) { - bo->pin_count = 1; - if (gpu_addr != NULL) -@@ -216,7 +222,7 @@ int radeon_bo_unpin(struct radeon_bo *bo) - return 0; - for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; -- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); -+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); - if (unlikely(r != 0)) - dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); - return r; -@@ -295,6 +301,7 @@ int radeon_bo_list_reserve(struct list_head *head) - r = radeon_bo_reserve(lobj->bo, false); - if (unlikely(r != 0)) - return r; -+ lobj->reserved = true; - } - return 0; - } -@@ -305,7 +312,7 @@ void radeon_bo_list_unreserve(struct list_head *head) - - list_for_each_entry(lobj, head, list) { - /* only unreserve object we successfully reserved */ -- if (radeon_bo_is_reserved(lobj->bo)) -+ if (lobj->reserved && radeon_bo_is_reserved(lobj->bo)) - radeon_bo_unreserve(lobj->bo); - } - } -@@ -316,6 +323,9 @@ int radeon_bo_list_validate(struct list_head *head) - struct radeon_bo *bo; - int r; - -+ list_for_each_entry(lobj, head, list) { -+ lobj->reserved = false; -+ } - r = radeon_bo_list_reserve(head); - if (unlikely(r != 0)) { - return r; -@@ -331,7 +341,7 @@ int radeon_bo_list_validate(struct list_head *head) - lobj->rdomain); - } - r = 
ttm_bo_validate(&bo->tbo, &bo->placement, -- true, false); -+ true, false, false); - if (unlikely(r)) - return r; - } -@@ -499,11 +509,33 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo, - radeon_bo_check_tiling(rbo, 0, 1); - } - --void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) -+int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) - { -+ struct radeon_device *rdev; - struct radeon_bo *rbo; -+ unsigned long offset, size; -+ int r; -+ - if (!radeon_ttm_bo_is_radeon_bo(bo)) -- return; -+ return 0; - rbo = container_of(bo, struct radeon_bo, tbo); - radeon_bo_check_tiling(rbo, 0, 0); -+ rdev = rbo->rdev; -+ if (bo->mem.mem_type == TTM_PL_VRAM) { -+ size = bo->mem.num_pages << PAGE_SHIFT; -+ offset = bo->mem.mm_node->start << PAGE_SHIFT; -+ if ((offset + size) > rdev->mc.visible_vram_size) { -+ /* hurrah the memory is not visible ! */ -+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); -+ rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; -+ r = ttm_bo_validate(bo, &rbo->placement, false, true, false); -+ if (unlikely(r != 0)) -+ return r; -+ offset = bo->mem.mm_node->start << PAGE_SHIFT; -+ /* this should not happen */ -+ if ((offset + size) > rdev->mc.visible_vram_size) -+ return -EINVAL; -+ } -+ } -+ return 0; - } -diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h -index 7ab43de..353998d 100644 ---- a/drivers/gpu/drm/radeon/radeon_object.h -+++ b/drivers/gpu/drm/radeon/radeon_object.h -@@ -168,6 +168,6 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, - bool force_drop); - extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem); --extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); -+extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); - extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); - #endif -diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c -index a4b5749..63f679a 100644 ---- a/drivers/gpu/drm/radeon/radeon_pm.c -+++ b/drivers/gpu/drm/radeon/radeon_pm.c -@@ -23,25 +23,17 @@ - #include "drmP.h" - #include "radeon.h" - #include "avivod.h" -+#ifdef CONFIG_ACPI -+#include -+#endif -+#include - - #define RADEON_IDLE_LOOP_MS 100 - #define RADEON_RECLOCK_DELAY_MS 200 - #define RADEON_WAIT_VBLANK_TIMEOUT 200 -+#define RADEON_WAIT_IDLE_TIMEOUT 200 - --static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); --static void radeon_pm_set_clocks_locked(struct radeon_device *rdev); --static void radeon_pm_set_clocks(struct radeon_device *rdev); --static void radeon_pm_idle_work_handler(struct work_struct *work); --static int radeon_debugfs_pm_init(struct radeon_device *rdev); -- --static const char *pm_state_names[4] = { -- "PM_STATE_DISABLED", -- "PM_STATE_MINIMUM", -- "PM_STATE_PAUSED", -- "PM_STATE_ACTIVE" --}; -- --static const char *pm_state_types[5] = { -+static const char *radeon_pm_state_type_name[5] = { - "Default", - "Powersave", - "Battery", -@@ -49,138 +41,109 @@ static const char *pm_state_types[5] = { - "Performance", - }; - --static void radeon_print_power_mode_info(struct radeon_device *rdev) -+static void radeon_dynpm_idle_work_handler(struct work_struct *work); -+static int radeon_debugfs_pm_init(struct radeon_device *rdev); -+static bool radeon_pm_in_vbl(struct radeon_device *rdev); -+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); -+static void 
radeon_pm_update_profile(struct radeon_device *rdev); -+static void radeon_pm_set_clocks(struct radeon_device *rdev); -+ -+#define ACPI_AC_CLASS "ac_adapter" -+ -+#ifdef CONFIG_ACPI -+static int radeon_acpi_event(struct notifier_block *nb, -+ unsigned long val, -+ void *data) - { -- int i, j; -- bool is_default; -+ struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb); -+ struct acpi_bus_event *entry = (struct acpi_bus_event *)data; - -- DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states); -- for (i = 0; i < rdev->pm.num_power_states; i++) { -- if (rdev->pm.default_power_state == &rdev->pm.power_state[i]) -- is_default = true; -+ if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) { -+ if (power_supply_is_system_supplied() > 0) -+ DRM_DEBUG("pm: AC\n"); - else -- is_default = false; -- DRM_INFO("State %d %s %s\n", i, -- pm_state_types[rdev->pm.power_state[i].type], -- is_default ? "(default)" : ""); -- if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP)) -- DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes); -- DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes); -- for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) { -- if (rdev->flags & RADEON_IS_IGP) -- DRM_INFO("\t\t%d engine: %d\n", -- j, -- rdev->pm.power_state[i].clock_info[j].sclk * 10); -- else -- DRM_INFO("\t\t%d engine/memory: %d/%d\n", -- j, -- rdev->pm.power_state[i].clock_info[j].sclk * 10, -- rdev->pm.power_state[i].clock_info[j].mclk * 10); -+ DRM_DEBUG("pm: DC\n"); -+ -+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) { -+ if (rdev->pm.profile == PM_PROFILE_AUTO) { -+ mutex_lock(&rdev->pm.mutex); -+ radeon_pm_update_profile(rdev); -+ radeon_pm_set_clocks(rdev); -+ mutex_unlock(&rdev->pm.mutex); -+ } - } - } -+ -+ return NOTIFY_OK; - } -+#endif - --static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev, -- enum radeon_pm_state_type type) -+static void radeon_pm_update_profile(struct radeon_device *rdev) - { -- int i, j; -- enum radeon_pm_state_type wanted_types[2]; -- int wanted_count; -- -- switch (type) { -- case POWER_STATE_TYPE_DEFAULT: -- default: -- return rdev->pm.default_power_state; -- case POWER_STATE_TYPE_POWERSAVE: -- if (rdev->flags & RADEON_IS_MOBILITY) { -- wanted_types[0] = POWER_STATE_TYPE_POWERSAVE; -- wanted_types[1] = POWER_STATE_TYPE_BATTERY; -- wanted_count = 2; -- } else { -- wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE; -- wanted_count = 1; -- } -+ switch (rdev->pm.profile) { -+ case PM_PROFILE_DEFAULT: -+ rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX; - break; -- case POWER_STATE_TYPE_BATTERY: -- if (rdev->flags & RADEON_IS_MOBILITY) { -- wanted_types[0] = POWER_STATE_TYPE_BATTERY; -- wanted_types[1] = POWER_STATE_TYPE_POWERSAVE; -- wanted_count = 2; -+ case PM_PROFILE_AUTO: -+ if (power_supply_is_system_supplied() > 0) { -+ if (rdev->pm.active_crtc_count > 1) -+ rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; -+ else -+ rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; - } else { -- wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE; -- wanted_count = 1; -+ if (rdev->pm.active_crtc_count > 1) -+ rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX; -+ else -+ rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX; - } - break; -- case POWER_STATE_TYPE_BALANCED: -- case POWER_STATE_TYPE_PERFORMANCE: -- wanted_types[0] = type; -- wanted_count = 1; -+ case PM_PROFILE_LOW: -+ if (rdev->pm.active_crtc_count > 1) -+ rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX; -+ else -+ 
rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX; - break; -- } -- -- for (i = 0; i < wanted_count; i++) { -- for (j = 0; j < rdev->pm.num_power_states; j++) { -- if (rdev->pm.power_state[j].type == wanted_types[i]) -- return &rdev->pm.power_state[j]; -- } -- } -- -- return rdev->pm.default_power_state; --} -- --static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev, -- struct radeon_power_state *power_state, -- enum radeon_pm_clock_mode_type type) --{ -- switch (type) { -- case POWER_MODE_TYPE_DEFAULT: -- default: -- return power_state->default_clock_mode; -- case POWER_MODE_TYPE_LOW: -- return &power_state->clock_info[0]; -- case POWER_MODE_TYPE_MID: -- if (power_state->num_clock_modes > 2) -- return &power_state->clock_info[1]; -+ case PM_PROFILE_MID: -+ if (rdev->pm.active_crtc_count > 1) -+ rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX; -+ else -+ rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX; -+ break; -+ case PM_PROFILE_HIGH: -+ if (rdev->pm.active_crtc_count > 1) -+ rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; - else -- return &power_state->clock_info[0]; -+ rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; - break; -- case POWER_MODE_TYPE_HIGH: -- return &power_state->clock_info[power_state->num_clock_modes - 1]; - } - -+ if (rdev->pm.active_crtc_count == 0) { -+ rdev->pm.requested_power_state_index = -+ rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx; -+ rdev->pm.requested_clock_mode_index = -+ rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx; -+ } else { -+ rdev->pm.requested_power_state_index = -+ rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx; -+ rdev->pm.requested_clock_mode_index = -+ rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx; -+ } - } - --static void radeon_get_power_state(struct radeon_device *rdev, -- enum radeon_pm_action action) -+static void radeon_unmap_vram_bos(struct radeon_device *rdev) - { -- switch (action) { -- case PM_ACTION_MINIMUM: -- rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY); -- rdev->pm.requested_clock_mode = -- radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW); -- break; -- case PM_ACTION_DOWNCLOCK: -- rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE); -- rdev->pm.requested_clock_mode = -- radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID); -- break; -- case PM_ACTION_UPCLOCK: -- rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT); -- rdev->pm.requested_clock_mode = -- radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH); -- break; -- case PM_ACTION_NONE: -- default: -- DRM_ERROR("Requested mode for not defined action\n"); -+ struct radeon_bo *bo, *n; -+ -+ if (list_empty(&rdev->gem.objects)) - return; -+ -+ list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { -+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) -+ ttm_bo_unmap_virtual(&bo->tbo); - } -- DRM_INFO("Requested: e: %d m: %d p: %d\n", -- rdev->pm.requested_clock_mode->sclk, -- rdev->pm.requested_clock_mode->mclk, -- rdev->pm.requested_power_state->non_clock_info.pcie_lanes); - } - --static inline void radeon_sync_with_vblank(struct radeon_device *rdev) -+static void radeon_sync_with_vblank(struct radeon_device *rdev) - { - if (rdev->pm.active_crtcs) { - rdev->pm.vblank_sync = false; -@@ -192,73 +155,359 @@ static inline void radeon_sync_with_vblank(struct radeon_device *rdev) - - static void 
radeon_set_power_state(struct radeon_device *rdev) - { -- /* if *_clock_mode are the same, *_power_state are as well */ -- if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode) -- return; -+ u32 sclk, mclk; -+ bool misc_after = false; - -- DRM_INFO("Setting: e: %d m: %d p: %d\n", -- rdev->pm.requested_clock_mode->sclk, -- rdev->pm.requested_clock_mode->mclk, -- rdev->pm.requested_power_state->non_clock_info.pcie_lanes); -+ if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && -+ (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) -+ return; - -- /* set pcie lanes */ -- /* TODO */ -+ if (radeon_gui_idle(rdev)) { -+ sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. -+ clock_info[rdev->pm.requested_clock_mode_index].sclk; -+ if (sclk > rdev->clock.default_sclk) -+ sclk = rdev->clock.default_sclk; - -- /* set voltage */ -- /* TODO */ -+ mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. -+ clock_info[rdev->pm.requested_clock_mode_index].mclk; -+ if (mclk > rdev->clock.default_mclk) -+ mclk = rdev->clock.default_mclk; - -- /* set engine clock */ -- radeon_sync_with_vblank(rdev); -- radeon_pm_debug_check_in_vbl(rdev, false); -- radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk); -- radeon_pm_debug_check_in_vbl(rdev, true); -+ /* upvolt before raising clocks, downvolt after lowering clocks */ -+ if (sclk < rdev->pm.current_sclk) -+ misc_after = true; - --#if 0 -- /* set memory clock */ -- if (rdev->asic->set_memory_clock) { - radeon_sync_with_vblank(rdev); -- radeon_pm_debug_check_in_vbl(rdev, false); -- radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk); -- radeon_pm_debug_check_in_vbl(rdev, true); -+ -+ if (rdev->pm.pm_method == PM_METHOD_DYNPM) { -+ if (!radeon_pm_in_vbl(rdev)) -+ return; -+ } -+ -+ radeon_pm_prepare(rdev); -+ -+ if (!misc_after) -+ /* voltage, pcie lanes, etc.*/ -+ radeon_pm_misc(rdev); -+ -+ /* set engine clock */ -+ if (sclk != rdev->pm.current_sclk) { -+ radeon_pm_debug_check_in_vbl(rdev, false); -+ radeon_set_engine_clock(rdev, sclk); -+ radeon_pm_debug_check_in_vbl(rdev, true); -+ rdev->pm.current_sclk = sclk; -+ DRM_DEBUG("Setting: e: %d\n", sclk); -+ } -+ -+ /* set memory clock */ -+ if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) { -+ radeon_pm_debug_check_in_vbl(rdev, false); -+ radeon_set_memory_clock(rdev, mclk); -+ radeon_pm_debug_check_in_vbl(rdev, true); -+ rdev->pm.current_mclk = mclk; -+ DRM_DEBUG("Setting: m: %d\n", mclk); -+ } -+ -+ if (misc_after) -+ /* voltage, pcie lanes, etc.*/ -+ radeon_pm_misc(rdev); -+ -+ radeon_pm_finish(rdev); -+ -+ rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index; -+ rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index; -+ } else -+ DRM_DEBUG("pm: GUI not idle!!!\n"); -+} -+ -+static void radeon_pm_set_clocks(struct radeon_device *rdev) -+{ -+ int i; -+ -+ mutex_lock(&rdev->ddev->struct_mutex); -+ mutex_lock(&rdev->vram_mutex); -+ mutex_lock(&rdev->cp.mutex); -+ -+ /* gui idle int has issues on older chips it seems */ -+ if (rdev->family >= CHIP_R600) { -+ if (rdev->irq.installed) { -+ /* wait for GPU idle */ -+ rdev->pm.gui_idle = false; -+ rdev->irq.gui_idle = true; -+ radeon_irq_set(rdev); -+ wait_event_interruptible_timeout( -+ rdev->irq.idle_queue, rdev->pm.gui_idle, -+ msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT)); -+ rdev->irq.gui_idle = false; -+ radeon_irq_set(rdev); -+ } -+ } else { -+ if (rdev->cp.ready) { -+ struct radeon_fence 
*fence; -+ radeon_ring_alloc(rdev, 64); -+ radeon_fence_create(rdev, &fence); -+ radeon_fence_emit(rdev, fence); -+ radeon_ring_commit(rdev); -+ radeon_fence_wait(fence, false); -+ radeon_fence_unref(&fence); -+ } - } --#endif -+ radeon_unmap_vram_bos(rdev); -+ -+ if (rdev->irq.installed) { -+ for (i = 0; i < rdev->num_crtc; i++) { -+ if (rdev->pm.active_crtcs & (1 << i)) { -+ rdev->pm.req_vblank |= (1 << i); -+ drm_vblank_get(rdev->ddev, i); -+ } -+ } -+ } -+ -+ radeon_set_power_state(rdev); -+ -+ if (rdev->irq.installed) { -+ for (i = 0; i < rdev->num_crtc; i++) { -+ if (rdev->pm.req_vblank & (1 << i)) { -+ rdev->pm.req_vblank &= ~(1 << i); -+ drm_vblank_put(rdev->ddev, i); -+ } -+ } -+ } -+ -+ /* update display watermarks based on new power state */ -+ radeon_update_bandwidth_info(rdev); -+ if (rdev->pm.active_crtc_count) -+ radeon_bandwidth_update(rdev); -+ -+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; -+ -+ mutex_unlock(&rdev->cp.mutex); -+ mutex_unlock(&rdev->vram_mutex); -+ mutex_unlock(&rdev->ddev->struct_mutex); -+} -+ -+static void radeon_pm_print_states(struct radeon_device *rdev) -+{ -+ int i, j; -+ struct radeon_power_state *power_state; -+ struct radeon_pm_clock_info *clock_info; -+ -+ DRM_DEBUG("%d Power State(s)\n", rdev->pm.num_power_states); -+ for (i = 0; i < rdev->pm.num_power_states; i++) { -+ power_state = &rdev->pm.power_state[i]; -+ DRM_DEBUG("State %d: %s\n", i, -+ radeon_pm_state_type_name[power_state->type]); -+ if (i == rdev->pm.default_power_state_index) -+ DRM_DEBUG("\tDefault"); -+ if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP)) -+ DRM_DEBUG("\t%d PCIE Lanes\n", power_state->pcie_lanes); -+ if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) -+ DRM_DEBUG("\tSingle display only\n"); -+ DRM_DEBUG("\t%d Clock Mode(s)\n", power_state->num_clock_modes); -+ for (j = 0; j < power_state->num_clock_modes; j++) { -+ clock_info = &(power_state->clock_info[j]); -+ if (rdev->flags & RADEON_IS_IGP) -+ DRM_DEBUG("\t\t%d e: %d%s\n", -+ j, -+ clock_info->sclk * 10, -+ clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : ""); -+ else -+ DRM_DEBUG("\t\t%d e: %d\tm: %d\tv: %d%s\n", -+ j, -+ clock_info->sclk * 10, -+ clock_info->mclk * 10, -+ clock_info->voltage.voltage, -+ clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : ""); -+ } -+ } -+} -+ -+static ssize_t radeon_get_pm_profile(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); -+ struct radeon_device *rdev = ddev->dev_private; -+ int cp = rdev->pm.profile; -+ -+ return snprintf(buf, PAGE_SIZE, "%s\n", -+ (cp == PM_PROFILE_AUTO) ? "auto" : -+ (cp == PM_PROFILE_LOW) ? "low" : -+ (cp == PM_PROFILE_HIGH) ? 
"high" : "default"); -+} -+ -+static ssize_t radeon_set_pm_profile(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, -+ size_t count) -+{ -+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); -+ struct radeon_device *rdev = ddev->dev_private; -+ -+ mutex_lock(&rdev->pm.mutex); -+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) { -+ if (strncmp("default", buf, strlen("default")) == 0) -+ rdev->pm.profile = PM_PROFILE_DEFAULT; -+ else if (strncmp("auto", buf, strlen("auto")) == 0) -+ rdev->pm.profile = PM_PROFILE_AUTO; -+ else if (strncmp("low", buf, strlen("low")) == 0) -+ rdev->pm.profile = PM_PROFILE_LOW; -+ else if (strncmp("mid", buf, strlen("mid")) == 0) -+ rdev->pm.profile = PM_PROFILE_MID; -+ else if (strncmp("high", buf, strlen("high")) == 0) -+ rdev->pm.profile = PM_PROFILE_HIGH; -+ else { -+ DRM_ERROR("invalid power profile!\n"); -+ goto fail; -+ } -+ radeon_pm_update_profile(rdev); -+ radeon_pm_set_clocks(rdev); -+ } -+fail: -+ mutex_unlock(&rdev->pm.mutex); -+ -+ return count; -+} -+ -+static ssize_t radeon_get_pm_method(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); -+ struct radeon_device *rdev = ddev->dev_private; -+ int pm = rdev->pm.pm_method; -+ -+ return snprintf(buf, PAGE_SIZE, "%s\n", -+ (pm == PM_METHOD_DYNPM) ? "dynpm" : "profile"); -+} -+ -+static ssize_t radeon_set_pm_method(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, -+ size_t count) -+{ -+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); -+ struct radeon_device *rdev = ddev->dev_private; -+ -+ -+ if (strncmp("dynpm", buf, strlen("dynpm")) == 0) { -+ mutex_lock(&rdev->pm.mutex); -+ rdev->pm.pm_method = PM_METHOD_DYNPM; -+ rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; -+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; -+ mutex_unlock(&rdev->pm.mutex); -+ } else if (strncmp("profile", buf, strlen("profile")) == 0) { -+ mutex_lock(&rdev->pm.mutex); -+ rdev->pm.pm_method = PM_METHOD_PROFILE; -+ /* disable dynpm */ -+ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; -+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; -+ cancel_delayed_work(&rdev->pm.dynpm_idle_work); -+ mutex_unlock(&rdev->pm.mutex); -+ } else { -+ DRM_ERROR("invalid power method!\n"); -+ goto fail; -+ } -+ radeon_pm_compute_clocks(rdev); -+fail: -+ return count; -+} -+ -+static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile); -+static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method); - -- rdev->pm.current_power_state = rdev->pm.requested_power_state; -- rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode; -+void radeon_pm_suspend(struct radeon_device *rdev) -+{ -+ mutex_lock(&rdev->pm.mutex); -+ cancel_delayed_work(&rdev->pm.dynpm_idle_work); -+ mutex_unlock(&rdev->pm.mutex); -+} -+ -+void radeon_pm_resume(struct radeon_device *rdev) -+{ -+ /* asic init will reset the default power state */ -+ mutex_lock(&rdev->pm.mutex); -+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; -+ rdev->pm.current_clock_mode_index = 0; -+ rdev->pm.current_sclk = rdev->clock.default_sclk; -+ rdev->pm.current_mclk = rdev->clock.default_mclk; -+ rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; -+ mutex_unlock(&rdev->pm.mutex); -+ radeon_pm_compute_clocks(rdev); - } - - int radeon_pm_init(struct radeon_device *rdev) - { -- rdev->pm.state = 
PM_STATE_DISABLED; -- rdev->pm.planned_action = PM_ACTION_NONE; -- rdev->pm.downclocked = false; -+ int ret; -+ /* default to profile method */ -+ rdev->pm.pm_method = PM_METHOD_PROFILE; -+ rdev->pm.profile = PM_PROFILE_DEFAULT; -+ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; -+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; -+ rdev->pm.dynpm_can_upclock = true; -+ rdev->pm.dynpm_can_downclock = true; -+ rdev->pm.current_sclk = rdev->clock.default_sclk; -+ rdev->pm.current_mclk = rdev->clock.default_mclk; - - if (rdev->bios) { - if (rdev->is_atom_bios) - radeon_atombios_get_power_modes(rdev); - else - radeon_combios_get_power_modes(rdev); -- radeon_print_power_mode_info(rdev); -+ radeon_pm_print_states(rdev); -+ radeon_pm_init_profile(rdev); - } - -- if (radeon_debugfs_pm_init(rdev)) { -- DRM_ERROR("Failed to register debugfs file for PM!\n"); -- } -+ if (rdev->pm.num_power_states > 1) { -+ /* where's the best place to put these? */ -+ ret = device_create_file(rdev->dev, &dev_attr_power_profile); -+ if (ret) -+ DRM_ERROR("failed to create device file for power profile\n"); -+ ret = device_create_file(rdev->dev, &dev_attr_power_method); -+ if (ret) -+ DRM_ERROR("failed to create device file for power method\n"); -+ -+#ifdef CONFIG_ACPI -+ rdev->acpi_nb.notifier_call = radeon_acpi_event; -+ register_acpi_notifier(&rdev->acpi_nb); -+#endif -+ INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); - -- INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler); -+ if (radeon_debugfs_pm_init(rdev)) { -+ DRM_ERROR("Failed to register debugfs file for PM!\n"); -+ } - -- if (radeon_dynpm != -1 && radeon_dynpm) { -- rdev->pm.state = PM_STATE_PAUSED; -- DRM_INFO("radeon: dynamic power management enabled\n"); -+ DRM_INFO("radeon: power management initialized\n"); - } - -- DRM_INFO("radeon: power management initialized\n"); -- - return 0; - } - - void radeon_pm_fini(struct radeon_device *rdev) - { -+ if (rdev->pm.num_power_states > 1) { -+ mutex_lock(&rdev->pm.mutex); -+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) { -+ rdev->pm.profile = PM_PROFILE_DEFAULT; -+ radeon_pm_update_profile(rdev); -+ radeon_pm_set_clocks(rdev); -+ } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { -+ /* cancel work */ -+ cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); -+ /* reset default clocks */ -+ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; -+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; -+ radeon_pm_set_clocks(rdev); -+ } -+ mutex_unlock(&rdev->pm.mutex); -+ -+ device_remove_file(rdev->dev, &dev_attr_power_profile); -+ device_remove_file(rdev->dev, &dev_attr_power_method); -+#ifdef CONFIG_ACPI -+ unregister_acpi_notifier(&rdev->acpi_nb); -+#endif -+ } -+ - if (rdev->pm.i2c_bus) - radeon_i2c_destroy(rdev->pm.i2c_bus); - } -@@ -266,146 +515,167 @@ void radeon_pm_fini(struct radeon_device *rdev) - void radeon_pm_compute_clocks(struct radeon_device *rdev) - { - struct drm_device *ddev = rdev->ddev; -- struct drm_connector *connector; -+ struct drm_crtc *crtc; - struct radeon_crtc *radeon_crtc; -- int count = 0; - -- if (rdev->pm.state == PM_STATE_DISABLED) -+ if (rdev->pm.num_power_states < 2) - return; - - mutex_lock(&rdev->pm.mutex); - - rdev->pm.active_crtcs = 0; -- list_for_each_entry(connector, -- &ddev->mode_config.connector_list, head) { -- if (connector->encoder && -- connector->encoder->crtc && -- connector->dpms != DRM_MODE_DPMS_OFF) { -- radeon_crtc = to_radeon_crtc(connector->encoder->crtc); -+ rdev->pm.active_crtc_count = 0; -+ list_for_each_entry(crtc, -+ 
&ddev->mode_config.crtc_list, head) { -+ radeon_crtc = to_radeon_crtc(crtc); -+ if (radeon_crtc->enabled) { - rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); -- ++count; -+ rdev->pm.active_crtc_count++; - } - } - -- if (count > 1) { -- if (rdev->pm.state == PM_STATE_ACTIVE) { -- cancel_delayed_work(&rdev->pm.idle_work); -- -- rdev->pm.state = PM_STATE_PAUSED; -- rdev->pm.planned_action = PM_ACTION_UPCLOCK; -- if (rdev->pm.downclocked) -- radeon_pm_set_clocks(rdev); -- -- DRM_DEBUG("radeon: dynamic power management deactivated\n"); -- } -- } else if (count == 1) { -- /* TODO: Increase clocks if needed for current mode */ -- -- if (rdev->pm.state == PM_STATE_MINIMUM) { -- rdev->pm.state = PM_STATE_ACTIVE; -- rdev->pm.planned_action = PM_ACTION_UPCLOCK; -- radeon_pm_set_clocks(rdev); -- -- queue_delayed_work(rdev->wq, &rdev->pm.idle_work, -- msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); -- } -- else if (rdev->pm.state == PM_STATE_PAUSED) { -- rdev->pm.state = PM_STATE_ACTIVE; -- queue_delayed_work(rdev->wq, &rdev->pm.idle_work, -- msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); -- DRM_DEBUG("radeon: dynamic power management activated\n"); -- } -- } -- else { /* count == 0 */ -- if (rdev->pm.state != PM_STATE_MINIMUM) { -- cancel_delayed_work(&rdev->pm.idle_work); -- -- rdev->pm.state = PM_STATE_MINIMUM; -- rdev->pm.planned_action = PM_ACTION_MINIMUM; -- radeon_pm_set_clocks(rdev); -+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) { -+ radeon_pm_update_profile(rdev); -+ radeon_pm_set_clocks(rdev); -+ } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { -+ if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) { -+ if (rdev->pm.active_crtc_count > 1) { -+ if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { -+ cancel_delayed_work(&rdev->pm.dynpm_idle_work); -+ -+ rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; -+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; -+ radeon_pm_get_dynpm_state(rdev); -+ radeon_pm_set_clocks(rdev); -+ -+ DRM_DEBUG("radeon: dynamic power management deactivated\n"); -+ } -+ } else if (rdev->pm.active_crtc_count == 1) { -+ /* TODO: Increase clocks if needed for current mode */ -+ -+ if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) { -+ rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; -+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK; -+ radeon_pm_get_dynpm_state(rdev); -+ radeon_pm_set_clocks(rdev); -+ -+ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, -+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); -+ } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { -+ rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; -+ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, -+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); -+ DRM_DEBUG("radeon: dynamic power management activated\n"); -+ } -+ } else { /* count == 0 */ -+ if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) { -+ cancel_delayed_work(&rdev->pm.dynpm_idle_work); -+ -+ rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM; -+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM; -+ radeon_pm_get_dynpm_state(rdev); -+ radeon_pm_set_clocks(rdev); -+ } -+ } - } - } - - mutex_unlock(&rdev->pm.mutex); - } - --static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) -+static bool radeon_pm_in_vbl(struct radeon_device *rdev) - { -- u32 stat_crtc1 = 0, stat_crtc2 = 0; -+ u32 stat_crtc = 0, vbl = 0, position = 0; - bool in_vbl = true; - -- if (ASIC_IS_AVIVO(rdev)) { -+ if (ASIC_IS_DCE4(rdev)) { - if (rdev->pm.active_crtcs & (1 << 0)) { -- stat_crtc1 = RREG32(D1CRTC_STATUS); -- if (!(stat_crtc1 & 1)) -+ vbl = 
RREG32(EVERGREEN_CRTC_V_BLANK_START_END + -+ EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; -+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + -+ EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; -+ } -+ if (rdev->pm.active_crtcs & (1 << 1)) { -+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + -+ EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff; -+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + -+ EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff; -+ } -+ if (rdev->pm.active_crtcs & (1 << 2)) { -+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + -+ EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff; -+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + -+ EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff; -+ } -+ if (rdev->pm.active_crtcs & (1 << 3)) { -+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + -+ EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff; -+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + -+ EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff; -+ } -+ if (rdev->pm.active_crtcs & (1 << 4)) { -+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + -+ EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff; -+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + -+ EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff; -+ } -+ if (rdev->pm.active_crtcs & (1 << 5)) { -+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + -+ EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff; -+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + -+ EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff; -+ } -+ } else if (ASIC_IS_AVIVO(rdev)) { -+ if (rdev->pm.active_crtcs & (1 << 0)) { -+ vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff; -+ position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff; -+ } -+ if (rdev->pm.active_crtcs & (1 << 1)) { -+ vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff; -+ position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff; -+ } -+ if (position < vbl && position > 1) -+ in_vbl = false; -+ } else { -+ if (rdev->pm.active_crtcs & (1 << 0)) { -+ stat_crtc = RREG32(RADEON_CRTC_STATUS); -+ if (!(stat_crtc & 1)) - in_vbl = false; - } - if (rdev->pm.active_crtcs & (1 << 1)) { -- stat_crtc2 = RREG32(D2CRTC_STATUS); -- if (!(stat_crtc2 & 1)) -+ stat_crtc = RREG32(RADEON_CRTC2_STATUS); -+ if (!(stat_crtc & 1)) - in_vbl = false; - } - } -- if (in_vbl == false) -- DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1, -- stat_crtc2, finish ? 
"exit" : "entry"); -- return in_vbl; --} --static void radeon_pm_set_clocks_locked(struct radeon_device *rdev) --{ -- /*radeon_fence_wait_last(rdev);*/ -- switch (rdev->pm.planned_action) { -- case PM_ACTION_UPCLOCK: -- rdev->pm.downclocked = false; -- break; -- case PM_ACTION_DOWNCLOCK: -- rdev->pm.downclocked = true; -- break; -- case PM_ACTION_MINIMUM: -- break; -- case PM_ACTION_NONE: -- DRM_ERROR("%s: PM_ACTION_NONE\n", __func__); -- break; -- } - -- radeon_set_power_state(rdev); -- rdev->pm.planned_action = PM_ACTION_NONE; -+ if (position < vbl && position > 1) -+ in_vbl = false; -+ -+ return in_vbl; - } - --static void radeon_pm_set_clocks(struct radeon_device *rdev) -+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) - { -- radeon_get_power_state(rdev, rdev->pm.planned_action); -- mutex_lock(&rdev->cp.mutex); -+ u32 stat_crtc = 0; -+ bool in_vbl = radeon_pm_in_vbl(rdev); - -- if (rdev->pm.active_crtcs & (1 << 0)) { -- rdev->pm.req_vblank |= (1 << 0); -- drm_vblank_get(rdev->ddev, 0); -- } -- if (rdev->pm.active_crtcs & (1 << 1)) { -- rdev->pm.req_vblank |= (1 << 1); -- drm_vblank_get(rdev->ddev, 1); -- } -- radeon_pm_set_clocks_locked(rdev); -- if (rdev->pm.req_vblank & (1 << 0)) { -- rdev->pm.req_vblank &= ~(1 << 0); -- drm_vblank_put(rdev->ddev, 0); -- } -- if (rdev->pm.req_vblank & (1 << 1)) { -- rdev->pm.req_vblank &= ~(1 << 1); -- drm_vblank_put(rdev->ddev, 1); -- } -- -- mutex_unlock(&rdev->cp.mutex); -+ if (in_vbl == false) -+ DRM_DEBUG("not in vbl for pm change %08x at %s\n", stat_crtc, -+ finish ? "exit" : "entry"); -+ return in_vbl; - } - --static void radeon_pm_idle_work_handler(struct work_struct *work) -+static void radeon_dynpm_idle_work_handler(struct work_struct *work) - { - struct radeon_device *rdev; -+ int resched; - rdev = container_of(work, struct radeon_device, -- pm.idle_work.work); -+ pm.dynpm_idle_work.work); - -+ resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); - mutex_lock(&rdev->pm.mutex); -- if (rdev->pm.state == PM_STATE_ACTIVE) { -+ if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { - unsigned long irq_flags; - int not_processed = 0; - -@@ -421,35 +691,40 @@ static void radeon_pm_idle_work_handler(struct work_struct *work) - read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); - - if (not_processed >= 3) { /* should upclock */ -- if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) { -- rdev->pm.planned_action = PM_ACTION_NONE; -- } else if (rdev->pm.planned_action == PM_ACTION_NONE && -- rdev->pm.downclocked) { -- rdev->pm.planned_action = -- PM_ACTION_UPCLOCK; -- rdev->pm.action_timeout = jiffies + -+ if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) { -+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; -+ } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && -+ rdev->pm.dynpm_can_upclock) { -+ rdev->pm.dynpm_planned_action = -+ DYNPM_ACTION_UPCLOCK; -+ rdev->pm.dynpm_action_timeout = jiffies + - msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); - } - } else if (not_processed == 0) { /* should downclock */ -- if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) { -- rdev->pm.planned_action = PM_ACTION_NONE; -- } else if (rdev->pm.planned_action == PM_ACTION_NONE && -- !rdev->pm.downclocked) { -- rdev->pm.planned_action = -- PM_ACTION_DOWNCLOCK; -- rdev->pm.action_timeout = jiffies + -+ if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) { -+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; -+ } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && -+ 
rdev->pm.dynpm_can_downclock) { -+ rdev->pm.dynpm_planned_action = -+ DYNPM_ACTION_DOWNCLOCK; -+ rdev->pm.dynpm_action_timeout = jiffies + - msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); - } - } - -- if (rdev->pm.planned_action != PM_ACTION_NONE && -- jiffies > rdev->pm.action_timeout) { -+ /* Note, radeon_pm_set_clocks is called with static_switch set -+ * to false since we want to wait for vbl to avoid flicker. -+ */ -+ if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE && -+ jiffies > rdev->pm.dynpm_action_timeout) { -+ radeon_pm_get_dynpm_state(rdev); - radeon_pm_set_clocks(rdev); - } - } - mutex_unlock(&rdev->pm.mutex); -+ ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); - -- queue_delayed_work(rdev->wq, &rdev->pm.idle_work, -+ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, - msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); - } - -@@ -464,12 +739,13 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) - struct drm_device *dev = node->minor->dev; - struct radeon_device *rdev = dev->dev_private; - -- seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]); - seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); - seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); - seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); - if (rdev->asic->get_memory_clock) - seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); -+ if (rdev->pm.current_vddc) -+ seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc); - if (rdev->asic->get_pcie_lanes) - seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); - -diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h -index eabbc9c..c332f46 100644 ---- a/drivers/gpu/drm/radeon/radeon_reg.h -+++ b/drivers/gpu/drm/radeon/radeon_reg.h -@@ -553,7 +553,6 @@ - # define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16) - #define RADEON_CRTC2_CRNT_FRAME 0x0314 - #define RADEON_CRTC2_GUI_TRIG_VLINE 0x0318 --#define RADEON_CRTC2_STATUS 0x03fc - #define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310 - #define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */ - #define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */ -@@ -995,6 +994,7 @@ - # define RADEON_FP_DETECT_MASK (1 << 4) - # define RADEON_CRTC2_VBLANK_MASK (1 << 9) - # define RADEON_FP2_DETECT_MASK (1 << 10) -+# define RADEON_GUI_IDLE_MASK (1 << 19) - # define RADEON_SW_INT_ENABLE (1 << 25) - #define RADEON_GEN_INT_STATUS 0x0044 - # define AVIVO_DISPLAY_INT_STATUS (1 << 0) -@@ -1006,6 +1006,8 @@ - # define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) - # define RADEON_FP2_DETECT_STAT (1 << 10) - # define RADEON_FP2_DETECT_STAT_ACK (1 << 10) -+# define RADEON_GUI_IDLE_STAT (1 << 19) -+# define RADEON_GUI_IDLE_STAT_ACK (1 << 19) - # define RADEON_SW_INT_FIRE (1 << 26) - # define RADEON_SW_INT_TEST (1 << 25) - # define RADEON_SW_INT_TEST_ACK (1 << 25) -diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c -index f6e1e8d..261e98a 100644 ---- a/drivers/gpu/drm/radeon/radeon_ring.c -+++ b/drivers/gpu/drm/radeon/radeon_ring.c -@@ -219,24 +219,26 @@ int radeon_ib_pool_init(struct radeon_device *rdev) - void radeon_ib_pool_fini(struct radeon_device *rdev) - { - int r; -+ struct radeon_bo *robj; - - if (!rdev->ib_pool.ready) { - return; - } - mutex_lock(&rdev->ib_pool.mutex); - radeon_ib_bogus_cleanup(rdev); -+ robj = rdev->ib_pool.robj; -+ rdev->ib_pool.robj = NULL; -+ mutex_unlock(&rdev->ib_pool.mutex); - -- if (rdev->ib_pool.robj) { -- r = 
radeon_bo_reserve(rdev->ib_pool.robj, false); -+ if (robj) { -+ r = radeon_bo_reserve(robj, false); - if (likely(r == 0)) { -- radeon_bo_kunmap(rdev->ib_pool.robj); -- radeon_bo_unpin(rdev->ib_pool.robj); -- radeon_bo_unreserve(rdev->ib_pool.robj); -+ radeon_bo_kunmap(robj); -+ radeon_bo_unpin(robj); -+ radeon_bo_unreserve(robj); - } -- radeon_bo_unref(&rdev->ib_pool.robj); -- rdev->ib_pool.robj = NULL; -+ radeon_bo_unref(&robj); - } -- mutex_unlock(&rdev->ib_pool.mutex); - } - - -@@ -258,31 +260,41 @@ void radeon_ring_free_size(struct radeon_device *rdev) - } - } - --int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw) -+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw) - { - int r; - - /* Align requested size with padding so unlock_commit can - * pad safely */ - ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask; -- mutex_lock(&rdev->cp.mutex); - while (ndw > (rdev->cp.ring_free_dw - 1)) { - radeon_ring_free_size(rdev); - if (ndw < rdev->cp.ring_free_dw) { - break; - } - r = radeon_fence_wait_next(rdev); -- if (r) { -- mutex_unlock(&rdev->cp.mutex); -+ if (r) - return r; -- } - } - rdev->cp.count_dw = ndw; - rdev->cp.wptr_old = rdev->cp.wptr; - return 0; - } - --void radeon_ring_unlock_commit(struct radeon_device *rdev) -+int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw) -+{ -+ int r; -+ -+ mutex_lock(&rdev->cp.mutex); -+ r = radeon_ring_alloc(rdev, ndw); -+ if (r) { -+ mutex_unlock(&rdev->cp.mutex); -+ return r; -+ } -+ return 0; -+} -+ -+void radeon_ring_commit(struct radeon_device *rdev) - { - unsigned count_dw_pad; - unsigned i; -@@ -295,6 +307,11 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev) - } - DRM_MEMORYBARRIER(); - radeon_cp_commit(rdev); -+} -+ -+void radeon_ring_unlock_commit(struct radeon_device *rdev) -+{ -+ radeon_ring_commit(rdev); - mutex_unlock(&rdev->cp.mutex); - } - -@@ -344,20 +361,23 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) - void radeon_ring_fini(struct radeon_device *rdev) - { - int r; -+ struct radeon_bo *ring_obj; - - mutex_lock(&rdev->cp.mutex); -- if (rdev->cp.ring_obj) { -- r = radeon_bo_reserve(rdev->cp.ring_obj, false); -+ ring_obj = rdev->cp.ring_obj; -+ rdev->cp.ring = NULL; -+ rdev->cp.ring_obj = NULL; -+ mutex_unlock(&rdev->cp.mutex); -+ -+ if (ring_obj) { -+ r = radeon_bo_reserve(ring_obj, false); - if (likely(r == 0)) { -- radeon_bo_kunmap(rdev->cp.ring_obj); -- radeon_bo_unpin(rdev->cp.ring_obj); -- radeon_bo_unreserve(rdev->cp.ring_obj); -+ radeon_bo_kunmap(ring_obj); -+ radeon_bo_unpin(ring_obj); -+ radeon_bo_unreserve(ring_obj); - } -- radeon_bo_unref(&rdev->cp.ring_obj); -- rdev->cp.ring = NULL; -- rdev->cp.ring_obj = NULL; -+ radeon_bo_unref(&ring_obj); - } -- mutex_unlock(&rdev->cp.mutex); - } - - -diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c -index cc5316d..b3ba44c 100644 ---- a/drivers/gpu/drm/radeon/radeon_state.c -+++ b/drivers/gpu/drm/radeon/radeon_state.c -@@ -900,9 +900,10 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, - flags |= RADEON_FRONT; - } - if (flags & (RADEON_DEPTH|RADEON_STENCIL)) { -- if (!dev_priv->have_z_offset) -+ if (!dev_priv->have_z_offset) { - printk_once(KERN_ERR "radeon: illegal depth clear request. 
Buggy mesa detected - please update.\n"); -- flags &= ~(RADEON_DEPTH | RADEON_STENCIL); -+ flags &= ~(RADEON_DEPTH | RADEON_STENCIL); -+ } - } - - if (flags & (RADEON_FRONT | RADEON_BACK)) { -diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c -index d031b68..e9918d8 100644 ---- a/drivers/gpu/drm/radeon/radeon_ttm.c -+++ b/drivers/gpu/drm/radeon/radeon_ttm.c -@@ -33,6 +33,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -162,34 +163,21 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, - (unsigned)type); - return -EINVAL; - } -- man->io_offset = rdev->mc.agp_base; -- man->io_size = rdev->mc.gtt_size; -- man->io_addr = NULL; - if (!rdev->ddev->agp->cant_use_aperture) -- man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | -- TTM_MEMTYPE_FLAG_MAPPABLE; -+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_WC; - man->default_caching = TTM_PL_FLAG_WC; -- } else --#endif -- { -- man->io_offset = 0; -- man->io_size = 0; -- man->io_addr = NULL; - } -+#endif - break; - case TTM_PL_VRAM: - /* "On-card" video ram */ - man->gpu_offset = rdev->mc.vram_start; - man->flags = TTM_MEMTYPE_FLAG_FIXED | -- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | - TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; - man->default_caching = TTM_PL_FLAG_WC; -- man->io_addr = NULL; -- man->io_offset = rdev->mc.aper_base; -- man->io_size = rdev->mc.aper_size; - break; - default: - DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); -@@ -244,9 +232,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo, - } - - static int radeon_move_blit(struct ttm_buffer_object *bo, -- bool evict, int no_wait, -- struct ttm_mem_reg *new_mem, -- struct ttm_mem_reg *old_mem) -+ bool evict, int no_wait_reserve, bool no_wait_gpu, -+ struct ttm_mem_reg *new_mem, -+ struct ttm_mem_reg *old_mem) - { - struct radeon_device *rdev; - uint64_t old_start, new_start; -@@ -290,13 +278,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, - r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence); - /* FIXME: handle copy error */ - r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, -- evict, no_wait, new_mem); -+ evict, no_wait_reserve, no_wait_gpu, new_mem); - radeon_fence_unref(&fence); - return r; - } - - static int radeon_move_vram_ram(struct ttm_buffer_object *bo, -- bool evict, bool interruptible, bool no_wait, -+ bool evict, bool interruptible, -+ bool no_wait_reserve, bool no_wait_gpu, - struct ttm_mem_reg *new_mem) - { - struct radeon_device *rdev; -@@ -317,7 +306,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, - placement.busy_placement = &placements; - placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, -- interruptible, no_wait); -+ interruptible, no_wait_reserve, no_wait_gpu); - if (unlikely(r)) { - return r; - } -@@ -331,11 +320,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, - if (unlikely(r)) { - goto out_cleanup; - } -- r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem); -+ r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem); - if (unlikely(r)) { - goto out_cleanup; - } -- r = ttm_bo_move_ttm(bo, true, no_wait, new_mem); -+ r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); - out_cleanup: - if (tmp_mem.mm_node) { - struct ttm_bo_global *glob = rdev->mman.bdev.glob; -@@ -349,7 +338,8 
@@ out_cleanup: - } - - static int radeon_move_ram_vram(struct ttm_buffer_object *bo, -- bool evict, bool interruptible, bool no_wait, -+ bool evict, bool interruptible, -+ bool no_wait_reserve, bool no_wait_gpu, - struct ttm_mem_reg *new_mem) - { - struct radeon_device *rdev; -@@ -369,15 +359,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, - placement.num_busy_placement = 1; - placement.busy_placement = &placements; - placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; -- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait); -+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu); - if (unlikely(r)) { - return r; - } -- r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem); -+ r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); - if (unlikely(r)) { - goto out_cleanup; - } -- r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem); -+ r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem); - if (unlikely(r)) { - goto out_cleanup; - } -@@ -394,8 +384,9 @@ out_cleanup: - } - - static int radeon_bo_move(struct ttm_buffer_object *bo, -- bool evict, bool interruptible, bool no_wait, -- struct ttm_mem_reg *new_mem) -+ bool evict, bool interruptible, -+ bool no_wait_reserve, bool no_wait_gpu, -+ struct ttm_mem_reg *new_mem) - { - struct radeon_device *rdev; - struct ttm_mem_reg *old_mem = &bo->mem; -@@ -422,23 +413,66 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, - if (old_mem->mem_type == TTM_PL_VRAM && - new_mem->mem_type == TTM_PL_SYSTEM) { - r = radeon_move_vram_ram(bo, evict, interruptible, -- no_wait, new_mem); -+ no_wait_reserve, no_wait_gpu, new_mem); - } else if (old_mem->mem_type == TTM_PL_SYSTEM && - new_mem->mem_type == TTM_PL_VRAM) { - r = radeon_move_ram_vram(bo, evict, interruptible, -- no_wait, new_mem); -+ no_wait_reserve, no_wait_gpu, new_mem); - } else { -- r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem); -+ r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem); - } - - if (r) { - memcpy: -- r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); -+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); - } -- - return r; - } - -+static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) -+{ -+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; -+ struct radeon_device *rdev = radeon_get_rdev(bdev); -+ -+ mem->bus.addr = NULL; -+ mem->bus.offset = 0; -+ mem->bus.size = mem->num_pages << PAGE_SHIFT; -+ mem->bus.base = 0; -+ mem->bus.is_iomem = false; -+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) -+ return -EINVAL; -+ switch (mem->mem_type) { -+ case TTM_PL_SYSTEM: -+ /* system memory */ -+ return 0; -+ case TTM_PL_TT: -+#if __OS_HAS_AGP -+ if (rdev->flags & RADEON_IS_AGP) { -+ /* RADEON_IS_AGP is set only if AGP is active */ -+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; -+ mem->bus.base = rdev->mc.agp_base; -+ mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture; -+ } -+#endif -+ break; -+ case TTM_PL_VRAM: -+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; -+ /* check if it's visible */ -+ if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size) -+ return -EINVAL; -+ mem->bus.base = rdev->mc.aper_base; -+ mem->bus.is_iomem = true; -+ break; -+ default: -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) -+{ -+} -+ - static int 
radeon_sync_obj_wait(void *sync_obj, void *sync_arg, - bool lazy, bool interruptible) - { -@@ -479,6 +513,8 @@ static struct ttm_bo_driver radeon_bo_driver = { - .sync_obj_ref = &radeon_sync_obj_ref, - .move_notify = &radeon_bo_move_notify, - .fault_reserve_notify = &radeon_bo_fault_reserve_notify, -+ .io_mem_reserve = &radeon_ttm_io_mem_reserve, -+ .io_mem_free = &radeon_ttm_io_mem_free, - }; - - int radeon_ttm_init(struct radeon_device *rdev) -@@ -571,13 +607,17 @@ static const struct vm_operations_struct *ttm_vm_ops = NULL; - static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) - { - struct ttm_buffer_object *bo; -+ struct radeon_device *rdev; - int r; - -- bo = (struct ttm_buffer_object *)vma->vm_private_data; -+ bo = (struct ttm_buffer_object *)vma->vm_private_data; - if (bo == NULL) { - return VM_FAULT_NOPAGE; - } -+ rdev = radeon_get_rdev(bo->bdev); -+ mutex_lock(&rdev->vram_mutex); - r = ttm_vm_ops->fault(vma, vmf); -+ mutex_unlock(&rdev->vram_mutex); - return r; - } - -@@ -745,8 +785,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data) - static int radeon_ttm_debugfs_init(struct radeon_device *rdev) - { - #if defined(CONFIG_DEBUG_FS) -- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES]; -- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32]; -+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1]; -+ static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32]; - unsigned i; - - for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) { -@@ -763,7 +803,13 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev) - radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager; - - } -- return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES); -+ /* Add ttm page pool to debugfs */ -+ sprintf(radeon_mem_types_names[i], "ttm_page_pool"); -+ radeon_mem_types_list[i].name = radeon_mem_types_names[i]; -+ radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs; -+ radeon_mem_types_list[i].driver_features = 0; -+ radeon_mem_types_list[i].data = NULL; -+ return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1); - - #endif - return 0; -diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen -new file mode 100644 -index 0000000..b5c757f ---- /dev/null -+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen -@@ -0,0 +1,611 @@ -+evergreen 0x9400 -+0x00008040 WAIT_UNTIL -+0x00008044 WAIT_UNTIL_POLL_CNTL -+0x00008048 WAIT_UNTIL_POLL_MASK -+0x0000804c WAIT_UNTIL_POLL_REFDATA -+0x000088B0 VGT_VTX_VECT_EJECT_REG -+0x000088C4 VGT_CACHE_INVALIDATION -+0x000088D4 VGT_GS_VERTEX_REUSE -+0x00008958 VGT_PRIMITIVE_TYPE -+0x0000895C VGT_INDEX_TYPE -+0x00008970 VGT_NUM_INDICES -+0x00008974 VGT_NUM_INSTANCES -+0x00008990 VGT_COMPUTE_DIM_X -+0x00008994 VGT_COMPUTE_DIM_Y -+0x00008998 VGT_COMPUTE_DIM_Z -+0x0000899C VGT_COMPUTE_START_X -+0x000089A0 VGT_COMPUTE_START_Y -+0x000089A4 VGT_COMPUTE_START_Z -+0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE -+0x00008A14 PA_CL_ENHANCE -+0x00008A60 PA_SC_LINE_STIPPLE_VALUE -+0x00008B10 PA_SC_LINE_STIPPLE_STATE -+0x00008BF0 PA_SC_ENHANCE -+0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ -+0x00008C00 SQ_CONFIG -+0x00008C04 SQ_GPR_RESOURCE_MGMT_1 -+0x00008C08 SQ_GPR_RESOURCE_MGMT_2 -+0x00008C0C SQ_GPR_RESOURCE_MGMT_3 -+0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1 -+0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2 -+0x00008C18 SQ_THREAD_RESOURCE_MGMT -+0x00008C1C SQ_THREAD_RESOURCE_MGMT_2 
-+0x00008C20 SQ_STACK_RESOURCE_MGMT_1 -+0x00008C24 SQ_STACK_RESOURCE_MGMT_2 -+0x00008C28 SQ_STACK_RESOURCE_MGMT_3 -+0x00008DF8 SQ_CONST_MEM_BASE -+0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS -+0x00009100 SPI_CONFIG_CNTL -+0x0000913C SPI_CONFIG_CNTL_1 -+0x00009700 VC_CNTL -+0x00009714 VC_ENHANCE -+0x00009830 DB_DEBUG -+0x00009834 DB_DEBUG2 -+0x00009838 DB_DEBUG3 -+0x0000983C DB_DEBUG4 -+0x00009854 DB_WATERMARKS -+0x0000A400 TD_PS_BORDER_COLOR_INDEX -+0x0000A404 TD_PS_BORDER_COLOR_RED -+0x0000A408 TD_PS_BORDER_COLOR_GREEN -+0x0000A40C TD_PS_BORDER_COLOR_BLUE -+0x0000A410 TD_PS_BORDER_COLOR_ALPHA -+0x0000A414 TD_VS_BORDER_COLOR_INDEX -+0x0000A418 TD_VS_BORDER_COLOR_RED -+0x0000A41C TD_VS_BORDER_COLOR_GREEN -+0x0000A420 TD_VS_BORDER_COLOR_BLUE -+0x0000A424 TD_VS_BORDER_COLOR_ALPHA -+0x0000A428 TD_GS_BORDER_COLOR_INDEX -+0x0000A42C TD_GS_BORDER_COLOR_RED -+0x0000A430 TD_GS_BORDER_COLOR_GREEN -+0x0000A434 TD_GS_BORDER_COLOR_BLUE -+0x0000A438 TD_GS_BORDER_COLOR_ALPHA -+0x0000A43C TD_HS_BORDER_COLOR_INDEX -+0x0000A440 TD_HS_BORDER_COLOR_RED -+0x0000A444 TD_HS_BORDER_COLOR_GREEN -+0x0000A448 TD_HS_BORDER_COLOR_BLUE -+0x0000A44C TD_HS_BORDER_COLOR_ALPHA -+0x0000A450 TD_LS_BORDER_COLOR_INDEX -+0x0000A454 TD_LS_BORDER_COLOR_RED -+0x0000A458 TD_LS_BORDER_COLOR_GREEN -+0x0000A45C TD_LS_BORDER_COLOR_BLUE -+0x0000A460 TD_LS_BORDER_COLOR_ALPHA -+0x0000A464 TD_CS_BORDER_COLOR_INDEX -+0x0000A468 TD_CS_BORDER_COLOR_RED -+0x0000A46C TD_CS_BORDER_COLOR_GREEN -+0x0000A470 TD_CS_BORDER_COLOR_BLUE -+0x0000A474 TD_CS_BORDER_COLOR_ALPHA -+0x00028000 DB_RENDER_CONTROL -+0x00028004 DB_COUNT_CONTROL -+0x0002800C DB_RENDER_OVERRIDE -+0x00028010 DB_RENDER_OVERRIDE2 -+0x00028028 DB_STENCIL_CLEAR -+0x0002802C DB_DEPTH_CLEAR -+0x00028034 PA_SC_SCREEN_SCISSOR_BR -+0x00028030 PA_SC_SCREEN_SCISSOR_TL -+0x0002805C DB_DEPTH_SLICE -+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0 -+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1 -+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2 -+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3 -+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4 -+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5 -+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6 -+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7 -+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8 -+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9 -+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10 -+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11 -+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12 -+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13 -+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14 -+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15 -+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0 -+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1 -+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2 -+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3 -+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4 -+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5 -+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6 -+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7 -+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8 -+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9 -+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10 -+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11 -+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12 -+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13 -+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14 -+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15 -+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0 -+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1 -+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2 -+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3 -+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4 -+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5 -+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6 -+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7 
-+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8 -+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9 -+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10 -+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11 -+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12 -+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13 -+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14 -+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15 -+0x00028200 PA_SC_WINDOW_OFFSET -+0x00028204 PA_SC_WINDOW_SCISSOR_TL -+0x00028208 PA_SC_WINDOW_SCISSOR_BR -+0x0002820C PA_SC_CLIPRECT_RULE -+0x00028210 PA_SC_CLIPRECT_0_TL -+0x00028214 PA_SC_CLIPRECT_0_BR -+0x00028218 PA_SC_CLIPRECT_1_TL -+0x0002821C PA_SC_CLIPRECT_1_BR -+0x00028220 PA_SC_CLIPRECT_2_TL -+0x00028224 PA_SC_CLIPRECT_2_BR -+0x00028228 PA_SC_CLIPRECT_3_TL -+0x0002822C PA_SC_CLIPRECT_3_BR -+0x00028230 PA_SC_EDGERULE -+0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET -+0x00028240 PA_SC_GENERIC_SCISSOR_TL -+0x00028244 PA_SC_GENERIC_SCISSOR_BR -+0x00028250 PA_SC_VPORT_SCISSOR_0_TL -+0x00028254 PA_SC_VPORT_SCISSOR_0_BR -+0x00028258 PA_SC_VPORT_SCISSOR_1_TL -+0x0002825C PA_SC_VPORT_SCISSOR_1_BR -+0x00028260 PA_SC_VPORT_SCISSOR_2_TL -+0x00028264 PA_SC_VPORT_SCISSOR_2_BR -+0x00028268 PA_SC_VPORT_SCISSOR_3_TL -+0x0002826C PA_SC_VPORT_SCISSOR_3_BR -+0x00028270 PA_SC_VPORT_SCISSOR_4_TL -+0x00028274 PA_SC_VPORT_SCISSOR_4_BR -+0x00028278 PA_SC_VPORT_SCISSOR_5_TL -+0x0002827C PA_SC_VPORT_SCISSOR_5_BR -+0x00028280 PA_SC_VPORT_SCISSOR_6_TL -+0x00028284 PA_SC_VPORT_SCISSOR_6_BR -+0x00028288 PA_SC_VPORT_SCISSOR_7_TL -+0x0002828C PA_SC_VPORT_SCISSOR_7_BR -+0x00028290 PA_SC_VPORT_SCISSOR_8_TL -+0x00028294 PA_SC_VPORT_SCISSOR_8_BR -+0x00028298 PA_SC_VPORT_SCISSOR_9_TL -+0x0002829C PA_SC_VPORT_SCISSOR_9_BR -+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL -+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR -+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL -+0x000282AC PA_SC_VPORT_SCISSOR_11_BR -+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL -+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR -+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL -+0x000282BC PA_SC_VPORT_SCISSOR_13_BR -+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL -+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR -+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL -+0x000282CC PA_SC_VPORT_SCISSOR_15_BR -+0x000282D0 PA_SC_VPORT_ZMIN_0 -+0x000282D4 PA_SC_VPORT_ZMAX_0 -+0x000282D8 PA_SC_VPORT_ZMIN_1 -+0x000282DC PA_SC_VPORT_ZMAX_1 -+0x000282E0 PA_SC_VPORT_ZMIN_2 -+0x000282E4 PA_SC_VPORT_ZMAX_2 -+0x000282E8 PA_SC_VPORT_ZMIN_3 -+0x000282EC PA_SC_VPORT_ZMAX_3 -+0x000282F0 PA_SC_VPORT_ZMIN_4 -+0x000282F4 PA_SC_VPORT_ZMAX_4 -+0x000282F8 PA_SC_VPORT_ZMIN_5 -+0x000282FC PA_SC_VPORT_ZMAX_5 -+0x00028300 PA_SC_VPORT_ZMIN_6 -+0x00028304 PA_SC_VPORT_ZMAX_6 -+0x00028308 PA_SC_VPORT_ZMIN_7 -+0x0002830C PA_SC_VPORT_ZMAX_7 -+0x00028310 PA_SC_VPORT_ZMIN_8 -+0x00028314 PA_SC_VPORT_ZMAX_8 -+0x00028318 PA_SC_VPORT_ZMIN_9 -+0x0002831C PA_SC_VPORT_ZMAX_9 -+0x00028320 PA_SC_VPORT_ZMIN_10 -+0x00028324 PA_SC_VPORT_ZMAX_10 -+0x00028328 PA_SC_VPORT_ZMIN_11 -+0x0002832C PA_SC_VPORT_ZMAX_11 -+0x00028330 PA_SC_VPORT_ZMIN_12 -+0x00028334 PA_SC_VPORT_ZMAX_12 -+0x00028338 PA_SC_VPORT_ZMIN_13 -+0x0002833C PA_SC_VPORT_ZMAX_13 -+0x00028340 PA_SC_VPORT_ZMIN_14 -+0x00028344 PA_SC_VPORT_ZMAX_14 -+0x00028348 PA_SC_VPORT_ZMIN_15 -+0x0002834C PA_SC_VPORT_ZMAX_15 -+0x00028350 SX_MISC -+0x00028380 SQ_VTX_SEMANTIC_0 -+0x00028384 SQ_VTX_SEMANTIC_1 -+0x00028388 SQ_VTX_SEMANTIC_2 -+0x0002838C SQ_VTX_SEMANTIC_3 -+0x00028390 SQ_VTX_SEMANTIC_4 -+0x00028394 SQ_VTX_SEMANTIC_5 -+0x00028398 SQ_VTX_SEMANTIC_6 -+0x0002839C SQ_VTX_SEMANTIC_7 -+0x000283A0 SQ_VTX_SEMANTIC_8 -+0x000283A4 SQ_VTX_SEMANTIC_9 -+0x000283A8 SQ_VTX_SEMANTIC_10 -+0x000283AC 
SQ_VTX_SEMANTIC_11 -+0x000283B0 SQ_VTX_SEMANTIC_12 -+0x000283B4 SQ_VTX_SEMANTIC_13 -+0x000283B8 SQ_VTX_SEMANTIC_14 -+0x000283BC SQ_VTX_SEMANTIC_15 -+0x000283C0 SQ_VTX_SEMANTIC_16 -+0x000283C4 SQ_VTX_SEMANTIC_17 -+0x000283C8 SQ_VTX_SEMANTIC_18 -+0x000283CC SQ_VTX_SEMANTIC_19 -+0x000283D0 SQ_VTX_SEMANTIC_20 -+0x000283D4 SQ_VTX_SEMANTIC_21 -+0x000283D8 SQ_VTX_SEMANTIC_22 -+0x000283DC SQ_VTX_SEMANTIC_23 -+0x000283E0 SQ_VTX_SEMANTIC_24 -+0x000283E4 SQ_VTX_SEMANTIC_25 -+0x000283E8 SQ_VTX_SEMANTIC_26 -+0x000283EC SQ_VTX_SEMANTIC_27 -+0x000283F0 SQ_VTX_SEMANTIC_28 -+0x000283F4 SQ_VTX_SEMANTIC_29 -+0x000283F8 SQ_VTX_SEMANTIC_30 -+0x000283FC SQ_VTX_SEMANTIC_31 -+0x00028400 VGT_MAX_VTX_INDX -+0x00028404 VGT_MIN_VTX_INDX -+0x00028408 VGT_INDX_OFFSET -+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX -+0x00028410 SX_ALPHA_TEST_CONTROL -+0x00028414 CB_BLEND_RED -+0x00028418 CB_BLEND_GREEN -+0x0002841C CB_BLEND_BLUE -+0x00028420 CB_BLEND_ALPHA -+0x00028430 DB_STENCILREFMASK -+0x00028434 DB_STENCILREFMASK_BF -+0x00028438 SX_ALPHA_REF -+0x0002843C PA_CL_VPORT_XSCALE_0 -+0x00028440 PA_CL_VPORT_XOFFSET_0 -+0x00028444 PA_CL_VPORT_YSCALE_0 -+0x00028448 PA_CL_VPORT_YOFFSET_0 -+0x0002844C PA_CL_VPORT_ZSCALE_0 -+0x00028450 PA_CL_VPORT_ZOFFSET_0 -+0x00028454 PA_CL_VPORT_XSCALE_1 -+0x00028458 PA_CL_VPORT_XOFFSET_1 -+0x0002845C PA_CL_VPORT_YSCALE_1 -+0x00028460 PA_CL_VPORT_YOFFSET_1 -+0x00028464 PA_CL_VPORT_ZSCALE_1 -+0x00028468 PA_CL_VPORT_ZOFFSET_1 -+0x0002846C PA_CL_VPORT_XSCALE_2 -+0x00028470 PA_CL_VPORT_XOFFSET_2 -+0x00028474 PA_CL_VPORT_YSCALE_2 -+0x00028478 PA_CL_VPORT_YOFFSET_2 -+0x0002847C PA_CL_VPORT_ZSCALE_2 -+0x00028480 PA_CL_VPORT_ZOFFSET_2 -+0x00028484 PA_CL_VPORT_XSCALE_3 -+0x00028488 PA_CL_VPORT_XOFFSET_3 -+0x0002848C PA_CL_VPORT_YSCALE_3 -+0x00028490 PA_CL_VPORT_YOFFSET_3 -+0x00028494 PA_CL_VPORT_ZSCALE_3 -+0x00028498 PA_CL_VPORT_ZOFFSET_3 -+0x0002849C PA_CL_VPORT_XSCALE_4 -+0x000284A0 PA_CL_VPORT_XOFFSET_4 -+0x000284A4 PA_CL_VPORT_YSCALE_4 -+0x000284A8 PA_CL_VPORT_YOFFSET_4 -+0x000284AC PA_CL_VPORT_ZSCALE_4 -+0x000284B0 PA_CL_VPORT_ZOFFSET_4 -+0x000284B4 PA_CL_VPORT_XSCALE_5 -+0x000284B8 PA_CL_VPORT_XOFFSET_5 -+0x000284BC PA_CL_VPORT_YSCALE_5 -+0x000284C0 PA_CL_VPORT_YOFFSET_5 -+0x000284C4 PA_CL_VPORT_ZSCALE_5 -+0x000284C8 PA_CL_VPORT_ZOFFSET_5 -+0x000284CC PA_CL_VPORT_XSCALE_6 -+0x000284D0 PA_CL_VPORT_XOFFSET_6 -+0x000284D4 PA_CL_VPORT_YSCALE_6 -+0x000284D8 PA_CL_VPORT_YOFFSET_6 -+0x000284DC PA_CL_VPORT_ZSCALE_6 -+0x000284E0 PA_CL_VPORT_ZOFFSET_6 -+0x000284E4 PA_CL_VPORT_XSCALE_7 -+0x000284E8 PA_CL_VPORT_XOFFSET_7 -+0x000284EC PA_CL_VPORT_YSCALE_7 -+0x000284F0 PA_CL_VPORT_YOFFSET_7 -+0x000284F4 PA_CL_VPORT_ZSCALE_7 -+0x000284F8 PA_CL_VPORT_ZOFFSET_7 -+0x000284FC PA_CL_VPORT_XSCALE_8 -+0x00028500 PA_CL_VPORT_XOFFSET_8 -+0x00028504 PA_CL_VPORT_YSCALE_8 -+0x00028508 PA_CL_VPORT_YOFFSET_8 -+0x0002850C PA_CL_VPORT_ZSCALE_8 -+0x00028510 PA_CL_VPORT_ZOFFSET_8 -+0x00028514 PA_CL_VPORT_XSCALE_9 -+0x00028518 PA_CL_VPORT_XOFFSET_9 -+0x0002851C PA_CL_VPORT_YSCALE_9 -+0x00028520 PA_CL_VPORT_YOFFSET_9 -+0x00028524 PA_CL_VPORT_ZSCALE_9 -+0x00028528 PA_CL_VPORT_ZOFFSET_9 -+0x0002852C PA_CL_VPORT_XSCALE_10 -+0x00028530 PA_CL_VPORT_XOFFSET_10 -+0x00028534 PA_CL_VPORT_YSCALE_10 -+0x00028538 PA_CL_VPORT_YOFFSET_10 -+0x0002853C PA_CL_VPORT_ZSCALE_10 -+0x00028540 PA_CL_VPORT_ZOFFSET_10 -+0x00028544 PA_CL_VPORT_XSCALE_11 -+0x00028548 PA_CL_VPORT_XOFFSET_11 -+0x0002854C PA_CL_VPORT_YSCALE_11 -+0x00028550 PA_CL_VPORT_YOFFSET_11 -+0x00028554 PA_CL_VPORT_ZSCALE_11 -+0x00028558 PA_CL_VPORT_ZOFFSET_11 -+0x0002855C 
PA_CL_VPORT_XSCALE_12 -+0x00028560 PA_CL_VPORT_XOFFSET_12 -+0x00028564 PA_CL_VPORT_YSCALE_12 -+0x00028568 PA_CL_VPORT_YOFFSET_12 -+0x0002856C PA_CL_VPORT_ZSCALE_12 -+0x00028570 PA_CL_VPORT_ZOFFSET_12 -+0x00028574 PA_CL_VPORT_XSCALE_13 -+0x00028578 PA_CL_VPORT_XOFFSET_13 -+0x0002857C PA_CL_VPORT_YSCALE_13 -+0x00028580 PA_CL_VPORT_YOFFSET_13 -+0x00028584 PA_CL_VPORT_ZSCALE_13 -+0x00028588 PA_CL_VPORT_ZOFFSET_13 -+0x0002858C PA_CL_VPORT_XSCALE_14 -+0x00028590 PA_CL_VPORT_XOFFSET_14 -+0x00028594 PA_CL_VPORT_YSCALE_14 -+0x00028598 PA_CL_VPORT_YOFFSET_14 -+0x0002859C PA_CL_VPORT_ZSCALE_14 -+0x000285A0 PA_CL_VPORT_ZOFFSET_14 -+0x000285A4 PA_CL_VPORT_XSCALE_15 -+0x000285A8 PA_CL_VPORT_XOFFSET_15 -+0x000285AC PA_CL_VPORT_YSCALE_15 -+0x000285B0 PA_CL_VPORT_YOFFSET_15 -+0x000285B4 PA_CL_VPORT_ZSCALE_15 -+0x000285B8 PA_CL_VPORT_ZOFFSET_15 -+0x000285BC PA_CL_UCP_0_X -+0x000285C0 PA_CL_UCP_0_Y -+0x000285C4 PA_CL_UCP_0_Z -+0x000285C8 PA_CL_UCP_0_W -+0x000285CC PA_CL_UCP_1_X -+0x000285D0 PA_CL_UCP_1_Y -+0x000285D4 PA_CL_UCP_1_Z -+0x000285D8 PA_CL_UCP_1_W -+0x000285DC PA_CL_UCP_2_X -+0x000285E0 PA_CL_UCP_2_Y -+0x000285E4 PA_CL_UCP_2_Z -+0x000285E8 PA_CL_UCP_2_W -+0x000285EC PA_CL_UCP_3_X -+0x000285F0 PA_CL_UCP_3_Y -+0x000285F4 PA_CL_UCP_3_Z -+0x000285F8 PA_CL_UCP_3_W -+0x000285FC PA_CL_UCP_4_X -+0x00028600 PA_CL_UCP_4_Y -+0x00028604 PA_CL_UCP_4_Z -+0x00028608 PA_CL_UCP_4_W -+0x0002860C PA_CL_UCP_5_X -+0x00028610 PA_CL_UCP_5_Y -+0x00028614 PA_CL_UCP_5_Z -+0x00028618 PA_CL_UCP_5_W -+0x0002861C SPI_VS_OUT_ID_0 -+0x00028620 SPI_VS_OUT_ID_1 -+0x00028624 SPI_VS_OUT_ID_2 -+0x00028628 SPI_VS_OUT_ID_3 -+0x0002862C SPI_VS_OUT_ID_4 -+0x00028630 SPI_VS_OUT_ID_5 -+0x00028634 SPI_VS_OUT_ID_6 -+0x00028638 SPI_VS_OUT_ID_7 -+0x0002863C SPI_VS_OUT_ID_8 -+0x00028640 SPI_VS_OUT_ID_9 -+0x00028644 SPI_PS_INPUT_CNTL_0 -+0x00028648 SPI_PS_INPUT_CNTL_1 -+0x0002864C SPI_PS_INPUT_CNTL_2 -+0x00028650 SPI_PS_INPUT_CNTL_3 -+0x00028654 SPI_PS_INPUT_CNTL_4 -+0x00028658 SPI_PS_INPUT_CNTL_5 -+0x0002865C SPI_PS_INPUT_CNTL_6 -+0x00028660 SPI_PS_INPUT_CNTL_7 -+0x00028664 SPI_PS_INPUT_CNTL_8 -+0x00028668 SPI_PS_INPUT_CNTL_9 -+0x0002866C SPI_PS_INPUT_CNTL_10 -+0x00028670 SPI_PS_INPUT_CNTL_11 -+0x00028674 SPI_PS_INPUT_CNTL_12 -+0x00028678 SPI_PS_INPUT_CNTL_13 -+0x0002867C SPI_PS_INPUT_CNTL_14 -+0x00028680 SPI_PS_INPUT_CNTL_15 -+0x00028684 SPI_PS_INPUT_CNTL_16 -+0x00028688 SPI_PS_INPUT_CNTL_17 -+0x0002868C SPI_PS_INPUT_CNTL_18 -+0x00028690 SPI_PS_INPUT_CNTL_19 -+0x00028694 SPI_PS_INPUT_CNTL_20 -+0x00028698 SPI_PS_INPUT_CNTL_21 -+0x0002869C SPI_PS_INPUT_CNTL_22 -+0x000286A0 SPI_PS_INPUT_CNTL_23 -+0x000286A4 SPI_PS_INPUT_CNTL_24 -+0x000286A8 SPI_PS_INPUT_CNTL_25 -+0x000286AC SPI_PS_INPUT_CNTL_26 -+0x000286B0 SPI_PS_INPUT_CNTL_27 -+0x000286B4 SPI_PS_INPUT_CNTL_28 -+0x000286B8 SPI_PS_INPUT_CNTL_29 -+0x000286BC SPI_PS_INPUT_CNTL_30 -+0x000286C0 SPI_PS_INPUT_CNTL_31 -+0x000286C4 SPI_VS_OUT_CONFIG -+0x000286C8 SPI_THREAD_GROUPING -+0x000286CC SPI_PS_IN_CONTROL_0 -+0x000286D0 SPI_PS_IN_CONTROL_1 -+0x000286D4 SPI_INTERP_CONTROL_0 -+0x000286D8 SPI_INPUT_Z -+0x000286DC SPI_FOG_CNTL -+0x000286E0 SPI_BARYC_CNTL -+0x000286E4 SPI_PS_IN_CONTROL_2 -+0x000286E8 SPI_COMPUTE_INPUT_CNTL -+0x000286EC SPI_COMPUTE_NUM_THREAD_X -+0x000286F0 SPI_COMPUTE_NUM_THREAD_Y -+0x000286F4 SPI_COMPUTE_NUM_THREAD_Z -+0x000286F8 GDS_ADDR_SIZE -+0x00028780 CB_BLEND0_CONTROL -+0x00028784 CB_BLEND1_CONTROL -+0x00028788 CB_BLEND2_CONTROL -+0x0002878C CB_BLEND3_CONTROL -+0x00028790 CB_BLEND4_CONTROL -+0x00028794 CB_BLEND5_CONTROL -+0x00028798 CB_BLEND6_CONTROL -+0x0002879C 
CB_BLEND7_CONTROL -+0x000287CC CS_COPY_STATE -+0x000287D0 GFX_COPY_STATE -+0x000287D4 PA_CL_POINT_X_RAD -+0x000287D8 PA_CL_POINT_Y_RAD -+0x000287DC PA_CL_POINT_SIZE -+0x000287E0 PA_CL_POINT_CULL_RAD -+0x00028808 CB_COLOR_CONTROL -+0x0002880C DB_SHADER_CONTROL -+0x00028810 PA_CL_CLIP_CNTL -+0x00028814 PA_SU_SC_MODE_CNTL -+0x00028818 PA_CL_VTE_CNTL -+0x0002881C PA_CL_VS_OUT_CNTL -+0x00028820 PA_CL_NANINF_CNTL -+0x00028824 PA_SU_LINE_STIPPLE_CNTL -+0x00028828 PA_SU_LINE_STIPPLE_SCALE -+0x0002882C PA_SU_PRIM_FILTER_CNTL -+0x00028838 SQ_DYN_GPR_RESOURCE_LIMIT_1 -+0x00028844 SQ_PGM_RESOURCES_PS -+0x00028848 SQ_PGM_RESOURCES_2_PS -+0x0002884C SQ_PGM_EXPORTS_PS -+0x0002885C SQ_PGM_RESOURCES_VS -+0x00028860 SQ_PGM_RESOURCES_2_VS -+0x00028878 SQ_PGM_RESOURCES_GS -+0x0002887C SQ_PGM_RESOURCES_2_GS -+0x00028890 SQ_PGM_RESOURCES_ES -+0x00028894 SQ_PGM_RESOURCES_2_ES -+0x000288A8 SQ_PGM_RESOURCES_FS -+0x000288BC SQ_PGM_RESOURCES_HS -+0x000288C0 SQ_PGM_RESOURCES_2_HS -+0x000288D0 SQ_PGM_RESOURCES_LS -+0x000288D4 SQ_PGM_RESOURCES_2_LS -+0x000288E8 SQ_LDS_ALLOC -+0x000288EC SQ_LDS_ALLOC_PS -+0x000288F0 SQ_VTX_SEMANTIC_CLEAR -+0x00028A00 PA_SU_POINT_SIZE -+0x00028A04 PA_SU_POINT_MINMAX -+0x00028A08 PA_SU_LINE_CNTL -+0x00028A0C PA_SC_LINE_STIPPLE -+0x00028A10 VGT_OUTPUT_PATH_CNTL -+0x00028A14 VGT_HOS_CNTL -+0x00028A18 VGT_HOS_MAX_TESS_LEVEL -+0x00028A1C VGT_HOS_MIN_TESS_LEVEL -+0x00028A20 VGT_HOS_REUSE_DEPTH -+0x00028A24 VGT_GROUP_PRIM_TYPE -+0x00028A28 VGT_GROUP_FIRST_DECR -+0x00028A2C VGT_GROUP_DECR -+0x00028A30 VGT_GROUP_VECT_0_CNTL -+0x00028A34 VGT_GROUP_VECT_1_CNTL -+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL -+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL -+0x00028A40 VGT_GS_MODE -+0x00028A48 PA_SC_MODE_CNTL_0 -+0x00028A4C PA_SC_MODE_CNTL_1 -+0x00028A50 VGT_ENHANCE -+0x00028A54 VGT_GS_PER_ES -+0x00028A58 VGT_ES_PER_GS -+0x00028A5C VGT_GS_PER_VS -+0x00028A6C VGT_GS_OUT_PRIM_TYPE -+0x00028A84 VGT_PRIMITIVEID_EN -+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN -+0x00028AA0 VGT_INSTANCE_STEP_RATE_0 -+0x00028AA4 VGT_INSTANCE_STEP_RATE_1 -+0x00028AB4 VGT_REUSE_OFF -+0x00028AB8 VGT_VTX_CNT_EN -+0x00028ABC DB_HTILE_SURFACE -+0x00028AC0 DB_SRESULTS_COMPARE_STATE0 -+0x00028AC4 DB_SRESULTS_COMPARE_STATE1 -+0x00028AC8 DB_PRELOAD_CONTROL -+0x00028B38 VGT_GS_MAX_VERT_OUT -+0x00028B54 VGT_SHADER_STAGES_EN -+0x00028B58 VGT_LS_HS_CONFIG -+0x00028B5C VGT_LS_SIZE -+0x00028B60 VGT_HS_SIZE -+0x00028B64 VGT_LS_HS_ALLOC -+0x00028B68 VGT_HS_PATCH_CONST -+0x00028B6C VGT_TF_PARAM -+0x00028B70 DB_ALPHA_TO_MASK -+0x00028B74 VGT_DISPATCH_INITIATOR -+0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL -+0x00028B7C PA_SU_POLY_OFFSET_CLAMP -+0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE -+0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET -+0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE -+0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET -+0x00028B74 VGT_GS_INSTANCE_CNT -+0x00028C00 PA_SC_LINE_CNTL -+0x00028C08 PA_SU_VTX_CNTL -+0x00028C0C PA_CL_GB_VERT_CLIP_ADJ -+0x00028C10 PA_CL_GB_VERT_DISC_ADJ -+0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ -+0x00028C18 PA_CL_GB_HORZ_DISC_ADJ -+0x00028C1C PA_SC_AA_SAMPLE_LOCS_0 -+0x00028C20 PA_SC_AA_SAMPLE_LOCS_1 -+0x00028C24 PA_SC_AA_SAMPLE_LOCS_2 -+0x00028C28 PA_SC_AA_SAMPLE_LOCS_3 -+0x00028C2C PA_SC_AA_SAMPLE_LOCS_4 -+0x00028C30 PA_SC_AA_SAMPLE_LOCS_5 -+0x00028C34 PA_SC_AA_SAMPLE_LOCS_6 -+0x00028C38 PA_SC_AA_SAMPLE_LOCS_7 -+0x00028C3C PA_SC_AA_MASK -+0x00028C8C CB_COLOR0_CLEAR_WORD0 -+0x00028C90 CB_COLOR0_CLEAR_WORD1 -+0x00028C94 CB_COLOR0_CLEAR_WORD2 -+0x00028C98 CB_COLOR0_CLEAR_WORD3 -+0x00028CC8 CB_COLOR1_CLEAR_WORD0 -+0x00028CCC CB_COLOR1_CLEAR_WORD1 -+0x00028CD0 
CB_COLOR1_CLEAR_WORD2 -+0x00028CD4 CB_COLOR1_CLEAR_WORD3 -+0x00028D04 CB_COLOR2_CLEAR_WORD0 -+0x00028D08 CB_COLOR2_CLEAR_WORD1 -+0x00028D0C CB_COLOR2_CLEAR_WORD2 -+0x00028D10 CB_COLOR2_CLEAR_WORD3 -+0x00028D40 CB_COLOR3_CLEAR_WORD0 -+0x00028D44 CB_COLOR3_CLEAR_WORD1 -+0x00028D48 CB_COLOR3_CLEAR_WORD2 -+0x00028D4C CB_COLOR3_CLEAR_WORD3 -+0x00028D7C CB_COLOR4_CLEAR_WORD0 -+0x00028D80 CB_COLOR4_CLEAR_WORD1 -+0x00028D84 CB_COLOR4_CLEAR_WORD2 -+0x00028D88 CB_COLOR4_CLEAR_WORD3 -+0x00028DB8 CB_COLOR5_CLEAR_WORD0 -+0x00028DBC CB_COLOR5_CLEAR_WORD1 -+0x00028DC0 CB_COLOR5_CLEAR_WORD2 -+0x00028DC4 CB_COLOR5_CLEAR_WORD3 -+0x00028DF4 CB_COLOR6_CLEAR_WORD0 -+0x00028DF8 CB_COLOR6_CLEAR_WORD1 -+0x00028DFC CB_COLOR6_CLEAR_WORD2 -+0x00028E00 CB_COLOR6_CLEAR_WORD3 -+0x00028E30 CB_COLOR7_CLEAR_WORD0 -+0x00028E34 CB_COLOR7_CLEAR_WORD1 -+0x00028E38 CB_COLOR7_CLEAR_WORD2 -+0x00028E3C CB_COLOR7_CLEAR_WORD3 -+0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0 -+0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1 -+0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2 -+0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3 -+0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4 -+0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5 -+0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6 -+0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7 -+0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8 -+0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9 -+0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10 -+0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11 -+0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12 -+0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13 -+0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14 -+0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15 -+0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0 -+0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1 -+0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2 -+0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3 -+0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4 -+0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5 -+0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6 -+0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7 -+0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8 -+0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9 -+0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10 -+0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11 -+0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12 -+0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13 -+0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14 -+0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15 -+0x0003CFF0 SQ_VTX_BASE_VTX_LOC -+0x0003CFF4 SQ_VTX_START_INST_LOC -+0x0003FF00 SQ_TEX_SAMPLER_CLEAR -+0x0003FF04 SQ_TEX_RESOURCE_CLEAR -+0x0003FF08 SQ_LOOP_BOOL_CLEAR -diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c -index 1a41cb2..9e4240b 100644 ---- a/drivers/gpu/drm/radeon/rs400.c -+++ b/drivers/gpu/drm/radeon/rs400.c -@@ -243,8 +243,6 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev) - - void rs400_gpu_init(struct radeon_device *rdev) - { -- /* FIXME: HDP same place on rs400 ? */ -- r100_hdp_reset(rdev); - /* FIXME: is this correct ? */ - r420_pipes_init(rdev); - if (rs400_mc_wait_for_idle(rdev)) { -@@ -433,7 +431,7 @@ int rs400_resume(struct radeon_device *rdev) - /* setup MC before calling post tables */ - rs400_mc_program(rdev); - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ -- if (radeon_gpu_reset(rdev)) { -+ if (radeon_asic_reset(rdev)) { - dev_warn(rdev->dev, "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), - RREG32(R_0007C0_CP_STAT)); -@@ -458,7 +456,6 @@ int rs400_suspend(struct radeon_device *rdev) - - void rs400_fini(struct radeon_device *rdev) - { -- radeon_pm_fini(rdev); - r100_cp_fini(rdev); - r100_wb_fini(rdev); - r100_ib_fini(rdev); -@@ -497,7 +494,7 @@ int rs400_init(struct radeon_device *rdev) - return r; - } - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ -- if (radeon_gpu_reset(rdev)) { -+ if (radeon_asic_reset(rdev)) { - dev_warn(rdev->dev, - "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), -@@ -509,8 +506,6 @@ int rs400_init(struct radeon_device *rdev) - - /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); -- /* Initialize power management */ -- radeon_pm_init(rdev); - /* initialize memory controller */ - rs400_mc_init(rdev); - /* Fence driver */ -diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c -index a81bc7a..7bb4c3e 100644 ---- a/drivers/gpu/drm/radeon/rs600.c -+++ b/drivers/gpu/drm/radeon/rs600.c -@@ -46,6 +46,136 @@ - void rs600_gpu_init(struct radeon_device *rdev); - int rs600_mc_wait_for_idle(struct radeon_device *rdev); - -+void rs600_pm_misc(struct radeon_device *rdev) -+{ -+ int requested_index = rdev->pm.requested_power_state_index; -+ struct radeon_power_state *ps = &rdev->pm.power_state[requested_index]; -+ struct radeon_voltage *voltage = &ps->clock_info[0].voltage; -+ u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl; -+ u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl; -+ -+ if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) { -+ if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { -+ tmp = RREG32(voltage->gpio.reg); -+ if (voltage->active_high) -+ tmp |= voltage->gpio.mask; -+ else -+ tmp &= ~(voltage->gpio.mask); -+ WREG32(voltage->gpio.reg, tmp); -+ if (voltage->delay) -+ udelay(voltage->delay); -+ } else { -+ tmp = RREG32(voltage->gpio.reg); -+ if (voltage->active_high) -+ tmp &= ~voltage->gpio.mask; -+ else -+ tmp |= voltage->gpio.mask; -+ WREG32(voltage->gpio.reg, tmp); -+ if (voltage->delay) -+ udelay(voltage->delay); -+ } -+ } else if (voltage->type == VOLTAGE_VDDC) -+ radeon_atom_set_voltage(rdev, voltage->vddc_id); -+ -+ dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH); -+ dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf); -+ dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf); -+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) { -+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) { -+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2); -+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2); -+ } else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) { -+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4); -+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4); -+ } -+ } else { -+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1); -+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1); -+ } -+ WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length); -+ -+ dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL); -+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) { -+ dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP; -+ if (voltage->delay) { -+ dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC; -+ dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay); -+ } else -+ dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC; -+ } else -+ dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP; -+ WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl); 
-+ -+ hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL); -+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN) -+ hdp_dyn_cntl &= ~HDP_FORCEON; -+ else -+ hdp_dyn_cntl |= HDP_FORCEON; -+ WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl); -+#if 0 -+ /* mc_host_dyn seems to cause hangs from time to time */ -+ mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL); -+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN) -+ mc_host_dyn_cntl &= ~MC_HOST_FORCEON; -+ else -+ mc_host_dyn_cntl |= MC_HOST_FORCEON; -+ WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl); -+#endif -+ dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL); -+ if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN) -+ dyn_backbias_cntl |= IO_CG_BACKBIAS_EN; -+ else -+ dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN; -+ WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl); -+ -+ /* set pcie lanes */ -+ if ((rdev->flags & RADEON_IS_PCIE) && -+ !(rdev->flags & RADEON_IS_IGP) && -+ rdev->asic->set_pcie_lanes && -+ (ps->pcie_lanes != -+ rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) { -+ radeon_set_pcie_lanes(rdev, -+ ps->pcie_lanes); -+ DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes); -+ } -+} -+ -+void rs600_pm_prepare(struct radeon_device *rdev) -+{ -+ struct drm_device *ddev = rdev->ddev; -+ struct drm_crtc *crtc; -+ struct radeon_crtc *radeon_crtc; -+ u32 tmp; -+ -+ /* disable any active CRTCs */ -+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { -+ radeon_crtc = to_radeon_crtc(crtc); -+ if (radeon_crtc->enabled) { -+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset); -+ tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE; -+ WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); -+ } -+ } -+} -+ -+void rs600_pm_finish(struct radeon_device *rdev) -+{ -+ struct drm_device *ddev = rdev->ddev; -+ struct drm_crtc *crtc; -+ struct radeon_crtc *radeon_crtc; -+ u32 tmp; -+ -+ /* enable any active CRTCs */ -+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { -+ radeon_crtc = to_radeon_crtc(crtc); -+ if (radeon_crtc->enabled) { -+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset); -+ tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE; -+ WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp); -+ } -+ } -+} -+ - /* hpd for digital panel detect/disconnect */ - bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) - { -@@ -147,6 +277,78 @@ void rs600_hpd_fini(struct radeon_device *rdev) - } - } - -+void rs600_bm_disable(struct radeon_device *rdev) -+{ -+ u32 tmp; -+ -+ /* disable bus mastering */ -+ pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp); -+ pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB); -+ mdelay(1); -+} -+ -+int rs600_asic_reset(struct radeon_device *rdev) -+{ -+ u32 status, tmp; -+ -+ struct rv515_mc_save save; -+ -+ /* Stops all mc clients */ -+ rv515_mc_stop(rdev, &save); -+ status = RREG32(R_000E40_RBBM_STATUS); -+ if (!G_000E40_GUI_ACTIVE(status)) { -+ return 0; -+ } -+ status = RREG32(R_000E40_RBBM_STATUS); -+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); -+ /* stop CP */ -+ WREG32(RADEON_CP_CSQ_CNTL, 0); -+ tmp = RREG32(RADEON_CP_RB_CNTL); -+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); -+ WREG32(RADEON_CP_RB_RPTR_WR, 0); -+ WREG32(RADEON_CP_RB_WPTR, 0); -+ WREG32(RADEON_CP_RB_CNTL, tmp); -+ pci_save_state(rdev->pdev); -+ /* disable bus mastering */ -+ rs600_bm_disable(rdev); -+ /* reset GA+VAP */ -+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) | -+ S_0000F0_SOFT_RESET_GA(1)); -+ 
RREG32(R_0000F0_RBBM_SOFT_RESET); -+ mdelay(500); -+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0); -+ mdelay(1); -+ status = RREG32(R_000E40_RBBM_STATUS); -+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); -+ /* reset CP */ -+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); -+ RREG32(R_0000F0_RBBM_SOFT_RESET); -+ mdelay(500); -+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0); -+ mdelay(1); -+ status = RREG32(R_000E40_RBBM_STATUS); -+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); -+ /* reset MC */ -+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1)); -+ RREG32(R_0000F0_RBBM_SOFT_RESET); -+ mdelay(500); -+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0); -+ mdelay(1); -+ status = RREG32(R_000E40_RBBM_STATUS); -+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); -+ /* restore PCI & busmastering */ -+ pci_restore_state(rdev->pdev); -+ /* Check if GPU is idle */ -+ if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { -+ dev_err(rdev->dev, "failed to reset GPU\n"); -+ rdev->gpu_lockup = true; -+ return -1; -+ } -+ rv515_mc_resume(rdev, &save); -+ dev_info(rdev->dev, "GPU reset succeed\n"); -+ return 0; -+} -+ - /* - * GART. - */ -@@ -310,6 +512,9 @@ int rs600_irq_set(struct radeon_device *rdev) - if (rdev->irq.sw_int) { - tmp |= S_000040_SW_INT_EN(1); - } -+ if (rdev->irq.gui_idle) { -+ tmp |= S_000040_GUI_IDLE(1); -+ } - if (rdev->irq.crtc_vblank_int[0]) { - mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); - } -@@ -332,9 +537,15 @@ int rs600_irq_set(struct radeon_device *rdev) - static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) - { - uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); -- uint32_t irq_mask = ~C_000044_SW_INT; -+ uint32_t irq_mask = S_000044_SW_INT(1); - u32 tmp; - -+ /* the interrupt works, but the status bit is permanently asserted */ -+ if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) { -+ if (!rdev->irq.gui_idle_acked) -+ irq_mask |= S_000044_GUI_IDLE_STAT(1); -+ } -+ - if (G_000044_DISPLAY_INT_STAT(irqs)) { - *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); - if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { -@@ -382,6 +593,9 @@ int rs600_irq_process(struct radeon_device *rdev) - uint32_t r500_disp_int; - bool queue_hotplug = false; - -+ /* reset gui idle ack. the status bit is broken */ -+ rdev->irq.gui_idle_acked = false; -+ - status = rs600_irq_ack(rdev, &r500_disp_int); - if (!status && !r500_disp_int) { - return IRQ_NONE; -@@ -390,6 +604,12 @@ int rs600_irq_process(struct radeon_device *rdev) - /* SW interrupt */ - if (G_000044_SW_INT(status)) - radeon_fence_process(rdev); -+ /* GUI idle */ -+ if (G_000040_GUI_IDLE(status)) { -+ rdev->irq.gui_idle_acked = true; -+ rdev->pm.gui_idle = true; -+ wake_up(&rdev->irq.idle_queue); -+ } - /* Vertical blank interrupts */ - if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { - drm_handle_vblank(rdev->ddev, 0); -@@ -411,6 +631,8 @@ int rs600_irq_process(struct radeon_device *rdev) - } - status = rs600_irq_ack(rdev, &r500_disp_int); - } -+ /* reset gui idle ack. 
the status bit is broken */ -+ rdev->irq.gui_idle_acked = false; - if (queue_hotplug) - queue_work(rdev->wq, &rdev->hotplug_work); - if (rdev->msi_enabled) { -@@ -454,7 +676,6 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev) - - void rs600_gpu_init(struct radeon_device *rdev) - { -- r100_hdp_reset(rdev); - r420_pipes_init(rdev); - /* Wait for mc idle */ - if (rs600_mc_wait_for_idle(rdev)) -@@ -601,7 +822,7 @@ int rs600_resume(struct radeon_device *rdev) - /* Resume clock before doing reset */ - rv515_clock_startup(rdev); - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ -- if (radeon_gpu_reset(rdev)) { -+ if (radeon_asic_reset(rdev)) { - dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), - RREG32(R_0007C0_CP_STAT)); -@@ -626,7 +847,6 @@ int rs600_suspend(struct radeon_device *rdev) - - void rs600_fini(struct radeon_device *rdev) - { -- radeon_pm_fini(rdev); - r100_cp_fini(rdev); - r100_wb_fini(rdev); - r100_ib_fini(rdev); -@@ -664,7 +884,7 @@ int rs600_init(struct radeon_device *rdev) - return -EINVAL; - } - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ -- if (radeon_gpu_reset(rdev)) { -+ if (radeon_asic_reset(rdev)) { - dev_warn(rdev->dev, - "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), -@@ -676,8 +896,6 @@ int rs600_init(struct radeon_device *rdev) - - /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); -- /* Initialize power management */ -- radeon_pm_init(rdev); - /* initialize memory controller */ - rs600_mc_init(rdev); - rs600_debugfs(rdev); -diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h -index e52d269..a27c13a 100644 ---- a/drivers/gpu/drm/radeon/rs600d.h -+++ b/drivers/gpu/drm/radeon/rs600d.h -@@ -178,6 +178,52 @@ - #define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0) - #define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF) - #define C_000074_MC_IND_DATA 0x00000000 -+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0 -+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0) -+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1) -+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE -+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1) -+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1) -+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD -+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2) -+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1) -+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB -+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3) -+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1) -+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7 -+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4) -+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1) -+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF -+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5) -+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1) -+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF -+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6) -+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1) -+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF -+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7) -+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1) -+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F -+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8) -+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1) -+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF -+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9) -+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1) 
-+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF -+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10) -+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1) -+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF -+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11) -+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1) -+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF -+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12) -+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1) -+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF -+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13) -+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1) -+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF -+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14) -+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1) -+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF - #define R_000134_HDP_FB_LOCATION 0x000134 - #define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0) - #define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF) -@@ -588,4 +634,38 @@ - #define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) - #define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF - -+/* PLL regs */ -+#define GENERAL_PWRMGT 0x8 -+#define GLOBAL_PWRMGT_EN (1 << 0) -+#define MOBILE_SU (1 << 2) -+#define DYN_PWRMGT_SCLK_LENGTH 0xc -+#define NORMAL_POWER_SCLK_HILEN(x) ((x) << 0) -+#define NORMAL_POWER_SCLK_LOLEN(x) ((x) << 4) -+#define REDUCED_POWER_SCLK_HILEN(x) ((x) << 8) -+#define REDUCED_POWER_SCLK_LOLEN(x) ((x) << 12) -+#define POWER_D1_SCLK_HILEN(x) ((x) << 16) -+#define POWER_D1_SCLK_LOLEN(x) ((x) << 20) -+#define STATIC_SCREEN_HILEN(x) ((x) << 24) -+#define STATIC_SCREEN_LOLEN(x) ((x) << 28) -+#define DYN_SCLK_VOL_CNTL 0xe -+#define IO_CG_VOLTAGE_DROP (1 << 0) -+#define VOLTAGE_DROP_SYNC (1 << 2) -+#define VOLTAGE_DELAY_SEL(x) ((x) << 3) -+#define HDP_DYN_CNTL 0x10 -+#define HDP_FORCEON (1 << 0) -+#define MC_HOST_DYN_CNTL 0x1e -+#define MC_HOST_FORCEON (1 << 0) -+#define DYN_BACKBIAS_CNTL 0x29 -+#define IO_CG_BACKBIAS_EN (1 << 0) -+ -+/* mmreg */ -+#define DOUT_POWER_MANAGEMENT_CNTL 0x7ee0 -+#define PWRDN_WAIT_BUSY_OFF (1 << 0) -+#define PWRDN_WAIT_PWRSEQ_OFF (1 << 4) -+#define PWRDN_WAIT_PPLL_OFF (1 << 8) -+#define PWRUP_WAIT_PPLL_ON (1 << 12) -+#define PWRUP_WAIT_MEM_INIT_DONE (1 << 16) -+#define PM_ASSERT_RESET (1 << 20) -+#define PM_PWRDN_PPLL (1 << 24) -+ - #endif -diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c -index bbf3da7..bcc3319 100644 ---- a/drivers/gpu/drm/radeon/rs690.c -+++ b/drivers/gpu/drm/radeon/rs690.c -@@ -48,8 +48,6 @@ static int rs690_mc_wait_for_idle(struct radeon_device *rdev) - - static void rs690_gpu_init(struct radeon_device *rdev) - { -- /* FIXME: HDP same place on rs690 ? */ -- r100_hdp_reset(rdev); - /* FIXME: is this correct ? 
*/ - r420_pipes_init(rdev); - if (rs690_mc_wait_for_idle(rdev)) { -@@ -78,59 +76,59 @@ void rs690_pm_info(struct radeon_device *rdev) - /* Get various system informations from bios */ - switch (crev) { - case 1: -- tmp.full = rfixed_const(100); -- rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info.ulBootUpMemoryClock); -- rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); -- rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); -- rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->info.usFSBClock)); -- rdev->pm.igp_ht_link_width.full = rfixed_const(info->info.ucHTLinkWidth); -+ tmp.full = dfixed_const(100); -+ rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); -+ rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); -+ rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); -+ rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock)); -+ rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth); - break; - case 2: -- tmp.full = rfixed_const(100); -- rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info_v2.ulBootUpSidePortClock); -- rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); -- rdev->pm.igp_system_mclk.full = rfixed_const(info->info_v2.ulBootUpUMAClock); -- rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); -- rdev->pm.igp_ht_link_clk.full = rfixed_const(info->info_v2.ulHTLinkFreq); -- rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp); -- rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); -+ tmp.full = dfixed_const(100); -+ rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); -+ rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); -+ rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); -+ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); -+ rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); -+ rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); -+ rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); - break; - default: -- tmp.full = rfixed_const(100); -+ tmp.full = dfixed_const(100); - /* We assume the slower possible clock ie worst case */ - /* DDR 333Mhz */ -- rdev->pm.igp_sideport_mclk.full = rfixed_const(333); -+ rdev->pm.igp_sideport_mclk.full = dfixed_const(333); - /* FIXME: system clock ? */ -- rdev->pm.igp_system_mclk.full = rfixed_const(100); -- rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); -- rdev->pm.igp_ht_link_clk.full = rfixed_const(200); -- rdev->pm.igp_ht_link_width.full = rfixed_const(8); -+ rdev->pm.igp_system_mclk.full = dfixed_const(100); -+ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); -+ rdev->pm.igp_ht_link_clk.full = dfixed_const(200); -+ rdev->pm.igp_ht_link_width.full = dfixed_const(8); - DRM_ERROR("No integrated system info for your GPU, using safe default\n"); - break; - } - } else { -- tmp.full = rfixed_const(100); -+ tmp.full = dfixed_const(100); - /* We assume the slower possible clock ie worst case */ - /* DDR 333Mhz */ -- rdev->pm.igp_sideport_mclk.full = rfixed_const(333); -+ rdev->pm.igp_sideport_mclk.full = dfixed_const(333); - /* FIXME: system clock ? 
*/ -- rdev->pm.igp_system_mclk.full = rfixed_const(100); -- rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); -- rdev->pm.igp_ht_link_clk.full = rfixed_const(200); -- rdev->pm.igp_ht_link_width.full = rfixed_const(8); -+ rdev->pm.igp_system_mclk.full = dfixed_const(100); -+ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); -+ rdev->pm.igp_ht_link_clk.full = dfixed_const(200); -+ rdev->pm.igp_ht_link_width.full = dfixed_const(8); - DRM_ERROR("No integrated system info for your GPU, using safe default\n"); - } - /* Compute various bandwidth */ - /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ -- tmp.full = rfixed_const(4); -- rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp); -+ tmp.full = dfixed_const(4); -+ rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp); - /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 - * = ht_clk * ht_width / 5 - */ -- tmp.full = rfixed_const(5); -- rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk, -+ tmp.full = dfixed_const(5); -+ rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk, - rdev->pm.igp_ht_link_width); -- rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp); -+ rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp); - if (tmp.full < rdev->pm.max_bandwidth.full) { - /* HT link is a limiting factor */ - rdev->pm.max_bandwidth.full = tmp.full; -@@ -138,10 +136,10 @@ void rs690_pm_info(struct radeon_device *rdev) - /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7 - * = (sideport_clk * 14) / 10 - */ -- tmp.full = rfixed_const(14); -- rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp); -- tmp.full = rfixed_const(10); -- rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp); -+ tmp.full = dfixed_const(14); -+ rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp); -+ tmp.full = dfixed_const(10); -+ rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp); - } - - void rs690_mc_init(struct radeon_device *rdev) -@@ -241,20 +239,20 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, - return; - } - -- if (crtc->vsc.full > rfixed_const(2)) -- wm->num_line_pair.full = rfixed_const(2); -+ if (crtc->vsc.full > dfixed_const(2)) -+ wm->num_line_pair.full = dfixed_const(2); - else -- wm->num_line_pair.full = rfixed_const(1); -- -- b.full = rfixed_const(mode->crtc_hdisplay); -- c.full = rfixed_const(256); -- a.full = rfixed_div(b, c); -- request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair); -- request_fifo_depth.full = rfixed_ceil(request_fifo_depth); -- if (a.full < rfixed_const(4)) { -+ wm->num_line_pair.full = dfixed_const(1); -+ -+ b.full = dfixed_const(mode->crtc_hdisplay); -+ c.full = dfixed_const(256); -+ a.full = dfixed_div(b, c); -+ request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair); -+ request_fifo_depth.full = dfixed_ceil(request_fifo_depth); -+ if (a.full < dfixed_const(4)) { - wm->lb_request_fifo_depth = 4; - } else { -- wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); -+ wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth); - } - - /* Determine consumption rate -@@ -263,23 +261,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, - * vsc = vertical scaling ratio, defined as source/destination - * hsc = horizontal scaling ration, defined as source/destination - */ -- a.full = rfixed_const(mode->clock); -- b.full = 
rfixed_const(1000); -- a.full = rfixed_div(a, b); -- pclk.full = rfixed_div(b, a); -+ a.full = dfixed_const(mode->clock); -+ b.full = dfixed_const(1000); -+ a.full = dfixed_div(a, b); -+ pclk.full = dfixed_div(b, a); - if (crtc->rmx_type != RMX_OFF) { -- b.full = rfixed_const(2); -+ b.full = dfixed_const(2); - if (crtc->vsc.full > b.full) - b.full = crtc->vsc.full; -- b.full = rfixed_mul(b, crtc->hsc); -- c.full = rfixed_const(2); -- b.full = rfixed_div(b, c); -- consumption_time.full = rfixed_div(pclk, b); -+ b.full = dfixed_mul(b, crtc->hsc); -+ c.full = dfixed_const(2); -+ b.full = dfixed_div(b, c); -+ consumption_time.full = dfixed_div(pclk, b); - } else { - consumption_time.full = pclk.full; - } -- a.full = rfixed_const(1); -- wm->consumption_rate.full = rfixed_div(a, consumption_time); -+ a.full = dfixed_const(1); -+ wm->consumption_rate.full = dfixed_div(a, consumption_time); - - - /* Determine line time -@@ -287,18 +285,18 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, - * LineTime = total number of horizontal pixels - * pclk = pixel clock period(ns) - */ -- a.full = rfixed_const(crtc->base.mode.crtc_htotal); -- line_time.full = rfixed_mul(a, pclk); -+ a.full = dfixed_const(crtc->base.mode.crtc_htotal); -+ line_time.full = dfixed_mul(a, pclk); - - /* Determine active time - * ActiveTime = time of active region of display within one line, - * hactive = total number of horizontal active pixels - * htotal = total number of horizontal pixels - */ -- a.full = rfixed_const(crtc->base.mode.crtc_htotal); -- b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); -- wm->active_time.full = rfixed_mul(line_time, b); -- wm->active_time.full = rfixed_div(wm->active_time, a); -+ a.full = dfixed_const(crtc->base.mode.crtc_htotal); -+ b.full = dfixed_const(crtc->base.mode.crtc_hdisplay); -+ wm->active_time.full = dfixed_mul(line_time, b); -+ wm->active_time.full = dfixed_div(wm->active_time, a); - - /* Maximun bandwidth is the minimun bandwidth of all component */ - rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; -@@ -306,8 +304,8 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, - if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && - rdev->pm.sideport_bandwidth.full) - rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; -- read_delay_latency.full = rfixed_const(370 * 800 * 1000); -- read_delay_latency.full = rfixed_div(read_delay_latency, -+ read_delay_latency.full = dfixed_const(370 * 800 * 1000); -+ read_delay_latency.full = dfixed_div(read_delay_latency, - rdev->pm.igp_sideport_mclk); - } else { - if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && -@@ -316,23 +314,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, - if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full && - rdev->pm.ht_bandwidth.full) - rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth; -- read_delay_latency.full = rfixed_const(5000); -+ read_delay_latency.full = dfixed_const(5000); - } - - /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */ -- a.full = rfixed_const(16); -- rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a); -- a.full = rfixed_const(1000); -- rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk); -+ a.full = dfixed_const(16); -+ rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a); -+ a.full = dfixed_const(1000); -+ rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk); - /* Determine chunk time - * ChunkTime = the time it takes the DCP to send one chunk of data - * to the LB which consists of pipeline delay and 
inter chunk gap - * sclk = system clock(ns) - */ -- a.full = rfixed_const(256 * 13); -- chunk_time.full = rfixed_mul(rdev->pm.sclk, a); -- a.full = rfixed_const(10); -- chunk_time.full = rfixed_div(chunk_time, a); -+ a.full = dfixed_const(256 * 13); -+ chunk_time.full = dfixed_mul(rdev->pm.sclk, a); -+ a.full = dfixed_const(10); -+ chunk_time.full = dfixed_div(chunk_time, a); - - /* Determine the worst case latency - * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) -@@ -342,13 +340,13 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, - * ChunkTime = time it takes the DCP to send one chunk of data to the LB - * which consists of pipeline delay and inter chunk gap - */ -- if (rfixed_trunc(wm->num_line_pair) > 1) { -- a.full = rfixed_const(3); -- wm->worst_case_latency.full = rfixed_mul(a, chunk_time); -+ if (dfixed_trunc(wm->num_line_pair) > 1) { -+ a.full = dfixed_const(3); -+ wm->worst_case_latency.full = dfixed_mul(a, chunk_time); - wm->worst_case_latency.full += read_delay_latency.full; - } else { -- a.full = rfixed_const(2); -- wm->worst_case_latency.full = rfixed_mul(a, chunk_time); -+ a.full = dfixed_const(2); -+ wm->worst_case_latency.full = dfixed_mul(a, chunk_time); - wm->worst_case_latency.full += read_delay_latency.full; - } - -@@ -362,34 +360,34 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, - * of data to the LB which consists of - * pipeline delay and inter chunk gap - */ -- if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { -+ if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) { - tolerable_latency.full = line_time.full; - } else { -- tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); -+ tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2); - tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; -- tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); -+ tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time); - tolerable_latency.full = line_time.full - tolerable_latency.full; - } - /* We assume worst case 32bits (4 bytes) */ -- wm->dbpp.full = rfixed_const(4 * 8); -+ wm->dbpp.full = dfixed_const(4 * 8); - - /* Determine the maximum priority mark - * width = viewport width in pixels - */ -- a.full = rfixed_const(16); -- wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); -- wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); -- wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max); -+ a.full = dfixed_const(16); -+ wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay); -+ wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a); -+ wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max); - - /* Determine estimated width */ - estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; -- estimated_width.full = rfixed_div(estimated_width, consumption_time); -- if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { -- wm->priority_mark.full = rfixed_const(10); -+ estimated_width.full = dfixed_div(estimated_width, consumption_time); -+ if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { -+ wm->priority_mark.full = dfixed_const(10); - } else { -- a.full = rfixed_const(16); -- wm->priority_mark.full = rfixed_div(estimated_width, a); -- wm->priority_mark.full = rfixed_ceil(wm->priority_mark); -+ a.full = dfixed_const(16); -+ wm->priority_mark.full = dfixed_div(estimated_width, a); 
-+ wm->priority_mark.full = dfixed_ceil(wm->priority_mark); - wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; - } - } -@@ -441,58 +439,58 @@ void rs690_bandwidth_update(struct radeon_device *rdev) - WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp); - - if (mode0 && mode1) { -- if (rfixed_trunc(wm0.dbpp) > 64) -- a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); -+ if (dfixed_trunc(wm0.dbpp) > 64) -+ a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); - else - a.full = wm0.num_line_pair.full; -- if (rfixed_trunc(wm1.dbpp) > 64) -- b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); -+ if (dfixed_trunc(wm1.dbpp) > 64) -+ b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); - else - b.full = wm1.num_line_pair.full; - a.full += b.full; -- fill_rate.full = rfixed_div(wm0.sclk, a); -+ fill_rate.full = dfixed_div(wm0.sclk, a); - if (wm0.consumption_rate.full > fill_rate.full) { - b.full = wm0.consumption_rate.full - fill_rate.full; -- b.full = rfixed_mul(b, wm0.active_time); -- a.full = rfixed_mul(wm0.worst_case_latency, -+ b.full = dfixed_mul(b, wm0.active_time); -+ a.full = dfixed_mul(wm0.worst_case_latency, - wm0.consumption_rate); - a.full = a.full + b.full; -- b.full = rfixed_const(16 * 1000); -- priority_mark02.full = rfixed_div(a, b); -+ b.full = dfixed_const(16 * 1000); -+ priority_mark02.full = dfixed_div(a, b); - } else { -- a.full = rfixed_mul(wm0.worst_case_latency, -+ a.full = dfixed_mul(wm0.worst_case_latency, - wm0.consumption_rate); -- b.full = rfixed_const(16 * 1000); -- priority_mark02.full = rfixed_div(a, b); -+ b.full = dfixed_const(16 * 1000); -+ priority_mark02.full = dfixed_div(a, b); - } - if (wm1.consumption_rate.full > fill_rate.full) { - b.full = wm1.consumption_rate.full - fill_rate.full; -- b.full = rfixed_mul(b, wm1.active_time); -- a.full = rfixed_mul(wm1.worst_case_latency, -+ b.full = dfixed_mul(b, wm1.active_time); -+ a.full = dfixed_mul(wm1.worst_case_latency, - wm1.consumption_rate); - a.full = a.full + b.full; -- b.full = rfixed_const(16 * 1000); -- priority_mark12.full = rfixed_div(a, b); -+ b.full = dfixed_const(16 * 1000); -+ priority_mark12.full = dfixed_div(a, b); - } else { -- a.full = rfixed_mul(wm1.worst_case_latency, -+ a.full = dfixed_mul(wm1.worst_case_latency, - wm1.consumption_rate); -- b.full = rfixed_const(16 * 1000); -- priority_mark12.full = rfixed_div(a, b); -+ b.full = dfixed_const(16 * 1000); -+ priority_mark12.full = dfixed_div(a, b); - } - if (wm0.priority_mark.full > priority_mark02.full) - priority_mark02.full = wm0.priority_mark.full; -- if (rfixed_trunc(priority_mark02) < 0) -+ if (dfixed_trunc(priority_mark02) < 0) - priority_mark02.full = 0; - if (wm0.priority_mark_max.full > priority_mark02.full) - priority_mark02.full = wm0.priority_mark_max.full; - if (wm1.priority_mark.full > priority_mark12.full) - priority_mark12.full = wm1.priority_mark.full; -- if (rfixed_trunc(priority_mark12) < 0) -+ if (dfixed_trunc(priority_mark12) < 0) - priority_mark12.full = 0; - if (wm1.priority_mark_max.full > priority_mark12.full) - priority_mark12.full = wm1.priority_mark_max.full; -- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); -- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); -+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); -+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); - if (rdev->disp_priority == 2) { - d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); - d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); -@@ -502,32 +500,32 @@ void 
rs690_bandwidth_update(struct radeon_device *rdev) - WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); - WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); - } else if (mode0) { -- if (rfixed_trunc(wm0.dbpp) > 64) -- a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); -+ if (dfixed_trunc(wm0.dbpp) > 64) -+ a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); - else - a.full = wm0.num_line_pair.full; -- fill_rate.full = rfixed_div(wm0.sclk, a); -+ fill_rate.full = dfixed_div(wm0.sclk, a); - if (wm0.consumption_rate.full > fill_rate.full) { - b.full = wm0.consumption_rate.full - fill_rate.full; -- b.full = rfixed_mul(b, wm0.active_time); -- a.full = rfixed_mul(wm0.worst_case_latency, -+ b.full = dfixed_mul(b, wm0.active_time); -+ a.full = dfixed_mul(wm0.worst_case_latency, - wm0.consumption_rate); - a.full = a.full + b.full; -- b.full = rfixed_const(16 * 1000); -- priority_mark02.full = rfixed_div(a, b); -+ b.full = dfixed_const(16 * 1000); -+ priority_mark02.full = dfixed_div(a, b); - } else { -- a.full = rfixed_mul(wm0.worst_case_latency, -+ a.full = dfixed_mul(wm0.worst_case_latency, - wm0.consumption_rate); -- b.full = rfixed_const(16 * 1000); -- priority_mark02.full = rfixed_div(a, b); -+ b.full = dfixed_const(16 * 1000); -+ priority_mark02.full = dfixed_div(a, b); - } - if (wm0.priority_mark.full > priority_mark02.full) - priority_mark02.full = wm0.priority_mark.full; -- if (rfixed_trunc(priority_mark02) < 0) -+ if (dfixed_trunc(priority_mark02) < 0) - priority_mark02.full = 0; - if (wm0.priority_mark_max.full > priority_mark02.full) - priority_mark02.full = wm0.priority_mark_max.full; -- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); -+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); - if (rdev->disp_priority == 2) - d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); - WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); -@@ -537,32 +535,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev) - WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, - S_006D4C_D2MODE_PRIORITY_B_OFF(1)); - } else { -- if (rfixed_trunc(wm1.dbpp) > 64) -- a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); -+ if (dfixed_trunc(wm1.dbpp) > 64) -+ a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); - else - a.full = wm1.num_line_pair.full; -- fill_rate.full = rfixed_div(wm1.sclk, a); -+ fill_rate.full = dfixed_div(wm1.sclk, a); - if (wm1.consumption_rate.full > fill_rate.full) { - b.full = wm1.consumption_rate.full - fill_rate.full; -- b.full = rfixed_mul(b, wm1.active_time); -- a.full = rfixed_mul(wm1.worst_case_latency, -+ b.full = dfixed_mul(b, wm1.active_time); -+ a.full = dfixed_mul(wm1.worst_case_latency, - wm1.consumption_rate); - a.full = a.full + b.full; -- b.full = rfixed_const(16 * 1000); -- priority_mark12.full = rfixed_div(a, b); -+ b.full = dfixed_const(16 * 1000); -+ priority_mark12.full = dfixed_div(a, b); - } else { -- a.full = rfixed_mul(wm1.worst_case_latency, -+ a.full = dfixed_mul(wm1.worst_case_latency, - wm1.consumption_rate); -- b.full = rfixed_const(16 * 1000); -- priority_mark12.full = rfixed_div(a, b); -+ b.full = dfixed_const(16 * 1000); -+ priority_mark12.full = dfixed_div(a, b); - } - if (wm1.priority_mark.full > priority_mark12.full) - priority_mark12.full = wm1.priority_mark.full; -- if (rfixed_trunc(priority_mark12) < 0) -+ if (dfixed_trunc(priority_mark12) < 0) - priority_mark12.full = 0; - if (wm1.priority_mark_max.full > priority_mark12.full) - priority_mark12.full = wm1.priority_mark_max.full; -- d2mode_priority_a_cnt = 
rfixed_trunc(priority_mark12); -+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); - if (rdev->disp_priority == 2) - d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); - WREG32(R_006548_D1MODE_PRIORITY_A_CNT, -@@ -653,7 +651,7 @@ int rs690_resume(struct radeon_device *rdev) - /* Resume clock before doing reset */ - rv515_clock_startup(rdev); - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ -- if (radeon_gpu_reset(rdev)) { -+ if (radeon_asic_reset(rdev)) { - dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), - RREG32(R_0007C0_CP_STAT)); -@@ -678,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev) - - void rs690_fini(struct radeon_device *rdev) - { -- radeon_pm_fini(rdev); - r100_cp_fini(rdev); - r100_wb_fini(rdev); - r100_ib_fini(rdev); -@@ -717,7 +714,7 @@ int rs690_init(struct radeon_device *rdev) - return -EINVAL; - } - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ -- if (radeon_gpu_reset(rdev)) { -+ if (radeon_asic_reset(rdev)) { - dev_warn(rdev->dev, - "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), -@@ -729,8 +726,6 @@ int rs690_init(struct radeon_device *rdev) - - /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); -- /* Initialize power management */ -- radeon_pm_init(rdev); - /* initialize memory controller */ - rs690_mc_init(rdev); - rv515_debugfs(rdev); -diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c -index 9035121..7d9a7b0 100644 ---- a/drivers/gpu/drm/radeon/rv515.c -+++ b/drivers/gpu/drm/radeon/rv515.c -@@ -147,16 +147,11 @@ void rv515_gpu_init(struct radeon_device *rdev) - { - unsigned pipe_select_current, gb_pipe_select, tmp; - -- r100_hdp_reset(rdev); -- r100_rb2d_reset(rdev); -- - if (r100_gui_wait_for_idle(rdev)) { - printk(KERN_WARNING "Failed to wait GUI idle while " - "reseting GPU. 
Bad things might happen.\n"); - } -- - rv515_vga_render_disable(rdev); -- - r420_pipes_init(rdev); - gb_pipe_select = RREG32(0x402C); - tmp = RREG32(0x170C); -@@ -174,91 +169,6 @@ void rv515_gpu_init(struct radeon_device *rdev) - } - } - --int rv515_ga_reset(struct radeon_device *rdev) --{ -- uint32_t tmp; -- bool reinit_cp; -- int i; -- -- reinit_cp = rdev->cp.ready; -- rdev->cp.ready = false; -- for (i = 0; i < rdev->usec_timeout; i++) { -- WREG32(CP_CSQ_MODE, 0); -- WREG32(CP_CSQ_CNTL, 0); -- WREG32(RBBM_SOFT_RESET, 0x32005); -- (void)RREG32(RBBM_SOFT_RESET); -- udelay(200); -- WREG32(RBBM_SOFT_RESET, 0); -- /* Wait to prevent race in RBBM_STATUS */ -- mdelay(1); -- tmp = RREG32(RBBM_STATUS); -- if (tmp & ((1 << 20) | (1 << 26))) { -- DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp); -- /* GA still busy soft reset it */ -- WREG32(0x429C, 0x200); -- WREG32(VAP_PVS_STATE_FLUSH_REG, 0); -- WREG32(0x43E0, 0); -- WREG32(0x43E4, 0); -- WREG32(0x24AC, 0); -- } -- /* Wait to prevent race in RBBM_STATUS */ -- mdelay(1); -- tmp = RREG32(RBBM_STATUS); -- if (!(tmp & ((1 << 20) | (1 << 26)))) { -- break; -- } -- } -- for (i = 0; i < rdev->usec_timeout; i++) { -- tmp = RREG32(RBBM_STATUS); -- if (!(tmp & ((1 << 20) | (1 << 26)))) { -- DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n", -- tmp); -- DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C)); -- DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0)); -- DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724)); -- if (reinit_cp) { -- return r100_cp_init(rdev, rdev->cp.ring_size); -- } -- return 0; -- } -- DRM_UDELAY(1); -- } -- tmp = RREG32(RBBM_STATUS); -- DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp); -- return -1; --} -- --int rv515_gpu_reset(struct radeon_device *rdev) --{ -- uint32_t status; -- -- /* reset order likely matter */ -- status = RREG32(RBBM_STATUS); -- /* reset HDP */ -- r100_hdp_reset(rdev); -- /* reset rb2d */ -- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) { -- r100_rb2d_reset(rdev); -- } -- /* reset GA */ -- if (status & ((1 << 20) | (1 << 26))) { -- rv515_ga_reset(rdev); -- } -- /* reset CP */ -- status = RREG32(RBBM_STATUS); -- if (status & (1 << 16)) { -- r100_cp_reset(rdev); -- } -- /* Check if GPU is idle */ -- status = RREG32(RBBM_STATUS); -- if (status & (1 << 31)) { -- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); -- return -1; -- } -- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status); -- return 0; --} -- - static void rv515_vram_get_type(struct radeon_device *rdev) - { - uint32_t tmp; -@@ -335,7 +245,7 @@ static int rv515_debugfs_ga_info(struct seq_file *m, void *data) - - tmp = RREG32(0x2140); - seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp); -- radeon_gpu_reset(rdev); -+ radeon_asic_reset(rdev); - tmp = RREG32(0x425C); - seq_printf(m, "GA_IDLE 0x%08x\n", tmp); - return 0; -@@ -503,7 +413,7 @@ int rv515_resume(struct radeon_device *rdev) - /* Resume clock before doing reset */ - rv515_clock_startup(rdev); - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ -- if (radeon_gpu_reset(rdev)) { -+ if (radeon_asic_reset(rdev)) { - dev_warn(rdev->dev, "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), - RREG32(R_0007C0_CP_STAT)); -@@ -535,7 +445,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev) - - void rv515_fini(struct radeon_device *rdev) - { -- radeon_pm_fini(rdev); - r100_cp_fini(rdev); - r100_wb_fini(rdev); - r100_ib_fini(rdev); -@@ -573,7 +482,7 @@ int rv515_init(struct radeon_device *rdev) - return -EINVAL; - } - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ -- if (radeon_gpu_reset(rdev)) { -+ if (radeon_asic_reset(rdev)) { - dev_warn(rdev->dev, - "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), -@@ -584,8 +493,6 @@ int rv515_init(struct radeon_device *rdev) - return -EINVAL; - /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); -- /* Initialize power management */ -- radeon_pm_init(rdev); - /* initialize AGP */ - if (rdev->flags & RADEON_IS_AGP) { - r = radeon_agp_init(rdev); -@@ -885,20 +792,20 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, - return; - } - -- if (crtc->vsc.full > rfixed_const(2)) -- wm->num_line_pair.full = rfixed_const(2); -+ if (crtc->vsc.full > dfixed_const(2)) -+ wm->num_line_pair.full = dfixed_const(2); - else -- wm->num_line_pair.full = rfixed_const(1); -- -- b.full = rfixed_const(mode->crtc_hdisplay); -- c.full = rfixed_const(256); -- a.full = rfixed_div(b, c); -- request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair); -- request_fifo_depth.full = rfixed_ceil(request_fifo_depth); -- if (a.full < rfixed_const(4)) { -+ wm->num_line_pair.full = dfixed_const(1); -+ -+ b.full = dfixed_const(mode->crtc_hdisplay); -+ c.full = dfixed_const(256); -+ a.full = dfixed_div(b, c); -+ request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair); -+ request_fifo_depth.full = dfixed_ceil(request_fifo_depth); -+ if (a.full < dfixed_const(4)) { - wm->lb_request_fifo_depth = 4; - } else { -- wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); -+ wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth); - } - - /* Determine consumption rate -@@ -907,23 +814,23 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, - * vsc = vertical scaling ratio, defined as source/destination - * hsc = horizontal scaling ration, defined as source/destination - */ -- a.full = rfixed_const(mode->clock); -- b.full = rfixed_const(1000); -- a.full = rfixed_div(a, b); -- pclk.full = rfixed_div(b, a); -+ a.full = dfixed_const(mode->clock); -+ b.full = dfixed_const(1000); -+ a.full = dfixed_div(a, b); -+ pclk.full = dfixed_div(b, a); - if (crtc->rmx_type != RMX_OFF) { -- b.full = rfixed_const(2); -+ b.full = dfixed_const(2); - if (crtc->vsc.full > b.full) - b.full = crtc->vsc.full; -- b.full = rfixed_mul(b, crtc->hsc); -- c.full = rfixed_const(2); -- b.full = rfixed_div(b, c); -- consumption_time.full = rfixed_div(pclk, b); -+ b.full = dfixed_mul(b, crtc->hsc); -+ c.full = dfixed_const(2); -+ b.full = dfixed_div(b, c); -+ consumption_time.full = dfixed_div(pclk, b); - } else { - consumption_time.full = pclk.full; - } -- a.full = rfixed_const(1); -- wm->consumption_rate.full = rfixed_div(a, consumption_time); -+ a.full = dfixed_const(1); -+ wm->consumption_rate.full = dfixed_div(a, consumption_time); - - - /* Determine line time -@@ -931,27 +838,27 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, - * LineTime = total number of horizontal pixels - * pclk = pixel clock period(ns) - */ -- a.full = rfixed_const(crtc->base.mode.crtc_htotal); -- line_time.full = rfixed_mul(a, pclk); -+ a.full = 
dfixed_const(crtc->base.mode.crtc_htotal); -+ line_time.full = dfixed_mul(a, pclk); - - /* Determine active time - * ActiveTime = time of active region of display within one line, - * hactive = total number of horizontal active pixels - * htotal = total number of horizontal pixels - */ -- a.full = rfixed_const(crtc->base.mode.crtc_htotal); -- b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); -- wm->active_time.full = rfixed_mul(line_time, b); -- wm->active_time.full = rfixed_div(wm->active_time, a); -+ a.full = dfixed_const(crtc->base.mode.crtc_htotal); -+ b.full = dfixed_const(crtc->base.mode.crtc_hdisplay); -+ wm->active_time.full = dfixed_mul(line_time, b); -+ wm->active_time.full = dfixed_div(wm->active_time, a); - - /* Determine chunk time - * ChunkTime = the time it takes the DCP to send one chunk of data - * to the LB which consists of pipeline delay and inter chunk gap - * sclk = system clock(Mhz) - */ -- a.full = rfixed_const(600 * 1000); -- chunk_time.full = rfixed_div(a, rdev->pm.sclk); -- read_delay_latency.full = rfixed_const(1000); -+ a.full = dfixed_const(600 * 1000); -+ chunk_time.full = dfixed_div(a, rdev->pm.sclk); -+ read_delay_latency.full = dfixed_const(1000); - - /* Determine the worst case latency - * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) -@@ -961,9 +868,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, - * ChunkTime = time it takes the DCP to send one chunk of data to the LB - * which consists of pipeline delay and inter chunk gap - */ -- if (rfixed_trunc(wm->num_line_pair) > 1) { -- a.full = rfixed_const(3); -- wm->worst_case_latency.full = rfixed_mul(a, chunk_time); -+ if (dfixed_trunc(wm->num_line_pair) > 1) { -+ a.full = dfixed_const(3); -+ wm->worst_case_latency.full = dfixed_mul(a, chunk_time); - wm->worst_case_latency.full += read_delay_latency.full; - } else { - wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full; -@@ -979,34 +886,34 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, - * of data to the LB which consists of - * pipeline delay and inter chunk gap - */ -- if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { -+ if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) { - tolerable_latency.full = line_time.full; - } else { -- tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); -+ tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2); - tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; -- tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); -+ tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time); - tolerable_latency.full = line_time.full - tolerable_latency.full; - } - /* We assume worst case 32bits (4 bytes) */ -- wm->dbpp.full = rfixed_const(2 * 16); -+ wm->dbpp.full = dfixed_const(2 * 16); - - /* Determine the maximum priority mark - * width = viewport width in pixels - */ -- a.full = rfixed_const(16); -- wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); -- wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); -- wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max); -+ a.full = dfixed_const(16); -+ wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay); -+ wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a); -+ wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max); - - /* Determine estimated width */ - estimated_width.full = tolerable_latency.full - 
wm->worst_case_latency.full; -- estimated_width.full = rfixed_div(estimated_width, consumption_time); -- if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { -+ estimated_width.full = dfixed_div(estimated_width, consumption_time); -+ if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { - wm->priority_mark.full = wm->priority_mark_max.full; - } else { -- a.full = rfixed_const(16); -- wm->priority_mark.full = rfixed_div(estimated_width, a); -- wm->priority_mark.full = rfixed_ceil(wm->priority_mark); -+ a.full = dfixed_const(16); -+ wm->priority_mark.full = dfixed_div(estimated_width, a); -+ wm->priority_mark.full = dfixed_ceil(wm->priority_mark); - wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; - } - } -@@ -1035,58 +942,58 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) - WREG32(LB_MAX_REQ_OUTSTANDING, tmp); - - if (mode0 && mode1) { -- if (rfixed_trunc(wm0.dbpp) > 64) -- a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); -+ if (dfixed_trunc(wm0.dbpp) > 64) -+ a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair); - else - a.full = wm0.num_line_pair.full; -- if (rfixed_trunc(wm1.dbpp) > 64) -- b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); -+ if (dfixed_trunc(wm1.dbpp) > 64) -+ b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair); - else - b.full = wm1.num_line_pair.full; - a.full += b.full; -- fill_rate.full = rfixed_div(wm0.sclk, a); -+ fill_rate.full = dfixed_div(wm0.sclk, a); - if (wm0.consumption_rate.full > fill_rate.full) { - b.full = wm0.consumption_rate.full - fill_rate.full; -- b.full = rfixed_mul(b, wm0.active_time); -- a.full = rfixed_const(16); -- b.full = rfixed_div(b, a); -- a.full = rfixed_mul(wm0.worst_case_latency, -+ b.full = dfixed_mul(b, wm0.active_time); -+ a.full = dfixed_const(16); -+ b.full = dfixed_div(b, a); -+ a.full = dfixed_mul(wm0.worst_case_latency, - wm0.consumption_rate); - priority_mark02.full = a.full + b.full; - } else { -- a.full = rfixed_mul(wm0.worst_case_latency, -+ a.full = dfixed_mul(wm0.worst_case_latency, - wm0.consumption_rate); -- b.full = rfixed_const(16 * 1000); -- priority_mark02.full = rfixed_div(a, b); -+ b.full = dfixed_const(16 * 1000); -+ priority_mark02.full = dfixed_div(a, b); - } - if (wm1.consumption_rate.full > fill_rate.full) { - b.full = wm1.consumption_rate.full - fill_rate.full; -- b.full = rfixed_mul(b, wm1.active_time); -- a.full = rfixed_const(16); -- b.full = rfixed_div(b, a); -- a.full = rfixed_mul(wm1.worst_case_latency, -+ b.full = dfixed_mul(b, wm1.active_time); -+ a.full = dfixed_const(16); -+ b.full = dfixed_div(b, a); -+ a.full = dfixed_mul(wm1.worst_case_latency, - wm1.consumption_rate); - priority_mark12.full = a.full + b.full; - } else { -- a.full = rfixed_mul(wm1.worst_case_latency, -+ a.full = dfixed_mul(wm1.worst_case_latency, - wm1.consumption_rate); -- b.full = rfixed_const(16 * 1000); -- priority_mark12.full = rfixed_div(a, b); -+ b.full = dfixed_const(16 * 1000); -+ priority_mark12.full = dfixed_div(a, b); - } - if (wm0.priority_mark.full > priority_mark02.full) - priority_mark02.full = wm0.priority_mark.full; -- if (rfixed_trunc(priority_mark02) < 0) -+ if (dfixed_trunc(priority_mark02) < 0) - priority_mark02.full = 0; - if (wm0.priority_mark_max.full > priority_mark02.full) - priority_mark02.full = wm0.priority_mark_max.full; - if (wm1.priority_mark.full > priority_mark12.full) - priority_mark12.full = wm1.priority_mark.full; -- if (rfixed_trunc(priority_mark12) < 0) -+ if (dfixed_trunc(priority_mark12) < 0) - priority_mark12.full = 
0; - if (wm1.priority_mark_max.full > priority_mark12.full) - priority_mark12.full = wm1.priority_mark_max.full; -- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); -- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); -+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); -+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); - if (rdev->disp_priority == 2) { - d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; - d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; -@@ -1096,32 +1003,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) - WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); - WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); - } else if (mode0) { -- if (rfixed_trunc(wm0.dbpp) > 64) -- a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); -+ if (dfixed_trunc(wm0.dbpp) > 64) -+ a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair); - else - a.full = wm0.num_line_pair.full; -- fill_rate.full = rfixed_div(wm0.sclk, a); -+ fill_rate.full = dfixed_div(wm0.sclk, a); - if (wm0.consumption_rate.full > fill_rate.full) { - b.full = wm0.consumption_rate.full - fill_rate.full; -- b.full = rfixed_mul(b, wm0.active_time); -- a.full = rfixed_const(16); -- b.full = rfixed_div(b, a); -- a.full = rfixed_mul(wm0.worst_case_latency, -+ b.full = dfixed_mul(b, wm0.active_time); -+ a.full = dfixed_const(16); -+ b.full = dfixed_div(b, a); -+ a.full = dfixed_mul(wm0.worst_case_latency, - wm0.consumption_rate); - priority_mark02.full = a.full + b.full; - } else { -- a.full = rfixed_mul(wm0.worst_case_latency, -+ a.full = dfixed_mul(wm0.worst_case_latency, - wm0.consumption_rate); -- b.full = rfixed_const(16); -- priority_mark02.full = rfixed_div(a, b); -+ b.full = dfixed_const(16); -+ priority_mark02.full = dfixed_div(a, b); - } - if (wm0.priority_mark.full > priority_mark02.full) - priority_mark02.full = wm0.priority_mark.full; -- if (rfixed_trunc(priority_mark02) < 0) -+ if (dfixed_trunc(priority_mark02) < 0) - priority_mark02.full = 0; - if (wm0.priority_mark_max.full > priority_mark02.full) - priority_mark02.full = wm0.priority_mark_max.full; -- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02); -+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); - if (rdev->disp_priority == 2) - d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; - WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); -@@ -1129,32 +1036,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) - WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); - WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); - } else { -- if (rfixed_trunc(wm1.dbpp) > 64) -- a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); -+ if (dfixed_trunc(wm1.dbpp) > 64) -+ a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair); - else - a.full = wm1.num_line_pair.full; -- fill_rate.full = rfixed_div(wm1.sclk, a); -+ fill_rate.full = dfixed_div(wm1.sclk, a); - if (wm1.consumption_rate.full > fill_rate.full) { - b.full = wm1.consumption_rate.full - fill_rate.full; -- b.full = rfixed_mul(b, wm1.active_time); -- a.full = rfixed_const(16); -- b.full = rfixed_div(b, a); -- a.full = rfixed_mul(wm1.worst_case_latency, -+ b.full = dfixed_mul(b, wm1.active_time); -+ a.full = dfixed_const(16); -+ b.full = dfixed_div(b, a); -+ a.full = dfixed_mul(wm1.worst_case_latency, - wm1.consumption_rate); - priority_mark12.full = a.full + b.full; - } else { -- a.full = rfixed_mul(wm1.worst_case_latency, -+ a.full = dfixed_mul(wm1.worst_case_latency, - wm1.consumption_rate); -- b.full = rfixed_const(16 * 1000); -- priority_mark12.full = rfixed_div(a, b); -+ 
b.full = dfixed_const(16 * 1000); -+ priority_mark12.full = dfixed_div(a, b); - } - if (wm1.priority_mark.full > priority_mark12.full) - priority_mark12.full = wm1.priority_mark.full; -- if (rfixed_trunc(priority_mark12) < 0) -+ if (dfixed_trunc(priority_mark12) < 0) - priority_mark12.full = 0; - if (wm1.priority_mark_max.full > priority_mark12.full) - priority_mark12.full = wm1.priority_mark_max.full; -- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12); -+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); - if (rdev->disp_priority == 2) - d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; - WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); -diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h -index fc216e4..590309a 100644 ---- a/drivers/gpu/drm/radeon/rv515d.h -+++ b/drivers/gpu/drm/radeon/rv515d.h -@@ -217,6 +217,52 @@ - #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) - - /* Registers */ -+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0 -+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0) -+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1) -+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE -+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1) -+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1) -+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD -+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2) -+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1) -+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB -+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3) -+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1) -+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7 -+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4) -+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1) -+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF -+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5) -+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1) -+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF -+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6) -+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1) -+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF -+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7) -+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1) -+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F -+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8) -+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1) -+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF -+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9) -+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1) -+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF -+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10) -+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1) -+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF -+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11) -+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1) -+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF -+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12) -+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1) -+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF -+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13) -+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1) -+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF -+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14) -+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1) -+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF - #define R_0000F8_CONFIG_MEMSIZE 0x0000F8 - #define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0) - #define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF) -diff --git a/drivers/gpu/drm/radeon/rv770.c 
b/drivers/gpu/drm/radeon/rv770.c -index 97958a6..cec536c 100644 ---- a/drivers/gpu/drm/radeon/rv770.c -+++ b/drivers/gpu/drm/radeon/rv770.c -@@ -42,6 +42,21 @@ - static void rv770_gpu_init(struct radeon_device *rdev); - void rv770_fini(struct radeon_device *rdev); - -+void rv770_pm_misc(struct radeon_device *rdev) -+{ -+ int req_ps_idx = rdev->pm.requested_power_state_index; -+ int req_cm_idx = rdev->pm.requested_clock_mode_index; -+ struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; -+ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; -+ -+ if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { -+ if (voltage->voltage != rdev->pm.current_vddc) { -+ radeon_atom_set_voltage(rdev, voltage->voltage); -+ rdev->pm.current_vddc = voltage->voltage; -+ DRM_DEBUG("Setting: v: %d\n", voltage->voltage); -+ } -+ } -+} - - /* - * GART -@@ -237,7 +252,6 @@ void r700_cp_stop(struct radeon_device *rdev) - WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); - } - -- - static int rv770_cp_load_microcode(struct radeon_device *rdev) - { - const __be32 *fw_data; -@@ -272,6 +286,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev) - return 0; - } - -+void r700_cp_fini(struct radeon_device *rdev) -+{ -+ r700_cp_stop(rdev); -+ radeon_ring_fini(rdev); -+} - - /* - * Core functions -@@ -906,23 +925,12 @@ int rv770_mc_init(struct radeon_device *rdev) - rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); - rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); - rdev->mc.visible_vram_size = rdev->mc.aper_size; -- /* FIXME remove this once we support unmappable VRAM */ -- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) { -- rdev->mc.mc_vram_size = rdev->mc.aper_size; -- rdev->mc.real_vram_size = rdev->mc.aper_size; -- } - r600_vram_gtt_location(rdev, &rdev->mc); - radeon_update_bandwidth_info(rdev); - - return 0; - } - --int rv770_gpu_reset(struct radeon_device *rdev) --{ -- /* FIXME: implement any rv770 specific bits */ -- return r600_gpu_reset(rdev); --} -- - static int rv770_startup(struct radeon_device *rdev) - { - int r; -@@ -1094,8 +1102,6 @@ int rv770_init(struct radeon_device *rdev) - r = radeon_clocks_init(rdev); - if (r) - return r; -- /* Initialize power management */ -- radeon_pm_init(rdev); - /* Fence driver */ - r = radeon_fence_driver_init(rdev); - if (r) -@@ -1132,7 +1138,7 @@ int rv770_init(struct radeon_device *rdev) - r = rv770_startup(rdev); - if (r) { - dev_err(rdev->dev, "disabling GPU acceleration\n"); -- r600_cp_fini(rdev); -+ r700_cp_fini(rdev); - r600_wb_fini(rdev); - r600_irq_fini(rdev); - radeon_irq_kms_fini(rdev); -@@ -1164,9 +1170,8 @@ int rv770_init(struct radeon_device *rdev) - - void rv770_fini(struct radeon_device *rdev) - { -- radeon_pm_fini(rdev); - r600_blit_fini(rdev); -- r600_cp_fini(rdev); -+ r700_cp_fini(rdev); - r600_wb_fini(rdev); - r600_irq_fini(rdev); - radeon_irq_kms_fini(rdev); -diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c -index bff6fc2..2d0c9ca 100644 ---- a/drivers/gpu/drm/savage/savage_bci.c -+++ b/drivers/gpu/drm/savage/savage_bci.c -@@ -539,11 +539,10 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset) - { - drm_savage_private_t *dev_priv; - -- dev_priv = kmalloc(sizeof(drm_savage_private_t), GFP_KERNEL); -+ dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL); - if (dev_priv == NULL) - return -ENOMEM; - -- memset(dev_priv, 0, sizeof(drm_savage_private_t)); - dev->dev_private = (void *)dev_priv; - - dev_priv->chipset = (enum savage_family)chipset; -diff 
--git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile -index 1e138f5..4256e20 100644 ---- a/drivers/gpu/drm/ttm/Makefile -+++ b/drivers/gpu/drm/ttm/Makefile -@@ -4,6 +4,6 @@ - ccflags-y := -Iinclude/drm - ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ - ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \ -- ttm_object.o ttm_lock.o ttm_execbuf_util.o -+ ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o - - obj-$(CONFIG_DRM_TTM) += ttm.o -diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c -index 0e3754a..555ebb1 100644 ---- a/drivers/gpu/drm/ttm/ttm_bo.c -+++ b/drivers/gpu/drm/ttm/ttm_bo.c -@@ -79,8 +79,6 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) - printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type); - printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags); - printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset); -- printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset); -- printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size); - printk(KERN_ERR TTM_PFX " size: %llu\n", man->size); - printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n", - man->available_caching); -@@ -357,7 +355,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) - - static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem, -- bool evict, bool interruptible, bool no_wait) -+ bool evict, bool interruptible, -+ bool no_wait_reserve, bool no_wait_gpu) - { - struct ttm_bo_device *bdev = bo->bdev; - bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); -@@ -402,12 +401,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, - - if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && - !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) -- ret = ttm_bo_move_ttm(bo, evict, no_wait, mem); -+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem); - else if (bdev->driver->move) - ret = bdev->driver->move(bo, evict, interruptible, -- no_wait, mem); -+ no_wait_reserve, no_wait_gpu, mem); - else -- ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem); -+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem); - - if (ret) - goto out_err; -@@ -605,8 +604,22 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo) - } - EXPORT_SYMBOL(ttm_bo_unref); - -+int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev) -+{ -+ return cancel_delayed_work_sync(&bdev->wq); -+} -+EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue); -+ -+void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched) -+{ -+ if (resched) -+ schedule_delayed_work(&bdev->wq, -+ ((HZ / 100) < 1) ? 
1 : HZ / 100); -+} -+EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue); -+ - static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, -- bool no_wait) -+ bool no_wait_reserve, bool no_wait_gpu) - { - struct ttm_bo_device *bdev = bo->bdev; - struct ttm_bo_global *glob = bo->glob; -@@ -615,7 +628,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, - int ret = 0; - - spin_lock(&bo->lock); -- ret = ttm_bo_wait(bo, false, interruptible, no_wait); -+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); - spin_unlock(&bo->lock); - - if (unlikely(ret != 0)) { -@@ -631,6 +644,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, - - evict_mem = bo->mem; - evict_mem.mm_node = NULL; -+ evict_mem.bus.io_reserved = false; - - placement.fpfn = 0; - placement.lpfn = 0; -@@ -638,7 +652,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, - placement.num_busy_placement = 0; - bdev->driver->evict_flags(bo, &placement); - ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, -- no_wait); -+ no_wait_reserve, no_wait_gpu); - if (ret) { - if (ret != -ERESTARTSYS) { - printk(KERN_ERR TTM_PFX -@@ -650,7 +664,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, - } - - ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, -- no_wait); -+ no_wait_reserve, no_wait_gpu); - if (ret) { - if (ret != -ERESTARTSYS) - printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); -@@ -670,7 +684,8 @@ out: - - static int ttm_mem_evict_first(struct ttm_bo_device *bdev, - uint32_t mem_type, -- bool interruptible, bool no_wait) -+ bool interruptible, bool no_wait_reserve, -+ bool no_wait_gpu) - { - struct ttm_bo_global *glob = bdev->glob; - struct ttm_mem_type_manager *man = &bdev->man[mem_type]; -@@ -687,11 +702,11 @@ retry: - bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru); - kref_get(&bo->list_kref); - -- ret = ttm_bo_reserve_locked(bo, false, true, false, 0); -+ ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0); - - if (unlikely(ret == -EBUSY)) { - spin_unlock(&glob->lru_lock); -- if (likely(!no_wait)) -+ if (likely(!no_wait_gpu)) - ret = ttm_bo_wait_unreserved(bo, interruptible); - - kref_put(&bo->list_kref, ttm_bo_release_list); -@@ -713,7 +728,7 @@ retry: - while (put_count--) - kref_put(&bo->list_kref, ttm_bo_ref_bug); - -- ret = ttm_bo_evict(bo, interruptible, no_wait); -+ ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu); - ttm_bo_unreserve(bo); - - kref_put(&bo->list_kref, ttm_bo_release_list); -@@ -764,7 +779,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, - uint32_t mem_type, - struct ttm_placement *placement, - struct ttm_mem_reg *mem, -- bool interruptible, bool no_wait) -+ bool interruptible, -+ bool no_wait_reserve, -+ bool no_wait_gpu) - { - struct ttm_bo_device *bdev = bo->bdev; - struct ttm_bo_global *glob = bdev->glob; -@@ -785,7 +802,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, - } - spin_unlock(&glob->lru_lock); - ret = ttm_mem_evict_first(bdev, mem_type, interruptible, -- no_wait); -+ no_wait_reserve, no_wait_gpu); - if (unlikely(ret != 0)) - return ret; - } while (1); -@@ -855,7 +872,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, - int ttm_bo_mem_space(struct ttm_buffer_object *bo, - struct ttm_placement *placement, - struct ttm_mem_reg *mem, -- bool interruptible, bool no_wait) -+ bool interruptible, bool no_wait_reserve, -+ bool no_wait_gpu) - { - 
struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man; -@@ -952,7 +970,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, - } - - ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, -- interruptible, no_wait); -+ interruptible, no_wait_reserve, no_wait_gpu); - if (ret == 0 && mem->mm_node) { - mem->placement = cur_flags; - mem->mm_node->private = bo; -@@ -978,7 +996,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu); - - int ttm_bo_move_buffer(struct ttm_buffer_object *bo, - struct ttm_placement *placement, -- bool interruptible, bool no_wait) -+ bool interruptible, bool no_wait_reserve, -+ bool no_wait_gpu) - { - struct ttm_bo_global *glob = bo->glob; - int ret = 0; -@@ -992,20 +1011,21 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, - * instead of doing it here. - */ - spin_lock(&bo->lock); -- ret = ttm_bo_wait(bo, false, interruptible, no_wait); -+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); - spin_unlock(&bo->lock); - if (ret) - return ret; - mem.num_pages = bo->num_pages; - mem.size = mem.num_pages << PAGE_SHIFT; - mem.page_alignment = bo->mem.page_alignment; -+ mem.bus.io_reserved = false; - /* - * Determine where to move the buffer. - */ -- ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait); -+ ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu); - if (ret) - goto out_unlock; -- ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait); -+ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu); - out_unlock: - if (ret && mem.mm_node) { - spin_lock(&glob->lru_lock); -@@ -1039,7 +1059,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement, - - int ttm_bo_validate(struct ttm_buffer_object *bo, - struct ttm_placement *placement, -- bool interruptible, bool no_wait) -+ bool interruptible, bool no_wait_reserve, -+ bool no_wait_gpu) - { - int ret; - -@@ -1054,7 +1075,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, - */ - ret = ttm_bo_mem_compat(placement, &bo->mem); - if (ret < 0) { -- ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait); -+ ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu); - if (ret) - return ret; - } else { -@@ -1153,6 +1174,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, - bo->mem.num_pages = bo->num_pages; - bo->mem.mm_node = NULL; - bo->mem.page_alignment = page_alignment; -+ bo->mem.bus.io_reserved = false; - bo->buffer_start = buffer_start & PAGE_MASK; - bo->priv_flags = 0; - bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); -@@ -1175,7 +1197,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, - goto out_err; - } - -- ret = ttm_bo_validate(bo, placement, interruptible, false); -+ ret = ttm_bo_validate(bo, placement, interruptible, false, false); - if (ret) - goto out_err; - -@@ -1249,7 +1271,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, - spin_lock(&glob->lru_lock); - while (!list_empty(&man->lru)) { - spin_unlock(&glob->lru_lock); -- ret = ttm_mem_evict_first(bdev, mem_type, false, false); -+ ret = ttm_mem_evict_first(bdev, mem_type, false, false, false); - if (ret) { - if (allow_errors) { - return ret; -@@ -1553,26 +1575,6 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) - return true; - } - --int ttm_bo_pci_offset(struct ttm_bo_device *bdev, -- struct ttm_mem_reg *mem, -- unsigned long *bus_base, -- unsigned long *bus_offset, unsigned long *bus_size) --{ -- struct ttm_mem_type_manager *man = 
&bdev->man[mem->mem_type]; -- -- *bus_size = 0; -- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) -- return -EINVAL; -- -- if (ttm_mem_reg_is_pci(bdev, mem)) { -- *bus_offset = mem->mm_node->start << PAGE_SHIFT; -- *bus_size = mem->num_pages << PAGE_SHIFT; -- *bus_base = man->io_offset; -- } -- -- return 0; --} -- - void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) - { - struct ttm_bo_device *bdev = bo->bdev; -@@ -1581,8 +1583,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) - - if (!bdev->dev_mapping) - return; -- - unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); -+ ttm_mem_io_free(bdev, &bo->mem); - } - EXPORT_SYMBOL(ttm_bo_unmap_virtual); - -@@ -1811,7 +1813,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) - evict_mem.mem_type = TTM_PL_SYSTEM; - - ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, -- false, false); -+ false, false, false); - if (unlikely(ret != 0)) - goto out; - } -diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c -index d764e82..13012a1 100644 ---- a/drivers/gpu/drm/ttm/ttm_bo_util.c -+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c -@@ -50,7 +50,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo) - } - - int ttm_bo_move_ttm(struct ttm_buffer_object *bo, -- bool evict, bool no_wait, struct ttm_mem_reg *new_mem) -+ bool evict, bool no_wait_reserve, -+ bool no_wait_gpu, struct ttm_mem_reg *new_mem) - { - struct ttm_tt *ttm = bo->ttm; - struct ttm_mem_reg *old_mem = &bo->mem; -@@ -81,30 +82,51 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, - } - EXPORT_SYMBOL(ttm_bo_move_ttm); - -+int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) -+{ -+ int ret; -+ -+ if (!mem->bus.io_reserved) { -+ mem->bus.io_reserved = true; -+ ret = bdev->driver->io_mem_reserve(bdev, mem); -+ if (unlikely(ret != 0)) -+ return ret; -+ } -+ return 0; -+} -+ -+void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) -+{ -+ if (bdev->driver->io_mem_reserve) { -+ if (mem->bus.io_reserved) { -+ mem->bus.io_reserved = false; -+ bdev->driver->io_mem_free(bdev, mem); -+ } -+ } -+} -+ - int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, - void **virtual) - { -- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; -- unsigned long bus_offset; -- unsigned long bus_size; -- unsigned long bus_base; - int ret; - void *addr; - - *virtual = NULL; -- ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size); -- if (ret || bus_size == 0) -+ ret = ttm_mem_io_reserve(bdev, mem); -+ if (ret || !mem->bus.is_iomem) - return ret; - -- if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) -- addr = (void *)(((u8 *) man->io_addr) + bus_offset); -- else { -+ if (mem->bus.addr) { -+ addr = mem->bus.addr; -+ } else { - if (mem->placement & TTM_PL_FLAG_WC) -- addr = ioremap_wc(bus_base + bus_offset, bus_size); -+ addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size); - else -- addr = ioremap_nocache(bus_base + bus_offset, bus_size); -- if (!addr) -+ addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size); -+ if (!addr) { -+ ttm_mem_io_free(bdev, mem); - return -ENOMEM; -+ } - } - *virtual = addr; - return 0; -@@ -117,8 +139,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, - - man = &bdev->man[mem->mem_type]; - -- if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) -+ if (virtual && mem->bus.addr == NULL) - iounmap(virtual); -+ ttm_mem_io_free(bdev, mem); - } - - static int ttm_copy_io_page(void 
*dst, void *src, unsigned long page) -@@ -208,7 +231,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, - } - - int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, -- bool evict, bool no_wait, struct ttm_mem_reg *new_mem) -+ bool evict, bool no_wait_reserve, bool no_wait_gpu, -+ struct ttm_mem_reg *new_mem) - { - struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; -@@ -369,26 +393,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp) - EXPORT_SYMBOL(ttm_io_prot); - - static int ttm_bo_ioremap(struct ttm_buffer_object *bo, -- unsigned long bus_base, -- unsigned long bus_offset, -- unsigned long bus_size, -+ unsigned long offset, -+ unsigned long size, - struct ttm_bo_kmap_obj *map) - { -- struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_reg *mem = &bo->mem; -- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; - -- if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) { -+ if (bo->mem.bus.addr) { - map->bo_kmap_type = ttm_bo_map_premapped; -- map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset); -+ map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset); - } else { - map->bo_kmap_type = ttm_bo_map_iomap; - if (mem->placement & TTM_PL_FLAG_WC) -- map->virtual = ioremap_wc(bus_base + bus_offset, -- bus_size); -+ map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset, -+ size); - else -- map->virtual = ioremap_nocache(bus_base + bus_offset, -- bus_size); -+ map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset, -+ size); - } - return (!map->virtual) ? -ENOMEM : 0; - } -@@ -441,13 +462,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, - unsigned long start_page, unsigned long num_pages, - struct ttm_bo_kmap_obj *map) - { -+ unsigned long offset, size; - int ret; -- unsigned long bus_base; -- unsigned long bus_offset; -- unsigned long bus_size; - - BUG_ON(!list_empty(&bo->swap)); - map->virtual = NULL; -+ map->bo = bo; - if (num_pages > bo->num_pages) - return -EINVAL; - if (start_page > bo->num_pages) -@@ -456,16 +476,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, - if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC)) - return -EPERM; - #endif -- ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base, -- &bus_offset, &bus_size); -+ ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); - if (ret) - return ret; -- if (bus_size == 0) { -+ if (!bo->mem.bus.is_iomem) { - return ttm_bo_kmap_ttm(bo, start_page, num_pages, map); - } else { -- bus_offset += start_page << PAGE_SHIFT; -- bus_size = num_pages << PAGE_SHIFT; -- return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map); -+ offset = start_page << PAGE_SHIFT; -+ size = num_pages << PAGE_SHIFT; -+ return ttm_bo_ioremap(bo, offset, size, map); - } - } - EXPORT_SYMBOL(ttm_bo_kmap); -@@ -477,6 +496,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) - switch (map->bo_kmap_type) { - case ttm_bo_map_iomap: - iounmap(map->virtual); -+ ttm_mem_io_free(map->bo->bdev, &map->bo->mem); - break; - case ttm_bo_map_vmap: - vunmap(map->virtual); -@@ -494,39 +514,11 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) - } - EXPORT_SYMBOL(ttm_bo_kunmap); - --int ttm_bo_pfn_prot(struct ttm_buffer_object *bo, -- unsigned long dst_offset, -- unsigned long *pfn, pgprot_t *prot) --{ -- struct ttm_mem_reg *mem = &bo->mem; -- struct ttm_bo_device *bdev = bo->bdev; -- unsigned long bus_offset; -- unsigned long bus_size; -- unsigned long bus_base; -- int ret; -- ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, -- 
&bus_size); -- if (ret) -- return -EINVAL; -- if (bus_size != 0) -- *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT; -- else -- if (!bo->ttm) -- return -EINVAL; -- else -- *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm, -- dst_offset >> -- PAGE_SHIFT)); -- *prot = (mem->placement & TTM_PL_FLAG_CACHED) ? -- PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL); -- -- return 0; --} -- - int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, - void *sync_obj, - void *sync_obj_arg, -- bool evict, bool no_wait, -+ bool evict, bool no_wait_reserve, -+ bool no_wait_gpu, - struct ttm_mem_reg *new_mem) - { - struct ttm_bo_device *bdev = bo->bdev; -diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c -index 668dbe8..fe6cb77 100644 ---- a/drivers/gpu/drm/ttm/ttm_bo_vm.c -+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c -@@ -74,9 +74,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) - struct ttm_buffer_object *bo = (struct ttm_buffer_object *) - vma->vm_private_data; - struct ttm_bo_device *bdev = bo->bdev; -- unsigned long bus_base; -- unsigned long bus_offset; -- unsigned long bus_size; - unsigned long page_offset; - unsigned long page_last; - unsigned long pfn; -@@ -84,7 +81,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) - struct page *page; - int ret; - int i; -- bool is_iomem; - unsigned long address = (unsigned long)vmf->virtual_address; - int retval = VM_FAULT_NOPAGE; - -@@ -101,8 +97,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) - return VM_FAULT_NOPAGE; - } - -- if (bdev->driver->fault_reserve_notify) -- bdev->driver->fault_reserve_notify(bo); -+ if (bdev->driver->fault_reserve_notify) { -+ ret = bdev->driver->fault_reserve_notify(bo); -+ switch (ret) { -+ case 0: -+ break; -+ case -EBUSY: -+ set_need_resched(); -+ case -ERESTARTSYS: -+ retval = VM_FAULT_NOPAGE; -+ goto out_unlock; -+ default: -+ retval = VM_FAULT_SIGBUS; -+ goto out_unlock; -+ } -+ } - - /* - * Wait for buffer data in transit, due to a pipelined -@@ -122,15 +131,12 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) - spin_unlock(&bo->lock); - - -- ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset, -- &bus_size); -- if (unlikely(ret != 0)) { -+ ret = ttm_mem_io_reserve(bdev, &bo->mem); -+ if (ret) { - retval = VM_FAULT_SIGBUS; - goto out_unlock; - } - -- is_iomem = (bus_size != 0); -- - page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + - bo->vm_node->start - vma->vm_pgoff; - page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + -@@ -154,8 +160,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) - * vma->vm_page_prot when the object changes caching policy, with - * the correct locks held. - */ -- -- if (is_iomem) { -+ if (bo->mem.bus.is_iomem) { - vma->vm_page_prot = ttm_io_prot(bo->mem.placement, - vma->vm_page_prot); - } else { -@@ -171,10 +176,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) - */ - - for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) { -- -- if (is_iomem) -- pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + -- page_offset; -+ if (bo->mem.bus.is_iomem) -+ pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset; - else { - page = ttm_tt_get_page(ttm, page_offset); - if (unlikely(!page && i == 0)) { -@@ -198,7 +201,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) - retval = - (ret == -ENOMEM) ? 
VM_FAULT_OOM : VM_FAULT_SIGBUS; - goto out_unlock; -- - } - - address += PAGE_SIZE; -@@ -221,8 +223,7 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma) - - static void ttm_bo_vm_close(struct vm_area_struct *vma) - { -- struct ttm_buffer_object *bo = -- (struct ttm_buffer_object *)vma->vm_private_data; -+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data; - - ttm_bo_unref(&bo); - vma->vm_private_data = NULL; -diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c -index 801b702..e70ddd8 100644 ---- a/drivers/gpu/drm/ttm/ttm_memory.c -+++ b/drivers/gpu/drm/ttm/ttm_memory.c -@@ -27,6 +27,7 @@ - - #include "ttm/ttm_memory.h" - #include "ttm/ttm_module.h" -+#include "ttm/ttm_page_alloc.h" - #include - #include - #include -@@ -393,6 +394,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) - "Zone %7s: Available graphics memory: %llu kiB.\n", - zone->name, (unsigned long long) zone->max_mem >> 10); - } -+ ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); - return 0; - out_no_zone: - ttm_mem_global_release(glob); -@@ -405,6 +407,9 @@ void ttm_mem_global_release(struct ttm_mem_global *glob) - unsigned int i; - struct ttm_mem_zone *zone; - -+ /* let the page allocator first stop the shrink work. */ -+ ttm_page_alloc_fini(); -+ - flush_workqueue(glob->swap_queue); - destroy_workqueue(glob->swap_queue); - glob->swap_queue = NULL; -@@ -412,7 +417,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob) - zone = glob->zones[i]; - kobject_del(&zone->kobj); - kobject_put(&zone->kobj); -- } -+ } - kobject_del(&glob->kobj); - kobject_put(&glob->kobj); - } -diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c -new file mode 100644 -index 0000000..ef91069 ---- /dev/null -+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c -@@ -0,0 +1,855 @@ -+/* -+ * Copyright (c) Red Hat Inc. -+ -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sub license, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. 
-+ * -+ * Authors: Dave Airlie -+ * Jerome Glisse -+ * Pauli Nieminen -+ */ -+ -+/* simple list based uncached page pool -+ * - Pool collects resently freed pages for reuse -+ * - Use page->lru to keep a free list -+ * - doesn't track currently in use pages -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include /* for seq_printf */ -+#include -+ -+#include -+#include -+ -+#include "ttm/ttm_bo_driver.h" -+#include "ttm/ttm_page_alloc.h" -+ -+ -+#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) -+#define SMALL_ALLOCATION 16 -+#define FREE_ALL_PAGES (~0U) -+/* times are in msecs */ -+#define PAGE_FREE_INTERVAL 1000 -+ -+/** -+ * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages. -+ * -+ * @lock: Protects the shared pool from concurrnet access. Must be used with -+ * irqsave/irqrestore variants because pool allocator maybe called from -+ * delayed work. -+ * @fill_lock: Prevent concurrent calls to fill. -+ * @list: Pool of free uc/wc pages for fast reuse. -+ * @gfp_flags: Flags to pass for alloc_page. -+ * @npages: Number of pages in pool. -+ */ -+struct ttm_page_pool { -+ spinlock_t lock; -+ bool fill_lock; -+ struct list_head list; -+ int gfp_flags; -+ unsigned npages; -+ char *name; -+ unsigned long nfrees; -+ unsigned long nrefills; -+}; -+ -+/** -+ * Limits for the pool. They are handled without locks because only place where -+ * they may change is in sysfs store. They won't have immediate effect anyway -+ * so forcing serialization to access them is pointless. -+ */ -+ -+struct ttm_pool_opts { -+ unsigned alloc_size; -+ unsigned max_size; -+ unsigned small; -+}; -+ -+#define NUM_POOLS 4 -+ -+/** -+ * struct ttm_pool_manager - Holds memory pools for fst allocation -+ * -+ * Manager is read only object for pool code so it doesn't need locking. -+ * -+ * @free_interval: minimum number of jiffies between freeing pages from pool. -+ * @page_alloc_inited: reference counting for pool allocation. -+ * @work: Work that is used to shrink the pool. Work is only run when there is -+ * some pages to free. -+ * @small_allocation: Limit in number of pages what is small allocation. -+ * -+ * @pools: All pool objects in use. 
-+ **/ -+struct ttm_pool_manager { -+ struct kobject kobj; -+ struct shrinker mm_shrink; -+ atomic_t page_alloc_inited; -+ struct ttm_pool_opts options; -+ -+ union { -+ struct ttm_page_pool pools[NUM_POOLS]; -+ struct { -+ struct ttm_page_pool wc_pool; -+ struct ttm_page_pool uc_pool; -+ struct ttm_page_pool wc_pool_dma32; -+ struct ttm_page_pool uc_pool_dma32; -+ } ; -+ }; -+}; -+ -+static struct attribute ttm_page_pool_max = { -+ .name = "pool_max_size", -+ .mode = S_IRUGO | S_IWUSR -+}; -+static struct attribute ttm_page_pool_small = { -+ .name = "pool_small_allocation", -+ .mode = S_IRUGO | S_IWUSR -+}; -+static struct attribute ttm_page_pool_alloc_size = { -+ .name = "pool_allocation_size", -+ .mode = S_IRUGO | S_IWUSR -+}; -+ -+static struct attribute *ttm_pool_attrs[] = { -+ &ttm_page_pool_max, -+ &ttm_page_pool_small, -+ &ttm_page_pool_alloc_size, -+ NULL -+}; -+ -+static void ttm_pool_kobj_release(struct kobject *kobj) -+{ -+ struct ttm_pool_manager *m = -+ container_of(kobj, struct ttm_pool_manager, kobj); -+ (void)m; -+} -+ -+static ssize_t ttm_pool_store(struct kobject *kobj, -+ struct attribute *attr, const char *buffer, size_t size) -+{ -+ struct ttm_pool_manager *m = -+ container_of(kobj, struct ttm_pool_manager, kobj); -+ int chars; -+ unsigned val; -+ chars = sscanf(buffer, "%u", &val); -+ if (chars == 0) -+ return size; -+ -+ /* Convert kb to number of pages */ -+ val = val / (PAGE_SIZE >> 10); -+ -+ if (attr == &ttm_page_pool_max) -+ m->options.max_size = val; -+ else if (attr == &ttm_page_pool_small) -+ m->options.small = val; -+ else if (attr == &ttm_page_pool_alloc_size) { -+ if (val > NUM_PAGES_TO_ALLOC*8) { -+ printk(KERN_ERR TTM_PFX -+ "Setting allocation size to %lu " -+ "is not allowed. Recommended size is " -+ "%lu\n", -+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), -+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); -+ return size; -+ } else if (val > NUM_PAGES_TO_ALLOC) { -+ printk(KERN_WARNING TTM_PFX -+ "Setting allocation size to " -+ "larger than %lu is not recommended.\n", -+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); -+ } -+ m->options.alloc_size = val; -+ } -+ -+ return size; -+} -+ -+static ssize_t ttm_pool_show(struct kobject *kobj, -+ struct attribute *attr, char *buffer) -+{ -+ struct ttm_pool_manager *m = -+ container_of(kobj, struct ttm_pool_manager, kobj); -+ unsigned val = 0; -+ -+ if (attr == &ttm_page_pool_max) -+ val = m->options.max_size; -+ else if (attr == &ttm_page_pool_small) -+ val = m->options.small; -+ else if (attr == &ttm_page_pool_alloc_size) -+ val = m->options.alloc_size; -+ -+ val = val * (PAGE_SIZE >> 10); -+ -+ return snprintf(buffer, PAGE_SIZE, "%u\n", val); -+} -+ -+static const struct sysfs_ops ttm_pool_sysfs_ops = { -+ .show = &ttm_pool_show, -+ .store = &ttm_pool_store, -+}; -+ -+static struct kobj_type ttm_pool_kobj_type = { -+ .release = &ttm_pool_kobj_release, -+ .sysfs_ops = &ttm_pool_sysfs_ops, -+ .default_attrs = ttm_pool_attrs, -+}; -+ -+static struct ttm_pool_manager _manager = { -+ .page_alloc_inited = ATOMIC_INIT(0) -+}; -+ -+#ifndef CONFIG_X86 -+static int set_pages_array_wb(struct page **pages, int addrinarray) -+{ -+#ifdef TTM_HAS_AGP -+ int i; -+ -+ for (i = 0; i < addrinarray; i++) -+ unmap_page_from_agp(pages[i]); -+#endif -+ return 0; -+} -+ -+static int set_pages_array_wc(struct page **pages, int addrinarray) -+{ -+#ifdef TTM_HAS_AGP -+ int i; -+ -+ for (i = 0; i < addrinarray; i++) -+ map_page_into_agp(pages[i]); -+#endif -+ return 0; -+} -+ -+static int set_pages_array_uc(struct page **pages, int addrinarray) -+{ -+#ifdef 
TTM_HAS_AGP -+ int i; -+ -+ for (i = 0; i < addrinarray; i++) -+ map_page_into_agp(pages[i]); -+#endif -+ return 0; -+} -+#endif -+ -+/** -+ * Select the right pool or requested caching state and ttm flags. */ -+static struct ttm_page_pool *ttm_get_pool(int flags, -+ enum ttm_caching_state cstate) -+{ -+ int pool_index; -+ -+ if (cstate == tt_cached) -+ return NULL; -+ -+ if (cstate == tt_wc) -+ pool_index = 0x0; -+ else -+ pool_index = 0x1; -+ -+ if (flags & TTM_PAGE_FLAG_DMA32) -+ pool_index |= 0x2; -+ -+ return &_manager.pools[pool_index]; -+} -+ -+/* set memory back to wb and free the pages. */ -+static void ttm_pages_put(struct page *pages[], unsigned npages) -+{ -+ unsigned i; -+ if (set_pages_array_wb(pages, npages)) -+ printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n", -+ npages); -+ for (i = 0; i < npages; ++i) -+ __free_page(pages[i]); -+} -+ -+static void ttm_pool_update_free_locked(struct ttm_page_pool *pool, -+ unsigned freed_pages) -+{ -+ pool->npages -= freed_pages; -+ pool->nfrees += freed_pages; -+} -+ -+/** -+ * Free pages from pool. -+ * -+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC -+ * number of pages in one go. -+ * -+ * @pool: to free the pages from -+ * @free_all: If set to true will free all pages in pool -+ **/ -+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free) -+{ -+ unsigned long irq_flags; -+ struct page *p; -+ struct page **pages_to_free; -+ unsigned freed_pages = 0, -+ npages_to_free = nr_free; -+ -+ if (NUM_PAGES_TO_ALLOC < nr_free) -+ npages_to_free = NUM_PAGES_TO_ALLOC; -+ -+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), -+ GFP_KERNEL); -+ if (!pages_to_free) { -+ printk(KERN_ERR TTM_PFX -+ "Failed to allocate memory for pool free operation.\n"); -+ return 0; -+ } -+ -+restart: -+ spin_lock_irqsave(&pool->lock, irq_flags); -+ -+ list_for_each_entry_reverse(p, &pool->list, lru) { -+ if (freed_pages >= npages_to_free) -+ break; -+ -+ pages_to_free[freed_pages++] = p; -+ /* We can only remove NUM_PAGES_TO_ALLOC at a time. */ -+ if (freed_pages >= NUM_PAGES_TO_ALLOC) { -+ /* remove range of pages from the pool */ -+ __list_del(p->lru.prev, &pool->list); -+ -+ ttm_pool_update_free_locked(pool, freed_pages); -+ /** -+ * Because changing page caching is costly -+ * we unlock the pool to prevent stalling. -+ */ -+ spin_unlock_irqrestore(&pool->lock, irq_flags); -+ -+ ttm_pages_put(pages_to_free, freed_pages); -+ if (likely(nr_free != FREE_ALL_PAGES)) -+ nr_free -= freed_pages; -+ -+ if (NUM_PAGES_TO_ALLOC >= nr_free) -+ npages_to_free = nr_free; -+ else -+ npages_to_free = NUM_PAGES_TO_ALLOC; -+ -+ freed_pages = 0; -+ -+ /* free all so restart the processing */ -+ if (nr_free) -+ goto restart; -+ -+ /* Not allowed to fall tough or break because -+ * following context is inside spinlock while we are -+ * outside here. 
-+ */ -+ goto out; -+ -+ } -+ } -+ -+ /* remove range of pages from the pool */ -+ if (freed_pages) { -+ __list_del(&p->lru, &pool->list); -+ -+ ttm_pool_update_free_locked(pool, freed_pages); -+ nr_free -= freed_pages; -+ } -+ -+ spin_unlock_irqrestore(&pool->lock, irq_flags); -+ -+ if (freed_pages) -+ ttm_pages_put(pages_to_free, freed_pages); -+out: -+ kfree(pages_to_free); -+ return nr_free; -+} -+ -+/* Get good estimation how many pages are free in pools */ -+static int ttm_pool_get_num_unused_pages(void) -+{ -+ unsigned i; -+ int total = 0; -+ for (i = 0; i < NUM_POOLS; ++i) -+ total += _manager.pools[i].npages; -+ -+ return total; -+} -+ -+/** -+ * Callback for mm to request pool to reduce number of page held. -+ */ -+static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask) -+{ -+ static atomic_t start_pool = ATOMIC_INIT(0); -+ unsigned i; -+ unsigned pool_offset = atomic_add_return(1, &start_pool); -+ struct ttm_page_pool *pool; -+ -+ pool_offset = pool_offset % NUM_POOLS; -+ /* select start pool in round robin fashion */ -+ for (i = 0; i < NUM_POOLS; ++i) { -+ unsigned nr_free = shrink_pages; -+ if (shrink_pages == 0) -+ break; -+ pool = &_manager.pools[(i + pool_offset)%NUM_POOLS]; -+ shrink_pages = ttm_page_pool_free(pool, nr_free); -+ } -+ /* return estimated number of unused pages in pool */ -+ return ttm_pool_get_num_unused_pages(); -+} -+ -+static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager) -+{ -+ manager->mm_shrink.shrink = &ttm_pool_mm_shrink; -+ manager->mm_shrink.seeks = 1; -+ register_shrinker(&manager->mm_shrink); -+} -+ -+static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager) -+{ -+ unregister_shrinker(&manager->mm_shrink); -+} -+ -+static int ttm_set_pages_caching(struct page **pages, -+ enum ttm_caching_state cstate, unsigned cpages) -+{ -+ int r = 0; -+ /* Set page caching */ -+ switch (cstate) { -+ case tt_uncached: -+ r = set_pages_array_uc(pages, cpages); -+ if (r) -+ printk(KERN_ERR TTM_PFX -+ "Failed to set %d pages to uc!\n", -+ cpages); -+ break; -+ case tt_wc: -+ r = set_pages_array_wc(pages, cpages); -+ if (r) -+ printk(KERN_ERR TTM_PFX -+ "Failed to set %d pages to wc!\n", -+ cpages); -+ break; -+ default: -+ break; -+ } -+ return r; -+} -+ -+/** -+ * Free pages the pages that failed to change the caching state. If there is -+ * any pages that have changed their caching state already put them to the -+ * pool. -+ */ -+static void ttm_handle_caching_state_failure(struct list_head *pages, -+ int ttm_flags, enum ttm_caching_state cstate, -+ struct page **failed_pages, unsigned cpages) -+{ -+ unsigned i; -+ /* Failed pages have to be freed */ -+ for (i = 0; i < cpages; ++i) { -+ list_del(&failed_pages[i]->lru); -+ __free_page(failed_pages[i]); -+ } -+} -+ -+/** -+ * Allocate new pages with correct caching. -+ * -+ * This function is reentrant if caller updates count depending on number of -+ * pages returned in pages array. 
-+ */ -+static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, -+ int ttm_flags, enum ttm_caching_state cstate, unsigned count) -+{ -+ struct page **caching_array; -+ struct page *p; -+ int r = 0; -+ unsigned i, cpages; -+ unsigned max_cpages = min(count, -+ (unsigned)(PAGE_SIZE/sizeof(struct page *))); -+ -+ /* allocate array for page caching change */ -+ caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); -+ -+ if (!caching_array) { -+ printk(KERN_ERR TTM_PFX -+ "Unable to allocate table for new pages."); -+ return -ENOMEM; -+ } -+ -+ for (i = 0, cpages = 0; i < count; ++i) { -+ p = alloc_page(gfp_flags); -+ -+ if (!p) { -+ printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i); -+ -+ /* store already allocated pages in the pool after -+ * setting the caching state */ -+ if (cpages) { -+ r = ttm_set_pages_caching(caching_array, -+ cstate, cpages); -+ if (r) -+ ttm_handle_caching_state_failure(pages, -+ ttm_flags, cstate, -+ caching_array, cpages); -+ } -+ r = -ENOMEM; -+ goto out; -+ } -+ -+#ifdef CONFIG_HIGHMEM -+ /* gfp flags of highmem page should never be dma32 so we -+ * we should be fine in such case -+ */ -+ if (!PageHighMem(p)) -+#endif -+ { -+ caching_array[cpages++] = p; -+ if (cpages == max_cpages) { -+ -+ r = ttm_set_pages_caching(caching_array, -+ cstate, cpages); -+ if (r) { -+ ttm_handle_caching_state_failure(pages, -+ ttm_flags, cstate, -+ caching_array, cpages); -+ goto out; -+ } -+ cpages = 0; -+ } -+ } -+ -+ list_add(&p->lru, pages); -+ } -+ -+ if (cpages) { -+ r = ttm_set_pages_caching(caching_array, cstate, cpages); -+ if (r) -+ ttm_handle_caching_state_failure(pages, -+ ttm_flags, cstate, -+ caching_array, cpages); -+ } -+out: -+ kfree(caching_array); -+ -+ return r; -+} -+ -+/** -+ * Fill the given pool if there isn't enough pages and requested number of -+ * pages is small. -+ */ -+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, -+ int ttm_flags, enum ttm_caching_state cstate, unsigned count, -+ unsigned long *irq_flags) -+{ -+ struct page *p; -+ int r; -+ unsigned cpages = 0; -+ /** -+ * Only allow one pool fill operation at a time. -+ * If pool doesn't have enough pages for the allocation new pages are -+ * allocated from outside of pool. -+ */ -+ if (pool->fill_lock) -+ return; -+ -+ pool->fill_lock = true; -+ -+ /* If allocation request is small and there is not enough -+ * pages in pool we fill the pool first */ -+ if (count < _manager.options.small -+ && count > pool->npages) { -+ struct list_head new_pages; -+ unsigned alloc_size = _manager.options.alloc_size; -+ -+ /** -+ * Can't change page caching if in irqsave context. We have to -+ * drop the pool->lock. -+ */ -+ spin_unlock_irqrestore(&pool->lock, *irq_flags); -+ -+ INIT_LIST_HEAD(&new_pages); -+ r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags, -+ cstate, alloc_size); -+ spin_lock_irqsave(&pool->lock, *irq_flags); -+ -+ if (!r) { -+ list_splice(&new_pages, &pool->list); -+ ++pool->nrefills; -+ pool->npages += alloc_size; -+ } else { -+ printk(KERN_ERR TTM_PFX -+ "Failed to fill pool (%p).", pool); -+ /* If we have any pages left put them to the pool. */ -+ list_for_each_entry(p, &pool->list, lru) { -+ ++cpages; -+ } -+ list_splice(&new_pages, &pool->list); -+ pool->npages += cpages; -+ } -+ -+ } -+ pool->fill_lock = false; -+} -+ -+/** -+ * Cut count nubmer of pages from the pool and put them to return list -+ * -+ * @return count of pages still to allocate to fill the request. 
-+ */ -+static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, -+ struct list_head *pages, int ttm_flags, -+ enum ttm_caching_state cstate, unsigned count) -+{ -+ unsigned long irq_flags; -+ struct list_head *p; -+ unsigned i; -+ -+ spin_lock_irqsave(&pool->lock, irq_flags); -+ ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags); -+ -+ if (count >= pool->npages) { -+ /* take all pages from the pool */ -+ list_splice_init(&pool->list, pages); -+ count -= pool->npages; -+ pool->npages = 0; -+ goto out; -+ } -+ /* find the last pages to include for requested number of pages. Split -+ * pool to begin and halves to reduce search space. */ -+ if (count <= pool->npages/2) { -+ i = 0; -+ list_for_each(p, &pool->list) { -+ if (++i == count) -+ break; -+ } -+ } else { -+ i = pool->npages + 1; -+ list_for_each_prev(p, &pool->list) { -+ if (--i == count) -+ break; -+ } -+ } -+ /* Cut count number of pages from pool */ -+ list_cut_position(pages, &pool->list, p); -+ pool->npages -= count; -+ count = 0; -+out: -+ spin_unlock_irqrestore(&pool->lock, irq_flags); -+ return count; -+} -+ -+/* -+ * On success pages list will hold count number of correctly -+ * cached pages. -+ */ -+int ttm_get_pages(struct list_head *pages, int flags, -+ enum ttm_caching_state cstate, unsigned count) -+{ -+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); -+ struct page *p = NULL; -+ int gfp_flags = 0; -+ int r; -+ -+ /* set zero flag for page allocation if required */ -+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) -+ gfp_flags |= __GFP_ZERO; -+ -+ /* No pool for cached pages */ -+ if (pool == NULL) { -+ if (flags & TTM_PAGE_FLAG_DMA32) -+ gfp_flags |= GFP_DMA32; -+ else -+ gfp_flags |= GFP_HIGHUSER; -+ -+ for (r = 0; r < count; ++r) { -+ p = alloc_page(gfp_flags); -+ if (!p) { -+ -+ printk(KERN_ERR TTM_PFX -+ "Unable to allocate page."); -+ return -ENOMEM; -+ } -+ -+ list_add(&p->lru, pages); -+ } -+ return 0; -+ } -+ -+ -+ /* combine zero flag to pool flags */ -+ gfp_flags |= pool->gfp_flags; -+ -+ /* First we take pages from the pool */ -+ count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count); -+ -+ /* clear the pages coming from the pool if requested */ -+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) { -+ list_for_each_entry(p, pages, lru) { -+ clear_page(page_address(p)); -+ } -+ } -+ -+ /* If pool didn't have enough pages allocate new one. */ -+ if (count > 0) { -+ /* ttm_alloc_new_pages doesn't reference pool so we can run -+ * multiple requests in parallel. -+ **/ -+ r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count); -+ if (r) { -+ /* If there is any pages in the list put them back to -+ * the pool. 
*/ -+ printk(KERN_ERR TTM_PFX -+ "Failed to allocate extra pages " -+ "for large request."); -+ ttm_put_pages(pages, 0, flags, cstate); -+ return r; -+ } -+ } -+ -+ -+ return 0; -+} -+ -+/* Put all pages in pages list to correct pool to wait for reuse */ -+void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags, -+ enum ttm_caching_state cstate) -+{ -+ unsigned long irq_flags; -+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); -+ struct page *p, *tmp; -+ -+ if (pool == NULL) { -+ /* No pool for this memory type so free the pages */ -+ -+ list_for_each_entry_safe(p, tmp, pages, lru) { -+ __free_page(p); -+ } -+ /* Make the pages list empty */ -+ INIT_LIST_HEAD(pages); -+ return; -+ } -+ if (page_count == 0) { -+ list_for_each_entry_safe(p, tmp, pages, lru) { -+ ++page_count; -+ } -+ } -+ -+ spin_lock_irqsave(&pool->lock, irq_flags); -+ list_splice_init(pages, &pool->list); -+ pool->npages += page_count; -+ /* Check that we don't go over the pool limit */ -+ page_count = 0; -+ if (pool->npages > _manager.options.max_size) { -+ page_count = pool->npages - _manager.options.max_size; -+ /* free at least NUM_PAGES_TO_ALLOC number of pages -+ * to reduce calls to set_memory_wb */ -+ if (page_count < NUM_PAGES_TO_ALLOC) -+ page_count = NUM_PAGES_TO_ALLOC; -+ } -+ spin_unlock_irqrestore(&pool->lock, irq_flags); -+ if (page_count) -+ ttm_page_pool_free(pool, page_count); -+} -+ -+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags, -+ char *name) -+{ -+ spin_lock_init(&pool->lock); -+ pool->fill_lock = false; -+ INIT_LIST_HEAD(&pool->list); -+ pool->npages = pool->nfrees = 0; -+ pool->gfp_flags = flags; -+ pool->name = name; -+} -+ -+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) -+{ -+ int ret; -+ if (atomic_add_return(1, &_manager.page_alloc_inited) > 1) -+ return 0; -+ -+ printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n"); -+ -+ ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc"); -+ -+ ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc"); -+ -+ ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32, -+ "wc dma"); -+ -+ ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32, -+ "uc dma"); -+ -+ _manager.options.max_size = max_pages; -+ _manager.options.small = SMALL_ALLOCATION; -+ _manager.options.alloc_size = NUM_PAGES_TO_ALLOC; -+ -+ kobject_init(&_manager.kobj, &ttm_pool_kobj_type); -+ ret = kobject_add(&_manager.kobj, &glob->kobj, "pool"); -+ if (unlikely(ret != 0)) { -+ kobject_put(&_manager.kobj); -+ return ret; -+ } -+ -+ ttm_pool_mm_shrink_init(&_manager); -+ -+ return 0; -+} -+ -+void ttm_page_alloc_fini() -+{ -+ int i; -+ -+ if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0) -+ return; -+ -+ printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n"); -+ ttm_pool_mm_shrink_fini(&_manager); -+ -+ for (i = 0; i < NUM_POOLS; ++i) -+ ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES); -+ -+ kobject_put(&_manager.kobj); -+} -+ -+int ttm_page_alloc_debugfs(struct seq_file *m, void *data) -+{ -+ struct ttm_page_pool *p; -+ unsigned i; -+ char *h[] = {"pool", "refills", "pages freed", "size"}; -+ if (atomic_read(&_manager.page_alloc_inited) == 0) { -+ seq_printf(m, "No pool allocator running.\n"); -+ return 0; -+ } -+ seq_printf(m, "%6s %12s %13s %8s\n", -+ h[0], h[1], h[2], h[3]); -+ for (i = 0; i < NUM_POOLS; ++i) { -+ p = &_manager.pools[i]; -+ -+ seq_printf(m, "%6s %12ld %13ld %8d\n", -+ p->name, p->nrefills, -+ p->nfrees, p->npages); -+ 
} -+ return 0; -+} -+EXPORT_SYMBOL(ttm_page_alloc_debugfs); -diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c -index d5fd5b8..a7bab87 100644 ---- a/drivers/gpu/drm/ttm/ttm_tt.c -+++ b/drivers/gpu/drm/ttm/ttm_tt.c -@@ -39,6 +39,7 @@ - #include "ttm/ttm_module.h" - #include "ttm/ttm_bo_driver.h" - #include "ttm/ttm_placement.h" -+#include "ttm/ttm_page_alloc.h" - - static int ttm_tt_swapin(struct ttm_tt *ttm); - -@@ -56,21 +57,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm) - ttm->pages = NULL; - } - --static struct page *ttm_tt_alloc_page(unsigned page_flags) --{ -- gfp_t gfp_flags = GFP_USER; -- -- if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) -- gfp_flags |= __GFP_ZERO; -- -- if (page_flags & TTM_PAGE_FLAG_DMA32) -- gfp_flags |= __GFP_DMA32; -- else -- gfp_flags |= __GFP_HIGHMEM; -- -- return alloc_page(gfp_flags); --} -- - static void ttm_tt_free_user_pages(struct ttm_tt *ttm) - { - int write; -@@ -111,15 +97,21 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm) - static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index) - { - struct page *p; -+ struct list_head h; - struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; - int ret; - - while (NULL == (p = ttm->pages[index])) { -- p = ttm_tt_alloc_page(ttm->page_flags); - -- if (!p) -+ INIT_LIST_HEAD(&h); -+ -+ ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1); -+ -+ if (ret != 0) - return NULL; - -+ p = list_first_entry(&h, struct page, lru); -+ - ret = ttm_mem_global_alloc_page(mem_glob, p, false, false); - if (unlikely(ret != 0)) - goto out_err; -@@ -228,10 +220,10 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm, - if (ttm->caching_state == c_state) - return 0; - -- if (c_state != tt_cached) { -- ret = ttm_tt_populate(ttm); -- if (unlikely(ret != 0)) -- return ret; -+ if (ttm->state == tt_unpopulated) { -+ /* Change caching but don't populate */ -+ ttm->caching_state = c_state; -+ return 0; - } - - if (ttm->caching_state == tt_cached) -@@ -282,13 +274,17 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching); - static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) - { - int i; -+ unsigned count = 0; -+ struct list_head h; - struct page *cur_page; - struct ttm_backend *be = ttm->be; - -+ INIT_LIST_HEAD(&h); -+ - if (be) - be->func->clear(be); -- (void)ttm_tt_set_caching(ttm, tt_cached); - for (i = 0; i < ttm->num_pages; ++i) { -+ - cur_page = ttm->pages[i]; - ttm->pages[i] = NULL; - if (cur_page) { -@@ -298,9 +294,11 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) - "Leaking pages.\n"); - ttm_mem_global_free_page(ttm->glob->mem_glob, - cur_page); -- __free_page(cur_page); -+ list_add(&cur_page->lru, &h); -+ count++; - } - } -+ ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state); - ttm->state = tt_unpopulated; - ttm->first_himem_page = ttm->num_pages; - ttm->last_lomem_page = -1; -diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile -index 1a3cb68..4505e17 100644 ---- a/drivers/gpu/drm/vmwgfx/Makefile -+++ b/drivers/gpu/drm/vmwgfx/Makefile -@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm - vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ - vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ - vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ -- vmwgfx_overlay.o -+ vmwgfx_overlay.o vmwgfx_fence.o - - obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c -index 825ebe3..c4f5114 100644 ---- 
a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c -@@ -137,9 +137,6 @@ int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) - int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, - struct ttm_mem_type_manager *man) - { -- struct vmw_private *dev_priv = -- container_of(bdev, struct vmw_private, bdev); -- - switch (type) { - case TTM_PL_SYSTEM: - /* System memory */ -@@ -151,11 +148,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, - case TTM_PL_VRAM: - /* "On-card" video ram */ - man->gpu_offset = 0; -- man->io_offset = dev_priv->vram_start; -- man->io_size = dev_priv->vram_size; -- man->flags = TTM_MEMTYPE_FLAG_FIXED | -- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE; -- man->io_addr = NULL; -+ man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_MASK_CACHING; - man->default_caching = TTM_PL_FLAG_WC; - break; -@@ -193,6 +186,42 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo) - vmw_dmabuf_gmr_unbind(bo); - } - -+static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) -+{ -+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; -+ struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); -+ -+ mem->bus.addr = NULL; -+ mem->bus.is_iomem = false; -+ mem->bus.offset = 0; -+ mem->bus.size = mem->num_pages << PAGE_SHIFT; -+ mem->bus.base = 0; -+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) -+ return -EINVAL; -+ switch (mem->mem_type) { -+ case TTM_PL_SYSTEM: -+ /* System memory */ -+ return 0; -+ case TTM_PL_VRAM: -+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; -+ mem->bus.base = dev_priv->vram_start; -+ mem->bus.is_iomem = true; -+ break; -+ default: -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) -+{ -+} -+ -+static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) -+{ -+ return 0; -+} -+ - /** - * FIXME: We're using the old vmware polling method to sync. - * Do this with fences instead. 
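The hunk above wires vmwgfx into the new ttm_mem_reg bus-addressing scheme that replaces ttm_bo_pci_offset(). A minimal sketch of such a driver-side io_mem_reserve hook follows; it is not part of the patch, and "foo_private" with its vram_start field is a hypothetical stand-in for real per-device state, but the vmw_ttm_io_mem_reserve() implementation above follows the same pattern.

/*
 * Sketch only: fill in ttm_mem_reg.bus instead of the removed
 * io_offset/io_size manager fields.
 */
#include "ttm/ttm_bo_driver.h"

struct foo_private {
	struct ttm_bo_device bdev;
	unsigned long vram_start;	/* bus (PCI BAR) base of VRAM */
};

static int foo_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct foo_private *priv = container_of(bdev, struct foo_private, bdev);

	mem->bus.addr = NULL;			/* no pre-mapped kernel address */
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;

	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		return 0;			/* system RAM needs no aperture setup */
	case TTM_PL_VRAM:
		/* BO offset inside the aperture plus the aperture's bus base */
		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
		mem->bus.base = priv->vram_start;
		mem->bus.is_iomem = true;
		return 0;
	default:
		return -EINVAL;
	}
}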
-@@ -248,5 +277,8 @@ struct ttm_bo_driver vmw_bo_driver = { - .sync_obj_unref = vmw_sync_obj_unref, - .sync_obj_ref = vmw_sync_obj_ref, - .move_notify = vmw_move_notify, -- .swap_notify = vmw_swap_notify -+ .swap_notify = vmw_swap_notify, -+ .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, -+ .io_mem_reserve = &vmw_ttm_io_mem_reserve, -+ .io_mem_free = &vmw_ttm_io_mem_free, - }; -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c -index 0c9c081..b793c8c 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c -@@ -88,6 +88,9 @@ - #define DRM_IOCTL_VMW_FENCE_WAIT \ - DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ - struct drm_vmw_fence_wait_arg) -+#define DRM_IOCTL_VMW_UPDATE_LAYOUT \ -+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ -+ struct drm_vmw_update_layout_arg) - - - /** -@@ -135,7 +138,9 @@ static struct drm_ioctl_desc vmw_ioctls[] = { - VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, - DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), - VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, -- DRM_AUTH | DRM_UNLOCKED) -+ DRM_AUTH | DRM_UNLOCKED), -+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, -+ DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED) - }; - - static struct pci_device_id vmw_pci_id_list[] = { -@@ -318,6 +323,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) - goto out_err3; - } - -+ /* Need mmio memory to check for fifo pitchlock cap. */ -+ if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && -+ !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) && -+ !vmw_fifo_have_pitchlock(dev_priv)) { -+ ret = -ENOSYS; -+ DRM_ERROR("Hardware has no pitchlock\n"); -+ goto out_err4; -+ } -+ - dev_priv->tdev = ttm_object_device_init - (dev_priv->mem_global_ref.object, 12); - -@@ -399,8 +413,6 @@ static int vmw_driver_unload(struct drm_device *dev) - { - struct vmw_private *dev_priv = vmw_priv(dev); - -- DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n"); -- - unregister_pm_notifier(&dev_priv->pm_nb); - - vmw_fb_close(dev_priv); -@@ -546,7 +558,6 @@ static int vmw_master_create(struct drm_device *dev, - { - struct vmw_master *vmaster; - -- DRM_INFO("Master create.\n"); - vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); - if (unlikely(vmaster == NULL)) - return -ENOMEM; -@@ -563,7 +574,6 @@ static void vmw_master_destroy(struct drm_device *dev, - { - struct vmw_master *vmaster = vmw_master(master); - -- DRM_INFO("Master destroy.\n"); - master->driver_priv = NULL; - kfree(vmaster); - } -@@ -579,8 +589,6 @@ static int vmw_master_set(struct drm_device *dev, - struct vmw_master *vmaster = vmw_master(file_priv->master); - int ret = 0; - -- DRM_INFO("Master set.\n"); -- - if (active) { - BUG_ON(active != &dev_priv->fbdev_master); - ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); -@@ -622,8 +630,6 @@ static void vmw_master_drop(struct drm_device *dev, - struct vmw_master *vmaster = vmw_master(file_priv->master); - int ret; - -- DRM_INFO("Master drop.\n"); -- - /** - * Make sure the master doesn't disappear while we have - * it locked. 
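Reading aid only, not part of the patch: the DRM_VMW_UPDATE_LAYOUT ioctl wired up above takes a count plus a user pointer to an array of drm_vmw_rect entries. A hedged user-space sketch, assuming the vmwgfx UAPI header provides DRM_VMW_UPDATE_LAYOUT, struct drm_vmw_update_layout_arg and struct drm_vmw_rect with exactly the fields the kernel code above touches (include paths are assumptions as well):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>           /* DRM_IOWR, DRM_COMMAND_BASE (assumed path) */
#include <drm/vmwgfx_drm.h>    /* assumed location of the vmwgfx UAPI header */

/* Mirrors the kernel-side DRM_IOCTL_VMW_UPDATE_LAYOUT definition added
 * in vmwgfx_drv.c above. */
#define EXAMPLE_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		 struct drm_vmw_update_layout_arg)

/* Ask for two 1024x768 outputs side by side; drm_fd must be DRM master. */
static int example_update_layout(int drm_fd)
{
	struct drm_vmw_rect rects[2] = {
		{ .x = 0,    .y = 0, .w = 1024, .h = 768 },
		{ .x = 1024, .y = 0, .w = 1024, .h = 768 },
	};
	struct drm_vmw_update_layout_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.num_outputs = 2;
	arg.rects = (uint64_t)(unsigned long)rects;

	return ioctl(drm_fd, EXAMPLE_IOCTL_VMW_UPDATE_LAYOUT, &arg);
}

Passing num_outputs == 0 falls back to the single 800x600 rectangle the ioctl handler later in this patch installs itself.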
-diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h -index 356dc93..eaad520 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h -@@ -41,12 +41,13 @@ - - #define VMWGFX_DRIVER_DATE "20100209" - #define VMWGFX_DRIVER_MAJOR 1 --#define VMWGFX_DRIVER_MINOR 0 -+#define VMWGFX_DRIVER_MINOR 2 - #define VMWGFX_DRIVER_PATCHLEVEL 0 - #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 - #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) - #define VMWGFX_MAX_RELOCATIONS 2048 - #define VMWGFX_MAX_GMRS 2048 -+#define VMWGFX_MAX_DISPLAYS 16 - - struct vmw_fpriv { - struct drm_master *locked_master; -@@ -102,6 +103,13 @@ struct vmw_surface { - struct vmw_cursor_snooper snooper; - }; - -+struct vmw_fence_queue { -+ struct list_head head; -+ struct timespec lag; -+ struct timespec lag_time; -+ spinlock_t lock; -+}; -+ - struct vmw_fifo_state { - unsigned long reserved_size; - __le32 *dynamic_buffer; -@@ -115,6 +123,7 @@ struct vmw_fifo_state { - uint32_t capabilities; - struct mutex fifo_mutex; - struct rw_semaphore rwsem; -+ struct vmw_fence_queue fence_queue; - }; - - struct vmw_relocation { -@@ -144,6 +153,14 @@ struct vmw_master { - struct ttm_lock lock; - }; - -+struct vmw_vga_topology_state { -+ uint32_t width; -+ uint32_t height; -+ uint32_t primary; -+ uint32_t pos_x; -+ uint32_t pos_y; -+}; -+ - struct vmw_private { - struct ttm_bo_device bdev; - struct ttm_bo_global_ref bo_global_ref; -@@ -171,14 +188,19 @@ struct vmw_private { - * VGA registers. - */ - -+ struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS]; - uint32_t vga_width; - uint32_t vga_height; - uint32_t vga_depth; - uint32_t vga_bpp; - uint32_t vga_pseudo; - uint32_t vga_red_mask; -- uint32_t vga_blue_mask; - uint32_t vga_green_mask; -+ uint32_t vga_blue_mask; -+ uint32_t vga_bpl; -+ uint32_t vga_pitchlock; -+ -+ uint32_t num_displays; - - /* - * Framebuffer info. 
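Editor's note, not part of the patch: the fence queue declared above is drained against a 32-bit sequence counter that is allowed to wrap, so the fence code later in this patch compares sequences with unsigned subtraction rather than a plain >=. A minimal illustration (the helper itself is hypothetical):

/* "signaled" has reached "sequence" iff the unsigned difference is
 * small; a huge difference means the counter wrapped and the fence is
 * still outstanding.  Mirrors the skip test in vmw_fence_pull() and
 * the VMW_FENCE_WRAP checks in vmw_fence_signaled() below. */
static bool example_seq_passed(uint32_t signaled, uint32_t sequence)
{
	return (signaled - sequence) <= (1u << 30);
}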
-@@ -393,6 +415,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, - extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); - extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma); - extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv); -+extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv); - - /** - * TTM glue - vmwgfx_ttm_glue.c -@@ -441,6 +464,23 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv, - uint32_t sequence, - bool interruptible, - unsigned long timeout); -+extern void vmw_update_sequence(struct vmw_private *dev_priv, -+ struct vmw_fifo_state *fifo_state); -+ -+ -+/** -+ * Rudimentary fence objects currently used only for throttling - -+ * vmwgfx_fence.c -+ */ -+ -+extern void vmw_fence_queue_init(struct vmw_fence_queue *queue); -+extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue); -+extern int vmw_fence_push(struct vmw_fence_queue *queue, -+ uint32_t sequence); -+extern int vmw_fence_pull(struct vmw_fence_queue *queue, -+ uint32_t signaled_sequence); -+extern int vmw_wait_lag(struct vmw_private *dev_priv, -+ struct vmw_fence_queue *queue, uint32_t us); - - /** - * Kernel framebuffer - vmwgfx_fb.c -@@ -466,6 +506,11 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, - struct ttm_object_file *tfile, - struct ttm_buffer_object *bo, - SVGA3dCmdHeader *header); -+void vmw_kms_write_svga(struct vmw_private *vmw_priv, -+ unsigned width, unsigned height, unsigned pitch, -+ unsigned bbp, unsigned depth); -+int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); - - /** - * Overlay control - vmwgfx_overlay.c -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c -index 0897359..8e39685 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c -@@ -570,7 +570,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, - * Put BO in VRAM, only if there is space. - */ - -- ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false); -+ ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false); - if (unlikely(ret == -ERESTARTSYS)) - return ret; - -@@ -590,7 +590,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, - * previous contents. 
- */ - -- ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); -+ ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false); - return ret; - } - -@@ -644,6 +644,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, - ret = copy_from_user(cmd, user_cmd, arg->command_size); - - if (unlikely(ret != 0)) { -+ ret = -EFAULT; - DRM_ERROR("Failed copying commands.\n"); - goto out_commit; - } -@@ -669,6 +670,15 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, - goto out_err; - - vmw_apply_relocations(sw_context); -+ -+ if (arg->throttle_us) { -+ ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue, -+ arg->throttle_us); -+ -+ if (unlikely(ret != 0)) -+ goto out_err; -+ } -+ - vmw_fifo_commit(dev_priv, arg->command_size); - - ret = vmw_fifo_send_fence(dev_priv, &sequence); -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c -index a933670..b0866f0 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c -@@ -132,16 +132,14 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var, - return -EINVAL; - } - -- /* without multimon its hard to resize */ -- if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) && -- (var->xres != par->max_width || -- var->yres != par->max_height)) { -- DRM_ERROR("Tried to resize, but we don't have multimon\n"); -+ if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && -+ (var->xoffset != 0 || var->yoffset != 0)) { -+ DRM_ERROR("Can not handle panning without display topology\n"); - return -EINVAL; - } - -- if (var->xres > par->max_width || -- var->yres > par->max_height) { -+ if ((var->xoffset + var->xres) > par->max_width || -+ (var->yoffset + var->yres) > par->max_height) { - DRM_ERROR("Requested geom can not fit in framebuffer\n"); - return -EINVAL; - } -@@ -154,27 +152,11 @@ static int vmw_fb_set_par(struct fb_info *info) - struct vmw_fb_par *par = info->par; - struct vmw_private *vmw_priv = par->vmw_priv; - -- if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { -- vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); -- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); -- vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); -- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0); -- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); -- vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0); -- vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0); -- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); -- -- vmw_write(vmw_priv, SVGA_REG_ENABLE, 1); -- vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width); -- vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height); -- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp); -- vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth); -- vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); -- vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); -- vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); -- -+ vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres, -+ info->fix.line_length, -+ par->bpp, par->depth); -+ if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) { - /* TODO check if pitch and offset changes */ -- - vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); -@@ -183,13 +165,13 @@ static int vmw_fb_set_par(struct fb_info *info) - vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres); - vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 
SVGA_ID_INVALID); -- } else { -- vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres); -- vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres); -- -- /* TODO check if pitch and offset changes */ - } - -+ /* This is really helpful since if this fails the user -+ * can probably not see anything on the screen. -+ */ -+ WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0); -+ - return 0; - } - -@@ -416,48 +398,23 @@ int vmw_fb_init(struct vmw_private *vmw_priv) - unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size; - int ret; - -+ /* XXX These shouldn't be hardcoded. */ - initial_width = 800; - initial_height = 600; - - fb_bbp = 32; - fb_depth = 24; - -- if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { -- fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); -- fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); -- } else { -- fb_width = min(vmw_priv->fb_max_width, initial_width); -- fb_height = min(vmw_priv->fb_max_height, initial_height); -- } -+ /* XXX As shouldn't these be as well. */ -+ fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); -+ fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); - - initial_width = min(fb_width, initial_width); - initial_height = min(fb_height, initial_height); - -- vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width); -- vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height); -- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp); -- vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth); -- vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); -- vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); -- vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); -- -- fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE); -+ fb_pitch = fb_width * fb_bbp / 8; -+ fb_size = fb_pitch * fb_height; - fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET); -- fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE); -- -- DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH)); -- DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT)); -- DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH)); -- DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT)); -- DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL)); -- DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH)); -- DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE)); -- DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK)); -- DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK)); -- DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK)); -- DRM_DEBUG("fb_offset 0x%08x\n", fb_offset); -- DRM_DEBUG("fb_pitch %u\n", fb_pitch); -- DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024); - - info = framebuffer_alloc(sizeof(*par), device); - if (!info) -@@ -559,8 +516,13 @@ int vmw_fb_init(struct vmw_private *vmw_priv) - info->pixmap.scan_align = 1; - #endif - -- info->aperture_base = vmw_priv->vram_start; -- info->aperture_size = vmw_priv->vram_size; -+ info->apertures = alloc_apertures(1); -+ if (!info->apertures) { -+ ret = -ENOMEM; -+ goto err_aper; -+ } -+ info->apertures->ranges[0].base = vmw_priv->vram_start; -+ info->apertures->ranges[0].size = vmw_priv->vram_size; - - /* - * Dirty & Deferred IO -@@ -580,6 +542,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv) - - err_defio: - fb_deferred_io_cleanup(info); -+err_aper: - ttm_bo_kunmap(&par->map); - err_unref: - ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo); -@@ -628,7 +591,7 @@ int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, - if (unlikely(ret != 0)) - return ret; - -- ret 
= ttm_bo_validate(bo, &vmw_sys_placement, false, false); -+ ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false); - ttm_bo_unreserve(bo); - - return ret; -@@ -652,7 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, - if (unlikely(ret != 0)) - goto err_unlock; - -- ret = ttm_bo_validate(bo, &ne_placement, false, false); -+ ret = ttm_bo_validate(bo, &ne_placement, false, false, false); -+ -+ /* Could probably bug on */ -+ WARN_ON(bo->offset != 0); -+ - ttm_bo_unreserve(bo); - err_unlock: - ttm_write_unlock(&vmw_priv->active_master->lock); -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c -new file mode 100644 -index 0000000..61eacc1 ---- /dev/null -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c -@@ -0,0 +1,173 @@ -+/************************************************************************** -+ * -+ * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ * -+ **************************************************************************/ -+ -+ -+#include "vmwgfx_drv.h" -+ -+struct vmw_fence { -+ struct list_head head; -+ uint32_t sequence; -+ struct timespec submitted; -+}; -+ -+void vmw_fence_queue_init(struct vmw_fence_queue *queue) -+{ -+ INIT_LIST_HEAD(&queue->head); -+ queue->lag = ns_to_timespec(0); -+ getrawmonotonic(&queue->lag_time); -+ spin_lock_init(&queue->lock); -+} -+ -+void vmw_fence_queue_takedown(struct vmw_fence_queue *queue) -+{ -+ struct vmw_fence *fence, *next; -+ -+ spin_lock(&queue->lock); -+ list_for_each_entry_safe(fence, next, &queue->head, head) { -+ kfree(fence); -+ } -+ spin_unlock(&queue->lock); -+} -+ -+int vmw_fence_push(struct vmw_fence_queue *queue, -+ uint32_t sequence) -+{ -+ struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL); -+ -+ if (unlikely(!fence)) -+ return -ENOMEM; -+ -+ fence->sequence = sequence; -+ getrawmonotonic(&fence->submitted); -+ spin_lock(&queue->lock); -+ list_add_tail(&fence->head, &queue->head); -+ spin_unlock(&queue->lock); -+ -+ return 0; -+} -+ -+int vmw_fence_pull(struct vmw_fence_queue *queue, -+ uint32_t signaled_sequence) -+{ -+ struct vmw_fence *fence, *next; -+ struct timespec now; -+ bool updated = false; -+ -+ spin_lock(&queue->lock); -+ getrawmonotonic(&now); -+ -+ if (list_empty(&queue->head)) { -+ queue->lag = ns_to_timespec(0); -+ queue->lag_time = now; -+ updated = true; -+ goto out_unlock; -+ } -+ -+ list_for_each_entry_safe(fence, next, &queue->head, head) { -+ if (signaled_sequence - fence->sequence > (1 << 30)) -+ continue; -+ -+ queue->lag = timespec_sub(now, fence->submitted); -+ queue->lag_time = now; -+ updated = true; -+ list_del(&fence->head); -+ kfree(fence); -+ } -+ -+out_unlock: -+ spin_unlock(&queue->lock); -+ -+ return (updated) ? 
0 : -EBUSY; -+} -+ -+static struct timespec vmw_timespec_add(struct timespec t1, -+ struct timespec t2) -+{ -+ t1.tv_sec += t2.tv_sec; -+ t1.tv_nsec += t2.tv_nsec; -+ if (t1.tv_nsec >= 1000000000L) { -+ t1.tv_sec += 1; -+ t1.tv_nsec -= 1000000000L; -+ } -+ -+ return t1; -+} -+ -+static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue) -+{ -+ struct timespec now; -+ -+ spin_lock(&queue->lock); -+ getrawmonotonic(&now); -+ queue->lag = vmw_timespec_add(queue->lag, -+ timespec_sub(now, queue->lag_time)); -+ queue->lag_time = now; -+ spin_unlock(&queue->lock); -+ return queue->lag; -+} -+ -+ -+static bool vmw_lag_lt(struct vmw_fence_queue *queue, -+ uint32_t us) -+{ -+ struct timespec lag, cond; -+ -+ cond = ns_to_timespec((s64) us * 1000); -+ lag = vmw_fifo_lag(queue); -+ return (timespec_compare(&lag, &cond) < 1); -+} -+ -+int vmw_wait_lag(struct vmw_private *dev_priv, -+ struct vmw_fence_queue *queue, uint32_t us) -+{ -+ struct vmw_fence *fence; -+ uint32_t sequence; -+ int ret; -+ -+ while (!vmw_lag_lt(queue, us)) { -+ spin_lock(&queue->lock); -+ if (list_empty(&queue->head)) -+ sequence = atomic_read(&dev_priv->fence_seq); -+ else { -+ fence = list_first_entry(&queue->head, -+ struct vmw_fence, head); -+ sequence = fence->sequence; -+ } -+ spin_unlock(&queue->lock); -+ -+ ret = vmw_wait_fence(dev_priv, false, sequence, true, -+ 3*HZ); -+ -+ if (unlikely(ret != 0)) -+ return ret; -+ -+ (void) vmw_fence_pull(queue, sequence); -+ } -+ return 0; -+} -+ -+ -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c -index 39d43a0..e6a1eb7 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c -@@ -34,6 +34,9 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) - __le32 __iomem *fifo_mem = dev_priv->mmio_virt; - uint32_t fifo_min, hwversion; - -+ if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) -+ return false; -+ - fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN); - if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int)) - return false; -@@ -48,6 +51,21 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) - return true; - } - -+bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv) -+{ -+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt; -+ uint32_t caps; -+ -+ if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) -+ return false; -+ -+ caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES); -+ if (caps & SVGA_FIFO_CAP_PITCHLOCK) -+ return true; -+ -+ return false; -+} -+ - int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) - { - __le32 __iomem *fifo_mem = dev_priv->mmio_virt; -@@ -120,7 +138,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) - - atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence); - iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); -- -+ vmw_fence_queue_init(&fifo->fence_queue); - return vmw_fifo_send_fence(dev_priv, &dummy); - out_err: - vfree(fifo->static_buffer); -@@ -159,6 +177,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) - dev_priv->enable_state); - - mutex_unlock(&dev_priv->hw_mutex); -+ vmw_fence_queue_takedown(&fifo->fence_queue); - - if (likely(fifo->last_buffer != NULL)) { - vfree(fifo->last_buffer); -@@ -484,6 +503,8 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) - fifo_state->last_buffer_add = true; - vmw_fifo_commit(dev_priv, bytes); - fifo_state->last_buffer_add = false; -+ (void) 
vmw_fence_push(&fifo_state->fence_queue, *sequence); -+ vmw_update_sequence(dev_priv, fifo_state); - - out_err: - return ret; -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c -index 4d7cb53..e92298a 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c -@@ -64,22 +64,33 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence) - return (busy == 0); - } - -+void vmw_update_sequence(struct vmw_private *dev_priv, -+ struct vmw_fifo_state *fifo_state) -+{ -+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt; -+ -+ uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE); -+ -+ if (dev_priv->last_read_sequence != sequence) { -+ dev_priv->last_read_sequence = sequence; -+ vmw_fence_pull(&fifo_state->fence_queue, sequence); -+ } -+} - - bool vmw_fence_signaled(struct vmw_private *dev_priv, - uint32_t sequence) - { -- __le32 __iomem *fifo_mem = dev_priv->mmio_virt; - struct vmw_fifo_state *fifo_state; - bool ret; - - if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) - return true; - -- dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE); -+ fifo_state = &dev_priv->fifo; -+ vmw_update_sequence(dev_priv, fifo_state); - if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) - return true; - -- fifo_state = &dev_priv->fifo; - if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) && - vmw_fifo_idle(dev_priv, sequence)) - return true; -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c -index 31f9afe..f1d6261 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c -@@ -30,6 +30,8 @@ - /* Might need a hrtimer here? */ - #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? 
HZ / 60 : 1) - -+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb); -+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb); - - void vmw_display_unit_cleanup(struct vmw_display_unit *du) - { -@@ -326,6 +328,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb, - struct vmw_framebuffer_surface { - struct vmw_framebuffer base; - struct vmw_surface *surface; -+ struct vmw_dma_buffer *buffer; - struct delayed_work d_work; - struct mutex work_lock; - bool present_fs; -@@ -500,8 +503,8 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, - vfbs->base.base.depth = 24; - vfbs->base.base.width = width; - vfbs->base.base.height = height; -- vfbs->base.pin = NULL; -- vfbs->base.unpin = NULL; -+ vfbs->base.pin = &vmw_surface_dmabuf_pin; -+ vfbs->base.unpin = &vmw_surface_dmabuf_unpin; - vfbs->surface = surface; - mutex_init(&vfbs->work_lock); - INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback); -@@ -589,6 +592,40 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { - .create_handle = vmw_framebuffer_create_handle, - }; - -+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb) -+{ -+ struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); -+ struct vmw_framebuffer_surface *vfbs = -+ vmw_framebuffer_to_vfbs(&vfb->base); -+ unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height; -+ int ret; -+ -+ vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL); -+ if (unlikely(vfbs->buffer == NULL)) -+ return -ENOMEM; -+ -+ vmw_overlay_pause_all(dev_priv); -+ ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size, -+ &vmw_vram_ne_placement, -+ false, &vmw_dmabuf_bo_free); -+ vmw_overlay_resume_all(dev_priv); -+ -+ return ret; -+} -+ -+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb) -+{ -+ struct ttm_buffer_object *bo; -+ struct vmw_framebuffer_surface *vfbs = -+ vmw_framebuffer_to_vfbs(&vfb->base); -+ -+ bo = &vfbs->buffer->base; -+ ttm_bo_unref(&bo); -+ vfbs->buffer = NULL; -+ -+ return 0; -+} -+ - static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) - { - struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); -@@ -596,33 +633,15 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) - vmw_framebuffer_to_vfbd(&vfb->base); - int ret; - -+ - vmw_overlay_pause_all(dev_priv); - - ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer); - -- if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { -- vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); -- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0); -- vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); -- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0); -- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); -- vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0); -- vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0); -- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); -- -- vmw_write(dev_priv, SVGA_REG_ENABLE, 1); -- vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width); -- vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height); -- vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel); -- vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth); -- vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000); -- vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); -- vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff); -- } else -- WARN_ON(true); -- - vmw_overlay_resume_all(dev_priv); - -+ WARN_ON(ret != 0); -+ - return 0; - } - -@@ -668,7 +687,7 @@ int vmw_kms_new_framebuffer_dmabuf(struct 
vmw_private *dev_priv, - - /* XXX get the first 3 from the surface info */ - vfbd->base.base.bits_per_pixel = 32; -- vfbd->base.base.pitch = width * 32 / 4; -+ vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8; - vfbd->base.base.depth = 24; - vfbd->base.base.width = width; - vfbd->base.base.height = height; -@@ -752,14 +771,8 @@ err_not_scanout: - return NULL; - } - --static int vmw_kms_fb_changed(struct drm_device *dev) --{ -- return 0; --} -- - static struct drm_mode_config_funcs vmw_kms_funcs = { - .fb_create = vmw_kms_fb_create, -- .fb_changed = vmw_kms_fb_changed, - }; - - int vmw_kms_init(struct vmw_private *dev_priv) -@@ -771,8 +784,9 @@ int vmw_kms_init(struct vmw_private *dev_priv) - dev->mode_config.funcs = &vmw_kms_funcs; - dev->mode_config.min_width = 1; - dev->mode_config.min_height = 1; -- dev->mode_config.max_width = dev_priv->fb_max_width; -- dev->mode_config.max_height = dev_priv->fb_max_height; -+ /* assumed largest fb size */ -+ dev->mode_config.max_width = 8192; -+ dev->mode_config.max_height = 8192; - - ret = vmw_kms_init_legacy_display_system(dev_priv); - -@@ -832,49 +846,140 @@ out: - return ret; - } - -+void vmw_kms_write_svga(struct vmw_private *vmw_priv, -+ unsigned width, unsigned height, unsigned pitch, -+ unsigned bbp, unsigned depth) -+{ -+ if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) -+ vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch); -+ else if (vmw_fifo_have_pitchlock(vmw_priv)) -+ iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK); -+ vmw_write(vmw_priv, SVGA_REG_WIDTH, width); -+ vmw_write(vmw_priv, SVGA_REG_HEIGHT, height); -+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bbp); -+ vmw_write(vmw_priv, SVGA_REG_DEPTH, depth); -+ vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); -+ vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); -+ vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); -+} -+ - int vmw_kms_save_vga(struct vmw_private *vmw_priv) - { -- /* -- * setup a single multimon monitor with the size -- * of 0x0, this stops the UI from resizing when we -- * change the framebuffer size -- */ -- if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { -- vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); -- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); -- vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); -- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0); -- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); -- vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0); -- vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0); -- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); -- } -+ struct vmw_vga_topology_state *save; -+ uint32_t i; - - vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH); - vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT); -- vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL); - vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH); -+ vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL); - vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR); - vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK); -- vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK); - vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK); -+ vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK); -+ if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) -+ vmw_priv->vga_pitchlock = -+ vmw_read(vmw_priv, SVGA_REG_PITCHLOCK); -+ else if (vmw_fifo_have_pitchlock(vmw_priv)) -+ vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt + -+ 
SVGA_FIFO_PITCHLOCK); -+ -+ if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) -+ return 0; - -+ vmw_priv->num_displays = vmw_read(vmw_priv, -+ SVGA_REG_NUM_GUEST_DISPLAYS); -+ -+ for (i = 0; i < vmw_priv->num_displays; ++i) { -+ save = &vmw_priv->vga_save[i]; -+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i); -+ save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY); -+ save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X); -+ save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y); -+ save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); -+ save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); -+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); -+ } - return 0; - } - - int vmw_kms_restore_vga(struct vmw_private *vmw_priv) - { -+ struct vmw_vga_topology_state *save; -+ uint32_t i; -+ - vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width); - vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height); -- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp); - vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth); -+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp); - vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo); - vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask); - vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask); - vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask); -+ if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) -+ vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, -+ vmw_priv->vga_pitchlock); -+ else if (vmw_fifo_have_pitchlock(vmw_priv)) -+ iowrite32(vmw_priv->vga_pitchlock, -+ vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK); -+ -+ if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) -+ return 0; - -- /* TODO check for multimon */ -- vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); -+ for (i = 0; i < vmw_priv->num_displays; ++i) { -+ save = &vmw_priv->vga_save[i]; -+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i); -+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary); -+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x); -+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y); -+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width); -+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height); -+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); -+ } - - return 0; - } -+ -+int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ struct vmw_private *dev_priv = vmw_priv(dev); -+ struct drm_vmw_update_layout_arg *arg = -+ (struct drm_vmw_update_layout_arg *)data; -+ struct vmw_master *vmaster = vmw_master(file_priv->master); -+ void __user *user_rects; -+ struct drm_vmw_rect *rects; -+ unsigned rects_size; -+ int ret; -+ -+ ret = ttm_read_lock(&vmaster->lock, true); -+ if (unlikely(ret != 0)) -+ return ret; -+ -+ if (!arg->num_outputs) { -+ struct drm_vmw_rect def_rect = {0, 0, 800, 600}; -+ vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect); -+ goto out_unlock; -+ } -+ -+ rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); -+ rects = kzalloc(rects_size, GFP_KERNEL); -+ if (unlikely(!rects)) { -+ ret = -ENOMEM; -+ goto out_unlock; -+ } -+ -+ user_rects = (void __user *)(unsigned long)arg->rects; -+ ret = copy_from_user(rects, user_rects, rects_size); -+ if (unlikely(ret != 0)) { -+ DRM_ERROR("Failed to get rects.\n"); -+ goto out_free; -+ } -+ -+ vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects); -+ -+out_free: -+ kfree(rects); 
-+out_unlock: -+ ttm_read_unlock(&vmaster->lock); -+ return ret; -+} -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h -index 8b95249..8a398a0 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h -@@ -94,9 +94,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, - int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); - - /* -- * Legacy display unit functions - vmwgfx_ldu.h -+ * Legacy display unit functions - vmwgfx_ldu.c - */ - int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); - int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); -+int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, -+ struct drm_vmw_rect *rects); - - #endif -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c -index 9089159..cfaf690 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c -@@ -38,6 +38,7 @@ struct vmw_legacy_display { - struct list_head active; - - unsigned num_active; -+ unsigned last_num_active; - - struct vmw_framebuffer *fb; - }; -@@ -48,9 +49,12 @@ struct vmw_legacy_display { - struct vmw_legacy_display_unit { - struct vmw_display_unit base; - -- struct list_head active; -+ unsigned pref_width; -+ unsigned pref_height; -+ bool pref_active; -+ struct drm_display_mode *pref_mode; - -- unsigned unit; -+ struct list_head active; - }; - - static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu) -@@ -88,23 +92,44 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) - { - struct vmw_legacy_display *lds = dev_priv->ldu_priv; - struct vmw_legacy_display_unit *entry; -- struct drm_crtc *crtc; -+ struct drm_framebuffer *fb = NULL; -+ struct drm_crtc *crtc = NULL; - int i = 0; - -- /* to stop the screen from changing size on resize */ -- vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); -- for (i = 0; i < lds->num_active; i++) { -- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i); -- vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i); -- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0); -- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); -- vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0); -- vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0); -- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); -+ /* If there is no display topology the host just assumes -+ * that the guest will set the same layout as the host. -+ */ -+ if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) { -+ int w = 0, h = 0; -+ list_for_each_entry(entry, &lds->active, active) { -+ crtc = &entry->base.crtc; -+ w = max(w, crtc->x + crtc->mode.hdisplay); -+ h = max(h, crtc->y + crtc->mode.vdisplay); -+ i++; -+ } -+ -+ if (crtc == NULL) -+ return 0; -+ fb = entry->base.crtc.fb; -+ -+ vmw_kms_write_svga(dev_priv, w, h, fb->pitch, -+ fb->bits_per_pixel, fb->depth); -+ -+ return 0; - } - -- /* Now set the mode */ -- vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active); -+ if (!list_empty(&lds->active)) { -+ entry = list_entry(lds->active.next, typeof(*entry), active); -+ fb = entry->base.crtc.fb; -+ -+ vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch, -+ fb->bits_per_pixel, fb->depth); -+ } -+ -+ /* Make sure we always show something. */ -+ vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, -+ lds->num_active ? 
lds->num_active : 1); -+ - i = 0; - list_for_each_entry(entry, &lds->active, active) { - crtc = &entry->base.crtc; -@@ -120,6 +145,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) - i++; - } - -+ BUG_ON(i != lds->num_active); -+ -+ lds->last_num_active = lds->num_active; -+ - return 0; - } - -@@ -130,6 +159,7 @@ static int vmw_ldu_del_active(struct vmw_private *vmw_priv, - if (list_empty(&ldu->active)) - return 0; - -+ /* Must init otherwise list_empty(&ldu->active) will not work. */ - list_del_init(&ldu->active); - if (--(ld->num_active) == 0) { - BUG_ON(!ld->fb); -@@ -149,24 +179,29 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv, - struct vmw_legacy_display_unit *entry; - struct list_head *at; - -+ BUG_ON(!ld->num_active && ld->fb); -+ if (vfb != ld->fb) { -+ if (ld->fb && ld->fb->unpin) -+ ld->fb->unpin(ld->fb); -+ if (vfb->pin) -+ vfb->pin(vfb); -+ ld->fb = vfb; -+ } -+ - if (!list_empty(&ldu->active)) - return 0; - - at = &ld->active; - list_for_each_entry(entry, &ld->active, active) { -- if (entry->unit > ldu->unit) -+ if (entry->base.unit > ldu->base.unit) - break; - - at = &entry->active; - } - - list_add(&ldu->active, at); -- if (ld->num_active++ == 0) { -- BUG_ON(ld->fb); -- if (vfb->pin) -- vfb->pin(vfb); -- ld->fb = vfb; -- } -+ -+ ld->num_active++; - - return 0; - } -@@ -208,6 +243,8 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set) - - /* ldu only supports one fb active at the time */ - if (dev_priv->ldu_priv->fb && vfb && -+ !(dev_priv->ldu_priv->num_active == 1 && -+ !list_empty(&ldu->active)) && - dev_priv->ldu_priv->fb != vfb) { - DRM_ERROR("Multiple framebuffers not supported\n"); - return -EINVAL; -@@ -300,8 +337,7 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector) - static enum drm_connector_status - vmw_ldu_connector_detect(struct drm_connector *connector) - { -- /* XXX vmwctrl should control connection status */ -- if (vmw_connector_to_ldu(connector)->base.unit == 0) -+ if (vmw_connector_to_ldu(connector)->pref_active) - return connector_status_connected; - return connector_status_disconnected; - } -@@ -312,10 +348,9 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = { - 752, 800, 0, 480, 489, 492, 525, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, - /* 800x600@60Hz */ -- { DRM_MODE("800x600", -- DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, -- 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628, -- 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, -+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, -+ 968, 1056, 0, 600, 601, 605, 628, 0, -+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1024x768@60Hz */ - { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, - 1184, 1344, 0, 768, 771, 777, 806, 0, -@@ -387,10 +422,34 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = { - static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, - uint32_t max_width, uint32_t max_height) - { -+ struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector); - struct drm_device *dev = connector->dev; - struct drm_display_mode *mode = NULL; -+ struct drm_display_mode prefmode = { DRM_MODE("preferred", -+ DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) -+ }; - int i; - -+ /* Add preferred mode */ -+ { -+ mode = drm_mode_duplicate(dev, &prefmode); -+ if (!mode) -+ return 0; -+ mode->hdisplay = ldu->pref_width; -+ mode->vdisplay = ldu->pref_height; -+ 
mode->vrefresh = drm_mode_vrefresh(mode); -+ drm_mode_probed_add(connector, mode); -+ -+ if (ldu->pref_mode) { -+ list_del_init(&ldu->pref_mode->head); -+ drm_mode_destroy(dev, ldu->pref_mode); -+ } -+ -+ ldu->pref_mode = mode; -+ } -+ - for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { - if (vmw_ldu_connector_builtin[i].hdisplay > max_width || - vmw_ldu_connector_builtin[i].vdisplay > max_height) -@@ -443,18 +502,21 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) - if (!ldu) - return -ENOMEM; - -- ldu->unit = unit; -+ ldu->base.unit = unit; - crtc = &ldu->base.crtc; - encoder = &ldu->base.encoder; - connector = &ldu->base.connector; - -+ INIT_LIST_HEAD(&ldu->active); -+ -+ ldu->pref_active = (unit == 0); -+ ldu->pref_width = 800; -+ ldu->pref_height = 600; -+ ldu->pref_mode = NULL; -+ - drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, - DRM_MODE_CONNECTOR_LVDS); -- /* Initial status */ -- if (unit == 0) -- connector->status = connector_status_connected; -- else -- connector->status = connector_status_disconnected; -+ connector->status = vmw_ldu_connector_detect(connector); - - drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, - DRM_MODE_ENCODER_LVDS); -@@ -462,8 +524,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) - encoder->possible_crtcs = (1 << unit); - encoder->possible_clones = 0; - -- INIT_LIST_HEAD(&ldu->active); -- - drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); - - drm_connector_attach_property(connector, -@@ -487,18 +547,22 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) - - INIT_LIST_HEAD(&dev_priv->ldu_priv->active); - dev_priv->ldu_priv->num_active = 0; -+ dev_priv->ldu_priv->last_num_active = 0; - dev_priv->ldu_priv->fb = NULL; - - drm_mode_create_dirty_info_property(dev_priv->dev); - - vmw_ldu_init(dev_priv, 0); -- vmw_ldu_init(dev_priv, 1); -- vmw_ldu_init(dev_priv, 2); -- vmw_ldu_init(dev_priv, 3); -- vmw_ldu_init(dev_priv, 4); -- vmw_ldu_init(dev_priv, 5); -- vmw_ldu_init(dev_priv, 6); -- vmw_ldu_init(dev_priv, 7); -+ /* for old hardware without multimon only enable one display */ -+ if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { -+ vmw_ldu_init(dev_priv, 1); -+ vmw_ldu_init(dev_priv, 2); -+ vmw_ldu_init(dev_priv, 3); -+ vmw_ldu_init(dev_priv, 4); -+ vmw_ldu_init(dev_priv, 5); -+ vmw_ldu_init(dev_priv, 6); -+ vmw_ldu_init(dev_priv, 7); -+ } - - return 0; - } -@@ -514,3 +578,42 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) - - return 0; - } -+ -+int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, -+ struct drm_vmw_rect *rects) -+{ -+ struct drm_device *dev = dev_priv->dev; -+ struct vmw_legacy_display_unit *ldu; -+ struct drm_connector *con; -+ int i; -+ -+ mutex_lock(&dev->mode_config.mutex); -+ -+#if 0 -+ DRM_INFO("%s: new layout ", __func__); -+ for (i = 0; i < (int)num; i++) -+ DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y, -+ rects[i].w, rects[i].h); -+ DRM_INFO("\n"); -+#else -+ (void)i; -+#endif -+ -+ list_for_each_entry(con, &dev->mode_config.connector_list, head) { -+ ldu = vmw_connector_to_ldu(con); -+ if (num > ldu->base.unit) { -+ ldu->pref_width = rects[ldu->base.unit].w; -+ ldu->pref_height = rects[ldu->base.unit].h; -+ ldu->pref_active = true; -+ } else { -+ ldu->pref_width = 800; -+ ldu->pref_height = 600; -+ ldu->pref_active = false; -+ } -+ con->status = vmw_ldu_connector_detect(con); -+ } -+ -+ mutex_unlock(&dev->mode_config.mutex); -+ -+ return 0; -+} -diff --git 
a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c -index 5b6eabe..df2036e 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c -@@ -118,7 +118,7 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv, - if (pin) - overlay_placement = &vmw_vram_ne_placement; - -- ret = ttm_bo_validate(bo, overlay_placement, interruptible, false); -+ ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false); - - ttm_bo_unreserve(bo); - -@@ -358,6 +358,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv, - if (stream->buf != buf) - stream->buf = vmw_dmabuf_reference(buf); - stream->saved = *arg; -+ /* stream is no longer stopped/paused */ -+ stream->paused = false; - - return 0; - } -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c -index f8fbbc6..8612378 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c -@@ -597,8 +597,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, - - ret = copy_from_user(srf->sizes, user_sizes, - srf->num_sizes * sizeof(*srf->sizes)); -- if (unlikely(ret != 0)) -+ if (unlikely(ret != 0)) { -+ ret = -EFAULT; - goto out_err1; -+ } - - if (srf->scanout && - srf->num_sizes == 1 && -@@ -697,9 +699,11 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, - if (user_sizes) - ret = copy_to_user(user_sizes, srf->sizes, - srf->num_sizes * sizeof(*srf->sizes)); -- if (unlikely(ret != 0)) -+ if (unlikely(ret != 0)) { - DRM_ERROR("copy_to_user failed %p %u\n", - user_sizes, srf->num_sizes); -+ ret = -EFAULT; -+ } - out_bad_resource: - out_no_reference: - ttm_base_object_unref(&base); -diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig -index 61ab4da..8d0e31a 100644 ---- a/drivers/gpu/vga/Kconfig -+++ b/drivers/gpu/vga/Kconfig -@@ -18,12 +18,12 @@ config VGA_ARB_MAX_GPUS - multiple GPUS. The overhead for each GPU is very small. - - config VGA_SWITCHEROO -- bool "Laptop Hybrid Grapics - GPU switching support" -+ bool "Laptop Hybrid Graphics - GPU switching support" - depends on X86 - depends on ACPI - help -- Many laptops released in 2008/9/10 have two gpus with a multiplxer -+ Many laptops released in 2008/9/10 have two GPUs with a multiplexer - to switch between them. This adds support for dynamic switching when - X isn't running and delayed switching until the next logoff. This -- features is called hybrid graphics, ATI PowerXpress, and Nvidia -+ feature is called hybrid graphics, ATI PowerXpress, and Nvidia - HybridPower. -diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c -index 441e38c..b87569e 100644 ---- a/drivers/gpu/vga/vgaarb.c -+++ b/drivers/gpu/vga/vgaarb.c -@@ -1,12 +1,32 @@ - /* -- * vgaarb.c -+ * vgaarb.c: Implements the VGA arbitration. For details refer to -+ * Documentation/vgaarbiter.txt -+ * - * - * (C) Copyright 2005 Benjamin Herrenschmidt - * (C) Copyright 2007 Paulo R. Zanoni - * (C) Copyright 2007, 2009 Tiago Vignatti - * -- * Implements the VGA arbitration. 
For details refer to -- * Documentation/vgaarbiter.txt -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the next -+ * paragraph) shall be included in all copies or substantial portions of the -+ * Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS -+ * IN THE SOFTWARE. -+ * - */ - - #include -@@ -155,8 +175,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev, - (vgadev->decodes & VGA_RSRC_LEGACY_MEM)) - rsrc |= VGA_RSRC_LEGACY_MEM; - -- pr_devel("%s: %d\n", __func__, rsrc); -- pr_devel("%s: owns: %d\n", __func__, vgadev->owns); -+ pr_debug("%s: %d\n", __func__, rsrc); -+ pr_debug("%s: owns: %d\n", __func__, vgadev->owns); - - /* Check what resources we need to acquire */ - wants = rsrc & ~vgadev->owns; -@@ -268,7 +288,7 @@ static void __vga_put(struct vga_device *vgadev, unsigned int rsrc) - { - unsigned int old_locks = vgadev->locks; - -- pr_devel("%s\n", __func__); -+ pr_debug("%s\n", __func__); - - /* Update our counters, and account for equivalent legacy resources - * if we decode them -@@ -575,6 +595,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev, - else - vga_decode_count--; - } -+ pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); - } - - void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) -@@ -831,7 +852,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, - curr_pos += 5; - remaining -= 5; - -- pr_devel("client 0x%p called 'lock'\n", priv); -+ pr_debug("client 0x%p called 'lock'\n", priv); - - if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { - ret_val = -EPROTO; -@@ -867,7 +888,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, - curr_pos += 7; - remaining -= 7; - -- pr_devel("client 0x%p called 'unlock'\n", priv); -+ pr_debug("client 0x%p called 'unlock'\n", priv); - - if (strncmp(curr_pos, "all", 3) == 0) - io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; -@@ -917,7 +938,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, - curr_pos += 8; - remaining -= 8; - -- pr_devel("client 0x%p called 'trylock'\n", priv); -+ pr_debug("client 0x%p called 'trylock'\n", priv); - - if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { - ret_val = -EPROTO; -@@ -961,7 +982,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, - - curr_pos += 7; - remaining -= 7; -- pr_devel("client 0x%p called 'target'\n", priv); -+ pr_debug("client 0x%p called 'target'\n", priv); - /* if target is default */ - if (!strncmp(curr_pos, "default", 7)) - pdev = 
pci_dev_get(vga_default_device()); -@@ -971,11 +992,11 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, - ret_val = -EPROTO; - goto done; - } -- pr_devel("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos, -+ pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos, - domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); - - pbus = pci_find_bus(domain, bus); -- pr_devel("vgaarb: pbus %p\n", pbus); -+ pr_debug("vgaarb: pbus %p\n", pbus); - if (pbus == NULL) { - pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n", - domain, bus); -@@ -983,7 +1004,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, - goto done; - } - pdev = pci_get_slot(pbus, devfn); -- pr_devel("vgaarb: pdev %p\n", pdev); -+ pr_debug("vgaarb: pdev %p\n", pdev); - if (!pdev) { - pr_err("vgaarb: invalid PCI address %x:%x\n", - bus, devfn); -@@ -993,7 +1014,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, - } - - vgadev = vgadev_find(pdev); -- pr_devel("vgaarb: vgadev %p\n", vgadev); -+ pr_debug("vgaarb: vgadev %p\n", vgadev); - if (vgadev == NULL) { - pr_err("vgaarb: this pci device is not a vga device\n"); - pci_dev_put(pdev); -@@ -1029,7 +1050,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, - } else if (strncmp(curr_pos, "decodes ", 8) == 0) { - curr_pos += 8; - remaining -= 8; -- pr_devel("vgaarb: client 0x%p called 'decodes'\n", priv); -+ pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv); - - if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { - ret_val = -EPROTO; -@@ -1058,7 +1079,7 @@ static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait) - { - struct vga_arb_private *priv = file->private_data; - -- pr_devel("%s\n", __func__); -+ pr_debug("%s\n", __func__); - - if (priv == NULL) - return -ENODEV; -@@ -1071,7 +1092,7 @@ static int vga_arb_open(struct inode *inode, struct file *file) - struct vga_arb_private *priv; - unsigned long flags; - -- pr_devel("%s\n", __func__); -+ pr_debug("%s\n", __func__); - - priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL); - if (priv == NULL) -@@ -1101,7 +1122,7 @@ static int vga_arb_release(struct inode *inode, struct file *file) - unsigned long flags; - int i; - -- pr_devel("%s\n", __func__); -+ pr_debug("%s\n", __func__); - - if (priv == NULL) - return -ENODEV; -@@ -1112,7 +1133,7 @@ static int vga_arb_release(struct inode *inode, struct file *file) - uc = &priv->cards[i]; - if (uc->pdev == NULL) - continue; -- pr_devel("uc->io_cnt == %d, uc->mem_cnt == %d\n", -+ pr_debug("uc->io_cnt == %d, uc->mem_cnt == %d\n", - uc->io_cnt, uc->mem_cnt); - while (uc->io_cnt--) - vga_put(uc->pdev, VGA_RSRC_LEGACY_IO); -@@ -1165,7 +1186,7 @@ static int pci_notify(struct notifier_block *nb, unsigned long action, - struct pci_dev *pdev = to_pci_dev(dev); - bool notify = false; - -- pr_devel("%s\n", __func__); -+ pr_debug("%s\n", __func__); - - /* For now we're only intereted in devices added and removed. 
I didn't - * test this thing here, so someone needs to double check for the -diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig -index 7696a66..82cb8ff 100644 ---- a/drivers/staging/Kconfig -+++ b/drivers/staging/Kconfig -@@ -91,8 +91,6 @@ source "drivers/staging/line6/Kconfig" - - source "drivers/gpu/drm/vmwgfx/Kconfig" - --source "drivers/gpu/drm/nouveau/Kconfig" -- - source "drivers/staging/octeon/Kconfig" - - source "drivers/staging/serqt_usb2/Kconfig" -diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c -index ecf4055..4a56f46 100644 ---- a/drivers/video/efifb.c -+++ b/drivers/video/efifb.c -@@ -168,7 +168,7 @@ static void efifb_destroy(struct fb_info *info) - { - if (info->screen_base) - iounmap(info->screen_base); -- release_mem_region(info->aperture_base, info->aperture_size); -+ release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size); - framebuffer_release(info); - } - -@@ -292,8 +292,13 @@ static int __devinit efifb_probe(struct platform_device *dev) - info->pseudo_palette = info->par; - info->par = NULL; - -- info->aperture_base = efifb_fix.smem_start; -- info->aperture_size = size_remap; -+ info->apertures = alloc_apertures(1); -+ if (!info->apertures) { -+ err = -ENOMEM; -+ goto err_release_fb; -+ } -+ info->apertures->ranges[0].base = efifb_fix.smem_start; -+ info->apertures->ranges[0].size = size_remap; - - info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); - if (!info->screen_base) { -diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c -index a15b44e..e08b7b5 100644 ---- a/drivers/video/fbmem.c -+++ b/drivers/video/fbmem.c -@@ -1468,16 +1468,67 @@ static int fb_check_foreignness(struct fb_info *fi) - return 0; - } - --static bool fb_do_apertures_overlap(struct fb_info *gen, struct fb_info *hw) -+static bool apertures_overlap(struct aperture *gen, struct aperture *hw) - { - /* is the generic aperture base the same as the HW one */ -- if (gen->aperture_base == hw->aperture_base) -+ if (gen->base == hw->base) - return true; - /* is the generic aperture base inside the hw base->hw base+size */ -- if (gen->aperture_base > hw->aperture_base && gen->aperture_base <= hw->aperture_base + hw->aperture_size) -+ if (gen->base > hw->base && gen->base <= hw->base + hw->size) - return true; - return false; - } -+ -+static bool fb_do_apertures_overlap(struct apertures_struct *gena, -+ struct apertures_struct *hwa) -+{ -+ int i, j; -+ if (!hwa || !gena) -+ return false; -+ -+ for (i = 0; i < hwa->count; ++i) { -+ struct aperture *h = &hwa->ranges[i]; -+ for (j = 0; j < gena->count; ++j) { -+ struct aperture *g = &gena->ranges[j]; -+ printk(KERN_DEBUG "checking generic (%llx %llx) vs hw (%llx %llx)\n", -+ g->base, g->size, h->base, h->size); -+ if (apertures_overlap(g, h)) -+ return true; -+ } -+ } -+ -+ return false; -+} -+ -+#define VGA_FB_PHYS 0xA0000 -+void remove_conflicting_framebuffers(struct apertures_struct *a, -+ const char *name, bool primary) -+{ -+ int i; -+ -+ /* check all firmware fbs and kick off if the base addr overlaps */ -+ for (i = 0 ; i < FB_MAX; i++) { -+ struct apertures_struct *gen_aper; -+ if (!registered_fb[i]) -+ continue; -+ -+ if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE)) -+ continue; -+ -+ gen_aper = registered_fb[i]->apertures; -+ if (fb_do_apertures_overlap(gen_aper, a) || -+ (primary && gen_aper && gen_aper->count && -+ gen_aper->ranges[0].base == VGA_FB_PHYS)) { -+ -+ printk(KERN_ERR "fb: conflicting fb hw usage " -+ "%s vs %s - removing generic driver\n", -+ name, 
registered_fb[i]->fix.id); -+ unregister_framebuffer(registered_fb[i]); -+ } -+ } -+} -+EXPORT_SYMBOL(remove_conflicting_framebuffers); -+ - /** - * register_framebuffer - registers a frame buffer device - * @fb_info: frame buffer info structure -@@ -1501,21 +1552,8 @@ register_framebuffer(struct fb_info *fb_info) - if (fb_check_foreignness(fb_info)) - return -ENOSYS; - -- /* check all firmware fbs and kick off if the base addr overlaps */ -- for (i = 0 ; i < FB_MAX; i++) { -- if (!registered_fb[i]) -- continue; -- -- if (registered_fb[i]->flags & FBINFO_MISC_FIRMWARE) { -- if (fb_do_apertures_overlap(registered_fb[i], fb_info)) { -- printk(KERN_ERR "fb: conflicting fb hw usage " -- "%s vs %s - removing generic driver\n", -- fb_info->fix.id, -- registered_fb[i]->fix.id); -- unregister_framebuffer(registered_fb[i]); -- } -- } -- } -+ remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id, -+ fb_is_primary_device(fb_info)); - - num_registered_fb++; - for (i = 0 ; i < FB_MAX; i++) -diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c -index 81aa312..0a08f13 100644 ---- a/drivers/video/fbsysfs.c -+++ b/drivers/video/fbsysfs.c -@@ -80,6 +80,7 @@ EXPORT_SYMBOL(framebuffer_alloc); - */ - void framebuffer_release(struct fb_info *info) - { -+ kfree(info->apertures); - kfree(info); - } - EXPORT_SYMBOL(framebuffer_release); -diff --git a/drivers/video/offb.c b/drivers/video/offb.c -index 61f8b8f..46dda7d 100644 ---- a/drivers/video/offb.c -+++ b/drivers/video/offb.c -@@ -285,7 +285,7 @@ static void offb_destroy(struct fb_info *info) - { - if (info->screen_base) - iounmap(info->screen_base); -- release_mem_region(info->aperture_base, info->aperture_size); -+ release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size); - framebuffer_release(info); - } - -@@ -491,8 +491,11 @@ static void __init offb_init_fb(const char *name, const char *full_name, - var->vmode = FB_VMODE_NONINTERLACED; - - /* set offb aperture size for generic probing */ -- info->aperture_base = address; -- info->aperture_size = fix->smem_len; -+ info->apertures = alloc_apertures(1); -+ if (!info->apertures) -+ goto out_aper; -+ info->apertures->ranges[0].base = address; -+ info->apertures->ranges[0].size = fix->smem_len; - - info->fbops = &offb_ops; - info->screen_base = ioremap(address, fix->smem_len); -@@ -501,17 +504,20 @@ static void __init offb_init_fb(const char *name, const char *full_name, - - fb_alloc_cmap(&info->cmap, 256, 0); - -- if (register_framebuffer(info) < 0) { -- iounmap(par->cmap_adr); -- par->cmap_adr = NULL; -- iounmap(info->screen_base); -- framebuffer_release(info); -- release_mem_region(res_start, res_size); -- return; -- } -+ if (register_framebuffer(info) < 0) -+ goto out_err; - - printk(KERN_INFO "fb%d: Open Firmware frame buffer device on %s\n", - info->node, full_name); -+ return; -+ -+out_err: -+ iounmap(info->screen_base); -+out_aper: -+ iounmap(par->cmap_adr); -+ par->cmap_adr = NULL; -+ framebuffer_release(info); -+ release_mem_region(res_start, res_size); - } - - -diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c -index 0cadf7a..090aa1a 100644 ---- a/drivers/video/vesafb.c -+++ b/drivers/video/vesafb.c -@@ -177,7 +177,7 @@ static void vesafb_destroy(struct fb_info *info) - { - if (info->screen_base) - iounmap(info->screen_base); -- release_mem_region(info->aperture_base, info->aperture_size); -+ release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size); - framebuffer_release(info); - } - -@@ -295,8 +295,13 @@ static 
int __init vesafb_probe(struct platform_device *dev) - info->par = NULL; - - /* set vesafb aperture size for generic probing */ -- info->aperture_base = screen_info.lfb_base; -- info->aperture_size = size_total; -+ info->apertures = alloc_apertures(1); -+ if (!info->apertures) { -+ err = -ENOMEM; -+ goto err; -+ } -+ info->apertures->ranges[0].base = screen_info.lfb_base; -+ info->apertures->ranges[0].size = size_total; - - info->screen_base = ioremap(vesafb_fix.smem_start, vesafb_fix.smem_len); - if (!info->screen_base) { -diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c -index bf638a4..149c47a 100644 ---- a/drivers/video/vga16fb.c -+++ b/drivers/video/vga16fb.c -@@ -1263,10 +1263,19 @@ static void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image - vga_imageblit_color(info, image); - } - -+static void vga16fb_destroy(struct fb_info *info) -+{ -+ iounmap(info->screen_base); -+ fb_dealloc_cmap(&info->cmap); -+ /* XXX unshare VGA regions */ -+ framebuffer_release(info); -+} -+ - static struct fb_ops vga16fb_ops = { - .owner = THIS_MODULE, - .fb_open = vga16fb_open, - .fb_release = vga16fb_release, -+ .fb_destroy = vga16fb_destroy, - .fb_check_var = vga16fb_check_var, - .fb_set_par = vga16fb_set_par, - .fb_setcolreg = vga16fb_setcolreg, -@@ -1306,6 +1315,11 @@ static int __devinit vga16fb_probe(struct platform_device *dev) - ret = -ENOMEM; - goto err_fb_alloc; - } -+ info->apertures = alloc_apertures(1); -+ if (!info->apertures) { -+ ret = -ENOMEM; -+ goto err_ioremap; -+ } - - /* XXX share VGA_FB_PHYS and I/O region with vgacon and others */ - info->screen_base = (void __iomem *)VGA_MAP_MEM(VGA_FB_PHYS, 0); -@@ -1335,7 +1349,7 @@ static int __devinit vga16fb_probe(struct platform_device *dev) - info->fix = vga16fb_fix; - /* supports rectangles with widths of multiples of 8 */ - info->pixmap.blit_x = 1 << 7 | 1 << 15 | 1 << 23 | 1 << 31; -- info->flags = FBINFO_FLAG_DEFAULT | -+ info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE | - FBINFO_HWACCEL_YPAN; - - i = (info->var.bits_per_pixel == 8) ? 
256 : 16; -@@ -1354,6 +1368,9 @@ static int __devinit vga16fb_probe(struct platform_device *dev) - - vga16fb_update_fix(info); - -+ info->apertures->ranges[0].base = VGA_FB_PHYS; -+ info->apertures->ranges[0].size = VGA_FB_PHYS_LEN; -+ - if (register_framebuffer(info) < 0) { - printk(KERN_ERR "vga16fb: unable to register framebuffer\n"); - ret = -EINVAL; -@@ -1380,13 +1397,8 @@ static int vga16fb_remove(struct platform_device *dev) - { - struct fb_info *info = platform_get_drvdata(dev); - -- if (info) { -+ if (info) - unregister_framebuffer(info); -- iounmap(info->screen_base); -- fb_dealloc_cmap(&info->cmap); -- /* XXX unshare VGA regions */ -- framebuffer_release(info); -- } - - return 0; - } -diff --git a/include/drm/drmP.h b/include/drm/drmP.h -index 2f3b3a0..c1b9871 100644 ---- a/include/drm/drmP.h -+++ b/include/drm/drmP.h -@@ -1428,10 +1428,13 @@ extern void drm_sysfs_connector_remove(struct drm_connector *connector); - /* Graphics Execution Manager library functions (drm_gem.c) */ - int drm_gem_init(struct drm_device *dev); - void drm_gem_destroy(struct drm_device *dev); -+void drm_gem_object_release(struct drm_gem_object *obj); - void drm_gem_object_free(struct kref *kref); - void drm_gem_object_free_unlocked(struct kref *kref); - struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, - size_t size); -+int drm_gem_object_init(struct drm_device *dev, -+ struct drm_gem_object *obj, size_t size); - void drm_gem_object_handle_free(struct kref *kref); - void drm_gem_vm_open(struct vm_area_struct *vma); - void drm_gem_vm_close(struct vm_area_struct *vma); -diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h -index 1347524..93a1a31 100644 ---- a/include/drm/drm_crtc.h -+++ b/include/drm/drm_crtc.h -@@ -31,6 +31,7 @@ - #include - - #include -+#include - - struct drm_device; - struct drm_mode_set; -@@ -271,8 +272,6 @@ struct drm_framebuffer { - unsigned int depth; - int bits_per_pixel; - int flags; -- struct fb_info *fbdev; -- u32 pseudo_palette[17]; - struct list_head filp_head; - /* if you are using the helper */ - void *helper_private; -@@ -369,9 +368,6 @@ struct drm_crtc_funcs { - * @enabled: is this CRTC enabled? - * @x: x position on screen - * @y: y position on screen -- * @desired_mode: new desired mode -- * @desired_x: desired x for desired_mode -- * @desired_y: desired y for desired_mode - * @funcs: CRTC control functions - * - * Each CRTC may have one or more connectors associated with it. 
This structure -@@ -391,8 +387,6 @@ struct drm_crtc { - struct drm_display_mode mode; - - int x, y; -- struct drm_display_mode *desired_mode; -- int desired_x, desired_y; - const struct drm_crtc_funcs *funcs; - - /* CRTC gamma size for reporting to userspace */ -@@ -467,6 +461,15 @@ enum drm_connector_force { - DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */ - }; - -+/* should we poll this connector for connects and disconnects */ -+/* hot plug detectable */ -+#define DRM_CONNECTOR_POLL_HPD (1 << 0) -+/* poll for connections */ -+#define DRM_CONNECTOR_POLL_CONNECT (1 << 1) -+/* can cleanly poll for disconnections without flickering the screen */ -+/* DACs should rarely do this without a lot of testing */ -+#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2) -+ - /** - * drm_connector - central DRM connector control structure - * @crtc: CRTC this connector is currently connected to, NULL if none -@@ -511,6 +514,8 @@ struct drm_connector { - u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY]; - uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY]; - -+ uint8_t polled; /* DRM_CONNECTOR_POLL_* */ -+ - /* requested DPMS state */ - int dpms; - -@@ -521,7 +526,6 @@ struct drm_connector { - uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; - uint32_t force_encoder_id; - struct drm_encoder *encoder; /* currently active encoder */ -- void *fb_helper_private; - }; - - /** -@@ -548,16 +552,10 @@ struct drm_mode_set { - - /** - * struct drm_mode_config_funcs - configure CRTCs for a given screen layout -- * @resize: adjust CRTCs as necessary for the proposed layout -- * -- * Currently only a resize hook is available. DRM will call back into the -- * driver with a new screen width and height. If the driver can't support -- * the proposed size, it can return false. Otherwise it should adjust -- * the CRTC<->connector mappings as needed and update its view of the screen. 
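/*
 * Hedged sketch, not part of the patch: how a KMS driver might use the
 * DRM_CONNECTOR_POLL_* flags and the "polled" field added above, together
 * with drm_kms_helper_poll_init(), which this patch adds to drm_crtc_helper.h.
 * The function name below is made up for illustration.
 */
static void example_analog_connector_setup(struct drm_connector *connector)
{
        /* No reliable hot-plug interrupt on this output: have the helper
         * poll for both connects and disconnects. */
        connector->polled = DRM_CONNECTOR_POLL_CONNECT |
                            DRM_CONNECTOR_POLL_DISCONNECT;
}
/* Once every connector is set up, the driver would call
 * drm_kms_helper_poll_init(dev) to start the delayed poll work added to
 * struct drm_mode_config below. */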
- */ - struct drm_mode_config_funcs { - struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd); -- int (*fb_changed)(struct drm_device *dev); -+ void (*output_poll_changed)(struct drm_device *dev); - }; - - struct drm_mode_group { -@@ -590,14 +588,15 @@ struct drm_mode_config { - - struct list_head property_list; - -- /* in-kernel framebuffers - hung of filp_head in drm_framebuffer */ -- struct list_head fb_kernel_list; -- - int min_width, min_height; - int max_width, max_height; - struct drm_mode_config_funcs *funcs; - resource_size_t fb_base; - -+ /* output poll support */ -+ bool poll_enabled; -+ struct delayed_slow_work output_poll_slow_work; -+ - /* pointers to standard properties */ - struct list_head property_blob_list; - struct drm_property *edid_property; -@@ -666,8 +665,6 @@ extern void drm_fb_release(struct drm_file *file_priv); - extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); - extern struct edid *drm_get_edid(struct drm_connector *connector, - struct i2c_adapter *adapter); --extern int drm_do_probe_ddc_edid(struct i2c_adapter *adapter, -- unsigned char *buf, int len); - extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); - extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); - extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode); -@@ -799,8 +796,14 @@ extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, - extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, - int hdisplay, int vdisplay, int vrefresh, - bool interlaced, int margins); -+extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev, -+ int hdisplay, int vdisplay, int vrefresh, -+ bool interlaced, int margins, int GTF_M, -+ int GTF_2C, int GTF_K, int GTF_2J); - extern int drm_add_modes_noedid(struct drm_connector *connector, - int hdisplay, int vdisplay); - - extern bool drm_edid_is_valid(struct edid *edid); -+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, -+ int hsize, int vsize, int fresh); - #endif /* __DRM_CRTC_H__ */ -diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h -index b29e201..1121f77 100644 ---- a/include/drm/drm_crtc_helper.h -+++ b/include/drm/drm_crtc_helper.h -@@ -39,7 +39,6 @@ - - #include - --#include "drm_fb_helper.h" - struct drm_crtc_helper_funcs { - /* - * Control power levels on the CRTC. 
If the mode passed in is -@@ -96,8 +95,6 @@ struct drm_connector_helper_funcs { - - extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY); - extern void drm_helper_disable_unused_functions(struct drm_device *dev); --extern int drm_helper_hotplug_stage_two(struct drm_device *dev); --extern bool drm_helper_initial_config(struct drm_device *dev); - extern int drm_crtc_helper_set_config(struct drm_mode_set *set); - extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, - struct drm_display_mode *mode, -@@ -123,12 +120,17 @@ static inline void drm_encoder_helper_add(struct drm_encoder *encoder, - encoder->helper_private = (void *)funcs; - } - --static inline int drm_connector_helper_add(struct drm_connector *connector, -+static inline void drm_connector_helper_add(struct drm_connector *connector, - const struct drm_connector_helper_funcs *funcs) - { - connector->helper_private = (void *)funcs; -- return drm_fb_helper_add_connector(connector); - } - - extern int drm_helper_resume_force_mode(struct drm_device *dev); -+extern void drm_kms_helper_poll_init(struct drm_device *dev); -+extern void drm_kms_helper_poll_fini(struct drm_device *dev); -+extern void drm_helper_hpd_irq_event(struct drm_device *dev); -+ -+extern void drm_kms_helper_poll_disable(struct drm_device *dev); -+extern void drm_kms_helper_poll_enable(struct drm_device *dev); - #endif -diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h -index b420989..39e2cc5 100644 ---- a/include/drm/drm_edid.h -+++ b/include/drm/drm_edid.h -@@ -120,7 +120,7 @@ struct detailed_non_pixel { - struct detailed_data_string str; - struct detailed_data_monitor_range range; - struct detailed_data_wpindex color; -- struct std_timing timings[5]; -+ struct std_timing timings[6]; - struct cvt_timing cvt[4]; - } data; - } __attribute__((packed)); -@@ -201,7 +201,4 @@ struct edid { - - #define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8)) - --/* define the number of Extension EDID block */ --#define DRM_MAX_EDID_EXT_NUM 4 -- - #endif /* __DRM_EDID_H__ */ -diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h -index 58c892a..f0a6afc 100644 ---- a/include/drm/drm_fb_helper.h -+++ b/include/drm/drm_fb_helper.h -@@ -30,17 +30,12 @@ - #ifndef DRM_FB_HELPER_H - #define DRM_FB_HELPER_H - -+struct drm_fb_helper; -+ - struct drm_fb_helper_crtc { - uint32_t crtc_id; - struct drm_mode_set mode_set; --}; -- -- --struct drm_fb_helper_funcs { -- void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, -- u16 blue, int regno); -- void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green, -- u16 *blue, int regno); -+ struct drm_display_mode *desired_mode; - }; - - /* mode specified on the command line */ -@@ -57,8 +52,28 @@ struct drm_fb_helper_cmdline_mode { - bool margins; - }; - -+struct drm_fb_helper_surface_size { -+ u32 fb_width; -+ u32 fb_height; -+ u32 surface_width; -+ u32 surface_height; -+ u32 surface_bpp; -+ u32 surface_depth; -+}; -+ -+struct drm_fb_helper_funcs { -+ void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, -+ u16 blue, int regno); -+ void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green, -+ u16 *blue, int regno); -+ -+ int (*fb_probe)(struct drm_fb_helper *helper, -+ struct drm_fb_helper_surface_size *sizes); -+}; -+ - struct drm_fb_helper_connector { - struct drm_fb_helper_cmdline_mode cmdline_mode; -+ struct drm_connector *connector; - }; - - struct drm_fb_helper { -@@ -67,24 +82,26 @@ struct drm_fb_helper { - struct 
drm_display_mode *mode; - int crtc_count; - struct drm_fb_helper_crtc *crtc_info; -+ int connector_count; -+ struct drm_fb_helper_connector **connector_info; - struct drm_fb_helper_funcs *funcs; - int conn_limit; -+ struct fb_info *fbdev; -+ u32 pseudo_palette[17]; - struct list_head kernel_fb_list; -+ -+ /* we got a hotplug but fbdev wasn't running the console -+ delay until next set_par */ -+ bool delayed_hotplug; - }; - --int drm_fb_helper_single_fb_probe(struct drm_device *dev, -- int preferred_bpp, -- int (*fb_create)(struct drm_device *dev, -- uint32_t fb_width, -- uint32_t fb_height, -- uint32_t surface_width, -- uint32_t surface_height, -- uint32_t surface_depth, -- uint32_t surface_bpp, -- struct drm_framebuffer **fb_ptr)); --int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, -- int max_conn); --void drm_fb_helper_free(struct drm_fb_helper *helper); -+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *helper, -+ int preferred_bpp); -+ -+int drm_fb_helper_init(struct drm_device *dev, -+ struct drm_fb_helper *helper, int crtc_count, -+ int max_conn); -+void drm_fb_helper_fini(struct drm_fb_helper *helper); - int drm_fb_helper_blank(int blank, struct fb_info *info); - int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, - struct fb_info *info); -@@ -99,13 +116,15 @@ int drm_fb_helper_setcolreg(unsigned regno, - struct fb_info *info); - - void drm_fb_helper_restore(void); --void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb, -+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, - uint32_t fb_width, uint32_t fb_height); - void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, - uint32_t depth); - --int drm_fb_helper_add_connector(struct drm_connector *connector); --int drm_fb_helper_parse_command_line(struct drm_device *dev); - int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); - -+bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); -+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); -+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); -+ - #endif -diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h -new file mode 100644 -index 0000000..4a08a66 ---- /dev/null -+++ b/include/drm/drm_fixed.h -@@ -0,0 +1,67 @@ -+/* -+ * Copyright 2009 Red Hat Inc. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -+ * OTHER DEALINGS IN THE SOFTWARE. 
-+ * -+ * Authors: Dave Airlie -+ */ -+#ifndef DRM_FIXED_H -+#define DRM_FIXED_H -+ -+typedef union dfixed { -+ u32 full; -+} fixed20_12; -+ -+ -+#define dfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */ -+#define dfixed_const_half(A) (u32)(((A) << 12) + 2048) -+#define dfixed_const_666(A) (u32)(((A) << 12) + 2731) -+#define dfixed_const_8(A) (u32)(((A) << 12) + 3277) -+#define dfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12) -+#define dfixed_init(A) { .full = dfixed_const((A)) } -+#define dfixed_init_half(A) { .full = dfixed_const_half((A)) } -+#define dfixed_trunc(A) ((A).full >> 12) -+ -+static inline u32 dfixed_floor(fixed20_12 A) -+{ -+ u32 non_frac = dfixed_trunc(A); -+ -+ return dfixed_const(non_frac); -+} -+ -+static inline u32 dfixed_ceil(fixed20_12 A) -+{ -+ u32 non_frac = dfixed_trunc(A); -+ -+ if (A.full > dfixed_const(non_frac)) -+ return dfixed_const(non_frac + 1); -+ else -+ return dfixed_const(non_frac); -+} -+ -+static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B) -+{ -+ u64 tmp = ((u64)A.full << 13); -+ -+ do_div(tmp, B.full); -+ tmp += 1; -+ tmp /= 2; -+ return lower_32_bits(tmp); -+} -+#endif -diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h -index b64a8d7..7f0028e 100644 ---- a/include/drm/i915_drm.h -+++ b/include/drm/i915_drm.h -@@ -275,6 +275,7 @@ typedef struct drm_i915_irq_wait { - #define I915_PARAM_HAS_OVERLAY 7 - #define I915_PARAM_HAS_PAGEFLIPPING 8 - #define I915_PARAM_HAS_EXECBUF2 9 -+#define I915_PARAM_HAS_BSD 10 - - typedef struct drm_i915_getparam { - int param; -@@ -616,7 +617,9 @@ struct drm_i915_gem_execbuffer2 { - __u32 num_cliprects; - /** This is a struct drm_clip_rect *cliprects */ - __u64 cliprects_ptr; -- __u64 flags; /* currently unused */ -+#define I915_EXEC_RENDER (1<<0) -+#define I915_EXEC_BSD (1<<1) -+ __u64 flags; - __u64 rsvd1; - __u64 rsvd2; - }; -diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h -index a6a9f4a..fe917de 100644 ---- a/include/drm/nouveau_drm.h -+++ b/include/drm/nouveau_drm.h -@@ -79,6 +79,7 @@ struct drm_nouveau_gpuobj_free { - #define NOUVEAU_GETPARAM_CHIPSET_ID 11 - #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 - #define NOUVEAU_GETPARAM_GRAPH_UNITS 13 -+#define NOUVEAU_GETPARAM_PTIMER_TIME 14 - struct drm_nouveau_getparam { - uint64_t param; - uint64_t value; -diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h -index 81e614b..5347063 100644 ---- a/include/drm/radeon_drm.h -+++ b/include/drm/radeon_drm.h -@@ -902,6 +902,8 @@ struct drm_radeon_cs { - #define RADEON_INFO_NUM_GB_PIPES 0x01 - #define RADEON_INFO_NUM_Z_PIPES 0x02 - #define RADEON_INFO_ACCEL_WORKING 0x03 -+#define RADEON_INFO_CRTC_FROM_ID 0x04 -+#define RADEON_INFO_ACCEL_WORKING2 0x05 - - struct drm_radeon_info { - uint32_t request; -diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h -index 81eb9f4..267a86c 100644 ---- a/include/drm/ttm/ttm_bo_api.h -+++ b/include/drm/ttm/ttm_bo_api.h -@@ -66,6 +66,26 @@ struct ttm_placement { - const uint32_t *busy_placement; - }; - -+/** -+ * struct ttm_bus_placement -+ * -+ * @addr: mapped virtual address -+ * @base: bus base address -+ * @is_iomem: is this io memory ? -+ * @size: size in byte -+ * @offset: offset from the base address -+ * -+ * Structure indicating the bus placement of an object. 
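/*
 * Worked example for the 20.12 fixed-point helpers in drm_fixed.h above;
 * illustrative only, not part of the patch.  Computes 2.5 * 1.5.
 */
fixed20_12 a = dfixed_init_half(2);             /* 2.5  -> a.full == 10240 (0x2800) */
fixed20_12 b = dfixed_init_half(1);             /* 1.5  -> b.full ==  6144 (0x1800) */
fixed20_12 c = { .full = dfixed_mul(a, b) };    /* 3.75 -> c.full == 15360 (0x3c00) */
/* dfixed_trunc(c) == 3, dfixed_floor(c) == dfixed_const(3),
 * dfixed_ceil(c) == dfixed_const(4). */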
-+ */ -+struct ttm_bus_placement { -+ void *addr; -+ unsigned long base; -+ unsigned long size; -+ unsigned long offset; -+ bool is_iomem; -+ bool io_reserved; -+}; -+ - - /** - * struct ttm_mem_reg -@@ -75,6 +95,7 @@ struct ttm_placement { - * @num_pages: Actual size of memory region in pages. - * @page_alignment: Page alignment. - * @placement: Placement flags. -+ * @bus: Placement on io bus accessible to the CPU - * - * Structure indicating the placement and space resources used by a - * buffer object. -@@ -87,6 +108,7 @@ struct ttm_mem_reg { - uint32_t page_alignment; - uint32_t mem_type; - uint32_t placement; -+ struct ttm_bus_placement bus; - }; - - /** -@@ -274,6 +296,7 @@ struct ttm_bo_kmap_obj { - ttm_bo_map_kmap = 3, - ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK, - } bo_kmap_type; -+ struct ttm_buffer_object *bo; - }; - - /** -@@ -313,7 +336,8 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, - * @bo: The buffer object. - * @placement: Proposed placement for the buffer object. - * @interruptible: Sleep interruptible if sleeping. -- * @no_wait: Return immediately if the buffer is busy. -+ * @no_wait_reserve: Return immediately if other buffers are busy. -+ * @no_wait_gpu: Return immediately if the GPU is busy. - * - * Changes placement and caching policy of the buffer object - * according proposed placement. -@@ -325,7 +349,8 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, - */ - extern int ttm_bo_validate(struct ttm_buffer_object *bo, - struct ttm_placement *placement, -- bool interruptible, bool no_wait); -+ bool interruptible, bool no_wait_reserve, -+ bool no_wait_gpu); - - /** - * ttm_bo_unref -@@ -337,6 +362,23 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo, - extern void ttm_bo_unref(struct ttm_buffer_object **bo); - - /** -+ * ttm_bo_lock_delayed_workqueue -+ * -+ * Prevent the delayed workqueue from running. -+ * Returns -+ * True if the workqueue was queued at the time -+ */ -+extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev); -+ -+/** -+ * ttm_bo_unlock_delayed_workqueue -+ * -+ * Allows the delayed workqueue to run. -+ */ -+extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, -+ int resched); -+ -+/** - * ttm_bo_synccpu_write_grab - * - * @bo: The buffer object: -diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h -index 6b9db91..0ea602d 100644 ---- a/include/drm/ttm/ttm_bo_driver.h -+++ b/include/drm/ttm/ttm_bo_driver.h -@@ -176,8 +176,6 @@ struct ttm_tt { - - #define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */ - #define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */ --#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap -- before kernel access. */ - #define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */ - - /** -@@ -189,13 +187,6 @@ struct ttm_tt { - * managed by this memory type. - * @gpu_offset: If used, the GPU offset of the first managed page of - * fixed memory or the first managed location in an aperture. -- * @io_offset: The io_offset of the first managed page of IO memory or -- * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA -- * memory, this should be set to NULL. -- * @io_size: The size of a managed IO region (fixed memory or aperture). -- * @io_addr: Virtual kernel address if the io region is pre-mapped. For -- * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and -- * @io_addr should be set to NULL. - * @size: Size of the managed region. 
- * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX, - * as defined in ttm_placement_common.h -@@ -221,9 +212,6 @@ struct ttm_mem_type_manager { - bool use_type; - uint32_t flags; - unsigned long gpu_offset; -- unsigned long io_offset; -- unsigned long io_size; -- void *io_addr; - uint64_t size; - uint32_t available_caching; - uint32_t default_caching; -@@ -311,7 +299,8 @@ struct ttm_bo_driver { - */ - int (*move) (struct ttm_buffer_object *bo, - bool evict, bool interruptible, -- bool no_wait, struct ttm_mem_reg *new_mem); -+ bool no_wait_reserve, bool no_wait_gpu, -+ struct ttm_mem_reg *new_mem); - - /** - * struct ttm_bo_driver_member verify_access -@@ -351,12 +340,21 @@ struct ttm_bo_driver { - struct ttm_mem_reg *new_mem); - /* notify the driver we are taking a fault on this BO - * and have reserved it */ -- void (*fault_reserve_notify)(struct ttm_buffer_object *bo); -+ int (*fault_reserve_notify)(struct ttm_buffer_object *bo); - - /** - * notify the driver that we're about to swap out this bo - */ - void (*swap_notify) (struct ttm_buffer_object *bo); -+ -+ /** -+ * Driver callback on when mapping io memory (for bo_move_memcpy -+ * for instance). TTM will take care to call io_mem_free whenever -+ * the mapping is not use anymore. io_mem_reserve & io_mem_free -+ * are balanced. -+ */ -+ int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); -+ void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); - }; - - /** -@@ -633,7 +631,8 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, - * @proposed_placement: Proposed new placement for the buffer object. - * @mem: A struct ttm_mem_reg. - * @interruptible: Sleep interruptible when sliping. -- * @no_wait: Don't sleep waiting for space to become available. -+ * @no_wait_reserve: Return immediately if other buffers are busy. -+ * @no_wait_gpu: Return immediately if the GPU is busy. - * - * Allocate memory space for the buffer object pointed to by @bo, using - * the placement flags in @mem, potentially evicting other idle buffer objects. -@@ -647,7 +646,8 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, - extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, - struct ttm_placement *placement, - struct ttm_mem_reg *mem, -- bool interruptible, bool no_wait); -+ bool interruptible, -+ bool no_wait_reserve, bool no_wait_gpu); - /** - * ttm_bo_wait_for_cpu - * -@@ -682,6 +682,11 @@ extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev, - unsigned long *bus_offset, - unsigned long *bus_size); - -+extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev, -+ struct ttm_mem_reg *mem); -+extern void ttm_mem_io_free(struct ttm_bo_device *bdev, -+ struct ttm_mem_reg *mem); -+ - extern void ttm_bo_global_release(struct ttm_global_reference *ref); - extern int ttm_bo_global_init(struct ttm_global_reference *ref); - -@@ -798,7 +803,8 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, - * - * @bo: A pointer to a struct ttm_buffer_object. - * @evict: 1: This is an eviction. Don't try to pipeline. -- * @no_wait: Never sleep, but rather return with -EBUSY. -+ * @no_wait_reserve: Return immediately if other buffers are busy. -+ * @no_wait_gpu: Return immediately if the GPU is busy. - * @new_mem: struct ttm_mem_reg indicating where to move. 
- * - * Optimized move function for a buffer object with both old and -@@ -812,15 +818,16 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, - */ - - extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, -- bool evict, bool no_wait, -- struct ttm_mem_reg *new_mem); -+ bool evict, bool no_wait_reserve, -+ bool no_wait_gpu, struct ttm_mem_reg *new_mem); - - /** - * ttm_bo_move_memcpy - * - * @bo: A pointer to a struct ttm_buffer_object. - * @evict: 1: This is an eviction. Don't try to pipeline. -- * @no_wait: Never sleep, but rather return with -EBUSY. -+ * @no_wait_reserve: Return immediately if other buffers are busy. -+ * @no_wait_gpu: Return immediately if the GPU is busy. - * @new_mem: struct ttm_mem_reg indicating where to move. - * - * Fallback move function for a mappable buffer object in mappable memory. -@@ -834,8 +841,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, - */ - - extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, -- bool evict, -- bool no_wait, struct ttm_mem_reg *new_mem); -+ bool evict, bool no_wait_reserve, -+ bool no_wait_gpu, struct ttm_mem_reg *new_mem); - - /** - * ttm_bo_free_old_node -@@ -854,7 +861,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); - * @sync_obj_arg: An argument to pass to the sync object idle / wait - * functions. - * @evict: This is an evict move. Don't return until the buffer is idle. -- * @no_wait: Never sleep, but rather return with -EBUSY. -+ * @no_wait_reserve: Return immediately if other buffers are busy. -+ * @no_wait_gpu: Return immediately if the GPU is busy. - * @new_mem: struct ttm_mem_reg indicating where to move. - * - * Accelerated move function to be called when an accelerated move -@@ -868,7 +876,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); - extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, - void *sync_obj, - void *sync_obj_arg, -- bool evict, bool no_wait, -+ bool evict, bool no_wait_reserve, -+ bool no_wait_gpu, - struct ttm_mem_reg *new_mem); - /** - * ttm_io_prot -diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h -new file mode 100644 -index 0000000..8bb4de5 ---- /dev/null -+++ b/include/drm/ttm/ttm_page_alloc.h -@@ -0,0 +1,74 @@ -+/* -+ * Copyright (c) Red Hat Inc. -+ -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sub license, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. 
-+ * -+ * Authors: Dave Airlie -+ * Jerome Glisse -+ */ -+#ifndef TTM_PAGE_ALLOC -+#define TTM_PAGE_ALLOC -+ -+#include "ttm_bo_driver.h" -+#include "ttm_memory.h" -+ -+/** -+ * Get count number of pages from pool to pages list. -+ * -+ * @pages: heado of empty linked list where pages are filled. -+ * @flags: ttm flags for page allocation. -+ * @cstate: ttm caching state for the page. -+ * @count: number of pages to allocate. -+ */ -+int ttm_get_pages(struct list_head *pages, -+ int flags, -+ enum ttm_caching_state cstate, -+ unsigned count); -+/** -+ * Put linked list of pages to pool. -+ * -+ * @pages: list of pages to free. -+ * @page_count: number of pages in the list. Zero can be passed for unknown -+ * count. -+ * @flags: ttm flags for page allocation. -+ * @cstate: ttm caching state. -+ */ -+void ttm_put_pages(struct list_head *pages, -+ unsigned page_count, -+ int flags, -+ enum ttm_caching_state cstate); -+/** -+ * Initialize pool allocator. -+ * -+ * Pool allocator is internaly reference counted so it can be initialized -+ * multiple times but ttm_page_alloc_fini has to be called same number of -+ * times. -+ */ -+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages); -+/** -+ * Free pool allocator. -+ */ -+void ttm_page_alloc_fini(void); -+ -+/** -+ * Output the state of pools to debugfs file -+ */ -+extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data); -+#endif -diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h -index c7645f4..4d08423 100644 ---- a/include/drm/vmwgfx_drm.h -+++ b/include/drm/vmwgfx_drm.h -@@ -50,6 +50,8 @@ - #define DRM_VMW_EXECBUF 12 - #define DRM_VMW_FIFO_DEBUG 13 - #define DRM_VMW_FENCE_WAIT 14 -+/* guarded by minor version >= 2 */ -+#define DRM_VMW_UPDATE_LAYOUT 15 - - - /*************************************************************************/ -@@ -585,4 +587,28 @@ struct drm_vmw_stream_arg { - * sure that the stream has been stopped. - */ - -+/*************************************************************************/ -+/** -+ * DRM_VMW_UPDATE_LAYOUT - Update layout -+ * -+ * Updates the prefered modes and connection status for connectors. The -+ * command conisits of one drm_vmw_update_layout_arg pointing out a array -+ * of num_outputs drm_vmw_rect's. -+ */ -+ -+/** -+ * struct drm_vmw_update_layout_arg -+ * -+ * @num_outputs: number of active -+ * @rects: pointer to array of drm_vmw_rect -+ * -+ * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl. 
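/*
 * Hedged sketch of the pool interface declared in ttm_page_alloc.h above;
 * not part of the patch.  The flag value (0) and the caching state
 * (tt_cached) are assumptions standing in for real TTM_PAGE_FLAG_* bits and
 * enum ttm_caching_state members; error handling is trimmed.
 */
struct list_head pages;

INIT_LIST_HEAD(&pages);
if (ttm_get_pages(&pages, 0 /* flags */, tt_cached, 16) == 0)
        ttm_put_pages(&pages, 16, 0 /* flags */, tt_cached);
/* ttm_page_alloc_init()/ttm_page_alloc_fini() bracket this and must be
 * called a matching number of times, as the pool's own comment notes. */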
-+ */ -+ -+struct drm_vmw_update_layout_arg { -+ uint32_t num_outputs; -+ uint32_t pad64; -+ uint64_t rects; -+}; -+ - #endif -diff --git a/include/linux/fb.h b/include/linux/fb.h -index c10163b..1296af4 100644 ---- a/include/linux/fb.h -+++ b/include/linux/fb.h -@@ -403,6 +403,7 @@ struct fb_cursor { - #include - #include - #include -+#include - #include - - struct vm_area_struct; -@@ -862,10 +863,22 @@ struct fb_info { - /* we need the PCI or similiar aperture base/size not - smem_start/size as smem_start may just be an object - allocated inside the aperture so may not actually overlap */ -- resource_size_t aperture_base; -- resource_size_t aperture_size; -+ struct apertures_struct { -+ unsigned int count; -+ struct aperture { -+ resource_size_t base; -+ resource_size_t size; -+ } ranges[0]; -+ } *apertures; - }; - -+static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { -+ struct apertures_struct *a = kzalloc(sizeof(struct apertures_struct) -+ + max_num * sizeof(struct aperture), GFP_KERNEL); -+ a->count = max_num; -+ return a; -+} -+ - #ifdef MODULE - #define FBINFO_DEFAULT FBINFO_MODULE - #else -@@ -958,6 +971,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, - /* drivers/video/fbmem.c */ - extern int register_framebuffer(struct fb_info *fb_info); - extern int unregister_framebuffer(struct fb_info *fb_info); -+extern void remove_conflicting_framebuffers(struct apertures_struct *a, -+ const char *name, bool primary); - extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); - extern int fb_show_logo(struct fb_info *fb_info, int rotate); - extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size); -diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h -index 2dfaa29..c9a9759 100644 ---- a/include/linux/vgaarb.h -+++ b/include/linux/vgaarb.h -@@ -5,6 +5,27 @@ - * (C) Copyright 2005 Benjamin Herrenschmidt - * (C) Copyright 2007 Paulo R. Zanoni - * (C) Copyright 2007, 2009 Tiago Vignatti -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the next -+ * paragraph) shall be included in all copies or substantial portions of the -+ * Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS -+ * IN THE SOFTWARE. 
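/*
 * Hedged sketch, not part of the patch: a native KMS driver using the
 * aperture helpers this patch adds to <linux/fb.h> to kick a firmware
 * framebuffer (vesafb/efifb/offb) off its memory BAR before taking over.
 * The BAR index and the use of vga_default_device() to decide "primary"
 * are illustrative assumptions.
 */
static int example_kick_firmware_fb(struct pci_dev *pdev)
{
        struct apertures_struct *ap = alloc_apertures(1);

        if (!ap)
                return -ENOMEM;
        ap->ranges[0].base = pci_resource_start(pdev, 0);
        ap->ranges[0].size = pci_resource_len(pdev, 0);
        remove_conflicting_framebuffers(ap, "example-drm-fb",
                                        pdev == vga_default_device());
        kfree(ap);
        return 0;
}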
-+ * - */ - - #ifndef LINUX_VGA_H diff --git a/drm-nouveau-abi16.patch b/drm-nouveau-abi16.patch new file mode 100644 index 0000000..7e05f55 --- /dev/null +++ b/drm-nouveau-abi16.patch @@ -0,0 +1,1342 @@ +From d550220dd73ffbd2ad4871dbd5b5f328c4e2227f Mon Sep 17 00:00:00 2001 +From: Ben Skeggs +Date: Thu, 11 Feb 2010 16:37:26 +1000 +Subject: [PATCH 3/4] drm-nouveau-abi16 + +drm/nv50: switch to indirect push buffer controls + +PFIFO on G80 and up has a new mode where the main ring buffer is simply a +ring of pointers to indirect buffers containing the actual command/data +packets. In order to be able to implement index buffers in the 3D driver +we need to be able to submit data-only push buffers right after the cmd +packet header, which is only possible using the new command submission +method. + +This commit doesn't make it possible to implement index buffers yet, some +userspace interface changes will be required, but it does allow for +testing/debugging of the hardware-side support in the meantime. + +Signed-off-by: Ben Skeggs + +drm/nouveau: remove PUSHBUF_CAL macro + +Signed-off-by: Ben Skeggs + +drm/nv50: make pushbuf dma object cover entire vm + +This allows us to submit push buffers from any memtype to the hardware. +We'll need this ability for VRAM index buffers at some point. + +Signed-off-by: Ben Skeggs + +drm/nouveau: new gem pushbuf interface, bump to 0.0.16 + +This commit breaks the userspace interface, and requires a new libdrm for +nouveau to operate again. + +The multiple GEM_PUSHBUF ioctls that were present in 0.0.15 for +compatibility purposes are now gone, and replaced with the new ioctl which +allows for multiple push buffers to be submitted (necessary for hw index +buffers in the nv50 3d driver) and relocations to be applied on any buffer. + +A number of other ioctls (CARD_INIT, GEM_PIN, GEM_UNPIN) that were needed +for userspace modesetting have also been removed. + +Signed-off-by: Ben Skeggs + +drm/nouveau: bump MAX_PUSH to 512 + +Signed-off-by: Ben Skeggs + +drm/nouveau: Unmap pushbuf BOs when we're done with them. + +If you're especially unlucky BOs would move around and their kmaps +would end up pointing to something else in GART, then ioctl_pushbuf() +would use the kmaps again corrupting textures or other pushbufs (the +most noticeable symptom was a PFIFO_DMA_PUSHER from time to time). + +Signed-off-by: Francisco Jerez +Signed-off-by: Ben Skeggs + +drm/nouveau: Don't reuse the same variable in a nested loop. + +Signed-off-by: Francisco Jerez +Signed-off-by: Ben Skeggs + +drm/nouveau: fix missing spin_unlock in failure path + +Found by sparse. + +Signed-off-by: Luca Barbieri +Signed-off-by: Francisco Jerez + +drm/nouveau: only kunmap buffers we mapped during validation + +Fixes suspend/resume regression introduced by 1723b75407...66b4b71229c2c. 
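As a hedged illustration of the ring-of-pointers layout described above (not
part of the patch itself), each indirect-buffer entry is two 32-bit words,
encoded the same way nv50_dma_push() does further down:

/* Assumed helper mirroring nv50_dma_push(); offset is the GPU address of a
 * push buffer segment, length its size in bytes. */
static inline void example_ib_entry(u32 *ib, int slot, u64 offset, int length)
{
        ib[slot * 2 + 0] = lower_32_bits(offset);
        ib[slot * 2 + 1] = upper_32_bits(offset) | length << 8;
}

Sizing, per nouveau_dma_pre_init() below: a 64 KiB push buffer reserves its
upper half (32 KiB) for the IB ring, giving 4096 8-byte slots, so ib_max =
4095 and ib_base = 0x2000 (in dwords), while dma.max shrinks to the remaining
8192 dwords.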
+ +Signed-off-by: Ben Skeggs +--- + drivers/gpu/drm/nouveau/nouveau_channel.c | 26 +- + drivers/gpu/drm/nouveau/nouveau_debugfs.c | 11 + + drivers/gpu/drm/nouveau/nouveau_dma.c | 108 +++++++- + drivers/gpu/drm/nouveau/nouveau_dma.h | 21 +- + drivers/gpu/drm/nouveau/nouveau_drv.h | 26 +- + drivers/gpu/drm/nouveau/nouveau_gem.c | 490 ++++++++++------------------- + drivers/gpu/drm/nouveau/nouveau_state.c | 7 - + drivers/gpu/drm/nouveau/nv50_fifo.c | 8 +- + include/drm/nouveau_drm.h | 86 ++---- + 9 files changed, 365 insertions(+), 418 deletions(-) + +diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c +index 2281f99..adac0f8 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_channel.c ++++ b/drivers/gpu/drm/nouveau/nouveau_channel.c +@@ -35,22 +35,27 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_bo *pb = chan->pushbuf_bo; + struct nouveau_gpuobj *pushbuf = NULL; +- uint32_t start = pb->bo.mem.mm_node->start << PAGE_SHIFT; + int ret; + ++ if (dev_priv->card_type >= NV_50) { ++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, ++ dev_priv->vm_end, NV_DMA_ACCESS_RO, ++ NV_DMA_TARGET_AGP, &pushbuf); ++ chan->pushbuf_base = pb->bo.offset; ++ } else + if (pb->bo.mem.mem_type == TTM_PL_TT) { + ret = nouveau_gpuobj_gart_dma_new(chan, 0, + dev_priv->gart_info.aper_size, + NV_DMA_ACCESS_RO, &pushbuf, + NULL); +- chan->pushbuf_base = start; ++ chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT; + } else + if (dev_priv->card_type != NV_04) { + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, + dev_priv->fb_available_size, + NV_DMA_ACCESS_RO, + NV_DMA_TARGET_VIDMEM, &pushbuf); +- chan->pushbuf_base = start; ++ chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT; + } else { + /* NV04 cmdbuf hack, from original ddx.. 
not sure of it's + * exact reason for existing :) PCI access to cmdbuf in +@@ -61,7 +66,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) + dev_priv->fb_available_size, + NV_DMA_ACCESS_RO, + NV_DMA_TARGET_PCI, &pushbuf); +- chan->pushbuf_base = start; ++ chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT; + } + + ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf); +@@ -369,6 +374,14 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, + return ret; + init->channel = chan->id; + ++ if (chan->dma.ib_max) ++ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | ++ NOUVEAU_GEM_DOMAIN_GART; ++ else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM) ++ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; ++ else ++ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART; ++ + init->subchan[0].handle = NvM2MF; + if (dev_priv->card_type < NV_50) + init->subchan[0].grclass = 0x0039; +@@ -408,7 +421,6 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, + ***********************************/ + + struct drm_ioctl_desc nouveau_ioctls[] = { +- DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), +@@ -418,13 +430,9 @@ struct drm_ioctl_desc nouveau_ioctls[] = { + DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), +- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH), +- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH), +- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), + DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), +- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH), + }; + + int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); +diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c +--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c ++++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c +@@ -47,12 +47,23 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data) + seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2); + seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2); + seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2); ++ if (chan->dma.ib_max) { ++ seq_printf(m, " ib max: 0x%08x\n", chan->dma.ib_max); ++ seq_printf(m, " ib put: 0x%08x\n", chan->dma.ib_put); ++ seq_printf(m, " ib free: 0x%08x\n", chan->dma.ib_free); ++ } + + seq_printf(m, "gpu fifo state:\n"); + seq_printf(m, " get: 0x%08x\n", + nvchan_rd32(chan, chan->user_get)); + seq_printf(m, " put: 0x%08x\n", + nvchan_rd32(chan, chan->user_put)); ++ if (chan->dma.ib_max) { ++ seq_printf(m, " ib get: 0x%08x\n", ++ nvchan_rd32(chan, 0x88)); ++ seq_printf(m, " ib put: 0x%08x\n", ++ nvchan_rd32(chan, 0x8c)); ++ } + + seq_printf(m, "last fence : %d\n", chan->fence.sequence); + seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack); +diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c 
b/drivers/gpu/drm/nouveau/nouveau_dma.c +--- a/drivers/gpu/drm/nouveau/nouveau_dma.c ++++ b/drivers/gpu/drm/nouveau/nouveau_dma.c +@@ -32,7 +32,22 @@ + void + nouveau_dma_pre_init(struct nouveau_channel *chan) + { +- chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2; ++ struct drm_nouveau_private *dev_priv = chan->dev->dev_private; ++ struct nouveau_bo *pushbuf = chan->pushbuf_bo; ++ ++ if (dev_priv->card_type == NV_50) { ++ const int ib_size = pushbuf->bo.mem.size / 2; ++ ++ chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2; ++ chan->dma.ib_max = (ib_size / 8) - 1; ++ chan->dma.ib_put = 0; ++ chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put; ++ ++ chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2; ++ } else { ++ chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2; ++ } ++ + chan->dma.put = 0; + chan->dma.cur = chan->dma.put; + chan->dma.free = chan->dma.max - chan->dma.cur; +@@ -162,12 +177,101 @@ READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout) + return (val - chan->pushbuf_base) >> 2; + } + ++void ++nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, ++ int delta, int length) ++{ ++ struct nouveau_bo *pb = chan->pushbuf_bo; ++ uint64_t offset = bo->bo.offset + delta; ++ int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; ++ ++ BUG_ON(chan->dma.ib_free < 1); ++ nouveau_bo_wr32(pb, ip++, lower_32_bits(offset)); ++ nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8); ++ ++ chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max; ++ nvchan_wr32(chan, 0x8c, chan->dma.ib_put); ++ chan->dma.ib_free--; ++} ++ ++static int ++nv50_dma_push_wait(struct nouveau_channel *chan, int count) ++{ ++ uint32_t cnt = 0, prev_get = 0; ++ ++ while (chan->dma.ib_free < count) { ++ uint32_t get = nvchan_rd32(chan, 0x88); ++ if (get != prev_get) { ++ prev_get = get; ++ cnt = 0; ++ } ++ ++ if ((++cnt & 0xff) == 0) { ++ DRM_UDELAY(1); ++ if (cnt > 100000) ++ return -EBUSY; ++ } ++ ++ chan->dma.ib_free = get - chan->dma.ib_put; ++ if (chan->dma.ib_free <= 0) ++ chan->dma.ib_free += chan->dma.ib_max + 1; ++ } ++ ++ return 0; ++} ++ ++static int ++nv50_dma_wait(struct nouveau_channel *chan, int slots, int count) ++{ ++ uint32_t cnt = 0, prev_get = 0; ++ int ret; ++ ++ ret = nv50_dma_push_wait(chan, slots + 1); ++ if (unlikely(ret)) ++ return ret; ++ ++ while (chan->dma.free < count) { ++ int get = READ_GET(chan, &prev_get, &cnt); ++ if (unlikely(get < 0)) { ++ if (get == -EINVAL) ++ continue; ++ ++ return get; ++ } ++ ++ if (get <= chan->dma.cur) { ++ chan->dma.free = chan->dma.max - chan->dma.cur; ++ if (chan->dma.free >= count) ++ break; ++ ++ FIRE_RING(chan); ++ do { ++ get = READ_GET(chan, &prev_get, &cnt); ++ if (unlikely(get < 0)) { ++ if (get == -EINVAL) ++ continue; ++ return get; ++ } ++ } while (get == 0); ++ chan->dma.cur = 0; ++ chan->dma.put = 0; ++ } ++ ++ chan->dma.free = get - chan->dma.cur - 1; ++ } ++ ++ return 0; ++} ++ + int +-nouveau_dma_wait(struct nouveau_channel *chan, int size) ++nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size) + { + uint32_t prev_get = 0, cnt = 0; + int get; + ++ if (chan->dma.ib_max) ++ return nv50_dma_wait(chan, slots, size); ++ + while (chan->dma.free < size) { + get = READ_GET(chan, &prev_get, &cnt); + if (unlikely(get == -EBUSY)) +diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h +--- a/drivers/gpu/drm/nouveau/nouveau_dma.h ++++ b/drivers/gpu/drm/nouveau/nouveau_dma.h +@@ -31,6 +31,9 @@ + #define NOUVEAU_DMA_DEBUG 0 + #endif + 
++void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *, ++ int delta, int length); ++ + /* + * There's a hw race condition where you can't jump to your PUT offset, + * to avoid this we jump to offset + SKIPS and fill the difference with +@@ -96,13 +99,11 @@ enum { + static __must_check inline int + RING_SPACE(struct nouveau_channel *chan, int size) + { +- if (chan->dma.free < size) { +- int ret; ++ int ret; + +- ret = nouveau_dma_wait(chan, size); +- if (ret) +- return ret; +- } ++ ret = nouveau_dma_wait(chan, 1, size); ++ if (ret) ++ return ret; + + chan->dma.free -= size; + return 0; +@@ -146,7 +147,13 @@ FIRE_RING(struct nouveau_channel *chan) + return; + chan->accel_done = true; + +- WRITE_PUT(chan->dma.cur); ++ if (chan->dma.ib_max) { ++ nv50_dma_push(chan, chan->pushbuf_bo, chan->dma.put << 2, ++ (chan->dma.cur - chan->dma.put) << 2); ++ } else { ++ WRITE_PUT(chan->dma.cur); ++ } ++ + chan->dma.put = chan->dma.cur; + } + +diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h +--- a/drivers/gpu/drm/nouveau/nouveau_drv.h ++++ b/drivers/gpu/drm/nouveau/nouveau_drv.h +@@ -34,7 +34,7 @@ + + #define DRIVER_MAJOR 0 + #define DRIVER_MINOR 0 +-#define DRIVER_PATCHLEVEL 15 ++#define DRIVER_PATCHLEVEL 16 + + #define NOUVEAU_FAMILY 0x0000FFFF + #define NOUVEAU_FLAGS 0xFFFF0000 +@@ -83,6 +83,7 @@ struct nouveau_bo { + struct drm_file *reserved_by; + struct list_head entry; + int pbbo_index; ++ bool validate_mapped; + + struct nouveau_channel *channel; + +@@ -239,6 +240,11 @@ struct nouveau_channel { + int cur; + int put; + /* access via pushbuf_bo */ ++ ++ int ib_base; ++ int ib_max; ++ int ib_free; ++ int ib_put; + } dma; + + uint32_t sw_subchannel[8]; +@@ -696,12 +702,6 @@ extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout, + uint32_t reg, uint32_t mask, uint32_t val); + extern bool nouveau_wait_for_idle(struct drm_device *); + extern int nouveau_card_init(struct drm_device *); +-extern int nouveau_ioctl_card_init(struct drm_device *, void *data, +- struct drm_file *); +-extern int nouveau_ioctl_suspend(struct drm_device *, void *data, +- struct drm_file *); +-extern int nouveau_ioctl_resume(struct drm_device *, void *data, +- struct drm_file *); + + /* nouveau_mem.c */ + extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start, +@@ -845,7 +845,7 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan) + /* nouveau_dma.c */ + extern void nouveau_dma_pre_init(struct nouveau_channel *); + extern int nouveau_dma_init(struct nouveau_channel *); +-extern int nouveau_dma_wait(struct nouveau_channel *, int size); ++extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size); + + /* nouveau_acpi.c */ + #ifdef CONFIG_ACPI +@@ -1152,16 +1152,6 @@ extern int nouveau_gem_ioctl_new(struct drm_device *, void *, + struct drm_file *); + extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *, + struct drm_file *); +-extern int nouveau_gem_ioctl_pushbuf_call(struct drm_device *, void *, +- struct drm_file *); +-extern int nouveau_gem_ioctl_pushbuf_call2(struct drm_device *, void *, +- struct drm_file *); +-extern int nouveau_gem_ioctl_pin(struct drm_device *, void *, +- struct drm_file *); +-extern int nouveau_gem_ioctl_unpin(struct drm_device *, void *, +- struct drm_file *); +-extern int nouveau_gem_ioctl_tile(struct drm_device *, void *, +- struct drm_file *); + extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *, + struct drm_file *); + extern int nouveau_gem_ioctl_cpu_fini(struct 
drm_device *, void *, +diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c +--- a/drivers/gpu/drm/nouveau/nouveau_gem.c ++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c +@@ -243,6 +243,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence) + nouveau_fence_unref((void *)&prev_fence); + } + ++ if (unlikely(nvbo->validate_mapped)) { ++ ttm_bo_kunmap(&nvbo->kmap); ++ nvbo->validate_mapped = false; ++ } ++ + list_del(&nvbo->entry); + nvbo->reserved_by = NULL; + ttm_bo_unreserve(&nvbo->bo); +@@ -302,11 +307,14 @@ retry: + if (ret == -EAGAIN) + ret = ttm_bo_wait_unreserved(&nvbo->bo, false); + drm_gem_object_unreference(gem); +- if (ret) ++ if (ret) { ++ NV_ERROR(dev, "fail reserve\n"); + return ret; ++ } + goto retry; + } + ++ b->user_priv = (uint64_t)(unsigned long)nvbo; + nvbo->reserved_by = file_priv; + nvbo->pbbo_index = i; + if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && +@@ -336,8 +344,10 @@ retry: + } + + ret = ttm_bo_wait_cpu(&nvbo->bo, false); +- if (ret) ++ if (ret) { ++ NV_ERROR(dev, "fail wait_cpu\n"); + return ret; ++ } + goto retry; + } + } +@@ -351,6 +361,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, + { + struct drm_nouveau_gem_pushbuf_bo __user *upbbo = + (void __force __user *)(uintptr_t)user_pbbo_ptr; ++ struct drm_device *dev = chan->dev; + struct nouveau_bo *nvbo; + int ret, relocs = 0; + +@@ -362,39 +373,46 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, + spin_lock(&nvbo->bo.lock); + ret = ttm_bo_wait(&nvbo->bo, false, false, false); + spin_unlock(&nvbo->bo.lock); +- if (unlikely(ret)) ++ if (unlikely(ret)) { ++ NV_ERROR(dev, "fail wait other chan\n"); + return ret; ++ } + } + + ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains, + b->write_domains, + b->valid_domains); +- if (unlikely(ret)) ++ if (unlikely(ret)) { ++ NV_ERROR(dev, "fail set_domain\n"); + return ret; ++ } + + nvbo->channel = chan; + ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, + false, false); + nvbo->channel = NULL; +- if (unlikely(ret)) ++ if (unlikely(ret)) { ++ NV_ERROR(dev, "fail ttm_validate\n"); + return ret; ++ } + +- if (nvbo->bo.offset == b->presumed_offset && ++ if (nvbo->bo.offset == b->presumed.offset && + ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && +- b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) || ++ b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || + (nvbo->bo.mem.mem_type == TTM_PL_TT && +- b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART))) ++ b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART))) + continue; + + if (nvbo->bo.mem.mem_type == TTM_PL_TT) +- b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART; ++ b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART; + else +- b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM; +- b->presumed_offset = nvbo->bo.offset; +- b->presumed_ok = 0; ++ b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM; ++ b->presumed.offset = nvbo->bo.offset; ++ b->presumed.valid = 0; + relocs++; + +- if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b))) ++ if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed, ++ &b->presumed, sizeof(b->presumed))) + return -EFAULT; + } + +@@ -408,6 +426,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, + uint64_t user_buffers, int nr_buffers, + struct validate_op *op, int *apply_relocs) + { ++ struct drm_device *dev = chan->dev; + int ret, relocs = 0; + + INIT_LIST_HEAD(&op->vram_list); +@@ -418,11 +437,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, + return 0; + + ret = validate_init(chan, file_priv, pbbo, 
nr_buffers, op); +- if (unlikely(ret)) ++ if (unlikely(ret)) { ++ NV_ERROR(dev, "validate_init\n"); + return ret; ++ } + + ret = validate_list(chan, &op->vram_list, pbbo, user_buffers); + if (unlikely(ret < 0)) { ++ NV_ERROR(dev, "validate vram_list\n"); + validate_fini(op, NULL); + return ret; + } +@@ -430,6 +452,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, + + ret = validate_list(chan, &op->gart_list, pbbo, user_buffers); + if (unlikely(ret < 0)) { ++ NV_ERROR(dev, "validate gart_list\n"); + validate_fini(op, NULL); + return ret; + } +@@ -437,6 +460,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, + + ret = validate_list(chan, &op->both_list, pbbo, user_buffers); + if (unlikely(ret < 0)) { ++ NV_ERROR(dev, "validate both_list\n"); + validate_fini(op, NULL); + return ret; + } +@@ -465,59 +489,82 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size) + } + + static int +-nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo, +- struct drm_nouveau_gem_pushbuf_bo *bo, +- unsigned nr_relocs, uint64_t ptr_relocs, +- unsigned nr_dwords, unsigned first_dword, +- uint32_t *pushbuf, bool is_iomem) ++nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev, ++ struct drm_nouveau_gem_pushbuf *req, ++ struct drm_nouveau_gem_pushbuf_bo *bo) + { + struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; +- struct drm_device *dev = chan->dev; + int ret = 0; + unsigned i; + +- reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc)); ++ reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc)); + if (IS_ERR(reloc)) + return PTR_ERR(reloc); + +- for (i = 0; i < nr_relocs; i++) { ++ for (i = 0; i < req->nr_relocs; i++) { + struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i]; + struct drm_nouveau_gem_pushbuf_bo *b; ++ struct nouveau_bo *nvbo; + uint32_t data; + +- if (r->bo_index >= nr_bo || r->reloc_index < first_dword || +- r->reloc_index >= first_dword + nr_dwords) { +- NV_ERROR(dev, "Bad relocation %d\n", i); +- NV_ERROR(dev, " bo: %d max %d\n", r->bo_index, nr_bo); +- NV_ERROR(dev, " id: %d max %d\n", r->reloc_index, nr_dwords); ++ if (unlikely(r->bo_index > req->nr_buffers)) { ++ NV_ERROR(dev, "reloc bo index invalid\n"); + ret = -EINVAL; + break; + } + + b = &bo[r->bo_index]; +- if (b->presumed_ok) ++ if (b->presumed.valid) + continue; + ++ if (unlikely(r->reloc_bo_index > req->nr_buffers)) { ++ NV_ERROR(dev, "reloc container bo index invalid\n"); ++ ret = -EINVAL; ++ break; ++ } ++ nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv; ++ ++ if (unlikely(r->reloc_bo_offset + 4 > ++ nvbo->bo.mem.num_pages << PAGE_SHIFT)) { ++ NV_ERROR(dev, "reloc outside of bo\n"); ++ ret = -EINVAL; ++ break; ++ } ++ ++ if (!nvbo->kmap.virtual) { ++ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, ++ &nvbo->kmap); ++ if (ret) { ++ NV_ERROR(dev, "failed kmap for reloc\n"); ++ break; ++ } ++ nvbo->validate_mapped = true; ++ } ++ + if (r->flags & NOUVEAU_GEM_RELOC_LOW) +- data = b->presumed_offset + r->data; ++ data = b->presumed.offset + r->data; + else + if (r->flags & NOUVEAU_GEM_RELOC_HIGH) +- data = (b->presumed_offset + r->data) >> 32; ++ data = (b->presumed.offset + r->data) >> 32; + else + data = r->data; + + if (r->flags & NOUVEAU_GEM_RELOC_OR) { +- if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART) ++ if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART) + data |= r->tor; + else + data |= r->vor; + } + +- if (is_iomem) +- iowrite32_native(data, (void __force __iomem *) +- &pushbuf[r->reloc_index]); +- else +- pushbuf[r->reloc_index] = data; ++ 
spin_lock(&nvbo->bo.lock); ++ ret = ttm_bo_wait(&nvbo->bo, false, false, false); ++ spin_unlock(&nvbo->bo.lock); ++ if (ret) { ++ NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret); ++ break; ++ } ++ ++ nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data); + } + + kfree(reloc); +@@ -528,127 +575,50 @@ int + nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, + struct drm_file *file_priv) + { ++ struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_gem_pushbuf *req = data; +- struct drm_nouveau_gem_pushbuf_bo *bo = NULL; ++ struct drm_nouveau_gem_pushbuf_push *push; ++ struct drm_nouveau_gem_pushbuf_bo *bo; + struct nouveau_channel *chan; + struct validate_op op; +- struct nouveau_fence* fence = 0; +- uint32_t *pushbuf = NULL; +- int ret = 0, do_reloc = 0, i; ++ struct nouveau_fence *fence = 0; ++ int i, j, ret = 0, do_reloc = 0; + + NOUVEAU_CHECK_INITIALISED_WITH_RETURN; + NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan); + +- if (req->nr_dwords >= chan->dma.max || +- req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS || +- req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) { +- NV_ERROR(dev, "Pushbuf config exceeds limits:\n"); +- NV_ERROR(dev, " dwords : %d max %d\n", req->nr_dwords, +- chan->dma.max - 1); +- NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers, +- NOUVEAU_GEM_MAX_BUFFERS); +- NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs, +- NOUVEAU_GEM_MAX_RELOCS); +- return -EINVAL; +- } +- +- pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t)); +- if (IS_ERR(pushbuf)) +- return PTR_ERR(pushbuf); +- +- bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); +- if (IS_ERR(bo)) { +- kfree(pushbuf); +- return PTR_ERR(bo); +- } +- +- mutex_lock(&dev->struct_mutex); +- +- /* Validate buffer list */ +- ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, +- req->nr_buffers, &op, &do_reloc); +- if (ret) +- goto out; +- +- /* Apply any relocations that are required */ +- if (do_reloc) { +- ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, +- bo, req->nr_relocs, +- req->relocs, +- req->nr_dwords, 0, +- pushbuf, false); +- if (ret) +- goto out; +- } +- +- /* Emit push buffer to the hw +- */ +- ret = RING_SPACE(chan, req->nr_dwords); +- if (ret) +- goto out; +- +- OUT_RINGp(chan, pushbuf, req->nr_dwords); ++ req->vram_available = dev_priv->fb_aper_free; ++ req->gart_available = dev_priv->gart_info.aper_free; ++ if (unlikely(req->nr_push == 0)) ++ goto out_next; + +- ret = nouveau_fence_new(chan, &fence, true); +- if (ret) { +- NV_ERROR(dev, "error fencing pushbuf: %d\n", ret); +- WIND_RING(chan); +- goto out; ++ if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { ++ NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n", ++ req->nr_push, NOUVEAU_GEM_MAX_PUSH); ++ return -EINVAL; + } + +- if (nouveau_gem_pushbuf_sync(chan)) { +- ret = nouveau_fence_wait(fence, NULL, false, false); +- if (ret) { +- for (i = 0; i < req->nr_dwords; i++) +- NV_ERROR(dev, "0x%08x\n", pushbuf[i]); +- NV_ERROR(dev, "^^ above push buffer is fail :(\n"); +- } ++ if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { ++ NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n", ++ req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); ++ return -EINVAL; + } + +-out: +- validate_fini(&op, fence); +- nouveau_fence_unref((void**)&fence); +- mutex_unlock(&dev->struct_mutex); +- kfree(pushbuf); +- kfree(bo); +- return ret; +-} +- +-#define PUSHBUF_CAL (dev_priv->card_type >= NV_20) +- +-int +-nouveau_gem_ioctl_pushbuf_call(struct 
drm_device *dev, void *data, +- struct drm_file *file_priv) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct drm_nouveau_gem_pushbuf_call *req = data; +- struct drm_nouveau_gem_pushbuf_bo *bo = NULL; +- struct nouveau_channel *chan; +- struct drm_gem_object *gem; +- struct nouveau_bo *pbbo; +- struct validate_op op; +- struct nouveau_fence* fence = 0; +- int i, ret = 0, do_reloc = 0; +- +- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; +- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan); +- +- if (unlikely(req->handle == 0)) +- goto out_next; +- +- if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS || +- req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) { +- NV_ERROR(dev, "Pushbuf config exceeds limits:\n"); +- NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers, +- NOUVEAU_GEM_MAX_BUFFERS); +- NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs, +- NOUVEAU_GEM_MAX_RELOCS); ++ if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { ++ NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n", ++ req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); + return -EINVAL; + } + ++ push = u_memcpya(req->push, req->nr_push, sizeof(*push)); ++ if (IS_ERR(push)) ++ return PTR_ERR(push); ++ + bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); +- if (IS_ERR(bo)) ++ if (IS_ERR(bo)) { ++ kfree(push); + return PTR_ERR(bo); ++ } + + mutex_lock(&dev->struct_mutex); + +@@ -660,122 +630,84 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data, + goto out; + } + +- /* Validate DMA push buffer */ +- gem = drm_gem_object_lookup(dev, file_priv, req->handle); +- if (!gem) { +- NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle); +- ret = -EINVAL; +- goto out; +- } +- pbbo = nouveau_gem_object(gem); +- +- if ((req->offset & 3) || req->nr_dwords < 2 || +- (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size || +- (unsigned long)req->nr_dwords > +- ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) { +- NV_ERROR(dev, "pb call misaligned or out of bounds: " +- "%d + %d * 4 > %ld\n", +- req->offset, req->nr_dwords, pbbo->bo.mem.size); +- ret = -EINVAL; +- drm_gem_object_unreference(gem); +- goto out; +- } +- +- ret = ttm_bo_reserve(&pbbo->bo, false, false, true, +- chan->fence.sequence); +- if (ret) { +- NV_ERROR(dev, "resv pb: %d\n", ret); +- drm_gem_object_unreference(gem); +- goto out; +- } +- +- nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type); +- ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false); +- if (ret) { +- NV_ERROR(dev, "validate pb: %d\n", ret); +- ttm_bo_unreserve(&pbbo->bo); +- drm_gem_object_unreference(gem); +- goto out; +- } +- +- list_add_tail(&pbbo->entry, &op.both_list); +- +- /* If presumed return address doesn't match, we need to map the +- * push buffer and fix it.. 
+- */ +- if (!PUSHBUF_CAL) { +- uint32_t retaddy; +- +- if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) { +- ret = nouveau_dma_wait(chan, 4 + NOUVEAU_DMA_SKIPS); +- if (ret) { +- NV_ERROR(dev, "jmp_space: %d\n", ret); +- goto out; +- } +- } +- +- retaddy = chan->pushbuf_base + ((chan->dma.cur + 2) << 2); +- retaddy |= 0x20000000; +- if (retaddy != req->suffix0) { +- req->suffix0 = retaddy; +- do_reloc = 1; +- } +- } +- + /* Apply any relocations that are required */ + if (do_reloc) { +- void *pbvirt; +- bool is_iomem; +- ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages, +- &pbbo->kmap); ++ ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo); + if (ret) { +- NV_ERROR(dev, "kmap pb: %d\n", ret); ++ NV_ERROR(dev, "reloc apply: %d\n", ret); + goto out; + } ++ } + +- pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem); +- ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo, +- req->nr_relocs, +- req->relocs, +- req->nr_dwords, +- req->offset / 4, +- pbvirt, is_iomem); +- +- if (!PUSHBUF_CAL) { +- nouveau_bo_wr32(pbbo, +- req->offset / 4 + req->nr_dwords - 2, +- req->suffix0); +- } +- +- ttm_bo_kunmap(&pbbo->kmap); ++ if (chan->dma.ib_max) { ++ ret = nouveau_dma_wait(chan, req->nr_push + 1, 6); + if (ret) { +- NV_ERROR(dev, "reloc apply: %d\n", ret); ++ NV_INFO(dev, "nv50cal_space: %d\n", ret); + goto out; + } +- } + +- if (PUSHBUF_CAL) { +- ret = RING_SPACE(chan, 2); ++ for (i = 0; i < req->nr_push; i++) { ++ struct nouveau_bo *nvbo = (void *)(unsigned long) ++ bo[push[i].bo_index].user_priv; ++ ++ nv50_dma_push(chan, nvbo, push[i].offset, ++ push[i].length); ++ } ++ } else ++ if (dev_priv->card_type >= NV_20) { ++ ret = RING_SPACE(chan, req->nr_push * 2); + if (ret) { + NV_ERROR(dev, "cal_space: %d\n", ret); + goto out; + } +- OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) + +- req->offset) | 2); +- OUT_RING(chan, 0); ++ ++ for (i = 0; i < req->nr_push; i++) { ++ struct nouveau_bo *nvbo = (void *)(unsigned long) ++ bo[push[i].bo_index].user_priv; ++ struct drm_mm_node *mem = nvbo->bo.mem.mm_node; ++ ++ OUT_RING(chan, ((mem->start << PAGE_SHIFT) + ++ push[i].offset) | 2); ++ OUT_RING(chan, 0); ++ } + } else { +- ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS); ++ ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS)); + if (ret) { + NV_ERROR(dev, "jmp_space: %d\n", ret); + goto out; + } +- OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) + +- req->offset) | 0x20000000); +- OUT_RING(chan, 0); + +- /* Space the jumps apart with NOPs. */ +- for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) ++ for (i = 0; i < req->nr_push; i++) { ++ struct nouveau_bo *nvbo = (void *)(unsigned long) ++ bo[push[i].bo_index].user_priv; ++ struct drm_mm_node *mem = nvbo->bo.mem.mm_node; ++ uint32_t cmd; ++ ++ cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2); ++ cmd |= 0x20000000; ++ if (unlikely(cmd != req->suffix0)) { ++ if (!nvbo->kmap.virtual) { ++ ret = ttm_bo_kmap(&nvbo->bo, 0, ++ nvbo->bo.mem. 
++ num_pages, ++ &nvbo->kmap); ++ if (ret) { ++ WIND_RING(chan); ++ goto out; ++ } ++ nvbo->validate_mapped = true; ++ } ++ ++ nouveau_bo_wr32(nvbo, (push[i].offset + ++ push[i].length - 8) / 4, cmd); ++ } ++ ++ OUT_RING(chan, ((mem->start << PAGE_SHIFT) + ++ push[i].offset) | 0x20000000); + OUT_RING(chan, 0); ++ for (j = 0; j < NOUVEAU_DMA_SKIPS; j++) ++ OUT_RING(chan, 0); ++ } + } + + ret = nouveau_fence_new(chan, &fence, true); +@@ -790,9 +722,14 @@ out: + nouveau_fence_unref((void**)&fence); + mutex_unlock(&dev->struct_mutex); + kfree(bo); ++ kfree(push); + + out_next: +- if (PUSHBUF_CAL) { ++ if (chan->dma.ib_max) { ++ req->suffix0 = 0x00000000; ++ req->suffix1 = 0x00000000; ++ } else ++ if (dev_priv->card_type >= NV_20) { + req->suffix0 = 0x00020000; + req->suffix1 = 0x00000000; + } else { +@@ -804,19 +741,6 @@ out_next: + return ret; + } + +-int +-nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data, +- struct drm_file *file_priv) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct drm_nouveau_gem_pushbuf_call *req = data; +- +- req->vram_available = dev_priv->fb_aper_free; +- req->gart_available = dev_priv->gart_info.aper_free; +- +- return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv); +-} +- + static inline uint32_t + domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain) + { +@@ -831,70 +755,6 @@ domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain) + } + + int +-nouveau_gem_ioctl_pin(struct drm_device *dev, void *data, +- struct drm_file *file_priv) +-{ +- struct drm_nouveau_gem_pin *req = data; +- struct drm_gem_object *gem; +- struct nouveau_bo *nvbo; +- int ret = 0; +- +- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; +- +- if (drm_core_check_feature(dev, DRIVER_MODESET)) { +- NV_ERROR(dev, "pin only allowed without kernel modesetting\n"); +- return -EINVAL; +- } +- +- if (!DRM_SUSER(DRM_CURPROC)) +- return -EPERM; +- +- gem = drm_gem_object_lookup(dev, file_priv, req->handle); +- if (!gem) +- return -EINVAL; +- nvbo = nouveau_gem_object(gem); +- +- ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain)); +- if (ret) +- goto out; +- +- req->offset = nvbo->bo.offset; +- if (nvbo->bo.mem.mem_type == TTM_PL_TT) +- req->domain = NOUVEAU_GEM_DOMAIN_GART; +- else +- req->domain = NOUVEAU_GEM_DOMAIN_VRAM; +- +-out: +- drm_gem_object_unreference_unlocked(gem); +- +- return ret; +-} +- +-int +-nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data, +- struct drm_file *file_priv) +-{ +- struct drm_nouveau_gem_pin *req = data; +- struct drm_gem_object *gem; +- int ret; +- +- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; +- +- if (drm_core_check_feature(dev, DRIVER_MODESET)) +- return -EINVAL; +- +- gem = drm_gem_object_lookup(dev, file_priv, req->handle); +- if (!gem) +- return -EINVAL; +- +- ret = nouveau_bo_unpin(nouveau_gem_object(gem)); +- +- drm_gem_object_unreference_unlocked(gem); +- +- return ret; +-} +- +-int + nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, + struct drm_file *file_priv) + { +diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c +index a4851af..a8d77c8 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_state.c ++++ b/drivers/gpu/drm/nouveau/nouveau_state.c +@@ -776,13 +776,6 @@ int nouveau_unload(struct drm_device *dev) + return 0; + } + +-int +-nouveau_ioctl_card_init(struct drm_device *dev, void *data, +- struct drm_file *file_priv) +-{ +- return nouveau_card_init(dev); +-} +- + int nouveau_ioctl_getparam(struct drm_device *dev, void *data, + struct drm_file 
*file_priv) + { +diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c +--- a/drivers/gpu/drm/nouveau/nv50_fifo.c ++++ b/drivers/gpu/drm/nouveau/nv50_fifo.c +@@ -280,17 +280,17 @@ nv50_fifo_create_context(struct nouveau_channel *chan) + + dev_priv->engine.instmem.prepare_access(dev, true); + +- nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base); +- nv_wo32(dev, ramfc, 0x10/4, chan->pushbuf_base); + nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4); + nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4)); +- nv_wo32(dev, ramfc, 0x3c/4, 0x00086078); + nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff); + nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff); + nv_wo32(dev, ramfc, 0x40/4, 0x00000000); + nv_wo32(dev, ramfc, 0x7c/4, 0x30000001); + nv_wo32(dev, ramfc, 0x78/4, 0x00000000); +- nv_wo32(dev, ramfc, 0x4c/4, 0xffffffff); ++ nv_wo32(dev, ramfc, 0x3c/4, 0x403f6078); ++ nv_wo32(dev, ramfc, 0x50/4, chan->pushbuf_base + ++ chan->dma.ib_base * 4); ++ nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16); + + if (!IS_G80) { + nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id); +diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h +--- a/include/drm/nouveau_drm.h ++++ b/include/drm/nouveau_drm.h +@@ -25,13 +25,14 @@ + #ifndef __NOUVEAU_DRM_H__ + #define __NOUVEAU_DRM_H__ + +-#define NOUVEAU_DRM_HEADER_PATCHLEVEL 15 ++#define NOUVEAU_DRM_HEADER_PATCHLEVEL 16 + + struct drm_nouveau_channel_alloc { + uint32_t fb_ctxdma_handle; + uint32_t tt_ctxdma_handle; + + int channel; ++ uint32_t pushbuf_domains; + + /* Notifier memory */ + uint32_t notifier_handle; +@@ -109,68 +110,58 @@ struct drm_nouveau_gem_new { + uint32_t align; + }; + ++#define NOUVEAU_GEM_MAX_BUFFERS 1024 ++struct drm_nouveau_gem_pushbuf_bo_presumed { ++ uint32_t valid; ++ uint32_t domain; ++ uint64_t offset; ++}; ++ + struct drm_nouveau_gem_pushbuf_bo { + uint64_t user_priv; + uint32_t handle; + uint32_t read_domains; + uint32_t write_domains; + uint32_t valid_domains; +- uint32_t presumed_ok; +- uint32_t presumed_domain; +- uint64_t presumed_offset; ++ struct drm_nouveau_gem_pushbuf_bo_presumed presumed; + }; + + #define NOUVEAU_GEM_RELOC_LOW (1 << 0) + #define NOUVEAU_GEM_RELOC_HIGH (1 << 1) + #define NOUVEAU_GEM_RELOC_OR (1 << 2) ++#define NOUVEAU_GEM_MAX_RELOCS 1024 + struct drm_nouveau_gem_pushbuf_reloc { ++ uint32_t reloc_bo_index; ++ uint32_t reloc_bo_offset; + uint32_t bo_index; +- uint32_t reloc_index; + uint32_t flags; + uint32_t data; + uint32_t vor; + uint32_t tor; + }; + +-#define NOUVEAU_GEM_MAX_BUFFERS 1024 +-#define NOUVEAU_GEM_MAX_RELOCS 1024 ++#define NOUVEAU_GEM_MAX_PUSH 512 ++struct drm_nouveau_gem_pushbuf_push { ++ uint32_t bo_index; ++ uint32_t pad; ++ uint64_t offset; ++ uint64_t length; ++}; + + struct drm_nouveau_gem_pushbuf { + uint32_t channel; +- uint32_t nr_dwords; + uint32_t nr_buffers; +- uint32_t nr_relocs; +- uint64_t dwords; + uint64_t buffers; +- uint64_t relocs; +-}; +- +-struct drm_nouveau_gem_pushbuf_call { +- uint32_t channel; +- uint32_t handle; +- uint32_t offset; +- uint32_t nr_buffers; + uint32_t nr_relocs; +- uint32_t nr_dwords; +- uint64_t buffers; ++ uint32_t nr_push; + uint64_t relocs; ++ uint64_t push; + uint32_t suffix0; + uint32_t suffix1; +- /* below only accessed for CALL2 */ + uint64_t vram_available; + uint64_t gart_available; + }; + +-struct drm_nouveau_gem_pin { +- uint32_t handle; +- uint32_t domain; +- uint64_t offset; +-}; +- +-struct drm_nouveau_gem_unpin { +- uint32_t handle; +-}; +- + #define 
NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001 + #define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002 + #define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004 +@@ -183,14 +174,6 @@ struct drm_nouveau_gem_cpu_fini { + uint32_t handle; + }; + +-struct drm_nouveau_gem_tile { +- uint32_t handle; +- uint32_t offset; +- uint32_t size; +- uint32_t tile_mode; +- uint32_t tile_flags; +-}; +- + enum nouveau_bus_type { + NV_AGP = 0, + NV_PCI = 1, +@@ -200,22 +183,17 @@ enum nouveau_bus_type { + struct drm_nouveau_sarea { + }; + +-#define DRM_NOUVEAU_CARD_INIT 0x00 +-#define DRM_NOUVEAU_GETPARAM 0x01 +-#define DRM_NOUVEAU_SETPARAM 0x02 +-#define DRM_NOUVEAU_CHANNEL_ALLOC 0x03 +-#define DRM_NOUVEAU_CHANNEL_FREE 0x04 +-#define DRM_NOUVEAU_GROBJ_ALLOC 0x05 +-#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x06 +-#define DRM_NOUVEAU_GPUOBJ_FREE 0x07 ++#define DRM_NOUVEAU_GETPARAM 0x00 ++#define DRM_NOUVEAU_SETPARAM 0x01 ++#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02 ++#define DRM_NOUVEAU_CHANNEL_FREE 0x03 ++#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 ++#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 ++#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 + #define DRM_NOUVEAU_GEM_NEW 0x40 + #define DRM_NOUVEAU_GEM_PUSHBUF 0x41 +-#define DRM_NOUVEAU_GEM_PUSHBUF_CALL 0x42 +-#define DRM_NOUVEAU_GEM_PIN 0x43 /* !KMS only */ +-#define DRM_NOUVEAU_GEM_UNPIN 0x44 /* !KMS only */ +-#define DRM_NOUVEAU_GEM_CPU_PREP 0x45 +-#define DRM_NOUVEAU_GEM_CPU_FINI 0x46 +-#define DRM_NOUVEAU_GEM_INFO 0x47 +-#define DRM_NOUVEAU_GEM_PUSHBUF_CALL2 0x48 ++#define DRM_NOUVEAU_GEM_CPU_PREP 0x42 ++#define DRM_NOUVEAU_GEM_CPU_FINI 0x43 ++#define DRM_NOUVEAU_GEM_INFO 0x44 + + #endif /* __NOUVEAU_DRM_H__ */ +-- +1.7.0 + diff --git a/drm-nouveau-acpi-edid-fallback.patch b/drm-nouveau-acpi-edid-fallback.patch new file mode 100644 index 0000000..3b57cb8 --- /dev/null +++ b/drm-nouveau-acpi-edid-fallback.patch @@ -0,0 +1,239 @@ +From 782468d6a9fb865677c166ceffc2271e1f709cc5 Mon Sep 17 00:00:00 2001 +From: Ben Skeggs +Date: Fri, 16 Apr 2010 08:12:34 +1000 +Subject: [PATCH 2/3] drm-nouveau-acpi-edid-fallback + +--- + drivers/gpu/drm/nouveau/nouveau_acpi.c | 81 ++++++++++++++++++++++++-- + drivers/gpu/drm/nouveau/nouveau_connector.c | 8 +++ + drivers/gpu/drm/nouveau/nouveau_drv.h | 20 +++++-- + drivers/gpu/drm/nouveau/nouveau_state.c | 5 +- + 4 files changed, 98 insertions(+), 16 deletions(-) + +diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c +index 48227e7..ac7fd04 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c ++++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c +@@ -2,11 +2,13 @@ + #include + #include + #include ++#include + + #include "drmP.h" + #include "drm.h" + #include "drm_sarea.h" + #include "drm_crtc_helper.h" ++#include "nouveau_connector.h" + #include "nouveau_drv.h" + #include "nouveau_drm.h" + #include "nv50_display.h" +@@ -35,7 +37,7 @@ static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result) + 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, + }; + +- struct pci_dev *pdev = dev->pdev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; + struct acpi_handle *handle; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_object_list input; +@@ -43,11 +45,11 @@ static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result) + union acpi_object *obj; + int err; + +- handle = DEVICE_ACPI_HANDLE(&pdev->dev); +- +- if (!handle) ++ if (!dev_priv->acpi_device) + return -ENODEV; + ++ handle = dev_priv->acpi_device->handle; ++ + input.count = 4; + input.pointer = params; + params[0].type = 
ACPI_TYPE_BUFFER; +@@ -62,7 +64,8 @@ static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result) + + err = acpi_evaluate_object(handle, "_DSM", &input, &output); + if (err) { +- NV_INFO(dev, "failed to evaluate _DSM: %d\n", err); ++ if (err != AE_NOT_FOUND) ++ NV_INFO(dev, "failed to evaluate _DSM: %d\n", err); + return err; + } + +@@ -86,7 +89,7 @@ static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result) + return 0; + } + +-int nouveau_hybrid_setup(struct drm_device *dev) ++static int nouveau_hybrid_setup(struct drm_device *dev) + { + int result; + +@@ -110,7 +113,7 @@ int nouveau_hybrid_setup(struct drm_device *dev) + return 0; + } + +-bool nouveau_dsm_probe(struct drm_device *dev) ++static bool nouveau_dsm_probe(struct drm_device *dev) + { + int support = 0; + +@@ -123,3 +126,67 @@ bool nouveau_dsm_probe(struct drm_device *dev) + + return true; + } ++ ++int nouveau_acpi_get_edid(struct drm_device *dev, ++ struct drm_connector *connector, ++ struct edid **pedid) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ void *edid; ++ int connector_type = 0; ++ int ret; ++ ++ switch (connector->connector_type) { ++ case DRM_MODE_CONNECTOR_VGA: ++ connector_type = ACPI_VIDEO_DISPLAY_CRT; ++ break; ++ case DRM_MODE_CONNECTOR_Composite: ++ case DRM_MODE_CONNECTOR_SVIDEO: ++ case DRM_MODE_CONNECTOR_Component: ++ case DRM_MODE_CONNECTOR_9PinDIN: ++ connector_type = ACPI_VIDEO_DISPLAY_TV; ++ break; ++ case DRM_MODE_CONNECTOR_DVII: ++ case DRM_MODE_CONNECTOR_DVID: ++ case DRM_MODE_CONNECTOR_HDMIA: ++ case DRM_MODE_CONNECTOR_HDMIB: ++ case DRM_MODE_CONNECTOR_DisplayPort: ++ connector_type = ACPI_VIDEO_DISPLAY_DVI; ++ break; ++ case DRM_MODE_CONNECTOR_LVDS: ++ connector_type = ACPI_VIDEO_DISPLAY_LCD; ++ break; ++ } ++ ++ ret = acpi_video_get_edid(dev_priv->acpi_device, connector_type, -1, &edid); ++ ++ if (ret < 0) ++ return ret; ++ ++ *pedid = edid; ++ return 0; ++} ++ ++int nouveau_acpi_setup(struct drm_device *dev) ++{ ++ struct pci_dev *pdev = dev->pdev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ acpi_handle handle; ++ struct acpi_device *acpi_dev; ++ ++ handle = DEVICE_ACPI_HANDLE(&pdev->dev); ++ ++ if (!handle) ++ return -ENODEV; ++ ++ if (acpi_bus_get_device(handle, &acpi_dev)) ++ return -ENODEV; ++ ++ dev_priv->acpi_device = acpi_dev; ++ dev_priv->acpi_dsm = nouveau_dsm_probe(dev); ++ ++ if (dev_priv->acpi_dsm) ++ nouveau_hybrid_setup(dev); ++ ++ return 0; ++} +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c +index fb51958..5832b60 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.c ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c +@@ -356,6 +356,14 @@ nouveau_connector_detect_lvds(struct drm_connector *connector) + } + } + ++ /* Let's try ACPI */ ++ if (status != connector_status_connected && ++ !dev_priv->vbios.fp_no_ddc) { ++ nouveau_acpi_get_edid(dev, connector, &nv_connector->edid); ++ if (nv_connector->edid) ++ status = connector_status_connected; ++ } ++ + out: + #ifdef CONFIG_ACPI + if (status == connector_status_connected && +diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h +index c31159a..675d7ac 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_drv.h ++++ b/drivers/gpu/drm/nouveau/nouveau_drv.h +@@ -39,6 +39,8 @@ + #define NOUVEAU_FAMILY 0x0000FFFF + #define NOUVEAU_FLAGS 0xFFFF0000 + ++#include ++ + #include "ttm/ttm_bo_api.h" + #include "ttm/ttm_bo_driver.h" + #include "ttm/ttm_placement.h" +@@ -615,7 +617,11 
@@ struct drm_nouveau_private { + } susres; + + struct backlight_device *backlight; ++ ++#ifdef CONFIG_ACPI + bool acpi_dsm; ++ struct acpi_device *acpi_device; ++#endif + + struct nouveau_channel *evo; + +@@ -846,16 +852,20 @@ extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size); + + /* nouveau_acpi.c */ + #ifdef CONFIG_ACPI +-extern int nouveau_hybrid_setup(struct drm_device *dev); +-extern bool nouveau_dsm_probe(struct drm_device *dev); ++extern int nouveau_acpi_setup(struct drm_device *dev); ++extern int nouveau_acpi_get_edid(struct drm_device *dev, ++ struct drm_connector *connector, ++ struct edid **edid); + #else +-static inline int nouveau_hybrid_setup(struct drm_device *dev) ++static inline int nouveau_acpi_setup(struct drm_device *dev) + { + return 0; + } +-static inline bool nouveau_dsm_probe(struct drm_device *dev) ++static inline int nouveau_acpi_get_edid(struct drm_device *dev, ++ struct drm_connector *connector, ++ struct edid **edid) + { +- return false; ++ return -ENODEV; + } + #endif + +diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c +index 7c1d252..7ca9465 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_state.c ++++ b/drivers/gpu/drm/nouveau/nouveau_state.c +@@ -627,10 +627,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) + NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n", + dev->pci_vendor, dev->pci_device, dev->pdev->class); + +- dev_priv->acpi_dsm = nouveau_dsm_probe(dev); +- +- if (dev_priv->acpi_dsm) +- nouveau_hybrid_setup(dev); ++ nouveau_acpi_setup(dev); + + dev_priv->wq = create_workqueue("nouveau"); + if (!dev_priv->wq) +-- +1.7.1 + diff --git a/drm-nouveau-drm-fixed-header.patch b/drm-nouveau-drm-fixed-header.patch new file mode 100644 index 0000000..83df54f --- /dev/null +++ b/drm-nouveau-drm-fixed-header.patch @@ -0,0 +1,86 @@ +From 841045c942be3fd2bf928a7de3e730a00665347e Mon Sep 17 00:00:00 2001 +From: Ben Skeggs +Date: Wed, 28 Apr 2010 15:19:10 +1000 +Subject: [PATCH 3/3] drm-nouveau-drm-fixed-header + +--- + drivers/gpu/drm/nouveau/drm_fixed.h | 67 +++++++++++++++++++++++++++++++++++ + 1 files changed, 67 insertions(+), 0 deletions(-) + create mode 100644 drivers/gpu/drm/nouveau/drm_fixed.h + +diff --git a/drivers/gpu/drm/nouveau/drm_fixed.h b/drivers/gpu/drm/nouveau/drm_fixed.h +new file mode 100644 +index 0000000..4a08a66 +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/drm_fixed.h +@@ -0,0 +1,67 @@ ++/* ++ * Copyright 2009 Red Hat Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: Dave Airlie ++ */ ++#ifndef DRM_FIXED_H ++#define DRM_FIXED_H ++ ++typedef union dfixed { ++ u32 full; ++} fixed20_12; ++ ++ ++#define dfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */ ++#define dfixed_const_half(A) (u32)(((A) << 12) + 2048) ++#define dfixed_const_666(A) (u32)(((A) << 12) + 2731) ++#define dfixed_const_8(A) (u32)(((A) << 12) + 3277) ++#define dfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12) ++#define dfixed_init(A) { .full = dfixed_const((A)) } ++#define dfixed_init_half(A) { .full = dfixed_const_half((A)) } ++#define dfixed_trunc(A) ((A).full >> 12) ++ ++static inline u32 dfixed_floor(fixed20_12 A) ++{ ++ u32 non_frac = dfixed_trunc(A); ++ ++ return dfixed_const(non_frac); ++} ++ ++static inline u32 dfixed_ceil(fixed20_12 A) ++{ ++ u32 non_frac = dfixed_trunc(A); ++ ++ if (A.full > dfixed_const(non_frac)) ++ return dfixed_const(non_frac + 1); ++ else ++ return dfixed_const(non_frac); ++} ++ ++static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B) ++{ ++ u64 tmp = ((u64)A.full << 13); ++ ++ do_div(tmp, B.full); ++ tmp += 1; ++ tmp /= 2; ++ return lower_32_bits(tmp); ++} ++#endif +-- +1.7.1 + diff --git a/drm-nouveau-nva3-noaccel.patch b/drm-nouveau-nva3-noaccel.patch deleted file mode 100644 index d988eb5..0000000 --- a/drm-nouveau-nva3-noaccel.patch +++ /dev/null @@ -1,107 +0,0 @@ -From 030e105efc9a29c7d34fb59fb0e0a40e54178299 Mon Sep 17 00:00:00 2001 -From: Ben Skeggs -Date: Wed, 30 Jun 2010 13:34:05 +1000 -Subject: [PATCH] drm/nouveau: disable acceleration on NVA3/NVA5/NVA8 by default - -There's an GPU lockup problem for which the cause is currently unknown -on these chipsets. - -Until it's resolved, it's better to leave the user with a working system -without acceleration than to have random lockups. - -With this patch, acceleration will be off by default if a known problem -chipset is detected, but can be re-enabled with nouveau.noaccel=0 on -the kernel commandline. 
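The chipset-keyed default described above amounts to a tri-state option: -1 picks a per-chipset default, 0 forces acceleration on, anything else forces it off. A minimal stand-alone C sketch of that decision follows; the chipset IDs are taken from the patch text below, every other name is invented purely for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    static int noaccel_param = -1;          /* what "nouveau.noaccel=" would set */

    static bool accel_blocked(int chipset)
    {
            if (noaccel_param == -1) {      /* no explicit request: per-chipset default */
                    switch (chipset) {
                    case 0xa3:
                    case 0xa5:
                    case 0xa8:
                            return true;    /* known-problem chipsets: block acceleration */
                    default:
                            return false;
                    }
            }
            return noaccel_param != 0;      /* the user decided either way */
    }

    int main(void)
    {
            printf("NVA3, default:   accel blocked = %d\n", accel_blocked(0xa3));
            noaccel_param = 0;              /* i.e. booting with nouveau.noaccel=0 */
            printf("NVA3, noaccel=0: accel blocked = %d\n", accel_blocked(0xa3));
            return 0;
    }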
- -Signed-off-by: Ben Skeggs ---- - drivers/gpu/drm/nouveau/nouveau_drv.c | 2 +- - drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + - drivers/gpu/drm/nouveau/nouveau_state.c | 23 +++++++++++++++++++---- - 3 files changed, 21 insertions(+), 5 deletions(-) - -diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c -index b4d958c..02b564c 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_drv.c -+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c -@@ -72,7 +72,7 @@ int nouveau_ignorelid = 0; - module_param_named(ignorelid, nouveau_ignorelid, int, 0400); - - MODULE_PARM_DESC(noaccel, "Disable all acceleration"); --int nouveau_noaccel = 0; -+int nouveau_noaccel = -1; - module_param_named(noaccel, nouveau_noaccel, int, 0400); - - MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); -diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h -index 022648e..76ec783 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_drv.h -+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h -@@ -493,6 +493,7 @@ enum nouveau_card_type { - - struct drm_nouveau_private { - struct drm_device *dev; -+ bool noaccel; - - /* the card type, takes NV_* as values */ - enum nouveau_card_type card_type; -diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c -index 63c2d24..866f437 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_state.c -+++ b/drivers/gpu/drm/nouveau/nouveau_state.c -@@ -454,7 +454,7 @@ nouveau_card_init(struct drm_device *dev) - if (ret) - goto out_timer; - -- if (nouveau_noaccel) -+ if (dev_priv->noaccel) - engine->graph.accel_blocked = true; - else { - /* PGRAPH */ -@@ -509,10 +509,10 @@ out_display: - else - nv04_display_destroy(dev); - out_fifo: -- if (!nouveau_noaccel) -+ if (!dev_priv->noaccel) - engine->fifo.takedown(dev); - out_graph: -- if (!nouveau_noaccel) -+ if (!dev_priv->noaccel) - engine->graph.takedown(dev); - out_fb: - engine->fb.takedown(dev); -@@ -548,7 +548,7 @@ static void nouveau_card_takedown(struct drm_device *dev) - dev_priv->channel = NULL; - } - -- if (!nouveau_noaccel) { -+ if (!dev_priv->noaccel) { - engine->fifo.takedown(dev); - engine->graph.takedown(dev); - } -@@ -744,6 +744,21 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) - if (ret) - return ret; - -+ if (nouveau_noaccel == -1) { -+ switch (dev_priv->chipset) { -+ case 0xa3: -+ case 0xa5: -+ case 0xa8: -+ dev_priv->noaccel = true; -+ break; -+ default: -+ dev_priv->noaccel = false; -+ break; -+ } -+ } else { -+ dev_priv->noaccel = (nouveau_noaccel != 0); -+ } -+ - /* Map PRAMIN BAR, or on older cards, the aperture withing BAR0 */ - if (dev_priv->card_type >= NV_40) { - int ramin_bar = 2; --- -1.7.2 - diff --git a/drm-nouveau-race-fix.patch b/drm-nouveau-race-fix.patch deleted file mode 100644 index 267e913..0000000 --- a/drm-nouveau-race-fix.patch +++ /dev/null @@ -1,139 +0,0 @@ -From cfff162fe5d7a69a6a77cef306866145bf5b0567 Mon Sep 17 00:00:00 2001 -From: Ben Skeggs -Date: Fri, 23 Jul 2010 09:06:52 +1000 -Subject: [PATCH] drm/nouveau: fix race condition when under memory pressure - -rhbz#602663 - -When VRAM is running out it's possible that the client's push buffers get -evicted to main memory. When they're validated back in, the GPU may -be used for the copy back to VRAM, but the existing synchronisation code -only deals with inter-channel sync, not sync between PFIFO and PGRAPH on -the same channel. This leads to PFIFO fetching from command buffers that -haven't quite been copied by PGRAPH yet. 
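The cross-engine hazard described above is resolved by making buffer validation wait on any outstanding fence owned by a different channel. A rough stand-alone model of that per-buffer decision, mirroring the nouveau_bo_sync_gpu() helper added further down in this patch text (the types here are stand-ins, not the real driver structures):

    struct channel { int id; };
    struct fence   { struct channel *owner; };
    struct buffer  { struct fence *last_fence; };

    /* stand-in for ttm_bo_wait(): stall until the buffer is idle */
    int wait_for_idle(struct buffer *bo) { (void)bo; return 0; }

    int sync_gpu(struct buffer *bo, struct channel *chan)
    {
            struct fence *prev = bo->last_fence;

            if (!prev || prev->owner == chan)
                    return 0;               /* same channel: FIFO ordering is enough */
            return wait_for_idle(bo);       /* another channel touched it: wait first */
    }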
- -This patch marks push buffers as so, and forces any GPU-assisted buffer -moves to be done on a different channel, which triggers the correct -synchronisation to happen before we submit them. - -After discussion with another nouveau developer, it was agreed that while -this patch is fine in itself, that we'd prefer to work out a nicer, but -likely much more invasive, fix upstream. - -Signed-off-by: Ben Skeggs ---- - drivers/gpu/drm/nouveau/nouveau_bo.c | 15 +++++++++++++ - drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + - drivers/gpu/drm/nouveau/nouveau_gem.c | 36 +++++++++++++++++++++++--------- - 3 files changed, 42 insertions(+), 10 deletions(-) - -diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c -index d8c341d..494a219 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_bo.c -+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c -@@ -36,6 +36,21 @@ - #include - #include - -+int -+nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan) -+{ -+ struct nouveau_fence *prev_fence = nvbo->bo.sync_obj; -+ int ret; -+ -+ if (!prev_fence || nouveau_fence_channel(prev_fence) == chan) -+ return 0; -+ -+ spin_lock(&nvbo->bo.lock); -+ ret = ttm_bo_wait(&nvbo->bo, false, false, false); -+ spin_unlock(&nvbo->bo.lock); -+ return ret; -+} -+ - static void - nouveau_bo_del_ttm(struct ttm_buffer_object *bo) - { -diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h -index 51ccd90..022648e 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_drv.h -+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h -@@ -1098,6 +1098,7 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); - extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); - extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); - extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); -+extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *); - - /* nouveau_fence.c */ - struct nouveau_fence; -diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c -index 547f2c2..a915dcd 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_gem.c -+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c -@@ -361,16 +361,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, - - list_for_each_entry(nvbo, list, entry) { - struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; -- struct nouveau_fence *prev_fence = nvbo->bo.sync_obj; - -- if (prev_fence && nouveau_fence_channel(prev_fence) != chan) { -- spin_lock(&nvbo->bo.lock); -- ret = ttm_bo_wait(&nvbo->bo, false, false, false); -- spin_unlock(&nvbo->bo.lock); -- if (unlikely(ret)) { -- NV_ERROR(dev, "fail wait other chan\n"); -- return ret; -- } -+ ret = nouveau_bo_sync_gpu(nvbo, chan); -+ if (unlikely(ret)) { -+ NV_ERROR(dev, "fail pre-validate sync\n"); -+ return ret; - } - - ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains, -@@ -381,7 +376,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, - return ret; - } - -- nvbo->channel = chan; -+ nvbo->channel = (b->read_domains & (1 << 31)) ? 
NULL : chan; - ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, - false, false, false); - nvbo->channel = NULL; -@@ -390,6 +385,12 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, - return ret; - } - -+ ret = nouveau_bo_sync_gpu(nvbo, chan); -+ if (unlikely(ret)) { -+ NV_ERROR(dev, "fail post-validate sync\n"); -+ return ret; -+ } -+ - if (nvbo->bo.offset == b->presumed.offset && - ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && - b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || -@@ -615,6 +616,21 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, - - mutex_lock(&dev->struct_mutex); - -+ /* Mark push buffers as being used on PFIFO, the validation code -+ * will then make sure that if the pushbuf bo moves, that they -+ * happen on the kernel channel, which will in turn cause a sync -+ * to happen before we try and submit the push buffer. -+ */ -+ for (i = 0; i < req->nr_push; i++) { -+ if (push[i].bo_index >= req->nr_buffers) { -+ NV_ERROR(dev, "push %d buffer not in list\n", i); -+ ret = -EINVAL; -+ goto out; -+ } -+ -+ bo[push[i].bo_index].read_domains |= (1 << 31); -+ } -+ - /* Validate buffer list */ - ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, - req->nr_buffers, &op, &do_reloc); --- -1.7.2 - diff --git a/drm-nouveau-updates.patch b/drm-nouveau-updates.patch index a205168..c6b39f9 100644 --- a/drm-nouveau-updates.patch +++ b/drm-nouveau-updates.patch @@ -1,761 +1,1260 @@ -From 06b70a657cec75d89c60243d6c49bc5dae0b5612 Mon Sep 17 00:00:00 2001 -From: Ben Skeggs -Date: Mon, 31 May 2010 12:00:43 +1000 -Subject: [PATCH] drm-nouveau-updates +From 1fe467ac55c8503078f679b1f503e4895d6c5895 Mon Sep 17 00:00:00 2001 +From: Marcin Slusarz +Date: Wed, 17 Feb 2010 19:04:00 +0100 +Subject: [PATCH 1/3] drm-nouveau-updates +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit -drm/nouveau: reduce usage of fence spinlock to when absolutely necessary +drm/nouveau: fix pramdac_table range checking + +get_tmds_index_reg reads some value from stack when mlv happens +to be equal to size of pramdac_table array. Fix it. + +Reported-by: Dan Carpenter +Signed-off-by: Marcin Slusarz +Signed-off-by: Francisco Jerez + +drm/nouveau: fix nouveau_i2c_find bounds checking + +Reported-by: Dan Carpenter +Signed-off-by: Marcin Slusarz +Signed-off-by: Francisco Jerez + +drm/nouveau: fix i2ctable bounds checking + +i2c_entries seems to be the number of i2c entries, +so with index equal to this number, we could read +invalid data from i2ctable. Fix it. + +Signed-off-by: Marcin Slusarz +Signed-off-by: Francisco Jerez + +drm/nouveau: allow retrieval of vbios image from debugfs + +It's very useful to be able to access this without additional tools for +debugging purposes. Signed-off-by: Ben Skeggs -drm/nouveau: place notifiers in system memory by default +drm/nouveau: rename parsed_dcb_gpio to dcb_gpio_table Signed-off-by: Ben Skeggs -drm/nouveau: move LVDS detection back to connector detect() time +drm/nouveau: merge parsed_dcb and bios_parsed_dcb into dcb_table Signed-off-by: Ben Skeggs -drm/nouveau: use drm_mm in preference to custom code doing the same thing +drm/nouveau: merge nvbios and nouveau_bios_info Signed-off-by: Ben Skeggs -drm/nouveau: remove left-over !DRIVER_MODESET paths +drm/nouveau: reorganise bios header, add dcb connector type enums + +Signed-off-by: Ben Skeggs -It's far preferable to have the driver do nothing at all for "nomodeset". 
+drm/nouveau: parse dcb gpio/connector tables after encoders Signed-off-by: Ben Skeggs -drm/nouveau: missed some braces +drm/nouveau: check for known dcb connector types -Luckily this had absolutely no effect whatsoever :) +Signed-off-by: Ben Skeggs + +drm/nouveau: construct a connector table for cards that lack a real one -Reported-by: Marcin Slusarz Signed-off-by: Ben Skeggs -drm/nv50: fix memory detection for cards with >=4GiB VRAM +drm/nouveau: use dcb connector table for creating drm connectors + +This makes this code common to both the nv04 and nv50 paths. + +For the moment, we keep the previous behaviour with HDMI/eDP connectors +and report them as DVI-D/DP instead. This will be fixed once the rest +of the code has been fixed to deal with those types. Signed-off-by: Ben Skeggs -drm/nouveau: Put the dithering check back in nouveau_connector_create. +drm/nv50: enable hpd on any connector we know the gpio line for -a7b9f9e5adef dropped it by accident. +Signed-off-by: Ben Skeggs -Signed-off-by: Francisco Jerez -Tested-by: Thibaut Girka +drm/nouveau: use dcb connector types throughout the driver -drm/nouveau: Don't clear AGPCMD completely on INIT_RESET. +Signed-off-by: Ben Skeggs -We just need to clear the SBA and ENABLE bits to reset the AGP -controller: If the AGP bridge was configured to use "fast writes", -clearing the FW bit would break the subsequent MMIO writes and -eventually end with a lockup. +drm/nv50: Implement ctxprog/state generation. -Note that all the BIOSes I've seen do the same as we did (it works for -them because they don't use MMIO), OTOH the blob leaves FW untouched. +This removes dependence on external firmware for NV50 generation cards. +If the generated ctxprogs don't work for you for some reason, please +report it. + +Signed-off-by: Marcin Kościelnicki +Signed-off-by: Ben Skeggs + +drm/nouveau: Fix noaccel/nofbaccel option descriptions. + +Signed-off-by: Marcin Kościelnicki +Signed-off-by: Ben Skeggs + +drm/nouveau: support version 0x20 displayport tables + +Not entirely identical to 0x21, the per-encoder table header lacks the +third init table pointer. However, our current parsing of the table +should work just fine. + +Signed-off-by: Ben Skeggs + +drm/nouveau: report unknown connector state if lid closed + +This is in preference to disconnected. If there's no other outputs +connected this will cause LVDS to be programmed even with the lid +closed rather than having X fail to start because of no available +outputs. + +Signed-off-by: Ben Skeggs + +drm/nouveau: use ALIGN instead of open coding it +CC: Ben Skeggs +Signed-off-by: Matt Turner +Signed-off-by: Ben Skeggs + +drm/nouveau: protect channel create/destroy and irq handler with a spinlock + +The nv50 pgraph handler (for example) could reenable pgraph fifo access +and that would be bad when pgraph context is being unloaded (we need the +guarantee a ctxprog isn't running). + +Signed-off-by: Maarten Maathuis +Signed-off-by: Ben Skeggs + +drm/nv50: Remove redundant/incorrect ctxvals initialisation. + +11c/004 offset corresponds to PGRAPH reg 0x400828, and is initialised +earlier anyway by both our ctxprog generator and blob ctxvals. It's +actually incorrect with the generator, since we use different layout +on pre-NVA0. + +Signed-off-by: Marcin Kościelnicki +Signed-off-by: Ben Skeggs + +drm/nouveau: Fix fbcon corruption with font width not divisible by 8 + +NV50 is nice and has a switch that autoaligns stuff for us. Pre-NV50, +we need to align input bitmap width manually. 
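The manual bitmap-width alignment mentioned in this changelog is ordinary round-up-to-a-multiple arithmetic; in the kernel it is what the ALIGN() macro from linux/kernel.h provides. A tiny stand-alone helper showing the idea, with the 8-pixel granularity assumed here as the usual 1bpp font packing:

    /* round x up to the next multiple of a (a must be a power of two) */
    unsigned int align_up(unsigned int x, unsigned int a)
    {
            return (x + a - 1) & ~(a - 1);
    }

    /* e.g. a 6-pixel-wide glyph row still occupies 8 source bits:
     *   align_up(6, 8) == 8,  align_up(16, 8) == 16 */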
+ +Signed-off-by: Marcin Kościelnicki Signed-off-by: Francisco Jerez -drm/nouveau: Ignore broken legacy I2C entries. +drm/nv50: Make ctxprog wait until interrupt handler is done. -The nv05 card in the bug report [1] doesn't have usable I2C port -register offsets (they're all filled with zeros). Ignore them and use -the defaults. +This will fix races between generated ctxprogs and interrupt handler. -[1] http://bugs.launchpad.net/bugs/569505 +Signed-off-by: Marcin Kościelnicki +Signed-off-by: Ben Skeggs + +drm/nv50: Improve PGRAPH interrupt handling. + +This makes nouveau recognise and report more kinds of PGRAPH errors, as +well as prevent GPU lockups resulting from some of them. + +Lots of guesswork was involved and some part of this is probably +incorrect. Some potential-lockuop situations are handled by just +resetting a whole PGRAPH subunit, which doesn't sound like a "proper" +solution, but seems to work just fine... for now. + +Signed-off-by: Marcin Kościelnicki +Signed-off-by: Ben Skeggs + +drm/nouveau: add option to allow override of dcb connector table types + +Signed-off-by: Ben Skeggs + +drm/nouveau: Gigabyte NX85T connector table lies, it has DVI-I not HDMI + +Signed-off-by: Ben Skeggs + +drm/nv04-nv40: Fix up the programmed horizontal sync pulse delay. + +The calculated values were a little bit off (~16 clocks), the only +effect it could have had is a slightly offset image with respect to +the blob on analog outputs (bug 26790). Signed-off-by: Francisco Jerez -drm/nouveau: set encoder for lvds +drm/nouveau: print a message very early during suspend -fixes oops in nouveau_connector_get_modes with nv_encoder is NULL +- In case of suspend lockups it's nice to know it happened in nouveau. + +Signed-off-by: Maarten Maathuis + +drm/nv50: add a memory barrier to pushbuf submission + +- This is useful for vram pushbuffers that are write combined. +- pre-nv50 has one too (in WRITE_PUT). + +Signed-off-by: Maarten Maathuis + +drm/nv50: fix connector table parsing for some cards + +The connector table index in the DCB entry for each output type is an +index into the connector table, and does *not* necessarily match up +with what was previously called "index" in the connector table entries +themselves. + +Not real sure what that index is exactly, renamed to "index2" as we +still use it to prevent creating multiple TV connectors. + +Signed-off-by: Ben Skeggs + +drm/nouveau: Never evict VRAM buffers to system. + +VRAM->system is a synchronous operation: it involves scheduling a +VRAM->TT DMA transfer and stalling the CPU until it's finished so that +we can unbind the new memory from the translation tables. VRAM->TT can +always be performed asynchronously, even if TT is already full and we +have to move something out of it. + +Additionally, allowing VRAM->system behaves badly under heavy memory +pressure because once we run out of TT, stuff starts to be moved back +and forth between VRAM and system, and the TT contents are hardly +renewed. -Signed-off-by: Albert Damen Signed-off-by: Francisco Jerez -drm/nouveau: tidy connector/encoder creation a little +drm/nouveau: add module option to disable TV detection -Create connectors before encoders to avoid having to do another loop across -encoder list whenever we create a new connector. This allows us to pass -the connector to the encoder creation functions, and avoid using a -create_resources() callback since we can now call it directly. +Intended to be used as a workaround in cases where we falsely detect +that a TV is connected when it's not. 
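The TV-detection override mentioned in this changelog is a plain module parameter consulted before the normal probe. A hedged sketch of how such an option is usually wired up in a kernel module; the parameter and variable names here are illustrative, not necessarily the driver's own:

    #include <linux/module.h>

    static int tv_disable;  /* 0 = probe for TVs as usual (default) */
    module_param_named(tv_disable, tv_disable, int, 0400);
    MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");

    /*
     * In the detect path the option simply short-circuits probing, e.g.:
     *
     *      if (tv_disable)
     *              return connector_status_disconnected;
     */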
-This can also potentially modify the connector ordering on nv50. On cards -where the DCB connector and encoder tables are in the same order, things -will be unchanged. However, there's some cards where the ordering between -the tables differ, and in one case, leads us to naming the connectors -"wrongly". +Signed-off-by: Ben Skeggs + +drm/nv50: add more 0x100c80 flushy magic + +Fixes the !vbo_fifo path in the 3D driver on certain chipsets. Still not +really any good idea of what exactly the magic achieves, but it makes +things work. + +While we're at it, in the PCIEGART path, flush on unbinding also. Signed-off-by: Ben Skeggs -drm/nouveau: downgrade severity of most init table parser errors +drm/nouveau: bail out of auxch transaction if we repeatedly recieve defers -As long as we know the length of the opcode, we're probably better off -trying to parse the remainder of an init table rather than aborting in -the middle of it. +There's one known case where we never stop recieving DEFER, and loop here +forever. Lets not do that.. Signed-off-by: Ben Skeggs -drm/nv50: fix DP->DVI if output has been programmed for native DP previously +drm/nv50: fix fbcon when framebuffer above 4GiB mark + +This can't actually happen right now, but lets fix it anyway. Signed-off-by: Ben Skeggs -drm/nv50: DCB quirk for Dell M6300 +drm/nv50: Fix NEWCTX_DONE flag number + +Signed-off-by: Marcin Kościelnicki +Signed-off-by: Ben Skeggs -Uncertain if this is a weirdo configuration, or a BIOS bug. If it's not -a BIOS bug, we still don't know how to make it work anyway so ignore a -"conflicting" DCB entry to prevent a display hang. +drm/nouveau: remove some unused members from drm_nouveau_private Signed-off-by: Ben Skeggs -drm/nv50: supply encoder disable() hook for SOR outputs +drm/nouveau: detect vram amount once, and save the value -Allows us to remove a driver hack that used to be necessary to disable -encoders in certain situations before setting up a mode. The DRM has -better knowledge of when this is needed than the driver does. +As opposed to repeatedly reading the amount back from the GPU every +time we need to know the VRAM size. -This fixes a number of display switching issues. +We should now fail to load gracefully on detecting no VRAM, rather than +something potentially messy happening. Signed-off-by: Ben Skeggs -drm/nv50: fix regression caused by ed15e77b6ee7c4fa6f50c18b3325e7f96ed3aade +drm/nv40: rework lvds table parsing -It became possible for us to have connectors present without any encoders -attached (TV out, we don't support TVDAC yet), which caused the DDX to -segfault. +All indications seem to be that the version 0x30 table should be handled +the same way as 0x40 (as used on G80), at least for the parts that we +currently try use. + +This commit cleans up the parsing to make it clearer about what we're +actually trying to achieve, and unifies the 0x30/0x40 parsing. 
Signed-off-by: Ben Skeggs -drm/nv04: fix regression caused by ed15e77b6ee7c4fa6f50c18b3325e7f96ed3aade +drm/nv40: add LVDS table quirk for Dell Latitude D620 + +Should fix: + https://bugzilla.redhat.com/show_bug.cgi?id=505132 + https://bugzilla.redhat.com/show_bug.cgi?id=543091 + https://bugzilla.redhat.com/show_bug.cgi?id=530425 + https://bugs.edge.launchpad.net/ubuntu/+source/xserver-xorg-video-nouveau/ + +bug/539730 Signed-off-by: Ben Skeggs -drm/nv50: when debugging on, log which crtc we connect an encoder to +drm/nv50: fix instmem init on IGPs if stolen mem crosses 4GiB mark Signed-off-by: Ben Skeggs -drm/nv17-nv40: Avoid using active CRTCs for load detection. +drm/nouveau: fixup the init failure paths some more -Signed-off-by: Francisco Jerez +Signed-off-by: Ben Skeggs -drm/nv04-nv40: Prevent invalid DAC/TVDAC combinations. +drm/nv50: move pdisp init earlier, and cleanup if it fails -Signed-off-by: Francisco Jerez +Moving it earlier is to avoid some extra cleanup if it fails. + +Signed-off-by: Ben Skeggs -drm/nouveau: Fix a couple of sparse warnings. +drm/nv50: Allow using the NVA3 new compute class. -Signed-off-by: Francisco Jerez +Signed-off-by: Marcin Kościelnicki +Signed-off-by: Ben Skeggs -drm/nouveau: INIT_CONFIGURE_PREINIT/CLK/MEM on newer BIOSes is not an error. +drm/nouveau: Make use of TTM busy_placements. -No need to spam the logs when they're found, they're equivalent to -INIT_DONE. +Previously we were filling it the same as "placements", but in some +cases there're valid alternatives that we were ignoring completely. +Keeping a back-up memory type helps on several low-mem situations. Signed-off-by: Francisco Jerez -drm/nv04-nv40: Drop redundant logging. +drm/nv50: preserve an unknown SOR_MODECTRL value for DP encoders -Signed-off-by: Francisco Jerez +This value interacts with some registers we don't currently know how to +program properly ourselves. The default of 5 that we were using matches +what the VBIOS on early DP cards do, but later ones use 6, which would +cause nouveau to program an incorrect mode on these chips. -drm/nouveau: Move the fence wait before migration resource clean-up. +Signed-off-by: Ben Skeggs -Avoids an oops in the fence wait failure path (bug 26521). +drm/nv50: punt hotplug irq handling out to workqueue -Signed-off-by: Francisco Jerez -Tested-by: Marcin Slusarz +On DP outputs we'll likely end up running vbios init tables here, which +may sleep. -drm/nouveau: Workaround broken TV load detection on a "Zotac FX5200". +Signed-off-by: Ben Skeggs + +drm/nv50: partially revert ec99dbe438787d62ecde3a22f8ce3f880a4f4e14 + +The commit mentioned above breaks the DP SOR_MODE_CTRL detection as once +nv50_display_init() has been called all the MODE_CTRL registers are reset. + +This wasn't noticed when initially writing the DP SOR_MODE_CTRL patch +as it was done on another machine, without ec99dbe..4e14 applied. + +This commit moves the nv50_display_init() call to back where it was, +after the KMS setup, and adds the additional cleanup needed. + +Signed-off-by: Ben Skeggs + +drm/nv50: another dodgy DP hack -The blob seems to have the same problem so it's probably a hardware -issue (bug 28810). +Allows *some* DP cards to keep working in some corner cases that most +people shouldn't hit. I hit it all the time with development, so this +can stay for now. +Signed-off-by: Ben Skeggs + +drm/nv50: Add NVA3 support in ctxprog/ctxvals generator. + +Signed-off-by: Marcin Kościelnicki +Signed-off-by: Ben Skeggs + +drm/nv40: Init some tiling-related PGRAPH state. 
+
+Fixes garbled 3D on an nv46 card.
+
+Reported-by: Francesco Marella
Signed-off-by: Francisco Jerez
-drm/nv50: send evo "update" command after each disconnect
+drm/nouveau: store raw gpio table entry in bios gpio structs
+
+And use our own version of the GPIO table for the INIT_GPIO opcode.
-It turns out that the display engine signals an interrupt for disconnects
-too. In order to make it easier to process the display interrupts
-correctly, we want to ensure we only get one operation per interrupt
-sequence - this is what this commit achieves.
+Signed-off-by: Ben Skeggs
+
+drm/nv50: parse/use some more de-magiced parts of gpio table entries
+
+Signed-off-by: Ben Skeggs
+
+drm/nv50: implement gpio set/get routines
+
+Signed-off-by: Ben Skeggs
+
+Revert "drm/nouveau: report unknown connector state if lid closed"
+
+Included in upstream stable point-release.
+
+This reverts commit b30083bdb990bcc2829fce83d871a86059ff4fc1.
+
+drm/nouveau: fix a nouveau_bo dereference after it's been destroyed
Signed-off-by: Ben Skeggs
-drm/nv50: rewrite display irq handler
+drm/nouveau: bios parser fixes for eDP boards
+
+Signed-off-by: Ben Skeggs
-The previous handler basically worked correctly for a full-blown mode
-change. However, it did nothing at all when a partial (encoder only)
-reconfiguation was necessary, leading to the display hanging on certain
-types of mode switch.
+drm/nouveau: dump pll limits entries when debugging is on
Signed-off-by: Ben Skeggs
-drm/nouveau: move DP script invocation to nouveau_dp.c
+drm/nv50: output calculated crtc pll when debugging on
Signed-off-by: Ben Skeggs
-drm/nv50: set DP display power state during DPMS
+drm/nv50: fix suspend/resume with DP outputs
Signed-off-by: Ben Skeggs
-drm/nouveau: add scaler-only modes for eDP too
+drm/nv50: store full dcb i2c entry from vbios
Signed-off-by: Ben Skeggs
-drm/nouveau: remove dev_priv->init_state and friends
+drm/nv50: fix monitor detection on certain chipsets
+
+There appears to be some kind of switch on certain chips to control whether
+the DP auxch or traditional i2c bus will be operational on a connector,
+this commit hopefully fixes nouveau to do the right thing.
+
+Likely only relevant on chips with DP outputs.
+
+Signed-off-by: Ben Skeggs
-Nouveau will no longer load at all if card initialisation fails, so all
-these checks are unnecessary.
+drm/nv50: send hotplug event to userspace
Signed-off-by: Ben Skeggs
-drm/nv50: implement DAC disconnect fix missed in earlier commit
+drm/nv50: support fractional feedback divider on newer chips
Signed-off-by: Ben Skeggs
-drm/nouveau: add instmem flush() hook
+drm/nouveau: don't execute INIT_GPIO unless we're really running the table
-This removes the previous prepare_access() and finish_access() hooks, and
-replaces it with a much simpler flush() hook.
+This resulted in accidentally switching off the eDP panel on certain laptops
+since the default state in the GPIO table was off.
-All the chipset-specific code before nv50 has its use removed completely,
-as it's not required there at all.
+Fixes rh#582621
Signed-off-by: Ben Skeggs
-drm/nv50: move tlb flushing to a helper function
+drm/nv50: fix iommu errors caused by device reading from address 0
Signed-off-by: Ben Skeggs
-drm/nouveau: remove ability to use external firmware
+drm/nouveau: fix POST detection for certain chipsets
+
+We totally fail at detecting un-POSTed chipsets prior to G80. This commit
+changes the pre-G80 POST detection to read the programmed horizontal total
+from CRTC 0, and assume the card isn't POSTed if it's 0.
-This was always really a developer option, and if it's really necessary we
-can hack this in ourselves.
+NVIDIA use some other heuristics more similar to what we do on G80, but I
+wasted quite a long time trying to figure out the exact specifics of what
+they do so we can try this for a bit instead.
Signed-off-by: Ben Skeggs
-drm/nouveau: allocate fixed amount of PRAMIN per channel on all chipsets
+drm/nv40: allow cold-booting of nv4x chipsets
+
+Signed-off-by: Ben Skeggs
-Previously only done on nv50+
+drm/nouveau: fix init table handlers to return proper error codes
-This commit also switches unknown NV2x/NV3x chipsets to noaccel mode.
+We really want to be able to distinguish between INIT_DONE and an actual
+error sometimes. This commit fixes up several lazy "return 0;" to be
+actual error codes, and explicitly reserves "0" as "success, but stop
+parsing this table".
Signed-off-by: Ben Skeggs
-drm/nouveau: remove unused fbdev_info
+drm/nouveau: display error message for any failed init table opcode
+
+Some handlers don't report specific errors, but we still *really* want to
+know if we failed to parse a complete init table.
Signed-off-by: Ben Skeggs
-drm/nv50: cleanup nv50_fifo.c
+drm/nouveau: ensure we've parsed i2c table entry for INIT_*I2C* handlers
+
+We may not have parsed the entry yet if the i2c_index is for an i2c bus
+that's not referenced by a DCB encoder.
+
+This could be done oh so much more nicely, except we have to care about
+prehistoric DCB tables too, and they make life painful.
Signed-off-by: Ben Skeggs
-drm/nv20-nv30: move context table object out of dev_priv
+drm/nouveau: support init table i2c device identifier 0x81
+
+It appears to be meant to reference the second "default index".
Signed-off-by: Ben Skeggs
-drm/nv50: fix dp_set_tmds to work on the right OR
+drm/nouveau: fix i2c-related init table handlers
+
+Multiple issues. INIT_ZM_I2C_BYTE/INIT_I2C_BYTE didn't even try and
+use the register value, and all the handlers were using the wrong
+slave address.
Signed-off-by: Ben Skeggs
-drm/nouveau: fix mtrr cleanup path
+drm/nv50: cast IGP memory location to u64 before shifting
Signed-off-by: Ben Skeggs
-drm/nv50: move dp_set_tmds() function to happen in the last display irq
+drm/nouveau: match U/DP script against SOR link
+
+It appears version 0x21 'U' and 'd' tables require us to take the SOR link
+into account when selecting the appropriate table for a particular output.
+
+Signed-off-by: Ben Skeggs
-It seems on some chipsets that doing this from the 0x20 handler causes the
-display engine to not ever signal the final 0x40 stage.
+drm/nv50: fix memory detection for cards with >=4GiB VRAM
Signed-off-by: Ben Skeggs
-drm/nouveau: initialise display before enabling interrupts
+drm/nouveau: completely fail init if we fail to map the PRAMIN BAR
-In some situations it's possible we can receive a spurious hotplug IRQ
-before we're ready to handle it, leading to an oops.
+On cards where there's a specific BAR for PRAMIN, we used to try and fall
+back to the "legacy" aperture within the mmio BAR.
-Calling the display init before enabling interrupts should clear any
-pending IRQs on the GPU and prevent this from happening.
+This is doomed to cause problems, so let's just fail completely as there's
+obviously something else very wrong anyway.
Signed-off-by: Ben Skeggs
-drm/nouveau: Fix crashes during fbcon init on single head cards.
+drm/nouveau: Don't clear AGPCMD completely on INIT_RESET.
+
+We just need to clear the SBA and ENABLE bits to reset the AGP
+controller: If the AGP bridge was configured to use "fast writes",
+clearing the FW bit would break the subsequent MMIO writes and
+eventually end with a lockup.
+
+Note that all the BIOSes I've seen do the same as we did (it works for
+them because they don't use MMIO), OTOH the blob leaves FW untouched.
Signed-off-by: Francisco Jerez
-drm/nouveau: Disable PROM access on init.
+drm/nouveau: Ignore broken legacy I2C entries.
+
+The nv05 card in the bug report [1] doesn't have usable I2C port
+register offsets (they're all filled with zeros). Ignore them and use
+the defaults.
-On older cards (
-drm/nv04: Enable context switching on PFIFO init.
+drm/nv50: use alternate source of SOR_MODE_CTRL for DP hack
-Fixes a lockup when coming back from suspend.
+Fixes module unload+reload on Dell M4500, where the "normal" registers
+get reset to 0.
-Signed-off-by: Francisco Jerez
+Signed-off-by: Ben Skeggs
+
+drm/nv50: fix duallink_possible calculation for DCB 4.0 cards
+
+Signed-off-by: Ben Skeggs
+
+drm/nv50: obey dcb->duallink_possible
+
+It was once assumed that all G8x had dual-link TMDS everywhere, this isn't
+actually the case - especially considering passive DP->DVI converters and
+some HDMI connectors only support single-link.
+
+Signed-off-by: Ben Skeggs
-drm/nouveau: fix pcirom vbios shadow breakage from acpi rom patch
+drm/nouveau: fix dual-link displays when plugged into single-link outputs
-On nv50 it became impossible to attempt a PCI ROM shadow of the VBIOS,
-which will break some setups.
+When selecting the native mode for a display we weren't taking into account
+whether or not it was actually supported on that particular output.
-This patch also removes the different ordering of shadow methods for
-pre-nv50 chipsets. The reason for the different ordering was paranoia,
-but it should hopefully be OK to try shadowing PRAMIN first.
+This patch modifies our native mode selection to run all modes through
+mode_valid() first.
Signed-off-by: Ben Skeggs
-drm/nv50: fix RAMHT size
+drm/nouveau: reduce usage of fence spinlock to when absolutely necessary
Signed-off-by: Ben Skeggs
-drm/nouveau: remove quirk to fabricate DVI-A output on DCB 1.5 boards
+drm/nouveau: move LVDS detection back to connector detect() time
Signed-off-by: Ben Skeggs
-drm/nouveau: support fetching LVDS EDID from ACPI
+drm/nouveau: Put the dithering check back in nouveau_connector_create.
+
+a7b9f9e5adef dropped it by accident.
+
+Signed-off-by: Francisco Jerez
+Tested-by: Thibaut Girka
+
+drm/nouveau: set encoder for lvds
+
+fixes oops in nouveau_connector_get_modes when nv_encoder is NULL
+
+Signed-off-by: Albert Damen
+Signed-off-by: Francisco Jerez
+
+drm/nouveau: tidy connector/encoder creation a little
+
+Create connectors before encoders to avoid having to do another loop across
+encoder list whenever we create a new connector. This allows us to pass
+the connector to the encoder creation functions, and avoid using a
+create_resources() callback since we can now call it directly.
-Based on a patch from Matthew Garrett.
+This can also potentially modify the connector ordering on nv50. On cards
+where the DCB connector and encoder tables are in the same order, things
+will be unchanged. However, there are some cards where the ordering between
+the tables differs, and in one case, leads us to naming the connectors
+"wrongly".
Signed-off-by: Ben Skeggs -drm/nv50: fix regression that break LVDS in some places +drm/nv50: fix regression caused by ed15e77b6ee7c4fa6f50c18b3325e7f96ed3aade + +It became possible for us to have connectors present without any encoders +attached (TV out, we don't support TVDAC yet), which caused the DDX to +segfault. + +Signed-off-by: Ben Skeggs -A previous commit started additionally using the SOR link when trying to -match the correct output script. However, we never fill in this field -for LVDS so we can never match a script at all. +drm/nv04: fix regression caused by ed15e77b6ee7c4fa6f50c18b3325e7f96ed3aade Signed-off-by: Ben Skeggs --- - drivers/gpu/drm/nouveau/Makefile | 2 +- - drivers/gpu/drm/nouveau/nouveau_acpi.c | 38 +++- - drivers/gpu/drm/nouveau/nouveau_bios.c | 206 +++++++++------ - drivers/gpu/drm/nouveau/nouveau_bios.h | 2 + - drivers/gpu/drm/nouveau/nouveau_bo.c | 9 +- - drivers/gpu/drm/nouveau/nouveau_channel.c | 5 - - drivers/gpu/drm/nouveau/nouveau_connector.c | 281 ++++++++++---------- - drivers/gpu/drm/nouveau/nouveau_connector.h | 4 +- - drivers/gpu/drm/nouveau/nouveau_dma.c | 8 +- - drivers/gpu/drm/nouveau/nouveau_dp.c | 24 ++- - drivers/gpu/drm/nouveau/nouveau_drv.c | 26 +-- - drivers/gpu/drm/nouveau/nouveau_drv.h | 89 ++----- - drivers/gpu/drm/nouveau/nouveau_encoder.h | 10 +- - drivers/gpu/drm/nouveau/nouveau_fbcon.c | 5 +- - drivers/gpu/drm/nouveau/nouveau_fence.c | 31 +-- - drivers/gpu/drm/nouveau/nouveau_gem.c | 11 +- - drivers/gpu/drm/nouveau/nouveau_grctx.c | 160 ----------- - drivers/gpu/drm/nouveau/nouveau_mem.c | 261 ++----------------- - drivers/gpu/drm/nouveau/nouveau_notifier.c | 30 +-- - drivers/gpu/drm/nouveau/nouveau_object.c | 105 +++----- + drivers/gpu/drm/nouveau/Makefile | 7 +- + drivers/gpu/drm/nouveau/nouveau_bios.c | 1057 ++++++++----- + drivers/gpu/drm/nouveau/nouveau_bios.h | 133 +- + drivers/gpu/drm/nouveau/nouveau_bo.c | 68 +- + drivers/gpu/drm/nouveau/nouveau_calc.c | 4 +- + drivers/gpu/drm/nouveau/nouveau_channel.c | 17 +- + drivers/gpu/drm/nouveau/nouveau_connector.c | 383 +++--- + drivers/gpu/drm/nouveau/nouveau_connector.h | 3 +- + drivers/gpu/drm/nouveau/nouveau_debugfs.c | 18 +- + drivers/gpu/drm/nouveau/nouveau_dma.c | 5 + + drivers/gpu/drm/nouveau/nouveau_dp.c | 8 +- + drivers/gpu/drm/nouveau/nouveau_drv.c | 14 +- + drivers/gpu/drm/nouveau/nouveau_drv.h | 70 +- + drivers/gpu/drm/nouveau/nouveau_encoder.h | 7 +- + drivers/gpu/drm/nouveau/nouveau_fence.c | 31 +- + drivers/gpu/drm/nouveau/nouveau_gem.c | 55 +- + drivers/gpu/drm/nouveau/nouveau_hw.c | 6 +- + drivers/gpu/drm/nouveau/nouveau_i2c.c | 23 +- + drivers/gpu/drm/nouveau/nouveau_irq.c | 615 +++++++- + drivers/gpu/drm/nouveau/nouveau_mem.c | 129 +- drivers/gpu/drm/nouveau/nouveau_reg.h | 1 + - drivers/gpu/drm/nouveau/nouveau_sgdma.c | 46 +--- - drivers/gpu/drm/nouveau/nouveau_state.c | 172 ++++-------- - drivers/gpu/drm/nouveau/nv04_dac.c | 37 ++- + drivers/gpu/drm/nouveau/nouveau_sgdma.c | 18 + + drivers/gpu/drm/nouveau/nouveau_state.c | 39 +- + drivers/gpu/drm/nouveau/nv04_crtc.c | 6 +- + drivers/gpu/drm/nouveau/nv04_dac.c | 15 +- drivers/gpu/drm/nouveau/nv04_dfp.c | 12 +- - drivers/gpu/drm/nouveau/nv04_display.c | 23 ++- - drivers/gpu/drm/nouveau/nv04_fifo.c | 20 +- - drivers/gpu/drm/nouveau/nv04_graph.c | 5 +- - drivers/gpu/drm/nouveau/nv04_instmem.c | 21 +- - drivers/gpu/drm/nouveau/nv04_mc.c | 4 + - drivers/gpu/drm/nouveau/nv04_tv.c | 8 +- - drivers/gpu/drm/nouveau/nv10_fifo.c | 10 - - drivers/gpu/drm/nouveau/nv17_tv.c | 45 +++- - drivers/gpu/drm/nouveau/nv20_graph.c | 
96 ++++--- - drivers/gpu/drm/nouveau/nv40_fifo.c | 8 - - drivers/gpu/drm/nouveau/nv40_graph.c | 58 ++--- - drivers/gpu/drm/nouveau/nv50_crtc.c | 42 +--- - drivers/gpu/drm/nouveau/nv50_dac.c | 43 ++- - drivers/gpu/drm/nouveau/nv50_display.c | 385 ++++++++++++++++----------- - drivers/gpu/drm/nouveau/nv50_fifo.c | 126 ++++------ - drivers/gpu/drm/nouveau/nv50_graph.c | 86 +++---- - drivers/gpu/drm/nouveau/nv50_instmem.c | 61 ++--- - drivers/gpu/drm/nouveau/nv50_sor.c | 105 ++++---- - 43 files changed, 1123 insertions(+), 1598 deletions(-) - delete mode 100644 drivers/gpu/drm/nouveau/nouveau_grctx.c + drivers/gpu/drm/nouveau/nv04_display.c | 64 +- + drivers/gpu/drm/nouveau/nv04_fbcon.c | 6 +- + drivers/gpu/drm/nouveau/nv04_fifo.c | 5 + + drivers/gpu/drm/nouveau/nv04_graph.c | 3 +- + drivers/gpu/drm/nouveau/nv04_tv.c | 10 +- + drivers/gpu/drm/nouveau/nv17_tv.c | 12 +- + drivers/gpu/drm/nouveau/nv40_fifo.c | 7 +- + drivers/gpu/drm/nouveau/nv40_graph.c | 21 + + drivers/gpu/drm/nouveau/nv50_calc.c | 87 + + drivers/gpu/drm/nouveau/nv50_crtc.c | 46 +- + drivers/gpu/drm/nouveau/nv50_dac.c | 13 +- + drivers/gpu/drm/nouveau/nv50_display.c | 122 +- + drivers/gpu/drm/nouveau/nv50_display.h | 1 + + drivers/gpu/drm/nouveau/nv50_fb.c | 38 + + drivers/gpu/drm/nouveau/nv50_fbcon.c | 17 +- + drivers/gpu/drm/nouveau/nv50_fifo.c | 5 + + drivers/gpu/drm/nouveau/nv50_gpio.c | 76 + + drivers/gpu/drm/nouveau/nv50_graph.c | 103 +- + drivers/gpu/drm/nouveau/nv50_grctx.c | 2383 +++++++++++++++++++++++++++ + drivers/gpu/drm/nouveau/nv50_instmem.c | 18 +- + drivers/gpu/drm/nouveau/nv50_sor.c | 42 +- + 47 files changed, 4702 insertions(+), 1121 deletions(-) + create mode 100644 drivers/gpu/drm/nouveau/nv50_calc.c + create mode 100644 drivers/gpu/drm/nouveau/nv50_fb.c + create mode 100644 drivers/gpu/drm/nouveau/nv50_gpio.c + create mode 100644 drivers/gpu/drm/nouveau/nv50_grctx.c diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile -index acd31ed..4a1db73 100644 +index 48c290b..acd31ed 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile -@@ -9,7 +9,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ - nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \ - nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ - nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ -- nouveau_dp.o nouveau_grctx.o \ -+ nouveau_dp.o \ +@@ -12,17 +12,18 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ + nouveau_dp.o nouveau_grctx.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ - nv04_fb.o nv10_fb.o nv40_fb.o nv50_fb.o \ -diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c -index d4bcca8..c17a055 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_acpi.c -+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c -@@ -3,6 +3,7 @@ - #include - #include - #include -+#include - - #include "drmP.h" - #include "drm.h" -@@ -11,6 +12,7 @@ +- nv04_fb.o nv10_fb.o nv40_fb.o \ ++ nv04_fb.o nv10_fb.o nv40_fb.o nv50_fb.o \ + nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ + nv04_graph.o nv10_graph.o nv20_graph.o \ + nv40_graph.o nv50_graph.o \ +- nv40_grctx.o \ ++ nv40_grctx.o nv50_grctx.o \ + nv04_instmem.o nv50_instmem.o \ + nv50_crtc.o nv50_dac.o nv50_sor.o \ + nv50_cursor.o nv50_display.o nv50_fbcon.o \ + nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ + nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ +- nv17_gpio.o ++ nv17_gpio.o nv50_gpio.o \ ++ nv50_calc.o + + 
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o + nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o +diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c +index 0e9cd1d..1803cc4 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_bios.c ++++ b/drivers/gpu/drm/nouveau/nouveau_bios.c +@@ -26,6 +26,7 @@ + #define NV_DEBUG_NOTRACE #include "nouveau_drv.h" - #include "nouveau_drm.h" - #include "nv50_display.h" -+#include "nouveau_connector.h" - - #include - -@@ -42,7 +44,7 @@ static const char nouveau_dsm_muid[] = { - 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, + #include "nouveau_hw.h" ++#include "nouveau_encoder.h" + + /* these defines are made up */ + #define NV_CIO_CRE_44_HEADA 0x0 +@@ -256,6 +257,11 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) + struct init_tbl_entry { + char *name; + uint8_t id; ++ /* Return: ++ * > 0: success, length of opcode ++ * 0: success, but abort further parsing of table (INIT_DONE etc) ++ * < 0: failure, table parsing will be aborted ++ */ + int (*handler)(struct nvbios *, uint16_t, struct init_exec *); }; --static int nouveau_dsm(acpi_handle handle, int func, int arg, int *result) -+static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result) - { - struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; - struct acpi_object_list input; -@@ -259,3 +261,37 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) +@@ -311,11 +317,11 @@ valid_reg(struct nvbios *bios, uint32_t reg) + + /* C51 has misaligned regs on purpose. Marvellous */ + if (reg & 0x2 || +- (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) ++ (reg & 0x1 && dev_priv->vbios.chip_version != 0x51)) + NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg); + + /* warn on C51 regs that haven't been verified accessible in tracing */ +- if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 && ++ if (reg & 0x1 && dev_priv->vbios.chip_version == 0x51 && + reg != 0x130d && reg != 0x1311 && reg != 0x60081d) + NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n", + reg); +@@ -420,7 +426,7 @@ bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data) + LOG_OLD_VALUE(bios_rd32(bios, reg)); + BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data); + +- if (dev_priv->VBIOS.execute) { ++ if (dev_priv->vbios.execute) { + still_alive(); + nv_wr32(bios->dev, reg, data); + } +@@ -647,7 +653,7 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk) + reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16); + reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1; + +- if (dev_priv->VBIOS.execute) { ++ if (dev_priv->vbios.execute) { + still_alive(); + nv_wr32(dev, reg + 4, reg1); + nv_wr32(dev, reg + 0, reg0); +@@ -689,7 +695,7 @@ setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk) + static int dcb_entry_idx_from_crtchead(struct drm_device *dev) { - return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len); + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + + /* + * For the results of this function to be correct, CR44 must have been +@@ -700,7 +706,7 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev) + + uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0); + +- if (dcb_entry > bios->bdcb.dcb.entries) { ++ if (dcb_entry > bios->dcb.entries) { + NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently " + "(%02X)\n", dcb_entry); + dcb_entry 
= 0x7f; /* unused / invalid marker */ +@@ -709,29 +715,121 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev) + return dcb_entry; } -+ -+int -+nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) + ++static int ++read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c) +{ -+ struct nouveau_connector *nv_connector = nouveau_connector(connector); -+ struct acpi_device *acpidev; -+ acpi_handle handle; -+ int type, ret; -+ void *edid; -+ -+ switch (connector->connector_type) { -+ case DRM_MODE_CONNECTOR_LVDS: -+ case DRM_MODE_CONNECTOR_eDP: -+ type = ACPI_VIDEO_DISPLAY_LCD; -+ break; -+ default: ++ uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4; ++ int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES; ++ int recordoffset = 0, rdofs = 1, wrofs = 0; ++ uint8_t port_type = 0; ++ ++ if (!i2ctable) + return -EINVAL; ++ ++ if (dcb_version >= 0x30) { ++ if (i2ctable[0] != dcb_version) /* necessary? */ ++ NV_WARN(dev, ++ "DCB I2C table version mismatch (%02X vs %02X)\n", ++ i2ctable[0], dcb_version); ++ dcb_i2c_ver = i2ctable[0]; ++ headerlen = i2ctable[1]; ++ if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES) ++ i2c_entries = i2ctable[2]; ++ else ++ NV_WARN(dev, ++ "DCB I2C table has more entries than indexable " ++ "(%d entries, max %d)\n", i2ctable[2], ++ DCB_MAX_NUM_I2C_ENTRIES); ++ entry_len = i2ctable[3]; ++ /* [4] is i2c_default_indices, read in parse_dcb_table() */ ++ } ++ /* ++ * It's your own fault if you call this function on a DCB 1.1 BIOS -- ++ * the test below is for DCB 1.2 ++ */ ++ if (dcb_version < 0x14) { ++ recordoffset = 2; ++ rdofs = 0; ++ wrofs = 1; + } + -+ handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); -+ if (!handle) -+ return -ENODEV; ++ if (index == 0xf) ++ return 0; ++ if (index >= i2c_entries) { ++ NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n", ++ index, i2ctable[2]); ++ return -ENOENT; ++ } ++ if (i2ctable[headerlen + entry_len * index + 3] == 0xff) { ++ NV_ERROR(dev, "DCB I2C entry invalid\n"); ++ return -EINVAL; ++ } + -+ ret = acpi_bus_get_device(handle, &acpidev); -+ if (ret) -+ return -ENODEV; ++ if (dcb_i2c_ver >= 0x30) { ++ port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index]; + -+ ret = acpi_video_get_edid(acpidev, type, -1, &edid); -+ if (ret < 0) -+ return ret; ++ /* ++ * Fixup for chips using same address offset for read and ++ * write. 
++ */ ++ if (port_type == 4) /* seen on C51 */ ++ rdofs = wrofs = 1; ++ if (port_type >= 5) /* G80+ */ ++ rdofs = wrofs = 0; ++ } ++ ++ if (dcb_i2c_ver >= 0x40) { ++ if (port_type != 5 && port_type != 6) ++ NV_WARN(dev, "DCB I2C table has port type %d\n", port_type); ++ ++ i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]); ++ } ++ ++ i2c->port_type = port_type; ++ i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index]; ++ i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index]; + -+ nv_connector->edid = edid; + return 0; +} -diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c -index fc924b6..0eb1b5a 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_bios.c -+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c -@@ -203,36 +203,26 @@ struct methods { - const bool rw; - }; - --static struct methods nv04_methods[] = { -- { "PROM", load_vbios_prom, false }, -- { "PRAMIN", load_vbios_pramin, true }, -- { "PCIROM", load_vbios_pci, true }, --}; -- --static struct methods nv50_methods[] = { -- { "ACPI", load_vbios_acpi, true }, -+static struct methods shadow_methods[] = { - { "PRAMIN", load_vbios_pramin, true }, - { "PROM", load_vbios_prom, false }, - { "PCIROM", load_vbios_pci, true }, -+ { "ACPI", load_vbios_acpi, true }, - }; - --#define METHODCNT 3 -- - static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) ++ + static struct nouveau_i2c_chan * + init_i2c_device_find(struct drm_device *dev, int i2c_index) { -- struct drm_nouveau_private *dev_priv = dev->dev_private; -- struct methods *methods; -- int i; -+ const int nr_methods = ARRAY_SIZE(shadow_methods); -+ struct methods *methods = shadow_methods; - int testscore = 3; -- int scores[METHODCNT]; -+ int scores[nr_methods], i; - - if (nouveau_vbios) { -- methods = nv04_methods; -- for (i = 0; i < METHODCNT; i++) -+ for (i = 0; i < nr_methods; i++) - if (!strcasecmp(nouveau_vbios, methods[i].desc)) - break; + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct bios_parsed_dcb *bdcb = &dev_priv->VBIOS.bdcb; ++ struct dcb_table *dcb = &dev_priv->vbios.dcb; -- if (i < METHODCNT) { -+ if (i < nr_methods) { - NV_INFO(dev, "Attempting to use BIOS image from %s\n", - methods[i].desc); + if (i2c_index == 0xff) { + /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */ + int idx = dcb_entry_idx_from_crtchead(dev), shift = 0; +- int default_indices = bdcb->i2c_default_indices; ++ int default_indices = dcb->i2c_default_indices; -@@ -244,12 +234,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) - NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios); - } +- if (idx != 0x7f && bdcb->dcb.entry[idx].i2c_upper_default) ++ if (idx != 0x7f && dcb->entry[idx].i2c_upper_default) + shift = 4; -- if (dev_priv->card_type < NV_50) -- methods = nv04_methods; -- else -- methods = nv50_methods; -- -- for (i = 0; i < METHODCNT; i++) { -+ for (i = 0; i < nr_methods; i++) { - NV_TRACE(dev, "Attempting to load BIOS image from %s\n", - methods[i].desc); - data[0] = data[1] = 0; /* avoid reuse of previous image */ -@@ -260,7 +245,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) + i2c_index = (default_indices >> shift) & 0xf; } + if (i2c_index == 0x80) /* g80+ */ +- i2c_index = bdcb->i2c_default_indices & 0xf; ++ i2c_index = dcb->i2c_default_indices & 0xf; ++ else ++ if (i2c_index == 0x81) ++ i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4; ++ ++ if (i2c_index > DCB_MAX_NUM_I2C_ENTRIES) { ++ 
NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index); ++ return NULL; ++ } ++ ++ /* Make sure i2c table entry has been parsed, it may not ++ * have been if this is a bus not referenced by a DCB encoder ++ */ ++ read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table, ++ i2c_index, &dcb->i2c[i2c_index]); + + return nouveau_i2c_find(dev, i2c_index); + } + +-static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv) ++static uint32_t ++get_tmds_index_reg(struct drm_device *dev, uint8_t mlv) + { + /* + * For mlv < 0x80, it is an index into a table of TMDS base addresses. +@@ -744,6 +842,7 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv) + */ - while (--testscore > 0) { -- for (i = 0; i < METHODCNT; i++) { -+ for (i = 0; i < nr_methods; i++) { - if (scores[i] == testscore) { - NV_TRACE(dev, "Using BIOS image from %s\n", - methods[i].desc); -@@ -935,7 +920,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset, + struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nvbios *bios = &dev_priv->vbios; + const int pramdac_offset[13] = { + 0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 }; + const uint32_t pramdac_table[4] = { +@@ -756,13 +855,12 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv) + dcb_entry = dcb_entry_idx_from_crtchead(dev); + if (dcb_entry == 0x7f) + return 0; +- dacoffset = pramdac_offset[ +- dev_priv->VBIOS.bdcb.dcb.entry[dcb_entry].or]; ++ dacoffset = pramdac_offset[bios->dcb.entry[dcb_entry].or]; + if (mlv == 0x81) + dacoffset ^= 8; + return 0x6808b0 + dacoffset; + } else { +- if (mlv > ARRAY_SIZE(pramdac_table)) { ++ if (mlv >= ARRAY_SIZE(pramdac_table)) { + NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n", + mlv); + return 0; +@@ -817,7 +915,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset, NV_ERROR(bios->dev, "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", offset, config, count); -- return -EINVAL; -+ return len; +- return 0; ++ return -EINVAL; } configval = ROM32(bios->data[offset + 11 + config * 4]); -@@ -1037,7 +1022,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset, +@@ -919,7 +1017,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset, NV_ERROR(bios->dev, "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", offset, config, count); -- return -EINVAL; -+ return len; +- return 0; ++ return -EINVAL; } freq = ROM16(bios->data[offset + 12 + config * 2]); -@@ -1209,7 +1194,7 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - dpe = nouveau_bios_dp_table(dev, dcb, &dummy); - if (!dpe) { - NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset); -- return -EINVAL; -+ return 3; - } - - switch (cond) { -@@ -1233,12 +1218,16 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - int ret; - - auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index); -- if (!auxch) -- return -ENODEV; -+ if (!auxch) { -+ NV_ERROR(dev, "0x%04X: couldn't get auxch\n", offset); -+ return 3; -+ } - - ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1); -- if (ret) -- return ret; -+ if (ret) { -+ NV_ERROR(dev, "0x%04X: auxch rd fail: %d\n", offset, ret); -+ return 3; -+ } +@@ -1066,6 +1164,126 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset, + } - if (cond & 1) - iexec->execute = false; -@@ -1407,7 +1396,7 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset, + static int ++init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) ++{ ++ /* ++ * 
INIT_DP_CONDITION opcode: 0x3A ('') ++ * ++ * offset (8 bit): opcode ++ * offset + 1 (8 bit): "sub" opcode ++ * offset + 2 (8 bit): unknown ++ * ++ */ ++ ++ struct bit_displayport_encoder_table *dpe = NULL; ++ struct dcb_entry *dcb = bios->display.output; ++ struct drm_device *dev = bios->dev; ++ uint8_t cond = bios->data[offset + 1]; ++ int dummy; ++ ++ BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond); ++ ++ if (!iexec->execute) ++ return 3; ++ ++ dpe = nouveau_bios_dp_table(dev, dcb, &dummy); ++ if (!dpe) { ++ NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset); ++ return -EINVAL; ++ } ++ ++ switch (cond) { ++ case 0: ++ { ++ struct dcb_connector_table_entry *ent = ++ &bios->dcb.connector.entry[dcb->connector]; ++ ++ if (ent->type != DCB_CONNECTOR_eDP) ++ iexec->execute = false; ++ } ++ break; ++ case 1: ++ case 2: ++ if (!(dpe->unknown & cond)) ++ iexec->execute = false; ++ break; ++ case 5: ++ { ++ struct nouveau_i2c_chan *auxch; ++ int ret; ++ ++ auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index); ++ if (!auxch) ++ return -ENODEV; ++ ++ ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1); ++ if (ret) ++ return ret; ++ ++ if (cond & 1) ++ iexec->execute = false; ++ } ++ break; ++ default: ++ NV_WARN(dev, "0x%04X: unknown INIT_3A op: %d\n", offset, cond); ++ break; ++ } ++ ++ if (iexec->execute) ++ BIOSLOG(bios, "0x%04X: continuing to execute\n", offset); ++ else ++ BIOSLOG(bios, "0x%04X: skipping following commands\n", offset); ++ ++ return 3; ++} ++ ++static int ++init_op_3b(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) ++{ ++ /* ++ * INIT_3B opcode: 0x3B ('') ++ * ++ * offset (8 bit): opcode ++ * offset + 1 (8 bit): crtc index ++ * ++ */ ++ ++ uint8_t or = ffs(bios->display.output->or) - 1; ++ uint8_t index = bios->data[offset + 1]; ++ uint8_t data; ++ ++ if (!iexec->execute) ++ return 2; ++ ++ data = bios_idxprt_rd(bios, 0x3d4, index); ++ bios_idxprt_wr(bios, 0x3d4, index, data & ~(1 << or)); ++ return 2; ++} ++ ++static int ++init_op_3c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) ++{ ++ /* ++ * INIT_3C opcode: 0x3C ('') ++ * ++ * offset (8 bit): opcode ++ * offset + 1 (8 bit): crtc index ++ * ++ */ ++ ++ uint8_t or = ffs(bios->display.output->or) - 1; ++ uint8_t index = bios->data[offset + 1]; ++ uint8_t data; ++ ++ if (!iexec->execute) ++ return 2; ++ ++ data = bios_idxprt_rd(bios, 0x3d4, index); ++ bios_idxprt_wr(bios, 0x3d4, index, data | (1 << or)); ++ return 2; ++} ++ ++static int + init_idx_addr_latched(struct nvbios *bios, uint16_t offset, + struct init_exec *iexec) + { +@@ -1169,7 +1387,7 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset, NV_ERROR(bios->dev, "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", offset, config, count); -- return -EINVAL; -+ return len; +- return 0; ++ return -EINVAL; } freq = ROM32(bios->data[offset + 11 + config * 4]); -@@ -1467,6 +1456,7 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - * "mask n" and OR it with "data n" before writing it back to the device +@@ -1230,12 +1448,11 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) */ -+ struct drm_device *dev = bios->dev; uint8_t i2c_index = bios->data[offset + 1]; - uint8_t i2c_address = bios->data[offset + 2] >> 1; +- uint8_t i2c_address = bios->data[offset + 2]; ++ uint8_t i2c_address = bios->data[offset + 2] >> 1; uint8_t count = bios->data[offset + 3]; -@@ -1481,9 +1471,11 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - 
"Count: 0x%02X\n", - offset, i2c_index, i2c_address, count); +- int len = 4 + count * 3; + struct nouveau_i2c_chan *chan; +- struct i2c_msg msg; +- int i; ++ int len = 4 + count * 3; ++ int ret, i; -- chan = init_i2c_device_find(bios->dev, i2c_index); -- if (!chan) -- return -ENODEV; -+ chan = init_i2c_device_find(dev, i2c_index); -+ if (!chan) { -+ NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset); -+ return len; -+ } + if (!iexec->execute) + return len; +@@ -1246,35 +1463,34 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + + chan = init_i2c_device_find(bios->dev, i2c_index); + if (!chan) +- return 0; ++ return -ENODEV; for (i = 0; i < count; i++) { - uint8_t reg = bios->data[offset + 4 + i * 3]; -@@ -1494,8 +1486,10 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, - I2C_SMBUS_READ, reg, - I2C_SMBUS_BYTE_DATA, &val); -- if (ret < 0) -- return ret; -+ if (ret < 0) { -+ NV_ERROR(dev, "0x%04X: i2c rd fail: %d\n", offset, ret); -+ return len; -+ } +- uint8_t i2c_reg = bios->data[offset + 4 + i * 3]; ++ uint8_t reg = bios->data[offset + 4 + i * 3]; + uint8_t mask = bios->data[offset + 5 + i * 3]; + uint8_t data = bios->data[offset + 6 + i * 3]; +- uint8_t value; ++ union i2c_smbus_data val; + +- msg.addr = i2c_address; +- msg.flags = I2C_M_RD; +- msg.len = 1; +- msg.buf = &value; +- if (i2c_transfer(&chan->adapter, &msg, 1) != 1) +- return 0; ++ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, ++ I2C_SMBUS_READ, reg, ++ I2C_SMBUS_BYTE_DATA, &val); ++ if (ret < 0) ++ return ret; BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, " "Mask: 0x%02X, Data: 0x%02X\n", -@@ -1509,8 +1503,10 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, - I2C_SMBUS_WRITE, reg, - I2C_SMBUS_BYTE_DATA, &val); -- if (ret < 0) -- return ret; -+ if (ret < 0) { -+ NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret); -+ return len; -+ } +- offset, i2c_reg, value, mask, data); ++ offset, reg, val.byte, mask, data); + +- value = (value & mask) | data; ++ if (!bios->execute) ++ continue; + +- if (bios->execute) { +- msg.addr = i2c_address; +- msg.flags = 0; +- msg.len = 1; +- msg.buf = &value; +- if (i2c_transfer(&chan->adapter, &msg, 1) != 1) +- return 0; +- } ++ val.byte &= mask; ++ val.byte |= data; ++ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, ++ I2C_SMBUS_WRITE, reg, ++ I2C_SMBUS_BYTE_DATA, &val); ++ if (ret < 0) ++ return ret; } return len; -@@ -1535,6 +1531,7 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - * "DCB I2C table entry index", set the register to "data n" +@@ -1300,12 +1516,11 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) */ -+ struct drm_device *dev = bios->dev; uint8_t i2c_index = bios->data[offset + 1]; - uint8_t i2c_address = bios->data[offset + 2] >> 1; +- uint8_t i2c_address = bios->data[offset + 2]; ++ uint8_t i2c_address = bios->data[offset + 2] >> 1; uint8_t count = bios->data[offset + 3]; -@@ -1549,9 +1546,11 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - "Count: 0x%02X\n", - offset, i2c_index, i2c_address, count); +- int len = 4 + count * 2; + struct nouveau_i2c_chan *chan; +- struct i2c_msg msg; +- int i; ++ int len = 4 + count * 2; ++ int ret, i; -- chan = init_i2c_device_find(bios->dev, i2c_index); -- if (!chan) -- return -ENODEV; -+ chan = 
init_i2c_device_find(dev, i2c_index); -+ if (!chan) { -+ NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset); -+ return len; -+ } + if (!iexec->execute) + return len; +@@ -1316,23 +1531,25 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + + chan = init_i2c_device_find(bios->dev, i2c_index); + if (!chan) +- return 0; ++ return -ENODEV; for (i = 0; i < count; i++) { - uint8_t reg = bios->data[offset + 4 + i * 2]; -@@ -1568,8 +1567,10 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, - I2C_SMBUS_WRITE, reg, - I2C_SMBUS_BYTE_DATA, &val); -- if (ret < 0) -- return ret; -+ if (ret < 0) { -+ NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret); -+ return len; -+ } +- uint8_t i2c_reg = bios->data[offset + 4 + i * 2]; +- uint8_t data = bios->data[offset + 5 + i * 2]; ++ uint8_t reg = bios->data[offset + 4 + i * 2]; ++ union i2c_smbus_data val; ++ ++ val.byte = bios->data[offset + 5 + i * 2]; + + BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n", +- offset, i2c_reg, data); +- +- if (bios->execute) { +- msg.addr = i2c_address; +- msg.flags = 0; +- msg.len = 1; +- msg.buf = &data; +- if (i2c_transfer(&chan->adapter, &msg, 1) != 1) +- return 0; +- } ++ offset, reg, val.byte); ++ ++ if (!bios->execute) ++ continue; ++ ++ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, ++ I2C_SMBUS_WRITE, reg, ++ I2C_SMBUS_BYTE_DATA, &val); ++ if (ret < 0) ++ return ret; } return len; -@@ -1592,6 +1593,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - * address" on the I2C bus given by "DCB I2C table entry index" +@@ -1356,7 +1573,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) */ -+ struct drm_device *dev = bios->dev; uint8_t i2c_index = bios->data[offset + 1]; - uint8_t i2c_address = bios->data[offset + 2] >> 1; +- uint8_t i2c_address = bios->data[offset + 2]; ++ uint8_t i2c_address = bios->data[offset + 2] >> 1; uint8_t count = bios->data[offset + 3]; -@@ -1599,7 +1601,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + int len = 4 + count; struct nouveau_i2c_chan *chan; - struct i2c_msg msg; - uint8_t data[256]; -- int i; -+ int ret, i; - - if (!iexec->execute) - return len; -@@ -1608,9 +1610,11 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - "Count: 0x%02X\n", - offset, i2c_index, i2c_address, count); +@@ -1373,7 +1590,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) -- chan = init_i2c_device_find(bios->dev, i2c_index); -- if (!chan) -- return -ENODEV; -+ chan = init_i2c_device_find(dev, i2c_index); -+ if (!chan) { -+ NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset); -+ return len; -+ } + chan = init_i2c_device_find(bios->dev, i2c_index); + if (!chan) +- return 0; ++ return -ENODEV; for (i = 0; i < count; i++) { data[i] = bios->data[offset + 4 + i]; -@@ -1623,8 +1627,11 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - msg.flags = 0; +@@ -1387,7 +1604,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) msg.len = count; msg.buf = data; -- if (i2c_transfer(&chan->adapter, &msg, 1) != 1) -- return -EIO; -+ ret = i2c_transfer(&chan->adapter, &msg, 1); -+ if (ret != 1) { -+ NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret); -+ return len; -+ } + if (i2c_transfer(&chan->adapter, &msg, 1) != 1) +- return 0; ++ return -EIO; } return len; -@@ -1648,6 +1655,7 @@ 
init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - * used -- see get_tmds_index_reg() - */ - -+ struct drm_device *dev = bios->dev; - uint8_t mlv = bios->data[offset + 1]; - uint32_t tmdsaddr = bios->data[offset + 2]; - uint8_t mask = bios->data[offset + 3]; -@@ -1662,8 +1670,10 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - offset, mlv, tmdsaddr, mask, data); +@@ -1426,7 +1643,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) reg = get_tmds_index_reg(bios->dev, mlv); -- if (!reg) -- return -EINVAL; -+ if (!reg) { -+ NV_ERROR(dev, "0x%04X: no tmds_index_reg\n", offset); -+ return 5; -+ } + if (!reg) +- return 0; ++ return -EINVAL; bios_wr32(bios, reg, tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE); -@@ -1693,6 +1703,7 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset, - * register is used -- see get_tmds_index_reg() - */ - -+ struct drm_device *dev = bios->dev; - uint8_t mlv = bios->data[offset + 1]; - uint8_t count = bios->data[offset + 2]; - int len = 3 + count * 2; -@@ -1706,8 +1717,10 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset, - offset, mlv, count); +@@ -1470,7 +1687,7 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset, reg = get_tmds_index_reg(bios->dev, mlv); -- if (!reg) -- return -EINVAL; -+ if (!reg) { -+ NV_ERROR(dev, "0x%04X: no tmds_index_reg\n", offset); -+ return len; -+ } + if (!reg) +- return 0; ++ return -EINVAL; for (i = 0; i < count; i++) { uint8_t tmdsaddr = bios->data[offset + 3 + i * 2]; -@@ -2146,7 +2159,8 @@ init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +@@ -1909,7 +2126,8 @@ init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) /* no iexec->execute check by design */ pci_nv_19 = bios_rd32(bios, NV_PBUS_PCI_NV_19); @@ -765,5087 +1264,7846 @@ index fc924b6..0eb1b5a 100644 bios_wr32(bios, reg, value1); udelay(10); -@@ -2182,7 +2196,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset, +@@ -1945,7 +2163,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset, uint32_t reg, data; if (bios->major_version > 2) -- return -ENODEV; -+ return 0; +- return 0; ++ return -ENODEV; bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd( bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20); -@@ -2237,7 +2251,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset, +@@ -2000,7 +2218,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset, int clock; if (bios->major_version > 2) -- return -ENODEV; -+ return 0; +- return 0; ++ return -ENODEV; clock = ROM16(bios->data[meminitoffs + 4]) * 10; setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock); -@@ -2270,7 +2284,7 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset, +@@ -2033,7 +2251,7 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset, uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6)); if (bios->major_version > 2) -- return -ENODEV; -+ return 0; +- return 0; ++ return -ENODEV; bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX, cr3c); -@@ -2815,7 +2829,7 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) - - if (dev_priv->card_type != NV_50) { - NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n"); -- return -ENODEV; -+ return 1; - } +@@ -2572,48 +2790,37 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) + * each GPIO according to various values listed in each entry + */ - if (!iexec->execute) -@@ -2887,10 +2901,7 @@ 
init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset, - uint8_t index; +- const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; ++ struct drm_nouveau_private *dev_priv = bios->dev->dev_private; + const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c }; +- const uint8_t *gpio_table = &bios->data[bios->bdcb.gpio_table_ptr]; +- const uint8_t *gpio_entry; int i; -- - if (!iexec->execute) -- return len; +- return 1; +- +- if (bios->bdcb.version != 0x40) { +- NV_ERROR(bios->dev, "DCB table not version 4.0\n"); +- return 0; +- } - -+ /* critical! to know the length of the opcode */; - if (!blocklen) { +- if (!bios->bdcb.gpio_table_ptr) { +- NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n"); +- return 0; ++ if (dev_priv->card_type != NV_50) { ++ NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n"); ++ return -ENODEV; + } + +- gpio_entry = gpio_table + gpio_table[1]; +- for (i = 0; i < gpio_table[2]; i++, gpio_entry += gpio_table[3]) { +- uint32_t entry = ROM32(gpio_entry[0]), r, s, v; +- int line = (entry & 0x0000001f); ++ if (!iexec->execute) ++ return 1; + +- BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, entry); ++ for (i = 0; i < bios->dcb.gpio.entries; i++) { ++ struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i]; ++ uint32_t r, s, v; + +- if ((entry & 0x0000ff00) == 0x0000ff00) +- continue; ++ BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry); + +- r = nv50_gpio_reg[line >> 3]; +- s = (line & 0x07) << 2; +- v = bios_rd32(bios, r) & ~(0x00000003 << s); +- if (entry & 0x01000000) +- v |= (((entry & 0x60000000) >> 29) ^ 2) << s; +- else +- v |= (((entry & 0x18000000) >> 27) ^ 2) << s; +- bios_wr32(bios, r, v); ++ BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n", ++ offset, gpio->tag, gpio->state_default); ++ if (bios->execute) ++ nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default); + +- r = nv50_gpio_ctl[line >> 4]; +- s = (line & 0x0f); ++ /* The NVIDIA binary driver doesn't appear to actually do ++ * any of this, my VBIOS does however. 
++ */ ++ /* Not a clue, needs de-magicing */ ++ r = nv50_gpio_ctl[gpio->line >> 4]; ++ s = (gpio->line & 0x0f); + v = bios_rd32(bios, r) & ~(0x00010001 << s); +- switch ((entry & 0x06000000) >> 25) { ++ switch ((gpio->entry & 0x06000000) >> 25) { + case 1: + v |= (0x00000001 << s); + break; +@@ -2669,7 +2876,7 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset, NV_ERROR(bios->dev, "0x%04X: Zero block length - has the M table " -@@ -2898,6 +2909,9 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset, - return -EINVAL; + "been parsed?\n", offset); +- return 0; ++ return -EINVAL; } -+ if (!iexec->execute) -+ return len; -+ strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf; - index = bios->data[bios->ram_restrict_tbl_ptr + strap_ramcfg]; - -@@ -3079,14 +3093,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +@@ -2853,14 +3060,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) if (!bios->display.output) { NV_ERROR(dev, "INIT_AUXCH: no active output\n"); -- return -EINVAL; -+ return len; +- return 0; ++ return -EINVAL; } auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); if (!auxch) { NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n", bios->display.output->i2c_index); -- return -ENODEV; -+ return len; +- return 0; ++ return -ENODEV; } if (!iexec->execute) -@@ -3099,7 +3113,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +@@ -2873,7 +3080,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1); if (ret) { NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret); -- return ret; -+ return len; +- return 0; ++ return ret; } data &= bios->data[offset + 0]; -@@ -3108,7 +3122,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +@@ -2882,7 +3089,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1); if (ret) { NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret); -- return ret; -+ return len; +- return 0; ++ return ret; } } -@@ -3138,14 +3152,14 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +@@ -2912,14 +3119,14 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) if (!bios->display.output) { NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n"); -- return -EINVAL; -+ return len; +- return 0; ++ return -EINVAL; } auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); if (!auxch) { NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n", bios->display.output->i2c_index); -- return -ENODEV; -+ return len; +- return 0; ++ return -ENODEV; } if (!iexec->execute) -@@ -3156,7 +3170,7 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +@@ -2930,7 +3137,7 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1); if (ret) { NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret); -- return ret; -+ return len; +- return 0; ++ return ret; } } -@@ -5166,10 +5180,14 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi - bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset]; - bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1]; - bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2]; -- bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4]; -- 
bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5]; -- bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6]; -- bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7]; -+ if (bios->data[legacy_i2c_offset + 4]) -+ bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4]; -+ if (bios->data[legacy_i2c_offset + 5]) -+ bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5]; -+ if (bios->data[legacy_i2c_offset + 6]) -+ bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6]; -+ if (bios->data[legacy_i2c_offset + 7]) -+ bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7]; +@@ -2947,6 +3154,9 @@ static struct init_tbl_entry itbl_entry[] = { + { "INIT_COPY" , 0x37, init_copy }, + { "INIT_NOT" , 0x38, init_not }, + { "INIT_IO_FLAG_CONDITION" , 0x39, init_io_flag_condition }, ++ { "INIT_DP_CONDITION" , 0x3A, init_dp_condition }, ++ { "INIT_OP_3B" , 0x3B, init_op_3b }, ++ { "INIT_OP_3C" , 0x3C, init_op_3c }, + { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, init_idx_addr_latched }, + { "INIT_IO_RESTRICT_PLL2" , 0x4A, init_io_restrict_pll2 }, + { "INIT_PLL2" , 0x4B, init_pll2 }, +@@ -3014,7 +3224,7 @@ parse_init_table(struct nvbios *bios, unsigned int offset, + * is changed back to EXECUTE. + */ - if (bmplength > 74) { - bios->fmaxvco = ROM32(bmp[67]); -@@ -5604,9 +5622,12 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, - if (conf & 0x4 || conf & 0x8) - entry->lvdsconf.use_power_scripts = true; +- int count = 0, i, res; ++ int count = 0, i, ret; + uint8_t id; + + /* +@@ -3029,26 +3239,33 @@ parse_init_table(struct nvbios *bios, unsigned int offset, + for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++) + ; + +- if (itbl_entry[i].name) { +- BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", +- offset, itbl_entry[i].id, itbl_entry[i].name); +- +- /* execute eventual command handler */ +- res = (*itbl_entry[i].handler)(bios, offset, iexec); +- if (!res) +- break; +- /* +- * Add the offset of the current command including all data +- * of that command. The offset will then be pointing on the +- * next op code. +- */ +- offset += res; +- } else { ++ if (!itbl_entry[i].name) { + NV_ERROR(bios->dev, + "0x%04X: Init table command not found: " + "0x%02X\n", offset, id); + return -ENOENT; + } ++ ++ BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", offset, ++ itbl_entry[i].id, itbl_entry[i].name); ++ ++ /* execute eventual command handler */ ++ ret = (*itbl_entry[i].handler)(bios, offset, iexec); ++ if (ret < 0) { ++ NV_ERROR(bios->dev, "0x%04X: Failed parsing init " ++ "table opcode: %s %d\n", offset, ++ itbl_entry[i].name, ret); ++ } ++ ++ if (ret <= 0) ++ break; ++ ++ /* ++ * Add the offset of the current command including all data ++ * of that command. The offset will then be pointing on the ++ * next op code. 
++ */ ++ offset += ret; + } + + if (offset >= bios->length) +@@ -3123,7 +3340,7 @@ run_digital_op_script(struct drm_device *dev, uint16_t scriptptr, + struct dcb_entry *dcbent, int head, bool dl) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + struct init_exec iexec = {true, false}; + + NV_TRACE(dev, "0x%04X: Parsing digital output script table\n", +@@ -3140,7 +3357,7 @@ run_digital_op_script(struct drm_device *dev, uint16_t scriptptr, + static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0); + uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]); + +@@ -3194,10 +3411,9 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int + * of a list of pxclks and script pointers. + */ + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + unsigned int outputset = (dcbent->or == 4) ? 1 : 0; + uint16_t scriptptr = 0, clktable; +- uint8_t clktableptr = 0; + + /* + * For now we assume version 3.0 table - g80 support will need some +@@ -3216,26 +3432,29 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int + scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]); + break; + case LVDS_RESET: ++ clktable = bios->fp.lvdsmanufacturerpointer + 15; ++ if (dcbent->or == 4) ++ clktable += 8; ++ + if (dcbent->lvdsconf.use_straps_for_mode) { + if (bios->fp.dual_link) +- clktableptr += 2; +- if (bios->fp.BITbit1) +- clktableptr++; ++ clktable += 4; ++ if (bios->fp.if_is_24bit) ++ clktable += 2; } else { -- mask = ~0x5; -+ mask = ~0x7; -+ if (conf & 0x2) -+ entry->lvdsconf.use_acpi_for_edid = true; - if (conf & 0x4) - entry->lvdsconf.use_power_scripts = true; -+ entry->lvdsconf.sor.link = (conf & 0x00000030) >> 4; + /* using EDID */ +- uint8_t fallback = bios->data[bios->fp.lvdsmanufacturerpointer + 4]; +- int fallbackcmpval = (dcbent->or == 4) ? 4 : 1; ++ int cmpval_24bit = (dcbent->or == 4) ? 
4 : 1; + + if (bios->fp.dual_link) { +- clktableptr += 2; +- fallbackcmpval *= 2; ++ clktable += 4; ++ cmpval_24bit <<= 1; + } +- if (fallbackcmpval & fallback) +- clktableptr++; ++ ++ if (bios->fp.strapless_is_24bit & cmpval_24bit) ++ clktable += 2; } - if (conf & mask) { - /* -@@ -5721,13 +5742,6 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb, - case OUTPUT_TV: - entry->tvconf.has_component_output = false; + +- /* adding outputset * 8 may not be correct */ +- clktable = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 15 + clktableptr * 2 + outputset * 8]); ++ clktable = ROM16(bios->data[clktable]); + if (!clktable) { + NV_ERROR(dev, "Pixel clock comparison table not found\n"); + return -ENOENT; +@@ -3261,7 +3480,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, + */ + + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; + uint32_t sel_clk_binding, sel_clk; + int ret; +@@ -3395,7 +3614,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios) + #ifndef __powerpc__ + NV_ERROR(dev, "Pointer to flat panel table invalid\n"); + #endif +- bios->pub.digital_min_front_porch = 0x4b; ++ bios->digital_min_front_porch = 0x4b; + return 0; + } + +@@ -3428,7 +3647,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios) + * fptable[4] is the minimum + * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap + */ +- bios->pub.digital_min_front_porch = fptable[4]; ++ bios->digital_min_front_porch = fptable[4]; + ofs = -7; + break; + default: +@@ -3467,7 +3686,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios) + + /* nv4x cards need both a strap value and fpindex of 0xf to use DDC */ + if (lth.lvds_ver > 0x10) +- bios->pub.fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf; ++ bios->fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf; + + /* + * If either the strap or xlated fpindex value are 0xf there is no +@@ -3491,7 +3710,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios) + bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr]; + + if (!mode) /* just checking whether we can produce a mode */ +@@ -3562,11 +3781,11 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b + * until later, when this function should be called with non-zero pxclk + */ + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0; + struct lvdstableheader lth; + uint16_t lvdsofs; +- int ret, chip_version = bios->pub.chip_version; ++ int ret, chip_version = bios->chip_version; + + ret = parse_lvds_manufacturer_table_header(dev, bios, <h); + if (ret) +@@ -3637,37 +3856,40 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b + *if_is_24bit = bios->data[lvdsofs] & 16; break; -- case OUTPUT_TMDS: + case 0x30: - /* -- * Invent a DVI-A output, by copying the fields of the DVI-D -- * output; reported to work by math_b on an NV20(!). 
+- * My money would be on there being a 24 bit interface bit in +- * this table, but I have no example of a laptop bios with a +- * 24 bit panel to confirm that. Hence we shout loudly if any +- * bit other than bit 0 is set (I've not even seen bit 1) - */ -- fabricate_vga_output(dcb, entry->i2c_index, entry->heads); +- if (bios->data[lvdsofs] > 1) +- NV_ERROR(dev, +- "You have a very unusual laptop display; please report it\n"); ++ case 0x40: + /* + * No sign of the "power off for reset" or "reset for panel + * on" bits, but it's safer to assume we should + */ + bios->fp.power_off_for_reset = true; + bios->fp.reset_after_pclk_change = true; ++ + /* + * It's ok lvdsofs is wrong for nv4x edid case; dual_link is +- * over-written, and BITbit1 isn't used ++ * over-written, and if_is_24bit isn't used + */ + bios->fp.dual_link = bios->data[lvdsofs] & 1; +- bios->fp.BITbit1 = bios->data[lvdsofs] & 2; +- bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10; - break; - case OUTPUT_LVDS: - if ((conn & 0x00003f00) != 0x10) - entry->lvdsconf.use_straps_for_mode = true; -@@ -5808,6 +5822,31 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb) - dcb->entries = newentries; - } +- case 0x40: +- bios->fp.dual_link = bios->data[lvdsofs] & 1; + bios->fp.if_is_24bit = bios->data[lvdsofs] & 2; + bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4]; + bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10; + break; + } -+static bool -+apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) -+{ -+ /* Dell Precision M6300 -+ * DCB entry 2: 02025312 00000010 -+ * DCB entry 3: 02026312 00000020 ++ /* Dell Latitude D620 reports a too-high value for the dual-link ++ * transition freq, causing us to program the panel incorrectly. + * -+ * Identical, except apparently a different connector on a -+ * different SOR link. Not a clue how we're supposed to know -+ * which one is in use if it even shares an i2c line... ++ * It doesn't appear the VBIOS actually uses its transition freq ++ * (90000kHz), instead it uses the "Number of LVDS channels" field ++ * out of the panel ID structure (http://www.spwg.org/). + * -+ * Ignore the connector on the second SOR link to prevent -+ * nasty problems until this is sorted (assuming it's not a -+ * VBIOS bug). 
++ * For the moment, a quirk will do :) + */ -+ if ((dev->pdev->device == 0x040d) && ++ if ((dev->pdev->device == 0x01d7) && + (dev->pdev->subsystem_vendor == 0x1028) && -+ (dev->pdev->subsystem_device == 0x019b)) { -+ if (*conn == 0x02026312 && *conf == 0x00000020) -+ return false; ++ (dev->pdev->subsystem_device == 0x01c2)) { ++ bios->fp.duallink_transition_clk = 80000; + } + -+ return true; -+} -+ - static int - parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) + /* set dual_link flag for EDID case */ + if (pxclk && (chip_version < 0x25 || chip_version > 0x28)) + bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk); +@@ -3679,20 +3901,37 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b + + static uint8_t * + bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent, +- uint16_t record, int record_len, int record_nr) ++ uint16_t record, int record_len, int record_nr, ++ bool match_link) { -@@ -5941,6 +5980,9 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) - if ((connection & 0x0000000f) == 0x0000000f) + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + uint32_t entry; + uint16_t table; + int i, v; + ++ switch (dcbent->type) { ++ case OUTPUT_TMDS: ++ case OUTPUT_LVDS: ++ case OUTPUT_DP: ++ break; ++ default: ++ match_link = false; ++ break; ++ } ++ + for (i = 0; i < record_nr; i++, record += record_len) { + table = ROM16(bios->data[record]); + if (!table) continue; + entry = ROM32(bios->data[table]); -+ if (!apply_dcb_encoder_quirks(dev, i, &connection, &config)) -+ continue; ++ if (match_link) { ++ v = (entry & 0x00c00000) >> 22; ++ if (!(v & dcbent->sorconf.link)) ++ continue; ++ } + - NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n", - dcb->entries, connection, config); + v = (entry & 0x000f0000) >> 16; + if (!(v & dcbent->or)) + continue; +@@ -3716,7 +3955,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent, + int *length) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + uint8_t *table; -diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h -index adf4ec2..cc52aec 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_bios.h -+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h -@@ -81,6 +81,7 @@ struct dcb_connector_table_entry { - enum dcb_connector_type type; - uint8_t index2; - uint8_t gpio_tag; -+ void *drm; - }; + if (!bios->display.dp_table_ptr) { +@@ -3725,7 +3964,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent, + } + table = &bios->data[bios->display.dp_table_ptr]; + +- if (table[0] != 0x21) { ++ if (table[0] != 0x20 && table[0] != 0x21) { + NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n", + table[0]); + return NULL; +@@ -3734,7 +3973,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent, + *length = table[4]; + return bios_output_config_match(dev, dcbent, + bios->display.dp_table_ptr + table[1], +- table[2], table[3]); ++ table[2], table[3], table[0] >= 0x21); + } - struct dcb_connector_table { -@@ -117,6 +118,7 @@ struct dcb_entry { - struct { - struct sor_conf sor; - bool use_straps_for_mode; -+ bool use_acpi_for_edid; - bool use_power_scripts; - } lvdsconf; - struct { -diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c 
b/drivers/gpu/drm/nouveau/nouveau_bo.c -index 6f3c195..d8c341d 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_bo.c -+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c -@@ -461,9 +461,9 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, - return ret; + int +@@ -3765,7 +4004,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, + */ - ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, -- evict, no_wait_reserve, no_wait_gpu, new_mem); -- if (nvbo->channel && nvbo->channel != chan) -- ret = nouveau_fence_wait(fence, NULL, false, false); -+ evict || (nvbo->channel && -+ nvbo->channel != chan), -+ no_wait_reserve, no_wait_gpu, new_mem); - nouveau_fence_unref((void *)&fence); - return ret; + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + uint8_t *table = &bios->data[bios->display.script_table_ptr]; + uint8_t *otable = NULL; + uint16_t script; +@@ -3823,7 +4062,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, + dcbent->type, dcbent->location, dcbent->or); + otable = bios_output_config_match(dev, dcbent, table[1] + + bios->display.script_table_ptr, +- table[2], table[3]); ++ table[2], table[3], table[0] >= 0x21); + if (!otable) { + NV_ERROR(dev, "Couldn't find matching output script table\n"); + return 1; +@@ -3918,8 +4157,8 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i + */ + + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; +- int cv = bios->pub.chip_version; ++ struct nvbios *bios = &dev_priv->vbios; ++ int cv = bios->chip_version; + uint16_t clktable = 0, scriptptr; + uint32_t sel_clk_binding, sel_clk; + +@@ -3978,8 +4217,8 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims + */ + + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; +- int cv = bios->pub.chip_version, pllindex = 0; ++ struct nvbios *bios = &dev_priv->vbios; ++ int cv = bios->chip_version, pllindex = 0; + uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0; + uint32_t crystal_strap_mask, crystal_straps; + +@@ -4293,31 +4532,32 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims + break; + } + +-#if 0 /* for easy debugging */ +- ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq); +- ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq); +- ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq); +- ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq); +- +- ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq); +- ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq); +- ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq); +- ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq); +- +- ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n); +- ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n); +- ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m); +- ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m); +- ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n); +- ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n); +- ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m); +- ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m); +- +- ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p); +- ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias); +- +- ErrorF("pll.refclk: %d\n", 
pll_lim->refclk); +-#endif ++ NV_DEBUG(dev, "pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq); ++ NV_DEBUG(dev, "pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq); ++ NV_DEBUG(dev, "pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq); ++ NV_DEBUG(dev, "pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq); ++ NV_DEBUG(dev, "pll.vco1.min_n: %d\n", pll_lim->vco1.min_n); ++ NV_DEBUG(dev, "pll.vco1.max_n: %d\n", pll_lim->vco1.max_n); ++ NV_DEBUG(dev, "pll.vco1.min_m: %d\n", pll_lim->vco1.min_m); ++ NV_DEBUG(dev, "pll.vco1.max_m: %d\n", pll_lim->vco1.max_m); ++ if (pll_lim->vco2.maxfreq) { ++ NV_DEBUG(dev, "pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq); ++ NV_DEBUG(dev, "pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq); ++ NV_DEBUG(dev, "pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq); ++ NV_DEBUG(dev, "pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq); ++ NV_DEBUG(dev, "pll.vco2.min_n: %d\n", pll_lim->vco2.min_n); ++ NV_DEBUG(dev, "pll.vco2.max_n: %d\n", pll_lim->vco2.max_n); ++ NV_DEBUG(dev, "pll.vco2.min_m: %d\n", pll_lim->vco2.min_m); ++ NV_DEBUG(dev, "pll.vco2.max_m: %d\n", pll_lim->vco2.max_m); ++ } ++ if (!pll_lim->max_p) { ++ NV_DEBUG(dev, "pll.max_log2p: %d\n", pll_lim->max_log2p); ++ NV_DEBUG(dev, "pll.log2p_bias: %d\n", pll_lim->log2p_bias); ++ } else { ++ NV_DEBUG(dev, "pll.min_p: %d\n", pll_lim->min_p); ++ NV_DEBUG(dev, "pll.max_p: %d\n", pll_lim->max_p); ++ } ++ NV_DEBUG(dev, "pll.refclk: %d\n", pll_lim->refclk); + + return 0; } -@@ -711,8 +711,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, - return ret; +@@ -4332,7 +4572,7 @@ static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint + */ - /* Software copy if the card isn't up and running yet. */ -- if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE || -- !dev_priv->channel) { -+ if (!dev_priv->channel) { - ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); - goto out; + bios->major_version = bios->data[offset + 3]; +- bios->pub.chip_version = bios->data[offset + 2]; ++ bios->chip_version = bios->data[offset + 2]; + NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n", + bios->data[offset + 3], bios->data[offset + 2], + bios->data[offset + 1], bios->data[offset]); +@@ -4402,7 +4642,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st } -diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c -index 1fc57ef..e952c3b 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_channel.c -+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c -@@ -257,9 +257,7 @@ nouveau_channel_free(struct nouveau_channel *chan) - nouveau_debugfs_channel_fini(chan); - - /* Give outstanding push buffers a chance to complete */ -- spin_lock_irqsave(&chan->fence.lock, flags); - nouveau_fence_update(chan); -- spin_unlock_irqrestore(&chan->fence.lock, flags); - if (chan->fence.sequence != chan->fence.sequence_ack) { - struct nouveau_fence *fence = NULL; -@@ -368,8 +366,6 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, - struct nouveau_channel *chan; - int ret; + /* First entry is normal dac, 2nd tv-out perhaps? 
*/ +- bios->pub.dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff; ++ bios->dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff; -- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; -- - if (dev_priv->engine.graph.accel_blocked) - return -ENODEV; + return 0; + } +@@ -4526,8 +4766,8 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st + return -ENOSYS; + } -@@ -418,7 +414,6 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, - struct drm_nouveau_channel_free *cfree = data; - struct nouveau_channel *chan; +- bios->pub.dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]); +- bios->pub.tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]); ++ bios->dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]); ++ bios->tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]); -- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; - NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan); + return 0; + } +@@ -4796,11 +5036,11 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi + uint16_t legacy_scripts_offset, legacy_i2c_offset; + + /* load needed defaults in case we can't parse this info */ +- bios->bdcb.dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX; +- bios->bdcb.dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX; +- bios->bdcb.dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX; +- bios->bdcb.dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX; +- bios->pub.digital_min_front_porch = 0x4b; ++ bios->dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX; ++ bios->dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX; ++ bios->dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX; ++ bios->dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX; ++ bios->digital_min_front_porch = 0x4b; + bios->fmaxvco = 256000; + bios->fminvco = 128000; + bios->fp.duallink_transition_clk = 90000; +@@ -4907,10 +5147,14 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi + bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset]; + bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1]; + bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2]; +- bios->bdcb.dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4]; +- bios->bdcb.dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5]; +- bios->bdcb.dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6]; +- bios->bdcb.dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7]; ++ if (bios->data[legacy_i2c_offset + 4]) ++ bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4]; ++ if (bios->data[legacy_i2c_offset + 5]) ++ bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5]; ++ if (bios->data[legacy_i2c_offset + 6]) ++ bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6]; ++ if (bios->data[legacy_i2c_offset + 7]) ++ bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7]; - nouveau_channel_free(chan); -diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c -index 9a61f3c..2914dd9 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_connector.c -+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c -@@ -236,20 +236,6 @@ nouveau_connector_detect(struct drm_connector *connector) - struct nouveau_i2c_chan *i2c; - int type, flags; + if (bmplength > 74) { + bios->fmaxvco = ROM32(bmp[67]); +@@ -4961,82 +5205,10 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len) + return 0; + } -- if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS) -- nv_encoder = find_encoder_by_type(connector, 
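The parse_bmp_structure change above treats a zero byte in the legacy I2C block as "not supplied" rather than "CRTC register 0", so the defaults loaded up front survive. Roughly, with placeholder register indices standing in for the NV_CIO_CRE_DDC defaults:

#include <stdint.h>

struct i2c_port_regs {
	uint8_t write;	/* VGA CRTC index used for writes */
	uint8_t read;	/* VGA CRTC index used for reads  */
};

static void bmp_i2c_override(struct i2c_port_regs *port, const uint8_t *bmp,
			     unsigned wr_ofs, unsigned rd_ofs,
			     uint8_t def_wr, uint8_t def_rd)
{
	/* Defaults are loaded first, in case the BMP can't be parsed at all. */
	port->write = def_wr;
	port->read = def_rd;

	/* A zero byte means the BMP has nothing to say; keep the default. */
	if (bmp[wr_ofs])
		port->write = bmp[wr_ofs];
	if (bmp[rd_ofs])
		port->read = bmp[rd_ofs];
}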
OUTPUT_LVDS); -- if (nv_encoder && nv_connector->native_mode) { -- unsigned status = connector_status_connected; +-static int +-read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c) +-{ +- uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4; +- int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES; +- int recordoffset = 0, rdofs = 1, wrofs = 0; +- uint8_t port_type = 0; - --#if defined(CONFIG_ACPI_BUTTON) || \ -- (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE)) -- if (!nouveau_ignorelid && !acpi_lid_open()) -- status = connector_status_unknown; --#endif -- nouveau_connector_set_encoder(connector, nv_encoder); -- return status; +- if (!i2ctable) +- return -EINVAL; +- +- if (dcb_version >= 0x30) { +- if (i2ctable[0] != dcb_version) /* necessary? */ +- NV_WARN(dev, +- "DCB I2C table version mismatch (%02X vs %02X)\n", +- i2ctable[0], dcb_version); +- dcb_i2c_ver = i2ctable[0]; +- headerlen = i2ctable[1]; +- if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES) +- i2c_entries = i2ctable[2]; +- else +- NV_WARN(dev, +- "DCB I2C table has more entries than indexable " +- "(%d entries, max index 15)\n", i2ctable[2]); +- entry_len = i2ctable[3]; +- /* [4] is i2c_default_indices, read in parse_dcb_table() */ +- } +- /* +- * It's your own fault if you call this function on a DCB 1.1 BIOS -- +- * the test below is for DCB 1.2 +- */ +- if (dcb_version < 0x14) { +- recordoffset = 2; +- rdofs = 0; +- wrofs = 1; - } - - /* Cleanup the previous EDID block. */ - if (nv_connector->edid) { - drm_mode_connector_update_edid_property(connector, NULL); -@@ -321,6 +307,85 @@ detect_analog: - return connector_status_disconnected; +- if (index == 0xf) +- return 0; +- if (index > i2c_entries) { +- NV_ERROR(dev, "DCB I2C index too big (%d > %d)\n", +- index, i2ctable[2]); +- return -ENOENT; +- } +- if (i2ctable[headerlen + entry_len * index + 3] == 0xff) { +- NV_ERROR(dev, "DCB I2C entry invalid\n"); +- return -EINVAL; +- } +- +- if (dcb_i2c_ver >= 0x30) { +- port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index]; +- +- /* +- * Fixup for chips using same address offset for read and +- * write. 
+- */ +- if (port_type == 4) /* seen on C51 */ +- rdofs = wrofs = 1; +- if (port_type >= 5) /* G80+ */ +- rdofs = wrofs = 0; +- } +- +- if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6) +- NV_WARN(dev, "DCB I2C table has port type %d\n", port_type); +- +- i2c->port_type = port_type; +- i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index]; +- i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index]; +- +- return 0; +-} +- + static struct dcb_gpio_entry * + new_gpio_entry(struct nvbios *bios) + { +- struct parsed_dcb_gpio *gpio = &bios->bdcb.gpio; ++ struct dcb_gpio_table *gpio = &bios->dcb.gpio; + + return &gpio->entry[gpio->entries++]; } +@@ -5045,14 +5217,14 @@ struct dcb_gpio_entry * + nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + int i; -+static enum drm_connector_status -+nouveau_connector_detect_lvds(struct drm_connector *connector) +- for (i = 0; i < bios->bdcb.gpio.entries; i++) { +- if (bios->bdcb.gpio.entry[i].tag != tag) ++ for (i = 0; i < bios->dcb.gpio.entries; i++) { ++ if (bios->dcb.gpio.entry[i].tag != tag) + continue; + +- return &bios->bdcb.gpio.entry[i]; ++ return &bios->dcb.gpio.entry[i]; + } + + return NULL; +@@ -5075,32 +5247,32 @@ parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset) + gpio->tag = tag; + gpio->line = line; + gpio->invert = flags != 4; ++ gpio->entry = ent; + } + + static void + parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset) + { ++ uint32_t entry = ROM32(bios->data[offset]); + struct dcb_gpio_entry *gpio; +- uint32_t ent = ROM32(bios->data[offset]); +- uint8_t line = ent & 0x1f, +- tag = ent >> 8 & 0xff; + +- if (tag == 0xff) ++ if ((entry & 0x0000ff00) == 0x0000ff00) + return; + + gpio = new_gpio_entry(bios); +- +- /* Currently unused, we may need more fields parsed at some +- * point. 
*/ +- gpio->tag = tag; +- gpio->line = line; ++ gpio->tag = (entry & 0x0000ff00) >> 8; ++ gpio->line = (entry & 0x0000001f) >> 0; ++ gpio->state_default = (entry & 0x01000000) >> 24; ++ gpio->state[0] = (entry & 0x18000000) >> 27; ++ gpio->state[1] = (entry & 0x60000000) >> 29; ++ gpio->entry = entry; + } + + static void + parse_dcb_gpio_table(struct nvbios *bios) + { + struct drm_device *dev = bios->dev; +- uint16_t gpio_table_ptr = bios->bdcb.gpio_table_ptr; ++ uint16_t gpio_table_ptr = bios->dcb.gpio_table_ptr; + uint8_t *gpio_table = &bios->data[gpio_table_ptr]; + int header_len = gpio_table[1], + entries = gpio_table[2], +@@ -5108,7 +5280,7 @@ parse_dcb_gpio_table(struct nvbios *bios) + void (*parse_entry)(struct nvbios *, uint16_t) = NULL; + int i; + +- if (bios->bdcb.version >= 0x40) { ++ if (bios->dcb.version >= 0x40) { + if (gpio_table_ptr && entry_len != 4) { + NV_WARN(dev, "Invalid DCB GPIO table entry length.\n"); + return; +@@ -5116,7 +5288,7 @@ parse_dcb_gpio_table(struct nvbios *bios) + + parse_entry = parse_dcb40_gpio_entry; + +- } else if (bios->bdcb.version >= 0x30) { ++ } else if (bios->dcb.version >= 0x30) { + if (gpio_table_ptr && entry_len != 2) { + NV_WARN(dev, "Invalid DCB GPIO table entry length.\n"); + return; +@@ -5124,7 +5296,7 @@ parse_dcb_gpio_table(struct nvbios *bios) + + parse_entry = parse_dcb30_gpio_entry; + +- } else if (bios->bdcb.version >= 0x22) { ++ } else if (bios->dcb.version >= 0x22) { + /* + * DCBs older than v3.0 don't really have a GPIO + * table, instead they keep some GPIO info at fixed +@@ -5158,30 +5330,82 @@ struct dcb_connector_table_entry * + nouveau_bios_connector_entry(struct drm_device *dev, int index) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + struct dcb_connector_table_entry *cte; + +- if (index >= bios->bdcb.connector.entries) ++ if (index >= bios->dcb.connector.entries) + return NULL; + +- cte = &bios->bdcb.connector.entry[index]; ++ cte = &bios->dcb.connector.entry[index]; + if (cte->type == 0xff) + return NULL; + + return cte; + } + ++static enum dcb_connector_type ++divine_connector_type(struct nvbios *bios, int index) +{ -+ struct drm_device *dev = connector->dev; -+ struct drm_nouveau_private *dev_priv = dev->dev_private; -+ struct nouveau_connector *nv_connector = nouveau_connector(connector); -+ struct nouveau_encoder *nv_encoder = NULL; -+ enum drm_connector_status status = connector_status_disconnected; ++ struct dcb_table *dcb = &bios->dcb; ++ unsigned encoders = 0, type = DCB_CONNECTOR_NONE; ++ int i; + -+ /* Cleanup the previous EDID block. 
*/ -+ if (nv_connector->edid) { -+ drm_mode_connector_update_edid_property(connector, NULL); -+ kfree(nv_connector->edid); -+ nv_connector->edid = NULL; ++ for (i = 0; i < dcb->entries; i++) { ++ if (dcb->entry[i].connector == index) ++ encoders |= (1 << dcb->entry[i].type); + } + -+ nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); -+ if (!nv_encoder) -+ return connector_status_disconnected; -+ -+ /* Try retrieving EDID via DDC */ -+ if (!dev_priv->vbios.fp_no_ddc) { -+ status = nouveau_connector_detect(connector); -+ if (status == connector_status_connected) -+ goto out; ++ if (encoders & (1 << OUTPUT_DP)) { ++ if (encoders & (1 << OUTPUT_TMDS)) ++ type = DCB_CONNECTOR_DP; ++ else ++ type = DCB_CONNECTOR_eDP; ++ } else ++ if (encoders & (1 << OUTPUT_TMDS)) { ++ if (encoders & (1 << OUTPUT_ANALOG)) ++ type = DCB_CONNECTOR_DVI_I; ++ else ++ type = DCB_CONNECTOR_DVI_D; ++ } else ++ if (encoders & (1 << OUTPUT_ANALOG)) { ++ type = DCB_CONNECTOR_VGA; ++ } else ++ if (encoders & (1 << OUTPUT_LVDS)) { ++ type = DCB_CONNECTOR_LVDS; ++ } else ++ if (encoders & (1 << OUTPUT_TV)) { ++ type = DCB_CONNECTOR_TV_0; + } + -+ /* On some laptops (Sony, i'm looking at you) there appears to -+ * be no direct way of accessing the panel's EDID. The only -+ * option available to us appears to be to ask ACPI for help.. -+ * -+ * It's important this check's before trying straps, one of the -+ * said manufacturer's laptops are configured in such a way -+ * the nouveau decides an entry in the VBIOS FP mode table is -+ * valid - it's not (rh#613284) -+ */ -+ if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) { -+ if (!nouveau_acpi_edid(dev, connector)) { -+ status = connector_status_connected; -+ goto out; -+ } -+ } ++ return type; ++} + -+ /* If no EDID found above, and the VBIOS indicates a hardcoded -+ * modeline is avalilable for the panel, set it as the panel's -+ * native mode and exit. -+ */ -+ if (nouveau_bios_fp_mode(dev, NULL) && (dev_priv->vbios.fp_no_ddc || -+ nv_encoder->dcb->lvdsconf.use_straps_for_mode)) { -+ status = connector_status_connected; -+ goto out; -+ } ++static void ++apply_dcb_connector_quirks(struct nvbios *bios, int idx) ++{ ++ struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx]; ++ struct drm_device *dev = bios->dev; + -+ /* Still nothing, some VBIOS images have a hardcoded EDID block -+ * stored for the panel stored in them. 
-+ */ -+ if (!dev_priv->vbios.fp_no_ddc) { -+ struct edid *edid = -+ (struct edid *)nouveau_bios_embedded_edid(dev); -+ if (edid) { -+ nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL); -+ *(nv_connector->edid) = *edid; -+ status = connector_status_connected; -+ } ++ /* Gigabyte NX85T */ ++ if ((dev->pdev->device == 0x0421) && ++ (dev->pdev->subsystem_vendor == 0x1458) && ++ (dev->pdev->subsystem_device == 0x344c)) { ++ if (cte->type == DCB_CONNECTOR_HDMI_1) ++ cte->type = DCB_CONNECTOR_DVI_I; + } -+ -+out: -+#if defined(CONFIG_ACPI_BUTTON) || \ -+ (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE)) -+ if (status == connector_status_connected && -+ !nouveau_ignorelid && !acpi_lid_open()) -+ status = connector_status_unknown; -+#endif -+ -+ drm_mode_connector_update_edid_property(connector, nv_connector->edid); -+ nouveau_connector_set_encoder(connector, nv_encoder); -+ return status; +} + static void - nouveau_connector_force(struct drm_connector *connector) - { -@@ -534,21 +599,27 @@ static int - nouveau_connector_get_modes(struct drm_connector *connector) + parse_dcb_connector_table(struct nvbios *bios) { - struct drm_device *dev = connector->dev; -+ struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_connector *nv_connector = nouveau_connector(connector); - struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; - int ret = 0; + struct drm_device *dev = bios->dev; +- struct dcb_connector_table *ct = &bios->bdcb.connector; ++ struct dcb_connector_table *ct = &bios->dcb.connector; + struct dcb_connector_table_entry *cte; +- uint8_t *conntab = &bios->data[bios->bdcb.connector_table_ptr]; ++ uint8_t *conntab = &bios->data[bios->dcb.connector_table_ptr]; + uint8_t *entry; + int i; -- /* If we're not LVDS, destroy the previous native mode, the attached -- * monitor could have changed. -+ /* destroy the native mode, the attached monitor could have changed. 
- */ -- if (nv_connector->dcb->type != DCB_CONNECTOR_LVDS && -- nv_connector->native_mode) { -+ if (nv_connector->native_mode) { - drm_mode_destroy(dev, nv_connector->native_mode); - nv_connector->native_mode = NULL; +- if (!bios->bdcb.connector_table_ptr) { ++ if (!bios->dcb.connector_table_ptr) { + NV_DEBUG_KMS(dev, "No DCB connector table present\n"); + return; } +@@ -5199,12 +5423,14 @@ parse_dcb_connector_table(struct nvbios *bios) + entry = conntab + conntab[1]; + cte = &ct->entry[0]; + for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) { ++ cte->index = i; + if (conntab[3] == 2) + cte->entry = ROM16(entry[0]); + else + cte->entry = ROM32(entry[0]); ++ + cte->type = (cte->entry & 0x000000ff) >> 0; +- cte->index = (cte->entry & 0x00000f00) >> 8; ++ cte->index2 = (cte->entry & 0x00000f00) >> 8; + switch (cte->entry & 0x00033000) { + case 0x00001000: + cte->gpio_tag = 0x07; +@@ -5226,12 +5452,43 @@ parse_dcb_connector_table(struct nvbios *bios) + if (cte->type == 0xff) + continue; - if (nv_connector->edid) - ret = drm_add_edid_modes(connector, nv_connector->edid); -+ else -+ if (nv_encoder->dcb->type == OUTPUT_LVDS && -+ (nv_encoder->dcb->lvdsconf.use_straps_for_mode || -+ dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { -+ nv_connector->native_mode = drm_mode_create(dev); -+ nouveau_bios_fp_mode(dev, nv_connector->native_mode); -+ } ++ apply_dcb_connector_quirks(bios, i); ++ + NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n", + i, cte->entry, cte->type, cte->index, cte->gpio_tag); ++ ++ /* check for known types, fallback to guessing the type ++ * from attached encoders if we hit an unknown. ++ */ ++ switch (cte->type) { ++ case DCB_CONNECTOR_VGA: ++ case DCB_CONNECTOR_TV_0: ++ case DCB_CONNECTOR_TV_1: ++ case DCB_CONNECTOR_TV_3: ++ case DCB_CONNECTOR_DVI_I: ++ case DCB_CONNECTOR_DVI_D: ++ case DCB_CONNECTOR_LVDS: ++ case DCB_CONNECTOR_DP: ++ case DCB_CONNECTOR_eDP: ++ case DCB_CONNECTOR_HDMI_0: ++ case DCB_CONNECTOR_HDMI_1: ++ break; ++ default: ++ cte->type = divine_connector_type(bios, cte->index); ++ NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type); ++ break; ++ } ++ ++ if (nouveau_override_conntype) { ++ int type = divine_connector_type(bios, cte->index); ++ if (type != cte->type) ++ NV_WARN(dev, " -> type 0x%02x\n", cte->type); ++ } ++ + } + } - /* Find the native mode if this is a digital panel, if we didn't - * find any modes through DDC previously add the native mode to -@@ -569,7 +640,8 @@ nouveau_connector_get_modes(struct drm_connector *connector) - ret = get_slave_funcs(nv_encoder)-> - get_modes(to_drm_encoder(nv_encoder), connector); +-static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb) ++static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb) + { + struct dcb_entry *entry = &dcb->entry[dcb->entries]; -- if (nv_encoder->dcb->type == OUTPUT_LVDS) -+ if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS || -+ nv_connector->dcb->type == DCB_CONNECTOR_eDP) - ret += nouveau_connector_scaler_modes_add(connector); +@@ -5241,7 +5498,7 @@ static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb) + return entry; + } - return ret; -@@ -662,148 +734,74 @@ nouveau_connector_funcs = { - .force = nouveau_connector_force - }; +-static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads) ++static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads) + { + struct dcb_entry *entry = new_dcb_entry(dcb); --static int --nouveau_connector_create_lvds(struct drm_device *dev, -- struct drm_connector 
*connector) --{ -- struct nouveau_connector *nv_connector = nouveau_connector(connector); -- struct drm_nouveau_private *dev_priv = dev->dev_private; -- struct nouveau_i2c_chan *i2c = NULL; -- struct nouveau_encoder *nv_encoder; -- struct drm_display_mode native, *mode, *temp; -- bool dummy, if_is_24bit = false; -- int ret, flags; -- -- nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); -- if (!nv_encoder) -- return -ENODEV; -- -- ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &if_is_24bit); -- if (ret) { -- NV_ERROR(dev, "Error parsing LVDS table, disabling LVDS\n"); -- return ret; -- } -- nv_connector->use_dithering = !if_is_24bit; -- -- /* Firstly try getting EDID over DDC, if allowed and I2C channel -- * is available. -- */ -- if (!dev_priv->vbios.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf) -- i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); -- -- if (i2c) { -- nouveau_connector_ddc_prepare(connector, &flags); -- nv_connector->edid = drm_get_edid(connector, &i2c->adapter); -- nouveau_connector_ddc_finish(connector, flags); -- } -- -- /* If no EDID found above, and the VBIOS indicates a hardcoded -- * modeline is avalilable for the panel, set it as the panel's -- * native mode and exit. -- */ -- if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) && -- (nv_encoder->dcb->lvdsconf.use_straps_for_mode || -- dev_priv->vbios.fp_no_ddc)) { -- nv_connector->native_mode = drm_mode_duplicate(dev, &native); -- goto out; -- } -- -- /* Still nothing, some VBIOS images have a hardcoded EDID block -- * stored for the panel stored in them. -- */ -- if (!nv_connector->edid && !nv_connector->native_mode && -- !dev_priv->vbios.fp_no_ddc) { -- struct edid *edid = -- (struct edid *)nouveau_bios_embedded_edid(dev); -- if (edid) { -- nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL); -- *(nv_connector->edid) = *edid; -- } -- } -- -- if (!nv_connector->edid) -- goto out; -- -- /* We didn't find/use a panel mode from the VBIOS, so parse the EDID -- * block and look for the preferred mode there. -- */ -- ret = drm_add_edid_modes(connector, nv_connector->edid); -- if (ret == 0) -- goto out; -- nv_connector->detected_encoder = nv_encoder; -- nv_connector->native_mode = nouveau_connector_native_mode(connector); -- list_for_each_entry_safe(mode, temp, &connector->probed_modes, head) -- drm_mode_remove(connector, mode); -- --out: -- if (!nv_connector->native_mode) { -- NV_ERROR(dev, "LVDS present in DCB table, but couldn't " -- "determine its native mode. 
Disabling.\n"); -- return -ENODEV; -- } -- -- drm_mode_connector_update_edid_property(connector, nv_connector->edid); -- return 0; --} -+static const struct drm_connector_funcs -+nouveau_connector_funcs_lvds = { -+ .dpms = drm_helper_connector_dpms, -+ .save = NULL, -+ .restore = NULL, -+ .detect = nouveau_connector_detect_lvds, -+ .destroy = nouveau_connector_destroy, -+ .fill_modes = drm_helper_probe_single_connector_modes, -+ .set_property = nouveau_connector_set_property, -+ .force = nouveau_connector_force -+}; +@@ -5252,7 +5509,7 @@ static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads) + /* "or" mostly unused in early gen crt modesetting, 0 is fine */ + } --int --nouveau_connector_create(struct drm_device *dev, -- struct dcb_connector_table_entry *dcb) -+struct drm_connector * -+nouveau_connector_create(struct drm_device *dev, int index) +-static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads) ++static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads) { -+ const struct drm_connector_funcs *funcs = &nouveau_connector_funcs; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_connector *nv_connector = NULL; -+ struct dcb_connector_table_entry *dcb = NULL; - struct drm_connector *connector; -- struct drm_encoder *encoder; -- int ret, type; -+ int type, ret = 0; + struct dcb_entry *entry = new_dcb_entry(dcb); - NV_DEBUG_KMS(dev, "\n"); +@@ -5279,7 +5536,7 @@ static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads) + #endif + } -+ if (index >= dev_priv->vbios.dcb.connector.entries) -+ return ERR_PTR(-EINVAL); -+ -+ dcb = &dev_priv->vbios.dcb.connector.entry[index]; -+ if (dcb->drm) -+ return dcb->drm; -+ - switch (dcb->type) { -- case DCB_CONNECTOR_NONE: -- return 0; - case DCB_CONNECTOR_VGA: -- NV_INFO(dev, "Detected a VGA connector\n"); - type = DRM_MODE_CONNECTOR_VGA; - break; - case DCB_CONNECTOR_TV_0: - case DCB_CONNECTOR_TV_1: - case DCB_CONNECTOR_TV_3: -- NV_INFO(dev, "Detected a TV connector\n"); - type = DRM_MODE_CONNECTOR_TV; - break; - case DCB_CONNECTOR_DVI_I: -- NV_INFO(dev, "Detected a DVI-I connector\n"); - type = DRM_MODE_CONNECTOR_DVII; - break; - case DCB_CONNECTOR_DVI_D: -- NV_INFO(dev, "Detected a DVI-D connector\n"); - type = DRM_MODE_CONNECTOR_DVID; - break; - case DCB_CONNECTOR_HDMI_0: - case DCB_CONNECTOR_HDMI_1: -- NV_INFO(dev, "Detected a HDMI connector\n"); - type = DRM_MODE_CONNECTOR_HDMIA; - break; - case DCB_CONNECTOR_LVDS: -- NV_INFO(dev, "Detected a LVDS connector\n"); - type = DRM_MODE_CONNECTOR_LVDS; -+ funcs = &nouveau_connector_funcs_lvds; - break; - case DCB_CONNECTOR_DP: -- NV_INFO(dev, "Detected a DisplayPort connector\n"); - type = DRM_MODE_CONNECTOR_DisplayPort; +-static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads) ++static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads) + { + struct dcb_entry *entry = new_dcb_entry(dcb); + +@@ -5290,23 +5547,17 @@ static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads) + } + + static bool +-parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, ++parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, + uint32_t conn, uint32_t conf, struct dcb_entry *entry) + { + entry->type = conn & 0xf; + entry->i2c_index = (conn >> 4) & 0xf; + entry->heads = (conn >> 8) & 0xf; +- if (bdcb->version >= 0x40) ++ if (dcb->version >= 0x40) + entry->connector = (conn >> 12) & 0xf; + entry->bus = (conn >> 16) & 0xf; + entry->location = (conn >> 20) & 0x3; + 
entry->or = (conn >> 24) & 0xf; +- /* +- * Normal entries consist of a single bit, but dual link has the +- * next most significant bit set too +- */ +- entry->duallink_possible = +- ((1 << (ffs(entry->or) - 1)) * 3 == entry->or); + + switch (entry->type) { + case OUTPUT_ANALOG: +@@ -5314,7 +5565,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, + * Although the rest of a CRT conf dword is usually + * zeros, mac biosen have stuff there so we must mask + */ +- entry->crtconf.maxfreq = (bdcb->version < 0x30) ? ++ entry->crtconf.maxfreq = (dcb->version < 0x30) ? + (conf & 0xffff) * 10 : + (conf & 0xff) * 10000; break; - case DCB_CONNECTOR_eDP: -- NV_INFO(dev, "Detected an eDP connector\n"); - type = DRM_MODE_CONNECTOR_eDP; +@@ -5323,7 +5574,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, + uint32_t mask; + if (conf & 0x1) + entry->lvdsconf.use_straps_for_mode = true; +- if (bdcb->version < 0x22) { ++ if (dcb->version < 0x22) { + mask = ~0xd; + /* + * The laptop in bug 14567 lies and claims to not use +@@ -5347,7 +5598,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, + * Until we even try to use these on G8x, it's + * useless reporting unknown bits. They all are. + */ +- if (bdcb->version >= 0x40) ++ if (dcb->version >= 0x40) + break; + + NV_ERROR(dev, "Unknown LVDS configuration bits, " +@@ -5357,7 +5608,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, + } + case OUTPUT_TV: + { +- if (bdcb->version >= 0x30) ++ if (dcb->version >= 0x30) + entry->tvconf.has_component_output = conf & (0x8 << 4); + else + entry->tvconf.has_component_output = false; +@@ -5384,8 +5635,20 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, break; - default: - NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type); -- return -EINVAL; -+ return ERR_PTR(-EINVAL); + case 0xe: + /* weird g80 mobile type that "nv" treats as a terminator */ +- bdcb->dcb.entries--; ++ dcb->entries--; + return false; ++ default: ++ break; ++ } ++ ++ if (dcb->version < 0x40) { ++ /* Normal entries consist of a single bit, but dual link has ++ * the next most significant bit set too ++ */ ++ entry->duallink_possible = ++ ((1 << (ffs(entry->or) - 1)) * 3 == entry->or); ++ } else { ++ entry->duallink_possible = (entry->sorconf.link == 3); } - nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); - if (!nv_connector) -- return -ENOMEM; -+ return ERR_PTR(-ENOMEM); - nv_connector->dcb = dcb; - connector = &nv_connector->base; + /* unsure what DCB version introduces this, 3.0? 
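Since parse_dcb20_entry above is mostly shifting and masking, the connection dword is easier to read laid out field by field. A condensed sketch of the decode plus the reworked duallink_possible test follows; the struct and the bit-position comments are inferred from the shifts shown above and are not the driver's real types.

#include <stdbool.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

struct dcb20_fields {
	unsigned type;		/* bits  3:0   encoder type */
	unsigned i2c_index;	/* bits  7:4   */
	unsigned heads;		/* bits 11:8   CRTC mask */
	unsigned connector;	/* bits 15:12  DCB 4.0 only */
	unsigned bus;		/* bits 19:16  */
	unsigned location;	/* bits 21:20  on-chip vs. on-board */
	unsigned or_mask;	/* bits 27:24  output resources */
	bool duallink_possible;
};

static void decode_dcb20(uint32_t conn, int dcb_version, int sor_link,
			 struct dcb20_fields *e)
{
	e->type      = conn & 0xf;
	e->i2c_index = (conn >> 4) & 0xf;
	e->heads     = (conn >> 8) & 0xf;
	e->connector = dcb_version >= 0x40 ? (conn >> 12) & 0xf : 0;
	e->bus       = (conn >> 16) & 0xf;
	e->location  = (conn >> 20) & 0x3;
	e->or_mask   = (conn >> 24) & 0xf;

	if (dcb_version < 0x40)
		/* one bit set = single link; that bit plus its neighbour = dual */
		e->duallink_possible = e->or_mask &&
			((1u << (ffs(e->or_mask) - 1)) * 3 == e->or_mask);
	else
		/* DCB 4.0 carries an explicit SOR link mask instead */
		e->duallink_possible = (sor_link == 3);
}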
*/ +@@ -5396,7 +5659,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, + } -@@ -811,27 +809,21 @@ nouveau_connector_create(struct drm_device *dev, - connector->interlace_allowed = false; - connector->doublescan_allowed = false; + static bool +-parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb, ++parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb, + uint32_t conn, uint32_t conf, struct dcb_entry *entry) + { + switch (conn & 0x0000000f) { +@@ -5462,27 +5725,27 @@ parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb, + return true; + } -- drm_connector_init(dev, connector, &nouveau_connector_funcs, type); -+ drm_connector_init(dev, connector, funcs, type); - drm_connector_helper_add(connector, &nouveau_connector_helper_funcs); +-static bool parse_dcb_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, ++static bool parse_dcb_entry(struct drm_device *dev, struct dcb_table *dcb, + uint32_t conn, uint32_t conf) + { +- struct dcb_entry *entry = new_dcb_entry(&bdcb->dcb); ++ struct dcb_entry *entry = new_dcb_entry(dcb); + bool ret; + +- if (bdcb->version >= 0x20) +- ret = parse_dcb20_entry(dev, bdcb, conn, conf, entry); ++ if (dcb->version >= 0x20) ++ ret = parse_dcb20_entry(dev, dcb, conn, conf, entry); + else +- ret = parse_dcb15_entry(dev, &bdcb->dcb, conn, conf, entry); ++ ret = parse_dcb15_entry(dev, dcb, conn, conf, entry); + if (!ret) + return ret; -- /* attach encoders */ -- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { -- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); -- -- if (nv_encoder->dcb->connector != dcb->index) -- continue; -- -- if (get_slave_funcs(nv_encoder)) -- get_slave_funcs(nv_encoder)->create_resources(encoder, connector); -+ /* Check if we need dithering enabled */ -+ if (dcb->type == DCB_CONNECTOR_LVDS) { -+ bool dummy, is_24bit = false; +- read_dcb_i2c_entry(dev, bdcb->version, bdcb->i2c_table, +- entry->i2c_index, &bdcb->dcb.i2c[entry->i2c_index]); ++ read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table, ++ entry->i2c_index, &dcb->i2c[entry->i2c_index]); -- drm_mode_connector_attach_encoder(connector, encoder); -- } -+ ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit); -+ if (ret) { -+ NV_ERROR(dev, "Error parsing LVDS table, disabling " -+ "LVDS\n"); -+ goto fail; -+ } + return true; + } -- if (!connector->encoder_ids[0]) { -- NV_WARN(dev, " no encoders, ignoring\n"); -- drm_connector_cleanup(connector); -- kfree(connector); -- return 0; -+ nv_connector->use_dithering = !is_24bit; - } + static +-void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb) ++void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb) + { + /* + * DCB v2.0 lists each output combination separately. 
+@@ -5534,8 +5797,7 @@ static int + parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct bios_parsed_dcb *bdcb = &bios->bdcb; +- struct parsed_dcb *dcb; ++ struct dcb_table *dcb = &bios->dcb; + uint16_t dcbptr = 0, i2ctabptr = 0; + uint8_t *dcbtable; + uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES; +@@ -5543,9 +5805,6 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) + int recordlength = 8, confofs = 4; + int i; - /* Init DVI-I specific properties */ -@@ -841,9 +833,6 @@ nouveau_connector_create(struct drm_device *dev, - drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0); +- dcb = bios->pub.dcb = &bdcb->dcb; +- dcb->entries = 0; +- + /* get the offset from 0x36 */ + if (dev_priv->card_type > NV_04) { + dcbptr = ROM16(bios->data[0x36]); +@@ -5567,21 +5826,21 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) + dcbtable = &bios->data[dcbptr]; + + /* get DCB version */ +- bdcb->version = dcbtable[0]; ++ dcb->version = dcbtable[0]; + NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n", +- bdcb->version >> 4, bdcb->version & 0xf); ++ dcb->version >> 4, dcb->version & 0xf); + +- if (bdcb->version >= 0x20) { /* NV17+ */ ++ if (dcb->version >= 0x20) { /* NV17+ */ + uint32_t sig; + +- if (bdcb->version >= 0x30) { /* NV40+ */ ++ if (dcb->version >= 0x30) { /* NV40+ */ + headerlen = dcbtable[1]; + entries = dcbtable[2]; + recordlength = dcbtable[3]; + i2ctabptr = ROM16(dcbtable[4]); + sig = ROM32(dcbtable[6]); +- bdcb->gpio_table_ptr = ROM16(dcbtable[10]); +- bdcb->connector_table_ptr = ROM16(dcbtable[20]); ++ dcb->gpio_table_ptr = ROM16(dcbtable[10]); ++ dcb->connector_table_ptr = ROM16(dcbtable[20]); + } else { + i2ctabptr = ROM16(dcbtable[2]); + sig = ROM32(dcbtable[4]); +@@ -5593,7 +5852,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) + "signature (%08X)\n", sig); + return -EINVAL; + } +- } else if (bdcb->version >= 0x15) { /* some NV11 and NV20 */ ++ } else if (dcb->version >= 0x15) { /* some NV11 and NV20 */ + char sig[8] = { 0 }; + + strncpy(sig, (char *)&dcbtable[-7], 7); +@@ -5641,14 +5900,11 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) + if (!i2ctabptr) + NV_WARN(dev, "No pointer to DCB I2C port table\n"); + else { +- bdcb->i2c_table = &bios->data[i2ctabptr]; +- if (bdcb->version >= 0x30) +- bdcb->i2c_default_indices = bdcb->i2c_table[4]; ++ dcb->i2c_table = &bios->data[i2ctabptr]; ++ if (dcb->version >= 0x30) ++ dcb->i2c_default_indices = dcb->i2c_table[4]; } -- if (dcb->type != DCB_CONNECTOR_LVDS) -- nv_connector->use_dithering = false; +- parse_dcb_gpio_table(bios); +- parse_dcb_connector_table(bios); - - switch (dcb->type) { - case DCB_CONNECTOR_VGA: - if (dev_priv->card_type >= NV_50) { -@@ -871,14 +860,12 @@ nouveau_connector_create(struct drm_device *dev, - } - - drm_sysfs_connector_add(connector); -+ dcb->drm = connector; -+ return dcb->drm; - -- if (dcb->type == DCB_CONNECTOR_LVDS) { -- ret = nouveau_connector_create_lvds(dev, connector); -- if (ret) { -- connector->funcs->destroy(connector); -- return ret; -- } -- } -+fail: -+ drm_connector_cleanup(connector); -+ kfree(connector); -+ return ERR_PTR(ret); + if (entries > DCB_MAX_NUM_ENTRIES) + entries = DCB_MAX_NUM_ENTRIES; -- return 0; - } -diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h 
-index 4ef38ab..1ce3d91 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_connector.h -+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h -@@ -49,7 +49,7 @@ static inline struct nouveau_connector *nouveau_connector( - return container_of(con, struct nouveau_connector, base); - } +@@ -5673,7 +5929,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) + NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n", + dcb->entries, connection, config); --int nouveau_connector_create(struct drm_device *, -- struct dcb_connector_table_entry *); -+struct drm_connector * -+nouveau_connector_create(struct drm_device *, int index); +- if (!parse_dcb_entry(dev, bdcb, connection, config)) ++ if (!parse_dcb_entry(dev, dcb, connection, config)) + break; + } - #endif /* __NOUVEAU_CONNECTOR_H__ */ -diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c -index 65c441a..2e3c6ca 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_dma.c -+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c -@@ -92,11 +92,9 @@ nouveau_dma_init(struct nouveau_channel *chan) - return ret; +@@ -5681,18 +5937,22 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) + * apart for v2.1+ not being known for requiring merging, this + * guarantees dcbent->index is the index of the entry in the rom image + */ +- if (bdcb->version < 0x21) ++ if (dcb->version < 0x21) + merge_like_dcb_entries(dev, dcb); - /* Map M2MF notifier object - fbcon. */ -- if (drm_core_check_feature(dev, DRIVER_MODESET)) { -- ret = nouveau_bo_map(chan->notifier_bo); -- if (ret) -- return ret; -- } -+ ret = nouveau_bo_map(chan->notifier_bo); -+ if (ret) -+ return ret; +- return dcb->entries ? 0 : -ENXIO; ++ if (!dcb->entries) ++ return -ENXIO; ++ ++ parse_dcb_gpio_table(bios); ++ parse_dcb_connector_table(bios); ++ return 0; + } - /* Insert NOPS for NOUVEAU_DMA_SKIPS */ - ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS); -diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c -index deeb21c..184bc95 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_dp.c -+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c -@@ -271,12 +271,26 @@ nouveau_dp_link_train(struct drm_encoder *encoder) + static void + fixup_legacy_connector(struct nvbios *bios) { - struct drm_device *dev = encoder->dev; - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); -- uint8_t config[4]; -- uint8_t status[3]; -+ struct bit_displayport_encoder_table *dpe; -+ int dpe_headerlen; -+ uint8_t config[4], status[3]; - bool cr_done, cr_max_vs, eq_done; - int ret = 0, i, tries, voltage; - - NV_DEBUG_KMS(dev, "link training!!\n"); -+ -+ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); -+ if (!dpe) { -+ NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or); -+ return false; -+ } +- struct bios_parsed_dcb *bdcb = &bios->bdcb; +- struct parsed_dcb *dcb = &bdcb->dcb; +- int high = 0, i; ++ struct dcb_table *dcb = &bios->dcb; ++ int i, i2c, i2c_conn[DCB_MAX_NUM_I2C_ENTRIES] = { }; + + /* + * DCB 3.0 also has the table in most cases, but there are some cards +@@ -5700,9 +5960,11 @@ fixup_legacy_connector(struct nvbios *bios) + * indices are all 0. We don't need the connector indices on pre-G80 + * chips (yet?) so limit the use to DCB 4.0 and above. 
+ */ +- if (bdcb->version >= 0x40) ++ if (dcb->version >= 0x40) + return; + ++ dcb->connector.entries = 0; + -+ if (dpe->script0) { -+ NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); -+ nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0), -+ nv_encoder->dcb); -+ } + /* + * No known connector info before v3.0, so make it up. the rule here + * is: anything on the same i2c bus is considered to be on the same +@@ -5710,37 +5972,38 @@ fixup_legacy_connector(struct nvbios *bios) + * its own unique connector index. + */ + for (i = 0; i < dcb->entries; i++) { +- if (dcb->entry[i].i2c_index == 0xf) +- continue; +- + /* + * Ignore the I2C index for on-chip TV-out, as there + * are cards with bogus values (nv31m in bug 23212), + * and it's otherwise useless. + */ + if (dcb->entry[i].type == OUTPUT_TV && +- dcb->entry[i].location == DCB_LOC_ON_CHIP) { ++ dcb->entry[i].location == DCB_LOC_ON_CHIP) + dcb->entry[i].i2c_index = 0xf; ++ i2c = dcb->entry[i].i2c_index; + - train: - cr_done = eq_done = false; - -@@ -403,6 +417,12 @@ stop: ++ if (i2c_conn[i2c]) { ++ dcb->entry[i].connector = i2c_conn[i2c] - 1; + continue; } - } - -+ if (dpe->script1) { -+ NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or); -+ nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1), -+ nv_encoder->dcb); -+ } -+ - return eq_done; - } - -diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c -index 2737704..b4d958c 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_drv.c -+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c -@@ -35,10 +35,6 @@ - #include "drm_pciids.h" +- dcb->entry[i].connector = dcb->entry[i].i2c_index; +- if (dcb->entry[i].connector > high) +- high = dcb->entry[i].connector; ++ dcb->entry[i].connector = dcb->connector.entries++; ++ if (i2c != 0xf) ++ i2c_conn[i2c] = dcb->connector.entries; + } --MODULE_PARM_DESC(ctxfw, "Use external firmware blob for grctx init (NV40)"); --int nouveau_ctxfw = 0; --module_param_named(ctxfw, nouveau_ctxfw, int, 0400); +- for (i = 0; i < dcb->entries; i++) { +- if (dcb->entry[i].i2c_index != 0xf) +- continue; - - MODULE_PARM_DESC(noagp, "Disable AGP"); - int nouveau_noagp; - module_param_named(noagp, nouveau_noagp, int, 0400); -@@ -56,7 +52,7 @@ int nouveau_vram_pushbuf; - module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); - - MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); --int nouveau_vram_notify = 1; -+int nouveau_vram_notify = 0; - module_param_named(vram_notify, nouveau_vram_notify, int, 0400); - - MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); -@@ -155,9 +151,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) - struct drm_crtc *crtc; - int ret, i; +- dcb->entry[i].connector = ++high; ++ /* Fake the connector table as well as just connector indices */ ++ for (i = 0; i < dcb->connector.entries; i++) { ++ dcb->connector.entry[i].index = i; ++ dcb->connector.entry[i].type = divine_connector_type(bios, i); ++ dcb->connector.entry[i].gpio_tag = 0xff; + } + } -- if (!drm_core_check_feature(dev, DRIVER_MODESET)) -- return -ENODEV; -- - if (pm_state.event == PM_EVENT_PRETHAW) - return 0; + static void + fixup_legacy_i2c(struct nvbios *bios) + { +- struct parsed_dcb *dcb = &bios->bdcb.dcb; ++ struct dcb_table *dcb = &bios->dcb; + int i; -@@ -257,9 +250,6 @@ nouveau_pci_resume(struct pci_dev *pdev) - struct drm_crtc *crtc; - int ret, i; + for (i = 0; i < dcb->entries; i++) { +@@ -5826,7 +6089,7 @@ static int load_nv17_hw_sequencer_ucode(struct 
drm_device *dev, + uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + const uint8_t edid_sig[] = { + 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; + uint16_t offset = 0; +@@ -5859,7 +6122,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table, + struct dcb_entry *dcbent) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + struct init_exec iexec = { true, false }; -- if (!drm_core_check_feature(dev, DRIVER_MODESET)) -- return -ENODEV; -- - nouveau_fbcon_save_disable_accel(dev); + mutex_lock(&bios->lock); +@@ -5872,7 +6135,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table, + static bool NVInitVBIOS(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; - NV_INFO(dev, "We're back, enabling device...\n"); -@@ -323,7 +313,6 @@ nouveau_pci_resume(struct pci_dev *pdev) + memset(bios, 0, sizeof(struct nvbios)); + mutex_init(&bios->lock); +@@ -5888,7 +6151,7 @@ static bool NVInitVBIOS(struct drm_device *dev) + static int nouveau_parse_vbios_struct(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' }; + const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 }; + int offset; +@@ -5915,7 +6178,7 @@ int + nouveau_run_vbios_init(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + int i, ret = 0; - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); -- int ret; - - ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); - if (!ret) -@@ -371,7 +360,8 @@ nouveau_pci_resume(struct pci_dev *pdev) - static struct drm_driver driver = { - .driver_features = - DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | -- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM, -+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | -+ DRIVER_MODESET, - .load = nouveau_load, - .firstopen = nouveau_firstopen, - .lastclose = nouveau_lastclose, -@@ -438,16 +428,18 @@ static int __init nouveau_init(void) - nouveau_modeset = 1; + NVLockVgaCrtcs(dev, false); +@@ -5946,9 +6209,9 @@ nouveau_run_vbios_init(struct drm_device *dev) } -- if (nouveau_modeset == 1) { -- driver.driver_features |= DRIVER_MODESET; -- nouveau_register_dsm_handler(); -- } -+ if (!nouveau_modeset) -+ return 0; + if (dev_priv->card_type >= NV_50) { +- for (i = 0; i < bios->bdcb.dcb.entries; i++) { ++ for (i = 0; i < bios->dcb.entries; i++) { + nouveau_bios_run_display_table(dev, +- &bios->bdcb.dcb.entry[i], ++ &bios->dcb.entry[i], + 0, 0); + } + } +@@ -5962,26 +6225,48 @@ static void + nouveau_bios_i2c_devices_takedown(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + struct dcb_i2c_entry *entry; + int i; -+ nouveau_register_dsm_handler(); - return drm_init(&driver); +- entry = &bios->bdcb.dcb.i2c[0]; ++ entry = &bios->dcb.i2c[0]; + for (i = 0; i < 
DCB_MAX_NUM_I2C_ENTRIES; i++, entry++) + nouveau_i2c_fini(dev, entry); } - static void __exit nouveau_exit(void) - { -+ if (!nouveau_modeset) -+ return; ++static bool ++nouveau_bios_posted(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ bool was_locked; ++ unsigned htotal; + - drm_exit(&driver); - nouveau_unregister_dsm_handler(); - } -diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h -index c697191..51ccd90 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_drv.h -+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h -@@ -123,14 +123,6 @@ nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo) - return ioptr; - } ++ if (dev_priv->chipset >= NV_50) { ++ if (NVReadVgaCrtc(dev, 0, 0x00) == 0 && ++ NVReadVgaCrtc(dev, 0, 0x1a) == 0) ++ return false; ++ return true; ++ } ++ ++ was_locked = NVLockVgaCrtcs(dev, false); ++ htotal = NVReadVgaCrtc(dev, 0, 0x06); ++ htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8; ++ htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4; ++ htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10; ++ htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11; ++ NVLockVgaCrtcs(dev, was_locked); ++ return (htotal != 0); ++} ++ + int + nouveau_bios_init(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + uint32_t saved_nv_pextdev_boot_0; + bool was_locked; + int ret; --struct mem_block { -- struct mem_block *next; -- struct mem_block *prev; -- uint64_t start; -- uint64_t size; -- struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ --}; +- dev_priv->vbios = &bios->pub; - - enum nouveau_flags { - NV_NFORCE = 0x10000000, - NV_NFORCE2 = 0x20000000 -@@ -149,7 +141,7 @@ struct nouveau_gpuobj { - struct list_head list; - - struct nouveau_channel *im_channel; -- struct mem_block *im_pramin; -+ struct drm_mm_node *im_pramin; - struct nouveau_bo *im_backing; - uint32_t im_backing_start; - uint32_t *im_backing_suspend; -@@ -196,7 +188,7 @@ struct nouveau_channel { - struct list_head pending; - uint32_t sequence; - uint32_t sequence_ack; -- uint32_t last_sequence_irq; -+ atomic_t last_sequence_irq; - } fence; + if (!NVInitVBIOS(dev)) + return -ENODEV; - /* DMA push buffer */ -@@ -206,7 +198,7 @@ struct nouveau_channel { - - /* Notifier memory */ - struct nouveau_bo *notifier_bo; -- struct mem_block *notifier_heap; -+ struct drm_mm notifier_heap; - - /* PFIFO context */ - struct nouveau_gpuobj_ref *ramfc; -@@ -224,7 +216,7 @@ struct nouveau_channel { - - /* Objects */ - struct nouveau_gpuobj_ref *ramin; /* Private instmem */ -- struct mem_block *ramin_heap; /* Private PRAMIN heap */ -+ struct drm_mm ramin_heap; /* Private PRAMIN heap */ - struct nouveau_gpuobj_ref *ramht; /* Hash table */ - struct list_head ramht_refs; /* Objects referenced by RAMHT */ - -@@ -277,8 +269,7 @@ struct nouveau_instmem_engine { - void (*clear)(struct drm_device *, struct nouveau_gpuobj *); - int (*bind)(struct drm_device *, struct nouveau_gpuobj *); - int (*unbind)(struct drm_device *, struct nouveau_gpuobj *); -- void (*prepare_access)(struct drm_device *, bool write); -- void (*finish_access)(struct drm_device *); -+ void (*flush)(struct drm_device *); - }; +@@ -6007,11 +6292,9 @@ nouveau_bios_init(struct drm_device *dev) + bios->execute = false; - struct nouveau_mc_engine { -@@ -303,10 +294,11 @@ struct nouveau_fb_engine { - }; + /* ... 
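For pre-NV50 chips, nouveau_bios_posted() above decides whether the card was POSTed by checking that the extended horizontal-total field in the VGA CRTC registers was ever programmed. The bit assembly is clearer flattened out; the register-read callback here is a stand-in for NVReadVgaCrtc(), and the sketch is illustrative only.

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for NVReadVgaCrtc(dev, head, index). */
typedef uint8_t (*crtc_read_fn)(int head, uint8_t index);

static bool crtc_htotal_programmed(crtc_read_fn rd)
{
	unsigned htotal;

	/* CR06 is the low byte; the overflow bits are scattered across
	 * CR07, CR25 and CR41, OR-ed together exactly as above. */
	htotal  = rd(0, 0x06);
	htotal |= (rd(0, 0x07) & 0x01) << 8;
	htotal |= (rd(0, 0x07) & 0x20) << 4;	/* bit 5 -> bit 9 */
	htotal |= (rd(0, 0x25) & 0x01) << 10;
	htotal |= (rd(0, 0x41) & 0x01) << 11;

	/* A zero total means the mode registers were never set up, i.e.
	 * the VBIOS never POSTed the card. */
	return htotal != 0;
}

NV50 and newer take the shorter branch shown in the hunk: CRTC registers 0x00 and 0x1a both reading zero is treated as "not POSTed".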
unless card isn't POSTed already */ +- if (dev_priv->card_type >= NV_10 && +- NVReadVgaCrtc(dev, 0, 0x00) == 0 && +- NVReadVgaCrtc(dev, 0, 0x1a) == 0) { ++ if (!nouveau_bios_posted(dev)) { + NV_INFO(dev, "Adaptor not initialised\n"); +- if (dev_priv->card_type < NV_50) { ++ if (dev_priv->card_type < NV_40) { + NV_ERROR(dev, "Unable to POST this chipset\n"); + return -ENODEV; + } +@@ -6023,10 +6306,8 @@ nouveau_bios_init(struct drm_device *dev) + bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0); - struct nouveau_fifo_engine { -- void *priv; -- - int channels; + ret = nouveau_run_vbios_init(dev); +- if (ret) { +- dev_priv->vbios = NULL; ++ if (ret) + return ret; +- } -+ struct nouveau_gpuobj_ref *playlist[2]; -+ int cur_playlist; -+ - int (*init)(struct drm_device *); - void (*takedown)(struct drm_device *); + /* feature_byte on BMP is poor, but init always sets CR4B */ + was_locked = NVLockVgaCrtcs(dev, false); +diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h +index fd94bd6..bd33a54 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_bios.h ++++ b/drivers/gpu/drm/nouveau/nouveau_bios.h +@@ -34,9 +34,73 @@ -@@ -339,10 +331,11 @@ struct nouveau_pgraph_object_class { - struct nouveau_pgraph_engine { - struct nouveau_pgraph_object_class *grclass; - bool accel_blocked; -- void *ctxprog; -- void *ctxvals; - int grctx_size; + #define DCB_LOC_ON_CHIP 0 -+ /* NV2x/NV3x context table (0x400780) */ -+ struct nouveau_gpuobj_ref *ctx_table; ++struct dcb_i2c_entry { ++ uint32_t entry; ++ uint8_t port_type; ++ uint8_t read, write; ++ struct nouveau_i2c_chan *chan; ++}; ++ ++enum dcb_gpio_tag { ++ DCB_GPIO_TVDAC0 = 0xc, ++ DCB_GPIO_TVDAC1 = 0x2d, ++}; ++ ++struct dcb_gpio_entry { ++ enum dcb_gpio_tag tag; ++ int line; ++ bool invert; ++ uint32_t entry; ++ uint8_t state_default; ++ uint8_t state[2]; ++}; ++ ++struct dcb_gpio_table { ++ int entries; ++ struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES]; ++}; ++ ++enum dcb_connector_type { ++ DCB_CONNECTOR_VGA = 0x00, ++ DCB_CONNECTOR_TV_0 = 0x10, ++ DCB_CONNECTOR_TV_1 = 0x11, ++ DCB_CONNECTOR_TV_3 = 0x13, ++ DCB_CONNECTOR_DVI_I = 0x30, ++ DCB_CONNECTOR_DVI_D = 0x31, ++ DCB_CONNECTOR_LVDS = 0x40, ++ DCB_CONNECTOR_DP = 0x46, ++ DCB_CONNECTOR_eDP = 0x47, ++ DCB_CONNECTOR_HDMI_0 = 0x60, ++ DCB_CONNECTOR_HDMI_1 = 0x61, ++ DCB_CONNECTOR_NONE = 0xff ++}; ++ ++struct dcb_connector_table_entry { ++ uint8_t index; ++ uint32_t entry; ++ enum dcb_connector_type type; ++ uint8_t index2; ++ uint8_t gpio_tag; ++ void *drm; ++}; ++ ++struct dcb_connector_table { ++ int entries; ++ struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES]; ++}; ++ ++enum dcb_type { ++ OUTPUT_ANALOG = 0, ++ OUTPUT_TV = 1, ++ OUTPUT_TMDS = 2, ++ OUTPUT_LVDS = 3, ++ OUTPUT_DP = 6, ++ OUTPUT_ANY = -1 ++}; + - int (*init)(struct drm_device *); - void (*takedown)(struct drm_device *); + struct dcb_entry { + int index; /* may not be raw dcb index if merging has happened */ +- uint8_t type; ++ enum dcb_type type; + uint8_t i2c_index; + uint8_t heads; + uint8_t connector; +@@ -71,69 +135,22 @@ struct dcb_entry { + bool i2c_upper_default; + }; + +-struct dcb_i2c_entry { +- uint8_t port_type; +- uint8_t read, write; +- struct nouveau_i2c_chan *chan; +-}; ++struct dcb_table { ++ uint8_t version; -@@ -500,11 +493,6 @@ enum nouveau_card_type { +-struct parsed_dcb { + int entries; + struct dcb_entry entry[DCB_MAX_NUM_ENTRIES]; +- struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES]; +-}; +- +-enum dcb_gpio_tag { +- DCB_GPIO_TVDAC0 = 0xc, +- 
DCB_GPIO_TVDAC1 = 0x2d, +-}; +- +-struct dcb_gpio_entry { +- enum dcb_gpio_tag tag; +- int line; +- bool invert; +-}; +- +-struct parsed_dcb_gpio { +- int entries; +- struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES]; +-}; +- +-struct dcb_connector_table_entry { +- uint32_t entry; +- uint8_t type; +- uint8_t index; +- uint8_t gpio_tag; +-}; +- +-struct dcb_connector_table { +- int entries; +- struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES]; +-}; +- +-struct bios_parsed_dcb { +- uint8_t version; +- +- struct parsed_dcb dcb; - struct drm_nouveau_private { - struct drm_device *dev; -- enum { -- NOUVEAU_CARD_INIT_DOWN, -- NOUVEAU_CARD_INIT_DONE, -- NOUVEAU_CARD_INIT_FAILED -- } init_state; + uint8_t *i2c_table; + uint8_t i2c_default_indices; ++ struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES]; - /* the card type, takes NV_* as values */ - enum nouveau_card_type card_type; -@@ -533,8 +521,6 @@ struct drm_nouveau_private { - atomic_t validate_sequence; - } ttm; + uint16_t gpio_table_ptr; +- struct parsed_dcb_gpio gpio; ++ struct dcb_gpio_table gpio; + uint16_t connector_table_ptr; + struct dcb_connector_table connector; + }; -- struct fb_info *fbdev_info; +-enum nouveau_encoder_type { +- OUTPUT_ANALOG = 0, +- OUTPUT_TV = 1, +- OUTPUT_TMDS = 2, +- OUTPUT_LVDS = 3, +- OUTPUT_DP = 6, +- OUTPUT_ANY = -1 +-}; - - int fifo_alloc_count; - struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; + enum nouveau_or { + OUTPUT_A = (1 << 0), + OUTPUT_B = (1 << 1), +@@ -190,8 +207,8 @@ struct pll_lims { + int refclk; + }; -@@ -595,11 +581,7 @@ struct drm_nouveau_private { - struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; - int vm_vram_pt_nr; +-struct nouveau_bios_info { +- struct parsed_dcb *dcb; ++struct nvbios { ++ struct drm_device *dev; -- struct mem_block *ramin_heap; + uint8_t chip_version; + +@@ -199,11 +216,6 @@ struct nouveau_bios_info { + uint32_t tvdactestval; + uint8_t digital_min_front_porch; + bool fp_no_ddc; +-}; - -- /* context table pointed to be NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */ -- uint32_t ctx_table_size; -- struct nouveau_gpuobj_ref *ctx_table; -+ struct drm_mm ramin_heap; +-struct nvbios { +- struct drm_device *dev; +- struct nouveau_bios_info pub; - struct list_head gpuobj_list; + struct mutex lock; -@@ -618,6 +600,11 @@ struct drm_nouveau_private { - struct backlight_device *backlight; +@@ -234,7 +246,7 @@ struct nvbios { + uint16_t some_script_ptr; /* BIT I + 14 */ + uint16_t init96_tbl_ptr; /* BIT I + 16 */ - struct nouveau_channel *evo; -+ struct { -+ struct dcb_entry *dcb; -+ u16 script; -+ u32 pclk; -+ } evo_irq; +- struct bios_parsed_dcb bdcb; ++ struct dcb_table dcb; struct { - struct dentry *channel_root; -@@ -652,14 +639,6 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo) - return 0; - } + int crtchead; +@@ -260,7 +272,6 @@ struct nvbios { + bool reset_after_pclk_change; + bool dual_link; + bool link_c_increment; +- bool BITbit1; + bool if_is_24bit; + int duallink_transition_clk; + uint8_t strapless_is_24bit; +diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c +index 028719f..8fac10d 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_bo.c ++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c +@@ -71,7 +71,7 @@ nouveau_bo_fixup_align(struct drm_device *dev, + * many small buffers. 
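
The nouveau_bios.h rework in these hunks folds the old nouveau_bios_info / bios_parsed_dcb / parsed_dcb layering into a single struct nvbios embedded directly in drm_nouveau_private, so DCB data is reached as dev_priv->vbios.dcb.entry[i] instead of going through the former dev_priv->VBIOS / bios->pub / bios->bdcb pointers. A reduced sketch of the new nesting, with most fields trimmed and the array size only illustrative:

#include <stdint.h>

#define DCB_MAX_NUM_ENTRIES 16          /* illustrative; the real value lives in the headers */

struct dcb_entry {                      /* trimmed */
	int index;
	uint8_t i2c_index;
	uint8_t heads;
};

struct dcb_table {                      /* replaces bios_parsed_dcb + parsed_dcb */
	uint8_t version;
	int entries;
	struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
};

struct nvbios {                         /* absorbs the old nouveau_bios_info ("pub") view */
	uint8_t chip_version;
	struct dcb_table dcb;           /* was bios->bdcb.dcb */
};

struct drm_nouveau_private {            /* trimmed */
	struct nvbios vbios;            /* embedded; the old code kept a pointer here */
};

/* The access path this flattening buys: one direct member chain instead
 * of pointer hops through the "pub" view of the BIOS. */
static struct dcb_entry *dcb_entry(struct drm_nouveau_private *dev_priv, int i)
{
	return &dev_priv->vbios.dcb.entry[i];
}
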
+ */ + if (dev_priv->card_type == NV_50) { +- uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15; ++ uint32_t block_size = dev_priv->vram_size >> 15; + int i; --#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do { \ -- struct drm_nouveau_private *nv = dev->dev_private; \ -- if (nv->init_state != NOUVEAU_CARD_INIT_DONE) { \ -- NV_ERROR(dev, "called without init\n"); \ -- return -EINVAL; \ -- } \ --} while (0) -- - #define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \ - struct drm_nouveau_private *nv = dev->dev_private; \ - if (!nouveau_channel_owner(dev, (cl), (id))) { \ -@@ -682,7 +661,6 @@ extern int nouveau_tv_disable; - extern char *nouveau_tv_norm; - extern int nouveau_reg_debug; - extern char *nouveau_vbios; --extern int nouveau_ctxfw; - extern int nouveau_ignorelid; - extern int nouveau_nofbaccel; - extern int nouveau_noaccel; -@@ -707,15 +685,7 @@ extern bool nouveau_wait_for_idle(struct drm_device *); - extern int nouveau_card_init(struct drm_device *); - - /* nouveau_mem.c */ --extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start, -- uint64_t size); --extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *, -- uint64_t size, int align2, -- struct drm_file *, int tail); --extern void nouveau_mem_takedown(struct mem_block **heap); --extern void nouveau_mem_free_block(struct mem_block *); - extern int nouveau_mem_detect(struct drm_device *dev); --extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap); - extern int nouveau_mem_init(struct drm_device *); - extern int nouveau_mem_init_agp(struct drm_device *); - extern void nouveau_mem_close(struct drm_device *); -@@ -857,11 +827,13 @@ void nouveau_register_dsm_handler(void); - void nouveau_unregister_dsm_handler(void); - int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len); - bool nouveau_acpi_rom_supported(struct pci_dev *pdev); -+int nouveau_acpi_edid(struct drm_device *, struct drm_connector *); - #else - static inline void nouveau_register_dsm_handler(void) {} - static inline void nouveau_unregister_dsm_handler(void) {} - static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; } - static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; } -+static inline int nouveau_acpi_edid(struct drm_device *, struct drm_connector *) { return -EINVAL; } - #endif + switch (tile_flags) { +@@ -153,17 +153,17 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, - /* nouveau_backlight.c */ -@@ -1035,12 +1007,6 @@ extern int nv50_graph_unload_context(struct drm_device *); - extern void nv50_graph_context_switch(struct drm_device *); - extern int nv50_grctx_init(struct nouveau_grctx *); + nvbo->placement.fpfn = 0; + nvbo->placement.lpfn = mappable ? 
dev_priv->fb_mappable_pages : 0; +- nouveau_bo_placement_set(nvbo, flags); ++ nouveau_bo_placement_set(nvbo, flags, 0); --/* nouveau_grctx.c */ --extern int nouveau_grctx_prog_load(struct drm_device *); --extern void nouveau_grctx_vals_load(struct drm_device *, -- struct nouveau_gpuobj *); --extern void nouveau_grctx_fini(struct drm_device *); -- - /* nv04_instmem.c */ - extern int nv04_instmem_init(struct drm_device *); - extern void nv04_instmem_takedown(struct drm_device *); -@@ -1051,8 +1017,7 @@ extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, - extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); - extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); - extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); --extern void nv04_instmem_prepare_access(struct drm_device *, bool write); --extern void nv04_instmem_finish_access(struct drm_device *); -+extern void nv04_instmem_flush(struct drm_device *); - - /* nv50_instmem.c */ - extern int nv50_instmem_init(struct drm_device *); -@@ -1064,8 +1029,8 @@ extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, - extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); - extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); - extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); --extern void nv50_instmem_prepare_access(struct drm_device *, bool write); --extern void nv50_instmem_finish_access(struct drm_device *); -+extern void nv50_instmem_flush(struct drm_device *); -+extern void nv50_vm_flush(struct drm_device *, int engine); - - /* nv04_mc.c */ - extern int nv04_mc_init(struct drm_device *); -@@ -1088,13 +1053,14 @@ extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd, - unsigned long arg); + nvbo->channel = chan; + ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, + ttm_bo_type_device, &nvbo->placement, align, 0, + false, NULL, size, nouveau_bo_del_ttm); +- nvbo->channel = NULL; + if (ret) { + /* ttm will call nouveau_bo_del_ttm if it fails.. 
*/ + return ret; + } ++ nvbo->channel = NULL; - /* nv04_dac.c */ --extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry); -+extern int nv04_dac_create(struct drm_connector *, struct dcb_entry *); - extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder); - extern int nv04_dac_output_offset(struct drm_encoder *encoder); - extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable); -+extern bool nv04_dac_in_use(struct drm_encoder *encoder); + spin_lock(&dev_priv->ttm.bo_list_lock); + list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list); +@@ -172,26 +172,33 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, + return 0; + } - /* nv04_dfp.c */ --extern int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry); -+extern int nv04_dfp_create(struct drm_connector *, struct dcb_entry *); - extern int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent); - extern void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent, - int head, bool dl); -@@ -1103,10 +1069,10 @@ extern void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode); - - /* nv04_tv.c */ - extern int nv04_tv_identify(struct drm_device *dev, int i2c_index); --extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry); -+extern int nv04_tv_create(struct drm_connector *, struct dcb_entry *); - - /* nv17_tv.c */ --extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry); -+extern int nv17_tv_create(struct drm_connector *, struct dcb_entry *); - - /* nv04_display.c */ - extern int nv04_display_create(struct drm_device *); -@@ -1147,7 +1113,6 @@ extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); - extern int nouveau_fence_flush(void *obj, void *arg); - extern void nouveau_fence_unref(void **obj); - extern void *nouveau_fence_ref(void *obj); --extern void nouveau_fence_handler(struct drm_device *dev, int channel); - - /* nouveau_gem.c */ - extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *, -diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h -index e1df820..a1a0d48 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_encoder.h -+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h -@@ -38,13 +38,15 @@ struct nouveau_encoder { - struct dcb_entry *dcb; - int or; - -+ /* different to drm_encoder.crtc, this reflects what's -+ * actually programmed on the hw, not the proposed crtc */ -+ struct drm_crtc *crtc; ++static void ++set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags) ++{ ++ *n = 0; + - struct drm_display_mode mode; - int last_dpms; - - struct nv04_output_reg restore; - -- void (*disconnect)(struct nouveau_encoder *encoder); -- - union { - struct { - int mc_unknown; -@@ -71,8 +73,8 @@ static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc) - - struct nouveau_connector * - nouveau_encoder_connector_get(struct nouveau_encoder *encoder); --int nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry); --int nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry); -+int nv50_sor_create(struct drm_connector *, struct dcb_entry *); -+int nv50_dac_create(struct drm_connector *, struct dcb_entry *); - - struct bit_displayport_encoder_table { - uint32_t match; -diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c -index 0a59f96..8415049 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c -+++ 
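
nouveau_bo_placement_set() is rebuilt in this hunk around a small set_placement_list() helper so the normal and busy placement lists can differ: the busy list gets the extra fallback domains passed in 'busy', and the no-evict flag is folded into the shared flags while the buffer is pinned instead of being patched into the array afterwards. A standalone sketch of the same expansion, with stand-in flag values rather than the real TTM constants:

#include <stdint.h>

/* Stand-ins for the TTM placement flags used in the hunk above; the
 * numeric values here are illustrative, not the kernel's. */
#define PL_FLAG_VRAM     (1u << 0)
#define PL_FLAG_TT       (1u << 1)
#define PL_FLAG_SYSTEM   (1u << 2)
#define PL_FLAG_NO_EVICT (1u << 3)
#define PL_MASK_CACHING  (7u << 4)

/* Expand a bitmask of memory domains into one placement entry per domain,
 * each carrying the shared flags (caching mask, NO_EVICT while pinned). */
static void set_placement_list(uint32_t *pl, unsigned *n,
			       uint32_t type, uint32_t flags)
{
	*n = 0;
	if (type & PL_FLAG_VRAM)
		pl[(*n)++] = PL_FLAG_VRAM | flags;
	if (type & PL_FLAG_TT)
		pl[(*n)++] = PL_FLAG_TT | flags;
	if (type & PL_FLAG_SYSTEM)
		pl[(*n)++] = PL_FLAG_SYSTEM | flags;
}

/* Typical use, mirroring nouveau_bo_placement_set(nvbo, type, busy):
 *   set_placement_list(normal_list, &n_normal, type,        flags);
 *   set_placement_list(busy_list,   &n_busy,   type | busy, flags);
 * so placement under memory pressure may fall back to the extra domains
 * in 'busy', e.g. VRAM preferred with TT acceptable when VRAM is tight. */
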
b/drivers/gpu/drm/nouveau/nouveau_fbcon.c -@@ -337,7 +337,7 @@ static void nouveau_fbcon_output_status_changed(struct drm_fb_helper *fb_helper) - drm_helper_fb_hotplug_event(fb_helper, true); ++ if (type & TTM_PL_FLAG_VRAM) ++ pl[(*n)++] = TTM_PL_FLAG_VRAM | flags; ++ if (type & TTM_PL_FLAG_TT) ++ pl[(*n)++] = TTM_PL_FLAG_TT | flags; ++ if (type & TTM_PL_FLAG_SYSTEM) ++ pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags; ++} ++ + void +-nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype) ++nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) + { +- int n = 0; +- +- if (memtype & TTM_PL_FLAG_VRAM) +- nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING; +- if (memtype & TTM_PL_FLAG_TT) +- nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; +- if (memtype & TTM_PL_FLAG_SYSTEM) +- nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; +- nvbo->placement.placement = nvbo->placements; +- nvbo->placement.busy_placement = nvbo->placements; +- nvbo->placement.num_placement = n; +- nvbo->placement.num_busy_placement = n; +- +- if (nvbo->pin_refcnt) { +- while (n--) +- nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT; +- } ++ struct ttm_placement *pl = &nvbo->placement; ++ uint32_t flags = TTM_PL_MASK_CACHING | ++ (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0); ++ ++ pl->placement = nvbo->placements; ++ set_placement_list(nvbo->placements, &pl->num_placement, ++ type, flags); ++ ++ pl->busy_placement = nvbo->busy_placements; ++ set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, ++ type | busy, flags); } --int -+static int - nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev) + int +@@ -199,7 +206,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) { - struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb; -@@ -392,7 +392,8 @@ int nouveau_fbcon_init(struct drm_device *dev) - dev_priv->nfbdev = nfbdev; - nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs; - -- ret = drm_fb_helper_init(dev, &nfbdev->helper, 2, 4, true); -+ ret = drm_fb_helper_init(dev, &nfbdev->helper, -+ nv_two_heads(dev) ? 
2 : 1, 4, true); - if (ret) { - kfree(nfbdev); - return ret; -diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c -index faddf53..813d853 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_fence.c -+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c -@@ -67,12 +67,13 @@ nouveau_fence_update(struct nouveau_channel *chan) - if (USE_REFCNT) - sequence = nvchan_rd32(chan, 0x48); - else -- sequence = chan->fence.last_sequence_irq; -+ sequence = atomic_read(&chan->fence.last_sequence_irq); - - if (chan->fence.sequence_ack == sequence) - return; - chan->fence.sequence_ack = sequence; + struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); + struct ttm_buffer_object *bo = &nvbo->bo; +- int ret, i; ++ int ret; -+ spin_lock(&chan->fence.lock); - list_for_each_safe(entry, tmp, &chan->fence.pending) { - fence = list_entry(entry, struct nouveau_fence, entry); + if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { + NV_ERROR(nouveau_bdev(bo->bdev)->dev, +@@ -215,9 +222,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) + if (ret) + goto out; -@@ -84,6 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan) - if (sequence == chan->fence.sequence_ack) - break; - } -+ spin_unlock(&chan->fence.lock); - } +- nouveau_bo_placement_set(nvbo, memtype); +- for (i = 0; i < nvbo->placement.num_placement; i++) +- nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT; ++ nouveau_bo_placement_set(nvbo, memtype, 0); - int -@@ -119,7 +121,6 @@ nouveau_fence_emit(struct nouveau_fence *fence) + ret = ttm_bo_validate(bo, &nvbo->placement, false, false); + if (ret == 0) { +@@ -244,7 +249,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) { - struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private; - struct nouveau_channel *chan = fence->channel; -- unsigned long flags; - int ret; + struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); + struct ttm_buffer_object *bo = &nvbo->bo; +- int ret, i; ++ int ret; - ret = RING_SPACE(chan, 2); -@@ -127,9 +128,7 @@ nouveau_fence_emit(struct nouveau_fence *fence) + if (--nvbo->pin_refcnt) + return 0; +@@ -253,8 +258,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) + if (ret) return ret; - if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) { -- spin_lock_irqsave(&chan->fence.lock, flags); - nouveau_fence_update(chan); -- spin_unlock_irqrestore(&chan->fence.lock, flags); - - BUG_ON(chan->fence.sequence == - chan->fence.sequence_ack - 1); -@@ -138,9 +137,9 @@ nouveau_fence_emit(struct nouveau_fence *fence) - fence->sequence = ++chan->fence.sequence; - - kref_get(&fence->refcount); -- spin_lock_irqsave(&chan->fence.lock, flags); -+ spin_lock(&chan->fence.lock); - list_add_tail(&fence->entry, &chan->fence.pending); -- spin_unlock_irqrestore(&chan->fence.lock, flags); -+ spin_unlock(&chan->fence.lock); +- for (i = 0; i < nvbo->placement.num_placement; i++) +- nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; ++ nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); + + ret = ttm_bo_validate(bo, &nvbo->placement, false, false); + if (ret == 0) { +@@ -395,8 +399,8 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, + man->io_addr = NULL; + man->io_offset = drm_get_resource_start(dev, 1); + man->io_size = drm_get_resource_len(dev, 1); +- if (man->io_size > nouveau_mem_fb_amount(dev)) +- man->io_size = nouveau_mem_fb_amount(dev); ++ if (man->io_size > dev_priv->vram_size) ++ man->io_size = dev_priv->vram_size; + + man->gpu_offset = dev_priv->vm_vram_base; + break; +@@ -439,11 
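
The nouveau_fence.c changes in this patch make chan->fence.last_sequence_irq an atomic_t so the interrupt path only has to publish the latest sequence with atomic_set(), the dedicated nouveau_fence_handler() goes away, and nouveau_fence_update() walks the pending list under a plain spin_lock() rather than the irqsave variants. A stripped-down sketch of that split between IRQ side and process context (names and the retire loop are simplified here):

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/atomic.h>

/* Trimmed stand-in for the fence bookkeeping inside struct nouveau_channel. */
struct fence_state {
	spinlock_t lock;
	struct list_head pending;
	u32 sequence_ack;
	atomic_t last_sequence_irq;     /* written by the IRQ handler */
};

/* IRQ side: no lock needed, just publish the sequence that completed. */
static void fence_irq_seen(struct fence_state *f, u32 seq)
{
	atomic_set(&f->last_sequence_irq, seq);
}

/* Process context: pick up the published sequence, then retire fences.
 * Only the pending list itself still needs the spinlock. */
static void fence_update(struct fence_state *f)
{
	u32 seq = atomic_read(&f->last_sequence_irq);

	if (f->sequence_ack == seq)
		return;
	f->sequence_ack = seq;

	spin_lock(&f->lock);
	/* walk f->pending and signal every fence up to 'seq' here */
	spin_unlock(&f->lock);
}
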
+443,11 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) - BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1); - OUT_RING(chan, fence->sequence); -@@ -173,14 +172,11 @@ nouveau_fence_signalled(void *sync_obj, void *sync_arg) - { - struct nouveau_fence *fence = nouveau_fence(sync_obj); - struct nouveau_channel *chan = fence->channel; -- unsigned long flags; + switch (bo->mem.mem_type) { + case TTM_PL_VRAM: +- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT | ++ nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, + TTM_PL_FLAG_SYSTEM); + break; + default: +- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM); ++ nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0); + break; + } - if (fence->signalled) - return true; +diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c +index ee2b845..88f9bc0 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_calc.c ++++ b/drivers/gpu/drm/nouveau/nouveau_calc.c +@@ -274,7 +274,7 @@ getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk, + * returns calculated clock + */ + struct drm_nouveau_private *dev_priv = dev->dev_private; +- int cv = dev_priv->vbios->chip_version; ++ int cv = dev_priv->vbios.chip_version; + int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq; + int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m; + int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n; +@@ -373,7 +373,7 @@ getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk, + * returns calculated clock + */ + struct drm_nouveau_private *dev_priv = dev->dev_private; +- int chip_version = dev_priv->vbios->chip_version; ++ int chip_version = dev_priv->vbios.chip_version; + int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq; + int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq; + int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq; +diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c +index adac0f8..f9b2acf 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_channel.c ++++ b/drivers/gpu/drm/nouveau/nouveau_channel.c +@@ -142,7 +142,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, + GFP_KERNEL); + if (!dev_priv->fifos[channel]) + return -ENOMEM; +- dev_priv->fifo_alloc_count++; + chan = dev_priv->fifos[channel]; + INIT_LIST_HEAD(&chan->nvsw.vbl_wait); + INIT_LIST_HEAD(&chan->fence.pending); +@@ -258,9 +257,7 @@ nouveau_channel_free(struct nouveau_channel *chan) + nouveau_debugfs_channel_fini(chan); + /* Give outstanding push buffers a chance to complete */ - spin_lock_irqsave(&chan->fence.lock, flags); nouveau_fence_update(chan); - spin_unlock_irqrestore(&chan->fence.lock, flags); - return fence->signalled; - } - -@@ -221,27 +217,12 @@ nouveau_fence_flush(void *sync_obj, void *sync_arg) - return 0; - } - --void --nouveau_fence_handler(struct drm_device *dev, int channel) --{ -- struct drm_nouveau_private *dev_priv = dev->dev_private; -- struct nouveau_channel *chan = NULL; -- -- if (channel >= 0 && channel < dev_priv->engine.fifo.channels) -- chan = dev_priv->fifos[channel]; -- -- if (chan) { -- spin_lock_irq(&chan->fence.lock); -- nouveau_fence_update(chan); -- spin_unlock_irq(&chan->fence.lock); -- } --} -- - int - nouveau_fence_init(struct nouveau_channel *chan) - { - INIT_LIST_HEAD(&chan->fence.pending); - spin_lock_init(&chan->fence.lock); -+ atomic_set(&chan->fence.last_sequence_irq, 0); - return 0; - } + if 
(chan->fence.sequence != chan->fence.sequence_ack) { + struct nouveau_fence *fence = NULL; -diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c -index 69c76cf..547f2c2 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_gem.c -+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c -@@ -137,8 +137,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, - uint32_t flags = 0; - int ret = 0; +@@ -280,9 +277,18 @@ nouveau_channel_free(struct nouveau_channel *chan) + */ + nouveau_fence_fini(chan); -- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; -- - if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) - dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping; +- /* Ensure the channel is no longer active on the GPU */ ++ /* This will prevent pfifo from switching channels. */ + pfifo->reassign(dev, false); -@@ -577,10 +575,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, - struct drm_nouveau_gem_pushbuf_bo *bo; - struct nouveau_channel *chan; - struct validate_op op; -- struct nouveau_fence *fence = 0; -+ struct nouveau_fence *fence = NULL; - int i, j, ret = 0, do_reloc = 0; ++ /* We want to give pgraph a chance to idle and get rid of all potential ++ * errors. We need to do this before the lock, otherwise the irq handler ++ * is unable to process them. ++ */ ++ if (pgraph->channel(dev) == chan) ++ nouveau_wait_for_idle(dev); ++ ++ spin_lock_irqsave(&dev_priv->context_switch_lock, flags); ++ + pgraph->fifo_access(dev, false); + if (pgraph->channel(dev) == chan) + pgraph->unload_context(dev); +@@ -298,6 +304,8 @@ nouveau_channel_free(struct nouveau_channel *chan) -- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; - NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan); + pfifo->reassign(dev, true); - req->vram_available = dev_priv->fb_aper_free; -@@ -760,8 +757,6 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, - bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT); - int ret = -EINVAL; ++ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); ++ + /* Release the channel's resources */ + nouveau_gpuobj_ref_del(dev, &chan->pushbuf); + if (chan->pushbuf_bo) { +@@ -310,7 +318,6 @@ nouveau_channel_free(struct nouveau_channel *chan) + iounmap(chan->user); + + dev_priv->fifos[chan->id] = NULL; +- dev_priv->fifo_alloc_count--; + kfree(chan); + } -- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; -- - gem = drm_gem_object_lookup(dev, file_priv, req->handle); - if (!gem) - return ret; -@@ -800,8 +795,6 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data, - struct nouveau_bo *nvbo; - int ret = -EINVAL; +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c +index a378bc3..fb51958 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.c ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c +@@ -218,7 +218,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector, + connector->interlace_allowed = true; + } -- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; -- - gem = drm_gem_object_lookup(dev, file_priv, req->handle); - if (!gem) - return ret; -@@ -827,8 +820,6 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data, - struct drm_gem_object *gem; - int ret; +- if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) { ++ if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) { + drm_connector_property_set_value(connector, + dev->mode_config.dvi_i_subconnector_property, + nv_encoder->dcb->type == OUTPUT_TMDS ? 
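
The reordered teardown in nouveau_channel_free() above is easy to miss: PFIFO reassignment is switched off first, PGRAPH is then allowed to drain while interrupts are still live (the new comment notes the IRQ handler must still be able to report PGRAPH errors), and only after that is the IRQ-excluding context_switch_lock taken to unload the contexts. A compressed sketch of just that ordering, using the engine hooks named in the hunk and omitting the push-buffer and fence draining that come before it:

#include "drmP.h"
#include "nouveau_drv.h"

/* Sketch only: the hooks and fields are the ones referenced in the hunk
 * above; error handling and resource release are left out. */
static void channel_teardown_order(struct drm_device *dev,
				   struct drm_nouveau_private *dev_priv,
				   struct nouveau_channel *chan)
{
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	unsigned long flags;

	/* 1. Stop PFIFO from switching to this channel again. */
	pfifo->reassign(dev, false);

	/* 2. Let PGRAPH go idle *before* disabling interrupts, so the IRQ
	 *    handler can still process any pending PGRAPH errors. */
	if (pgraph->channel(dev) == chan)
		nouveau_wait_for_idle(dev);

	/* 3. Now exclude the IRQ handler and unload the contexts. */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	pgraph->fifo_access(dev, false);
	if (pgraph->channel(dev) == chan)
		pgraph->unload_context(dev);
	pgraph->fifo_access(dev, true);

	pfifo->reassign(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
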
+@@ -236,19 +236,6 @@ nouveau_connector_detect(struct drm_connector *connector) + struct nouveau_i2c_chan *i2c; + int type, flags; -- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; -- - gem = drm_gem_object_lookup(dev, file_priv, req->handle); - if (!gem) - return -EINVAL; -diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c -deleted file mode 100644 -index f731c5f..0000000 ---- a/drivers/gpu/drm/nouveau/nouveau_grctx.c -+++ /dev/null -@@ -1,160 +0,0 @@ --/* -- * Copyright 2009 Red Hat Inc. -- * -- * Permission is hereby granted, free of charge, to any person obtaining a -- * copy of this software and associated documentation files (the "Software"), -- * to deal in the Software without restriction, including without limitation -- * the rights to use, copy, modify, merge, publish, distribute, sublicense, -- * and/or sell copies of the Software, and to permit persons to whom the -- * Software is furnished to do so, subject to the following conditions: -- * -- * The above copyright notice and this permission notice shall be included in -- * all copies or substantial portions of the Software. -- * -- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -- * OTHER DEALINGS IN THE SOFTWARE. -- * -- * Authors: Ben Skeggs -- */ -- --#include --#include -- --#include "drmP.h" --#include "nouveau_drv.h" -- --struct nouveau_ctxprog { -- uint32_t signature; -- uint8_t version; -- uint16_t length; -- uint32_t data[]; --} __attribute__ ((packed)); -- --struct nouveau_ctxvals { -- uint32_t signature; -- uint8_t version; -- uint32_t length; -- struct { -- uint32_t offset; -- uint32_t value; -- } data[]; --} __attribute__ ((packed)); -- --int --nouveau_grctx_prog_load(struct drm_device *dev) --{ -- struct drm_nouveau_private *dev_priv = dev->dev_private; -- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; -- const int chipset = dev_priv->chipset; -- const struct firmware *fw; -- const struct nouveau_ctxprog *cp; -- const struct nouveau_ctxvals *cv; -- char name[32]; -- int ret, i; -- -- if (pgraph->accel_blocked) -- return -ENODEV; -- -- if (!pgraph->ctxprog) { -- sprintf(name, "nouveau/nv%02x.ctxprog", chipset); -- ret = request_firmware(&fw, name, &dev->pdev->dev); -- if (ret) { -- NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset); -- return ret; -- } -- -- pgraph->ctxprog = kmemdup(fw->data, fw->size, GFP_KERNEL); -- if (!pgraph->ctxprog) { -- NV_ERROR(dev, "OOM copying ctxprog\n"); -- release_firmware(fw); -- return -ENOMEM; -- } -- -- cp = pgraph->ctxprog; -- if (le32_to_cpu(cp->signature) != 0x5043564e || -- cp->version != 0 || -- le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) { -- NV_ERROR(dev, "ctxprog invalid\n"); -- release_firmware(fw); -- nouveau_grctx_fini(dev); -- return -EINVAL; -- } -- release_firmware(fw); -- } -- -- if (!pgraph->ctxvals) { -- sprintf(name, "nouveau/nv%02x.ctxvals", chipset); -- ret = request_firmware(&fw, name, &dev->pdev->dev); -- if (ret) { -- NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset); -- nouveau_grctx_fini(dev); -- return ret; -- } -- -- pgraph->ctxvals = kmemdup(fw->data, fw->size, GFP_KERNEL); -- if 
(!pgraph->ctxvals) { -- NV_ERROR(dev, "OOM copying ctxvals\n"); -- release_firmware(fw); -- nouveau_grctx_fini(dev); -- return -ENOMEM; -- } -- -- cv = (void *)pgraph->ctxvals; -- if (le32_to_cpu(cv->signature) != 0x5643564e || -- cv->version != 0 || -- le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) { -- NV_ERROR(dev, "ctxvals invalid\n"); -- release_firmware(fw); -- nouveau_grctx_fini(dev); -- return -EINVAL; -- } -- release_firmware(fw); -- } -- -- cp = pgraph->ctxprog; -- -- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); -- for (i = 0; i < le16_to_cpu(cp->length); i++) -- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, -- le32_to_cpu(cp->data[i])); -- -- return 0; --} -- --void --nouveau_grctx_fini(struct drm_device *dev) --{ -- struct drm_nouveau_private *dev_priv = dev->dev_private; -- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; -- -- if (pgraph->ctxprog) { -- kfree(pgraph->ctxprog); -- pgraph->ctxprog = NULL; -- } +- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) +- nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); +- if (nv_encoder && nv_connector->native_mode) { +- unsigned status = connector_status_connected; - -- if (pgraph->ctxvals) { -- kfree(pgraph->ctxprog); -- pgraph->ctxvals = NULL; +-#ifdef CONFIG_ACPI +- if (!nouveau_ignorelid && !acpi_lid_open()) +- status = connector_status_unknown; +-#endif +- nouveau_connector_set_encoder(connector, nv_encoder); +- return status; - } --} -- --void --nouveau_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx) --{ -- struct drm_nouveau_private *dev_priv = dev->dev_private; -- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; -- struct nouveau_ctxvals *cv = pgraph->ctxvals; -- int i; - -- if (!cv) -- return; -- -- for (i = 0; i < le32_to_cpu(cv->length); i++) -- nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset), -- le32_to_cpu(cv->data[i].value)); --} -diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c -index c1fd42b..09db6f6 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_mem.c -+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c -@@ -35,162 +35,6 @@ - #include "drm_sarea.h" - #include "nouveau_drv.h" + /* Cleanup the previous EDID block. */ + if (nv_connector->edid) { + drm_mode_connector_update_edid_property(connector, NULL); +@@ -281,7 +268,7 @@ nouveau_connector_detect(struct drm_connector *connector) + * same i2c channel so the value returned from ddc_detect + * isn't necessarily correct. 
+ */ +- if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) { ++ if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) { + if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL) + type = OUTPUT_TMDS; + else +@@ -302,7 +289,7 @@ nouveau_connector_detect(struct drm_connector *connector) --static struct mem_block * --split_block(struct mem_block *p, uint64_t start, uint64_t size, -- struct drm_file *file_priv) --{ -- /* Maybe cut off the start of an existing block */ -- if (start > p->start) { -- struct mem_block *newblock = -- kmalloc(sizeof(*newblock), GFP_KERNEL); -- if (!newblock) -- goto out; -- newblock->start = start; -- newblock->size = p->size - (start - p->start); -- newblock->file_priv = NULL; -- newblock->next = p->next; -- newblock->prev = p; -- p->next->prev = newblock; -- p->next = newblock; -- p->size -= newblock->size; -- p = newblock; -- } -- -- /* Maybe cut off the end of an existing block */ -- if (size < p->size) { -- struct mem_block *newblock = -- kmalloc(sizeof(*newblock), GFP_KERNEL); -- if (!newblock) -- goto out; -- newblock->start = start + size; -- newblock->size = p->size - size; -- newblock->file_priv = NULL; -- newblock->next = p->next; -- newblock->prev = p; -- p->next->prev = newblock; -- p->next = newblock; -- p->size = size; -- } -- --out: -- /* Our block is in the middle */ -- p->file_priv = file_priv; -- return p; --} -- --struct mem_block * --nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size, -- int align2, struct drm_file *file_priv, int tail) --{ -- struct mem_block *p; -- uint64_t mask = (1 << align2) - 1; -- -- if (!heap) -- return NULL; -- -- if (tail) { -- list_for_each_prev(p, heap) { -- uint64_t start = ((p->start + p->size) - size) & ~mask; -- -- if (p->file_priv == NULL && start >= p->start && -- start + size <= p->start + p->size) -- return split_block(p, start, size, file_priv); -- } -- } else { -- list_for_each(p, heap) { -- uint64_t start = (p->start + mask) & ~mask; -- -- if (p->file_priv == NULL && -- start + size <= p->start + p->size) -- return split_block(p, start, size, file_priv); -- } -- } -- -- return NULL; --} -- --void nouveau_mem_free_block(struct mem_block *p) --{ -- p->file_priv = NULL; -- -- /* Assumes a single contiguous range. Needs a special file_priv in -- * 'heap' to stop it being subsumed. -- */ -- if (p->next->file_priv == NULL) { -- struct mem_block *q = p->next; -- p->size += q->size; -- p->next = q->next; -- p->next->prev = p; -- kfree(q); -- } -- -- if (p->prev->file_priv == NULL) { -- struct mem_block *q = p->prev; -- q->size += p->size; -- q->next = p->next; -- q->next->prev = q; -- kfree(p); -- } --} -- --/* Initialize. How to check for an uninitialized heap? 
-- */ --int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start, -- uint64_t size) --{ -- struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL); -- -- if (!blocks) -- return -ENOMEM; -- -- *heap = kmalloc(sizeof(**heap), GFP_KERNEL); -- if (!*heap) { -- kfree(blocks); -- return -ENOMEM; -- } -- -- blocks->start = start; -- blocks->size = size; -- blocks->file_priv = NULL; -- blocks->next = blocks->prev = *heap; -- -- memset(*heap, 0, sizeof(**heap)); -- (*heap)->file_priv = (struct drm_file *) -1; -- (*heap)->next = (*heap)->prev = blocks; -- return 0; --} -- --/* -- * Free all blocks associated with the releasing file_priv -- */ --void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap) --{ -- struct mem_block *p; -- -- if (!heap || !heap->next) -- return; -- -- list_for_each(p, heap) { -- if (p->file_priv == file_priv) -- p->file_priv = NULL; -- } -- -- /* Assumes a single contiguous range. Needs a special file_priv in -- * 'heap' to stop it being subsumed. -- */ -- list_for_each(p, heap) { -- while ((p->file_priv == NULL) && -- (p->next->file_priv == NULL) && -- (p->next != heap)) { -- struct mem_block *q = p->next; -- p->size += q->size; -- p->next = q->next; -- p->next->prev = p; -- kfree(q); -- } -- } --} -- - /* - * NV10-NV40 tiling helpers - */ -@@ -299,7 +143,6 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, - phys |= 0x30; - } - -- dev_priv->engine.instmem.prepare_access(dev, true); - while (size) { - unsigned offset_h = upper_32_bits(phys); - unsigned offset_l = lower_32_bits(phys); -@@ -331,36 +174,12 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, - } - } - } -- dev_priv->engine.instmem.finish_access(dev); -- -- nv_wr32(dev, 0x100c80, 0x00050001); -- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { -- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); -- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); -- return -EBUSY; -- } -- -- nv_wr32(dev, 0x100c80, 0x00000001); -- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { -- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); -- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); -- return -EBUSY; -- } -- -- nv_wr32(dev, 0x100c80, 0x00040001); -- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { -- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); -- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); -- return -EBUSY; -- } -- -- nv_wr32(dev, 0x100c80, 0x00060001); -- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { -- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); -- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); -- return -EBUSY; -- } -+ dev_priv->engine.instmem.flush(dev); - -+ nv50_vm_flush(dev, 5); -+ nv50_vm_flush(dev, 0); -+ nv50_vm_flush(dev, 4); -+ nv50_vm_flush(dev, 6); - return 0; - } - -@@ -374,7 +193,6 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) - virt -= dev_priv->vm_vram_base; - pages = (size >> 16) << 1; - -- dev_priv->engine.instmem.prepare_access(dev, true); - while (pages) { - pgt = dev_priv->vm_vram_pt[virt >> 29]; - pte = (virt & 0x1ffe0000ULL) >> 15; -@@ -388,57 +206,19 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) - while (pte < end) - nv_wo32(dev, pgt, pte++, 0); - } -- dev_priv->engine.instmem.finish_access(dev); -+ dev_priv->engine.instmem.flush(dev); - -- nv_wr32(dev, 0x100c80, 0x00050001); -- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { -- NV_ERROR(dev, "timeout: (0x100c80 
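
In the nv50_mem_vm_bind_linear()/nv50_mem_vm_unbind() hunks here, the open-coded flush sequences (write 0x000N0001 to register 0x100c80, then poll bit 0) are replaced by the instmem flush() hook plus nv50_vm_flush() calls for engines 5, 0, 4 and 6. The helper itself is not shown in this part of the patch; the sketch below is only an inference from the deleted register writes, whose written value is (engine << 16) | 1:

#include "drmP.h"
#include "nouveau_drv.h"

/* Inferred shape of a per-engine VM flush, matching the sequences the
 * hunks delete; the real nv50_vm_flush() lives elsewhere in the driver. */
static void nv50_vm_flush_sketch(struct drm_device *dev, int engine)
{
	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
	if (!nv_wait(0x100c80, 0x00000001, 0x00000000))
		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
}
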
& 1) == 0 (2)\n"); -- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); -- return; -- } -- -- nv_wr32(dev, 0x100c80, 0x00000001); -- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { -- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); -- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); -- return; -- } -- -- nv_wr32(dev, 0x100c80, 0x00040001); -- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { -- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); -- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); -- return; -- } -- -- nv_wr32(dev, 0x100c80, 0x00060001); -- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { -- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); -- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); -- } -+ nv50_vm_flush(dev, 5); -+ nv50_vm_flush(dev, 0); -+ nv50_vm_flush(dev, 4); -+ nv50_vm_flush(dev, 6); + detect_analog: + nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); +- if (!nv_encoder) ++ if (!nv_encoder && !nouveau_tv_disable) + nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); + if (nv_encoder) { + struct drm_encoder *encoder = to_drm_encoder(nv_encoder); +@@ -320,14 +307,75 @@ detect_analog: + return connector_status_disconnected; } - /* - * Cleanup everything - */ --void nouveau_mem_takedown(struct mem_block **heap) --{ -- struct mem_block *p; -- -- if (!*heap) -- return; -- -- for (p = (*heap)->next; p != *heap;) { -- struct mem_block *q = p; -- p = p->next; -- kfree(q); -- } -- -- kfree(*heap); -- *heap = NULL; --} -- --void nouveau_mem_close(struct drm_device *dev) -+void -+nouveau_mem_close(struct drm_device *dev) ++static enum drm_connector_status ++nouveau_connector_detect_lvds(struct drm_connector *connector) ++{ ++ struct drm_device *dev = connector->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_connector *nv_connector = nouveau_connector(connector); ++ struct nouveau_encoder *nv_encoder = NULL; ++ enum drm_connector_status status = connector_status_disconnected; ++ ++ /* Cleanup the previous EDID block. */ ++ if (nv_connector->edid) { ++ drm_mode_connector_update_edid_property(connector, NULL); ++ kfree(nv_connector->edid); ++ nv_connector->edid = NULL; ++ } ++ ++ nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); ++ if (!nv_encoder) ++ return connector_status_disconnected; ++ ++ if (!dev_priv->vbios.fp_no_ddc) { ++ status = nouveau_connector_detect(connector); ++ if (status == connector_status_connected) ++ goto out; ++ } ++ ++ /* If no EDID found above, and the VBIOS indicates a hardcoded ++ * modeline is avalilable for the panel, set it as the panel's ++ * native mode and exit. ++ */ ++ if (nouveau_bios_fp_mode(dev, NULL) && (dev_priv->vbios.fp_no_ddc || ++ nv_encoder->dcb->lvdsconf.use_straps_for_mode)) { ++ status = connector_status_connected; ++ goto out; ++ } ++ ++ /* Still nothing, some VBIOS images have a hardcoded EDID block ++ * stored for the panel stored in them. 
++ */ ++ if (!dev_priv->vbios.fp_no_ddc) { ++ struct edid *edid = ++ (struct edid *)nouveau_bios_embedded_edid(dev); ++ if (edid) { ++ nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL); ++ *(nv_connector->edid) = *edid; ++ status = connector_status_connected; ++ } ++ } ++ ++out: ++#ifdef CONFIG_ACPI ++ if (status == connector_status_connected && ++ !nouveau_ignorelid && !acpi_lid_open()) ++ status = connector_status_unknown; ++#endif ++ ++ drm_mode_connector_update_edid_property(connector, nv_connector->edid); ++ nouveau_connector_set_encoder(connector, nv_encoder); ++ return status; ++} ++ + static void + nouveau_connector_force(struct drm_connector *connector) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - -@@ -449,8 +229,7 @@ void nouveau_mem_close(struct drm_device *dev) - - nouveau_ttm_global_release(dev_priv); +- struct drm_device *dev = connector->dev; ++ struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder; + int type; -- if (drm_core_has_AGP(dev) && dev->agp && -- drm_core_check_feature(dev, DRIVER_MODESET)) { -+ if (drm_core_has_AGP(dev) && dev->agp) { - struct drm_agp_mem *entry, *tempe; +- if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) { ++ if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) { + if (connector->force == DRM_FORCE_ON_DIGITAL) + type = OUTPUT_TMDS; + else +@@ -337,7 +385,7 @@ nouveau_connector_force(struct drm_connector *connector) + + nv_encoder = find_encoder_by_type(connector, type); + if (!nv_encoder) { +- NV_ERROR(dev, "can't find encoder to force %s on!\n", ++ NV_ERROR(connector->dev, "can't find encoder to force %s on!\n", + drm_get_connector_name(connector)); + connector->status = connector_status_disconnected; + return; +@@ -371,7 +419,7 @@ nouveau_connector_set_property(struct drm_connector *connector, + } - /* Remove AGP resources, but leave dev->agp -@@ -470,10 +249,10 @@ void nouveau_mem_close(struct drm_device *dev) - dev->agp->enabled = 0; - } + /* LVDS always needs gpu scaling */ +- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && ++ if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS && + value == DRM_MODE_SCALE_NONE) + return -EINVAL; -- if (dev_priv->fb_mtrr) { -+ if (dev_priv->fb_mtrr >= 0) { - drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1), - drm_get_resource_len(dev, 1), DRM_MTRR_WC); -- dev_priv->fb_mtrr = 0; -+ dev_priv->fb_mtrr = -1; - } +@@ -431,24 +479,27 @@ nouveau_connector_set_property(struct drm_connector *connector, } -@@ -536,12 +315,18 @@ nouveau_mem_detect(struct drm_device *dev) - } else - if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { - dev_priv->vram_size = nouveau_mem_detect_nforce(dev); -- } else { -+ } else -+ if (dev_priv->card_type < NV_50) { - dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); - dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK; -- if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) -+ } else { -+ dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); -+ dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; -+ dev_priv->vram_size &= 0xffffffff00ll; -+ if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) { - dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10); - dev_priv->vram_sys_base <<= 12; -+ } - } - - NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); -diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c -index 9537f3e..3ec181f 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_notifier.c -+++ 
b/drivers/gpu/drm/nouveau/nouveau_notifier.c -@@ -55,7 +55,7 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) - if (ret) - goto out_err; - -- ret = nouveau_mem_init_heap(&chan->notifier_heap, 0, ntfy->bo.mem.size); -+ ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size); - if (ret) - goto out_err; - -@@ -80,7 +80,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan) - nouveau_bo_unpin(chan->notifier_bo); - mutex_unlock(&dev->struct_mutex); - drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); -- nouveau_mem_takedown(&chan->notifier_heap); -+ drm_mm_takedown(&chan->notifier_heap); - } + static struct drm_display_mode * +-nouveau_connector_native_mode(struct nouveau_connector *connector) ++nouveau_connector_native_mode(struct drm_connector *connector) + { +- struct drm_device *dev = connector->base.dev; ++ struct drm_connector_helper_funcs *helper = connector->helper_private; ++ struct nouveau_connector *nv_connector = nouveau_connector(connector); ++ struct drm_device *dev = connector->dev; + struct drm_display_mode *mode, *largest = NULL; + int high_w = 0, high_h = 0, high_v = 0; - static void -@@ -90,7 +90,7 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev, - NV_DEBUG(dev, "\n"); +- /* Use preferred mode if there is one.. */ +- list_for_each_entry(mode, &connector->base.probed_modes, head) { ++ list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { ++ if (helper->mode_valid(connector, mode) != MODE_OK) ++ continue; ++ ++ /* Use preferred mode if there is one.. */ + if (mode->type & DRM_MODE_TYPE_PREFERRED) { + NV_DEBUG_KMS(dev, "native mode from preferred\n"); + return drm_mode_duplicate(dev, mode); + } +- } - if (gpuobj->priv) -- nouveau_mem_free_block(gpuobj->priv); -+ drm_mm_put_block(gpuobj->priv); - } +- /* Otherwise, take the resolution with the largest width, then height, +- * then vertical refresh +- */ +- list_for_each_entry(mode, &connector->base.probed_modes, head) { ++ /* Otherwise, take the resolution with the largest width, then ++ * height, then vertical refresh ++ */ + if (mode->hdisplay < high_w) + continue; - int -@@ -100,18 +100,13 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, - struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *nobj = NULL; -- struct mem_block *mem; -+ struct drm_mm_node *mem; - uint32_t offset; - int target, ret; - -- if (!chan->notifier_heap) { -- NV_ERROR(dev, "Channel %d doesn't have a notifier heap!\n", -- chan->id); -- return -EINVAL; -- } -- -- mem = nouveau_mem_alloc_block(chan->notifier_heap, size, 0, -- (struct drm_file *)-2, 0); -+ mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0); -+ if (mem) -+ mem = drm_mm_get_block(mem, size, 0); - if (!mem) { - NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); - return -ENOMEM; -@@ -144,17 +139,17 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, - mem->size, NV_DMA_ACCESS_RW, target, - &nobj); - if (ret) { -- nouveau_mem_free_block(mem); -+ drm_mm_put_block(mem); - NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret); - return ret; - } -- nobj->dtor = nouveau_notifier_gpuobj_dtor; -- nobj->priv = mem; -+ nobj->dtor = nouveau_notifier_gpuobj_dtor; -+ nobj->priv = mem; +@@ -530,21 +581,28 @@ static int + nouveau_connector_get_modes(struct drm_connector *connector) + { + struct drm_device *dev = connector->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_connector 
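
The nouveau_notifier.c hunks above are representative of how the whole patch retires the driver's private mem_block allocator: per-channel heaps become struct drm_mm, allocations go through drm_mm_search_free() followed by drm_mm_get_block(), frees become drm_mm_put_block(), and drm_mm_init()/drm_mm_takedown() bracket the heap's lifetime. A small sketch of that allocation pattern, mirroring the calls used in the hunks (heap, size and alignment are whatever the caller manages; alignment 0 means no constraint, as in the notifier code):

#include "drmP.h"

/* Search for a large-enough hole first, then actually claim it.  Returns
 * NULL when the heap is exhausted, which callers report as -ENOMEM. */
static struct drm_mm_node *
heap_alloc(struct drm_mm *heap, unsigned long size, unsigned alignment)
{
	struct drm_mm_node *node;

	node = drm_mm_search_free(heap, size, alignment, 0);
	if (!node)
		return NULL;
	return drm_mm_get_block(node, size, alignment);
}

/* Lifetime: drm_mm_init(heap, start, size) once per channel,
 * heap_alloc()/drm_mm_put_block() per object, and drm_mm_takedown(heap)
 * when the channel is destroyed. */
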
*nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; ++ struct drm_display_mode mode; + int ret = 0; - ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL); - if (ret) { - nouveau_gpuobj_del(dev, &nobj); -- nouveau_mem_free_block(mem); -+ drm_mm_put_block(mem); - NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret); - return ret; +- /* If we're not LVDS, destroy the previous native mode, the attached +- * monitor could have changed. ++ /* destroy the native mode, the attached monitor could have changed. + */ +- if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && +- nv_connector->native_mode) { ++ if (nv_connector->native_mode) { + drm_mode_destroy(dev, nv_connector->native_mode); + nv_connector->native_mode = NULL; } -@@ -170,7 +165,7 @@ nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset) - return -EINVAL; - if (poffset) { -- struct mem_block *mem = nobj->priv; -+ struct drm_mm_node *mem = nobj->priv; + if (nv_connector->edid) + ret = drm_add_edid_modes(connector, nv_connector->edid); ++ else ++ if (nv_encoder->dcb->type == OUTPUT_LVDS && ++ (nv_encoder->dcb->lvdsconf.use_straps_for_mode || ++ dev_priv->vbios.fp_no_ddc) && ++ nouveau_bios_fp_mode(dev, &mode)) { ++ nv_connector->native_mode = drm_mode_duplicate(dev, &mode); ++ } - if (*poffset >= mem->size) - return false; -@@ -189,7 +184,6 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, - struct nouveau_channel *chan; - int ret; + /* Find the native mode if this is a digital panel, if we didn't + * find any modes through DDC previously add the native mode to +@@ -552,7 +610,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) + */ + if (!nv_connector->native_mode) + nv_connector->native_mode = +- nouveau_connector_native_mode(nv_connector); ++ nouveau_connector_native_mode(connector); + if (ret == 0 && nv_connector->native_mode) { + struct drm_display_mode *mode; + +@@ -565,7 +623,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) + ret = get_slave_funcs(nv_encoder)-> + get_modes(to_drm_encoder(nv_encoder), connector); -- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; - NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan); +- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ++ if (nv_encoder->dcb->type == OUTPUT_LVDS) + ret += nouveau_connector_scaler_modes_add(connector); - ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); -diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c -index e7c100b..4bf6b33 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_object.c -+++ b/drivers/gpu/drm/nouveau/nouveau_object.c -@@ -132,7 +132,6 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) - } - } + return ret; +@@ -583,9 +641,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector, -- instmem->prepare_access(dev, true); - co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); - do { - if (!nouveau_ramht_entry_valid(dev, ramht, co)) { -@@ -143,7 +142,7 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) - nv_wo32(dev, ramht, (co + 4)/4, ctx); + switch (nv_encoder->dcb->type) { + case OUTPUT_LVDS: +- BUG_ON(!nv_connector->native_mode); +- if (mode->hdisplay > nv_connector->native_mode->hdisplay || +- mode->vdisplay > nv_connector->native_mode->vdisplay) ++ if (nv_connector->native_mode && ++ (mode->hdisplay > nv_connector->native_mode->hdisplay || ++ 
mode->vdisplay > nv_connector->native_mode->vdisplay)) + return MODE_PANEL; + + min_clock = 0; +@@ -593,8 +651,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector, + break; + case OUTPUT_TMDS: + if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) || +- (dev_priv->card_type < NV_50 && +- !nv_encoder->dcb->duallink_possible)) ++ !nv_encoder->dcb->duallink_possible) + max_clock = 165000; + else + max_clock = 330000; +@@ -615,6 +672,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector, - list_add_tail(&ref->list, &chan->ramht_refs); -- instmem->finish_access(dev); -+ instmem->flush(dev); - return 0; - } - NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n", -@@ -153,7 +152,6 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) - if (co >= dev_priv->ramht_size) - co = 0; - } while (co != ho); -- instmem->finish_access(dev); - - NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id); - return -ENOMEM; -@@ -173,7 +171,6 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) - return; + clock *= 3; + break; ++ default: ++ BUG_ON(1); ++ return MODE_BAD; } -- instmem->prepare_access(dev, true); - co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); - do { - if (nouveau_ramht_entry_valid(dev, ramht, co) && -@@ -186,7 +183,7 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) - nv_wo32(dev, ramht, (co + 4)/4, 0x00000000); - - list_del(&ref->list); -- instmem->finish_access(dev); -+ instmem->flush(dev); - return; - } - -@@ -195,7 +192,6 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) - co = 0; - } while (co != ho); - list_del(&ref->list); -- instmem->finish_access(dev); - - NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", - chan->id, ref->handle); -@@ -209,7 +205,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine *engine = &dev_priv->engine; - struct nouveau_gpuobj *gpuobj; -- struct mem_block *pramin = NULL; -+ struct drm_mm *pramin = NULL; - int ret; + if (clock < min_clock) +@@ -656,193 +716,138 @@ nouveau_connector_funcs = { + .force = nouveau_connector_force + }; - NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n", -@@ -233,25 +229,12 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, - * available. 
- */ - if (chan) { -- if (chan->ramin_heap) { -- NV_DEBUG(dev, "private heap\n"); -- pramin = chan->ramin_heap; -- } else -- if (dev_priv->card_type < NV_50) { -- NV_DEBUG(dev, "global heap fallback\n"); -- pramin = dev_priv->ramin_heap; -- } -+ NV_DEBUG(dev, "channel heap\n"); -+ pramin = &chan->ramin_heap; - } else { - NV_DEBUG(dev, "global heap\n"); -- pramin = dev_priv->ramin_heap; -- } -- -- if (!pramin) { -- NV_ERROR(dev, "No PRAMIN heap!\n"); -- return -EINVAL; +-static int +-nouveau_connector_create_lvds(struct drm_device *dev, +- struct drm_connector *connector) +-{ +- struct nouveau_connector *nv_connector = nouveau_connector(connector); +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_i2c_chan *i2c = NULL; +- struct nouveau_encoder *nv_encoder; +- struct drm_display_mode native, *mode, *temp; +- bool dummy, if_is_24bit = false; +- int ret, flags; +- +- nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); +- if (!nv_encoder) +- return -ENODEV; +- +- ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &if_is_24bit); +- if (ret) { +- NV_ERROR(dev, "Error parsing LVDS table, disabling LVDS\n"); +- return ret; - } -+ pramin = &dev_priv->ramin_heap; - -- if (!chan) { - ret = engine->instmem.populate(dev, gpuobj, &size); - if (ret) { - nouveau_gpuobj_del(dev, &gpuobj); -@@ -260,9 +243,10 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, - } - - /* Allocate a chunk of the PRAMIN aperture */ -- gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size, -- drm_order(align), -- (struct drm_file *)-2, 0); -+ gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0); -+ if (gpuobj->im_pramin) -+ gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align); -+ - if (!gpuobj->im_pramin) { - nouveau_gpuobj_del(dev, &gpuobj); - return -ENOMEM; -@@ -279,10 +263,9 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, - if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { - int i; +- nv_connector->use_dithering = !if_is_24bit; +- +- /* Firstly try getting EDID over DDC, if allowed and I2C channel +- * is available. +- */ +- if (!dev_priv->VBIOS.pub.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf) +- i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); +- +- if (i2c) { +- nouveau_connector_ddc_prepare(connector, &flags); +- nv_connector->edid = drm_get_edid(connector, &i2c->adapter); +- nouveau_connector_ddc_finish(connector, flags); +- } +- +- /* If no EDID found above, and the VBIOS indicates a hardcoded +- * modeline is avalilable for the panel, set it as the panel's +- * native mode and exit. +- */ +- if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) && +- (nv_encoder->dcb->lvdsconf.use_straps_for_mode || +- dev_priv->VBIOS.pub.fp_no_ddc)) { +- nv_connector->native_mode = drm_mode_duplicate(dev, &native); +- goto out; +- } +- +- /* Still nothing, some VBIOS images have a hardcoded EDID block +- * stored for the panel stored in them. +- */ +- if (!nv_connector->edid && !nv_connector->native_mode && +- !dev_priv->VBIOS.pub.fp_no_ddc) { +- struct edid *edid = +- (struct edid *)nouveau_bios_embedded_edid(dev); +- if (edid) { +- nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL); +- *(nv_connector->edid) = *edid; +- } +- } +- +- if (!nv_connector->edid) +- goto out; +- +- /* We didn't find/use a panel mode from the VBIOS, so parse the EDID +- * block and look for the preferred mode there. 
+- */ +- ret = drm_add_edid_modes(connector, nv_connector->edid); +- if (ret == 0) +- goto out; +- nv_connector->detected_encoder = nv_encoder; +- nv_connector->native_mode = nouveau_connector_native_mode(nv_connector); +- list_for_each_entry_safe(mode, temp, &connector->probed_modes, head) +- drm_mode_remove(connector, mode); +- +-out: +- if (!nv_connector->native_mode) { +- NV_ERROR(dev, "LVDS present in DCB table, but couldn't " +- "determine its native mode. Disabling.\n"); +- return -ENODEV; +- } +- +- drm_mode_connector_update_edid_property(connector, nv_connector->edid); +- return 0; +-} ++static const struct drm_connector_funcs ++nouveau_connector_funcs_lvds = { ++ .dpms = drm_helper_connector_dpms, ++ .save = NULL, ++ .restore = NULL, ++ .detect = nouveau_connector_detect_lvds, ++ .destroy = nouveau_connector_destroy, ++ .fill_modes = drm_helper_probe_single_connector_modes, ++ .set_property = nouveau_connector_set_property, ++ .force = nouveau_connector_force ++}; -- engine->instmem.prepare_access(dev, true); - for (i = 0; i < gpuobj->im_pramin->size; i += 4) - nv_wo32(dev, gpuobj, i/4, 0); -- engine->instmem.finish_access(dev); -+ engine->instmem.flush(dev); - } +-int +-nouveau_connector_create(struct drm_device *dev, int index, int type) ++struct drm_connector * ++nouveau_connector_create(struct drm_device *dev, int index) + { ++ const struct drm_connector_funcs *funcs = &nouveau_connector_funcs; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_connector *nv_connector = NULL; ++ struct dcb_connector_table_entry *dcb = NULL; + struct drm_connector *connector; +- struct drm_encoder *encoder; +- int ret; ++ int type, ret = 0; - *gpuobj_ret = gpuobj; -@@ -370,10 +353,9 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) - } + NV_DEBUG_KMS(dev, "\n"); - if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { -- engine->instmem.prepare_access(dev, true); - for (i = 0; i < gpuobj->im_pramin->size; i += 4) - nv_wo32(dev, gpuobj, i/4, 0); -- engine->instmem.finish_access(dev); -+ engine->instmem.flush(dev); - } +- nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); +- if (!nv_connector) +- return -ENOMEM; +- nv_connector->dcb = nouveau_bios_connector_entry(dev, index); +- connector = &nv_connector->base; ++ if (index >= dev_priv->vbios.dcb.connector.entries) ++ return ERR_PTR(-EINVAL); - if (gpuobj->dtor) -@@ -386,7 +368,7 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) - if (gpuobj->flags & NVOBJ_FLAG_FAKE) - kfree(gpuobj->im_pramin); - else -- nouveau_mem_free_block(gpuobj->im_pramin); -+ drm_mm_put_block(gpuobj->im_pramin); +- switch (type) { +- case DRM_MODE_CONNECTOR_VGA: +- NV_INFO(dev, "Detected a VGA connector\n"); ++ dcb = &dev_priv->vbios.dcb.connector.entry[index]; ++ if (dcb->drm) ++ return dcb->drm; ++ ++ switch (dcb->type) { ++ case DCB_CONNECTOR_VGA: ++ type = DRM_MODE_CONNECTOR_VGA; + break; +- case DRM_MODE_CONNECTOR_DVID: +- NV_INFO(dev, "Detected a DVI-D connector\n"); ++ case DCB_CONNECTOR_TV_0: ++ case DCB_CONNECTOR_TV_1: ++ case DCB_CONNECTOR_TV_3: ++ type = DRM_MODE_CONNECTOR_TV; + break; +- case DRM_MODE_CONNECTOR_DVII: +- NV_INFO(dev, "Detected a DVI-I connector\n"); ++ case DCB_CONNECTOR_DVI_I: ++ type = DRM_MODE_CONNECTOR_DVII; + break; +- case DRM_MODE_CONNECTOR_LVDS: +- NV_INFO(dev, "Detected a LVDS connector\n"); ++ case DCB_CONNECTOR_DVI_D: ++ type = DRM_MODE_CONNECTOR_DVID; + break; +- case DRM_MODE_CONNECTOR_TV: +- NV_INFO(dev, "Detected a TV 
connector\n"); ++ case DCB_CONNECTOR_HDMI_0: ++ case DCB_CONNECTOR_HDMI_1: ++ type = DRM_MODE_CONNECTOR_HDMIA; + break; +- case DRM_MODE_CONNECTOR_DisplayPort: +- NV_INFO(dev, "Detected a DisplayPort connector\n"); ++ case DCB_CONNECTOR_LVDS: ++ type = DRM_MODE_CONNECTOR_LVDS; ++ funcs = &nouveau_connector_funcs_lvds; + break; +- default: +- NV_ERROR(dev, "Unknown connector, this is not good.\n"); ++ case DCB_CONNECTOR_DP: ++ type = DRM_MODE_CONNECTOR_DisplayPort; ++ break; ++ case DCB_CONNECTOR_eDP: ++ type = DRM_MODE_CONNECTOR_eDP; + break; ++ default: ++ NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type); ++ return ERR_PTR(-EINVAL); } - list_del(&gpuobj->list); -@@ -589,7 +571,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, - list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); - - if (p_offset != ~0) { -- gpuobj->im_pramin = kzalloc(sizeof(struct mem_block), -+ gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node), - GFP_KERNEL); - if (!gpuobj->im_pramin) { - nouveau_gpuobj_del(dev, &gpuobj); -@@ -605,10 +587,9 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, - } ++ nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); ++ if (!nv_connector) ++ return ERR_PTR(-ENOMEM); ++ nv_connector->dcb = dcb; ++ connector = &nv_connector->base; ++ + /* defaults, will get overridden in detect() */ + connector->interlace_allowed = false; + connector->doublescan_allowed = false; - if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { -- dev_priv->engine.instmem.prepare_access(dev, true); - for (i = 0; i < gpuobj->im_pramin->size; i += 4) - nv_wo32(dev, gpuobj, i/4, 0); -- dev_priv->engine.instmem.finish_access(dev); -+ dev_priv->engine.instmem.flush(dev); - } +- drm_connector_init(dev, connector, &nouveau_connector_funcs, type); ++ drm_connector_init(dev, connector, funcs, type); + drm_connector_helper_add(connector, &nouveau_connector_helper_funcs); - if (pref) { -@@ -696,8 +677,6 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, - return ret; ++ /* Check if we need dithering enabled */ ++ if (dcb->type == DCB_CONNECTOR_LVDS) { ++ bool dummy, is_24bit = false; ++ ++ ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit); ++ if (ret) { ++ NV_ERROR(dev, "Error parsing LVDS table, disabling " ++ "LVDS\n"); ++ goto fail; ++ } ++ ++ nv_connector->use_dithering = !is_24bit; ++ } ++ + /* Init DVI-I specific properties */ +- if (type == DRM_MODE_CONNECTOR_DVII) { ++ if (dcb->type == DCB_CONNECTOR_DVI_I) { + drm_mode_create_dvi_i_properties(dev); + drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0); + drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0); } -- instmem->prepare_access(dev, true); +- if (type != DRM_MODE_CONNECTOR_LVDS) +- nv_connector->use_dithering = false; - - if (dev_priv->card_type < NV_50) { - uint32_t frame, adjust, pte_flags = 0; - -@@ -734,7 +713,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, - nv_wo32(dev, *gpuobj, 5, flags5); - } - -- instmem->finish_access(dev); -+ instmem->flush(dev); - - (*gpuobj)->engine = NVOBJ_ENGINE_SW; - (*gpuobj)->class = class; -@@ -849,7 +828,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, - return ret; - } - -- dev_priv->engine.instmem.prepare_access(dev, true); - if (dev_priv->card_type >= NV_50) { - nv_wo32(dev, *gpuobj, 0, class); - nv_wo32(dev, *gpuobj, 5, 0x00010000); -@@ -874,7 +852,7 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, 
- } +- if (type == DRM_MODE_CONNECTOR_DVID || +- type == DRM_MODE_CONNECTOR_DVII || +- type == DRM_MODE_CONNECTOR_LVDS || +- type == DRM_MODE_CONNECTOR_DisplayPort) { +- nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; +- +- drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, +- nv_connector->scaling_mode); +- drm_connector_attach_property(connector, dev->mode_config.dithering_mode_property, +- nv_connector->use_dithering ? DRM_MODE_DITHERING_ON +- : DRM_MODE_DITHERING_OFF); +- +- } else { +- nv_connector->scaling_mode = DRM_MODE_SCALE_NONE; +- +- if (type == DRM_MODE_CONNECTOR_VGA && +- dev_priv->card_type >= NV_50) { ++ switch (dcb->type) { ++ case DCB_CONNECTOR_VGA: ++ if (dev_priv->card_type >= NV_50) { + drm_connector_attach_property(connector, + dev->mode_config.scaling_mode_property, + nv_connector->scaling_mode); } - } -- dev_priv->engine.instmem.finish_access(dev); -+ dev_priv->engine.instmem.flush(dev); - - (*gpuobj)->engine = NVOBJ_ENGINE_GR; - (*gpuobj)->class = class; -@@ -920,6 +898,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) - base = 0; - - /* PGRAPH context */ -+ size += dev_priv->engine.graph.grctx_size; - - if (dev_priv->card_type == NV_50) { - /* Various fixed table thingos */ -@@ -930,12 +909,8 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) - size += 0x8000; - /* RAMFC */ - size += 0x1000; -- /* PGRAPH context */ -- size += 0x70000; - } +- } +- +- /* attach encoders */ +- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { +- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); +- +- if (nv_encoder->dcb->connector != index) +- continue; +- +- if (get_slave_funcs(nv_encoder)) +- get_slave_funcs(nv_encoder)->create_resources(encoder, connector); ++ /* fall-through */ ++ case DCB_CONNECTOR_TV_0: ++ case DCB_CONNECTOR_TV_1: ++ case DCB_CONNECTOR_TV_3: ++ nv_connector->scaling_mode = DRM_MODE_SCALE_NONE; ++ break; ++ default: ++ nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; -- NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n", -- chan->id, size, base); - ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0, - &chan->ramin); - if (ret) { -@@ -944,8 +919,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) +- drm_mode_connector_attach_encoder(connector, encoder); ++ drm_connector_attach_property(connector, ++ dev->mode_config.scaling_mode_property, ++ nv_connector->scaling_mode); ++ drm_connector_attach_property(connector, ++ dev->mode_config.dithering_mode_property, ++ nv_connector->use_dithering ? 
++ DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF); ++ break; } - pramin = chan->ramin->gpuobj; - -- ret = nouveau_mem_init_heap(&chan->ramin_heap, -- pramin->im_pramin->start + base, size); -+ ret = drm_mm_init(&chan->ramin_heap, pramin->im_pramin->start + base, size); - if (ret) { - NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); - nouveau_gpuobj_ref_del(dev, &chan->ramin); -@@ -969,15 +943,11 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, - NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); + drm_sysfs_connector_add(connector); ++ dcb->drm = connector; ++ return dcb->drm; -- /* Reserve a block of PRAMIN for the channel -- *XXX: maybe on card_type == NV_50) { -- ret = nouveau_gpuobj_channel_init_pramin(chan); +- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { +- ret = nouveau_connector_create_lvds(dev, connector); - if (ret) { -- NV_ERROR(dev, "init pramin\n"); +- connector->funcs->destroy(connector); - return ret; - } -+ /* Allocate a chunk of memory for per-channel object storage */ -+ ret = nouveau_gpuobj_channel_init_pramin(chan); -+ if (ret) { -+ NV_ERROR(dev, "init pramin\n"); -+ return ret; - } - - /* NV50 VM -@@ -988,17 +958,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, - if (dev_priv->card_type >= NV_50) { - uint32_t vm_offset, pde; - -- instmem->prepare_access(dev, true); -- - vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200; - vm_offset += chan->ramin->gpuobj->im_pramin->start; +- } ++fail: ++ drm_connector_cleanup(connector); ++ kfree(connector); ++ return ERR_PTR(ret); - ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, - 0, &chan->vm_pd, NULL); -- if (ret) { -- instmem->finish_access(dev); -+ if (ret) - return ret; -- } - for (i = 0; i < 0x4000; i += 8) { - nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000); - nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe); -@@ -1008,10 +974,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, - ret = nouveau_gpuobj_ref_add(dev, NULL, 0, - dev_priv->gart_info.sg_ctxdma, - &chan->vm_gart_pt); -- if (ret) { -- instmem->finish_access(dev); -+ if (ret) - return ret; -- } - nv_wo32(dev, chan->vm_pd, pde++, - chan->vm_gart_pt->instance | 0x03); - nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); -@@ -1021,17 +985,15 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, - ret = nouveau_gpuobj_ref_add(dev, NULL, 0, - dev_priv->vm_vram_pt[i], - &chan->vm_vram_pt[i]); -- if (ret) { -- instmem->finish_access(dev); -+ if (ret) - return ret; -- } - - nv_wo32(dev, chan->vm_pd, pde++, - chan->vm_vram_pt[i]->instance | 0x61); - nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); - } +- return 0; + } +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h +index 728b809..1ce3d91 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.h ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.h +@@ -49,6 +49,7 @@ static inline struct nouveau_connector *nouveau_connector( + return container_of(con, struct nouveau_connector, base); + } -- instmem->finish_access(dev); -+ instmem->flush(dev); - } +-int nouveau_connector_create(struct drm_device *dev, int i2c_index, int type); ++struct drm_connector * ++nouveau_connector_create(struct drm_device *, int index); - /* RAMHT */ -@@ -1130,8 +1092,8 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) - for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) - nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]); + #endif /* __NOUVEAU_CONNECTOR_H__ */ +diff --git 
a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c +index 89e36ee..a251886 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c ++++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c +@@ -137,16 +137,28 @@ nouveau_debugfs_memory_info(struct seq_file *m, void *data) + { + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_minor *minor = node->minor; +- struct drm_device *dev = minor->dev; ++ struct drm_nouveau_private *dev_priv = minor->dev->dev_private; ++ ++ seq_printf(m, "VRAM total: %dKiB\n", (int)(dev_priv->vram_size >> 10)); ++ return 0; ++} ++ ++static int ++nouveau_debugfs_vbios_image(struct seq_file *m, void *data) ++{ ++ struct drm_info_node *node = (struct drm_info_node *) m->private; ++ struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private; ++ int i; -- if (chan->ramin_heap) -- nouveau_mem_takedown(&chan->ramin_heap); -+ if (chan->ramin_heap.fl_entry.next) -+ drm_mm_takedown(&chan->ramin_heap); - if (chan->ramin) - nouveau_gpuobj_ref_del(dev, &chan->ramin); +- seq_printf(m, "VRAM total: %dKiB\n", +- (int)(nouveau_mem_fb_amount(dev) >> 10)); ++ for (i = 0; i < dev_priv->vbios.length; i++) ++ seq_printf(m, "%c", dev_priv->vbios.data[i]); + return 0; + } -@@ -1164,10 +1126,8 @@ nouveau_gpuobj_suspend(struct drm_device *dev) - return -ENOMEM; - } + static struct drm_info_list nouveau_debugfs_list[] = { + { "chipset", nouveau_debugfs_chipset_info, 0, NULL }, + { "memory", nouveau_debugfs_memory_info, 0, NULL }, ++ { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL }, + }; + #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list) -- dev_priv->engine.instmem.prepare_access(dev, false); - for (i = 0; i < gpuobj->im_pramin->size / 4; i++) - gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i); -- dev_priv->engine.instmem.finish_access(dev); - } +diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c +index c8482a1..65c441a 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_dma.c ++++ b/drivers/gpu/drm/nouveau/nouveau_dma.c +@@ -190,6 +190,11 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, + nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8); - return 0; -@@ -1212,10 +1172,9 @@ nouveau_gpuobj_resume(struct drm_device *dev) - if (!gpuobj->im_backing_suspend) - continue; - -- dev_priv->engine.instmem.prepare_access(dev, true); - for (i = 0; i < gpuobj->im_pramin->size / 4; i++) - nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]); -- dev_priv->engine.instmem.finish_access(dev); -+ dev_priv->engine.instmem.flush(dev); + chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max; ++ ++ DRM_MEMORYBARRIER(); ++ /* Flush writes. 
*/ ++ nouveau_bo_rd32(pb, 0); ++ + nvchan_wr32(chan, 0x8c, chan->dma.ib_put); + chan->dma.ib_free--; + } +diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c +index f954ad9..deeb21c 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_dp.c ++++ b/drivers/gpu/drm/nouveau/nouveau_dp.c +@@ -483,7 +483,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, + ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT); + ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT); + +- for (;;) { ++ for (i = 0; i < 16; i++) { + nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000); + nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl); + nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000); +@@ -502,6 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, + break; } - nouveau_gpuobj_suspend_cleanup(dev); -@@ -1232,7 +1191,6 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, - struct nouveau_channel *chan; - int ret; - -- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; - NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan); - - if (init->handle == ~0) -@@ -1283,7 +1241,6 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, - struct nouveau_channel *chan; - int ret; - -- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; - NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); ++ if (i == 16) { ++ NV_ERROR(dev, "auxch DEFER too many times, bailing\n"); ++ ret = -EREMOTEIO; ++ goto out; ++ } ++ + if (cmd & 1) { + if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) { + ret = -EREMOTEIO; +diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c +index da3b93b..60a709c 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_drv.c ++++ b/drivers/gpu/drm/nouveau/nouveau_drv.c +@@ -75,14 +75,22 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status"); + int nouveau_ignorelid = 0; + module_param_named(ignorelid, nouveau_ignorelid, int, 0400); + +-MODULE_PARM_DESC(noagp, "Disable all acceleration"); ++MODULE_PARM_DESC(noaccel, "Disable all acceleration"); + int nouveau_noaccel = 0; + module_param_named(noaccel, nouveau_noaccel, int, 0400); + +-MODULE_PARM_DESC(noagp, "Disable fbcon acceleration"); ++MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); + int nouveau_nofbaccel = 0; + module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); + ++MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type"); ++int nouveau_override_conntype = 0; ++module_param_named(override_conntype, nouveau_override_conntype, int, 0400); ++ ++MODULE_PARM_DESC(tv_disable, "Disable TV-out detection\n"); ++int nouveau_tv_disable = 0; ++module_param_named(tv_disable, nouveau_tv_disable, int, 0400); ++ + MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" + "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" + "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" +@@ -154,9 +162,11 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) + if (pm_state.event == PM_EVENT_PRETHAW) + return 0; - ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref); -diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h -index 6ca80a3..b6391a1 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_reg.h -+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h -@@ -814,6 +814,7 @@ - #define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000 - #define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff - #define NV50_SOR_DP_CTRL(i,l) (0x0061c10c + (i) * 0x800 + (l) * 0x80) -+#define 
NV50_SOR_DP_CTRL_ENABLED 0x00000001 - #define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000 - #define NV50_SOR_DP_CTRL_LANE_MASK 0x001f0000 - #define NV50_SOR_DP_CTRL_LANE_0_ENABLED 0x00010000 -diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c -index 1d6ee8b..491767f 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c -+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c -@@ -97,7 +97,6 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) ++ NV_INFO(dev, "Disabling fbcon acceleration...\n"); + fbdev_flags = dev_priv->fbdev_info->flags; + dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; - NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start); ++ NV_INFO(dev, "Unpinning framebuffer(s)...\n"); + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_framebuffer *nouveau_fb; -- dev_priv->engine.instmem.prepare_access(nvbe->dev, true); - pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT); - nvbe->pte_start = pte; - for (i = 0; i < nvbe->nr_pages; i++) { -@@ -116,24 +115,11 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) - dma_offset += NV_CTXDMA_PAGE_SIZE; - } - } -- dev_priv->engine.instmem.finish_access(nvbe->dev); -+ dev_priv->engine.instmem.flush(nvbe->dev); +diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h +index 5be0cca..c31159a 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_drv.h ++++ b/drivers/gpu/drm/nouveau/nouveau_drv.h +@@ -76,6 +76,7 @@ struct nouveau_bo { + struct ttm_buffer_object bo; + struct ttm_placement placement; + u32 placements[3]; ++ u32 busy_placements[3]; + struct ttm_bo_kmap_obj kmap; + struct list_head head; + +@@ -195,7 +196,7 @@ struct nouveau_channel { + struct list_head pending; + uint32_t sequence; + uint32_t sequence_ack; +- uint32_t last_sequence_irq; ++ atomic_t last_sequence_irq; + } fence; - if (dev_priv->card_type == NV_50) { -- nv_wr32(dev, 0x100c80, 0x00050001); -- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { -- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); -- NV_ERROR(dev, "0x100c80 = 0x%08x\n", -- nv_rd32(dev, 0x100c80)); -- return -EBUSY; -- } -- -- nv_wr32(dev, 0x100c80, 0x00000001); -- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { -- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); -- NV_ERROR(dev, "0x100c80 = 0x%08x\n", -- nv_rd32(dev, 0x100c80)); -- return -EBUSY; -- } -+ nv50_vm_flush(dev, 5); /* PGRAPH */ -+ nv50_vm_flush(dev, 0); /* PFIFO */ - } + /* DMA push buffer */ +@@ -519,6 +520,7 @@ struct drm_nouveau_private { - nvbe->bound = true; -@@ -154,7 +140,6 @@ nouveau_sgdma_unbind(struct ttm_backend *be) - if (!nvbe->bound) - return 0; + struct workqueue_struct *wq; + struct work_struct irq_work; ++ struct work_struct hpd_work; -- dev_priv->engine.instmem.prepare_access(nvbe->dev, true); - pte = nvbe->pte_start; - for (i = 0; i < nvbe->nr_pages; i++) { - dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus; -@@ -170,24 +155,11 @@ nouveau_sgdma_unbind(struct ttm_backend *be) - dma_offset += NV_CTXDMA_PAGE_SIZE; - } - } -- dev_priv->engine.instmem.finish_access(nvbe->dev); -+ dev_priv->engine.instmem.flush(nvbe->dev); + struct list_head vbl_waiting; - if (dev_priv->card_type == NV_50) { -- nv_wr32(dev, 0x100c80, 0x00050001); -- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { -- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); -- NV_ERROR(dev, "0x100c80 = 0x%08x\n", -- nv_rd32(dev, 0x100c80)); -- return -EBUSY; -- } -- -- nv_wr32(dev, 0x100c80, 
0x00000001); -- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { -- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); -- NV_ERROR(dev, "0x100c80 = 0x%08x\n", -- nv_rd32(dev, 0x100c80)); -- return -EBUSY; -- } -+ nv50_vm_flush(dev, 5); -+ nv50_vm_flush(dev, 0); - } +@@ -533,12 +535,14 @@ struct drm_nouveau_private { - nvbe->bound = false; -@@ -272,7 +244,6 @@ nouveau_sgdma_init(struct drm_device *dev) - pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0, - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - -- dev_priv->engine.instmem.prepare_access(dev, true); - if (dev_priv->card_type < NV_50) { - /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and - * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE -@@ -294,7 +265,7 @@ nouveau_sgdma_init(struct drm_device *dev) - nv_wo32(dev, gpuobj, (i+4)/4, 0); - } - } -- dev_priv->engine.instmem.finish_access(dev); -+ dev_priv->engine.instmem.flush(dev); + struct fb_info *fbdev_info; - dev_priv->gart_info.type = NOUVEAU_GART_SGDMA; - dev_priv->gart_info.aper_base = 0; -@@ -325,14 +296,11 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page) - { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; -- struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; - int pte; - - pte = (offset >> NV_CTXDMA_PAGE_SHIFT); - if (dev_priv->card_type < NV_50) { -- instmem->prepare_access(dev, false); - *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK; -- instmem->finish_access(dev); - return 0; - } +- int fifo_alloc_count; + struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; -diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c -index 4c26be6..63c2d24 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_state.c -+++ b/drivers/gpu/drm/nouveau/nouveau_state.c -@@ -54,8 +54,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) - engine->instmem.clear = nv04_instmem_clear; - engine->instmem.bind = nv04_instmem_bind; - engine->instmem.unbind = nv04_instmem_unbind; -- engine->instmem.prepare_access = nv04_instmem_prepare_access; -- engine->instmem.finish_access = nv04_instmem_finish_access; -+ engine->instmem.flush = nv04_instmem_flush; - engine->mc.init = nv04_mc_init; - engine->mc.takedown = nv04_mc_takedown; - engine->timer.init = nv04_timer_init; -@@ -95,8 +94,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) - engine->instmem.clear = nv04_instmem_clear; - engine->instmem.bind = nv04_instmem_bind; - engine->instmem.unbind = nv04_instmem_unbind; -- engine->instmem.prepare_access = nv04_instmem_prepare_access; -- engine->instmem.finish_access = nv04_instmem_finish_access; -+ engine->instmem.flush = nv04_instmem_flush; - engine->mc.init = nv04_mc_init; - engine->mc.takedown = nv04_mc_takedown; - engine->timer.init = nv04_timer_init; -@@ -138,8 +136,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) - engine->instmem.clear = nv04_instmem_clear; - engine->instmem.bind = nv04_instmem_bind; - engine->instmem.unbind = nv04_instmem_unbind; -- engine->instmem.prepare_access = nv04_instmem_prepare_access; -- engine->instmem.finish_access = nv04_instmem_finish_access; -+ engine->instmem.flush = nv04_instmem_flush; - engine->mc.init = nv04_mc_init; - engine->mc.takedown = nv04_mc_takedown; - engine->timer.init = nv04_timer_init; -@@ -181,8 +178,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) - engine->instmem.clear = nv04_instmem_clear; - 
engine->instmem.bind = nv04_instmem_bind; - engine->instmem.unbind = nv04_instmem_unbind; -- engine->instmem.prepare_access = nv04_instmem_prepare_access; -- engine->instmem.finish_access = nv04_instmem_finish_access; -+ engine->instmem.flush = nv04_instmem_flush; - engine->mc.init = nv04_mc_init; - engine->mc.takedown = nv04_mc_takedown; - engine->timer.init = nv04_timer_init; -@@ -225,8 +221,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) - engine->instmem.clear = nv04_instmem_clear; - engine->instmem.bind = nv04_instmem_bind; - engine->instmem.unbind = nv04_instmem_unbind; -- engine->instmem.prepare_access = nv04_instmem_prepare_access; -- engine->instmem.finish_access = nv04_instmem_finish_access; -+ engine->instmem.flush = nv04_instmem_flush; - engine->mc.init = nv40_mc_init; - engine->mc.takedown = nv40_mc_takedown; - engine->timer.init = nv04_timer_init; -@@ -271,8 +266,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) - engine->instmem.clear = nv50_instmem_clear; - engine->instmem.bind = nv50_instmem_bind; - engine->instmem.unbind = nv50_instmem_unbind; -- engine->instmem.prepare_access = nv50_instmem_prepare_access; -- engine->instmem.finish_access = nv50_instmem_finish_access; -+ engine->instmem.flush = nv50_instmem_flush; - engine->mc.init = nv50_mc_init; - engine->mc.takedown = nv50_mc_takedown; - engine->timer.init = nv04_timer_init; -@@ -404,11 +398,6 @@ nouveau_card_init(struct drm_device *dev) - struct nouveau_engine *engine; - int ret; + struct nouveau_engine engine; + struct nouveau_channel *channel; -- NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state); ++ /* For PFIFO and PGRAPH. */ ++ spinlock_t context_switch_lock; ++ + /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ + struct nouveau_gpuobj *ramht; + uint32_t ramin_rsvd_vram; +@@ -550,12 +554,6 @@ struct drm_nouveau_private { + uint32_t ramro_offset; + uint32_t ramro_size; + +- /* base physical adresses */ +- uint64_t fb_phys; +- uint64_t fb_available_size; +- uint64_t fb_mappable_pages; +- uint64_t fb_aper_free; - -- if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE) -- return 0; + struct { + enum { + NOUVEAU_GART_NONE = 0, +@@ -569,10 +567,6 @@ struct drm_nouveau_private { + struct nouveau_gpuobj *sg_ctxdma; + struct page *sg_dummy_page; + dma_addr_t sg_dummy_bus; +- +- /* nottm hack */ +- struct drm_ttm_backend *sg_be; +- unsigned long sg_handle; + } gart_info; + + /* nv10-nv40 tiling regions */ +@@ -581,6 +575,16 @@ struct drm_nouveau_private { + spinlock_t lock; + } tile; + ++ /* VRAM/fb configuration */ ++ uint64_t vram_size; ++ uint64_t vram_sys_base; ++ ++ uint64_t fb_phys; ++ uint64_t fb_available_size; ++ uint64_t fb_mappable_pages; ++ uint64_t fb_aper_free; ++ int fb_mtrr; ++ + /* G8x/G9x virtual address space */ + uint64_t vm_gart_base; + uint64_t vm_gart_size; +@@ -589,10 +593,6 @@ struct drm_nouveau_private { + uint64_t vm_end; + struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; + int vm_vram_pt_nr; +- uint64_t vram_sys_base; - - vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); - vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state, - nouveau_switcheroo_can_switch); -@@ -418,15 +407,12 @@ nouveau_card_init(struct drm_device *dev) - if (ret) - goto out; - engine = &dev_priv->engine; -- dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; - spin_lock_init(&dev_priv->context_switch_lock); +- /* the mtrr covering the FB */ +- int fb_mtrr; - /* Parse BIOS tables / Run init tables if card not POSTed */ -- if 
(drm_core_check_feature(dev, DRIVER_MODESET)) { -- ret = nouveau_bios_init(dev); -- if (ret) -- goto out; -- } -+ ret = nouveau_bios_init(dev); -+ if (ret) -+ goto out; + struct mem_block *ramin_heap; - ret = nouveau_mem_detect(dev); - if (ret) -@@ -482,12 +468,19 @@ nouveau_card_init(struct drm_device *dev) - goto out_graph; - } +@@ -602,8 +602,7 @@ struct drm_nouveau_private { -+ if (dev_priv->card_type >= NV_50) -+ ret = nv50_display_create(dev); -+ else -+ ret = nv04_display_create(dev); -+ if (ret) -+ goto out_fifo; -+ - /* this call irq_preinstall, register irq handler and - * call irq_postinstall - */ - ret = drm_irq_install(dev); - if (ret) -- goto out_fifo; -+ goto out_display; + struct list_head gpuobj_list; - ret = drm_vblank_init(dev, 0); - if (ret) -@@ -501,33 +494,20 @@ nouveau_card_init(struct drm_device *dev) - goto out_irq; - } +- struct nvbios VBIOS; +- struct nouveau_bios_info *vbios; ++ struct nvbios vbios; -- if (drm_core_check_feature(dev, DRIVER_MODESET)) { -- if (dev_priv->card_type >= NV_50) -- ret = nv50_display_create(dev); -- else -- ret = nv04_display_create(dev); -- if (ret) -- goto out_channel; -- } -- - ret = nouveau_backlight_init(dev); - if (ret) - NV_ERROR(dev, "Error %d registering backlight\n", ret); + struct nv04_mode_state mode_reg; + struct nv04_mode_state saved_reg; +@@ -612,11 +611,7 @@ struct drm_nouveau_private { + uint32_t dac_users[4]; -- dev_priv->init_state = NOUVEAU_CARD_INIT_DONE; -- -- if (drm_core_check_feature(dev, DRIVER_MODESET)) -- nouveau_fbcon_init(dev); -- -+ nouveau_fbcon_init(dev); - return 0; + struct nouveau_suspend_resume { +- uint32_t fifo_mode; +- uint32_t graph_ctx_control; +- uint32_t graph_state; + uint32_t *ramin_copy; +- uint64_t ramin_size; + } susres; --out_channel: -- if (dev_priv->channel) { -- nouveau_channel_free(dev_priv->channel); -- dev_priv->channel = NULL; -- } - out_irq: - drm_irq_uninstall(dev); -+out_display: -+ if (dev_priv->card_type >= NV_50) -+ nv50_display_destroy(dev); -+ else -+ nv04_display_destroy(dev); - out_fifo: - if (!nouveau_noaccel) - engine->fifo.takedown(dev); -@@ -561,45 +541,37 @@ static void nouveau_card_takedown(struct drm_device *dev) - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine *engine = &dev_priv->engine; + struct backlight_device *backlight; +@@ -680,6 +675,7 @@ extern int nouveau_uscript_tmds; + extern int nouveau_vram_pushbuf; + extern int nouveau_vram_notify; + extern int nouveau_fbpercrtc; ++extern int nouveau_tv_disable; + extern char *nouveau_tv_norm; + extern int nouveau_reg_debug; + extern char *nouveau_vbios; +@@ -687,6 +683,7 @@ extern int nouveau_ctxfw; + extern int nouveau_ignorelid; + extern int nouveau_nofbaccel; + extern int nouveau_noaccel; ++extern int nouveau_override_conntype; + + /* nouveau_state.c */ + extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); +@@ -711,7 +708,7 @@ extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *, + struct drm_file *, int tail); + extern void nouveau_mem_takedown(struct mem_block **heap); + extern void nouveau_mem_free_block(struct mem_block *); +-extern uint64_t nouveau_mem_fb_amount(struct drm_device *); ++extern int nouveau_mem_detect(struct drm_device *dev); + extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap); + extern int nouveau_mem_init(struct drm_device *); + extern int nouveau_mem_init_agp(struct drm_device *); +@@ -928,6 +925,10 @@ extern void nv40_fb_takedown(struct drm_device *); + extern void 
nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t, + uint32_t, uint32_t); -- NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state); -- -- if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) { -- -- nouveau_backlight_exit(dev); -- -- if (dev_priv->channel) { -- nouveau_channel_free(dev_priv->channel); -- dev_priv->channel = NULL; -- } -+ nouveau_backlight_exit(dev); ++/* nv50_fb.c */ ++extern int nv50_fb_init(struct drm_device *); ++extern void nv50_fb_takedown(struct drm_device *); ++ + /* nv04_fifo.c */ + extern int nv04_fifo_init(struct drm_device *); + extern void nv04_fifo_disable(struct drm_device *); +@@ -1027,6 +1028,7 @@ extern void nv50_graph_destroy_context(struct nouveau_channel *); + extern int nv50_graph_load_context(struct nouveau_channel *); + extern int nv50_graph_unload_context(struct drm_device *); + extern void nv50_graph_context_switch(struct drm_device *); ++extern int nv50_grctx_init(struct nouveau_grctx *); -- if (!nouveau_noaccel) { -- engine->fifo.takedown(dev); -- engine->graph.takedown(dev); -- } -- engine->fb.takedown(dev); -- engine->timer.takedown(dev); -- engine->mc.takedown(dev); -+ if (dev_priv->channel) { -+ nouveau_channel_free(dev_priv->channel); -+ dev_priv->channel = NULL; -+ } + /* nouveau_grctx.c */ + extern int nouveau_grctx_prog_load(struct drm_device *); +@@ -1081,13 +1083,13 @@ extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); -- mutex_lock(&dev->struct_mutex); -- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); -- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); -- mutex_unlock(&dev->struct_mutex); -- nouveau_sgdma_takedown(dev); -+ if (!nouveau_noaccel) { -+ engine->fifo.takedown(dev); -+ engine->graph.takedown(dev); -+ } -+ engine->fb.takedown(dev); -+ engine->timer.takedown(dev); -+ engine->mc.takedown(dev); - -- nouveau_gpuobj_takedown(dev); -- nouveau_mem_close(dev); -- engine->instmem.takedown(dev); -+ mutex_lock(&dev->struct_mutex); -+ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); -+ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); -+ mutex_unlock(&dev->struct_mutex); -+ nouveau_sgdma_takedown(dev); + /* nv04_dac.c */ +-extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry); ++extern int nv04_dac_create(struct drm_connector *, struct dcb_entry *); + extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder); + extern int nv04_dac_output_offset(struct drm_encoder *encoder); + extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable); -- if (drm_core_check_feature(dev, DRIVER_MODESET)) -- drm_irq_uninstall(dev); -+ nouveau_gpuobj_takedown(dev); -+ nouveau_mem_close(dev); -+ engine->instmem.takedown(dev); + /* nv04_dfp.c */ +-extern int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry); ++extern int nv04_dfp_create(struct drm_connector *, struct dcb_entry *); + extern int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent); + extern void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent, + int head, bool dl); +@@ -1096,10 +1098,10 @@ extern void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode); -- nouveau_gpuobj_late_takedown(dev); -- nouveau_bios_takedown(dev); -+ drm_irq_uninstall(dev); + /* nv04_tv.c */ + extern int nv04_tv_identify(struct drm_device *dev, int i2c_index); +-extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry); ++extern int nv04_tv_create(struct drm_connector *, struct dcb_entry *); -- vga_client_register(dev->pdev, NULL, 
NULL, NULL); -+ nouveau_gpuobj_late_takedown(dev); -+ nouveau_bios_takedown(dev); + /* nv17_tv.c */ +-extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry); ++extern int nv17_tv_create(struct drm_connector *, struct dcb_entry *); -- dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; -- } -+ vga_client_register(dev->pdev, NULL, NULL, NULL); - } + /* nv04_display.c */ + extern int nv04_display_create(struct drm_device *); +@@ -1119,7 +1121,8 @@ extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags); + extern int nouveau_bo_unpin(struct nouveau_bo *); + extern int nouveau_bo_map(struct nouveau_bo *); + extern void nouveau_bo_unmap(struct nouveau_bo *); +-extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t memtype); ++extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t type, ++ uint32_t busy); + extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); + extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); + extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); +@@ -1139,7 +1142,6 @@ extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); + extern int nouveau_fence_flush(void *obj, void *arg); + extern void nouveau_fence_unref(void **obj); + extern void *nouveau_fence_ref(void *obj); +-extern void nouveau_fence_handler(struct drm_device *dev, int channel); - /* here a client dies, release the stuff that was allocated for its -@@ -686,6 +658,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) - struct drm_nouveau_private *dev_priv; - uint32_t reg0; - resource_size_t mmio_start_offs; -+ int ret; + /* nouveau_gem.c */ + extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *, +@@ -1163,6 +1165,16 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *, + int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); + int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); - dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); - if (!dev_priv) -@@ -694,7 +667,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) - dev_priv->dev = dev; ++/* nv50_gpio.c */ ++int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); ++int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); ++ ++/* nv50_calc. 
*/ ++int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk, ++ int *N1, int *M1, int *N2, int *M2, int *P); ++int nv50_calc_pll2(struct drm_device *, struct pll_lims *, ++ int clk, int *N, int *fN, int *M, int *P); ++ + #ifndef ioread32_native + #ifdef __BIG_ENDIAN + #define ioread16_native ioread16be +diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h +index bc4a240..e4442e2 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h ++++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h +@@ -47,6 +47,9 @@ struct nouveau_encoder { - dev_priv->flags = flags & NOUVEAU_FLAGS; -- dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; + union { + struct { ++ int mc_unknown; ++ uint32_t unk0; ++ uint32_t unk1; + int dpcd_version; + int link_nr; + int link_bw; +@@ -68,8 +71,8 @@ static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc) - NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n", - dev->pci_vendor, dev->pci_device, dev->pdev->class); -@@ -768,11 +740,9 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) - NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", - dev_priv->card_type, reg0); + struct nouveau_connector * + nouveau_encoder_connector_get(struct nouveau_encoder *encoder); +-int nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry); +-int nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry); ++int nv50_sor_create(struct drm_connector *, struct dcb_entry *); ++int nv50_dac_create(struct drm_connector *, struct dcb_entry *); -- if (drm_core_check_feature(dev, DRIVER_MODESET)) { -- int ret = nouveau_remove_conflicting_drivers(dev); -- if (ret) -- return ret; -- } -+ ret = nouveau_remove_conflicting_drivers(dev); -+ if (ret) -+ return ret; + struct bit_displayport_encoder_table { + uint32_t match; +diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c +index faddf53..813d853 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_fence.c ++++ b/drivers/gpu/drm/nouveau/nouveau_fence.c +@@ -67,12 +67,13 @@ nouveau_fence_update(struct nouveau_channel *chan) + if (USE_REFCNT) + sequence = nvchan_rd32(chan, 0x48); + else +- sequence = chan->fence.last_sequence_irq; ++ sequence = atomic_read(&chan->fence.last_sequence_irq); - /* Map PRAMIN BAR, or on older cards, the aperture withing BAR0 */ - if (dev_priv->card_type >= NV_40) { -@@ -807,45 +777,27 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) - dev_priv->flags |= NV_NFORCE2; + if (chan->fence.sequence_ack == sequence) + return; + chan->fence.sequence_ack = sequence; - /* For kernel modesetting, init card now and bring up fbcon */ -- if (drm_core_check_feature(dev, DRIVER_MODESET)) { -- int ret = nouveau_card_init(dev); -- if (ret) -- return ret; -- } -+ ret = nouveau_card_init(dev); -+ if (ret) -+ return ret; ++ spin_lock(&chan->fence.lock); + list_for_each_safe(entry, tmp, &chan->fence.pending) { + fence = list_entry(entry, struct nouveau_fence, entry); - return 0; +@@ -84,6 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan) + if (sequence == chan->fence.sequence_ack) + break; + } ++ spin_unlock(&chan->fence.lock); } --static void nouveau_close(struct drm_device *dev) --{ -- struct drm_nouveau_private *dev_priv = dev->dev_private; -- -- /* In the case of an error dev_priv may not be allocated yet */ -- if (dev_priv) -- nouveau_card_takedown(dev); --} -- --/* KMS: we need mmio at load time, not when the first drm client opens. 
*/ - void nouveau_lastclose(struct drm_device *dev) + int +@@ -119,7 +121,6 @@ nouveau_fence_emit(struct nouveau_fence *fence) { -- if (drm_core_check_feature(dev, DRIVER_MODESET)) -- return; -- -- nouveau_close(dev); - } + struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private; + struct nouveau_channel *chan = fence->channel; +- unsigned long flags; + int ret; - int nouveau_unload(struct drm_device *dev) - { - struct drm_nouveau_private *dev_priv = dev->dev_private; + ret = RING_SPACE(chan, 2); +@@ -127,9 +128,7 @@ nouveau_fence_emit(struct nouveau_fence *fence) + return ret; -- if (drm_core_check_feature(dev, DRIVER_MODESET)) { -- nouveau_fbcon_fini(dev); -- if (dev_priv->card_type >= NV_50) -- nv50_display_destroy(dev); -- else -- nv04_display_destroy(dev); -- nouveau_close(dev); -- } -+ nouveau_fbcon_fini(dev); -+ if (dev_priv->card_type >= NV_50) -+ nv50_display_destroy(dev); -+ else -+ nv04_display_destroy(dev); -+ nouveau_card_takedown(dev); + if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) { +- spin_lock_irqsave(&chan->fence.lock, flags); + nouveau_fence_update(chan); +- spin_unlock_irqrestore(&chan->fence.lock, flags); - iounmap(dev_priv->mmio); - iounmap(dev_priv->ramin); -@@ -861,8 +813,6 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct drm_nouveau_getparam *getparam = data; + BUG_ON(chan->fence.sequence == + chan->fence.sequence_ack - 1); +@@ -138,9 +137,9 @@ nouveau_fence_emit(struct nouveau_fence *fence) + fence->sequence = ++chan->fence.sequence; -- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; -- - switch (getparam->param) { - case NOUVEAU_GETPARAM_CHIPSET_ID: - getparam->value = dev_priv->chipset; -@@ -931,8 +881,6 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data, - { - struct drm_nouveau_setparam *setparam = data; + kref_get(&fence->refcount); +- spin_lock_irqsave(&chan->fence.lock, flags); ++ spin_lock(&chan->fence.lock); + list_add_tail(&fence->entry, &chan->fence.pending); +- spin_unlock_irqrestore(&chan->fence.lock, flags); ++ spin_unlock(&chan->fence.lock); -- NOUVEAU_CHECK_INITIALISED_WITH_RETURN; -- - switch (setparam->param) { - default: - NV_ERROR(dev, "unknown parameter %lld\n", setparam->param); -diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c -index 1cb19e3..2d0fee5 100644 ---- a/drivers/gpu/drm/nouveau/nv04_dac.c -+++ b/drivers/gpu/drm/nouveau/nv04_dac.c -@@ -261,12 +261,11 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) - - saved_routput = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset); - head = (saved_routput & 0x100) >> 8; --#if 0 -- /* if there's a spare crtc, using it will minimise flicker for the case -- * where the in-use crtc is in use by an off-chip tmds encoder */ -- if (xf86_config->crtc[head]->enabled && !xf86_config->crtc[head ^ 1]->enabled) -+ -+ /* if there's a spare crtc, using it will minimise flicker */ -+ if (!(NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX) & 0xC0)) - head ^= 1; --#endif -+ - /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */ - routput = (saved_routput & 0xfffffece) | head << 8; - -@@ -315,9 +314,12 @@ nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) + BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 
0x0050 : 0x0150, 1); + OUT_RING(chan, fence->sequence); +@@ -173,14 +172,11 @@ nouveau_fence_signalled(void *sync_obj, void *sync_arg) { - struct drm_device *dev = encoder->dev; - struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; -- uint32_t sample = nv17_dac_sample_load(encoder); + struct nouveau_fence *fence = nouveau_fence(sync_obj); + struct nouveau_channel *chan = fence->channel; +- unsigned long flags; -- if (sample & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) { -+ if (nv04_dac_in_use(encoder)) -+ return connector_status_disconnected; -+ -+ if (nv17_dac_sample_load(encoder) & -+ NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) { - NV_INFO(dev, "Load detected on output %c\n", - '@' + ffs(dcb->or)); - return connector_status_connected; -@@ -330,6 +332,9 @@ static bool nv04_dac_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) - { -+ if (nv04_dac_in_use(encoder)) -+ return false; -+ - return true; - } + if (fence->signalled) + return true; -@@ -428,6 +433,17 @@ void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable) - } +- spin_lock_irqsave(&chan->fence.lock, flags); + nouveau_fence_update(chan); +- spin_unlock_irqrestore(&chan->fence.lock, flags); + return fence->signalled; } -+/* Check if the DAC corresponding to 'encoder' is being used by -+ * someone else. */ -+bool nv04_dac_in_use(struct drm_encoder *encoder) -+{ -+ struct drm_nouveau_private *dev_priv = encoder->dev->dev_private; -+ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; -+ -+ return nv_gf4_disp_arch(encoder->dev) && -+ (dev_priv->dac_users[ffs(dcb->or) - 1] & ~(1 << dcb->index)); -+} -+ - static void nv04_dac_dpms(struct drm_encoder *encoder, int mode) - { - struct drm_device *dev = encoder->dev; -@@ -501,11 +517,13 @@ static const struct drm_encoder_funcs nv04_dac_funcs = { - .destroy = nv04_dac_destroy, - }; - --int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry) -+int -+nv04_dac_create(struct drm_connector *connector, struct dcb_entry *entry) - { - const struct drm_encoder_helper_funcs *helper; -- struct drm_encoder *encoder; - struct nouveau_encoder *nv_encoder = NULL; -+ struct drm_device *dev = connector->dev; -+ struct drm_encoder *encoder; - - nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); - if (!nv_encoder) -@@ -527,5 +545,6 @@ int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry) - encoder->possible_crtcs = entry->heads; - encoder->possible_clones = 0; - -+ drm_mode_connector_attach_encoder(connector, encoder); +@@ -221,27 +217,12 @@ nouveau_fence_flush(void *sync_obj, void *sync_arg) return 0; } -diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c -index 41634d4..3311f3a 100644 ---- a/drivers/gpu/drm/nouveau/nv04_dfp.c -+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c -@@ -413,10 +413,6 @@ static void nv04_dfp_commit(struct drm_encoder *encoder) - struct dcb_entry *dcbe = nv_encoder->dcb; - int head = nouveau_crtc(encoder->crtc)->index; -- NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", -- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), -- nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); +-void +-nouveau_fence_handler(struct drm_device *dev, int channel) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_channel *chan = NULL; - - if (dcbe->type == OUTPUT_TMDS) - run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock); - else if (dcbe->type == OUTPUT_LVDS) -@@ -584,11 +580,12 @@ static const struct 
drm_encoder_funcs nv04_dfp_funcs = { - .destroy = nv04_dfp_destroy, - }; - --int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry) -+int -+nv04_dfp_create(struct drm_connector *connector, struct dcb_entry *entry) +- if (channel >= 0 && channel < dev_priv->engine.fifo.channels) +- chan = dev_priv->fifos[channel]; +- +- if (chan) { +- spin_lock_irq(&chan->fence.lock); +- nouveau_fence_update(chan); +- spin_unlock_irq(&chan->fence.lock); +- } +-} +- + int + nouveau_fence_init(struct nouveau_channel *chan) { - const struct drm_encoder_helper_funcs *helper; -- struct drm_encoder *encoder; - struct nouveau_encoder *nv_encoder = NULL; -+ struct drm_encoder *encoder; - int type; - - switch (entry->type) { -@@ -613,11 +610,12 @@ int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry) - nv_encoder->dcb = entry; - nv_encoder->or = ffs(entry->or) - 1; - -- drm_encoder_init(dev, encoder, &nv04_dfp_funcs, type); -+ drm_encoder_init(connector->dev, encoder, &nv04_dfp_funcs, type); - drm_encoder_helper_add(encoder, helper); - - encoder->possible_crtcs = entry->heads; - encoder->possible_clones = 0; - -+ drm_mode_connector_attach_encoder(connector, encoder); + INIT_LIST_HEAD(&chan->fence.pending); + spin_lock_init(&chan->fence.lock); ++ atomic_set(&chan->fence.last_sequence_irq, 0); return 0; } -diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c -index c7898b4..b35b7ed 100644 ---- a/drivers/gpu/drm/nouveau/nv04_display.c -+++ b/drivers/gpu/drm/nouveau/nv04_display.c -@@ -94,6 +94,7 @@ nv04_display_create(struct drm_device *dev) + +diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c +index 8265fed..0846a1e 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_gem.c ++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c +@@ -182,40 +182,35 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains, { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct dcb_table *dcb = &dev_priv->vbios.dcb; -+ struct drm_connector *connector, *ct; - struct drm_encoder *encoder; - struct drm_crtc *crtc; - int i, ret; -@@ -132,19 +133,23 @@ nv04_display_create(struct drm_device *dev) - for (i = 0; i < dcb->entries; i++) { - struct dcb_entry *dcbent = &dcb->entry[i]; + struct nouveau_bo *nvbo = gem->driver_private; + struct ttm_buffer_object *bo = &nvbo->bo; +- uint64_t flags; ++ uint32_t domains = valid_domains & ++ (write_domains ? 
write_domains : read_domains); ++ uint32_t pref_flags = 0, valid_flags = 0; + +- if (!valid_domains || (!read_domains && !write_domains)) ++ if (!domains) + return -EINVAL; -+ connector = nouveau_connector_create(dev, dcbent->connector); -+ if (IS_ERR(connector)) -+ continue; +- if (write_domains) { +- if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && +- (write_domains & NOUVEAU_GEM_DOMAIN_VRAM)) +- flags = TTM_PL_FLAG_VRAM; +- else +- if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) && +- (write_domains & NOUVEAU_GEM_DOMAIN_GART)) +- flags = TTM_PL_FLAG_TT; +- else +- return -EINVAL; +- } else { +- if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && +- (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) && +- bo->mem.mem_type == TTM_PL_VRAM) +- flags = TTM_PL_FLAG_VRAM; +- else +- if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) && +- (read_domains & NOUVEAU_GEM_DOMAIN_GART) && +- bo->mem.mem_type == TTM_PL_TT) +- flags = TTM_PL_FLAG_TT; +- else +- if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && +- (read_domains & NOUVEAU_GEM_DOMAIN_VRAM)) +- flags = TTM_PL_FLAG_VRAM; +- else +- flags = TTM_PL_FLAG_TT; +- } ++ if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) ++ valid_flags |= TTM_PL_FLAG_VRAM; + - switch (dcbent->type) { - case OUTPUT_ANALOG: -- ret = nv04_dac_create(dev, dcbent); -+ ret = nv04_dac_create(connector, dcbent); - break; - case OUTPUT_LVDS: - case OUTPUT_TMDS: -- ret = nv04_dfp_create(dev, dcbent); -+ ret = nv04_dfp_create(connector, dcbent); - break; - case OUTPUT_TV: - if (dcbent->location == DCB_LOC_ON_CHIP) -- ret = nv17_tv_create(dev, dcbent); -+ ret = nv17_tv_create(connector, dcbent); - else -- ret = nv04_tv_create(dev, dcbent); -+ ret = nv04_tv_create(connector, dcbent); - break; - default: - NV_WARN(dev, "DCB type %d not known\n", dcbent->type); -@@ -155,8 +160,14 @@ nv04_display_create(struct drm_device *dev) - continue; - } - -- for (i = 0; i < dcb->connector.entries; i++) -- nouveau_connector_create(dev, &dcb->connector.entry[i]); -+ list_for_each_entry_safe(connector, ct, -+ &dev->mode_config.connector_list, head) { -+ if (!connector->encoder_ids[0]) { -+ NV_WARN(dev, "%s has no encoders, removing\n", -+ drm_get_connector_name(connector)); -+ connector->funcs->destroy(connector); -+ } -+ } ++ if (valid_domains & NOUVEAU_GEM_DOMAIN_GART) ++ valid_flags |= TTM_PL_FLAG_TT; ++ ++ if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) && ++ bo->mem.mem_type == TTM_PL_VRAM) ++ pref_flags |= TTM_PL_FLAG_VRAM; ++ ++ else if ((domains & NOUVEAU_GEM_DOMAIN_GART) && ++ bo->mem.mem_type == TTM_PL_TT) ++ pref_flags |= TTM_PL_FLAG_TT; ++ ++ else if (domains & NOUVEAU_GEM_DOMAIN_VRAM) ++ pref_flags |= TTM_PL_FLAG_VRAM; ++ ++ else ++ pref_flags |= TTM_PL_FLAG_TT; ++ ++ nouveau_bo_placement_set(nvbo, pref_flags, valid_flags); - /* Save previous state */ - NVLockVgaCrtcs(dev, false); -diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c -index 66fe559..06cedd9 100644 ---- a/drivers/gpu/drm/nouveau/nv04_fifo.c -+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c -@@ -112,6 +112,12 @@ nv04_fifo_channel_id(struct drm_device *dev) - NV03_PFIFO_CACHE1_PUSH1_CHID_MASK; +- nouveau_bo_placement_set(nvbo, flags); + return 0; } -+#ifdef __BIG_ENDIAN -+#define DMA_FETCH_ENDIANNESS NV_PFIFO_CACHE1_BIG_ENDIAN -+#else -+#define DMA_FETCH_ENDIANNESS 0 -+#endif -+ - int - nv04_fifo_create_context(struct nouveau_channel *chan) +diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c +index dc46792..7855b35 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_hw.c ++++ 
b/drivers/gpu/drm/nouveau/nouveau_hw.c +@@ -160,7 +160,7 @@ static void + setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv) { -@@ -131,18 +137,13 @@ nv04_fifo_create_context(struct nouveau_channel *chan) - spin_lock_irqsave(&dev_priv->context_switch_lock, flags); - - /* Setup initial state */ -- dev_priv->engine.instmem.prepare_access(dev, true); - RAMFC_WR(DMA_PUT, chan->pushbuf_base); - RAMFC_WR(DMA_GET, chan->pushbuf_base); - RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4); - RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | - NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | --#ifdef __BIG_ENDIAN -- NV_PFIFO_CACHE1_BIG_ENDIAN | --#endif -- 0)); -- dev_priv->engine.instmem.finish_access(dev); -+ DMA_FETCH_ENDIANNESS)); - - /* enable the fifo dma operation */ - nv_wr32(dev, NV04_PFIFO_MODE, -@@ -169,8 +170,6 @@ nv04_fifo_do_load_context(struct drm_device *dev, int chid) struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t fc = NV04_RAMFC(chid), tmp; - -- dev_priv->engine.instmem.prepare_access(dev, false); -- - nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0)); - nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4)); - tmp = nv_ri32(dev, fc + 8); -@@ -181,8 +180,6 @@ nv04_fifo_do_load_context(struct drm_device *dev, int chid) - nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20)); - nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24)); - -- dev_priv->engine.instmem.finish_access(dev); -- - nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); - nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); - } -@@ -223,7 +220,6 @@ nv04_fifo_unload_context(struct drm_device *dev) - return -EINVAL; - } - -- dev_priv->engine.instmem.prepare_access(dev, true); - RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT)); - RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); - tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16; -@@ -233,7 +229,6 @@ nv04_fifo_unload_context(struct drm_device *dev) - RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH)); - RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE)); - RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1)); -- dev_priv->engine.instmem.finish_access(dev); - - nv04_fifo_do_load_context(dev, pfifo->channels - 1); - nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1); -@@ -297,6 +292,7 @@ nv04_fifo_init(struct drm_device *dev) - - nv04_fifo_init_intr(dev); - pfifo->enable(dev); -+ pfifo->reassign(dev, true); - - for (i = 0; i < dev_priv->engine.fifo.channels; i++) { - if (dev_priv->fifos[i]) { -diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c -index 618355e..c897342 100644 ---- a/drivers/gpu/drm/nouveau/nv04_graph.c -+++ b/drivers/gpu/drm/nouveau/nv04_graph.c -@@ -342,7 +342,7 @@ static uint32_t nv04_graph_ctx_regs[] = { - }; - - struct graph_state { -- int nv04[ARRAY_SIZE(nv04_graph_ctx_regs)]; -+ uint32_t nv04[ARRAY_SIZE(nv04_graph_ctx_regs)]; - }; - - struct nouveau_channel * -@@ -527,8 +527,7 @@ static int - nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +- int chip_version = dev_priv->vbios->chip_version; ++ int chip_version = dev_priv->vbios.chip_version; + uint32_t oldpll = NVReadRAMDAC(dev, 0, reg); + int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff; + uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1; +@@ -216,7 +216,7 @@ setPLL_double_highregs(struct drm_device *dev, uint32_t reg1, + 
struct nouveau_pll_vals *pv) { -- chan->fence.last_sequence_irq = data; -- nouveau_fence_handler(chan->dev, chan->id); -+ atomic_set(&chan->fence.last_sequence_irq, data); - return 0; - } + struct drm_nouveau_private *dev_priv = dev->dev_private; +- int chip_version = dev_priv->vbios->chip_version; ++ int chip_version = dev_priv->vbios.chip_version; + bool nv3035 = chip_version == 0x30 || chip_version == 0x35; + uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70); + uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1); +@@ -374,7 +374,7 @@ nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1, + struct nouveau_pll_vals *pv) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- int cv = dev_priv->vbios->chip_version; ++ int cv = dev_priv->vbios.chip_version; + + if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 || + cv >= 0x40) { +diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c +index 70e994d..316a3c7 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c ++++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c +@@ -254,16 +254,27 @@ struct nouveau_i2c_chan * + nouveau_i2c_find(struct drm_device *dev, int index) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index]; + +- if (index > DCB_MAX_NUM_I2C_ENTRIES) ++ if (index >= DCB_MAX_NUM_I2C_ENTRIES) + return NULL; + +- if (!bios->bdcb.dcb.i2c[index].chan) { +- if (nouveau_i2c_init(dev, &bios->bdcb.dcb.i2c[index], index)) +- return NULL; ++ if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) { ++ uint32_t reg = 0xe500, val; ++ ++ if (i2c->port_type == 6) { ++ reg += i2c->read * 0x50; ++ val = 0x2002; ++ } else { ++ reg += ((i2c->entry & 0x1e00) >> 9) * 0x50; ++ val = 0xe001; ++ } ++ ++ nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val); + } -diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c -index a3b9563..4408232 100644 ---- a/drivers/gpu/drm/nouveau/nv04_instmem.c -+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c -@@ -49,10 +49,8 @@ nv04_instmem_determine_amount(struct drm_device *dev) - NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10); - - /* Clear all of it, except the BIOS image that's in the first 64KiB */ -- dev_priv->engine.instmem.prepare_access(dev, true); - for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4) - nv_wi32(dev, i, 0x00000000); -- dev_priv->engine.instmem.finish_access(dev); +- return bios->bdcb.dcb.i2c[index].chan; ++ if (!i2c->chan && nouveau_i2c_init(dev, i2c, index)) ++ return NULL; ++ return i2c->chan; } - static void -@@ -106,7 +104,7 @@ int nv04_instmem_init(struct drm_device *dev) - { - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t offset; -- int ret = 0; -+ int ret; +diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c +index 447f9f6..13e73ce 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_irq.c ++++ b/drivers/gpu/drm/nouveau/nouveau_irq.c +@@ -51,6 +51,7 @@ nouveau_irq_preinstall(struct drm_device *dev) - nv04_instmem_determine_amount(dev); - nv04_instmem_configure_fixed_tables(dev); -@@ -129,14 +127,14 @@ int nv04_instmem_init(struct drm_device *dev) - offset = 0x40000; + if (dev_priv->card_type == NV_50) { + INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); ++ INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh); + INIT_LIST_HEAD(&dev_priv->vbl_waiting); } + } +@@ -311,6 +312,31 @@ 
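The nouveau_i2c_find() change in this region also tightens the bounds check from '>' to '>=': for a zero-based table of DCB_MAX_NUM_I2C_ENTRIES entries the valid indices run from 0 to DCB_MAX_NUM_I2C_ENTRIES - 1, so allowing index == DCB_MAX_NUM_I2C_ENTRIES would read one element past the end. A trivial sketch of the corrected check, with a placeholder table size:

#include <stdio.h>

#define MAX_ENTRIES 16   /* stands in for DCB_MAX_NUM_I2C_ENTRIES */

static int entry_valid(int index)
{
    /* Valid indices for a zero-based array of MAX_ENTRIES elements are
     * 0 .. MAX_ENTRIES - 1, so the test must be >=, not >. */
    return index >= 0 && index < MAX_ENTRIES;
}

int main(void)
{
    printf("%d %d %d\n", entry_valid(0), entry_valid(15), entry_valid(16)); /* 1 1 0 */
    return 0;
}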
nouveau_print_bitfield_names_(uint32_t value, + #define nouveau_print_bitfield_names(val, namelist) \ + nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist)) -- ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, -- offset, dev_priv->ramin_rsvd_vram - offset); -+ ret = drm_mm_init(&dev_priv->ramin_heap, offset, -+ dev_priv->ramin_rsvd_vram - offset); - if (ret) { -- dev_priv->ramin_heap = NULL; -- NV_ERROR(dev, "Failed to init RAMIN heap\n"); -+ NV_ERROR(dev, "Failed to init RAMIN heap: %d\n", ret); -+ return ret; - } ++struct nouveau_enum_names { ++ uint32_t value; ++ const char *name; ++}; ++ ++static void ++nouveau_print_enum_names_(uint32_t value, ++ const struct nouveau_enum_names *namelist, ++ const int namelist_len) ++{ ++ /* ++ * Caller must have already printed the KERN_* log level for us. ++ * Also the caller is responsible for adding the newline. ++ */ ++ int i; ++ for (i = 0; i < namelist_len; ++i) { ++ if (value == namelist[i].value) { ++ printk("%s", namelist[i].name); ++ return; ++ } ++ } ++ printk("unknown value 0x%08x", value); ++} ++#define nouveau_print_enum_names(val, namelist) \ ++ nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist)) -- return ret; -+ return 0; - } + static int + nouveau_graph_chid_from_grctx(struct drm_device *dev) +@@ -427,14 +453,16 @@ nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id, + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t nsource = trap->nsource, nstatus = trap->nstatus; - void -@@ -186,12 +184,7 @@ nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) - } +- NV_INFO(dev, "%s - nSource:", id); +- nouveau_print_bitfield_names(nsource, nsource_names); +- printk(", nStatus:"); +- if (dev_priv->card_type < NV_10) +- nouveau_print_bitfield_names(nstatus, nstatus_names); +- else +- nouveau_print_bitfield_names(nstatus, nstatus_names_nv10); +- printk("\n"); ++ if (dev_priv->card_type < NV_50) { ++ NV_INFO(dev, "%s - nSource:", id); ++ nouveau_print_bitfield_names(nsource, nsource_names); ++ printk(", nStatus:"); ++ if (dev_priv->card_type < NV_10) ++ nouveau_print_bitfield_names(nstatus, nstatus_names); ++ else ++ nouveau_print_bitfield_names(nstatus, nstatus_names_nv10); ++ printk("\n"); ++ } - void --nv04_instmem_prepare_access(struct drm_device *dev, bool write) --{ --} -- --void --nv04_instmem_finish_access(struct drm_device *dev) -+nv04_instmem_flush(struct drm_device *dev) - { + NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x " + "Data 0x%08x:0x%08x\n", +@@ -578,27 +606,502 @@ nouveau_pgraph_irq_handler(struct drm_device *dev) } -diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c -index 617ed1e..2af43a1 100644 ---- a/drivers/gpu/drm/nouveau/nv04_mc.c -+++ b/drivers/gpu/drm/nouveau/nv04_mc.c -@@ -11,6 +11,10 @@ nv04_mc_init(struct drm_device *dev) - */ - - nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF); + static void ++nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t trap[6]; ++ int i, ch; ++ uint32_t idx = nv_rd32(dev, 0x100c90); ++ if (idx & 0x80000000) { ++ idx &= 0xffffff; ++ if (display) { ++ for (i = 0; i < 6; i++) { ++ nv_wr32(dev, 0x100c90, idx | i << 24); ++ trap[i] = nv_rd32(dev, 0x100c94); ++ } ++ for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) { ++ struct nouveau_channel *chan = dev_priv->fifos[ch]; + -+ /* Disable PROM access. 
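nouveau_print_enum_names_() complements the existing bitfield printer: rather than naming each set bit, it looks the whole register value up in a { value, name } table and falls back to printing the raw number when nothing matches. A self-contained sketch of the same lookup, printing to stdout instead of the kernel log and reusing the MP_EXEC error values from the table added below purely as sample data:

#include <stdio.h>
#include <stdint.h>

struct enum_name {
    uint32_t value;
    const char *name;
};

static const struct enum_name mp_exec_errors[] = {
    { 0x03, "STACK_UNDERFLOW" },
    { 0x04, "QUADON_ACTIVE" },
    { 0x08, "TIMEOUT" },
    { 0x10, "INVALID_OPCODE" },
    { 0x40, "BREAKPOINT" },
};

static void print_enum_name(uint32_t value, const struct enum_name *tbl, size_t len)
{
    size_t i;
    for (i = 0; i < len; i++) {
        if (tbl[i].value == value) {
            printf("%s", tbl[i].name);
            return;
        }
    }
    printf("unknown value 0x%08x", (unsigned)value);
}

int main(void)
{
    print_enum_name(0x10, mp_exec_errors, sizeof(mp_exec_errors) / sizeof(mp_exec_errors[0]));
    putchar('\n');                        /* prints INVALID_OPCODE */
    return 0;
}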
*/ -+ nv_wr32(dev, NV_PBUS_PCI_NV_20, NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED); ++ if (!chan || !chan->ramin) ++ continue; + - return 0; - } - -diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c -index c4e3404..84b5954 100644 ---- a/drivers/gpu/drm/nouveau/nv04_tv.c -+++ b/drivers/gpu/drm/nouveau/nv04_tv.c -@@ -223,10 +223,12 @@ static void nv04_tv_destroy(struct drm_encoder *encoder) - kfree(nv_encoder); - } - --int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry) -+int -+nv04_tv_create(struct drm_connector *connector, struct dcb_entry *entry) ++ if (trap[1] == chan->ramin->instance >> 12) ++ break; ++ } ++ NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n", ++ name, (trap[5]&0x100?"read":"write"), ++ trap[5]&0xff, trap[4]&0xffff, ++ trap[3]&0xffff, trap[0], trap[2], ch); ++ } ++ nv_wr32(dev, 0x100c90, idx | 0x80000000); ++ } else if (display) { ++ NV_INFO(dev, "%s - no VM fault?\n", name); ++ } ++} ++ ++static struct nouveau_enum_names nv50_mp_exec_error_names[] = ++{ ++ { 3, "STACK_UNDERFLOW" }, ++ { 4, "QUADON_ACTIVE" }, ++ { 8, "TIMEOUT" }, ++ { 0x10, "INVALID_OPCODE" }, ++ { 0x40, "BREAKPOINT" }, ++}; ++ ++static void ++nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t units = nv_rd32(dev, 0x1540); ++ uint32_t addr, mp10, status, pc, oplow, ophigh; ++ int i; ++ int mps = 0; ++ for (i = 0; i < 4; i++) { ++ if (!(units & 1 << (i+24))) ++ continue; ++ if (dev_priv->chipset < 0xa0) ++ addr = 0x408200 + (tpid << 12) + (i << 7); ++ else ++ addr = 0x408100 + (tpid << 11) + (i << 7); ++ mp10 = nv_rd32(dev, addr + 0x10); ++ status = nv_rd32(dev, addr + 0x14); ++ if (!status) ++ continue; ++ if (display) { ++ nv_rd32(dev, addr + 0x20); ++ pc = nv_rd32(dev, addr + 0x24); ++ oplow = nv_rd32(dev, addr + 0x70); ++ ophigh= nv_rd32(dev, addr + 0x74); ++ NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - " ++ "TP %d MP %d: ", tpid, i); ++ nouveau_print_enum_names(status, ++ nv50_mp_exec_error_names); ++ printk(" at %06x warp %d, opcode %08x %08x\n", ++ pc&0xffffff, pc >> 24, ++ oplow, ophigh); ++ } ++ nv_wr32(dev, addr + 0x10, mp10); ++ nv_wr32(dev, addr + 0x14, 0); ++ mps++; ++ } ++ if (!mps && display) ++ NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: " ++ "No MPs claiming errors?\n", tpid); ++} ++ ++static void ++nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old, ++ uint32_t ustatus_new, int display, const char *name) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int tps = 0; ++ uint32_t units = nv_rd32(dev, 0x1540); ++ int i, r; ++ uint32_t ustatus_addr, ustatus; ++ for (i = 0; i < 16; i++) { ++ if (!(units & (1 << i))) ++ continue; ++ if (dev_priv->chipset < 0xa0) ++ ustatus_addr = ustatus_old + (i << 12); ++ else ++ ustatus_addr = ustatus_new + (i << 11); ++ ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff; ++ if (!ustatus) ++ continue; ++ tps++; ++ switch (type) { ++ case 6: /* texture error... 
unknown for now */ ++ nv50_pfb_vm_trap(dev, display, name); ++ if (display) { ++ NV_ERROR(dev, "magic set %d:\n", i); ++ for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) ++ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, ++ nv_rd32(dev, r)); ++ } ++ break; ++ case 7: /* MP error */ ++ if (ustatus & 0x00010000) { ++ nv50_pgraph_mp_trap(dev, i, display); ++ ustatus &= ~0x00010000; ++ } ++ break; ++ case 8: /* TPDMA error */ ++ { ++ uint32_t e0c = nv_rd32(dev, ustatus_addr + 4); ++ uint32_t e10 = nv_rd32(dev, ustatus_addr + 8); ++ uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc); ++ uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10); ++ uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14); ++ uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18); ++ uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c); ++ nv50_pfb_vm_trap(dev, display, name); ++ /* 2d engine destination */ ++ if (ustatus & 0x00000010) { ++ if (display) { ++ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n", ++ i, e14, e10); ++ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", ++ i, e0c, e18, e1c, e20, e24); ++ } ++ ustatus &= ~0x00000010; ++ } ++ /* Render target */ ++ if (ustatus & 0x00000040) { ++ if (display) { ++ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n", ++ i, e14, e10); ++ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", ++ i, e0c, e18, e1c, e20, e24); ++ } ++ ustatus &= ~0x00000040; ++ } ++ /* CUDA memory: l[], g[] or stack. */ ++ if (ustatus & 0x00000080) { ++ if (display) { ++ if (e18 & 0x80000000) { ++ /* g[] read fault? */ ++ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n", ++ i, e14, e10 | ((e18 >> 24) & 0x1f)); ++ e18 &= ~0x1f000000; ++ } else if (e18 & 0xc) { ++ /* g[] write fault? */ ++ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n", ++ i, e14, e10 | ((e18 >> 7) & 0x1f)); ++ e18 &= ~0x00000f80; ++ } else { ++ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n", ++ i, e14, e10); ++ } ++ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", ++ i, e0c, e18, e1c, e20, e24); ++ } ++ ustatus &= ~0x00000080; ++ } ++ } ++ break; ++ } ++ if (ustatus) { ++ if (display) ++ NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); ++ } ++ nv_wr32(dev, ustatus_addr, 0xc0000000); ++ } ++ ++ if (!tps && display) ++ NV_INFO(dev, "%s - No TPs claiming errors?\n", name); ++} ++ ++static void ++nv50_pgraph_trap_handler(struct drm_device *dev) ++{ ++ struct nouveau_pgraph_trap trap; ++ uint32_t status = nv_rd32(dev, 0x400108); ++ uint32_t ustatus; ++ int display = nouveau_ratelimit(); ++ ++ ++ if (!status && display) { ++ nouveau_graph_trap_info(dev, &trap); ++ nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap); ++ NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n"); ++ } ++ ++ /* DISPATCH: Relays commands to other units and handles NOTIFY, ++ * COND, QUERY. If you get a trap from it, the command is still stuck ++ * in DISPATCH and you need to do something about it. */ ++ if (status & 0x001) { ++ ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff; ++ if (!ustatus && display) { ++ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n"); ++ } ++ ++ /* Known to be triggered by screwed up NOTIFY and COND... 
*/ ++ if (ustatus & 0x00000001) { ++ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT"); ++ nv_wr32(dev, 0x400500, 0); ++ if (nv_rd32(dev, 0x400808) & 0x80000000) { ++ if (display) { ++ if (nouveau_graph_trapped_channel(dev, &trap.channel)) ++ trap.channel = -1; ++ trap.class = nv_rd32(dev, 0x400814); ++ trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc; ++ trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7; ++ trap.data = nv_rd32(dev, 0x40080c); ++ trap.data2 = nv_rd32(dev, 0x400810); ++ nouveau_graph_dump_trap_info(dev, ++ "PGRAPH_TRAP_DISPATCH_FAULT", &trap); ++ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808)); ++ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848)); ++ } ++ nv_wr32(dev, 0x400808, 0); ++ } else if (display) { ++ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n"); ++ } ++ nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3); ++ nv_wr32(dev, 0x400848, 0); ++ ustatus &= ~0x00000001; ++ } ++ if (ustatus & 0x00000002) { ++ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY"); ++ nv_wr32(dev, 0x400500, 0); ++ if (nv_rd32(dev, 0x40084c) & 0x80000000) { ++ if (display) { ++ if (nouveau_graph_trapped_channel(dev, &trap.channel)) ++ trap.channel = -1; ++ trap.class = nv_rd32(dev, 0x400814); ++ trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc; ++ trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7; ++ trap.data = nv_rd32(dev, 0x40085c); ++ trap.data2 = 0; ++ nouveau_graph_dump_trap_info(dev, ++ "PGRAPH_TRAP_DISPATCH_QUERY", &trap); ++ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c)); ++ } ++ nv_wr32(dev, 0x40084c, 0); ++ } else if (display) { ++ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n"); ++ } ++ ustatus &= ~0x00000002; ++ } ++ if (ustatus && display) ++ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus); ++ nv_wr32(dev, 0x400804, 0xc0000000); ++ nv_wr32(dev, 0x400108, 0x001); ++ status &= ~0x001; ++ } ++ ++ /* TRAPs other than dispatch use the "normal" trap regs. */ ++ if (status && display) { ++ nouveau_graph_trap_info(dev, &trap); ++ nouveau_graph_dump_trap_info(dev, ++ "PGRAPH_TRAP", &trap); ++ } ++ ++ /* M2MF: Memory to memory copy engine. */ ++ if (status & 0x002) { ++ ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff; ++ if (!ustatus && display) { ++ NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n"); ++ } ++ if (ustatus & 0x00000001) { ++ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY"); ++ ustatus &= ~0x00000001; ++ } ++ if (ustatus & 0x00000002) { ++ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN"); ++ ustatus &= ~0x00000002; ++ } ++ if (ustatus & 0x00000004) { ++ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT"); ++ ustatus &= ~0x00000004; ++ } ++ NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n", ++ nv_rd32(dev, 0x406804), ++ nv_rd32(dev, 0x406808), ++ nv_rd32(dev, 0x40680c), ++ nv_rd32(dev, 0x406810)); ++ if (ustatus && display) ++ NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus); ++ /* No sane way found yet -- just reset the bugger. */ ++ nv_wr32(dev, 0x400040, 2); ++ nv_wr32(dev, 0x400040, 0); ++ nv_wr32(dev, 0x406800, 0xc0000000); ++ nv_wr32(dev, 0x400108, 0x002); ++ status &= ~0x002; ++ } ++ ++ /* VFETCH: Fetches data from vertex buffers. 
*/ ++ if (status & 0x004) { ++ ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff; ++ if (!ustatus && display) { ++ NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n"); ++ } ++ if (ustatus & 0x00000001) { ++ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT"); ++ NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n", ++ nv_rd32(dev, 0x400c00), ++ nv_rd32(dev, 0x400c08), ++ nv_rd32(dev, 0x400c0c), ++ nv_rd32(dev, 0x400c10)); ++ ustatus &= ~0x00000001; ++ } ++ if (ustatus && display) ++ NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus); ++ nv_wr32(dev, 0x400c04, 0xc0000000); ++ nv_wr32(dev, 0x400108, 0x004); ++ status &= ~0x004; ++ } ++ ++ /* STRMOUT: DirectX streamout / OpenGL transform feedback. */ ++ if (status & 0x008) { ++ ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff; ++ if (!ustatus && display) { ++ NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n"); ++ } ++ if (ustatus & 0x00000001) { ++ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT"); ++ NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n", ++ nv_rd32(dev, 0x401804), ++ nv_rd32(dev, 0x401808), ++ nv_rd32(dev, 0x40180c), ++ nv_rd32(dev, 0x401810)); ++ ustatus &= ~0x00000001; ++ } ++ if (ustatus && display) ++ NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus); ++ /* No sane way found yet -- just reset the bugger. */ ++ nv_wr32(dev, 0x400040, 0x80); ++ nv_wr32(dev, 0x400040, 0); ++ nv_wr32(dev, 0x401800, 0xc0000000); ++ nv_wr32(dev, 0x400108, 0x008); ++ status &= ~0x008; ++ } ++ ++ /* CCACHE: Handles code and c[] caches and fills them. */ ++ if (status & 0x010) { ++ ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff; ++ if (!ustatus && display) { ++ NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n"); ++ } ++ if (ustatus & 0x00000001) { ++ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT"); ++ NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n", ++ nv_rd32(dev, 0x405800), ++ nv_rd32(dev, 0x405804), ++ nv_rd32(dev, 0x405808), ++ nv_rd32(dev, 0x40580c), ++ nv_rd32(dev, 0x405810), ++ nv_rd32(dev, 0x405814), ++ nv_rd32(dev, 0x40581c)); ++ ustatus &= ~0x00000001; ++ } ++ if (ustatus && display) ++ NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus); ++ nv_wr32(dev, 0x405018, 0xc0000000); ++ nv_wr32(dev, 0x400108, 0x010); ++ status &= ~0x010; ++ } ++ ++ /* Unknown, not seen yet... 0x402000 is the only trap status reg ++ * remaining, so try to handle it anyway. Perhaps related to that ++ * unknown DMA slot on tesla? */ ++ if (status & 0x20) { ++ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04"); ++ ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff; ++ if (display) ++ NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus); ++ nv_wr32(dev, 0x402000, 0xc0000000); ++ /* no status modifiction on purpose */ ++ } ++ ++ /* TEXTURE: CUDA texturing units */ ++ if (status & 0x040) { ++ nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display, ++ "PGRAPH_TRAP_TEXTURE"); ++ nv_wr32(dev, 0x400108, 0x040); ++ status &= ~0x040; ++ } ++ ++ /* MP: CUDA execution engines. */ ++ if (status & 0x080) { ++ nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display, ++ "PGRAPH_TRAP_MP"); ++ nv_wr32(dev, 0x400108, 0x080); ++ status &= ~0x080; ++ } ++ ++ /* TPDMA: Handles TP-initiated uncached memory accesses: ++ * l[], g[], stack, 2d surfaces, render targets. 
*/ ++ if (status & 0x100) { ++ nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display, ++ "PGRAPH_TRAP_TPDMA"); ++ nv_wr32(dev, 0x400108, 0x100); ++ status &= ~0x100; ++ } ++ ++ if (status) { ++ if (display) ++ NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n", ++ status); ++ nv_wr32(dev, 0x400108, status); ++ } ++} ++ ++/* There must be a *lot* of these. Will take some time to gather them up. */ ++static struct nouveau_enum_names nv50_data_error_names[] = ++{ ++ { 4, "INVALID_VALUE" }, ++ { 5, "INVALID_ENUM" }, ++ { 8, "INVALID_OBJECT" }, ++ { 0xc, "INVALID_BITFIELD" }, ++ { 0x28, "MP_NO_REG_SPACE" }, ++ { 0x2b, "MP_BLOCK_SIZE_MISMATCH" }, ++}; ++ ++static void + nv50_pgraph_irq_handler(struct drm_device *dev) { - struct nouveau_encoder *nv_encoder; - struct drm_encoder *encoder; -+ struct drm_device *dev = connector->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct i2c_adapter *adap; - struct drm_encoder_funcs *funcs = NULL; -@@ -266,7 +268,7 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry) ++ struct nouveau_pgraph_trap trap; ++ int unhandled = 0; + uint32_t status; + + while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { +- uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); +- ++ /* NOTIFY: You've set a NOTIFY an a command and it's done. */ + if (status & 0x00000001) { +- nouveau_pgraph_intr_notify(dev, nsource); ++ nouveau_graph_trap_info(dev, &trap); ++ if (nouveau_ratelimit()) ++ nouveau_graph_dump_trap_info(dev, ++ "PGRAPH_NOTIFY", &trap); + status &= ~0x00000001; + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); + } - was_locked = NVLockVgaCrtcs(dev, false); +- if (status & 0x00000010) { +- nouveau_pgraph_intr_error(dev, nsource | +- NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); ++ /* COMPUTE_QUERY: Purpose and exact cause unknown, happens ++ * when you write 0x200 to 0x50c0 method 0x31c. */ ++ if (status & 0x00000002) { ++ nouveau_graph_trap_info(dev, &trap); ++ if (nouveau_ratelimit()) ++ nouveau_graph_dump_trap_info(dev, ++ "PGRAPH_COMPUTE_QUERY", &trap); ++ status &= ~0x00000002; ++ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002); ++ } ++ ++ /* Unknown, never seen: 0x4 */ + ++ /* ILLEGAL_MTHD: You used a wrong method for this class. */ ++ if (status & 0x00000010) { ++ nouveau_graph_trap_info(dev, &trap); ++ if (nouveau_pgraph_intr_swmthd(dev, &trap)) ++ unhandled = 1; ++ if (unhandled && nouveau_ratelimit()) ++ nouveau_graph_dump_trap_info(dev, ++ "PGRAPH_ILLEGAL_MTHD", &trap); + status &= ~0x00000010; + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); + } -- ret = drm_i2c_encoder_init(encoder->dev, to_encoder_slave(encoder), adap, -+ ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder), adap, - &nv04_tv_encoder_info[type].board_info); ++ /* ILLEGAL_CLASS: You used a wrong class. */ ++ if (status & 0x00000020) { ++ nouveau_graph_trap_info(dev, &trap); ++ if (nouveau_ratelimit()) ++ nouveau_graph_dump_trap_info(dev, ++ "PGRAPH_ILLEGAL_CLASS", &trap); ++ status &= ~0x00000020; ++ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020); ++ } ++ ++ /* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. 
*/ ++ if (status & 0x00000040) { ++ nouveau_graph_trap_info(dev, &trap); ++ if (nouveau_ratelimit()) ++ nouveau_graph_dump_trap_info(dev, ++ "PGRAPH_DOUBLE_NOTIFY", &trap); ++ status &= ~0x00000040; ++ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040); ++ } ++ ++ /* CONTEXT_SWITCH: PGRAPH needs us to load a new context */ + if (status & 0x00001000) { + nv_wr32(dev, 0x400500, 0x00000000); + nv_wr32(dev, NV03_PGRAPH_INTR, +@@ -613,49 +1116,59 @@ nv50_pgraph_irq_handler(struct drm_device *dev) + status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + } - NVLockVgaCrtcs(dev, was_locked); -@@ -294,7 +296,9 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry) +- if (status & 0x00100000) { +- nouveau_pgraph_intr_error(dev, nsource | +- NV03_PGRAPH_NSOURCE_DATA_ERROR); ++ /* BUFFER_NOTIFY: Your m2mf transfer finished */ ++ if (status & 0x00010000) { ++ nouveau_graph_trap_info(dev, &trap); ++ if (nouveau_ratelimit()) ++ nouveau_graph_dump_trap_info(dev, ++ "PGRAPH_BUFFER_NOTIFY", &trap); ++ status &= ~0x00010000; ++ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000); ++ } - /* Set the slave encoder configuration */ - sfuncs->set_config(encoder, nv04_tv_encoder_info[type].params); -+ sfuncs->create_resources(encoder, connector); ++ /* DATA_ERROR: Invalid value for this method, or invalid ++ * state in current PGRAPH context for this operation */ ++ if (status & 0x00100000) { ++ nouveau_graph_trap_info(dev, &trap); ++ if (nouveau_ratelimit()) { ++ nouveau_graph_dump_trap_info(dev, ++ "PGRAPH_DATA_ERROR", &trap); ++ NV_INFO (dev, "PGRAPH_DATA_ERROR - "); ++ nouveau_print_enum_names(nv_rd32(dev, 0x400110), ++ nv50_data_error_names); ++ printk("\n"); ++ } + status &= ~0x00100000; + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); + } -+ drm_mode_connector_attach_encoder(connector, encoder); - return 0; ++ /* TRAP: Something bad happened in the middle of command ++ * execution. Has a billion types, subtypes, and even ++ * subsubtypes. 
*/ + if (status & 0x00200000) { +- int r; +- +- nouveau_pgraph_intr_error(dev, nsource | +- NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); +- +- NV_ERROR(dev, "magic set 1:\n"); +- for (r = 0x408900; r <= 0x408910; r += 4) +- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, +- nv_rd32(dev, r)); +- nv_wr32(dev, 0x408900, +- nv_rd32(dev, 0x408904) | 0xc0000000); +- for (r = 0x408e08; r <= 0x408e24; r += 4) +- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, +- nv_rd32(dev, r)); +- nv_wr32(dev, 0x408e08, +- nv_rd32(dev, 0x408e08) | 0xc0000000); +- +- NV_ERROR(dev, "magic set 2:\n"); +- for (r = 0x409900; r <= 0x409910; r += 4) +- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, +- nv_rd32(dev, r)); +- nv_wr32(dev, 0x409900, +- nv_rd32(dev, 0x409904) | 0xc0000000); +- for (r = 0x409e08; r <= 0x409e24; r += 4) +- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, +- nv_rd32(dev, r)); +- nv_wr32(dev, 0x409e08, +- nv_rd32(dev, 0x409e08) | 0xc0000000); +- ++ nv50_pgraph_trap_handler(dev); + status &= ~0x00200000; +- nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); + nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); + } - fail: -diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c -index 7aeabf2..7a4069c 100644 ---- a/drivers/gpu/drm/nouveau/nv10_fifo.c -+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c -@@ -55,7 +55,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan) - /* Fill entries that are seen filled in dumps of nvidia driver just - * after channel's is put into DMA mode - */ -- dev_priv->engine.instmem.prepare_access(dev, true); - nv_wi32(dev, fc + 0, chan->pushbuf_base); - nv_wi32(dev, fc + 4, chan->pushbuf_base); - nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4); -@@ -66,7 +65,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan) - NV_PFIFO_CACHE1_BIG_ENDIAN | - #endif - 0); -- dev_priv->engine.instmem.finish_access(dev); ++ /* Unknown, never seen: 0x00400000 */ ++ ++ /* SINGLE_STEP: Happens on every method if you turned on ++ * single stepping in 40008c */ ++ if (status & 0x01000000) { ++ nouveau_graph_trap_info(dev, &trap); ++ if (nouveau_ratelimit()) ++ nouveau_graph_dump_trap_info(dev, ++ "PGRAPH_SINGLE_STEP", &trap); ++ status &= ~0x01000000; ++ nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000); ++ } ++ ++ /* 0x02000000 happens when you pause a ctxprog... ++ * but the only way this can happen that I know is by ++ * poking the relevant MMIO register, and we don't ++ * do that. 
*/ ++ + if (status) { + NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", + status); +@@ -672,7 +1185,8 @@ nv50_pgraph_irq_handler(struct drm_device *dev) + } - /* enable the fifo dma operation */ - nv_wr32(dev, NV04_PFIFO_MODE, -@@ -91,8 +89,6 @@ nv10_fifo_do_load_context(struct drm_device *dev, int chid) + nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); +- nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); ++ if (nv_rd32(dev, 0x400824) & (1 << 31)) ++ nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); + } + + static void +@@ -691,11 +1205,14 @@ nouveau_irq_handler(DRM_IRQ_ARGS) + struct drm_device *dev = (struct drm_device *)arg; struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t fc = NV10_RAMFC(chid), tmp; + uint32_t status, fbdev_flags = 0; ++ unsigned long flags; -- dev_priv->engine.instmem.prepare_access(dev, false); -- - nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0)); - nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4)); - nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8)); -@@ -117,8 +113,6 @@ nv10_fifo_do_load_context(struct drm_device *dev, int chid) - nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48)); - - out: -- dev_priv->engine.instmem.finish_access(dev); -- - nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); - nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); - } -@@ -155,8 +149,6 @@ nv10_fifo_unload_context(struct drm_device *dev) - return 0; - fc = NV10_RAMFC(chid); + status = nv_rd32(dev, NV03_PMC_INTR_0); + if (!status) + return IRQ_NONE; -- dev_priv->engine.instmem.prepare_access(dev, true); -- - nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT)); - nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); - nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT)); -@@ -179,8 +171,6 @@ nv10_fifo_unload_context(struct drm_device *dev) - nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); - - out: -- dev_priv->engine.instmem.finish_access(dev); -- - nv10_fifo_do_load_context(dev, pfifo->channels - 1); - nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1); - return 0; -diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c -index 74c8803..359506e 100644 ---- a/drivers/gpu/drm/nouveau/nv17_tv.c -+++ b/drivers/gpu/drm/nouveau/nv17_tv.c -@@ -116,6 +116,20 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder) - return sample; ++ spin_lock_irqsave(&dev_priv->context_switch_lock, flags); ++ + if (dev_priv->fbdev_info) { + fbdev_flags = dev_priv->fbdev_info->flags; + dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; +@@ -733,5 +1250,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS) + if (dev_priv->fbdev_info) + dev_priv->fbdev_info->flags = fbdev_flags; + ++ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); ++ + return IRQ_HANDLED; } +diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c +index 2dc09db..816948b 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_mem.c ++++ b/drivers/gpu/drm/nouveau/nouveau_mem.c +@@ -347,6 +347,20 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, + return -EBUSY; + } -+static bool -+get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) -+{ -+ /* Zotac FX5200 */ -+ if ((dev->pdev->device == 0x0322) && -+ (dev->pdev->subsystem_vendor == 0x19da) && -+ (dev->pdev->subsystem_device == 0x2035)) { -+ *pin_mask = 0xc; -+ return false; ++ nv_wr32(dev, 0x100c80, 0x00040001); ++ if (!nv_wait(0x100c80, 0x00000001, 
0x00000000)) { ++ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); ++ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); ++ return -EBUSY; + } + -+ return true; -+} ++ nv_wr32(dev, 0x100c80, 0x00060001); ++ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { ++ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); ++ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); ++ return -EBUSY; ++ } + - static enum drm_connector_status - nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) - { -@@ -124,12 +138,20 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) - struct drm_mode_config *conf = &dev->mode_config; - struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); - struct dcb_entry *dcb = tv_enc->base.dcb; -+ bool reliable = get_tv_detect_quirks(dev, &tv_enc->pin_mask); - -- if (dev_priv->chipset == 0x42 || -- dev_priv->chipset == 0x43) -- tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe; -- else -- tv_enc->pin_mask = nv17_dac_sample_load(encoder) >> 28 & 0xe; -+ if (nv04_dac_in_use(encoder)) -+ return connector_status_disconnected; + return 0; + } + +@@ -387,6 +401,20 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); ++ return; ++ } + -+ if (reliable) { -+ if (dev_priv->chipset == 0x42 || -+ dev_priv->chipset == 0x43) -+ tv_enc->pin_mask = -+ nv42_tv_sample_load(encoder) >> 28 & 0xe; -+ else -+ tv_enc->pin_mask = -+ nv17_dac_sample_load(encoder) >> 28 & 0xe; ++ nv_wr32(dev, 0x100c80, 0x00040001); ++ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { ++ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); ++ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); ++ return; + } ++ ++ nv_wr32(dev, 0x100c80, 0x00060001); ++ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { ++ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); ++ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); + } + } - switch (tv_enc->pin_mask) { - case 0x2: -@@ -154,7 +176,9 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) - conf->tv_subconnector_property, - tv_enc->subconnector); - -- if (tv_enc->subconnector) { -+ if (!reliable) { -+ return connector_status_unknown; -+ } else if (tv_enc->subconnector) { - NV_INFO(dev, "Load detected on output %c\n", - '@' + ffs(dcb->or)); - return connector_status_connected; -@@ -296,6 +320,9 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder, - { - struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); +@@ -449,9 +477,30 @@ void nouveau_mem_close(struct drm_device *dev) + } + } -+ if (nv04_dac_in_use(encoder)) -+ return false; +-/*XXX won't work on BSD because of pci_read_config_dword */ + static uint32_t +-nouveau_mem_fb_amount_igp(struct drm_device *dev) ++nouveau_mem_detect_nv04(struct drm_device *dev) ++{ ++ uint32_t boot0 = nv_rd32(dev, NV03_BOOT_0); + - if (tv_norm->kind == CTV_ENC_MODE) - adjusted_mode->clock = tv_norm->ctv_enc_mode.mode.clock; - else -@@ -744,8 +771,10 @@ static struct drm_encoder_funcs nv17_tv_funcs = { - .destroy = nv17_tv_destroy, - }; - --int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry) -+int -+nv17_tv_create(struct drm_connector *connector, struct dcb_entry *entry) ++ if (boot0 & 0x00000100) ++ return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024; ++ ++ switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) { ++ 
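The flush sequences in this region all follow one pattern: write a request to 0x100c80, then poll that register until its busy bit clears, and report -EBUSY on timeout (which is what nv_wait() provides for the driver). A hedged, userspace-style sketch of such a poll loop, with a function pointer standing in for the MMIO read and a fixed retry count standing in for a real timeout:

#include <stdint.h>
#include <stdio.h>

/* Poll until (read_reg(reg) & mask) == value or we run out of tries.
 * read_reg stands in for an MMIO read such as nv_rd32(); in the kernel the
 * loop would also sleep or compare against a real timeout. */
static int wait_reg(uint32_t (*read_reg)(uint32_t), uint32_t reg,
                    uint32_t mask, uint32_t value, int max_tries)
{
    int i;
    for (i = 0; i < max_tries; i++) {
        if ((read_reg(reg) & mask) == value)
            return 1;   /* condition met */
    }
    return 0;           /* timed out, caller reports -EBUSY */
}

/* Fake register that reads busy a few times, then idle. */
static uint32_t fake_read(uint32_t reg)
{
    static int reads;
    (void)reg;
    return ++reads < 3 ? 0x00000001 : 0x00000000;
}

int main(void)
{
    printf("flush %s\n", wait_reg(fake_read, 0x100c80, 0x1, 0x0, 10) ? "done" : "timed out");
    return 0;
}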
case NV04_BOOT_0_RAM_AMOUNT_32MB: ++ return 32 * 1024 * 1024; ++ case NV04_BOOT_0_RAM_AMOUNT_16MB: ++ return 16 * 1024 * 1024; ++ case NV04_BOOT_0_RAM_AMOUNT_8MB: ++ return 8 * 1024 * 1024; ++ case NV04_BOOT_0_RAM_AMOUNT_4MB: ++ return 4 * 1024 * 1024; ++ } ++ ++ return 0; ++} ++ ++static uint32_t ++nouveau_mem_detect_nforce(struct drm_device *dev) { -+ struct drm_device *dev = connector->dev; - struct drm_encoder *encoder; - struct nv17_tv_encoder *tv_enc = NULL; - -@@ -774,5 +803,7 @@ int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry) - encoder->possible_crtcs = entry->heads; - encoder->possible_clones = 0; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct pci_dev *bridge; +@@ -463,11 +512,11 @@ nouveau_mem_fb_amount_igp(struct drm_device *dev) + return 0; + } -+ nv17_tv_create_resources(encoder, connector); -+ drm_mode_connector_attach_encoder(connector, encoder); - return 0; +- if (dev_priv->flags&NV_NFORCE) { ++ if (dev_priv->flags & NV_NFORCE) { + pci_read_config_dword(bridge, 0x7C, &mem); + return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; + } else +- if (dev_priv->flags&NV_NFORCE2) { ++ if (dev_priv->flags & NV_NFORCE2) { + pci_read_config_dword(bridge, 0x84, &mem); + return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; + } +@@ -477,50 +526,39 @@ nouveau_mem_fb_amount_igp(struct drm_device *dev) } -diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c -index d6fc0a8..191c15c 100644 ---- a/drivers/gpu/drm/nouveau/nv20_graph.c -+++ b/drivers/gpu/drm/nouveau/nv20_graph.c -@@ -370,68 +370,54 @@ nv20_graph_create_context(struct nouveau_channel *chan) + + /* returns the amount of FB ram in bytes */ +-uint64_t nouveau_mem_fb_amount(struct drm_device *dev) ++int ++nouveau_mem_detect(struct drm_device *dev) { - struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; -+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); -- unsigned int ctx_size; - unsigned int idoffs = 0x28/4; - int ret; - - switch (dev_priv->chipset) { - case 0x20: -- ctx_size = NV20_GRCTX_SIZE; - ctx_init = nv20_graph_context_init; - idoffs = 0; - break; - case 0x25: - case 0x28: -- ctx_size = NV25_GRCTX_SIZE; - ctx_init = nv25_graph_context_init; - break; - case 0x2a: -- ctx_size = NV2A_GRCTX_SIZE; - ctx_init = nv2a_graph_context_init; - idoffs = 0; - break; - case 0x30: - case 0x31: -- ctx_size = NV30_31_GRCTX_SIZE; - ctx_init = nv30_31_graph_context_init; - break; - case 0x34: -- ctx_size = NV34_GRCTX_SIZE; - ctx_init = nv34_graph_context_init; - break; - case 0x35: - case 0x36: -- ctx_size = NV35_36_GRCTX_SIZE; - ctx_init = nv35_36_graph_context_init; - break; - default: -- ctx_size = 0; -- ctx_init = nv35_36_graph_context_init; -- NV_ERROR(dev, "Please contact the devs if you want your NV%x" -- " card to work\n", dev_priv->chipset); -- return -ENOSYS; +- uint32_t boot0; +- +- switch (dev_priv->card_type) { +- case NV_04: +- boot0 = nv_rd32(dev, NV03_BOOT_0); +- if (boot0 & 0x00000100) +- return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024; +- +- switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) { +- case NV04_BOOT_0_RAM_AMOUNT_32MB: +- return 32 * 1024 * 1024; +- case NV04_BOOT_0_RAM_AMOUNT_16MB: +- return 16 * 1024 * 1024; +- case NV04_BOOT_0_RAM_AMOUNT_8MB: +- return 8 * 1024 * 1024; +- case NV04_BOOT_0_RAM_AMOUNT_4MB: +- return 4 * 1024 * 1024; +- } +- break; +- case NV_10: +- case NV_20: +- case NV_30: +- case NV_40: +- case NV_50: +- 
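nouveau_mem_detect_nv04() above reads the installed VRAM out of the BOOT_0 strap register: one bit selects an extended encoding where a 4-bit field gives the size directly, otherwise a small enumerated field maps to 32/16/8/4 MiB. The sketch below mirrors that shape; the bit positions of the extended encoding follow the hunk, while the 2-bit enumerated mapping uses placeholder values rather than the real NV04_BOOT_0_RAM_AMOUNT_* definitions.

#include <stdint.h>
#include <stdio.h>

static uint64_t boot0_to_vram_bytes(uint32_t boot0)
{
    const uint64_t mib = 1024 * 1024;

    if (boot0 & 0x00000100)                       /* extended encoding */
        return (((boot0 >> 12) & 0xf) * 2 + 2) * mib;

    switch (boot0 & 0x3) {                        /* enumerated encoding (placeholder mapping) */
    case 0:  return 32 * mib;
    case 1:  return 16 * mib;
    case 2:  return  8 * mib;
    default: return  4 * mib;
    }
}

int main(void)
{
    printf("%llu MiB\n",
           (unsigned long long)(boot0_to_vram_bytes(0x00000100 | (6 << 12)) >> 20)); /* 14 MiB */
    return 0;
}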
default: +- if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { +- return nouveau_mem_fb_amount_igp(dev); +- } else { +- uint64_t mem; +- mem = (nv_rd32(dev, NV04_FIFO_DATA) & +- NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> +- NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT; +- return mem * 1024 * 1024; ++ ++ if (dev_priv->card_type == NV_04) { ++ dev_priv->vram_size = nouveau_mem_detect_nv04(dev); ++ } else ++ if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { ++ dev_priv->vram_size = nouveau_mem_detect_nforce(dev); ++ } else ++ if (dev_priv->card_type < NV_50) { ++ dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); ++ dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK; ++ } else { ++ dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); ++ dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; ++ dev_priv->vram_size &= 0xffffffff00; ++ if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) { ++ dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10); ++ dev_priv->vram_sys_base <<= 12; + } - break; -+ BUG_ON(1); } -- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16, -- NVOBJ_FLAG_ZERO_ALLOC, -- &chan->ramin_grctx); -+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, -+ 16, NVOBJ_FLAG_ZERO_ALLOC, -+ &chan->ramin_grctx); - if (ret) - return ret; +- NV_ERROR(dev, +- "Unable to detect video ram size. Please report your setup to " +- DRIVER_EMAIL "\n"); +- return 0; ++ NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); ++ if (dev_priv->vram_sys_base) { ++ NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", ++ dev_priv->vram_sys_base); ++ } ++ ++ if (dev_priv->vram_size) ++ return 0; ++ return -ENOMEM; + } - /* Initialise default context values */ -- dev_priv->engine.instmem.prepare_access(dev, true); - ctx_init(dev, chan->ramin_grctx->gpuobj); + #if __OS_HAS_AGP +@@ -631,15 +669,12 @@ nouveau_mem_init(struct drm_device *dev) + spin_lock_init(&dev_priv->ttm.bo_list_lock); + spin_lock_init(&dev_priv->tile.lock); - /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */ - nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs, - (chan->id << 24) | 0x1); /* CTX_USER */ +- dev_priv->fb_available_size = nouveau_mem_fb_amount(dev); +- ++ dev_priv->fb_available_size = dev_priv->vram_size; + dev_priv->fb_mappable_pages = dev_priv->fb_available_size; + if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1)) + dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1); + dev_priv->fb_mappable_pages >>= PAGE_SHIFT; -- nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, -- chan->ramin_grctx->instance >> 4); +- NV_INFO(dev, "%d MiB VRAM\n", (int)(dev_priv->fb_available_size >> 20)); - -- dev_priv->engine.instmem.finish_access(dev); -+ nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id, -+ chan->ramin_grctx->instance >> 4); + /* remove reserved space at end of vram from available amount */ + dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; + dev_priv->fb_aper_free = dev_priv->fb_available_size; +diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h +index aa9b310..6ca80a3 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_reg.h ++++ b/drivers/gpu/drm/nouveau/nouveau_reg.h +@@ -826,6 +826,7 @@ + #define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000 + #define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80) + #define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80) ++#define NV50_SOR_DP_UNK128(i,l) (0x0061c128 + (i) * 0x800 + (l) * 0x80) + #define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + 
(i) * 0x800 + (l) * 0x80) + + #define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000) +diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c +index ed15905..554fb45 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c ++++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c +@@ -171,6 +171,24 @@ nouveau_sgdma_unbind(struct ttm_backend *be) + } + dev_priv->engine.instmem.finish_access(nvbe->dev); + ++ if (dev_priv->card_type == NV_50) { ++ nv_wr32(dev, 0x100c80, 0x00050001); ++ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { ++ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); ++ NV_ERROR(dev, "0x100c80 = 0x%08x\n", ++ nv_rd32(dev, 0x100c80)); ++ return -EBUSY; ++ } ++ ++ nv_wr32(dev, 0x100c80, 0x00000001); ++ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { ++ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); ++ NV_ERROR(dev, "0x100c80 = 0x%08x\n", ++ nv_rd32(dev, 0x100c80)); ++ return -EBUSY; ++ } ++ } ++ + nvbe->bound = false; return 0; } +diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c +index a8d77c8..7c1d252 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_state.c ++++ b/drivers/gpu/drm/nouveau/nouveau_state.c +@@ -34,7 +34,6 @@ + #include "nouveau_drm.h" + #include "nv50_display.h" -@@ -440,13 +426,12 @@ nv20_graph_destroy_context(struct nouveau_channel *chan) - { - struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; -+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; +-static int nouveau_stub_init(struct drm_device *dev) { return 0; } + static void nouveau_stub_takedown(struct drm_device *dev) {} - if (chan->ramin_grctx) - nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); - -- dev_priv->engine.instmem.prepare_access(dev, true); -- nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, 0); -- dev_priv->engine.instmem.finish_access(dev); -+ nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id, 0); - } - - int -@@ -538,29 +523,44 @@ nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, - int - nv20_graph_init(struct drm_device *dev) - { -- struct drm_nouveau_private *dev_priv = -- (struct drm_nouveau_private *)dev->dev_private; -+ struct drm_nouveau_private *dev_priv = dev->dev_private; -+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - uint32_t tmp, vramsz; - int ret, i; + static int nouveau_init_engine_ptrs(struct drm_device *dev) +@@ -276,8 +275,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) + engine->timer.init = nv04_timer_init; + engine->timer.read = nv04_timer_read; + engine->timer.takedown = nv04_timer_takedown; +- engine->fb.init = nouveau_stub_init; +- engine->fb.takedown = nouveau_stub_takedown; ++ engine->fb.init = nv50_fb_init; ++ engine->fb.takedown = nv50_fb_takedown; + engine->graph.grclass = nv50_graph_grclass; + engine->graph.init = nv50_graph_init; + engine->graph.takedown = nv50_graph_takedown; +@@ -340,7 +339,7 @@ nouveau_card_init_channel(struct drm_device *dev) + + gpuobj = NULL; + ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, +- 0, nouveau_mem_fb_amount(dev), ++ 0, dev_priv->vram_size, + NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, + &gpuobj); + if (ret) +@@ -391,6 +390,7 @@ nouveau_card_init(struct drm_device *dev) + goto out; + engine = &dev_priv->engine; + dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; ++ spin_lock_init(&dev_priv->context_switch_lock); -+ switch (dev_priv->chipset) { -+ case 0x20: -+ pgraph->grctx_size = NV20_GRCTX_SIZE; -+ break; 
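The new NV50_SOR_DP_UNK128 register, like its neighbours, is generated from a base address plus per-SOR and per-link strides: each SOR owns a 0x800-byte register block and each link within it a 0x80-byte sub-block. A one-line illustration of computing such strided addresses:

#include <stdint.h>
#include <stdio.h>

/* Base + per-unit and per-link strides, mirroring the NV50_SOR_DP_* macros
 * above (0x800 bytes per SOR, 0x80 bytes per link). */
#define SOR_DP_REG(base, i, l) ((base) + (i) * 0x800 + (l) * 0x80)

int main(void)
{
    /* Second SOR (i = 1), second link (l = 1) of the 0x0061c128 register. */
    printf("0x%08x\n", SOR_DP_REG(0x0061c128u, 1, 1));   /* 0x0061c9a8 */
    return 0;
}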
-+ case 0x25: -+ case 0x28: -+ pgraph->grctx_size = NV25_GRCTX_SIZE; -+ break; -+ case 0x2a: -+ pgraph->grctx_size = NV2A_GRCTX_SIZE; -+ break; -+ default: -+ NV_ERROR(dev, "unknown chipset, disabling acceleration\n"); -+ pgraph->accel_blocked = true; -+ return 0; -+ } -+ - nv_wr32(dev, NV03_PMC_ENABLE, - nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); - nv_wr32(dev, NV03_PMC_ENABLE, - nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); - -- if (!dev_priv->ctx_table) { -+ if (!pgraph->ctx_table) { - /* Create Context Pointer Table */ -- dev_priv->ctx_table_size = 32 * 4; -- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, -- dev_priv->ctx_table_size, 16, -+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16, - NVOBJ_FLAG_ZERO_ALLOC, -- &dev_priv->ctx_table); -+ &pgraph->ctx_table); - if (ret) - return ret; + /* Parse BIOS tables / Run init tables if card not POSTed */ + if (drm_core_check_feature(dev, DRIVER_MODESET)) { +@@ -399,6 +399,10 @@ nouveau_card_init(struct drm_device *dev) + goto out; } - nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, -- dev_priv->ctx_table->instance >> 4); -+ pgraph->ctx_table->instance >> 4); - - nv20_graph_rdi(dev); - -@@ -644,34 +644,52 @@ void - nv20_graph_takedown(struct drm_device *dev) - { - struct drm_nouveau_private *dev_priv = dev->dev_private; -+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - -- nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table); -+ nouveau_gpuobj_ref_del(dev, &pgraph->ctx_table); - } - - int - nv30_graph_init(struct drm_device *dev) - { - struct drm_nouveau_private *dev_priv = dev->dev_private; -+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - int ret, i; - -+ switch (dev_priv->chipset) { -+ case 0x30: -+ case 0x31: -+ pgraph->grctx_size = NV30_31_GRCTX_SIZE; -+ break; -+ case 0x34: -+ pgraph->grctx_size = NV34_GRCTX_SIZE; -+ break; -+ case 0x35: -+ case 0x36: -+ pgraph->grctx_size = NV35_36_GRCTX_SIZE; -+ break; -+ default: -+ NV_ERROR(dev, "unknown chipset, disabling acceleration\n"); -+ pgraph->accel_blocked = true; -+ return 0; -+ } ++ ret = nouveau_mem_detect(dev); ++ if (ret) ++ goto out_bios; + - nv_wr32(dev, NV03_PMC_ENABLE, - nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); - nv_wr32(dev, NV03_PMC_ENABLE, - nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); - -- if (!dev_priv->ctx_table) { -+ if (!pgraph->ctx_table) { - /* Create Context Pointer Table */ -- dev_priv->ctx_table_size = 32 * 4; -- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, -- dev_priv->ctx_table_size, 16, -+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16, - NVOBJ_FLAG_ZERO_ALLOC, -- &dev_priv->ctx_table); -+ &pgraph->ctx_table); + ret = nouveau_gpuobj_early_init(dev); + if (ret) + goto out_bios; +@@ -474,7 +478,7 @@ nouveau_card_init(struct drm_device *dev) + else + ret = nv04_display_create(dev); if (ret) - return ret; +- goto out_irq; ++ goto out_channel; } - nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, -- dev_priv->ctx_table->instance >> 4); -+ pgraph->ctx_table->instance >> 4); - - nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); - nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); -diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c -index 500ccfd..2b67f18 100644 ---- a/drivers/gpu/drm/nouveau/nv40_fifo.c -+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c -@@ -48,7 +48,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan) - - spin_lock_irqsave(&dev_priv->context_switch_lock, flags); - -- dev_priv->engine.instmem.prepare_access(dev, true); - 
nv_wi32(dev, fc + 0, chan->pushbuf_base); - nv_wi32(dev, fc + 4, chan->pushbuf_base); - nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4); -@@ -61,7 +60,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan) - 0x30000000 /* no idea.. */); - nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4); - nv_wi32(dev, fc + 60, 0x0001FFFF); -- dev_priv->engine.instmem.finish_access(dev); - - /* enable the fifo dma operation */ - nv_wr32(dev, NV04_PFIFO_MODE, -@@ -89,8 +87,6 @@ nv40_fifo_do_load_context(struct drm_device *dev, int chid) - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t fc = NV40_RAMFC(chid), tmp, tmp2; - -- dev_priv->engine.instmem.prepare_access(dev, false); -- - nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0)); - nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4)); - nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8)); -@@ -127,8 +123,6 @@ nv40_fifo_do_load_context(struct drm_device *dev, int chid) - nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76)); - nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80)); - -- dev_priv->engine.instmem.finish_access(dev); -- - nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); - nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); - } -@@ -166,7 +160,6 @@ nv40_fifo_unload_context(struct drm_device *dev) - return 0; - fc = NV40_RAMFC(chid); - -- dev_priv->engine.instmem.prepare_access(dev, true); - nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT)); - nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); - nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT)); -@@ -200,7 +193,6 @@ nv40_fifo_unload_context(struct drm_device *dev) - tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16); - nv_wi32(dev, fc + 72, tmp); - #endif -- dev_priv->engine.instmem.finish_access(dev); - - nv40_fifo_do_load_context(dev, pfifo->channels - 1); - nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, -diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c -index 704a25d..ef550ce 100644 ---- a/drivers/gpu/drm/nouveau/nv40_graph.c -+++ b/drivers/gpu/drm/nouveau/nv40_graph.c -@@ -58,6 +58,7 @@ nv40_graph_create_context(struct nouveau_channel *chan) - struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; -+ struct nouveau_grctx ctx = {}; - int ret; - - ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, -@@ -67,20 +68,13 @@ nv40_graph_create_context(struct nouveau_channel *chan) - return ret; + ret = nouveau_backlight_init(dev); +@@ -488,6 +492,11 @@ nouveau_card_init(struct drm_device *dev) - /* Initialise default context values */ -- dev_priv->engine.instmem.prepare_access(dev, true); -- if (!pgraph->ctxprog) { -- struct nouveau_grctx ctx = {}; -- -- ctx.dev = chan->dev; -- ctx.mode = NOUVEAU_GRCTX_VALS; -- ctx.data = chan->ramin_grctx->gpuobj; -- nv40_grctx_init(&ctx); -- } else { -- nouveau_grctx_vals_load(dev, chan->ramin_grctx->gpuobj); -- } -+ ctx.dev = chan->dev; -+ ctx.mode = NOUVEAU_GRCTX_VALS; -+ ctx.data = chan->ramin_grctx->gpuobj; -+ nv40_grctx_init(&ctx); -+ - nv_wo32(dev, chan->ramin_grctx->gpuobj, 0, - chan->ramin_grctx->gpuobj->im_pramin->start); -- dev_priv->engine.instmem.finish_access(dev); return 0; - } - -@@ -238,7 +232,8 @@ nv40_graph_init(struct drm_device *dev) - struct drm_nouveau_private *dev_priv = - (struct drm_nouveau_private *)dev->dev_private; - struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; -- uint32_t vramsz; -+ struct nouveau_grctx ctx = {}; -+ 
uint32_t vramsz, *cp; - int i, j; - - nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & -@@ -246,32 +241,22 @@ nv40_graph_init(struct drm_device *dev) - nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | - NV_PMC_ENABLE_PGRAPH); - -- if (nouveau_ctxfw) { -- nouveau_grctx_prog_load(dev); -- dev_priv->engine.graph.grctx_size = 175 * 1024; -- } -+ cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL); -+ if (!cp) -+ return -ENOMEM; - -- if (!dev_priv->engine.graph.ctxprog) { -- struct nouveau_grctx ctx = {}; -- uint32_t *cp; -+ ctx.dev = dev; -+ ctx.mode = NOUVEAU_GRCTX_PROG; -+ ctx.data = cp; -+ ctx.ctxprog_max = 256; -+ nv40_grctx_init(&ctx); -+ dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; - -- cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL); -- if (!cp) -- return -ENOMEM; -+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); -+ for (i = 0; i < ctx.ctxprog_len; i++) -+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); - -- ctx.dev = dev; -- ctx.mode = NOUVEAU_GRCTX_PROG; -- ctx.data = cp; -- ctx.ctxprog_max = 256; -- nv40_grctx_init(&ctx); -- dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; -- -- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); -- for (i = 0; i < ctx.ctxprog_len; i++) -- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); -- -- kfree(cp); -- } -+ kfree(cp); - - /* No context present currently */ - nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); -@@ -407,7 +392,6 @@ nv40_graph_init(struct drm_device *dev) - - void nv40_graph_takedown(struct drm_device *dev) - { -- nouveau_grctx_fini(dev); - } - - struct nouveau_pgraph_object_class nv40_graph_grclass[] = { -diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c -index b4e4a3b..5d11ea1 100644 ---- a/drivers/gpu/drm/nouveau/nv50_crtc.c -+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c -@@ -440,47 +440,15 @@ nv50_crtc_prepare(struct drm_crtc *crtc) - { - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - struct drm_device *dev = crtc->dev; -- struct drm_encoder *encoder; -- uint32_t dac = 0, sor = 0; - NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); ++out_channel: ++ if (dev_priv->channel) { ++ nouveau_channel_free(dev_priv->channel); ++ dev_priv->channel = NULL; ++ } + out_irq: + drm_irq_uninstall(dev); + out_fifo: +@@ -505,6 +514,7 @@ out_mc: + out_gpuobj: + nouveau_gpuobj_takedown(dev); + out_mem: ++ nouveau_sgdma_takedown(dev); + nouveau_mem_close(dev); + out_instmem: + engine->instmem.takedown(dev); +@@ -691,29 +701,24 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) + NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", + dev_priv->card_type, reg0); -- /* Disconnect all unused encoders. 
*/ -- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { -- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); -- -- if (!drm_helper_encoder_in_use(encoder)) -- continue; -- -- if (nv_encoder->dcb->type == OUTPUT_ANALOG || -- nv_encoder->dcb->type == OUTPUT_TV) -- dac |= (1 << nv_encoder->or); -- else -- sor |= (1 << nv_encoder->or); -- } -- -- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { -- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); -- -- if (nv_encoder->dcb->type == OUTPUT_ANALOG || -- nv_encoder->dcb->type == OUTPUT_TV) { -- if (dac & (1 << nv_encoder->or)) -- continue; -- } else { -- if (sor & (1 << nv_encoder->or)) -- continue; -- } -- -- nv_encoder->disconnect(nv_encoder); +- /* map larger RAMIN aperture on NV40 cards */ +- dev_priv->ramin = NULL; ++ /* Map PRAMIN BAR, or on older cards, the aperture withing BAR0 */ + if (dev_priv->card_type >= NV_40) { + int ramin_bar = 2; + if (pci_resource_len(dev->pdev, ramin_bar) == 0) + ramin_bar = 3; + + dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar); +- dev_priv->ramin = ioremap( +- pci_resource_start(dev->pdev, ramin_bar), ++ dev_priv->ramin = ++ ioremap(pci_resource_start(dev->pdev, ramin_bar), + dev_priv->ramin_size); + if (!dev_priv->ramin) { +- NV_ERROR(dev, "Failed to init RAMIN mapping, " +- "limited instance memory available\n"); ++ NV_ERROR(dev, "Failed to PRAMIN BAR"); ++ return -ENOMEM; + } - } - - nv50_crtc_blank(nv_crtc, true); - } - - static void - nv50_crtc_commit(struct drm_crtc *crtc) - { -- struct drm_crtc *crtc2; - struct drm_device *dev = crtc->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *evo = dev_priv->evo; -@@ -491,20 +459,14 @@ nv50_crtc_commit(struct drm_crtc *crtc) - - nv50_crtc_blank(nv_crtc, false); +- /* On older cards (or if the above failed), create a map covering +- * the BAR0 PRAMIN aperture */ +- if (!dev_priv->ramin) { ++ } else { + dev_priv->ramin_size = 1 * 1024 * 1024; + dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN, +- dev_priv->ramin_size); ++ dev_priv->ramin_size); + if (!dev_priv->ramin) { + NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n"); + return -ENOMEM; +diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c +index d2f143e..9986aba 100644 +--- a/drivers/gpu/drm/nouveau/nv04_crtc.c ++++ b/drivers/gpu/drm/nouveau/nv04_crtc.c +@@ -230,9 +230,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode) + struct drm_framebuffer *fb = crtc->fb; + + /* Calculate our timings */ +- int horizDisplay = (mode->crtc_hdisplay >> 3) - 1; +- int horizStart = (mode->crtc_hsync_start >> 3) - 1; +- int horizEnd = (mode->crtc_hsync_end >> 3) - 1; ++ int horizDisplay = (mode->crtc_hdisplay >> 3) - 1; ++ int horizStart = (mode->crtc_hsync_start >> 3) + 1; ++ int horizEnd = (mode->crtc_hsync_end >> 3) + 1; + int horizTotal = (mode->crtc_htotal >> 3) - 5; + int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1; + int horizBlankEnd = (mode->crtc_htotal >> 3) - 1; +diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c +index 1d73b15..8066c56 100644 +--- a/drivers/gpu/drm/nouveau/nv04_dac.c ++++ b/drivers/gpu/drm/nouveau/nv04_dac.c +@@ -230,13 +230,13 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) + if (dcb->type == OUTPUT_TV) { + testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0); + +- if (dev_priv->vbios->tvdactestval) +- testval = dev_priv->vbios->tvdactestval; ++ if (dev_priv->vbios.tvdactestval) 
++ testval = dev_priv->vbios.tvdactestval; + } else { + testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */ -- /* Explicitly blank all unused crtc's. */ -- list_for_each_entry(crtc2, &dev->mode_config.crtc_list, head) { -- if (!drm_helper_crtc_in_use(crtc2)) -- nv50_crtc_blank(nouveau_crtc(crtc2), true); -- } -- - ret = RING_SPACE(evo, 2); - if (ret) { - NV_ERROR(dev, "no space while committing crtc\n"); - return; +- if (dev_priv->vbios->dactestval) +- testval = dev_priv->vbios->dactestval; ++ if (dev_priv->vbios.dactestval) ++ testval = dev_priv->vbios.dactestval; } - BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); -- OUT_RING(evo, 0); -- FIRE_RING(evo); -+ OUT_RING (evo, 0); -+ FIRE_RING (evo); - } - static bool -diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c -index 1fd9537..1bc0859 100644 ---- a/drivers/gpu/drm/nouveau/nv50_dac.c -+++ b/drivers/gpu/drm/nouveau/nv50_dac.c -@@ -37,22 +37,31 @@ - #include "nv50_display.h" + saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); +@@ -501,11 +501,13 @@ static const struct drm_encoder_funcs nv04_dac_funcs = { + .destroy = nv04_dac_destroy, + }; - static void --nv50_dac_disconnect(struct nouveau_encoder *nv_encoder) -+nv50_dac_disconnect(struct drm_encoder *encoder) +-int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry) ++int ++nv04_dac_create(struct drm_connector *connector, struct dcb_entry *entry) { -- struct drm_device *dev = to_drm_encoder(nv_encoder)->dev; -+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); -+ struct drm_device *dev = encoder->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *evo = dev_priv->evo; - int ret; - -+ if (!nv_encoder->crtc) -+ return; -+ nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true); -+ - NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or); - -- ret = RING_SPACE(evo, 2); -+ ret = RING_SPACE(evo, 4); - if (ret) { - NV_ERROR(dev, "no space while disconnecting DAC\n"); - return; - } - BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1); -- OUT_RING(evo, 0); -+ OUT_RING (evo, 0); -+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); -+ OUT_RING (evo, 0); -+ -+ nv_encoder->crtc = NULL; - } - - static enum drm_connector_status -@@ -213,7 +222,8 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, - uint32_t mode_ctl = 0, mode_ctl2 = 0; - int ret; - -- NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or); -+ NV_DEBUG_KMS(dev, "or %d type %d crtc %d\n", -+ nv_encoder->or, nv_encoder->dcb->type, crtc->index); + const struct drm_encoder_helper_funcs *helper; +- struct drm_encoder *encoder; + struct nouveau_encoder *nv_encoder = NULL; ++ struct drm_device *dev = connector->dev; ++ struct drm_encoder *encoder; - nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON); + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) +@@ -527,5 +529,6 @@ int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry) + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; -@@ -243,6 +253,14 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, - BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2); - OUT_RING(evo, mode_ctl); - OUT_RING(evo, mode_ctl2); -+ -+ nv_encoder->crtc = encoder->crtc; -+} -+ -+static struct drm_crtc * -+nv50_dac_crtc_get(struct drm_encoder *encoder) -+{ -+ return nouveau_encoder(encoder)->crtc; ++ drm_mode_connector_attach_encoder(connector, encoder); + return 0; } - - static 
const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = { -@@ -253,7 +271,9 @@ static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = { - .prepare = nv50_dac_prepare, - .commit = nv50_dac_commit, - .mode_set = nv50_dac_mode_set, -- .detect = nv50_dac_detect -+ .get_crtc = nv50_dac_crtc_get, -+ .detect = nv50_dac_detect, -+ .disable = nv50_dac_disconnect - }; - - static void -@@ -275,14 +295,11 @@ static const struct drm_encoder_funcs nv50_dac_encoder_funcs = { +diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c +index 483f875..3559d89 100644 +--- a/drivers/gpu/drm/nouveau/nv04_dfp.c ++++ b/drivers/gpu/drm/nouveau/nv04_dfp.c +@@ -269,10 +269,10 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, + regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1; + if (!nv_gf4_disp_arch(dev) || + (output_mode->hsync_start - output_mode->hdisplay) >= +- dev_priv->vbios->digital_min_front_porch) ++ dev_priv->vbios.digital_min_front_porch) + regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay; + else +- regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios->digital_min_front_porch - 1; ++ regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios.digital_min_front_porch - 1; + regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1; + regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1; + regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew; +@@ -584,11 +584,12 @@ static const struct drm_encoder_funcs nv04_dfp_funcs = { + .destroy = nv04_dfp_destroy, }; - int --nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry) -+nv50_dac_create(struct drm_connector *connector, struct dcb_entry *entry) +-int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry) ++int ++nv04_dfp_create(struct drm_connector *connector, struct dcb_entry *entry) { - struct nouveau_encoder *nv_encoder; - struct drm_encoder *encoder; + const struct drm_encoder_helper_funcs *helper; +- struct drm_encoder *encoder; + struct nouveau_encoder *nv_encoder = NULL; ++ struct drm_encoder *encoder; + int type; -- NV_DEBUG_KMS(dev, "\n"); -- NV_INFO(dev, "Detected a DAC output\n"); -- - nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); - if (!nv_encoder) - return -ENOMEM; -@@ -291,14 +308,14 @@ nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry) + switch (entry->type) { +@@ -613,11 +614,12 @@ int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry) nv_encoder->dcb = entry; nv_encoder->or = ffs(entry->or) - 1; -- nv_encoder->disconnect = nv50_dac_disconnect; -- -- drm_encoder_init(dev, encoder, &nv50_dac_encoder_funcs, -+ drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs, - DRM_MODE_ENCODER_DAC); - drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs); +- drm_encoder_init(dev, encoder, &nv04_dfp_funcs, type); ++ drm_encoder_init(connector->dev, encoder, &nv04_dfp_funcs, type); + drm_encoder_helper_add(encoder, helper); encoder->possible_crtcs = entry->heads; encoder->possible_clones = 0; -+ -+ drm_mode_connector_attach_encoder(connector, encoder); - return 0; - } - -diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c -index e6a44af..7d59e91 100644 ---- a/drivers/gpu/drm/nouveau/nv50_display.c -+++ b/drivers/gpu/drm/nouveau/nv50_display.c -@@ -71,14 +71,13 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name, - return ret; - } - -- dev_priv->engine.instmem.prepare_access(dev, true); - 
nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); - nv_wo32(dev, obj, 1, limit); - nv_wo32(dev, obj, 2, offset); - nv_wo32(dev, obj, 3, 0x00000000); - nv_wo32(dev, obj, 4, 0x00000000); - nv_wo32(dev, obj, 5, 0x00010000); -- dev_priv->engine.instmem.finish_access(dev); -+ dev_priv->engine.instmem.flush(dev); ++ drm_mode_connector_attach_encoder(connector, encoder); return 0; } -@@ -110,8 +109,8 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) - return ret; - } - -- ret = nouveau_mem_init_heap(&chan->ramin_heap, chan->ramin->gpuobj-> -- im_pramin->start, 32768); -+ ret = drm_mm_init(&chan->ramin_heap, -+ chan->ramin->gpuobj->im_pramin->start, 32768); - if (ret) { - NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); - nv50_evo_channel_del(pchan); -@@ -465,6 +464,7 @@ int nv50_display_create(struct drm_device *dev) +diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c +index ef77215..b35b7ed 100644 +--- a/drivers/gpu/drm/nouveau/nv04_display.c ++++ b/drivers/gpu/drm/nouveau/nv04_display.c +@@ -93,10 +93,10 @@ int + nv04_display_create(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct dcb_table *dcb = &dev_priv->vbios.dcb; +- struct parsed_dcb *dcb = dev_priv->vbios->dcb; ++ struct dcb_table *dcb = &dev_priv->vbios.dcb; + struct drm_connector *connector, *ct; - int ret, i; + struct drm_encoder *encoder; + struct drm_crtc *crtc; +- uint16_t connector[16] = { 0 }; + int i, ret; NV_DEBUG_KMS(dev, "\n"); -@@ -507,14 +507,18 @@ int nv50_display_create(struct drm_device *dev) - continue; - } +@@ -133,19 +133,23 @@ nv04_display_create(struct drm_device *dev) + for (i = 0; i < dcb->entries; i++) { + struct dcb_entry *dcbent = &dcb->entry[i]; -+ connector = nouveau_connector_create(dev, entry->connector); ++ connector = nouveau_connector_create(dev, dcbent->connector); + if (IS_ERR(connector)) + continue; + - switch (entry->type) { - case OUTPUT_TMDS: - case OUTPUT_LVDS: - case OUTPUT_DP: -- nv50_sor_create(dev, entry); -+ nv50_sor_create(connector, entry); - break; + switch (dcbent->type) { case OUTPUT_ANALOG: -- nv50_dac_create(dev, entry); -+ nv50_dac_create(connector, entry); +- ret = nv04_dac_create(dev, dcbent); ++ ret = nv04_dac_create(connector, dcbent); + break; + case OUTPUT_LVDS: + case OUTPUT_TMDS: +- ret = nv04_dfp_create(dev, dcbent); ++ ret = nv04_dfp_create(connector, dcbent); + break; + case OUTPUT_TV: + if (dcbent->location == DCB_LOC_ON_CHIP) +- ret = nv17_tv_create(dev, dcbent); ++ ret = nv17_tv_create(connector, dcbent); + else +- ret = nv04_tv_create(dev, dcbent); ++ ret = nv04_tv_create(connector, dcbent); break; default: - NV_WARN(dev, "DCB encoder %d unknown\n", entry->type); -@@ -522,11 +526,13 @@ int nv50_display_create(struct drm_device *dev) - } + NV_WARN(dev, "DCB type %d not known\n", dcbent->type); +@@ -154,51 +158,15 @@ nv04_display_create(struct drm_device *dev) + + if (ret) + continue; +- +- connector[dcbent->connector] |= (1 << dcbent->type); } -- for (i = 0 ; i < dcb->connector.entries; i++) { -- if (i != 0 && dcb->connector.entry[i].index2 == -- dcb->connector.entry[i - 1].index2) +- for (i = 0; i < dcb->entries; i++) { +- struct dcb_entry *dcbent = &dcb->entry[i]; +- uint16_t encoders; +- int type; +- +- encoders = connector[dcbent->connector]; +- if (!(encoders & (1 << dcbent->type))) +- continue; +- connector[dcbent->connector] = 0; +- +- switch (dcbent->type) { +- case OUTPUT_ANALOG: +- if 
(!MULTIPLE_ENCODERS(encoders)) +- type = DRM_MODE_CONNECTOR_VGA; +- else +- type = DRM_MODE_CONNECTOR_DVII; +- break; +- case OUTPUT_TMDS: +- if (!MULTIPLE_ENCODERS(encoders)) +- type = DRM_MODE_CONNECTOR_DVID; +- else +- type = DRM_MODE_CONNECTOR_DVII; +- break; +- case OUTPUT_LVDS: +- type = DRM_MODE_CONNECTOR_LVDS; +-#if 0 +- /* don't create i2c adapter when lvds ddc not allowed */ +- if (dcbent->lvdsconf.use_straps_for_mode || +- dev_priv->vbios->fp_no_ddc) +- i2c_index = 0xf; +-#endif +- break; +- case OUTPUT_TV: +- type = DRM_MODE_CONNECTOR_TV; +- break; +- default: +- type = DRM_MODE_CONNECTOR_Unknown; - continue; -- nouveau_connector_create(dev, &dcb->connector.entry[i]); + list_for_each_entry_safe(connector, ct, + &dev->mode_config.connector_list, head) { + if (!connector->encoder_ids[0]) { + NV_WARN(dev, "%s has no encoders, removing\n", + drm_get_connector_name(connector)); + connector->funcs->destroy(connector); -+ } + } +- +- nouveau_connector_create(dev, dcbent->connector, type); } - ret = nv50_display_init(dev); -@@ -552,131 +558,28 @@ int nv50_display_destroy(struct drm_device *dev) + /* Save previous state */ +diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c +index fd01caa..813b25c 100644 +--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c ++++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c +@@ -118,8 +118,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) + return; + } + +- width = (image->width + 31) & ~31; +- dsize = (width * image->height) >> 5; ++ width = ALIGN(image->width, 8); ++ dsize = ALIGN(width * image->height, 32) >> 5; + + if (info->fix.visual == FB_VISUAL_TRUECOLOR || + info->fix.visual == FB_VISUAL_DIRECTCOLOR) { +@@ -136,8 +136,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) + ((image->dx + image->width) & 0xffff)); + OUT_RING(chan, bg); + OUT_RING(chan, fg); +- OUT_RING(chan, (image->height << 16) | image->width); + OUT_RING(chan, (image->height << 16) | width); ++ OUT_RING(chan, (image->height << 16) | image->width); + OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); + + while (dsize) { +diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c +index f31347b..66fe559 100644 +--- a/drivers/gpu/drm/nouveau/nv04_fifo.c ++++ b/drivers/gpu/drm/nouveau/nv04_fifo.c +@@ -117,6 +117,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan) + { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; ++ unsigned long flags; + int ret; + + ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0, +@@ -127,6 +128,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan) + if (ret) + return ret; + ++ spin_lock_irqsave(&dev_priv->context_switch_lock, flags); ++ + /* Setup initial state */ + dev_priv->engine.instmem.prepare_access(dev, true); + RAMFC_WR(DMA_PUT, chan->pushbuf_base); +@@ -144,6 +147,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan) + /* enable the fifo dma operation */ + nv_wr32(dev, NV04_PFIFO_MODE, + nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id)); ++ ++ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); return 0; } --static inline uint32_t --nv50_display_mode_ctrl(struct drm_device *dev, bool sor, int or) --{ -- struct drm_nouveau_private *dev_priv = dev->dev_private; -- uint32_t mc; -- -- if (sor) { -- if (dev_priv->chipset < 0x90 || -- dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0) -- mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(or)); -- else -- mc = 
nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(or)); -- } else { -- mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(or)); -- } -- -- return mc; --} -- --static int --nv50_display_irq_head(struct drm_device *dev, int *phead, -- struct dcb_entry **pdcbent) --{ -- struct drm_nouveau_private *dev_priv = dev->dev_private; -- uint32_t unk30 = nv_rd32(dev, NV50_PDISPLAY_UNK30_CTRL); -- uint32_t dac = 0, sor = 0; -- int head, i, or = 0, type = OUTPUT_ANY; -- -- /* We're assuming that head 0 *or* head 1 will be active here, -- * and not both. I'm not sure if the hw will even signal both -- * ever, but it definitely shouldn't for us as we commit each -- * CRTC separately, and submission will be blocked by the GPU -- * until we handle each in turn. -- */ -- NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); -- head = ffs((unk30 >> 9) & 3) - 1; -- if (head < 0) -- return -EINVAL; -- -- /* This assumes CRTCs are never bound to multiple encoders, which -- * should be the case. -- */ -- for (i = 0; i < 3 && type == OUTPUT_ANY; i++) { -- uint32_t mc = nv50_display_mode_ctrl(dev, false, i); -- if (!(mc & (1 << head))) -- continue; -- -- switch ((mc >> 8) & 0xf) { -- case 0: type = OUTPUT_ANALOG; break; -- case 1: type = OUTPUT_TV; break; -- default: -- NV_ERROR(dev, "unknown dac mode_ctrl: 0x%08x\n", dac); -- return -1; -- } -- -- or = i; -- } -- -- for (i = 0; i < 4 && type == OUTPUT_ANY; i++) { -- uint32_t mc = nv50_display_mode_ctrl(dev, true, i); -- if (!(mc & (1 << head))) -- continue; -- -- switch ((mc >> 8) & 0xf) { -- case 0: type = OUTPUT_LVDS; break; -- case 1: type = OUTPUT_TMDS; break; -- case 2: type = OUTPUT_TMDS; break; -- case 5: type = OUTPUT_TMDS; break; -- case 8: type = OUTPUT_DP; break; -- case 9: type = OUTPUT_DP; break; -- default: -- NV_ERROR(dev, "unknown sor mode_ctrl: 0x%08x\n", sor); -- return -1; -- } -- -- or = i; -- } -- -- NV_DEBUG_KMS(dev, "type %d, or %d\n", type, or); -- if (type == OUTPUT_ANY) { -- NV_ERROR(dev, "unknown encoder!!\n"); -- return -1; -- } -- -- for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { -- struct dcb_entry *dcbent = &dev_priv->vbios.dcb.entry[i]; -- -- if (dcbent->type != type) -- continue; -- -- if (!(dcbent->or & (1 << or))) -- continue; -- -- *phead = head; -- *pdcbent = dcbent; -- return 0; -- } -- -- NV_ERROR(dev, "no DCB entry for %d %d\n", dac != 0, or); -- return 0; --} -- --static uint32_t --nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent, -- int pxclk) -+static u16 -+nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb, -+ u32 mc, int pxclk) +diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c +index e260986..f0cbbc0 100644 +--- a/drivers/gpu/drm/nouveau/nv04_graph.c ++++ b/drivers/gpu/drm/nouveau/nv04_graph.c +@@ -527,8 +527,7 @@ static int + nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass, + int mthd, uint32_t data) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_connector *nv_connector = NULL; +- chan->fence.last_sequence_irq = data; +- nouveau_fence_handler(chan->dev, chan->id); ++ atomic_set(&chan->fence.last_sequence_irq, data); + return 0; + } + +diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c +index 9c63099..84b5954 100644 +--- a/drivers/gpu/drm/nouveau/nv04_tv.c ++++ b/drivers/gpu/drm/nouveau/nv04_tv.c +@@ -223,10 +223,12 @@ static void nv04_tv_destroy(struct drm_encoder *encoder) + kfree(nv_encoder); + } + +-int nv04_tv_create(struct drm_device *dev, struct dcb_entry 
*entry) ++int ++nv04_tv_create(struct drm_connector *connector, struct dcb_entry *entry) + { + struct nouveau_encoder *nv_encoder; struct drm_encoder *encoder; - struct nvbios *bios = &dev_priv->vbios; -- uint32_t mc, script = 0, or; -+ u32 script = 0, or; ++ struct drm_device *dev = connector->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct i2c_adapter *adap; + struct drm_encoder_funcs *funcs = NULL; +@@ -262,11 +264,11 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry) + nv_encoder->or = ffs(entry->or) - 1; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + /* Run the slave-specific initialization */ +- adap = &dev_priv->vbios->dcb->i2c[i2c_index].chan->adapter; ++ adap = &dev_priv->vbios.dcb.i2c[i2c_index].chan->adapter; -- if (nv_encoder->dcb != dcbent) -+ if (nv_encoder->dcb != dcb) - continue; + was_locked = NVLockVgaCrtcs(dev, false); - nv_connector = nouveau_encoder_connector_get(nv_encoder); - break; - } +- ret = drm_i2c_encoder_init(encoder->dev, to_encoder_slave(encoder), adap, ++ ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder), adap, + &nv04_tv_encoder_info[type].board_info); -- or = ffs(dcbent->or) - 1; -- mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or); -- switch (dcbent->type) { -+ or = ffs(dcb->or) - 1; -+ switch (dcb->type) { - case OUTPUT_LVDS: - script = (mc >> 8) & 0xf; - if (bios->fp_no_ddc) { -@@ -767,17 +670,88 @@ nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr) - static void - nv50_display_unk10_handler(struct drm_device *dev) + NVLockVgaCrtcs(dev, was_locked); +@@ -294,7 +296,9 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry) + + /* Set the slave encoder configuration */ + sfuncs->set_config(encoder, nv04_tv_encoder_info[type].params); ++ sfuncs->create_resources(encoder, connector); + ++ drm_mode_connector_attach_encoder(connector, encoder); + return 0; + + fail: +diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c +index 21ac6e4..44437ff 100644 +--- a/drivers/gpu/drm/nouveau/nv17_tv.c ++++ b/drivers/gpu/drm/nouveau/nv17_tv.c +@@ -45,8 +45,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder) + + #define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) + testval = RGB_TEST_DATA(0x82, 0xeb, 0x82); +- if (dev_priv->vbios->tvdactestval) +- testval = dev_priv->vbios->tvdactestval; ++ if (dev_priv->vbios.tvdactestval) ++ testval = dev_priv->vbios.tvdactestval; + + dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset); + head = (dacclk & 0x100) >> 8; +@@ -367,7 +367,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder) + !enc->crtc && + nv04_dfp_get_bound_head(dev, dcb) == head) { + nv04_dfp_bind_head(dev, dcb, head ^ 1, +- dev_priv->VBIOS.fp.dual_link); ++ dev_priv->vbios.fp.dual_link); + } + } + +@@ -744,8 +744,10 @@ static struct drm_encoder_funcs nv17_tv_funcs = { + .destroy = nv17_tv_destroy, + }; + +-int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry) ++int ++nv17_tv_create(struct drm_connector *connector, struct dcb_entry *entry) { -- struct dcb_entry *dcbent; -- int head, ret; -+ struct drm_nouveau_private *dev_priv = dev->dev_private; -+ u32 unk30 = nv_rd32(dev, 0x610030), mc; -+ int i, crtc, or, type = OUTPUT_ANY; ++ struct drm_device *dev = connector->dev; + struct drm_encoder *encoder; + struct nv17_tv_encoder *tv_enc = NULL; -- ret = nv50_display_irq_head(dev, &head, &dcbent); -- 
if (ret) -- goto ack; -+ NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); -+ dev_priv->evo_irq.dcb = NULL; +@@ -774,5 +776,7 @@ int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry) + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; + ++ nv17_tv_create_resources(encoder, connector); ++ drm_mode_connector_attach_encoder(connector, encoder); + return 0; + } +diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c +index b4f19cc..500ccfd 100644 +--- a/drivers/gpu/drm/nouveau/nv40_fifo.c ++++ b/drivers/gpu/drm/nouveau/nv40_fifo.c +@@ -37,6 +37,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan) + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t fc = NV40_RAMFC(chan->id); ++ unsigned long flags; + int ret; - nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8); + ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0, +@@ -45,6 +46,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan) + if (ret) + return ret; -- nouveau_bios_run_display_table(dev, dcbent, 0, -1); -+ /* Determine which CRTC we're dealing with, only 1 ever will be -+ * signalled at the same time with the current nouveau code. -+ */ -+ crtc = ffs((unk30 & 0x00000060) >> 5) - 1; -+ if (crtc < 0) -+ goto ack; -+ -+ /* Nothing needs to be done for the encoder */ -+ crtc = ffs((unk30 & 0x00000180) >> 7) - 1; -+ if (crtc < 0) -+ goto ack; -+ -+ /* Find which encoder was connected to the CRTC */ -+ for (i = 0; type == OUTPUT_ANY && i < 3; i++) { -+ mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_C(i)); -+ NV_DEBUG_KMS(dev, "DAC-%d mc: 0x%08x\n", i, mc); -+ if (!(mc & (1 << crtc))) -+ continue; ++ spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + -+ switch ((mc & 0x00000f00) >> 8) { -+ case 0: type = OUTPUT_ANALOG; break; -+ case 1: type = OUTPUT_TV; break; -+ default: -+ NV_ERROR(dev, "invalid mc, DAC-%d: 0x%08x\n", i, mc); -+ goto ack; -+ } + dev_priv->engine.instmem.prepare_access(dev, true); + nv_wi32(dev, fc + 0, chan->pushbuf_base); + nv_wi32(dev, fc + 4, chan->pushbuf_base); +@@ -63,6 +66,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan) + /* enable the fifo dma operation */ + nv_wr32(dev, NV04_PFIFO_MODE, + nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id)); + -+ or = i; ++ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + return 0; + } + +@@ -273,7 +278,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev) + default: + nv_wr32(dev, 0x2230, 0); + nv_wr32(dev, NV40_PFIFO_RAMFC, +- ((nouveau_mem_fb_amount(dev) - 512 * 1024 + ++ ((dev_priv->vram_size - 512 * 1024 + + dev_priv->ramfc_offset) >> 16) | (3 << 16)); + break; + } +diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c +index 53e8afe..0616c96 100644 +--- a/drivers/gpu/drm/nouveau/nv40_graph.c ++++ b/drivers/gpu/drm/nouveau/nv40_graph.c +@@ -335,6 +335,27 @@ nv40_graph_init(struct drm_device *dev) + nv_wr32(dev, 0x400b38, 0x2ffff800); + nv_wr32(dev, 0x400b3c, 0x00006000); + ++ /* Tiling related stuff. 
*/ ++ switch (dev_priv->chipset) { ++ case 0x44: ++ case 0x4a: ++ nv_wr32(dev, 0x400bc4, 0x1003d888); ++ nv_wr32(dev, 0x400bbc, 0xb7a7b500); ++ break; ++ case 0x46: ++ nv_wr32(dev, 0x400bc4, 0x0000e024); ++ nv_wr32(dev, 0x400bbc, 0xb7a7b520); ++ break; ++ case 0x4c: ++ case 0x4e: ++ case 0x67: ++ nv_wr32(dev, 0x400bc4, 0x1003d888); ++ nv_wr32(dev, 0x400bbc, 0xb7a7b540); ++ break; ++ default: ++ break; + } + -+ for (i = 0; type == OUTPUT_ANY && i < 4; i++) { -+ if (dev_priv->chipset < 0x90 || -+ dev_priv->chipset == 0x92 || -+ dev_priv->chipset == 0xa0) -+ mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(i)); -+ else -+ mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(i)); + /* Turn all the tiling regions off. */ + for (i = 0; i < pfb->num_tiles; i++) + nv40_graph_set_region_tiling(dev, i, 0, 0, 0); +diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c +new file mode 100644 +index 0000000..2cdc2bf +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv50_calc.c +@@ -0,0 +1,87 @@ ++/* ++ * Copyright 2010 Red Hat Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: Ben Skeggs ++ */ ++ ++#include "drmP.h" ++#include "drm_fixed.h" ++#include "nouveau_drv.h" ++#include "nouveau_hw.h" ++ ++int ++nv50_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk, ++ int *N1, int *M1, int *N2, int *M2, int *P) ++{ ++ struct nouveau_pll_vals pll_vals; ++ int ret; ++ ++ ret = nouveau_calc_pll_mnp(dev, pll, clk, &pll_vals); ++ if (ret <= 0) ++ return ret; ++ ++ *N1 = pll_vals.N1; ++ *M1 = pll_vals.M1; ++ *N2 = pll_vals.N2; ++ *M2 = pll_vals.M2; ++ *P = pll_vals.log2P; ++ return ret; ++} ++ ++int ++nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk, ++ int *N, int *fN, int *M, int *P) ++{ ++ fixed20_12 fb_div, a, b; ++ ++ *P = pll->vco1.maxfreq / clk; ++ if (*P > pll->max_p) ++ *P = pll->max_p; ++ if (*P < pll->min_p) ++ *P = pll->min_p; ++ ++ /* *M = ceil(refclk / pll->vco.max_inputfreq); */ ++ a.full = dfixed_const(pll->refclk); ++ b.full = dfixed_const(pll->vco1.max_inputfreq); ++ a.full = dfixed_div(a, b); ++ a.full = dfixed_ceil(a); ++ *M = dfixed_trunc(a); ++ ++ /* fb_div = (vco * *M) / refclk; */ ++ fb_div.full = dfixed_const(clk * *P); ++ fb_div.full = dfixed_mul(fb_div, a); ++ a.full = dfixed_const(pll->refclk); ++ fb_div.full = dfixed_div(fb_div, a); ++ ++ /* *N = floor(fb_div); */ ++ a.full = dfixed_floor(fb_div); ++ *N = dfixed_trunc(fb_div); ++ ++ /* *fN = (fmod(fb_div, 1.0) * 8192) - 4096; */ ++ b.full = dfixed_const(8192); ++ a.full = dfixed_mul(a, b); ++ fb_div.full = dfixed_mul(fb_div, b); ++ fb_div.full = fb_div.full - a.full; ++ *fN = dfixed_trunc(fb_div) - 4096; ++ *fN &= 0xffff; ++ ++ return clk; ++} +diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c +index d1a651e..03d0e41 100644 +--- a/drivers/gpu/drm/nouveau/nv50_crtc.c ++++ b/drivers/gpu/drm/nouveau/nv50_crtc.c +@@ -264,32 +264,40 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update) + int + nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) + { +- uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head); +- struct nouveau_pll_vals pll; +- struct pll_lims limits; ++ uint32_t reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head); ++ struct pll_lims pll; + uint32_t reg1, reg2; +- int ret; ++ int ret, N1, M1, N2, M2, P; + +- ret = get_pll_limits(dev, pll_reg, &limits); ++ ret = get_pll_limits(dev, reg, &pll); + if (ret) + return ret; -+ NV_DEBUG_KMS(dev, "SOR-%d mc: 0x%08x\n", i, mc); -+ if (!(mc & (1 << crtc))) -+ continue; +- ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll); +- if (ret <= 0) +- return ret; ++ if (pll.vco2.maxfreq) { ++ ret = nv50_calc_pll(dev, &pll, pclk, &N1, &M1, &N2, &M2, &P); ++ if (ret <= 0) ++ return 0; + -+ switch ((mc & 0x00000f00) >> 8) { -+ case 0: type = OUTPUT_LVDS; break; -+ case 1: type = OUTPUT_TMDS; break; -+ case 2: type = OUTPUT_TMDS; break; -+ case 5: type = OUTPUT_TMDS; break; -+ case 8: type = OUTPUT_DP; break; -+ case 9: type = OUTPUT_DP; break; -+ default: -+ NV_ERROR(dev, "invalid mc, SOR-%d: 0x%08x\n", i, mc); -+ goto ack; -+ } ++ NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n", ++ pclk, ret, N1, M1, N2, M2, P); + +- if (limits.vco2.maxfreq) { +- reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00; +- reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00; +- nv_wr32(dev, pll_reg, 0x10000611); +- nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1); +- nv_wr32(dev, pll_reg + 8, +- reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2); ++ reg1 = nv_rd32(dev, reg + 4) & 0xff00ff00; ++ reg2 = nv_rd32(dev, reg + 8) & 0x8000ff00; ++ nv_wr32(dev, 
reg, 0x10000611); ++ nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1); ++ nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2); + } else { +- reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000; +- nv_wr32(dev, pll_reg, 0x50000610); +- nv_wr32(dev, pll_reg + 4, reg1 | +- (pll.log2P << 16) | (pll.M1 << 8) | pll.N1); ++ ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P); ++ if (ret <= 0) ++ return 0; + -+ or = i; -+ } ++ NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n", ++ pclk, ret, N1, N2, M1, P); + -+ /* There was no encoder to disable */ -+ if (type == OUTPUT_ANY) -+ goto ack; ++ reg1 = nv_rd32(dev, reg + 4) & 0xffc00000; ++ nv_wr32(dev, reg, 0x50000610); ++ nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1); ++ nv_wr32(dev, reg + 8, N2); + } + + return 0; +diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c +index f08f042..e114f81 100644 +--- a/drivers/gpu/drm/nouveau/nv50_dac.c ++++ b/drivers/gpu/drm/nouveau/nv50_dac.c +@@ -79,8 +79,8 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) + } + + /* Use bios provided value if possible. */ +- if (dev_priv->vbios->dactestval) { +- load_pattern = dev_priv->vbios->dactestval; ++ if (dev_priv->vbios.dactestval) { ++ load_pattern = dev_priv->vbios.dactestval; + NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n", + load_pattern); + } else { +@@ -275,14 +275,11 @@ static const struct drm_encoder_funcs nv50_dac_encoder_funcs = { + }; + + int +-nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry) ++nv50_dac_create(struct drm_connector *connector, struct dcb_entry *entry) + { + struct nouveau_encoder *nv_encoder; + struct drm_encoder *encoder; + +- NV_DEBUG_KMS(dev, "\n"); +- NV_INFO(dev, "Detected a DAC output\n"); +- + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; +@@ -293,12 +290,14 @@ nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry) + + nv_encoder->disconnect = nv50_dac_disconnect; + +- drm_encoder_init(dev, encoder, &nv50_dac_encoder_funcs, ++ drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs, + DRM_MODE_ENCODER_DAC); + drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs); + + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; ++ ++ drm_mode_connector_attach_encoder(connector, encoder); + return 0; + } + +diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c +index 90f0bf5..a0d7467 100644 +--- a/drivers/gpu/drm/nouveau/nv50_display.c ++++ b/drivers/gpu/drm/nouveau/nv50_display.c +@@ -143,7 +143,7 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) + } + + ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19, +- 0, nouveau_mem_fb_amount(dev)); ++ 0, dev_priv->vram_size); + if (ret) { + nv50_evo_channel_del(pchan); + return ret; +@@ -231,7 +231,7 @@ nv50_display_init(struct drm_device *dev) + /* This used to be in crtc unblank, but seems out of place there. */ + nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0); + /* RAM is clamped to 256 MiB. 
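(That is, a board with 512 MiB or 1 GiB of VRAM is still treated here as if it had only 256 MiB, i.e. 0x10000000 bytes; see the clamp on ram_amount just below.)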
*/ +- ram_amount = nouveau_mem_fb_amount(dev); ++ ram_amount = dev_priv->vram_size; + NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount); + if (ram_amount > 256*1024*1024) + ram_amount = 256*1024*1024; +@@ -370,9 +370,7 @@ nv50_display_init(struct drm_device *dev) + struct nouveau_connector *conn = nouveau_connector(connector); + struct dcb_gpio_entry *gpio; + +- if (connector->connector_type != DRM_MODE_CONNECTOR_DVII && +- connector->connector_type != DRM_MODE_CONNECTOR_DVID && +- connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) ++ if (conn->dcb->gpio_tag == 0xff) + continue; + + gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag); +@@ -465,8 +463,8 @@ static int nv50_display_disable(struct drm_device *dev) + int nv50_display_create(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct parsed_dcb *dcb = dev_priv->vbios->dcb; +- uint32_t connector[16] = {}; ++ struct dcb_table *dcb = &dev_priv->vbios.dcb; ++ struct drm_connector *connector, *ct; + int ret, i; + + NV_DEBUG_KMS(dev, "\n"); +@@ -509,62 +507,39 @@ int nv50_display_create(struct drm_device *dev) + continue; + } + ++ connector = nouveau_connector_create(dev, entry->connector); ++ if (IS_ERR(connector)) ++ continue; ++ + switch (entry->type) { + case OUTPUT_TMDS: + case OUTPUT_LVDS: + case OUTPUT_DP: +- nv50_sor_create(dev, entry); ++ nv50_sor_create(connector, entry); + break; + case OUTPUT_ANALOG: +- nv50_dac_create(dev, entry); ++ nv50_dac_create(connector, entry); + break; + default: + NV_WARN(dev, "DCB encoder %d unknown\n", entry->type); + continue; + } +- +- connector[entry->connector] |= (1 << entry->type); + } + +- /* It appears that DCB 3.0+ VBIOS has a connector table, however, +- * I'm not 100% certain how to decode it correctly yet so just +- * look at what encoders are present on each connector index and +- * attempt to derive the connector type from that. 
+- */ +- for (i = 0 ; i < dcb->entries; i++) { +- struct dcb_entry *entry = &dcb->entry[i]; +- uint16_t encoders; +- int type; +- +- encoders = connector[entry->connector]; +- if (!(encoders & (1 << entry->type))) +- continue; +- connector[entry->connector] = 0; +- +- if (encoders & (1 << OUTPUT_DP)) { +- type = DRM_MODE_CONNECTOR_DisplayPort; +- } else if (encoders & (1 << OUTPUT_TMDS)) { +- if (encoders & (1 << OUTPUT_ANALOG)) +- type = DRM_MODE_CONNECTOR_DVII; +- else +- type = DRM_MODE_CONNECTOR_DVID; +- } else if (encoders & (1 << OUTPUT_ANALOG)) { +- type = DRM_MODE_CONNECTOR_VGA; +- } else if (encoders & (1 << OUTPUT_LVDS)) { +- type = DRM_MODE_CONNECTOR_LVDS; +- } else { +- type = DRM_MODE_CONNECTOR_Unknown; ++ list_for_each_entry_safe(connector, ct, ++ &dev->mode_config.connector_list, head) { ++ if (!connector->encoder_ids[0]) { ++ NV_WARN(dev, "%s has no encoders, removing\n", ++ drm_get_connector_name(connector)); ++ connector->funcs->destroy(connector); + } +- +- if (type == DRM_MODE_CONNECTOR_Unknown) +- continue; +- +- nouveau_connector_create(dev, entry->connector, type); + } + + ret = nv50_display_init(dev); +- if (ret) ++ if (ret) { ++ nv50_display_destroy(dev); + return ret; ++ } + + return 0; + } +@@ -667,8 +642,8 @@ nv50_display_irq_head(struct drm_device *dev, int *phead, + return -1; + } + +- for (i = 0; i < dev_priv->vbios->dcb->entries; i++) { +- struct dcb_entry *dcbent = &dev_priv->vbios->dcb->entry[i]; ++ for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { ++ struct dcb_entry *dcbent = &dev_priv->vbios.dcb.entry[i]; + + if (dcbent->type != type) + continue; +@@ -692,7 +667,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent, + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_connector *nv_connector = NULL; + struct drm_encoder *encoder; +- struct nvbios *bios = &dev_priv->VBIOS; ++ struct nvbios *bios = &dev_priv->vbios; + uint32_t mc, script = 0, or; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { +@@ -710,7 +685,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent, + switch (dcbent->type) { + case OUTPUT_LVDS: + script = (mc >> 8) & 0xf; +- if (bios->pub.fp_no_ddc) { ++ if (bios->fp_no_ddc) { + if (bios->fp.dual_link) + script |= 0x0100; + if (bios->fp.if_is_24bit) +@@ -815,6 +790,37 @@ ack: + } + + static void ++nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb) ++{ ++ int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1); ++ struct drm_encoder *encoder; ++ uint32_t tmp, unk0 = 0, unk1 = 0; ++ ++ if (dcb->type != OUTPUT_DP) ++ return; ++ ++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { ++ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); ++ ++ if (nv_encoder->dcb == dcb) { ++ unk0 = nv_encoder->dp.unk0; ++ unk1 = nv_encoder->dp.unk1; ++ break; ++ } ++ } ++ ++ if (unk0 || unk1) { ++ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); ++ tmp &= 0xfffffe03; ++ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0); ++ ++ tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link)); ++ tmp &= 0xfef080c0; ++ nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1); ++ } ++} ++ ++static void + nv50_display_unk20_handler(struct drm_device *dev) + { + struct dcb_entry *dcbent; +@@ -837,6 +843,8 @@ nv50_display_unk20_handler(struct drm_device *dev) + + nouveau_bios_run_display_table(dev, dcbent, script, pclk); + ++ nv50_display_unk20_dp_hack(dev, dcbent); ++ + tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head)); + 
tmp &= ~0x000000f; + nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp); +@@ -919,10 +927,12 @@ nv50_display_error_handler(struct drm_device *dev) + nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000); + } + +-static void +-nv50_display_irq_hotplug(struct drm_device *dev) ++void ++nv50_display_irq_hotplug_bh(struct work_struct *work) + { +- struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_private *dev_priv = ++ container_of(work, struct drm_nouveau_private, hpd_work); ++ struct drm_device *dev = dev_priv->dev; + struct drm_connector *connector; + const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; + uint32_t unplug_mask, plug_mask, change_mask; +@@ -975,6 +985,8 @@ nv50_display_irq_hotplug(struct drm_device *dev) + nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054)); + if (dev_priv->chipset >= 0x90) + nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074)); ++ ++ drm_sysfs_hotplug_event(dev); + } + + void +@@ -983,8 +995,10 @@ nv50_display_irq_handler(struct drm_device *dev) + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t delayed = 0; + +- while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) +- nv50_display_irq_hotplug(dev); ++ if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) { ++ if (!work_pending(&dev_priv->hpd_work)) ++ queue_work(dev_priv->wq, &dev_priv->hpd_work); ++ } + + while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { + uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); +diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h +index 3ae8d07..581d405 100644 +--- a/drivers/gpu/drm/nouveau/nv50_display.h ++++ b/drivers/gpu/drm/nouveau/nv50_display.h +@@ -37,6 +37,7 @@ + + void nv50_display_irq_handler(struct drm_device *dev); + void nv50_display_irq_handler_bh(struct work_struct *work); ++void nv50_display_irq_hotplug_bh(struct work_struct *work); + int nv50_display_init(struct drm_device *dev); + int nv50_display_create(struct drm_device *dev); + int nv50_display_destroy(struct drm_device *dev); +diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c +new file mode 100644 +index 0000000..32611bd +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv50_fb.c +@@ -0,0 +1,38 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++int ++nv50_fb_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* Not a clue what this is exactly. Without pointing it at a ++ * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) ++ * cause IOMMU "read from address 0" errors (rh#561267) ++ */ ++ nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8); ++ ++ /* This is needed to get meaningful information from 100c90 ++ * on traps. No idea what these values mean exactly. 
*/ ++ switch (dev_priv->chipset) { ++ case 0x50: ++ nv_wr32(dev, 0x100c90, 0x0707ff); ++ break; ++ case 0xa5: ++ case 0xa8: ++ nv_wr32(dev, 0x100c90, 0x0d0fff); ++ break; ++ default: ++ nv_wr32(dev, 0x100c90, 0x1d07ff); ++ break; ++ } ++ ++ return 0; ++} ++ ++void ++nv50_fb_takedown(struct drm_device *dev) ++{ ++} +diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c +index 0f57cdf..a8c70e7 100644 +--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c ++++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c +@@ -109,7 +109,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) + return; + } + +- width = (image->width + 31) & ~31; ++ width = ALIGN(image->width, 32); + dwords = (width * image->height) >> 5; + + BEGIN_RING(chan, NvSub2D, 0x0814, 2); +@@ -157,8 +157,11 @@ nv50_fbcon_accel_init(struct fb_info *info) + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + struct nouveau_gpuobj *eng2d = NULL; ++ uint64_t fb; + int ret, format; + ++ fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base; ++ + switch (info->var.bits_per_pixel) { + case 8: + format = 0xf3; +@@ -233,7 +236,7 @@ nv50_fbcon_accel_init(struct fb_info *info) + BEGIN_RING(chan, NvSub2D, 0x0808, 3); + OUT_RING(chan, 0); + OUT_RING(chan, 0); +- OUT_RING(chan, 0); ++ OUT_RING(chan, 1); + BEGIN_RING(chan, NvSub2D, 0x081c, 1); + OUT_RING(chan, 1); + BEGIN_RING(chan, NvSub2D, 0x0840, 4); +@@ -248,9 +251,8 @@ nv50_fbcon_accel_init(struct fb_info *info) + OUT_RING(chan, info->fix.line_length); + OUT_RING(chan, info->var.xres_virtual); + OUT_RING(chan, info->var.yres_virtual); +- OUT_RING(chan, 0); +- OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + +- dev_priv->vm_vram_base); ++ OUT_RING(chan, upper_32_bits(fb)); ++ OUT_RING(chan, lower_32_bits(fb)); + BEGIN_RING(chan, NvSub2D, 0x0230, 2); + OUT_RING(chan, format); + OUT_RING(chan, 1); +@@ -258,9 +260,8 @@ nv50_fbcon_accel_init(struct fb_info *info) + OUT_RING(chan, info->fix.line_length); + OUT_RING(chan, info->var.xres_virtual); + OUT_RING(chan, info->var.yres_virtual); +- OUT_RING(chan, 0); +- OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + +- dev_priv->vm_vram_base); ++ OUT_RING(chan, upper_32_bits(fb)); ++ OUT_RING(chan, lower_32_bits(fb)); + + return 0; + } +diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c +index df5335a..e20c0e2 100644 +--- a/drivers/gpu/drm/nouveau/nv50_fifo.c ++++ b/drivers/gpu/drm/nouveau/nv50_fifo.c +@@ -243,6 +243,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan) + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *ramfc = NULL; ++ unsigned long flags; + int ret; + + NV_DEBUG(dev, "ch%d\n", chan->id); +@@ -278,6 +279,8 @@ nv50_fifo_create_context(struct nouveau_channel *chan) + return ret; + } + ++ spin_lock_irqsave(&dev_priv->context_switch_lock, flags); ++ + dev_priv->engine.instmem.prepare_access(dev, true); + + nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4); +@@ -306,10 +309,12 @@ nv50_fifo_create_context(struct nouveau_channel *chan) + ret = nv50_fifo_channel_enable(dev, chan->id, false); + if (ret) { + NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret); ++ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + nouveau_gpuobj_ref_del(dev, &chan->ramfc); + return ret; + } + ++ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + return 0; + } + +diff --git 
a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c +new file mode 100644 +index 0000000..c61782b +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv50_gpio.c +@@ -0,0 +1,76 @@ ++/* ++ * Copyright 2010 Red Hat Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: Ben Skeggs ++ */ ++ ++#include "drmP.h" ++#include "nouveau_drv.h" ++#include "nouveau_hw.h" ++ ++static int ++nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift) ++{ ++ const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; ++ ++ if (gpio->line > 32) ++ return -EINVAL; ++ ++ *reg = nv50_gpio_reg[gpio->line >> 3]; ++ *shift = (gpio->line & 7) << 2; ++ return 0; ++} ++ ++int ++nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag) ++{ ++ struct dcb_gpio_entry *gpio; ++ uint32_t r, s, v; ++ ++ gpio = nouveau_bios_gpio_entry(dev, tag); ++ if (!gpio) ++ return -ENOENT; ++ ++ if (nv50_gpio_location(gpio, &r, &s)) ++ return -EINVAL; ++ ++ v = nv_rd32(dev, r) >> (s + 2); ++ return ((v & 1) == (gpio->state[1] & 1)); ++} ++ ++int ++nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) ++{ ++ struct dcb_gpio_entry *gpio; ++ uint32_t r, s, v; ++ ++ gpio = nouveau_bios_gpio_entry(dev, tag); ++ if (!gpio) ++ return -ENOENT; ++ ++ if (nv50_gpio_location(gpio, &r, &s)) ++ return -EINVAL; ++ ++ v = nv_rd32(dev, r) & ~(0x3 << s); ++ v |= (gpio->state[state] ^ 2) << s; ++ nv_wr32(dev, r, v); ++ return 0; ++} +diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c +index 6d50480..b203d06 100644 +--- a/drivers/gpu/drm/nouveau/nv50_graph.c ++++ b/drivers/gpu/drm/nouveau/nv50_graph.c +@@ -28,30 +28,7 @@ + #include "drm.h" + #include "nouveau_drv.h" + +-MODULE_FIRMWARE("nouveau/nv50.ctxprog"); +-MODULE_FIRMWARE("nouveau/nv50.ctxvals"); +-MODULE_FIRMWARE("nouveau/nv84.ctxprog"); +-MODULE_FIRMWARE("nouveau/nv84.ctxvals"); +-MODULE_FIRMWARE("nouveau/nv86.ctxprog"); +-MODULE_FIRMWARE("nouveau/nv86.ctxvals"); +-MODULE_FIRMWARE("nouveau/nv92.ctxprog"); +-MODULE_FIRMWARE("nouveau/nv92.ctxvals"); +-MODULE_FIRMWARE("nouveau/nv94.ctxprog"); +-MODULE_FIRMWARE("nouveau/nv94.ctxvals"); +-MODULE_FIRMWARE("nouveau/nv96.ctxprog"); +-MODULE_FIRMWARE("nouveau/nv96.ctxvals"); +-MODULE_FIRMWARE("nouveau/nv98.ctxprog"); +-MODULE_FIRMWARE("nouveau/nv98.ctxvals"); +-MODULE_FIRMWARE("nouveau/nva0.ctxprog"); +-MODULE_FIRMWARE("nouveau/nva0.ctxvals"); +-MODULE_FIRMWARE("nouveau/nva5.ctxprog"); 
+-MODULE_FIRMWARE("nouveau/nva5.ctxvals"); +-MODULE_FIRMWARE("nouveau/nva8.ctxprog"); +-MODULE_FIRMWARE("nouveau/nva8.ctxvals"); +-MODULE_FIRMWARE("nouveau/nvaa.ctxprog"); +-MODULE_FIRMWARE("nouveau/nvaa.ctxvals"); +-MODULE_FIRMWARE("nouveau/nvac.ctxprog"); +-MODULE_FIRMWARE("nouveau/nvac.ctxvals"); ++#include "nouveau_grctx.h" + + #define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) + +@@ -79,6 +56,10 @@ nv50_graph_init_intr(struct drm_device *dev) + static void + nv50_graph_init_regs__nv(struct drm_device *dev) + { ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t units = nv_rd32(dev, 0x1540); ++ int i; ++ + NV_DEBUG(dev, "\n"); + + nv_wr32(dev, 0x400804, 0xc0000000); +@@ -88,6 +69,20 @@ nv50_graph_init_regs__nv(struct drm_device *dev) + nv_wr32(dev, 0x405018, 0xc0000000); + nv_wr32(dev, 0x402000, 0xc0000000); + ++ for (i = 0; i < 16; i++) { ++ if (units & 1 << i) { ++ if (dev_priv->chipset < 0xa0) { ++ nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000); ++ nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000); ++ nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000); ++ } else { ++ nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000); ++ nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000); ++ nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000); ++ } ++ } ++ } ++ + nv_wr32(dev, 0x400108, 0xffffffff); + + nv_wr32(dev, 0x400824, 0x00004000); +@@ -111,9 +106,34 @@ nv50_graph_init_ctxctl(struct drm_device *dev) + + NV_DEBUG(dev, "\n"); + +- nouveau_grctx_prog_load(dev); +- if (!dev_priv->engine.graph.ctxprog) +- dev_priv->engine.graph.accel_blocked = true; ++ if (nouveau_ctxfw) { ++ nouveau_grctx_prog_load(dev); ++ dev_priv->engine.graph.grctx_size = 0x70000; ++ } ++ if (!dev_priv->engine.graph.ctxprog) { ++ struct nouveau_grctx ctx = {}; ++ uint32_t *cp = kmalloc(512 * 4, GFP_KERNEL); ++ int i; ++ if (!cp) { ++ NV_ERROR(dev, "Couldn't alloc ctxprog! 
Disabling acceleration.\n"); ++ dev_priv->engine.graph.accel_blocked = true; ++ return 0; ++ } ++ ctx.dev = dev; ++ ctx.mode = NOUVEAU_GRCTX_PROG; ++ ctx.data = cp; ++ ctx.ctxprog_max = 512; ++ if (!nv50_grctx_init(&ctx)) { ++ dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; ++ ++ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); ++ for (i = 0; i < ctx.ctxprog_len; i++) ++ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); ++ } else { ++ dev_priv->engine.graph.accel_blocked = true; ++ } ++ kfree(cp); ++ } + + nv_wr32(dev, 0x400320, 4); + nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0); +@@ -193,13 +213,13 @@ nv50_graph_create_context(struct nouveau_channel *chan) + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; + struct nouveau_gpuobj *ctx; +- uint32_t grctx_size = 0x70000; ++ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + int hdr, ret; + + NV_DEBUG(dev, "ch%d\n", chan->id); + +- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000, +- NVOBJ_FLAG_ZERO_ALLOC | ++ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, ++ 0x1000, NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); + if (ret) + return ret; +@@ -209,7 +229,7 @@ nv50_graph_create_context(struct nouveau_channel *chan) + dev_priv->engine.instmem.prepare_access(dev, true); + nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002); + nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance + +- grctx_size - 1); ++ pgraph->grctx_size - 1); + nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance); + nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0); + nv_wo32(dev, ramin, (hdr + 0x10)/4, 0); +@@ -217,12 +237,16 @@ nv50_graph_create_context(struct nouveau_channel *chan) + dev_priv->engine.instmem.finish_access(dev); + + dev_priv->engine.instmem.prepare_access(dev, true); +- nouveau_grctx_vals_load(dev, ctx); ++ if (!pgraph->ctxprog) { ++ struct nouveau_grctx ctx = {}; ++ ctx.dev = chan->dev; ++ ctx.mode = NOUVEAU_GRCTX_VALS; ++ ctx.data = chan->ramin_grctx->gpuobj; ++ nv50_grctx_init(&ctx); ++ } else { ++ nouveau_grctx_vals_load(dev, ctx); ++ } + nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); +- if ((dev_priv->chipset & 0xf0) == 0xa0) +- nv_wo32(dev, ctx, 0x00004/4, 0x00000000); +- else +- nv_wo32(dev, ctx, 0x0011c/4, 0x00000000); + dev_priv->engine.instmem.finish_access(dev); + + return 0; +@@ -386,9 +410,10 @@ struct nouveau_pgraph_object_class nv50_graph_grclass[] = { + { 0x5039, false, NULL }, /* m2mf */ + { 0x502d, false, NULL }, /* 2d */ + { 0x50c0, false, NULL }, /* compute */ ++ { 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */ + { 0x5097, false, NULL }, /* tesla (nv50) */ +- { 0x8297, false, NULL }, /* tesla (nv80/nv90) */ +- { 0x8397, false, NULL }, /* tesla (nva0) */ +- { 0x8597, false, NULL }, /* tesla (nva8) */ ++ { 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */ ++ { 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */ ++ { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ + {} + }; +diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c +new file mode 100644 +index 0000000..42a8fb2 +--- /dev/null ++++ b/drivers/gpu/drm/nouveau/nv50_grctx.c +@@ -0,0 +1,2383 @@ ++/* ++ * Copyright 2009 Marcin Kościelnicki ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation 
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#define CP_FLAG_CLEAR 0 ++#define CP_FLAG_SET 1 ++#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0) ++#define CP_FLAG_SWAP_DIRECTION_LOAD 0 ++#define CP_FLAG_SWAP_DIRECTION_SAVE 1 ++#define CP_FLAG_UNK01 ((0 * 32) + 1) ++#define CP_FLAG_UNK01_CLEAR 0 ++#define CP_FLAG_UNK01_SET 1 ++#define CP_FLAG_UNK03 ((0 * 32) + 3) ++#define CP_FLAG_UNK03_CLEAR 0 ++#define CP_FLAG_UNK03_SET 1 ++#define CP_FLAG_USER_SAVE ((0 * 32) + 5) ++#define CP_FLAG_USER_SAVE_NOT_PENDING 0 ++#define CP_FLAG_USER_SAVE_PENDING 1 ++#define CP_FLAG_USER_LOAD ((0 * 32) + 6) ++#define CP_FLAG_USER_LOAD_NOT_PENDING 0 ++#define CP_FLAG_USER_LOAD_PENDING 1 ++#define CP_FLAG_UNK0B ((0 * 32) + 0xb) ++#define CP_FLAG_UNK0B_CLEAR 0 ++#define CP_FLAG_UNK0B_SET 1 ++#define CP_FLAG_UNK1D ((0 * 32) + 0x1d) ++#define CP_FLAG_UNK1D_CLEAR 0 ++#define CP_FLAG_UNK1D_SET 1 ++#define CP_FLAG_UNK20 ((1 * 32) + 0) ++#define CP_FLAG_UNK20_CLEAR 0 ++#define CP_FLAG_UNK20_SET 1 ++#define CP_FLAG_STATUS ((2 * 32) + 0) ++#define CP_FLAG_STATUS_BUSY 0 ++#define CP_FLAG_STATUS_IDLE 1 ++#define CP_FLAG_AUTO_SAVE ((2 * 32) + 4) ++#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0 ++#define CP_FLAG_AUTO_SAVE_PENDING 1 ++#define CP_FLAG_AUTO_LOAD ((2 * 32) + 5) ++#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0 ++#define CP_FLAG_AUTO_LOAD_PENDING 1 ++#define CP_FLAG_NEWCTX ((2 * 32) + 10) ++#define CP_FLAG_NEWCTX_BUSY 0 ++#define CP_FLAG_NEWCTX_DONE 1 ++#define CP_FLAG_XFER ((2 * 32) + 11) ++#define CP_FLAG_XFER_IDLE 0 ++#define CP_FLAG_XFER_BUSY 1 ++#define CP_FLAG_ALWAYS ((2 * 32) + 13) ++#define CP_FLAG_ALWAYS_FALSE 0 ++#define CP_FLAG_ALWAYS_TRUE 1 ++#define CP_FLAG_INTR ((2 * 32) + 15) ++#define CP_FLAG_INTR_NOT_PENDING 0 ++#define CP_FLAG_INTR_PENDING 1 ++ ++#define CP_CTX 0x00100000 ++#define CP_CTX_COUNT 0x000f0000 ++#define CP_CTX_COUNT_SHIFT 16 ++#define CP_CTX_REG 0x00003fff ++#define CP_LOAD_SR 0x00200000 ++#define CP_LOAD_SR_VALUE 0x000fffff ++#define CP_BRA 0x00400000 ++#define CP_BRA_IP 0x0001ff00 ++#define CP_BRA_IP_SHIFT 8 ++#define CP_BRA_IF_CLEAR 0x00000080 ++#define CP_BRA_FLAG 0x0000007f ++#define CP_WAIT 0x00500000 ++#define CP_WAIT_SET 0x00000080 ++#define CP_WAIT_FLAG 0x0000007f ++#define CP_SET 0x00700000 ++#define CP_SET_1 0x00000080 ++#define CP_SET_FLAG 0x0000007f ++#define CP_NEWCTX 0x00600004 ++#define CP_NEXT_TO_SWAP 0x00600005 ++#define CP_SET_CONTEXT_POINTER 0x00600006 ++#define CP_SET_XFER_POINTER 0x00600007 ++#define CP_ENABLE 0x00600009 ++#define CP_END 0x0060000c ++#define CP_NEXT_TO_CURRENT 0x0060000d ++#define CP_DISABLE1 0x0090ffff ++#define CP_DISABLE2 0x0091ffff ++#define CP_XFER_1 0x008000ff ++#define CP_XFER_2 0x008800ff ++#define CP_SEEK_1 0x00c000ff ++#define 
CP_SEEK_2 0x00c800ff ++ ++#include "drmP.h" ++#include "nouveau_drv.h" ++#include "nouveau_grctx.h" ++ ++/* ++ * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's ++ * the GPU itself that does context-switching, but it needs a special ++ * microcode to do it. And it's the driver's task to supply this microcode, ++ * further known as ctxprog, as well as the initial context values, known ++ * as ctxvals. ++ * ++ * Without ctxprog, you cannot switch contexts. Not even in software, since ++ * the majority of context [xfer strands] isn't accessible directly. You're ++ * stuck with a single channel, and you also suffer all the problems resulting ++ * from missing ctxvals, since you cannot load them. ++ * ++ * Without ctxvals, you're stuck with PGRAPH's default context. It's enough to ++ * run 2d operations, but trying to utilise 3d or CUDA will just lock you up, ++ * since you don't have... some sort of needed setup. ++ * ++ * Nouveau will just disable acceleration if not given ctxprog + ctxvals, since ++ * it's too much hassle to handle no-ctxprog as a special case. ++ */ ++ ++/* ++ * How ctxprogs work. ++ * ++ * The ctxprog is written in its own kind of microcode, with very small and ++ * crappy set of available commands. You upload it to a small [512 insns] ++ * area of memory on PGRAPH, and it'll be run when PFIFO wants PGRAPH to ++ * switch channel. or when the driver explicitely requests it. Stuff visible ++ * to ctxprog consists of: PGRAPH MMIO registers, PGRAPH context strands, ++ * the per-channel context save area in VRAM [known as ctxvals or grctx], ++ * 4 flags registers, a scratch register, two grctx pointers, plus many ++ * random poorly-understood details. ++ * ++ * When ctxprog runs, it's supposed to check what operations are asked of it, ++ * save old context if requested, optionally reset PGRAPH and switch to the ++ * new channel, and load the new context. Context consists of three major ++ * parts: subset of MMIO registers and two "xfer areas". ++ */ ++ ++/* TODO: ++ * - document unimplemented bits compared to nvidia ++ * - NVAx: make a TP subroutine, use it. ++ * - use 0x4008fc instead of 0x1540? ++ */ ++ ++enum cp_label { ++ cp_check_load = 1, ++ cp_setup_auto_load, ++ cp_setup_load, ++ cp_setup_save, ++ cp_swap_state, ++ cp_prepare_exit, ++ cp_exit, ++}; ++ ++static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx); ++static void nv50_graph_construct_xfer1(struct nouveau_grctx *ctx); ++static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx); ++ ++/* Main function: construct the ctxprog skeleton, call the other functions. */ ++ ++int ++nv50_grctx_init(struct nouveau_grctx *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; ++ ++ switch (dev_priv->chipset) { ++ case 0x50: ++ case 0x84: ++ case 0x86: ++ case 0x92: ++ case 0x94: ++ case 0x96: ++ case 0x98: ++ case 0xa0: ++ case 0xa3: ++ case 0xa5: ++ case 0xa8: ++ case 0xaa: ++ case 0xac: ++ break; ++ default: ++ NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for " ++ "your NV%x card.\n", dev_priv->chipset); ++ NV_ERROR(ctx->dev, "Disabling acceleration. 
Please contact " ++ "the devs.\n"); ++ return -ENOSYS; ++ } ++ /* decide whether we're loading/unloading the context */ ++ cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save); ++ cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save); ++ ++ cp_name(ctx, cp_check_load); ++ cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load); ++ cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load); ++ cp_bra (ctx, ALWAYS, TRUE, cp_exit); ++ ++ /* setup for context load */ ++ cp_name(ctx, cp_setup_auto_load); ++ cp_out (ctx, CP_DISABLE1); ++ cp_out (ctx, CP_DISABLE2); ++ cp_out (ctx, CP_ENABLE); ++ cp_out (ctx, CP_NEXT_TO_SWAP); ++ cp_set (ctx, UNK01, SET); ++ cp_name(ctx, cp_setup_load); ++ cp_out (ctx, CP_NEWCTX); ++ cp_wait(ctx, NEWCTX, BUSY); ++ cp_set (ctx, UNK1D, CLEAR); ++ cp_set (ctx, SWAP_DIRECTION, LOAD); ++ cp_bra (ctx, UNK0B, SET, cp_prepare_exit); ++ cp_bra (ctx, ALWAYS, TRUE, cp_swap_state); ++ ++ /* setup for context save */ ++ cp_name(ctx, cp_setup_save); ++ cp_set (ctx, UNK1D, SET); ++ cp_wait(ctx, STATUS, BUSY); ++ cp_wait(ctx, INTR, PENDING); ++ cp_bra (ctx, STATUS, BUSY, cp_setup_save); ++ cp_set (ctx, UNK01, SET); ++ cp_set (ctx, SWAP_DIRECTION, SAVE); ++ ++ /* general PGRAPH state */ ++ cp_name(ctx, cp_swap_state); ++ cp_set (ctx, UNK03, SET); ++ cp_pos (ctx, 0x00004/4); ++ cp_ctx (ctx, 0x400828, 1); /* needed. otherwise, flickering happens. */ ++ cp_pos (ctx, 0x00100/4); ++ nv50_graph_construct_mmio(ctx); ++ nv50_graph_construct_xfer1(ctx); ++ nv50_graph_construct_xfer2(ctx); ++ ++ cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load); ++ ++ cp_set (ctx, UNK20, SET); ++ cp_set (ctx, SWAP_DIRECTION, SAVE); /* no idea why this is needed, but fixes at least one lockup. */ ++ cp_lsr (ctx, ctx->ctxvals_base); ++ cp_out (ctx, CP_SET_XFER_POINTER); ++ cp_lsr (ctx, 4); ++ cp_out (ctx, CP_SEEK_1); ++ cp_out (ctx, CP_XFER_1); ++ cp_wait(ctx, XFER, BUSY); ++ ++ /* pre-exit state updates */ ++ cp_name(ctx, cp_prepare_exit); ++ cp_set (ctx, UNK01, CLEAR); ++ cp_set (ctx, UNK03, CLEAR); ++ cp_set (ctx, UNK1D, CLEAR); ++ ++ cp_bra (ctx, USER_SAVE, PENDING, cp_exit); ++ cp_out (ctx, CP_NEXT_TO_CURRENT); ++ ++ cp_name(ctx, cp_exit); ++ cp_set (ctx, USER_SAVE, NOT_PENDING); ++ cp_set (ctx, USER_LOAD, NOT_PENDING); ++ cp_out (ctx, CP_END); ++ ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */ ++ ++ return 0; ++} ++ ++/* ++ * Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which ++ * registers to save/restore and the default values for them. 
++ */ ++ ++static void ++nv50_graph_construct_mmio(struct nouveau_grctx *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; ++ int i, j; ++ int offset, base; ++ uint32_t units = nv_rd32 (ctx->dev, 0x1540); ++ ++ /* 0800: DISPATCH */ ++ cp_ctx(ctx, 0x400808, 7); ++ gr_def(ctx, 0x400814, 0x00000030); ++ cp_ctx(ctx, 0x400834, 0x32); ++ if (dev_priv->chipset == 0x50) { ++ gr_def(ctx, 0x400834, 0xff400040); ++ gr_def(ctx, 0x400838, 0xfff00080); ++ gr_def(ctx, 0x40083c, 0xfff70090); ++ gr_def(ctx, 0x400840, 0xffe806a8); ++ } ++ gr_def(ctx, 0x400844, 0x00000002); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ gr_def(ctx, 0x400894, 0x00001000); ++ gr_def(ctx, 0x4008e8, 0x00000003); ++ gr_def(ctx, 0x4008ec, 0x00001000); ++ if (dev_priv->chipset == 0x50) ++ cp_ctx(ctx, 0x400908, 0xb); ++ else if (dev_priv->chipset < 0xa0) ++ cp_ctx(ctx, 0x400908, 0xc); ++ else ++ cp_ctx(ctx, 0x400908, 0xe); ++ ++ if (dev_priv->chipset >= 0xa0) ++ cp_ctx(ctx, 0x400b00, 0x1); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { ++ cp_ctx(ctx, 0x400b10, 0x1); ++ gr_def(ctx, 0x400b10, 0x0001629d); ++ cp_ctx(ctx, 0x400b20, 0x1); ++ gr_def(ctx, 0x400b20, 0x0001629d); ++ } ++ ++ /* 0C00: VFETCH */ ++ cp_ctx(ctx, 0x400c08, 0x2); ++ gr_def(ctx, 0x400c08, 0x0000fe0c); ++ ++ /* 1000 */ ++ if (dev_priv->chipset < 0xa0) { ++ cp_ctx(ctx, 0x401008, 0x4); ++ gr_def(ctx, 0x401014, 0x00001000); ++ } else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) { ++ cp_ctx(ctx, 0x401008, 0x5); ++ gr_def(ctx, 0x401018, 0x00001000); ++ } else { ++ cp_ctx(ctx, 0x401008, 0x5); ++ gr_def(ctx, 0x401018, 0x00004000); ++ } ++ ++ /* 1400 */ ++ cp_ctx(ctx, 0x401400, 0x8); ++ cp_ctx(ctx, 0x401424, 0x3); ++ if (dev_priv->chipset == 0x50) ++ gr_def(ctx, 0x40142c, 0x0001fd87); ++ else ++ gr_def(ctx, 0x40142c, 0x00000187); ++ cp_ctx(ctx, 0x401540, 0x5); ++ gr_def(ctx, 0x401550, 0x00001018); ++ ++ /* 1800: STREAMOUT */ ++ cp_ctx(ctx, 0x401814, 0x1); ++ gr_def(ctx, 0x401814, 0x000000ff); ++ if (dev_priv->chipset == 0x50) { ++ cp_ctx(ctx, 0x40181c, 0xe); ++ gr_def(ctx, 0x401850, 0x00000004); ++ } else if (dev_priv->chipset < 0xa0) { ++ cp_ctx(ctx, 0x40181c, 0xf); ++ gr_def(ctx, 0x401854, 0x00000004); ++ } else { ++ cp_ctx(ctx, 0x40181c, 0x13); ++ gr_def(ctx, 0x401864, 0x00000004); ++ } ++ ++ /* 1C00 */ ++ cp_ctx(ctx, 0x401c00, 0x1); ++ switch (dev_priv->chipset) { ++ case 0x50: ++ gr_def(ctx, 0x401c00, 0x0001005f); ++ break; ++ case 0x84: ++ case 0x86: ++ case 0x94: ++ gr_def(ctx, 0x401c00, 0x044d00df); ++ break; ++ case 0x92: ++ case 0x96: ++ case 0x98: ++ case 0xa0: ++ case 0xaa: ++ case 0xac: ++ gr_def(ctx, 0x401c00, 0x042500df); ++ break; ++ case 0xa3: ++ case 0xa5: ++ case 0xa8: ++ gr_def(ctx, 0x401c00, 0x142500df); ++ break; ++ } ++ ++ /* 2400 */ ++ cp_ctx(ctx, 0x402400, 0x1); ++ if (dev_priv->chipset == 0x50) ++ cp_ctx(ctx, 0x402408, 0x1); ++ else ++ cp_ctx(ctx, 0x402408, 0x2); ++ gr_def(ctx, 0x402408, 0x00000600); ++ ++ /* 2800 */ ++ cp_ctx(ctx, 0x402800, 0x1); ++ if (dev_priv->chipset == 0x50) ++ gr_def(ctx, 0x402800, 0x00000006); ++ ++ /* 2C00 */ ++ cp_ctx(ctx, 0x402c08, 0x6); ++ if (dev_priv->chipset != 0x50) ++ gr_def(ctx, 0x402c14, 0x01000000); ++ gr_def(ctx, 0x402c18, 0x000000ff); ++ if (dev_priv->chipset == 0x50) ++ cp_ctx(ctx, 0x402ca0, 0x1); ++ else ++ cp_ctx(ctx, 0x402ca0, 0x2); ++ if (dev_priv->chipset < 0xa0) ++ gr_def(ctx, 0x402ca0, 0x00000400); ++ else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) ++ gr_def(ctx, 0x402ca0, 0x00000800); ++ else ++ gr_def(ctx, 
0x402ca0, 0x00000400); ++ cp_ctx(ctx, 0x402cac, 0x4); ++ ++ /* 3000 */ ++ cp_ctx(ctx, 0x403004, 0x1); ++ gr_def(ctx, 0x403004, 0x00000001); ++ ++ /* 3404 */ ++ if (dev_priv->chipset >= 0xa0) { ++ cp_ctx(ctx, 0x403404, 0x1); ++ gr_def(ctx, 0x403404, 0x00000001); ++ } ++ ++ /* 5000 */ ++ cp_ctx(ctx, 0x405000, 0x1); ++ switch (dev_priv->chipset) { ++ case 0x50: ++ gr_def(ctx, 0x405000, 0x00300080); ++ break; ++ case 0x84: ++ case 0xa0: ++ case 0xa3: ++ case 0xa5: ++ case 0xa8: ++ case 0xaa: ++ case 0xac: ++ gr_def(ctx, 0x405000, 0x000e0080); ++ break; ++ case 0x86: ++ case 0x92: ++ case 0x94: ++ case 0x96: ++ case 0x98: ++ gr_def(ctx, 0x405000, 0x00000080); ++ break; ++ } ++ cp_ctx(ctx, 0x405014, 0x1); ++ gr_def(ctx, 0x405014, 0x00000004); ++ cp_ctx(ctx, 0x40501c, 0x1); ++ cp_ctx(ctx, 0x405024, 0x1); ++ cp_ctx(ctx, 0x40502c, 0x1); ++ ++ /* 5400 or maybe 4800 */ ++ if (dev_priv->chipset == 0x50) { ++ offset = 0x405400; ++ cp_ctx(ctx, 0x405400, 0xea); ++ } else if (dev_priv->chipset < 0x94) { ++ offset = 0x405400; ++ cp_ctx(ctx, 0x405400, 0xcb); ++ } else if (dev_priv->chipset < 0xa0) { ++ offset = 0x405400; ++ cp_ctx(ctx, 0x405400, 0xcc); ++ } else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { ++ offset = 0x404800; ++ cp_ctx(ctx, 0x404800, 0xda); ++ } else { ++ offset = 0x405400; ++ cp_ctx(ctx, 0x405400, 0xd4); ++ } ++ gr_def(ctx, offset + 0x0c, 0x00000002); ++ gr_def(ctx, offset + 0x10, 0x00000001); ++ if (dev_priv->chipset >= 0x94) ++ offset += 4; ++ gr_def(ctx, offset + 0x1c, 0x00000001); ++ gr_def(ctx, offset + 0x20, 0x00000100); ++ gr_def(ctx, offset + 0x38, 0x00000002); ++ gr_def(ctx, offset + 0x3c, 0x00000001); ++ gr_def(ctx, offset + 0x40, 0x00000001); ++ gr_def(ctx, offset + 0x50, 0x00000001); ++ gr_def(ctx, offset + 0x54, 0x003fffff); ++ gr_def(ctx, offset + 0x58, 0x00001fff); ++ gr_def(ctx, offset + 0x60, 0x00000001); ++ gr_def(ctx, offset + 0x64, 0x00000001); ++ gr_def(ctx, offset + 0x6c, 0x00000001); ++ gr_def(ctx, offset + 0x70, 0x00000001); ++ gr_def(ctx, offset + 0x74, 0x00000001); ++ gr_def(ctx, offset + 0x78, 0x00000004); ++ gr_def(ctx, offset + 0x7c, 0x00000001); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ offset += 4; ++ gr_def(ctx, offset + 0x80, 0x00000001); ++ gr_def(ctx, offset + 0x84, 0x00000001); ++ gr_def(ctx, offset + 0x88, 0x00000007); ++ gr_def(ctx, offset + 0x8c, 0x00000001); ++ gr_def(ctx, offset + 0x90, 0x00000007); ++ gr_def(ctx, offset + 0x94, 0x00000001); ++ gr_def(ctx, offset + 0x98, 0x00000001); ++ gr_def(ctx, offset + 0x9c, 0x00000001); ++ if (dev_priv->chipset == 0x50) { ++ gr_def(ctx, offset + 0xb0, 0x00000001); ++ gr_def(ctx, offset + 0xb4, 0x00000001); ++ gr_def(ctx, offset + 0xbc, 0x00000001); ++ gr_def(ctx, offset + 0xc0, 0x0000000a); ++ gr_def(ctx, offset + 0xd0, 0x00000040); ++ gr_def(ctx, offset + 0xd8, 0x00000002); ++ gr_def(ctx, offset + 0xdc, 0x00000100); ++ gr_def(ctx, offset + 0xe0, 0x00000001); ++ gr_def(ctx, offset + 0xe4, 0x00000100); ++ gr_def(ctx, offset + 0x100, 0x00000001); ++ gr_def(ctx, offset + 0x124, 0x00000004); ++ gr_def(ctx, offset + 0x13c, 0x00000001); ++ gr_def(ctx, offset + 0x140, 0x00000100); ++ gr_def(ctx, offset + 0x148, 0x00000001); ++ gr_def(ctx, offset + 0x154, 0x00000100); ++ gr_def(ctx, offset + 0x158, 0x00000001); ++ gr_def(ctx, offset + 0x15c, 0x00000100); ++ gr_def(ctx, offset + 0x164, 0x00000001); ++ gr_def(ctx, offset + 0x170, 0x00000100); ++ gr_def(ctx, offset + 0x174, 0x00000001); ++ gr_def(ctx, offset + 0x17c, 0x00000001); ++ gr_def(ctx, offset + 0x188, 0x00000002); ++ 
gr_def(ctx, offset + 0x190, 0x00000001); ++ gr_def(ctx, offset + 0x198, 0x00000001); ++ gr_def(ctx, offset + 0x1ac, 0x00000003); ++ offset += 0xd0; ++ } else { ++ gr_def(ctx, offset + 0xb0, 0x00000001); ++ gr_def(ctx, offset + 0xb4, 0x00000100); ++ gr_def(ctx, offset + 0xbc, 0x00000001); ++ gr_def(ctx, offset + 0xc8, 0x00000100); ++ gr_def(ctx, offset + 0xcc, 0x00000001); ++ gr_def(ctx, offset + 0xd0, 0x00000100); ++ gr_def(ctx, offset + 0xd8, 0x00000001); ++ gr_def(ctx, offset + 0xe4, 0x00000100); ++ } ++ gr_def(ctx, offset + 0xf8, 0x00000004); ++ gr_def(ctx, offset + 0xfc, 0x00000070); ++ gr_def(ctx, offset + 0x100, 0x00000080); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ offset += 4; ++ gr_def(ctx, offset + 0x114, 0x0000000c); ++ if (dev_priv->chipset == 0x50) ++ offset -= 4; ++ gr_def(ctx, offset + 0x11c, 0x00000008); ++ gr_def(ctx, offset + 0x120, 0x00000014); ++ if (dev_priv->chipset == 0x50) { ++ gr_def(ctx, offset + 0x124, 0x00000026); ++ offset -= 0x18; ++ } else { ++ gr_def(ctx, offset + 0x128, 0x00000029); ++ gr_def(ctx, offset + 0x12c, 0x00000027); ++ gr_def(ctx, offset + 0x130, 0x00000026); ++ gr_def(ctx, offset + 0x134, 0x00000008); ++ gr_def(ctx, offset + 0x138, 0x00000004); ++ gr_def(ctx, offset + 0x13c, 0x00000027); ++ } ++ gr_def(ctx, offset + 0x148, 0x00000001); ++ gr_def(ctx, offset + 0x14c, 0x00000002); ++ gr_def(ctx, offset + 0x150, 0x00000003); ++ gr_def(ctx, offset + 0x154, 0x00000004); ++ gr_def(ctx, offset + 0x158, 0x00000005); ++ gr_def(ctx, offset + 0x15c, 0x00000006); ++ gr_def(ctx, offset + 0x160, 0x00000007); ++ gr_def(ctx, offset + 0x164, 0x00000001); ++ gr_def(ctx, offset + 0x1a8, 0x000000cf); ++ if (dev_priv->chipset == 0x50) ++ offset -= 4; ++ gr_def(ctx, offset + 0x1d8, 0x00000080); ++ gr_def(ctx, offset + 0x1dc, 0x00000004); ++ gr_def(ctx, offset + 0x1e0, 0x00000004); ++ if (dev_priv->chipset == 0x50) ++ offset -= 4; ++ else ++ gr_def(ctx, offset + 0x1e4, 0x00000003); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { ++ gr_def(ctx, offset + 0x1ec, 0x00000003); ++ offset += 8; ++ } ++ gr_def(ctx, offset + 0x1e8, 0x00000001); ++ if (dev_priv->chipset == 0x50) ++ offset -= 4; ++ gr_def(ctx, offset + 0x1f4, 0x00000012); ++ gr_def(ctx, offset + 0x1f8, 0x00000010); ++ gr_def(ctx, offset + 0x1fc, 0x0000000c); ++ gr_def(ctx, offset + 0x200, 0x00000001); ++ gr_def(ctx, offset + 0x210, 0x00000004); ++ gr_def(ctx, offset + 0x214, 0x00000002); ++ gr_def(ctx, offset + 0x218, 0x00000004); ++ if (dev_priv->chipset >= 0xa0) ++ offset += 4; ++ gr_def(ctx, offset + 0x224, 0x003fffff); ++ gr_def(ctx, offset + 0x228, 0x00001fff); ++ if (dev_priv->chipset == 0x50) ++ offset -= 0x20; ++ else if (dev_priv->chipset >= 0xa0) { ++ gr_def(ctx, offset + 0x250, 0x00000001); ++ gr_def(ctx, offset + 0x254, 0x00000001); ++ gr_def(ctx, offset + 0x258, 0x00000002); ++ offset += 0x10; ++ } ++ gr_def(ctx, offset + 0x250, 0x00000004); ++ gr_def(ctx, offset + 0x254, 0x00000014); ++ gr_def(ctx, offset + 0x258, 0x00000001); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ offset += 4; ++ gr_def(ctx, offset + 0x264, 0x00000002); ++ if (dev_priv->chipset >= 0xa0) ++ offset += 8; ++ gr_def(ctx, offset + 0x270, 0x00000001); ++ gr_def(ctx, offset + 0x278, 0x00000002); ++ gr_def(ctx, offset + 0x27c, 0x00001000); ++ if (dev_priv->chipset == 0x50) ++ offset -= 0xc; ++ else { ++ gr_def(ctx, offset + 0x280, 0x00000e00); ++ gr_def(ctx, offset + 0x284, 0x00001000); ++ gr_def(ctx, offset + 0x288, 0x00001e00); ++ } ++ gr_def(ctx, offset + 0x290, 0x00000001); 
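The long runs of cp_ctx()/gr_def() calls in this function are easier to read with a toy model of what each helper contributes: cp_ctx() declares a contiguous block of PGRAPH MMIO registers for the ctxprog to save and restore (packing a CP_CTX word from the CP_CTX_COUNT/CP_CTX_REG fields defined near the top of the file) and reserves matching slots in the ctxvals image, while gr_def() records a non-zero default for one register of the most recently declared block. The sketch below is a simplified stand-alone illustration of that pairing under those assumptions, not the driver's real helpers from nouveau_grctx.h; the toy_* names are invented, and details such as spilling counts that do not fit the 4-bit count field are ignored.

#include <stdint.h>
#include <stdio.h>

/* Field layout copied from the CP_CTX defines at the top of nv50_grctx.c. */
#define CP_CTX             0x00100000
#define CP_CTX_COUNT       0x000f0000
#define CP_CTX_COUNT_SHIFT 16
#define CP_CTX_REG         0x00003fff

struct toy_grctx {
	uint32_t ctxprog[512];		/* encoded ctxprog words */
	int ctxprog_len;
	uint32_t ctxvals[0x1000];	/* default values, one slot per saved register */
	int ctxvals_pos;
	int ctxvals_base;		/* ctxvals slot of the current block's first register */
	uint32_t reg_base;		/* MMIO address of the current block's first register */
};

/* Declare "count" consecutive registers starting at "reg" as part of the
 * context; assumes the reg field is the PGRAPH-relative word offset. */
static void toy_cp_ctx(struct toy_grctx *t, uint32_t reg, uint32_t count)
{
	uint32_t reg_idx = (reg - 0x00400000) >> 2;

	t->ctxprog[t->ctxprog_len++] = CP_CTX |
				       (count << CP_CTX_COUNT_SHIFT) |
				       (reg_idx & CP_CTX_REG);
	t->reg_base = reg;
	t->ctxvals_base = t->ctxvals_pos;
	t->ctxvals_pos += count;	/* one ctxvals slot per declared register */
}

/* Give one register of the current block a non-zero default value. */
static void toy_gr_def(struct toy_grctx *t, uint32_t reg, uint32_t val)
{
	t->ctxvals[t->ctxvals_base + ((reg - t->reg_base) >> 2)] = val;
}

int main(void)
{
	struct toy_grctx t = { .ctxprog_len = 0 };
	int idx = (0x400814 - 0x400808) >> 2;

	/* Mirrors "cp_ctx(ctx, 0x400808, 7); gr_def(ctx, 0x400814, 0x00000030);" */
	toy_cp_ctx(&t, 0x400808, 7);
	toy_gr_def(&t, 0x400814, 0x00000030);

	printf("CP_CTX word: 0x%08x\n", t.ctxprog[0]);
	printf("default for 0x400814 lands in ctxvals[%d] = 0x%08x\n",
	       idx, t.ctxvals[idx]);
	return 0;
}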
++ gr_def(ctx, offset + 0x294, 0x00000001); ++ gr_def(ctx, offset + 0x298, 0x00000001); ++ gr_def(ctx, offset + 0x29c, 0x00000001); ++ gr_def(ctx, offset + 0x2a0, 0x00000001); ++ gr_def(ctx, offset + 0x2b0, 0x00000200); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { ++ gr_def(ctx, offset + 0x2b4, 0x00000200); ++ offset += 4; ++ } ++ if (dev_priv->chipset < 0xa0) { ++ gr_def(ctx, offset + 0x2b8, 0x00000001); ++ gr_def(ctx, offset + 0x2bc, 0x00000070); ++ gr_def(ctx, offset + 0x2c0, 0x00000080); ++ gr_def(ctx, offset + 0x2cc, 0x00000001); ++ gr_def(ctx, offset + 0x2d0, 0x00000070); ++ gr_def(ctx, offset + 0x2d4, 0x00000080); ++ } else { ++ gr_def(ctx, offset + 0x2b8, 0x00000001); ++ gr_def(ctx, offset + 0x2bc, 0x000000f0); ++ gr_def(ctx, offset + 0x2c0, 0x000000ff); ++ gr_def(ctx, offset + 0x2cc, 0x00000001); ++ gr_def(ctx, offset + 0x2d0, 0x000000f0); ++ gr_def(ctx, offset + 0x2d4, 0x000000ff); ++ gr_def(ctx, offset + 0x2dc, 0x00000009); ++ offset += 4; ++ } ++ gr_def(ctx, offset + 0x2e4, 0x00000001); ++ gr_def(ctx, offset + 0x2e8, 0x000000cf); ++ gr_def(ctx, offset + 0x2f0, 0x00000001); ++ gr_def(ctx, offset + 0x300, 0x000000cf); ++ gr_def(ctx, offset + 0x308, 0x00000002); ++ gr_def(ctx, offset + 0x310, 0x00000001); ++ gr_def(ctx, offset + 0x318, 0x00000001); ++ gr_def(ctx, offset + 0x320, 0x000000cf); ++ gr_def(ctx, offset + 0x324, 0x000000cf); ++ gr_def(ctx, offset + 0x328, 0x00000001); ++ ++ /* 6000? */ ++ if (dev_priv->chipset == 0x50) ++ cp_ctx(ctx, 0x4063e0, 0x1); ++ ++ /* 6800: M2MF */ ++ if (dev_priv->chipset < 0x90) { ++ cp_ctx(ctx, 0x406814, 0x2b); ++ gr_def(ctx, 0x406818, 0x00000f80); ++ gr_def(ctx, 0x406860, 0x007f0080); ++ gr_def(ctx, 0x40689c, 0x007f0080); ++ } else { ++ cp_ctx(ctx, 0x406814, 0x4); ++ if (dev_priv->chipset == 0x98) ++ gr_def(ctx, 0x406818, 0x00000f80); ++ else ++ gr_def(ctx, 0x406818, 0x00001f80); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ gr_def(ctx, 0x40681c, 0x00000030); ++ cp_ctx(ctx, 0x406830, 0x3); ++ } ++ ++ /* 7000: per-ROP group state */ ++ for (i = 0; i < 8; i++) { ++ if (units & (1<<(i+16))) { ++ cp_ctx(ctx, 0x407000 + (i<<8), 3); ++ if (dev_priv->chipset == 0x50) ++ gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820); ++ else if (dev_priv->chipset != 0xa5) ++ gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821); ++ else ++ gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821); ++ gr_def(ctx, 0x407004 + (i<<8), 0x89058001); ++ ++ if (dev_priv->chipset == 0x50) { ++ cp_ctx(ctx, 0x407010 + (i<<8), 1); ++ } else if (dev_priv->chipset < 0xa0) { ++ cp_ctx(ctx, 0x407010 + (i<<8), 2); ++ gr_def(ctx, 0x407010 + (i<<8), 0x00001000); ++ gr_def(ctx, 0x407014 + (i<<8), 0x0000001f); ++ } else { ++ cp_ctx(ctx, 0x407010 + (i<<8), 3); ++ gr_def(ctx, 0x407010 + (i<<8), 0x00001000); ++ if (dev_priv->chipset != 0xa5) ++ gr_def(ctx, 0x407014 + (i<<8), 0x000000ff); ++ else ++ gr_def(ctx, 0x407014 + (i<<8), 0x000001ff); ++ } ++ ++ cp_ctx(ctx, 0x407080 + (i<<8), 4); ++ if (dev_priv->chipset != 0xa5) ++ gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa); ++ else ++ gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa); ++ if (dev_priv->chipset == 0x50) ++ gr_def(ctx, 0x407084 + (i<<8), 0x000000c0); ++ else ++ gr_def(ctx, 0x407084 + (i<<8), 0x400000c0); ++ gr_def(ctx, 0x407088 + (i<<8), 0xb7892080); ++ ++ if (dev_priv->chipset < 0xa0) ++ cp_ctx(ctx, 0x407094 + (i<<8), 1); ++ else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) ++ cp_ctx(ctx, 0x407094 + (i<<8), 3); ++ else { ++ cp_ctx(ctx, 0x407094 + (i<<8), 4); ++ gr_def(ctx, 0x4070a0 + (i<<8), 1); ++ } ++ } ++ } ++ ++ 
cp_ctx(ctx, 0x407c00, 0x3); ++ if (dev_priv->chipset < 0x90) ++ gr_def(ctx, 0x407c00, 0x00010040); ++ else if (dev_priv->chipset < 0xa0) ++ gr_def(ctx, 0x407c00, 0x00390040); ++ else ++ gr_def(ctx, 0x407c00, 0x003d0040); ++ gr_def(ctx, 0x407c08, 0x00000022); ++ if (dev_priv->chipset >= 0xa0) { ++ cp_ctx(ctx, 0x407c10, 0x3); ++ cp_ctx(ctx, 0x407c20, 0x1); ++ cp_ctx(ctx, 0x407c2c, 0x1); ++ } ++ ++ if (dev_priv->chipset < 0xa0) { ++ cp_ctx(ctx, 0x407d00, 0x9); ++ } else { ++ cp_ctx(ctx, 0x407d00, 0x15); ++ } ++ if (dev_priv->chipset == 0x98) ++ gr_def(ctx, 0x407d08, 0x00380040); ++ else { ++ if (dev_priv->chipset < 0x90) ++ gr_def(ctx, 0x407d08, 0x00010040); ++ else if (dev_priv->chipset < 0xa0) ++ gr_def(ctx, 0x407d08, 0x00390040); ++ else ++ gr_def(ctx, 0x407d08, 0x003d0040); ++ gr_def(ctx, 0x407d0c, 0x00000022); ++ } ++ ++ /* 8000+: per-TP state */ ++ for (i = 0; i < 10; i++) { ++ if (units & (1<<i)) { ++ if (dev_priv->chipset < 0xa0) ++ base = 0x408000 + (i<<12); ++ else ++ base = 0x408000 + (i<<11); ++ if (dev_priv->chipset < 0xa0) ++ offset = base + 0xc00; ++ else ++ offset = base + 0x80; ++ cp_ctx(ctx, offset + 0x00, 1); ++ gr_def(ctx, offset + 0x00, 0x0000ff0a); ++ cp_ctx(ctx, offset + 0x08, 1); ++ ++ /* per-MP state */ ++ for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) { ++ if (!(units & (1 << (j+24)))) continue; ++ if (dev_priv->chipset < 0xa0) ++ offset = base + 0x200 + (j<<7); ++ else ++ offset = base + 0x100 + (j<<7); ++ cp_ctx(ctx, offset, 0x20); ++ gr_def(ctx, offset + 0x00, 0x01800000); ++ gr_def(ctx, offset + 0x04, 0x00160000); ++ gr_def(ctx, offset + 0x08, 0x01800000); ++ gr_def(ctx, offset + 0x18, 0x0003ffff); ++ switch (dev_priv->chipset) { ++ case 0x50: ++ gr_def(ctx, offset + 0x1c, 0x00080000); ++ break; ++ case 0x84: ++ gr_def(ctx, offset + 0x1c, 0x00880000); ++ break; ++ case 0x86: ++ gr_def(ctx, offset + 0x1c, 0x008c0000); ++ break; ++ case 0x92: ++ case 0x96: ++ case 0x98: ++ gr_def(ctx, offset + 0x1c, 0x118c0000); ++ break; ++ case 0x94: ++ gr_def(ctx, offset + 0x1c, 0x10880000); ++ break; ++ case 0xa0: ++ case 0xa5: ++ gr_def(ctx, offset + 0x1c, 0x310c0000); ++ break; ++ case 0xa3: ++ case 0xa8: ++ case 0xaa: ++ case 0xac: ++ gr_def(ctx, offset + 0x1c, 0x300c0000); ++ break; ++ } ++ gr_def(ctx, offset + 0x40, 0x00010401); ++ if (dev_priv->chipset == 0x50) ++ gr_def(ctx, offset + 0x48, 0x00000040); ++ else ++ gr_def(ctx, offset + 0x48, 0x00000078); ++ gr_def(ctx, offset + 0x50, 0x000000bf); ++ gr_def(ctx, offset + 0x58, 0x00001210); ++ if (dev_priv->chipset == 0x50) ++ gr_def(ctx, offset + 0x5c, 0x00000080); ++ else ++ gr_def(ctx, offset + 0x5c, 0x08000080); ++ if (dev_priv->chipset >= 0xa0) ++ gr_def(ctx, offset + 0x68, 0x0000003e); ++ } ++ ++ if (dev_priv->chipset < 0xa0) ++ cp_ctx(ctx, base + 0x300, 0x4); ++ else ++ cp_ctx(ctx, base + 0x300, 0x5); ++ if (dev_priv->chipset == 0x50) ++ gr_def(ctx, base + 0x304, 0x00007070); ++ else if (dev_priv->chipset < 0xa0) ++ gr_def(ctx, base + 0x304, 0x00027070); ++ else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) ++ gr_def(ctx, base + 0x304, 0x01127070); ++ else ++ gr_def(ctx, base + 0x304, 0x05127070); ++ ++ if (dev_priv->chipset < 0xa0) ++ cp_ctx(ctx, base + 0x318, 1); ++ else ++ cp_ctx(ctx, base + 0x320, 1); ++ if (dev_priv->chipset == 0x50) ++ gr_def(ctx, base + 0x318, 0x0003ffff); ++ else if (dev_priv->chipset < 0xa0) ++ gr_def(ctx, base + 0x318, 0x03ffffff); ++ else ++ gr_def(ctx, base + 0x320, 0x07ffffff); ++ ++ if (dev_priv->chipset < 0xa0) ++ cp_ctx(ctx, base + 0x324, 5); ++ else ++ cp_ctx(ctx, base + 0x328, 4); ++
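Everything in the per-ROP, per-TP and per-MP loops here is keyed off the unit mask read from register 0x1540: the way this file tests it, bit i gates TP i, bit 16+i gates ROP group i, and bit 24+j gates MP j inside a TP, with each enabled TP owning a register block at 0x408000 + (i << 12) on pre-NVA0 chips or 0x408000 + (i << 11) on NVA0 and later, and per-MP sub-blocks at +0x200 (pre-NVA0) or +0x100 (NVA0+) plus (j << 7). The stand-alone helper below just restates that address arithmetic; the interpretation of the mask bits is inferred from how the loops above test them, and the function names and the example mask are made up.

#include <stdint.h>
#include <stdio.h>

/* Restates the per-TP/per-MP addressing used in the loops above. */
static uint32_t tp_mmio_base(int chipset, int tp)
{
	return 0x408000 + ((uint32_t)tp << (chipset < 0xa0 ? 12 : 11));
}

static uint32_t mp_mmio_base(int chipset, int tp, int mp)
{
	return tp_mmio_base(chipset, tp) +
	       (chipset < 0xa0 ? 0x200 : 0x100) + ((uint32_t)mp << 7);
}

int main(void)
{
	uint32_t units = 0x01010003;	/* made-up mask: TP0, TP1, ROP0, MP0 */
	int chipset = 0x96;		/* a pre-NVA0 example */
	int i;

	for (i = 0; i < 10; i++)
		if (units & (1u << i))
			printf("TP%d block at 0x%06x, its MP0 at 0x%06x\n", i,
			       tp_mmio_base(chipset, i),
			       mp_mmio_base(chipset, i, 0));
	return 0;
}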
++ if (dev_priv->chipset < 0xa0) { ++ cp_ctx(ctx, base + 0x340, 9); ++ offset = base + 0x340; ++ } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) { ++ cp_ctx(ctx, base + 0x33c, 0xb); ++ offset = base + 0x344; ++ } else { ++ cp_ctx(ctx, base + 0x33c, 0xd); ++ offset = base + 0x344; ++ } ++ gr_def(ctx, offset + 0x0, 0x00120407); ++ gr_def(ctx, offset + 0x4, 0x05091507); ++ if (dev_priv->chipset == 0x84) ++ gr_def(ctx, offset + 0x8, 0x05100202); ++ else ++ gr_def(ctx, offset + 0x8, 0x05010202); ++ gr_def(ctx, offset + 0xc, 0x00030201); ++ if (dev_priv->chipset == 0xa3) ++ cp_ctx(ctx, base + 0x36c, 1); ++ ++ cp_ctx(ctx, base + 0x400, 2); ++ gr_def(ctx, base + 0x404, 0x00000040); ++ cp_ctx(ctx, base + 0x40c, 2); ++ gr_def(ctx, base + 0x40c, 0x0d0c0b0a); ++ gr_def(ctx, base + 0x410, 0x00141210); ++ ++ if (dev_priv->chipset < 0xa0) ++ offset = base + 0x800; ++ else ++ offset = base + 0x500; ++ cp_ctx(ctx, offset, 6); ++ gr_def(ctx, offset + 0x0, 0x000001f0); ++ gr_def(ctx, offset + 0x4, 0x00000001); ++ gr_def(ctx, offset + 0x8, 0x00000003); ++ if (dev_priv->chipset == 0x50 || dev_priv->chipset >= 0xaa) ++ gr_def(ctx, offset + 0xc, 0x00008000); ++ gr_def(ctx, offset + 0x14, 0x00039e00); ++ cp_ctx(ctx, offset + 0x1c, 2); ++ if (dev_priv->chipset == 0x50) ++ gr_def(ctx, offset + 0x1c, 0x00000040); ++ else ++ gr_def(ctx, offset + 0x1c, 0x00000100); ++ gr_def(ctx, offset + 0x20, 0x00003800); ++ ++ if (dev_priv->chipset >= 0xa0) { ++ cp_ctx(ctx, base + 0x54c, 2); ++ if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) ++ gr_def(ctx, base + 0x54c, 0x003fe006); ++ else ++ gr_def(ctx, base + 0x54c, 0x003fe007); ++ gr_def(ctx, base + 0x550, 0x003fe000); ++ } ++ ++ if (dev_priv->chipset < 0xa0) ++ offset = base + 0xa00; ++ else ++ offset = base + 0x680; ++ cp_ctx(ctx, offset, 1); ++ gr_def(ctx, offset, 0x00404040); ++ ++ if (dev_priv->chipset < 0xa0) ++ offset = base + 0xe00; ++ else ++ offset = base + 0x700; ++ cp_ctx(ctx, offset, 2); ++ if (dev_priv->chipset < 0xa0) ++ gr_def(ctx, offset, 0x0077f005); ++ else if (dev_priv->chipset == 0xa5) ++ gr_def(ctx, offset, 0x6cf7f007); ++ else if (dev_priv->chipset == 0xa8) ++ gr_def(ctx, offset, 0x6cfff007); ++ else if (dev_priv->chipset == 0xac) ++ gr_def(ctx, offset, 0x0cfff007); ++ else ++ gr_def(ctx, offset, 0x0cf7f007); ++ if (dev_priv->chipset == 0x50) ++ gr_def(ctx, offset + 0x4, 0x00007fff); ++ else if (dev_priv->chipset < 0xa0) ++ gr_def(ctx, offset + 0x4, 0x003f7fff); ++ else ++ gr_def(ctx, offset + 0x4, 0x02bf7fff); ++ cp_ctx(ctx, offset + 0x2c, 1); ++ if (dev_priv->chipset == 0x50) { ++ cp_ctx(ctx, offset + 0x50, 9); ++ gr_def(ctx, offset + 0x54, 0x000003ff); ++ gr_def(ctx, offset + 0x58, 0x00000003); ++ gr_def(ctx, offset + 0x5c, 0x00000003); ++ gr_def(ctx, offset + 0x60, 0x000001ff); ++ gr_def(ctx, offset + 0x64, 0x0000001f); ++ gr_def(ctx, offset + 0x68, 0x0000000f); ++ gr_def(ctx, offset + 0x6c, 0x0000000f); ++ } else if(dev_priv->chipset < 0xa0) { ++ cp_ctx(ctx, offset + 0x50, 1); ++ cp_ctx(ctx, offset + 0x70, 1); ++ } else { ++ cp_ctx(ctx, offset + 0x50, 1); ++ cp_ctx(ctx, offset + 0x60, 5); ++ } ++ } ++ } ++} ++ ++/* ++ * xfer areas. These are a pain. ++ * ++ * There are 2 xfer areas: the first one is big and contains all sorts of ++ * stuff, the second is small and contains some per-TP context. ++ * ++ * Each area is split into 8 "strands". The areas, when saved to grctx, ++ * are made of 8-word blocks. Each block contains a single word from ++ * each strand. 
The strands are independent of each other, their ++ * addresses are unrelated to each other, and data in them is closely ++ * packed together. The strand layout varies a bit between cards: here ++ * and there, a single word is thrown out in the middle and the whole ++ * strand is offset by a bit from corresponding one on another chipset. ++ * For this reason, addresses of stuff in strands are almost useless. ++ * Knowing sequence of stuff and size of gaps between them is much more ++ * useful, and that's how we build the strands in our generator. ++ * ++ * NVA0 takes this mess to a whole new level by cutting the old strands ++ * into a few dozen pieces [known as genes], rearranging them randomly, ++ * and putting them back together to make new strands. Hopefully these ++ * genes correspond more or less directly to the same PGRAPH subunits ++ * as in 400040 register. ++ * ++ * The most common value in default context is 0, and when the genes ++ * are separated by 0's, gene bounduaries are quite speculative... ++ * some of them can be clearly deduced, others can be guessed, and yet ++ * others won't be resolved without figuring out the real meaning of ++ * given ctxval. For the same reason, ending point of each strand ++ * is unknown. Except for strand 0, which is the longest strand and ++ * its end corresponds to end of the whole xfer. ++ * ++ * An unsolved mystery is the seek instruction: it takes an argument ++ * in bits 8-18, and that argument is clearly the place in strands to ++ * seek to... but the offsets don't seem to correspond to offsets as ++ * seen in grctx. Perhaps there's another, real, not randomly-changing ++ * addressing in strands, and the xfer insn just happens to skip over ++ * the unused bits? NV10-NV30 PIPE comes to mind... ++ * ++ * As far as I know, there's no way to access the xfer areas directly ++ * without the help of ctxprog. ++ */ ++ ++static inline void ++xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) { ++ int i; ++ if (val && ctx->mode == NOUVEAU_GRCTX_VALS) ++ for (i = 0; i < num; i++) ++ nv_wo32(ctx->dev, ctx->data, ctx->ctxvals_pos + (i << 3), val); ++ ctx->ctxvals_pos += num << 3; ++} ++ ++/* Gene declarations... 
*/ ++ ++static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx); ++static void nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx); ++static void nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx); ++static void nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx); ++static void nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx); ++static void nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx); ++static void nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx); ++static void nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx); ++static void nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx); ++static void nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx); ++static void nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx); ++static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx); ++static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx); ++ ++static void ++nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; ++ int i; ++ int offset; ++ int size = 0; ++ uint32_t units = nv_rd32 (ctx->dev, 0x1540); ++ ++ offset = (ctx->ctxvals_pos+0x3f)&~0x3f; ++ ctx->ctxvals_base = offset; ++ ++ if (dev_priv->chipset < 0xa0) { ++ /* Strand 0 */ ++ ctx->ctxvals_pos = offset; ++ switch (dev_priv->chipset) { ++ case 0x50: ++ xf_emit(ctx, 0x99, 0); ++ break; ++ case 0x84: ++ case 0x86: ++ xf_emit(ctx, 0x384, 0); ++ break; ++ case 0x92: ++ case 0x94: ++ case 0x96: ++ case 0x98: ++ xf_emit(ctx, 0x380, 0); ++ break; ++ } ++ nv50_graph_construct_gene_m2mf (ctx); ++ switch (dev_priv->chipset) { ++ case 0x50: ++ case 0x84: ++ case 0x86: ++ case 0x98: ++ xf_emit(ctx, 0x4c4, 0); ++ break; ++ case 0x92: ++ case 0x94: ++ case 0x96: ++ xf_emit(ctx, 0x984, 0); ++ break; ++ } ++ nv50_graph_construct_gene_unk5(ctx); ++ if (dev_priv->chipset == 0x50) ++ xf_emit(ctx, 0xa, 0); ++ else ++ xf_emit(ctx, 0xb, 0); ++ nv50_graph_construct_gene_unk4(ctx); ++ nv50_graph_construct_gene_unk3(ctx); ++ if ((ctx->ctxvals_pos-offset)/8 > size) ++ size = (ctx->ctxvals_pos-offset)/8; ++ ++ /* Strand 1 */ ++ ctx->ctxvals_pos = offset + 0x1; ++ nv50_graph_construct_gene_unk6(ctx); ++ nv50_graph_construct_gene_unk7(ctx); ++ nv50_graph_construct_gene_unk8(ctx); ++ switch (dev_priv->chipset) { ++ case 0x50: ++ case 0x92: ++ xf_emit(ctx, 0xfb, 0); ++ break; ++ case 0x84: ++ xf_emit(ctx, 0xd3, 0); ++ break; ++ case 0x94: ++ case 0x96: ++ xf_emit(ctx, 0xab, 0); ++ break; ++ case 0x86: ++ case 0x98: ++ xf_emit(ctx, 0x6b, 0); ++ break; ++ } ++ xf_emit(ctx, 2, 0x4e3bfdf); ++ xf_emit(ctx, 4, 0); ++ xf_emit(ctx, 1, 0x0fac6881); ++ xf_emit(ctx, 0xb, 0); ++ xf_emit(ctx, 2, 0x4e3bfdf); ++ if ((ctx->ctxvals_pos-offset)/8 > size) ++ size = (ctx->ctxvals_pos-offset)/8; ++ ++ /* Strand 2 */ ++ ctx->ctxvals_pos = offset + 0x2; ++ switch (dev_priv->chipset) { ++ case 0x50: ++ case 0x92: ++ xf_emit(ctx, 0xa80, 0); ++ break; ++ case 0x84: ++ xf_emit(ctx, 0xa7e, 0); ++ break; ++ case 0x94: ++ case 0x96: ++ xf_emit(ctx, 0xa7c, 0); ++ break; ++ case 0x86: ++ case 0x98: ++ xf_emit(ctx, 0xa7a, 0); ++ break; ++ } ++ xf_emit(ctx, 1, 0x3fffff); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 0x1fff); ++ xf_emit(ctx, 0xe, 0); ++ nv50_graph_construct_gene_unk9(ctx); ++ nv50_graph_construct_gene_unk2(ctx); ++ nv50_graph_construct_gene_unk1(ctx); ++ nv50_graph_construct_gene_unk10(ctx); ++ if ((ctx->ctxvals_pos-offset)/8 > size) ++ size = (ctx->ctxvals_pos-offset)/8; ++ ++ /* Strand 3: per-ROP group state */ ++ 
ctx->ctxvals_pos = offset + 3; ++ for (i = 0; i < 6; i++) ++ if (units & (1 << (i + 16))) ++ nv50_graph_construct_gene_ropc(ctx); ++ if ((ctx->ctxvals_pos-offset)/8 > size) ++ size = (ctx->ctxvals_pos-offset)/8; ++ ++ /* Strands 4-7: per-TP state */ ++ for (i = 0; i < 4; i++) { ++ ctx->ctxvals_pos = offset + 4 + i; ++ if (units & (1 << (2 * i))) ++ nv50_graph_construct_xfer_tp(ctx); ++ if (units & (1 << (2 * i + 1))) ++ nv50_graph_construct_xfer_tp(ctx); ++ if ((ctx->ctxvals_pos-offset)/8 > size) ++ size = (ctx->ctxvals_pos-offset)/8; ++ } ++ } else { ++ /* Strand 0 */ ++ ctx->ctxvals_pos = offset; ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 0x385, 0); ++ else ++ xf_emit(ctx, 0x384, 0); ++ nv50_graph_construct_gene_m2mf(ctx); ++ xf_emit(ctx, 0x950, 0); ++ nv50_graph_construct_gene_unk10(ctx); ++ xf_emit(ctx, 1, 0x0fac6881); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 3, 0); ++ } ++ nv50_graph_construct_gene_unk8(ctx); ++ if (dev_priv->chipset == 0xa0) ++ xf_emit(ctx, 0x189, 0); ++ else if (dev_priv->chipset == 0xa3) ++ xf_emit(ctx, 0xd5, 0); ++ else if (dev_priv->chipset == 0xa5) ++ xf_emit(ctx, 0x99, 0); ++ else if (dev_priv->chipset == 0xaa) ++ xf_emit(ctx, 0x65, 0); ++ else ++ xf_emit(ctx, 0x6d, 0); ++ nv50_graph_construct_gene_unk9(ctx); ++ if ((ctx->ctxvals_pos-offset)/8 > size) ++ size = (ctx->ctxvals_pos-offset)/8; + -+ /* Disable the encoder */ -+ for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { -+ struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i]; ++ /* Strand 1 */ ++ ctx->ctxvals_pos = offset + 1; ++ nv50_graph_construct_gene_unk1(ctx); ++ if ((ctx->ctxvals_pos-offset)/8 > size) ++ size = (ctx->ctxvals_pos-offset)/8; + -+ if (dcb->type == type && (dcb->or & (1 << or))) { -+ nouveau_bios_run_display_table(dev, dcb, 0, -1); -+ dev_priv->evo_irq.dcb = dcb; -+ goto ack; ++ /* Strand 2 */ ++ ctx->ctxvals_pos = offset + 2; ++ if (dev_priv->chipset == 0xa0) { ++ nv50_graph_construct_gene_unk2(ctx); + } -+ } ++ xf_emit(ctx, 0x36, 0); ++ nv50_graph_construct_gene_unk5(ctx); ++ if ((ctx->ctxvals_pos-offset)/8 > size) ++ size = (ctx->ctxvals_pos-offset)/8; + -+ NV_ERROR(dev, "no dcb for %d %d 0x%08x\n", or, type, mc); - ack: - nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10); - nv_wr32(dev, 0x610030, 0x80000000); -@@ -817,33 +791,103 @@ nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb) - static void - nv50_display_unk20_handler(struct drm_device *dev) - { -- struct dcb_entry *dcbent; -- uint32_t tmp, pclk, script; -- int head, or, ret; -+ struct drm_nouveau_private *dev_priv = dev->dev_private; -+ u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc; -+ struct dcb_entry *dcb; -+ int i, crtc, or, type = OUTPUT_ANY; - -- ret = nv50_display_irq_head(dev, &head, &dcbent); -- if (ret) -+ NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); -+ dcb = dev_priv->evo_irq.dcb; -+ if (dcb) { -+ nouveau_bios_run_display_table(dev, dcb, 0, -2); -+ dev_priv->evo_irq.dcb = NULL; -+ } ++ /* Strand 3 */ ++ ctx->ctxvals_pos = offset + 3; ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 1); ++ nv50_graph_construct_gene_unk6(ctx); ++ if ((ctx->ctxvals_pos-offset)/8 > size) ++ size = (ctx->ctxvals_pos-offset)/8; + -+ /* CRTC clock change requested? 
*/ -+ crtc = ffs((unk30 & 0x00000600) >> 9) - 1; -+ if (crtc >= 0) { -+ pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)); -+ pclk &= 0x003fffff; ++ /* Strand 4 */ ++ ctx->ctxvals_pos = offset + 4; ++ if (dev_priv->chipset == 0xa0) ++ xf_emit(ctx, 0xa80, 0); ++ else if (dev_priv->chipset == 0xa3) ++ xf_emit(ctx, 0xa7c, 0); ++ else ++ xf_emit(ctx, 0xa7a, 0); ++ xf_emit(ctx, 1, 0x3fffff); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 0x1fff); ++ if ((ctx->ctxvals_pos-offset)/8 > size) ++ size = (ctx->ctxvals_pos-offset)/8; + -+ nv50_crtc_set_clock(dev, crtc, pclk); ++ /* Strand 5 */ ++ ctx->ctxvals_pos = offset + 5; ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x0fac6881); ++ xf_emit(ctx, 0xb, 0); ++ xf_emit(ctx, 2, 0x4e3bfdf); ++ xf_emit(ctx, 3, 0); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 1, 0x11); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 2, 0x4e3bfdf); ++ xf_emit(ctx, 2, 0); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 1, 0x11); ++ xf_emit(ctx, 1, 0); ++ for (i = 0; i < 8; i++) ++ if (units & (1<<(i+16))) ++ nv50_graph_construct_gene_ropc(ctx); ++ if ((ctx->ctxvals_pos-offset)/8 > size) ++ size = (ctx->ctxvals_pos-offset)/8; + -+ tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc)); -+ tmp &= ~0x000000f; -+ nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp); -+ } ++ /* Strand 6 */ ++ ctx->ctxvals_pos = offset + 6; ++ nv50_graph_construct_gene_unk3(ctx); ++ xf_emit(ctx, 0xb, 0); ++ nv50_graph_construct_gene_unk4(ctx); ++ nv50_graph_construct_gene_unk7(ctx); ++ if (units & (1 << 0)) ++ nv50_graph_construct_xfer_tp(ctx); ++ if (units & (1 << 1)) ++ nv50_graph_construct_xfer_tp(ctx); ++ if (units & (1 << 2)) ++ nv50_graph_construct_xfer_tp(ctx); ++ if (units & (1 << 3)) ++ nv50_graph_construct_xfer_tp(ctx); ++ if ((ctx->ctxvals_pos-offset)/8 > size) ++ size = (ctx->ctxvals_pos-offset)/8; + -+ /* Nothing needs to be done for the encoder */ -+ crtc = ffs((unk30 & 0x00000180) >> 7) - 1; -+ if (crtc < 0) - goto ack; -- or = ffs(dcbent->or) - 1; -- pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff; -- script = nv50_display_script_select(dev, dcbent, pclk); -+ pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff; - -- NV_DEBUG_KMS(dev, "head %d pxclk: %dKHz\n", head, pclk); -+ /* Find which encoder is connected to the CRTC */ -+ for (i = 0; type == OUTPUT_ANY && i < 3; i++) { -+ mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(i)); -+ NV_DEBUG_KMS(dev, "DAC-%d mc: 0x%08x\n", i, mc); -+ if (!(mc & (1 << crtc))) -+ continue; - -- if (dcbent->type != OUTPUT_DP) -- nouveau_bios_run_display_table(dev, dcbent, 0, -2); -+ switch ((mc & 0x00000f00) >> 8) { -+ case 0: type = OUTPUT_ANALOG; break; -+ case 1: type = OUTPUT_TV; break; -+ default: -+ NV_ERROR(dev, "invalid mc, DAC-%d: 0x%08x\n", i, mc); -+ goto ack; ++ /* Strand 7 */ ++ ctx->ctxvals_pos = offset + 7; ++ if (dev_priv->chipset == 0xa0) { ++ if (units & (1 << 4)) ++ nv50_graph_construct_xfer_tp(ctx); ++ if (units & (1 << 5)) ++ nv50_graph_construct_xfer_tp(ctx); ++ if (units & (1 << 6)) ++ nv50_graph_construct_xfer_tp(ctx); ++ if (units & (1 << 7)) ++ nv50_graph_construct_xfer_tp(ctx); ++ if (units & (1 << 8)) ++ nv50_graph_construct_xfer_tp(ctx); ++ if (units & (1 << 9)) ++ nv50_graph_construct_xfer_tp(ctx); ++ } else { ++ nv50_graph_construct_gene_unk2(ctx); + } - -- nv50_crtc_set_clock(dev, head, pclk); -+ or = i; ++ if ((ctx->ctxvals_pos-offset)/8 > size) ++ size = (ctx->ctxvals_pos-offset)/8; + } - -- nouveau_bios_run_display_table(dev, dcbent, 
script, pclk); -+ for (i = 0; type == OUTPUT_ANY && i < 4; i++) { -+ if (dev_priv->chipset < 0x90 || -+ dev_priv->chipset == 0x92 || -+ dev_priv->chipset == 0xa0) -+ mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(i)); -+ else -+ mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(i)); - -- nv50_display_unk20_dp_hack(dev, dcbent); -+ NV_DEBUG_KMS(dev, "SOR-%d mc: 0x%08x\n", i, mc); -+ if (!(mc & (1 << crtc))) -+ continue; - -- tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head)); -- tmp &= ~0x000000f; -- nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp); -+ switch ((mc & 0x00000f00) >> 8) { -+ case 0: type = OUTPUT_LVDS; break; -+ case 1: type = OUTPUT_TMDS; break; -+ case 2: type = OUTPUT_TMDS; break; -+ case 5: type = OUTPUT_TMDS; break; -+ case 8: type = OUTPUT_DP; break; -+ case 9: type = OUTPUT_DP; break; -+ default: -+ NV_ERROR(dev, "invalid mc, SOR-%d: 0x%08x\n", i, mc); -+ goto ack; -+ } + -+ or = i; -+ } ++ ctx->ctxvals_pos = offset + size * 8; ++ ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f; ++ cp_lsr (ctx, offset); ++ cp_out (ctx, CP_SET_XFER_POINTER); ++ cp_lsr (ctx, size); ++ cp_out (ctx, CP_SEEK_1); ++ cp_out (ctx, CP_XFER_1); ++ cp_wait(ctx, XFER, BUSY); ++} + -+ if (type == OUTPUT_ANY) -+ goto ack; ++/* ++ * non-trivial demagiced parts of ctx init go here ++ */ + -+ /* Enable the encoder */ -+ for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { -+ dcb = &dev_priv->vbios.dcb.entry[i]; -+ if (dcb->type == type && (dcb->or & (1 << or))) -+ break; -+ } ++static void ++nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx) ++{ ++ /* m2mf state */ ++ xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */ ++ xf_emit (ctx, 1, 0); /* DMA_BUFFER_IN instance >> 4 */ ++ xf_emit (ctx, 1, 0); /* DMA_BUFFER_OUT instance >> 4 */ ++ xf_emit (ctx, 1, 0); /* OFFSET_IN */ ++ xf_emit (ctx, 1, 0); /* OFFSET_OUT */ ++ xf_emit (ctx, 1, 0); /* PITCH_IN */ ++ xf_emit (ctx, 1, 0); /* PITCH_OUT */ ++ xf_emit (ctx, 1, 0); /* LINE_LENGTH */ ++ xf_emit (ctx, 1, 0); /* LINE_COUNT */ ++ xf_emit (ctx, 1, 0x21); /* FORMAT: bits 0-4 INPUT_INC, bits 5-9 OUTPUT_INC */ ++ xf_emit (ctx, 1, 1); /* LINEAR_IN */ ++ xf_emit (ctx, 1, 0x2); /* TILING_MODE_IN: bits 0-2 y tiling, bits 3-5 z tiling */ ++ xf_emit (ctx, 1, 0x100); /* TILING_PITCH_IN */ ++ xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_IN */ ++ xf_emit (ctx, 1, 1); /* TILING_DEPTH_IN */ ++ xf_emit (ctx, 1, 0); /* TILING_POSITION_IN_Z */ ++ xf_emit (ctx, 1, 0); /* TILING_POSITION_IN */ ++ xf_emit (ctx, 1, 1); /* LINEAR_OUT */ ++ xf_emit (ctx, 1, 0x2); /* TILING_MODE_OUT: bits 0-2 y tiling, bits 3-5 z tiling */ ++ xf_emit (ctx, 1, 0x100); /* TILING_PITCH_OUT */ ++ xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_OUT */ ++ xf_emit (ctx, 1, 1); /* TILING_DEPTH_OUT */ ++ xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT_Z */ ++ xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT */ ++ xf_emit (ctx, 1, 0); /* OFFSET_IN_HIGH */ ++ xf_emit (ctx, 1, 0); /* OFFSET_OUT_HIGH */ ++} + -+ if (i == dev_priv->vbios.dcb.entries) { -+ NV_ERROR(dev, "no dcb for %d %d 0x%08x\n", or, type, mc); -+ goto ack; ++static void ++nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; ++ /* end of area 2 on pre-NVA0, area 1 on NVAx */ ++ xf_emit(ctx, 2, 4); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x80); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 1, 0x80c14); ++ xf_emit(ctx, 1, 0); ++ if (dev_priv->chipset == 0x50) ++ xf_emit(ctx, 1, 0x3ff); ++ else ++ xf_emit(ctx, 1, 0x7ff); ++ switch (dev_priv->chipset) { ++ case 0x50: ++ case 0x86: ++ 
case 0x98: ++ case 0xaa: ++ case 0xac: ++ xf_emit(ctx, 0x542, 0); ++ break; ++ case 0x84: ++ case 0x92: ++ case 0x94: ++ case 0x96: ++ xf_emit(ctx, 0x942, 0); ++ break; ++ case 0xa0: ++ case 0xa3: ++ xf_emit(ctx, 0x2042, 0); ++ break; ++ case 0xa5: ++ case 0xa8: ++ xf_emit(ctx, 0x842, 0); ++ break; + } ++ xf_emit(ctx, 2, 4); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x80); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x27); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x26); ++ xf_emit(ctx, 3, 0); ++} + -+ script = nv50_display_script_select(dev, dcb, mc, pclk); -+ nouveau_bios_run_display_table(dev, dcb, script, pclk); -+ -+ nv50_display_unk20_dp_hack(dev, dcb); - -- if (dcbent->type != OUTPUT_ANALOG) { -+ if (dcb->type != OUTPUT_ANALOG) { - tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or)); - tmp &= ~0x00000f0f; - if (script & 0x0100) -@@ -853,24 +897,61 @@ nv50_display_unk20_handler(struct drm_device *dev) - nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0); - } - -+ dev_priv->evo_irq.dcb = dcb; -+ dev_priv->evo_irq.pclk = pclk; -+ dev_priv->evo_irq.script = script; ++static void ++nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx) ++{ ++ /* end of area 2 on pre-NVA0, area 1 on NVAx */ ++ xf_emit(ctx, 0x10, 0x04000000); ++ xf_emit(ctx, 0x24, 0); ++ xf_emit(ctx, 2, 0x04e3bfdf); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 0x1fe21); ++} + - ack: - nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20); - nv_wr32(dev, 0x610030, 0x80000000); - } - -+/* If programming a TMDS output on a SOR that can also be configured for -+ * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off. -+ * -+ * It looks like the VBIOS TMDS scripts make an attempt at this, however, -+ * the VBIOS scripts on at least one board I have only switch it off on -+ * link 0, causing a blank display if the output has previously been -+ * programmed for DisplayPort. 
-+ */ +static void -+nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb) ++nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx) +{ -+ int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1); -+ struct drm_encoder *encoder; -+ u32 tmp; ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; ++ /* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */ ++ if (dev_priv->chipset != 0x50) { ++ xf_emit(ctx, 5, 0); ++ xf_emit(ctx, 1, 0x80c14); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 0x804); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 2, 4); ++ xf_emit(ctx, 1, 0x8100c12); ++ } ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 2, 4); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x10); ++ if (dev_priv->chipset == 0x50) ++ xf_emit(ctx, 3, 0); ++ else ++ xf_emit(ctx, 4, 0); ++ xf_emit(ctx, 1, 0x804); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0x1a); ++ if (dev_priv->chipset != 0x50) ++ xf_emit(ctx, 1, 0x7f); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0x80c14); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x8100c12); ++ xf_emit(ctx, 2, 4); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x10); ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0x8100c12); ++ xf_emit(ctx, 6, 0); ++ if (dev_priv->chipset == 0x50) ++ xf_emit(ctx, 1, 0x3ff); ++ else ++ xf_emit(ctx, 1, 0x7ff); ++ xf_emit(ctx, 1, 0x80c14); ++ xf_emit(ctx, 0x38, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 0x10); ++ xf_emit(ctx, 0x38, 0); ++ xf_emit(ctx, 2, 0x88); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 0x16, 0); ++ xf_emit(ctx, 1, 0x26); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 0x3f800000); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 4, 0); ++ else ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 0x1a); ++ xf_emit(ctx, 1, 0x10); ++ if (dev_priv->chipset != 0x50) ++ xf_emit(ctx, 0x28, 0); ++ else ++ xf_emit(ctx, 0x25, 0); ++ xf_emit(ctx, 1, 0x52); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x26); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 2, 4); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x1a); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 0x00ffff00); ++ xf_emit(ctx, 1, 0); ++} + -+ if (dcb->type != OUTPUT_TMDS) -+ return; ++static void ++nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; ++ /* end of area 0 on pre-NVA0, beginning of area 6 on NVAx */ ++ xf_emit(ctx, 1, 0x3f); ++ xf_emit(ctx, 0xa, 0); ++ xf_emit(ctx, 1, 2); ++ xf_emit(ctx, 2, 0x04000000); ++ xf_emit(ctx, 8, 0); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 4); ++ if (dev_priv->chipset == 0x50) ++ xf_emit(ctx, 0x10, 0); ++ else ++ xf_emit(ctx, 0x11, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0x1001); ++ xf_emit(ctx, 4, 0xffff); ++ xf_emit(ctx, 0x20, 0); ++ xf_emit(ctx, 0x10, 0x3f800000); ++ xf_emit(ctx, 1, 0x10); ++ if (dev_priv->chipset == 0x50) ++ xf_emit(ctx, 1, 0); ++ else ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 3); ++ xf_emit(ctx, 2, 0); ++} + -+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { -+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); ++static void ++nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx) ++{ ++ /* middle of area 0 on pre-NVA0, middle of area 6 on NVAx */ ++ xf_emit(ctx, 2, 0x04000000); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x80); ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 0x80); ++ xf_emit(ctx, 1, 0); ++} + -+ if (nv_encoder->dcb->type == OUTPUT_DP && -+ 
nv_encoder->dcb->or & (1 << or)) { -+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); -+ tmp &= ~NV50_SOR_DP_CTRL_ENABLED; -+ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp); -+ break; -+ } -+ } ++static void ++nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; ++ /* middle of area 0 on pre-NVA0 [after m2mf], end of area 2 on NVAx */ ++ xf_emit(ctx, 2, 4); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 0x1c4d, 0); ++ else ++ xf_emit(ctx, 0x1c4b, 0); ++ xf_emit(ctx, 2, 4); ++ xf_emit(ctx, 1, 0x8100c12); ++ if (dev_priv->chipset != 0x50) ++ xf_emit(ctx, 1, 3); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x8100c12); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x80c14); ++ xf_emit(ctx, 1, 1); ++ if (dev_priv->chipset >= 0xa0) ++ xf_emit(ctx, 2, 4); ++ xf_emit(ctx, 1, 0x80c14); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 0x8100c12); ++ xf_emit(ctx, 1, 0x27); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 0x3c1, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 0x16, 0); ++ xf_emit(ctx, 1, 0x8100c12); ++ xf_emit(ctx, 1, 0); +} + - static void - nv50_display_unk40_handler(struct drm_device *dev) - { -- struct dcb_entry *dcbent; -- int head, pclk, script, ret; -+ struct drm_nouveau_private *dev_priv = dev->dev_private; -+ struct dcb_entry *dcb = dev_priv->evo_irq.dcb; -+ u16 script = dev_priv->evo_irq.script; -+ u32 unk30 = nv_rd32(dev, 0x610030), pclk = dev_priv->evo_irq.pclk; - -- ret = nv50_display_irq_head(dev, &head, &dcbent); -- if (ret) -+ NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); -+ dev_priv->evo_irq.dcb = NULL; -+ if (!dcb) - goto ack; -- pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff; -- script = nv50_display_script_select(dev, dcbent, pclk); - -- nouveau_bios_run_display_table(dev, dcbent, script, -pclk); -+ nouveau_bios_run_display_table(dev, dcb, script, -pclk); -+ nv50_display_unk40_dp_set_tmds(dev, dcb); - - ack: - nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40); -diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c -index e20c0e2..fb0281a 100644 ---- a/drivers/gpu/drm/nouveau/nv50_fifo.c -+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c -@@ -28,41 +28,33 @@ - #include "drm.h" - #include "nouveau_drv.h" - --struct nv50_fifo_priv { -- struct nouveau_gpuobj_ref *thingo[2]; -- int cur_thingo; --}; -- --#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) -- - static void --nv50_fifo_init_thingo(struct drm_device *dev) -+nv50_fifo_playlist_update(struct drm_device *dev) - { - struct drm_nouveau_private *dev_priv = dev->dev_private; -- struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv; -+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; - struct nouveau_gpuobj_ref *cur; - int i, nr; - - NV_DEBUG(dev, "\n"); - -- cur = priv->thingo[priv->cur_thingo]; -- priv->cur_thingo = !priv->cur_thingo; -+ cur = pfifo->playlist[pfifo->cur_playlist]; -+ pfifo->cur_playlist = !pfifo->cur_playlist; - - /* We never schedule channel 0 or 127 */ -- dev_priv->engine.instmem.prepare_access(dev, true); - for (i = 1, nr = 0; i < 127; i++) { - if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) - nv_wo32(dev, cur->gpuobj, nr++, i); - } -- dev_priv->engine.instmem.finish_access(dev); -+ dev_priv->engine.instmem.flush(dev); - - nv_wr32(dev, 0x32f4, cur->instance >> 12); - nv_wr32(dev, 0x32ec, nr); - nv_wr32(dev, 0x2500, 0x101); - } - --static int --nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt) 
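The thingo-to-playlist rename above also makes the double-buffering pattern easier to see: nv50_fifo_playlist_update() rebuilds the list of runnable channel IDs in the playlist object PFIFO is not currently reading, then points the hardware at it (the instance address goes to 0x32f4, the entry count to 0x32ec), so the list being consumed is never the one being rewritten. Below is a stand-alone sketch of that pattern with invented toy_* names and a plain array standing in for the gpuobj; it mirrors only the behaviour visible in the hunk above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_MAX_CHANNELS 128

struct toy_playlist {
	uint32_t entries[TOY_MAX_CHANNELS];
	int nr;
};

struct toy_fifo {
	bool active[TOY_MAX_CHANNELS];	/* stands in for fifos[i]->ramfc != NULL */
	struct toy_playlist playlist[2];
	int cur_playlist;
};

/* Rebuild the channel list in the buffer the hardware is not using,
 * then flip to it; channels 0 and 127 are never scheduled. */
static void toy_playlist_update(struct toy_fifo *fifo)
{
	struct toy_playlist *next = &fifo->playlist[!fifo->cur_playlist];
	int i;

	next->nr = 0;
	for (i = 1; i < TOY_MAX_CHANNELS - 1; i++)
		if (fifo->active[i])
			next->entries[next->nr++] = i;

	fifo->cur_playlist = !fifo->cur_playlist;
	/* The real code would now hand the new list to PFIFO
	 * (address to 0x32f4, entry count to 0x32ec). */
	printf("playlist %d live with %d channel(s)\n",
	       fifo->cur_playlist, next->nr);
}

int main(void)
{
	struct toy_fifo fifo = { .cur_playlist = 0 };

	fifo.active[3] = true;
	fifo.active[7] = true;
	toy_playlist_update(&fifo);
	return 0;
}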
+static void -+nv50_fifo_channel_enable(struct drm_device *dev, int channel) - { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan = dev_priv->fifos[channel]; -@@ -70,37 +62,28 @@ nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt) - - NV_DEBUG(dev, "ch%d\n", channel); - -- if (!chan->ramfc) -- return -EINVAL; -- -- if (IS_G80) ++nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; ++ /* beginning of area 1 on pre-NVA0 [after m2mf], area 3 on NVAx */ ++ xf_emit(ctx, 4, 0); ++ xf_emit(ctx, 1, 0xf); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 8, 0); ++ else ++ xf_emit(ctx, 4, 0); ++ xf_emit(ctx, 1, 0x20); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 0x11, 0); ++ else if (dev_priv->chipset >= 0xa0) ++ xf_emit(ctx, 0xf, 0); ++ else ++ xf_emit(ctx, 0xe, 0); ++ xf_emit(ctx, 1, 0x1a); ++ xf_emit(ctx, 0xd, 0); ++ xf_emit(ctx, 2, 4); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 1, 8); ++ xf_emit(ctx, 1, 0); + if (dev_priv->chipset == 0x50) - inst = chan->ramfc->instance >> 12; - else - inst = chan->ramfc->instance >> 8; -- nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), -- inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); - -- if (!nt) -- nv50_fifo_init_thingo(dev); -- return 0; -+ nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst | -+ NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); - } - - static void --nv50_fifo_channel_disable(struct drm_device *dev, int channel, bool nt) -+nv50_fifo_channel_disable(struct drm_device *dev, int channel) - { - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t inst; - -- NV_DEBUG(dev, "ch%d, nt=%d\n", channel, nt); -+ NV_DEBUG(dev, "ch%d\n", channel); - -- if (IS_G80) ++ xf_emit(ctx, 1, 0x3ff); ++ else ++ xf_emit(ctx, 1, 0x7ff); ++ if (dev_priv->chipset == 0xa8) ++ xf_emit(ctx, 1, 0x1e00); ++ xf_emit(ctx, 0xc, 0); ++ xf_emit(ctx, 1, 0xf); + if (dev_priv->chipset == 0x50) - inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80; - else - inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84; - nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst); -- -- if (!nt) -- nv50_fifo_init_thingo(dev); - } - - static void -@@ -133,12 +116,12 @@ nv50_fifo_init_context_table(struct drm_device *dev) - - for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) { - if (dev_priv->fifos[i]) -- nv50_fifo_channel_enable(dev, i, true); -+ nv50_fifo_channel_enable(dev, i); - else -- nv50_fifo_channel_disable(dev, i, true); -+ nv50_fifo_channel_disable(dev, i); - } - -- nv50_fifo_init_thingo(dev); -+ nv50_fifo_playlist_update(dev); - } - - static void -@@ -162,41 +145,38 @@ nv50_fifo_init_regs(struct drm_device *dev) - nv_wr32(dev, 0x3270, 0); - - /* Enable dummy channels setup by nv50_instmem.c */ -- nv50_fifo_channel_enable(dev, 0, true); -- nv50_fifo_channel_enable(dev, 127, true); -+ nv50_fifo_channel_enable(dev, 0); -+ nv50_fifo_channel_enable(dev, 127); - } - - int - nv50_fifo_init(struct drm_device *dev) - { - struct drm_nouveau_private *dev_priv = dev->dev_private; -- struct nv50_fifo_priv *priv; -+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; - int ret; - - NV_DEBUG(dev, "\n"); - -- priv = dev_priv->engine.fifo.priv; -- if (priv) { -- priv->cur_thingo = !priv->cur_thingo; -+ if (pfifo->playlist[0]) { -+ pfifo->cur_playlist = !pfifo->cur_playlist; - goto just_reset; - } - -- priv = kzalloc(sizeof(*priv), GFP_KERNEL); -- if (!priv) -- return -ENOMEM; -- dev_priv->engine.fifo.priv = 
priv; -- - ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, -- NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]); -+ NVOBJ_FLAG_ZERO_ALLOC, -+ &pfifo->playlist[0]); - if (ret) { -- NV_ERROR(dev, "error creating thingo0: %d\n", ret); -+ NV_ERROR(dev, "error creating playlist 0: %d\n", ret); - return ret; - } - - ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, -- NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]); -+ NVOBJ_FLAG_ZERO_ALLOC, -+ &pfifo->playlist[1]); - if (ret) { -- NV_ERROR(dev, "error creating thingo1: %d\n", ret); -+ nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]); -+ NV_ERROR(dev, "error creating playlist 1: %d\n", ret); - return ret; - } - -@@ -216,18 +196,15 @@ void - nv50_fifo_takedown(struct drm_device *dev) - { - struct drm_nouveau_private *dev_priv = dev->dev_private; -- struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv; -+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; - - NV_DEBUG(dev, "\n"); - -- if (!priv) -+ if (!pfifo->playlist[0]) - return; - -- nouveau_gpuobj_ref_del(dev, &priv->thingo[0]); -- nouveau_gpuobj_ref_del(dev, &priv->thingo[1]); -- -- dev_priv->engine.fifo.priv = NULL; -- kfree(priv); -+ nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]); -+ nouveau_gpuobj_ref_del(dev, &pfifo->playlist[1]); - } - - int -@@ -248,7 +225,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan) - - NV_DEBUG(dev, "ch%d\n", chan->id); - -- if (IS_G80) { ++ xf_emit(ctx, 0x125, 0); ++ else if (dev_priv->chipset < 0xa0) ++ xf_emit(ctx, 0x126, 0); ++ else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) ++ xf_emit(ctx, 0x124, 0); ++ else ++ xf_emit(ctx, 0x1f7, 0); ++ xf_emit(ctx, 1, 0xf); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 3, 0); ++ else ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 1); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 0xa1, 0); ++ else ++ xf_emit(ctx, 0x5a, 0); ++ xf_emit(ctx, 1, 0xf); ++ if (dev_priv->chipset < 0xa0) ++ xf_emit(ctx, 0x834, 0); ++ else if (dev_priv->chipset == 0xa0) ++ xf_emit(ctx, 0x1873, 0); ++ else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 0x8ba, 0); ++ else ++ xf_emit(ctx, 0x833, 0); ++ xf_emit(ctx, 1, 0xf); ++ xf_emit(ctx, 0xf, 0); ++} ++ ++static void ++nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; ++ /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 6 on NVAx */ ++ xf_emit(ctx, 2, 0); ++ if (dev_priv->chipset == 0x50) ++ xf_emit(ctx, 2, 1); ++ else ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 2, 0x100); ++ xf_emit(ctx, 1, 0x11); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 8); ++ xf_emit(ctx, 5, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 3, 1); ++ xf_emit(ctx, 1, 0xcf); ++ xf_emit(ctx, 1, 2); ++ xf_emit(ctx, 6, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 3, 1); ++ xf_emit(ctx, 4, 0); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0x15); ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 0x4444480); ++ xf_emit(ctx, 0x37, 0); ++} ++ ++static void ++nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx) ++{ ++ /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 0 on NVAx */ ++ xf_emit(ctx, 4, 0); ++ xf_emit(ctx, 1, 0x8100c12); ++ xf_emit(ctx, 4, 0); ++ xf_emit(ctx, 1, 0x100); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 0x10001); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x10001); ++ 
xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0x10001); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 1, 2); ++} ++ ++static void ++nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; ++ /* middle of area 2 on pre-NVA0 [after m2mf], end of area 0 on NVAx */ ++ xf_emit(ctx, 1, 0x3f800000); ++ xf_emit(ctx, 6, 0); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 1, 0x1a); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 0x12, 0); ++ xf_emit(ctx, 1, 0x00ffff00); ++ xf_emit(ctx, 6, 0); ++ xf_emit(ctx, 1, 0xf); ++ xf_emit(ctx, 7, 0); ++ xf_emit(ctx, 1, 0x0fac6881); ++ xf_emit(ctx, 1, 0x11); ++ xf_emit(ctx, 0xf, 0); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 2, 0); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 1, 3); ++ else if (dev_priv->chipset >= 0xa0) ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 2); ++ xf_emit(ctx, 2, 0x04000000); ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 5); ++ xf_emit(ctx, 1, 0x52); + if (dev_priv->chipset == 0x50) { - uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start; - uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start; - -@@ -281,10 +258,10 @@ nv50_fifo_create_context(struct nouveau_channel *chan) - - spin_lock_irqsave(&dev_priv->context_switch_lock, flags); - -- dev_priv->engine.instmem.prepare_access(dev, true); -- - nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4); -- nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4)); -+ nv_wo32(dev, ramfc, 0x80/4, (0 << 27) /* 4KiB */ | -+ (4 << 24) /* SEARCH_FULL */ | -+ (chan->ramht->instance >> 4)); - nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff); - nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff); - nv_wo32(dev, ramfc, 0x40/4, 0x00000000); -@@ -295,7 +272,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan) - chan->dma.ib_base * 4); - nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16); - -- if (!IS_G80) { -+ if (dev_priv->chipset != 0x50) { - nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id); - nv_wo32(dev, chan->ramin->gpuobj, 1, - chan->ramfc->instance >> 8); -@@ -304,16 +281,10 @@ nv50_fifo_create_context(struct nouveau_channel *chan) - nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12); - } - -- dev_priv->engine.instmem.finish_access(dev); -- -- ret = nv50_fifo_channel_enable(dev, chan->id, false); -- if (ret) { -- NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret); -- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); -- nouveau_gpuobj_ref_del(dev, &chan->ramfc); -- return ret; -- } -+ dev_priv->engine.instmem.flush(dev); - -+ nv50_fifo_channel_enable(dev, chan->id); -+ nv50_fifo_playlist_update(dev); - spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); - return 0; - } -@@ -328,11 +299,12 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan) - - /* This will ensure the channel is seen as disabled. 
*/ - chan->ramfc = NULL; -- nv50_fifo_channel_disable(dev, chan->id, false); -+ nv50_fifo_channel_disable(dev, chan->id); - - /* Dummy channel, also used on ch 127 */ - if (chan->id == 0) -- nv50_fifo_channel_disable(dev, 127, false); -+ nv50_fifo_channel_disable(dev, 127); -+ nv50_fifo_playlist_update(dev); - - nouveau_gpuobj_ref_del(dev, &ramfc); - nouveau_gpuobj_ref_del(dev, &chan->cache); -@@ -349,8 +321,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan) - - NV_DEBUG(dev, "ch%d\n", chan->id); - -- dev_priv->engine.instmem.prepare_access(dev, false); -- - nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4)); - nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4)); - nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4)); -@@ -396,7 +366,7 @@ nv50_fifo_load_context(struct nouveau_channel *chan) - nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); - - /* guessing that all the 0x34xx regs aren't on NV50 */ -- if (!IS_G80) { -+ if (dev_priv->chipset != 0x50) { - nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4)); - nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4)); - nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4)); -@@ -404,8 +374,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan) - nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4)); - } - -- dev_priv->engine.instmem.finish_access(dev); -- - nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16)); - return 0; - } -@@ -434,8 +402,6 @@ nv50_fifo_unload_context(struct drm_device *dev) - ramfc = chan->ramfc->gpuobj; - cache = chan->cache->gpuobj; - -- dev_priv->engine.instmem.prepare_access(dev, true); -- - nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330)); - nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334)); - nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240)); -@@ -482,7 +448,7 @@ nv50_fifo_unload_context(struct drm_device *dev) - } - - /* guessing that all the 0x34xx regs aren't on NV50 */ -- if (!IS_G80) { ++ xf_emit(ctx, 0x13, 0); ++ } else { ++ xf_emit(ctx, 4, 0); ++ xf_emit(ctx, 1, 1); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 0x11, 0); ++ else ++ xf_emit(ctx, 0x10, 0); ++ } ++ xf_emit(ctx, 0x10, 0x3f800000); ++ xf_emit(ctx, 1, 0x10); ++ xf_emit(ctx, 0x26, 0); ++ xf_emit(ctx, 1, 0x8100c12); ++ xf_emit(ctx, 1, 5); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 4, 0xffff); ++ if (dev_priv->chipset != 0x50) ++ xf_emit(ctx, 1, 3); ++ if (dev_priv->chipset < 0xa0) ++ xf_emit(ctx, 0x1f, 0); ++ else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 0xc, 0); ++ else ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 0x00ffff00); ++ xf_emit(ctx, 1, 0x1a); + if (dev_priv->chipset != 0x50) { - nv_wo32(dev, ramfc, 0x84/4, ptr >> 1); - nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c)); - nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400)); -@@ -491,7 +457,7 @@ nv50_fifo_unload_context(struct drm_device *dev) - nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410)); - } - -- dev_priv->engine.instmem.finish_access(dev); -+ dev_priv->engine.instmem.flush(dev); - - /*XXX: probably reload ch127 (NULL) state back too */ - nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127); -diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c -index b203d06..1413028 100644 ---- a/drivers/gpu/drm/nouveau/nv50_graph.c -+++ b/drivers/gpu/drm/nouveau/nv50_graph.c -@@ -30,8 +30,6 @@ - - #include "nouveau_grctx.h" - --#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) -- - static void - nv50_graph_init_reset(struct drm_device *dev) - { -@@ -103,37 +101,33 @@ static int - 
nv50_graph_init_ctxctl(struct drm_device *dev) - { - struct drm_nouveau_private *dev_priv = dev->dev_private; -+ struct nouveau_grctx ctx = {}; -+ uint32_t *cp; ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 3); ++ } ++ if (dev_priv->chipset < 0xa0) ++ xf_emit(ctx, 0x26, 0); ++ else ++ xf_emit(ctx, 0x3c, 0); ++ xf_emit(ctx, 1, 0x102); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 4, 4); ++ if (dev_priv->chipset >= 0xa0) ++ xf_emit(ctx, 8, 0); ++ xf_emit(ctx, 2, 4); ++ xf_emit(ctx, 1, 0); ++ if (dev_priv->chipset == 0x50) ++ xf_emit(ctx, 1, 0x3ff); ++ else ++ xf_emit(ctx, 1, 0x7ff); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x102); ++ xf_emit(ctx, 9, 0); ++ xf_emit(ctx, 4, 4); ++ xf_emit(ctx, 0x2c, 0); ++} ++ ++static void ++nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; ++ int magic2; ++ if (dev_priv->chipset == 0x50) { ++ magic2 = 0x00003e60; ++ } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) { ++ magic2 = 0x001ffe67; ++ } else { ++ magic2 = 0x00087e67; ++ } ++ xf_emit(ctx, 8, 0); ++ xf_emit(ctx, 1, 2); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, magic2); ++ xf_emit(ctx, 4, 0); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 7, 0); ++ if (dev_priv->chipset >= 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 1, 0x15); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0x10); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 4, 0); ++ if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) { ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 1, 0x400); ++ xf_emit(ctx, 1, 0x300); ++ xf_emit(ctx, 1, 0x1001); ++ if (dev_priv->chipset != 0xa0) { ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 1, 0); ++ else ++ xf_emit(ctx, 1, 0x15); ++ } ++ xf_emit(ctx, 3, 0); ++ } ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 2); ++ xf_emit(ctx, 8, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0x10); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 0x13, 0); ++ xf_emit(ctx, 1, 0x10); ++ xf_emit(ctx, 0x10, 0); ++ xf_emit(ctx, 0x10, 0x3f800000); ++ xf_emit(ctx, 0x19, 0); ++ xf_emit(ctx, 1, 0x10); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x3f); ++ xf_emit(ctx, 6, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 1); ++ if (dev_priv->chipset >= 0xa0) { ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 0x1001); ++ xf_emit(ctx, 0xb, 0); ++ } else { ++ xf_emit(ctx, 0xc, 0); ++ } ++ xf_emit(ctx, 1, 0x11); ++ xf_emit(ctx, 7, 0); ++ xf_emit(ctx, 1, 0xf); ++ xf_emit(ctx, 7, 0); ++ xf_emit(ctx, 1, 0x11); ++ if (dev_priv->chipset == 0x50) ++ xf_emit(ctx, 4, 0); ++ else ++ xf_emit(ctx, 6, 0); ++ xf_emit(ctx, 3, 1); ++ xf_emit(ctx, 1, 2); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 2); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, magic2); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x0fac6881); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 0x18, 1); ++ xf_emit(ctx, 8, 2); ++ xf_emit(ctx, 8, 1); ++ xf_emit(ctx, 8, 2); ++ xf_emit(ctx, 8, 1); ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 5, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 0x16, 0); ++ } else { ++ if (dev_priv->chipset >= 0xa0) ++ xf_emit(ctx, 0x1b, 0); ++ else ++ xf_emit(ctx, 0x15, 0); ++ } ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 2); ++ xf_emit(ctx, 2, 1); ++ xf_emit(ctx, 1, 
2); ++ xf_emit(ctx, 2, 1); ++ if (dev_priv->chipset >= 0xa0) ++ xf_emit(ctx, 4, 0); ++ else ++ xf_emit(ctx, 3, 0); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { ++ xf_emit(ctx, 0x10, 1); ++ xf_emit(ctx, 8, 2); ++ xf_emit(ctx, 0x10, 1); ++ xf_emit(ctx, 8, 2); ++ xf_emit(ctx, 8, 1); ++ xf_emit(ctx, 3, 0); ++ } ++ xf_emit(ctx, 1, 0x11); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 0x5b, 0); ++} ++ ++static void ++nv50_graph_construct_xfer_tp_x1(struct nouveau_grctx *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; ++ int magic3; ++ if (dev_priv->chipset == 0x50) ++ magic3 = 0x1000; ++ else if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) ++ magic3 = 0x1e00; ++ else ++ magic3 = 0; ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 4); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 0x24, 0); ++ else if (dev_priv->chipset >= 0xa0) ++ xf_emit(ctx, 0x14, 0); ++ else ++ xf_emit(ctx, 0x15, 0); ++ xf_emit(ctx, 2, 4); ++ if (dev_priv->chipset >= 0xa0) ++ xf_emit(ctx, 1, 0x03020100); ++ else ++ xf_emit(ctx, 1, 0x00608080); ++ xf_emit(ctx, 4, 0); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 2, 4); ++ xf_emit(ctx, 1, 0x80); ++ if (magic3) ++ xf_emit(ctx, 1, magic3); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 0x24, 0); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 1, 0x80); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 1, 0x03020100); ++ xf_emit(ctx, 1, 3); ++ if (magic3) ++ xf_emit(ctx, 1, magic3); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 4, 0); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 1, 3); ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 4); ++ if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96) ++ xf_emit(ctx, 0x1024, 0); ++ else if (dev_priv->chipset < 0xa0) ++ xf_emit(ctx, 0xa24, 0); ++ else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) ++ xf_emit(ctx, 0x214, 0); ++ else ++ xf_emit(ctx, 0x414, 0); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 1, 3); ++ xf_emit(ctx, 2, 0); ++} ++ ++static void ++nv50_graph_construct_xfer_tp_x2(struct nouveau_grctx *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; ++ int magic1, magic2; ++ if (dev_priv->chipset == 0x50) { ++ magic1 = 0x3ff; ++ magic2 = 0x00003e60; ++ } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) { ++ magic1 = 0x7ff; ++ magic2 = 0x001ffe67; ++ } else { ++ magic1 = 0x7ff; ++ magic2 = 0x00087e67; ++ } ++ xf_emit(ctx, 3, 0); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 0xc, 0); ++ xf_emit(ctx, 1, 0xf); ++ xf_emit(ctx, 0xb, 0); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 4, 0xffff); ++ xf_emit(ctx, 8, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 5, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 2, 0); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { ++ xf_emit(ctx, 1, 3); ++ xf_emit(ctx, 1, 0); ++ } else if (dev_priv->chipset >= 0xa0) ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 0xa, 0); ++ xf_emit(ctx, 2, 1); ++ xf_emit(ctx, 1, 2); ++ xf_emit(ctx, 2, 1); ++ xf_emit(ctx, 1, 2); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 0x18, 1); ++ xf_emit(ctx, 8, 2); ++ xf_emit(ctx, 8, 1); ++ xf_emit(ctx, 8, 2); ++ xf_emit(ctx, 8, 1); ++ xf_emit(ctx, 1, 0); ++ } ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x11); ++ xf_emit(ctx, 7, 0); ++ xf_emit(ctx, 1, 0x0fac6881); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 
0x11); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 3, 0xcf); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 0xa, 0); ++ xf_emit(ctx, 2, 1); ++ xf_emit(ctx, 1, 2); ++ xf_emit(ctx, 2, 1); ++ xf_emit(ctx, 1, 2); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 8, 1); ++ xf_emit(ctx, 1, 0x11); ++ xf_emit(ctx, 7, 0); ++ xf_emit(ctx, 1, 0x0fac6881); ++ xf_emit(ctx, 1, 0xf); ++ xf_emit(ctx, 7, 0); ++ xf_emit(ctx, 1, magic2); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 0x11); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 2, 1); ++ else ++ xf_emit(ctx, 1, 1); ++ if(dev_priv->chipset == 0x50) ++ xf_emit(ctx, 1, 0); ++ else ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 5, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 4, 0); ++ xf_emit(ctx, 1, 0x11); ++ xf_emit(ctx, 7, 0); ++ xf_emit(ctx, 1, 0x0fac6881); ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 0x11); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, magic1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 2, 0); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 0x28, 0); ++ xf_emit(ctx, 8, 8); ++ xf_emit(ctx, 1, 0x11); ++ xf_emit(ctx, 7, 0); ++ xf_emit(ctx, 1, 0x0fac6881); ++ xf_emit(ctx, 8, 0x400); ++ xf_emit(ctx, 8, 0x300); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0xf); ++ xf_emit(ctx, 7, 0); ++ xf_emit(ctx, 1, 0x20); ++ xf_emit(ctx, 1, 0x11); ++ xf_emit(ctx, 1, 0x100); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 0x40); ++ xf_emit(ctx, 1, 0x100); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 3); ++ xf_emit(ctx, 4, 0); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, magic2); ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 2); ++ xf_emit(ctx, 1, 0x0fac6881); ++ xf_emit(ctx, 9, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 4, 0); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0x400); ++ xf_emit(ctx, 1, 0x300); ++ xf_emit(ctx, 1, 0x1001); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 4, 0); ++ else ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 0x11); ++ xf_emit(ctx, 7, 0); ++ xf_emit(ctx, 1, 0x0fac6881); ++ xf_emit(ctx, 1, 0xf); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { ++ xf_emit(ctx, 0x15, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 3, 0); ++ } else ++ xf_emit(ctx, 0x17, 0); ++ if (dev_priv->chipset >= 0xa0) ++ xf_emit(ctx, 1, 0x0fac6881); ++ xf_emit(ctx, 1, magic2); ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 0x11); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 2, 1); ++ xf_emit(ctx, 3, 0); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 2, 1); ++ else ++ xf_emit(ctx, 1, 1); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 2, 0); ++ else if (dev_priv->chipset != 0x50) ++ xf_emit(ctx, 1, 0); ++} ++ ++static void ++nv50_graph_construct_xfer_tp_x3(struct nouveau_grctx *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 1); ++ if (dev_priv->chipset == 0x50) ++ xf_emit(ctx, 2, 0); ++ else ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 0x2a712488); 
++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x4085c000); ++ xf_emit(ctx, 1, 0x40); ++ xf_emit(ctx, 1, 0x100); ++ xf_emit(ctx, 1, 0x10100); ++ xf_emit(ctx, 1, 0x02800000); ++} ++ ++static void ++nv50_graph_construct_xfer_tp_x4(struct nouveau_grctx *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; ++ xf_emit(ctx, 2, 0x04e3bfdf); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x00ffff00); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 2, 1); ++ else ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 0x00ffff00); ++ xf_emit(ctx, 8, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0x30201000); ++ xf_emit(ctx, 1, 0x70605040); ++ xf_emit(ctx, 1, 0xb8a89888); ++ xf_emit(ctx, 1, 0xf8e8d8c8); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x1a); ++} ++ ++static void ++nv50_graph_construct_xfer_tp_x5(struct nouveau_grctx *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 0xfac6881); ++ xf_emit(ctx, 4, 0); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 2, 1); ++ xf_emit(ctx, 2, 0); ++ xf_emit(ctx, 1, 1); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 0xb, 0); ++ else ++ xf_emit(ctx, 0xa, 0); ++ xf_emit(ctx, 8, 1); ++ xf_emit(ctx, 1, 0x11); ++ xf_emit(ctx, 7, 0); ++ xf_emit(ctx, 1, 0xfac6881); ++ xf_emit(ctx, 1, 0xf); ++ xf_emit(ctx, 7, 0); ++ xf_emit(ctx, 1, 0x11); ++ xf_emit(ctx, 1, 1); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { ++ xf_emit(ctx, 6, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 6, 0); ++ } else { ++ xf_emit(ctx, 0xb, 0); ++ } ++} ++ ++static void ++nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; ++ if (dev_priv->chipset < 0xa0) { ++ nv50_graph_construct_xfer_tp_x1(ctx); ++ nv50_graph_construct_xfer_tp_x2(ctx); ++ nv50_graph_construct_xfer_tp_x3(ctx); ++ if (dev_priv->chipset == 0x50) ++ xf_emit(ctx, 0xf, 0); ++ else ++ xf_emit(ctx, 0x12, 0); ++ nv50_graph_construct_xfer_tp_x4(ctx); ++ } else { ++ nv50_graph_construct_xfer_tp_x3(ctx); ++ if (dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 0xc, 0); ++ else ++ xf_emit(ctx, 0xa, 0); ++ nv50_graph_construct_xfer_tp_x2(ctx); ++ nv50_graph_construct_xfer_tp_x5(ctx); ++ nv50_graph_construct_xfer_tp_x4(ctx); ++ nv50_graph_construct_xfer_tp_x1(ctx); ++ } ++} ++ ++static void ++nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; ++ int i, mpcnt; ++ if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa) ++ mpcnt = 1; ++ else if (dev_priv->chipset < 0xa0 || dev_priv->chipset >= 0xa8) ++ mpcnt = 2; ++ else ++ mpcnt = 3; ++ for (i = 0; i < mpcnt; i++) { ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x80); ++ xf_emit(ctx, 1, 0x80007004); ++ xf_emit(ctx, 1, 0x04000400); ++ if (dev_priv->chipset >= 0xa0) ++ xf_emit(ctx, 1, 0xc0); ++ xf_emit(ctx, 1, 0x1000); ++ xf_emit(ctx, 2, 0); ++ if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) { ++ xf_emit(ctx, 1, 0xe00); ++ xf_emit(ctx, 1, 0x1e00); ++ } ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 2, 0); ++ if (dev_priv->chipset == 0x50) ++ xf_emit(ctx, 2, 0x1000); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 1, 2); ++ if (dev_priv->chipset >= 0xaa) ++ xf_emit(ctx, 0xb, 0); ++ else if (dev_priv->chipset >= 0xa0) ++ xf_emit(ctx, 0xc, 0); ++ else ++ 
xf_emit(ctx, 0xa, 0); ++ } ++ xf_emit(ctx, 1, 0x08100c12); ++ xf_emit(ctx, 1, 0); ++ if (dev_priv->chipset >= 0xa0) { ++ xf_emit(ctx, 1, 0x1fe21); ++ } ++ xf_emit(ctx, 5, 0); ++ xf_emit(ctx, 4, 0xffff); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 2, 0x10001); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 0x1fe21); ++ xf_emit(ctx, 1, 0); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 4, 0); ++ xf_emit(ctx, 1, 0x08100c12); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 2); ++ xf_emit(ctx, 1, 0x11); ++ xf_emit(ctx, 8, 0); ++ xf_emit(ctx, 1, 0xfac6881); ++ xf_emit(ctx, 1, 0); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) ++ xf_emit(ctx, 1, 3); ++ xf_emit(ctx, 3, 0); ++ xf_emit(ctx, 1, 4); ++ xf_emit(ctx, 9, 0); ++ xf_emit(ctx, 1, 2); ++ xf_emit(ctx, 2, 1); ++ xf_emit(ctx, 1, 2); ++ xf_emit(ctx, 3, 1); ++ xf_emit(ctx, 1, 0); ++ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { ++ xf_emit(ctx, 8, 2); ++ xf_emit(ctx, 0x10, 1); ++ xf_emit(ctx, 8, 2); ++ xf_emit(ctx, 0x18, 1); ++ xf_emit(ctx, 3, 0); ++ } ++ xf_emit(ctx, 1, 4); ++ if (dev_priv->chipset == 0x50) ++ xf_emit(ctx, 0x3a0, 0); ++ else if (dev_priv->chipset < 0x94) ++ xf_emit(ctx, 0x3a2, 0); ++ else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa) ++ xf_emit(ctx, 0x39f, 0); ++ else ++ xf_emit(ctx, 0x3a3, 0); ++ xf_emit(ctx, 1, 0x11); ++ xf_emit(ctx, 1, 0); ++ xf_emit(ctx, 1, 1); ++ xf_emit(ctx, 0x2d, 0); ++} ++ ++static void ++nv50_graph_construct_xfer2(struct nouveau_grctx *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + int i; - - NV_DEBUG(dev, "\n"); - -- if (nouveau_ctxfw) { -- nouveau_grctx_prog_load(dev); -- dev_priv->engine.graph.grctx_size = 0x70000; -+ cp = kmalloc(512 * 4, GFP_KERNEL); -+ if (!cp) { -+ NV_ERROR(dev, "failed to allocate ctxprog\n"); -+ dev_priv->engine.graph.accel_blocked = true; -+ return 0; - } -- if (!dev_priv->engine.graph.ctxprog) { -- struct nouveau_grctx ctx = {}; -- uint32_t *cp = kmalloc(512 * 4, GFP_KERNEL); -- int i; -- if (!cp) { -- NV_ERROR(dev, "Couldn't alloc ctxprog! 
Disabling acceleration.\n"); -- dev_priv->engine.graph.accel_blocked = true; -- return 0; -- } -- ctx.dev = dev; -- ctx.mode = NOUVEAU_GRCTX_PROG; -- ctx.data = cp; -- ctx.ctxprog_max = 512; -- if (!nv50_grctx_init(&ctx)) { -- dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; -- -- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); -- for (i = 0; i < ctx.ctxprog_len; i++) -- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); -- } else { -- dev_priv->engine.graph.accel_blocked = true; -- } -- kfree(cp); -+ -+ ctx.dev = dev; -+ ctx.mode = NOUVEAU_GRCTX_PROG; -+ ctx.data = cp; -+ ctx.ctxprog_max = 512; -+ if (!nv50_grctx_init(&ctx)) { -+ dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; -+ -+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); -+ for (i = 0; i < ctx.ctxprog_len; i++) -+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); ++ uint32_t offset; ++ uint32_t units = nv_rd32 (ctx->dev, 0x1540); ++ int size = 0; ++ ++ offset = (ctx->ctxvals_pos+0x3f)&~0x3f; ++ ++ if (dev_priv->chipset < 0xa0) { ++ for (i = 0; i < 8; i++) { ++ ctx->ctxvals_pos = offset + i; ++ if (i == 0) ++ xf_emit(ctx, 1, 0x08100c12); ++ if (units & (1 << i)) ++ nv50_graph_construct_xfer_tp2(ctx); ++ if ((ctx->ctxvals_pos-offset)/8 > size) ++ size = (ctx->ctxvals_pos-offset)/8; ++ } + } else { -+ dev_priv->engine.graph.accel_blocked = true; - } -+ kfree(cp); - - nv_wr32(dev, 0x400320, 4); - nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0); -@@ -164,7 +158,6 @@ void - nv50_graph_takedown(struct drm_device *dev) - { - NV_DEBUG(dev, "\n"); -- nouveau_grctx_fini(dev); - } - - void -@@ -212,8 +205,9 @@ nv50_graph_create_context(struct nouveau_channel *chan) - struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; -- struct nouveau_gpuobj *ctx; -+ struct nouveau_gpuobj *obj; - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; -+ struct nouveau_grctx ctx = {}; - int hdr, ret; - - NV_DEBUG(dev, "ch%d\n", chan->id); -@@ -223,10 +217,9 @@ nv50_graph_create_context(struct nouveau_channel *chan) - NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); - if (ret) - return ret; -- ctx = chan->ramin_grctx->gpuobj; -+ obj = chan->ramin_grctx->gpuobj; - -- hdr = IS_G80 ? 0x200 : 0x20; -- dev_priv->engine.instmem.prepare_access(dev, true); -+ hdr = (dev_priv->chipset == 0x50) ? 
0x200 : 0x20; - nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002); - nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance + - pgraph->grctx_size - 1); -@@ -234,21 +227,15 @@ nv50_graph_create_context(struct nouveau_channel *chan) - nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0); - nv_wo32(dev, ramin, (hdr + 0x10)/4, 0); - nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000); -- dev_priv->engine.instmem.finish_access(dev); -- -- dev_priv->engine.instmem.prepare_access(dev, true); -- if (!pgraph->ctxprog) { -- struct nouveau_grctx ctx = {}; -- ctx.dev = chan->dev; -- ctx.mode = NOUVEAU_GRCTX_VALS; -- ctx.data = chan->ramin_grctx->gpuobj; -- nv50_grctx_init(&ctx); -- } else { -- nouveau_grctx_vals_load(dev, ctx); -- } -- nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); -- dev_priv->engine.instmem.finish_access(dev); - -+ ctx.dev = chan->dev; -+ ctx.mode = NOUVEAU_GRCTX_VALS; -+ ctx.data = obj; -+ nv50_grctx_init(&ctx); ++ /* Strand 0: TPs 0, 1 */ ++ ctx->ctxvals_pos = offset; ++ xf_emit(ctx, 1, 0x08100c12); ++ if (units & (1 << 0)) ++ nv50_graph_construct_xfer_tp2(ctx); ++ if (units & (1 << 1)) ++ nv50_graph_construct_xfer_tp2(ctx); ++ if ((ctx->ctxvals_pos-offset)/8 > size) ++ size = (ctx->ctxvals_pos-offset)/8; + -+ nv_wo32(dev, obj, 0x00000/4, chan->ramin->instance >> 12); ++ /* Strand 0: TPs 2, 3 */ ++ ctx->ctxvals_pos = offset + 1; ++ if (units & (1 << 2)) ++ nv50_graph_construct_xfer_tp2(ctx); ++ if (units & (1 << 3)) ++ nv50_graph_construct_xfer_tp2(ctx); ++ if ((ctx->ctxvals_pos-offset)/8 > size) ++ size = (ctx->ctxvals_pos-offset)/8; + -+ dev_priv->engine.instmem.flush(dev); - return 0; - } - -@@ -257,17 +244,16 @@ nv50_graph_destroy_context(struct nouveau_channel *chan) - { - struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; -- int i, hdr = IS_G80 ? 0x200 : 0x20; -+ int i, hdr = (dev_priv->chipset == 0x50) ? 
0x200 : 0x20; - - NV_DEBUG(dev, "ch%d\n", chan->id); - - if (!chan->ramin || !chan->ramin->gpuobj) - return; - -- dev_priv->engine.instmem.prepare_access(dev, true); - for (i = hdr; i < hdr + 24; i += 4) - nv_wo32(dev, chan->ramin->gpuobj, i/4, 0); -- dev_priv->engine.instmem.finish_access(dev); -+ dev_priv->engine.instmem.flush(dev); - - nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); - } ++ /* Strand 0: TPs 4, 5, 6 */ ++ ctx->ctxvals_pos = offset + 2; ++ if (units & (1 << 4)) ++ nv50_graph_construct_xfer_tp2(ctx); ++ if (units & (1 << 5)) ++ nv50_graph_construct_xfer_tp2(ctx); ++ if (units & (1 << 6)) ++ nv50_graph_construct_xfer_tp2(ctx); ++ if ((ctx->ctxvals_pos-offset)/8 > size) ++ size = (ctx->ctxvals_pos-offset)/8; ++ ++ /* Strand 0: TPs 7, 8, 9 */ ++ ctx->ctxvals_pos = offset + 3; ++ if (units & (1 << 7)) ++ nv50_graph_construct_xfer_tp2(ctx); ++ if (units & (1 << 8)) ++ nv50_graph_construct_xfer_tp2(ctx); ++ if (units & (1 << 9)) ++ nv50_graph_construct_xfer_tp2(ctx); ++ if ((ctx->ctxvals_pos-offset)/8 > size) ++ size = (ctx->ctxvals_pos-offset)/8; ++ } ++ ctx->ctxvals_pos = offset + size * 8; ++ ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f; ++ cp_lsr (ctx, offset); ++ cp_out (ctx, CP_SET_XFER_POINTER); ++ cp_lsr (ctx, size); ++ cp_out (ctx, CP_SEEK_2); ++ cp_out (ctx, CP_XFER_2); ++ cp_wait(ctx, XFER, BUSY); ++} diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c -index 5f21df3..0c8a6f2 100644 +index f0dc4e3..5f21df3 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c -@@ -35,8 +35,6 @@ struct nv50_instmem_priv { - struct nouveau_gpuobj_ref *pramin_pt; - struct nouveau_gpuobj_ref *pramin_bar; - struct nouveau_gpuobj_ref *fb_bar; -- -- bool last_access_wr; - }; - - #define NV50_INSTMEM_PAGE_SHIFT 12 -@@ -147,7 +145,7 @@ nv50_instmem_init(struct drm_device *dev) - if (ret) - return ret; - -- if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base)) -+ if (drm_mm_init(&chan->ramin_heap, c_base, c_size - c_base)) - return -ENOMEM; - - /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */ -@@ -262,23 +260,18 @@ nv50_instmem_init(struct drm_device *dev) - - /* Assume that praying isn't enough, check that we can re-read the - * entire fake channel back from the PRAMIN BAR */ -- dev_priv->engine.instmem.prepare_access(dev, false); - for (i = 0; i < c_size; i += 4) { - if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) { - NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n", - i); -- dev_priv->engine.instmem.finish_access(dev); - return -EINVAL; - } - } -- dev_priv->engine.instmem.finish_access(dev); - - nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700); - - /* Global PRAMIN heap */ -- if (nouveau_mem_init_heap(&dev_priv->ramin_heap, -- c_size, dev_priv->ramin_size - c_size)) { -- dev_priv->ramin_heap = NULL; -+ if (drm_mm_init(&dev_priv->ramin_heap, c_size, dev_priv->ramin_size - c_size)) { - NV_ERROR(dev, "Failed to init RAMIN heap\n"); - } - -@@ -321,7 +314,7 @@ nv50_instmem_takedown(struct drm_device *dev) - nouveau_gpuobj_del(dev, &chan->vm_pd); - nouveau_gpuobj_ref_del(dev, &chan->ramfc); - nouveau_gpuobj_ref_del(dev, &chan->ramin); -- nouveau_mem_takedown(&chan->ramin_heap); -+ drm_mm_takedown(&chan->ramin_heap); - - dev_priv->fifos[0] = dev_priv->fifos[127] = NULL; - kfree(chan); -@@ -436,14 +429,14 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) - if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) - return -EINVAL; - -- 
NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n", -+ NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n", - gpuobj->im_pramin->start, gpuobj->im_pramin->size); - - pte = (gpuobj->im_pramin->start >> 12) << 1; - pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; - vram = gpuobj->im_backing_start; - -- NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n", -+ NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", - gpuobj->im_pramin->start, pte, pte_end); - NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); +@@ -63,9 +63,10 @@ nv50_instmem_init(struct drm_device *dev) + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan; + uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size; ++ uint32_t save_nv001700; ++ uint64_t v; + struct nv50_instmem_priv *priv; + int ret, i; +- uint32_t v, save_nv001700; -@@ -453,27 +446,15 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) - vram |= 0x30; - } + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) +@@ -76,17 +77,12 @@ nv50_instmem_init(struct drm_device *dev) + for (i = 0x1700; i <= 0x1710; i += 4) + priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); -- dev_priv->engine.instmem.prepare_access(dev, true); - while (pte < pte_end) { - nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram)); - nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram)); - vram += NV50_INSTMEM_PAGE_SIZE; - } -- dev_priv->engine.instmem.finish_access(dev); +- if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) +- dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; +- else +- dev_priv->vram_sys_base = 0; - -- nv_wr32(dev, 0x100c80, 0x00040001); -- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { -- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n"); -- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); -- return -EBUSY; -- } -+ dev_priv->engine.instmem.flush(dev); - -- nv_wr32(dev, 0x100c80, 0x00060001); -- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { -- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); -- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); -- return -EBUSY; -- } -+ nv50_vm_flush(dev, 4); -+ nv50_vm_flush(dev, 6); - - gpuobj->im_bound = 1; - return 0; -@@ -492,36 +473,28 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) - pte = (gpuobj->im_pramin->start >> 12) << 1; - pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; - -- dev_priv->engine.instmem.prepare_access(dev, true); - while (pte < pte_end) { - nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); - nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); + /* Reserve the last MiB of VRAM, we should probably try to avoid + * setting up the below tables over the top of the VBIOS image at + * some point. + */ + dev_priv->ramin_rsvd_vram = 1 << 20; +- c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram; ++ c_offset = dev_priv->vram_size - dev_priv->ramin_rsvd_vram; + c_size = 128 << 10; + c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200; + c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 
0x0 : 0x20; +@@ -106,7 +102,7 @@ nv50_instmem_init(struct drm_device *dev) + dev_priv->vm_gart_size = NV50_VM_BLOCK; + + dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size; +- dev_priv->vm_vram_size = nouveau_mem_fb_amount(dev); ++ dev_priv->vm_vram_size = dev_priv->vram_size; + if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM) + dev_priv->vm_vram_size = NV50_VM_MAX_VRAM; + dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK); +@@ -189,8 +185,8 @@ nv50_instmem_init(struct drm_device *dev) + + i = 0; + while (v < dev_priv->vram_sys_base + c_offset + c_size) { +- BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v); +- BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); ++ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, lower_32_bits(v)); ++ BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, upper_32_bits(v)); + v += 0x1000; + i += 8; } -- dev_priv->engine.instmem.finish_access(dev); -+ dev_priv->engine.instmem.flush(dev); - - gpuobj->im_bound = 0; - return 0; - } +@@ -390,7 +386,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, + if (gpuobj->im_backing) + return -EINVAL; - void --nv50_instmem_prepare_access(struct drm_device *dev, bool write) -+nv50_instmem_flush(struct drm_device *dev) - { -- struct drm_nouveau_private *dev_priv = dev->dev_private; -- struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; -- -- priv->last_access_wr = write; -+ nv_wr32(dev, 0x070000, 0x00000001); -+ if (!nv_wait(0x070000, 0x00000001, 0x00000000)) -+ NV_ERROR(dev, "PRAMIN flush timeout\n"); - } +- *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1); ++ *sz = ALIGN(*sz, NV50_INSTMEM_PAGE_SIZE); + if (*sz == 0) + return -EINVAL; - void --nv50_instmem_finish_access(struct drm_device *dev) -+nv50_vm_flush(struct drm_device *dev, int engine) - { -- struct drm_nouveau_private *dev_priv = dev->dev_private; -- struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; -- -- if (priv->last_access_wr) { -- nv_wr32(dev, 0x070000, 0x00000001); -- if (!nv_wait(0x070000, 0x00000001, 0x00000000)) -- NV_ERROR(dev, "PRAMIN flush timeout\n"); -- } -+ nv_wr32(dev, 0x100c80, (engine << 16) | 1); -+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) -+ NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); - } -- diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c -index 812778d..bcd4cf8 100644 +index c2fff54..4832bba 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c -@@ -37,52 +37,32 @@ - #include "nv50_display.h" - - static void --nv50_sor_disconnect(struct nouveau_encoder *nv_encoder) -+nv50_sor_disconnect(struct drm_encoder *encoder) - { -- struct drm_device *dev = to_drm_encoder(nv_encoder)->dev; -+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); -+ struct drm_device *dev = encoder->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *evo = dev_priv->evo; - int ret; - -+ if (!nv_encoder->crtc) -+ return; -+ nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true); -+ - NV_DEBUG_KMS(dev, "Disconnecting SOR %d\n", nv_encoder->or); - -- ret = RING_SPACE(evo, 2); -+ ret = RING_SPACE(evo, 4); - if (ret) { - NV_ERROR(dev, "no space while disconnecting SOR\n"); - return; - } - BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1); -- OUT_RING(evo, 0); --} -- --static void --nv50_sor_dp_link_train(struct drm_encoder *encoder) --{ -- struct drm_device *dev = encoder->dev; -- struct nouveau_encoder *nv_encoder = 
nouveau_encoder(encoder); -- struct bit_displayport_encoder_table *dpe; -- int dpe_headerlen; -- -- dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); -- if (!dpe) { -- NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or); -- return; -- } -+ OUT_RING (evo, 0); -+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); -+ OUT_RING (evo, 0); - -- if (dpe->script0) { -- NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); -- nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0), -- nv_encoder->dcb); -- } -- -- if (!nouveau_dp_link_train(encoder)) -- NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or); -- -- if (dpe->script1) { -- NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or); -- nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1), -- nv_encoder->dcb); -- } -+ nv_encoder->crtc = NULL; -+ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; - } - - static void -@@ -94,14 +74,16 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) - uint32_t val; - int or = nv_encoder->or; - -- NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode); -+ NV_DEBUG_KMS(dev, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode); - - nv_encoder->last_dpms = mode; - list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { - struct nouveau_encoder *nvenc = nouveau_encoder(enc); - - if (nvenc == nv_encoder || -- nvenc->disconnect != nv50_sor_disconnect || -+ (nvenc->dcb->type != OUTPUT_TMDS && -+ nvenc->dcb->type != OUTPUT_LVDS && -+ nvenc->dcb->type != OUTPUT_DP) || - nvenc->dcb->or != nv_encoder->dcb->or) - continue; - -@@ -133,8 +115,22 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) - nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or))); - } - -- if (nv_encoder->dcb->type == OUTPUT_DP && mode == DRM_MODE_DPMS_ON) -- nv50_sor_dp_link_train(encoder); -+ if (nv_encoder->dcb->type == OUTPUT_DP) { -+ struct nouveau_i2c_chan *auxch; -+ -+ auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); -+ if (!auxch) -+ return; -+ -+ if (mode == DRM_MODE_DPMS_ON) { -+ u8 status = DP_SET_POWER_D0; -+ nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1); -+ nouveau_dp_link_train(encoder); -+ } else { -+ u8 status = DP_SET_POWER_D3; -+ nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1); -+ } -+ } - } - - static void -@@ -196,7 +192,8 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, - uint32_t mode_ctl = 0; - int ret; - -- NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or); -+ NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n", -+ nv_encoder->or, nv_encoder->dcb->type, crtc->index); - - nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON); - -@@ -239,6 +236,14 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, - } - BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1); - OUT_RING(evo, mode_ctl); -+ -+ nv_encoder->crtc = encoder->crtc; -+} -+ -+static struct drm_crtc * -+nv50_sor_crtc_get(struct drm_encoder *encoder) -+{ -+ return nouveau_encoder(encoder)->crtc; - } - - static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = { -@@ -249,7 +254,9 @@ static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = { - .prepare = nv50_sor_prepare, - .commit = nv50_sor_commit, - .mode_set = nv50_sor_mode_set, -- .detect = NULL -+ .get_crtc = nv50_sor_crtc_get, -+ .detect = NULL, -+ .disable = nv50_sor_disconnect - }; - - static void -@@ -272,32 +279,22 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = { +@@ -211,7 +211,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct 
drm_display_mode *mode, + mode_ctl = 0x0200; + break; + case OUTPUT_DP: +- mode_ctl |= 0x00050000; ++ mode_ctl |= (nv_encoder->dp.mc_unknown << 16); + if (nv_encoder->dcb->sorconf.link & 1) + mode_ctl |= 0x00000800; + else +@@ -272,32 +272,22 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = { }; int @@ -5881,23 +9139,36 @@ index 812778d..bcd4cf8 100644 break; default: return -EINVAL; -@@ -310,8 +307,7 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) - - nv_encoder->dcb = entry; - nv_encoder->or = ffs(entry->or) - 1; -- -- nv_encoder->disconnect = nv50_sor_disconnect; -+ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; - - drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type); - drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs); -@@ -342,5 +338,6 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) - nv_encoder->dp.mc_unknown = 5; - } +@@ -319,5 +309,29 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; ++ if (nv_encoder->dcb->type == OUTPUT_DP) { ++ int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1); ++ uint32_t tmp; ++ ++ tmp = nv_rd32(dev, 0x61c700 + (or * 0x800)); ++ ++ switch ((tmp & 0x00000f00) >> 8) { ++ case 8: ++ case 9: ++ nv_encoder->dp.mc_unknown = (tmp & 0x000f0000) >> 16; ++ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); ++ nv_encoder->dp.unk0 = tmp & 0x000001fc; ++ tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link)); ++ nv_encoder->dp.unk1 = tmp & 0x010f7f3f; ++ break; ++ default: ++ break; ++ } ++ ++ if (!nv_encoder->dp.mc_unknown) ++ nv_encoder->dp.mc_unknown = 5; ++ } ++ + drm_mode_connector_attach_encoder(connector, encoder); return 0; } -- -1.7.1.1 +1.7.1 diff --git a/drm-radeon-evergreen.patch b/drm-radeon-evergreen.patch new file mode 100644 index 0000000..5303446 --- /dev/null +++ b/drm-radeon-evergreen.patch @@ -0,0 +1,11726 @@ +diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile +index 1cc7b93..83c5907 100644 +--- a/drivers/gpu/drm/radeon/Makefile ++++ b/drivers/gpu/drm/radeon/Makefile +@@ -54,7 +54,8 @@ radeon-y += radeon_device.o radeon_kms.o \ + radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ + rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ + r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ +- r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o ++ r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ ++ evergreen.o + + radeon-$(CONFIG_COMPAT) += radeon_ioc32.o + +diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c +index d75788f..b7fe660 100644 +--- a/drivers/gpu/drm/radeon/atom.c ++++ b/drivers/gpu/drm/radeon/atom.c +@@ -52,15 +52,17 @@ + + typedef struct { + struct atom_context *ctx; +- + uint32_t *ps, *ws; + int ps_shift; + uint16_t start; ++ unsigned last_jump; ++ unsigned long last_jump_jiffies; ++ bool abort; + } atom_exec_context; + + int atom_debug = 0; +-static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); +-void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); ++static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); ++int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); + + static uint32_t atom_arg_mask[8] = + { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, +@@ -604,12 +606,17 @@ static void 
atom_op_beep(atom_exec_context *ctx, int *ptr, int arg) + static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) + { + int idx = U8((*ptr)++); ++ int r = 0; ++ + if (idx < ATOM_TABLE_NAMES_CNT) + SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]); + else + SDEBUG(" table: %d\n", idx); + if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) +- atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); ++ r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); ++ if (r) { ++ ctx->abort = true; ++ } + } + + static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) +@@ -673,6 +680,8 @@ static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg) + static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) + { + int execute = 0, target = U16(*ptr); ++ unsigned long cjiffies; ++ + (*ptr) += 2; + switch (arg) { + case ATOM_COND_ABOVE: +@@ -700,8 +709,25 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) + if (arg != ATOM_COND_ALWAYS) + SDEBUG(" taken: %s\n", execute ? "yes" : "no"); + SDEBUG(" target: 0x%04X\n", target); +- if (execute) ++ if (execute) { ++ if (ctx->last_jump == (ctx->start + target)) { ++ cjiffies = jiffies; ++ if (time_after(cjiffies, ctx->last_jump_jiffies)) { ++ cjiffies -= ctx->last_jump_jiffies; ++ if ((jiffies_to_msecs(cjiffies) > 1000)) { ++ DRM_ERROR("atombios stuck in loop for more than 1sec aborting\n"); ++ ctx->abort = true; ++ } ++ } else { ++ /* jiffies wrap around we will just wait a little longer */ ++ ctx->last_jump_jiffies = jiffies; ++ } ++ } else { ++ ctx->last_jump = ctx->start + target; ++ ctx->last_jump_jiffies = jiffies; ++ } + *ptr = ctx->start + target; ++ } + } + + static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) +@@ -1104,7 +1130,7 @@ static struct { + atom_op_shr, ATOM_ARG_MC}, { + atom_op_debug, 0},}; + +-static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) ++static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) + { + int base = CU16(ctx->cmd_table + 4 + 2 * index); + int len, ws, ps, ptr; +@@ -1112,7 +1138,7 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 + atom_exec_context ectx; + + if (!base) +- return; ++ return -EINVAL; + + len = CU16(base + ATOM_CT_SIZE_PTR); + ws = CU8(base + ATOM_CT_WS_PTR); +@@ -1125,6 +1151,8 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 + ectx.ps_shift = ps / 4; + ectx.start = base; + ectx.ps = params; ++ ectx.abort = false; ++ ectx.last_jump = 0; + if (ws) + ectx.ws = kzalloc(4 * ws, GFP_KERNEL); + else +@@ -1137,6 +1165,11 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 + SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1); + else + SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1); ++ if (ectx.abort) { ++ DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n", ++ base, len, ws, ps, ptr - 1); ++ return -EINVAL; ++ } + + if (op < ATOM_OP_CNT && op > 0) + opcode_table[op].func(&ectx, &ptr, +@@ -1152,10 +1185,13 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 + + if (ws) + kfree(ectx.ws); ++ return 0; + } + +-void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) ++int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) + { ++ int r; ++ + mutex_lock(&ctx->mutex); + /* reset reg block */ + ctx->reg_block = 0; +@@ -1163,8 +1199,9 @@ void 
atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) + ctx->fb_base = 0; + /* reset io mode */ + ctx->io_mode = ATOM_IO_MM; +- atom_execute_table_locked(ctx, index, params); ++ r = atom_execute_table_locked(ctx, index, params); + mutex_unlock(&ctx->mutex); ++ return r; + } + + static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; +@@ -1248,9 +1285,7 @@ int atom_asic_init(struct atom_context *ctx) + + if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) + return 1; +- atom_execute_table(ctx, ATOM_CMD_INIT, ps); +- +- return 0; ++ return atom_execute_table(ctx, ATOM_CMD_INIT, ps); + } + + void atom_destroy(struct atom_context *ctx) +diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h +index bc73781..1b26263 100644 +--- a/drivers/gpu/drm/radeon/atom.h ++++ b/drivers/gpu/drm/radeon/atom.h +@@ -140,7 +140,7 @@ struct atom_context { + extern int atom_debug; + + struct atom_context *atom_parse(struct card_info *, void *); +-void atom_execute_table(struct atom_context *, int, uint32_t *); ++int atom_execute_table(struct atom_context *, int, uint32_t *); + int atom_asic_init(struct atom_context *); + void atom_destroy(struct atom_context *); + void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start); +diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h +index 91ad0d1..6732b5d 100644 +--- a/drivers/gpu/drm/radeon/atombios.h ++++ b/drivers/gpu/drm/radeon/atombios.h +@@ -1,5 +1,5 @@ + /* +- * Copyright 2006-2007 Advanced Micro Devices, Inc. ++ * Copyright 2006-2007 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), +@@ -20,10 +20,12 @@ + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +-/****************************************************************************/ ++ ++/****************************************************************************/ + /*Portion I: Definitions shared between VBIOS and Driver */ + /****************************************************************************/ + ++ + #ifndef _ATOMBIOS_H + #define _ATOMBIOS_H + +@@ -40,39 +42,46 @@ + #endif + + #ifdef _H2INC +-#ifndef ULONG +-typedef unsigned long ULONG; +-#endif ++ #ifndef ULONG ++ typedef unsigned long ULONG; ++ #endif + +-#ifndef UCHAR +-typedef unsigned char UCHAR; +-#endif ++ #ifndef UCHAR ++ typedef unsigned char UCHAR; ++ #endif + +-#ifndef USHORT +-typedef unsigned short USHORT; +-#endif ++ #ifndef USHORT ++ typedef unsigned short USHORT; ++ #endif + #endif +- +-#define ATOM_DAC_A 0 ++ ++#define ATOM_DAC_A 0 + #define ATOM_DAC_B 1 + #define ATOM_EXT_DAC 2 + + #define ATOM_CRTC1 0 + #define ATOM_CRTC2 1 ++#define ATOM_CRTC3 2 ++#define ATOM_CRTC4 3 ++#define ATOM_CRTC5 4 ++#define ATOM_CRTC6 5 ++#define ATOM_CRTC_INVALID 0xFF + + #define ATOM_DIGA 0 + #define ATOM_DIGB 1 + + #define ATOM_PPLL1 0 + #define ATOM_PPLL2 1 ++#define ATOM_DCPLL 2 ++#define ATOM_PPLL_INVALID 0xFF + + #define ATOM_SCALER1 0 + #define ATOM_SCALER2 1 + +-#define ATOM_SCALER_DISABLE 0 +-#define ATOM_SCALER_CENTER 1 +-#define ATOM_SCALER_EXPANSION 2 +-#define ATOM_SCALER_MULTI_EX 3 ++#define ATOM_SCALER_DISABLE 0 ++#define ATOM_SCALER_CENTER 1 ++#define ATOM_SCALER_EXPANSION 2 ++#define ATOM_SCALER_MULTI_EX 3 + + #define ATOM_DISABLE 0 + #define ATOM_ENABLE 1 +@@ -82,6 +91,7 @@ typedef unsigned short USHORT; + #define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5) + #define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5) + #define ATOM_ENCODER_INIT (ATOM_DISABLE+7) ++#define ATOM_GET_STATUS (ATOM_DISABLE+8) + + #define ATOM_BLANKING 1 + #define ATOM_BLANKING_OFF 0 +@@ -114,7 +124,7 @@ typedef unsigned short USHORT; + #define ATOM_DAC2_CV ATOM_DAC1_CV + #define ATOM_DAC2_NTSC ATOM_DAC1_NTSC + #define ATOM_DAC2_PAL ATOM_DAC1_PAL +- ++ + #define ATOM_PM_ON 0 + #define ATOM_PM_STANDBY 1 + #define ATOM_PM_SUSPEND 2 +@@ -134,6 +144,7 @@ typedef unsigned short USHORT; + #define ATOM_PANEL_MISC_TEMPORAL 0x00000040 + #define ATOM_PANEL_MISC_API_ENABLED 0x00000080 + ++ + #define MEMTYPE_DDR1 "DDR1" + #define MEMTYPE_DDR2 "DDR2" + #define MEMTYPE_DDR3 "DDR3" +@@ -145,19 +156,19 @@ typedef unsigned short USHORT; + + /* Maximum size of that FireGL flag string */ + +-#define ATOM_FIREGL_FLAG_STRING "FGL" /* Flag used to enable FireGL Support */ +-#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 /* sizeof( ATOM_FIREGL_FLAG_STRING ) */ ++#define ATOM_FIREGL_FLAG_STRING "FGL" //Flag used to enable FireGL Support ++#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 //sizeof( ATOM_FIREGL_FLAG_STRING ) + +-#define ATOM_FAKE_DESKTOP_STRING "DSK" /* Flag used to enable mobile ASIC on Desktop */ +-#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING ++#define ATOM_FAKE_DESKTOP_STRING "DSK" //Flag used to enable mobile ASIC on Desktop ++#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING + +-#define ATOM_M54T_FLAG_STRING "M54T" /* Flag used to enable M54T Support */ +-#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 /* sizeof( ATOM_M54T_FLAG_STRING ) */ ++#define ATOM_M54T_FLAG_STRING "M54T" //Flag used to enable M54T Support ++#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 //sizeof( ATOM_M54T_FLAG_STRING ) + + #define HW_ASSISTED_I2C_STATUS_FAILURE 2 + #define HW_ASSISTED_I2C_STATUS_SUCCESS 1 + +-#pragma 
pack(1) /* BIOS data must use byte aligment */ ++#pragma pack(1) /* BIOS data must use byte aligment */ + + /* Define offset to location of ROM header. */ + +@@ -165,367 +176,410 @@ typedef unsigned short USHORT; + #define OFFSET_TO_ATOM_ROM_IMAGE_SIZE 0x00000002L + + #define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE 0x94 +-#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */ ++#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */ + #define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER 0x002f + #define OFFSET_TO_GET_ATOMBIOS_STRINGS_START 0x006e + + /* Common header for all ROM Data tables. +- Every table pointed _ATOM_MASTER_DATA_TABLE has this common header. ++ Every table pointed _ATOM_MASTER_DATA_TABLE has this common header. + And the pointer actually points to this header. */ + +-typedef struct _ATOM_COMMON_TABLE_HEADER { +- USHORT usStructureSize; +- UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */ +- UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */ +- /*Image can't be updated, while Driver needs to carry the new table! */ +-} ATOM_COMMON_TABLE_HEADER; +- +-typedef struct _ATOM_ROM_HEADER { +- ATOM_COMMON_TABLE_HEADER sHeader; +- UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios, +- atombios should init it as "ATOM", don't change the position */ +- USHORT usBiosRuntimeSegmentAddress; +- USHORT usProtectedModeInfoOffset; +- USHORT usConfigFilenameOffset; +- USHORT usCRC_BlockOffset; +- USHORT usBIOS_BootupMessageOffset; +- USHORT usInt10Offset; +- USHORT usPciBusDevInitCode; +- USHORT usIoBaseAddress; +- USHORT usSubsystemVendorID; +- USHORT usSubsystemID; +- USHORT usPCI_InfoOffset; +- USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */ +- USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */ +- UCHAR ucExtendedFunctionCode; +- UCHAR ucReserved; +-} ATOM_ROM_HEADER; ++typedef struct _ATOM_COMMON_TABLE_HEADER ++{ ++ USHORT usStructureSize; ++ UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */ ++ UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */ ++ /*Image can't be updated, while Driver needs to carry the new table! 
*/ ++}ATOM_COMMON_TABLE_HEADER; ++ ++typedef struct _ATOM_ROM_HEADER ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios, ++ atombios should init it as "ATOM", don't change the position */ ++ USHORT usBiosRuntimeSegmentAddress; ++ USHORT usProtectedModeInfoOffset; ++ USHORT usConfigFilenameOffset; ++ USHORT usCRC_BlockOffset; ++ USHORT usBIOS_BootupMessageOffset; ++ USHORT usInt10Offset; ++ USHORT usPciBusDevInitCode; ++ USHORT usIoBaseAddress; ++ USHORT usSubsystemVendorID; ++ USHORT usSubsystemID; ++ USHORT usPCI_InfoOffset; ++ USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */ ++ USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */ ++ UCHAR ucExtendedFunctionCode; ++ UCHAR ucReserved; ++}ATOM_ROM_HEADER; + + /*==============================Command Table Portion==================================== */ + + #ifdef UEFI_BUILD +-#define UTEMP USHORT +-#define USHORT void* ++ #define UTEMP USHORT ++ #define USHORT void* + #endif + +-typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES { +- USHORT ASIC_Init; /* Function Table, used by various SW components,latest version 1.1 */ +- USHORT GetDisplaySurfaceSize; /* Atomic Table, Used by Bios when enabling HW ICON */ +- USHORT ASIC_RegistersInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ +- USHORT VRAM_BlockVenderDetection; /* Atomic Table, used only by Bios */ +- USHORT DIGxEncoderControl; /* Only used by Bios */ +- USHORT MemoryControllerInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ +- USHORT EnableCRTCMemReq; /* Function Table,directly used by various SW components,latest version 2.1 */ +- USHORT MemoryParamAdjust; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed */ +- USHORT DVOEncoderControl; /* Function Table,directly used by various SW components,latest version 1.2 */ +- USHORT GPIOPinControl; /* Atomic Table, only used by Bios */ +- USHORT SetEngineClock; /*Function Table,directly used by various SW components,latest version 1.1 */ +- USHORT SetMemoryClock; /* Function Table,directly used by various SW components,latest version 1.1 */ +- USHORT SetPixelClock; /*Function Table,directly used by various SW components,latest version 1.2 */ +- USHORT DynamicClockGating; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ +- USHORT ResetMemoryDLL; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ +- USHORT ResetMemoryDevice; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ +- USHORT MemoryPLLInit; +- USHORT AdjustDisplayPll; /* only used by Bios */ +- USHORT AdjustMemoryController; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ +- USHORT EnableASIC_StaticPwrMgt; /* Atomic Table, only used by Bios */ +- USHORT ASIC_StaticPwrMgtStatusChange; /* Obsolete, only used by Bios */ +- USHORT DAC_LoadDetection; /* Atomic Table, directly used by various SW components,latest version 1.2 */ +- USHORT LVTMAEncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.3 */ +- USHORT LCD1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ +- USHORT DAC1EncoderControl; /* Atomic Table, directly used by various SW 
components,latest version 1.1 */ +- USHORT DAC2EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ +- USHORT DVOOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ +- USHORT CV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ +- USHORT GetConditionalGoldenSetting; /* only used by Bios */ +- USHORT TVEncoderControl; /* Function Table,directly used by various SW components,latest version 1.1 */ +- USHORT TMDSAEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */ +- USHORT LVDSEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */ +- USHORT TV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ +- USHORT EnableScaler; /* Atomic Table, used only by Bios */ +- USHORT BlankCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */ +- USHORT EnableCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */ +- USHORT GetPixelClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */ +- USHORT EnableVGA_Render; /* Function Table,directly used by various SW components,latest version 1.1 */ +- USHORT EnableVGA_Access; /* Obsolete , only used by Bios */ +- USHORT SetCRTC_Timing; /* Atomic Table, directly used by various SW components,latest version 1.1 */ +- USHORT SetCRTC_OverScan; /* Atomic Table, used by various SW components,latest version 1.1 */ +- USHORT SetCRTC_Replication; /* Atomic Table, used only by Bios */ +- USHORT SelectCRTC_Source; /* Atomic Table, directly used by various SW components,latest version 1.1 */ +- USHORT EnableGraphSurfaces; /* Atomic Table, used only by Bios */ +- USHORT UpdateCRTC_DoubleBufferRegisters; +- USHORT LUT_AutoFill; /* Atomic Table, only used by Bios */ +- USHORT EnableHW_IconCursor; /* Atomic Table, only used by Bios */ +- USHORT GetMemoryClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */ +- USHORT GetEngineClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */ +- USHORT SetCRTC_UsingDTDTiming; /* Atomic Table, directly used by various SW components,latest version 1.1 */ +- USHORT ExternalEncoderControl; /* Atomic Table, directly used by various SW components,latest version 2.1 */ +- USHORT LVTMAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ +- USHORT VRAM_BlockDetectionByStrap; /* Atomic Table, used only by Bios */ +- USHORT MemoryCleanUp; /* Atomic Table, only used by Bios */ +- USHORT ProcessI2cChannelTransaction; /* Function Table,only used by Bios */ +- USHORT WriteOneByteToHWAssistedI2C; /* Function Table,indirectly used by various SW components */ +- USHORT ReadHWAssistedI2CStatus; /* Atomic Table, indirectly used by various SW components */ +- USHORT SpeedFanControl; /* Function Table,indirectly used by various SW components,called from ASIC_Init */ +- USHORT PowerConnectorDetection; /* Atomic Table, directly used by various SW components,latest version 1.1 */ +- USHORT MC_Synchronization; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ +- USHORT ComputeMemoryEnginePLL; /* Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock */ +- USHORT MemoryRefreshConversion; /* Atomic Table, indirectly used by various SW components,called from SetMemory or 
SetEngineClock */ +- USHORT VRAM_GetCurrentInfoBlock; /* Atomic Table, used only by Bios */ +- USHORT DynamicMemorySettings; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ +- USHORT MemoryTraining; /* Atomic Table, used only by Bios */ +- USHORT EnableSpreadSpectrumOnPPLL; /* Atomic Table, directly used by various SW components,latest version 1.2 */ +- USHORT TMDSAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ +- USHORT SetVoltage; /* Function Table,directly and/or indirectly used by various SW components,latest version 1.1 */ +- USHORT DAC1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ +- USHORT DAC2OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ +- USHORT SetupHWAssistedI2CStatus; /* Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C" */ +- USHORT ClockSource; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ +- USHORT MemoryDeviceInit; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ +- USHORT EnableYUV; /* Atomic Table, indirectly used by various SW components,called from EnableVGARender */ +- USHORT DIG1EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ +- USHORT DIG2EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ +- USHORT DIG1TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ +- USHORT DIG2TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ +- USHORT ProcessAuxChannelTransaction; /* Function Table,only used by Bios */ +- USHORT DPEncoderService; /* Function Table,only used by Bios */ +-} ATOM_MASTER_LIST_OF_COMMAND_TABLES; +- +-/* For backward compatible */ ++typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{ ++ USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1 ++ USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON ++ USHORT ASIC_RegistersInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init ++ USHORT VRAM_BlockVenderDetection; //Atomic Table, used only by Bios ++ USHORT DIGxEncoderControl; //Only used by Bios ++ USHORT MemoryControllerInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init ++ USHORT EnableCRTCMemReq; //Function Table,directly used by various SW components,latest version 2.1 ++ USHORT MemoryParamAdjust; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed ++ USHORT DVOEncoderControl; //Function Table,directly used by various SW components,latest version 1.2 ++ USHORT GPIOPinControl; //Atomic Table, only used by Bios ++ USHORT SetEngineClock; //Function Table,directly used by various SW components,latest version 1.1 ++ USHORT SetMemoryClock; //Function Table,directly used by various SW components,latest version 1.1 ++ USHORT SetPixelClock; //Function Table,directly used by various SW components,latest version 1.2 ++ USHORT DynamicClockGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init ++ USHORT ResetMemoryDLL; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock ++ USHORT ResetMemoryDevice; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock ++ 
USHORT MemoryPLLInit; ++ USHORT AdjustDisplayPll; //only used by Bios ++ USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock ++ USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios ++ USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete , only used by Bios ++ USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2 ++ USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3 ++ USHORT LCD1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT DAC1EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT DAC2EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT DVOOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT CV1OutputControl; //Atomic Table, Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead ++ USHORT GetConditionalGoldenSetting; //only used by Bios ++ USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1 ++ USHORT TMDSAEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3 ++ USHORT LVDSEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3 ++ USHORT TV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead ++ USHORT EnableScaler; //Atomic Table, used only by Bios ++ USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT EnableCRTC; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT GetPixelClock; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT EnableVGA_Render; //Function Table,directly used by various SW components,latest version 1.1 ++ USHORT GetSCLKOverMCLKRatio; //Atomic Table, only used by Bios ++ USHORT SetCRTC_Timing; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT SetCRTC_OverScan; //Atomic Table, used by various SW components,latest version 1.1 ++ USHORT SetCRTC_Replication; //Atomic Table, used only by Bios ++ USHORT SelectCRTC_Source; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT EnableGraphSurfaces; //Atomic Table, used only by Bios ++ USHORT UpdateCRTC_DoubleBufferRegisters; ++ USHORT LUT_AutoFill; //Atomic Table, only used by Bios ++ USHORT EnableHW_IconCursor; //Atomic Table, only used by Bios ++ USHORT GetMemoryClock; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT GetEngineClock; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT SetCRTC_UsingDTDTiming; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT ExternalEncoderControl; //Atomic Table, directly used by various SW components,latest version 2.1 ++ USHORT LVTMAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT VRAM_BlockDetectionByStrap; //Atomic Table, used only by Bios ++ USHORT MemoryCleanUp; //Atomic Table, only used by Bios ++ USHORT ProcessI2cChannelTransaction; //Function Table,only used by Bios ++ USHORT WriteOneByteToHWAssistedI2C; //Function Table,indirectly used by various SW components ++ USHORT ReadHWAssistedI2CStatus; //Atomic Table, indirectly used by various SW components ++ USHORT 
SpeedFanControl; //Function Table,indirectly used by various SW components,called from ASIC_Init ++ USHORT PowerConnectorDetection; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT MC_Synchronization; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock ++ USHORT ComputeMemoryEnginePLL; //Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock ++ USHORT MemoryRefreshConversion; //Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock ++ USHORT VRAM_GetCurrentInfoBlock; //Atomic Table, used only by Bios ++ USHORT DynamicMemorySettings; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock ++ USHORT MemoryTraining; //Atomic Table, used only by Bios ++ USHORT EnableSpreadSpectrumOnPPLL; //Atomic Table, directly used by various SW components,latest version 1.2 ++ USHORT TMDSAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT SetVoltage; //Function Table,directly and/or indirectly used by various SW components,latest version 1.1 ++ USHORT DAC1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT DAC2OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 ++ USHORT SetupHWAssistedI2CStatus; //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C" ++ USHORT ClockSource; //Atomic Table, indirectly used by various SW components,called from ASIC_Init ++ USHORT MemoryDeviceInit; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock ++ USHORT EnableYUV; //Atomic Table, indirectly used by various SW components,called from EnableVGARender ++ USHORT DIG1EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1 ++ USHORT DIG2EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1 ++ USHORT DIG1TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1 ++ USHORT DIG2TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1 ++ USHORT ProcessAuxChannelTransaction; //Function Table,only used by Bios ++ USHORT DPEncoderService; //Function Table,only used by Bios ++}ATOM_MASTER_LIST_OF_COMMAND_TABLES; ++ ++// For backward compatible + #define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction + #define UNIPHYTransmitterControl DIG1TransmitterControl + #define LVTMATransmitterControl DIG2TransmitterControl + #define SetCRTC_DPM_State GetConditionalGoldenSetting + #define SetUniphyInstance ASIC_StaticPwrMgtStatusChange ++#define HPDInterruptService ReadHWAssistedI2CStatus ++#define EnableVGA_Access GetSCLKOverMCLKRatio + +-typedef struct _ATOM_MASTER_COMMAND_TABLE { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables; +-} ATOM_MASTER_COMMAND_TABLE; +- +-/****************************************************************************/ +-/* Structures used in every command table */ +-/****************************************************************************/ +-typedef struct _ATOM_TABLE_ATTRIBUTE { ++typedef struct _ATOM_MASTER_COMMAND_TABLE ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables; ++}ATOM_MASTER_COMMAND_TABLE; ++ ++/****************************************************************************/ ++// Structures used in every 
command table ++/****************************************************************************/ ++typedef struct _ATOM_TABLE_ATTRIBUTE ++{ + #if ATOM_BIG_ENDIAN +- USHORT UpdatedByUtility:1; /* [15]=Table updated by utility flag */ +- USHORT PS_SizeInBytes:7; /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */ +- USHORT WS_SizeInBytes:8; /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */ ++ USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag ++ USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword), ++ USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword), + #else +- USHORT WS_SizeInBytes:8; /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */ +- USHORT PS_SizeInBytes:7; /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */ +- USHORT UpdatedByUtility:1; /* [15]=Table updated by utility flag */ ++ USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword), ++ USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword), ++ USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag + #endif +-} ATOM_TABLE_ATTRIBUTE; +- +-typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS { +- ATOM_TABLE_ATTRIBUTE sbfAccess; +- USHORT susAccess; +-} ATOM_TABLE_ATTRIBUTE_ACCESS; ++}ATOM_TABLE_ATTRIBUTE; + +-/****************************************************************************/ +-/* Common header for all command tables. */ +-/* Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header. */ +-/* And the pointer actually points to this header. */ +-/****************************************************************************/ +-typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER { +- ATOM_COMMON_TABLE_HEADER CommonHeader; +- ATOM_TABLE_ATTRIBUTE TableAttribute; +-} ATOM_COMMON_ROM_COMMAND_TABLE_HEADER; ++typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS ++{ ++ ATOM_TABLE_ATTRIBUTE sbfAccess; ++ USHORT susAccess; ++}ATOM_TABLE_ATTRIBUTE_ACCESS; ++ ++/****************************************************************************/ ++// Common header for all command tables. ++// Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header. ++// And the pointer actually points to this header. 
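As a quick illustration of how the packed attribute word defined here is meant to be consumed, a parser can read it through the ATOM_TABLE_ATTRIBUTE_ACCESS union instead of masking bits by hand. The helper below is only a sketch and is not part of this patch: the function name is invented, and it assumes the typedefs from this header plus the usual kernel printk machinery are in scope.

static void atom_show_table_attribute(USHORT raw)
{
        ATOM_TABLE_ATTRIBUTE_ACCESS access;

        /* Load the raw 16-bit word that follows the common table header. */
        access.susAccess = raw;
        /* The bitfield view exposes the layout documented above: workspace
         * size in [7:0], parameter space size in [14:8], and the
         * updated-by-utility flag in [15]. */
        printk(KERN_DEBUG "atom table: ws=%u ps=%u updated=%u\n",
               (unsigned int)access.sbfAccess.WS_SizeInBytes,
               (unsigned int)access.sbfAccess.PS_SizeInBytes,
               (unsigned int)access.sbfAccess.UpdatedByUtility);
}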
++/****************************************************************************/ ++typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER ++{ ++ ATOM_COMMON_TABLE_HEADER CommonHeader; ++ ATOM_TABLE_ATTRIBUTE TableAttribute; ++}ATOM_COMMON_ROM_COMMAND_TABLE_HEADER; + +-/****************************************************************************/ +-/* Structures used by ComputeMemoryEnginePLLTable */ +-/****************************************************************************/ ++/****************************************************************************/ ++// Structures used by ComputeMemoryEnginePLLTable ++/****************************************************************************/ + #define COMPUTE_MEMORY_PLL_PARAM 1 + #define COMPUTE_ENGINE_PLL_PARAM 2 + +-typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS { +- ULONG ulClock; /* When returen, it's the re-calculated clock based on given Fb_div Post_Div and ref_div */ +- UCHAR ucAction; /* 0:reserved //1:Memory //2:Engine */ +- UCHAR ucReserved; /* may expand to return larger Fbdiv later */ +- UCHAR ucFbDiv; /* return value */ +- UCHAR ucPostDiv; /* return value */ +-} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS; +- +-typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 { +- ULONG ulClock; /* When return, [23:0] return real clock */ +- UCHAR ucAction; /* 0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. it return ref_div to be written to register */ +- USHORT usFbDiv; /* return Feedback value to be written to register */ +- UCHAR ucPostDiv; /* return post div to be written to register */ +-} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2; ++typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS ++{ ++ ULONG ulClock; //When returen, it's the re-calculated clock based on given Fb_div Post_Div and ref_div ++ UCHAR ucAction; //0:reserved //1:Memory //2:Engine ++ UCHAR ucReserved; //may expand to return larger Fbdiv later ++ UCHAR ucFbDiv; //return value ++ UCHAR ucPostDiv; //return value ++}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS; ++ ++typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 ++{ ++ ULONG ulClock; //When return, [23:0] return real clock ++ UCHAR ucAction; //0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. 
it return ref_div to be written to register ++ USHORT usFbDiv; //return Feedback value to be written to register ++ UCHAR ucPostDiv; //return post div to be written to register ++}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2; + #define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS + +-#define SET_CLOCK_FREQ_MASK 0x00FFFFFF /* Clock change tables only take bit [23:0] as the requested clock value */ +-#define USE_NON_BUS_CLOCK_MASK 0x01000000 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */ +-#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */ +-#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */ +-#define FIRST_TIME_CHANGE_CLOCK 0x08000000 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */ +-#define SKIP_SW_PROGRAM_PLL 0x10000000 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */ ++ ++#define SET_CLOCK_FREQ_MASK 0x00FFFFFF //Clock change tables only take bit [23:0] as the requested clock value ++#define USE_NON_BUS_CLOCK_MASK 0x01000000 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) ++#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 //Only applicable to memory clock change, when set, using memory self refresh during clock transition ++#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change ++#define FIRST_TIME_CHANGE_CLOCK 0x08000000 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup ++#define SKIP_SW_PROGRAM_PLL 0x10000000 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL + #define USE_SS_ENABLED_PIXEL_CLOCK USE_NON_BUS_CLOCK_MASK + +-#define b3USE_NON_BUS_CLOCK_MASK 0x01 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */ +-#define b3USE_MEMORY_SELF_REFRESH 0x02 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */ +-#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */ +-#define b3FIRST_TIME_CHANGE_CLOCK 0x08 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */ +-#define b3SKIP_SW_PROGRAM_PLL 0x10 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */ ++#define b3USE_NON_BUS_CLOCK_MASK 0x01 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) ++#define b3USE_MEMORY_SELF_REFRESH 0x02 //Only applicable to memory clock change, when set, using memory self refresh during clock transition ++#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 //Only applicable to memory clock change, 
when set, the table will skip predefined internal memory parameter change ++#define b3FIRST_TIME_CHANGE_CLOCK 0x08 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup ++#define b3SKIP_SW_PROGRAM_PLL 0x10 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL + +-typedef struct _ATOM_COMPUTE_CLOCK_FREQ { ++typedef struct _ATOM_COMPUTE_CLOCK_FREQ ++{ + #if ATOM_BIG_ENDIAN +- ULONG ulComputeClockFlag:8; /* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */ +- ULONG ulClockFreq:24; /* in unit of 10kHz */ ++ ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM ++ ULONG ulClockFreq:24; // in unit of 10kHz + #else +- ULONG ulClockFreq:24; /* in unit of 10kHz */ +- ULONG ulComputeClockFlag:8; /* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */ ++ ULONG ulClockFreq:24; // in unit of 10kHz ++ ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM + #endif +-} ATOM_COMPUTE_CLOCK_FREQ; +- +-typedef struct _ATOM_S_MPLL_FB_DIVIDER { +- USHORT usFbDivFrac; +- USHORT usFbDiv; +-} ATOM_S_MPLL_FB_DIVIDER; ++}ATOM_COMPUTE_CLOCK_FREQ; + +-typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 { +- union { +- ATOM_COMPUTE_CLOCK_FREQ ulClock; /* Input Parameter */ +- ATOM_S_MPLL_FB_DIVIDER ulFbDiv; /* Output Parameter */ +- }; +- UCHAR ucRefDiv; /* Output Parameter */ +- UCHAR ucPostDiv; /* Output Parameter */ +- UCHAR ucCntlFlag; /* Output Parameter */ +- UCHAR ucReserved; +-} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3; ++typedef struct _ATOM_S_MPLL_FB_DIVIDER ++{ ++ USHORT usFbDivFrac; ++ USHORT usFbDiv; ++}ATOM_S_MPLL_FB_DIVIDER; + +-/* ucCntlFlag */ ++typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 ++{ ++ union ++ { ++ ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter ++ ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter ++ }; ++ UCHAR ucRefDiv; //Output Parameter ++ UCHAR ucPostDiv; //Output Parameter ++ UCHAR ucCntlFlag; //Output Parameter ++ UCHAR ucReserved; ++}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3; ++ ++// ucCntlFlag + #define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN 1 + #define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE 2 + #define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE 4 ++#define ATOM_PLL_CNTL_FLAG_SPLL_ISPARE_9 8 + +-typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER { +- ATOM_COMPUTE_CLOCK_FREQ ulClock; +- ULONG ulReserved[2]; +-} DYNAMICE_MEMORY_SETTINGS_PARAMETER; +- +-typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER { +- ATOM_COMPUTE_CLOCK_FREQ ulClock; +- ULONG ulMemoryClock; +- ULONG ulReserved; +-} DYNAMICE_ENGINE_SETTINGS_PARAMETER; +- +-/****************************************************************************/ +-/* Structures used by SetEngineClockTable */ +-/****************************************************************************/ +-typedef struct _SET_ENGINE_CLOCK_PARAMETERS { +- ULONG ulTargetEngineClock; /* In 10Khz unit */ +-} SET_ENGINE_CLOCK_PARAMETERS; + +-typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION { +- ULONG ulTargetEngineClock; /* In 10Khz unit */ +- COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved; +-} SET_ENGINE_CLOCK_PS_ALLOCATION; ++// V4 are only used for APU which PLL outside GPU ++typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 ++{ ++#if ATOM_BIG_ENDIAN ++ ULONG ucPostDiv; //return parameter: post divider which is used to program to register directly ++ ULONG ulClock:24; //Input= target clock, output = actual clock ++#else ++ ULONG 
ulClock:24; //Input= target clock, output = actual clock ++ ULONG ucPostDiv; //return parameter: post divider which is used to program to register directly ++#endif ++}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4; + +-/****************************************************************************/ +-/* Structures used by SetMemoryClockTable */ +-/****************************************************************************/ +-typedef struct _SET_MEMORY_CLOCK_PARAMETERS { +- ULONG ulTargetMemoryClock; /* In 10Khz unit */ +-} SET_MEMORY_CLOCK_PARAMETERS; ++typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER ++{ ++ ATOM_COMPUTE_CLOCK_FREQ ulClock; ++ ULONG ulReserved[2]; ++}DYNAMICE_MEMORY_SETTINGS_PARAMETER; + +-typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION { +- ULONG ulTargetMemoryClock; /* In 10Khz unit */ +- COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved; +-} SET_MEMORY_CLOCK_PS_ALLOCATION; ++typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER ++{ ++ ATOM_COMPUTE_CLOCK_FREQ ulClock; ++ ULONG ulMemoryClock; ++ ULONG ulReserved; ++}DYNAMICE_ENGINE_SETTINGS_PARAMETER; ++ ++/****************************************************************************/ ++// Structures used by SetEngineClockTable ++/****************************************************************************/ ++typedef struct _SET_ENGINE_CLOCK_PARAMETERS ++{ ++ ULONG ulTargetEngineClock; //In 10Khz unit ++}SET_ENGINE_CLOCK_PARAMETERS; + +-/****************************************************************************/ +-/* Structures used by ASIC_Init.ctb */ +-/****************************************************************************/ +-typedef struct _ASIC_INIT_PARAMETERS { +- ULONG ulDefaultEngineClock; /* In 10Khz unit */ +- ULONG ulDefaultMemoryClock; /* In 10Khz unit */ +-} ASIC_INIT_PARAMETERS; ++typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION ++{ ++ ULONG ulTargetEngineClock; //In 10Khz unit ++ COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved; ++}SET_ENGINE_CLOCK_PS_ALLOCATION; ++ ++/****************************************************************************/ ++// Structures used by SetMemoryClockTable ++/****************************************************************************/ ++typedef struct _SET_MEMORY_CLOCK_PARAMETERS ++{ ++ ULONG ulTargetMemoryClock; //In 10Khz unit ++}SET_MEMORY_CLOCK_PARAMETERS; + +-typedef struct _ASIC_INIT_PS_ALLOCATION { +- ASIC_INIT_PARAMETERS sASICInitClocks; +- SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; /* Caller doesn't need to init this structure */ +-} ASIC_INIT_PS_ALLOCATION; ++typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION ++{ ++ ULONG ulTargetMemoryClock; //In 10Khz unit ++ COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved; ++}SET_MEMORY_CLOCK_PS_ALLOCATION; ++ ++/****************************************************************************/ ++// Structures used by ASIC_Init.ctb ++/****************************************************************************/ ++typedef struct _ASIC_INIT_PARAMETERS ++{ ++ ULONG ulDefaultEngineClock; //In 10Khz unit ++ ULONG ulDefaultMemoryClock; //In 10Khz unit ++}ASIC_INIT_PARAMETERS; + +-/****************************************************************************/ +-/* Structure used by DynamicClockGatingTable.ctb */ +-/****************************************************************************/ +-typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS { +- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ +- UCHAR ucPadding[3]; +-} DYNAMIC_CLOCK_GATING_PARAMETERS; ++typedef struct _ASIC_INIT_PS_ALLOCATION ++{ ++ 
ASIC_INIT_PARAMETERS sASICInitClocks; ++ SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; //Caller doesn't need to init this structure ++}ASIC_INIT_PS_ALLOCATION; ++ ++/****************************************************************************/ ++// Structure used by DynamicClockGatingTable.ctb ++/****************************************************************************/ ++typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS ++{ ++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE ++ UCHAR ucPadding[3]; ++}DYNAMIC_CLOCK_GATING_PARAMETERS; + #define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS + +-/****************************************************************************/ +-/* Structure used by EnableASIC_StaticPwrMgtTable.ctb */ +-/****************************************************************************/ +-typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS { +- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ +- UCHAR ucPadding[3]; +-} ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS; ++/****************************************************************************/ ++// Structure used by EnableASIC_StaticPwrMgtTable.ctb ++/****************************************************************************/ ++typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS ++{ ++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE ++ UCHAR ucPadding[3]; ++}ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS; + #define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS + +-/****************************************************************************/ +-/* Structures used by DAC_LoadDetectionTable.ctb */ +-/****************************************************************************/ +-typedef struct _DAC_LOAD_DETECTION_PARAMETERS { +- USHORT usDeviceID; /* {ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT} */ +- UCHAR ucDacType; /* {ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC} */ +- UCHAR ucMisc; /* Valid only when table revision =1.3 and above */ +-} DAC_LOAD_DETECTION_PARAMETERS; ++/****************************************************************************/ ++// Structures used by DAC_LoadDetectionTable.ctb ++/****************************************************************************/ ++typedef struct _DAC_LOAD_DETECTION_PARAMETERS ++{ ++ USHORT usDeviceID; //{ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT} ++ UCHAR ucDacType; //{ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC} ++ UCHAR ucMisc; //Valid only when table revision =1.3 and above ++}DAC_LOAD_DETECTION_PARAMETERS; + +-/* DAC_LOAD_DETECTION_PARAMETERS.ucMisc */ ++// DAC_LOAD_DETECTION_PARAMETERS.ucMisc + #define DAC_LOAD_MISC_YPrPb 0x01 + +-typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION { +- DAC_LOAD_DETECTION_PARAMETERS sDacload; +- ULONG Reserved[2]; /* Don't set this one, allocation for EXT DAC */ +-} DAC_LOAD_DETECTION_PS_ALLOCATION; +- +-/****************************************************************************/ +-/* Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb */ +-/****************************************************************************/ +-typedef struct _DAC_ENCODER_CONTROL_PARAMETERS { +- USHORT usPixelClock; /* in 10KHz; for bios convenient */ +- UCHAR ucDacStandard; /* See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0) */ +- UCHAR ucAction; /* 0: turn off encoder */ +- /* 1: setup and turn on encoder */ +- /* 7: ATOM_ENCODER_INIT Initialize DAC */ +-} DAC_ENCODER_CONTROL_PARAMETERS; 
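The DAC load-detection block defined here is one of the simpler parameter structures to fill: only the device and DAC selection matter, ucMisc is honoured from table revision 1.3 on, and the trailing Reserved words are scratch space for the external-DAC path. A minimal, hypothetical sketch of a caller preparing it (not taken from this patch; the device and DAC constants mentioned in the comments are the ones named in the structure's own documentation):

static DAC_LOAD_DETECTION_PS_ALLOCATION
example_dac_load_detection_args(USHORT device, UCHAR dac_type)
{
        DAC_LOAD_DETECTION_PS_ALLOCATION args;

        args.sDacload.usDeviceID = device;   /* e.g. an ATOM_DEVICE_CRTx/TVx/CVx_SUPPORT value */
        args.sDacload.ucDacType  = dac_type; /* ATOM_DAC_A, ATOM_DAC_B or ATOM_EXT_DAC */
        args.sDacload.ucMisc     = 0;        /* only valid for table revision 1.3 and above */
        /* args.Reserved[] is allocation space for the EXT DAC case and is
         * deliberately left untouched, as the structure comment requests. */
        return args;
}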
++typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION ++{ ++ DAC_LOAD_DETECTION_PARAMETERS sDacload; ++ ULONG Reserved[2];// Don't set this one, allocation for EXT DAC ++}DAC_LOAD_DETECTION_PS_ALLOCATION; ++ ++/****************************************************************************/ ++// Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb ++/****************************************************************************/ ++typedef struct _DAC_ENCODER_CONTROL_PARAMETERS ++{ ++ USHORT usPixelClock; // in 10KHz; for bios convenient ++ UCHAR ucDacStandard; // See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0) ++ UCHAR ucAction; // 0: turn off encoder ++ // 1: setup and turn on encoder ++ // 7: ATOM_ENCODER_INIT Initialize DAC ++}DAC_ENCODER_CONTROL_PARAMETERS; + + #define DAC_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PARAMETERS + +-/****************************************************************************/ +-/* Structures used by DIG1EncoderControlTable */ +-/* DIG2EncoderControlTable */ +-/* ExternalEncoderControlTable */ +-/****************************************************************************/ +-typedef struct _DIG_ENCODER_CONTROL_PARAMETERS { +- USHORT usPixelClock; /* in 10KHz; for bios convenient */ +- UCHAR ucConfig; +- /* [2] Link Select: */ +- /* =0: PHY linkA if bfLane<3 */ +- /* =1: PHY linkB if bfLanes<3 */ +- /* =0: PHY linkA+B if bfLanes=3 */ +- /* [3] Transmitter Sel */ +- /* =0: UNIPHY or PCIEPHY */ +- /* =1: LVTMA */ +- UCHAR ucAction; /* =0: turn off encoder */ +- /* =1: turn on encoder */ +- UCHAR ucEncoderMode; +- /* =0: DP encoder */ +- /* =1: LVDS encoder */ +- /* =2: DVI encoder */ +- /* =3: HDMI encoder */ +- /* =4: SDVO encoder */ +- UCHAR ucLaneNum; /* how many lanes to enable */ +- UCHAR ucReserved[2]; +-} DIG_ENCODER_CONTROL_PARAMETERS; ++/****************************************************************************/ ++// Structures used by DIG1EncoderControlTable ++// DIG2EncoderControlTable ++// ExternalEncoderControlTable ++/****************************************************************************/ ++typedef struct _DIG_ENCODER_CONTROL_PARAMETERS ++{ ++ USHORT usPixelClock; // in 10KHz; for bios convenient ++ UCHAR ucConfig; ++ // [2] Link Select: ++ // =0: PHY linkA if bfLane<3 ++ // =1: PHY linkB if bfLanes<3 ++ // =0: PHY linkA+B if bfLanes=3 ++ // [3] Transmitter Sel ++ // =0: UNIPHY or PCIEPHY ++ // =1: LVTMA ++ UCHAR ucAction; // =0: turn off encoder ++ // =1: turn on encoder ++ UCHAR ucEncoderMode; ++ // =0: DP encoder ++ // =1: LVDS encoder ++ // =2: DVI encoder ++ // =3: HDMI encoder ++ // =4: SDVO encoder ++ UCHAR ucLaneNum; // how many lanes to enable ++ UCHAR ucReserved[2]; ++}DIG_ENCODER_CONTROL_PARAMETERS; + #define DIG_ENCODER_CONTROL_PS_ALLOCATION DIG_ENCODER_CONTROL_PARAMETERS + #define EXTERNAL_ENCODER_CONTROL_PARAMETER DIG_ENCODER_CONTROL_PARAMETERS + +-/* ucConfig */ ++//ucConfig + #define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01 + #define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00 + #define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01 +@@ -539,52 +593,57 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS { + #define ATOM_ENCODER_CONFIG_LVTMA 0x08 + #define ATOM_ENCODER_CONFIG_TRANSMITTER1 0x00 + #define ATOM_ENCODER_CONFIG_TRANSMITTER2 0x08 +-#define ATOM_ENCODER_CONFIG_DIGB 0x80 /* VBIOS Internal use, outside SW should set this bit=0 */ +-/* ucAction */ +-/* ATOM_ENABLE: Enable Encoder */ +-/* ATOM_DISABLE: Disable Encoder */ ++#define 
ATOM_ENCODER_CONFIG_DIGB 0x80 // VBIOS Internal use, outside SW should set this bit=0 ++// ucAction ++// ATOM_ENABLE: Enable Encoder ++// ATOM_DISABLE: Disable Encoder + +-/* ucEncoderMode */ ++//ucEncoderMode + #define ATOM_ENCODER_MODE_DP 0 + #define ATOM_ENCODER_MODE_LVDS 1 + #define ATOM_ENCODER_MODE_DVI 2 + #define ATOM_ENCODER_MODE_HDMI 3 + #define ATOM_ENCODER_MODE_SDVO 4 ++#define ATOM_ENCODER_MODE_DP_AUDIO 5 + #define ATOM_ENCODER_MODE_TV 13 + #define ATOM_ENCODER_MODE_CV 14 + #define ATOM_ENCODER_MODE_CRT 15 + +-typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 { ++typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 ++{ + #if ATOM_BIG_ENDIAN +- UCHAR ucReserved1:2; +- UCHAR ucTransmitterSel:2; /* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */ +- UCHAR ucLinkSel:1; /* =0: linkA/C/E =1: linkB/D/F */ +- UCHAR ucReserved:1; +- UCHAR ucDPLinkRate:1; /* =0: 1.62Ghz, =1: 2.7Ghz */ ++ UCHAR ucReserved1:2; ++ UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF ++ UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F ++ UCHAR ucReserved:1; ++ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz + #else +- UCHAR ucDPLinkRate:1; /* =0: 1.62Ghz, =1: 2.7Ghz */ +- UCHAR ucReserved:1; +- UCHAR ucLinkSel:1; /* =0: linkA/C/E =1: linkB/D/F */ +- UCHAR ucTransmitterSel:2; /* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */ +- UCHAR ucReserved1:2; ++ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz ++ UCHAR ucReserved:1; ++ UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F ++ UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF ++ UCHAR ucReserved1:2; + #endif +-} ATOM_DIG_ENCODER_CONFIG_V2; ++}ATOM_DIG_ENCODER_CONFIG_V2; + +-typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 { +- USHORT usPixelClock; /* in 10KHz; for bios convenient */ +- ATOM_DIG_ENCODER_CONFIG_V2 acConfig; +- UCHAR ucAction; +- UCHAR ucEncoderMode; +- /* =0: DP encoder */ +- /* =1: LVDS encoder */ +- /* =2: DVI encoder */ +- /* =3: HDMI encoder */ +- /* =4: SDVO encoder */ +- UCHAR ucLaneNum; /* how many lanes to enable */ +- UCHAR ucReserved[2]; +-} DIG_ENCODER_CONTROL_PARAMETERS_V2; + +-/* ucConfig */ ++typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 ++{ ++ USHORT usPixelClock; // in 10KHz; for bios convenient ++ ATOM_DIG_ENCODER_CONFIG_V2 acConfig; ++ UCHAR ucAction; ++ UCHAR ucEncoderMode; ++ // =0: DP encoder ++ // =1: LVDS encoder ++ // =2: DVI encoder ++ // =3: HDMI encoder ++ // =4: SDVO encoder ++ UCHAR ucLaneNum; // how many lanes to enable ++ UCHAR ucStatus; // = DP_LINK_TRAINING_COMPLETE or DP_LINK_TRAINING_INCOMPLETE, only used by VBIOS with command ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS ++ UCHAR ucReserved; ++}DIG_ENCODER_CONTROL_PARAMETERS_V2; ++ ++//ucConfig + #define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK 0x01 + #define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ 0x00 + #define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ 0x01 +@@ -596,58 +655,122 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 { + #define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2 0x08 + #define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3 0x10 + +-/****************************************************************************/ +-/* Structures used by UNIPHYTransmitterControlTable */ +-/* LVTMATransmitterControlTable */ +-/* DVOOutputControlTable */ +-/****************************************************************************/ +-typedef struct _ATOM_DP_VS_MODE { +- UCHAR ucLaneSel; +- UCHAR ucLaneSet; +-} ATOM_DP_VS_MODE; +- +-typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS { +- union { +- USHORT usPixelClock; /* in 10KHz; for bios convenient */ +- 
USHORT usInitInfo; /* when init uniphy,lower 8bit is used for connector type defined in objectid.h */ +- ATOM_DP_VS_MODE asMode; /* DP Voltage swing mode */ ++// ucAction: ++// ATOM_DISABLE ++// ATOM_ENABLE ++#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08 ++#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09 ++#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a ++#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b ++#define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c ++#define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d ++#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS 0x0e ++#define ATOM_ENCODER_CMD_SETUP 0x0f ++ ++// ucStatus ++#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10 ++#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00 ++ ++// Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver ++typedef struct _ATOM_DIG_ENCODER_CONFIG_V3 ++{ ++#if ATOM_BIG_ENDIAN ++ UCHAR ucReserved1:1; ++ UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F ++ UCHAR ucReserved:3; ++ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz ++#else ++ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz ++ UCHAR ucReserved:3; ++ UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F ++ UCHAR ucReserved1:1; ++#endif ++}ATOM_DIG_ENCODER_CONFIG_V3; ++ ++#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70 ++ ++ ++typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3 ++{ ++ USHORT usPixelClock; // in 10KHz; for bios convenient ++ ATOM_DIG_ENCODER_CONFIG_V3 acConfig; ++ UCHAR ucAction; ++ UCHAR ucEncoderMode; ++ // =0: DP encoder ++ // =1: LVDS encoder ++ // =2: DVI encoder ++ // =3: HDMI encoder ++ // =4: SDVO encoder ++ // =5: DP audio ++ UCHAR ucLaneNum; // how many lanes to enable ++ UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP ++ UCHAR ucReserved; ++}DIG_ENCODER_CONTROL_PARAMETERS_V3; ++ ++ ++// define ucBitPerColor: ++#define PANEL_BPC_UNDEFINE 0x00 ++#define PANEL_6BIT_PER_COLOR 0x01 ++#define PANEL_8BIT_PER_COLOR 0x02 ++#define PANEL_10BIT_PER_COLOR 0x03 ++#define PANEL_12BIT_PER_COLOR 0x04 ++#define PANEL_16BIT_PER_COLOR 0x05 ++ ++/****************************************************************************/ ++// Structures used by UNIPHYTransmitterControlTable ++// LVTMATransmitterControlTable ++// DVOOutputControlTable ++/****************************************************************************/ ++typedef struct _ATOM_DP_VS_MODE ++{ ++ UCHAR ucLaneSel; ++ UCHAR ucLaneSet; ++}ATOM_DP_VS_MODE; ++ ++typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS ++{ ++ union ++ { ++ USHORT usPixelClock; // in 10KHz; for bios convenient ++ USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h ++ ATOM_DP_VS_MODE asMode; // DP Voltage swing mode + }; +- UCHAR ucConfig; +- /* [0]=0: 4 lane Link, */ +- /* =1: 8 lane Link ( Dual Links TMDS ) */ +- /* [1]=0: InCoherent mode */ +- /* =1: Coherent Mode */ +- /* [2] Link Select: */ +- /* =0: PHY linkA if bfLane<3 */ +- /* =1: PHY linkB if bfLanes<3 */ +- /* =0: PHY linkA+B if bfLanes=3 */ +- /* [5:4]PCIE lane Sel */ +- /* =0: lane 0~3 or 0~7 */ +- /* =1: lane 4~7 */ +- /* =2: lane 8~11 or 8~15 */ +- /* =3: lane 12~15 */ +- UCHAR ucAction; /* =0: turn off encoder */ +- /* =1: turn on encoder */ +- UCHAR ucReserved[4]; +-} DIG_TRANSMITTER_CONTROL_PARAMETERS; +- +-#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS +- +-/* ucInitInfo */ +-#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff +- +-/* ucConfig */ ++ UCHAR 
ucConfig; ++ // [0]=0: 4 lane Link, ++ // =1: 8 lane Link ( Dual Links TMDS ) ++ // [1]=0: InCoherent mode ++ // =1: Coherent Mode ++ // [2] Link Select: ++ // =0: PHY linkA if bfLane<3 ++ // =1: PHY linkB if bfLanes<3 ++ // =0: PHY linkA+B if bfLanes=3 ++ // [5:4]PCIE lane Sel ++ // =0: lane 0~3 or 0~7 ++ // =1: lane 4~7 ++ // =2: lane 8~11 or 8~15 ++ // =3: lane 12~15 ++ UCHAR ucAction; // =0: turn off encoder ++ // =1: turn on encoder ++ UCHAR ucReserved[4]; ++}DIG_TRANSMITTER_CONTROL_PARAMETERS; ++ ++#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS ++ ++//ucInitInfo ++#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff ++ ++//ucConfig + #define ATOM_TRANSMITTER_CONFIG_8LANE_LINK 0x01 + #define ATOM_TRANSMITTER_CONFIG_COHERENT 0x02 + #define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK 0x04 + #define ATOM_TRANSMITTER_CONFIG_LINKA 0x00 + #define ATOM_TRANSMITTER_CONFIG_LINKB 0x04 +-#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00 ++#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00 + #define ATOM_TRANSMITTER_CONFIG_LINKB_A 0x04 + +-#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */ +-#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */ +-#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */ ++#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE ++#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 // only used when ATOM_TRANSMITTER_ACTION_ENABLE ++#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE + + #define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK 0x30 + #define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL 0x00 +@@ -661,7 +784,7 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS { + #define ATOM_TRANSMITTER_CONFIG_LANE_8_15 0x80 + #define ATOM_TRANSMITTER_CONFIG_LANE_12_15 0xc0 + +-/* ucAction */ ++//ucAction + #define ATOM_TRANSMITTER_ACTION_DISABLE 0 + #define ATOM_TRANSMITTER_ACTION_ENABLE 1 + #define ATOM_TRANSMITTER_ACTION_LCD_BLOFF 2 +@@ -674,93 +797,168 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS { + #define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT 9 + #define ATOM_TRANSMITTER_ACTION_SETUP 10 + #define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH 11 ++#define ATOM_TRANSMITTER_ACTION_POWER_ON 12 ++#define ATOM_TRANSMITTER_ACTION_POWER_OFF 13 + +-/* Following are used for DigTransmitterControlTable ver1.2 */ +-typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2 { ++// Following are used for DigTransmitterControlTable ver1.2 ++typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2 ++{ + #if ATOM_BIG_ENDIAN +- UCHAR ucTransmitterSel:2; /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */ +- /* =1 Dig Transmitter 2 ( Uniphy CD ) */ +- /* =2 Dig Transmitter 3 ( Uniphy EF ) */ +- UCHAR ucReserved:1; +- UCHAR fDPConnector:1; /* bit4=0: DP connector =1: None DP connector */ +- UCHAR ucEncoderSel:1; /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */ +- UCHAR ucLinkSel:1; /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */ +- /* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. 
when fDualLinkConnector=1, it means master link of dual link is B or D or F */ +- +- UCHAR fCoherentMode:1; /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */ +- UCHAR fDualLinkConnector:1; /* bit0=1: Dual Link DVI connector */ ++ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) ++ // =1 Dig Transmitter 2 ( Uniphy CD ) ++ // =2 Dig Transmitter 3 ( Uniphy EF ) ++ UCHAR ucReserved:1; ++ UCHAR fDPConnector:1; //bit4=0: DP connector =1: None DP connector ++ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) ++ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E ++ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F ++ ++ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) ++ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector + #else +- UCHAR fDualLinkConnector:1; /* bit0=1: Dual Link DVI connector */ +- UCHAR fCoherentMode:1; /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */ +- UCHAR ucLinkSel:1; /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */ +- /* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */ +- UCHAR ucEncoderSel:1; /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */ +- UCHAR fDPConnector:1; /* bit4=0: DP connector =1: None DP connector */ +- UCHAR ucReserved:1; +- UCHAR ucTransmitterSel:2; /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */ +- /* =1 Dig Transmitter 2 ( Uniphy CD ) */ +- /* =2 Dig Transmitter 3 ( Uniphy EF ) */ ++ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector ++ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) ++ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E ++ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F ++ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). 
=1: Data/clk path source from DIGB ( DIG inst1 ) ++ UCHAR fDPConnector:1; //bit4=0: DP connector =1: None DP connector ++ UCHAR ucReserved:1; ++ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) ++ // =1 Dig Transmitter 2 ( Uniphy CD ) ++ // =2 Dig Transmitter 3 ( Uniphy EF ) + #endif +-} ATOM_DIG_TRANSMITTER_CONFIG_V2; ++}ATOM_DIG_TRANSMITTER_CONFIG_V2; + +-/* ucConfig */ +-/* Bit0 */ ++//ucConfig ++//Bit0 + #define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR 0x01 + +-/* Bit1 */ ++//Bit1 + #define ATOM_TRANSMITTER_CONFIG_V2_COHERENT 0x02 + +-/* Bit2 */ ++//Bit2 + #define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK 0x04 +-#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00 ++#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00 + #define ATOM_TRANSMITTER_CONFIG_V2_LINKB 0x04 + +-/* Bit3 */ ++// Bit3 + #define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK 0x08 +-#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 /* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */ +-#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 /* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */ ++#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP ++#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP + +-/* Bit4 */ ++// Bit4 + #define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR 0x10 + +-/* Bit7:6 */ ++// Bit7:6 + #define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK 0xC0 +-#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 /* AB */ +-#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 /* CD */ +-#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 /* EF */ +- +-typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 { +- union { +- USHORT usPixelClock; /* in 10KHz; for bios convenient */ +- USHORT usInitInfo; /* when init uniphy,lower 8bit is used for connector type defined in objectid.h */ +- ATOM_DP_VS_MODE asMode; /* DP Voltage swing mode */ ++#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 //AB ++#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 //CD ++#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 //EF ++ ++typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 ++{ ++ union ++ { ++ USHORT usPixelClock; // in 10KHz; for bios convenient ++ USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h ++ ATOM_DP_VS_MODE asMode; // DP Voltage swing mode + }; +- ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig; +- UCHAR ucAction; /* define as ATOM_TRANSMITER_ACTION_XXX */ +- UCHAR ucReserved[4]; +-} DIG_TRANSMITTER_CONTROL_PARAMETERS_V2; ++ ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig; ++ UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX ++ UCHAR ucReserved[4]; ++}DIG_TRANSMITTER_CONTROL_PARAMETERS_V2; + +-/****************************************************************************/ +-/* Structures used by DAC1OuputControlTable */ +-/* DAC2OuputControlTable */ +-/* LVTMAOutputControlTable (Before DEC30) */ +-/* TMDSAOutputControlTable (Before DEC30) */ +-/****************************************************************************/ +-typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS { +- UCHAR ucAction; /* Possible input:ATOM_ENABLE||ATOMDISABLE */ +- /* When the display is LCD, in addition to above: */ +- /* ATOM_LCD_BLOFF|| ATOM_LCD_BLON 
||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START|| */ +- /* ATOM_LCD_SELFTEST_STOP */ ++typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3 ++{ ++#if ATOM_BIG_ENDIAN ++ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) ++ // =1 Dig Transmitter 2 ( Uniphy CD ) ++ // =2 Dig Transmitter 3 ( Uniphy EF ) ++ UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2 ++ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F ++ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E ++ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F ++ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) ++ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector ++#else ++ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector ++ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) ++ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E ++ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F ++ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F ++ UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2 ++ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) ++ // =1 Dig Transmitter 2 ( Uniphy CD ) ++ // =2 Dig Transmitter 3 ( Uniphy EF ) ++#endif ++}ATOM_DIG_TRANSMITTER_CONFIG_V3; + +- UCHAR aucPadding[3]; /* padding to DWORD aligned */ +-} DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS; ++typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 ++{ ++ union ++ { ++ USHORT usPixelClock; // in 10KHz; for bios convenient ++ USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h ++ ATOM_DP_VS_MODE asMode; // DP Voltage swing mode ++ }; ++ ATOM_DIG_TRANSMITTER_CONFIG_V3 acConfig; ++ UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX ++ UCHAR ucLaneNum; ++ UCHAR ucReserved[3]; ++}DIG_TRANSMITTER_CONTROL_PARAMETERS_V3; ++ ++//ucConfig ++//Bit0 ++#define ATOM_TRANSMITTER_CONFIG_V3_DUAL_LINK_CONNECTOR 0x01 ++ ++//Bit1 ++#define ATOM_TRANSMITTER_CONFIG_V3_COHERENT 0x02 ++ ++//Bit2 ++#define ATOM_TRANSMITTER_CONFIG_V3_LINK_SEL_MASK 0x04 ++#define ATOM_TRANSMITTER_CONFIG_V3_LINKA 0x00 ++#define ATOM_TRANSMITTER_CONFIG_V3_LINKB 0x04 ++ ++// Bit3 ++#define ATOM_TRANSMITTER_CONFIG_V3_ENCODER_SEL_MASK 0x08 ++#define ATOM_TRANSMITTER_CONFIG_V3_DIG1_ENCODER 0x00 ++#define ATOM_TRANSMITTER_CONFIG_V3_DIG2_ENCODER 0x08 ++ ++// Bit5:4 ++#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SEL_MASK 0x30 ++#define ATOM_TRASMITTER_CONFIG_V3_P1PLL 0x00 ++#define ATOM_TRASMITTER_CONFIG_V3_P2PLL 0x10 ++#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SRC_EXT 0x20 ++ ++// Bit7:6 ++#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER_SEL_MASK 0xC0 ++#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER1 0x00 //AB ++#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD ++#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF ++ ++/****************************************************************************/ ++// Structures used by DAC1OuputControlTable ++// DAC2OuputControlTable ++// LVTMAOutputControlTable (Before DEC30) ++// TMDSAOutputControlTable (Before DEC30) 
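Tying the V3 transmitter bitfields together: acConfig encodes the same bits that the ATOM_TRANSMITTER_CONFIG_V3_* masks defined just above describe. A hypothetical, field-based sketch of filling the V3 parameter block (not part of this patch; the particular link, transmitter and clock source chosen here are arbitrary):

static DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
example_dig_transmitter_v3_args(USHORT pixel_clock_10khz, UCHAR lane_num)
{
        DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 args;

        args.usPixelClock = pixel_clock_10khz;   /* in 10 kHz units */
        args.acConfig.fDualLinkConnector = 0;    /* single-link connector */
        args.acConfig.fCoherentMode      = 1;    /* coherent mode for DVI/HDMI */
        args.acConfig.ucLinkSel          = 0;    /* link A/C/E */
        args.acConfig.ucEncoderSel       = 0;    /* data/clk path from DIGA/C/E */
        args.acConfig.ucRefClkSource     = 0;    /* PPLL1 */
        args.acConfig.ucTransmitterSel   = 1;    /* Dig Transmitter 2 (Uniphy CD) */
        args.ucAction  = ATOM_TRANSMITTER_ACTION_ENABLE;
        args.ucLaneNum = lane_num;
        args.ucReserved[0] = args.ucReserved[1] = args.ucReserved[2] = 0;
        return args;
}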
++/****************************************************************************/ ++typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS ++{ ++ UCHAR ucAction; // Possible input:ATOM_ENABLE||ATOMDISABLE ++ // When the display is LCD, in addition to above: ++ // ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START|| ++ // ATOM_LCD_SELFTEST_STOP ++ ++ UCHAR aucPadding[3]; // padding to DWORD aligned ++}DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS; + + #define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS + +-#define CRT1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS ++ ++#define CRT1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS + #define CRT1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION + +-#define CRT2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS ++#define CRT2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS + #define CRT2_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION + + #define CV1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS +@@ -782,397 +980,550 @@ typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS { + #define DVO_OUTPUT_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PS_ALLOCATION + #define DVO_OUTPUT_CONTROL_PARAMETERS_V3 DIG_TRANSMITTER_CONTROL_PARAMETERS + +-/****************************************************************************/ +-/* Structures used by BlankCRTCTable */ +-/****************************************************************************/ +-typedef struct _BLANK_CRTC_PARAMETERS { +- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ +- UCHAR ucBlanking; /* ATOM_BLANKING or ATOM_BLANKINGOFF */ +- USHORT usBlackColorRCr; +- USHORT usBlackColorGY; +- USHORT usBlackColorBCb; +-} BLANK_CRTC_PARAMETERS; ++/****************************************************************************/ ++// Structures used by BlankCRTCTable ++/****************************************************************************/ ++typedef struct _BLANK_CRTC_PARAMETERS ++{ ++ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 ++ UCHAR ucBlanking; // ATOM_BLANKING or ATOM_BLANKINGOFF ++ USHORT usBlackColorRCr; ++ USHORT usBlackColorGY; ++ USHORT usBlackColorBCb; ++}BLANK_CRTC_PARAMETERS; + #define BLANK_CRTC_PS_ALLOCATION BLANK_CRTC_PARAMETERS + +-/****************************************************************************/ +-/* Structures used by EnableCRTCTable */ +-/* EnableCRTCMemReqTable */ +-/* UpdateCRTC_DoubleBufferRegistersTable */ +-/****************************************************************************/ +-typedef struct _ENABLE_CRTC_PARAMETERS { +- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ +- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ +- UCHAR ucPadding[2]; +-} ENABLE_CRTC_PARAMETERS; ++/****************************************************************************/ ++// Structures used by EnableCRTCTable ++// EnableCRTCMemReqTable ++// UpdateCRTC_DoubleBufferRegistersTable ++/****************************************************************************/ ++typedef struct _ENABLE_CRTC_PARAMETERS ++{ ++ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 ++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE ++ UCHAR ucPadding[2]; ++}ENABLE_CRTC_PARAMETERS; + #define ENABLE_CRTC_PS_ALLOCATION ENABLE_CRTC_PARAMETERS + +-/****************************************************************************/ +-/* Structures used by SetCRTC_OverScanTable */ 
+-/****************************************************************************/ +-typedef struct _SET_CRTC_OVERSCAN_PARAMETERS { +- USHORT usOverscanRight; /* right */ +- USHORT usOverscanLeft; /* left */ +- USHORT usOverscanBottom; /* bottom */ +- USHORT usOverscanTop; /* top */ +- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ +- UCHAR ucPadding[3]; +-} SET_CRTC_OVERSCAN_PARAMETERS; ++/****************************************************************************/ ++// Structures used by SetCRTC_OverScanTable ++/****************************************************************************/ ++typedef struct _SET_CRTC_OVERSCAN_PARAMETERS ++{ ++ USHORT usOverscanRight; // right ++ USHORT usOverscanLeft; // left ++ USHORT usOverscanBottom; // bottom ++ USHORT usOverscanTop; // top ++ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 ++ UCHAR ucPadding[3]; ++}SET_CRTC_OVERSCAN_PARAMETERS; + #define SET_CRTC_OVERSCAN_PS_ALLOCATION SET_CRTC_OVERSCAN_PARAMETERS + +-/****************************************************************************/ +-/* Structures used by SetCRTC_ReplicationTable */ +-/****************************************************************************/ +-typedef struct _SET_CRTC_REPLICATION_PARAMETERS { +- UCHAR ucH_Replication; /* horizontal replication */ +- UCHAR ucV_Replication; /* vertical replication */ +- UCHAR usCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ +- UCHAR ucPadding; +-} SET_CRTC_REPLICATION_PARAMETERS; ++/****************************************************************************/ ++// Structures used by SetCRTC_ReplicationTable ++/****************************************************************************/ ++typedef struct _SET_CRTC_REPLICATION_PARAMETERS ++{ ++ UCHAR ucH_Replication; // horizontal replication ++ UCHAR ucV_Replication; // vertical replication ++ UCHAR usCRTC; // ATOM_CRTC1 or ATOM_CRTC2 ++ UCHAR ucPadding; ++}SET_CRTC_REPLICATION_PARAMETERS; + #define SET_CRTC_REPLICATION_PS_ALLOCATION SET_CRTC_REPLICATION_PARAMETERS + +-/****************************************************************************/ +-/* Structures used by SelectCRTC_SourceTable */ +-/****************************************************************************/ +-typedef struct _SELECT_CRTC_SOURCE_PARAMETERS { +- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ +- UCHAR ucDevice; /* ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|.... */ +- UCHAR ucPadding[2]; +-} SELECT_CRTC_SOURCE_PARAMETERS; ++/****************************************************************************/ ++// Structures used by SelectCRTC_SourceTable ++/****************************************************************************/ ++typedef struct _SELECT_CRTC_SOURCE_PARAMETERS ++{ ++ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 ++ UCHAR ucDevice; // ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|.... 
++ UCHAR ucPadding[2]; ++}SELECT_CRTC_SOURCE_PARAMETERS; + #define SELECT_CRTC_SOURCE_PS_ALLOCATION SELECT_CRTC_SOURCE_PARAMETERS + +-typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2 { +- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ +- UCHAR ucEncoderID; /* DAC1/DAC2/TVOUT/DIG1/DIG2/DVO */ +- UCHAR ucEncodeMode; /* Encoding mode, only valid when using DIG1/DIG2/DVO */ +- UCHAR ucPadding; +-} SELECT_CRTC_SOURCE_PARAMETERS_V2; +- +-/* ucEncoderID */ +-/* #define ASIC_INT_DAC1_ENCODER_ID 0x00 */ +-/* #define ASIC_INT_TV_ENCODER_ID 0x02 */ +-/* #define ASIC_INT_DIG1_ENCODER_ID 0x03 */ +-/* #define ASIC_INT_DAC2_ENCODER_ID 0x04 */ +-/* #define ASIC_EXT_TV_ENCODER_ID 0x06 */ +-/* #define ASIC_INT_DVO_ENCODER_ID 0x07 */ +-/* #define ASIC_INT_DIG2_ENCODER_ID 0x09 */ +-/* #define ASIC_EXT_DIG_ENCODER_ID 0x05 */ +- +-/* ucEncodeMode */ +-/* #define ATOM_ENCODER_MODE_DP 0 */ +-/* #define ATOM_ENCODER_MODE_LVDS 1 */ +-/* #define ATOM_ENCODER_MODE_DVI 2 */ +-/* #define ATOM_ENCODER_MODE_HDMI 3 */ +-/* #define ATOM_ENCODER_MODE_SDVO 4 */ +-/* #define ATOM_ENCODER_MODE_TV 13 */ +-/* #define ATOM_ENCODER_MODE_CV 14 */ +-/* #define ATOM_ENCODER_MODE_CRT 15 */ +- +-/****************************************************************************/ +-/* Structures used by SetPixelClockTable */ +-/* GetPixelClockTable */ +-/****************************************************************************/ +-/* Major revision=1., Minor revision=1 */ +-typedef struct _PIXEL_CLOCK_PARAMETERS { +- USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */ +- /* 0 means disable PPLL */ +- USHORT usRefDiv; /* Reference divider */ +- USHORT usFbDiv; /* feedback divider */ +- UCHAR ucPostDiv; /* post divider */ +- UCHAR ucFracFbDiv; /* fractional feedback divider */ +- UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */ +- UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATO_NONPJITTER */ +- UCHAR ucCRTC; /* Which CRTC uses this Ppll */ +- UCHAR ucPadding; +-} PIXEL_CLOCK_PARAMETERS; +- +-/* Major revision=1., Minor revision=2, add ucMiscIfno */ +-/* ucMiscInfo: */ ++typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2 ++{ ++ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 ++ UCHAR ucEncoderID; // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO ++ UCHAR ucEncodeMode; // Encoding mode, only valid when using DIG1/DIG2/DVO ++ UCHAR ucPadding; ++}SELECT_CRTC_SOURCE_PARAMETERS_V2; ++ ++//ucEncoderID ++//#define ASIC_INT_DAC1_ENCODER_ID 0x00 ++//#define ASIC_INT_TV_ENCODER_ID 0x02 ++//#define ASIC_INT_DIG1_ENCODER_ID 0x03 ++//#define ASIC_INT_DAC2_ENCODER_ID 0x04 ++//#define ASIC_EXT_TV_ENCODER_ID 0x06 ++//#define ASIC_INT_DVO_ENCODER_ID 0x07 ++//#define ASIC_INT_DIG2_ENCODER_ID 0x09 ++//#define ASIC_EXT_DIG_ENCODER_ID 0x05 ++ ++//ucEncodeMode ++//#define ATOM_ENCODER_MODE_DP 0 ++//#define ATOM_ENCODER_MODE_LVDS 1 ++//#define ATOM_ENCODER_MODE_DVI 2 ++//#define ATOM_ENCODER_MODE_HDMI 3 ++//#define ATOM_ENCODER_MODE_SDVO 4 ++//#define ATOM_ENCODER_MODE_TV 13 ++//#define ATOM_ENCODER_MODE_CV 14 ++//#define ATOM_ENCODER_MODE_CRT 15 ++ ++/****************************************************************************/ ++// Structures used by SetPixelClockTable ++// GetPixelClockTable ++/****************************************************************************/ ++//Major revision=1., Minor revision=1 ++typedef struct _PIXEL_CLOCK_PARAMETERS ++{ ++ USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) ++ // 0 means disable PPLL ++ USHORT usRefDiv; // Reference divider ++ USHORT usFbDiv; // feedback 
divider ++ UCHAR ucPostDiv; // post divider ++ UCHAR ucFracFbDiv; // fractional feedback divider ++ UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2 ++ UCHAR ucRefDivSrc; // ATOM_PJITTER or ATO_NONPJITTER ++ UCHAR ucCRTC; // Which CRTC uses this Ppll ++ UCHAR ucPadding; ++}PIXEL_CLOCK_PARAMETERS; ++ ++//Major revision=1., Minor revision=2, add ucMiscIfno ++//ucMiscInfo: + #define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1 + #define MISC_DEVICE_INDEX_MASK 0xF0 + #define MISC_DEVICE_INDEX_SHIFT 4 + +-typedef struct _PIXEL_CLOCK_PARAMETERS_V2 { +- USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */ +- /* 0 means disable PPLL */ +- USHORT usRefDiv; /* Reference divider */ +- USHORT usFbDiv; /* feedback divider */ +- UCHAR ucPostDiv; /* post divider */ +- UCHAR ucFracFbDiv; /* fractional feedback divider */ +- UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */ +- UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATO_NONPJITTER */ +- UCHAR ucCRTC; /* Which CRTC uses this Ppll */ +- UCHAR ucMiscInfo; /* Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog */ +-} PIXEL_CLOCK_PARAMETERS_V2; +- +-/* Major revision=1., Minor revision=3, structure/definition change */ +-/* ucEncoderMode: */ +-/* ATOM_ENCODER_MODE_DP */ +-/* ATOM_ENOCDER_MODE_LVDS */ +-/* ATOM_ENOCDER_MODE_DVI */ +-/* ATOM_ENOCDER_MODE_HDMI */ +-/* ATOM_ENOCDER_MODE_SDVO */ +-/* ATOM_ENCODER_MODE_TV 13 */ +-/* ATOM_ENCODER_MODE_CV 14 */ +-/* ATOM_ENCODER_MODE_CRT 15 */ +- +-/* ucDVOConfig */ +-/* #define DVO_ENCODER_CONFIG_RATE_SEL 0x01 */ +-/* #define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 */ +-/* #define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 */ +-/* #define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c */ +-/* #define DVO_ENCODER_CONFIG_LOW12BIT 0x00 */ +-/* #define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 */ +-/* #define DVO_ENCODER_CONFIG_24BIT 0x08 */ +- +-/* ucMiscInfo: also changed, see below */ ++typedef struct _PIXEL_CLOCK_PARAMETERS_V2 ++{ ++ USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) ++ // 0 means disable PPLL ++ USHORT usRefDiv; // Reference divider ++ USHORT usFbDiv; // feedback divider ++ UCHAR ucPostDiv; // post divider ++ UCHAR ucFracFbDiv; // fractional feedback divider ++ UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2 ++ UCHAR ucRefDivSrc; // ATOM_PJITTER or ATO_NONPJITTER ++ UCHAR ucCRTC; // Which CRTC uses this Ppll ++ UCHAR ucMiscInfo; // Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog ++}PIXEL_CLOCK_PARAMETERS_V2; ++ ++//Major revision=1., Minor revision=3, structure/definition change ++//ucEncoderMode: ++//ATOM_ENCODER_MODE_DP ++//ATOM_ENOCDER_MODE_LVDS ++//ATOM_ENOCDER_MODE_DVI ++//ATOM_ENOCDER_MODE_HDMI ++//ATOM_ENOCDER_MODE_SDVO ++//ATOM_ENCODER_MODE_TV 13 ++//ATOM_ENCODER_MODE_CV 14 ++//ATOM_ENCODER_MODE_CRT 15 ++ ++//ucDVOConfig ++//#define DVO_ENCODER_CONFIG_RATE_SEL 0x01 ++//#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 ++//#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 ++//#define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c ++//#define DVO_ENCODER_CONFIG_LOW12BIT 0x00 ++//#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 ++//#define DVO_ENCODER_CONFIG_24BIT 0x08 ++ ++//ucMiscInfo: also changed, see below + #define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL 0x01 + #define PIXEL_CLOCK_MISC_VGA_MODE 0x02 + #define PIXEL_CLOCK_MISC_CRTC_SEL_MASK 0x04 + #define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1 0x00 + #define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2 0x04 + #define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK 0x08 ++#define PIXEL_CLOCK_MISC_REF_DIV_SRC 0x10 ++// 
V1.4 for RoadRunner ++#define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10 ++#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20 + +-typedef struct _PIXEL_CLOCK_PARAMETERS_V3 { +- USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */ +- /* 0 means disable PPLL. For VGA PPLL,make sure this value is not 0. */ +- USHORT usRefDiv; /* Reference divider */ +- USHORT usFbDiv; /* feedback divider */ +- UCHAR ucPostDiv; /* post divider */ +- UCHAR ucFracFbDiv; /* fractional feedback divider */ +- UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */ +- UCHAR ucTransmitterId; /* graphic encoder id defined in objectId.h */ +- union { +- UCHAR ucEncoderMode; /* encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/ */ +- UCHAR ucDVOConfig; /* when use DVO, need to know SDR/DDR, 12bit or 24bit */ ++typedef struct _PIXEL_CLOCK_PARAMETERS_V3 ++{ ++ USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) ++ // 0 means disable PPLL. For VGA PPLL,make sure this value is not 0. ++ USHORT usRefDiv; // Reference divider ++ USHORT usFbDiv; // feedback divider ++ UCHAR ucPostDiv; // post divider ++ UCHAR ucFracFbDiv; // fractional feedback divider ++ UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2 ++ UCHAR ucTransmitterId; // graphic encoder id defined in objectId.h ++ union ++ { ++ UCHAR ucEncoderMode; // encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/ ++ UCHAR ucDVOConfig; // when use DVO, need to know SDR/DDR, 12bit or 24bit + }; +- UCHAR ucMiscInfo; /* bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel */ +- /* bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source */ +-} PIXEL_CLOCK_PARAMETERS_V3; ++ UCHAR ucMiscInfo; // bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel ++ // bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source ++ // bit[4]=0:use XTALIN as the source of reference divider,=1 use the pre-defined clock as the source of reference divider ++}PIXEL_CLOCK_PARAMETERS_V3; + + #define PIXEL_CLOCK_PARAMETERS_LAST PIXEL_CLOCK_PARAMETERS_V2 + #define GET_PIXEL_CLOCK_PS_ALLOCATION PIXEL_CLOCK_PARAMETERS_LAST + +-/****************************************************************************/ +-/* Structures used by AdjustDisplayPllTable */ +-/****************************************************************************/ +-typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS { ++typedef struct _PIXEL_CLOCK_PARAMETERS_V5 ++{ ++ UCHAR ucCRTC; // ATOM_CRTC1~6, indicate the CRTC controller to ++ // drive the pixel clock. not used for DCPLL case. ++ union{ ++ UCHAR ucReserved; ++ UCHAR ucFracFbDiv; // [gphan] temporary to prevent build problem. remove it after driver code is changed. ++ }; ++ USHORT usPixelClock; // target the pixel clock to drive the CRTC timing ++ // 0 means disable PPLL/DCPLL. ++ USHORT usFbDiv; // feedback divider integer part. ++ UCHAR ucPostDiv; // post divider. ++ UCHAR ucRefDiv; // Reference divider ++ UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL ++ UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h, ++ // indicate which graphic encoder will be used. ++ UCHAR ucEncoderMode; // Encoder mode: ++ UCHAR ucMiscInfo; // bit[0]= Force program PPLL ++ // bit[1]= when VGA timing is used. ++ // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp ++ // bit[4]= RefClock source for PPLL. ++ // =0: XTLAIN( default mode ) ++ // =1: other external clock source, which is pre-defined ++ // by VBIOS depend on the feature required. 
++ // bit[7:5]: reserved. ++ ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 ) ++ ++}PIXEL_CLOCK_PARAMETERS_V5; ++ ++#define PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL 0x01 ++#define PIXEL_CLOCK_V5_MISC_VGA_MODE 0x02 ++#define PIXEL_CLOCK_V5_MISC_HDMI_BPP_MASK 0x0c ++#define PIXEL_CLOCK_V5_MISC_HDMI_24BPP 0x00 ++#define PIXEL_CLOCK_V5_MISC_HDMI_30BPP 0x04 ++#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08 ++#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10 ++ ++typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2 ++{ ++ PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput; ++}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2; ++ ++typedef struct _GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2 ++{ ++ UCHAR ucStatus; ++ UCHAR ucRefDivSrc; // =1: reference clock source from XTALIN, =0: source from PCIE ref clock ++ UCHAR ucReserved[2]; ++}GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2; ++ ++typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3 ++{ ++ PIXEL_CLOCK_PARAMETERS_V5 sDispClkInput; ++}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3; ++ ++/****************************************************************************/ ++// Structures used by AdjustDisplayPllTable ++/****************************************************************************/ ++typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS ++{ + USHORT usPixelClock; + UCHAR ucTransmitterID; + UCHAR ucEncodeMode; +- union { +- UCHAR ucDVOConfig; /* if DVO, need passing link rate and output 12bitlow or 24bit */ +- UCHAR ucConfig; /* if none DVO, not defined yet */ ++ union ++ { ++ UCHAR ucDVOConfig; //if DVO, need passing link rate and output 12bitlow or 24bit ++ UCHAR ucConfig; //if none DVO, not defined yet + }; + UCHAR ucReserved[3]; +-} ADJUST_DISPLAY_PLL_PARAMETERS; ++}ADJUST_DISPLAY_PLL_PARAMETERS; + + #define ADJUST_DISPLAY_CONFIG_SS_ENABLE 0x10 +- + #define ADJUST_DISPLAY_PLL_PS_ALLOCATION ADJUST_DISPLAY_PLL_PARAMETERS + +-/****************************************************************************/ +-/* Structures used by EnableYUVTable */ +-/****************************************************************************/ +-typedef struct _ENABLE_YUV_PARAMETERS { +- UCHAR ucEnable; /* ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB) */ +- UCHAR ucCRTC; /* Which CRTC needs this YUV or RGB format */ +- UCHAR ucPadding[2]; +-} ENABLE_YUV_PARAMETERS; ++typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 ++{ ++ USHORT usPixelClock; // target pixel clock ++ UCHAR ucTransmitterID; // transmitter id defined in objectid.h ++ UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI ++ UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX ++ UCHAR ucReserved[3]; ++}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3; ++ ++// usDispPllConfig v1.2 for RoadRunner ++#define DISPPLL_CONFIG_DVO_RATE_SEL 0x0001 // need only when ucTransmitterID = DVO ++#define DISPPLL_CONFIG_DVO_DDR_SPEED 0x0000 // need only when ucTransmitterID = DVO ++#define DISPPLL_CONFIG_DVO_SDR_SPEED 0x0001 // need only when ucTransmitterID = DVO ++#define DISPPLL_CONFIG_DVO_OUTPUT_SEL 0x000c // need only when ucTransmitterID = DVO ++#define DISPPLL_CONFIG_DVO_LOW12BIT 0x0000 // need only when ucTransmitterID = DVO ++#define DISPPLL_CONFIG_DVO_UPPER12BIT 0x0004 // need only when ucTransmitterID = DVO ++#define DISPPLL_CONFIG_DVO_24BIT 0x0008 // need only when ucTransmitterID = DVO ++#define DISPPLL_CONFIG_SS_ENABLE 0x0010 // Only used when ucEncoderMode = DP or LVDS ++#define DISPPLL_CONFIG_COHERENT_MODE 0x0020 // 
Only used when ucEncoderMode = TMDS or HDMI ++#define DISPPLL_CONFIG_DUAL_LINK 0x0040 // Only used when ucEncoderMode = TMDS or LVDS ++ ++ ++typedef struct _ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 ++{ ++ ULONG ulDispPllFreq; // return display PPLL freq which is used to generate the pixclock, and related idclk, symclk etc ++ UCHAR ucRefDiv; // if it is none-zero, it is used to be calculated the other ppll parameter fb_divider and post_div ( if it is not given ) ++ UCHAR ucPostDiv; // if it is none-zero, it is used to be calculated the other ppll parameter fb_divider ++ UCHAR ucReserved[2]; ++}ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3; ++ ++typedef struct _ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 ++{ ++ union ++ { ++ ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 sInput; ++ ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 sOutput; ++ }; ++} ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3; ++ ++/****************************************************************************/ ++// Structures used by EnableYUVTable ++/****************************************************************************/ ++typedef struct _ENABLE_YUV_PARAMETERS ++{ ++ UCHAR ucEnable; // ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB) ++ UCHAR ucCRTC; // Which CRTC needs this YUV or RGB format ++ UCHAR ucPadding[2]; ++}ENABLE_YUV_PARAMETERS; + #define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS + +-/****************************************************************************/ +-/* Structures used by GetMemoryClockTable */ +-/****************************************************************************/ +-typedef struct _GET_MEMORY_CLOCK_PARAMETERS { +- ULONG ulReturnMemoryClock; /* current memory speed in 10KHz unit */ ++/****************************************************************************/ ++// Structures used by GetMemoryClockTable ++/****************************************************************************/ ++typedef struct _GET_MEMORY_CLOCK_PARAMETERS ++{ ++ ULONG ulReturnMemoryClock; // current memory speed in 10KHz unit + } GET_MEMORY_CLOCK_PARAMETERS; + #define GET_MEMORY_CLOCK_PS_ALLOCATION GET_MEMORY_CLOCK_PARAMETERS + +-/****************************************************************************/ +-/* Structures used by GetEngineClockTable */ +-/****************************************************************************/ +-typedef struct _GET_ENGINE_CLOCK_PARAMETERS { +- ULONG ulReturnEngineClock; /* current engine speed in 10KHz unit */ ++/****************************************************************************/ ++// Structures used by GetEngineClockTable ++/****************************************************************************/ ++typedef struct _GET_ENGINE_CLOCK_PARAMETERS ++{ ++ ULONG ulReturnEngineClock; // current engine speed in 10KHz unit + } GET_ENGINE_CLOCK_PARAMETERS; + #define GET_ENGINE_CLOCK_PS_ALLOCATION GET_ENGINE_CLOCK_PARAMETERS + +-/****************************************************************************/ +-/* Following Structures and constant may be obsolete */ +-/****************************************************************************/ +-/* Maxium 8 bytes,the data read in will be placed in the parameter space. 
*/ +-/* Read operaion successeful when the paramter space is non-zero, otherwise read operation failed */ +-typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS { +- USHORT usPrescale; /* Ratio between Engine clock and I2C clock */ +- USHORT usVRAMAddress; /* Adress in Frame Buffer where to pace raw EDID */ +- USHORT usStatus; /* When use output: lower byte EDID checksum, high byte hardware status */ +- /* WHen use input: lower byte as 'byte to read':currently limited to 128byte or 1byte */ +- UCHAR ucSlaveAddr; /* Read from which slave */ +- UCHAR ucLineNumber; /* Read from which HW assisted line */ +-} READ_EDID_FROM_HW_I2C_DATA_PARAMETERS; ++/****************************************************************************/ ++// Following Structures and constant may be obsolete ++/****************************************************************************/ ++//Maxium 8 bytes,the data read in will be placed in the parameter space. ++//Read operaion successeful when the paramter space is non-zero, otherwise read operation failed ++typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS ++{ ++ USHORT usPrescale; //Ratio between Engine clock and I2C clock ++ USHORT usVRAMAddress; //Adress in Frame Buffer where to pace raw EDID ++ USHORT usStatus; //When use output: lower byte EDID checksum, high byte hardware status ++ //WHen use input: lower byte as 'byte to read':currently limited to 128byte or 1byte ++ UCHAR ucSlaveAddr; //Read from which slave ++ UCHAR ucLineNumber; //Read from which HW assisted line ++}READ_EDID_FROM_HW_I2C_DATA_PARAMETERS; + #define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION READ_EDID_FROM_HW_I2C_DATA_PARAMETERS + ++ + #define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE 0 + #define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES 1 + #define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK 2 + #define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK 3 + #define ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK 4 + +-typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS { +- USHORT usPrescale; /* Ratio between Engine clock and I2C clock */ +- USHORT usByteOffset; /* Write to which byte */ +- /* Upper portion of usByteOffset is Format of data */ +- /* 1bytePS+offsetPS */ +- /* 2bytesPS+offsetPS */ +- /* blockID+offsetPS */ +- /* blockID+offsetID */ +- /* blockID+counterID+offsetID */ +- UCHAR ucData; /* PS data1 */ +- UCHAR ucStatus; /* Status byte 1=success, 2=failure, Also is used as PS data2 */ +- UCHAR ucSlaveAddr; /* Write to which slave */ +- UCHAR ucLineNumber; /* Write from which HW assisted line */ +-} WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS; ++typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS ++{ ++ USHORT usPrescale; //Ratio between Engine clock and I2C clock ++ USHORT usByteOffset; //Write to which byte ++ //Upper portion of usByteOffset is Format of data ++ //1bytePS+offsetPS ++ //2bytesPS+offsetPS ++ //blockID+offsetPS ++ //blockID+offsetID ++ //blockID+counterID+offsetID ++ UCHAR ucData; //PS data1 ++ UCHAR ucStatus; //Status byte 1=success, 2=failure, Also is used as PS data2 ++ UCHAR ucSlaveAddr; //Write to which slave ++ UCHAR ucLineNumber; //Write from which HW assisted line ++}WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS; + + #define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS + +-typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS { +- USHORT usPrescale; /* Ratio between Engine clock and I2C clock */ +- UCHAR ucSlaveAddr; /* Write to which slave */ +- UCHAR ucLineNumber; /* Write from which HW assisted line */ +-} 
SET_UP_HW_I2C_DATA_PARAMETERS; ++typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS ++{ ++ USHORT usPrescale; //Ratio between Engine clock and I2C clock ++ UCHAR ucSlaveAddr; //Write to which slave ++ UCHAR ucLineNumber; //Write from which HW assisted line ++}SET_UP_HW_I2C_DATA_PARAMETERS; ++ + + /**************************************************************************/ + #define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS + +-/****************************************************************************/ +-/* Structures used by PowerConnectorDetectionTable */ +-/****************************************************************************/ +-typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS { +- UCHAR ucPowerConnectorStatus; /* Used for return value 0: detected, 1:not detected */ +- UCHAR ucPwrBehaviorId; +- USHORT usPwrBudget; /* how much power currently boot to in unit of watt */ +-} POWER_CONNECTOR_DETECTION_PARAMETERS; +- +-typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION { +- UCHAR ucPowerConnectorStatus; /* Used for return value 0: detected, 1:not detected */ +- UCHAR ucReserved; +- USHORT usPwrBudget; /* how much power currently boot to in unit of watt */ +- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; +-} POWER_CONNECTOR_DETECTION_PS_ALLOCATION; ++/****************************************************************************/ ++// Structures used by PowerConnectorDetectionTable ++/****************************************************************************/ ++typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS ++{ ++ UCHAR ucPowerConnectorStatus; //Used for return value 0: detected, 1:not detected ++ UCHAR ucPwrBehaviorId; ++ USHORT usPwrBudget; //how much power currently boot to in unit of watt ++}POWER_CONNECTOR_DETECTION_PARAMETERS; ++ ++typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION ++{ ++ UCHAR ucPowerConnectorStatus; //Used for return value 0: detected, 1:not detected ++ UCHAR ucReserved; ++ USHORT usPwrBudget; //how much power currently boot to in unit of watt ++ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; ++}POWER_CONNECTOR_DETECTION_PS_ALLOCATION; + + /****************************LVDS SS Command Table Definitions**********************/ + +-/****************************************************************************/ +-/* Structures used by EnableSpreadSpectrumOnPPLLTable */ +-/****************************************************************************/ +-typedef struct _ENABLE_LVDS_SS_PARAMETERS { +- USHORT usSpreadSpectrumPercentage; +- UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */ +- UCHAR ucSpreadSpectrumStepSize_Delay; /* bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY */ +- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ +- UCHAR ucPadding[3]; +-} ENABLE_LVDS_SS_PARAMETERS; +- +-/* ucTableFormatRevision=1,ucTableContentRevision=2 */ +-typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2 { +- USHORT usSpreadSpectrumPercentage; +- UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */ +- UCHAR ucSpreadSpectrumStep; /* */ +- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ +- UCHAR ucSpreadSpectrumDelay; +- UCHAR ucSpreadSpectrumRange; +- UCHAR ucPadding; +-} ENABLE_LVDS_SS_PARAMETERS_V2; +- +-/* This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS. 
*/ +-typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL { +- USHORT usSpreadSpectrumPercentage; +- UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */ +- UCHAR ucSpreadSpectrumStep; /* */ +- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ +- UCHAR ucSpreadSpectrumDelay; +- UCHAR ucSpreadSpectrumRange; +- UCHAR ucPpll; /* ATOM_PPLL1/ATOM_PPLL2 */ +-} ENABLE_SPREAD_SPECTRUM_ON_PPLL; ++/****************************************************************************/ ++// Structures used by EnableSpreadSpectrumOnPPLLTable ++/****************************************************************************/ ++typedef struct _ENABLE_LVDS_SS_PARAMETERS ++{ ++ USHORT usSpreadSpectrumPercentage; ++ UCHAR ucSpreadSpectrumType; //Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD ++ UCHAR ucSpreadSpectrumStepSize_Delay; //bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY ++ UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE ++ UCHAR ucPadding[3]; ++}ENABLE_LVDS_SS_PARAMETERS; ++ ++//ucTableFormatRevision=1,ucTableContentRevision=2 ++typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2 ++{ ++ USHORT usSpreadSpectrumPercentage; ++ UCHAR ucSpreadSpectrumType; //Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD ++ UCHAR ucSpreadSpectrumStep; // ++ UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE ++ UCHAR ucSpreadSpectrumDelay; ++ UCHAR ucSpreadSpectrumRange; ++ UCHAR ucPadding; ++}ENABLE_LVDS_SS_PARAMETERS_V2; ++ ++//This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS. ++typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL ++{ ++ USHORT usSpreadSpectrumPercentage; ++ UCHAR ucSpreadSpectrumType; // Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD ++ UCHAR ucSpreadSpectrumStep; // ++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE ++ UCHAR ucSpreadSpectrumDelay; ++ UCHAR ucSpreadSpectrumRange; ++ UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2 ++}ENABLE_SPREAD_SPECTRUM_ON_PPLL; ++ ++typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 ++{ ++ USHORT usSpreadSpectrumPercentage; ++ UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread,1-Center Spread. ++ // Bit[1]: 1-Ext. 0-Int. 
++ // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL ++ // Bits[7:4] reserved ++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE ++ USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8] ++ USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC ++}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2; ++ ++#define ATOM_PPLL_SS_TYPE_V2_DOWN_SPREAD 0x00 ++#define ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD 0x01 ++#define ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD 0x02 ++#define ATOM_PPLL_SS_TYPE_V2_PPLL_SEL_MASK 0x0c ++#define ATOM_PPLL_SS_TYPE_V2_P1PLL 0x00 ++#define ATOM_PPLL_SS_TYPE_V2_P2PLL 0x04 ++#define ATOM_PPLL_SS_TYPE_V2_DCPLL 0x08 ++#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK 0x00FF ++#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT 0 ++#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00 ++#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8 + + #define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL + + /**************************************************************************/ + +-typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION { +- PIXEL_CLOCK_PARAMETERS sPCLKInput; +- ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved; /* Caller doesn't need to init this portion */ +-} SET_PIXEL_CLOCK_PS_ALLOCATION; ++typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION ++{ ++ PIXEL_CLOCK_PARAMETERS sPCLKInput; ++ ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;//Caller doesn't need to init this portion ++}SET_PIXEL_CLOCK_PS_ALLOCATION; + + #define ENABLE_VGA_RENDER_PS_ALLOCATION SET_PIXEL_CLOCK_PS_ALLOCATION + +-/****************************************************************************/ +-/* Structures used by ### */ +-/****************************************************************************/ +-typedef struct _MEMORY_TRAINING_PARAMETERS { +- ULONG ulTargetMemoryClock; /* In 10Khz unit */ +-} MEMORY_TRAINING_PARAMETERS; ++/****************************************************************************/ ++// Structures used by ### ++/****************************************************************************/ ++typedef struct _MEMORY_TRAINING_PARAMETERS ++{ ++ ULONG ulTargetMemoryClock; //In 10Khz unit ++}MEMORY_TRAINING_PARAMETERS; + #define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS + ++ + /****************************LVDS and other encoder command table definitions **********************/ + +-/****************************************************************************/ +-/* Structures used by LVDSEncoderControlTable (Before DCE30) */ +-/* LVTMAEncoderControlTable (Before DCE30) */ +-/* TMDSAEncoderControlTable (Before DCE30) */ +-/****************************************************************************/ +-typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS { +- USHORT usPixelClock; /* in 10KHz; for bios convenient */ +- UCHAR ucMisc; /* bit0=0: Enable single link */ +- /* =1: Enable dual link */ +- /* Bit1=0: 666RGB */ +- /* =1: 888RGB */ +- UCHAR ucAction; /* 0: turn off encoder */ +- /* 1: setup and turn on encoder */ +-} LVDS_ENCODER_CONTROL_PARAMETERS; + +-#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS ++/****************************************************************************/ ++// Structures used by LVDSEncoderControlTable (Before DCE30) ++// LVTMAEncoderControlTable (Before DCE30) ++// TMDSAEncoderControlTable (Before DCE30) ++/****************************************************************************/ ++typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS ++{ ++ USHORT usPixelClock; // in 10KHz; for bios convenient ++ UCHAR ucMisc; // bit0=0: Enable single link ++ 
// =1: Enable dual link ++ // Bit1=0: 666RGB ++ // =1: 888RGB ++ UCHAR ucAction; // 0: turn off encoder ++ // 1: setup and turn on encoder ++}LVDS_ENCODER_CONTROL_PARAMETERS; + ++#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS ++ + #define TMDS1_ENCODER_CONTROL_PARAMETERS LVDS_ENCODER_CONTROL_PARAMETERS + #define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS + + #define TMDS2_ENCODER_CONTROL_PARAMETERS TMDS1_ENCODER_CONTROL_PARAMETERS + #define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS + +-/* ucTableFormatRevision=1,ucTableContentRevision=2 */ +-typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 { +- USHORT usPixelClock; /* in 10KHz; for bios convenient */ +- UCHAR ucMisc; /* see PANEL_ENCODER_MISC_xx definitions below */ +- UCHAR ucAction; /* 0: turn off encoder */ +- /* 1: setup and turn on encoder */ +- UCHAR ucTruncate; /* bit0=0: Disable truncate */ +- /* =1: Enable truncate */ +- /* bit4=0: 666RGB */ +- /* =1: 888RGB */ +- UCHAR ucSpatial; /* bit0=0: Disable spatial dithering */ +- /* =1: Enable spatial dithering */ +- /* bit4=0: 666RGB */ +- /* =1: 888RGB */ +- UCHAR ucTemporal; /* bit0=0: Disable temporal dithering */ +- /* =1: Enable temporal dithering */ +- /* bit4=0: 666RGB */ +- /* =1: 888RGB */ +- /* bit5=0: Gray level 2 */ +- /* =1: Gray level 4 */ +- UCHAR ucFRC; /* bit4=0: 25FRC_SEL pattern E */ +- /* =1: 25FRC_SEL pattern F */ +- /* bit6:5=0: 50FRC_SEL pattern A */ +- /* =1: 50FRC_SEL pattern B */ +- /* =2: 50FRC_SEL pattern C */ +- /* =3: 50FRC_SEL pattern D */ +- /* bit7=0: 75FRC_SEL pattern E */ +- /* =1: 75FRC_SEL pattern F */ +-} LVDS_ENCODER_CONTROL_PARAMETERS_V2; + +-#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2 ++//ucTableFormatRevision=1,ucTableContentRevision=2 ++typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 ++{ ++ USHORT usPixelClock; // in 10KHz; for bios convenient ++ UCHAR ucMisc; // see PANEL_ENCODER_MISC_xx defintions below ++ UCHAR ucAction; // 0: turn off encoder ++ // 1: setup and turn on encoder ++ UCHAR ucTruncate; // bit0=0: Disable truncate ++ // =1: Enable truncate ++ // bit4=0: 666RGB ++ // =1: 888RGB ++ UCHAR ucSpatial; // bit0=0: Disable spatial dithering ++ // =1: Enable spatial dithering ++ // bit4=0: 666RGB ++ // =1: 888RGB ++ UCHAR ucTemporal; // bit0=0: Disable temporal dithering ++ // =1: Enable temporal dithering ++ // bit4=0: 666RGB ++ // =1: 888RGB ++ // bit5=0: Gray level 2 ++ // =1: Gray level 4 ++ UCHAR ucFRC; // bit4=0: 25FRC_SEL pattern E ++ // =1: 25FRC_SEL pattern F ++ // bit6:5=0: 50FRC_SEL pattern A ++ // =1: 50FRC_SEL pattern B ++ // =2: 50FRC_SEL pattern C ++ // =3: 50FRC_SEL pattern D ++ // bit7=0: 75FRC_SEL pattern E ++ // =1: 75FRC_SEL pattern F ++}LVDS_ENCODER_CONTROL_PARAMETERS_V2; + ++#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2 ++ + #define TMDS1_ENCODER_CONTROL_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2 + #define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2 +- ++ + #define TMDS2_ENCODER_CONTROL_PARAMETERS_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2 + #define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2 + +@@ -1185,38 +1536,42 @@ typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 { + #define TMDS2_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3 + #define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3 + 
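The V2 LVDS/TMDS encoder control structure above leaves the dithering and FRC controls as raw bit fields, so a caller assembles them from the bit layouts spelled out in the field comments. Below is a minimal sketch of that assembly, assuming the atombios.h definitions above are in scope; the helper name, the dual-link 888RGB configuration, and the literal bit values are illustrative choices, not taken from the radeon driver itself.

/* Sketch only: fill LVDS_ENCODER_CONTROL_PARAMETERS_V2 for a dual-link
 * 888RGB panel with spatial dithering enabled, following the bit layouts
 * documented in the structure comments above. */
static void fill_lvds_encoder_v2(LVDS_ENCODER_CONTROL_PARAMETERS_V2 *args,
                                 USHORT pixel_clock_10khz)
{
	args->usPixelClock = pixel_clock_10khz;       /* in 10 kHz units */
	args->ucMisc       = PANEL_ENCODER_MISC_DUAL; /* bit0=1: dual link */
	args->ucAction     = 1;                       /* 1: set up and turn on encoder */
	args->ucTruncate   = 0x10;                    /* bit4=1: 888RGB, bit0=0: truncate off */
	args->ucSpatial    = 0x10 | 0x01;             /* bit4=1: 888RGB, bit0=1: spatial dither on */
	args->ucTemporal   = 0x00;                    /* temporal dithering disabled */
	args->ucFRC        = 0x00;                    /* default FRC pattern selects */
}

The same pattern applies to the V3 variants referenced in the surrounding #defines: the structures only change which byte carries a given control, so the bit comments remain the authoritative description of the encoding.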
+-/****************************************************************************/ +-/* Structures used by ### */ +-/****************************************************************************/ +-typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS { +- UCHAR ucEnable; /* Enable or Disable External TMDS encoder */ +- UCHAR ucMisc; /* Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB} */ +- UCHAR ucPadding[2]; +-} ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS; +- +-typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION { +- ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder; +- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */ +-} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION; ++/****************************************************************************/ ++// Structures used by ### ++/****************************************************************************/ ++typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS ++{ ++ UCHAR ucEnable; // Enable or Disable External TMDS encoder ++ UCHAR ucMisc; // Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB} ++ UCHAR ucPadding[2]; ++}ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS; ++ ++typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ++{ ++ ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder; ++ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion ++}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION; + + #define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2 + +-typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2 { +- ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder; +- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */ +-} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2; ++typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2 ++{ ++ ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder; ++ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion ++}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2; + +-typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION { +- DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder; +- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; +-} EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION; ++typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION ++{ ++ DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder; ++ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; ++}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION; + +-/****************************************************************************/ +-/* Structures used by DVOEncoderControlTable */ +-/****************************************************************************/ +-/* ucTableFormatRevision=1,ucTableContentRevision=3 */ ++/****************************************************************************/ ++// Structures used by DVOEncoderControlTable ++/****************************************************************************/ ++//ucTableFormatRevision=1,ucTableContentRevision=3 + +-/* ucDVOConfig: */ ++//ucDVOConfig: + #define DVO_ENCODER_CONFIG_RATE_SEL 0x01 + #define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 + #define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 +@@ -1225,21 +1580,22 @@ typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION { + #define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 + #define DVO_ENCODER_CONFIG_24BIT 0x08 + +-typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 { +- USHORT usPixelClock; +- UCHAR ucDVOConfig; +- UCHAR 
ucAction; /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */ +- UCHAR ucReseved[4]; +-} DVO_ENCODER_CONTROL_PARAMETERS_V3; ++typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 ++{ ++ USHORT usPixelClock; ++ UCHAR ucDVOConfig; ++ UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT ++ UCHAR ucReseved[4]; ++}DVO_ENCODER_CONTROL_PARAMETERS_V3; + #define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 DVO_ENCODER_CONTROL_PARAMETERS_V3 + +-/* ucTableFormatRevision=1 */ +-/* ucTableContentRevision=3 structure is not changed but usMisc add bit 1 as another input for */ +-/* bit1=0: non-coherent mode */ +-/* =1: coherent mode */ ++//ucTableFormatRevision=1 ++//ucTableContentRevision=3 structure is not changed but usMisc add bit 1 as another input for ++// bit1=0: non-coherent mode ++// =1: coherent mode + +-/* ========================================================================================== */ +-/* Only change is here next time when changing encoder parameter definitions again! */ ++//========================================================================================== ++//Only change is here next time when changing encoder parameter definitions again! + #define LVDS_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3 + #define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST LVDS_ENCODER_CONTROL_PARAMETERS_LAST + +@@ -1252,7 +1608,7 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 { + #define DVO_ENCODER_CONTROL_PARAMETERS_LAST DVO_ENCODER_CONTROL_PARAMETERS + #define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST DVO_ENCODER_CONTROL_PS_ALLOCATION + +-/* ========================================================================================== */ ++//========================================================================================== + #define PANEL_ENCODER_MISC_DUAL 0x01 + #define PANEL_ENCODER_MISC_COHERENT 0x02 + #define PANEL_ENCODER_MISC_TMDS_LINKB 0x04 +@@ -1281,159 +1637,159 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 { + #define PANEL_ENCODER_75FRC_E 0x00 + #define PANEL_ENCODER_75FRC_F 0x80 + +-/****************************************************************************/ +-/* Structures used by SetVoltageTable */ +-/****************************************************************************/ ++/****************************************************************************/ ++// Structures used by SetVoltageTable ++/****************************************************************************/ + #define SET_VOLTAGE_TYPE_ASIC_VDDC 1 + #define SET_VOLTAGE_TYPE_ASIC_MVDDC 2 + #define SET_VOLTAGE_TYPE_ASIC_MVDDQ 3 + #define SET_VOLTAGE_TYPE_ASIC_VDDCI 4 + #define SET_VOLTAGE_INIT_MODE 5 +-#define SET_VOLTAGE_GET_MAX_VOLTAGE 6 /* Gets the Max. voltage for the soldered Asic */ ++#define SET_VOLTAGE_GET_MAX_VOLTAGE 6 //Gets the Max. voltage for the soldered Asic + + #define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE 0x1 + #define SET_ASIC_VOLTAGE_MODE_SOURCE_A 0x2 + #define SET_ASIC_VOLTAGE_MODE_SOURCE_B 0x4 + + #define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE 0x0 +-#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1 ++#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1 + #define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2 + +-typedef struct _SET_VOLTAGE_PARAMETERS { +- UCHAR ucVoltageType; /* To tell which voltage to set up, VDDC/MVDDC/MVDDQ */ +- UCHAR ucVoltageMode; /* To set all, to set source A or source B or ... 
*/ +- UCHAR ucVoltageIndex; /* An index to tell which voltage level */ +- UCHAR ucReserved; +-} SET_VOLTAGE_PARAMETERS; +- +-typedef struct _SET_VOLTAGE_PARAMETERS_V2 { +- UCHAR ucVoltageType; /* To tell which voltage to set up, VDDC/MVDDC/MVDDQ */ +- UCHAR ucVoltageMode; /* Not used, maybe use for state machine for differen power mode */ +- USHORT usVoltageLevel; /* real voltage level */ +-} SET_VOLTAGE_PARAMETERS_V2; +- +-typedef struct _SET_VOLTAGE_PS_ALLOCATION { +- SET_VOLTAGE_PARAMETERS sASICSetVoltage; +- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; +-} SET_VOLTAGE_PS_ALLOCATION; +- +-/****************************************************************************/ +-/* Structures used by TVEncoderControlTable */ +-/****************************************************************************/ +-typedef struct _TV_ENCODER_CONTROL_PARAMETERS { +- USHORT usPixelClock; /* in 10KHz; for bios convenient */ +- UCHAR ucTvStandard; /* See definition "ATOM_TV_NTSC ..." */ +- UCHAR ucAction; /* 0: turn off encoder */ +- /* 1: setup and turn on encoder */ +-} TV_ENCODER_CONTROL_PARAMETERS; +- +-typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION { +- TV_ENCODER_CONTROL_PARAMETERS sTVEncoder; +- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Don't set this one */ +-} TV_ENCODER_CONTROL_PS_ALLOCATION; +- +-/* ==============================Data Table Portion==================================== */ +- +-#ifdef UEFI_BUILD +-#define UTEMP USHORT +-#define USHORT void* +-#endif +- +-/****************************************************************************/ +-/* Structure used in Data.mtb */ +-/****************************************************************************/ +-typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES { +- USHORT UtilityPipeLine; /* Offest for the utility to get parser info,Don't change this position! 
*/ +- USHORT MultimediaCapabilityInfo; /* Only used by MM Lib,latest version 1.1, not configuable from Bios, need to include the table to build Bios */ +- USHORT MultimediaConfigInfo; /* Only used by MM Lib,latest version 2.1, not configuable from Bios, need to include the table to build Bios */ +- USHORT StandardVESA_Timing; /* Only used by Bios */ +- USHORT FirmwareInfo; /* Shared by various SW components,latest version 1.4 */ +- USHORT DAC_Info; /* Will be obsolete from R600 */ +- USHORT LVDS_Info; /* Shared by various SW components,latest version 1.1 */ +- USHORT TMDS_Info; /* Will be obsolete from R600 */ +- USHORT AnalogTV_Info; /* Shared by various SW components,latest version 1.1 */ +- USHORT SupportedDevicesInfo; /* Will be obsolete from R600 */ +- USHORT GPIO_I2C_Info; /* Shared by various SW components,latest version 1.2 will be used from R600 */ +- USHORT VRAM_UsageByFirmware; /* Shared by various SW components,latest version 1.3 will be used from R600 */ +- USHORT GPIO_Pin_LUT; /* Shared by various SW components,latest version 1.1 */ +- USHORT VESA_ToInternalModeLUT; /* Only used by Bios */ +- USHORT ComponentVideoInfo; /* Shared by various SW components,latest version 2.1 will be used from R600 */ +- USHORT PowerPlayInfo; /* Shared by various SW components,latest version 2.1,new design from R600 */ +- USHORT CompassionateData; /* Will be obsolete from R600 */ +- USHORT SaveRestoreInfo; /* Only used by Bios */ +- USHORT PPLL_SS_Info; /* Shared by various SW components,latest version 1.2, used to call SS_Info, change to new name because of int ASIC SS info */ +- USHORT OemInfo; /* Defined and used by external SW, should be obsolete soon */ +- USHORT XTMDS_Info; /* Will be obsolete from R600 */ +- USHORT MclkSS_Info; /* Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used */ +- USHORT Object_Header; /* Shared by various SW components,latest version 1.1 */ +- USHORT IndirectIOAccess; /* Only used by Bios,this table position can't change at all!! */ +- USHORT MC_InitParameter; /* Only used by command table */ +- USHORT ASIC_VDDC_Info; /* Will be obsolete from R600 */ +- USHORT ASIC_InternalSS_Info; /* New tabel name from R600, used to be called "ASIC_MVDDC_Info" */ +- USHORT TV_VideoMode; /* Only used by command table */ +- USHORT VRAM_Info; /* Only used by command table, latest version 1.3 */ +- USHORT MemoryTrainingInfo; /* Used for VBIOS and Diag utility for memory training purpose since R600. the new table rev start from 2.1 */ +- USHORT IntegratedSystemInfo; /* Shared by various SW components */ +- USHORT ASIC_ProfilingInfo; /* New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600 */ +- USHORT VoltageObjectInfo; /* Shared by various SW components, latest version 1.1 */ +- USHORT PowerSourceInfo; /* Shared by various SW components, latest versoin 1.1 */ +-} ATOM_MASTER_LIST_OF_DATA_TABLES; +- +-#ifdef UEFI_BUILD +-#define USHORT UTEMP +-#endif ++typedef struct _SET_VOLTAGE_PARAMETERS ++{ ++ UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ ++ UCHAR ucVoltageMode; // To set all, to set source A or source B or ... 
++ UCHAR ucVoltageIndex; // An index to tell which voltage level ++ UCHAR ucReserved; ++}SET_VOLTAGE_PARAMETERS; + +-typedef struct _ATOM_MASTER_DATA_TABLE { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables; +-} ATOM_MASTER_DATA_TABLE; ++typedef struct _SET_VOLTAGE_PARAMETERS_V2 ++{ ++ UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ ++ UCHAR ucVoltageMode; // Not used, maybe use for state machine for differen power mode ++ USHORT usVoltageLevel; // real voltage level ++}SET_VOLTAGE_PARAMETERS_V2; + +-/****************************************************************************/ +-/* Structure used in MultimediaCapabilityInfoTable */ +-/****************************************************************************/ +-typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ULONG ulSignature; /* HW info table signature string "$ATI" */ +- UCHAR ucI2C_Type; /* I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc) */ +- UCHAR ucTV_OutInfo; /* Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7) */ +- UCHAR ucVideoPortInfo; /* Provides the video port capabilities */ +- UCHAR ucHostPortInfo; /* Provides host port configuration information */ +-} ATOM_MULTIMEDIA_CAPABILITY_INFO; ++typedef struct _SET_VOLTAGE_PS_ALLOCATION ++{ ++ SET_VOLTAGE_PARAMETERS sASICSetVoltage; ++ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; ++}SET_VOLTAGE_PS_ALLOCATION; ++ ++/****************************************************************************/ ++// Structures used by TVEncoderControlTable ++/****************************************************************************/ ++typedef struct _TV_ENCODER_CONTROL_PARAMETERS ++{ ++ USHORT usPixelClock; // in 10KHz; for bios convenient ++ UCHAR ucTvStandard; // See definition "ATOM_TV_NTSC ..." 
++ UCHAR ucAction; // 0: turn off encoder ++ // 1: setup and turn on encoder ++}TV_ENCODER_CONTROL_PARAMETERS; + +-/****************************************************************************/ +-/* Structure used in MultimediaConfigInfoTable */ +-/****************************************************************************/ +-typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ULONG ulSignature; /* MM info table signature sting "$MMT" */ +- UCHAR ucTunerInfo; /* Type of tuner installed on the adapter (4:0) and video input for tuner (7:5) */ +- UCHAR ucAudioChipInfo; /* List the audio chip type (3:0) product type (4) and OEM revision (7:5) */ +- UCHAR ucProductID; /* Defines as OEM ID or ATI board ID dependent on product type setting */ +- UCHAR ucMiscInfo1; /* Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7) */ +- UCHAR ucMiscInfo2; /* I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6) */ +- UCHAR ucMiscInfo3; /* Video Decoder Type (3:0) Video In Standard/Crystal (7:4) */ +- UCHAR ucMiscInfo4; /* Video Decoder Host Config (2:0) reserved (7:3) */ +- UCHAR ucVideoInput0Info; /* Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */ +- UCHAR ucVideoInput1Info; /* Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */ +- UCHAR ucVideoInput2Info; /* Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */ +- UCHAR ucVideoInput3Info; /* Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */ +- UCHAR ucVideoInput4Info; /* Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */ +-} ATOM_MULTIMEDIA_CONFIG_INFO; ++typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION ++{ ++ TV_ENCODER_CONTROL_PARAMETERS sTVEncoder; ++ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; // Don't set this one ++}TV_ENCODER_CONTROL_PS_ALLOCATION; + +-/****************************************************************************/ +-/* Structures used in FirmwareInfoTable */ +-/****************************************************************************/ ++//==============================Data Table Portion==================================== + +-/* usBIOSCapability Definition: */ +-/* Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; */ +-/* Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; */ +-/* Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; */ +-/* Others: Reserved */ ++/****************************************************************************/ ++// Structure used in Data.mtb ++/****************************************************************************/ ++typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES ++{ ++ USHORT UtilityPipeLine; // Offest for the utility to get parser info,Don't change this position! 
++ USHORT MultimediaCapabilityInfo; // Only used by MM Lib,latest version 1.1, not configuable from Bios, need to include the table to build Bios ++ USHORT MultimediaConfigInfo; // Only used by MM Lib,latest version 2.1, not configuable from Bios, need to include the table to build Bios ++ USHORT StandardVESA_Timing; // Only used by Bios ++ USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4 ++ USHORT DAC_Info; // Will be obsolete from R600 ++ USHORT LVDS_Info; // Shared by various SW components,latest version 1.1 ++ USHORT TMDS_Info; // Will be obsolete from R600 ++ USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1 ++ USHORT SupportedDevicesInfo; // Will be obsolete from R600 ++ USHORT GPIO_I2C_Info; // Shared by various SW components,latest version 1.2 will be used from R600 ++ USHORT VRAM_UsageByFirmware; // Shared by various SW components,latest version 1.3 will be used from R600 ++ USHORT GPIO_Pin_LUT; // Shared by various SW components,latest version 1.1 ++ USHORT VESA_ToInternalModeLUT; // Only used by Bios ++ USHORT ComponentVideoInfo; // Shared by various SW components,latest version 2.1 will be used from R600 ++ USHORT PowerPlayInfo; // Shared by various SW components,latest version 2.1,new design from R600 ++ USHORT CompassionateData; // Will be obsolete from R600 ++ USHORT SaveRestoreInfo; // Only used by Bios ++ USHORT PPLL_SS_Info; // Shared by various SW components,latest version 1.2, used to call SS_Info, change to new name because of int ASIC SS info ++ USHORT OemInfo; // Defined and used by external SW, should be obsolete soon ++ USHORT XTMDS_Info; // Will be obsolete from R600 ++ USHORT MclkSS_Info; // Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used ++ USHORT Object_Header; // Shared by various SW components,latest version 1.1 ++ USHORT IndirectIOAccess; // Only used by Bios,this table position can't change at all!! ++ USHORT MC_InitParameter; // Only used by command table ++ USHORT ASIC_VDDC_Info; // Will be obsolete from R600 ++ USHORT ASIC_InternalSS_Info; // New tabel name from R600, used to be called "ASIC_MVDDC_Info" ++ USHORT TV_VideoMode; // Only used by command table ++ USHORT VRAM_Info; // Only used by command table, latest version 1.3 ++ USHORT MemoryTrainingInfo; // Used for VBIOS and Diag utility for memory training purpose since R600. 
the new table rev start from 2.1 ++ USHORT IntegratedSystemInfo; // Shared by various SW components ++ USHORT ASIC_ProfilingInfo; // New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600 ++ USHORT VoltageObjectInfo; // Shared by various SW components, latest version 1.1 ++ USHORT PowerSourceInfo; // Shared by various SW components, latest versoin 1.1 ++}ATOM_MASTER_LIST_OF_DATA_TABLES; ++ ++typedef struct _ATOM_MASTER_DATA_TABLE ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables; ++}ATOM_MASTER_DATA_TABLE; ++ ++/****************************************************************************/ ++// Structure used in MultimediaCapabilityInfoTable ++/****************************************************************************/ ++typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulSignature; // HW info table signature string "$ATI" ++ UCHAR ucI2C_Type; // I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc) ++ UCHAR ucTV_OutInfo; // Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7) ++ UCHAR ucVideoPortInfo; // Provides the video port capabilities ++ UCHAR ucHostPortInfo; // Provides host port configuration information ++}ATOM_MULTIMEDIA_CAPABILITY_INFO; ++ ++/****************************************************************************/ ++// Structure used in MultimediaConfigInfoTable ++/****************************************************************************/ ++typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulSignature; // MM info table signature sting "$MMT" ++ UCHAR ucTunerInfo; // Type of tuner installed on the adapter (4:0) and video input for tuner (7:5) ++ UCHAR ucAudioChipInfo; // List the audio chip type (3:0) product type (4) and OEM revision (7:5) ++ UCHAR ucProductID; // Defines as OEM ID or ATI board ID dependent on product type setting ++ UCHAR ucMiscInfo1; // Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7) ++ UCHAR ucMiscInfo2; // I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6) ++ UCHAR ucMiscInfo3; // Video Decoder Type (3:0) Video In Standard/Crystal (7:4) ++ UCHAR ucMiscInfo4; // Video Decoder Host Config (2:0) reserved (7:3) ++ UCHAR ucVideoInput0Info;// Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) ++ UCHAR ucVideoInput1Info;// Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) ++ UCHAR ucVideoInput2Info;// Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) ++ UCHAR ucVideoInput3Info;// Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) ++ UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) ++}ATOM_MULTIMEDIA_CONFIG_INFO; ++ ++/****************************************************************************/ ++// Structures used in FirmwareInfoTable ++/****************************************************************************/ ++ ++// usBIOSCapability Defintion: ++// Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; ++// Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; ++// Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; ++// Others: Reserved + #define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED 0x0001 + #define 
ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT 0x0002 + #define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT 0x0004 +-#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT 0x0008 +-#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT 0x0010 ++#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT 0x0008 // (valid from v1.1 ~v1.4):=1: memclk SS enable, =0 memclk SS disable. ++#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT 0x0010 // (valid from v1.1 ~v1.4):=1: engclk SS enable, =0 engclk SS disable. + #define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU 0x0020 + #define ATOM_BIOS_INFO_WMI_SUPPORT 0x0040 + #define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM 0x0080 +@@ -1441,242 +1797,292 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO { + #define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK 0x1E00 + #define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000 + #define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE 0x4000 ++#define ATOM_BIOS_INFO_MEMORY_CLOCK_EXT_SS_SUPPORT 0x0008 // (valid from v2.1 ): =1: memclk ss enable with external ss chip ++#define ATOM_BIOS_INFO_ENGINE_CLOCK_EXT_SS_SUPPORT 0x0010 // (valid from v2.1 ): =1: engclk ss enable with external ss chip + + #ifndef _H2INC + +-/* Please don't add or expand this bitfield structure below, this one will retire soon.! */ +-typedef struct _ATOM_FIRMWARE_CAPABILITY { ++//Please don't add or expand this bitfield structure below, this one will retire soon.! ++typedef struct _ATOM_FIRMWARE_CAPABILITY ++{ + #if ATOM_BIG_ENDIAN +- USHORT Reserved:3; +- USHORT HyperMemory_Size:4; +- USHORT HyperMemory_Support:1; +- USHORT PPMode_Assigned:1; +- USHORT WMI_SUPPORT:1; +- USHORT GPUControlsBL:1; +- USHORT EngineClockSS_Support:1; +- USHORT MemoryClockSS_Support:1; +- USHORT ExtendedDesktopSupport:1; +- USHORT DualCRTC_Support:1; +- USHORT FirmwarePosted:1; ++ USHORT Reserved:3; ++ USHORT HyperMemory_Size:4; ++ USHORT HyperMemory_Support:1; ++ USHORT PPMode_Assigned:1; ++ USHORT WMI_SUPPORT:1; ++ USHORT GPUControlsBL:1; ++ USHORT EngineClockSS_Support:1; ++ USHORT MemoryClockSS_Support:1; ++ USHORT ExtendedDesktopSupport:1; ++ USHORT DualCRTC_Support:1; ++ USHORT FirmwarePosted:1; + #else +- USHORT FirmwarePosted:1; +- USHORT DualCRTC_Support:1; +- USHORT ExtendedDesktopSupport:1; +- USHORT MemoryClockSS_Support:1; +- USHORT EngineClockSS_Support:1; +- USHORT GPUControlsBL:1; +- USHORT WMI_SUPPORT:1; +- USHORT PPMode_Assigned:1; +- USHORT HyperMemory_Support:1; +- USHORT HyperMemory_Size:4; +- USHORT Reserved:3; ++ USHORT FirmwarePosted:1; ++ USHORT DualCRTC_Support:1; ++ USHORT ExtendedDesktopSupport:1; ++ USHORT MemoryClockSS_Support:1; ++ USHORT EngineClockSS_Support:1; ++ USHORT GPUControlsBL:1; ++ USHORT WMI_SUPPORT:1; ++ USHORT PPMode_Assigned:1; ++ USHORT HyperMemory_Support:1; ++ USHORT HyperMemory_Size:4; ++ USHORT Reserved:3; + #endif +-} ATOM_FIRMWARE_CAPABILITY; ++}ATOM_FIRMWARE_CAPABILITY; + +-typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS { +- ATOM_FIRMWARE_CAPABILITY sbfAccess; +- USHORT susAccess; +-} ATOM_FIRMWARE_CAPABILITY_ACCESS; ++typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS ++{ ++ ATOM_FIRMWARE_CAPABILITY sbfAccess; ++ USHORT susAccess; ++}ATOM_FIRMWARE_CAPABILITY_ACCESS; + + #else + +-typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS { +- USHORT susAccess; +-} ATOM_FIRMWARE_CAPABILITY_ACCESS; ++typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS ++{ ++ USHORT susAccess; ++}ATOM_FIRMWARE_CAPABILITY_ACCESS; + + #endif + +-typedef struct _ATOM_FIRMWARE_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ULONG ulFirmwareRevision; +- ULONG ulDefaultEngineClock; /* In 10Khz unit */ +- ULONG 
ulDefaultMemoryClock; /* In 10Khz unit */ +- ULONG ulDriverTargetEngineClock; /* In 10Khz unit */ +- ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */ +- ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */ +- ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */ +- ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */ +- ULONG ulASICMaxEngineClock; /* In 10Khz unit */ +- ULONG ulASICMaxMemoryClock; /* In 10Khz unit */ +- UCHAR ucASICMaxTemperature; +- UCHAR ucPadding[3]; /* Don't use them */ +- ULONG aulReservedForBIOS[3]; /* Don't use them */ +- USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */ +- USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */ +- USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */ +- USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */ +- USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */ +- USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */ +- USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */ +- USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */ +- USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */ +- USHORT usMinPixelClockPLL_Output; /* In 10Khz unit, the definitions above can't change!!! */ +- ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; +- USHORT usReferenceClock; /* In 10Khz unit */ +- USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */ +- UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */ +- UCHAR ucDesign_ID; /* Indicate what is the board design */ +- UCHAR ucMemoryModule_ID; /* Indicate what is the board design */ +-} ATOM_FIRMWARE_INFO; +- +-typedef struct _ATOM_FIRMWARE_INFO_V1_2 { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ULONG ulFirmwareRevision; +- ULONG ulDefaultEngineClock; /* In 10Khz unit */ +- ULONG ulDefaultMemoryClock; /* In 10Khz unit */ +- ULONG ulDriverTargetEngineClock; /* In 10Khz unit */ +- ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */ +- ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */ +- ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */ +- ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */ +- ULONG ulASICMaxEngineClock; /* In 10Khz unit */ +- ULONG ulASICMaxMemoryClock; /* In 10Khz unit */ +- UCHAR ucASICMaxTemperature; +- UCHAR ucMinAllowedBL_Level; +- UCHAR ucPadding[2]; /* Don't use them */ +- ULONG aulReservedForBIOS[2]; /* Don't use them */ +- ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */ +- USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */ +- USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */ +- USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */ +- USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */ +- USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */ +- USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */ +- USHORT usMaxPixelClock; /* In 10Khz unit, Max. 
Pclk */ +- USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */ +- USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */ +- USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */ +- ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; +- USHORT usReferenceClock; /* In 10Khz unit */ +- USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */ +- UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */ +- UCHAR ucDesign_ID; /* Indicate what is the board design */ +- UCHAR ucMemoryModule_ID; /* Indicate what is the board design */ +-} ATOM_FIRMWARE_INFO_V1_2; +- +-typedef struct _ATOM_FIRMWARE_INFO_V1_3 { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ULONG ulFirmwareRevision; +- ULONG ulDefaultEngineClock; /* In 10Khz unit */ +- ULONG ulDefaultMemoryClock; /* In 10Khz unit */ +- ULONG ulDriverTargetEngineClock; /* In 10Khz unit */ +- ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */ +- ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */ +- ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */ +- ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */ +- ULONG ulASICMaxEngineClock; /* In 10Khz unit */ +- ULONG ulASICMaxMemoryClock; /* In 10Khz unit */ +- UCHAR ucASICMaxTemperature; +- UCHAR ucMinAllowedBL_Level; +- UCHAR ucPadding[2]; /* Don't use them */ +- ULONG aulReservedForBIOS; /* Don't use them */ +- ULONG ul3DAccelerationEngineClock; /* In 10Khz unit */ +- ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */ +- USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */ +- USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */ +- USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */ +- USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */ +- USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */ +- USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */ +- USHORT usMaxPixelClock; /* In 10Khz unit, Max. 
Pclk */ +- USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */ +- USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */ +- USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */ +- ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; +- USHORT usReferenceClock; /* In 10Khz unit */ +- USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */ +- UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */ +- UCHAR ucDesign_ID; /* Indicate what is the board design */ +- UCHAR ucMemoryModule_ID; /* Indicate what is the board design */ +-} ATOM_FIRMWARE_INFO_V1_3; +- +-typedef struct _ATOM_FIRMWARE_INFO_V1_4 { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ULONG ulFirmwareRevision; +- ULONG ulDefaultEngineClock; /* In 10Khz unit */ +- ULONG ulDefaultMemoryClock; /* In 10Khz unit */ +- ULONG ulDriverTargetEngineClock; /* In 10Khz unit */ +- ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */ +- ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */ +- ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */ +- ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */ +- ULONG ulASICMaxEngineClock; /* In 10Khz unit */ +- ULONG ulASICMaxMemoryClock; /* In 10Khz unit */ +- UCHAR ucASICMaxTemperature; +- UCHAR ucMinAllowedBL_Level; +- USHORT usBootUpVDDCVoltage; /* In MV unit */ +- USHORT usLcdMinPixelClockPLL_Output; /* In MHz unit */ +- USHORT usLcdMaxPixelClockPLL_Output; /* In MHz unit */ +- ULONG ul3DAccelerationEngineClock; /* In 10Khz unit */ +- ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */ +- USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */ +- USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */ +- USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */ +- USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */ +- USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */ +- USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */ +- USHORT usMaxPixelClock; /* In 10Khz unit, Max. 
Pclk */ +- USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */ +- USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */ +- USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */ +- ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; +- USHORT usReferenceClock; /* In 10Khz unit */ +- USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */ +- UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */ +- UCHAR ucDesign_ID; /* Indicate what is the board design */ +- UCHAR ucMemoryModule_ID; /* Indicate what is the board design */ +-} ATOM_FIRMWARE_INFO_V1_4; +- +-#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V1_4 +- +-/****************************************************************************/ +-/* Structures used in IntegratedSystemInfoTable */ +-/****************************************************************************/ ++typedef struct _ATOM_FIRMWARE_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulFirmwareRevision; ++ ULONG ulDefaultEngineClock; //In 10Khz unit ++ ULONG ulDefaultMemoryClock; //In 10Khz unit ++ ULONG ulDriverTargetEngineClock; //In 10Khz unit ++ ULONG ulDriverTargetMemoryClock; //In 10Khz unit ++ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit ++ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit ++ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit ++ ULONG ulASICMaxEngineClock; //In 10Khz unit ++ ULONG ulASICMaxMemoryClock; //In 10Khz unit ++ UCHAR ucASICMaxTemperature; ++ UCHAR ucPadding[3]; //Don't use them ++ ULONG aulReservedForBIOS[3]; //Don't use them ++ USHORT usMinEngineClockPLL_Input; //In 10Khz unit ++ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit ++ USHORT usMinEngineClockPLL_Output; //In 10Khz unit ++ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit ++ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit ++ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit ++ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk ++ USHORT usMinPixelClockPLL_Input; //In 10Khz unit ++ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit ++ USHORT usMinPixelClockPLL_Output; //In 10Khz unit, the definitions above can't change!!! 
++ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; ++ USHORT usReferenceClock; //In 10Khz unit ++ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit ++ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit ++ UCHAR ucDesign_ID; //Indicate what is the board design ++ UCHAR ucMemoryModule_ID; //Indicate what is the board design ++}ATOM_FIRMWARE_INFO; ++ ++typedef struct _ATOM_FIRMWARE_INFO_V1_2 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulFirmwareRevision; ++ ULONG ulDefaultEngineClock; //In 10Khz unit ++ ULONG ulDefaultMemoryClock; //In 10Khz unit ++ ULONG ulDriverTargetEngineClock; //In 10Khz unit ++ ULONG ulDriverTargetMemoryClock; //In 10Khz unit ++ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit ++ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit ++ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit ++ ULONG ulASICMaxEngineClock; //In 10Khz unit ++ ULONG ulASICMaxMemoryClock; //In 10Khz unit ++ UCHAR ucASICMaxTemperature; ++ UCHAR ucMinAllowedBL_Level; ++ UCHAR ucPadding[2]; //Don't use them ++ ULONG aulReservedForBIOS[2]; //Don't use them ++ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit ++ USHORT usMinEngineClockPLL_Input; //In 10Khz unit ++ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit ++ USHORT usMinEngineClockPLL_Output; //In 10Khz unit ++ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit ++ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit ++ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit ++ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk ++ USHORT usMinPixelClockPLL_Input; //In 10Khz unit ++ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit ++ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output ++ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; ++ USHORT usReferenceClock; //In 10Khz unit ++ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit ++ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit ++ UCHAR ucDesign_ID; //Indicate what is the board design ++ UCHAR ucMemoryModule_ID; //Indicate what is the board design ++}ATOM_FIRMWARE_INFO_V1_2; ++ ++typedef struct _ATOM_FIRMWARE_INFO_V1_3 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulFirmwareRevision; ++ ULONG ulDefaultEngineClock; //In 10Khz unit ++ ULONG ulDefaultMemoryClock; //In 10Khz unit ++ ULONG ulDriverTargetEngineClock; //In 10Khz unit ++ ULONG ulDriverTargetMemoryClock; //In 10Khz unit ++ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit ++ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit ++ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit ++ ULONG ulASICMaxEngineClock; //In 10Khz unit ++ ULONG ulASICMaxMemoryClock; //In 10Khz unit ++ UCHAR ucASICMaxTemperature; ++ UCHAR ucMinAllowedBL_Level; ++ UCHAR ucPadding[2]; //Don't use them ++ ULONG aulReservedForBIOS; //Don't use them ++ ULONG ul3DAccelerationEngineClock;//In 10Khz unit ++ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit ++ USHORT usMinEngineClockPLL_Input; //In 10Khz unit ++ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit ++ USHORT usMinEngineClockPLL_Output; //In 10Khz unit ++ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit ++ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit ++ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit ++ USHORT usMaxPixelClock; //In 10Khz unit, Max. 
Pclk ++ USHORT usMinPixelClockPLL_Input; //In 10Khz unit ++ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit ++ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output ++ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; ++ USHORT usReferenceClock; //In 10Khz unit ++ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit ++ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit ++ UCHAR ucDesign_ID; //Indicate what is the board design ++ UCHAR ucMemoryModule_ID; //Indicate what is the board design ++}ATOM_FIRMWARE_INFO_V1_3; ++ ++typedef struct _ATOM_FIRMWARE_INFO_V1_4 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulFirmwareRevision; ++ ULONG ulDefaultEngineClock; //In 10Khz unit ++ ULONG ulDefaultMemoryClock; //In 10Khz unit ++ ULONG ulDriverTargetEngineClock; //In 10Khz unit ++ ULONG ulDriverTargetMemoryClock; //In 10Khz unit ++ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit ++ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit ++ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit ++ ULONG ulASICMaxEngineClock; //In 10Khz unit ++ ULONG ulASICMaxMemoryClock; //In 10Khz unit ++ UCHAR ucASICMaxTemperature; ++ UCHAR ucMinAllowedBL_Level; ++ USHORT usBootUpVDDCVoltage; //In MV unit ++ USHORT usLcdMinPixelClockPLL_Output; // In MHz unit ++ USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit ++ ULONG ul3DAccelerationEngineClock;//In 10Khz unit ++ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit ++ USHORT usMinEngineClockPLL_Input; //In 10Khz unit ++ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit ++ USHORT usMinEngineClockPLL_Output; //In 10Khz unit ++ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit ++ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit ++ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit ++ USHORT usMaxPixelClock; //In 10Khz unit, Max. 
Pclk ++ USHORT usMinPixelClockPLL_Input; //In 10Khz unit ++ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit ++ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output ++ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; ++ USHORT usReferenceClock; //In 10Khz unit ++ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit ++ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit ++ UCHAR ucDesign_ID; //Indicate what is the board design ++ UCHAR ucMemoryModule_ID; //Indicate what is the board design ++}ATOM_FIRMWARE_INFO_V1_4; ++ ++//the structure below to be used from Cypress ++typedef struct _ATOM_FIRMWARE_INFO_V2_1 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulFirmwareRevision; ++ ULONG ulDefaultEngineClock; //In 10Khz unit ++ ULONG ulDefaultMemoryClock; //In 10Khz unit ++ ULONG ulReserved1; ++ ULONG ulReserved2; ++ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit ++ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit ++ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit ++ ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock ++ ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit ++ UCHAR ucReserved1; //Was ucASICMaxTemperature; ++ UCHAR ucMinAllowedBL_Level; ++ USHORT usBootUpVDDCVoltage; //In MV unit ++ USHORT usLcdMinPixelClockPLL_Output; // In MHz unit ++ USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit ++ ULONG ulReserved4; //Was ulAsicMaximumVoltage ++ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit ++ USHORT usMinEngineClockPLL_Input; //In 10Khz unit ++ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit ++ USHORT usMinEngineClockPLL_Output; //In 10Khz unit ++ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit ++ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit ++ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit ++ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk ++ USHORT usMinPixelClockPLL_Input; //In 10Khz unit ++ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit ++ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output ++ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; ++ USHORT usCoreReferenceClock; //In 10Khz unit ++ USHORT usMemoryReferenceClock; //In 10Khz unit ++ USHORT usUniphyDPModeExtClkFreq; //In 10Khz unit, if it is 0, In DP Mode Uniphy Input clock from internal PPLL, otherwise Input clock from external Spread clock ++ UCHAR ucMemoryModule_ID; //Indicate what is the board design ++ UCHAR ucReserved4[3]; ++}ATOM_FIRMWARE_INFO_V2_1; ++ ++ ++#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_1 ++ ++/****************************************************************************/ ++// Structures used in IntegratedSystemInfoTable ++/****************************************************************************/ + #define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN 0x2 + #define IGP_CAP_FLAG_AC_CARD 0x4 + #define IGP_CAP_FLAG_SDVO_CARD 0x8 + #define IGP_CAP_FLAG_POSTDIV_BY_2_MODE 0x10 + +-typedef struct _ATOM_INTEGRATED_SYSTEM_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ULONG ulBootUpEngineClock; /* in 10kHz unit */ +- ULONG ulBootUpMemoryClock; /* in 10kHz unit */ +- ULONG ulMaxSystemMemoryClock; /* in 10kHz unit */ +- ULONG ulMinSystemMemoryClock; /* in 10kHz unit */ +- UCHAR ucNumberOfCyclesInPeriodHi; +- UCHAR ucLCDTimingSel; /* =0:not valid.!=0 sel this timing descriptor from LCD EDID. 
*/ +- USHORT usReserved1; +- USHORT usInterNBVoltageLow; /* An intermidiate PMW value to set the voltage */ +- USHORT usInterNBVoltageHigh; /* Another intermidiate PMW value to set the voltage */ +- ULONG ulReserved[2]; +- +- USHORT usFSBClock; /* In MHz unit */ +- USHORT usCapabilityFlag; /* Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable */ +- /* Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card */ +- /* Bit[4]==1: P/2 mode, ==0: P/1 mode */ +- USHORT usPCIENBCfgReg7; /* bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal */ +- USHORT usK8MemoryClock; /* in MHz unit */ +- USHORT usK8SyncStartDelay; /* in 0.01 us unit */ +- USHORT usK8DataReturnTime; /* in 0.01 us unit */ +- UCHAR ucMaxNBVoltage; +- UCHAR ucMinNBVoltage; +- UCHAR ucMemoryType; /* [7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved */ +- UCHAR ucNumberOfCyclesInPeriod; /* CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod */ +- UCHAR ucStartingPWM_HighTime; /* CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime */ +- UCHAR ucHTLinkWidth; /* 16 bit vs. 8 bit */ +- UCHAR ucMaxNBVoltageHigh; +- UCHAR ucMinNBVoltageHigh; +-} ATOM_INTEGRATED_SYSTEM_INFO; ++typedef struct _ATOM_INTEGRATED_SYSTEM_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulBootUpEngineClock; //in 10kHz unit ++ ULONG ulBootUpMemoryClock; //in 10kHz unit ++ ULONG ulMaxSystemMemoryClock; //in 10kHz unit ++ ULONG ulMinSystemMemoryClock; //in 10kHz unit ++ UCHAR ucNumberOfCyclesInPeriodHi; ++ UCHAR ucLCDTimingSel; //=0:not valid.!=0 sel this timing descriptor from LCD EDID. ++ USHORT usReserved1; ++ USHORT usInterNBVoltageLow; //An intermidiate PMW value to set the voltage ++ USHORT usInterNBVoltageHigh; //Another intermidiate PMW value to set the voltage ++ ULONG ulReserved[2]; ++ ++ USHORT usFSBClock; //In MHz unit ++ USHORT usCapabilityFlag; //Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable ++ //Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card ++ //Bit[4]==1: P/2 mode, ==0: P/1 mode ++ USHORT usPCIENBCfgReg7; //bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal ++ USHORT usK8MemoryClock; //in MHz unit ++ USHORT usK8SyncStartDelay; //in 0.01 us unit ++ USHORT usK8DataReturnTime; //in 0.01 us unit ++ UCHAR ucMaxNBVoltage; ++ UCHAR ucMinNBVoltage; ++ UCHAR ucMemoryType; //[7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved ++ UCHAR ucNumberOfCyclesInPeriod; //CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod ++ UCHAR ucStartingPWM_HighTime; //CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime ++ UCHAR ucHTLinkWidth; //16 bit vs. 
8 bit ++ UCHAR ucMaxNBVoltageHigh; ++ UCHAR ucMinNBVoltageHigh; ++}ATOM_INTEGRATED_SYSTEM_INFO; + + /* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO +-ulBootUpMemoryClock: For Intel IGP,it's the UMA system memory clock ++ulBootUpMemoryClock: For Intel IGP,it's the UMA system memory clock + For AMD IGP,it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock + ulMaxSystemMemoryClock: For Intel IGP,it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0 + For AMD IGP,for now this can be 0 +-ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0 ++ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0 + For AMD IGP,for now this can be 0 + +-usFSBClock: For Intel IGP,it's FSB Freq ++usFSBClock: For Intel IGP,it's FSB Freq + For AMD IGP,it's HT Link Speed + + usK8MemoryClock: For AMD IGP only. For RevF CPU, set it to 200 +@@ -1687,98 +2093,113 @@ VC:Voltage Control + ucMaxNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all. + ucMinNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all. + +-ucNumberOfCyclesInPeriod: Indicate how many cycles when PWM duty is 100%. low 8 bits of the value. +-ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0 ++ucNumberOfCyclesInPeriod: Indicate how many cycles when PWM duty is 100%. low 8 bits of the value. ++ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0 + + ucMaxNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all. + ucMinNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all. + ++ + usInterNBVoltageLow: Voltage regulator dependent PWM value. The value makes the the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all. + usInterNBVoltageHigh: Voltage regulator dependent PWM value. The value makes the the voltage >=InterNBVoltageLow but <=Max NB voltage.Set this to 0x0000 if VC without PWM or no VC at all. + */ + ++ + /* + The following IGP table is introduced from RS780, which is supposed to be put by SBIOS in FB before IGP VBIOS starts VPOST; +-Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need. ++Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need. + The enough reservation should allow us to never change table revisions. Whenever needed, a GPU SW component can use reserved portion for new data entries. 
+ + SW components can access the IGP system infor structure in the same way as before + */ + +-typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ULONG ulBootUpEngineClock; /* in 10kHz unit */ +- ULONG ulReserved1[2]; /* must be 0x0 for the reserved */ +- ULONG ulBootUpUMAClock; /* in 10kHz unit */ +- ULONG ulBootUpSidePortClock; /* in 10kHz unit */ +- ULONG ulMinSidePortClock; /* in 10kHz unit */ +- ULONG ulReserved2[6]; /* must be 0x0 for the reserved */ +- ULONG ulSystemConfig; /* see explanation below */ +- ULONG ulBootUpReqDisplayVector; +- ULONG ulOtherDisplayMisc; +- ULONG ulDDISlot1Config; +- ULONG ulDDISlot2Config; +- UCHAR ucMemoryType; /* [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved */ +- UCHAR ucUMAChannelNumber; +- UCHAR ucDockingPinBit; +- UCHAR ucDockingPinPolarity; +- ULONG ulDockingPinCFGInfo; +- ULONG ulCPUCapInfo; +- USHORT usNumberOfCyclesInPeriod; +- USHORT usMaxNBVoltage; +- USHORT usMinNBVoltage; +- USHORT usBootUpNBVoltage; +- ULONG ulHTLinkFreq; /* in 10Khz */ +- USHORT usMinHTLinkWidth; +- USHORT usMaxHTLinkWidth; +- USHORT usUMASyncStartDelay; +- USHORT usUMADataReturnTime; +- USHORT usLinkStatusZeroTime; +- USHORT usReserved; +- ULONG ulHighVoltageHTLinkFreq; /* in 10Khz */ +- ULONG ulLowVoltageHTLinkFreq; /* in 10Khz */ +- USHORT usMaxUpStreamHTLinkWidth; +- USHORT usMaxDownStreamHTLinkWidth; +- USHORT usMinUpStreamHTLinkWidth; +- USHORT usMinDownStreamHTLinkWidth; +- ULONG ulReserved3[97]; /* must be 0x0 */ +-} ATOM_INTEGRATED_SYSTEM_INFO_V2; ++ ++typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulBootUpEngineClock; //in 10kHz unit ++ ULONG ulReserved1[2]; //must be 0x0 for the reserved ++ ULONG ulBootUpUMAClock; //in 10kHz unit ++ ULONG ulBootUpSidePortClock; //in 10kHz unit ++ ULONG ulMinSidePortClock; //in 10kHz unit ++ ULONG ulReserved2[6]; //must be 0x0 for the reserved ++ ULONG ulSystemConfig; //see explanation below ++ ULONG ulBootUpReqDisplayVector; ++ ULONG ulOtherDisplayMisc; ++ ULONG ulDDISlot1Config; ++ ULONG ulDDISlot2Config; ++ UCHAR ucMemoryType; //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved ++ UCHAR ucUMAChannelNumber; ++ UCHAR ucDockingPinBit; ++ UCHAR ucDockingPinPolarity; ++ ULONG ulDockingPinCFGInfo; ++ ULONG ulCPUCapInfo; ++ USHORT usNumberOfCyclesInPeriod; ++ USHORT usMaxNBVoltage; ++ USHORT usMinNBVoltage; ++ USHORT usBootUpNBVoltage; ++ ULONG ulHTLinkFreq; //in 10Khz ++ USHORT usMinHTLinkWidth; ++ USHORT usMaxHTLinkWidth; ++ USHORT usUMASyncStartDelay; ++ USHORT usUMADataReturnTime; ++ USHORT usLinkStatusZeroTime; ++ USHORT usDACEfuse; //for storing badgap value (for RS880 only) ++ ULONG ulHighVoltageHTLinkFreq; // in 10Khz ++ ULONG ulLowVoltageHTLinkFreq; // in 10Khz ++ USHORT usMaxUpStreamHTLinkWidth; ++ USHORT usMaxDownStreamHTLinkWidth; ++ USHORT usMinUpStreamHTLinkWidth; ++ USHORT usMinDownStreamHTLinkWidth; ++ USHORT usFirmwareVersion; //0 means FW is not supported. Otherwise it's the FW version loaded by SBIOS and driver should enable FW. ++ USHORT usFullT0Time; // Input to calculate minimum HT link change time required by NB P-State. Unit is 0.01us. 
++ ULONG ulReserved3[96]; //must be 0x0 ++}ATOM_INTEGRATED_SYSTEM_INFO_V2; + + /* + ulBootUpEngineClock: Boot-up Engine Clock in 10Khz; + ulBootUpUMAClock: Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present + ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present,this could be equal to or less than maximum supported Sideport memory clock + +-ulSystemConfig: +-Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode; ++ulSystemConfig: ++Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode; + Bit[1]=1: system boots up at AMD overdrived state or user customized mode. In this case, driver will just stick to this boot-up mode. No other PowerPlay state + =0: system boots up at driver control state. Power state depends on PowerPlay table. + Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used. + Bit[3]=1: Only one power state(Performance) will be supported. + =0: Multiple power states supported from PowerPlay table. +-Bit[4]=1: CLMC is supported and enabled on current system. +- =0: CLMC is not supported or enabled on current system. SBIOS need to support HT link/freq change through ATIF interface. +-Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement. ++Bit[4]=1: CLMC is supported and enabled on current system. ++ =0: CLMC is not supported or enabled on current system. SBIOS need to support HT link/freq change through ATIF interface. ++Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement. + =0: CDLW is disabled. If CLMC is enabled case, Min HT width will be set equal to Max HT width. If CLMC disabled case, Max HT width will be applied. + Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling request will be ignored. + =0: Voltage settings is determined by powerplay table. + Bit[7]=1: Enable CLMC as hybrid Mode. CDLD and CILR will be disabled in this case and we're using legacy C1E. This is workaround for CPU(Griffin) performance issue. + =0: Enable CLMC as regular mode, CDLD and CILR will be enabled. ++Bit[8]=1: CDLF is supported and enabled on current system. ++ =0: CDLF is not supported or enabled on current system. ++Bit[9]=1: DLL Shut Down feature is enabled on current system. ++ =0: DLL Shut Down feature is not enabled or supported on current system. + + ulBootUpReqDisplayVector: This dword is a bit vector indicates what display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions. + + ulOtherDisplayMisc: [15:8]- Bootup LCD Expansion selection; 0-center, 1-full panel size expansion; +- [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSuppportedStd definition; ++ [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSupportedStd definition; + + ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design). 
+ [3:0] - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12) +- [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12) +- [15:8] - Lane configuration attribute; ++ [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 4=1 lane 3:0; bit 5=1 lane 7:4; bit 6=1 lane 11:8; bit 7=1 lane 15:12) ++ When a DDI connector is not "paired" (meaming two connections mutualexclusive on chassis or docking, only one of them can be connected at one time. ++ in both chassis and docking, SBIOS has to duplicate the same PCIE lane info from chassis to docking or vice versa. For example: ++ one DDI connector is only populated in docking with PCIE lane 8-11, but there is no paired connection on chassis, SBIOS has to copy bit 6 to bit 2. ++ ++ [15:8] - Lane configuration attribute; + [23:16]- Connector type, possible value: + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D + CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D + CONNECTOR_OBJECT_ID_HDMI_TYPE_A + CONNECTOR_OBJECT_ID_DISPLAYPORT ++ CONNECTOR_OBJECT_ID_eDP + [31:24]- Reserved + + ulDDISlot2Config: Same as Slot1. +@@ -1787,29 +2208,31 @@ For IGP, Hypermemory is the only memory type showed in CCC. + + ucUMAChannelNumber: how many channels for the UMA; + +-ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin ++ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin + ucDockingPinBit: which bit in this register to read the pin status; + ucDockingPinPolarity:Polarity of the pin when docked; + + ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0 + + usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%. +-usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode. ++ ++usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode. + usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode. + GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0 + PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1 + GPU SW don't control mode: usMaxNBVoltage & usMinNBVoltage=0 and no care about ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE ++ + usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value. + + ulHTLinkFreq: Bootup HT link Frequency in 10Khz. +-usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth. +- If CDLW enabled, both upstream and downstream width should be the same during bootup. +-usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth. ++usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth. + If CDLW enabled, both upstream and downstream width should be the same during bootup. ++usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth. ++ If CDLW enabled, both upstream and downstream width should be the same during bootup. 
+ +-usUMASyncStartDelay: Memory access latency, required for watermark calculation ++usUMASyncStartDelay: Memory access latency, required for watermark calculation + usUMADataReturnTime: Memory access latency, required for watermark calculation +-usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in 0.01 the unit of us ++usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in 0.01 the unit of us + for Griffin or Greyhound. SBIOS needs to convert to actual time by: + if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us) + if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us) +@@ -1817,7 +2240,7 @@ for Griffin or Greyhound. SBIOS needs to convert to actual time by: + if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us) + + ulHighVoltageHTLinkFreq: HT link frequency for power state with low voltage. If boot up runs in HT1, this must be 0. +- This must be less than or equal to ulHTLinkFreq(bootup frequency). ++ This must be less than or equal to ulHTLinkFreq(bootup frequency). + ulLowVoltageHTLinkFreq: HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0. + This must be less than or equal to ulHighVoltageHTLinkFreq. + +@@ -1827,14 +2250,17 @@ usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to rep + usMinDownStreamHTLinkWidth: same as above. + */ + ++ + #define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001 + #define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002 +-#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004 ++#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004 + #define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY 0x00000008 + #define SYSTEM_CONFIG_CLMC_ENABLED 0x00000010 + #define SYSTEM_CONFIG_CDLW_ENABLED 0x00000020 + #define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED 0x00000040 + #define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED 0x00000080 ++#define SYSTEM_CONFIG_CDLF_ENABLED 0x00000100 ++#define SYSTEM_CONFIG_DLL_SHUTDOWN_ENABLED 0x00000200 + + #define IGP_DDI_SLOT_LANE_CONFIG_MASK 0x000000FF + +@@ -1851,6 +2277,41 @@ usMinDownStreamHTLinkWidth: same as above. + + #define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK 0x00FF0000 + ++// IntegratedSystemInfoTable new Rev is V5 after V2, because of the real rev of V2 is v1.4. This rev is used for RR ++typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulBootUpEngineClock; //in 10kHz unit ++ ULONG ulDentistVCOFreq; //Dentist VCO clock in 10kHz unit, the source of GPU SCLK, LCLK, UCLK and VCLK. 
++ ULONG ulLClockFreq; //GPU Lclk freq in 10kHz unit, have relationship with NCLK in NorthBridge ++ ULONG ulBootUpUMAClock; //in 10kHz unit ++ ULONG ulReserved1[8]; //must be 0x0 for the reserved ++ ULONG ulBootUpReqDisplayVector; ++ ULONG ulOtherDisplayMisc; ++ ULONG ulReserved2[4]; //must be 0x0 for the reserved ++ ULONG ulSystemConfig; //TBD ++ ULONG ulCPUCapInfo; //TBD ++ USHORT usMaxNBVoltage; //high NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse; ++ USHORT usMinNBVoltage; //low NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse; ++ USHORT usBootUpNBVoltage; //boot up NB voltage ++ UCHAR ucHtcTmpLmt; //bit [22:16] of D24F3x64 Hardware Thermal Control (HTC) Register, may not be needed, TBD ++ UCHAR ucTjOffset; //bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed, TBD ++ ULONG ulReserved3[4]; //must be 0x0 for the reserved ++ ULONG ulDDISlot1Config; //see above ulDDISlot1Config definition ++ ULONG ulDDISlot2Config; ++ ULONG ulDDISlot3Config; ++ ULONG ulDDISlot4Config; ++ ULONG ulReserved4[4]; //must be 0x0 for the reserved ++ UCHAR ucMemoryType; //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved ++ UCHAR ucUMAChannelNumber; ++ USHORT usReserved; ++ ULONG ulReserved5[4]; //must be 0x0 for the reserved ++ ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];//arrays with values for CSR M3 arbiter for default ++ ULONG ulCSR_M3_ARB_CNTL_UVD[10]; //arrays with values for CSR M3 arbiter for UVD playback ++ ULONG ulCSR_M3_ARB_CNTL_FS3D[10];//arrays with values for CSR M3 arbiter for Full Screen 3D applications ++ ULONG ulReserved6[61]; //must be 0x0 ++}ATOM_INTEGRATED_SYSTEM_INFO_V5; ++ + #define ATOM_CRT_INT_ENCODER1_INDEX 0x00000000 + #define ATOM_LCD_INT_ENCODER1_INDEX 0x00000001 + #define ATOM_TV_INT_ENCODER1_INDEX 0x00000002 +@@ -1866,8 +2327,8 @@ usMinDownStreamHTLinkWidth: same as above. + #define ATOM_DFP_INT_ENCODER3_INDEX 0x0000000C + #define ATOM_DFP_INT_ENCODER4_INDEX 0x0000000D + +-/* define ASIC internal encoder id ( bit vector ) */ +-#define ASIC_INT_DAC1_ENCODER_ID 0x00 ++// define ASIC internal encoder id ( bit vector ), used for CRTC_SourceSelTable ++#define ASIC_INT_DAC1_ENCODER_ID 0x00 + #define ASIC_INT_TV_ENCODER_ID 0x02 + #define ASIC_INT_DIG1_ENCODER_ID 0x03 + #define ASIC_INT_DAC2_ENCODER_ID 0x04 +@@ -1875,10 +2336,24 @@ usMinDownStreamHTLinkWidth: same as above. + #define ASIC_INT_DVO_ENCODER_ID 0x07 + #define ASIC_INT_DIG2_ENCODER_ID 0x09 + #define ASIC_EXT_DIG_ENCODER_ID 0x05 ++#define ASIC_EXT_DIG2_ENCODER_ID 0x08 ++#define ASIC_INT_DIG3_ENCODER_ID 0x0a ++#define ASIC_INT_DIG4_ENCODER_ID 0x0b ++#define ASIC_INT_DIG5_ENCODER_ID 0x0c ++#define ASIC_INT_DIG6_ENCODER_ID 0x0d + +-/* define Encoder attribute */ ++//define Encoder attribute + #define ATOM_ANALOG_ENCODER 0 +-#define ATOM_DIGITAL_ENCODER 1 ++#define ATOM_DIGITAL_ENCODER 1 ++#define ATOM_DP_ENCODER 2 ++ ++#define ATOM_ENCODER_ENUM_MASK 0x70 ++#define ATOM_ENCODER_ENUM_ID1 0x00 ++#define ATOM_ENCODER_ENUM_ID2 0x10 ++#define ATOM_ENCODER_ENUM_ID3 0x20 ++#define ATOM_ENCODER_ENUM_ID4 0x30 ++#define ATOM_ENCODER_ENUM_ID5 0x40 ++#define ATOM_ENCODER_ENUM_ID6 0x50 + + #define ATOM_DEVICE_CRT1_INDEX 0x00000000 + #define ATOM_DEVICE_LCD1_INDEX 0x00000001 +@@ -1886,45 +2361,40 @@ usMinDownStreamHTLinkWidth: same as above. 
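As an illustration (this is not part of the patch itself): nearly every clock field in the firmware and integrated-system info tables above -- ulDefaultEngineClock, ulBootUpEngineClock, ulDentistVCOFreq, usReferenceClock and the various PLL input/output limits -- is stored in 10 kHz units, as the inline "In 10Khz unit" comments state. A minimal sketch of the conversion a consumer of these tables would perform; the helper names and the use of stdint types in place of the ATOM ULONG/USHORT typedefs are assumptions made so the example is self-contained:

#include <stdint.h>

/* ATOM clock fields are stored in units of 10 kHz. */
static inline uint32_t atom_clock_to_khz(uint32_t val_in_10khz)
{
	return val_in_10khz * 10;	/* 60000 -> 600000 kHz */
}

static inline uint32_t atom_clock_to_mhz(uint32_t val_in_10khz)
{
	return val_in_10khz / 100;	/* 60000 -> 600 MHz */
}

So a table entry of 60000 in ulDefaultEngineClock describes a 600 MHz default engine clock.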
+ #define ATOM_DEVICE_DFP1_INDEX 0x00000003 + #define ATOM_DEVICE_CRT2_INDEX 0x00000004 + #define ATOM_DEVICE_LCD2_INDEX 0x00000005 +-#define ATOM_DEVICE_TV2_INDEX 0x00000006 ++#define ATOM_DEVICE_DFP6_INDEX 0x00000006 + #define ATOM_DEVICE_DFP2_INDEX 0x00000007 + #define ATOM_DEVICE_CV_INDEX 0x00000008 +-#define ATOM_DEVICE_DFP3_INDEX 0x00000009 +-#define ATOM_DEVICE_DFP4_INDEX 0x0000000A +-#define ATOM_DEVICE_DFP5_INDEX 0x0000000B ++#define ATOM_DEVICE_DFP3_INDEX 0x00000009 ++#define ATOM_DEVICE_DFP4_INDEX 0x0000000A ++#define ATOM_DEVICE_DFP5_INDEX 0x0000000B ++ + #define ATOM_DEVICE_RESERVEDC_INDEX 0x0000000C + #define ATOM_DEVICE_RESERVEDD_INDEX 0x0000000D + #define ATOM_DEVICE_RESERVEDE_INDEX 0x0000000E + #define ATOM_DEVICE_RESERVEDF_INDEX 0x0000000F + #define ATOM_MAX_SUPPORTED_DEVICE_INFO (ATOM_DEVICE_DFP3_INDEX+1) + #define ATOM_MAX_SUPPORTED_DEVICE_INFO_2 ATOM_MAX_SUPPORTED_DEVICE_INFO +-#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1) ++#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1 ) + + #define ATOM_MAX_SUPPORTED_DEVICE (ATOM_DEVICE_RESERVEDF_INDEX+1) + +-#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX) +-#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX) +-#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX) +-#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX) +-#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX) +-#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX) +-#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX) +-#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX) +-#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX) +-#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX) +-#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX ) +-#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX) +- +-#define ATOM_DEVICE_CRT_SUPPORT \ +- (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT) +-#define ATOM_DEVICE_DFP_SUPPORT \ +- (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | \ +- ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | \ +- ATOM_DEVICE_DFP5_SUPPORT) +-#define ATOM_DEVICE_TV_SUPPORT \ +- (ATOM_DEVICE_TV1_SUPPORT | ATOM_DEVICE_TV2_SUPPORT) +-#define ATOM_DEVICE_LCD_SUPPORT \ +- (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT) ++#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX ) ++#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX ) ++#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX ) ++#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX ) ++#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX ) ++#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX ) ++#define ATOM_DEVICE_DFP6_SUPPORT (0x1L << ATOM_DEVICE_DFP6_INDEX ) ++#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX ) ++#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX ) ++#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX ) ++#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX ) ++#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX ) ++ ++#define ATOM_DEVICE_CRT_SUPPORT (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT) ++#define ATOM_DEVICE_DFP_SUPPORT (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | ATOM_DEVICE_DFP5_SUPPORT | ATOM_DEVICE_DFP6_SUPPORT) ++#define ATOM_DEVICE_TV_SUPPORT 
(ATOM_DEVICE_TV1_SUPPORT) ++#define ATOM_DEVICE_LCD_SUPPORT (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT) + + #define ATOM_DEVICE_CONNECTOR_TYPE_MASK 0x000000F0 + #define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT 0x00000004 +@@ -1942,6 +2412,7 @@ usMinDownStreamHTLinkWidth: same as above. + #define ATOM_DEVICE_CONNECTOR_CASE_1 0x0000000E + #define ATOM_DEVICE_CONNECTOR_DISPLAYPORT 0x0000000F + ++ + #define ATOM_DEVICE_DAC_INFO_MASK 0x0000000F + #define ATOM_DEVICE_DAC_INFO_SHIFT 0x00000000 + #define ATOM_DEVICE_DAC_INFO_NODAC 0x00000000 +@@ -1958,139 +2429,150 @@ usMinDownStreamHTLinkWidth: same as above. + #define ATOM_DEVICE_I2C_ID_SHIFT 0x00000004 + #define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE 0x00000001 + #define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE 0x00000002 +-#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 /* For IGP RS600 */ +-#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 /* For IGP RS690 */ ++#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 //For IGP RS600 ++#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 //For IGP RS690 + + #define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK 0x00000080 + #define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT 0x00000007 + #define ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C 0x00000000 + #define ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C 0x00000001 + +-/* usDeviceSupport: */ +-/* Bits0 = 0 - no CRT1 support= 1- CRT1 is supported */ +-/* Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported */ +-/* Bit 2 = 0 - no TV1 support= 1- TV1 is supported */ +-/* Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported */ +-/* Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported */ +-/* Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported */ +-/* Bit 6 = 0 - no TV2 support= 1- TV2 is supported */ +-/* Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported */ +-/* Bit 8 = 0 - no CV support= 1- CV is supported */ +-/* Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported */ +-/* Byte1 (Supported Device Info) */ +-/* Bit 0 = = 0 - no CV support= 1- CV is supported */ +-/* */ +-/* */ +- +-/* ucI2C_ConfigID */ +-/* [7:0] - I2C LINE Associate ID */ +-/* = 0 - no I2C */ +-/* [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection) */ +-/* = 0, [6:0]=SW assisted I2C ID */ +-/* [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use */ +-/* = 2, HW engine for Multimedia use */ +-/* = 3-7 Reserved for future I2C engines */ +-/* [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C */ +- +-typedef struct _ATOM_I2C_ID_CONFIG { +-#if ATOM_BIG_ENDIAN +- UCHAR bfHW_Capable:1; +- UCHAR bfHW_EngineID:3; +- UCHAR bfI2C_LineMux:4; +-#else +- UCHAR bfI2C_LineMux:4; +- UCHAR bfHW_EngineID:3; +- UCHAR bfHW_Capable:1; +-#endif +-} ATOM_I2C_ID_CONFIG; +- +-typedef union _ATOM_I2C_ID_CONFIG_ACCESS { +- ATOM_I2C_ID_CONFIG sbfAccess; +- UCHAR ucAccess; +-} ATOM_I2C_ID_CONFIG_ACCESS; ++// usDeviceSupport: ++// Bits0 = 0 - no CRT1 support= 1- CRT1 is supported ++// Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported ++// Bit 2 = 0 - no TV1 support= 1- TV1 is supported ++// Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported ++// Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported ++// Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported ++// Bit 6 = 0 - no DFP6 support= 1- DFP6 is supported ++// Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported ++// Bit 8 = 0 - no CV support= 1- CV is supported ++// Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported ++// Bit 10 = 0 - no DFP4 support= 1- DFP4 is supported ++// Bit 11 = 0 - no DFP5 support= 1- DFP5 is supported ++// ++// + + 
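As an illustration (not part of the patch): the usDeviceSupport word described in the comment block above is a plain bit vector, where bit N corresponds to ATOM_DEVICE_xxx_INDEX N, and the ATOM_DEVICE_xxx_SUPPORT masks defined a few lines earlier are simply (0x1L << index). A minimal, self-contained sketch of how such a word might be tested; the function name and the locally mirrored constants are assumptions for the example -- the authoritative defines are the ones in the header above:

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the header's scheme: SUPPORT mask = 1 << INDEX. */
#define EXAMPLE_DEVICE_LCD1_INDEX	0x00000001
#define EXAMPLE_DEVICE_DFP1_INDEX	0x00000003
#define EXAMPLE_DEVICE_SUPPORT(idx)	(0x1L << (idx))

static bool example_device_supported(uint16_t us_device_support, unsigned int index)
{
	return (us_device_support & EXAMPLE_DEVICE_SUPPORT(index)) != 0;
}

/*
 * example_device_supported(word, EXAMPLE_DEVICE_LCD1_INDEX) then answers
 * "Bit 1 = 0 - no LCD1 support = 1 - LCD1 is supported" from the list above.
 */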
/****************************************************************************/ +-/* Structure used in GPIO_I2C_InfoTable */ ++/* Structure used in MclkSS_InfoTable */ + /****************************************************************************/ +-typedef struct _ATOM_GPIO_I2C_ASSIGMENT { +- USHORT usClkMaskRegisterIndex; +- USHORT usClkEnRegisterIndex; +- USHORT usClkY_RegisterIndex; +- USHORT usClkA_RegisterIndex; +- USHORT usDataMaskRegisterIndex; +- USHORT usDataEnRegisterIndex; +- USHORT usDataY_RegisterIndex; +- USHORT usDataA_RegisterIndex; +- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; +- UCHAR ucClkMaskShift; +- UCHAR ucClkEnShift; +- UCHAR ucClkY_Shift; +- UCHAR ucClkA_Shift; +- UCHAR ucDataMaskShift; +- UCHAR ucDataEnShift; +- UCHAR ucDataY_Shift; +- UCHAR ucDataA_Shift; +- UCHAR ucReserved1; +- UCHAR ucReserved2; +-} ATOM_GPIO_I2C_ASSIGMENT; +- +-typedef struct _ATOM_GPIO_I2C_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE]; +-} ATOM_GPIO_I2C_INFO; ++// ucI2C_ConfigID ++// [7:0] - I2C LINE Associate ID ++// = 0 - no I2C ++// [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection) ++// = 0, [6:0]=SW assisted I2C ID ++// [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use ++// = 2, HW engine for Multimedia use ++// = 3-7 Reserved for future I2C engines ++// [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C ++ ++typedef struct _ATOM_I2C_ID_CONFIG ++{ ++#if ATOM_BIG_ENDIAN ++ UCHAR bfHW_Capable:1; ++ UCHAR bfHW_EngineID:3; ++ UCHAR bfI2C_LineMux:4; ++#else ++ UCHAR bfI2C_LineMux:4; ++ UCHAR bfHW_EngineID:3; ++ UCHAR bfHW_Capable:1; ++#endif ++}ATOM_I2C_ID_CONFIG; + +-/****************************************************************************/ +-/* Common Structure used in other structures */ +-/****************************************************************************/ ++typedef union _ATOM_I2C_ID_CONFIG_ACCESS ++{ ++ ATOM_I2C_ID_CONFIG sbfAccess; ++ UCHAR ucAccess; ++}ATOM_I2C_ID_CONFIG_ACCESS; ++ ++ ++/****************************************************************************/ ++// Structure used in GPIO_I2C_InfoTable ++/****************************************************************************/ ++typedef struct _ATOM_GPIO_I2C_ASSIGMENT ++{ ++ USHORT usClkMaskRegisterIndex; ++ USHORT usClkEnRegisterIndex; ++ USHORT usClkY_RegisterIndex; ++ USHORT usClkA_RegisterIndex; ++ USHORT usDataMaskRegisterIndex; ++ USHORT usDataEnRegisterIndex; ++ USHORT usDataY_RegisterIndex; ++ USHORT usDataA_RegisterIndex; ++ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; ++ UCHAR ucClkMaskShift; ++ UCHAR ucClkEnShift; ++ UCHAR ucClkY_Shift; ++ UCHAR ucClkA_Shift; ++ UCHAR ucDataMaskShift; ++ UCHAR ucDataEnShift; ++ UCHAR ucDataY_Shift; ++ UCHAR ucDataA_Shift; ++ UCHAR ucReserved1; ++ UCHAR ucReserved2; ++}ATOM_GPIO_I2C_ASSIGMENT; ++ ++typedef struct _ATOM_GPIO_I2C_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE]; ++}ATOM_GPIO_I2C_INFO; ++ ++/****************************************************************************/ ++// Common Structure used in other structures ++/****************************************************************************/ + + #ifndef _H2INC +- +-/* Please don't add or expand this bitfield structure below, this one will retire soon.! */ +-typedef struct _ATOM_MODE_MISC_INFO { ++ ++//Please don't add or expand this bitfield structure below, this one will retire soon.! 
++typedef struct _ATOM_MODE_MISC_INFO ++{ + #if ATOM_BIG_ENDIAN +- USHORT Reserved:6; +- USHORT RGB888:1; +- USHORT DoubleClock:1; +- USHORT Interlace:1; +- USHORT CompositeSync:1; +- USHORT V_ReplicationBy2:1; +- USHORT H_ReplicationBy2:1; +- USHORT VerticalCutOff:1; +- USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */ +- USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */ +- USHORT HorizontalCutOff:1; ++ USHORT Reserved:6; ++ USHORT RGB888:1; ++ USHORT DoubleClock:1; ++ USHORT Interlace:1; ++ USHORT CompositeSync:1; ++ USHORT V_ReplicationBy2:1; ++ USHORT H_ReplicationBy2:1; ++ USHORT VerticalCutOff:1; ++ USHORT VSyncPolarity:1; //0=Active High, 1=Active Low ++ USHORT HSyncPolarity:1; //0=Active High, 1=Active Low ++ USHORT HorizontalCutOff:1; + #else +- USHORT HorizontalCutOff:1; +- USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */ +- USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */ +- USHORT VerticalCutOff:1; +- USHORT H_ReplicationBy2:1; +- USHORT V_ReplicationBy2:1; +- USHORT CompositeSync:1; +- USHORT Interlace:1; +- USHORT DoubleClock:1; +- USHORT RGB888:1; +- USHORT Reserved:6; ++ USHORT HorizontalCutOff:1; ++ USHORT HSyncPolarity:1; //0=Active High, 1=Active Low ++ USHORT VSyncPolarity:1; //0=Active High, 1=Active Low ++ USHORT VerticalCutOff:1; ++ USHORT H_ReplicationBy2:1; ++ USHORT V_ReplicationBy2:1; ++ USHORT CompositeSync:1; ++ USHORT Interlace:1; ++ USHORT DoubleClock:1; ++ USHORT RGB888:1; ++ USHORT Reserved:6; + #endif +-} ATOM_MODE_MISC_INFO; +- +-typedef union _ATOM_MODE_MISC_INFO_ACCESS { +- ATOM_MODE_MISC_INFO sbfAccess; +- USHORT usAccess; +-} ATOM_MODE_MISC_INFO_ACCESS; +- ++}ATOM_MODE_MISC_INFO; ++ ++typedef union _ATOM_MODE_MISC_INFO_ACCESS ++{ ++ ATOM_MODE_MISC_INFO sbfAccess; ++ USHORT usAccess; ++}ATOM_MODE_MISC_INFO_ACCESS; ++ + #else +- +-typedef union _ATOM_MODE_MISC_INFO_ACCESS { +- USHORT usAccess; +-} ATOM_MODE_MISC_INFO_ACCESS; +- ++ ++typedef union _ATOM_MODE_MISC_INFO_ACCESS ++{ ++ USHORT usAccess; ++}ATOM_MODE_MISC_INFO_ACCESS; ++ + #endif + +-/* usModeMiscInfo- */ ++// usModeMiscInfo- + #define ATOM_H_CUTOFF 0x01 +-#define ATOM_HSYNC_POLARITY 0x02 /* 0=Active High, 1=Active Low */ +-#define ATOM_VSYNC_POLARITY 0x04 /* 0=Active High, 1=Active Low */ ++#define ATOM_HSYNC_POLARITY 0x02 //0=Active High, 1=Active Low ++#define ATOM_VSYNC_POLARITY 0x04 //0=Active High, 1=Active Low + #define ATOM_V_CUTOFF 0x08 + #define ATOM_H_REPLICATIONBY2 0x10 + #define ATOM_V_REPLICATIONBY2 0x20 +@@ -2099,10 +2581,10 @@ typedef union _ATOM_MODE_MISC_INFO_ACCESS { + #define ATOM_DOUBLE_CLOCK_MODE 0x100 + #define ATOM_RGB888_MODE 0x200 + +-/* usRefreshRate- */ ++//usRefreshRate- + #define ATOM_REFRESH_43 43 + #define ATOM_REFRESH_47 47 +-#define ATOM_REFRESH_56 56 ++#define ATOM_REFRESH_56 56 + #define ATOM_REFRESH_60 60 + #define ATOM_REFRESH_65 65 + #define ATOM_REFRESH_70 70 +@@ -2110,192 +2592,233 @@ typedef union _ATOM_MODE_MISC_INFO_ACCESS { + #define ATOM_REFRESH_75 75 + #define ATOM_REFRESH_85 85 + +-/* ATOM_MODE_TIMING data are exactly the same as VESA timing data. */ +-/* Translation from EDID to ATOM_MODE_TIMING, use the following formula. 
*/ +-/* */ +-/* VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK */ +-/* = EDID_HA + EDID_HBL */ +-/* VESA_HDISP = VESA_ACTIVE = EDID_HA */ +-/* VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH */ +-/* = EDID_HA + EDID_HSO */ +-/* VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW */ +-/* VESA_BORDER = EDID_BORDER */ +- +-/****************************************************************************/ +-/* Structure used in SetCRTC_UsingDTDTimingTable */ +-/****************************************************************************/ +-typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS { +- USHORT usH_Size; +- USHORT usH_Blanking_Time; +- USHORT usV_Size; +- USHORT usV_Blanking_Time; +- USHORT usH_SyncOffset; +- USHORT usH_SyncWidth; +- USHORT usV_SyncOffset; +- USHORT usV_SyncWidth; +- ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; +- UCHAR ucH_Border; /* From DFP EDID */ +- UCHAR ucV_Border; +- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ +- UCHAR ucPadding[3]; +-} SET_CRTC_USING_DTD_TIMING_PARAMETERS; +- +-/****************************************************************************/ +-/* Structure used in SetCRTC_TimingTable */ +-/****************************************************************************/ +-typedef struct _SET_CRTC_TIMING_PARAMETERS { +- USHORT usH_Total; /* horizontal total */ +- USHORT usH_Disp; /* horizontal display */ +- USHORT usH_SyncStart; /* horozontal Sync start */ +- USHORT usH_SyncWidth; /* horizontal Sync width */ +- USHORT usV_Total; /* vertical total */ +- USHORT usV_Disp; /* vertical display */ +- USHORT usV_SyncStart; /* vertical Sync start */ +- USHORT usV_SyncWidth; /* vertical Sync width */ +- ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; +- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ +- UCHAR ucOverscanRight; /* right */ +- UCHAR ucOverscanLeft; /* left */ +- UCHAR ucOverscanBottom; /* bottom */ +- UCHAR ucOverscanTop; /* top */ +- UCHAR ucReserved; +-} SET_CRTC_TIMING_PARAMETERS; ++// ATOM_MODE_TIMING data are exactly the same as VESA timing data. ++// Translation from EDID to ATOM_MODE_TIMING, use the following formula. 
++// ++// VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK ++// = EDID_HA + EDID_HBL ++// VESA_HDISP = VESA_ACTIVE = EDID_HA ++// VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH ++// = EDID_HA + EDID_HSO ++// VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW ++// VESA_BORDER = EDID_BORDER ++ ++/****************************************************************************/ ++// Structure used in SetCRTC_UsingDTDTimingTable ++/****************************************************************************/ ++typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS ++{ ++ USHORT usH_Size; ++ USHORT usH_Blanking_Time; ++ USHORT usV_Size; ++ USHORT usV_Blanking_Time; ++ USHORT usH_SyncOffset; ++ USHORT usH_SyncWidth; ++ USHORT usV_SyncOffset; ++ USHORT usV_SyncWidth; ++ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; ++ UCHAR ucH_Border; // From DFP EDID ++ UCHAR ucV_Border; ++ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 ++ UCHAR ucPadding[3]; ++}SET_CRTC_USING_DTD_TIMING_PARAMETERS; ++ ++/****************************************************************************/ ++// Structure used in SetCRTC_TimingTable ++/****************************************************************************/ ++typedef struct _SET_CRTC_TIMING_PARAMETERS ++{ ++ USHORT usH_Total; // horizontal total ++ USHORT usH_Disp; // horizontal display ++ USHORT usH_SyncStart; // horozontal Sync start ++ USHORT usH_SyncWidth; // horizontal Sync width ++ USHORT usV_Total; // vertical total ++ USHORT usV_Disp; // vertical display ++ USHORT usV_SyncStart; // vertical Sync start ++ USHORT usV_SyncWidth; // vertical Sync width ++ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; ++ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 ++ UCHAR ucOverscanRight; // right ++ UCHAR ucOverscanLeft; // left ++ UCHAR ucOverscanBottom; // bottom ++ UCHAR ucOverscanTop; // top ++ UCHAR ucReserved; ++}SET_CRTC_TIMING_PARAMETERS; + #define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS + +-/****************************************************************************/ +-/* Structure used in StandardVESA_TimingTable */ +-/* AnalogTV_InfoTable */ +-/* ComponentVideoInfoTable */ +-/****************************************************************************/ +-typedef struct _ATOM_MODE_TIMING { +- USHORT usCRTC_H_Total; +- USHORT usCRTC_H_Disp; +- USHORT usCRTC_H_SyncStart; +- USHORT usCRTC_H_SyncWidth; +- USHORT usCRTC_V_Total; +- USHORT usCRTC_V_Disp; +- USHORT usCRTC_V_SyncStart; +- USHORT usCRTC_V_SyncWidth; +- USHORT usPixelClock; /* in 10Khz unit */ +- ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; +- USHORT usCRTC_OverscanRight; +- USHORT usCRTC_OverscanLeft; +- USHORT usCRTC_OverscanBottom; +- USHORT usCRTC_OverscanTop; +- USHORT usReserve; +- UCHAR ucInternalModeNumber; +- UCHAR ucRefreshRate; +-} ATOM_MODE_TIMING; +- +-typedef struct _ATOM_DTD_FORMAT { +- USHORT usPixClk; +- USHORT usHActive; +- USHORT usHBlanking_Time; +- USHORT usVActive; +- USHORT usVBlanking_Time; +- USHORT usHSyncOffset; +- USHORT usHSyncWidth; +- USHORT usVSyncOffset; +- USHORT usVSyncWidth; +- USHORT usImageHSize; +- USHORT usImageVSize; +- UCHAR ucHBorder; +- UCHAR ucVBorder; +- ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; +- UCHAR ucInternalModeNumber; +- UCHAR ucRefreshRate; +-} ATOM_DTD_FORMAT; +- +-/****************************************************************************/ +-/* Structure used in LVDS_InfoTable */ +-/* * Need a document to describe this table */ +-/****************************************************************************/ 
++/****************************************************************************/ ++// Structure used in StandardVESA_TimingTable ++// AnalogTV_InfoTable ++// ComponentVideoInfoTable ++/****************************************************************************/ ++typedef struct _ATOM_MODE_TIMING ++{ ++ USHORT usCRTC_H_Total; ++ USHORT usCRTC_H_Disp; ++ USHORT usCRTC_H_SyncStart; ++ USHORT usCRTC_H_SyncWidth; ++ USHORT usCRTC_V_Total; ++ USHORT usCRTC_V_Disp; ++ USHORT usCRTC_V_SyncStart; ++ USHORT usCRTC_V_SyncWidth; ++ USHORT usPixelClock; //in 10Khz unit ++ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; ++ USHORT usCRTC_OverscanRight; ++ USHORT usCRTC_OverscanLeft; ++ USHORT usCRTC_OverscanBottom; ++ USHORT usCRTC_OverscanTop; ++ USHORT usReserve; ++ UCHAR ucInternalModeNumber; ++ UCHAR ucRefreshRate; ++}ATOM_MODE_TIMING; ++ ++typedef struct _ATOM_DTD_FORMAT ++{ ++ USHORT usPixClk; ++ USHORT usHActive; ++ USHORT usHBlanking_Time; ++ USHORT usVActive; ++ USHORT usVBlanking_Time; ++ USHORT usHSyncOffset; ++ USHORT usHSyncWidth; ++ USHORT usVSyncOffset; ++ USHORT usVSyncWidth; ++ USHORT usImageHSize; ++ USHORT usImageVSize; ++ UCHAR ucHBorder; ++ UCHAR ucVBorder; ++ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; ++ UCHAR ucInternalModeNumber; ++ UCHAR ucRefreshRate; ++}ATOM_DTD_FORMAT; ++ ++/****************************************************************************/ ++// Structure used in LVDS_InfoTable ++// * Need a document to describe this table ++/****************************************************************************/ + #define SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004 + #define SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008 + #define SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010 + #define SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020 + +-/* Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. */ +-/* Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL */ +-#define LCDPANEL_CAP_READ_EDID 0x1 +- +-/* ucTableFormatRevision=1 */ +-/* ucTableContentRevision=1 */ +-typedef struct _ATOM_LVDS_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ATOM_DTD_FORMAT sLCDTiming; +- USHORT usModePatchTableOffset; +- USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. */ +- USHORT usOffDelayInMs; +- UCHAR ucPowerSequenceDigOntoDEin10Ms; +- UCHAR ucPowerSequenceDEtoBLOnin10Ms; +- UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */ +- /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */ +- /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */ +- /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */ +- UCHAR ucPanelDefaultRefreshRate; +- UCHAR ucPanelIdentification; +- UCHAR ucSS_Id; +-} ATOM_LVDS_INFO; +- +-/* ucTableFormatRevision=1 */ +-/* ucTableContentRevision=2 */ +-typedef struct _ATOM_LVDS_INFO_V12 { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ATOM_DTD_FORMAT sLCDTiming; +- USHORT usExtInfoTableOffset; +- USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. 
*/ +- USHORT usOffDelayInMs; +- UCHAR ucPowerSequenceDigOntoDEin10Ms; +- UCHAR ucPowerSequenceDEtoBLOnin10Ms; +- UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */ +- /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */ +- /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */ +- /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */ +- UCHAR ucPanelDefaultRefreshRate; +- UCHAR ucPanelIdentification; +- UCHAR ucSS_Id; +- USHORT usLCDVenderID; +- USHORT usLCDProductID; +- UCHAR ucLCDPanel_SpecialHandlingCap; +- UCHAR ucPanelInfoSize; /* start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable */ +- UCHAR ucReserved[2]; +-} ATOM_LVDS_INFO_V12; ++//ucTableFormatRevision=1 ++//ucTableContentRevision=1 ++typedef struct _ATOM_LVDS_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_DTD_FORMAT sLCDTiming; ++ USHORT usModePatchTableOffset; ++ USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec. ++ USHORT usOffDelayInMs; ++ UCHAR ucPowerSequenceDigOntoDEin10Ms; ++ UCHAR ucPowerSequenceDEtoBLOnin10Ms; ++ UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} ++ // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} ++ // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} ++ // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} ++ UCHAR ucPanelDefaultRefreshRate; ++ UCHAR ucPanelIdentification; ++ UCHAR ucSS_Id; ++}ATOM_LVDS_INFO; ++ ++//ucTableFormatRevision=1 ++//ucTableContentRevision=2 ++typedef struct _ATOM_LVDS_INFO_V12 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_DTD_FORMAT sLCDTiming; ++ USHORT usExtInfoTableOffset; ++ USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec. ++ USHORT usOffDelayInMs; ++ UCHAR ucPowerSequenceDigOntoDEin10Ms; ++ UCHAR ucPowerSequenceDEtoBLOnin10Ms; ++ UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} ++ // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} ++ // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} ++ // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} ++ UCHAR ucPanelDefaultRefreshRate; ++ UCHAR ucPanelIdentification; ++ UCHAR ucSS_Id; ++ USHORT usLCDVenderID; ++ USHORT usLCDProductID; ++ UCHAR ucLCDPanel_SpecialHandlingCap; ++ UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable ++ UCHAR ucReserved[2]; ++}ATOM_LVDS_INFO_V12; ++ ++//Definitions for ucLCDPanel_SpecialHandlingCap: ++ ++//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. ++//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL ++#define LCDPANEL_CAP_READ_EDID 0x1 ++ ++//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together ++//with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only slow refresh rate is supported (static ++//refresh rate switch by SW. This is only valid from ATOM_LVDS_INFO_V12 ++#define LCDPANEL_CAP_DRR_SUPPORTED 0x2 ++ ++//Use this cap bit for a quick reference whether an embadded panel (LCD1 ) is LVDS or eDP. 
++#define LCDPANEL_CAP_eDP 0x4 ++ ++ ++//Color Bit Depth definition in EDID V1.4 @BYTE 14h ++//Bit 6 5 4 ++ // 0 0 0 - Color bit depth is undefined ++ // 0 0 1 - 6 Bits per Primary Color ++ // 0 1 0 - 8 Bits per Primary Color ++ // 0 1 1 - 10 Bits per Primary Color ++ // 1 0 0 - 12 Bits per Primary Color ++ // 1 0 1 - 14 Bits per Primary Color ++ // 1 1 0 - 16 Bits per Primary Color ++ // 1 1 1 - Reserved ++ ++#define PANEL_COLOR_BIT_DEPTH_MASK 0x70 ++ ++// Bit7:{=0:Random Dithering disabled;1 Random Dithering enabled} ++#define PANEL_RANDOM_DITHER 0x80 ++#define PANEL_RANDOM_DITHER_MASK 0x80 ++ + + #define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 + +-typedef struct _ATOM_PATCH_RECORD_MODE { +- UCHAR ucRecordType; +- USHORT usHDisp; +- USHORT usVDisp; +-} ATOM_PATCH_RECORD_MODE; ++typedef struct _ATOM_PATCH_RECORD_MODE ++{ ++ UCHAR ucRecordType; ++ USHORT usHDisp; ++ USHORT usVDisp; ++}ATOM_PATCH_RECORD_MODE; + +-typedef struct _ATOM_LCD_RTS_RECORD { +- UCHAR ucRecordType; +- UCHAR ucRTSValue; +-} ATOM_LCD_RTS_RECORD; ++typedef struct _ATOM_LCD_RTS_RECORD ++{ ++ UCHAR ucRecordType; ++ UCHAR ucRTSValue; ++}ATOM_LCD_RTS_RECORD; + +-/* !! If the record below exits, it shoud always be the first record for easy use in command table!!! */ +-typedef struct _ATOM_LCD_MODE_CONTROL_CAP { +- UCHAR ucRecordType; +- USHORT usLCDCap; +-} ATOM_LCD_MODE_CONTROL_CAP; ++//!! If the record below exits, it shoud always be the first record for easy use in command table!!! ++// The record below is only used when LVDS_Info is present. From ATOM_LVDS_INFO_V12, use ucLCDPanel_SpecialHandlingCap instead. ++typedef struct _ATOM_LCD_MODE_CONTROL_CAP ++{ ++ UCHAR ucRecordType; ++ USHORT usLCDCap; ++}ATOM_LCD_MODE_CONTROL_CAP; + + #define LCD_MODE_CAP_BL_OFF 1 + #define LCD_MODE_CAP_CRTC_OFF 2 + #define LCD_MODE_CAP_PANEL_OFF 4 + +-typedef struct _ATOM_FAKE_EDID_PATCH_RECORD { +- UCHAR ucRecordType; +- UCHAR ucFakeEDIDLength; +- UCHAR ucFakeEDIDString[1]; /* This actually has ucFakeEdidLength elements. */ ++typedef struct _ATOM_FAKE_EDID_PATCH_RECORD ++{ ++ UCHAR ucRecordType; ++ UCHAR ucFakeEDIDLength; ++ UCHAR ucFakeEDIDString[1]; // This actually has ucFakeEdidLength elements. + } ATOM_FAKE_EDID_PATCH_RECORD; + +-typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD { +- UCHAR ucRecordType; +- USHORT usHSize; +- USHORT usVSize; +-} ATOM_PANEL_RESOLUTION_PATCH_RECORD; ++typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD ++{ ++ UCHAR ucRecordType; ++ USHORT usHSize; ++ USHORT usVSize; ++}ATOM_PANEL_RESOLUTION_PATCH_RECORD; + + #define LCD_MODE_PATCH_RECORD_MODE_TYPE 1 + #define LCD_RTS_RECORD_TYPE 2 +@@ -2306,21 +2829,25 @@ typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD { + + /****************************Spread Spectrum Info Table Definitions **********************/ + +-/* ucTableFormatRevision=1 */ +-/* ucTableContentRevision=2 */ +-typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT { +- USHORT usSpreadSpectrumPercentage; +- UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */ +- UCHAR ucSS_Step; +- UCHAR ucSS_Delay; +- UCHAR ucSS_Id; +- UCHAR ucRecommendedRef_Div; +- UCHAR ucSS_Range; /* it was reserved for V11 */ +-} ATOM_SPREAD_SPECTRUM_ASSIGNMENT; ++//ucTableFormatRevision=1 ++//ucTableContentRevision=2 ++typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT ++{ ++ USHORT usSpreadSpectrumPercentage; ++ UCHAR ucSpreadSpectrumType; //Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. 
Bit2=1: PCIE REFCLK SS =0 iternal PPLL SS Others:TBD ++ UCHAR ucSS_Step; ++ UCHAR ucSS_Delay; ++ UCHAR ucSS_Id; ++ UCHAR ucRecommendedRef_Div; ++ UCHAR ucSS_Range; //it was reserved for V11 ++}ATOM_SPREAD_SPECTRUM_ASSIGNMENT; + + #define ATOM_MAX_SS_ENTRY 16 +-#define ATOM_DP_SS_ID1 0x0f1 /* SS modulation freq=30k */ +-#define ATOM_DP_SS_ID2 0x0f2 /* SS modulation freq=33k */ ++#define ATOM_DP_SS_ID1 0x0f1 // SS ID for internal DP stream at 2.7Ghz. if ATOM_DP_SS_ID2 does not exist in SS_InfoTable, it is used for internal DP stream at 1.62Ghz as well. ++#define ATOM_DP_SS_ID2 0x0f2 // SS ID for internal DP stream at 1.62Ghz, if it exists in SS_InfoTable. ++#define ATOM_LVLINK_2700MHz_SS_ID 0x0f3 // SS ID for LV link translator chip at 2.7Ghz ++#define ATOM_LVLINK_1620MHz_SS_ID 0x0f4 // SS ID for LV link translator chip at 1.62Ghz ++ + + #define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000 + #define ATOM_SS_DOWN_SPREAD_MODE 0x00000000 +@@ -2329,29 +2856,30 @@ typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT { + #define ATOM_INTERNAL_SS_MASK 0x00000000 + #define ATOM_EXTERNAL_SS_MASK 0x00000002 + #define EXEC_SS_STEP_SIZE_SHIFT 2 +-#define EXEC_SS_DELAY_SHIFT 4 ++#define EXEC_SS_DELAY_SHIFT 4 + #define ACTIVEDATA_TO_BLON_DELAY_SHIFT 4 + +-typedef struct _ATOM_SPREAD_SPECTRUM_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY]; +-} ATOM_SPREAD_SPECTRUM_INFO; +- +-/****************************************************************************/ +-/* Structure used in AnalogTV_InfoTable (Top level) */ +-/****************************************************************************/ +-/* ucTVBootUpDefaultStd definiton: */ +- +-/* ATOM_TV_NTSC 1 */ +-/* ATOM_TV_NTSCJ 2 */ +-/* ATOM_TV_PAL 3 */ +-/* ATOM_TV_PALM 4 */ +-/* ATOM_TV_PALCN 5 */ +-/* ATOM_TV_PALN 6 */ +-/* ATOM_TV_PAL60 7 */ +-/* ATOM_TV_SECAM 8 */ +- +-/* ucTVSuppportedStd definition: */ ++typedef struct _ATOM_SPREAD_SPECTRUM_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY]; ++}ATOM_SPREAD_SPECTRUM_INFO; ++ ++/****************************************************************************/ ++// Structure used in AnalogTV_InfoTable (Top level) ++/****************************************************************************/ ++//ucTVBootUpDefaultStd definiton: ++ ++//ATOM_TV_NTSC 1 ++//ATOM_TV_NTSCJ 2 ++//ATOM_TV_PAL 3 ++//ATOM_TV_PALM 4 ++//ATOM_TV_PALCN 5 ++//ATOM_TV_PALN 6 ++//ATOM_TV_PAL60 7 ++//ATOM_TV_SECAM 8 ++ ++//ucTVSupportedStd definition: + #define NTSC_SUPPORT 0x1 + #define NTSCJ_SUPPORT 0x2 + +@@ -2364,46 +2892,58 @@ typedef struct _ATOM_SPREAD_SPECTRUM_INFO { + + #define MAX_SUPPORTED_TV_TIMING 2 + +-typedef struct _ATOM_ANALOG_TV_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- UCHAR ucTV_SupportedStandard; +- UCHAR ucTV_BootUpDefaultStandard; +- UCHAR ucExt_TV_ASIC_ID; +- UCHAR ucExt_TV_ASIC_SlaveAddr; +- /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; */ +- ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING]; +-} ATOM_ANALOG_TV_INFO; ++typedef struct _ATOM_ANALOG_TV_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR ucTV_SupportedStandard; ++ UCHAR ucTV_BootUpDefaultStandard; ++ UCHAR ucExt_TV_ASIC_ID; ++ UCHAR ucExt_TV_ASIC_SlaveAddr; ++ /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];*/ ++ ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING]; ++}ATOM_ANALOG_TV_INFO; + + #define MAX_SUPPORTED_TV_TIMING_V1_2 3 + +-typedef struct _ATOM_ANALOG_TV_INFO_V1_2 { +- ATOM_COMMON_TABLE_HEADER sHeader; 
+- UCHAR ucTV_SupportedStandard; +- UCHAR ucTV_BootUpDefaultStandard; +- UCHAR ucExt_TV_ASIC_ID; +- UCHAR ucExt_TV_ASIC_SlaveAddr; +- ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; +-} ATOM_ANALOG_TV_INFO_V1_2; ++typedef struct _ATOM_ANALOG_TV_INFO_V1_2 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR ucTV_SupportedStandard; ++ UCHAR ucTV_BootUpDefaultStandard; ++ UCHAR ucExt_TV_ASIC_ID; ++ UCHAR ucExt_TV_ASIC_SlaveAddr; ++ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; ++}ATOM_ANALOG_TV_INFO_V1_2; ++ ++typedef struct _ATOM_DPCD_INFO ++{ ++ UCHAR ucRevisionNumber; //10h : Revision 1.0; 11h : Revision 1.1 ++ UCHAR ucMaxLinkRate; //06h : 1.62Gbps per lane; 0Ah = 2.7Gbps per lane ++ UCHAR ucMaxLane; //Bits 4:0 = MAX_LANE_COUNT (1/2/4). Bit 7 = ENHANCED_FRAME_CAP ++ UCHAR ucMaxDownSpread; //Bit0 = 0: No Down spread; Bit0 = 1: 0.5% (Subject to change according to DP spec) ++}ATOM_DPCD_INFO; ++ ++#define ATOM_DPCD_MAX_LANE_MASK 0x1F + + /**************************************************************************/ +-/* VRAM usage and their definitions */ ++// VRAM usage and their defintions + +-/* One chunk of VRAM used by Bios are for HWICON surfaces,EDID data. */ +-/* Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below. */ +-/* All the addresses below are the offsets from the frame buffer start.They all MUST be Dword aligned! */ +-/* To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR */ +-/* To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX */ ++// One chunk of VRAM used by Bios are for HWICON surfaces,EDID data. ++// Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below. ++// All the addresses below are the offsets from the frame buffer start.They all MUST be Dword aligned! ++// To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR ++// To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX + + #ifndef VESA_MEMORY_IN_64K_BLOCK +-#define VESA_MEMORY_IN_64K_BLOCK 0x100 /* 256*64K=16Mb (Max. VESA memory is 16Mb!) */ ++#define VESA_MEMORY_IN_64K_BLOCK 0x100 //256*64K=16Mb (Max. VESA memory is 16Mb!) 
+ #endif + +-#define ATOM_EDID_RAW_DATASIZE 256 /* In Bytes */ +-#define ATOM_HWICON_SURFACE_SIZE 4096 /* In Bytes */ ++#define ATOM_EDID_RAW_DATASIZE 256 //In Bytes ++#define ATOM_HWICON_SURFACE_SIZE 4096 //In Bytes + #define ATOM_HWICON_INFOTABLE_SIZE 32 + #define MAX_DTD_MODE_IN_VRAM 6 +-#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) /* 28= (SIZEOF ATOM_DTD_FORMAT) */ +-#define ATOM_STD_MODE_SUPPORT_TBL_SIZE (32*8) /* 32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) */ ++#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT) ++#define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) + #define DFP_ENCODER_TYPE_OFFSET 0x80 + #define DP_ENCODER_LANE_NUM_OFFSET 0x84 + #define DP_ENCODER_LINK_RATE_OFFSET 0x88 +@@ -2417,7 +2957,7 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 { + + #define ATOM_LCD1_EDID_ADDR (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) + #define ATOM_LCD1_DTD_MODE_TBL_ADDR (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) +-#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) ++#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) + + #define ATOM_TV1_DTD_MODE_TBL_ADDR (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) + +@@ -2431,13 +2971,13 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 { + + #define ATOM_LCD2_EDID_ADDR (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) + #define ATOM_LCD2_DTD_MODE_TBL_ADDR (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) +-#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) ++#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) + +-#define ATOM_TV2_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) +-#define ATOM_TV2_DTD_MODE_TBL_ADDR (ATOM_TV2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) +-#define ATOM_TV2_STD_MODE_TBL_ADDR (ATOM_TV2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) ++#define ATOM_DFP6_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) ++#define ATOM_DFP6_DTD_MODE_TBL_ADDR (ATOM_DFP6_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) ++#define ATOM_DFP6_STD_MODE_TBL_ADDR (ATOM_DFP6_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) + +-#define ATOM_DFP2_EDID_ADDR (ATOM_TV2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) ++#define ATOM_DFP2_EDID_ADDR (ATOM_DFP6_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) + #define ATOM_DFP2_DTD_MODE_TBL_ADDR (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) + #define ATOM_DFP2_STD_MODE_TBL_ADDR (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) + +@@ -2457,533 +2997,850 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 { + #define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) + #define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) + +-#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE) ++#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE) + +-#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 256) +-#define ATOM_STACK_STORAGE_END (ATOM_STACK_STORAGE_START + 512) ++#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR+256) ++#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START+512 + +-/* The size below is in Kb! */ ++//The size below is in Kb! 
+ #define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC) +- ++ + #define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L + #define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30 + #define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1 + #define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0 + +-/***********************************************************************************/ +-/* Structure used in VRAM_UsageByFirmwareTable */ +-/* Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm */ +-/* at running time. */ +-/* note2: From RV770, the memory is more than 32bit addressable, so we will change */ +-/* ucTableFormatRevision=1,ucTableContentRevision=4, the strcuture remains */ +-/* exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware */ +-/* (in offset to start of memory address) is KB aligned instead of byte aligend. */ +-/***********************************************************************************/ ++/***********************************************************************************/ ++// Structure used in VRAM_UsageByFirmwareTable ++// Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm ++// at running time. ++// note2: From RV770, the memory is more than 32bit addressable, so we will change ++// ucTableFormatRevision=1,ucTableContentRevision=4, the strcuture remains ++// exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware ++// (in offset to start of memory address) is KB aligned instead of byte aligend. ++/***********************************************************************************/ ++// Note3: ++/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged constant across VGA or non VGA adapter, ++for CAIL, The size of FB access area is known, only thing missing is the Offset of FB Access area, so we can have: ++ ++If (ulStartAddrUsedByFirmware!=0) ++FBAccessAreaOffset= ulStartAddrUsedByFirmware - usFBUsedbyDrvInKB; ++Reserved area has been claimed by VBIOS including this FB access area; CAIL doesn't need to reserve any extra area for this purpose ++else //Non VGA case ++ if (FB_Size<=2Gb) ++ FBAccessAreaOffset= FB_Size - usFBUsedbyDrvInKB; ++ else ++ FBAccessAreaOffset= Aper_Size - usFBUsedbyDrvInKB ++ ++CAIL needs to claim an reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in non VGA case.*/ ++ + #define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1 + +-typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO { +- ULONG ulStartAddrUsedByFirmware; +- USHORT usFirmwareUseInKb; +- USHORT usReserved; +-} ATOM_FIRMWARE_VRAM_RESERVE_INFO; ++typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO ++{ ++ ULONG ulStartAddrUsedByFirmware; ++ USHORT usFirmwareUseInKb; ++ USHORT usReserved; ++}ATOM_FIRMWARE_VRAM_RESERVE_INFO; + +-typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ATOM_FIRMWARE_VRAM_RESERVE_INFO +- asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO]; +-} ATOM_VRAM_USAGE_BY_FIRMWARE; ++typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_FIRMWARE_VRAM_RESERVE_INFO asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO]; ++}ATOM_VRAM_USAGE_BY_FIRMWARE; + +-/****************************************************************************/ +-/* Structure used in GPIO_Pin_LUTTable */ +-/****************************************************************************/ +-typedef struct _ATOM_GPIO_PIN_ASSIGNMENT { +- USHORT 
usGpioPin_AIndex; +- UCHAR ucGpioPinBitShift; +- UCHAR ucGPIO_ID; +-} ATOM_GPIO_PIN_ASSIGNMENT; ++// change verion to 1.5, when allow driver to allocate the vram area for command table access. ++typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5 ++{ ++ ULONG ulStartAddrUsedByFirmware; ++ USHORT usFirmwareUseInKb; ++ USHORT usFBUsedByDrvInKb; ++}ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5; + +-typedef struct _ATOM_GPIO_PIN_LUT { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1]; +-} ATOM_GPIO_PIN_LUT; ++typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5 asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO]; ++}ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5; ++ ++/****************************************************************************/ ++// Structure used in GPIO_Pin_LUTTable ++/****************************************************************************/ ++typedef struct _ATOM_GPIO_PIN_ASSIGNMENT ++{ ++ USHORT usGpioPin_AIndex; ++ UCHAR ucGpioPinBitShift; ++ UCHAR ucGPIO_ID; ++}ATOM_GPIO_PIN_ASSIGNMENT; + +-/****************************************************************************/ +-/* Structure used in ComponentVideoInfoTable */ +-/****************************************************************************/ ++typedef struct _ATOM_GPIO_PIN_LUT ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1]; ++}ATOM_GPIO_PIN_LUT; ++ ++/****************************************************************************/ ++// Structure used in ComponentVideoInfoTable ++/****************************************************************************/ + #define GPIO_PIN_ACTIVE_HIGH 0x1 + + #define MAX_SUPPORTED_CV_STANDARDS 5 + +-/* definitions for ATOM_D_INFO.ucSettings */ +-#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F /* [4:0] */ +-#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 /* [6:5] = must be zeroed out */ +-#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 /* [7] */ ++// definitions for ATOM_D_INFO.ucSettings ++#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F // [4:0] ++#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 // [6:5] = must be zeroed out ++#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 // [7] + +-typedef struct _ATOM_GPIO_INFO { +- USHORT usAOffset; +- UCHAR ucSettings; +- UCHAR ucReserved; +-} ATOM_GPIO_INFO; ++typedef struct _ATOM_GPIO_INFO ++{ ++ USHORT usAOffset; ++ UCHAR ucSettings; ++ UCHAR ucReserved; ++}ATOM_GPIO_INFO; + +-/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector) */ ++// definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector) + #define ATOM_CV_RESTRICT_FORMAT_SELECTION 0x2 + +-/* definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i */ +-#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 /* [7]; */ +-#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F /* [6:0] */ +- +-/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode */ +-/* Line 3 out put 5V. 
*/ +-#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 /* represent gpio 3 state for 16:9 */ +-#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 /* represent gpio 4 state for 16:9 */ +-#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0 +- +-/* Line 3 out put 2.2V */ +-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 /* represent gpio 3 state for 4:3 Letter box */ +-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 /* represent gpio 4 state for 4:3 Letter box */ +-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2 +- +-/* Line 3 out put 0V */ +-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 /* represent gpio 3 state for 4:3 */ +-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 /* represent gpio 4 state for 4:3 */ +-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4 +- +-#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F /* bit [5:0] */ +- +-#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 /* bit 7 */ +- +-/* GPIO bit index in gpio setting per mode value, also represend the block no. in gpio blocks. */ +-#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 /* bit 3 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. */ +-#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 /* bit 4 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. */ +- +-typedef struct _ATOM_COMPONENT_VIDEO_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- USHORT usMask_PinRegisterIndex; +- USHORT usEN_PinRegisterIndex; +- USHORT usY_PinRegisterIndex; +- USHORT usA_PinRegisterIndex; +- UCHAR ucBitShift; +- UCHAR ucPinActiveState; /* ucPinActiveState: Bit0=1 active high, =0 active low */ +- ATOM_DTD_FORMAT sReserved; /* must be zeroed out */ +- UCHAR ucMiscInfo; +- UCHAR uc480i; +- UCHAR uc480p; +- UCHAR uc720p; +- UCHAR uc1080i; +- UCHAR ucLetterBoxMode; +- UCHAR ucReserved[3]; +- UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zere, NTSC type connector */ +- ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS]; +- ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS]; +-} ATOM_COMPONENT_VIDEO_INFO; +- +-/* ucTableFormatRevision=2 */ +-/* ucTableContentRevision=1 */ +-typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21 { +- ATOM_COMMON_TABLE_HEADER sHeader; +- UCHAR ucMiscInfo; +- UCHAR uc480i; +- UCHAR uc480p; +- UCHAR uc720p; +- UCHAR uc1080i; +- UCHAR ucReserved; +- UCHAR ucLetterBoxMode; +- UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zere, NTSC type connector */ +- ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS]; +- ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS]; +-} ATOM_COMPONENT_VIDEO_INFO_V21; ++// definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i ++#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 //[7]; ++#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F //[6:0] ++ ++// definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode ++//Line 3 out put 5V. 
++#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 //represent gpio 3 state for 16:9 ++#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 //represent gpio 4 state for 16:9 ++#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0 ++ ++//Line 3 out put 2.2V ++#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 //represent gpio 3 state for 4:3 Letter box ++#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 //represent gpio 4 state for 4:3 Letter box ++#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2 ++ ++//Line 3 out put 0V ++#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 //represent gpio 3 state for 4:3 ++#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 //represent gpio 4 state for 4:3 ++#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4 ++ ++#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F // bit [5:0] ++ ++#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 //bit 7 ++ ++//GPIO bit index in gpio setting per mode value, also represend the block no. in gpio blocks. ++#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 //bit 3 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. ++#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 //bit 4 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. ++ ++ ++typedef struct _ATOM_COMPONENT_VIDEO_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usMask_PinRegisterIndex; ++ USHORT usEN_PinRegisterIndex; ++ USHORT usY_PinRegisterIndex; ++ USHORT usA_PinRegisterIndex; ++ UCHAR ucBitShift; ++ UCHAR ucPinActiveState; //ucPinActiveState: Bit0=1 active high, =0 active low ++ ATOM_DTD_FORMAT sReserved; // must be zeroed out ++ UCHAR ucMiscInfo; ++ UCHAR uc480i; ++ UCHAR uc480p; ++ UCHAR uc720p; ++ UCHAR uc1080i; ++ UCHAR ucLetterBoxMode; ++ UCHAR ucReserved[3]; ++ UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zere, NTSC type connector ++ ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS]; ++ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS]; ++}ATOM_COMPONENT_VIDEO_INFO; ++ ++//ucTableFormatRevision=2 ++//ucTableContentRevision=1 ++typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR ucMiscInfo; ++ UCHAR uc480i; ++ UCHAR uc480p; ++ UCHAR uc720p; ++ UCHAR uc1080i; ++ UCHAR ucReserved; ++ UCHAR ucLetterBoxMode; ++ UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zere, NTSC type connector ++ ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS]; ++ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS]; ++}ATOM_COMPONENT_VIDEO_INFO_V21; + + #define ATOM_COMPONENT_VIDEO_INFO_LAST ATOM_COMPONENT_VIDEO_INFO_V21 + +-/****************************************************************************/ +-/* Structure used in object_InfoTable */ +-/****************************************************************************/ +-typedef struct _ATOM_OBJECT_HEADER { +- ATOM_COMMON_TABLE_HEADER sHeader; +- USHORT usDeviceSupport; +- USHORT usConnectorObjectTableOffset; +- USHORT usRouterObjectTableOffset; +- USHORT usEncoderObjectTableOffset; +- USHORT usProtectionObjectTableOffset; /* only available when Protection block is independent. 
*/ +- USHORT usDisplayPathTableOffset; +-} ATOM_OBJECT_HEADER; +- +-typedef struct _ATOM_DISPLAY_OBJECT_PATH { +- USHORT usDeviceTag; /* supported device */ +- USHORT usSize; /* the size of ATOM_DISPLAY_OBJECT_PATH */ +- USHORT usConnObjectId; /* Connector Object ID */ +- USHORT usGPUObjectId; /* GPU ID */ +- USHORT usGraphicObjIds[1]; /* 1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. */ +-} ATOM_DISPLAY_OBJECT_PATH; +- +-typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE { +- UCHAR ucNumOfDispPath; +- UCHAR ucVersion; +- UCHAR ucPadding[2]; +- ATOM_DISPLAY_OBJECT_PATH asDispPath[1]; +-} ATOM_DISPLAY_OBJECT_PATH_TABLE; +- +-typedef struct _ATOM_OBJECT /* each object has this structure */ +-{ +- USHORT usObjectID; +- USHORT usSrcDstTableOffset; +- USHORT usRecordOffset; /* this pointing to a bunch of records defined below */ +- USHORT usReserved; +-} ATOM_OBJECT; +- +-typedef struct _ATOM_OBJECT_TABLE /* Above 4 object table offset pointing to a bunch of objects all have this structure */ +-{ +- UCHAR ucNumberOfObjects; +- UCHAR ucPadding[3]; +- ATOM_OBJECT asObjects[1]; +-} ATOM_OBJECT_TABLE; +- +-typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT /* usSrcDstTableOffset pointing to this structure */ +-{ +- UCHAR ucNumberOfSrc; +- USHORT usSrcObjectID[1]; +- UCHAR ucNumberOfDst; +- USHORT usDstObjectID[1]; +-} ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT; +- +-/* Related definitions, all records are differnt but they have a commond header */ +-typedef struct _ATOM_COMMON_RECORD_HEADER { +- UCHAR ucRecordType; /* An emun to indicate the record type */ +- UCHAR ucRecordSize; /* The size of the whole record in byte */ +-} ATOM_COMMON_RECORD_HEADER; +- +-#define ATOM_I2C_RECORD_TYPE 1 ++/****************************************************************************/ ++// Structure used in object_InfoTable ++/****************************************************************************/ ++typedef struct _ATOM_OBJECT_HEADER ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usDeviceSupport; ++ USHORT usConnectorObjectTableOffset; ++ USHORT usRouterObjectTableOffset; ++ USHORT usEncoderObjectTableOffset; ++ USHORT usProtectionObjectTableOffset; //only available when Protection block is independent. ++ USHORT usDisplayPathTableOffset; ++}ATOM_OBJECT_HEADER; ++ ++typedef struct _ATOM_OBJECT_HEADER_V3 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usDeviceSupport; ++ USHORT usConnectorObjectTableOffset; ++ USHORT usRouterObjectTableOffset; ++ USHORT usEncoderObjectTableOffset; ++ USHORT usProtectionObjectTableOffset; //only available when Protection block is independent. ++ USHORT usDisplayPathTableOffset; ++ USHORT usMiscObjectTableOffset; ++}ATOM_OBJECT_HEADER_V3; ++ ++typedef struct _ATOM_DISPLAY_OBJECT_PATH ++{ ++ USHORT usDeviceTag; //supported device ++ USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH ++ USHORT usConnObjectId; //Connector Object ID ++ USHORT usGPUObjectId; //GPU ID ++ USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. 
++}ATOM_DISPLAY_OBJECT_PATH; ++ ++typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE ++{ ++ UCHAR ucNumOfDispPath; ++ UCHAR ucVersion; ++ UCHAR ucPadding[2]; ++ ATOM_DISPLAY_OBJECT_PATH asDispPath[1]; ++}ATOM_DISPLAY_OBJECT_PATH_TABLE; ++ ++ ++typedef struct _ATOM_OBJECT //each object has this structure ++{ ++ USHORT usObjectID; ++ USHORT usSrcDstTableOffset; ++ USHORT usRecordOffset; //this pointing to a bunch of records defined below ++ USHORT usReserved; ++}ATOM_OBJECT; ++ ++typedef struct _ATOM_OBJECT_TABLE //Above 4 object table offset pointing to a bunch of objects all have this structure ++{ ++ UCHAR ucNumberOfObjects; ++ UCHAR ucPadding[3]; ++ ATOM_OBJECT asObjects[1]; ++}ATOM_OBJECT_TABLE; ++ ++typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset pointing to this structure ++{ ++ UCHAR ucNumberOfSrc; ++ USHORT usSrcObjectID[1]; ++ UCHAR ucNumberOfDst; ++ USHORT usDstObjectID[1]; ++}ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT; ++ ++ ++//Two definitions below are for OPM on MXM module designs ++ ++#define EXT_HPDPIN_LUTINDEX_0 0 ++#define EXT_HPDPIN_LUTINDEX_1 1 ++#define EXT_HPDPIN_LUTINDEX_2 2 ++#define EXT_HPDPIN_LUTINDEX_3 3 ++#define EXT_HPDPIN_LUTINDEX_4 4 ++#define EXT_HPDPIN_LUTINDEX_5 5 ++#define EXT_HPDPIN_LUTINDEX_6 6 ++#define EXT_HPDPIN_LUTINDEX_7 7 ++#define MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES (EXT_HPDPIN_LUTINDEX_7+1) ++ ++#define EXT_AUXDDC_LUTINDEX_0 0 ++#define EXT_AUXDDC_LUTINDEX_1 1 ++#define EXT_AUXDDC_LUTINDEX_2 2 ++#define EXT_AUXDDC_LUTINDEX_3 3 ++#define EXT_AUXDDC_LUTINDEX_4 4 ++#define EXT_AUXDDC_LUTINDEX_5 5 ++#define EXT_AUXDDC_LUTINDEX_6 6 ++#define EXT_AUXDDC_LUTINDEX_7 7 ++#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1) ++ ++typedef struct _EXT_DISPLAY_PATH ++{ ++ USHORT usDeviceTag; //A bit vector to show what devices are supported ++ USHORT usDeviceACPIEnum; //16bit device ACPI id. ++ USHORT usDeviceConnector; //A physical connector for displays to plug in, using object connector definitions ++ UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT ++ UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT ++ USHORT usExtEncoderObjId; //external encoder object id ++ USHORT usReserved[3]; ++}EXT_DISPLAY_PATH; ++ ++#define NUMBER_OF_UCHAR_FOR_GUID 16 ++#define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7 ++ ++typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string ++ EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries. ++ UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0. 
++ UCHAR Reserved [7]; // for potential expansion ++}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO; ++ ++//Related definitions, all records are differnt but they have a commond header ++typedef struct _ATOM_COMMON_RECORD_HEADER ++{ ++ UCHAR ucRecordType; //An emun to indicate the record type ++ UCHAR ucRecordSize; //The size of the whole record in byte ++}ATOM_COMMON_RECORD_HEADER; ++ ++ ++#define ATOM_I2C_RECORD_TYPE 1 + #define ATOM_HPD_INT_RECORD_TYPE 2 + #define ATOM_OUTPUT_PROTECTION_RECORD_TYPE 3 + #define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE 4 +-#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */ +-#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */ ++#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE ++#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE + #define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE 7 +-#define ATOM_JTAG_RECORD_TYPE 8 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */ ++#define ATOM_JTAG_RECORD_TYPE 8 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE + #define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE 9 + #define ATOM_ENCODER_DVO_CF_RECORD_TYPE 10 + #define ATOM_CONNECTOR_CF_RECORD_TYPE 11 + #define ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE 12 + #define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE 13 +-#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE 14 +-#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15 +- +-/* Must be updated when new record type is added,equal to that record definition! */ +-#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_CF_RECORD_TYPE +- +-typedef struct _ATOM_I2C_RECORD { +- ATOM_COMMON_RECORD_HEADER sheader; +- ATOM_I2C_ID_CONFIG sucI2cId; +- UCHAR ucI2CAddr; /* The slave address, it's 0 when the record is attached to connector for DDC */ +-} ATOM_I2C_RECORD; +- +-typedef struct _ATOM_HPD_INT_RECORD { +- ATOM_COMMON_RECORD_HEADER sheader; +- UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */ +- UCHAR ucPlugged_PinState; +-} ATOM_HPD_INT_RECORD; +- +-typedef struct _ATOM_OUTPUT_PROTECTION_RECORD { +- ATOM_COMMON_RECORD_HEADER sheader; +- UCHAR ucProtectionFlag; +- UCHAR ucReserved; +-} ATOM_OUTPUT_PROTECTION_RECORD; +- +-typedef struct _ATOM_CONNECTOR_DEVICE_TAG { +- ULONG ulACPIDeviceEnum; /* Reserved for now */ +- USHORT usDeviceID; /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT" */ +- USHORT usPadding; +-} ATOM_CONNECTOR_DEVICE_TAG; +- +-typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD { +- ATOM_COMMON_RECORD_HEADER sheader; +- UCHAR ucNumberOfDevice; +- UCHAR ucReserved; +- ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation */ +-} ATOM_CONNECTOR_DEVICE_TAG_RECORD; +- +-typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD { +- ATOM_COMMON_RECORD_HEADER sheader; +- UCHAR ucConfigGPIOID; +- UCHAR ucConfigGPIOState; /* Set to 1 when it's active high to enable external flow in */ +- UCHAR ucFlowinGPIPID; +- UCHAR ucExtInGPIPID; +-} ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD; +- +-typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD { +- ATOM_COMMON_RECORD_HEADER sheader; +- UCHAR ucCTL1GPIO_ID; +- UCHAR ucCTL1GPIOState; /* Set to 1 when it's active high */ +- UCHAR ucCTL2GPIO_ID; +- UCHAR ucCTL2GPIOState; /* Set to 1 when it's active high */ +- UCHAR ucCTL3GPIO_ID; +- UCHAR ucCTL3GPIOState; /* Set to 1 when it's active high */ +- UCHAR ucCTLFPGA_IN_ID; +- UCHAR 
ucPadding[3]; +-} ATOM_ENCODER_FPGA_CONTROL_RECORD; +- +-typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD { +- ATOM_COMMON_RECORD_HEADER sheader; +- UCHAR ucGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */ +- UCHAR ucTVActiveState; /* Indicating when the pin==0 or 1 when TV is connected */ +-} ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD; +- +-typedef struct _ATOM_JTAG_RECORD { +- ATOM_COMMON_RECORD_HEADER sheader; +- UCHAR ucTMSGPIO_ID; +- UCHAR ucTMSGPIOState; /* Set to 1 when it's active high */ +- UCHAR ucTCKGPIO_ID; +- UCHAR ucTCKGPIOState; /* Set to 1 when it's active high */ +- UCHAR ucTDOGPIO_ID; +- UCHAR ucTDOGPIOState; /* Set to 1 when it's active high */ +- UCHAR ucTDIGPIO_ID; +- UCHAR ucTDIGPIOState; /* Set to 1 when it's active high */ +- UCHAR ucPadding[2]; +-} ATOM_JTAG_RECORD; +- +-/* The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually */ +-typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR { +- UCHAR ucGPIOID; /* GPIO_ID, find the corresponding ID in GPIO_LUT table */ +- UCHAR ucGPIO_PinState; /* Pin state showing how to set-up the pin */ +-} ATOM_GPIO_PIN_CONTROL_PAIR; +- +-typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD { +- ATOM_COMMON_RECORD_HEADER sheader; +- UCHAR ucFlags; /* Future expnadibility */ +- UCHAR ucNumberOfPins; /* Number of GPIO pins used to control the object */ +- ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; /* the real gpio pin pair determined by number of pins ucNumberOfPins */ +-} ATOM_OBJECT_GPIO_CNTL_RECORD; +- +-/* Definitions for GPIO pin state */ ++#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE 14 ++#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15 ++#define ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE 16 //This is for the case when connectors are not known to object table ++#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to object table ++#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicats the oobject is linked to another obj described by the record ++#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19 ++ ++ ++//Must be updated when new record type is added,equal to that record definition! 
++#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE ++ ++typedef struct _ATOM_I2C_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ ATOM_I2C_ID_CONFIG sucI2cId; ++ UCHAR ucI2CAddr; //The slave address, it's 0 when the record is attached to connector for DDC ++}ATOM_I2C_RECORD; ++ ++typedef struct _ATOM_HPD_INT_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucHPDIntGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info ++ UCHAR ucPlugged_PinState; ++}ATOM_HPD_INT_RECORD; ++ ++ ++typedef struct _ATOM_OUTPUT_PROTECTION_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucProtectionFlag; ++ UCHAR ucReserved; ++}ATOM_OUTPUT_PROTECTION_RECORD; ++ ++typedef struct _ATOM_CONNECTOR_DEVICE_TAG ++{ ++ ULONG ulACPIDeviceEnum; //Reserved for now ++ USHORT usDeviceID; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT" ++ USHORT usPadding; ++}ATOM_CONNECTOR_DEVICE_TAG; ++ ++typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucNumberOfDevice; ++ UCHAR ucReserved; ++ ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation ++}ATOM_CONNECTOR_DEVICE_TAG_RECORD; ++ ++ ++typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucConfigGPIOID; ++ UCHAR ucConfigGPIOState; //Set to 1 when it's active high to enable external flow in ++ UCHAR ucFlowinGPIPID; ++ UCHAR ucExtInGPIPID; ++}ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD; ++ ++typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucCTL1GPIO_ID; ++ UCHAR ucCTL1GPIOState; //Set to 1 when it's active high ++ UCHAR ucCTL2GPIO_ID; ++ UCHAR ucCTL2GPIOState; //Set to 1 when it's active high ++ UCHAR ucCTL3GPIO_ID; ++ UCHAR ucCTL3GPIOState; //Set to 1 when it's active high ++ UCHAR ucCTLFPGA_IN_ID; ++ UCHAR ucPadding[3]; ++}ATOM_ENCODER_FPGA_CONTROL_RECORD; ++ ++typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info ++ UCHAR ucTVActiveState; //Indicating when the pin==0 or 1 when TV is connected ++}ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD; ++ ++typedef struct _ATOM_JTAG_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucTMSGPIO_ID; ++ UCHAR ucTMSGPIOState; //Set to 1 when it's active high ++ UCHAR ucTCKGPIO_ID; ++ UCHAR ucTCKGPIOState; //Set to 1 when it's active high ++ UCHAR ucTDOGPIO_ID; ++ UCHAR ucTDOGPIOState; //Set to 1 when it's active high ++ UCHAR ucTDIGPIO_ID; ++ UCHAR ucTDIGPIOState; //Set to 1 when it's active high ++ UCHAR ucPadding[2]; ++}ATOM_JTAG_RECORD; ++ ++ ++//The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually ++typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR ++{ ++ UCHAR ucGPIOID; // GPIO_ID, find the corresponding ID in GPIO_LUT table ++ UCHAR ucGPIO_PinState; // Pin state showing how to set-up the pin ++}ATOM_GPIO_PIN_CONTROL_PAIR; ++ ++typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucFlags; // Future expnadibility ++ UCHAR ucNumberOfPins; // Number of GPIO pins used to control the object ++ ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; // the real gpio pin pair determined by number of pins ucNumberOfPins ++}ATOM_OBJECT_GPIO_CNTL_RECORD; ++ ++//Definitions for GPIO pin state + #define GPIO_PIN_TYPE_INPUT 0x00 + #define GPIO_PIN_TYPE_OUTPUT 0x10 + 
#define GPIO_PIN_TYPE_HW_CONTROL 0x20 + +-/* For GPIO_PIN_TYPE_OUTPUT the following is defined */ ++//For GPIO_PIN_TYPE_OUTPUT the following is defined + #define GPIO_PIN_OUTPUT_STATE_MASK 0x01 + #define GPIO_PIN_OUTPUT_STATE_SHIFT 0 + #define GPIO_PIN_STATE_ACTIVE_LOW 0x0 + #define GPIO_PIN_STATE_ACTIVE_HIGH 0x1 + +-typedef struct _ATOM_ENCODER_DVO_CF_RECORD { +- ATOM_COMMON_RECORD_HEADER sheader; +- ULONG ulStrengthControl; /* DVOA strength control for CF */ +- UCHAR ucPadding[2]; +-} ATOM_ENCODER_DVO_CF_RECORD; ++// Indexes to GPIO array in GLSync record ++#define ATOM_GPIO_INDEX_GLSYNC_REFCLK 0 ++#define ATOM_GPIO_INDEX_GLSYNC_HSYNC 1 ++#define ATOM_GPIO_INDEX_GLSYNC_VSYNC 2 ++#define ATOM_GPIO_INDEX_GLSYNC_SWAP_REQ 3 ++#define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT 4 ++#define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5 ++#define ATOM_GPIO_INDEX_GLSYNC_V_RESET 6 ++#define ATOM_GPIO_INDEX_GLSYNC_MAX 7 ++ ++typedef struct _ATOM_ENCODER_DVO_CF_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ ULONG ulStrengthControl; // DVOA strength control for CF ++ UCHAR ucPadding[2]; ++}ATOM_ENCODER_DVO_CF_RECORD; + +-/* value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle */ ++// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle + #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1 + #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2 + +-typedef struct _ATOM_CONNECTOR_CF_RECORD { +- ATOM_COMMON_RECORD_HEADER sheader; +- USHORT usMaxPixClk; +- UCHAR ucFlowCntlGpioId; +- UCHAR ucSwapCntlGpioId; +- UCHAR ucConnectedDvoBundle; +- UCHAR ucPadding; +-} ATOM_CONNECTOR_CF_RECORD; +- +-typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD { +- ATOM_COMMON_RECORD_HEADER sheader; +- ATOM_DTD_FORMAT asTiming; +-} ATOM_CONNECTOR_HARDCODE_DTD_RECORD; +- +-typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD { +- ATOM_COMMON_RECORD_HEADER sheader; /* ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE */ +- UCHAR ucSubConnectorType; /* CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A */ +- UCHAR ucReserved; +-} ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD; +- +-typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD { +- ATOM_COMMON_RECORD_HEADER sheader; +- UCHAR ucMuxType; /* decide the number of ucMuxState, =0, no pin state, =1: single state with complement, >1: multiple state */ +- UCHAR ucMuxControlPin; +- UCHAR ucMuxState[2]; /* for alligment purpose */ +-} ATOM_ROUTER_DDC_PATH_SELECT_RECORD; +- +-typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD { +- ATOM_COMMON_RECORD_HEADER sheader; +- UCHAR ucMuxType; +- UCHAR ucMuxControlPin; +- UCHAR ucMuxState[2]; /* for alligment purpose */ +-} ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD; +- +-/* define ucMuxType */ ++typedef struct _ATOM_CONNECTOR_CF_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ USHORT usMaxPixClk; ++ UCHAR ucFlowCntlGpioId; ++ UCHAR ucSwapCntlGpioId; ++ UCHAR ucConnectedDvoBundle; ++ UCHAR ucPadding; ++}ATOM_CONNECTOR_CF_RECORD; ++ ++typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ ATOM_DTD_FORMAT asTiming; ++}ATOM_CONNECTOR_HARDCODE_DTD_RECORD; ++ ++typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; //ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE ++ UCHAR ucSubConnectorType; //CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A ++ UCHAR ucReserved; ++}ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD; ++ ++ ++typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR 
ucMuxType; //decide the number of ucMuxState, =0, no pin state, =1: single state with complement, >1: multiple state ++ UCHAR ucMuxControlPin; ++ UCHAR ucMuxState[2]; //for alligment purpose ++}ATOM_ROUTER_DDC_PATH_SELECT_RECORD; ++ ++typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucMuxType; ++ UCHAR ucMuxControlPin; ++ UCHAR ucMuxState[2]; //for alligment purpose ++}ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD; ++ ++// define ucMuxType + #define ATOM_ROUTER_MUX_PIN_STATE_MASK 0x0f + #define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT 0x01 + +-/****************************************************************************/ +-/* ASIC voltage data table */ +-/****************************************************************************/ +-typedef struct _ATOM_VOLTAGE_INFO_HEADER { +- USHORT usVDDCBaseLevel; /* In number of 50mv unit */ +- USHORT usReserved; /* For possible extension table offset */ +- UCHAR ucNumOfVoltageEntries; +- UCHAR ucBytesPerVoltageEntry; +- UCHAR ucVoltageStep; /* Indicating in how many mv increament is one step, 0.5mv unit */ +- UCHAR ucDefaultVoltageEntry; +- UCHAR ucVoltageControlI2cLine; +- UCHAR ucVoltageControlAddress; +- UCHAR ucVoltageControlOffset; +-} ATOM_VOLTAGE_INFO_HEADER; +- +-typedef struct _ATOM_VOLTAGE_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ATOM_VOLTAGE_INFO_HEADER viHeader; +- UCHAR ucVoltageEntries[64]; /* 64 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries*ucBytesPerVoltageEntry */ +-} ATOM_VOLTAGE_INFO; +- +-typedef struct _ATOM_VOLTAGE_FORMULA { +- USHORT usVoltageBaseLevel; /* In number of 1mv unit */ +- USHORT usVoltageStep; /* Indicating in how many mv increament is one step, 1mv unit */ +- UCHAR ucNumOfVoltageEntries; /* Number of Voltage Entry, which indicate max Voltage */ +- UCHAR ucFlag; /* bit0=0 :step is 1mv =1 0.5mv */ +- UCHAR ucBaseVID; /* if there is no lookup table, VID= BaseVID + ( Vol - BaseLevle ) /VoltageStep */ +- UCHAR ucReserved; +- UCHAR ucVIDAdjustEntries[32]; /* 32 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries */ +-} ATOM_VOLTAGE_FORMULA; +- +-typedef struct _ATOM_VOLTAGE_CONTROL { +- UCHAR ucVoltageControlId; /* Indicate it is controlled by I2C or GPIO or HW state machine */ +- UCHAR ucVoltageControlI2cLine; +- UCHAR ucVoltageControlAddress; +- UCHAR ucVoltageControlOffset; +- USHORT usGpioPin_AIndex; /* GPIO_PAD register index */ +- UCHAR ucGpioPinBitShift[9]; /* at most 8 pin support 255 VIDs, termintate with 0xff */ +- UCHAR ucReserved; +-} ATOM_VOLTAGE_CONTROL; +- +-/* Define ucVoltageControlId */ ++typedef struct _ATOM_CONNECTOR_HPDPIN_LUT_RECORD //record for ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ UCHAR ucHPDPINMap[MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES]; //An fixed size array which maps external pins to internal GPIO_PIN_INFO table ++}ATOM_CONNECTOR_HPDPIN_LUT_RECORD; ++ ++typedef struct _ATOM_CONNECTOR_AUXDDC_LUT_RECORD //record for ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ ATOM_I2C_ID_CONFIG ucAUXDDCMap[MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES]; //An fixed size array which maps external pins to internal DDC ID ++}ATOM_CONNECTOR_AUXDDC_LUT_RECORD; ++ ++typedef struct _ATOM_OBJECT_LINK_RECORD ++{ ++ ATOM_COMMON_RECORD_HEADER sheader; ++ USHORT usObjectID; //could be connector, encorder or other object in object.h ++}ATOM_OBJECT_LINK_RECORD; ++ ++typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD ++{ ++ 
ATOM_COMMON_RECORD_HEADER sheader; ++ USHORT usReserved; ++}ATOM_CONNECTOR_REMOTE_CAP_RECORD; ++ ++/****************************************************************************/ ++// ASIC voltage data table ++/****************************************************************************/ ++typedef struct _ATOM_VOLTAGE_INFO_HEADER ++{ ++ USHORT usVDDCBaseLevel; //In number of 50mv unit ++ USHORT usReserved; //For possible extension table offset ++ UCHAR ucNumOfVoltageEntries; ++ UCHAR ucBytesPerVoltageEntry; ++ UCHAR ucVoltageStep; //Indicating in how many mv increament is one step, 0.5mv unit ++ UCHAR ucDefaultVoltageEntry; ++ UCHAR ucVoltageControlI2cLine; ++ UCHAR ucVoltageControlAddress; ++ UCHAR ucVoltageControlOffset; ++}ATOM_VOLTAGE_INFO_HEADER; ++ ++typedef struct _ATOM_VOLTAGE_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_VOLTAGE_INFO_HEADER viHeader; ++ UCHAR ucVoltageEntries[64]; //64 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries*ucBytesPerVoltageEntry ++}ATOM_VOLTAGE_INFO; ++ ++ ++typedef struct _ATOM_VOLTAGE_FORMULA ++{ ++ USHORT usVoltageBaseLevel; // In number of 1mv unit ++ USHORT usVoltageStep; // Indicating in how many mv increament is one step, 1mv unit ++ UCHAR ucNumOfVoltageEntries; // Number of Voltage Entry, which indicate max Voltage ++ UCHAR ucFlag; // bit0=0 :step is 1mv =1 0.5mv ++ UCHAR ucBaseVID; // if there is no lookup table, VID= BaseVID + ( Vol - BaseLevle ) /VoltageStep ++ UCHAR ucReserved; ++ UCHAR ucVIDAdjustEntries[32]; // 32 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries ++}ATOM_VOLTAGE_FORMULA; ++ ++typedef struct _VOLTAGE_LUT_ENTRY ++{ ++ USHORT usVoltageCode; // The Voltage ID, either GPIO or I2C code ++ USHORT usVoltageValue; // The corresponding Voltage Value, in mV ++}VOLTAGE_LUT_ENTRY; ++ ++typedef struct _ATOM_VOLTAGE_FORMULA_V2 ++{ ++ UCHAR ucNumOfVoltageEntries; // Number of Voltage Entry, which indicate max Voltage ++ UCHAR ucReserved[3]; ++ VOLTAGE_LUT_ENTRY asVIDAdjustEntries[32];// 32 is for allocation, the actual number of entries is in ucNumOfVoltageEntries ++}ATOM_VOLTAGE_FORMULA_V2; ++ ++typedef struct _ATOM_VOLTAGE_CONTROL ++{ ++ UCHAR ucVoltageControlId; //Indicate it is controlled by I2C or GPIO or HW state machine ++ UCHAR ucVoltageControlI2cLine; ++ UCHAR ucVoltageControlAddress; ++ UCHAR ucVoltageControlOffset; ++ USHORT usGpioPin_AIndex; //GPIO_PAD register index ++ UCHAR ucGpioPinBitShift[9]; //at most 8 pin support 255 VIDs, termintate with 0xff ++ UCHAR ucReserved; ++}ATOM_VOLTAGE_CONTROL; ++ ++// Define ucVoltageControlId + #define VOLTAGE_CONTROLLED_BY_HW 0x00 + #define VOLTAGE_CONTROLLED_BY_I2C_MASK 0x7F + #define VOLTAGE_CONTROLLED_BY_GPIO 0x80 +-#define VOLTAGE_CONTROL_ID_LM64 0x01 /* I2C control, used for R5xx Core Voltage */ +-#define VOLTAGE_CONTROL_ID_DAC 0x02 /* I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI */ +-#define VOLTAGE_CONTROL_ID_VT116xM 0x03 /* I2C control, used for R6xx Core Voltage */ +-#define VOLTAGE_CONTROL_ID_DS4402 0x04 +- +-typedef struct _ATOM_VOLTAGE_OBJECT { +- UCHAR ucVoltageType; /* Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI */ +- UCHAR ucSize; /* Size of Object */ +- ATOM_VOLTAGE_CONTROL asControl; /* describ how to control */ +- ATOM_VOLTAGE_FORMULA asFormula; /* Indicate How to convert real Voltage to VID */ +-} ATOM_VOLTAGE_OBJECT; +- +-typedef struct _ATOM_VOLTAGE_OBJECT_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ATOM_VOLTAGE_OBJECT asVoltageObj[3]; /* Info for Voltage control */ +-} 
ATOM_VOLTAGE_OBJECT_INFO; +- +-typedef struct _ATOM_LEAKID_VOLTAGE { +- UCHAR ucLeakageId; +- UCHAR ucReserved; +- USHORT usVoltage; +-} ATOM_LEAKID_VOLTAGE; +- +-typedef struct _ATOM_ASIC_PROFILE_VOLTAGE { +- UCHAR ucProfileId; +- UCHAR ucReserved; +- USHORT usSize; +- USHORT usEfuseSpareStartAddr; +- USHORT usFuseIndex[8]; /* from LSB to MSB, Max 8bit,end of 0xffff if less than 8 efuse id, */ +- ATOM_LEAKID_VOLTAGE asLeakVol[2]; /* Leakid and relatd voltage */ +-} ATOM_ASIC_PROFILE_VOLTAGE; +- +-/* ucProfileId */ +-#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1 ++#define VOLTAGE_CONTROL_ID_LM64 0x01 //I2C control, used for R5xx Core Voltage ++#define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI ++#define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage ++#define VOLTAGE_CONTROL_ID_DS4402 0x04 ++ ++typedef struct _ATOM_VOLTAGE_OBJECT ++{ ++ UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI ++ UCHAR ucSize; //Size of Object ++ ATOM_VOLTAGE_CONTROL asControl; //describ how to control ++ ATOM_VOLTAGE_FORMULA asFormula; //Indicate How to convert real Voltage to VID ++}ATOM_VOLTAGE_OBJECT; ++ ++typedef struct _ATOM_VOLTAGE_OBJECT_V2 ++{ ++ UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI ++ UCHAR ucSize; //Size of Object ++ ATOM_VOLTAGE_CONTROL asControl; //describ how to control ++ ATOM_VOLTAGE_FORMULA_V2 asFormula; //Indicate How to convert real Voltage to VID ++}ATOM_VOLTAGE_OBJECT_V2; ++ ++typedef struct _ATOM_VOLTAGE_OBJECT_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_VOLTAGE_OBJECT asVoltageObj[3]; //Info for Voltage control ++}ATOM_VOLTAGE_OBJECT_INFO; ++ ++typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V2 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_VOLTAGE_OBJECT_V2 asVoltageObj[3]; //Info for Voltage control ++}ATOM_VOLTAGE_OBJECT_INFO_V2; ++ ++typedef struct _ATOM_LEAKID_VOLTAGE ++{ ++ UCHAR ucLeakageId; ++ UCHAR ucReserved; ++ USHORT usVoltage; ++}ATOM_LEAKID_VOLTAGE; ++ ++typedef struct _ATOM_ASIC_PROFILE_VOLTAGE ++{ ++ UCHAR ucProfileId; ++ UCHAR ucReserved; ++ USHORT usSize; ++ USHORT usEfuseSpareStartAddr; ++ USHORT usFuseIndex[8]; //from LSB to MSB, Max 8bit,end of 0xffff if less than 8 efuse id, ++ ATOM_LEAKID_VOLTAGE asLeakVol[2]; //Leakid and relatd voltage ++}ATOM_ASIC_PROFILE_VOLTAGE; ++ ++//ucProfileId ++#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1 + #define ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE 1 + #define ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE 2 + +-typedef struct _ATOM_ASIC_PROFILING_INFO { +- ATOM_COMMON_TABLE_HEADER asHeader; +- ATOM_ASIC_PROFILE_VOLTAGE asVoltage; +-} ATOM_ASIC_PROFILING_INFO; +- +-typedef struct _ATOM_POWER_SOURCE_OBJECT { +- UCHAR ucPwrSrcId; /* Power source */ +- UCHAR ucPwrSensorType; /* GPIO, I2C or none */ +- UCHAR ucPwrSensId; /* if GPIO detect, it is GPIO id, if I2C detect, it is I2C id */ +- UCHAR ucPwrSensSlaveAddr; /* Slave address if I2C detect */ +- UCHAR ucPwrSensRegIndex; /* I2C register Index if I2C detect */ +- UCHAR ucPwrSensRegBitMask; /* detect which bit is used if I2C detect */ +- UCHAR ucPwrSensActiveState; /* high active or low active */ +- UCHAR ucReserve[3]; /* reserve */ +- USHORT usSensPwr; /* in unit of watt */ +-} ATOM_POWER_SOURCE_OBJECT; +- +-typedef struct _ATOM_POWER_SOURCE_INFO { +- ATOM_COMMON_TABLE_HEADER asHeader; +- UCHAR asPwrbehave[16]; +- ATOM_POWER_SOURCE_OBJECT asPwrObj[1]; +-} ATOM_POWER_SOURCE_INFO; +- +-/* Define ucPwrSrcId */ ++typedef struct _ATOM_ASIC_PROFILING_INFO 
++{ ++ ATOM_COMMON_TABLE_HEADER asHeader; ++ ATOM_ASIC_PROFILE_VOLTAGE asVoltage; ++}ATOM_ASIC_PROFILING_INFO; ++ ++typedef struct _ATOM_POWER_SOURCE_OBJECT ++{ ++ UCHAR ucPwrSrcId; // Power source ++ UCHAR ucPwrSensorType; // GPIO, I2C or none ++ UCHAR ucPwrSensId; // if GPIO detect, it is GPIO id, if I2C detect, it is I2C id ++ UCHAR ucPwrSensSlaveAddr; // Slave address if I2C detect ++ UCHAR ucPwrSensRegIndex; // I2C register Index if I2C detect ++ UCHAR ucPwrSensRegBitMask; // detect which bit is used if I2C detect ++ UCHAR ucPwrSensActiveState; // high active or low active ++ UCHAR ucReserve[3]; // reserve ++ USHORT usSensPwr; // in unit of watt ++}ATOM_POWER_SOURCE_OBJECT; ++ ++typedef struct _ATOM_POWER_SOURCE_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER asHeader; ++ UCHAR asPwrbehave[16]; ++ ATOM_POWER_SOURCE_OBJECT asPwrObj[1]; ++}ATOM_POWER_SOURCE_INFO; ++ ++ ++//Define ucPwrSrcId + #define POWERSOURCE_PCIE_ID1 0x00 + #define POWERSOURCE_6PIN_CONNECTOR_ID1 0x01 + #define POWERSOURCE_8PIN_CONNECTOR_ID1 0x02 + #define POWERSOURCE_6PIN_CONNECTOR_ID2 0x04 + #define POWERSOURCE_8PIN_CONNECTOR_ID2 0x08 + +-/* define ucPwrSensorId */ ++//define ucPwrSensorId + #define POWER_SENSOR_ALWAYS 0x00 + #define POWER_SENSOR_GPIO 0x01 + #define POWER_SENSOR_I2C 0x02 + ++typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ULONG ulBootUpEngineClock; ++ ULONG ulDentistVCOFreq; ++ ULONG ulBootUpUMAClock; ++ ULONG ulReserved1[8]; ++ ULONG ulBootUpReqDisplayVector; ++ ULONG ulOtherDisplayMisc; ++ ULONG ulGPUCapInfo; ++ ULONG ulReserved2[3]; ++ ULONG ulSystemConfig; ++ ULONG ulCPUCapInfo; ++ USHORT usMaxNBVoltage; ++ USHORT usMinNBVoltage; ++ USHORT usBootUpNBVoltage; ++ USHORT usExtDispConnInfoOffset; ++ UCHAR ucHtcTmpLmt; ++ UCHAR ucTjOffset; ++ UCHAR ucMemoryType; ++ UCHAR ucUMAChannelNumber; ++ ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10]; ++ ULONG ulCSR_M3_ARB_CNTL_UVD[10]; ++ ULONG ulCSR_M3_ARB_CNTL_FS3D[10]; ++ ULONG ulReserved3[42]; ++ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo; ++}ATOM_INTEGRATED_SYSTEM_INFO_V6; ++ ++/********************************************************************************************************************** ++// ATOM_INTEGRATED_SYSTEM_INFO_V6 Description ++//ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. ++//ulDentistVCOFreq: Dentist VCO clock in 10kHz unit. ++//ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit. ++//ulReserved1[8] Reserved by now, must be 0x0. ++//ulBootUpReqDisplayVector VBIOS boot up display IDs ++// ATOM_DEVICE_CRT1_SUPPORT 0x0001 ++// ATOM_DEVICE_CRT2_SUPPORT 0x0010 ++// ATOM_DEVICE_DFP1_SUPPORT 0x0008 ++// ATOM_DEVICE_DFP6_SUPPORT 0x0040 ++// ATOM_DEVICE_DFP2_SUPPORT 0x0080 ++// ATOM_DEVICE_DFP3_SUPPORT 0x0200 ++// ATOM_DEVICE_DFP4_SUPPORT 0x0400 ++// ATOM_DEVICE_DFP5_SUPPORT 0x0800 ++// ATOM_DEVICE_LCD1_SUPPORT 0x0002 ++//ulOtherDisplayMisc Other display related flags, not defined yet. ++//ulGPUCapInfo TBD ++//ulReserved2[3] must be 0x0 for the reserved. ++//ulSystemConfig TBD ++//ulCPUCapInfo TBD ++//usMaxNBVoltage High NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse. ++//usMinNBVoltage Low NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse. ++//usBootUpNBVoltage Boot up NB voltage in unit of mv. ++//ucHtcTmpLmt Bit [22:16] of D24F3x64 Thermal Control (HTC) Register. ++//ucTjOffset Bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed. 
++//ucMemoryType [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved. ++//ucUMAChannelNumber System memory channel numbers. ++//usExtDispConnectionInfoOffset ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to beginning of this table. ++//ulCSR_M3_ARB_CNTL_DEFAULT[10] Arrays with values for CSR M3 arbiter for default ++//ulCSR_M3_ARB_CNTL_UVD[10] Arrays with values for CSR M3 arbiter for UVD playback. ++//ulCSR_M3_ARB_CNTL_FS3D[10] Arrays with values for CSR M3 arbiter for Full Screen 3D applications. ++**********************************************************************************************************************/ ++ + /**************************************************************************/ +-/* This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design */ +-/* Memory SS Info Table */ +-/* Define Memory Clock SS chip ID */ ++// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design ++//Memory SS Info Table ++//Define Memory Clock SS chip ID + #define ICS91719 1 + #define ICS91720 2 + +-/* Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol */ +-typedef struct _ATOM_I2C_DATA_RECORD { +- UCHAR ucNunberOfBytes; /* Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop" */ +- UCHAR ucI2CData[1]; /* I2C data in bytes, should be less than 16 bytes usually */ +-} ATOM_I2C_DATA_RECORD; +- +-/* Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information */ +-typedef struct _ATOM_I2C_DEVICE_SETUP_INFO { +- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; /* I2C line and HW/SW assisted cap. */ +- UCHAR ucSSChipID; /* SS chip being used */ +- UCHAR ucSSChipSlaveAddr; /* Slave Address to set up this SS chip */ +- UCHAR ucNumOfI2CDataRecords; /* number of data block */ +- ATOM_I2C_DATA_RECORD asI2CData[1]; +-} ATOM_I2C_DEVICE_SETUP_INFO; +- +-/* ========================================================================================== */ +-typedef struct _ATOM_ASIC_MVDD_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1]; +-} ATOM_ASIC_MVDD_INFO; +- +-/* ========================================================================================== */ ++//Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol ++typedef struct _ATOM_I2C_DATA_RECORD ++{ ++ UCHAR ucNunberOfBytes; //Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop" ++ UCHAR ucI2CData[1]; //I2C data in bytes, should be less than 16 bytes usually ++}ATOM_I2C_DATA_RECORD; ++ ++ ++//Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information ++typedef struct _ATOM_I2C_DEVICE_SETUP_INFO ++{ ++ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //I2C line and HW/SW assisted cap. 
++ UCHAR ucSSChipID; //SS chip being used ++ UCHAR ucSSChipSlaveAddr; //Slave Address to set up this SS chip ++ UCHAR ucNumOfI2CDataRecords; //number of data block ++ ATOM_I2C_DATA_RECORD asI2CData[1]; ++}ATOM_I2C_DEVICE_SETUP_INFO; ++ ++//========================================================================================== ++typedef struct _ATOM_ASIC_MVDD_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1]; ++}ATOM_ASIC_MVDD_INFO; ++ ++//========================================================================================== + #define ATOM_MCLK_SS_INFO ATOM_ASIC_MVDD_INFO + +-/* ========================================================================================== */ ++//========================================================================================== + /**************************************************************************/ + +-typedef struct _ATOM_ASIC_SS_ASSIGNMENT { +- ULONG ulTargetClockRange; /* Clock Out frequence (VCO ), in unit of 10Khz */ +- USHORT usSpreadSpectrumPercentage; /* in unit of 0.01% */ +- USHORT usSpreadRateInKhz; /* in unit of kHz, modulation freq */ +- UCHAR ucClockIndication; /* Indicate which clock source needs SS */ +- UCHAR ucSpreadSpectrumMode; /* Bit1=0 Down Spread,=1 Center Spread. */ +- UCHAR ucReserved[2]; +-} ATOM_ASIC_SS_ASSIGNMENT; +- +-/* Define ucSpreadSpectrumType */ ++typedef struct _ATOM_ASIC_SS_ASSIGNMENT ++{ ++ ULONG ulTargetClockRange; //Clock Out frequence (VCO ), in unit of 10Khz ++ USHORT usSpreadSpectrumPercentage; //in unit of 0.01% ++ USHORT usSpreadRateInKhz; //in unit of kHz, modulation freq ++ UCHAR ucClockIndication; //Indicate which clock source needs SS ++ UCHAR ucSpreadSpectrumMode; //Bit1=0 Down Spread,=1 Center Spread. ++ UCHAR ucReserved[2]; ++}ATOM_ASIC_SS_ASSIGNMENT; ++ ++//Define ucClockIndication, SW uses the IDs below to search if the SS is requried/enabled on a clock branch/signal type. ++//SS is not required or enabled if a match is not found. 
+ #define ASIC_INTERNAL_MEMORY_SS 1 + #define ASIC_INTERNAL_ENGINE_SS 2 +-#define ASIC_INTERNAL_UVD_SS 3 ++#define ASIC_INTERNAL_UVD_SS 3 ++#define ASIC_INTERNAL_SS_ON_TMDS 4 ++#define ASIC_INTERNAL_SS_ON_HDMI 5 ++#define ASIC_INTERNAL_SS_ON_LVDS 6 ++#define ASIC_INTERNAL_SS_ON_DP 7 ++#define ASIC_INTERNAL_SS_ON_DCPLL 8 ++ ++typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2 ++{ ++ ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequence (VCO ), in unit of 10Khz ++ //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 ) ++ USHORT usSpreadSpectrumPercentage; //in unit of 0.01% ++ USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq ++ UCHAR ucClockIndication; //Indicate which clock source needs SS ++ UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS ++ UCHAR ucReserved[2]; ++}ATOM_ASIC_SS_ASSIGNMENT_V2; ++ ++//ucSpreadSpectrumMode ++//#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000 ++//#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000 ++//#define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001 ++//#define ATOM_SS_CENTRE_SPREAD_MODE 0x00000001 ++//#define ATOM_INTERNAL_SS_MASK 0x00000000 ++//#define ATOM_EXTERNAL_SS_MASK 0x00000002 ++ ++typedef struct _ATOM_ASIC_INTERNAL_SS_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4]; ++}ATOM_ASIC_INTERNAL_SS_INFO; + +-typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4]; +-} ATOM_ASIC_INTERNAL_SS_INFO; ++typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[1]; //this is point only. ++}ATOM_ASIC_INTERNAL_SS_INFO_V2; + +-/* ==============================Scratch Pad Definition Portion=============================== */ ++typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3 ++{ ++ ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequence (VCO ), in unit of 10Khz ++ //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 ) ++ USHORT usSpreadSpectrumPercentage; //in unit of 0.01% ++ USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq ++ UCHAR ucClockIndication; //Indicate which clock source needs SS ++ UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS ++ UCHAR ucReserved[2]; ++}ATOM_ASIC_SS_ASSIGNMENT_V3; ++ ++typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[1]; //this is pointer only. 
++}ATOM_ASIC_INTERNAL_SS_INFO_V3; ++ ++ ++//==============================Scratch Pad Definition Portion=============================== + #define ATOM_DEVICE_CONNECT_INFO_DEF 0 + #define ATOM_ROM_LOCATION_DEF 1 + #define ATOM_TV_STANDARD_DEF 2 +@@ -2995,7 +3852,8 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + #define ATOM_I2C_CHANNEL_STATUS_DEF 8 + #define ATOM_I2C_CHANNEL_STATUS1_DEF 9 + +-/* BIOS_0_SCRATCH Definition */ ++ ++// BIOS_0_SCRATCH Definition + #define ATOM_S0_CRT1_MONO 0x00000001L + #define ATOM_S0_CRT1_COLOR 0x00000002L + #define ATOM_S0_CRT1_MASK (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR) +@@ -3008,6 +3866,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + #define ATOM_S0_CV_DIN_A 0x00000020L + #define ATOM_S0_CV_MASK_A (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A) + ++ + #define ATOM_S0_CRT2_MONO 0x00000100L + #define ATOM_S0_CRT2_COLOR 0x00000200L + #define ATOM_S0_CRT2_MASK (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR) +@@ -3025,28 +3884,27 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + #define ATOM_S0_DFP2 0x00020000L + #define ATOM_S0_LCD1 0x00040000L + #define ATOM_S0_LCD2 0x00080000L +-#define ATOM_S0_TV2 0x00100000L +-#define ATOM_S0_DFP3 0x00200000L +-#define ATOM_S0_DFP4 0x00400000L +-#define ATOM_S0_DFP5 0x00800000L ++#define ATOM_S0_DFP6 0x00100000L ++#define ATOM_S0_DFP3 0x00200000L ++#define ATOM_S0_DFP4 0x00400000L ++#define ATOM_S0_DFP5 0x00800000L + +-#define ATOM_S0_DFP_MASK \ +- (ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5) ++#define ATOM_S0_DFP_MASK ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5 | ATOM_S0_DFP6 + +-#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L /* If set, indicates we are running a PCIE asic with */ +- /* the FAD/HDP reg access bug. Bit is read by DAL */ ++#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L // If set, indicates we are running a PCIE asic with ++ // the FAD/HDP reg access bug. 
Bit is read by DAL, this is obsolete from RV5xx + + #define ATOM_S0_THERMAL_STATE_MASK 0x1C000000L + #define ATOM_S0_THERMAL_STATE_SHIFT 26 + + #define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L +-#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29 ++#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29 + + #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1 + #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2 + #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3 + +-/* Byte aligned definition for BIOS usage */ ++//Byte aligned defintion for BIOS usage + #define ATOM_S0_CRT1_MONOb0 0x01 + #define ATOM_S0_CRT1_COLORb0 0x02 + #define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0) +@@ -3076,8 +3934,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + #define ATOM_S0_DFP2b2 0x02 + #define ATOM_S0_LCD1b2 0x04 + #define ATOM_S0_LCD2b2 0x08 +-#define ATOM_S0_TV2b2 0x10 +-#define ATOM_S0_DFP3b2 0x20 ++#define ATOM_S0_DFP6b2 0x10 ++#define ATOM_S0_DFP3b2 0x20 ++#define ATOM_S0_DFP4b2 0x40 ++#define ATOM_S0_DFP5b2 0x80 ++ + + #define ATOM_S0_THERMAL_STATE_MASKb3 0x1C + #define ATOM_S0_THERMAL_STATE_SHIFTb3 2 +@@ -3085,43 +3946,20 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + #define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0 + #define ATOM_S0_LCD1_SHIFT 18 + +-/* BIOS_1_SCRATCH Definition */ ++// BIOS_1_SCRATCH Definition + #define ATOM_S1_ROM_LOCATION_MASK 0x0000FFFFL + #define ATOM_S1_PCI_BUS_DEV_MASK 0xFFFF0000L + +-/* BIOS_2_SCRATCH Definition */ ++// BIOS_2_SCRATCH Definition + #define ATOM_S2_TV1_STANDARD_MASK 0x0000000FL + #define ATOM_S2_CURRENT_BL_LEVEL_MASK 0x0000FF00L + #define ATOM_S2_CURRENT_BL_LEVEL_SHIFT 8 + +-#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L +-#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L +-#define ATOM_S2_TV1_DPMS_STATE 0x00040000L +-#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L +-#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L +-#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L +-#define ATOM_S2_TV2_DPMS_STATE 0x00400000L +-#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L +-#define ATOM_S2_CV_DPMS_STATE 0x01000000L +-#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L +-#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L +-#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L +- +-#define ATOM_S2_DFP_DPM_STATE \ +- (ATOM_S2_DFP1_DPMS_STATE | ATOM_S2_DFP2_DPMS_STATE | \ +- ATOM_S2_DFP3_DPMS_STATE | ATOM_S2_DFP4_DPMS_STATE | \ +- ATOM_S2_DFP5_DPMS_STATE) +- +-#define ATOM_S2_DEVICE_DPMS_STATE \ +- (ATOM_S2_CRT1_DPMS_STATE + ATOM_S2_LCD1_DPMS_STATE + \ +- ATOM_S2_TV1_DPMS_STATE + ATOM_S2_DFP_DPMS_STATE + \ +- ATOM_S2_CRT2_DPMS_STATE + ATOM_S2_LCD2_DPMS_STATE + \ +- ATOM_S2_TV2_DPMS_STATE + ATOM_S2_CV_DPMS_STATE) +- + #define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK 0x0C000000L + #define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26 + #define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE 0x10000000L + ++#define ATOM_S2_DEVICE_DPMS_STATE 0x00010000L + #define ATOM_S2_VRI_BRIGHT_ENABLE 0x20000000L + + #define ATOM_S2_DISPLAY_ROTATION_0_DEGREE 0x0 +@@ -3131,21 +3969,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + #define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30 + #define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L + +-/* Byte aligned definition for BIOS usage */ ++ ++//Byte aligned defintion for BIOS usage + #define ATOM_S2_TV1_STANDARD_MASKb0 0x0F + #define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF +-#define ATOM_S2_CRT1_DPMS_STATEb2 0x01 +-#define ATOM_S2_LCD1_DPMS_STATEb2 0x02 +-#define ATOM_S2_TV1_DPMS_STATEb2 0x04 +-#define ATOM_S2_DFP1_DPMS_STATEb2 0x08 +-#define ATOM_S2_CRT2_DPMS_STATEb2 0x10 +-#define ATOM_S2_LCD2_DPMS_STATEb2 0x20 
+-#define ATOM_S2_TV2_DPMS_STATEb2 0x40 +-#define ATOM_S2_DFP2_DPMS_STATEb2 0x80 +-#define ATOM_S2_CV_DPMS_STATEb3 0x01 +-#define ATOM_S2_DFP3_DPMS_STATEb3 0x02 +-#define ATOM_S2_DFP4_DPMS_STATEb3 0x04 +-#define ATOM_S2_DFP5_DPMS_STATEb3 0x08 ++#define ATOM_S2_DEVICE_DPMS_STATEb2 0x01 + + #define ATOM_S2_DEVICE_DPMS_MASKw1 0x3FF + #define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3 0x0C +@@ -3153,21 +3981,22 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + #define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20 + #define ATOM_S2_ROTATION_STATE_MASKb3 0xC0 + +-/* BIOS_3_SCRATCH Definition */ ++ ++// BIOS_3_SCRATCH Definition + #define ATOM_S3_CRT1_ACTIVE 0x00000001L + #define ATOM_S3_LCD1_ACTIVE 0x00000002L + #define ATOM_S3_TV1_ACTIVE 0x00000004L + #define ATOM_S3_DFP1_ACTIVE 0x00000008L + #define ATOM_S3_CRT2_ACTIVE 0x00000010L + #define ATOM_S3_LCD2_ACTIVE 0x00000020L +-#define ATOM_S3_TV2_ACTIVE 0x00000040L ++#define ATOM_S3_DFP6_ACTIVE 0x00000040L + #define ATOM_S3_DFP2_ACTIVE 0x00000080L + #define ATOM_S3_CV_ACTIVE 0x00000100L + #define ATOM_S3_DFP3_ACTIVE 0x00000200L + #define ATOM_S3_DFP4_ACTIVE 0x00000400L + #define ATOM_S3_DFP5_ACTIVE 0x00000800L + +-#define ATOM_S3_DEVICE_ACTIVE_MASK 0x000003FFL ++#define ATOM_S3_DEVICE_ACTIVE_MASK 0x00000FFFL + + #define ATOM_S3_LCD_FULLEXPANSION_ACTIVE 0x00001000L + #define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L +@@ -3178,7 +4007,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + #define ATOM_S3_DFP1_CRTC_ACTIVE 0x00080000L + #define ATOM_S3_CRT2_CRTC_ACTIVE 0x00100000L + #define ATOM_S3_LCD2_CRTC_ACTIVE 0x00200000L +-#define ATOM_S3_TV2_CRTC_ACTIVE 0x00400000L ++#define ATOM_S3_DFP6_CRTC_ACTIVE 0x00400000L + #define ATOM_S3_DFP2_CRTC_ACTIVE 0x00800000L + #define ATOM_S3_CV_CRTC_ACTIVE 0x01000000L + #define ATOM_S3_DFP3_CRTC_ACTIVE 0x02000000L +@@ -3187,17 +4016,18 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + + #define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L + #define ATOM_S3_ASIC_GUI_ENGINE_HUNG 0x20000000L ++//Below two definitions are not supported in pplib, but in the old powerplay in DAL + #define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L + #define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L + +-/* Byte aligned definition for BIOS usage */ ++//Byte aligned defintion for BIOS usage + #define ATOM_S3_CRT1_ACTIVEb0 0x01 + #define ATOM_S3_LCD1_ACTIVEb0 0x02 + #define ATOM_S3_TV1_ACTIVEb0 0x04 + #define ATOM_S3_DFP1_ACTIVEb0 0x08 + #define ATOM_S3_CRT2_ACTIVEb0 0x10 + #define ATOM_S3_LCD2_ACTIVEb0 0x20 +-#define ATOM_S3_TV2_ACTIVEb0 0x40 ++#define ATOM_S3_DFP6_ACTIVEb0 0x40 + #define ATOM_S3_DFP2_ACTIVEb0 0x80 + #define ATOM_S3_CV_ACTIVEb1 0x01 + #define ATOM_S3_DFP3_ACTIVEb1 0x02 +@@ -3212,7 +4042,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + #define ATOM_S3_DFP1_CRTC_ACTIVEb2 0x08 + #define ATOM_S3_CRT2_CRTC_ACTIVEb2 0x10 + #define ATOM_S3_LCD2_CRTC_ACTIVEb2 0x20 +-#define ATOM_S3_TV2_CRTC_ACTIVEb2 0x40 ++#define ATOM_S3_DFP6_CRTC_ACTIVEb2 0x40 + #define ATOM_S3_DFP2_CRTC_ACTIVEb2 0x80 + #define ATOM_S3_CV_CRTC_ACTIVEb3 0x01 + #define ATOM_S3_DFP3_CRTC_ACTIVEb3 0x02 +@@ -3221,35 +4051,31 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + + #define ATOM_S3_ACTIVE_CRTC2w1 0xFFF + +-#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20 +-#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40 +-#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80 +- +-/* BIOS_4_SCRATCH Definition */ ++// BIOS_4_SCRATCH Definition + #define ATOM_S4_LCD1_PANEL_ID_MASK 0x000000FFL + #define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L + #define ATOM_S4_LCD1_REFRESH_SHIFT 8 + +-/* Byte aligned 
definition for BIOS usage */ ++//Byte aligned defintion for BIOS usage + #define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF + #define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0 + #define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0 + +-/* BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!! */ ++// BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!! + #define ATOM_S5_DOS_REQ_CRT1b0 0x01 + #define ATOM_S5_DOS_REQ_LCD1b0 0x02 + #define ATOM_S5_DOS_REQ_TV1b0 0x04 + #define ATOM_S5_DOS_REQ_DFP1b0 0x08 + #define ATOM_S5_DOS_REQ_CRT2b0 0x10 + #define ATOM_S5_DOS_REQ_LCD2b0 0x20 +-#define ATOM_S5_DOS_REQ_TV2b0 0x40 ++#define ATOM_S5_DOS_REQ_DFP6b0 0x40 + #define ATOM_S5_DOS_REQ_DFP2b0 0x80 + #define ATOM_S5_DOS_REQ_CVb1 0x01 + #define ATOM_S5_DOS_REQ_DFP3b1 0x02 + #define ATOM_S5_DOS_REQ_DFP4b1 0x04 + #define ATOM_S5_DOS_REQ_DFP5b1 0x08 + +-#define ATOM_S5_DOS_REQ_DEVICEw0 0x03FF ++#define ATOM_S5_DOS_REQ_DEVICEw0 0x0FFF + + #define ATOM_S5_DOS_REQ_CRT1 0x0001 + #define ATOM_S5_DOS_REQ_LCD1 0x0002 +@@ -3257,22 +4083,21 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + #define ATOM_S5_DOS_REQ_DFP1 0x0008 + #define ATOM_S5_DOS_REQ_CRT2 0x0010 + #define ATOM_S5_DOS_REQ_LCD2 0x0020 +-#define ATOM_S5_DOS_REQ_TV2 0x0040 ++#define ATOM_S5_DOS_REQ_DFP6 0x0040 + #define ATOM_S5_DOS_REQ_DFP2 0x0080 + #define ATOM_S5_DOS_REQ_CV 0x0100 +-#define ATOM_S5_DOS_REQ_DFP3 0x0200 +-#define ATOM_S5_DOS_REQ_DFP4 0x0400 +-#define ATOM_S5_DOS_REQ_DFP5 0x0800 ++#define ATOM_S5_DOS_REQ_DFP3 0x0200 ++#define ATOM_S5_DOS_REQ_DFP4 0x0400 ++#define ATOM_S5_DOS_REQ_DFP5 0x0800 + + #define ATOM_S5_DOS_FORCE_CRT1b2 ATOM_S5_DOS_REQ_CRT1b0 + #define ATOM_S5_DOS_FORCE_TV1b2 ATOM_S5_DOS_REQ_TV1b0 + #define ATOM_S5_DOS_FORCE_CRT2b2 ATOM_S5_DOS_REQ_CRT2b0 + #define ATOM_S5_DOS_FORCE_CVb3 ATOM_S5_DOS_REQ_CVb1 +-#define ATOM_S5_DOS_FORCE_DEVICEw1 \ +- (ATOM_S5_DOS_FORCE_CRT1b2 + ATOM_S5_DOS_FORCE_TV1b2 + \ +- ATOM_S5_DOS_FORCE_CRT2b2 + (ATOM_S5_DOS_FORCE_CVb3 << 8)) ++#define ATOM_S5_DOS_FORCE_DEVICEw1 (ATOM_S5_DOS_FORCE_CRT1b2+ATOM_S5_DOS_FORCE_TV1b2+ATOM_S5_DOS_FORCE_CRT2b2+\ ++ (ATOM_S5_DOS_FORCE_CVb3<<8)) + +-/* BIOS_6_SCRATCH Definition */ ++// BIOS_6_SCRATCH Definition + #define ATOM_S6_DEVICE_CHANGE 0x00000001L + #define ATOM_S6_SCALER_CHANGE 0x00000002L + #define ATOM_S6_LID_CHANGE 0x00000004L +@@ -3285,11 +4110,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + #define ATOM_S6_HW_I2C_BUSY_STATE 0x00000200L + #define ATOM_S6_THERMAL_STATE_CHANGE 0x00000400L + #define ATOM_S6_INTERRUPT_SET_BY_BIOS 0x00000800L +-#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L /* Normal expansion Request bit for LCD */ +-#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L /* Aspect ratio expansion Request bit for LCD */ ++#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L //Normal expansion Request bit for LCD ++#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L //Aspect ratio expansion Request bit for LCD + +-#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L /* This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion */ +-#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L /* This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion */ ++#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion ++#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L //This bit is recycled,when 
ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion + + #define ATOM_S6_ACC_REQ_CRT1 0x00010000L + #define ATOM_S6_ACC_REQ_LCD1 0x00020000L +@@ -3297,7 +4122,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + #define ATOM_S6_ACC_REQ_DFP1 0x00080000L + #define ATOM_S6_ACC_REQ_CRT2 0x00100000L + #define ATOM_S6_ACC_REQ_LCD2 0x00200000L +-#define ATOM_S6_ACC_REQ_TV2 0x00400000L ++#define ATOM_S6_ACC_REQ_DFP6 0x00400000L + #define ATOM_S6_ACC_REQ_DFP2 0x00800000L + #define ATOM_S6_ACC_REQ_CV 0x01000000L + #define ATOM_S6_ACC_REQ_DFP3 0x02000000L +@@ -3310,7 +4135,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + #define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L + #define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L + +-/* Byte aligned definition for BIOS usage */ ++//Byte aligned defintion for BIOS usage + #define ATOM_S6_DEVICE_CHANGEb0 0x01 + #define ATOM_S6_SCALER_CHANGEb0 0x02 + #define ATOM_S6_LID_CHANGEb0 0x04 +@@ -3320,11 +4145,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + #define ATOM_S6_LID_STATEb0 0x40 + #define ATOM_S6_DOCK_STATEb0 0x80 + #define ATOM_S6_CRITICAL_STATEb1 0x01 +-#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02 ++#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02 + #define ATOM_S6_THERMAL_STATE_CHANGEb1 0x04 + #define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08 +-#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10 +-#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20 ++#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10 ++#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20 + + #define ATOM_S6_ACC_REQ_CRT1b2 0x01 + #define ATOM_S6_ACC_REQ_LCD1b2 0x02 +@@ -3332,12 +4157,12 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + #define ATOM_S6_ACC_REQ_DFP1b2 0x08 + #define ATOM_S6_ACC_REQ_CRT2b2 0x10 + #define ATOM_S6_ACC_REQ_LCD2b2 0x20 +-#define ATOM_S6_ACC_REQ_TV2b2 0x40 ++#define ATOM_S6_ACC_REQ_DFP6b2 0x40 + #define ATOM_S6_ACC_REQ_DFP2b2 0x80 + #define ATOM_S6_ACC_REQ_CVb3 0x01 +-#define ATOM_S6_ACC_REQ_DFP3b3 0x02 +-#define ATOM_S6_ACC_REQ_DFP4b3 0x04 +-#define ATOM_S6_ACC_REQ_DFP5b3 0x08 ++#define ATOM_S6_ACC_REQ_DFP3b3 0x02 ++#define ATOM_S6_ACC_REQ_DFP4b3 0x04 ++#define ATOM_S6_ACC_REQ_DFP5b3 0x08 + + #define ATOM_S6_ACC_REQ_DEVICEw1 ATOM_S5_DOS_REQ_DEVICEw0 + #define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10 +@@ -3366,7 +4191,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + #define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT 30 + #define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT 31 + +-/* BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!! */ ++// BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!! 
+ #define ATOM_S7_DOS_MODE_TYPEb0 0x03 + #define ATOM_S7_DOS_MODE_VGAb0 0x00 + #define ATOM_S7_DOS_MODE_VESAb0 0x01 +@@ -3378,220 +4203,194 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { + + #define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8 + +-/* BIOS_8_SCRATCH Definition */ ++// BIOS_8_SCRATCH Definition + #define ATOM_S8_I2C_CHANNEL_BUSY_MASK 0x00000FFFF +-#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000 ++#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000 + + #define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT 0 + #define ATOM_S8_I2C_ENGINE_BUSY_SHIFT 16 + +-/* BIOS_9_SCRATCH Definition */ +-#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK ++// BIOS_9_SCRATCH Definition ++#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK + #define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 0x0000FFFF + #endif +-#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK ++#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK + #define ATOM_S9_I2C_CHANNEL_ABORTED_MASK 0xFFFF0000 + #endif +-#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT ++#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT + #define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0 + #endif +-#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT ++#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT + #define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT 16 + #endif + ++ + #define ATOM_FLAG_SET 0x20 + #define ATOM_FLAG_CLEAR 0 +-#define CLEAR_ATOM_S6_ACC_MODE \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR) +-#define SET_ATOM_S6_DEVICE_CHANGE \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET) +-#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET) +-#define SET_ATOM_S6_SCALER_CHANGE \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET) +-#define SET_ATOM_S6_LID_CHANGE \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET) +- +-#define SET_ATOM_S6_LID_STATE \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) |\ +- ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET) +-#define CLEAR_ATOM_S6_LID_STATE \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR) +- +-#define SET_ATOM_S6_DOCK_CHANGE \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8)| \ +- ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET) +-#define SET_ATOM_S6_DOCK_STATE \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET) +-#define CLEAR_ATOM_S6_DOCK_STATE \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR) +- +-#define SET_ATOM_S6_THERMAL_STATE_CHANGE \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET) +-#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET) +-#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET) +- +-#define SET_ATOM_S6_CRITICAL_STATE \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET) +-#define CLEAR_ATOM_S6_CRITICAL_STATE \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR) +- +-#define SET_ATOM_S6_REQ_SCALER \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET) +-#define CLEAR_ATOM_S6_REQ_SCALER \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR ) +- +-#define SET_ATOM_S6_REQ_SCALER_ARATIO \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- 
ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET ) +-#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR ) +- +-#define SET_ATOM_S6_I2C_STATE_CHANGE \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET ) +- +-#define SET_ATOM_S6_DISPLAY_STATE_CHANGE \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET ) +- +-#define SET_ATOM_S6_DEVICE_RECONFIG \ +- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ +- ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET) +-#define CLEAR_ATOM_S0_LCD1 \ +- ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 ) | \ +- ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR ) +-#define SET_ATOM_S7_DOS_8BIT_DAC_EN \ +- ((ATOM_DOS_MODE_INFO_DEF << 8) | \ +- ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET ) +-#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN \ +- ((ATOM_DOS_MODE_INFO_DEF << 8) | \ +- ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR ) ++#define CLEAR_ATOM_S6_ACC_MODE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR) ++#define SET_ATOM_S6_DEVICE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET) ++#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET) ++#define SET_ATOM_S6_SCALER_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET) ++#define SET_ATOM_S6_LID_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET) + +-/****************************************************************************/ +-/* Portion II: Definitinos only used in Driver */ ++#define SET_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET) ++#define CLEAR_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR) ++ ++#define SET_ATOM_S6_DOCK_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET) ++#define SET_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET) ++#define CLEAR_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR) ++ ++#define SET_ATOM_S6_THERMAL_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET) ++#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET) ++#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET) ++ ++#define SET_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET) ++#define CLEAR_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR) ++ ++#define SET_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET) ++#define CLEAR_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR ) ++ ++#define SET_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET ) ++#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR ) ++ ++#define SET_ATOM_S6_I2C_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET ) ++ ++#define SET_ATOM_S6_DISPLAY_STATE_CHANGE 
((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET ) ++ ++#define SET_ATOM_S6_DEVICE_RECONFIG ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET) ++#define CLEAR_ATOM_S0_LCD1 ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 )| ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR ) ++#define SET_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET ) ++#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR ) ++ ++/****************************************************************************/ ++//Portion II: Definitinos only used in Driver + /****************************************************************************/ + +-/* Macros used by driver */ ++// Macros used by driver ++#ifdef __cplusplus ++#define GetIndexIntoMasterTable(MasterOrData, FieldName) ((reinterpret_cast(&(static_cast(0))->FieldName)-static_cast(0))/sizeof(USHORT)) + +-#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char *)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES *)0)->FieldName)-(char *)0)/sizeof(USHORT)) ++#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F) ++#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) (((static_cast(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F) ++#else // not __cplusplus ++#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT)) + + #define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F) + #define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F) ++#endif // __cplusplus + + #define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION + #define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION + +-/****************************************************************************/ +-/* Portion III: Definitinos only used in VBIOS */ ++/****************************************************************************/ ++//Portion III: Definitinos only used in VBIOS + /****************************************************************************/ + #define ATOM_DAC_SRC 0x80 + #define ATOM_SRC_DAC1 0 + #define ATOM_SRC_DAC2 0x80 + +-#ifdef UEFI_BUILD +-#define USHORT UTEMP +-#endif +- +-typedef struct _MEMORY_PLLINIT_PARAMETERS { +- ULONG ulTargetMemoryClock; /* In 10Khz unit */ +- UCHAR ucAction; /* not define yet */ +- UCHAR ucFbDiv_Hi; /* Fbdiv Hi byte */ +- UCHAR ucFbDiv; /* FB value */ +- UCHAR ucPostDiv; /* Post div */ +-} MEMORY_PLLINIT_PARAMETERS; ++typedef struct _MEMORY_PLLINIT_PARAMETERS ++{ ++ ULONG ulTargetMemoryClock; //In 10Khz unit ++ UCHAR ucAction; //not define yet ++ UCHAR ucFbDiv_Hi; //Fbdiv Hi byte ++ UCHAR ucFbDiv; //FB value ++ UCHAR ucPostDiv; //Post div ++}MEMORY_PLLINIT_PARAMETERS; + + #define MEMORY_PLLINIT_PS_ALLOCATION MEMORY_PLLINIT_PARAMETERS + +-#define GPIO_PIN_WRITE 0x01 ++ ++#define GPIO_PIN_WRITE 0x01 + #define GPIO_PIN_READ 0x00 + +-typedef struct _GPIO_PIN_CONTROL_PARAMETERS { +- UCHAR ucGPIO_ID; /* return value, read from GPIO pins */ +- UCHAR ucGPIOBitShift; /* define which bit in uGPIOBitVal need to be update */ +- UCHAR ucGPIOBitVal; /* Set/Reset corresponding bit defined in ucGPIOBitMask */ +- 
UCHAR ucAction; /* =GPIO_PIN_WRITE: Read; =GPIO_PIN_READ: Write */ +-} GPIO_PIN_CONTROL_PARAMETERS; +- +-typedef struct _ENABLE_SCALER_PARAMETERS { +- UCHAR ucScaler; /* ATOM_SCALER1, ATOM_SCALER2 */ +- UCHAR ucEnable; /* ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION */ +- UCHAR ucTVStandard; /* */ +- UCHAR ucPadding[1]; +-} ENABLE_SCALER_PARAMETERS; +-#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS +- +-/* ucEnable: */ ++typedef struct _GPIO_PIN_CONTROL_PARAMETERS ++{ ++ UCHAR ucGPIO_ID; //return value, read from GPIO pins ++ UCHAR ucGPIOBitShift; //define which bit in uGPIOBitVal need to be update ++ UCHAR ucGPIOBitVal; //Set/Reset corresponding bit defined in ucGPIOBitMask ++ UCHAR ucAction; //=GPIO_PIN_WRITE: Read; =GPIO_PIN_READ: Write ++}GPIO_PIN_CONTROL_PARAMETERS; ++ ++typedef struct _ENABLE_SCALER_PARAMETERS ++{ ++ UCHAR ucScaler; // ATOM_SCALER1, ATOM_SCALER2 ++ UCHAR ucEnable; // ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION ++ UCHAR ucTVStandard; // ++ UCHAR ucPadding[1]; ++}ENABLE_SCALER_PARAMETERS; ++#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS ++ ++//ucEnable: + #define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION 0 + #define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION 1 + #define SCALER_ENABLE_2TAP_ALPHA_MODE 2 + #define SCALER_ENABLE_MULTITAP_MODE 3 + +-typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS { +- ULONG usHWIconHorzVertPosn; /* Hardware Icon Vertical position */ +- UCHAR ucHWIconVertOffset; /* Hardware Icon Vertical offset */ +- UCHAR ucHWIconHorzOffset; /* Hardware Icon Horizontal offset */ +- UCHAR ucSelection; /* ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2 */ +- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ +-} ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS; +- +-typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION { +- ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon; +- ENABLE_CRTC_PARAMETERS sReserved; +-} ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION; +- +-typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS { +- USHORT usHight; /* Image Hight */ +- USHORT usWidth; /* Image Width */ +- UCHAR ucSurface; /* Surface 1 or 2 */ +- UCHAR ucPadding[3]; +-} ENABLE_GRAPH_SURFACE_PARAMETERS; +- +-typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2 { +- USHORT usHight; /* Image Hight */ +- USHORT usWidth; /* Image Width */ +- UCHAR ucSurface; /* Surface 1 or 2 */ +- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ +- UCHAR ucPadding[2]; +-} ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2; +- +-typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION { +- ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface; +- ENABLE_YUV_PS_ALLOCATION sReserved; /* Don't set this one */ +-} ENABLE_GRAPH_SURFACE_PS_ALLOCATION; +- +-typedef struct _MEMORY_CLEAN_UP_PARAMETERS { +- USHORT usMemoryStart; /* in 8Kb boundry, offset from memory base address */ +- USHORT usMemorySize; /* 8Kb blocks aligned */ +-} MEMORY_CLEAN_UP_PARAMETERS; ++typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS ++{ ++ ULONG usHWIconHorzVertPosn; // Hardware Icon Vertical position ++ UCHAR ucHWIconVertOffset; // Hardware Icon Vertical offset ++ UCHAR ucHWIconHorzOffset; // Hardware Icon Horizontal offset ++ UCHAR ucSelection; // ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2 ++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE ++}ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS; ++ ++typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION ++{ ++ ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon; ++ ENABLE_CRTC_PARAMETERS sReserved; 
++}ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION; ++ ++typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS ++{ ++ USHORT usHight; // Image Hight ++ USHORT usWidth; // Image Width ++ UCHAR ucSurface; // Surface 1 or 2 ++ UCHAR ucPadding[3]; ++}ENABLE_GRAPH_SURFACE_PARAMETERS; ++ ++typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2 ++{ ++ USHORT usHight; // Image Hight ++ USHORT usWidth; // Image Width ++ UCHAR ucSurface; // Surface 1 or 2 ++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE ++ UCHAR ucPadding[2]; ++}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2; ++ ++typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3 ++{ ++ USHORT usHight; // Image Hight ++ USHORT usWidth; // Image Width ++ UCHAR ucSurface; // Surface 1 or 2 ++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE ++ USHORT usDeviceId; // Active Device Id for this surface. If no device, set to 0. ++}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3; ++ ++typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION ++{ ++ ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface; ++ ENABLE_YUV_PS_ALLOCATION sReserved; // Don't set this one ++}ENABLE_GRAPH_SURFACE_PS_ALLOCATION; ++ ++typedef struct _MEMORY_CLEAN_UP_PARAMETERS ++{ ++ USHORT usMemoryStart; //in 8Kb boundry, offset from memory base address ++ USHORT usMemorySize; //8Kb blocks aligned ++}MEMORY_CLEAN_UP_PARAMETERS; + #define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS + +-typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS { +- USHORT usX_Size; /* When use as input parameter, usX_Size indicates which CRTC */ +- USHORT usY_Size; +-} GET_DISPLAY_SURFACE_SIZE_PARAMETERS; ++typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS ++{ ++ USHORT usX_Size; //When use as input parameter, usX_Size indicates which CRTC ++ USHORT usY_Size; ++}GET_DISPLAY_SURFACE_SIZE_PARAMETERS; + +-typedef struct _INDIRECT_IO_ACCESS { +- ATOM_COMMON_TABLE_HEADER sHeader; +- UCHAR IOAccessSequence[256]; ++typedef struct _INDIRECT_IO_ACCESS ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR IOAccessSequence[256]; + } INDIRECT_IO_ACCESS; + + #define INDIRECT_READ 0x00 +@@ -3615,93 +4414,108 @@ typedef struct _INDIRECT_IO_ACCESS { + #define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ + #define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE + +-typedef struct _ATOM_OEM_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; +-} ATOM_OEM_INFO; +- +-typedef struct _ATOM_TV_MODE { +- UCHAR ucVMode_Num; /* Video mode number */ +- UCHAR ucTV_Mode_Num; /* Internal TV mode number */ +-} ATOM_TV_MODE; +- +-typedef struct _ATOM_BIOS_INT_TVSTD_MODE { +- ATOM_COMMON_TABLE_HEADER sHeader; +- USHORT usTV_Mode_LUT_Offset; /* Pointer to standard to internal number conversion table */ +- USHORT usTV_FIFO_Offset; /* Pointer to FIFO entry table */ +- USHORT usNTSC_Tbl_Offset; /* Pointer to SDTV_Mode_NTSC table */ +- USHORT usPAL_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */ +- USHORT usCV_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */ +-} ATOM_BIOS_INT_TVSTD_MODE; +- +-typedef struct _ATOM_TV_MODE_SCALER_PTR { +- USHORT ucFilter0_Offset; /* Pointer to filter format 0 coefficients */ +- USHORT usFilter1_Offset; /* Pointer to filter format 0 coefficients */ +- UCHAR ucTV_Mode_Num; +-} ATOM_TV_MODE_SCALER_PTR; +- +-typedef struct _ATOM_STANDARD_VESA_TIMING { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ATOM_DTD_FORMAT aModeTimings[16]; /* 16 is not the real array number, just for initial allocation */ +-} ATOM_STANDARD_VESA_TIMING; +- +-typedef struct _ATOM_STD_FORMAT { +- USHORT usSTD_HDisp; +- USHORT 
usSTD_VDisp; +- USHORT usSTD_RefreshRate; +- USHORT usReserved; +-} ATOM_STD_FORMAT; +- +-typedef struct _ATOM_VESA_TO_EXTENDED_MODE { +- USHORT usVESA_ModeNumber; +- USHORT usExtendedModeNumber; +-} ATOM_VESA_TO_EXTENDED_MODE; +- +-typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT { +- ATOM_COMMON_TABLE_HEADER sHeader; +- ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76]; +-} ATOM_VESA_TO_INTENAL_MODE_LUT; ++typedef struct _ATOM_OEM_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; ++}ATOM_OEM_INFO; ++ ++typedef struct _ATOM_TV_MODE ++{ ++ UCHAR ucVMode_Num; //Video mode number ++ UCHAR ucTV_Mode_Num; //Internal TV mode number ++}ATOM_TV_MODE; ++ ++typedef struct _ATOM_BIOS_INT_TVSTD_MODE ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usTV_Mode_LUT_Offset; // Pointer to standard to internal number conversion table ++ USHORT usTV_FIFO_Offset; // Pointer to FIFO entry table ++ USHORT usNTSC_Tbl_Offset; // Pointer to SDTV_Mode_NTSC table ++ USHORT usPAL_Tbl_Offset; // Pointer to SDTV_Mode_PAL table ++ USHORT usCV_Tbl_Offset; // Pointer to SDTV_Mode_PAL table ++}ATOM_BIOS_INT_TVSTD_MODE; ++ ++ ++typedef struct _ATOM_TV_MODE_SCALER_PTR ++{ ++ USHORT ucFilter0_Offset; //Pointer to filter format 0 coefficients ++ USHORT usFilter1_Offset; //Pointer to filter format 0 coefficients ++ UCHAR ucTV_Mode_Num; ++}ATOM_TV_MODE_SCALER_PTR; ++ ++typedef struct _ATOM_STANDARD_VESA_TIMING ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_DTD_FORMAT aModeTimings[16]; // 16 is not the real array number, just for initial allocation ++}ATOM_STANDARD_VESA_TIMING; ++ ++ ++typedef struct _ATOM_STD_FORMAT ++{ ++ USHORT usSTD_HDisp; ++ USHORT usSTD_VDisp; ++ USHORT usSTD_RefreshRate; ++ USHORT usReserved; ++}ATOM_STD_FORMAT; ++ ++typedef struct _ATOM_VESA_TO_EXTENDED_MODE ++{ ++ USHORT usVESA_ModeNumber; ++ USHORT usExtendedModeNumber; ++}ATOM_VESA_TO_EXTENDED_MODE; ++ ++typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76]; ++}ATOM_VESA_TO_INTENAL_MODE_LUT; + + /*************** ATOM Memory Related Data Structure ***********************/ +-typedef struct _ATOM_MEMORY_VENDOR_BLOCK { +- UCHAR ucMemoryType; +- UCHAR ucMemoryVendor; +- UCHAR ucAdjMCId; +- UCHAR ucDynClkId; +- ULONG ulDllResetClkRange; +-} ATOM_MEMORY_VENDOR_BLOCK; +- +-typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG { ++typedef struct _ATOM_MEMORY_VENDOR_BLOCK{ ++ UCHAR ucMemoryType; ++ UCHAR ucMemoryVendor; ++ UCHAR ucAdjMCId; ++ UCHAR ucDynClkId; ++ ULONG ulDllResetClkRange; ++}ATOM_MEMORY_VENDOR_BLOCK; ++ ++ ++typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG{ + #if ATOM_BIG_ENDIAN +- ULONG ucMemBlkId:8; +- ULONG ulMemClockRange:24; ++ ULONG ucMemBlkId:8; ++ ULONG ulMemClockRange:24; + #else +- ULONG ulMemClockRange:24; +- ULONG ucMemBlkId:8; ++ ULONG ulMemClockRange:24; ++ ULONG ucMemBlkId:8; + #endif +-} ATOM_MEMORY_SETTING_ID_CONFIG; +- +-typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS { +- ATOM_MEMORY_SETTING_ID_CONFIG slAccess; +- ULONG ulAccess; +-} ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS; +- +-typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK { +- ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID; +- ULONG aulMemData[1]; +-} ATOM_MEMORY_SETTING_DATA_BLOCK; +- +-typedef struct _ATOM_INIT_REG_INDEX_FORMAT { +- USHORT usRegIndex; /* MC register index */ +- UCHAR ucPreRegDataLength; /* offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf */ +-} ATOM_INIT_REG_INDEX_FORMAT; +- +-typedef struct _ATOM_INIT_REG_BLOCK { +- USHORT 
usRegIndexTblSize; /* size of asRegIndexBuf */ +- USHORT usRegDataBlkSize; /* size of ATOM_MEMORY_SETTING_DATA_BLOCK */ +- ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1]; +- ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1]; +-} ATOM_INIT_REG_BLOCK; ++}ATOM_MEMORY_SETTING_ID_CONFIG; ++ ++typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ++{ ++ ATOM_MEMORY_SETTING_ID_CONFIG slAccess; ++ ULONG ulAccess; ++}ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS; ++ ++ ++typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{ ++ ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID; ++ ULONG aulMemData[1]; ++}ATOM_MEMORY_SETTING_DATA_BLOCK; ++ ++ ++typedef struct _ATOM_INIT_REG_INDEX_FORMAT{ ++ USHORT usRegIndex; // MC register index ++ UCHAR ucPreRegDataLength; // offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf ++}ATOM_INIT_REG_INDEX_FORMAT; ++ ++ ++typedef struct _ATOM_INIT_REG_BLOCK{ ++ USHORT usRegIndexTblSize; //size of asRegIndexBuf ++ USHORT usRegDataBlkSize; //size of ATOM_MEMORY_SETTING_DATA_BLOCK ++ ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1]; ++ ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1]; ++}ATOM_INIT_REG_BLOCK; + + #define END_OF_REG_INDEX_BLOCK 0x0ffff + #define END_OF_REG_DATA_BLOCK 0x00000000 +@@ -3716,16 +4530,19 @@ typedef struct _ATOM_INIT_REG_BLOCK { + #define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1) + #define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1) + +-typedef struct _ATOM_MC_INIT_PARAM_TABLE { +- ATOM_COMMON_TABLE_HEADER sHeader; +- USHORT usAdjustARB_SEQDataOffset; +- USHORT usMCInitMemTypeTblOffset; +- USHORT usMCInitCommonTblOffset; +- USHORT usMCInitPowerDownTblOffset; +- ULONG ulARB_SEQDataBuf[32]; +- ATOM_INIT_REG_BLOCK asMCInitMemType; +- ATOM_INIT_REG_BLOCK asMCInitCommon; +-} ATOM_MC_INIT_PARAM_TABLE; ++ ++typedef struct _ATOM_MC_INIT_PARAM_TABLE ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usAdjustARB_SEQDataOffset; ++ USHORT usMCInitMemTypeTblOffset; ++ USHORT usMCInitCommonTblOffset; ++ USHORT usMCInitPowerDownTblOffset; ++ ULONG ulARB_SEQDataBuf[32]; ++ ATOM_INIT_REG_BLOCK asMCInitMemType; ++ ATOM_INIT_REG_BLOCK asMCInitCommon; ++}ATOM_MC_INIT_PARAM_TABLE; ++ + + #define _4Mx16 0x2 + #define _4Mx32 0x3 +@@ -3751,221 +4568,272 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE { + + #define QIMONDA INFINEON + #define PROMOS MOSEL ++#define KRETON INFINEON + +-/* ///////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// */ ++/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// + + #define UCODE_ROM_START_ADDRESS 0x1c000 +-#define UCODE_SIGNATURE 0x4375434d /* 'MCuC' - MC uCode */ +- +-/* uCode block header for reference */ +- +-typedef struct _MCuCodeHeader { +- ULONG ulSignature; +- UCHAR ucRevision; +- UCHAR ucChecksum; +- UCHAR ucReserved1; +- UCHAR ucReserved2; +- USHORT usParametersLength; +- USHORT usUCodeLength; +- USHORT usReserved1; +- USHORT usReserved2; ++#define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode ++ ++//uCode block header for reference ++ ++typedef struct _MCuCodeHeader ++{ ++ ULONG ulSignature; ++ UCHAR ucRevision; ++ UCHAR ucChecksum; ++ UCHAR ucReserved1; ++ UCHAR ucReserved2; ++ USHORT usParametersLength; ++ USHORT usUCodeLength; ++ USHORT usReserved1; ++ USHORT usReserved2; + } MCuCodeHeader; + +-/* //////////////////////////////////////////////////////////////////////////////// */ ++////////////////////////////////////////////////////////////////////////////////// + + #define ATOM_MAX_NUMBER_OF_VRAM_MODULE 16 + + #define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK 0xF +-typedef struct 
_ATOM_VRAM_MODULE_V1 { +- ULONG ulReserved; +- USHORT usEMRSValue; +- USHORT usMRSValue; +- USHORT usReserved; +- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */ +- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved; */ +- UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender */ +- UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */ +- UCHAR ucRow; /* Number of Row,in power of 2; */ +- UCHAR ucColumn; /* Number of Column,in power of 2; */ +- UCHAR ucBank; /* Nunber of Bank; */ +- UCHAR ucRank; /* Number of Rank, in power of 2 */ +- UCHAR ucChannelNum; /* Number of channel; */ +- UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */ +- UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */ +- UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */ +- UCHAR ucReserved[2]; +-} ATOM_VRAM_MODULE_V1; +- +-typedef struct _ATOM_VRAM_MODULE_V2 { +- ULONG ulReserved; +- ULONG ulFlags; /* To enable/disable functionalities based on memory type */ +- ULONG ulEngineClock; /* Override of default engine clock for particular memory type */ +- ULONG ulMemoryClock; /* Override of default memory clock for particular memory type */ +- USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */ +- USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */ +- USHORT usEMRSValue; +- USHORT usMRSValue; +- USHORT usReserved; +- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */ +- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */ +- UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed */ +- UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... 
*/ +- UCHAR ucRow; /* Number of Row,in power of 2; */ +- UCHAR ucColumn; /* Number of Column,in power of 2; */ +- UCHAR ucBank; /* Nunber of Bank; */ +- UCHAR ucRank; /* Number of Rank, in power of 2 */ +- UCHAR ucChannelNum; /* Number of channel; */ +- UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */ +- UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */ +- UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */ +- UCHAR ucRefreshRateFactor; +- UCHAR ucReserved[3]; +-} ATOM_VRAM_MODULE_V2; +- +-typedef struct _ATOM_MEMORY_TIMING_FORMAT { +- ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */ +- union { +- USHORT usMRS; /* mode register */ +- USHORT usDDR3_MR0; +- }; +- union { +- USHORT usEMRS; /* extended mode register */ +- USHORT usDDR3_MR1; +- }; +- UCHAR ucCL; /* CAS latency */ +- UCHAR ucWL; /* WRITE Latency */ +- UCHAR uctRAS; /* tRAS */ +- UCHAR uctRC; /* tRC */ +- UCHAR uctRFC; /* tRFC */ +- UCHAR uctRCDR; /* tRCDR */ +- UCHAR uctRCDW; /* tRCDW */ +- UCHAR uctRP; /* tRP */ +- UCHAR uctRRD; /* tRRD */ +- UCHAR uctWR; /* tWR */ +- UCHAR uctWTR; /* tWTR */ +- UCHAR uctPDIX; /* tPDIX */ +- UCHAR uctFAW; /* tFAW */ +- UCHAR uctAOND; /* tAOND */ +- union { +- struct { +- UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */ +- UCHAR ucReserved; +- }; +- USHORT usDDR3_MR2; +- }; +-} ATOM_MEMORY_TIMING_FORMAT; +- +-typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1 { +- ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */ +- USHORT usMRS; /* mode register */ +- USHORT usEMRS; /* extended mode register */ +- UCHAR ucCL; /* CAS latency */ +- UCHAR ucWL; /* WRITE Latency */ +- UCHAR uctRAS; /* tRAS */ +- UCHAR uctRC; /* tRC */ +- UCHAR uctRFC; /* tRFC */ +- UCHAR uctRCDR; /* tRCDR */ +- UCHAR uctRCDW; /* tRCDW */ +- UCHAR uctRP; /* tRP */ +- UCHAR uctRRD; /* tRRD */ +- UCHAR uctWR; /* tWR */ +- UCHAR uctWTR; /* tWTR */ +- UCHAR uctPDIX; /* tPDIX */ +- UCHAR uctFAW; /* tFAW */ +- UCHAR uctAOND; /* tAOND */ +- UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */ +-/* ///////////////////////GDDR parameters/////////////////////////////////// */ +- UCHAR uctCCDL; /* */ +- UCHAR uctCRCRL; /* */ +- UCHAR uctCRCWL; /* */ +- UCHAR uctCKE; /* */ +- UCHAR uctCKRSE; /* */ +- UCHAR uctCKRSX; /* */ +- UCHAR uctFAW32; /* */ +- UCHAR ucReserved1; /* */ +- UCHAR ucReserved2; /* */ +- UCHAR ucTerminator; +-} ATOM_MEMORY_TIMING_FORMAT_V1; +- +-typedef struct _ATOM_MEMORY_FORMAT { +- ULONG ulDllDisClock; /* memory DLL will be disable when target memory clock is below this clock */ +- union { +- USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */ +- USHORT usDDR3_Reserved; /* Not used for DDR3 memory */ +- }; +- union { +- USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */ +- USHORT usDDR3_MR3; /* Used for DDR3 memory */ +- }; +- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */ +- UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender. 
If not predefined, vendor detection table gets executed */ +- UCHAR ucRow; /* Number of Row,in power of 2; */ +- UCHAR ucColumn; /* Number of Column,in power of 2; */ +- UCHAR ucBank; /* Nunber of Bank; */ +- UCHAR ucRank; /* Number of Rank, in power of 2 */ +- UCHAR ucBurstSize; /* burst size, 0= burst size=4 1= burst size=8 */ +- UCHAR ucDllDisBit; /* position of DLL Enable/Disable bit in EMRS ( Extended Mode Register ) */ +- UCHAR ucRefreshRateFactor; /* memory refresh rate in unit of ms */ +- UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */ +- UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */ +- UCHAR ucMemAttrib; /* Memory Device Addribute, like RDBI/WDBI etc */ +- ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */ +-} ATOM_MEMORY_FORMAT; +- +-typedef struct _ATOM_VRAM_MODULE_V3 { +- ULONG ulChannelMapCfg; /* board dependent paramenter:Channel combination */ +- USHORT usSize; /* size of ATOM_VRAM_MODULE_V3 */ +- USHORT usDefaultMVDDQ; /* board dependent parameter:Default Memory Core Voltage */ +- USHORT usDefaultMVDDC; /* board dependent parameter:Default Memory IO Voltage */ +- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */ +- UCHAR ucChannelNum; /* board dependent parameter:Number of channel; */ +- UCHAR ucChannelSize; /* board dependent parameter:32bit or 64bit */ +- UCHAR ucVREFI; /* board dependnt parameter: EXT or INT +160mv to -140mv */ +- UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */ +- UCHAR ucFlag; /* To enable/disable functionalities based on memory type */ +- ATOM_MEMORY_FORMAT asMemory; /* describ all of video memory parameters from memory spec */ +-} ATOM_VRAM_MODULE_V3; +- +-/* ATOM_VRAM_MODULE_V3.ucNPL_RT */ ++typedef struct _ATOM_VRAM_MODULE_V1 ++{ ++ ULONG ulReserved; ++ USHORT usEMRSValue; ++ USHORT usMRSValue; ++ USHORT usReserved; ++ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module ++ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved; ++ UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender ++ UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... 
++ UCHAR ucRow; // Number of Row,in power of 2; ++ UCHAR ucColumn; // Number of Column,in power of 2; ++ UCHAR ucBank; // Nunber of Bank; ++ UCHAR ucRank; // Number of Rank, in power of 2 ++ UCHAR ucChannelNum; // Number of channel; ++ UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 ++ UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; ++ UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; ++ UCHAR ucReserved[2]; ++}ATOM_VRAM_MODULE_V1; ++ ++ ++typedef struct _ATOM_VRAM_MODULE_V2 ++{ ++ ULONG ulReserved; ++ ULONG ulFlags; // To enable/disable functionalities based on memory type ++ ULONG ulEngineClock; // Override of default engine clock for particular memory type ++ ULONG ulMemoryClock; // Override of default memory clock for particular memory type ++ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type ++ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type ++ USHORT usEMRSValue; ++ USHORT usMRSValue; ++ USHORT usReserved; ++ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module ++ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; ++ UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed ++ UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... ++ UCHAR ucRow; // Number of Row,in power of 2; ++ UCHAR ucColumn; // Number of Column,in power of 2; ++ UCHAR ucBank; // Nunber of Bank; ++ UCHAR ucRank; // Number of Rank, in power of 2 ++ UCHAR ucChannelNum; // Number of channel; ++ UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 ++ UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; ++ UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; ++ UCHAR ucRefreshRateFactor; ++ UCHAR ucReserved[3]; ++}ATOM_VRAM_MODULE_V2; ++ ++ ++typedef struct _ATOM_MEMORY_TIMING_FORMAT ++{ ++ ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing ++ union{ ++ USHORT usMRS; // mode register ++ USHORT usDDR3_MR0; ++ }; ++ union{ ++ USHORT usEMRS; // extended mode register ++ USHORT usDDR3_MR1; ++ }; ++ UCHAR ucCL; // CAS latency ++ UCHAR ucWL; // WRITE Latency ++ UCHAR uctRAS; // tRAS ++ UCHAR uctRC; // tRC ++ UCHAR uctRFC; // tRFC ++ UCHAR uctRCDR; // tRCDR ++ UCHAR uctRCDW; // tRCDW ++ UCHAR uctRP; // tRP ++ UCHAR uctRRD; // tRRD ++ UCHAR uctWR; // tWR ++ UCHAR uctWTR; // tWTR ++ UCHAR uctPDIX; // tPDIX ++ UCHAR uctFAW; // tFAW ++ UCHAR uctAOND; // tAOND ++ union ++ { ++ struct { ++ UCHAR ucflag; // flag to control memory timing calculation. 
bit0= control EMRS2 Infineon ++ UCHAR ucReserved; ++ }; ++ USHORT usDDR3_MR2; ++ }; ++}ATOM_MEMORY_TIMING_FORMAT; ++ ++ ++typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1 ++{ ++ ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing ++ USHORT usMRS; // mode register ++ USHORT usEMRS; // extended mode register ++ UCHAR ucCL; // CAS latency ++ UCHAR ucWL; // WRITE Latency ++ UCHAR uctRAS; // tRAS ++ UCHAR uctRC; // tRC ++ UCHAR uctRFC; // tRFC ++ UCHAR uctRCDR; // tRCDR ++ UCHAR uctRCDW; // tRCDW ++ UCHAR uctRP; // tRP ++ UCHAR uctRRD; // tRRD ++ UCHAR uctWR; // tWR ++ UCHAR uctWTR; // tWTR ++ UCHAR uctPDIX; // tPDIX ++ UCHAR uctFAW; // tFAW ++ UCHAR uctAOND; // tAOND ++ UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon ++////////////////////////////////////GDDR parameters/////////////////////////////////// ++ UCHAR uctCCDL; // ++ UCHAR uctCRCRL; // ++ UCHAR uctCRCWL; // ++ UCHAR uctCKE; // ++ UCHAR uctCKRSE; // ++ UCHAR uctCKRSX; // ++ UCHAR uctFAW32; // ++ UCHAR ucMR5lo; // ++ UCHAR ucMR5hi; // ++ UCHAR ucTerminator; ++}ATOM_MEMORY_TIMING_FORMAT_V1; ++ ++typedef struct _ATOM_MEMORY_TIMING_FORMAT_V2 ++{ ++ ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing ++ USHORT usMRS; // mode register ++ USHORT usEMRS; // extended mode register ++ UCHAR ucCL; // CAS latency ++ UCHAR ucWL; // WRITE Latency ++ UCHAR uctRAS; // tRAS ++ UCHAR uctRC; // tRC ++ UCHAR uctRFC; // tRFC ++ UCHAR uctRCDR; // tRCDR ++ UCHAR uctRCDW; // tRCDW ++ UCHAR uctRP; // tRP ++ UCHAR uctRRD; // tRRD ++ UCHAR uctWR; // tWR ++ UCHAR uctWTR; // tWTR ++ UCHAR uctPDIX; // tPDIX ++ UCHAR uctFAW; // tFAW ++ UCHAR uctAOND; // tAOND ++ UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon ++////////////////////////////////////GDDR parameters/////////////////////////////////// ++ UCHAR uctCCDL; // ++ UCHAR uctCRCRL; // ++ UCHAR uctCRCWL; // ++ UCHAR uctCKE; // ++ UCHAR uctCKRSE; // ++ UCHAR uctCKRSX; // ++ UCHAR uctFAW32; // ++ UCHAR ucMR4lo; // ++ UCHAR ucMR4hi; // ++ UCHAR ucMR5lo; // ++ UCHAR ucMR5hi; // ++ UCHAR ucTerminator; ++ UCHAR ucReserved; ++}ATOM_MEMORY_TIMING_FORMAT_V2; ++ ++typedef struct _ATOM_MEMORY_FORMAT ++{ ++ ULONG ulDllDisClock; // memory DLL will be disable when target memory clock is below this clock ++ union{ ++ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type ++ USHORT usDDR3_Reserved; // Not used for DDR3 memory ++ }; ++ union{ ++ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type ++ USHORT usDDR3_MR3; // Used for DDR3 memory ++ }; ++ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; ++ UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender. 
If not predefined, vendor detection table gets executed ++ UCHAR ucRow; // Number of Row,in power of 2; ++ UCHAR ucColumn; // Number of Column,in power of 2; ++ UCHAR ucBank; // Nunber of Bank; ++ UCHAR ucRank; // Number of Rank, in power of 2 ++ UCHAR ucBurstSize; // burst size, 0= burst size=4 1= burst size=8 ++ UCHAR ucDllDisBit; // position of DLL Enable/Disable bit in EMRS ( Extended Mode Register ) ++ UCHAR ucRefreshRateFactor; // memory refresh rate in unit of ms ++ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 ++ UCHAR ucPreamble; //[7:4] Write Preamble, [3:0] Read Preamble ++ UCHAR ucMemAttrib; // Memory Device Addribute, like RDBI/WDBI etc ++ ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; //Memory Timing block sort from lower clock to higher clock ++}ATOM_MEMORY_FORMAT; ++ ++ ++typedef struct _ATOM_VRAM_MODULE_V3 ++{ ++ ULONG ulChannelMapCfg; // board dependent paramenter:Channel combination ++ USHORT usSize; // size of ATOM_VRAM_MODULE_V3 ++ USHORT usDefaultMVDDQ; // board dependent parameter:Default Memory Core Voltage ++ USHORT usDefaultMVDDC; // board dependent parameter:Default Memory IO Voltage ++ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module ++ UCHAR ucChannelNum; // board dependent parameter:Number of channel; ++ UCHAR ucChannelSize; // board dependent parameter:32bit or 64bit ++ UCHAR ucVREFI; // board dependnt parameter: EXT or INT +160mv to -140mv ++ UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters ++ UCHAR ucFlag; // To enable/disable functionalities based on memory type ++ ATOM_MEMORY_FORMAT asMemory; // describ all of video memory parameters from memory spec ++}ATOM_VRAM_MODULE_V3; ++ ++ ++//ATOM_VRAM_MODULE_V3.ucNPL_RT + #define NPL_RT_MASK 0x0f + #define BATTERY_ODT_MASK 0xc0 + + #define ATOM_VRAM_MODULE ATOM_VRAM_MODULE_V3 + +-typedef struct _ATOM_VRAM_MODULE_V4 { +- ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */ +- USHORT usModuleSize; /* size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE */ +- USHORT usPrivateReserved; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */ +- /* MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) */ +- USHORT usReserved; +- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */ +- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; */ +- UCHAR ucChannelNum; /* Number of channels present in this module config */ +- UCHAR ucChannelWidth; /* 0 - 32 bits; 1 - 64 bits */ +- UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */ +- UCHAR ucFlag; /* To enable/disable functionalities based on memory type */ +- UCHAR ucMisc; /* bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 */ +- UCHAR ucVREFI; /* board dependent parameter */ +- UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */ +- UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */ +- UCHAR ucMemorySize; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! 
*/ +- /* Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros */ +- UCHAR ucReserved[3]; +- +-/* compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level */ +- union { +- USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */ +- USHORT usDDR3_Reserved; +- }; +- union { +- USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */ +- USHORT usDDR3_MR3; /* Used for DDR3 memory */ +- }; +- UCHAR ucMemoryVenderID; /* Predefined, If not predefined, vendor detection table gets executed */ +- UCHAR ucRefreshRateFactor; /* [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) */ +- UCHAR ucReserved2[2]; +- ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */ +-} ATOM_VRAM_MODULE_V4; ++typedef struct _ATOM_VRAM_MODULE_V4 ++{ ++ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination ++ USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE ++ USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! ++ // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) ++ USHORT usReserved; ++ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module ++ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; ++ UCHAR ucChannelNum; // Number of channels present in this module config ++ UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits ++ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 ++ UCHAR ucFlag; // To enable/disable functionalities based on memory type ++ UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 ++ UCHAR ucVREFI; // board dependent parameter ++ UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters ++ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble ++ UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! 
++ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros ++ UCHAR ucReserved[3]; ++ ++//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level ++ union{ ++ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type ++ USHORT usDDR3_Reserved; ++ }; ++ union{ ++ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type ++ USHORT usDDR3_MR3; // Used for DDR3 memory ++ }; ++ UCHAR ucMemoryVenderID; // Predefined, If not predefined, vendor detection table gets executed ++ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) ++ UCHAR ucReserved2[2]; ++ ATOM_MEMORY_TIMING_FORMAT asMemTiming[5];//Memory Timing block sort from lower clock to higher clock ++}ATOM_VRAM_MODULE_V4; + + #define VRAM_MODULE_V4_MISC_RANK_MASK 0x3 + #define VRAM_MODULE_V4_MISC_DUAL_RANK 0x1 +@@ -3973,96 +4841,139 @@ typedef struct _ATOM_VRAM_MODULE_V4 { + #define VRAM_MODULE_V4_MISC_BL8 0x4 + #define VRAM_MODULE_V4_MISC_DUAL_CS 0x10 + +-typedef struct _ATOM_VRAM_MODULE_V5 { +- ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */ +- USHORT usModuleSize; /* size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE */ +- USHORT usPrivateReserved; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */ +- /* MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) */ +- USHORT usReserved; +- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */ +- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; */ +- UCHAR ucChannelNum; /* Number of channels present in this module config */ +- UCHAR ucChannelWidth; /* 0 - 32 bits; 1 - 64 bits */ +- UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */ +- UCHAR ucFlag; /* To enable/disable functionalities based on memory type */ +- UCHAR ucMisc; /* bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 */ +- UCHAR ucVREFI; /* board dependent parameter */ +- UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */ +- UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */ +- UCHAR ucMemorySize; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */ +- /* Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros */ +- UCHAR ucReserved[3]; ++typedef struct _ATOM_VRAM_MODULE_V5 ++{ ++ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination ++ USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE ++ USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! 
++ // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) ++ USHORT usReserved; ++ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module ++ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; ++ UCHAR ucChannelNum; // Number of channels present in this module config ++ UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits ++ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 ++ UCHAR ucFlag; // To enable/disable functionalities based on memory type ++ UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 ++ UCHAR ucVREFI; // board dependent parameter ++ UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters ++ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble ++ UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! ++ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros ++ UCHAR ucReserved[3]; ++ ++//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level ++ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type ++ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type ++ UCHAR ucMemoryVenderID; // Predefined, If not predefined, vendor detection table gets executed ++ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) ++ UCHAR ucFIFODepth; // FIFO depth supposes to be detected during vendor detection, but if we dont do vendor detection we have to hardcode FIFO Depth ++ UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth ++ ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock ++}ATOM_VRAM_MODULE_V5; ++ ++typedef struct _ATOM_VRAM_MODULE_V6 ++{ ++ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination ++ USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE ++ USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! ++ // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) ++ USHORT usReserved; ++ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module ++ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; ++ UCHAR ucChannelNum; // Number of channels present in this module config ++ UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits ++ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 ++ UCHAR ucFlag; // To enable/disable functionalities based on memory type ++ UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 ++ UCHAR ucVREFI; // board dependent parameter ++ UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters ++ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble ++ UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! 
++ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros ++ UCHAR ucReserved[3]; ++ ++//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level ++ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type ++ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type ++ UCHAR ucMemoryVenderID; // Predefined, If not predefined, vendor detection table gets executed ++ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) ++ UCHAR ucFIFODepth; // FIFO depth supposes to be detected during vendor detection, but if we dont do vendor detection we have to hardcode FIFO Depth ++ UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth ++ ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock ++}ATOM_VRAM_MODULE_V6; ++ ++ ++ ++typedef struct _ATOM_VRAM_INFO_V2 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR ucNumOfVRAMModule; ++ ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; ++}ATOM_VRAM_INFO_V2; + +-/* compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level */ +- USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */ +- USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */ +- UCHAR ucMemoryVenderID; /* Predefined, If not predefined, vendor detection table gets executed */ +- UCHAR ucRefreshRateFactor; /* [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) */ +- UCHAR ucFIFODepth; /* FIFO depth supposes to be detected during vendor detection, but if we dont do vendor detection we have to hardcode FIFO Depth */ +- UCHAR ucCDR_Bandwidth; /* [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth */ +- ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */ +-} ATOM_VRAM_MODULE_V5; +- +-typedef struct _ATOM_VRAM_INFO_V2 { +- ATOM_COMMON_TABLE_HEADER sHeader; +- UCHAR ucNumOfVRAMModule; +- ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */ +-} ATOM_VRAM_INFO_V2; +- +-typedef struct _ATOM_VRAM_INFO_V3 { +- ATOM_COMMON_TABLE_HEADER sHeader; +- USHORT usMemAdjustTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting */ +- USHORT usMemClkPatchTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting */ +- USHORT usRerseved; +- UCHAR aVID_PinsShift[9]; /* 8 bit strap maximum+terminator */ +- UCHAR ucNumOfVRAMModule; +- ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */ +- ATOM_INIT_REG_BLOCK asMemPatch; /* for allocation */ +- /* ATOM_INIT_REG_BLOCK aMemAdjust; */ +-} ATOM_VRAM_INFO_V3; ++typedef struct _ATOM_VRAM_INFO_V3 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting ++ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting ++ USHORT usRerseved; ++ UCHAR aVID_PinsShift[9]; // 8 bit strap maximum+terminator ++ UCHAR ucNumOfVRAMModule; ++ ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; ++ 
ATOM_INIT_REG_BLOCK asMemPatch; // for allocation ++ // ATOM_INIT_REG_BLOCK aMemAdjust; ++}ATOM_VRAM_INFO_V3; + + #define ATOM_VRAM_INFO_LAST ATOM_VRAM_INFO_V3 + +-typedef struct _ATOM_VRAM_INFO_V4 { +- ATOM_COMMON_TABLE_HEADER sHeader; +- USHORT usMemAdjustTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting */ +- USHORT usMemClkPatchTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting */ +- USHORT usRerseved; +- UCHAR ucMemDQ7_0ByteRemap; /* DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3 */ +- ULONG ulMemDQ7_0BitRemap; /* each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21] */ +- UCHAR ucReservde[4]; +- UCHAR ucNumOfVRAMModule; +- ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */ +- ATOM_INIT_REG_BLOCK asMemPatch; /* for allocation */ +- /* ATOM_INIT_REG_BLOCK aMemAdjust; */ +-} ATOM_VRAM_INFO_V4; +- +-typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- UCHAR aVID_PinsShift[9]; /* 8 bit strap maximum+terminator */ +-} ATOM_VRAM_GPIO_DETECTION_INFO; +- +-typedef struct _ATOM_MEMORY_TRAINING_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- UCHAR ucTrainingLoop; +- UCHAR ucReserved[3]; +- ATOM_INIT_REG_BLOCK asMemTrainingSetting; +-} ATOM_MEMORY_TRAINING_INFO; +- +-typedef struct SW_I2C_CNTL_DATA_PARAMETERS { +- UCHAR ucControl; +- UCHAR ucData; +- UCHAR ucSatus; +- UCHAR ucTemp; ++typedef struct _ATOM_VRAM_INFO_V4 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting ++ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting ++ USHORT usRerseved; ++ UCHAR ucMemDQ7_0ByteRemap; // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3 ++ ULONG ulMemDQ7_0BitRemap; // each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... 
DQ7:[23:21] ++ UCHAR ucReservde[4]; ++ UCHAR ucNumOfVRAMModule; ++ ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; ++ ATOM_INIT_REG_BLOCK asMemPatch; // for allocation ++ // ATOM_INIT_REG_BLOCK aMemAdjust; ++}ATOM_VRAM_INFO_V4; ++ ++typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR aVID_PinsShift[9]; //8 bit strap maximum+terminator ++}ATOM_VRAM_GPIO_DETECTION_INFO; ++ ++ ++typedef struct _ATOM_MEMORY_TRAINING_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR ucTrainingLoop; ++ UCHAR ucReserved[3]; ++ ATOM_INIT_REG_BLOCK asMemTrainingSetting; ++}ATOM_MEMORY_TRAINING_INFO; ++ ++ ++typedef struct SW_I2C_CNTL_DATA_PARAMETERS ++{ ++ UCHAR ucControl; ++ UCHAR ucData; ++ UCHAR ucSatus; ++ UCHAR ucTemp; + } SW_I2C_CNTL_DATA_PARAMETERS; + + #define SW_I2C_CNTL_DATA_PS_ALLOCATION SW_I2C_CNTL_DATA_PARAMETERS + +-typedef struct _SW_I2C_IO_DATA_PARAMETERS { +- USHORT GPIO_Info; +- UCHAR ucAct; +- UCHAR ucData; +-} SW_I2C_IO_DATA_PARAMETERS; ++typedef struct _SW_I2C_IO_DATA_PARAMETERS ++{ ++ USHORT GPIO_Info; ++ UCHAR ucAct; ++ UCHAR ucData; ++ } SW_I2C_IO_DATA_PARAMETERS; + + #define SW_I2C_IO_DATA_PS_ALLOCATION SW_I2C_IO_DATA_PARAMETERS + +@@ -4087,127 +4998,136 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS { + #define SW_I2C_CNTL_CLOSE 5 + #define SW_I2C_CNTL_WRITE1BIT 6 + +-/* ==============================VESA definition Portion=============================== */ ++//==============================VESA definition Portion=============================== + #define VESA_OEM_PRODUCT_REV '01.00' +-#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB /* refer to VBE spec p.32, no TTY support */ ++#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support + #define VESA_MODE_WIN_ATTRIBUTE 7 + #define VESA_WIN_SIZE 64 + +-typedef struct _PTR_32_BIT_STRUCTURE { +- USHORT Offset16; +- USHORT Segment16; ++typedef struct _PTR_32_BIT_STRUCTURE ++{ ++ USHORT Offset16; ++ USHORT Segment16; + } PTR_32_BIT_STRUCTURE; + +-typedef union _PTR_32_BIT_UNION { +- PTR_32_BIT_STRUCTURE SegmentOffset; +- ULONG Ptr32_Bit; ++typedef union _PTR_32_BIT_UNION ++{ ++ PTR_32_BIT_STRUCTURE SegmentOffset; ++ ULONG Ptr32_Bit; + } PTR_32_BIT_UNION; + +-typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE { +- UCHAR VbeSignature[4]; +- USHORT VbeVersion; +- PTR_32_BIT_UNION OemStringPtr; +- UCHAR Capabilities[4]; +- PTR_32_BIT_UNION VideoModePtr; +- USHORT TotalMemory; ++typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE ++{ ++ UCHAR VbeSignature[4]; ++ USHORT VbeVersion; ++ PTR_32_BIT_UNION OemStringPtr; ++ UCHAR Capabilities[4]; ++ PTR_32_BIT_UNION VideoModePtr; ++ USHORT TotalMemory; + } VBE_1_2_INFO_BLOCK_UPDATABLE; + +-typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE { +- VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock; +- USHORT OemSoftRev; +- PTR_32_BIT_UNION OemVendorNamePtr; +- PTR_32_BIT_UNION OemProductNamePtr; +- PTR_32_BIT_UNION OemProductRevPtr; ++ ++typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE ++{ ++ VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock; ++ USHORT OemSoftRev; ++ PTR_32_BIT_UNION OemVendorNamePtr; ++ PTR_32_BIT_UNION OemProductNamePtr; ++ PTR_32_BIT_UNION OemProductRevPtr; + } VBE_2_0_INFO_BLOCK_UPDATABLE; + +-typedef union _VBE_VERSION_UNION { +- VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock; +- VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock; ++typedef union _VBE_VERSION_UNION ++{ ++ VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock; ++ VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock; + } 
VBE_VERSION_UNION; + +-typedef struct _VBE_INFO_BLOCK { +- VBE_VERSION_UNION UpdatableVBE_Info; +- UCHAR Reserved[222]; +- UCHAR OemData[256]; ++typedef struct _VBE_INFO_BLOCK ++{ ++ VBE_VERSION_UNION UpdatableVBE_Info; ++ UCHAR Reserved[222]; ++ UCHAR OemData[256]; + } VBE_INFO_BLOCK; + +-typedef struct _VBE_FP_INFO { +- USHORT HSize; +- USHORT VSize; +- USHORT FPType; +- UCHAR RedBPP; +- UCHAR GreenBPP; +- UCHAR BlueBPP; +- UCHAR ReservedBPP; +- ULONG RsvdOffScrnMemSize; +- ULONG RsvdOffScrnMEmPtr; +- UCHAR Reserved[14]; ++typedef struct _VBE_FP_INFO ++{ ++ USHORT HSize; ++ USHORT VSize; ++ USHORT FPType; ++ UCHAR RedBPP; ++ UCHAR GreenBPP; ++ UCHAR BlueBPP; ++ UCHAR ReservedBPP; ++ ULONG RsvdOffScrnMemSize; ++ ULONG RsvdOffScrnMEmPtr; ++ UCHAR Reserved[14]; + } VBE_FP_INFO; + +-typedef struct _VESA_MODE_INFO_BLOCK { +-/* Mandatory information for all VBE revisions */ +- USHORT ModeAttributes; /* dw ? ; mode attributes */ +- UCHAR WinAAttributes; /* db ? ; window A attributes */ +- UCHAR WinBAttributes; /* db ? ; window B attributes */ +- USHORT WinGranularity; /* dw ? ; window granularity */ +- USHORT WinSize; /* dw ? ; window size */ +- USHORT WinASegment; /* dw ? ; window A start segment */ +- USHORT WinBSegment; /* dw ? ; window B start segment */ +- ULONG WinFuncPtr; /* dd ? ; real mode pointer to window function */ +- USHORT BytesPerScanLine; /* dw ? ; bytes per scan line */ +- +-/* ; Mandatory information for VBE 1.2 and above */ +- USHORT XResolution; /* dw ? ; horizontal resolution in pixels or characters */ +- USHORT YResolution; /* dw ? ; vertical resolution in pixels or characters */ +- UCHAR XCharSize; /* db ? ; character cell width in pixels */ +- UCHAR YCharSize; /* db ? ; character cell height in pixels */ +- UCHAR NumberOfPlanes; /* db ? ; number of memory planes */ +- UCHAR BitsPerPixel; /* db ? ; bits per pixel */ +- UCHAR NumberOfBanks; /* db ? ; number of banks */ +- UCHAR MemoryModel; /* db ? ; memory model type */ +- UCHAR BankSize; /* db ? ; bank size in KB */ +- UCHAR NumberOfImagePages; /* db ? ; number of images */ +- UCHAR ReservedForPageFunction; /* db 1 ; reserved for page function */ +- +-/* ; Direct Color fields(required for direct/6 and YUV/7 memory models) */ +- UCHAR RedMaskSize; /* db ? ; size of direct color red mask in bits */ +- UCHAR RedFieldPosition; /* db ? ; bit position of lsb of red mask */ +- UCHAR GreenMaskSize; /* db ? ; size of direct color green mask in bits */ +- UCHAR GreenFieldPosition; /* db ? ; bit position of lsb of green mask */ +- UCHAR BlueMaskSize; /* db ? ; size of direct color blue mask in bits */ +- UCHAR BlueFieldPosition; /* db ? ; bit position of lsb of blue mask */ +- UCHAR RsvdMaskSize; /* db ? ; size of direct color reserved mask in bits */ +- UCHAR RsvdFieldPosition; /* db ? ; bit position of lsb of reserved mask */ +- UCHAR DirectColorModeInfo; /* db ? ; direct color mode attributes */ +- +-/* ; Mandatory information for VBE 2.0 and above */ +- ULONG PhysBasePtr; /* dd ? ; physical address for flat memory frame buffer */ +- ULONG Reserved_1; /* dd 0 ; reserved - always set to 0 */ +- USHORT Reserved_2; /* dw 0 ; reserved - always set to 0 */ +- +-/* ; Mandatory information for VBE 3.0 and above */ +- USHORT LinBytesPerScanLine; /* dw ? ; bytes per scan line for linear modes */ +- UCHAR BnkNumberOfImagePages; /* db ? ; number of images for banked modes */ +- UCHAR LinNumberOfImagPages; /* db ? ; number of images for linear modes */ +- UCHAR LinRedMaskSize; /* db ? 
; size of direct color red mask(linear modes) */ +- UCHAR LinRedFieldPosition; /* db ? ; bit position of lsb of red mask(linear modes) */ +- UCHAR LinGreenMaskSize; /* db ? ; size of direct color green mask(linear modes) */ +- UCHAR LinGreenFieldPosition; /* db ? ; bit position of lsb of green mask(linear modes) */ +- UCHAR LinBlueMaskSize; /* db ? ; size of direct color blue mask(linear modes) */ +- UCHAR LinBlueFieldPosition; /* db ? ; bit position of lsb of blue mask(linear modes) */ +- UCHAR LinRsvdMaskSize; /* db ? ; size of direct color reserved mask(linear modes) */ +- UCHAR LinRsvdFieldPosition; /* db ? ; bit position of lsb of reserved mask(linear modes) */ +- ULONG MaxPixelClock; /* dd ? ; maximum pixel clock(in Hz) for graphics mode */ +- UCHAR Reserved; /* db 190 dup (0) */ ++typedef struct _VESA_MODE_INFO_BLOCK ++{ ++// Mandatory information for all VBE revisions ++ USHORT ModeAttributes; // dw ? ; mode attributes ++ UCHAR WinAAttributes; // db ? ; window A attributes ++ UCHAR WinBAttributes; // db ? ; window B attributes ++ USHORT WinGranularity; // dw ? ; window granularity ++ USHORT WinSize; // dw ? ; window size ++ USHORT WinASegment; // dw ? ; window A start segment ++ USHORT WinBSegment; // dw ? ; window B start segment ++ ULONG WinFuncPtr; // dd ? ; real mode pointer to window function ++ USHORT BytesPerScanLine;// dw ? ; bytes per scan line ++ ++//; Mandatory information for VBE 1.2 and above ++ USHORT XResolution; // dw ? ; horizontal resolution in pixels or characters ++ USHORT YResolution; // dw ? ; vertical resolution in pixels or characters ++ UCHAR XCharSize; // db ? ; character cell width in pixels ++ UCHAR YCharSize; // db ? ; character cell height in pixels ++ UCHAR NumberOfPlanes; // db ? ; number of memory planes ++ UCHAR BitsPerPixel; // db ? ; bits per pixel ++ UCHAR NumberOfBanks; // db ? ; number of banks ++ UCHAR MemoryModel; // db ? ; memory model type ++ UCHAR BankSize; // db ? ; bank size in KB ++ UCHAR NumberOfImagePages;// db ? ; number of images ++ UCHAR ReservedForPageFunction;//db 1 ; reserved for page function ++ ++//; Direct Color fields(required for direct/6 and YUV/7 memory models) ++ UCHAR RedMaskSize; // db ? ; size of direct color red mask in bits ++ UCHAR RedFieldPosition; // db ? ; bit position of lsb of red mask ++ UCHAR GreenMaskSize; // db ? ; size of direct color green mask in bits ++ UCHAR GreenFieldPosition; // db ? ; bit position of lsb of green mask ++ UCHAR BlueMaskSize; // db ? ; size of direct color blue mask in bits ++ UCHAR BlueFieldPosition; // db ? ; bit position of lsb of blue mask ++ UCHAR RsvdMaskSize; // db ? ; size of direct color reserved mask in bits ++ UCHAR RsvdFieldPosition; // db ? ; bit position of lsb of reserved mask ++ UCHAR DirectColorModeInfo;// db ? ; direct color mode attributes ++ ++//; Mandatory information for VBE 2.0 and above ++ ULONG PhysBasePtr; // dd ? ; physical address for flat memory frame buffer ++ ULONG Reserved_1; // dd 0 ; reserved - always set to 0 ++ USHORT Reserved_2; // dw 0 ; reserved - always set to 0 ++ ++//; Mandatory information for VBE 3.0 and above ++ USHORT LinBytesPerScanLine; // dw ? ; bytes per scan line for linear modes ++ UCHAR BnkNumberOfImagePages;// db ? ; number of images for banked modes ++ UCHAR LinNumberOfImagPages; // db ? ; number of images for linear modes ++ UCHAR LinRedMaskSize; // db ? ; size of direct color red mask(linear modes) ++ UCHAR LinRedFieldPosition; // db ? ; bit position of lsb of red mask(linear modes) ++ UCHAR LinGreenMaskSize; // db ? 
; size of direct color green mask(linear modes) ++ UCHAR LinGreenFieldPosition;// db ? ; bit position of lsb of green mask(linear modes) ++ UCHAR LinBlueMaskSize; // db ? ; size of direct color blue mask(linear modes) ++ UCHAR LinBlueFieldPosition; // db ? ; bit position of lsb of blue mask(linear modes) ++ UCHAR LinRsvdMaskSize; // db ? ; size of direct color reserved mask(linear modes) ++ UCHAR LinRsvdFieldPosition; // db ? ; bit position of lsb of reserved mask(linear modes) ++ ULONG MaxPixelClock; // dd ? ; maximum pixel clock(in Hz) for graphics mode ++ UCHAR Reserved; // db 190 dup (0) + } VESA_MODE_INFO_BLOCK; + +-/* BIOS function CALLS */ +-#define ATOM_BIOS_EXTENDED_FUNCTION_CODE 0xA0 /* ATI Extended Function code */ ++// BIOS function CALLS ++#define ATOM_BIOS_EXTENDED_FUNCTION_CODE 0xA0 // ATI Extended Function code + #define ATOM_BIOS_FUNCTION_COP_MODE 0x00 + #define ATOM_BIOS_FUNCTION_SHORT_QUERY1 0x04 + #define ATOM_BIOS_FUNCTION_SHORT_QUERY2 0x05 + #define ATOM_BIOS_FUNCTION_SHORT_QUERY3 0x06 +-#define ATOM_BIOS_FUNCTION_GET_DDC 0x0B ++#define ATOM_BIOS_FUNCTION_GET_DDC 0x0B + #define ATOM_BIOS_FUNCTION_ASIC_DSTATE 0x0E + #define ATOM_BIOS_FUNCTION_DEBUG_PLAY 0x0F + #define ATOM_BIOS_FUNCTION_STV_STD 0x16 +@@ -4217,100 +5137,135 @@ typedef struct _VESA_MODE_INFO_BLOCK { + #define ATOM_BIOS_FUNCTION_PANEL_CONTROL 0x82 + #define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET 0x83 + #define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH 0x84 +-#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A ++#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A + #define ATOM_BIOS_FUNCTION_SET_CMOS 0x8B +-#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 /* Sub function 80 */ +-#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 /* Sub function 80 */ ++#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 // Sub function 80 ++#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 // Sub function 80 + + #define ATOM_BIOS_FUNCTION_DISPLAY_INFO 0x8D + #define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF 0x8E +-#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F +-#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 /* Sub function 03 */ +-#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 /* Sub function 7 */ +-#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 /* Notify caller the current thermal state */ +-#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 /* Notify caller the current critical state */ +-#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 /* Sub function 85 */ +-#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900 /* Sub function 89 */ +-#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 /* Notify caller that ADC is supported */ +- +-#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 /* Set DPMS */ +-#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 /* BL: Sub function 01 */ +-#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 /* BL: Sub function 02 */ +-#define ATOM_PARAMETER_VESA_DPMS_ON 0x0000 /* BH Parameter for DPMS ON. 
*/ +-#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 /* BH Parameter for DPMS STANDBY */ +-#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 /* BH Parameter for DPMS SUSPEND */ +-#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 /* BH Parameter for DPMS OFF */ +-#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 /* BH Parameter for DPMS REDUCE ON (NOT SUPPORTED) */ ++#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F ++#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 // Sub function 03 ++#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 // Sub function 7 ++#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 // Notify caller the current thermal state ++#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 // Notify caller the current critical state ++#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 // Sub function 85 ++#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900// Sub function 89 ++#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 // Notify caller that ADC is supported ++ ++ ++#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 // Set DPMS ++#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 // BL: Sub function 01 ++#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 // BL: Sub function 02 ++#define ATOM_PARAMETER_VESA_DPMS_ON 0x0000 // BH Parameter for DPMS ON. ++#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 // BH Parameter for DPMS STANDBY ++#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 // BH Parameter for DPMS SUSPEND ++#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 // BH Parameter for DPMS OFF ++#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 // BH Parameter for DPMS REDUCE ON (NOT SUPPORTED) + + #define ATOM_BIOS_RETURN_CODE_MASK 0x0000FF00L + #define ATOM_BIOS_REG_HIGH_MASK 0x0000FF00L + #define ATOM_BIOS_REG_LOW_MASK 0x000000FFL + +-/* structure used for VBIOS only */ ++// structure used for VBIOS only + +-/* DispOutInfoTable */ +-typedef struct _ASIC_TRANSMITTER_INFO { ++//DispOutInfoTable ++typedef struct _ASIC_TRANSMITTER_INFO ++{ + USHORT usTransmitterObjId; + USHORT usSupportDevice; +- UCHAR ucTransmitterCmdTblId; +- UCHAR ucConfig; +- UCHAR ucEncoderID; /* available 1st encoder ( default ) */ +- UCHAR ucOptionEncoderID; /* available 2nd encoder ( optional ) */ +- UCHAR uc2ndEncoderID; +- UCHAR ucReserved; +-} ASIC_TRANSMITTER_INFO; +- +-typedef struct _ASIC_ENCODER_INFO { ++ UCHAR ucTransmitterCmdTblId; ++ UCHAR ucConfig; ++ UCHAR ucEncoderID; //available 1st encoder ( default ) ++ UCHAR ucOptionEncoderID; //available 2nd encoder ( optional ) ++ UCHAR uc2ndEncoderID; ++ UCHAR ucReserved; ++}ASIC_TRANSMITTER_INFO; ++ ++typedef struct _ASIC_ENCODER_INFO ++{ + UCHAR ucEncoderID; + UCHAR ucEncoderConfig; +- USHORT usEncoderCmdTblId; +-} ASIC_ENCODER_INFO; ++ USHORT usEncoderCmdTblId; ++}ASIC_ENCODER_INFO; ++ ++typedef struct _ATOM_DISP_OUT_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT ptrTransmitterInfo; ++ USHORT ptrEncoderInfo; ++ ASIC_TRANSMITTER_INFO asTransmitterInfo[1]; ++ ASIC_ENCODER_INFO asEncoderInfo[1]; ++}ATOM_DISP_OUT_INFO; + +-typedef struct _ATOM_DISP_OUT_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; ++typedef struct _ATOM_DISP_OUT_INFO_V2 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; + USHORT ptrTransmitterInfo; + USHORT ptrEncoderInfo; +- ASIC_TRANSMITTER_INFO asTransmitterInfo[1]; +- ASIC_ENCODER_INFO asEncoderInfo[1]; +-} ATOM_DISP_OUT_INFO; ++ USHORT ptrMainCallParserFar; // direct address of main parser call in VBIOS binary. 
++ ASIC_TRANSMITTER_INFO asTransmitterInfo[1]; ++ ASIC_ENCODER_INFO asEncoderInfo[1]; ++}ATOM_DISP_OUT_INFO_V2; + +-/* DispDevicePriorityInfo */ +-typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; ++// DispDevicePriorityInfo ++typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; + USHORT asDevicePriority[16]; +-} ATOM_DISPLAY_DEVICE_PRIORITY_INFO; +- +-/* ProcessAuxChannelTransactionTable */ +-typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS { +- USHORT lpAuxRequest; +- USHORT lpDataOut; +- UCHAR ucChannelID; +- union { +- UCHAR ucReplyStatus; +- UCHAR ucDelay; ++}ATOM_DISPLAY_DEVICE_PRIORITY_INFO; ++ ++//ProcessAuxChannelTransactionTable ++typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS ++{ ++ USHORT lpAuxRequest; ++ USHORT lpDataOut; ++ UCHAR ucChannelID; ++ union ++ { ++ UCHAR ucReplyStatus; ++ UCHAR ucDelay; ++ }; ++ UCHAR ucDataOutLen; ++ UCHAR ucReserved; ++}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS; ++ ++//ProcessAuxChannelTransactionTable ++typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 ++{ ++ USHORT lpAuxRequest; ++ USHORT lpDataOut; ++ UCHAR ucChannelID; ++ union ++ { ++ UCHAR ucReplyStatus; ++ UCHAR ucDelay; + }; +- UCHAR ucDataOutLen; +- UCHAR ucReserved; +-} PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS; ++ UCHAR ucDataOutLen; ++ UCHAR ucHPD_ID; //=0: HPD1, =1: HPD2, =2: HPD3, =3: HPD4, =4: HPD5, =5: HPD6 ++}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2; + + #define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS + +-/* GetSinkType */ ++//GetSinkType + +-typedef struct _DP_ENCODER_SERVICE_PARAMETERS { ++typedef struct _DP_ENCODER_SERVICE_PARAMETERS ++{ + USHORT ucLinkClock; +- union { +- UCHAR ucConfig; /* for DP training command */ +- UCHAR ucI2cId; /* use for GET_SINK_TYPE command */ ++ union ++ { ++ UCHAR ucConfig; // for DP training command ++ UCHAR ucI2cId; // use for GET_SINK_TYPE command + }; + UCHAR ucAction; + UCHAR ucStatus; + UCHAR ucLaneNum; + UCHAR ucReserved[2]; +-} DP_ENCODER_SERVICE_PARAMETERS; ++}DP_ENCODER_SERVICE_PARAMETERS; + +-/* ucAction */ ++// ucAction + #define ATOM_DP_ACTION_GET_SINK_TYPE 0x01 ++/* obselete */ + #define ATOM_DP_ACTION_TRAINING_START 0x02 + #define ATOM_DP_ACTION_TRAINING_COMPLETE 0x03 + #define ATOM_DP_ACTION_TRAINING_PATTERN_SEL 0x04 +@@ -4318,7 +5273,7 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS { + #define ATOM_DP_ACTION_GET_VSWING_PREEMP 0x06 + #define ATOM_DP_ACTION_BLANKING 0x07 + +-/* ucConfig */ ++// ucConfig + #define ATOM_DP_CONFIG_ENCODER_SEL_MASK 0x03 + #define ATOM_DP_CONFIG_DIG1_ENCODER 0x00 + #define ATOM_DP_CONFIG_DIG2_ENCODER 0x01 +@@ -4326,14 +5281,14 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS { + #define ATOM_DP_CONFIG_LINK_SEL_MASK 0x04 + #define ATOM_DP_CONFIG_LINK_A 0x00 + #define ATOM_DP_CONFIG_LINK_B 0x04 +- ++/* /obselete */ + #define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS + +-/* DP_TRAINING_TABLE */ +-#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR ++// DP_TRAINING_TABLE ++#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR + #define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 ) +-#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16) +-#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24) ++#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16 ) ++#define 
DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24 ) + #define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 32) + #define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 40) + #define DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 48) +@@ -4341,183 +5296,241 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS { + #define DP_I2C_AUX_DDC_WRITE_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 64) + #define DP_I2C_AUX_DDC_READ_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 72) + #define DP_I2C_AUX_DDC_READ_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 76) +-#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 80) ++#define DP_I2C_AUX_DDC_WRITE_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 80) ++#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 84) + +-typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS { +- UCHAR ucI2CSpeed; +- union { +- UCHAR ucRegIndex; +- UCHAR ucStatus; ++typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS ++{ ++ UCHAR ucI2CSpeed; ++ union ++ { ++ UCHAR ucRegIndex; ++ UCHAR ucStatus; + }; +- USHORT lpI2CDataOut; +- UCHAR ucFlag; +- UCHAR ucTransBytes; +- UCHAR ucSlaveAddr; +- UCHAR ucLineNumber; +-} PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS; ++ USHORT lpI2CDataOut; ++ UCHAR ucFlag; ++ UCHAR ucTransBytes; ++ UCHAR ucSlaveAddr; ++ UCHAR ucLineNumber; ++}PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS; + + #define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS + +-/* ucFlag */ ++//ucFlag + #define HW_I2C_WRITE 1 + #define HW_I2C_READ 0 ++#define I2C_2BYTE_ADDR 0x02 + ++typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2 ++{ ++ UCHAR ucHWBlkInst; // HW block instance, 0, 1, 2, ... ++ UCHAR ucReserved[3]; ++}SET_HWBLOCK_INSTANCE_PARAMETER_V2; ++ ++#define HWBLKINST_INSTANCE_MASK 0x07 ++#define HWBLKINST_HWBLK_MASK 0xF0 ++#define HWBLKINST_HWBLK_SHIFT 0x04 ++ ++//ucHWBlock ++#define SELECT_DISP_ENGINE 0 ++#define SELECT_DISP_PLL 1 ++#define SELECT_DCIO_UNIPHY_LINK0 2 ++#define SELECT_DCIO_UNIPHY_LINK1 3 ++#define SELECT_DCIO_IMPCAL 4 ++#define SELECT_DCIO_DIG 6 ++#define SELECT_CRTC_PIXEL_RATE 7 ++ ++/****************************************************************************/ ++//Portion VI: Definitinos for vbios MC scratch registers that driver used + /****************************************************************************/ +-/* Portion VI: Definitinos being oboselete */ ++ ++#define MC_MISC0__MEMORY_TYPE_MASK 0xF0000000 ++#define MC_MISC0__MEMORY_TYPE__GDDR1 0x10000000 ++#define MC_MISC0__MEMORY_TYPE__DDR2 0x20000000 ++#define MC_MISC0__MEMORY_TYPE__GDDR3 0x30000000 ++#define MC_MISC0__MEMORY_TYPE__GDDR4 0x40000000 ++#define MC_MISC0__MEMORY_TYPE__GDDR5 0x50000000 ++#define MC_MISC0__MEMORY_TYPE__DDR3 0xB0000000 ++ ++/****************************************************************************/ ++//Portion VI: Definitinos being oboselete + /****************************************************************************/ + +-/* ========================================================================================== */ +-/* Remove the definitions below when driver is ready! 
*/ +-typedef struct _ATOM_DAC_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- USHORT usMaxFrequency; /* in 10kHz unit */ +- USHORT usReserved; +-} ATOM_DAC_INFO; +- +-typedef struct _COMPASSIONATE_DATA { +- ATOM_COMMON_TABLE_HEADER sHeader; +- +- /* ============================== DAC1 portion */ +- UCHAR ucDAC1_BG_Adjustment; +- UCHAR ucDAC1_DAC_Adjustment; +- USHORT usDAC1_FORCE_Data; +- /* ============================== DAC2 portion */ +- UCHAR ucDAC2_CRT2_BG_Adjustment; +- UCHAR ucDAC2_CRT2_DAC_Adjustment; +- USHORT usDAC2_CRT2_FORCE_Data; +- USHORT usDAC2_CRT2_MUX_RegisterIndex; +- UCHAR ucDAC2_CRT2_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */ +- UCHAR ucDAC2_NTSC_BG_Adjustment; +- UCHAR ucDAC2_NTSC_DAC_Adjustment; +- USHORT usDAC2_TV1_FORCE_Data; +- USHORT usDAC2_TV1_MUX_RegisterIndex; +- UCHAR ucDAC2_TV1_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */ +- UCHAR ucDAC2_CV_BG_Adjustment; +- UCHAR ucDAC2_CV_DAC_Adjustment; +- USHORT usDAC2_CV_FORCE_Data; +- USHORT usDAC2_CV_MUX_RegisterIndex; +- UCHAR ucDAC2_CV_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */ +- UCHAR ucDAC2_PAL_BG_Adjustment; +- UCHAR ucDAC2_PAL_DAC_Adjustment; +- USHORT usDAC2_TV2_FORCE_Data; +-} COMPASSIONATE_DATA; ++//========================================================================================== ++//Remove the definitions below when driver is ready! ++typedef struct _ATOM_DAC_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usMaxFrequency; // in 10kHz unit ++ USHORT usReserved; ++}ATOM_DAC_INFO; ++ ++ ++typedef struct _COMPASSIONATE_DATA ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ ++ //============================== DAC1 portion ++ UCHAR ucDAC1_BG_Adjustment; ++ UCHAR ucDAC1_DAC_Adjustment; ++ USHORT usDAC1_FORCE_Data; ++ //============================== DAC2 portion ++ UCHAR ucDAC2_CRT2_BG_Adjustment; ++ UCHAR ucDAC2_CRT2_DAC_Adjustment; ++ USHORT usDAC2_CRT2_FORCE_Data; ++ USHORT usDAC2_CRT2_MUX_RegisterIndex; ++ UCHAR ucDAC2_CRT2_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low ++ UCHAR ucDAC2_NTSC_BG_Adjustment; ++ UCHAR ucDAC2_NTSC_DAC_Adjustment; ++ USHORT usDAC2_TV1_FORCE_Data; ++ USHORT usDAC2_TV1_MUX_RegisterIndex; ++ UCHAR ucDAC2_TV1_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low ++ UCHAR ucDAC2_CV_BG_Adjustment; ++ UCHAR ucDAC2_CV_DAC_Adjustment; ++ USHORT usDAC2_CV_FORCE_Data; ++ USHORT usDAC2_CV_MUX_RegisterIndex; ++ UCHAR ucDAC2_CV_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low ++ UCHAR ucDAC2_PAL_BG_Adjustment; ++ UCHAR ucDAC2_PAL_DAC_Adjustment; ++ USHORT usDAC2_TV2_FORCE_Data; ++}COMPASSIONATE_DATA; + + /****************************Supported Device Info Table Definitions**********************/ +-/* ucConnectInfo: */ +-/* [7:4] - connector type */ +-/* = 1 - VGA connector */ +-/* = 2 - DVI-I */ +-/* = 3 - DVI-D */ +-/* = 4 - DVI-A */ +-/* = 5 - SVIDEO */ +-/* = 6 - COMPOSITE */ +-/* = 7 - LVDS */ +-/* = 8 - DIGITAL LINK */ +-/* = 9 - SCART */ +-/* = 0xA - HDMI_type A */ +-/* = 0xB - HDMI_type B */ +-/* = 0xE - Special case1 (DVI+DIN) */ +-/* Others=TBD */ +-/* [3:0] - DAC Associated */ +-/* = 0 - no DAC */ +-/* = 1 - DACA */ +-/* = 2 - DACB */ +-/* = 3 - External DAC */ +-/* Others=TBD */ +-/* */ +- +-typedef struct _ATOM_CONNECTOR_INFO { ++// ucConnectInfo: ++// [7:4] - connector type ++// = 1 - VGA connector ++// = 2 - DVI-I ++// = 3 - DVI-D ++// = 4 - DVI-A ++// = 5 - SVIDEO ++// = 6 - COMPOSITE 
++// = 7 - LVDS ++// = 8 - DIGITAL LINK ++// = 9 - SCART ++// = 0xA - HDMI_type A ++// = 0xB - HDMI_type B ++// = 0xE - Special case1 (DVI+DIN) ++// Others=TBD ++// [3:0] - DAC Associated ++// = 0 - no DAC ++// = 1 - DACA ++// = 2 - DACB ++// = 3 - External DAC ++// Others=TBD ++// ++ ++typedef struct _ATOM_CONNECTOR_INFO ++{ + #if ATOM_BIG_ENDIAN +- UCHAR bfConnectorType:4; +- UCHAR bfAssociatedDAC:4; ++ UCHAR bfConnectorType:4; ++ UCHAR bfAssociatedDAC:4; + #else +- UCHAR bfAssociatedDAC:4; +- UCHAR bfConnectorType:4; ++ UCHAR bfAssociatedDAC:4; ++ UCHAR bfConnectorType:4; + #endif +-} ATOM_CONNECTOR_INFO; ++}ATOM_CONNECTOR_INFO; ++ ++typedef union _ATOM_CONNECTOR_INFO_ACCESS ++{ ++ ATOM_CONNECTOR_INFO sbfAccess; ++ UCHAR ucAccess; ++}ATOM_CONNECTOR_INFO_ACCESS; + +-typedef union _ATOM_CONNECTOR_INFO_ACCESS { +- ATOM_CONNECTOR_INFO sbfAccess; +- UCHAR ucAccess; +-} ATOM_CONNECTOR_INFO_ACCESS; ++typedef struct _ATOM_CONNECTOR_INFO_I2C ++{ ++ ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo; ++ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; ++}ATOM_CONNECTOR_INFO_I2C; + +-typedef struct _ATOM_CONNECTOR_INFO_I2C { +- ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo; +- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; +-} ATOM_CONNECTOR_INFO_I2C; + +-typedef struct _ATOM_SUPPORTED_DEVICES_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- USHORT usDeviceSupport; +- ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO]; +-} ATOM_SUPPORTED_DEVICES_INFO; ++typedef struct _ATOM_SUPPORTED_DEVICES_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usDeviceSupport; ++ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO]; ++}ATOM_SUPPORTED_DEVICES_INFO; + + #define NO_INT_SRC_MAPPED 0xFF + +-typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP { +- UCHAR ucIntSrcBitmap; +-} ATOM_CONNECTOR_INC_SRC_BITMAP; +- +-typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2 { +- ATOM_COMMON_TABLE_HEADER sHeader; +- USHORT usDeviceSupport; +- ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2]; +- ATOM_CONNECTOR_INC_SRC_BITMAP +- asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2]; +-} ATOM_SUPPORTED_DEVICES_INFO_2; +- +-typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 { +- ATOM_COMMON_TABLE_HEADER sHeader; +- USHORT usDeviceSupport; +- ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE]; +- ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE]; +-} ATOM_SUPPORTED_DEVICES_INFO_2d1; ++typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP ++{ ++ UCHAR ucIntSrcBitmap; ++}ATOM_CONNECTOR_INC_SRC_BITMAP; ++ ++typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usDeviceSupport; ++ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2]; ++ ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2]; ++}ATOM_SUPPORTED_DEVICES_INFO_2; ++ ++typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usDeviceSupport; ++ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE]; ++ ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE]; ++}ATOM_SUPPORTED_DEVICES_INFO_2d1; + + #define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1 + +-typedef struct _ATOM_MISC_CONTROL_INFO { +- USHORT usFrequency; +- UCHAR ucPLL_ChargePump; /* PLL charge-pump gain control */ +- UCHAR ucPLL_DutyCycle; /* PLL duty cycle control */ +- UCHAR ucPLL_VCO_Gain; /* PLL VCO gain control */ +- UCHAR ucPLL_VoltageSwing; /* PLL driver voltage swing control */ +-} ATOM_MISC_CONTROL_INFO; ++ ++ 
++typedef struct _ATOM_MISC_CONTROL_INFO ++{ ++ USHORT usFrequency; ++ UCHAR ucPLL_ChargePump; // PLL charge-pump gain control ++ UCHAR ucPLL_DutyCycle; // PLL duty cycle control ++ UCHAR ucPLL_VCO_Gain; // PLL VCO gain control ++ UCHAR ucPLL_VoltageSwing; // PLL driver voltage swing control ++}ATOM_MISC_CONTROL_INFO; ++ + + #define ATOM_MAX_MISC_INFO 4 + +-typedef struct _ATOM_TMDS_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- USHORT usMaxFrequency; /* in 10Khz */ +- ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO]; +-} ATOM_TMDS_INFO; ++typedef struct _ATOM_TMDS_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usMaxFrequency; // in 10Khz ++ ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO]; ++}ATOM_TMDS_INFO; ++ ++ ++typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE ++{ ++ UCHAR ucTVStandard; //Same as TV standards defined above, ++ UCHAR ucPadding[1]; ++}ATOM_ENCODER_ANALOG_ATTRIBUTE; + +-typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE { +- UCHAR ucTVStandard; /* Same as TV standards defined above, */ +- UCHAR ucPadding[1]; +-} ATOM_ENCODER_ANALOG_ATTRIBUTE; ++typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE ++{ ++ UCHAR ucAttribute; //Same as other digital encoder attributes defined above ++ UCHAR ucPadding[1]; ++}ATOM_ENCODER_DIGITAL_ATTRIBUTE; + +-typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE { +- UCHAR ucAttribute; /* Same as other digital encoder attributes defined above */ +- UCHAR ucPadding[1]; +-} ATOM_ENCODER_DIGITAL_ATTRIBUTE; ++typedef union _ATOM_ENCODER_ATTRIBUTE ++{ ++ ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib; ++ ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib; ++}ATOM_ENCODER_ATTRIBUTE; + +-typedef union _ATOM_ENCODER_ATTRIBUTE { +- ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib; +- ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib; +-} ATOM_ENCODER_ATTRIBUTE; + +-typedef struct _DVO_ENCODER_CONTROL_PARAMETERS { +- USHORT usPixelClock; +- USHORT usEncoderID; +- UCHAR ucDeviceType; /* Use ATOM_DEVICE_xxx1_Index to indicate device type only. */ +- UCHAR ucAction; /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */ +- ATOM_ENCODER_ATTRIBUTE usDevAttr; +-} DVO_ENCODER_CONTROL_PARAMETERS; ++typedef struct _DVO_ENCODER_CONTROL_PARAMETERS ++{ ++ USHORT usPixelClock; ++ USHORT usEncoderID; ++ UCHAR ucDeviceType; //Use ATOM_DEVICE_xxx1_Index to indicate device type only. 
++ UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT ++ ATOM_ENCODER_ATTRIBUTE usDevAttr; ++}DVO_ENCODER_CONTROL_PARAMETERS; ++ ++typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION ++{ ++ DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder; ++ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion ++}DVO_ENCODER_CONTROL_PS_ALLOCATION; + +-typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION { +- DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder; +- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */ +-} DVO_ENCODER_CONTROL_PS_ALLOCATION; + + #define ATOM_XTMDS_ASIC_SI164_ID 1 + #define ATOM_XTMDS_ASIC_SI178_ID 2 +@@ -4526,27 +5539,30 @@ typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION { + #define ATOM_XTMDS_SUPPORTED_DUALLINK 0x00000002 + #define ATOM_XTMDS_MVPU_FPGA 0x00000004 + +-typedef struct _ATOM_XTMDS_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- USHORT usSingleLinkMaxFrequency; +- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; /* Point the ID on which I2C is used to control external chip */ +- UCHAR ucXtransimitterID; +- UCHAR ucSupportedLink; /* Bit field, bit0=1, single link supported;bit1=1,dual link supported */ +- UCHAR ucSequnceAlterID; /* Even with the same external TMDS asic, it's possible that the program seqence alters */ +- /* due to design. This ID is used to alert driver that the sequence is not "standard"! */ +- UCHAR ucMasterAddress; /* Address to control Master xTMDS Chip */ +- UCHAR ucSlaveAddress; /* Address to control Slave xTMDS Chip */ +-} ATOM_XTMDS_INFO; +- +-typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS { +- UCHAR ucEnable; /* ATOM_ENABLE=On or ATOM_DISABLE=Off */ +- UCHAR ucDevice; /* ATOM_DEVICE_DFP1_INDEX.... */ +- UCHAR ucPadding[2]; +-} DFP_DPMS_STATUS_CHANGE_PARAMETERS; ++ ++typedef struct _ATOM_XTMDS_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ USHORT usSingleLinkMaxFrequency; ++ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //Point the ID on which I2C is used to control external chip ++ UCHAR ucXtransimitterID; ++ UCHAR ucSupportedLink; // Bit field, bit0=1, single link supported;bit1=1,dual link supported ++ UCHAR ucSequnceAlterID; // Even with the same external TMDS asic, it's possible that the program seqence alters ++ // due to design. This ID is used to alert driver that the sequence is not "standard"! ++ UCHAR ucMasterAddress; // Address to control Master xTMDS Chip ++ UCHAR ucSlaveAddress; // Address to control Slave xTMDS Chip ++}ATOM_XTMDS_INFO; ++ ++typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS ++{ ++ UCHAR ucEnable; // ATOM_ENABLE=On or ATOM_DISABLE=Off ++ UCHAR ucDevice; // ATOM_DEVICE_DFP1_INDEX.... 
++ UCHAR ucPadding[2]; ++}DFP_DPMS_STATUS_CHANGE_PARAMETERS; + + /****************************Legacy Power Play Table Definitions **********************/ + +-/* Definitions for ulPowerPlayMiscInfo */ ++//Definitions for ulPowerPlayMiscInfo + #define ATOM_PM_MISCINFO_SPLIT_CLOCK 0x00000000L + #define ATOM_PM_MISCINFO_USING_MCLK_SRC 0x00000001L + #define ATOM_PM_MISCINFO_USING_SCLK_SRC 0x00000002L +@@ -4558,8 +5574,8 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS { + + #define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN 0x00000020L + #define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN 0x00000040L +-#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L /* When this bit set, ucVoltageDropIndex is not an index for GPIO pin, but a voltage ID that SW needs program */ +- ++#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L //When this bit set, ucVoltageDropIndex is not an index for GPIO pin, but a voltage ID that SW needs program ++ + #define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN 0x00000100L + #define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN 0x00000200L + #define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN 0x00000400L +@@ -4569,22 +5585,22 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS { + #define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE 0x00004000L + + #define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE 0x00008000L +-#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L ++#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L + #define ATOM_PM_MISCINFO_OVER_DRIVE_MODE 0x00020000L + #define ATOM_PM_MISCINFO_POWER_SAVING_MODE 0x00040000L + #define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE 0x00080000L + +-#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L /* 0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved */ +-#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20 ++#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L //0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved ++#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20 + + #define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE 0x00400000L + #define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2 0x00800000L + #define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4 0x01000000L +-#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L /* When set, Dynamic */ +-#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L /* When set, Dynamic */ +-#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L /* When set, This mode is for acceleated 3D mode */ ++#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L //When set, Dynamic ++#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L //When set, Dynamic ++#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L //When set, This mode is for acceleated 3D mode + +-#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L /* 1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks) */ ++#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L //1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks) + #define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT 28 + #define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS 0x80000000L + +@@ -4594,55 +5610,59 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS { + #define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO 0x00000008L + #define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE 0x00000010L + #define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN 0x00000020L +-#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L /* 
If this bit is set in multi-pp mode, then driver will pack up one with the minior power consumption. */ +- /* If it's not set in any pp mode, driver will use its default logic to pick a pp mode in video playback */ ++#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L //If this bit is set in multi-pp mode, then driver will pack up one with the minior power consumption. ++ //If it's not set in any pp mode, driver will use its default logic to pick a pp mode in video playback + #define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC 0x00000080L + #define ATOM_PM_MISCINFO2_STUTTER_MODE_EN 0x00000100L +-#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L +- +-/* ucTableFormatRevision=1 */ +-/* ucTableContentRevision=1 */ +-typedef struct _ATOM_POWERMODE_INFO { +- ULONG ulMiscInfo; /* The power level should be arranged in ascending order */ +- ULONG ulReserved1; /* must set to 0 */ +- ULONG ulReserved2; /* must set to 0 */ +- USHORT usEngineClock; +- USHORT usMemoryClock; +- UCHAR ucVoltageDropIndex; /* index to GPIO table */ +- UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */ +- UCHAR ucMinTemperature; +- UCHAR ucMaxTemperature; +- UCHAR ucNumPciELanes; /* number of PCIE lanes */ +-} ATOM_POWERMODE_INFO; +- +-/* ucTableFormatRevision=2 */ +-/* ucTableContentRevision=1 */ +-typedef struct _ATOM_POWERMODE_INFO_V2 { +- ULONG ulMiscInfo; /* The power level should be arranged in ascending order */ +- ULONG ulMiscInfo2; +- ULONG ulEngineClock; +- ULONG ulMemoryClock; +- UCHAR ucVoltageDropIndex; /* index to GPIO table */ +- UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */ +- UCHAR ucMinTemperature; +- UCHAR ucMaxTemperature; +- UCHAR ucNumPciELanes; /* number of PCIE lanes */ +-} ATOM_POWERMODE_INFO_V2; +- +-/* ucTableFormatRevision=2 */ +-/* ucTableContentRevision=2 */ +-typedef struct _ATOM_POWERMODE_INFO_V3 { +- ULONG ulMiscInfo; /* The power level should be arranged in ascending order */ +- ULONG ulMiscInfo2; +- ULONG ulEngineClock; +- ULONG ulMemoryClock; +- UCHAR ucVoltageDropIndex; /* index to Core (VDDC) votage table */ +- UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */ +- UCHAR ucMinTemperature; +- UCHAR ucMaxTemperature; +- UCHAR ucNumPciELanes; /* number of PCIE lanes */ +- UCHAR ucVDDCI_VoltageDropIndex; /* index to VDDCI votage table */ +-} ATOM_POWERMODE_INFO_V3; ++#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L ++ ++//ucTableFormatRevision=1 ++//ucTableContentRevision=1 ++typedef struct _ATOM_POWERMODE_INFO ++{ ++ ULONG ulMiscInfo; //The power level should be arranged in ascending order ++ ULONG ulReserved1; // must set to 0 ++ ULONG ulReserved2; // must set to 0 ++ USHORT usEngineClock; ++ USHORT usMemoryClock; ++ UCHAR ucVoltageDropIndex; // index to GPIO table ++ UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate ++ UCHAR ucMinTemperature; ++ UCHAR ucMaxTemperature; ++ UCHAR ucNumPciELanes; // number of PCIE lanes ++}ATOM_POWERMODE_INFO; ++ ++//ucTableFormatRevision=2 ++//ucTableContentRevision=1 ++typedef struct _ATOM_POWERMODE_INFO_V2 ++{ ++ ULONG ulMiscInfo; //The power level should be arranged in ascending order ++ ULONG ulMiscInfo2; ++ ULONG ulEngineClock; ++ ULONG ulMemoryClock; ++ UCHAR ucVoltageDropIndex; // index to GPIO table ++ UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate ++ UCHAR ucMinTemperature; ++ UCHAR ucMaxTemperature; ++ UCHAR ucNumPciELanes; // number of PCIE lanes ++}ATOM_POWERMODE_INFO_V2; ++ ++//ucTableFormatRevision=2 ++//ucTableContentRevision=2 ++typedef struct _ATOM_POWERMODE_INFO_V3 ++{ ++ ULONG 
ulMiscInfo; //The power level should be arranged in ascending order ++ ULONG ulMiscInfo2; ++ ULONG ulEngineClock; ++ ULONG ulMemoryClock; ++ UCHAR ucVoltageDropIndex; // index to Core (VDDC) votage table ++ UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate ++ UCHAR ucMinTemperature; ++ UCHAR ucMaxTemperature; ++ UCHAR ucNumPciELanes; // number of PCIE lanes ++ UCHAR ucVDDCI_VoltageDropIndex; // index to VDDCI votage table ++}ATOM_POWERMODE_INFO_V3; ++ + + #define ATOM_MAX_NUMBEROF_POWER_BLOCK 8 + +@@ -4655,40 +5675,44 @@ typedef struct _ATOM_POWERMODE_INFO_V3 { + #define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649 0x04 + #define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64 0x05 + #define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375 0x06 +-#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 /* Andigilog */ +- +-typedef struct _ATOM_POWERPLAY_INFO { +- ATOM_COMMON_TABLE_HEADER sHeader; +- UCHAR ucOverdriveThermalController; +- UCHAR ucOverdriveI2cLine; +- UCHAR ucOverdriveIntBitmap; +- UCHAR ucOverdriveControllerAddress; +- UCHAR ucSizeOfPowerModeEntry; +- UCHAR ucNumOfPowerModeEntries; +- ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; +-} ATOM_POWERPLAY_INFO; +- +-typedef struct _ATOM_POWERPLAY_INFO_V2 { +- ATOM_COMMON_TABLE_HEADER sHeader; +- UCHAR ucOverdriveThermalController; +- UCHAR ucOverdriveI2cLine; +- UCHAR ucOverdriveIntBitmap; +- UCHAR ucOverdriveControllerAddress; +- UCHAR ucSizeOfPowerModeEntry; +- UCHAR ucNumOfPowerModeEntries; +- ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; +-} ATOM_POWERPLAY_INFO_V2; +- +-typedef struct _ATOM_POWERPLAY_INFO_V3 { +- ATOM_COMMON_TABLE_HEADER sHeader; +- UCHAR ucOverdriveThermalController; +- UCHAR ucOverdriveI2cLine; +- UCHAR ucOverdriveIntBitmap; +- UCHAR ucOverdriveControllerAddress; +- UCHAR ucSizeOfPowerModeEntry; +- UCHAR ucNumOfPowerModeEntries; +- ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; +-} ATOM_POWERPLAY_INFO_V3; ++#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 // Andigilog ++ ++ ++typedef struct _ATOM_POWERPLAY_INFO ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR ucOverdriveThermalController; ++ UCHAR ucOverdriveI2cLine; ++ UCHAR ucOverdriveIntBitmap; ++ UCHAR ucOverdriveControllerAddress; ++ UCHAR ucSizeOfPowerModeEntry; ++ UCHAR ucNumOfPowerModeEntries; ++ ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; ++}ATOM_POWERPLAY_INFO; ++ ++typedef struct _ATOM_POWERPLAY_INFO_V2 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR ucOverdriveThermalController; ++ UCHAR ucOverdriveI2cLine; ++ UCHAR ucOverdriveIntBitmap; ++ UCHAR ucOverdriveControllerAddress; ++ UCHAR ucSizeOfPowerModeEntry; ++ UCHAR ucNumOfPowerModeEntries; ++ ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; ++}ATOM_POWERPLAY_INFO_V2; ++ ++typedef struct _ATOM_POWERPLAY_INFO_V3 ++{ ++ ATOM_COMMON_TABLE_HEADER sHeader; ++ UCHAR ucOverdriveThermalController; ++ UCHAR ucOverdriveI2cLine; ++ UCHAR ucOverdriveIntBitmap; ++ UCHAR ucOverdriveControllerAddress; ++ UCHAR ucSizeOfPowerModeEntry; ++ UCHAR ucNumOfPowerModeEntries; ++ ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; ++}ATOM_POWERPLAY_INFO_V3; + + /* New PPlib */ + /**************************************************************************/ +@@ -4873,40 +5897,42 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO + UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16} + UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. 
Minimum down stream width could be bigger as display BW requriement. + USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200). +- ULONG ulFlags; ++ ULONG ulFlags; + } ATOM_PPLIB_RS780_CLOCK_INFO; + +-#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0 +-#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1 +-#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2 +-#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3 ++#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0 ++#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1 ++#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2 ++#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3 + + #define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is. + #define ATOM_PPLIB_RS780_SPMCLK_LOW 1 + #define ATOM_PPLIB_RS780_SPMCLK_HIGH 2 + +-#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0 +-#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1 +-#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2 ++#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0 ++#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1 ++#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2 + + /**************************************************************************/ + +-/* Following definitions are for compatiblity issue in different SW components. */ ++ ++// Following definitions are for compatiblity issue in different SW components. + #define ATOM_MASTER_DATA_TABLE_REVISION 0x01 +-#define Object_Info Object_Header ++#define Object_Info Object_Header + #define AdjustARB_SEQ MC_InitParameter + #define VRAM_GPIO_DetectionInfo VoltageObjectInfo +-#define ASIC_VDDCI_Info ASIC_ProfilingInfo ++#define ASIC_VDDCI_Info ASIC_ProfilingInfo + #define ASIC_MVDDQ_Info MemoryTrainingInfo +-#define SS_Info PPLL_SS_Info ++#define SS_Info PPLL_SS_Info + #define ASIC_MVDDC_Info ASIC_InternalSS_Info + #define DispDevicePriorityInfo SaveRestoreInfo + #define DispOutInfo TV_VideoMode + ++ + #define ATOM_ENCODER_OBJECT_TABLE ATOM_OBJECT_TABLE + #define ATOM_CONNECTOR_OBJECT_TABLE ATOM_OBJECT_TABLE + +-/* New device naming, remove them when both DAL/VBIOS is ready */ ++//New device naming, remove them when both DAL/VBIOS is ready + #define DFP2I_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS + #define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS + +@@ -4921,7 +5947,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO + + #define ATOM_DEVICE_DFP1I_INDEX ATOM_DEVICE_DFP1_INDEX + #define ATOM_DEVICE_DFP1X_INDEX ATOM_DEVICE_DFP2_INDEX +- ++ + #define ATOM_DEVICE_DFP2I_INDEX 0x00000009 + #define ATOM_DEVICE_DFP2I_SUPPORT (0x1L << ATOM_DEVICE_DFP2I_INDEX) + +@@ -4939,7 +5965,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO + + #define ATOM_S3_DFP2I_ACTIVEb1 0x02 + +-#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE ++#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE + #define ATOM_S3_DFP1X_ACTIVE ATOM_S3_DFP2_ACTIVE + + #define ATOM_S3_DFP2I_ACTIVE 0x00000200L +@@ -4958,14 +5984,14 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO + #define ATOM_S6_ACC_REQ_DFP2Ib3 0x02 + #define ATOM_S6_ACC_REQ_DFP2I 0x02000000L + +-#define TMDS1XEncoderControl DVOEncoderControl ++#define TMDS1XEncoderControl DVOEncoderControl + #define DFP1XOutputControl DVOOutputControl + + #define ExternalDFPOutputControl DFP1XOutputControl + #define EnableExternalTMDS_Encoder TMDS1XEncoderControl + + #define DFP1IOutputControl TMDSAOutputControl +-#define DFP2IOutputControl LVTMAOutputControl ++#define DFP2IOutputControl LVTMAOutputControl + + #define DAC1_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS + #define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION 
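As an illustrative aside to the ulPowerPlayMiscInfo definitions in the hunks above (this is a standalone sketch, not part of the patch): the legacy PowerPlay miscinfo word is consumed through mask/shift pairs, e.g. frame modulation in bits [21:20] and the PowerPlay settings group in bits [30:28]. The macro names and values below are copied from the header text above; the sample value and the program around them are hypothetical.

#include <stdio.h>

/* Mask/shift values mirrored from the atombios.h hunk above. */
#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK           0x00300000L
#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT          20
#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK   0x70000000L
#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT  28

int main(void)
{
    /* Hypothetical ulMiscInfo value: 4-level FM (2) and settings group 3 ("Balanced"). */
    unsigned long ul_misc_info =
        (2UL << ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT) |
        (3UL << ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT);

    unsigned long fm  = (ul_misc_info & ATOM_PM_MISCINFO_FRAME_MODULATION_MASK)
                        >> ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT;
    unsigned long grp = (ul_misc_info & ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK)
                        >> ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT;

    /* 0 = FM disable, 1 = 2-level FM, 2 = 4-level FM, 3 = reserved */
    printf("frame modulation = %lu\n", fm);
    /* 1 = optimal battery life ... 5 = optimal performance (per the header comment) */
    printf("settings group   = %lu\n", grp);
    return 0;
}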
+@@ -4974,7 +6000,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO + #define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION + + #define ucDac1Standard ucDacStandard +-#define ucDac2Standard ucDacStandard ++#define ucDac2Standard ucDacStandard + + #define TMDS1EncoderControl TMDSAEncoderControl + #define TMDS2EncoderControl LVTMAEncoderControl +@@ -4984,12 +6010,56 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO + #define CRT1OutputControl DAC1OutputControl + #define CRT2OutputControl DAC2OutputControl + +-/* These two lines will be removed for sure in a few days, will follow up with Michael V. */ ++//These two lines will be removed for sure in a few days, will follow up with Michael V. + #define EnableLVDS_SS EnableSpreadSpectrumOnPPLL +-#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL ++#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL ++ ++//#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L ++//#define ATOM_S2_LCD1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE ++//#define ATOM_S2_TV1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE ++//#define ATOM_S2_DFP1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE ++//#define ATOM_S2_CRT2_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE ++ ++#define ATOM_S6_ACC_REQ_TV2 0x00400000L ++#define ATOM_DEVICE_TV2_INDEX 0x00000006 ++#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX) ++#define ATOM_S0_TV2 0x00100000L ++#define ATOM_S3_TV2_ACTIVE ATOM_S3_DFP6_ACTIVE ++#define ATOM_S3_TV2_CRTC_ACTIVE ATOM_S3_DFP6_CRTC_ACTIVE ++ ++// ++#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L ++#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L ++#define ATOM_S2_TV1_DPMS_STATE 0x00040000L ++#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L ++#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L ++#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L ++#define ATOM_S2_TV2_DPMS_STATE 0x00400000L ++#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L ++#define ATOM_S2_CV_DPMS_STATE 0x01000000L ++#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L ++#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L ++#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L ++ ++#define ATOM_S2_CRT1_DPMS_STATEb2 0x01 ++#define ATOM_S2_LCD1_DPMS_STATEb2 0x02 ++#define ATOM_S2_TV1_DPMS_STATEb2 0x04 ++#define ATOM_S2_DFP1_DPMS_STATEb2 0x08 ++#define ATOM_S2_CRT2_DPMS_STATEb2 0x10 ++#define ATOM_S2_LCD2_DPMS_STATEb2 0x20 ++#define ATOM_S2_TV2_DPMS_STATEb2 0x40 ++#define ATOM_S2_DFP2_DPMS_STATEb2 0x80 ++#define ATOM_S2_CV_DPMS_STATEb3 0x01 ++#define ATOM_S2_DFP3_DPMS_STATEb3 0x02 ++#define ATOM_S2_DFP4_DPMS_STATEb3 0x04 ++#define ATOM_S2_DFP5_DPMS_STATEb3 0x08 ++ ++#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20 ++#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40 ++#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80 + + /*********************************************************************************/ + +-#pragma pack() /* BIOS data must use byte aligment */ ++#pragma pack() // BIOS data must use byte aligment + + #endif /* _ATOMBIOS_H */ +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c +index af464e3..c076eac 100644 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c +@@ -245,21 +245,25 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) + + switch (mode) { + case DRM_MODE_DPMS_ON: +- atombios_enable_crtc(crtc, 1); ++ atombios_enable_crtc(crtc, ATOM_ENABLE); + if (ASIC_IS_DCE3(rdev)) +- atombios_enable_crtc_memreq(crtc, 1); +- atombios_blank_crtc(crtc, 0); +- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); ++ atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); ++ 
atombios_blank_crtc(crtc, ATOM_DISABLE); ++ /* XXX re-enable when interrupt support is added */ ++ if (!ASIC_IS_DCE4(rdev)) ++ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); + radeon_crtc_load_lut(crtc); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: +- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); +- atombios_blank_crtc(crtc, 1); ++ /* XXX re-enable when interrupt support is added */ ++ if (!ASIC_IS_DCE4(rdev)) ++ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); ++ atombios_blank_crtc(crtc, ATOM_ENABLE); + if (ASIC_IS_DCE3(rdev)) +- atombios_enable_crtc_memreq(crtc, 0); +- atombios_enable_crtc(crtc, 0); ++ atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); ++ atombios_enable_crtc(crtc, ATOM_DISABLE); + break; + } + } +@@ -363,6 +367,10 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable) + uint16_t percentage = 0; + uint8_t type = 0, step = 0, delay = 0, range = 0; + ++ /* XXX add ss support for DCE4 */ ++ if (ASIC_IS_DCE4(rdev)) ++ return; ++ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + radeon_encoder = to_radeon_encoder(encoder); +@@ -409,6 +417,7 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable) + + union adjust_pixel_clock { + ADJUST_DISPLAY_PLL_PS_ALLOCATION v1; ++ ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3; + }; + + static u32 atombios_adjust_pll(struct drm_crtc *crtc, +@@ -420,6 +429,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, + struct drm_encoder *encoder = NULL; + struct radeon_encoder *radeon_encoder = NULL; + u32 adjusted_clock = mode->clock; ++ int encoder_mode = 0; + + /* reset the pll flags */ + pll->flags = 0; +@@ -448,6 +458,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + radeon_encoder = to_radeon_encoder(encoder); ++ encoder_mode = atombios_get_encoder_mode(encoder); + if (ASIC_IS_AVIVO(rdev)) { + /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ + if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) +@@ -468,14 +479,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, + */ + if (ASIC_IS_DCE3(rdev)) { + union adjust_pixel_clock args; +- struct radeon_encoder_atom_dig *dig; + u8 frev, crev; + int index; + +- if (!radeon_encoder->enc_priv) +- return adjusted_clock; +- dig = radeon_encoder->enc_priv; +- + index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); + atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, + &crev); +@@ -489,12 +495,56 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, + case 2: + args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); + args.v1.ucTransmitterID = radeon_encoder->encoder_id; +- args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder); ++ args.v1.ucEncodeMode = encoder_mode; + + atom_execute_table(rdev->mode_info.atom_context, + index, (uint32_t *)&args); + adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10; + break; ++ case 3: ++ args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10); ++ args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; ++ args.v3.sInput.ucEncodeMode = encoder_mode; ++ args.v3.sInput.ucDispPllConfig = 0; ++ if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { ++ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; ++ ++ if (encoder_mode == ATOM_ENCODER_MODE_DP) ++ args.v3.sInput.ucDispPllConfig |= ++ DISPPLL_CONFIG_COHERENT_MODE; ++ else { ++ if 
(dig->coherent_mode) ++ args.v3.sInput.ucDispPllConfig |= ++ DISPPLL_CONFIG_COHERENT_MODE; ++ if (mode->clock > 165000) ++ args.v3.sInput.ucDispPllConfig |= ++ DISPPLL_CONFIG_DUAL_LINK; ++ } ++ } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { ++ /* may want to enable SS on DP/eDP eventually */ ++ /*args.v3.sInput.ucDispPllConfig |= ++ DISPPLL_CONFIG_SS_ENABLE;*/ ++ if (encoder_mode == ATOM_ENCODER_MODE_DP) ++ args.v3.sInput.ucDispPllConfig |= ++ DISPPLL_CONFIG_COHERENT_MODE; ++ else { ++ if (mode->clock > 165000) ++ args.v3.sInput.ucDispPllConfig |= ++ DISPPLL_CONFIG_DUAL_LINK; ++ } ++ } ++ atom_execute_table(rdev->mode_info.atom_context, ++ index, (uint32_t *)&args); ++ adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; ++ if (args.v3.sOutput.ucRefDiv) { ++ pll->flags |= RADEON_PLL_USE_REF_DIV; ++ pll->reference_div = args.v3.sOutput.ucRefDiv; ++ } ++ if (args.v3.sOutput.ucPostDiv) { ++ pll->flags |= RADEON_PLL_USE_POST_DIV; ++ pll->post_div = args.v3.sOutput.ucPostDiv; ++ } ++ break; + default: + DRM_ERROR("Unknown table version %d %d\n", frev, crev); + return adjusted_clock; +@@ -513,9 +563,47 @@ union set_pixel_clock { + PIXEL_CLOCK_PARAMETERS v1; + PIXEL_CLOCK_PARAMETERS_V2 v2; + PIXEL_CLOCK_PARAMETERS_V3 v3; ++ PIXEL_CLOCK_PARAMETERS_V5 v5; + }; + +-void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) ++static void atombios_crtc_set_dcpll(struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct radeon_device *rdev = dev->dev_private; ++ u8 frev, crev; ++ int index; ++ union set_pixel_clock args; ++ ++ memset(&args, 0, sizeof(args)); ++ ++ index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); ++ atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, ++ &crev); ++ ++ switch (frev) { ++ case 1: ++ switch (crev) { ++ case 5: ++ /* if the default dcpll clock is specified, ++ * SetPixelClock provides the dividers ++ */ ++ args.v5.ucCRTC = ATOM_CRTC_INVALID; ++ args.v5.usPixelClock = rdev->clock.default_dispclk; ++ args.v5.ucPpll = ATOM_DCPLL; ++ break; ++ default: ++ DRM_ERROR("Unknown table version %d %d\n", frev, crev); ++ return; ++ } ++ break; ++ default: ++ DRM_ERROR("Unknown table version %d %d\n", frev, crev); ++ return; ++ } ++ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); ++} ++ ++static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) + { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; +@@ -529,12 +617,14 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) + u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; + struct radeon_pll *pll; + u32 adjusted_clock; ++ int encoder_mode = 0; + + memset(&args, 0, sizeof(args)); + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + radeon_encoder = to_radeon_encoder(encoder); ++ encoder_mode = atombios_get_encoder_mode(encoder); + break; + } + } +@@ -542,10 +632,18 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) + if (!radeon_encoder) + return; + +- if (radeon_crtc->crtc_id == 0) ++ switch (radeon_crtc->pll_id) { ++ case ATOM_PPLL1: + pll = &rdev->clock.p1pll; +- else ++ break; ++ case ATOM_PPLL2: + pll = &rdev->clock.p2pll; ++ break; ++ case ATOM_DCPLL: ++ case ATOM_PPLL_INVALID: ++ pll = &rdev->clock.dcpll; ++ break; ++ } + + /* adjust pixel clock as needed */ + adjusted_clock = atombios_adjust_pll(crtc, mode, pll); 
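A brief aside on the crev == 3 branch of atombios_adjust_pll just above (a standalone sketch, not part of the patch): for a DFP encoder it requests coherent mode for DP, or for TMDS when the digital encoder's coherent_mode flag is set, and requests dual link once the pixel clock exceeds 165000 kHz, the single-link TMDS limit. The DISPPLL_CONFIG_* bit positions, the helper name, and its parameters below are placeholders; only the branching mirrors the hunk.

#include <stdbool.h>
#include <stdio.h>

/* Bit positions are placeholders; the flag names mirror those used above. */
#define DISPPLL_CONFIG_COHERENT_MODE  (1 << 0)
#define DISPPLL_CONFIG_DUAL_LINK      (1 << 1)

/* Sketch of the ucDispPllConfig decision for a DFP encoder. */
static unsigned int dfp_pll_config(bool is_dp, bool coherent, int clock_khz)
{
    unsigned int cfg = 0;

    if (is_dp) {
        cfg |= DISPPLL_CONFIG_COHERENT_MODE;     /* DP link is always coherent */
    } else {
        if (coherent)
            cfg |= DISPPLL_CONFIG_COHERENT_MODE; /* dig->coherent_mode set */
        if (clock_khz > 165000)                  /* beyond single-link TMDS */
            cfg |= DISPPLL_CONFIG_DUAL_LINK;
    }
    return cfg;
}

int main(void)
{
    printf("DP  @ 270000 kHz -> 0x%x\n", dfp_pll_config(true,  false, 270000));
    printf("DVI @ 268500 kHz -> 0x%x\n", dfp_pll_config(false, true,  268500));
    printf("DVI @ 108000 kHz -> 0x%x\n", dfp_pll_config(false, false, 108000));
    return 0;
}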
+@@ -576,8 +674,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) + args.v1.usFbDiv = cpu_to_le16(fb_div); + args.v1.ucFracFbDiv = frac_fb_div; + args.v1.ucPostDiv = post_div; +- args.v1.ucPpll = +- radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; ++ args.v1.ucPpll = radeon_crtc->pll_id; + args.v1.ucCRTC = radeon_crtc->crtc_id; + args.v1.ucRefDivSrc = 1; + break; +@@ -587,8 +684,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) + args.v2.usFbDiv = cpu_to_le16(fb_div); + args.v2.ucFracFbDiv = frac_fb_div; + args.v2.ucPostDiv = post_div; +- args.v2.ucPpll = +- radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; ++ args.v2.ucPpll = radeon_crtc->pll_id; + args.v2.ucCRTC = radeon_crtc->crtc_id; + args.v2.ucRefDivSrc = 1; + break; +@@ -598,12 +694,22 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) + args.v3.usFbDiv = cpu_to_le16(fb_div); + args.v3.ucFracFbDiv = frac_fb_div; + args.v3.ucPostDiv = post_div; +- args.v3.ucPpll = +- radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; +- args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2); ++ args.v3.ucPpll = radeon_crtc->pll_id; ++ args.v3.ucMiscInfo = (radeon_crtc->pll_id << 2); + args.v3.ucTransmitterId = radeon_encoder->encoder_id; +- args.v3.ucEncoderMode = +- atombios_get_encoder_mode(encoder); ++ args.v3.ucEncoderMode = encoder_mode; ++ break; ++ case 5: ++ args.v5.ucCRTC = radeon_crtc->crtc_id; ++ args.v5.usPixelClock = cpu_to_le16(mode->clock / 10); ++ args.v5.ucRefDiv = ref_div; ++ args.v5.usFbDiv = cpu_to_le16(fb_div); ++ args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); ++ args.v5.ucPostDiv = post_div; ++ args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */ ++ args.v5.ucTransmitterID = radeon_encoder->encoder_id; ++ args.v5.ucEncoderMode = encoder_mode; ++ args.v5.ucPpll = radeon_crtc->pll_id; + break; + default: + DRM_ERROR("Unknown table version %d %d\n", frev, crev); +@@ -618,6 +724,140 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + } + ++static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, ++ struct drm_framebuffer *old_fb) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_device *dev = crtc->dev; ++ struct radeon_device *rdev = dev->dev_private; ++ struct radeon_framebuffer *radeon_fb; ++ struct drm_gem_object *obj; ++ struct radeon_bo *rbo; ++ uint64_t fb_location; ++ uint32_t fb_format, fb_pitch_pixels, tiling_flags; ++ int r; ++ ++ /* no fb bound */ ++ if (!crtc->fb) { ++ DRM_DEBUG("No FB bound\n"); ++ return 0; ++ } ++ ++ radeon_fb = to_radeon_framebuffer(crtc->fb); ++ ++ /* Pin framebuffer & get tilling informations */ ++ obj = radeon_fb->obj; ++ rbo = obj->driver_private; ++ r = radeon_bo_reserve(rbo, false); ++ if (unlikely(r != 0)) ++ return r; ++ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); ++ if (unlikely(r != 0)) { ++ radeon_bo_unreserve(rbo); ++ return -EINVAL; ++ } ++ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); ++ radeon_bo_unreserve(rbo); ++ ++ switch (crtc->fb->bits_per_pixel) { ++ case 8: ++ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) | ++ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED)); ++ break; ++ case 15: ++ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | ++ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555)); ++ break; ++ case 16: ++ fb_format = 
(EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | ++ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565)); ++ break; ++ case 24: ++ case 32: ++ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | ++ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888)); ++ break; ++ default: ++ DRM_ERROR("Unsupported screen depth %d\n", ++ crtc->fb->bits_per_pixel); ++ return -EINVAL; ++ } ++ ++ switch (radeon_crtc->crtc_id) { ++ case 0: ++ WREG32(AVIVO_D1VGA_CONTROL, 0); ++ break; ++ case 1: ++ WREG32(AVIVO_D2VGA_CONTROL, 0); ++ break; ++ case 2: ++ WREG32(EVERGREEN_D3VGA_CONTROL, 0); ++ break; ++ case 3: ++ WREG32(EVERGREEN_D4VGA_CONTROL, 0); ++ break; ++ case 4: ++ WREG32(EVERGREEN_D5VGA_CONTROL, 0); ++ break; ++ case 5: ++ WREG32(EVERGREEN_D6VGA_CONTROL, 0); ++ break; ++ default: ++ break; ++ } ++ ++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, ++ upper_32_bits(fb_location)); ++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, ++ upper_32_bits(fb_location)); ++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, ++ (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); ++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, ++ (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); ++ WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); ++ ++ WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); ++ WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); ++ WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0); ++ WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0); ++ WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width); ++ WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height); ++ ++ fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); ++ WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); ++ WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1); ++ ++ WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, ++ crtc->mode.vdisplay); ++ x &= ~3; ++ y &= ~1; ++ WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset, ++ (x << 16) | y); ++ WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, ++ (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); ++ ++ if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ++ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, ++ EVERGREEN_INTERLEAVE_EN); ++ else ++ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0); ++ ++ if (old_fb && old_fb != crtc->fb) { ++ radeon_fb = to_radeon_framebuffer(old_fb); ++ rbo = radeon_fb->obj->driver_private; ++ r = radeon_bo_reserve(rbo, false); ++ if (unlikely(r != 0)) ++ return r; ++ radeon_bo_unpin(rbo); ++ radeon_bo_unreserve(rbo); ++ } ++ ++ /* Bytes per pixel may have changed */ ++ radeon_bandwidth_update(rdev); ++ ++ return 0; ++} ++ + static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) + { +@@ -755,7 +995,9 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + +- if (ASIC_IS_AVIVO(rdev)) ++ if (ASIC_IS_DCE4(rdev)) ++ return evergreen_crtc_set_base(crtc, x, y, old_fb); ++ else if (ASIC_IS_AVIVO(rdev)) + return avivo_crtc_set_base(crtc, x, y, old_fb); + else + return radeon_crtc_set_base(crtc, x, y, old_fb); +@@ -785,6 +1027,46 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc) 
+ } + } + ++static int radeon_atom_pick_pll(struct drm_crtc *crtc) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_device *dev = crtc->dev; ++ struct radeon_device *rdev = dev->dev_private; ++ struct drm_encoder *test_encoder; ++ struct drm_crtc *test_crtc; ++ uint32_t pll_in_use = 0; ++ ++ if (ASIC_IS_DCE4(rdev)) { ++ /* if crtc is driving DP and we have an ext clock, use that */ ++ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { ++ if (test_encoder->crtc && (test_encoder->crtc == crtc)) { ++ if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) { ++ if (rdev->clock.dp_extclk) ++ return ATOM_PPLL_INVALID; ++ } ++ } ++ } ++ ++ /* otherwise, pick one of the plls */ ++ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { ++ struct radeon_crtc *radeon_test_crtc; ++ ++ if (crtc == test_crtc) ++ continue; ++ ++ radeon_test_crtc = to_radeon_crtc(test_crtc); ++ if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) && ++ (radeon_test_crtc->pll_id <= ATOM_PPLL2)) ++ pll_in_use |= (1 << radeon_test_crtc->pll_id); ++ } ++ if (!(pll_in_use & 1)) ++ return ATOM_PPLL1; ++ return ATOM_PPLL2; ++ } else ++ return radeon_crtc->crtc_id; ++ ++} ++ + int atombios_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, +@@ -796,19 +1078,27 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, + + /* TODO color tiling */ + ++ /* pick pll */ ++ radeon_crtc->pll_id = radeon_atom_pick_pll(crtc); ++ + atombios_set_ss(crtc, 0); ++ /* always set DCPLL */ ++ if (ASIC_IS_DCE4(rdev)) ++ atombios_crtc_set_dcpll(crtc); + atombios_crtc_set_pll(crtc, adjusted_mode); + atombios_set_ss(crtc, 1); +- atombios_crtc_set_timing(crtc, adjusted_mode); + +- if (ASIC_IS_AVIVO(rdev)) +- atombios_crtc_set_base(crtc, x, y, old_fb); ++ if (ASIC_IS_DCE4(rdev)) ++ atombios_set_crtc_dtd_timing(crtc, adjusted_mode); ++ else if (ASIC_IS_AVIVO(rdev)) ++ atombios_crtc_set_timing(crtc, adjusted_mode); + else { ++ atombios_crtc_set_timing(crtc, adjusted_mode); + if (radeon_crtc->crtc_id == 0) + atombios_set_crtc_dtd_timing(crtc, adjusted_mode); +- atombios_crtc_set_base(crtc, x, y, old_fb); + radeon_legacy_atom_fixup(crtc); + } ++ atombios_crtc_set_base(crtc, x, y, old_fb); + atombios_overscan_setup(crtc, mode, adjusted_mode); + atombios_scaler_setup(crtc); + return 0; +@@ -825,14 +1115,14 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, + + static void atombios_crtc_prepare(struct drm_crtc *crtc) + { +- atombios_lock_crtc(crtc, 1); ++ atombios_lock_crtc(crtc, ATOM_ENABLE); + atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + } + + static void atombios_crtc_commit(struct drm_crtc *crtc) + { + atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON); +- atombios_lock_crtc(crtc, 0); ++ atombios_lock_crtc(crtc, ATOM_DISABLE); + } + + static const struct drm_crtc_helper_funcs atombios_helper_funcs = { +@@ -848,8 +1138,37 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = { + void radeon_atombios_init_crtc(struct drm_device *dev, + struct radeon_crtc *radeon_crtc) + { +- if (radeon_crtc->crtc_id == 1) +- radeon_crtc->crtc_offset = +- AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; ++ struct radeon_device *rdev = dev->dev_private; ++ ++ if (ASIC_IS_DCE4(rdev)) { ++ switch (radeon_crtc->crtc_id) { ++ case 0: ++ default: ++ radeon_crtc->crtc_offset = EVERGREEN_CRTC0_REGISTER_OFFSET; ++ break; ++ case 1: ++ radeon_crtc->crtc_offset = EVERGREEN_CRTC1_REGISTER_OFFSET; ++ break; ++ case 2: ++ radeon_crtc->crtc_offset = 
EVERGREEN_CRTC2_REGISTER_OFFSET; ++ break; ++ case 3: ++ radeon_crtc->crtc_offset = EVERGREEN_CRTC3_REGISTER_OFFSET; ++ break; ++ case 4: ++ radeon_crtc->crtc_offset = EVERGREEN_CRTC4_REGISTER_OFFSET; ++ break; ++ case 5: ++ radeon_crtc->crtc_offset = EVERGREEN_CRTC5_REGISTER_OFFSET; ++ break; ++ } ++ } else { ++ if (radeon_crtc->crtc_id == 1) ++ radeon_crtc->crtc_offset = ++ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; ++ else ++ radeon_crtc->crtc_offset = 0; ++ } ++ radeon_crtc->pll_id = -1; + drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); + } +diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c +index 99915a6..28b31c6 100644 +--- a/drivers/gpu/drm/radeon/atombios_dp.c ++++ b/drivers/gpu/drm/radeon/atombios_dp.c +@@ -321,6 +321,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], + train_set[lane] = v | p; + } + ++union aux_channel_transaction { ++ PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; ++ PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; ++}; + + /* radeon aux chan functions */ + bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, +@@ -329,7 +333,7 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, + { + struct drm_device *dev = chan->dev; + struct radeon_device *rdev = dev->dev_private; +- PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; ++ union aux_channel_transaction args; + int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); + unsigned char *base; + int retry_count = 0; +@@ -341,31 +345,33 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, + retry: + memcpy(base, req_bytes, num_bytes); + +- args.lpAuxRequest = 0; +- args.lpDataOut = 16; +- args.ucDataOutLen = 0; +- args.ucChannelID = chan->rec.i2c_id; +- args.ucDelay = delay / 10; ++ args.v1.lpAuxRequest = 0; ++ args.v1.lpDataOut = 16; ++ args.v1.ucDataOutLen = 0; ++ args.v1.ucChannelID = chan->rec.i2c_id; ++ args.v1.ucDelay = delay / 10; ++ if (ASIC_IS_DCE4(rdev)) ++ args.v2.ucHPD_ID = chan->rec.hpd_id; + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + +- if (args.ucReplyStatus && !args.ucDataOutLen) { +- if (args.ucReplyStatus == 0x20 && retry_count++ < 10) ++ if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) { ++ if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10) + goto retry; + DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n", + req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], +- chan->rec.i2c_id, args.ucReplyStatus, retry_count); ++ chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count); + return false; + } + +- if (args.ucDataOutLen && read_byte && read_buf_len) { +- if (read_buf_len < args.ucDataOutLen) { ++ if (args.v1.ucDataOutLen && read_byte && read_buf_len) { ++ if (read_buf_len < args.v1.ucDataOutLen) { + DRM_ERROR("Buffer to small for return answer %d %d\n", +- read_buf_len, args.ucDataOutLen); ++ read_buf_len, args.v1.ucDataOutLen); + return false; + } + { +- int len = min(read_buf_len, args.ucDataOutLen); ++ int len = min(read_buf_len, args.v1.ucDataOutLen); + memcpy(read_byte, base + 16, len); + } + } +@@ -626,12 +632,19 @@ void dp_link_train(struct drm_encoder *encoder, + dp_set_link_bw_lanes(radeon_connector, link_configuration); + /* disable downspread on the sink */ + dp_set_downspread(radeon_connector, 0); +- /* start training on the source */ +- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START, +- dig_connector->dp_clock, enc_id, 0); +- /* 
set training pattern 1 on the source */ +- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, +- dig_connector->dp_clock, enc_id, 0); ++ if (ASIC_IS_DCE4(rdev)) { ++ /* start training on the source */ ++ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START); ++ /* set training pattern 1 on the source */ ++ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1); ++ } else { ++ /* start training on the source */ ++ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START, ++ dig_connector->dp_clock, enc_id, 0); ++ /* set training pattern 1 on the source */ ++ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, ++ dig_connector->dp_clock, enc_id, 0); ++ } + + /* set initial vs/emph */ + memset(train_set, 0, 4); +@@ -691,8 +704,11 @@ void dp_link_train(struct drm_encoder *encoder, + /* set training pattern 2 on the sink */ + dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2); + /* set training pattern 2 on the source */ +- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, +- dig_connector->dp_clock, enc_id, 1); ++ if (ASIC_IS_DCE4(rdev)) ++ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2); ++ else ++ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, ++ dig_connector->dp_clock, enc_id, 1); + + /* channel equalization loop */ + tries = 0; +@@ -731,8 +747,12 @@ void dp_link_train(struct drm_encoder *encoder, + /* disable the training pattern on the sink */ + dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE); + +- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, +- dig_connector->dp_clock, enc_id, 0); ++ /* disable the training pattern on the source */ ++ if (ASIC_IS_DCE4(rdev)) ++ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE); ++ else ++ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, ++ dig_connector->dp_clock, enc_id, 0); + } + + int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, +diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c +new file mode 100644 +index 0000000..c2f9752 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/evergreen.c +@@ -0,0 +1,794 @@ ++/* ++ * Copyright 2010 Advanced Micro Devices, Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: Alex Deucher ++ */ ++#include ++#include ++#include "drmP.h" ++#include "radeon.h" ++#include "radeon_drm.h" ++#include "rv770d.h" ++#include "atom.h" ++#include "avivod.h" ++#include "evergreen_reg.h" ++ ++static void evergreen_gpu_init(struct radeon_device *rdev); ++void evergreen_fini(struct radeon_device *rdev); ++ ++bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) ++{ ++ bool connected = false; ++ /* XXX */ ++ return connected; ++} ++ ++void evergreen_hpd_set_polarity(struct radeon_device *rdev, ++ enum radeon_hpd_id hpd) ++{ ++ /* XXX */ ++} ++ ++void evergreen_hpd_init(struct radeon_device *rdev) ++{ ++ /* XXX */ ++} ++ ++ ++void evergreen_bandwidth_update(struct radeon_device *rdev) ++{ ++ /* XXX */ ++} ++ ++void evergreen_hpd_fini(struct radeon_device *rdev) ++{ ++ /* XXX */ ++} ++ ++static int evergreen_mc_wait_for_idle(struct radeon_device *rdev) ++{ ++ unsigned i; ++ u32 tmp; ++ ++ for (i = 0; i < rdev->usec_timeout; i++) { ++ /* read MC_STATUS */ ++ tmp = RREG32(SRBM_STATUS) & 0x1F00; ++ if (!tmp) ++ return 0; ++ udelay(1); ++ } ++ return -1; ++} ++ ++/* ++ * GART ++ */ ++int evergreen_pcie_gart_enable(struct radeon_device *rdev) ++{ ++ u32 tmp; ++ int r, i; ++ ++ if (rdev->gart.table.vram.robj == NULL) { ++ dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); ++ return -EINVAL; ++ } ++ r = radeon_gart_table_vram_pin(rdev); ++ if (r) ++ return r; ++ /* Setup L2 cache */ ++ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | ++ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | ++ EFFECTIVE_L2_QUEUE_SIZE(7)); ++ WREG32(VM_L2_CNTL2, 0); ++ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2)); ++ /* Setup TLB control */ ++ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | ++ SYSTEM_ACCESS_MODE_NOT_IN_SYS | ++ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | ++ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); ++ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); ++ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); ++ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); ++ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); ++ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); ++ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); ++ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); ++ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); ++ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); ++ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); ++ WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | ++ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); ++ WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, ++ (u32)(rdev->dummy_page.addr >> 12)); ++ for (i = 1; i < 7; i++) ++ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); ++ ++ r600_pcie_gart_tlb_flush(rdev); ++ rdev->gart.ready = true; ++ return 0; ++} ++ ++void evergreen_pcie_gart_disable(struct radeon_device *rdev) ++{ ++ u32 tmp; ++ int i, r; ++ ++ /* Disable all tables */ ++ for (i = 0; i < 7; i++) ++ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); ++ ++ /* Setup L2 cache */ ++ WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING | ++ EFFECTIVE_L2_QUEUE_SIZE(7)); ++ WREG32(VM_L2_CNTL2, 0); ++ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2)); ++ /* Setup TLB control */ ++ tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); ++ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); ++ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); ++ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); ++ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); ++ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); ++ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); ++ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); ++ if 
(rdev->gart.table.vram.robj) { ++ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); ++ if (likely(r == 0)) { ++ radeon_bo_kunmap(rdev->gart.table.vram.robj); ++ radeon_bo_unpin(rdev->gart.table.vram.robj); ++ radeon_bo_unreserve(rdev->gart.table.vram.robj); ++ } ++ } ++} ++ ++void evergreen_pcie_gart_fini(struct radeon_device *rdev) ++{ ++ evergreen_pcie_gart_disable(rdev); ++ radeon_gart_table_vram_free(rdev); ++ radeon_gart_fini(rdev); ++} ++ ++ ++void evergreen_agp_enable(struct radeon_device *rdev) ++{ ++ u32 tmp; ++ int i; ++ ++ /* Setup L2 cache */ ++ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | ++ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | ++ EFFECTIVE_L2_QUEUE_SIZE(7)); ++ WREG32(VM_L2_CNTL2, 0); ++ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2)); ++ /* Setup TLB control */ ++ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | ++ SYSTEM_ACCESS_MODE_NOT_IN_SYS | ++ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | ++ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); ++ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); ++ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); ++ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); ++ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); ++ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); ++ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); ++ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); ++ for (i = 0; i < 7; i++) ++ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); ++} ++ ++static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) ++{ ++ save->vga_control[0] = RREG32(D1VGA_CONTROL); ++ save->vga_control[1] = RREG32(D2VGA_CONTROL); ++ save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL); ++ save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL); ++ save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL); ++ save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL); ++ save->vga_render_control = RREG32(VGA_RENDER_CONTROL); ++ save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); ++ save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET); ++ save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); ++ save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET); ++ save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); ++ save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET); ++ save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); ++ ++ /* Stop all video */ ++ WREG32(VGA_RENDER_CONTROL, 0); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); ++ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); ++ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); ++ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); ++ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); ++ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); ++ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + 
EVERGREEN_CRTC1_REGISTER_OFFSET, 0); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); ++ ++ WREG32(D1VGA_CONTROL, 0); ++ WREG32(D2VGA_CONTROL, 0); ++ WREG32(EVERGREEN_D3VGA_CONTROL, 0); ++ WREG32(EVERGREEN_D4VGA_CONTROL, 0); ++ WREG32(EVERGREEN_D5VGA_CONTROL, 0); ++ WREG32(EVERGREEN_D6VGA_CONTROL, 0); ++} ++ ++static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) ++{ ++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET, ++ upper_32_bits(rdev->mc.vram_start)); ++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET, ++ upper_32_bits(rdev->mc.vram_start)); ++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET, ++ (u32)rdev->mc.vram_start); ++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET, ++ (u32)rdev->mc.vram_start); ++ ++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET, ++ upper_32_bits(rdev->mc.vram_start)); ++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET, ++ upper_32_bits(rdev->mc.vram_start)); ++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET, ++ (u32)rdev->mc.vram_start); ++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET, ++ (u32)rdev->mc.vram_start); ++ ++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, ++ upper_32_bits(rdev->mc.vram_start)); ++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, ++ upper_32_bits(rdev->mc.vram_start)); ++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, ++ (u32)rdev->mc.vram_start); ++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, ++ (u32)rdev->mc.vram_start); ++ ++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, ++ upper_32_bits(rdev->mc.vram_start)); ++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, ++ upper_32_bits(rdev->mc.vram_start)); ++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, ++ (u32)rdev->mc.vram_start); ++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, ++ (u32)rdev->mc.vram_start); ++ ++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, ++ upper_32_bits(rdev->mc.vram_start)); ++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, ++ upper_32_bits(rdev->mc.vram_start)); ++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, ++ (u32)rdev->mc.vram_start); ++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, ++ (u32)rdev->mc.vram_start); ++ ++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, ++ upper_32_bits(rdev->mc.vram_start)); ++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, ++ upper_32_bits(rdev->mc.vram_start)); ++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, ++ (u32)rdev->mc.vram_start); ++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + 
EVERGREEN_CRTC5_REGISTER_OFFSET, ++ (u32)rdev->mc.vram_start); ++ ++ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); ++ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); ++ /* Unlock host access */ ++ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); ++ mdelay(1); ++ /* Restore video state */ ++ WREG32(D1VGA_CONTROL, save->vga_control[0]); ++ WREG32(D2VGA_CONTROL, save->vga_control[1]); ++ WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]); ++ WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]); ++ WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]); ++ WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); ++ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]); ++ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]); ++ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]); ++ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]); ++ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]); ++ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); ++ WREG32(VGA_RENDER_CONTROL, save->vga_render_control); ++} ++ ++static void evergreen_mc_program(struct radeon_device *rdev) ++{ ++ struct evergreen_mc_save save; ++ u32 tmp; ++ int i, j; ++ ++ /* Initialize HDP */ ++ for (i = 0, j = 0; i < 32; i++, j += 0x18) { ++ WREG32((0x2c14 + j), 0x00000000); ++ WREG32((0x2c18 + j), 0x00000000); ++ WREG32((0x2c1c + j), 0x00000000); ++ WREG32((0x2c20 + j), 0x00000000); ++ WREG32((0x2c24 + j), 0x00000000); ++ } ++ WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); ++ ++ evergreen_mc_stop(rdev, &save); ++ if (evergreen_mc_wait_for_idle(rdev)) { ++ dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); ++ } ++ /* Lockout access through VGA aperture*/ ++ WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); ++ /* Update configuration */ ++ if (rdev->flags & RADEON_IS_AGP) { ++ if (rdev->mc.vram_start < rdev->mc.gtt_start) { ++ /* VRAM before AGP */ ++ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, ++ rdev->mc.vram_start >> 12); ++ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, ++ rdev->mc.gtt_end >> 12); ++ } else { ++ /* VRAM after AGP */ ++ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, ++ rdev->mc.gtt_start >> 12); ++ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, ++ rdev->mc.vram_end >> 12); ++ } ++ } else { ++ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, ++ rdev->mc.vram_start >> 12); ++ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, ++ rdev->mc.vram_end >> 12); ++ } ++ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 
0); ++ tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; ++ tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); ++ WREG32(MC_VM_FB_LOCATION, tmp); ++ WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); ++ WREG32(HDP_NONSURFACE_INFO, (2 << 7)); ++ WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); ++ if (rdev->flags & RADEON_IS_AGP) { ++ WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); ++ WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); ++ WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); ++ } else { ++ WREG32(MC_VM_AGP_BASE, 0); ++ WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); ++ WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); ++ } ++ if (evergreen_mc_wait_for_idle(rdev)) { ++ dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); ++ } ++ evergreen_mc_resume(rdev, &save); ++ /* we need to own VRAM, so turn off the VGA renderer here ++ * to stop it overwriting our objects */ ++ rv515_vga_render_disable(rdev); ++} ++ ++#if 0 ++/* ++ * CP. ++ */ ++static void evergreen_cp_stop(struct radeon_device *rdev) ++{ ++ /* XXX */ ++} ++ ++ ++static int evergreen_cp_load_microcode(struct radeon_device *rdev) ++{ ++ /* XXX */ ++ ++ return 0; ++} ++ ++ ++/* ++ * Core functions ++ */ ++static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes, ++ u32 num_backends, ++ u32 backend_disable_mask) ++{ ++ u32 backend_map = 0; ++ ++ return backend_map; ++} ++#endif ++ ++static void evergreen_gpu_init(struct radeon_device *rdev) ++{ ++ /* XXX */ ++} ++ ++int evergreen_mc_init(struct radeon_device *rdev) ++{ ++ fixed20_12 a; ++ u32 tmp; ++ int chansize, numchan; ++ int r; ++ ++ /* Get VRAM informations */ ++ rdev->mc.vram_is_ddr = true; ++ tmp = RREG32(MC_ARB_RAMCFG); ++ if (tmp & CHANSIZE_OVERRIDE) { ++ chansize = 16; ++ } else if (tmp & CHANSIZE_MASK) { ++ chansize = 64; ++ } else { ++ chansize = 32; ++ } ++ tmp = RREG32(MC_SHARED_CHMAP); ++ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { ++ case 0: ++ default: ++ numchan = 1; ++ break; ++ case 1: ++ numchan = 2; ++ break; ++ case 2: ++ numchan = 4; ++ break; ++ case 3: ++ numchan = 8; ++ break; ++ } ++ rdev->mc.vram_width = numchan * chansize; ++ /* Could aper size report 0 ? 
*/ ++ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); ++ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); ++ /* Setup GPU memory space */ ++ /* size in MB on evergreen */ ++ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; ++ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; ++ ++ if (rdev->mc.mc_vram_size > rdev->mc.aper_size) ++ rdev->mc.mc_vram_size = rdev->mc.aper_size; ++ ++ if (rdev->mc.real_vram_size > rdev->mc.aper_size) ++ rdev->mc.real_vram_size = rdev->mc.aper_size; ++ ++ if (rdev->flags & RADEON_IS_AGP) { ++ r = radeon_agp_init(rdev); ++ if (r) ++ return r; ++ /* gtt_size is set up by radeon_agp_init */ ++ rdev->mc.gtt_location = rdev->mc.agp_base; ++ tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; ++ /* Try to put vram before or after AGP because ++ * we want SYSTEM_APERTURE to cover both VRAM and ++ * AGP so that the GPU can catch accesses outside VRAM/AGP ++ */ ++ if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) { ++ /* Enough room before */ ++ rdev->mc.vram_location = rdev->mc.gtt_location - ++ rdev->mc.mc_vram_size; ++ } else if (tmp > rdev->mc.mc_vram_size) { ++ /* Enough room after */ ++ rdev->mc.vram_location = rdev->mc.gtt_location + ++ rdev->mc.gtt_size; ++ } else { ++ /* Set up VRAM at 0 anyway; AGP might then ++ * not work on some cards ++ */ ++ rdev->mc.vram_location = 0x00000000UL; ++ rdev->mc.gtt_location = rdev->mc.mc_vram_size; ++ } ++ } else { ++ rdev->mc.vram_location = 0x00000000UL; ++ rdev->mc.gtt_location = rdev->mc.mc_vram_size; ++ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; ++ } ++ rdev->mc.vram_start = rdev->mc.vram_location; ++ rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; ++ rdev->mc.gtt_start = rdev->mc.gtt_location; ++ rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; ++ /* FIXME: we should enforce default clock in case GPU is not in ++ * default setup ++ */ ++ a.full = rfixed_const(100); ++ rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); ++ rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); ++ return 0; ++} ++int evergreen_gpu_reset(struct radeon_device *rdev) ++{ ++ /* FIXME: implement for evergreen */ ++ return 0; ++} ++ ++static int evergreen_startup(struct radeon_device *rdev) ++{ ++#if 0 ++ int r; ++ ++ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { ++ r = r600_init_microcode(rdev); ++ if (r) { ++ DRM_ERROR("Failed to load firmware!\n"); ++ return r; ++ } ++ } ++#endif ++ evergreen_mc_program(rdev); ++#if 0 ++ if (rdev->flags & RADEON_IS_AGP) { ++ evergreen_agp_enable(rdev); ++ } else { ++ r = evergreen_pcie_gart_enable(rdev); ++ if (r) ++ return r; ++ } ++#endif ++ evergreen_gpu_init(rdev); ++#if 0 ++ if (!rdev->r600_blit.shader_obj) { ++ r = r600_blit_init(rdev); ++ if (r) { ++ DRM_ERROR("radeon: failed blitter (%d).\n", r); ++ return r; ++ } ++ } ++ ++ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); ++ if (unlikely(r != 0)) ++ return r; ++ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, ++ &rdev->r600_blit.shader_gpu_addr); ++ radeon_bo_unreserve(rdev->r600_blit.shader_obj); ++ if (r) { ++ DRM_ERROR("failed to pin blit object %d\n", r); ++ return r; ++ } ++ ++ /* Enable IRQ */ ++ r = r600_irq_init(rdev); ++ if (r) { ++ DRM_ERROR("radeon: IH init failed (%d).\n", r); ++ radeon_irq_kms_fini(rdev); ++ return r; ++ } ++ r600_irq_set(rdev); ++ ++ r = radeon_ring_init(rdev, rdev->cp.ring_size); ++ if (r) ++ return r; ++ r = evergreen_cp_load_microcode(rdev); ++ if (r) ++ return r; ++ r = 
r600_cp_resume(rdev); ++ if (r) ++ return r; ++ /* write back buffers are not vital, so don't worry about failure */ ++ r600_wb_enable(rdev); ++#endif ++ return 0; ++} ++ ++int evergreen_resume(struct radeon_device *rdev) ++{ ++ int r; ++ ++ /* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw, ++ * posting performs the tasks needed to bring the GPU back into good ++ * shape. ++ */ ++ /* post card */ ++ atom_asic_init(rdev->mode_info.atom_context); ++ /* Initialize clocks */ ++ r = radeon_clocks_init(rdev); ++ if (r) { ++ return r; ++ } ++ ++ r = evergreen_startup(rdev); ++ if (r) { ++ DRM_ERROR("evergreen startup failed on resume\n"); ++ return r; ++ } ++#if 0 ++ r = r600_ib_test(rdev); ++ if (r) { ++ DRM_ERROR("radeon: failed testing IB (%d).\n", r); ++ return r; ++ } ++#endif ++ return r; ++ ++} ++ ++int evergreen_suspend(struct radeon_device *rdev) ++{ ++#if 0 ++ int r; ++ ++ /* FIXME: we should wait for ring to be empty */ ++ r700_cp_stop(rdev); ++ rdev->cp.ready = false; ++ r600_wb_disable(rdev); ++ evergreen_pcie_gart_disable(rdev); ++ /* unpin shaders bo */ ++ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); ++ if (likely(r == 0)) { ++ radeon_bo_unpin(rdev->r600_blit.shader_obj); ++ radeon_bo_unreserve(rdev->r600_blit.shader_obj); ++ } ++#endif ++ return 0; ++} ++ ++static bool evergreen_card_posted(struct radeon_device *rdev) ++{ ++ u32 reg; ++ ++ /* first check CRTCs */ ++ reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | ++ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | ++ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | ++ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | ++ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | ++ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); ++ if (reg & EVERGREEN_CRTC_MASTER_EN) ++ return true; ++ ++ /* then check MEM_SIZE, in case the crtcs are off */ ++ if (RREG32(CONFIG_MEMSIZE)) ++ return true; ++ ++ return false; ++} ++ ++/* The plan is to move initialization into this function and use ++ * helper functions so that radeon_device_init does little more ++ * than call asic-specific functions. This should also allow us ++ * to remove a bunch of callback functions like vram_info. ++ */ ++int evergreen_init(struct radeon_device *rdev) ++{ ++ int r; ++ ++ r = radeon_dummy_page_init(rdev); ++ if (r) ++ return r; ++ /* This doesn't do much */ ++ r = radeon_gem_init(rdev); ++ if (r) ++ return r; ++ /* Read BIOS */ ++ if (!radeon_get_bios(rdev)) { ++ if (ASIC_IS_AVIVO(rdev)) ++ return -EINVAL; ++ } ++ /* Must be an ATOMBIOS */ ++ if (!rdev->is_atom_bios) { ++ dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n"); ++ return -EINVAL; ++ } ++ r = radeon_atombios_init(rdev); ++ if (r) ++ return r; ++ /* Post card if necessary */ ++ if (!evergreen_card_posted(rdev)) { ++ if (!rdev->bios) { ++ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); ++ return -EINVAL; ++ } ++ DRM_INFO("GPU not posted. 
posting now...\n"); ++ atom_asic_init(rdev->mode_info.atom_context); ++ } ++ /* Initialize scratch registers */ ++ r600_scratch_init(rdev); ++ /* Initialize surface registers */ ++ radeon_surface_init(rdev); ++ /* Initialize clocks */ ++ radeon_get_clock_info(rdev->ddev); ++ r = radeon_clocks_init(rdev); ++ if (r) ++ return r; ++ /* Initialize power management */ ++ radeon_pm_init(rdev); ++ /* Fence driver */ ++ r = radeon_fence_driver_init(rdev); ++ if (r) ++ return r; ++ r = evergreen_mc_init(rdev); ++ if (r) ++ return r; ++ /* Memory manager */ ++ r = radeon_bo_init(rdev); ++ if (r) ++ return r; ++#if 0 ++ r = radeon_irq_kms_init(rdev); ++ if (r) ++ return r; ++ ++ rdev->cp.ring_obj = NULL; ++ r600_ring_init(rdev, 1024 * 1024); ++ ++ rdev->ih.ring_obj = NULL; ++ r600_ih_ring_init(rdev, 64 * 1024); ++ ++ r = r600_pcie_gart_init(rdev); ++ if (r) ++ return r; ++#endif ++ rdev->accel_working = false; ++ r = evergreen_startup(rdev); ++ if (r) { ++ evergreen_suspend(rdev); ++ /*r600_wb_fini(rdev);*/ ++ /*radeon_ring_fini(rdev);*/ ++ /*evergreen_pcie_gart_fini(rdev);*/ ++ rdev->accel_working = false; ++ } ++ if (rdev->accel_working) { ++ r = radeon_ib_pool_init(rdev); ++ if (r) { ++ DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); ++ rdev->accel_working = false; ++ } ++ r = r600_ib_test(rdev); ++ if (r) { ++ DRM_ERROR("radeon: failed testing IB (%d).\n", r); ++ rdev->accel_working = false; ++ } ++ } ++ return 0; ++} ++ ++void evergreen_fini(struct radeon_device *rdev) ++{ ++ evergreen_suspend(rdev); ++#if 0 ++ r600_blit_fini(rdev); ++ r600_irq_fini(rdev); ++ radeon_irq_kms_fini(rdev); ++ radeon_ring_fini(rdev); ++ r600_wb_fini(rdev); ++ evergreen_pcie_gart_fini(rdev); ++#endif ++ radeon_gem_fini(rdev); ++ radeon_fence_driver_fini(rdev); ++ radeon_clocks_fini(rdev); ++ radeon_agp_fini(rdev); ++ radeon_bo_fini(rdev); ++ radeon_atombios_fini(rdev); ++ kfree(rdev->bios); ++ rdev->bios = NULL; ++ radeon_dummy_page_fini(rdev); ++} +diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h +new file mode 100644 +index 0000000..f7c7c96 +--- /dev/null ++++ b/drivers/gpu/drm/radeon/evergreen_reg.h +@@ -0,0 +1,176 @@ ++/* ++ * Copyright 2010 Advanced Micro Devices, Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: Alex Deucher ++ */ ++#ifndef __EVERGREEN_REG_H__ ++#define __EVERGREEN_REG_H__ ++ ++/* evergreen */ ++#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310 ++#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324 ++#define EVERGREEN_D3VGA_CONTROL 0x3e0 ++#define EVERGREEN_D4VGA_CONTROL 0x3e4 ++#define EVERGREEN_D5VGA_CONTROL 0x3e8 ++#define EVERGREEN_D6VGA_CONTROL 0x3ec ++ ++#define EVERGREEN_P1PLL_SS_CNTL 0x414 ++#define EVERGREEN_P2PLL_SS_CNTL 0x454 ++# define EVERGREEN_PxPLL_SS_EN (1 << 12) ++/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */ ++#define EVERGREEN_GRPH_ENABLE 0x6800 ++#define EVERGREEN_GRPH_CONTROL 0x6804 ++# define EVERGREEN_GRPH_DEPTH(x) (((x) & 0x3) << 0) ++# define EVERGREEN_GRPH_DEPTH_8BPP 0 ++# define EVERGREEN_GRPH_DEPTH_16BPP 1 ++# define EVERGREEN_GRPH_DEPTH_32BPP 2 ++# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8) ++/* 8 BPP */ ++# define EVERGREEN_GRPH_FORMAT_INDEXED 0 ++/* 16 BPP */ ++# define EVERGREEN_GRPH_FORMAT_ARGB1555 0 ++# define EVERGREEN_GRPH_FORMAT_ARGB565 1 ++# define EVERGREEN_GRPH_FORMAT_ARGB4444 2 ++# define EVERGREEN_GRPH_FORMAT_AI88 3 ++# define EVERGREEN_GRPH_FORMAT_MONO16 4 ++# define EVERGREEN_GRPH_FORMAT_BGRA5551 5 ++/* 32 BPP */ ++# define EVERGREEN_GRPH_FORMAT_ARGB8888 0 ++# define EVERGREEN_GRPH_FORMAT_ARGB2101010 1 ++# define EVERGREEN_GRPH_FORMAT_32BPP_DIG 2 ++# define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010 3 ++# define EVERGREEN_GRPH_FORMAT_BGRA1010102 4 ++# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5 ++# define EVERGREEN_GRPH_FORMAT_RGB111110 6 ++# define EVERGREEN_GRPH_FORMAT_BGR101111 7 ++#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c ++# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0) ++# define EVERGREEN_GRPH_ENDIAN_NONE 0 ++# define EVERGREEN_GRPH_ENDIAN_8IN16 1 ++# define EVERGREEN_GRPH_ENDIAN_8IN32 2 ++# define EVERGREEN_GRPH_ENDIAN_8IN64 3 ++# define EVERGREEN_GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4) ++# define EVERGREEN_GRPH_RED_SEL_R 0 ++# define EVERGREEN_GRPH_RED_SEL_G 1 ++# define EVERGREEN_GRPH_RED_SEL_B 2 ++# define EVERGREEN_GRPH_RED_SEL_A 3 ++# define EVERGREEN_GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6) ++# define EVERGREEN_GRPH_GREEN_SEL_G 0 ++# define EVERGREEN_GRPH_GREEN_SEL_B 1 ++# define EVERGREEN_GRPH_GREEN_SEL_A 2 ++# define EVERGREEN_GRPH_GREEN_SEL_R 3 ++# define EVERGREEN_GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8) ++# define EVERGREEN_GRPH_BLUE_SEL_B 0 ++# define EVERGREEN_GRPH_BLUE_SEL_A 1 ++# define EVERGREEN_GRPH_BLUE_SEL_R 2 ++# define EVERGREEN_GRPH_BLUE_SEL_G 3 ++# define EVERGREEN_GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10) ++# define EVERGREEN_GRPH_ALPHA_SEL_A 0 ++# define EVERGREEN_GRPH_ALPHA_SEL_R 1 ++# define EVERGREEN_GRPH_ALPHA_SEL_G 2 ++# define EVERGREEN_GRPH_ALPHA_SEL_B 3 ++#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x6810 ++#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x6814 ++# define EVERGREEN_GRPH_DFQ_ENABLE (1 << 0) ++# define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK 0xffffff00 ++#define EVERGREEN_GRPH_PITCH 0x6818 ++#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x681c ++#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x6820 ++#define EVERGREEN_GRPH_SURFACE_OFFSET_X 0x6824 ++#define EVERGREEN_GRPH_SURFACE_OFFSET_Y 0x6828 ++#define EVERGREEN_GRPH_X_START 0x682c ++#define EVERGREEN_GRPH_Y_START 0x6830 ++#define EVERGREEN_GRPH_X_END 0x6834 ++#define EVERGREEN_GRPH_Y_END 0x6838 ++ ++/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */ ++#define EVERGREEN_CUR_CONTROL 0x6998 ++# define EVERGREEN_CURSOR_EN (1 << 0) ++# define 
EVERGREEN_CURSOR_MODE(x) (((x) & 0x3) << 8) ++# define EVERGREEN_CURSOR_MONO 0 ++# define EVERGREEN_CURSOR_24_1 1 ++# define EVERGREEN_CURSOR_24_8_PRE_MULT 2 ++# define EVERGREEN_CURSOR_24_8_UNPRE_MULT 3 ++# define EVERGREEN_CURSOR_2X_MAGNIFY (1 << 16) ++# define EVERGREEN_CURSOR_FORCE_MC_ON (1 << 20) ++# define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24) ++# define EVERGREEN_CURSOR_URGENT_ALWAYS 0 ++# define EVERGREEN_CURSOR_URGENT_1_8 1 ++# define EVERGREEN_CURSOR_URGENT_1_4 2 ++# define EVERGREEN_CURSOR_URGENT_3_8 3 ++# define EVERGREEN_CURSOR_URGENT_1_2 4 ++#define EVERGREEN_CUR_SURFACE_ADDRESS 0x699c ++# define EVERGREEN_CUR_SURFACE_ADDRESS_MASK 0xfffff000 ++#define EVERGREEN_CUR_SIZE 0x69a0 ++#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH 0x69a4 ++#define EVERGREEN_CUR_POSITION 0x69a8 ++#define EVERGREEN_CUR_HOT_SPOT 0x69ac ++#define EVERGREEN_CUR_COLOR1 0x69b0 ++#define EVERGREEN_CUR_COLOR2 0x69b4 ++#define EVERGREEN_CUR_UPDATE 0x69b8 ++# define EVERGREEN_CURSOR_UPDATE_PENDING (1 << 0) ++# define EVERGREEN_CURSOR_UPDATE_TAKEN (1 << 1) ++# define EVERGREEN_CURSOR_UPDATE_LOCK (1 << 16) ++# define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24) ++ ++/* LUT blocks at 0x69e0, 0x75e0, 0x101e0, 0x10de0, 0x119e0, 0x125e0 */ ++#define EVERGREEN_DC_LUT_RW_MODE 0x69e0 ++#define EVERGREEN_DC_LUT_RW_INDEX 0x69e4 ++#define EVERGREEN_DC_LUT_SEQ_COLOR 0x69e8 ++#define EVERGREEN_DC_LUT_PWL_DATA 0x69ec ++#define EVERGREEN_DC_LUT_30_COLOR 0x69f0 ++#define EVERGREEN_DC_LUT_VGA_ACCESS_ENABLE 0x69f4 ++#define EVERGREEN_DC_LUT_WRITE_EN_MASK 0x69f8 ++#define EVERGREEN_DC_LUT_AUTOFILL 0x69fc ++#define EVERGREEN_DC_LUT_CONTROL 0x6a00 ++#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE 0x6a04 ++#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN 0x6a08 ++#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED 0x6a0c ++#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE 0x6a10 ++#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN 0x6a14 ++#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED 0x6a18 ++ ++#define EVERGREEN_DATA_FORMAT 0x6b00 ++# define EVERGREEN_INTERLEAVE_EN (1 << 0) ++#define EVERGREEN_DESKTOP_HEIGHT 0x6b04 ++ ++#define EVERGREEN_VIEWPORT_START 0x6d70 ++#define EVERGREEN_VIEWPORT_SIZE 0x6d74 ++ ++/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. 
*/ ++#define EVERGREEN_CRTC0_REGISTER_OFFSET (0x6df0 - 0x6df0) ++#define EVERGREEN_CRTC1_REGISTER_OFFSET (0x79f0 - 0x6df0) ++#define EVERGREEN_CRTC2_REGISTER_OFFSET (0x105f0 - 0x6df0) ++#define EVERGREEN_CRTC3_REGISTER_OFFSET (0x111f0 - 0x6df0) ++#define EVERGREEN_CRTC4_REGISTER_OFFSET (0x11df0 - 0x6df0) ++#define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0) ++ ++/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */ ++#define EVERGREEN_CRTC_CONTROL 0x6e70 ++# define EVERGREEN_CRTC_MASTER_EN (1 << 0) ++#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 ++ ++#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 ++#define EVERGREEN_DC_GPIO_HPD_A 0x64b4 ++#define EVERGREEN_DC_GPIO_HPD_EN 0x64b8 ++#define EVERGREEN_DC_GPIO_HPD_Y 0x64bc ++ ++#endif +diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h +index c0356bb..d564c62 100644 +--- a/drivers/gpu/drm/radeon/radeon.h ++++ b/drivers/gpu/drm/radeon/radeon.h +@@ -138,11 +138,14 @@ void radeon_dummy_page_fini(struct radeon_device *rdev); + struct radeon_clock { + struct radeon_pll p1pll; + struct radeon_pll p2pll; ++ struct radeon_pll dcpll; + struct radeon_pll spll; + struct radeon_pll mpll; + /* 10 Khz units */ + uint32_t default_mclk; + uint32_t default_sclk; ++ uint32_t default_dispclk; ++ uint32_t dp_extclk; + }; + + /* +@@ -830,6 +833,7 @@ struct radeon_device { + struct r600_ih ih; /* r6/700 interrupt ring */ + struct workqueue_struct *wq; + struct work_struct hotplug_work; ++ int num_crtc; /* number of crtcs */ + + /* audio stuff */ + struct timer_list audio_timer; +@@ -956,7 +960,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); + #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) + #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) + #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) +- ++#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) + + /* + * BIOS helpers. 
+@@ -1189,6 +1193,14 @@ extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, + uint8_t status_bits, + uint8_t category_code); + ++/* evergreen */ ++struct evergreen_mc_save { ++ u32 vga_control[6]; ++ u32 vga_render_control; ++ u32 vga_hdp_control; ++ u32 crtc_control[6]; ++}; ++ + #include "radeon_object.h" + + #endif +diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h +index 05ee1ae..f7734c3 100644 +--- a/drivers/gpu/drm/radeon/radeon_asic.h ++++ b/drivers/gpu/drm/radeon/radeon_asic.h +@@ -539,7 +539,7 @@ static struct radeon_asic r600_asic = { + .get_memory_clock = &radeon_atom_get_memory_clock, + .set_memory_clock = &radeon_atom_set_memory_clock, + .set_pcie_lanes = NULL, +- .set_clock_gating = &radeon_atom_set_clock_gating, ++ .set_clock_gating = NULL, + .set_surface_reg = r600_set_surface_reg, + .clear_surface_reg = r600_clear_surface_reg, + .bandwidth_update = &rv515_bandwidth_update, +@@ -595,4 +595,54 @@ static struct radeon_asic rv770_asic = { + .ioctl_wait_idle = r600_ioctl_wait_idle, + }; + ++/* ++ * evergreen ++ */ ++int evergreen_init(struct radeon_device *rdev); ++void evergreen_fini(struct radeon_device *rdev); ++int evergreen_suspend(struct radeon_device *rdev); ++int evergreen_resume(struct radeon_device *rdev); ++int evergreen_gpu_reset(struct radeon_device *rdev); ++void evergreen_bandwidth_update(struct radeon_device *rdev); ++void evergreen_hpd_init(struct radeon_device *rdev); ++void evergreen_hpd_fini(struct radeon_device *rdev); ++bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); ++void evergreen_hpd_set_polarity(struct radeon_device *rdev, ++ enum radeon_hpd_id hpd); ++ ++static struct radeon_asic evergreen_asic = { ++ .init = &evergreen_init, ++ .fini = &evergreen_fini, ++ .suspend = &evergreen_suspend, ++ .resume = &evergreen_resume, ++ .cp_commit = NULL, ++ .gpu_reset = &evergreen_gpu_reset, ++ .vga_set_state = &r600_vga_set_state, ++ .gart_tlb_flush = &r600_pcie_gart_tlb_flush, ++ .gart_set_page = &rs600_gart_set_page, ++ .ring_test = NULL, ++ .ring_ib_execute = NULL, ++ .irq_set = NULL, ++ .irq_process = NULL, ++ .get_vblank_counter = NULL, ++ .fence_ring_emit = NULL, ++ .cs_parse = NULL, ++ .copy_blit = NULL, ++ .copy_dma = NULL, ++ .copy = NULL, ++ .get_engine_clock = &radeon_atom_get_engine_clock, ++ .set_engine_clock = &radeon_atom_set_engine_clock, ++ .get_memory_clock = &radeon_atom_get_memory_clock, ++ .set_memory_clock = &radeon_atom_set_memory_clock, ++ .set_pcie_lanes = NULL, ++ .set_clock_gating = NULL, ++ .set_surface_reg = r600_set_surface_reg, ++ .clear_surface_reg = r600_clear_surface_reg, ++ .bandwidth_update = &evergreen_bandwidth_update, ++ .hpd_init = &evergreen_hpd_init, ++ .hpd_fini = &evergreen_hpd_fini, ++ .hpd_sense = &evergreen_hpd_sense, ++ .hpd_set_polarity = &evergreen_hpd_set_polarity, ++}; ++ + #endif +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c +index 4d88315..381ebdd 100644 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c +@@ -159,8 +159,15 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device + struct radeon_gpio_rec *gpio) + { + struct radeon_hpd hpd; ++ u32 reg; ++ ++ if (ASIC_IS_DCE4(rdev)) ++ reg = EVERGREEN_DC_GPIO_HPD_A; ++ else ++ reg = AVIVO_DC_GPIO_HPD_A; ++ + hpd.gpio = *gpio; +- if (gpio->reg == AVIVO_DC_GPIO_HPD_A) { ++ if (gpio->reg == reg) { + switch(gpio->mask) { + case (1 << 0): + hpd.hpd = 
RADEON_HPD_1; +@@ -574,6 +581,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) + ddc_bus.valid = false; + } + ++ /* needed for aux chan transactions */ ++ ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0; ++ + conn_id = le16_to_cpu(path->usConnObjectId); + + if (!radeon_atom_apply_quirks +@@ -838,6 +848,7 @@ union firmware_info { + ATOM_FIRMWARE_INFO_V1_2 info_12; + ATOM_FIRMWARE_INFO_V1_3 info_13; + ATOM_FIRMWARE_INFO_V1_4 info_14; ++ ATOM_FIRMWARE_INFO_V2_1 info_21; + }; + + bool radeon_atom_get_clock_info(struct drm_device *dev) +@@ -849,6 +860,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) + uint8_t frev, crev; + struct radeon_pll *p1pll = &rdev->clock.p1pll; + struct radeon_pll *p2pll = &rdev->clock.p2pll; ++ struct radeon_pll *dcpll = &rdev->clock.dcpll; + struct radeon_pll *spll = &rdev->clock.spll; + struct radeon_pll *mpll = &rdev->clock.mpll; + uint16_t data_offset; +@@ -951,8 +963,19 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) + rdev->clock.default_mclk = + le32_to_cpu(firmware_info->info.ulDefaultMemoryClock); + ++ if (ASIC_IS_DCE4(rdev)) { ++ rdev->clock.default_dispclk = ++ le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); ++ if (rdev->clock.default_dispclk == 0) ++ rdev->clock.default_dispclk = 60000; /* 600 Mhz */ ++ rdev->clock.dp_extclk = ++ le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); ++ } ++ *dcpll = *p1pll; ++ + return true; + } ++ + return false; + } + +@@ -1395,16 +1418,6 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + } + +-void radeon_atom_static_pwrmgt_setup(struct radeon_device *rdev, int enable) +-{ +- ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION args; +- int index = GetIndexIntoMasterTable(COMMAND, EnableASIC_StaticPwrMgt); +- +- args.ucEnable = enable; +- +- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); +-} +- + uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev) + { + GET_ENGINE_CLOCK_PS_ALLOCATION args; +diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c +index 73c4405..f64936c 100644 +--- a/drivers/gpu/drm/radeon/radeon_clocks.c ++++ b/drivers/gpu/drm/radeon/radeon_clocks.c +@@ -96,6 +96,7 @@ void radeon_get_clock_info(struct drm_device *dev) + struct radeon_device *rdev = dev->dev_private; + struct radeon_pll *p1pll = &rdev->clock.p1pll; + struct radeon_pll *p2pll = &rdev->clock.p2pll; ++ struct radeon_pll *dcpll = &rdev->clock.dcpll; + struct radeon_pll *spll = &rdev->clock.spll; + struct radeon_pll *mpll = &rdev->clock.mpll; + int ret; +@@ -204,6 +205,17 @@ void radeon_get_clock_info(struct drm_device *dev) + p2pll->max_frac_feedback_div = 0; + } + ++ /* dcpll is DCE4 only */ ++ dcpll->min_post_div = 2; ++ dcpll->max_post_div = 0x7f; ++ dcpll->min_frac_feedback_div = 0; ++ dcpll->max_frac_feedback_div = 9; ++ dcpll->min_ref_div = 2; ++ dcpll->max_ref_div = 0x3ff; ++ dcpll->min_feedback_div = 4; ++ dcpll->max_feedback_div = 0xfff; ++ dcpll->best_vco = 0; ++ + p1pll->min_ref_div = 2; + p1pll->max_ref_div = 0x3ff; + p1pll->min_feedback_div = 4; +@@ -846,8 +858,10 @@ int radeon_static_clocks_init(struct drm_device *dev) + /* XXX make sure engine is idle */ + + if (radeon_dynclks != -1) { +- if (radeon_dynclks) +- radeon_set_clock_gating(rdev, 1); ++ if (radeon_dynclks) { ++ if (rdev->asic->set_clock_gating) ++ radeon_set_clock_gating(rdev, 1); ++ } + } + 
radeon_apply_clock_quirks(rdev); + return 0; +diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c +index e7b1944..e3388a9 100644 +--- a/drivers/gpu/drm/radeon/radeon_combios.c ++++ b/drivers/gpu/drm/radeon/radeon_combios.c +@@ -507,6 +507,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde + } + i2c.mm_i2c = false; + i2c.i2c_id = 0; ++ i2c.hpd_id = 0; + + if (ddc_line) + i2c.valid = true; +diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c +index 28772a3..7ecf5e1 100644 +--- a/drivers/gpu/drm/radeon/radeon_cursor.c ++++ b/drivers/gpu/drm/radeon/radeon_cursor.c +@@ -36,7 +36,14 @@ static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock) + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + uint32_t cur_lock; + +- if (ASIC_IS_AVIVO(rdev)) { ++ if (ASIC_IS_DCE4(rdev)) { ++ cur_lock = RREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset); ++ if (lock) ++ cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK; ++ else ++ cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK; ++ WREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock); ++ } else if (ASIC_IS_AVIVO(rdev)) { + cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset); + if (lock) + cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK; +@@ -58,7 +65,10 @@ static void radeon_hide_cursor(struct drm_crtc *crtc) + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct radeon_device *rdev = crtc->dev->dev_private; + +- if (ASIC_IS_AVIVO(rdev)) { ++ if (ASIC_IS_DCE4(rdev)) { ++ WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); ++ WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT)); ++ } else if (ASIC_IS_AVIVO(rdev)) { + WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); + WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); + } else { +@@ -81,10 +91,14 @@ static void radeon_show_cursor(struct drm_crtc *crtc) + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct radeon_device *rdev = crtc->dev->dev_private; + +- if (ASIC_IS_AVIVO(rdev)) { ++ if (ASIC_IS_DCE4(rdev)) { ++ WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); ++ WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN | ++ EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT)); ++ } else if (ASIC_IS_AVIVO(rdev)) { + WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); + WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN | +- (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); ++ (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); + } else { + switch (radeon_crtc->crtc_id) { + case 0: +@@ -109,7 +123,10 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct radeon_device *rdev = crtc->dev->dev_private; + +- if (ASIC_IS_AVIVO(rdev)) { ++ if (ASIC_IS_DCE4(rdev)) { ++ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 0); ++ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); ++ } else if (ASIC_IS_AVIVO(rdev)) { + if (rdev->family >= CHIP_RV770) { + if (radeon_crtc->crtc_id) + WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0); +@@ -201,7 +218,20 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, + yorigin = CURSOR_HEIGHT - 1; + + radeon_lock_cursor(crtc, true); +- if (ASIC_IS_AVIVO(rdev)) { ++ if (ASIC_IS_DCE4(rdev)) { ++ /* cursors are offset into the total surface */ ++ x += crtc->x; 
++ y += crtc->y; ++ DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); ++ ++ /* XXX: check if evergreen has the same issues as avivo chips */ ++ WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, ++ ((xorigin ? 0 : x) << 16) | ++ (yorigin ? 0 : y)); ++ WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); ++ WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, ++ ((radeon_crtc->cursor_width - 1) << 16) | (radeon_crtc->cursor_height - 1)); ++ } else if (ASIC_IS_AVIVO(rdev)) { + int w = radeon_crtc->cursor_width; + int i = 0; + struct drm_crtc *crtc_p; +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c +index 768b150..4ca5ddc 100644 +--- a/drivers/gpu/drm/radeon/radeon_device.c ++++ b/drivers/gpu/drm/radeon/radeon_device.c +@@ -182,7 +182,16 @@ bool radeon_card_posted(struct radeon_device *rdev) + uint32_t reg; + + /* first check CRTCs */ +- if (ASIC_IS_AVIVO(rdev)) { ++ if (ASIC_IS_DCE4(rdev)) { ++ reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | ++ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | ++ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | ++ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | ++ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | ++ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); ++ if (reg & EVERGREEN_CRTC_MASTER_EN) ++ return true; ++ } else if (ASIC_IS_AVIVO(rdev)) { + reg = RREG32(AVIVO_D1CRTC_CONTROL) | + RREG32(AVIVO_D2CRTC_CONTROL); + if (reg & AVIVO_CRTC_EN) { +@@ -310,7 +319,7 @@ void radeon_register_accessor_init(struct radeon_device *rdev) + rdev->mc_rreg = &rs600_mc_rreg; + rdev->mc_wreg = &rs600_mc_wreg; + } +- if (rdev->family >= CHIP_R600) { ++ if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) { + rdev->pciep_rreg = &r600_pciep_rreg; + rdev->pciep_wreg = &r600_pciep_wreg; + } +@@ -387,6 +396,13 @@ int radeon_asic_init(struct radeon_device *rdev) + case CHIP_RV740: + rdev->asic = &rv770_asic; + break; ++ case CHIP_CEDAR: ++ case CHIP_REDWOOD: ++ case CHIP_JUNIPER: ++ case CHIP_CYPRESS: ++ case CHIP_HEMLOCK: ++ rdev->asic = &evergreen_asic; ++ break; + default: + /* FIXME: not supported yet */ + return -EINVAL; +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c +index 7e17a36..86a9f01 100644 +--- a/drivers/gpu/drm/radeon/radeon_display.c ++++ b/drivers/gpu/drm/radeon/radeon_display.c +@@ -68,6 +68,36 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc) + WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id); + } + ++static void evergreen_crtc_load_lut(struct drm_crtc *crtc) ++{ ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ struct drm_device *dev = crtc->dev; ++ struct radeon_device *rdev = dev->dev_private; ++ int i; ++ ++ DRM_DEBUG("%d\n", radeon_crtc->crtc_id); ++ WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0); ++ ++ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0); ++ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0); ++ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0); ++ ++ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff); ++ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); ++ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); ++ ++ WREG32(EVERGREEN_DC_LUT_RW_MODE, 
radeon_crtc->crtc_id); ++ WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK, 0x00000007); ++ ++ WREG32(EVERGREEN_DC_LUT_RW_INDEX, 0); ++ for (i = 0; i < 256; i++) { ++ WREG32(EVERGREEN_DC_LUT_30_COLOR, ++ (radeon_crtc->lut_r[i] << 20) | ++ (radeon_crtc->lut_g[i] << 10) | ++ (radeon_crtc->lut_b[i] << 0)); ++ } ++} ++ + static void legacy_crtc_load_lut(struct drm_crtc *crtc) + { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); +@@ -100,7 +130,9 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc) + if (!crtc->enabled) + return; + +- if (ASIC_IS_AVIVO(rdev)) ++ if (ASIC_IS_DCE4(rdev)) ++ evergreen_crtc_load_lut(crtc); ++ else if (ASIC_IS_AVIVO(rdev)) + avivo_crtc_load_lut(crtc); + else + legacy_crtc_load_lut(crtc); +@@ -819,7 +851,7 @@ static int radeon_modeset_create_props(struct radeon_device *rdev) + + int radeon_modeset_init(struct radeon_device *rdev) + { +- int num_crtc = 2, i; ++ int i; + int ret; + + drm_mode_config_init(rdev->ddev); +@@ -843,10 +875,16 @@ int radeon_modeset_init(struct radeon_device *rdev) + } + + if (rdev->flags & RADEON_SINGLE_CRTC) +- num_crtc = 1; ++ rdev->num_crtc = 1; ++ else { ++ if (ASIC_IS_DCE4(rdev)) ++ rdev->num_crtc = 6; ++ else ++ rdev->num_crtc = 2; ++ } + + /* allocate crtcs */ +- for (i = 0; i < num_crtc; i++) { ++ for (i = 0; i < rdev->num_crtc; i++) { + radeon_crtc_init(rdev->ddev, i); + } + +diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c +index 3c91724..cac9e06 100644 +--- a/drivers/gpu/drm/radeon/radeon_encoders.c ++++ b/drivers/gpu/drm/radeon/radeon_encoders.c +@@ -53,7 +53,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) + /* DVO requires 2x ppll clocks depending on tmds chip */ + if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) + return index_mask; +- ++ + count = -1; + list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) { + struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder); +@@ -228,6 +228,32 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder) + return NULL; + } + ++static struct radeon_connector_atom_dig * ++radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder) ++{ ++ struct drm_device *dev = encoder->dev; ++ struct radeon_device *rdev = dev->dev_private; ++ struct drm_connector *connector; ++ struct radeon_connector *radeon_connector; ++ struct radeon_connector_atom_dig *dig_connector; ++ ++ if (!rdev->is_atom_bios) ++ return NULL; ++ ++ connector = radeon_get_connector_for_encoder(encoder); ++ if (!connector) ++ return NULL; ++ ++ radeon_connector = to_radeon_connector(connector); ++ ++ if (!radeon_connector->con_priv) ++ return NULL; ++ ++ dig_connector = radeon_connector->con_priv; ++ ++ return dig_connector; ++} ++ + static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +@@ -273,7 +299,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, + } + + if (ASIC_IS_DCE3(rdev) && +- (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) { ++ (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT))) { + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + radeon_dp_set_link_config(connector, mode); + } +@@ -458,34 +484,20 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = 
to_radeon_encoder(encoder); ++ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; ++ struct radeon_connector_atom_dig *dig_connector = ++ radeon_get_atom_connector_priv_from_encoder(encoder); + union lvds_encoder_control args; + int index = 0; + int hdmi_detected = 0; + uint8_t frev, crev; +- struct radeon_encoder_atom_dig *dig; +- struct drm_connector *connector; +- struct radeon_connector *radeon_connector; +- struct radeon_connector_atom_dig *dig_connector; +- +- connector = radeon_get_connector_for_encoder(encoder); +- if (!connector) +- return; +- +- radeon_connector = to_radeon_connector(connector); + +- if (!radeon_encoder->enc_priv) ++ if (!dig || !dig_connector) + return; + +- dig = radeon_encoder->enc_priv; +- +- if (!radeon_connector->con_priv) +- return; +- +- if (drm_detect_hdmi_monitor(radeon_connector->edid)) ++ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) + hdmi_detected = 1; + +- dig_connector = radeon_connector->con_priv; +- + memset(&args, 0, sizeof(args)); + + switch (radeon_encoder->encoder_id) { +@@ -586,7 +598,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) + { + struct drm_connector *connector; + struct radeon_connector *radeon_connector; +- struct radeon_connector_atom_dig *radeon_dig_connector; ++ struct radeon_connector_atom_dig *dig_connector; + + connector = radeon_get_connector_for_encoder(encoder); + if (!connector) +@@ -617,9 +629,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) + break; + case DRM_MODE_CONNECTOR_DisplayPort: + case DRM_MODE_CONNECTOR_eDP: +- radeon_dig_connector = radeon_connector->con_priv; +- if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || +- (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) ++ dig_connector = radeon_connector->con_priv; ++ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || ++ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) + return ATOM_ENCODER_MODE_DP; + else if (drm_detect_hdmi_monitor(radeon_connector->edid)) + return ATOM_ENCODER_MODE_HDMI; +@@ -656,6 +668,18 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) + * - 2 DIG encoder blocks. + * DIG1/2 can drive UNIPHY0/1/2 link A or link B + * ++ * DCE 4.0 ++ * - 3 DIG transmitter blocks UNPHY0/1/2 (links A and B). ++ * Supports up to 6 digital outputs ++ * - 6 DIG encoder blocks. 
++ * - DIG to PHY mapping is hardcoded ++ * DIG1 drives UNIPHY0 link A, A+B ++ * DIG2 drives UNIPHY0 link B ++ * DIG3 drives UNIPHY1 link A, A+B ++ * DIG4 drives UNIPHY1 link B ++ * DIG5 drives UNIPHY2 link A, A+B ++ * DIG6 drives UNIPHY2 link B ++ * + * Routing + * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) + * Examples: +@@ -664,88 +688,77 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) + * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS + * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI + */ +-static void ++ ++union dig_encoder_control { ++ DIG_ENCODER_CONTROL_PS_ALLOCATION v1; ++ DIG_ENCODER_CONTROL_PARAMETERS_V2 v2; ++ DIG_ENCODER_CONTROL_PARAMETERS_V3 v3; ++}; ++ ++void + atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) + { + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); +- DIG_ENCODER_CONTROL_PS_ALLOCATION args; +- int index = 0, num = 0; ++ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; ++ struct radeon_connector_atom_dig *dig_connector = ++ radeon_get_atom_connector_priv_from_encoder(encoder); ++ union dig_encoder_control args; ++ int index = 0; + uint8_t frev, crev; +- struct radeon_encoder_atom_dig *dig; +- struct drm_connector *connector; +- struct radeon_connector *radeon_connector; +- struct radeon_connector_atom_dig *dig_connector; +- +- connector = radeon_get_connector_for_encoder(encoder); +- if (!connector) +- return; +- +- radeon_connector = to_radeon_connector(connector); + +- if (!radeon_connector->con_priv) ++ if (!dig || !dig_connector) + return; + +- dig_connector = radeon_connector->con_priv; +- +- if (!radeon_encoder->enc_priv) +- return; +- +- dig = radeon_encoder->enc_priv; +- + memset(&args, 0, sizeof(args)); + +- if (dig->dig_encoder) +- index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); +- else +- index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); +- num = dig->dig_encoder + 1; ++ if (ASIC_IS_DCE4(rdev)) ++ index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl); ++ else { ++ if (dig->dig_encoder) ++ index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); ++ else ++ index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); ++ } + + atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); + +- args.ucAction = action; +- args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); ++ args.v1.ucAction = action; ++ args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); ++ args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); + +- if (ASIC_IS_DCE32(rdev)) { ++ if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) { ++ if (dig_connector->dp_clock == 270000) ++ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; ++ args.v1.ucLaneNum = dig_connector->dp_lane_count; ++ } else if (radeon_encoder->pixel_clock > 165000) ++ args.v1.ucLaneNum = 8; ++ else ++ args.v1.ucLaneNum = 4; ++ ++ if (ASIC_IS_DCE4(rdev)) { ++ args.v3.acConfig.ucDigSel = dig->dig_encoder; ++ args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR; ++ } else { + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: +- args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; ++ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: +- args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2; ++ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: ++ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2; + break; 
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: +- args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; +- break; +- } +- } else { +- switch (radeon_encoder->encoder_id) { +- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: +- args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER1; +- break; +- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: +- args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER2; ++ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; + break; + } ++ if (dig_connector->linkb) ++ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; ++ else ++ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; + } + +- args.ucEncoderMode = atombios_get_encoder_mode(encoder); +- +- if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) { +- if (dig_connector->dp_clock == 270000) +- args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; +- args.ucLaneNum = dig_connector->dp_lane_count; +- } else if (radeon_encoder->pixel_clock > 165000) +- args.ucLaneNum = 8; +- else +- args.ucLaneNum = 4; +- +- if (dig_connector->linkb) +- args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; +- else +- args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; +- + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + } +@@ -753,6 +766,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) + union dig_transmitter_control { + DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; + DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; ++ DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3; + }; + + void +@@ -761,37 +775,29 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); +- union dig_transmitter_control args; +- int index = 0, num = 0; +- uint8_t frev, crev; +- struct radeon_encoder_atom_dig *dig; ++ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; ++ struct radeon_connector_atom_dig *dig_connector = ++ radeon_get_atom_connector_priv_from_encoder(encoder); + struct drm_connector *connector; + struct radeon_connector *radeon_connector; +- struct radeon_connector_atom_dig *dig_connector; ++ union dig_transmitter_control args; ++ int index = 0; ++ uint8_t frev, crev; + bool is_dp = false; ++ int pll_id = 0; + +- connector = radeon_get_connector_for_encoder(encoder); +- if (!connector) ++ if (!dig || !dig_connector) + return; + ++ connector = radeon_get_connector_for_encoder(encoder); + radeon_connector = to_radeon_connector(connector); + +- if (!radeon_encoder->enc_priv) +- return; +- +- dig = radeon_encoder->enc_priv; +- +- if (!radeon_connector->con_priv) +- return; +- +- dig_connector = radeon_connector->con_priv; +- + if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) + is_dp = true; + + memset(&args, 0, sizeof(args)); + +- if (ASIC_IS_DCE32(rdev)) ++ if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev)) + index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); + else { + switch (radeon_encoder->encoder_id) { +@@ -821,24 +827,64 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t + else + args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + } +- if (ASIC_IS_DCE32(rdev)) { +- if (dig->dig_encoder == 1) +- args.v2.acConfig.ucEncoderSel = 1; ++ if (ASIC_IS_DCE4(rdev)) { ++ if (is_dp) ++ args.v3.ucLaneNum = dig_connector->dp_lane_count; ++ else if (radeon_encoder->pixel_clock > 165000) ++ args.v3.ucLaneNum = 8; ++ else ++ args.v3.ucLaneNum = 4; ++ ++ if (dig_connector->linkb) { ++ 
args.v3.acConfig.ucLinkSel = 1; ++ args.v3.acConfig.ucEncoderSel = 1; ++ } ++ ++ /* Select the PLL for the PHY ++ * DP PHY should be clocked from external src if there is ++ * one. ++ */ ++ if (encoder->crtc) { ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); ++ pll_id = radeon_crtc->pll_id; ++ } ++ if (is_dp && rdev->clock.dp_extclk) ++ args.v3.acConfig.ucRefClkSource = 2; /* external src */ ++ else ++ args.v3.acConfig.ucRefClkSource = pll_id; ++ ++ switch (radeon_encoder->encoder_id) { ++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: ++ args.v3.acConfig.ucTransmitterSel = 0; ++ break; ++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: ++ args.v3.acConfig.ucTransmitterSel = 1; ++ break; ++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: ++ args.v3.acConfig.ucTransmitterSel = 2; ++ break; ++ } ++ ++ if (is_dp) ++ args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */ ++ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { ++ if (dig->coherent_mode) ++ args.v3.acConfig.fCoherentMode = 1; ++ } ++ } else if (ASIC_IS_DCE32(rdev)) { ++ args.v2.acConfig.ucEncoderSel = dig->dig_encoder; + if (dig_connector->linkb) + args.v2.acConfig.ucLinkSel = 1; + + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + args.v2.acConfig.ucTransmitterSel = 0; +- num = 0; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + args.v2.acConfig.ucTransmitterSel = 1; +- num = 1; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + args.v2.acConfig.ucTransmitterSel = 2; +- num = 2; + break; + } + +@@ -849,7 +895,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t + args.v2.acConfig.fCoherentMode = 1; + } + } else { +- + args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; + + if (dig->dig_encoder) +@@ -857,31 +902,25 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t + else + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; + +- switch (radeon_encoder->encoder_id) { +- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: +- if (rdev->flags & RADEON_IS_IGP) { +- if (radeon_encoder->pixel_clock > 165000) { +- if (dig_connector->igp_lane_info & 0x3) +- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; +- else if (dig_connector->igp_lane_info & 0xc) +- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; +- } else { +- if (dig_connector->igp_lane_info & 0x1) +- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; +- else if (dig_connector->igp_lane_info & 0x2) +- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; +- else if (dig_connector->igp_lane_info & 0x4) +- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; +- else if (dig_connector->igp_lane_info & 0x8) +- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; +- } ++ if ((rdev->flags & RADEON_IS_IGP) && ++ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { ++ if (is_dp || (radeon_encoder->pixel_clock <= 165000)) { ++ if (dig_connector->igp_lane_info & 0x1) ++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; ++ else if (dig_connector->igp_lane_info & 0x2) ++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; ++ else if (dig_connector->igp_lane_info & 0x4) ++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; ++ else if (dig_connector->igp_lane_info & 0x8) ++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; ++ } else { ++ if (dig_connector->igp_lane_info & 0x3) ++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; ++ else if (dig_connector->igp_lane_info & 0xc) ++ args.v1.ucConfig |= 
ATOM_TRANSMITTER_CONFIG_LANE_8_15; + } +- break; + } + +- if (radeon_encoder->pixel_clock > 165000) +- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK; +- + if (dig_connector->linkb) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; + else +@@ -892,6 +931,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t + else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { + if (dig->coherent_mode) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; ++ if (radeon_encoder->pixel_clock > 165000) ++ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK; + } + } + +@@ -998,16 +1039,25 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) + if (is_dig) { + switch (mode) { + case DRM_MODE_DPMS_ON: +- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); +- { ++ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); ++ + dp_link_train(encoder, connector); ++ if (ASIC_IS_DCE4(rdev)) ++ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); + } ++ if (!ASIC_IS_DCE4(rdev)) ++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: +- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); ++ if (!ASIC_IS_DCE4(rdev)) ++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); ++ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { ++ if (ASIC_IS_DCE4(rdev)) ++ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); ++ } + break; + } + } else { +@@ -1026,7 +1076,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) + radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); + } + +-union crtc_sourc_param { ++union crtc_source_param { + SELECT_CRTC_SOURCE_PS_ALLOCATION v1; + SELECT_CRTC_SOURCE_PARAMETERS_V2 v2; + }; +@@ -1038,7 +1088,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); +- union crtc_sourc_param args; ++ union crtc_source_param args; + int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); + uint8_t frev, crev; + struct radeon_encoder_atom_dig *dig; +@@ -1107,10 +1157,26 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + dig = radeon_encoder->enc_priv; +- if (dig->dig_encoder) +- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; +- else ++ switch (dig->dig_encoder) { ++ case 0: + args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; ++ break; ++ case 1: ++ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; ++ break; ++ case 2: ++ args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID; ++ break; ++ case 3: ++ args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID; ++ break; ++ case 4: ++ args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID; ++ break; ++ case 5: ++ args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID; ++ break; ++ } + break; + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; +@@ -1167,6 +1233,7 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder, + } + + /* set scaler clears this on some chips */ ++ /* XXX check DCE4 */ + if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) { + if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE)) + WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, +@@ -1183,6 +1250,33 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) + struct drm_encoder *test_encoder; + struct radeon_encoder_atom_dig *dig; + uint32_t dig_enc_in_use = 0; ++ ++ if (ASIC_IS_DCE4(rdev)) { ++ struct radeon_connector_atom_dig *dig_connector = ++ radeon_get_atom_connector_priv_from_encoder(encoder); ++ ++ switch (radeon_encoder->encoder_id) { ++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: ++ if (dig_connector->linkb) ++ return 1; ++ else ++ return 0; ++ break; ++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: ++ if (dig_connector->linkb) ++ return 3; ++ else ++ return 2; ++ break; ++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: ++ if (dig_connector->linkb) ++ return 5; ++ else ++ return 4; ++ break; ++ } ++ } ++ + /* on DCE32 and encoder can driver any block so just crtc id */ + if (ASIC_IS_DCE32(rdev)) { + return radeon_crtc->crtc_id; +@@ -1254,15 +1348,26 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: +- /* disable the encoder and transmitter */ +- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); +- atombios_dig_encoder_setup(encoder, ATOM_DISABLE); +- +- /* setup and enable the encoder and transmitter */ +- atombios_dig_encoder_setup(encoder, ATOM_ENABLE); +- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); +- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); +- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); ++ if (ASIC_IS_DCE4(rdev)) { ++ /* disable the transmitter */ ++ 
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); ++ /* setup and enable the encoder */ ++ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP); ++ ++ /* init and enable the transmitter */ ++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); ++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); ++ } else { ++ /* disable the encoder and transmitter */ ++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); ++ atombios_dig_encoder_setup(encoder, ATOM_DISABLE); ++ ++ /* setup and enable the encoder and transmitter */ ++ atombios_dig_encoder_setup(encoder, ATOM_ENABLE); ++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); ++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); ++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); ++ } + break; + case ENCODER_OBJECT_ID_INTERNAL_DDI: + atombios_ddia_setup(encoder, ATOM_ENABLE); +@@ -1282,7 +1387,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, + } + atombios_apply_encoder_quirks(encoder, adjusted_mode); + +- r600_hdmi_setmode(encoder, adjusted_mode); ++ /* XXX */ ++ if (!ASIC_IS_DCE4(rdev)) ++ r600_hdmi_setmode(encoder, adjusted_mode); + } + + static bool +@@ -1480,10 +1587,18 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su + return; + + encoder = &radeon_encoder->base; +- if (rdev->flags & RADEON_SINGLE_CRTC) ++ switch (rdev->num_crtc) { ++ case 1: + encoder->possible_crtcs = 0x1; +- else ++ break; ++ case 2: ++ default: + encoder->possible_crtcs = 0x3; ++ break; ++ case 6: ++ encoder->possible_crtcs = 0x3f; ++ break; ++ } + + radeon_encoder->enc_priv = NULL; + +diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h +index 797972e..93c7d5d 100644 +--- a/drivers/gpu/drm/radeon/radeon_family.h ++++ b/drivers/gpu/drm/radeon/radeon_family.h +@@ -75,6 +75,11 @@ enum radeon_family { + CHIP_RV730, + CHIP_RV710, + CHIP_RV740, ++ CHIP_CEDAR, ++ CHIP_REDWOOD, ++ CHIP_JUNIPER, ++ CHIP_CYPRESS, ++ CHIP_HEMLOCK, + CHIP_LAST, + }; + +diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c +index d71e346..0059242 100644 +--- a/drivers/gpu/drm/radeon/radeon_fb.c ++++ b/drivers/gpu/drm/radeon/radeon_fb.c +@@ -148,7 +148,6 @@ int radeonfb_create(struct drm_device *dev, + unsigned long tmp; + bool fb_tiled = false; /* useful for testing */ + u32 tiling_flags = 0; +- int crtc_count; + + mode_cmd.width = surface_width; + mode_cmd.height = surface_height; +@@ -239,11 +238,7 @@ int radeonfb_create(struct drm_device *dev, + rfbdev = info->par; + rfbdev->helper.funcs = &radeon_fb_helper_funcs; + rfbdev->helper.dev = dev; +- if (rdev->flags & RADEON_SINGLE_CRTC) +- crtc_count = 1; +- else +- crtc_count = 2; +- ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count, ++ ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc, + RADEONFB_CONN_LIMIT); + if (ret) + goto out_unref; +diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h +index e81b2ae..2f582a2 100644 +--- a/drivers/gpu/drm/radeon/radeon_mode.h ++++ b/drivers/gpu/drm/radeon/radeon_mode.h +@@ -83,6 +83,8 @@ struct radeon_i2c_bus_rec { + bool valid; + /* id used by atom */ + uint8_t i2c_id; ++ /* id used by atom */ ++ uint8_t hpd_id; + /* can be used with hw i2c engine */ + bool hw_capable; + /* uses multi-media i2c engine */ +@@ -193,7 +195,7 @@ 
struct radeon_mode_info { + struct card_info *atom_card_info; + enum radeon_connector_table connector_table; + bool mode_config_initialized; +- struct radeon_crtc *crtcs[2]; ++ struct radeon_crtc *crtcs[6]; + /* DVI-I properties */ + struct drm_property *coherent_mode_property; + /* DAC enable load detect */ +@@ -237,6 +239,7 @@ struct radeon_crtc { + fixed20_12 vsc; + fixed20_12 hsc; + struct drm_display_mode native_mode; ++ int pll_id; + }; + + struct radeon_encoder_primary_dac { +@@ -398,6 +401,7 @@ extern void dp_link_train(struct drm_encoder *encoder, + struct drm_connector *connector); + extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); + extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); ++extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action); + extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, + int action, uint8_t lane_num, + uint8_t lane_set); +diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h +index 6d0a009..7f0c752 100644 +--- a/drivers/gpu/drm/radeon/radeon_reg.h ++++ b/drivers/gpu/drm/radeon/radeon_reg.h +@@ -54,7 +54,7 @@ + #include "r300_reg.h" + #include "r500_reg.h" + #include "r600_reg.h" +- ++#include "evergreen_reg.h" + + #define RADEON_MC_AGP_LOCATION 0x014c + #define RADEON_MC_AGP_START_MASK 0x0000FFFF +diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h +index a1367ab..9506f8c 100644 +--- a/drivers/gpu/drm/radeon/rv770d.h ++++ b/drivers/gpu/drm/radeon/rv770d.h +@@ -343,4 +343,6 @@ + + #define WAIT_UNTIL 0x8040 + ++#define SRBM_STATUS 0x0E50 ++ + #endif +diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h +index e6f3b12..403490c 100644 +--- a/include/drm/drm_pciids.h ++++ b/include/drm/drm_pciids.h +@@ -141,6 +141,41 @@ + {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x688A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x689d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x689e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68a0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68a1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68a8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68a9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68b9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68be, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68c8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68c9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68d9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68da, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68de, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68e0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68e1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68e4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68e5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68e8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68e9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68f1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ diff --git a/drm-radeon-firemv-pciid.patch b/drm-radeon-firemv-pciid.patch new file mode 100644 index 0000000..1b4c129 --- /dev/null +++ b/drm-radeon-firemv-pciid.patch @@ -0,0 +1,30 @@ +From 79b9517a33a283c5d9db875c263670ed1e055f7e Mon Sep 17 00:00:00 2001 +From: Dave Airlie +Date: Mon, 19 Apr 2010 17:54:31 +1000 +Subject: [PATCH] drm/radeon/kms: add FireMV 2400 PCI ID. + +This is an M24/X600 chip. 
+ +From RH# 581927 + +cc: stable@kernel.org +Signed-off-by: Dave Airlie +--- + include/drm/drm_pciids.h | 1 + + 1 files changed, 1 insertions(+), 0 deletions(-) + +diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h +index 04a6ebc..2d428b0 100644 +--- a/include/drm/drm_pciids.h ++++ b/include/drm/drm_pciids.h +@@ -6,6 +6,7 @@ + {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ + {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \ +-- +1.6.6.1 + diff --git a/drm-radeon-fix-rs600-tlb.patch b/drm-radeon-fix-rs600-tlb.patch new file mode 100644 index 0000000..977dcd3 --- /dev/null +++ b/drm-radeon-fix-rs600-tlb.patch @@ -0,0 +1,32 @@ +From 30f69f3fb20bd719b5e1bf879339914063d38f47 Mon Sep 17 00:00:00 2001 +From: Jerome Glisse +Date: Fri, 16 Apr 2010 18:46:35 +0200 +Subject: [PATCH] drm/radeon/kms: fix rs600 tlb flush + +Typo in in flush leaded to no flush of the RS600 tlb which +ultimately leaded to massive system ram corruption, with +this patch everythings seems to work properly. + +Signed-off-by: Jerome Glisse +Cc: stable +Signed-off-by: Dave Airlie +--- + drivers/gpu/drm/radeon/rs600.c | 2 +- + 1 files changed, 1 insertions(+), 1 deletions(-) + +diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c +index abf824c..a81bc7a 100644 +--- a/drivers/gpu/drm/radeon/rs600.c ++++ b/drivers/gpu/drm/radeon/rs600.c +@@ -159,7 +159,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev) + WREG32_MC(R_000100_MC_PT0_CNTL, tmp); + + tmp = RREG32_MC(R_000100_MC_PT0_CNTL); +- tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1); ++ tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1); + WREG32_MC(R_000100_MC_PT0_CNTL, tmp); + + tmp = RREG32_MC(R_000100_MC_PT0_CNTL); +-- +1.6.6.1 + diff --git a/drm-radeon-fix-shared-ddc-handling.patch b/drm-radeon-fix-shared-ddc-handling.patch new file mode 100644 index 0000000..f17827c --- /dev/null +++ b/drm-radeon-fix-shared-ddc-handling.patch @@ -0,0 +1,36 @@ +From 557b452536c9390105539a264d342d963d71b087 Mon Sep 17 00:00:00 2001 +From: Alex Deucher +Date: Mon, 21 Jun 2010 12:07:52 -0400 +Subject: [PATCH] drm/radeon/kms: fix shared ddc handling +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Connectors with a shared ddc line can be connected to different +encoders. 
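The one-character change in the rs600 TLB-flush fix a few hunks above matters because the two S_000100_* helpers each produce a single, distinct bit: ANDing them together yields 0, so the register write never requests any invalidation at all, while ORing them sets both request bits. A minimal stand-alone sketch of that arithmetic (the bit positions used here are assumptions for illustration; the real single-bit encodings live in drivers/gpu/drm/radeon/rs600d.h):

/* Sketch only: bit positions are assumed, not taken from rs600d.h. */
#include <stdio.h>

#define S_000100_INVALIDATE_ALL_L1_TLBS(x)	(((x) & 0x1) << 28)
#define S_000100_INVALIDATE_L2_CACHE(x)		(((x) & 0x1) << 29)

int main(void)
{
	unsigned int tmp = 0;

	/* Broken form: the two masks share no bits, so the AND is 0 and
	 * no invalidation gets requested at all. */
	tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1);
	printf("with &: 0x%08x\n", tmp);	/* 0x00000000 */

	/* Fixed form: OR combines both single-bit requests. */
	tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
	printf("with |: 0x%08x\n", tmp);	/* 0x30000000 with the assumed bits */

	return 0;
}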
+ +Reported by Pasi Kärkkäinen on dri-devel + +Signed-off-by: Alex Deucher +--- + drivers/gpu/drm/radeon/radeon_connectors.c | 4 +++- + 1 files changed, 3 insertions(+), 1 deletions(-) + +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c +index 0c7ccc6..f58f8bd 100644 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c +@@ -785,7 +785,9 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect + if (connector == list_connector) + continue; + list_radeon_connector = to_radeon_connector(list_connector); +- if (radeon_connector->devices == list_radeon_connector->devices) { ++ if (list_radeon_connector->shared_ddc && ++ (list_radeon_connector->ddc_bus->rec.i2c_id == ++ radeon_connector->ddc_bus->rec.i2c_id)) { + if (drm_detect_hdmi_monitor(radeon_connector->edid)) { + if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) { + kfree(radeon_connector->edid); +-- +1.7.0.1 + diff --git a/drm-radeon-kms-fix-dual-link-dvi.patch b/drm-radeon-kms-fix-dual-link-dvi.patch new file mode 100644 index 0000000..3e9d28f --- /dev/null +++ b/drm-radeon-kms-fix-dual-link-dvi.patch @@ -0,0 +1,39 @@ +From b317a9ce2259e64258a802a5ca70dec45ac15dda Mon Sep 17 00:00:00 2001 +From: Alex Deucher +Date: Thu, 15 Apr 2010 16:54:38 -0400 +Subject: [PATCH] drm/radeon/kms/atom: fix dual-link DVI on DCE3.2/4.0 + +Got broken during the evergreen merge. +Fixes fdo bug 27001. + +Signed-off-by: Alex Deucher +Signed-off-by: Dave Airlie +--- + drivers/gpu/drm/radeon/radeon_encoders.c | 4 ++++ + 1 files changed, 4 insertions(+), 0 deletions(-) + +diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c +index c52fc30..9f7f56a 100644 +--- a/drivers/gpu/drm/radeon/radeon_encoders.c ++++ b/drivers/gpu/drm/radeon/radeon_encoders.c +@@ -865,6 +865,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t + else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { + if (dig->coherent_mode) + args.v3.acConfig.fCoherentMode = 1; ++ if (radeon_encoder->pixel_clock > 165000) ++ args.v3.acConfig.fDualLinkConnector = 1; + } + } else if (ASIC_IS_DCE32(rdev)) { + args.v2.acConfig.ucEncoderSel = dig->dig_encoder; +@@ -888,6 +890,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t + else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { + if (dig->coherent_mode) + args.v2.acConfig.fCoherentMode = 1; ++ if (radeon_encoder->pixel_clock > 165000) ++ args.v2.acConfig.fDualLinkConnector = 1; + } + } else { + args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; +-- +1.6.6.1 + diff --git a/drm-radeon-ss-fix.patch b/drm-radeon-ss-fix.patch new file mode 100644 index 0000000..ae081aa --- /dev/null +++ b/drm-radeon-ss-fix.patch @@ -0,0 +1,97 @@ +From 7aac5b711ff4c64bad5a6027cf6d38f1bbd53efe Mon Sep 17 00:00:00 2001 +From: Dave Airlie +Date: Tue, 20 Apr 2010 18:30:37 +1000 +Subject: [PATCH] drm/radeon/kms: further spread spectrum fixes + +Adjust modeset ordering to fix spread spectrum. +The spread spectrum command table relies on the +crtc routing to already be set in order to work +properly on some asics. + +Should fix fdo bug 25741. 
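The dual-link DVI fix above hinges on the single-link TMDS limit: one DVI link carries at most a 165 MHz pixel clock, so anything faster has to set fDualLinkConnector. A self-contained sketch of that threshold test (the helper name is invented for illustration; radeon keeps the pixel clock in kHz, matching the 165000 constant in the patch):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative helper, not driver code: single-link TMDS tops out at
 * a 165 MHz pixel clock, so anything above that needs dual link. */
static inline bool needs_dual_link_dvi(uint32_t pixel_clock_khz)
{
	return pixel_clock_khz > 165000;
}

/* e.g. 1920x1080@60 (148.5 MHz) -> false, 2560x1600@60 (~268 MHz) -> true */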
+ +Signed-off-by: Alex Deucher +Signed-off-by: Dave Airlie + +Conflicts: + + drivers/gpu/drm/radeon/atombios_crtc.c +--- + drivers/gpu/drm/radeon/atombios_crtc.c | 5 +++++ + drivers/gpu/drm/radeon/radeon_encoders.c | 25 +++++++++++++++---------- + 2 files changed, 20 insertions(+), 10 deletions(-) + +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c +index c076eac..e70b575 100644 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c +@@ -1115,6 +1115,11 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, + + static void atombios_crtc_prepare(struct drm_crtc *crtc) + { ++ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ++ ++ /* pick pll */ ++ radeon_crtc->pll_id = radeon_atom_pick_pll(crtc); ++ + atombios_lock_crtc(crtc, ATOM_ENABLE); + atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + } +diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c +index cac9e06..ff28ad8 100644 +--- a/drivers/gpu/drm/radeon/radeon_encoders.c ++++ b/drivers/gpu/drm/radeon/radeon_encoders.c +@@ -1207,6 +1207,9 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) + } + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); ++ ++ /* update scratch regs with new routing */ ++ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); + } + + static void +@@ -1317,19 +1320,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); +- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + +- if (radeon_encoder->active_device & +- (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { +- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; +- if (dig) +- dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); +- } + radeon_encoder->pixel_clock = adjusted_mode->clock; + +- radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); +- atombios_set_encoder_crtc_source(encoder); +- + if (ASIC_IS_AVIVO(rdev)) { + if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) + atombios_yuv_setup(encoder, true); +@@ -1483,8 +1476,20 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec + + static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) + { ++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ++ ++ if (radeon_encoder->active_device & ++ (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { ++ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; ++ if (dig) ++ dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); ++ } ++ + radeon_atom_output_lock(encoder, true); + radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); ++ ++ /* this is needed for the pll/ss setup to work correctly in some cases */ ++ atombios_set_encoder_crtc_source(encoder); + } + + static void radeon_atom_encoder_commit(struct drm_encoder *encoder) +-- +1.6.5.2 + diff --git a/drm-revert-drm-fbdev-rework-output-polling-to-be-back-in-core.patch b/drm-revert-drm-fbdev-rework-output-polling-to-be-back-in-core.patch deleted file mode 100644 index 481a08f..0000000 --- a/drm-revert-drm-fbdev-rework-output-polling-to-be-back-in-core.patch +++ /dev/null @@ -1,958 +0,0 @@ -From 5b904034b0ab5195d971b139d0c0b67ab21b063c Mon Sep 17 00:00:00 2001 -From: Kyle McMartin -Date: Mon, 21 
Jun 2010 20:33:16 +0100 -Subject: Revert "drm/fbdev: rework output polling to be back in the core. (v4)" - -This reverts commit eb1f8e4f3be898df808e2dfc131099f5831d491d. - -Conflicts: - - drivers/gpu/drm/drm_crtc_helper.c - drivers/gpu/drm/i915/i915_dma.c - drivers/gpu/drm/i915/intel_fb.c - drivers/gpu/drm/nouveau/nouveau_fbcon.c - drivers/gpu/drm/radeon/radeon_fb.c - include/drm/drm_crtc_helper.h ---- - drivers/gpu/drm/Kconfig | 2 +- - drivers/gpu/drm/drm_crtc_helper.c | 111 ------------------------ - drivers/gpu/drm/drm_fb_helper.c | 123 +++++++++++++++++++++++---- - drivers/gpu/drm/i915/i915_dma.c | 1 - - drivers/gpu/drm/i915/i915_irq.c | 3 +- - drivers/gpu/drm/i915/intel_crt.c | 5 - - drivers/gpu/drm/i915/intel_display.c | 2 - - drivers/gpu/drm/i915/intel_dp.c | 2 - - drivers/gpu/drm/i915/intel_drv.h | 2 +- - drivers/gpu/drm/i915/intel_fb.c | 14 ++-- - drivers/gpu/drm/i915/intel_hdmi.c | 1 - - drivers/gpu/drm/i915/intel_sdvo.c | 2 - - drivers/gpu/drm/nouveau/nouveau_connector.c | 12 --- - drivers/gpu/drm/nouveau/nouveau_display.c | 1 - - drivers/gpu/drm/nouveau/nouveau_fbcon.c | 13 ++- - drivers/gpu/drm/nouveau/nouveau_fbcon.h | 2 +- - drivers/gpu/drm/nouveau/nouveau_state.c | 5 +- - drivers/gpu/drm/nouveau/nv50_display.c | 2 +- - drivers/gpu/drm/radeon/radeon_connectors.c | 13 --- - drivers/gpu/drm/radeon/radeon_display.c | 10 -- - drivers/gpu/drm/radeon/radeon_fb.c | 15 +++- - drivers/gpu/drm/radeon/radeon_irq_kms.c | 5 +- - drivers/gpu/drm/radeon/radeon_mode.h | 3 +- - include/drm/drm_crtc.h | 17 ---- - include/drm/drm_crtc_helper.h | 6 -- - include/drm/drm_fb_helper.h | 13 +++- - 26 files changed, 155 insertions(+), 230 deletions(-) - -diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig -index c2711c6..a51a1e4 100644 ---- a/drivers/gpu/drm/Kconfig -+++ b/drivers/gpu/drm/Kconfig -@@ -9,7 +9,6 @@ menuconfig DRM - depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU - select I2C - select I2C_ALGOBIT -- select SLOW_WORK - help - Kernel-level support for the Direct Rendering Infrastructure (DRI) - introduced in XFree86 4.0. If you say Y here, you need to select -@@ -24,6 +23,7 @@ config DRM_KMS_HELPER - depends on DRM - select FB - select FRAMEBUFFER_CONSOLE if !EMBEDDED -+ select SLOW_WORK - help - FB and CRTC helpers for KMS drivers. 
- -diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c -index 9b2a541..b142ac2 100644 ---- a/drivers/gpu/drm/drm_crtc_helper.c -+++ b/drivers/gpu/drm/drm_crtc_helper.c -@@ -807,114 +807,3 @@ int drm_helper_resume_force_mode(struct drm_device *dev) - return 0; - } - EXPORT_SYMBOL(drm_helper_resume_force_mode); -- --static struct slow_work_ops output_poll_ops; -- --#define DRM_OUTPUT_POLL_PERIOD (10*HZ) --static void output_poll_execute(struct slow_work *work) --{ -- struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work); -- struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work); -- struct drm_connector *connector; -- enum drm_connector_status old_status, status; -- bool repoll = false, changed = false; -- int ret; -- -- mutex_lock(&dev->mode_config.mutex); -- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -- -- /* if this is HPD or polled don't check it - -- TV out for instance */ -- if (!connector->polled) -- continue; -- -- else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT)) -- repoll = true; -- -- old_status = connector->status; -- /* if we are connected and don't want to poll for disconnect -- skip it */ -- if (old_status == connector_status_connected && -- !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) && -- !(connector->polled & DRM_CONNECTOR_POLL_HPD)) -- continue; -- -- status = connector->funcs->detect(connector); -- if (old_status != status) -- changed = true; -- } -- -- mutex_unlock(&dev->mode_config.mutex); -- -- if (changed) { -- /* send a uevent + call fbdev */ -- drm_sysfs_hotplug_event(dev); -- if (dev->mode_config.funcs->output_poll_changed) -- dev->mode_config.funcs->output_poll_changed(dev); -- } -- -- if (repoll) { -- ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD); -- if (ret) -- DRM_ERROR("delayed enqueue failed %d\n", ret); -- } --} -- --void drm_kms_helper_poll_disable(struct drm_device *dev) --{ -- if (!dev->mode_config.poll_enabled) -- return; -- delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); --} --EXPORT_SYMBOL(drm_kms_helper_poll_disable); -- --void drm_kms_helper_poll_enable(struct drm_device *dev) --{ -- bool poll = false; -- struct drm_connector *connector; -- int ret; -- -- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { -- if (connector->polled) -- poll = true; -- } -- -- if (poll) { -- ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD); -- if (ret) -- DRM_ERROR("delayed enqueue failed %d\n", ret); -- } --} --EXPORT_SYMBOL(drm_kms_helper_poll_enable); -- --void drm_kms_helper_poll_init(struct drm_device *dev) --{ -- slow_work_register_user(THIS_MODULE); -- delayed_slow_work_init(&dev->mode_config.output_poll_slow_work, -- &output_poll_ops); -- dev->mode_config.poll_enabled = true; -- -- drm_kms_helper_poll_enable(dev); --} --EXPORT_SYMBOL(drm_kms_helper_poll_init); -- --void drm_kms_helper_poll_fini(struct drm_device *dev) --{ -- drm_kms_helper_poll_disable(dev); -- slow_work_unregister_user(THIS_MODULE); --} --EXPORT_SYMBOL(drm_kms_helper_poll_fini); -- --void drm_helper_hpd_irq_event(struct drm_device *dev) --{ -- if (!dev->mode_config.poll_enabled) -- return; -- delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); -- /* schedule a slow work asap */ -- delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0); --} 
--EXPORT_SYMBOL(drm_helper_hpd_irq_event); -- --static struct slow_work_ops output_poll_ops = { -- .execute = output_poll_execute, --}; -diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c -index 08c4c92..dcc6601 100644 ---- a/drivers/gpu/drm/drm_fb_helper.c -+++ b/drivers/gpu/drm/drm_fb_helper.c -@@ -42,6 +42,8 @@ MODULE_LICENSE("GPL and additional rights"); - - static LIST_HEAD(kernel_fb_helper_list); - -+static struct slow_work_ops output_status_change_ops; -+ - /* simple single crtc case helper function */ - int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) - { -@@ -423,13 +425,19 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) - - int drm_fb_helper_init(struct drm_device *dev, - struct drm_fb_helper *fb_helper, -- int crtc_count, int max_conn_count) -+ int crtc_count, int max_conn_count, -+ bool polled) - { - struct drm_crtc *crtc; - int ret = 0; - int i; - - fb_helper->dev = dev; -+ fb_helper->poll_enabled = polled; -+ -+ slow_work_register_user(THIS_MODULE); -+ delayed_slow_work_init(&fb_helper->output_status_change_slow_work, -+ &output_status_change_ops); - - INIT_LIST_HEAD(&fb_helper->kernel_fb_list); - -@@ -486,6 +494,8 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) - - drm_fb_helper_crtc_free(fb_helper); - -+ delayed_slow_work_cancel(&fb_helper->output_status_change_slow_work); -+ slow_work_unregister_user(THIS_MODULE); - } - EXPORT_SYMBOL(drm_fb_helper_fini); - -@@ -703,7 +713,7 @@ int drm_fb_helper_set_par(struct fb_info *info) - - if (fb_helper->delayed_hotplug) { - fb_helper->delayed_hotplug = false; -- drm_fb_helper_hotplug_event(fb_helper); -+ delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work, 0); - } - return 0; - } -@@ -816,7 +826,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, - if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { - /* hmm everyone went away - assume VGA cable just fell out - and will come back later. */ -- DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n"); -+ DRM_ERROR("Cannot find any crtc or sizes - going 1024x768\n"); - sizes.fb_width = sizes.surface_width = 1024; - sizes.fb_height = sizes.surface_height = 768; - } -@@ -1362,7 +1372,12 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) - * we shouldn't end up with no modes here. 
- */ - if (count == 0) { -- printk(KERN_INFO "No connectors reported connected with modes\n"); -+ if (fb_helper->poll_enabled) { -+ delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work, -+ 5*HZ); -+ printk(KERN_INFO "No connectors reported connected with modes - started polling\n"); -+ } else -+ printk(KERN_INFO "No connectors reported connected with modes\n"); - } - drm_setup_crtcs(fb_helper); - -@@ -1370,16 +1385,71 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) - } - EXPORT_SYMBOL(drm_fb_helper_initial_config); - --bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) -+/* we got a hotplug irq - need to update fbcon */ -+void drm_helper_fb_hpd_irq_event(struct drm_fb_helper *fb_helper) -+{ -+ /* if we don't have the fbdev registered yet do nothing */ -+ if (!fb_helper->fbdev) -+ return; -+ -+ /* schedule a slow work asap */ -+ delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work, 0); -+} -+EXPORT_SYMBOL(drm_helper_fb_hpd_irq_event); -+ -+bool drm_helper_fb_hotplug_event(struct drm_fb_helper *fb_helper, bool polled) - { - int count = 0; -+ int ret; - u32 max_width, max_height, bpp_sel; -- bool bound = false, crtcs_bound = false; -- struct drm_crtc *crtc; - - if (!fb_helper->fb) - return false; -+ DRM_DEBUG_KMS("\n"); -+ -+ max_width = fb_helper->fb->width; -+ max_height = fb_helper->fb->height; -+ bpp_sel = fb_helper->fb->bits_per_pixel; -+ -+ count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, -+ max_height); -+ if (fb_helper->poll_enabled && !polled) { -+ if (count) { -+ delayed_slow_work_cancel(&fb_helper->output_status_change_slow_work); -+ } else { -+ ret = delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work, 5*HZ); -+ } -+ } -+ drm_setup_crtcs(fb_helper); -+ -+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); -+} -+EXPORT_SYMBOL(drm_helper_fb_hotplug_event); -+ -+/* -+ * delayed work queue execution function -+ * - check if fbdev is actually in use on the gpu -+ * - if not set delayed flag and repoll if necessary -+ * - check for connector status change -+ * - repoll if 0 modes found -+ *- call driver output status changed notifier -+ */ -+static void output_status_change_execute(struct slow_work *work) -+{ -+ struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work); -+ struct drm_fb_helper *fb_helper = container_of(delayed_work, struct drm_fb_helper, output_status_change_slow_work); -+ struct drm_connector *connector; -+ enum drm_connector_status old_status, status; -+ bool repoll, changed = false; -+ int ret; -+ int i; -+ bool bound = false, crtcs_bound = false; -+ struct drm_crtc *crtc; - -+ repoll = fb_helper->poll_enabled; -+ -+ /* first of all check the fbcon framebuffer is actually bound to any crtc */ -+ /* take into account that no crtc at all maybe bound */ - list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) { - if (crtc->fb) - crtcs_bound = true; -@@ -1387,21 +1457,38 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) - bound = true; - } - -- if (!bound && crtcs_bound) { -+ if (bound == false && crtcs_bound) { - fb_helper->delayed_hotplug = true; -- return false; -+ goto requeue; - } -- DRM_DEBUG_KMS("\n"); - -- max_width = fb_helper->fb->width; -- max_height = fb_helper->fb->height; -- bpp_sel = fb_helper->fb->bits_per_pixel; -+ for (i = 0; i < fb_helper->connector_count; i++) { -+ connector = fb_helper->connector_info[i]->connector; -+ old_status = connector->status; -+ status 
= connector->funcs->detect(connector); -+ if (old_status != status) { -+ changed = true; -+ } -+ if (status == connector_status_connected && repoll) { -+ DRM_DEBUG("%s is connected - stop polling\n", drm_get_connector_name(connector)); -+ repoll = false; -+ } -+ } - -- count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, -- max_height); -- drm_setup_crtcs(fb_helper); -+ if (changed) { -+ if (fb_helper->funcs->fb_output_status_changed) -+ fb_helper->funcs->fb_output_status_changed(fb_helper); -+ } - -- return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); -+requeue: -+ if (repoll) { -+ ret = delayed_slow_work_enqueue(delayed_work, 5*HZ); -+ if (ret) -+ DRM_ERROR("delayed enqueue failed %d\n", ret); -+ } - } --EXPORT_SYMBOL(drm_fb_helper_hotplug_event); -+ -+static struct slow_work_ops output_status_change_ops = { -+ .execute = output_status_change_execute, -+}; - -diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c -index 59a2bf8..76ace2d 100644 ---- a/drivers/gpu/drm/i915/i915_dma.c -+++ b/drivers/gpu/drm/i915/i915_dma.c -@@ -1430,7 +1430,6 @@ static int i915_load_modeset_init(struct drm_device *dev, - if (ret) - goto cleanup_irq; - -- drm_kms_helper_poll_init(dev); - return 0; - - cleanup_irq: -diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c -index 2479be0..6350bd3 100644 ---- a/drivers/gpu/drm/i915/i915_irq.c -+++ b/drivers/gpu/drm/i915/i915_irq.c -@@ -271,7 +271,8 @@ static void i915_hotplug_work_func(struct work_struct *work) - } - } - /* Just fire off a uevent and let userspace tell us what to do */ -- drm_helper_hpd_irq_event(dev); -+ intelfb_hotplug(dev, false); -+ drm_sysfs_hotplug_event(dev); - } - - static void i915_handle_rps_change(struct drm_device *dev) -diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c -index 22ff384..125eded 100644 ---- a/drivers/gpu/drm/i915/intel_crt.c -+++ b/drivers/gpu/drm/i915/intel_crt.c -@@ -584,10 +584,5 @@ void intel_crt_init(struct drm_device *dev) - - drm_sysfs_connector_add(connector); - -- if (I915_HAS_HOTPLUG(dev)) -- connector->polled = DRM_CONNECTOR_POLL_HPD; -- else -- connector->polled = DRM_CONNECTOR_POLL_CONNECT; -- - dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; - } -diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c -index d753257..70537cf 100644 ---- a/drivers/gpu/drm/i915/intel_display.c -+++ b/drivers/gpu/drm/i915/intel_display.c -@@ -5036,7 +5036,6 @@ intel_user_framebuffer_create(struct drm_device *dev, - - static const struct drm_mode_config_funcs intel_mode_funcs = { - .fb_create = intel_user_framebuffer_create, -- .output_poll_changed = intel_fb_output_poll_changed, - }; - - static struct drm_gem_object * -@@ -5538,7 +5537,6 @@ void intel_modeset_cleanup(struct drm_device *dev) - - mutex_lock(&dev->struct_mutex); - -- drm_kms_helper_poll_fini(dev); - intel_fbdev_fini(dev); - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c -index 49b54f0..1815df5 100644 ---- a/drivers/gpu/drm/i915/intel_dp.c -+++ b/drivers/gpu/drm/i915/intel_dp.c -@@ -1393,8 +1393,6 @@ intel_dp_init(struct drm_device *dev, int output_reg) - DRM_MODE_CONNECTOR_DisplayPort); - drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); - -- connector->polled = DRM_CONNECTOR_POLL_HPD; -- - if (output_reg == DP_A) - intel_encoder->type = INTEL_OUTPUT_EDP; - else -diff --git 
a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h -index df931f7..3230e8d 100644 ---- a/drivers/gpu/drm/i915/intel_drv.h -+++ b/drivers/gpu/drm/i915/intel_drv.h -@@ -235,5 +235,5 @@ extern int intel_overlay_put_image(struct drm_device *dev, void *data, - extern int intel_overlay_attrs(struct drm_device *dev, void *data, - struct drm_file *file_priv); - --extern void intel_fb_output_poll_changed(struct drm_device *dev); -+void intelfb_hotplug(struct drm_device *dev, bool polled); - #endif /* __INTEL_DRV_H__ */ -diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c -index c3c5052..79098b3 100644 ---- a/drivers/gpu/drm/i915/intel_fb.c -+++ b/drivers/gpu/drm/i915/intel_fb.c -@@ -211,6 +211,12 @@ static int intel_fb_find_or_create_single(struct drm_fb_helper *helper, - return new_fb; - } - -+void intelfb_hotplug(struct drm_device *dev, bool polled) -+{ -+ drm_i915_private_t *dev_priv = dev->dev_private; -+ drm_helper_fb_hpd_irq_event(&dev_priv->fbdev->helper); -+} -+ - static struct drm_fb_helper_funcs intel_fb_helper_funcs = { - .gamma_set = intel_crtc_fb_gamma_set, - .gamma_get = intel_crtc_fb_gamma_get, -@@ -256,7 +262,7 @@ int intel_fbdev_init(struct drm_device *dev) - - ret = drm_fb_helper_init(dev, &ifbdev->helper, - dev_priv->num_pipe, -- INTELFB_CONN_LIMIT); -+ INTELFB_CONN_LIMIT, false); - if (ret) { - kfree(ifbdev); - return ret; -@@ -278,9 +284,3 @@ void intel_fbdev_fini(struct drm_device *dev) - dev_priv->fbdev = NULL; - } - MODULE_LICENSE("GPL and additional rights"); -- --void intel_fb_output_poll_changed(struct drm_device *dev) --{ -- drm_i915_private_t *dev_priv = dev->dev_private; -- drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); --} -diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c -index 83bd764..acaca07 100644 ---- a/drivers/gpu/drm/i915/intel_hdmi.c -+++ b/drivers/gpu/drm/i915/intel_hdmi.c -@@ -240,7 +240,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) - - intel_encoder->type = INTEL_OUTPUT_HDMI; - -- connector->polled = DRM_CONNECTOR_POLL_HPD; - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - intel_encoder->crtc_mask = (1 << 0) | (1 << 1); -diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c -index 76993ac..1c716b5 100644 ---- a/drivers/gpu/drm/i915/intel_sdvo.c -+++ b/drivers/gpu/drm/i915/intel_sdvo.c -@@ -2218,7 +2218,6 @@ intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device) - } - - connector = &intel_connector->base; -- connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; - encoder->encoder_type = DRM_MODE_ENCODER_TMDS; - connector->connector_type = DRM_MODE_CONNECTOR_DVID; - -@@ -2285,7 +2284,6 @@ intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device) - return false; - - connector = &intel_connector->base; -- connector->polled = DRM_CONNECTOR_POLL_CONNECT; - encoder->encoder_type = DRM_MODE_ENCODER_DAC; - connector->connector_type = DRM_MODE_CONNECTOR_VGA; - sdvo_connector = intel_connector->dev_priv; -diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c -index 149ed22..9a61f3c 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_connector.c -+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c -@@ -846,7 +846,6 @@ nouveau_connector_create(struct drm_device *dev, - - switch (dcb->type) { - case DCB_CONNECTOR_VGA: -- connector->polled = DRM_CONNECTOR_POLL_CONNECT; - if (dev_priv->card_type >= NV_50) { - 
drm_connector_attach_property(connector, - dev->mode_config.scaling_mode_property, -@@ -858,17 +857,6 @@ nouveau_connector_create(struct drm_device *dev, - case DCB_CONNECTOR_TV_3: - nv_connector->scaling_mode = DRM_MODE_SCALE_NONE; - break; -- case DCB_CONNECTOR_DP: -- case DCB_CONNECTOR_eDP: -- case DCB_CONNECTOR_HDMI_0: -- case DCB_CONNECTOR_HDMI_1: -- case DCB_CONNECTOR_DVI_I: -- case DCB_CONNECTOR_DVI_D: -- if (dev_priv->card_type >= NV_50) -- connector->polled = DRM_CONNECTOR_POLL_HPD; -- else -- connector->polled = DRM_CONNECTOR_POLL_CONNECT; -- /* fall-through */ - default: - nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; - -diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c -index 74e6b4e..9d7928f 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_display.c -+++ b/drivers/gpu/drm/nouveau/nouveau_display.c -@@ -101,6 +101,5 @@ nouveau_user_framebuffer_create(struct drm_device *dev, - - const struct drm_mode_config_funcs nouveau_mode_config_funcs = { - .fb_create = nouveau_user_framebuffer_create, -- .output_poll_changed = nouveau_fbcon_output_poll_changed, - }; - -diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c -index c9a4a0d..0a59f96 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c -+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c -@@ -326,11 +326,15 @@ nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper, - return new_fb; - } - --void --nouveau_fbcon_output_poll_changed(struct drm_device *dev) -+void nouveau_fbcon_hotplug(struct drm_device *dev) - { - struct drm_nouveau_private *dev_priv = dev->dev_private; -- drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper); -+ drm_helper_fb_hpd_irq_event(&dev_priv->nfbdev->helper); -+} -+ -+static void nouveau_fbcon_output_status_changed(struct drm_fb_helper *fb_helper) -+{ -+ drm_helper_fb_hotplug_event(fb_helper, true); - } - - int -@@ -370,6 +374,7 @@ static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { - .gamma_set = nouveau_fbcon_gamma_set, - .gamma_get = nouveau_fbcon_gamma_get, - .fb_probe = nouveau_fbcon_find_or_create_single, -+ .fb_output_status_changed = nouveau_fbcon_output_status_changed, - }; - - -@@ -387,7 +392,7 @@ int nouveau_fbcon_init(struct drm_device *dev) - dev_priv->nfbdev = nfbdev; - nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs; - -- ret = drm_fb_helper_init(dev, &nfbdev->helper, 2, 4); -+ ret = drm_fb_helper_init(dev, &nfbdev->helper, 2, 4, true); - if (ret) { - kfree(nfbdev); - return ret; -diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h -index e7e1268..bf8e00d 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h -+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h -@@ -58,6 +58,6 @@ void nouveau_fbcon_zfill_all(struct drm_device *dev); - void nouveau_fbcon_save_disable_accel(struct drm_device *dev); - void nouveau_fbcon_restore_accel(struct drm_device *dev); - --void nouveau_fbcon_output_poll_changed(struct drm_device *dev); -+void nouveau_fbcon_hotplug(struct drm_device *dev); - #endif /* __NV50_FBCON_H__ */ - -diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c -index b02a231..4dcb976 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_state.c -+++ b/drivers/gpu/drm/nouveau/nouveau_state.c -@@ -519,10 +519,8 @@ nouveau_card_init(struct drm_device *dev) - - dev_priv->init_state = NOUVEAU_CARD_INIT_DONE; - -- if (drm_core_check_feature(dev, DRIVER_MODESET)) { -+ if (drm_core_check_feature(dev, 
DRIVER_MODESET)) - nouveau_fbcon_init(dev); -- drm_kms_helper_poll_init(dev); -- } - - return 0; - -@@ -844,7 +842,6 @@ int nouveau_unload(struct drm_device *dev) - struct drm_nouveau_private *dev_priv = dev->dev_private; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) { -- drm_kms_helper_poll_fini(dev); - nouveau_fbcon_fini(dev); - if (dev_priv->card_type >= NV_50) - nv50_display_destroy(dev); -diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c -index 580a5d1..e6a44af 100644 ---- a/drivers/gpu/drm/nouveau/nv50_display.c -+++ b/drivers/gpu/drm/nouveau/nv50_display.c -@@ -980,7 +980,7 @@ nv50_display_irq_hotplug_bh(struct work_struct *work) - if (dev_priv->chipset >= 0x90) - nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074)); - -- drm_helper_hpd_irq_event(dev); -+ nouveau_fbcon_hotplug(dev); - } - - void -diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c -index 0c7ccc6..40a24c9 100644 ---- a/drivers/gpu/drm/radeon/radeon_connectors.c -+++ b/drivers/gpu/drm/radeon/radeon_connectors.c -@@ -1085,7 +1085,6 @@ radeon_add_atom_connector(struct drm_device *dev, - drm_connector_attach_property(&radeon_connector->base, - rdev->mode_info.load_detect_property, - 1); -- connector->polled = DRM_CONNECTOR_POLL_CONNECT; - break; - case DRM_MODE_CONNECTOR_DVIA: - drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); -@@ -1212,12 +1211,6 @@ radeon_add_atom_connector(struct drm_device *dev, - break; - } - -- if (hpd->hpd == RADEON_HPD_NONE) { -- if (i2c_bus->valid) -- connector->polled = DRM_CONNECTOR_POLL_CONNECT; -- } else -- connector->polled = DRM_CONNECTOR_POLL_HPD; -- - connector->display_info.subpixel_order = subpixel_order; - drm_sysfs_connector_add(connector); - return; -@@ -1279,7 +1272,6 @@ radeon_add_legacy_connector(struct drm_device *dev, - drm_connector_attach_property(&radeon_connector->base, - rdev->mode_info.load_detect_property, - 1); -- connector->polled = DRM_CONNECTOR_POLL_CONNECT; - break; - case DRM_MODE_CONNECTOR_DVIA: - drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); -@@ -1348,11 +1340,6 @@ radeon_add_legacy_connector(struct drm_device *dev, - break; - } - -- if (hpd->hpd == RADEON_HPD_NONE) { -- if (i2c_bus->valid) -- connector->polled = DRM_CONNECTOR_POLL_CONNECT; -- } else -- connector->polled = DRM_CONNECTOR_POLL_HPD; - connector->display_info.subpixel_order = subpixel_order; - drm_sysfs_connector_add(connector); - return; -diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c -index c73444a..ed756be 100644 ---- a/drivers/gpu/drm/radeon/radeon_display.c -+++ b/drivers/gpu/drm/radeon/radeon_display.c -@@ -887,15 +887,8 @@ radeon_user_framebuffer_create(struct drm_device *dev, - return &radeon_fb->base; - } - --static void radeon_output_poll_changed(struct drm_device *dev) --{ -- struct radeon_device *rdev = dev->dev_private; -- radeon_fb_output_poll_changed(rdev); --} -- - static const struct drm_mode_config_funcs radeon_mode_funcs = { - .fb_create = radeon_user_framebuffer_create, -- .output_poll_changed = radeon_output_poll_changed - }; - - struct drm_prop_enum_list { -@@ -1044,8 +1037,6 @@ int radeon_modeset_init(struct radeon_device *rdev) - radeon_pm_init(rdev); - - radeon_fbdev_init(rdev); -- drm_kms_helper_poll_init(rdev->ddev); -- - return 0; - } - -@@ -1058,7 +1049,6 @@ void radeon_modeset_fini(struct radeon_device *rdev) - radeon_pm_fini(rdev); - - if 
(rdev->mode_info.mode_config_initialized) { -- drm_kms_helper_poll_fini(rdev->ddev); - radeon_hpd_fini(rdev); - drm_mode_config_cleanup(rdev->ddev); - rdev->mode_info.mode_config_initialized = false; -diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c -index dc1634b..7dc38f6 100644 ---- a/drivers/gpu/drm/radeon/radeon_fb.c -+++ b/drivers/gpu/drm/radeon/radeon_fb.c -@@ -316,9 +316,16 @@ int radeon_parse_options(char *options) - return 0; - } - --void radeon_fb_output_poll_changed(struct radeon_device *rdev) -+void radeonfb_hotplug(struct drm_device *dev, bool polled) - { -- drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper); -+ struct radeon_device *rdev = dev->dev_private; -+ -+ drm_helper_fb_hpd_irq_event(&rdev->mode_info.rfbdev->helper); -+} -+ -+static void radeon_fb_output_status_changed(struct drm_fb_helper *fb_helper) -+{ -+ drm_helper_fb_hotplug_event(fb_helper, true); - } - - static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev) -@@ -357,6 +364,7 @@ static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { - .gamma_set = radeon_crtc_fb_gamma_set, - .gamma_get = radeon_crtc_fb_gamma_get, - .fb_probe = radeon_fb_find_or_create_single, -+ .fb_output_status_changed = radeon_fb_output_status_changed, - }; - - int radeon_fbdev_init(struct radeon_device *rdev) -@@ -379,7 +387,7 @@ int radeon_fbdev_init(struct radeon_device *rdev) - - ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper, - rdev->num_crtc, -- RADEONFB_CONN_LIMIT); -+ RADEONFB_CONN_LIMIT, true); - if (ret) { - kfree(rfbdev); - return ret; -@@ -388,6 +396,7 @@ int radeon_fbdev_init(struct radeon_device *rdev) - drm_fb_helper_single_add_all_connectors(&rfbdev->helper); - drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); - return 0; -+ - } - - void radeon_fbdev_fini(struct radeon_device *rdev) -diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c -index 059bfa4..b0178de 100644 ---- a/drivers/gpu/drm/radeon/radeon_irq_kms.c -+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c -@@ -26,7 +26,6 @@ - * Jerome Glisse - */ - #include "drmP.h" --#include "drm_crtc_helper.h" - #include "radeon_drm.h" - #include "radeon_reg.h" - #include "radeon.h" -@@ -56,7 +55,9 @@ static void radeon_hotplug_work_func(struct work_struct *work) - radeon_connector_hotplug(connector); - } - /* Just fire off a uevent and let userspace tell us what to do */ -- drm_helper_hpd_irq_event(dev); -+ radeonfb_hotplug(dev, false); -+ -+ drm_sysfs_hotplug_event(dev); - } - - void radeon_driver_irq_preinstall_kms(struct drm_device *dev) -diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h -index 67358ba..fdd1611 100644 ---- a/drivers/gpu/drm/radeon/radeon_mode.h -+++ b/drivers/gpu/drm/radeon/radeon_mode.h -@@ -588,6 +588,5 @@ void radeon_fbdev_fini(struct radeon_device *rdev); - void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state); - int radeon_fbdev_total_size(struct radeon_device *rdev); - bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); -- --void radeon_fb_output_poll_changed(struct radeon_device *rdev); -+void radeonfb_hotplug(struct drm_device *dev, bool polled); - #endif -diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h -index 93a1a31..a7148d2 100644 ---- a/include/drm/drm_crtc.h -+++ b/include/drm/drm_crtc.h -@@ -31,7 +31,6 @@ - #include - - #include --#include - - struct drm_device; - struct drm_mode_set; -@@ -461,15 +460,6 @@ enum 
drm_connector_force { - DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */ - }; - --/* should we poll this connector for connects and disconnects */ --/* hot plug detectable */ --#define DRM_CONNECTOR_POLL_HPD (1 << 0) --/* poll for connections */ --#define DRM_CONNECTOR_POLL_CONNECT (1 << 1) --/* can cleanly poll for disconnections without flickering the screen */ --/* DACs should rarely do this without a lot of testing */ --#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2) -- - /** - * drm_connector - central DRM connector control structure - * @crtc: CRTC this connector is currently connected to, NULL if none -@@ -514,8 +504,6 @@ struct drm_connector { - u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY]; - uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY]; - -- uint8_t polled; /* DRM_CONNECTOR_POLL_* */ -- - /* requested DPMS state */ - int dpms; - -@@ -555,7 +543,6 @@ struct drm_mode_set { - */ - struct drm_mode_config_funcs { - struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd); -- void (*output_poll_changed)(struct drm_device *dev); - }; - - struct drm_mode_group { -@@ -593,10 +580,6 @@ struct drm_mode_config { - struct drm_mode_config_funcs *funcs; - resource_size_t fb_base; - -- /* output poll support */ -- bool poll_enabled; -- struct delayed_slow_work output_poll_slow_work; -- - /* pointers to standard properties */ - struct list_head property_blob_list; - struct drm_property *edid_property; -diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h -index 1121f77..b1fa0f8 100644 ---- a/include/drm/drm_crtc_helper.h -+++ b/include/drm/drm_crtc_helper.h -@@ -127,10 +127,4 @@ static inline void drm_connector_helper_add(struct drm_connector *connector, - } - - extern int drm_helper_resume_force_mode(struct drm_device *dev); --extern void drm_kms_helper_poll_init(struct drm_device *dev); --extern void drm_kms_helper_poll_fini(struct drm_device *dev); --extern void drm_helper_hpd_irq_event(struct drm_device *dev); -- --extern void drm_kms_helper_poll_disable(struct drm_device *dev); --extern void drm_kms_helper_poll_enable(struct drm_device *dev); - #endif -diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h -index f0a6afc..9b55a94 100644 ---- a/include/drm/drm_fb_helper.h -+++ b/include/drm/drm_fb_helper.h -@@ -30,6 +30,8 @@ - #ifndef DRM_FB_HELPER_H - #define DRM_FB_HELPER_H - -+#include -+ - struct drm_fb_helper; - - struct drm_fb_helper_crtc { -@@ -69,6 +71,9 @@ struct drm_fb_helper_funcs { - - int (*fb_probe)(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes); -+ -+ void (*fb_output_status_changed)(struct drm_fb_helper *helper); -+ - }; - - struct drm_fb_helper_connector { -@@ -90,6 +95,8 @@ struct drm_fb_helper { - u32 pseudo_palette[17]; - struct list_head kernel_fb_list; - -+ struct delayed_slow_work output_status_change_slow_work; -+ bool poll_enabled; - /* we got a hotplug but fbdev wasn't running the console - delay until next set_par */ - bool delayed_hotplug; -@@ -100,7 +107,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *helper, - - int drm_fb_helper_init(struct drm_device *dev, - struct drm_fb_helper *helper, int crtc_count, -- int max_conn); -+ int max_conn, bool polled); - void drm_fb_helper_fini(struct drm_fb_helper *helper); - int drm_fb_helper_blank(int blank, struct fb_info *info); - int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, -@@ -123,8 +130,10 @@ void drm_fb_helper_fill_fix(struct fb_info *info, 
uint32_t pitch, - - int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); - --bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); -+bool drm_helper_fb_hotplug_event(struct drm_fb_helper *fb_helper, -+ bool polled); - bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); - int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); - -+void drm_helper_fb_hpd_irq_event(struct drm_fb_helper *fb_helper); - #endif --- -1.7.0.1 - diff --git a/ethtool-fix-buffer-overflow.patch b/ethtool-fix-buffer-overflow.patch new file mode 100644 index 0000000..01b1a41 --- /dev/null +++ b/ethtool-fix-buffer-overflow.patch @@ -0,0 +1,33 @@ +From: Ben Hutchings +Date: Mon, 28 Jun 2010 08:44:07 +0000 (+0000) +Subject: ethtool: Fix potential kernel buffer overflow in ETHTOOL_GRXCLSRLALL +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fdavem%2Fnet-2.6.git;a=commitdiff_plain;h=db048b69037e7fa6a7d9e95a1271a50dc08ae233 + +ethtool: Fix potential kernel buffer overflow in ETHTOOL_GRXCLSRLALL + +On a 32-bit machine, info.rule_cnt >= 0x40000000 leads to integer +overflow and the buffer may be smaller than needed. Since +ETHTOOL_GRXCLSRLALL is unprivileged, this can presumably be used for at +least denial of service. + +Signed-off-by: Ben Hutchings +Cc: stable@kernel.org +Signed-off-by: David S. Miller +--- + +diff --git a/net/core/ethtool.c b/net/core/ethtool.c +index a0f4964..a3a7e9a 100644 +--- a/net/core/ethtool.c ++++ b/net/core/ethtool.c +@@ -347,8 +347,9 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, + + if (info.cmd == ETHTOOL_GRXCLSRLALL) { + if (info.rule_cnt > 0) { +- rule_buf = kmalloc(info.rule_cnt * sizeof(u32), +- GFP_USER); ++ if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32)) ++ rule_buf = kmalloc(info.rule_cnt * sizeof(u32), ++ GFP_USER); + if (!rule_buf) + return -ENOMEM; + } diff --git a/ext4-issue-discard-operation-before-releasing-blocks.patch b/ext4-issue-discard-operation-before-releasing-blocks.patch new file mode 100644 index 0000000..b2b66f0 --- /dev/null +++ b/ext4-issue-discard-operation-before-releasing-blocks.patch @@ -0,0 +1,62 @@ +From: Theodore Ts'o +Date: Tue, 20 Apr 2010 20:51:59 +0000 (-0400) +Subject: ext4: Issue the discard operation *before* releasing the blocks to be reused +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=b90f687018e6d6c77d981b09203780f7001407e5 + +ext4: Issue the discard operation *before* releasing the blocks to be reused + +[ backported to 2.6.33 ] + +Otherwise, we can end up having data corruption because the blocks +could get reused and then discarded! 
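
To make that ordering hazard concrete, here is a minimal stand-alone sketch
(plain C, invented for illustration and not part of the patch; the "disk" is a
single byte and the allocator a single flag):

    #include <stdio.h>

    static char block = 'A';   /* byte stored in the one disk block we model */
    static int  block_free;    /* allocator state                            */

    static void release_block(void)  { block_free = 1; }
    static void reuse_block(char c)  { if (block_free) { block_free = 0; block = c; } }
    static void discard_block(void)  { block = 0; }   /* discard/TRIM wipes the block */

    int main(void)
    {
        /* Old order: release first, discard later.  A new owner can slip in
         * between and have its freshly written data wiped by the discard.   */
        release_block();
        reuse_block('B');
        discard_block();
        printf("discard after release : block holds %d, the new owner's 'B' is gone\n", block);

        /* Fixed order: discard while the deleted file still owns the block. */
        block = 'A';
        block_free = 0;
        discard_block();
        release_block();
        reuse_block('B');
        printf("discard before release: block holds '%c'\n", block);
        return 0;
    }

The hunk below implements the same idea by moving the sb_issue_discard() call
ahead of the point where the blocks are handed back to the buddy allocator.
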
+ +https://bugzilla.kernel.org/show_bug.cgi?id=15579 + +Signed-off-by: "Theodore Ts'o" +--- + +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index 54df209..e5ab41b 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -2534,6 +2534,20 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) + mb_debug(1, "gonna free %u blocks in group %u (0x%p):", + entry->count, entry->group, entry); + ++ if (test_opt(sb, DISCARD)) { ++ ext4_fsblk_t discard_block; ++ struct ext4_super_block *es = EXT4_SB(sb)->s_es; ++ ++ discard_block = (ext4_fsblk_t)entry->group * ++ EXT4_BLOCKS_PER_GROUP(sb) ++ + entry->start_blk ++ + le32_to_cpu(es->s_first_data_block); ++ trace_ext4_discard_blocks(sb, ++ (unsigned long long)discard_block, ++ entry->count); ++ sb_issue_discard(sb, discard_block, entry->count); ++ } ++ + err = ext4_mb_load_buddy(sb, entry->group, &e4b); + /* we expect to find existing buddy because it's pinned */ + BUG_ON(err != 0); +@@ -2555,19 +2566,6 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) + page_cache_release(e4b.bd_bitmap_page); + } + ext4_unlock_group(sb, entry->group); +- if (test_opt(sb, DISCARD)) { +- ext4_fsblk_t discard_block; +- struct ext4_super_block *es = EXT4_SB(sb)->s_es; +- +- discard_block = (ext4_fsblk_t)entry->group * +- EXT4_BLOCKS_PER_GROUP(sb) +- + entry->start_blk +- + le32_to_cpu(es->s_first_data_block); +- trace_ext4_discard_blocks(sb, +- (unsigned long long)discard_block, +- entry->count); +- sb_issue_discard(sb, discard_block, entry->count); +- } + kmem_cache_free(ext4_free_ext_cachep, entry); + ext4_mb_release_desc(&e4b); + } diff --git a/ext4-move-aio-completion-after-unwritten-extent-conversion.patch b/ext4-move-aio-completion-after-unwritten-extent-conversion.patch deleted file mode 100644 index 7e2cb17..0000000 --- a/ext4-move-aio-completion-after-unwritten-extent-conversion.patch +++ /dev/null @@ -1,95 +0,0 @@ -From: jiayingz@google.com (Jiaying Zhang) <> -Date: Tue, 27 Jul 2010 15:56:06 +0000 (-0400) -Subject: ext4: move aio completion after unwritten extent conversion -X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftytso%2Fext4.git;a=commitdiff_plain;h=5b3ff237bef43b9e7fb7d1eb858e29b73fd664f9 - -ext4: move aio completion after unwritten extent conversion - -This patch is to be applied upon Christoph's "direct-io: move aio_complete -into ->end_io" patch. It adds iocb and result fields to struct ext4_io_end_t, -so that we can call aio_complete from ext4_end_io_nolock() after the extent -conversion has finished. - -I have verified with Christoph's aio-dio test that used to fail after a few -runs on an original kernel but now succeeds on the patched kernel. - -See http://thread.gmane.org/gmane.comp.file-systems.ext4/19659 for details. 
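
The ordering the patch enforces (complete the AIO only after the unwritten
extent has been converted) can be compressed into a few lines of stand-alone C;
the field names loosely follow the patch, everything else is invented for
illustration:

    #include <stdio.h>

    /* Simplified stand-in for ext4_io_end_t: the request remembers the iocb
     * and its result so completion can be signalled later, from the worker
     * that converts the unwritten extent.                                   */
    struct io_end {
        int   unwritten;   /* extent still marked unwritten?                 */
        void *iocb;        /* opaque handle for the AIO waiting on this I/O  */
        long  result;      /* byte count reported back to the caller         */
    };

    static void aio_complete(void *iocb, long res)
    {
        printf("aio_complete(%p, %ld): userspace may now read the data\n", iocb, res);
    }

    static void convert_unwritten_extent(struct io_end *io)
    {
        io->unwritten = 0;   /* written data becomes visible on disk */
    }

    /* Runs later, from the dio_unwritten workqueue in the real driver. */
    static void end_io_work(struct io_end *io)
    {
        convert_unwritten_extent(io);
        aio_complete(io->iocb, io->result);   /* only after the conversion */
    }

    int main(void)
    {
        int dummy_iocb;
        struct io_end io = { 1, &dummy_iocb, 4096 };
        /* Before the patch, aio_complete() ran in the end_io path before the
         * extent was converted, so a reader woken by it could see stale or
         * zeroed data.                                                       */
        end_io_work(&io);
        return 0;
    }
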
- -Signed-off-by: Jiaying Zhang -Signed-off-by: "Theodore Ts'o" ---- - -diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h -index 4c7d472..fbb3947 100644 ---- a/fs/ext4/ext4.h -+++ b/fs/ext4/ext4.h -@@ -170,13 +170,15 @@ struct mpage_da_data { - }; - #define EXT4_IO_UNWRITTEN 0x1 - typedef struct ext4_io_end { -- struct list_head list; /* per-file finished AIO list */ -+ struct list_head list; /* per-file finished IO list */ - struct inode *inode; /* file being written to */ - unsigned int flag; /* unwritten or not */ - struct page *page; /* page struct for buffer write */ - loff_t offset; /* offset in the file */ - ssize_t size; /* size of the extent */ - struct work_struct work; /* data work queue */ -+ struct kiocb *iocb; /* iocb struct for AIO */ -+ int result; /* error value for AIO */ - } ext4_io_end_t; - - /* -diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c -index 609159e..46d2079 100644 ---- a/fs/ext4/inode.c -+++ b/fs/ext4/inode.c -@@ -3668,6 +3668,8 @@ static int ext4_end_io_nolock(ext4_io_end_t *io) - return ret; - } - -+ if (io->iocb) -+ aio_complete(io->iocb, io->result, 0); - /* clear the DIO AIO unwritten flag */ - io->flag = 0; - return ret; -@@ -3767,6 +3769,8 @@ static ext4_io_end_t *ext4_init_io_end (struct inode *inode, gfp_t flags) - io->offset = 0; - io->size = 0; - io->page = NULL; -+ io->iocb = NULL; -+ io->result = 0; - INIT_WORK(&io->work, ext4_end_io_work); - INIT_LIST_HEAD(&io->list); - } -@@ -3796,12 +3800,18 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, - if (io_end->flag != EXT4_IO_UNWRITTEN){ - ext4_free_io_end(io_end); - iocb->private = NULL; -- goto out; -+out: -+ if (is_async) -+ aio_complete(iocb, ret, 0); -+ return; - } - - io_end->offset = offset; - io_end->size = size; -- io_end->flag = EXT4_IO_UNWRITTEN; -+ if (is_async) { -+ io_end->iocb = iocb; -+ io_end->result = ret; -+ } - wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq; - - /* queue the work to convert unwritten extents to written */ -@@ -3813,9 +3823,6 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, - list_add_tail(&io_end->list, &ei->i_completed_io_list); - spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); - iocb->private = NULL; --out: -- if (is_async) -- aio_complete(iocb, ret, 0); - } - - static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate) diff --git a/fix-9p-fscache.patch b/fix-9p-fscache.patch new file mode 100644 index 0000000..3376f89 --- /dev/null +++ b/fix-9p-fscache.patch @@ -0,0 +1,21 @@ +Subject: [PATCH] 9p: fscache: fix build breakage introduced by 201a15428bd54f83eccec8b7c64a04b8f9431204 + +While building 2.6.32-rc8-git2 for Fedora I noticed the following thinko in +201a15428bd54f83eccec8b7c64a04b8f9431204. Patch below looks to be correct? 
+ +Signed-off-by: Kyle McMartin + +--- +diff --git a/fs/9p/cache.c b/fs/9p/cache.c +index bcc5357..e777961 100644 +--- a/fs/9p/cache.c ++++ b/fs/9p/cache.c +@@ -343,7 +343,7 @@ int __v9fs_fscache_release_page(struct page *page, gfp_t gfp) + + BUG_ON(!vcookie->fscache); + +- return fscache_maybe_release_page(vnode->cache, page, gfp); ++ return fscache_maybe_release_page(vcookie->fscache, page, gfp); + } + + void __v9fs_fscache_invalidate_page(struct page *page) diff --git a/git-bluetooth.patch b/git-bluetooth.patch index e69de29..023f095 100644 --- a/git-bluetooth.patch +++ b/git-bluetooth.patch @@ -0,0 +1,319 @@ +commit 711909b33d6fdee149b5cb58bd888e7c10407acb +Author: Bastien Nocera +Date: Wed Apr 21 15:24:56 2010 +0100 + + Add support for the Wacom Intuos 4 wireless + + And to the HID blacklist. + + Same command set as the Graphire Bluetooth tablet. + + Signed-off-by: Bastien Nocera + +commit 1e03f3dc79ae5a9456545702f6dcac1023b06666 +Author: Antonio Ospite +Date: Thu Apr 29 23:59:34 2010 +0200 + + hid/hid-sony: fix sony_set_operational_bt + + Don't send the report type as part of the data, this prevents the + controller from going into the operational state at all. + + This is completely equivalent to what the code originally meant to accomplish: + as per in net/bluetooth/hidp/core.c::hidp_output_raw_report(), by using + HID_FEATURE_REPORT here, what will be actually sent is + (HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE) which is exactly 0x53. + + Signed-off-by: Antonio Ospite + +commit ea42416024fb33c970dbc10a6c69c0831126d75e +Author: Jiri Kosina +Date: Wed Feb 3 15:52:31 2010 +0100 + + HID: make Wacom modesetting failures non-fatal + + With Wacom tablet mode-setting moved from userspace into kernel, + we don't have to consider failures of device queries through the + _raw callback as hard failure, as the driver can safely continue + anyway. + + This is consistent with the current USB driver in wacom_sys.c + + Reported-by: Ping Cheng + Signed-off-by: Jiri Kosina + +commit a37234f5fcd6ad44ada8c477c2ab531f3ed9fbe5 +Author: Bastien Nocera +Date: Wed Jan 20 12:01:53 2010 +0000 + + HID: Enable Sixaxis controller over Bluetooth + + Now that hid_output_raw_report works, port the PS3 Sixaxis + Bluetooth quirk from user-space, into kernel-space. + + Signed-off-by: Bastien Nocera + Acked-by: Marcel Holtmann + Signed-off-by: Jiri Kosina + +commit 6bc702ac6551532774171d593a805c3565befb4e +Author: Bastien Nocera +Date: Wed Jan 20 12:00:53 2010 +0000 + + HID: Implement Wacom quirk in the kernel + + The hid-wacom driver required user-space to poke at the tablet + to make it send data about the cursor location. + + This patch makes it do the same thing but in the kernel. + + Signed-off-by: Bastien Nocera + Acked-by: Marcel Holtmann + Signed-off-by: Jiri Kosina + +commit 6fd920bdba1752fdd6411a55b3c17e0fda67b8d2 +Author: Jiri Kosina +Date: Fri Jan 29 15:03:36 2010 +0100 + + HID: make raw reports possible for both feature and output reports + + In commit 2da31939a42 ("Bluetooth: Implement raw output support for HIDP + layer"), support for Bluetooth hid_output_raw_report was added, but it + pushes the data to the intr socket instead of the ctrl one. This has been + fixed by 6bf8268f9a91f1 ("Bluetooth: Use the control channel for raw HID reports") + + Still, it is necessary to distinguish whether the report in question should be + either FEATURE or OUTPUT. 
For this, we have to extend the generic HID API, + so that hid_output_raw_report() callback provides means to specify this + value so that it can be passed down to lower level hardware drivers (currently + Bluetooth and USB). + + Based on original patch by Bastien Nocera + Acked-by: Marcel Holtmann + Signed-off-by: Jiri Kosina + +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index 8455f3d..112568e 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -1340,6 +1340,7 @@ static const struct hid_device_id hid_blacklist[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, ++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, +@@ -1352,6 +1353,7 @@ static const struct hid_device_id hid_blacklist[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) }, ++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, + +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 793691f..4ccd60b 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -428,6 +428,7 @@ + + #define USB_VENDOR_ID_WACOM 0x056a + #define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81 ++#define USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH 0xbd + + #define USB_VENDOR_ID_WISEGROUP 0x0925 + #define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005 +diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c +index 4e84502..e71da89 100644 +--- a/drivers/hid/hid-sony.c ++++ b/drivers/hid/hid-sony.c +@@ -48,7 +48,7 @@ static void sony_report_fixup(struct hid_device *hdev, __u8 *rdesc, + * to "operational". Without this, the ps3 controller will not report any + * events. 
+ */ +-static int sony_set_operational(struct hid_device *hdev) ++static int sony_set_operational_usb(struct hid_device *hdev) + { + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + struct usb_device *dev = interface_to_usbdev(intf); +@@ -73,6 +73,12 @@ static int sony_set_operational(struct hid_device *hdev) + return ret; + } + ++static int sony_set_operational_bt(struct hid_device *hdev) ++{ ++ unsigned char buf[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 }; ++ return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT); ++} ++ + static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) + { + int ret; +@@ -101,7 +107,17 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) + goto err_free; + } + +- ret = sony_set_operational(hdev); ++ switch (hdev->bus) { ++ case BUS_USB: ++ ret = sony_set_operational_usb(hdev); ++ break; ++ case BUS_BLUETOOTH: ++ ret = sony_set_operational_bt(hdev); ++ break; ++ default: ++ ret = 0; ++ } ++ + if (ret < 0) + goto err_stop; + +@@ -121,6 +137,7 @@ static void sony_remove(struct hid_device *hdev) + + static const struct hid_device_id sony_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, ++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE), + .driver_data = VAIO_RDESC_CONSTANT }, + { } +diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c +index 12dcda5..91dbae3 100644 +--- a/drivers/hid/hid-wacom.c ++++ b/drivers/hid/hid-wacom.c +@@ -156,7 +156,9 @@ static int wacom_probe(struct hid_device *hdev, + struct hid_input *hidinput; + struct input_dev *input; + struct wacom_data *wdata; ++ char rep_data[2]; + int ret; ++ int limit; + + wdata = kzalloc(sizeof(*wdata), GFP_KERNEL); + if (wdata == NULL) { +@@ -166,6 +168,7 @@ static int wacom_probe(struct hid_device *hdev, + + hid_set_drvdata(hdev, wdata); + ++ /* Parse the HID report now */ + ret = hid_parse(hdev); + if (ret) { + dev_err(&hdev->dev, "parse failed\n"); +@@ -178,6 +181,31 @@ static int wacom_probe(struct hid_device *hdev, + goto err_free; + } + ++ /* ++ * Note that if the raw queries fail, it's not a hard failure and it ++ * is safe to continue ++ */ ++ ++ /* Set Wacom mode2 */ ++ rep_data[0] = 0x03; rep_data[1] = 0x00; ++ limit = 3; ++ do { ++ ret = hdev->hid_output_raw_report(hdev, rep_data, 2, ++ HID_FEATURE_REPORT); ++ } while (ret < 0 && limit-- > 0); ++ if (ret < 0) ++ dev_warn(&hdev->dev, "failed to poke device #1, %d\n", ret); ++ ++ /* 0x06 - high reporting speed, 0x05 - low speed */ ++ rep_data[0] = 0x06; rep_data[1] = 0x00; ++ limit = 3; ++ do { ++ ret = hdev->hid_output_raw_report(hdev, rep_data, 2, ++ HID_FEATURE_REPORT); ++ } while (ret < 0 && limit-- > 0); ++ if (ret < 0) ++ dev_warn(&hdev->dev, "failed to poke device #2, %d\n", ret); ++ + hidinput = list_entry(hdev->inputs.next, struct hid_input, list); + input = hidinput->input; + +@@ -228,7 +256,7 @@ static void wacom_remove(struct hid_device *hdev) + + static const struct hid_device_id wacom_devices[] = { + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) }, +- ++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) }, + { } + }; + MODULE_DEVICE_TABLE(hid, wacom_devices); +diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c +index cdd1369..d044767 100644 +--- a/drivers/hid/hidraw.c ++++ b/drivers/hid/hidraw.c +@@ -134,7 +134,7 @@ 
static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t + goto out; + } + +- ret = dev->hid_output_raw_report(dev, buf, count); ++ ret = dev->hid_output_raw_report(dev, buf, count, HID_OUTPUT_REPORT); + out: + kfree(buf); + return ret; +diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c +index 2f84237..83c9f94 100644 +--- a/drivers/hid/usbhid/hid-core.c ++++ b/drivers/hid/usbhid/hid-core.c +@@ -798,7 +798,8 @@ static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid) + return 0; + } + +-static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t count) ++static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t count, ++ unsigned char report_type) + { + struct usbhid_device *usbhid = hid->driver_data; + struct usb_device *dev = hid_to_usb_dev(hid); +@@ -809,7 +810,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co + ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), + HID_REQ_SET_REPORT, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, +- ((HID_OUTPUT_REPORT + 1) << 8) | *buf, ++ ((report_type + 1) << 8) | *buf, + interface->desc.bInterfaceNumber, buf + 1, count - 1, + USB_CTRL_SET_TIMEOUT); + +diff --git a/include/linux/hid.h b/include/linux/hid.h +index 8709365..3661a62 100644 +--- a/include/linux/hid.h ++++ b/include/linux/hid.h +@@ -501,7 +501,7 @@ struct hid_device { /* device report descriptor */ + void (*hiddev_report_event) (struct hid_device *, struct hid_report *); + + /* handler for raw output data, used by hidraw */ +- int (*hid_output_raw_report) (struct hid_device *, __u8 *, size_t); ++ int (*hid_output_raw_report) (struct hid_device *, __u8 *, size_t, unsigned char); + + /* debugging support via debugfs */ + unsigned short debug; +diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c +index fc6ec1e..280529a 100644 +--- a/net/bluetooth/hidp/core.c ++++ b/net/bluetooth/hidp/core.c +@@ -313,10 +313,21 @@ static int hidp_send_report(struct hidp_session *session, struct hid_report *rep + return hidp_queue_report(session, buf, rsize); + } + +-static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count) ++static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count, ++ unsigned char report_type) + { +- if (hidp_send_ctrl_message(hid->driver_data, +- HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE, ++ switch (report_type) { ++ case HID_FEATURE_REPORT: ++ report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE; ++ break; ++ case HID_OUTPUT_REPORT: ++ report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ if (hidp_send_ctrl_message(hid->driver_data, report_type, + data, count)) + return -ENOMEM; + return count; diff --git a/hda_intel-prealloc-4mb-dmabuffer.patch b/hda_intel-prealloc-4mb-dmabuffer.patch index 36e6aca..c80f11d 100644 --- a/hda_intel-prealloc-4mb-dmabuffer.patch +++ b/hda_intel-prealloc-4mb-dmabuffer.patch @@ -1,25 +1,16 @@ -From c69fcbd1f60b0842f7c1ad2c95692ffd19c4932b Mon Sep 17 00:00:00 2001 -From: Kyle McMartin -Date: Mon, 29 Mar 2010 23:56:08 -0400 -Subject: hda_intel-prealloc-4mb-dmabuffer - ---- - sound/pci/hda/hda_intel.c | 14 +++++++++++++- - 1 files changed, 13 insertions(+), 1 deletions(-) - diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c -index 4bb9067..37db515 100644 +index c8d9178..7d3bb15 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c -@@ 
-1986,6 +1986,7 @@ azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec, +@@ -1774,6 +1774,7 @@ azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec, struct azx_pcm *apcm; int pcm_dev = cpcm->device; int s, err; + size_t prealloc_min = 64*1024; /* 64KB */ - if (pcm_dev >= HDA_MAX_PCMS) { + if (pcm_dev >= AZX_MAX_PCMS) { snd_printk(KERN_ERR SFX "Invalid PCM device number %d\n", -@@ -2019,10 +2020,21 @@ azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec, +@@ -1807,10 +1808,21 @@ azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec, if (cpcm->stream[s].substreams) snd_pcm_set_ops(pcm, s, &azx_pcm_ops); } @@ -42,6 +33,3 @@ index 4bb9067..37db515 100644 return 0; } --- -1.7.0.1 - diff --git a/hdpvr-ir-enable.patch b/hdpvr-ir-enable.patch index 15b039e..a5c7e92 100644 --- a/hdpvr-ir-enable.patch +++ b/hdpvr-ir-enable.patch @@ -1,26 +1,7 @@ - drivers/media/video/hdpvr/Makefile | 4 +- - drivers/media/video/hdpvr/hdpvr-core.c | 12 ++--- - drivers/media/video/hdpvr/hdpvr-i2c.c | 83 ++++++++++++++++++++++---------- - drivers/media/video/hdpvr/hdpvr.h | 2 +- - 4 files changed, 64 insertions(+), 37 deletions(-) - -diff --git a/drivers/media/video/hdpvr/Makefile b/drivers/media/video/hdpvr/Makefile -index e0230fc..a0b9a43 100644 ---- a/drivers/media/video/hdpvr/Makefile -+++ b/drivers/media/video/hdpvr/Makefile -@@ -1,6 +1,4 @@ --hdpvr-objs := hdpvr-control.o hdpvr-core.o hdpvr-video.o -- --hdpvr-$(CONFIG_I2C) += hdpvr-i2c.o -+hdpvr-objs := hdpvr-control.o hdpvr-i2c.o hdpvr-core.o hdpvr-video.o - - obj-$(CONFIG_VIDEO_HDPVR) += hdpvr.o - -diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c -index 2fc9865..c72793a 100644 ---- a/drivers/media/video/hdpvr/hdpvr-core.c -+++ b/drivers/media/video/hdpvr/hdpvr-core.c -@@ -364,9 +364,8 @@ static int hdpvr_probe(struct usb_interface *interface, +diff -Naurp a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c +--- a/drivers/media/video/hdpvr/hdpvr-core.c 2010-07-06 17:36:44.000000000 -0400 ++++ b/drivers/media/video/hdpvr/hdpvr-core.c 2010-07-06 17:38:13.000000000 -0400 +@@ -363,9 +363,8 @@ static int hdpvr_probe(struct usb_interf goto error; } @@ -32,12 +13,12 @@ index 2fc9865..c72793a 100644 if (retval < 0) { v4l2_err(&dev->v4l2_dev, "registering i2c adapter failed\n"); goto error; -@@ -412,12 +411,9 @@ static void hdpvr_disconnect(struct usb_interface *interface) +@@ -411,12 +410,9 @@ static void hdpvr_disconnect(struct usb_ mutex_unlock(&dev->io_mutex); /* deregister I2C adapter */ -#ifdef CONFIG_I2C -+#if defined(CONFIG_I2C) || (CONFIG_I2C_MODULE) ++#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) mutex_lock(&dev->i2c_mutex); - if (dev->i2c_adapter) - i2c_del_adapter(dev->i2c_adapter); @@ -47,10 +28,21 @@ index 2fc9865..c72793a 100644 mutex_unlock(&dev->i2c_mutex); #endif /* CONFIG_I2C */ -diff --git a/drivers/media/video/hdpvr/hdpvr-i2c.c b/drivers/media/video/hdpvr/hdpvr-i2c.c -index 463b81b..a0557e4 100644 ---- a/drivers/media/video/hdpvr/hdpvr-i2c.c -+++ b/drivers/media/video/hdpvr/hdpvr-i2c.c +diff -Naurp a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h +--- a/drivers/media/video/hdpvr/hdpvr.h 2010-02-24 13:52:17.000000000 -0500 ++++ b/drivers/media/video/hdpvr/hdpvr.h 2010-07-06 17:42:20.000000000 -0400 +@@ -101,7 +101,7 @@ struct hdpvr_device { + struct work_struct worker; + + /* I2C adapter */ +- struct i2c_adapter *i2c_adapter; ++ struct i2c_adapter i2c_adapter; + /* I2C lock */ + struct mutex 
i2c_mutex; + +diff -Naurp a/drivers/media/video/hdpvr/hdpvr-i2c.c b/drivers/media/video/hdpvr/hdpvr-i2c.c +--- a/drivers/media/video/hdpvr/hdpvr-i2c.c 2010-07-06 17:36:51.000000000 -0400 ++++ b/drivers/media/video/hdpvr/hdpvr-i2c.c 2010-07-06 17:45:50.000000000 -0400 @@ -10,6 +10,8 @@ * */ @@ -58,83 +50,87 @@ index 463b81b..a0557e4 100644 +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) + #include - #include -@@ -22,8 +24,11 @@ - #define REQTYPE_I2C_WRITE 0xb0 - #define REQTYPE_I2C_WRITE_STATT 0xd0 + #include "hdpvr.h" +@@ -19,10 +21,13 @@ --static int hdpvr_i2c_read(struct hdpvr_device *dev, unsigned char addr, -- char *data, int len) + #define REQTYPE_I2C_READ 0xb1 + #define REQTYPE_I2C_WRITE 0xb0 +-#define REQTYPE_I2C_WRITE_STATT 0xd0 ++#define REQTYPE_I2C_WRITE_STAT 0xd0 ++ +#define HDPVR_HW_Z8F0811_IR_TX_I2C_ADDR 0x70 +#define HDPVR_HW_Z8F0811_IR_RX_I2C_ADDR 0x71 -+ -+static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus, -+ unsigned char addr, char *data, int len) + + static int hdpvr_i2c_read(struct hdpvr_device *dev, unsigned char addr, +- char *data, int len) ++ char *data, int len, int bus) { int ret; char *buf = kmalloc(len, GFP_KERNEL); -@@ -33,7 +38,7 @@ static int hdpvr_i2c_read(struct hdpvr_device *dev, unsigned char addr, +@@ -32,7 +37,7 @@ static int hdpvr_i2c_read(struct hdpvr_d ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), REQTYPE_I2C_READ, CTRL_READ_REQUEST, - 0x100|addr, 0, buf, len, 1000); -+ (bus << 8) | addr, 0, buf, len, 1000); ++ bus<<8 | addr, 0, buf, len, 1000); if (ret == len) { memcpy(data, buf, len); -@@ -46,8 +51,8 @@ static int hdpvr_i2c_read(struct hdpvr_device *dev, unsigned char addr, - return ret; +@@ -46,7 +51,7 @@ static int hdpvr_i2c_read(struct hdpvr_d } --static int hdpvr_i2c_write(struct hdpvr_device *dev, unsigned char addr, + static int hdpvr_i2c_write(struct hdpvr_device *dev, unsigned char addr, - char *data, int len) -+static int hdpvr_i2c_write(struct hdpvr_device *dev, int bus, -+ unsigned char addr, char *data, int len) ++ char *data, int len, int bus) { int ret; char *buf = kmalloc(len, GFP_KERNEL); -@@ -58,7 +63,7 @@ static int hdpvr_i2c_write(struct hdpvr_device *dev, unsigned char addr, +@@ -57,17 +62,17 @@ static int hdpvr_i2c_write(struct hdpvr_ ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), REQTYPE_I2C_WRITE, CTRL_WRITE_REQUEST, - 0x100|addr, 0, buf, len, 1000); -+ (bus << 8) | addr, 0, buf, len, 1000); ++ bus<<8 | addr, 0, buf, len, 1000); if (ret < 0) goto error; -@@ -68,7 +73,7 @@ static int hdpvr_i2c_write(struct hdpvr_device *dev, unsigned char addr, - REQTYPE_I2C_WRITE_STATT, CTRL_READ_REQUEST, + + ret = usb_control_msg(dev->udev, + usb_rcvctrlpipe(dev->udev, 0), +- REQTYPE_I2C_WRITE_STATT, CTRL_READ_REQUEST, ++ REQTYPE_I2C_WRITE_STAT, CTRL_READ_REQUEST, 0, 0, buf, 2, 1000); - if (ret == 2) -+ if ((ret == 2) && (buf[1] == (len - 1))) ++ if (ret == 2 && buf[1] == (len - 1)) ret = 0; else if (ret >= 0) ret = -EIO; -@@ -93,10 +98,10 @@ static int hdpvr_transfer(struct i2c_adapter *i2c_adapter, struct i2c_msg *msgs, - addr = msgs[i].addr << 1; +@@ -93,10 +98,10 @@ static int hdpvr_transfer(struct i2c_ada if (msgs[i].flags & I2C_M_RD) -- retval = hdpvr_i2c_read(dev, addr, msgs[i].buf, -+ retval = hdpvr_i2c_read(dev, 1, addr, msgs[i].buf, - msgs[i].len); + retval = hdpvr_i2c_read(dev, addr, msgs[i].buf, +- msgs[i].len); ++ msgs[i].len, 1); else -- retval = hdpvr_i2c_write(dev, addr, msgs[i].buf, -+ retval = hdpvr_i2c_write(dev, 1, addr, msgs[i].buf, - msgs[i].len); + retval = 
hdpvr_i2c_write(dev, addr, msgs[i].buf, +- msgs[i].len); ++ msgs[i].len, 1); } -@@ -115,31 +120,59 @@ static struct i2c_algorithm hdpvr_algo = { + mutex_unlock(&dev->i2c_mutex); +@@ -114,31 +119,61 @@ static struct i2c_algorithm hdpvr_algo = .functionality = hdpvr_functionality, }; -+static struct i2c_adapter hdpvr_i2c_adapter_template = { -+ .name = "Hauppage HD PVR I2C", -+ .owner = THIS_MODULE, -+ .id = I2C_HW_B_HDPVR, -+ .algo = &hdpvr_algo, -+ .class = I2C_CLASS_TV_ANALOG, ++static struct i2c_adapter hdpvr_i2c_adap_template = { ++ .name = "Hauppauge HD PVR I2C", ++ .owner = THIS_MODULE, ++ .id = I2C_HW_B_HDPVR, ++ .algo = &hdpvr_algo, ++ .algo_data = NULL, ++ .class = I2C_CLASS_TV_ANALOG, +}; + +static struct i2c_board_info hdpvr_i2c_board_info = { @@ -148,20 +144,20 @@ index 463b81b..a0557e4 100644 + + mutex_lock(&dev->i2c_mutex); + -+ hdpvr_i2c_read(dev, 0, 0x54, buffer, 1); ++ hdpvr_i2c_read(dev, 0x54, buffer, 1, 0); + + buffer[0] = 0; + buffer[1] = 0x8; -+ hdpvr_i2c_write(dev, 1, 0x54, buffer, 2); ++ hdpvr_i2c_write(dev, 0x54, buffer, 2, 1); + + buffer[1] = 0x18; -+ hdpvr_i2c_write(dev, 1, 0x54, buffer, 2); ++ hdpvr_i2c_write(dev, 0x54, buffer, 2, 1); + + mutex_unlock(&dev->i2c_mutex); -+ + return 0; +} + ++ int hdpvr_register_i2c_adapter(struct hdpvr_device *dev) { - struct i2c_adapter *i2c_adap; @@ -169,8 +165,18 @@ index 463b81b..a0557e4 100644 - i2c_adap = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL); - if (i2c_adap == NULL) -- goto error; + hdpvr_activate_ir(dev); ++ ++ memcpy(&dev->i2c_adapter, &hdpvr_i2c_adap_template, ++ sizeof(struct i2c_adapter)); ++ dev->i2c_adapter.dev.parent = &dev->udev->dev; ++ ++ i2c_set_adapdata(&dev->i2c_adapter, dev); ++ ++ retval = i2c_add_adapter(&dev->i2c_adapter); ++ ++ if (retval) + goto error; - strlcpy(i2c_adap->name, "Hauppauge HD PVR I2C", - sizeof(i2c_adap->name)); @@ -178,18 +184,11 @@ index 463b81b..a0557e4 100644 - i2c_adap->class = I2C_CLASS_TV_ANALOG; - i2c_adap->owner = THIS_MODULE; - i2c_adap->dev.parent = &dev->udev->dev; -+ memcpy(&dev->i2c_adapter, &hdpvr_i2c_adapter_template, -+ sizeof(struct i2c_adapter)); -+ dev->i2c_adapter.dev.parent = &dev->udev->dev; - +- - i2c_set_adapdata(i2c_adap, dev); -+ i2c_set_adapdata(&dev->i2c_adapter, dev); - +- - retval = i2c_add_adapter(i2c_adap); -+ retval = i2c_add_adapter(&dev->i2c_adapter); -+ if (retval) -+ goto error; - +- - if (!retval) - dev->i2c_adapter = i2c_adap; - else @@ -201,16 +200,14 @@ index 463b81b..a0557e4 100644 } + +#endif /* CONFIG_I2C */ -diff --git a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h -index 49ae25d..8a5729a 100644 ---- a/drivers/media/video/hdpvr/hdpvr.h -+++ b/drivers/media/video/hdpvr/hdpvr.h -@@ -102,7 +102,7 @@ struct hdpvr_device { - struct work_struct worker; +diff -Naurp a/drivers/media/video/hdpvr/Makefile b/drivers/media/video/hdpvr/Makefile +--- a/drivers/media/video/hdpvr/Makefile 2010-07-06 17:36:38.000000000 -0400 ++++ b/drivers/media/video/hdpvr/Makefile 2010-07-06 17:35:17.000000000 -0400 +@@ -1,6 +1,4 @@ +-hdpvr-objs := hdpvr-control.o hdpvr-core.o hdpvr-video.o +- +-hdpvr-$(CONFIG_I2C) += hdpvr-i2c.o ++hdpvr-objs := hdpvr-control.o hdpvr-core.o hdpvr-i2c.o hdpvr-video.o - /* I2C adapter */ -- struct i2c_adapter *i2c_adapter; -+ struct i2c_adapter i2c_adapter; - /* I2C lock */ - struct mutex i2c_mutex; + obj-$(CONFIG_VIDEO_HDPVR) += hdpvr.o diff --git a/i915-fix-crt-hotplug-regression.patch b/i915-fix-crt-hotplug-regression.patch deleted file mode 100644 index 253bf67..0000000 --- 
a/i915-fix-crt-hotplug-regression.patch +++ /dev/null @@ -1,85 +0,0 @@ -From dec23057518b7035117a1a732aa48be6d34f1be8 Mon Sep 17 00:00:00 2001 -From: Andrew Lutomirski -Date: Sat, 12 Jun 2010 09:21:18 +0000 -Subject: i915: Fix CRT hotplug regression in 2.6.35-rc1 - -Commit 7a772c492fcfffae812ffca78a628e76fa57fe58 has two bugs which -made the hotplug problems on my laptop worse instead of better. - -First, it did not, in fact, disable the CRT plug interrupt -- it -disabled all the other hotplug interrupts. It seems rather doubtful -that that bit of the patch fixed anything, so let's just remove it. -(If you want to add it back, you probably meant ~CRT_HOTPLUG_INT_EN.) - -Second, on at least my GM45, setting CRT_HOTPLUG_ACTIVATION_PERIOD_64 -and CRT_HOTPLUG_VOLTAGE_COMPARE_50 (when they were previously unset) -causes a hotplug interrupt about three seconds later. The old code -never restored PORT_HOTPLUG_EN so this could only happen once, but -they new code restores those registers. So just set those bits when -we set up the interrupt in the first place. - -Signed-off-by: Andy Lutomirski ---- - drivers/gpu/drm/i915/i915_irq.c | 12 +++++++++++- - drivers/gpu/drm/i915/i915_reg.h | 1 - - drivers/gpu/drm/i915/intel_crt.c | 6 ------ - 3 files changed, 11 insertions(+), 8 deletions(-) - -diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c -index 2479be0..7acb1a6 100644 ---- a/drivers/gpu/drm/i915/i915_irq.c -+++ b/drivers/gpu/drm/i915/i915_irq.c -@@ -1400,8 +1400,18 @@ int i915_driver_irq_postinstall(struct drm_device *dev) - hotplug_en |= SDVOC_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) - hotplug_en |= SDVOB_HOTPLUG_INT_EN; -- if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) -+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { - hotplug_en |= CRT_HOTPLUG_INT_EN; -+ -+ /* Programming the CRT detection parameters tends -+ to generate a spurious hotplug event about three -+ seconds later. So just do it once. 
-+ */ -+ if (IS_G4X(dev)) -+ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; -+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; -+ } -+ - /* Ignore TV since it's buggy */ - - I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); -diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h -index 64b0a3a..d390b17 100644 ---- a/drivers/gpu/drm/i915/i915_reg.h -+++ b/drivers/gpu/drm/i915/i915_reg.h -@@ -1130,7 +1130,6 @@ - #define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) - #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) - #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) --#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ - - #define PORT_HOTPLUG_STAT 0x61114 - #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) -diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c -index 22ff384..ee0732b 100644 ---- a/drivers/gpu/drm/i915/intel_crt.c -+++ b/drivers/gpu/drm/i915/intel_crt.c -@@ -234,14 +234,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) - else - tries = 1; - hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN); -- hotplug_en &= CRT_HOTPLUG_MASK; - hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; - -- if (IS_G4X(dev)) -- hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; -- -- hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; -- - for (i = 0; i < tries ; i++) { - unsigned long timeout; - /* turn on the FORCE_DETECT */ --- -1.7.0.1 - diff --git a/ibmvscsi-fix-DMA-API-misuse.patch b/ibmvscsi-fix-DMA-API-misuse.patch new file mode 100644 index 0000000..5cb392e --- /dev/null +++ b/ibmvscsi-fix-DMA-API-misuse.patch @@ -0,0 +1,72 @@ +From b395ecef0de15c10459856e56a590ac1fe16be76 Mon Sep 17 00:00:00 2001 +From: FUJITA Tomonori +Date: Fri, 2 Apr 2010 15:50:24 +0900 +Subject: [SCSI] ibmvscsi: fix DMA API misuse + +ibmvscsi uses dma_unmap_single() for buffers mapped via +dma_map_sg(). It works however it's the API violation. 
The DMA debug +facility complains about it: + +http://marc.info/?l=linux-scsi&m=127018555013151&w=2 + +Reported-by: Sachin Sant +Tested-by: Sachin Sant +Signed-off-by: FUJITA Tomonori +Signed-off-by: James Bottomley + +(cherry picked from a71fa1fc43a29133f13ae6ada1a389ca298c0934) +--- + drivers/scsi/ibmvscsi/ibmvscsi.c | 29 ++--------------------------- + 1 files changed, 2 insertions(+), 27 deletions(-) + +diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c +index e475b79..3b14bbe 100644 +--- a/drivers/scsi/ibmvscsi/ibmvscsi.c ++++ b/drivers/scsi/ibmvscsi/ibmvscsi.c +@@ -321,16 +321,6 @@ static void set_srp_direction(struct scsi_cmnd *cmd, + srp_cmd->buf_fmt = fmt; + } + +-static void unmap_sg_list(int num_entries, +- struct device *dev, +- struct srp_direct_buf *md) +-{ +- int i; +- +- for (i = 0; i < num_entries; ++i) +- dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL); +-} +- + /** + * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format + * @cmd: srp_cmd whose additional_data member will be unmapped +@@ -348,24 +338,9 @@ static void unmap_cmd_data(struct srp_cmd *cmd, + + if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC) + return; +- else if (out_fmt == SRP_DATA_DESC_DIRECT || +- in_fmt == SRP_DATA_DESC_DIRECT) { +- struct srp_direct_buf *data = +- (struct srp_direct_buf *) cmd->add_data; +- dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL); +- } else { +- struct srp_indirect_buf *indirect = +- (struct srp_indirect_buf *) cmd->add_data; +- int num_mapped = indirect->table_desc.len / +- sizeof(struct srp_direct_buf); + +- if (num_mapped <= MAX_INDIRECT_BUFS) { +- unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]); +- return; +- } +- +- unmap_sg_list(num_mapped, dev, evt_struct->ext_list); +- } ++ if (evt_struct->cmnd) ++ scsi_dma_unmap(evt_struct->cmnd); + } + + static int map_sg_list(struct scsi_cmnd *cmd, int nseg, +-- +1.7.0.1 + diff --git a/input-synaptics-relax-capability-id-checks-on-new-hardware.patch b/input-synaptics-relax-capability-id-checks-on-new-hardware.patch deleted file mode 100644 index 957478d..0000000 --- a/input-synaptics-relax-capability-id-checks-on-new-hardware.patch +++ /dev/null @@ -1,56 +0,0 @@ -From: Dmitry Torokhov -Date: Wed, 21 Jul 2010 07:01:19 +0000 (-0700) -Subject: Input: synaptics - relax capability ID checks on newer hardware -X-Git-Tag: v2.6.35-rc6~1^2 -X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=3619b8fead04ab9de643712e757ef6b5f79fd1ab - -Input: synaptics - relax capability ID checks on newer hardware - -Older firmwares fixed the middle byte of the Synaptics capabilities -query to 0x47, but starting with firmware 7.5 the middle byte -represents submodel ID, sometimes also called "dash number". 
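
A few lines of stand-alone C show what the relaxed check amounts to (the
macros are copied from the synaptics.h hunk below; the query values in main()
are invented for the demonstration):

    #include <stdio.h>

    #define SYN_ID_MAJOR(i)        ((i) & 0x0f)
    #define SYN_ID_MINOR(i)        (((i) >> 16) & 0xff)
    #define SYN_ID_FULL(i)         ((SYN_ID_MAJOR(i) << 8) | SYN_ID_MINOR(i))
    #define SYN_CAP_SUBMODEL_ID(c) (((c) & 0x00ff00) >> 8)

    /* The relaxed test from synaptics_capability(): only firmware older than
     * 7.5 is still required to report the fixed 0x47 submodel byte.         */
    static int capability_id_ok(unsigned int identity, unsigned int capabilities)
    {
        if (SYN_ID_FULL(identity) < 0x705 &&
            SYN_CAP_SUBMODEL_ID(capabilities) != 0x47)
            return 0;
        return 1;
    }

    int main(void)
    {
        unsigned int fw62 = 0x024706;      /* major 6, minor 2 -> SYN_ID_FULL 0x602 */
        unsigned int fw75 = 0x054707;      /* major 7, minor 5 -> SYN_ID_FULL 0x705 */
        unsigned int cap_fixed = 0x0c4703; /* submodel byte 0x47                    */
        unsigned int cap_dash  = 0x0c1103; /* submodel byte 0x11 ("dash number")    */

        printf("6.2 fw, submodel 0x47: %s\n", capability_id_ok(fw62, cap_fixed) ? "accepted" : "rejected");
        printf("6.2 fw, submodel 0x11: %s\n", capability_id_ok(fw62, cap_dash)  ? "accepted" : "rejected");
        printf("7.5 fw, submodel 0x11: %s\n", capability_id_ok(fw75, cap_dash)  ? "accepted" : "rejected");
        return 0;
    }

Firmware 7.5 and newer is therefore accepted whatever the submodel byte says,
while older firmware must still report 0x47.
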
- -Reported-and-tested-by: Miroslav Šulc -Signed-off-by: Dmitry Torokhov ---- - -diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c -index 9ba9c4a..705589d 100644 ---- a/drivers/input/mouse/synaptics.c -+++ b/drivers/input/mouse/synaptics.c -@@ -141,8 +141,13 @@ static int synaptics_capability(struct psmouse *psmouse) - priv->capabilities = (cap[0] << 16) | (cap[1] << 8) | cap[2]; - priv->ext_cap = priv->ext_cap_0c = 0; - -- if (!SYN_CAP_VALID(priv->capabilities)) -+ /* -+ * Older firmwares had submodel ID fixed to 0x47 -+ */ -+ if (SYN_ID_FULL(priv->identity) < 0x705 && -+ SYN_CAP_SUBMODEL_ID(priv->capabilities) != 0x47) { - return -1; -+ } - - /* - * Unless capExtended is set the rest of the flags should be ignored -diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h -index 7d4d5e1..b6aa7d2 100644 ---- a/drivers/input/mouse/synaptics.h -+++ b/drivers/input/mouse/synaptics.h -@@ -47,7 +47,7 @@ - #define SYN_CAP_FOUR_BUTTON(c) ((c) & (1 << 3)) - #define SYN_CAP_MULTIFINGER(c) ((c) & (1 << 1)) - #define SYN_CAP_PALMDETECT(c) ((c) & (1 << 0)) --#define SYN_CAP_VALID(c) ((((c) & 0x00ff00) >> 8) == 0x47) -+#define SYN_CAP_SUBMODEL_ID(c) (((c) & 0x00ff00) >> 8) - #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) - #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) - #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) -@@ -66,6 +66,7 @@ - #define SYN_ID_MODEL(i) (((i) >> 4) & 0x0f) - #define SYN_ID_MAJOR(i) ((i) & 0x0f) - #define SYN_ID_MINOR(i) (((i) >> 16) & 0xff) -+#define SYN_ID_FULL(i) ((SYN_ID_MAJOR(i) << 8) | SYN_ID_MINOR(i)) - #define SYN_ID_IS_SYNAPTICS(i) ((((i) >> 8) & 0xff) == 0x47) - - /* synaptics special commands */ diff --git a/iwlwifi-Recover-TX-flow-failure.patch b/iwlwifi-Recover-TX-flow-failure.patch deleted file mode 100644 index 4d0cd5b..0000000 --- a/iwlwifi-Recover-TX-flow-failure.patch +++ /dev/null @@ -1,162 +0,0 @@ -From 5b51e801eef53be8e521316eea9e78e5c4595fd4 Mon Sep 17 00:00:00 2001 -From: Wey-Yi Guy -Date: Thu, 4 Mar 2010 13:38:59 -0800 -Subject: [PATCH] iwlwifi: Recover TX flow failure - -Monitors the tx statistics to detect the drop in throughput. -When the throughput drops, the ratio of the actual_ack_count and the -expected_ack_count also drops. At the same time, the aggregated -ba_timeout (the number of ba timeout retries) also rises. If the -actual_ack_count/expected_ack_count ratio is 0 and the number of ba -timeout retries rises to BA_TIMEOUT_MAX, no tx packets can be delivered. -Reloading the uCode and bring the system back to normal operational -state. 
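
The decision the patch adds to iwl-rx.c boils down to the following; the
constants and conditions mirror the hunk below, while the sample deltas fed in
from main() are invented:

    #include <stdio.h>

    #define ACK_CNT_RATIO   50   /* minimum actual/expected ack ratio, percent */
    #define BA_TIMEOUT_CNT   5
    #define BA_TIMEOUT_MAX  16

    /* Returns 1 when the statistics deltas say the TX path is dead and the
     * firmware should be reloaded (iwl_force_reset(IWL_FW_RESET) in the
     * real driver).                                                          */
    static int tx_flow_needs_fw_reload(int agg_tids, int actual_ack_delta,
                                       int expected_ack_delta, int ba_timeout_delta)
    {
        if (agg_tids > 0 && expected_ack_delta > 0 &&
            (actual_ack_delta * 100) / expected_ack_delta < ACK_CNT_RATIO &&
            ba_timeout_delta > BA_TIMEOUT_CNT)
            return actual_ack_delta == 0 && ba_timeout_delta >= BA_TIMEOUT_MAX;
        return 0;
    }

    int main(void)
    {
        printf("healthy link : %d\n", tx_flow_needs_fw_reload(1, 180, 200,  0));
        printf("degraded link: %d\n", tx_flow_needs_fw_reload(1,  60, 200,  8));
        printf("dead TX path : %d\n", tx_flow_needs_fw_reload(1,   0, 200, 16));
        return 0;
    }

Only the last case, no acks at all plus BA_TIMEOUT_MAX timed-out block-ack
retries, triggers the firmware reload; a merely degraded link is only logged.
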
- -Signed-off-by: Trieu 'Andrew' Nguyen -Signed-off-by: Wey-Yi Guy -Signed-off-by: Reinette Chatre ---- - drivers/net/wireless/iwlwifi/iwl-agn.c | 14 ++++++++- - drivers/net/wireless/iwlwifi/iwl-dev.h | 21 +++++++++++++ - drivers/net/wireless/iwlwifi/iwl-rx.c | 50 +++++++++++++++++++++++++++++++- - 3 files changed, 83 insertions(+), 2 deletions(-) - -diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c -index 07a9a02..dc751cb 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-agn.c -+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c -@@ -2965,10 +2965,21 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, - return ret; - case IEEE80211_AMPDU_TX_START: - IWL_DEBUG_HT(priv, "start Tx\n"); -- return iwl_tx_agg_start(priv, sta->addr, tid, ssn); -+ ret = iwl_tx_agg_start(priv, sta->addr, tid, ssn); -+ if (ret == 0) { -+ priv->_agn.agg_tids_count++; -+ IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n", -+ priv->_agn.agg_tids_count); -+ } -+ return ret; - case IEEE80211_AMPDU_TX_STOP: - IWL_DEBUG_HT(priv, "stop Tx\n"); - ret = iwl_tx_agg_stop(priv, sta->addr, tid); -+ if ((ret == 0) && (priv->_agn.agg_tids_count > 0)) { -+ priv->_agn.agg_tids_count--; -+ IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n", -+ priv->_agn.agg_tids_count); -+ } - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return 0; - else -@@ -3399,6 +3410,7 @@ static int iwl_init_drv(struct iwl_priv *priv) - priv->iw_mode = NL80211_IFTYPE_STATION; - priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; - priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; -+ priv->_agn.agg_tids_count = 0; - - /* initialize force reset */ - priv->force_reset[IWL_RF_RESET].reset_duration = -diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h -index 447e14b..e2a6b76 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-dev.h -+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h -@@ -1275,6 +1275,26 @@ struct iwl_priv { - void *shared_virt; - dma_addr_t shared_phys; - /*End*/ -+ union { -+#if defined(CONFIG_IWLAGN) || defined(CONFIG_IWLAGN_MODULE) -+ struct { -+ /* INT ICT Table */ -+ __le32 *ict_tbl; -+ void *ict_tbl_vir; -+ dma_addr_t ict_tbl_dma; -+ dma_addr_t aligned_ict_tbl_dma; -+ int ict_index; -+ u32 inta; -+ bool use_ict; -+ /* -+ * reporting the number of tids has AGG on. 0 means -+ * no AGGREGATION -+ */ -+ u8 agg_tids_count; -+ } _agn; -+#endif -+ }; -+ - struct iwl_hw_params hw_params; - - /* INT ICT Table */ -diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c -index fabc52f..f48d685 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-rx.c -+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c -@@ -617,9 +617,18 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv, - - #define REG_RECALIB_PERIOD (60) - -+/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ -+#define ACK_CNT_RATIO (50) -+#define BA_TIMEOUT_CNT (5) -+#define BA_TIMEOUT_MAX (16) -+ - #define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n" - /* -- * This function checks for plcp error. -+ * This function checks for plcp error, ACK count ratios, aggregated BA -+ * timeout retries. -+ * - When the ACK count ratio is 0 and aggregated BA timeout retries is -+ * exceeding the BA_TIMEOUT_MAX, it will recover the failure by resetting -+ * the firmware. - * - When the plcp error is exceeding the thresholds, it will reset the radio - * to improve the throughput. 
- */ -@@ -629,6 +638,45 @@ void iwl_recover_from_statistics(struct iwl_priv *priv, - int combined_plcp_delta; - unsigned int plcp_msec; - unsigned long plcp_received_jiffies; -+ int actual_ack_cnt_delta; -+ int expected_ack_cnt_delta; -+ int ba_timeout_delta; -+ -+ actual_ack_cnt_delta = -+ le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) - -+ le32_to_cpu(priv->statistics.tx.actual_ack_cnt); -+ expected_ack_cnt_delta = -+ le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) - -+ le32_to_cpu(priv->statistics.tx.expected_ack_cnt); -+ ba_timeout_delta = -+ le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) - -+ le32_to_cpu(priv->statistics.tx.agg.ba_timeout); -+ if ((priv->_agn.agg_tids_count > 0) && -+ (expected_ack_cnt_delta > 0) && -+ (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta) -+ < ACK_CNT_RATIO) && -+ (ba_timeout_delta > BA_TIMEOUT_CNT)) { -+ IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d," -+ " expected_ack_cnt = %d\n", -+ actual_ack_cnt_delta, expected_ack_cnt_delta); -+ -+#ifdef CONFIG_IWLWIFI_DEBUG -+ IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n", -+ priv->delta_statistics.tx.rx_detected_cnt); -+ IWL_DEBUG_RADIO(priv, -+ "ack_or_ba_timeout_collision delta = %d\n", -+ priv->delta_statistics.tx. -+ ack_or_ba_timeout_collision); -+#endif -+ IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n", -+ ba_timeout_delta); -+ if ((actual_ack_cnt_delta == 0) && -+ (ba_timeout_delta >= BA_TIMEOUT_MAX)) { -+ IWL_DEBUG_RADIO(priv, -+ "call iwl_force_reset(IWL_FW_RESET)\n"); -+ iwl_force_reset(priv, IWL_FW_RESET); -+ } -+ } - - /* - * check for plcp_err and trigger radio reset if it exceeds --- -1.7.0.1 - diff --git a/iwlwifi-add-internal-short-scan-support-for-3945.patch b/iwlwifi-add-internal-short-scan-support-for-3945.patch deleted file mode 100644 index 6a0d54a..0000000 --- a/iwlwifi-add-internal-short-scan-support-for-3945.patch +++ /dev/null @@ -1,90 +0,0 @@ -From dcde3533b9f501ad079c297b3bf7659739c4c287 Mon Sep 17 00:00:00 2001 -From: Wey-Yi Guy -Date: Wed, 24 Feb 2010 08:28:30 -0800 -Subject: [PATCH] iwlwifi: add internal short scan support for 3945 - -Add internal short scan support for 3945 NIC, This allows 3945 NIC -to support radio reset request like the other series of NICs. 
- -Signed-off-by: Wey-Yi Guy -Signed-off-by: Reinette Chatre ---- - drivers/net/wireless/iwlwifi/iwl3945-base.c | 22 ++++++++++++++-------- - 1 files changed, 14 insertions(+), 8 deletions(-) - -diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c -index b74a56c..19c77a8 100644 ---- a/drivers/net/wireless/iwlwifi/iwl3945-base.c -+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c -@@ -2821,7 +2821,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data) - .len = sizeof(struct iwl3945_scan_cmd), - .flags = CMD_SIZE_HUGE, - }; -- int rc = 0; - struct iwl3945_scan_cmd *scan; - struct ieee80211_conf *conf = NULL; - u8 n_probes = 0; -@@ -2849,7 +2848,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data) - if (test_bit(STATUS_SCAN_HW, &priv->status)) { - IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests " - "Ignoring second request.\n"); -- rc = -EIO; - goto done; - } - -@@ -2884,7 +2882,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data) - priv->scan = kmalloc(sizeof(struct iwl3945_scan_cmd) + - IWL_MAX_SCAN_SIZE, GFP_KERNEL); - if (!priv->scan) { -- rc = -ENOMEM; -+ IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n"); - goto done; - } - } -@@ -2927,7 +2925,9 @@ static void iwl3945_bg_request_scan(struct work_struct *data) - scan_suspend_time, interval); - } - -- if (priv->scan_request->n_ssids) { -+ if (priv->is_internal_short_scan) { -+ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); -+ } else if (priv->scan_request->n_ssids) { - int i, p = 0; - IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); - for (i = 0; i < priv->scan_request->n_ssids; i++) { -@@ -2975,13 +2975,20 @@ static void iwl3945_bg_request_scan(struct work_struct *data) - goto done; - } - -- scan->tx_cmd.len = cpu_to_le16( -+ if (!priv->is_internal_short_scan) { -+ scan->tx_cmd.len = cpu_to_le16( - iwl_fill_probe_req(priv, - (struct ieee80211_mgmt *)scan->data, - priv->scan_request->ie, - priv->scan_request->ie_len, - IWL_MAX_SCAN_SIZE - sizeof(*scan))); -- -+ } else { -+ scan->tx_cmd.len = cpu_to_le16( -+ iwl_fill_probe_req(priv, -+ (struct ieee80211_mgmt *)scan->data, -+ NULL, 0, -+ IWL_MAX_SCAN_SIZE - sizeof(*scan))); -+ } - /* select Rx antennas */ - scan->flags |= iwl3945_get_antenna_flags(priv); - -@@ -3003,8 +3010,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data) - scan->len = cpu_to_le16(cmd.len); - - set_bit(STATUS_SCAN_HW, &priv->status); -- rc = iwl_send_cmd_sync(priv, &cmd); -- if (rc) -+ if (iwl_send_cmd_sync(priv, &cmd)) - goto done; - - queue_delayed_work(priv->workqueue, &priv->scan_check, --- -1.7.0.1 - diff --git a/iwlwifi-cancel-scan-watchdog-in-iwl_bg_abort_scan.patch b/iwlwifi-cancel-scan-watchdog-in-iwl_bg_abort_scan.patch new file mode 100644 index 0000000..465d2ac --- /dev/null +++ b/iwlwifi-cancel-scan-watchdog-in-iwl_bg_abort_scan.patch @@ -0,0 +1,58 @@ +commit a69b03e941abae00380fc6bc1877fb797a1b31e6 +Author: John W. 
Linville +Date: Mon Jun 14 14:30:25 2010 -0400 + + iwlwifi: cancel scan watchdog in iwl_bg_abort_scan + + Avoids this: + + WARNING: at net/mac80211/scan.c:312 ieee80211_scan_completed+0x5f/0x1f1 + [mac80211]() + Hardware name: Latitude E5400 + Modules linked in: aes_x86_64 aes_generic fuse ipt_MASQUERADE iptable_nat + nf_nat rfcomm sco bridge stp llc bnep l2cap sunrpc cpufreq_ondemand + acpi_cpufreq freq_table xt_physdev ip6t_REJECT nf_conntrack_ipv6 + ip6table_filter ip6_tables ipv6 kvm_intel kvm uinput arc4 ecb + snd_hda_codec_intelhdmi snd_hda_codec_idt snd_hda_intel iwlagn snd_hda_codec + snd_hwdep snd_seq snd_seq_device iwlcore snd_pcm dell_wmi sdhci_pci sdhci + iTCO_wdt tg3 dell_laptop mmc_core i2c_i801 wmi mac80211 snd_timer + iTCO_vendor_support btusb joydev dcdbas cfg80211 bluetooth snd soundcore + microcode rfkill snd_page_alloc firewire_ohci firewire_core crc_itu_t + yenta_socket rsrc_nonstatic i915 drm_kms_helper drm i2c_algo_bit i2c_core video + output [last unloaded: scsi_wait_scan] + Pid: 979, comm: iwlagn Tainted: G W 2.6.33.3-85.fc13.x86_64 #1 + Call Trace: + [] warn_slowpath_common+0x77/0x8f + [] warn_slowpath_null+0xf/0x11 + [] ieee80211_scan_completed+0x5f/0x1f1 [mac80211] + [] iwl_bg_scan_completed+0xbb/0x17a [iwlcore] + [] worker_thread+0x1a4/0x232 + [] ? iwl_bg_scan_completed+0x0/0x17a [iwlcore] + [] ? autoremove_wake_function+0x0/0x34 + [] ? worker_thread+0x0/0x232 + [] kthread+0x7a/0x82 + [] kernel_thread_helper+0x4/0x10 + [] ? kthread+0x0/0x82 + [] ? kernel_thread_helper+0x0/0x10 + + Reported here: + + https://bugzilla.redhat.com/show_bug.cgi?id=590436 + + Signed-off-by: John W. Linville + Reported-by: Mihai Harpau + Cc: stable@kernel.org + Acked-by: Reinette Chatre + +diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c +index 5d3f51f..386c5f9 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-scan.c ++++ b/drivers/net/wireless/iwlwifi/iwl-scan.c +@@ -491,6 +491,7 @@ void iwl_bg_abort_scan(struct work_struct *work) + + mutex_lock(&priv->mutex); + ++ cancel_delayed_work_sync(&priv->scan_check); + set_bit(STATUS_SCAN_ABORTING, &priv->status); + iwl_send_scan_abort(priv); + diff --git a/iwlwifi-code-cleanup-for-connectivity-recovery.patch b/iwlwifi-code-cleanup-for-connectivity-recovery.patch deleted file mode 100644 index 4a3ab5c..0000000 --- a/iwlwifi-code-cleanup-for-connectivity-recovery.patch +++ /dev/null @@ -1,278 +0,0 @@ -From 56cf16e34b896ac40c6707eb053d45d2cab18bbd Mon Sep 17 00:00:00 2001 -From: Wey-Yi Guy -Date: Fri, 5 Mar 2010 14:22:46 -0800 -Subject: [PATCH] iwlwifi: code cleanup for connectivity recovery - -Split the connectivity check and recovery routine into separated -functions based on the types - 1. iwl_good_ack_health() - check for ack count - 2. iwl_good_plcp_health() - check for plcp error - -Based on the type of errors being detected, different recovery methods -will be used to bring the system back to normal operational state. - -Because different NIC has different HW and uCode, the behavior is also -different; these functions thus now form part of the ops infrastructure, -so we can have more control on how to monitor and recover from error condition -case per device. 
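
The resulting dispatch can be sketched in stand-alone C; the shape mirrors
iwl_recover_from_statistics() and the per-device ops in the hunks below,
stripped of the association and exit-pending guards, and the stats structure
and sample values are invented:

    #include <stdio.h>

    enum reset { NO_RESET, IWL_RF_RESET, IWL_FW_RESET };

    struct stats { int ack_ok; int plcp_ok; };   /* stand-in for the uCode statistics */

    /* Per-device hooks, mirroring the new check_*_health members of iwl_lib_ops. */
    struct lib_ops {
        int (*check_ack_health)(const struct stats *);
        int (*check_plcp_health)(const struct stats *);
    };

    static int good_ack_health(const struct stats *s)  { return s->ack_ok; }
    static int good_plcp_health(const struct stats *s) { return s->plcp_ok; }

    /* A failing ack check reloads the firmware, a failing plcp check only
     * resets the radio.                                                      */
    static enum reset recover_from_statistics(const struct lib_ops *ops,
                                              const struct stats *s)
    {
        if (ops->check_ack_health) {
            if (!ops->check_ack_health(s))
                return IWL_FW_RESET;
        } else if (ops->check_plcp_health) {
            if (!ops->check_plcp_health(s))
                return IWL_RF_RESET;
        }
        return NO_RESET;
    }

    int main(void)
    {
        struct lib_ops iwl4965 = { .check_plcp_health = good_plcp_health };  /* plcp only  */
        struct lib_ops iwl5000 = { .check_ack_health  = good_ack_health,
                                   .check_plcp_health = good_plcp_health };  /* both hooks */
        struct stats bad_ack  = { .ack_ok = 0, .plcp_ok = 1 };
        struct stats bad_plcp = { .ack_ok = 1, .plcp_ok = 0 };

        printf("4965, plcp errors -> %d (IWL_RF_RESET)\n", recover_from_statistics(&iwl4965, &bad_plcp));
        printf("5000, low acks    -> %d (IWL_FW_RESET)\n", recover_from_statistics(&iwl5000, &bad_ack));
        return 0;
    }
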
- -Signed-off-by: Wey-Yi Guy -Signed-off-by: Reinette Chatre ---- - drivers/net/wireless/iwlwifi/iwl-1000.c | 3 +- - drivers/net/wireless/iwlwifi/iwl-4965.c | 2 +- - drivers/net/wireless/iwlwifi/iwl-5000.c | 6 +- - drivers/net/wireless/iwlwifi/iwl-6000.c | 6 +- - drivers/net/wireless/iwlwifi/iwl-core.h | 11 +++- - drivers/net/wireless/iwlwifi/iwl-rx.c | 97 +++++++++++++++++++++---------- - 6 files changed, 85 insertions(+), 40 deletions(-) - -diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c -index 2597574..7087631 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-1000.c -+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c -@@ -212,7 +212,8 @@ static struct iwl_lib_ops iwl1000_lib = { - }, - .add_bcast_station = iwl_add_bcast_station, - .recover_from_tx_stall = iwl_bg_monitor_recover, -- .recover_from_statistics = iwl_recover_from_statistics, -+ .check_plcp_health = iwl_good_plcp_health, -+ .check_ack_health = iwl_good_ack_health, - }; - - static const struct iwl_ops iwl1000_ops = { -diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c -index 6dd4328..dcca310 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-4965.c -+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c -@@ -2217,7 +2217,7 @@ static struct iwl_lib_ops iwl4965_lib = { - .set_ct_kill = iwl4965_set_ct_threshold, - }, - .add_bcast_station = iwl_add_bcast_station, -- .recover_from_statistics = iwl_recover_from_statistics, -+ .check_plcp_health = iwl_good_plcp_health, - }; - - static const struct iwl_ops iwl4965_ops = { -diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c -index 0c2469c..8e0dd13 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-5000.c -+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c -@@ -1501,7 +1501,8 @@ struct iwl_lib_ops iwl5000_lib = { - }, - .add_bcast_station = iwl_add_bcast_station, - .recover_from_tx_stall = iwl_bg_monitor_recover, -- .recover_from_statistics = iwl_recover_from_statistics, -+ .check_plcp_health = iwl_good_plcp_health, -+ .check_ack_health = iwl_good_ack_health, - }; - - static struct iwl_lib_ops iwl5150_lib = { -@@ -1557,7 +1558,8 @@ static struct iwl_lib_ops iwl5150_lib = { - }, - .add_bcast_station = iwl_add_bcast_station, - .recover_from_tx_stall = iwl_bg_monitor_recover, -- .recover_from_statistics = iwl_recover_from_statistics, -+ .check_plcp_health = iwl_good_plcp_health, -+ .check_ack_health = iwl_good_ack_health, - }; - - static const struct iwl_ops iwl5000_ops = { -diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c -index 189a8ce..1d4fea1 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-6000.c -+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c -@@ -278,7 +278,8 @@ static struct iwl_lib_ops iwl6000_lib = { - }, - .add_bcast_station = iwl_add_bcast_station, - .recover_from_tx_stall = iwl_bg_monitor_recover, -- .recover_from_statistics = iwl_recover_from_statistics, -+ .check_plcp_health = iwl_good_plcp_health, -+ .check_ack_health = iwl_good_ack_health, - }; - - static const struct iwl_ops iwl6000_ops = { -@@ -345,7 +346,8 @@ static struct iwl_lib_ops iwl6050_lib = { - }, - .add_bcast_station = iwl_add_bcast_station, - .recover_from_tx_stall = iwl_bg_monitor_recover, -- .recover_from_statistics = iwl_recover_from_statistics, -+ .check_plcp_health = iwl_good_plcp_health, -+ .check_ack_health = iwl_good_ack_health, - }; - - static const struct iwl_ops iwl6050_ops = { -diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h 
b/drivers/net/wireless/iwlwifi/iwl-core.h -index d67048e..5234a85 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-core.h -+++ b/drivers/net/wireless/iwlwifi/iwl-core.h -@@ -193,8 +193,11 @@ struct iwl_lib_ops { - void (*add_bcast_station)(struct iwl_priv *priv); - /* recover from tx queue stall */ - void (*recover_from_tx_stall)(unsigned long data); -- /* recover from errors showed in statistics */ -- void (*recover_from_statistics)(struct iwl_priv *priv, -+ /* check for plcp health */ -+ bool (*check_plcp_health)(struct iwl_priv *priv, -+ struct iwl_rx_packet *pkt); -+ /* check for ack health */ -+ bool (*check_ack_health)(struct iwl_priv *priv, - struct iwl_rx_packet *pkt); - }; - -@@ -438,7 +441,9 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); - void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); --void iwl_recover_from_statistics(struct iwl_priv *priv, -+bool iwl_good_plcp_health(struct iwl_priv *priv, -+ struct iwl_rx_packet *pkt); -+bool iwl_good_ack_health(struct iwl_priv *priv, - struct iwl_rx_packet *pkt); - void iwl_rx_statistics(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c -index f48d685..506ccf7 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-rx.c -+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c -@@ -622,24 +622,18 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv, - #define BA_TIMEOUT_CNT (5) - #define BA_TIMEOUT_MAX (16) - --#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n" --/* -- * This function checks for plcp error, ACK count ratios, aggregated BA -- * timeout retries. -- * - When the ACK count ratio is 0 and aggregated BA timeout retries is -- * exceeding the BA_TIMEOUT_MAX, it will recover the failure by resetting -- * the firmware. -- * - When the plcp error is exceeding the thresholds, it will reset the radio -- * to improve the throughput. -+/** -+ * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries. -+ * -+ * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding -+ * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal -+ * operation state. - */ --void iwl_recover_from_statistics(struct iwl_priv *priv, -- struct iwl_rx_packet *pkt) -+bool iwl_good_ack_health(struct iwl_priv *priv, -+ struct iwl_rx_packet *pkt) - { -- int combined_plcp_delta; -- unsigned int plcp_msec; -- unsigned long plcp_received_jiffies; -- int actual_ack_cnt_delta; -- int expected_ack_cnt_delta; -+ bool rc = true; -+ int actual_ack_cnt_delta, expected_ack_cnt_delta; - int ba_timeout_delta; - - actual_ack_cnt_delta = -@@ -670,13 +664,27 @@ void iwl_recover_from_statistics(struct iwl_priv *priv, - #endif - IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n", - ba_timeout_delta); -- if ((actual_ack_cnt_delta == 0) && -- (ba_timeout_delta >= BA_TIMEOUT_MAX)) { -- IWL_DEBUG_RADIO(priv, -- "call iwl_force_reset(IWL_FW_RESET)\n"); -- iwl_force_reset(priv, IWL_FW_RESET); -- } -+ if (!actual_ack_cnt_delta && -+ (ba_timeout_delta >= BA_TIMEOUT_MAX)) -+ rc = false; - } -+ return rc; -+} -+EXPORT_SYMBOL(iwl_good_ack_health); -+ -+/** -+ * iwl_good_plcp_health - checks for plcp error. -+ * -+ * When the plcp error is exceeding the thresholds, reset the radio -+ * to improve the throughput. 
-+ */ -+bool iwl_good_plcp_health(struct iwl_priv *priv, -+ struct iwl_rx_packet *pkt) -+{ -+ bool rc = true; -+ int combined_plcp_delta; -+ unsigned int plcp_msec; -+ unsigned long plcp_received_jiffies; - - /* - * check for plcp_err and trigger radio reset if it exceeds -@@ -711,7 +719,8 @@ void iwl_recover_from_statistics(struct iwl_priv *priv, - * combined_plcp_delta, - * plcp_msec - */ -- IWL_DEBUG_RADIO(priv, PLCP_MSG, -+ IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, " -+ "%u, %u, %u, %u, %d, %u mSecs\n", - priv->cfg->plcp_delta_threshold, - le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err), - le32_to_cpu(priv->statistics.rx.ofdm.plcp_err), -@@ -719,15 +728,42 @@ void iwl_recover_from_statistics(struct iwl_priv *priv, - le32_to_cpu( - priv->statistics.rx.ofdm_ht.plcp_err), - combined_plcp_delta, plcp_msec); -- /* -- * Reset the RF radio due to the high plcp -- * error rate -- */ -- iwl_force_reset(priv, IWL_RF_RESET); -+ rc = false; -+ } -+ } -+ return rc; -+} -+EXPORT_SYMBOL(iwl_good_plcp_health); -+ -+static void iwl_recover_from_statistics(struct iwl_priv *priv, -+ struct iwl_rx_packet *pkt) -+{ -+ if (test_bit(STATUS_EXIT_PENDING, &priv->status)) -+ return; -+ if (iwl_is_associated(priv)) { -+ if (priv->cfg->ops->lib->check_ack_health) { -+ if (!priv->cfg->ops->lib->check_ack_health( -+ priv, pkt)) { -+ /* -+ * low ack count detected -+ * restart Firmware -+ */ -+ IWL_ERR(priv, "low ack count detected, " -+ "restart firmware\n"); -+ iwl_force_reset(priv, IWL_FW_RESET); -+ } -+ } else if (priv->cfg->ops->lib->check_plcp_health) { -+ if (!priv->cfg->ops->lib->check_plcp_health( -+ priv, pkt)) { -+ /* -+ * high plcp error detected -+ * reset Radio -+ */ -+ iwl_force_reset(priv, IWL_RF_RESET); -+ } - } - } - } --EXPORT_SYMBOL(iwl_recover_from_statistics); - - void iwl_rx_statistics(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -@@ -749,8 +785,7 @@ void iwl_rx_statistics(struct iwl_priv *priv, - #ifdef CONFIG_IWLWIFI_DEBUG - iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); - #endif -- if (priv->cfg->ops->lib->recover_from_statistics) -- priv->cfg->ops->lib->recover_from_statistics(priv, pkt); -+ iwl_recover_from_statistics(priv, pkt); - - memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); - --- -1.7.0.1 - diff --git a/iwlwifi-fix-internal-scan-race.patch b/iwlwifi-fix-internal-scan-race.patch new file mode 100644 index 0000000..18b3156 --- /dev/null +++ b/iwlwifi-fix-internal-scan-race.patch @@ -0,0 +1,123 @@ +From reinette.chatre@intel.com Thu May 13 17:49:59 2010 +Return-path: +Envelope-to: linville@tuxdriver.com +Delivery-date: Thu, 13 May 2010 17:49:59 -0400 +Received: from mga09.intel.com ([134.134.136.24]) + by smtp.tuxdriver.com with esmtp (Exim 4.63) + (envelope-from ) + id 1OCgI1-0007H3-Eg + for linville@tuxdriver.com; Thu, 13 May 2010 17:49:59 -0400 +Received: from orsmga002.jf.intel.com ([10.7.209.21]) + by orsmga102.jf.intel.com with ESMTP; 13 May 2010 14:48:04 -0700 +X-ExtLoop1: 1 +X-IronPort-AV: E=Sophos;i="4.53,224,1272870000"; + d="scan'208";a="517743256" +Received: from rchatre-desk.amr.corp.intel.com.jf.intel.com (HELO localhost.localdomain) ([134.134.15.94]) + by orsmga002.jf.intel.com with ESMTP; 13 May 2010 14:49:12 -0700 +From: Reinette Chatre +To: linville@tuxdriver.com +Cc: linux-wireless@vger.kernel.org, ipw3945-devel@lists.sourceforge.net, Reinette Chatre +Subject: [PATCH 1/2] iwlwifi: fix internal scan race +Date: Thu, 13 May 2010 14:49:44 -0700 +Message-Id: <1273787385-9248-2-git-send-email-reinette.chatre@intel.com> +X-Mailer: 
git-send-email 1.6.3.3 +In-Reply-To: <1273787385-9248-1-git-send-email-reinette.chatre@intel.com> +References: <1273787385-9248-1-git-send-email-reinette.chatre@intel.com> +X-Spam-Score: -4.2 (----) +X-Spam-Status: No +Status: RO +Content-Length: 3370 +Lines: 91 + +From: Reinette Chatre + +It is possible for internal scan to race against itself if the device is +not returning the scan results from first requests. What happens in this +case is the cleanup done during the abort of the first internal scan also +cleans up part of the new scan, causing it to access memory it shouldn't. + +Here are details: +* First internal scan is triggered and scan command sent to device. +* After seven seconds there is no scan results so the watchdog timer + triggers a scan abort. +* The scan abort succeeds and a SCAN_COMPLETE_NOTIFICATION is received for + failed scan. +* During processing of SCAN_COMPLETE_NOTIFICATION we clear STATUS_SCANNING + and queue the "scan_completed" work. +** At this time, since the problem that caused the internal scan in first + place is still present, a new internal scan is triggered. +The behavior at this point is a bit different between 2.6.34 and 2.6.35 +since 2.6.35 has a lot of this synchronized. The rest of the race +description will thus be generalized. +** As part of preparing for the scan "is_internal_short_scan" is set to +true. +* At this point the completion work for fist scan is run. As part of this + there is some locking missing around the "is_internal_short_scan" + variable and it is set to "false". +** Now the second scan runs and it considers itself a real (not internal0 + scan and thus causes problems with wrong memory being accessed. + +The fix is twofold. +* Since "is_internal_short_scan" should be protected by mutex, fix this in + scan completion work so that changes to it can be serialized. +* Do not queue a new internal scan if one is in progress. + +This fixes https://bugzilla.kernel.org/show_bug.cgi?id=15824 + +Signed-off-by: Reinette Chatre +--- + drivers/net/wireless/iwlwifi/iwl-scan.c | 21 ++++++++++++++++++--- + 1 files changed, 18 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c +index 2367286..a2c4855 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-scan.c ++++ b/drivers/net/wireless/iwlwifi/iwl-scan.c +@@ -560,6 +560,11 @@ static void iwl_bg_start_internal_scan(struct work_struct *work) + + mutex_lock(&priv->mutex); + ++ if (priv->is_internal_short_scan == true) { ++ IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n"); ++ goto unlock; ++ } ++ + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_SCAN(priv, "not ready or exit pending\n"); + goto unlock; +@@ -957,17 +962,27 @@ void iwl_bg_scan_completed(struct work_struct *work) + { + struct iwl_priv *priv = + container_of(work, struct iwl_priv, scan_completed); ++ bool internal = false; + + IWL_DEBUG_SCAN(priv, "SCAN complete scan\n"); + + cancel_delayed_work(&priv->scan_check); + +- if (!priv->is_internal_short_scan) +- ieee80211_scan_completed(priv->hw, false); +- else { ++ mutex_lock(&priv->mutex); ++ if (priv->is_internal_short_scan) { + priv->is_internal_short_scan = false; + IWL_DEBUG_SCAN(priv, "internal short scan completed\n"); ++ internal = true; + } ++ mutex_unlock(&priv->mutex); ++ ++ /* ++ * Do not hold mutex here since this will cause mac80211 to call ++ * into driver again into functions that will attempt to take ++ * mutex. 
++ */ ++ if (!internal) ++ ieee80211_scan_completed(priv->hw, false); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; +-- +1.6.3.3 + + + diff --git a/iwlwifi-fix-scan-races.patch b/iwlwifi-fix-scan-races.patch new file mode 100644 index 0000000..2e00f00 --- /dev/null +++ b/iwlwifi-fix-scan-races.patch @@ -0,0 +1,139 @@ +commit 88be026490ed89c2ffead81a52531fbac5507e01 +Author: Johannes Berg +Date: Wed Apr 7 00:21:36 2010 -0700 + + iwlwifi: fix scan races + + When an internal scan is started, nothing protects the + is_internal_short_scan variable which can cause crashes, + cf. https://bugzilla.kernel.org/show_bug.cgi?id=15667. + Fix this by making the short scan request use the mutex + for locking, which requires making the request go to a + work struct so that it can sleep. + + Reported-by: Peter Zijlstra + Signed-off-by: Johannes Berg + Signed-off-by: Reinette Chatre + +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c +index e4c2e1e..ba0fdba 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-agn.c ++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c +@@ -3330,6 +3330,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv) + + cancel_delayed_work_sync(&priv->init_alive_start); + cancel_delayed_work(&priv->scan_check); ++ cancel_work_sync(&priv->start_internal_scan); + cancel_delayed_work(&priv->alive_start); + cancel_work_sync(&priv->beacon_update); + del_timer_sync(&priv->statistics_periodic); +diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c +index 894bcb8..1459cdb 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-core.c ++++ b/drivers/net/wireless/iwlwifi/iwl-core.c +@@ -3357,7 +3357,6 @@ static void iwl_force_rf_reset(struct iwl_priv *priv) + */ + IWL_DEBUG_INFO(priv, "perform radio reset.\n"); + iwl_internal_short_hw_scan(priv); +- return; + } + + +diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h +index 732590f..36940a9 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-core.h ++++ b/drivers/net/wireless/iwlwifi/iwl-core.h +@@ -506,7 +506,7 @@ void iwl_init_scan_params(struct iwl_priv *priv); + int iwl_scan_cancel(struct iwl_priv *priv); + int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); + int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req); +-int iwl_internal_short_hw_scan(struct iwl_priv *priv); ++void iwl_internal_short_hw_scan(struct iwl_priv *priv); + int iwl_force_reset(struct iwl_priv *priv, int mode); + u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, + const u8 *ie, int ie_len, int left); +diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h +index 6054c5f..ef1720a 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-dev.h ++++ b/drivers/net/wireless/iwlwifi/iwl-dev.h +@@ -1296,6 +1296,7 @@ struct iwl_priv { + struct work_struct tt_work; + struct work_struct ct_enter; + struct work_struct ct_exit; ++ struct work_struct start_internal_scan; + + struct tasklet_struct irq_tasklet; + +diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c +index bd2f7c4..5062f4e 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-scan.c ++++ b/drivers/net/wireless/iwlwifi/iwl-scan.c +@@ -469,6 +469,8 @@ EXPORT_SYMBOL(iwl_init_scan_params); + + static int iwl_scan_initiate(struct iwl_priv *priv) + { ++ WARN_ON(!mutex_is_locked(&priv->mutex)); ++ + IWL_DEBUG_INFO(priv, "Starting scan...\n"); + 
set_bit(STATUS_SCANNING, &priv->status); + priv->is_internal_short_scan = false; +@@ -546,24 +548,31 @@ EXPORT_SYMBOL(iwl_mac_hw_scan); + * internal short scan, this function should only been called while associated. + * It will reset and tune the radio to prevent possible RF related problem + */ +-int iwl_internal_short_hw_scan(struct iwl_priv *priv) ++void iwl_internal_short_hw_scan(struct iwl_priv *priv) + { +- int ret = 0; ++ queue_work(priv->workqueue, &priv->start_internal_scan); ++} ++ ++static void iwl_bg_start_internal_scan(struct work_struct *work) ++{ ++ struct iwl_priv *priv = ++ container_of(work, struct iwl_priv, start_internal_scan); ++ ++ mutex_lock(&priv->mutex); + + if (!iwl_is_ready_rf(priv)) { +- ret = -EIO; + IWL_DEBUG_SCAN(priv, "not ready or exit pending\n"); +- goto out; ++ goto unlock; + } ++ + if (test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); +- ret = -EAGAIN; +- goto out; ++ goto unlock; + } ++ + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n"); +- ret = -EAGAIN; +- goto out; ++ goto unlock; + } + + priv->scan_bands = 0; +@@ -576,9 +585,8 @@ int iwl_internal_short_hw_scan(struct iwl_priv *priv) + set_bit(STATUS_SCANNING, &priv->status); + priv->is_internal_short_scan = true; + queue_work(priv->workqueue, &priv->request_scan); +- +-out: +- return ret; ++ unlock: ++ mutex_unlock(&priv->mutex); + } + EXPORT_SYMBOL(iwl_internal_short_hw_scan); + +@@ -964,6 +972,7 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv) + INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); + INIT_WORK(&priv->request_scan, iwl_bg_request_scan); + INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); ++ INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan); + INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); + } + EXPORT_SYMBOL(iwl_setup_scan_deferred_work); diff --git a/iwlwifi-iwl_good_ack_health-only-apply-to-AGN-device.patch b/iwlwifi-iwl_good_ack_health-only-apply-to-AGN-device.patch deleted file mode 100644 index 3fbc641..0000000 --- a/iwlwifi-iwl_good_ack_health-only-apply-to-AGN-device.patch +++ /dev/null @@ -1,150 +0,0 @@ -From 8ac33071b4c991e302be67fd0dae1d9cc4b502e3 Mon Sep 17 00:00:00 2001 -From: Wey-Yi Guy -Date: Tue, 16 Mar 2010 10:46:31 -0700 -Subject: [PATCH] iwlwifi: iwl_good_ack_health() only apply to AGN device - -iwl_good_ack_health() check for expected and actual ack count which only -apply to aggregation mode. Move the function to iwlagn module. - -Reported-by: Chantry Xavier -Signed-off-by: Wey-Yi Guy -Signed-off-by: Reinette Chatre ---- - drivers/net/wireless/iwlwifi/iwl-agn.c | 54 +++++++++++++++++++++++++++++++ - drivers/net/wireless/iwlwifi/iwl-rx.c | 55 -------------------------------- - 2 files changed, 54 insertions(+), 55 deletions(-) - -diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c -index dc751cb..b5d410b 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-agn.c -+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c -@@ -1448,6 +1448,60 @@ static void iwl_irq_tasklet(struct iwl_priv *priv) - iwl_enable_interrupts(priv); - } - -+/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ -+#define ACK_CNT_RATIO (50) -+#define BA_TIMEOUT_CNT (5) -+#define BA_TIMEOUT_MAX (16) -+ -+/** -+ * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries. 
-+ * -+ * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding -+ * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal -+ * operation state. -+ */ -+bool iwl_good_ack_health(struct iwl_priv *priv, -+ struct iwl_rx_packet *pkt) -+{ -+ bool rc = true; -+ int actual_ack_cnt_delta, expected_ack_cnt_delta; -+ int ba_timeout_delta; -+ -+ actual_ack_cnt_delta = -+ le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) - -+ le32_to_cpu(priv->statistics.tx.actual_ack_cnt); -+ expected_ack_cnt_delta = -+ le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) - -+ le32_to_cpu(priv->statistics.tx.expected_ack_cnt); -+ ba_timeout_delta = -+ le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) - -+ le32_to_cpu(priv->statistics.tx.agg.ba_timeout); -+ if ((priv->_agn.agg_tids_count > 0) && -+ (expected_ack_cnt_delta > 0) && -+ (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta) -+ < ACK_CNT_RATIO) && -+ (ba_timeout_delta > BA_TIMEOUT_CNT)) { -+ IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d," -+ " expected_ack_cnt = %d\n", -+ actual_ack_cnt_delta, expected_ack_cnt_delta); -+ -+#ifdef CONFIG_IWLWIFI_DEBUG -+ IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n", -+ priv->delta_statistics.tx.rx_detected_cnt); -+ IWL_DEBUG_RADIO(priv, -+ "ack_or_ba_timeout_collision delta = %d\n", -+ priv->delta_statistics.tx. -+ ack_or_ba_timeout_collision); -+#endif -+ IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n", -+ ba_timeout_delta); -+ if (!actual_ack_cnt_delta && -+ (ba_timeout_delta >= BA_TIMEOUT_MAX)) -+ rc = false; -+ } -+ return rc; -+} -+ - - /****************************************************************************** - * -diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c -index 506ccf7..def5042 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-rx.c -+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c -@@ -617,61 +617,6 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv, - - #define REG_RECALIB_PERIOD (60) - --/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ --#define ACK_CNT_RATIO (50) --#define BA_TIMEOUT_CNT (5) --#define BA_TIMEOUT_MAX (16) -- --/** -- * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries. -- * -- * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding -- * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal -- * operation state. 
-- */ --bool iwl_good_ack_health(struct iwl_priv *priv, -- struct iwl_rx_packet *pkt) --{ -- bool rc = true; -- int actual_ack_cnt_delta, expected_ack_cnt_delta; -- int ba_timeout_delta; -- -- actual_ack_cnt_delta = -- le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) - -- le32_to_cpu(priv->statistics.tx.actual_ack_cnt); -- expected_ack_cnt_delta = -- le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) - -- le32_to_cpu(priv->statistics.tx.expected_ack_cnt); -- ba_timeout_delta = -- le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) - -- le32_to_cpu(priv->statistics.tx.agg.ba_timeout); -- if ((priv->_agn.agg_tids_count > 0) && -- (expected_ack_cnt_delta > 0) && -- (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta) -- < ACK_CNT_RATIO) && -- (ba_timeout_delta > BA_TIMEOUT_CNT)) { -- IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d," -- " expected_ack_cnt = %d\n", -- actual_ack_cnt_delta, expected_ack_cnt_delta); -- --#ifdef CONFIG_IWLWIFI_DEBUG -- IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n", -- priv->delta_statistics.tx.rx_detected_cnt); -- IWL_DEBUG_RADIO(priv, -- "ack_or_ba_timeout_collision delta = %d\n", -- priv->delta_statistics.tx. -- ack_or_ba_timeout_collision); --#endif -- IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n", -- ba_timeout_delta); -- if (!actual_ack_cnt_delta && -- (ba_timeout_delta >= BA_TIMEOUT_MAX)) -- rc = false; -- } -- return rc; --} --EXPORT_SYMBOL(iwl_good_ack_health); -- - /** - * iwl_good_plcp_health - checks for plcp error. - * --- -1.7.0.1 - diff --git a/iwlwifi-manage-QoS-by-mac-stack.patch b/iwlwifi-manage-QoS-by-mac-stack.patch index bd0765c..940b8ea 100644 --- a/iwlwifi-manage-QoS-by-mac-stack.patch +++ b/iwlwifi-manage-QoS-by-mac-stack.patch @@ -1,7 +1,7 @@ From: Stanislaw Gruszka To: kernel@lists.fedoraproject.org, "John W. Linville" -Subject: [PATCH 2/4 2.6.34.y] iwlwifi: manage QoS by mac stack -Date: Fri, 11 Jun 2010 17:05:12 +0200 +Subject: [PATCH 2/4 2.6.33.y] iwlwifi: manage QoS by mac stack +Date: Fri, 11 Jun 2010 17:04:18 +0200 commit e61146e36b40fd9d346118c40285913236c329f3 upstream. 
@@ -26,10 +26,10 @@ Signed-off-by: Stanislaw Gruszka 5 files changed, 17 insertions(+), 171 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c -index bdff565..21c3ef0 100644 +index 5622a55..9ba4207 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c -@@ -2515,7 +2515,6 @@ void iwl_post_associate(struct iwl_priv *priv) +@@ -2327,7 +2327,6 @@ void iwl_post_associate(struct iwl_priv *priv) { struct ieee80211_conf *conf = NULL; int ret = 0; @@ -37,7 +37,7 @@ index bdff565..21c3ef0 100644 if (priv->iw_mode == NL80211_IFTYPE_AP) { IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); -@@ -2600,10 +2599,6 @@ void iwl_post_associate(struct iwl_priv *priv) +@@ -2412,10 +2411,6 @@ void iwl_post_associate(struct iwl_priv *priv) if (priv->iw_mode == NL80211_IFTYPE_ADHOC) priv->assoc_station_added = 1; @@ -48,7 +48,7 @@ index bdff565..21c3ef0 100644 /* the chain noise calibration will enabled PM upon completion * If chain noise has already been run, then we need to enable * power management here */ -@@ -2780,7 +2775,6 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +@@ -2602,7 +2597,6 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) void iwl_config_ap(struct iwl_priv *priv) { int ret = 0; @@ -56,7 +56,7 @@ index bdff565..21c3ef0 100644 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) return; -@@ -2832,10 +2826,6 @@ void iwl_config_ap(struct iwl_priv *priv) +@@ -2654,10 +2648,6 @@ void iwl_config_ap(struct iwl_priv *priv) /* restore RXON assoc */ priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; iwlcore_commit_rxon(priv); @@ -67,7 +67,7 @@ index bdff565..21c3ef0 100644 iwl_add_bcast_station(priv); } iwl_send_beacon_cmd(priv); -@@ -3396,11 +3386,6 @@ static int iwl_init_drv(struct iwl_priv *priv) +@@ -3195,11 +3185,6 @@ static int iwl_init_drv(struct iwl_priv *priv) iwl_init_scan_params(priv); @@ -80,10 +80,10 @@ index bdff565..21c3ef0 100644 /* Set the tx_power_user_lmt to the lowest power level * this value will get overwritten by channel max power avg diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c -index 049b652..2dd8aaa 100644 +index 6e9e156..d79b1e9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c -@@ -325,17 +325,13 @@ EXPORT_SYMBOL(iwl_hw_nic_init); +@@ -301,17 +301,13 @@ EXPORT_SYMBOL(iwl_hw_nic_init); /* * QoS support */ @@ -102,7 +102,7 @@ index 049b652..2dd8aaa 100644 if (priv->qos_data.qos_active) priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_UPDATE_EDCA_MSK; -@@ -343,118 +339,14 @@ void iwl_activate_qos(struct iwl_priv *priv, u8 force) +@@ -319,118 +315,14 @@ void iwl_activate_qos(struct iwl_priv *priv, u8 force) if (priv->current_ht_config.is_ht) priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; @@ -227,7 +227,7 @@ index 049b652..2dd8aaa 100644 #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ -@@ -2306,12 +2198,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, +@@ -2273,12 +2165,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, cpu_to_le16((params->txop * 32)); priv->qos_data.def_qos_parm.ac[q].reserved1 = 0; @@ -240,7 +240,7 @@ index 049b652..2dd8aaa 100644 spin_unlock_irqrestore(&priv->lock, flags); -@@ -2587,11 +2473,8 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb) +@@ -2554,11 +2440,8 @@ int iwl_mac_beacon_update(struct 
ieee80211_hw *hw, struct sk_buff *skb) IWL_DEBUG_MAC80211(priv, "leave\n"); spin_unlock_irqrestore(&priv->lock, flags); @@ -252,9 +252,9 @@ index 049b652..2dd8aaa 100644 return 0; } EXPORT_SYMBOL(iwl_mac_beacon_update); -@@ -2833,6 +2716,15 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed) - iwl_set_tx_power(priv, conf->power_level, false); - } +@@ -2790,6 +2673,15 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed) + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv); + if (changed & IEEE80211_CONF_CHANGE_QOS) { + bool qos_active = !!(conf->flags & IEEE80211_CONF_QOS); @@ -268,7 +268,7 @@ index 049b652..2dd8aaa 100644 if (!iwl_is_ready(priv)) { IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); goto out; -@@ -2867,8 +2759,6 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw) +@@ -2860,8 +2752,6 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw) memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config)); spin_unlock_irqrestore(&priv->lock, flags); @@ -278,10 +278,10 @@ index 049b652..2dd8aaa 100644 priv->assoc_id = 0; priv->assoc_capability = 0; diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h -index 36940a9..70af968 100644 +index b69e972..403f512 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h -@@ -304,8 +304,7 @@ struct iwl_cfg { +@@ -298,8 +298,7 @@ struct iwl_cfg { struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, struct ieee80211_ops *hw_ops); void iwl_hw_detect(struct iwl_priv *priv); @@ -292,10 +292,10 @@ index 36940a9..70af968 100644 const struct ieee80211_tx_queue_params *params); void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt); diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h -index ef1720a..cc12e89 100644 +index 3822cf5..f2a9356 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h -@@ -519,30 +519,9 @@ struct iwl_ht_config { +@@ -518,30 +518,9 @@ struct iwl_ht_config { u8 non_GF_STA_present; }; @@ -327,10 +327,10 @@ index ef1720a..cc12e89 100644 }; diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c -index b74a56c..c054527 100644 +index adbb3ea..2280ba7 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c -@@ -3152,8 +3152,6 @@ void iwl3945_post_associate(struct iwl_priv *priv) +@@ -3127,8 +3127,6 @@ void iwl3945_post_associate(struct iwl_priv *priv) break; } @@ -339,9 +339,9 @@ index b74a56c..c054527 100644 /* we have just associated, don't start scan too early */ priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN; } -@@ -3861,11 +3859,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv) +@@ -3841,11 +3839,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv) + priv->iw_mode = NL80211_IFTYPE_STATION; - priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; - iwl_reset_qos(priv); - diff --git a/iwlwifi-move-plcp-check-to-separated-function.patch b/iwlwifi-move-plcp-check-to-separated-function.patch deleted file mode 100644 index d805206..0000000 --- a/iwlwifi-move-plcp-check-to-separated-function.patch +++ /dev/null @@ -1,208 +0,0 @@ -From b3786de4e1033b00d522a5c457a3ea9f8376d0d0 Mon Sep 17 00:00:00 2001 -From: Wey-Yi Guy -Date: Thu, 4 Mar 2010 13:38:58 -0800 -Subject: [PATCH] iwlwifi: move plcp check to separated function - -Move the plcp error checking into stand alone function and pointed by 
ops -to accommodate devices not needing this recovery. - -Signed-off-by: Trieu 'Andrew' Nguyen -Signed-off-by: Wey-Yi Guy -Signed-off-by: Reinette Chatre ---- - drivers/net/wireless/iwlwifi/iwl-1000.c | 1 + - drivers/net/wireless/iwlwifi/iwl-4965.c | 1 + - drivers/net/wireless/iwlwifi/iwl-5000.c | 2 + - drivers/net/wireless/iwlwifi/iwl-6000.c | 2 + - drivers/net/wireless/iwlwifi/iwl-core.h | 5 +++ - drivers/net/wireless/iwlwifi/iwl-rx.c | 58 +++++++++++++++++++------------ - 6 files changed, 47 insertions(+), 22 deletions(-) - -diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c -index 89dc401..2597574 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-1000.c -+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c -@@ -212,6 +212,7 @@ static struct iwl_lib_ops iwl1000_lib = { - }, - .add_bcast_station = iwl_add_bcast_station, - .recover_from_tx_stall = iwl_bg_monitor_recover, -+ .recover_from_statistics = iwl_recover_from_statistics, - }; - - static const struct iwl_ops iwl1000_ops = { -diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c -index aa49a6e..6dd4328 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-4965.c -+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c -@@ -2217,6 +2217,7 @@ static struct iwl_lib_ops iwl4965_lib = { - .set_ct_kill = iwl4965_set_ct_threshold, - }, - .add_bcast_station = iwl_add_bcast_station, -+ .recover_from_statistics = iwl_recover_from_statistics, - }; - - static const struct iwl_ops iwl4965_ops = { -diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c -index d05fad4..0c2469c 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-5000.c -+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c -@@ -1501,6 +1501,7 @@ struct iwl_lib_ops iwl5000_lib = { - }, - .add_bcast_station = iwl_add_bcast_station, - .recover_from_tx_stall = iwl_bg_monitor_recover, -+ .recover_from_statistics = iwl_recover_from_statistics, - }; - - static struct iwl_lib_ops iwl5150_lib = { -@@ -1556,6 +1557,7 @@ static struct iwl_lib_ops iwl5150_lib = { - }, - .add_bcast_station = iwl_add_bcast_station, - .recover_from_tx_stall = iwl_bg_monitor_recover, -+ .recover_from_statistics = iwl_recover_from_statistics, - }; - - static const struct iwl_ops iwl5000_ops = { -diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c -index 0c965cd..189a8ce 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-6000.c -+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c -@@ -278,6 +278,7 @@ static struct iwl_lib_ops iwl6000_lib = { - }, - .add_bcast_station = iwl_add_bcast_station, - .recover_from_tx_stall = iwl_bg_monitor_recover, -+ .recover_from_statistics = iwl_recover_from_statistics, - }; - - static const struct iwl_ops iwl6000_ops = { -@@ -344,6 +345,7 @@ static struct iwl_lib_ops iwl6050_lib = { - }, - .add_bcast_station = iwl_add_bcast_station, - .recover_from_tx_stall = iwl_bg_monitor_recover, -+ .recover_from_statistics = iwl_recover_from_statistics, - }; - - static const struct iwl_ops iwl6050_ops = { -diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h -index 9076576..d67048e 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-core.h -+++ b/drivers/net/wireless/iwlwifi/iwl-core.h -@@ -193,6 +193,9 @@ struct iwl_lib_ops { - void (*add_bcast_station)(struct iwl_priv *priv); - /* recover from tx queue stall */ - void (*recover_from_tx_stall)(unsigned long data); -+ /* recover from errors showed in statistics */ -+ void 
(*recover_from_statistics)(struct iwl_priv *priv, -+ struct iwl_rx_packet *pkt); - }; - - struct iwl_led_ops { -@@ -435,6 +438,8 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); - void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -+void iwl_recover_from_statistics(struct iwl_priv *priv, -+ struct iwl_rx_packet *pkt); - void iwl_rx_statistics(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); - void iwl_reply_statistics(struct iwl_priv *priv, -diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c -index e5eb339..fabc52f 100644 ---- a/drivers/net/wireless/iwlwifi/iwl-rx.c -+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c -@@ -618,28 +618,18 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv, - #define REG_RECALIB_PERIOD (60) - - #define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n" --void iwl_rx_statistics(struct iwl_priv *priv, -- struct iwl_rx_mem_buffer *rxb) -+/* -+ * This function checks for plcp error. -+ * - When the plcp error is exceeding the thresholds, it will reset the radio -+ * to improve the throughput. -+ */ -+void iwl_recover_from_statistics(struct iwl_priv *priv, -+ struct iwl_rx_packet *pkt) - { -- int change; -- struct iwl_rx_packet *pkt = rxb_addr(rxb); - int combined_plcp_delta; - unsigned int plcp_msec; - unsigned long plcp_received_jiffies; - -- IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", -- (int)sizeof(priv->statistics), -- le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); -- -- change = ((priv->statistics.general.temperature != -- pkt->u.stats.general.temperature) || -- ((priv->statistics.flag & -- STATISTICS_REPLY_FLG_HT40_MODE_MSK) != -- (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK))); -- --#ifdef CONFIG_IWLWIFI_DEBUG -- iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); --#endif - /* - * check for plcp_err and trigger radio reset if it exceeds - * the plcp error threshold plcp_delta. 
-@@ -660,11 +650,11 @@ void iwl_rx_statistics(struct iwl_priv *priv, - le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err)); - - if ((combined_plcp_delta > 0) && -- ((combined_plcp_delta * 100) / plcp_msec) > -+ ((combined_plcp_delta * 100) / plcp_msec) > - priv->cfg->plcp_delta_threshold) { - /* -- * if plcp_err exceed the threshold, the following -- * data is printed in csv format: -+ * if plcp_err exceed the threshold, -+ * the following data is printed in csv format: - * Text: plcp_err exceeded %d, - * Received ofdm.plcp_err, - * Current ofdm.plcp_err, -@@ -679,9 +669,8 @@ void iwl_rx_statistics(struct iwl_priv *priv, - le32_to_cpu(priv->statistics.rx.ofdm.plcp_err), - le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err), - le32_to_cpu( -- priv->statistics.rx.ofdm_ht.plcp_err), -+ priv->statistics.rx.ofdm_ht.plcp_err), - combined_plcp_delta, plcp_msec); -- - /* - * Reset the RF radio due to the high plcp - * error rate -@@ -689,6 +678,31 @@ void iwl_rx_statistics(struct iwl_priv *priv, - iwl_force_reset(priv, IWL_RF_RESET); - } - } -+} -+EXPORT_SYMBOL(iwl_recover_from_statistics); -+ -+void iwl_rx_statistics(struct iwl_priv *priv, -+ struct iwl_rx_mem_buffer *rxb) -+{ -+ int change; -+ struct iwl_rx_packet *pkt = rxb_addr(rxb); -+ -+ -+ IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", -+ (int)sizeof(priv->statistics), -+ le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); -+ -+ change = ((priv->statistics.general.temperature != -+ pkt->u.stats.general.temperature) || -+ ((priv->statistics.flag & -+ STATISTICS_REPLY_FLG_HT40_MODE_MSK) != -+ (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK))); -+ -+#ifdef CONFIG_IWLWIFI_DEBUG -+ iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); -+#endif -+ if (priv->cfg->ops->lib->recover_from_statistics) -+ priv->cfg->ops->lib->recover_from_statistics(priv, pkt); - - memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); - --- -1.7.0.1 - diff --git a/iwlwifi-recover_from_tx_stall.patch b/iwlwifi-recover_from_tx_stall.patch new file mode 100644 index 0000000..0b69e44 --- /dev/null +++ b/iwlwifi-recover_from_tx_stall.patch @@ -0,0 +1,13 @@ +https://bugzilla.redhat.com/show_bug.cgi?id=589777#c5 + +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-3945.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-3945.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-3945.c.orig 2010-05-19 16:07:15.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-3945.c 2010-05-19 16:09:42.000000000 -0400 +@@ -2794,6 +2794,7 @@ static struct iwl_lib_ops iwl3945_lib = + .post_associate = iwl3945_post_associate, + .isr = iwl_isr_legacy, + .config_ap = iwl3945_config_ap, ++ .recover_from_tx_stall = iwl_bg_monitor_recover, + }; + + static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { diff --git a/iwlwifi_-Adjusting-PLCP-error-threshold-for-1000-NIC.patch b/iwlwifi_-Adjusting-PLCP-error-threshold-for-1000-NIC.patch new file mode 100644 index 0000000..ab6c4d6 --- /dev/null +++ b/iwlwifi_-Adjusting-PLCP-error-threshold-for-1000-NIC.patch @@ -0,0 +1,48 @@ +Backport of the following upstream commit... + +commit 6c3872e1d52290dcd506473028867cacc6b7393d +Author: Trieu 'Andrew' Nguyen +Date: Mon Feb 8 13:53:05 2010 -0800 + + iwlwifi: Adjusting PLCP error threshold for 1000 NIC + + While testing the station with the NIC 1000 family, it is found that + the plcp error can easily exceed 50 value in 100mSecs. This creates + unneccessary radio reset/tuning. 
This patch raises the PLCP error + threshold of the NIC 1000 from 50 to 200 error count. + + Signed-off-by: Trieu 'Andrew' Nguyen + Signed-off-by: Reinette Chatre + +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c.orig 2010-04-13 13:44:14.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c 2010-04-13 13:45:51.000000000 -0400 +@@ -174,7 +174,7 @@ struct iwl_cfg iwl1000_bgn_cfg = { + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .support_ct_kill_exit = true, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, +- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, + }; + + struct iwl_cfg iwl1000_bg_cfg = { +@@ -201,7 +201,7 @@ struct iwl_cfg iwl1000_bg_cfg = { + .led_compensation = 51, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .support_ct_kill_exit = true, +- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, + }; + + MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig 2010-04-13 13:44:14.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h 2010-04-13 13:45:06.000000000 -0400 +@@ -991,6 +991,7 @@ struct iwl_switch_rxon { + #define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (0) + #define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50) + #define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100) ++#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200) + #define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255) + + enum iwl_reset { diff --git a/iwlwifi_-Logic-to-control-how-frequent-radio-should-be-reset-if-needed.patch b/iwlwifi_-Logic-to-control-how-frequent-radio-should-be-reset-if-needed.patch new file mode 100644 index 0000000..72618dd --- /dev/null +++ b/iwlwifi_-Logic-to-control-how-frequent-radio-should-be-reset-if-needed.patch @@ -0,0 +1,81 @@ +Backport of the following upstream commit... + +commit d4d59e88cb746165c6fe33eacb6f582d525c6ef1 +Author: Wey-Yi Guy +Date: Fri Jan 22 14:22:45 2010 -0800 + + iwlwifi: Logic to control how frequent radio should be reset if needed + + Add additional logic for internal scan routine to control how + frequent this function should be performed. + + The intent of this function is to reset/re-tune the radio and bring the + RF/PHY back to normal state, it does not make sense calling it too + frequent, + if reset the radio can not bring it back to normal state, it indicate + there are other reason to cause the radio not operate correctly. + + Signed-off-by: Wey-Yi Guy + Signed-off-by: Reinette Chatre + Signed-off-by: John W. 
Linville + +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig 2010-04-13 11:53:36.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h 2010-04-13 11:59:18.000000000 -0400 +@@ -1034,6 +1034,7 @@ struct iwl_priv { + unsigned long scan_start; + unsigned long scan_pass_start; + unsigned long scan_start_tsf; ++ unsigned long last_internal_scan_jiffies; + void *scan; + int scan_bands; + struct cfg80211_scan_request *scan_request; +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c.orig 2010-04-13 11:53:36.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c 2010-04-13 12:01:06.000000000 -0400 +@@ -204,7 +204,8 @@ static void iwl_rx_scan_results_notif(st + #endif + + priv->last_scan_jiffies = jiffies; +- priv->next_scan_jiffies = 0; ++ if (!priv->is_internal_short_scan) ++ priv->next_scan_jiffies = 0; + } + + /* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ +@@ -251,7 +252,11 @@ static void iwl_rx_scan_complete_notif(s + } + + priv->last_scan_jiffies = jiffies; +- priv->next_scan_jiffies = 0; ++ if (!priv->is_internal_short_scan) ++ priv->next_scan_jiffies = 0; ++ else ++ priv->last_internal_scan_jiffies = jiffies; ++ + IWL_DEBUG_INFO(priv, "Setting scan to off\n"); + + clear_bit(STATUS_SCANNING, &priv->status); +@@ -559,6 +564,8 @@ EXPORT_SYMBOL(iwl_mac_hw_scan); + * internal short scan, this function should only been called while associated. + * It will reset and tune the radio to prevent possible RF related problem + */ ++#define IWL_DELAY_NEXT_INTERNAL_SCAN (HZ*1) ++ + int iwl_internal_short_hw_scan(struct iwl_priv *priv) + { + int ret = 0; +@@ -578,6 +585,13 @@ int iwl_internal_short_hw_scan(struct iw + ret = -EAGAIN; + goto out; + } ++ if (priv->last_internal_scan_jiffies && ++ time_after(priv->last_internal_scan_jiffies + ++ IWL_DELAY_NEXT_INTERNAL_SCAN, jiffies)) { ++ IWL_DEBUG_SCAN(priv, "internal scan rejected\n"); ++ goto out; ++ } ++ + priv->scan_bands = 0; + if (priv->band == IEEE80211_BAND_5GHZ) + priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ); diff --git a/iwlwifi_-Recover-TX-flow-failure.patch b/iwlwifi_-Recover-TX-flow-failure.patch new file mode 100644 index 0000000..a5566d6 --- /dev/null +++ b/iwlwifi_-Recover-TX-flow-failure.patch @@ -0,0 +1,131 @@ +Backport of the following upstream commit... + +commit d5a0ffa3eaf9e898f25a925813f1a723be7808f8 +Author: Wey-Yi Guy +Date: Thu Mar 4 13:38:59 2010 -0800 + + iwlwifi: Recover TX flow failure + + Monitors the tx statistics to detect the drop in throughput. + When the throughput drops, the ratio of the actual_ack_count and the + expected_ack_count also drops. At the same time, the aggregated + ba_timeout (the number of ba timeout retries) also rises. If the + actual_ack_count/expected_ack_count ratio is 0 and the number of ba + timeout retries rises to BA_TIMEOUT_MAX, no tx packets can be delivered. + Reloading the uCode and bring the system back to normal operational + state. 
+ + Signed-off-by: Trieu 'Andrew' Nguyen + Signed-off-by: Wey-Yi Guy + Signed-off-by: Reinette Chatre + +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c.orig 2010-04-13 14:44:38.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c 2010-04-13 14:53:47.000000000 -0400 +@@ -2783,10 +2783,21 @@ static int iwl_mac_ampdu_action(struct i + return ret; + case IEEE80211_AMPDU_TX_START: + IWL_DEBUG_HT(priv, "start Tx\n"); +- return iwl_tx_agg_start(priv, sta->addr, tid, ssn); ++ ret = iwl_tx_agg_start(priv, sta->addr, tid, ssn); ++ if (ret == 0) { ++ priv->agg_tids_count++; ++ IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n", ++ priv->agg_tids_count); ++ } ++ return ret; + case IEEE80211_AMPDU_TX_STOP: + IWL_DEBUG_HT(priv, "stop Tx\n"); + ret = iwl_tx_agg_stop(priv, sta->addr, tid); ++ if ((ret == 0) && (priv->agg_tids_count > 0)) { ++ priv->agg_tids_count--; ++ IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n", ++ priv->agg_tids_count); ++ } + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return 0; + else +@@ -3204,6 +3215,7 @@ static int iwl_init_drv(struct iwl_priv + priv->band = IEEE80211_BAND_2GHZ; + + priv->iw_mode = NL80211_IFTYPE_STATION; ++ priv->agg_tids_count = 0; + + /* initialize force reset */ + priv->force_reset[IWL_RF_RESET].reset_duration = +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig 2010-04-13 14:44:38.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h 2010-04-13 14:53:19.000000000 -0400 +@@ -1239,6 +1239,11 @@ struct iwl_priv { + void *ict_tbl_vir; + u32 inta; + bool use_ict; ++ /* ++ * reporting the number of tids has AGG on. 0 means ++ * no AGGREGATION ++ */ ++ u8 agg_tids_count; + + u32 inta_mask; + /* Current association information needed to configure the +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig 2010-04-13 14:44:38.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c 2010-04-13 14:56:17.000000000 -0400 +@@ -593,9 +593,18 @@ static void iwl_accumulative_statistics( + + #define REG_RECALIB_PERIOD (60) + ++/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ ++#define ACK_CNT_RATIO (50) ++#define BA_TIMEOUT_CNT (5) ++#define BA_TIMEOUT_MAX (16) ++ + #define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n" + /* +- * This function checks for plcp error. ++ * This function checks for plcp error, ACK count ratios, aggregated BA ++ * timeout retries. ++ * - When the ACK count ratio is 0 and aggregated BA timeout retries is ++ * exceeding the BA_TIMEOUT_MAX, it will recover the failure by resetting ++ * the firmware. + * - When the plcp error is exceeding the thresholds, it will reset the radio + * to improve the throughput. 
+ */ +@@ -605,6 +614,37 @@ void iwl_recover_from_statistics(struct + int combined_plcp_delta; + unsigned int plcp_msec; + unsigned long plcp_received_jiffies; ++ int actual_ack_cnt_delta; ++ int expected_ack_cnt_delta; ++ int ba_timeout_delta; ++ ++ actual_ack_cnt_delta = ++ le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) - ++ le32_to_cpu(priv->statistics.tx.actual_ack_cnt); ++ expected_ack_cnt_delta = ++ le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) - ++ le32_to_cpu(priv->statistics.tx.expected_ack_cnt); ++ ba_timeout_delta = ++ le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) - ++ le32_to_cpu(priv->statistics.tx.agg.ba_timeout); ++ if ((priv->agg_tids_count > 0) && ++ (expected_ack_cnt_delta > 0) && ++ (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta) ++ < ACK_CNT_RATIO) && ++ (ba_timeout_delta > BA_TIMEOUT_CNT)) { ++ IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d," ++ " expected_ack_cnt = %d\n", ++ actual_ack_cnt_delta, expected_ack_cnt_delta); ++ ++ IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n", ++ ba_timeout_delta); ++ if ((actual_ack_cnt_delta == 0) && ++ (ba_timeout_delta >= BA_TIMEOUT_MAX)) { ++ IWL_DEBUG_RADIO(priv, ++ "call iwl_force_reset(IWL_FW_RESET)\n"); ++ iwl_force_reset(priv, IWL_FW_RESET); ++ } ++ } + + /* + * check for plcp_err and trigger radio reset if it exceeds diff --git a/iwlwifi_-Recover-TX-flow-stall-due-to-stuck-queue.patch b/iwlwifi_-Recover-TX-flow-stall-due-to-stuck-queue.patch new file mode 100644 index 0000000..218a1b2 --- /dev/null +++ b/iwlwifi_-Recover-TX-flow-stall-due-to-stuck-queue.patch @@ -0,0 +1,472 @@ +Backport of the following upstream commit... + +commit b74e31a9bc1013e69b85b139072485dc153453dd +Author: Wey-Yi Guy +Date: Mon Mar 1 17:23:50 2010 -0800 + + iwlwifi: Recover TX flow stall due to stuck queue + + Monitors the internal TX queues periodically. When a queue is stuck + for some unknown conditions causing the throughput to drop and the + transfer is stop, the driver will force firmware reload and bring the + system back to normal operational state. + + The iwlwifi devices behave differently in this regard so this feature is + made part of the ops infrastructure so we can have more control on how to + monitor and recover from tx queue stall case per device. 
+ + Signed-off-by: Trieu 'Andrew' Nguyen + Signed-off-by: Wey-Yi Guy + Signed-off-by: Reinette Chatre + +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c.orig 2010-04-13 14:33:10.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c 2010-04-13 14:34:23.000000000 -0400 +@@ -138,6 +138,7 @@ static struct iwl_lib_ops iwl1000_lib = + .temperature = iwl5000_temperature, + .set_ct_kill = iwl1000_set_ct_threshold, + }, ++ .recover_from_tx_stall = iwl_bg_monitor_recover, + }; + + static struct iwl_ops iwl1000_ops = { +@@ -175,6 +176,7 @@ struct iwl_cfg iwl1000_bgn_cfg = { + .support_ct_kill_exit = true, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl1000_bg_cfg = { +@@ -202,6 +204,7 @@ struct iwl_cfg iwl1000_bg_cfg = { + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl3945-base.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl3945-base.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl3945-base.c.orig 2010-04-13 14:33:10.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl3945-base.c 2010-04-13 14:33:49.000000000 -0400 +@@ -2483,6 +2483,13 @@ static void iwl3945_alive_start(struct i + /* After the ALIVE response, we can send commands to 3945 uCode */ + set_bit(STATUS_ALIVE, &priv->status); + ++ if (priv->cfg->ops->lib->recover_from_tx_stall) { ++ /* Enable timer to monitor the driver queues */ ++ mod_timer(&priv->monitor_recover, ++ jiffies + ++ msecs_to_jiffies(priv->cfg->monitor_recover_period)); ++ } ++ + if (iwl_is_rfkill(priv)) + return; + +@@ -3768,6 +3775,13 @@ static void iwl3945_setup_deferred_work( + + iwl3945_hw_setup_deferred_work(priv); + ++ if (priv->cfg->ops->lib->recover_from_tx_stall) { ++ init_timer(&priv->monitor_recover); ++ priv->monitor_recover.data = (unsigned long)priv; ++ priv->monitor_recover.function = ++ priv->cfg->ops->lib->recover_from_tx_stall; ++ } ++ + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl3945_irq_tasklet, (unsigned long)priv); + } +@@ -3780,6 +3794,8 @@ static void iwl3945_cancel_deferred_work + cancel_delayed_work(&priv->scan_check); + cancel_delayed_work(&priv->alive_start); + cancel_work_sync(&priv->beacon_update); ++ if (priv->cfg->ops->lib->recover_from_tx_stall) ++ del_timer_sync(&priv->monitor_recover); + } + + static struct attribute *iwl3945_sysfs_entries[] = { +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-3945.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-3945.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-3945.c.orig 2010-04-13 14:33:10.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-3945.c 2010-04-13 14:33:49.000000000 -0400 +@@ -2829,6 +2829,7 @@ static struct iwl_cfg iwl3945_bg_cfg = { + .led_compensation = 64, + .broken_powersave = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + static struct iwl_cfg iwl3945_abg_cfg = { +@@ -2847,6 +2848,7 @@ 
static struct iwl_cfg iwl3945_abg_cfg = + .led_compensation = 64, + .broken_powersave = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct pci_device_id iwl3945_hw_card_ids[] = { +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c.orig 2010-04-13 14:33:10.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c 2010-04-13 14:33:49.000000000 -0400 +@@ -2248,6 +2248,7 @@ struct iwl_cfg iwl4965_agn_cfg = { + .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + /* Module firmware */ +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c.orig 2010-04-13 14:33:10.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c 2010-04-13 14:35:01.000000000 -0400 +@@ -1506,6 +1506,7 @@ struct iwl_lib_ops iwl5000_lib = { + .temperature = iwl5000_temperature, + .set_ct_kill = iwl5000_set_ct_threshold, + }, ++ .recover_from_tx_stall = iwl_bg_monitor_recover, + }; + + static struct iwl_lib_ops iwl5150_lib = { +@@ -1558,6 +1559,7 @@ static struct iwl_lib_ops iwl5150_lib = + .temperature = iwl5150_temperature, + .set_ct_kill = iwl5150_set_ct_threshold, + }, ++ .recover_from_tx_stall = iwl_bg_monitor_recover, + }; + + static struct iwl_ops iwl5000_ops = { +@@ -1607,6 +1609,7 @@ struct iwl_cfg iwl5300_agn_cfg = { + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl5100_bgn_cfg = { +@@ -1632,6 +1635,7 @@ struct iwl_cfg iwl5100_bgn_cfg = { + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl5100_abg_cfg = { +@@ -1655,6 +1659,7 @@ struct iwl_cfg iwl5100_abg_cfg = { + .led_compensation = 51, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl5100_agn_cfg = { +@@ -1681,6 +1686,7 @@ struct iwl_cfg iwl5100_agn_cfg = { + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl5350_agn_cfg = { +@@ -1707,6 +1713,7 @@ struct iwl_cfg iwl5350_agn_cfg = { + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl5150_agn_cfg = { +@@ -1733,6 +1740,7 @@ struct iwl_cfg iwl5150_agn_cfg = { + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + 
}; + + struct iwl_cfg iwl5150_abg_cfg = { +@@ -1756,6 +1764,7 @@ struct iwl_cfg iwl5150_abg_cfg = { + .led_compensation = 51, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c.orig 2010-04-13 14:33:10.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c 2010-04-13 14:36:12.000000000 -0400 +@@ -250,6 +250,7 @@ static struct iwl_lib_ops iwl6000_lib = + .temperature = iwl5000_temperature, + .set_ct_kill = iwl6000_set_ct_threshold, + }, ++ .recover_from_tx_stall = iwl_bg_monitor_recover, + }; + + static struct iwl_ops iwl6000_ops = { +@@ -308,6 +309,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = { + .support_ct_kill_exit = true, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl6000i_2abg_cfg = { +@@ -338,6 +340,7 @@ struct iwl_cfg iwl6000i_2abg_cfg = { + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl6000i_2bg_cfg = { +@@ -368,6 +371,7 @@ struct iwl_cfg iwl6000i_2bg_cfg = { + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl6050_2agn_cfg = { +@@ -400,6 +404,7 @@ struct iwl_cfg iwl6050_2agn_cfg = { + .support_ct_kill_exit = true, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DYNAMIC, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl6050_2abg_cfg = { +@@ -430,6 +435,7 @@ struct iwl_cfg iwl6050_2abg_cfg = { + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + struct iwl_cfg iwl6000_3agn_cfg = { +@@ -462,6 +468,7 @@ struct iwl_cfg iwl6000_3agn_cfg = { + .support_ct_kill_exit = true, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, ++ .monitor_recover_period = IWL_MONITORING_PERIOD, + }; + + MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c.orig 2010-04-13 14:33:10.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c 2010-04-13 14:37:04.000000000 -0400 +@@ -1912,6 +1912,13 @@ static void iwl_alive_start(struct iwl_p + /* After the ALIVE response, we can send host commands to the uCode */ + set_bit(STATUS_ALIVE, &priv->status); + ++ if (priv->cfg->ops->lib->recover_from_tx_stall) { ++ /* Enable timer to monitor the driver queues */ ++ mod_timer(&priv->monitor_recover, ++ jiffies + ++ msecs_to_jiffies(priv->cfg->monitor_recover_period)); ++ } ++ + if (iwl_is_rfkill(priv)) + return; + +@@ -3126,6 +3133,13 @@ static void iwl_setup_deferred_work(stru + priv->statistics_periodic.data = 
(unsigned long)priv; + priv->statistics_periodic.function = iwl_bg_statistics_periodic; + ++ if (priv->cfg->ops->lib->recover_from_tx_stall) { ++ init_timer(&priv->monitor_recover); ++ priv->monitor_recover.data = (unsigned long)priv; ++ priv->monitor_recover.function = ++ priv->cfg->ops->lib->recover_from_tx_stall; ++ } ++ + if (!priv->cfg->use_isr_legacy) + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl_irq_tasklet, (unsigned long)priv); +@@ -3144,6 +3158,8 @@ static void iwl_cancel_deferred_work(str + cancel_delayed_work(&priv->alive_start); + cancel_work_sync(&priv->beacon_update); + del_timer_sync(&priv->statistics_periodic); ++ if (priv->cfg->ops->lib->recover_from_tx_stall) ++ del_timer_sync(&priv->monitor_recover); + } + + static void iwl_init_hw_rates(struct iwl_priv *priv, +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.c.orig 2010-04-13 14:33:10.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.c 2010-04-13 14:33:49.000000000 -0400 +@@ -3263,6 +3263,99 @@ int iwl_force_reset(struct iwl_priv *pri + } + return 0; + } ++EXPORT_SYMBOL(iwl_force_reset); ++ ++/** ++ * iwl_bg_monitor_recover - Timer callback to check for stuck queue and recover ++ * ++ * During normal conditions (no queue is stuck), the timer is continually set to ++ * execute every monitor_recover_period milliseconds after the last timer ++ * expired. When the queue read_ptr stays at the same place, the timer is ++ * shortened to 100mSecs. This is ++ * 1) to reduce the chance that the read_ptr may wrap around (not stuck) ++ * 2) to detect stuck queues more quickly, before the station and AP can ++ * disassociate from each other. ++ * ++ * This function monitors all the tx queues and recovers if any ++ * of the queues are stuck. ++ * 1. It first checks the cmd queue for a stuck condition. If it is stuck, ++ * it will recover by resetting the firmware and return. ++ * 2. Then, it checks for station association. If associated, it will check ++ * the other queues. If any queue is stuck, it will recover by resetting ++ * the firmware. ++ * Note: the queue read_ptr must be seen at the same place MAX_REPEAT+1 ++ * times in order for the queue to be considered stuck. ++ */ ++/* ++ * The maximum number of times the tx queue read pointer may stay at the ++ * same place without the queue being considered stuck. ++ */ ++#define MAX_REPEAT (2) ++static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt) ++{ ++ struct iwl_tx_queue *txq; ++ struct iwl_queue *q; ++ ++ txq = &priv->txq[cnt]; ++ q = &txq->q; ++ /* queue is empty, skip */ ++ if (q->read_ptr != q->write_ptr) { ++ if (q->read_ptr == q->last_read_ptr) { ++ /* the queue has not been read since the last check */ ++ if (q->repeat_same_read_ptr > MAX_REPEAT) { ++ IWL_ERR(priv, ++ "queue %d stuck %d time. 
Fw reload.\n", ++ q->id, q->repeat_same_read_ptr); ++ q->repeat_same_read_ptr = 0; ++ iwl_force_reset(priv, IWL_FW_RESET); ++ } else { ++ q->repeat_same_read_ptr++; ++ IWL_DEBUG_RADIO(priv, ++ "queue %d, not read %d time\n", ++ q->id, ++ q->repeat_same_read_ptr); ++ mod_timer(&priv->monitor_recover, jiffies + ++ msecs_to_jiffies(IWL_ONE_HUNDRED_MSECS)); ++ } ++ return 1; ++ } else { ++ q->last_read_ptr = q->read_ptr; ++ q->repeat_same_read_ptr = 0; ++ } ++ } ++ return 0; ++} ++ ++void iwl_bg_monitor_recover(unsigned long data) ++{ ++ struct iwl_priv *priv = (struct iwl_priv *)data; ++ int cnt; ++ ++ if (test_bit(STATUS_EXIT_PENDING, &priv->status)) ++ return; ++ ++ /* monitor and check for stuck cmd queue */ ++ if (iwl_check_stuck_queue(priv, IWL_CMD_QUEUE_NUM)) ++ return; ++ ++ /* monitor and check for other stuck queues */ ++ if (iwl_is_associated(priv)) { ++ for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { ++ /* skip as we already checked the command queue */ ++ if (cnt == IWL_CMD_QUEUE_NUM) ++ continue; ++ if (iwl_check_stuck_queue(priv, cnt)) ++ return; ++ } ++ } ++ /* ++ * Reschedule the timer to occur in ++ * priv->cfg->monitor_recover_period ++ */ ++ mod_timer(&priv->monitor_recover, ++ jiffies + msecs_to_jiffies(priv->cfg->monitor_recover_period)); ++} ++EXPORT_SYMBOL(iwl_bg_monitor_recover); + + #ifdef CONFIG_PM + +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig 2010-04-13 14:33:10.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h 2010-04-13 14:38:21.000000000 -0400 +@@ -187,6 +187,8 @@ struct iwl_lib_ops { + + /* temperature */ + struct iwl_temp_ops temp_ops; ++ /* recover from tx queue stall */ ++ void (*recover_from_tx_stall)(unsigned long data); + }; + + struct iwl_led_ops { +@@ -292,6 +294,8 @@ struct iwl_cfg { + u8 sm_ps_mode; + const bool support_wimax_coexist; + u8 plcp_delta_threshold; ++ /* timer period for monitor the driver queues */ ++ u32 monitor_recover_period; + }; + + /*************************** +@@ -579,6 +583,9 @@ static inline u16 iwl_pcie_link_ctl(stru + pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); + return pci_lnk_ctl; + } ++ ++void iwl_bg_monitor_recover(unsigned long data); ++ + #ifdef CONFIG_PM + int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state); + int iwl_pci_resume(struct pci_dev *pdev); +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig 2010-04-13 14:33:10.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h 2010-04-13 14:38:52.000000000 -0400 +@@ -183,6 +183,10 @@ struct iwl_queue { + int n_bd; /* number of BDs in this queue */ + int write_ptr; /* 1-st empty entry (index) host_w*/ + int read_ptr; /* last used entry (index) host_r*/ ++ /* use for monitoring and recovering the stuck queue */ ++ int last_read_ptr; /* storing the last read_ptr */ ++ /* number of time read_ptr and last_read_ptr are the same */ ++ u8 repeat_same_read_ptr; + dma_addr_t dma_addr; /* physical addr for BD's */ + int n_window; /* safe queue window */ + u32 id; +@@ -997,6 +1001,11 @@ struct iwl_switch_rxon { + #define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3) + #define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) + ++/* timer constants use to monitor and recover stuck tx queues in 
mSecs */ ++#define IWL_MONITORING_PERIOD (1000) ++#define IWL_ONE_HUNDRED_MSECS (100) ++#define IWL_SIXTY_SECS (60000) ++ + enum iwl_reset { + IWL_RF_RESET = 0, + IWL_FW_RESET, +@@ -1295,6 +1304,7 @@ struct iwl_priv { + u32 disable_tx_power_cal; + struct work_struct run_time_calib_work; + struct timer_list statistics_periodic; ++ struct timer_list monitor_recover; + bool hw_ready; + /*For 3945*/ + #define IWL_DEFAULT_TX_POWER 0x0F +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-tx.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-tx.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-tx.c.orig 2010-04-13 14:33:10.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-tx.c 2010-04-13 14:33:49.000000000 -0400 +@@ -288,6 +288,8 @@ static int iwl_queue_init(struct iwl_pri + q->high_mark = 2; + + q->write_ptr = q->read_ptr = 0; ++ q->last_read_ptr = 0; ++ q->repeat_same_read_ptr = 0; + + return 0; + } diff --git a/iwlwifi_-Tune-radio-to-prevent-unexpected-behavior.patch b/iwlwifi_-Tune-radio-to-prevent-unexpected-behavior.patch new file mode 100644 index 0000000..f53445c --- /dev/null +++ b/iwlwifi_-Tune-radio-to-prevent-unexpected-behavior.patch @@ -0,0 +1,394 @@ +Backport of the following upstream commit... + +commit 3e4fb5faefb57824f2e42305b3d5907845af978c +Author: Trieu 'Andrew' Nguyen +Date: Fri Jan 22 14:22:46 2010 -0800 + + iwlwifi: Tune radio to prevent unexpected behavior + + We have seen the throughput dropped due to external noisy environment + and the radio is out of tune. There are lot of plcp errors indicating + this condition. Eventually the station can get de-authenticated by the + Access Point. By resetting and tuning the radio, the plcp errors are + reduced or eliminated and the throughput starts to rise. + + To prevent unexpected behavior such as drop in throughput or deauthentication, + - The change provides the driver feature to monitor and tune the radio base on + the statistics notification from the uCode. + - It also allows the setting of the plcp error rate threshold via + the plcp_delta under debugfs interface. + + Signed-off-by: Trieu 'Andrew' Nguyen + Signed-off-by: Reinette Chatre + Signed-off-by: John W. 
Linville + +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c.orig 2010-04-13 14:30:36.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c 2010-04-13 14:30:41.000000000 -0400 +@@ -174,6 +174,7 @@ struct iwl_cfg iwl1000_bgn_cfg = { + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .support_ct_kill_exit = true, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + }; + + struct iwl_cfg iwl1000_bg_cfg = { +@@ -200,6 +201,7 @@ struct iwl_cfg iwl1000_bg_cfg = { + .led_compensation = 51, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .support_ct_kill_exit = true, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + }; + + MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-3945.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-3945.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-3945.c.orig 2010-04-13 14:30:36.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-3945.c 2010-04-13 14:30:41.000000000 -0400 +@@ -2828,6 +2828,7 @@ static struct iwl_cfg iwl3945_bg_cfg = { + .ht_greenfield_support = false, + .led_compensation = 64, + .broken_powersave = true, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + }; + + static struct iwl_cfg iwl3945_abg_cfg = { +@@ -2845,6 +2846,7 @@ static struct iwl_cfg iwl3945_abg_cfg = + .ht_greenfield_support = false, + .led_compensation = 64, + .broken_powersave = true, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + }; + + struct pci_device_id iwl3945_hw_card_ids[] = { +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c.orig 2010-04-13 14:30:36.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c 2010-04-13 14:30:41.000000000 -0400 +@@ -2247,6 +2247,7 @@ struct iwl_cfg iwl4965_agn_cfg = { + .led_compensation = 61, + .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + }; + + /* Module firmware */ +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c.orig 2010-04-13 14:30:36.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c 2010-04-13 14:30:41.000000000 -0400 +@@ -1606,6 +1606,7 @@ struct iwl_cfg iwl5300_agn_cfg = { + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + }; + + struct iwl_cfg iwl5100_bgn_cfg = { +@@ -1630,6 +1631,7 @@ struct iwl_cfg iwl5100_bgn_cfg = { + .led_compensation = 51, + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + }; + + struct iwl_cfg iwl5100_abg_cfg = { +@@ -1652,6 +1654,7 @@ struct iwl_cfg iwl5100_abg_cfg = { + .use_bsm = false, + .led_compensation = 51, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, ++ .plcp_delta_threshold = 
IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + }; + + struct iwl_cfg iwl5100_agn_cfg = { +@@ -1677,6 +1680,7 @@ struct iwl_cfg iwl5100_agn_cfg = { + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + }; + + struct iwl_cfg iwl5350_agn_cfg = { +@@ -1702,6 +1706,7 @@ struct iwl_cfg iwl5350_agn_cfg = { + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + }; + + struct iwl_cfg iwl5150_agn_cfg = { +@@ -1727,6 +1732,7 @@ struct iwl_cfg iwl5150_agn_cfg = { + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + }; + + struct iwl_cfg iwl5150_abg_cfg = { +@@ -1749,6 +1755,7 @@ struct iwl_cfg iwl5150_abg_cfg = { + .use_bsm = false, + .led_compensation = 51, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + }; + + MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c.orig 2010-04-13 14:30:36.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c 2010-04-13 14:32:09.000000000 -0400 +@@ -307,6 +307,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = { + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + }; + + struct iwl_cfg iwl6000i_2abg_cfg = { +@@ -336,6 +337,7 @@ struct iwl_cfg iwl6000i_2abg_cfg = { + .supports_idle = true, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + }; + + struct iwl_cfg iwl6000i_2bg_cfg = { +@@ -365,6 +367,7 @@ struct iwl_cfg iwl6000i_2bg_cfg = { + .supports_idle = true, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + }; + + struct iwl_cfg iwl6050_2agn_cfg = { +@@ -396,6 +399,7 @@ struct iwl_cfg iwl6050_2agn_cfg = { + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DYNAMIC, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + }; + + struct iwl_cfg iwl6050_2abg_cfg = { +@@ -425,6 +429,7 @@ struct iwl_cfg iwl6050_2abg_cfg = { + .supports_idle = true, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + }; + + struct iwl_cfg iwl6000_3agn_cfg = { +@@ -456,6 +461,7 @@ struct iwl_cfg iwl6000_3agn_cfg = { + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, ++ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + }; + + MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig 2010-04-13 14:30:36.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h 
2010-04-13 14:30:41.000000000 -0400 +@@ -232,6 +232,8 @@ struct iwl_mod_params { + * @support_ct_kill_exit: support ct kill exit condition + * @sm_ps_mode: spatial multiplexing power save mode + * @support_wimax_coexist: support wimax/wifi co-exist ++ * @plcp_delta_threshold: plcp error rate threshold used to trigger ++ * radio tuning when there is a high receiving plcp error rate + * + * We enable the driver to be backward compatible wrt API version. The + * driver specifies which APIs it supports (with @ucode_api_max being the +@@ -289,6 +291,7 @@ struct iwl_cfg { + bool support_ct_kill_exit; + u8 sm_ps_mode; + const bool support_wimax_coexist; ++ u8 plcp_delta_threshold; + }; + + /*************************** +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-debugfs.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-debugfs.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-debugfs.c.orig 2010-04-13 14:30:36.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2010-04-13 14:30:41.000000000 -0400 +@@ -1866,6 +1866,47 @@ static ssize_t iwl_dbgfs_internal_scan_w + return count; + } + ++static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file, ++ char __user *user_buf, ++ size_t count, loff_t *ppos) { ++ ++ struct iwl_priv *priv = (struct iwl_priv *)file->private_data; ++ int pos = 0; ++ char buf[12]; ++ const size_t bufsz = sizeof(buf); ++ ssize_t ret; ++ ++ pos += scnprintf(buf + pos, bufsz - pos, "%u\n", ++ priv->cfg->plcp_delta_threshold); ++ ++ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); ++ return ret; ++} ++ ++static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file, ++ const char __user *user_buf, ++ size_t count, loff_t *ppos) { ++ ++ struct iwl_priv *priv = file->private_data; ++ char buf[8]; ++ int buf_size; ++ int plcp; ++ ++ memset(buf, 0, sizeof(buf)); ++ buf_size = min(count, sizeof(buf) - 1); ++ if (copy_from_user(buf, user_buf, buf_size)) ++ return -EFAULT; ++ if (sscanf(buf, "%d", &plcp) != 1) ++ return -EINVAL; ++ if ((plcp <= IWL_MAX_PLCP_ERR_THRESHOLD_MIN) || ++ (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX)) ++ priv->cfg->plcp_delta_threshold = ++ IWL_MAX_PLCP_ERR_THRESHOLD_DEF; ++ else ++ priv->cfg->plcp_delta_threshold = plcp; ++ return count; ++} ++ + DEBUGFS_READ_FILE_OPS(rx_statistics); + DEBUGFS_READ_FILE_OPS(tx_statistics); + DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); +@@ -1881,6 +1922,7 @@ DEBUGFS_READ_FILE_OPS(power_save_status) + DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics); + DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics); + DEBUGFS_WRITE_FILE_OPS(internal_scan); ++DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta); + + /* + * Create the debugfs files and directories +@@ -1932,6 +1974,7 @@ int iwl_dbgfs_register(struct iwl_priv * + DEBUGFS_ADD_FILE(clear_ucode_statistics, debug, S_IWUSR); + DEBUGFS_ADD_FILE(clear_traffic_statistics, debug, S_IWUSR); + DEBUGFS_ADD_FILE(internal_scan, debug, S_IWUSR); ++ DEBUGFS_ADD_FILE(plcp_delta, debug, S_IWUSR | S_IRUSR); + if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { + DEBUGFS_ADD_FILE(ucode_rx_stats, debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_tx_stats, debug, S_IRUSR); +@@ -1990,6 +2033,7 @@ void iwl_dbgfs_unregister(struct iwl_pri + DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files. 
+ file_clear_traffic_statistics); + DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_internal_scan); ++ DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_plcp_delta); + if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { + DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files. + file_ucode_rx_stats); +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-debug.h.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-debug.h +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-debug.h.orig 2010-04-13 14:30:36.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-debug.h 2010-04-13 14:30:41.000000000 -0400 +@@ -110,6 +110,7 @@ struct iwl_debugfs { + struct dentry *file_clear_ucode_statistics; + struct dentry *file_clear_traffic_statistics; + struct dentry *file_internal_scan; ++ struct dentry *file_plcp_delta; + } dbgfs_debug_files; + u32 sram_offset; + u32 sram_len; +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig 2010-04-13 14:30:36.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h 2010-04-13 14:30:41.000000000 -0400 +@@ -984,6 +984,15 @@ struct iwl_switch_rxon { + __le16 channel; + }; + ++/* ++ * This is the threshold value of plcp error rate per 100mSecs. It is ++ * used to set and check for the validity of plcp_delta. ++ */ ++#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (0) ++#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50) ++#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100) ++#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255) ++ + struct iwl_priv { + + /* ieee device used by generic ieee processing code */ +@@ -1012,6 +1021,9 @@ struct iwl_priv { + /* ucode beacon time */ + u32 ucode_beacon_time; + ++ /* storing the jiffies when the plcp error rate is received */ ++ unsigned long plcp_jiffies; ++ + /* we allocate array of iwl4965_channel_info for NIC's valid channels. + * Access via channel # using indirect index array */ + struct iwl_channel_info *channel_info; /* channel info array */ +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig 2010-04-13 14:30:36.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c 2010-04-13 14:30:41.000000000 -0400 +@@ -593,11 +593,15 @@ static void iwl_accumulative_statistics( + + #define REG_RECALIB_PERIOD (60) + ++#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n" + void iwl_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) + { + int change; + struct iwl_rx_packet *pkt = rxb_addr(rxb); ++ int combined_plcp_delta; ++ unsigned int plcp_msec; ++ unsigned long plcp_received_jiffies; + + IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", + (int)sizeof(priv->statistics), +@@ -612,6 +616,56 @@ void iwl_rx_statistics(struct iwl_priv * + #ifdef CONFIG_IWLWIFI_DEBUG + iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); + #endif ++ /* ++ * check for plcp_err and trigger radio reset if it exceeds ++ * the plcp error threshold plcp_delta. ++ */ ++ plcp_received_jiffies = jiffies; ++ plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies - ++ (long) priv->plcp_jiffies); ++ priv->plcp_jiffies = plcp_received_jiffies; ++ /* ++ * check to make sure plcp_msec is not 0 to prevent division ++ * by zero. 
++ */ ++ if (plcp_msec) { ++ combined_plcp_delta = ++ (le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err) - ++ le32_to_cpu(priv->statistics.rx.ofdm.plcp_err)) + ++ (le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err) - ++ le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err)); ++ ++ if ((combined_plcp_delta > 0) && ++ ((combined_plcp_delta * 100) / plcp_msec) > ++ priv->cfg->plcp_delta_threshold) { ++ /* ++ * if plcp_err exceed the threshold, the following ++ * data is printed in csv format: ++ * Text: plcp_err exceeded %d, ++ * Received ofdm.plcp_err, ++ * Current ofdm.plcp_err, ++ * Received ofdm_ht.plcp_err, ++ * Current ofdm_ht.plcp_err, ++ * combined_plcp_delta, ++ * plcp_msec ++ */ ++ IWL_DEBUG_RADIO(priv, PLCP_MSG, ++ priv->cfg->plcp_delta_threshold, ++ le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err), ++ le32_to_cpu(priv->statistics.rx.ofdm.plcp_err), ++ le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err), ++ le32_to_cpu( ++ priv->statistics.rx.ofdm_ht.plcp_err), ++ combined_plcp_delta, plcp_msec); ++ ++ /* ++ * Reset the RF radio due to the high plcp ++ * error rate ++ */ ++ iwl_force_rf_reset(priv); ++ } ++ } ++ + memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); + + set_bit(STATUS_STATISTICS, &priv->status); diff --git a/iwlwifi_-add-function-to-reset_tune-radio-if-needed.patch b/iwlwifi_-add-function-to-reset_tune-radio-if-needed.patch new file mode 100644 index 0000000..c523f9d --- /dev/null +++ b/iwlwifi_-add-function-to-reset_tune-radio-if-needed.patch @@ -0,0 +1,374 @@ +Backport of the following upstream commit... + +commit afbdd69af0e6a0c40676d4d4b94a0a4414708eaa +Author: Wey-Yi Guy +Date: Fri Jan 22 14:22:43 2010 -0800 + + iwlwifi: add function to reset/tune radio if needed + + Adding "radio reset" function to help reset and stabilize the radio. + + During normal operation, sometime for unknown reason, radio encounter + problem and can not recover by itself; the best way to + recover from it is to reset and re-tune the radio. Currently, there is + no RF reset command available, but since radio will get reset when + switching channel, use internal hw scan request to force radio + reset and get back to normal operation state. + + The internal hw scan will only perform passive scan on the first + available channel (not the channel being used) in associated state. The + request should be ignored if already performing scan operation or STA is + not in associated state. + + Also include an "internal_scan" debugfs file to help trigger the + internal scan from user mode. + + Signed-off-by: Wey-Yi Guy + Signed-off-by: Reinette Chatre + Signed-off-by: John W. Linville + +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.c.orig 2010-04-13 11:41:15.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.c 2010-04-13 11:53:36.000000000 -0400 +@@ -3197,6 +3197,30 @@ void iwl_update_stats(struct iwl_priv *p + EXPORT_SYMBOL(iwl_update_stats); + #endif + ++void iwl_force_rf_reset(struct iwl_priv *priv) ++{ ++ if (test_bit(STATUS_EXIT_PENDING, &priv->status)) ++ return; ++ ++ if (!iwl_is_associated(priv)) { ++ IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n"); ++ return; ++ } ++ /* ++ * There is no easy and better way to force reset the radio, ++ * the only known method is switching channel which will force to ++ * reset and tune the radio. 
++ * Use internal short scan (single channel) operation to should ++ * achieve this objective. ++ * Driver should reset the radio when number of consecutive missed ++ * beacon, or any other uCode error condition detected. ++ */ ++ IWL_DEBUG_INFO(priv, "perform radio reset.\n"); ++ iwl_internal_short_hw_scan(priv); ++ return; ++} ++EXPORT_SYMBOL(iwl_force_rf_reset); ++ + #ifdef CONFIG_PM + + int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state) +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig 2010-02-24 13:52:17.000000000 -0500 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h 2010-04-13 11:53:36.000000000 -0400 +@@ -497,6 +497,8 @@ void iwl_init_scan_params(struct iwl_pri + int iwl_scan_cancel(struct iwl_priv *priv); + int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); + int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req); ++int iwl_internal_short_hw_scan(struct iwl_priv *priv); ++void iwl_force_rf_reset(struct iwl_priv *priv); + u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, + const u8 *ie, int ie_len, int left); + void iwl_setup_rx_scan_handlers(struct iwl_priv *priv); +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-debugfs.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-debugfs.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-debugfs.c.orig 2010-02-24 13:52:17.000000000 -0500 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2010-04-13 11:56:15.000000000 -0400 +@@ -1845,6 +1845,27 @@ static ssize_t iwl_dbgfs_clear_ucode_sta + return count; + } + ++static ssize_t iwl_dbgfs_internal_scan_write(struct file *file, ++ const char __user *user_buf, ++ size_t count, loff_t *ppos) ++{ ++ struct iwl_priv *priv = file->private_data; ++ char buf[8]; ++ int buf_size; ++ int scan; ++ ++ memset(buf, 0, sizeof(buf)); ++ buf_size = min(count, sizeof(buf) - 1); ++ if (copy_from_user(buf, user_buf, buf_size)) ++ return -EFAULT; ++ if (sscanf(buf, "%d", &scan) != 1) ++ return -EINVAL; ++ ++ iwl_internal_short_hw_scan(priv); ++ ++ return count; ++} ++ + DEBUGFS_READ_FILE_OPS(rx_statistics); + DEBUGFS_READ_FILE_OPS(tx_statistics); + DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); +@@ -1859,6 +1880,7 @@ DEBUGFS_READ_FILE_OPS(tx_power); + DEBUGFS_READ_FILE_OPS(power_save_status); + DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics); + DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics); ++DEBUGFS_WRITE_FILE_OPS(internal_scan); + + /* + * Create the debugfs files and directories +@@ -1909,6 +1931,7 @@ int iwl_dbgfs_register(struct iwl_priv * + DEBUGFS_ADD_FILE(power_save_status, debug, S_IRUSR); + DEBUGFS_ADD_FILE(clear_ucode_statistics, debug, S_IWUSR); + DEBUGFS_ADD_FILE(clear_traffic_statistics, debug, S_IWUSR); ++ DEBUGFS_ADD_FILE(internal_scan, debug, S_IWUSR); + if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { + DEBUGFS_ADD_FILE(ucode_rx_stats, debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_tx_stats, debug, S_IRUSR); +@@ -1966,6 +1989,7 @@ void iwl_dbgfs_unregister(struct iwl_pri + file_clear_ucode_statistics); + DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files. + file_clear_traffic_statistics); ++ DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_internal_scan); + if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { + DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files. 
+ file_ucode_rx_stats); +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-debug.h.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-debug.h +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-debug.h.orig 2010-02-24 13:52:17.000000000 -0500 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-debug.h 2010-04-13 11:54:48.000000000 -0400 +@@ -109,6 +109,7 @@ struct iwl_debugfs { + struct dentry *file_power_save_status; + struct dentry *file_clear_ucode_statistics; + struct dentry *file_clear_traffic_statistics; ++ struct dentry *file_internal_scan; + } dbgfs_debug_files; + u32 sram_offset; + u32 sram_len; +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig 2010-02-24 13:52:17.000000000 -0500 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h 2010-04-13 11:53:36.000000000 -0400 +@@ -1037,6 +1037,7 @@ struct iwl_priv { + void *scan; + int scan_bands; + struct cfg80211_scan_request *scan_request; ++ bool is_internal_short_scan; + u8 scan_tx_ant[IEEE80211_NUM_BANDS]; + u8 mgmt_tx_ant; + +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c.orig 2010-02-24 13:52:17.000000000 -0500 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c 2010-04-13 11:53:36.000000000 -0400 +@@ -314,6 +314,72 @@ u16 iwl_get_passive_dwell_time(struct iw + } + EXPORT_SYMBOL(iwl_get_passive_dwell_time); + ++static int iwl_get_single_channel_for_scan(struct iwl_priv *priv, ++ enum ieee80211_band band, ++ struct iwl_scan_channel *scan_ch) ++{ ++ const struct ieee80211_supported_band *sband; ++ const struct iwl_channel_info *ch_info; ++ u16 passive_dwell = 0; ++ u16 active_dwell = 0; ++ int i, added = 0; ++ u16 channel = 0; ++ ++ sband = iwl_get_hw_mode(priv, band); ++ if (!sband) { ++ IWL_ERR(priv, "invalid band\n"); ++ return added; ++ } ++ ++ active_dwell = iwl_get_active_dwell_time(priv, band, 0); ++ passive_dwell = iwl_get_passive_dwell_time(priv, band); ++ ++ if (passive_dwell <= active_dwell) ++ passive_dwell = active_dwell + 1; ++ ++ /* only scan single channel, good enough to reset the RF */ ++ /* pick the first valid not in-use channel */ ++ if (band == IEEE80211_BAND_5GHZ) { ++ for (i = 14; i < priv->channel_count; i++) { ++ if (priv->channel_info[i].channel != ++ le16_to_cpu(priv->staging_rxon.channel)) { ++ channel = priv->channel_info[i].channel; ++ ch_info = iwl_get_channel_info(priv, ++ band, channel); ++ if (is_channel_valid(ch_info)) ++ break; ++ } ++ } ++ } else { ++ for (i = 0; i < 14; i++) { ++ if (priv->channel_info[i].channel != ++ le16_to_cpu(priv->staging_rxon.channel)) { ++ channel = ++ priv->channel_info[i].channel; ++ ch_info = iwl_get_channel_info(priv, ++ band, channel); ++ if (is_channel_valid(ch_info)) ++ break; ++ } ++ } ++ } ++ if (channel) { ++ scan_ch->channel = cpu_to_le16(channel); ++ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; ++ scan_ch->active_dwell = cpu_to_le16(active_dwell); ++ scan_ch->passive_dwell = cpu_to_le16(passive_dwell); ++ /* Set txpower levels to defaults */ ++ scan_ch->dsp_atten = 110; ++ if (band == IEEE80211_BAND_5GHZ) ++ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; ++ else ++ scan_ch->tx_gain = ((1 << 5) | (5 << 3)); ++ added++; ++ } else ++ IWL_ERR(priv, "no valid channel found\n"); ++ return added; ++} ++ + static int 
iwl_get_channels_for_scan(struct iwl_priv *priv, + enum ieee80211_band band, + u8 is_active, u8 n_probes, +@@ -421,6 +487,7 @@ static int iwl_scan_initiate(struct iwl_ + + IWL_DEBUG_INFO(priv, "Starting scan...\n"); + set_bit(STATUS_SCANNING, &priv->status); ++ priv->is_internal_short_scan = false; + priv->scan_start = jiffies; + priv->scan_pass_start = priv->scan_start; + +@@ -488,6 +555,45 @@ out_unlock: + } + EXPORT_SYMBOL(iwl_mac_hw_scan); + ++/* ++ * internal short scan, this function should only been called while associated. ++ * It will reset and tune the radio to prevent possible RF related problem ++ */ ++int iwl_internal_short_hw_scan(struct iwl_priv *priv) ++{ ++ int ret = 0; ++ ++ if (!iwl_is_ready_rf(priv)) { ++ ret = -EIO; ++ IWL_DEBUG_SCAN(priv, "not ready or exit pending\n"); ++ goto out; ++ } ++ if (test_bit(STATUS_SCANNING, &priv->status)) { ++ IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); ++ ret = -EAGAIN; ++ goto out; ++ } ++ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { ++ IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n"); ++ ret = -EAGAIN; ++ goto out; ++ } ++ priv->scan_bands = 0; ++ if (priv->band == IEEE80211_BAND_5GHZ) ++ priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ); ++ else ++ priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ); ++ ++ IWL_DEBUG_SCAN(priv, "Start internal short scan...\n"); ++ set_bit(STATUS_SCANNING, &priv->status); ++ priv->is_internal_short_scan = true; ++ queue_work(priv->workqueue, &priv->request_scan); ++ ++out: ++ return ret; ++} ++EXPORT_SYMBOL(iwl_internal_short_hw_scan); ++ + #define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) + + void iwl_bg_scan_check(struct work_struct *data) +@@ -551,7 +657,8 @@ u16 iwl_fill_probe_req(struct iwl_priv * + if (WARN_ON(left < ie_len)) + return len; + +- memcpy(pos, ies, ie_len); ++ if (ies) ++ memcpy(pos, ies, ie_len); + len += ie_len; + left -= ie_len; + +@@ -654,7 +761,6 @@ static void iwl_bg_request_scan(struct w + unsigned long flags; + + IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); +- + spin_lock_irqsave(&priv->lock, flags); + interval = priv->beacon_int; + spin_unlock_irqrestore(&priv->lock, flags); +@@ -672,7 +778,9 @@ static void iwl_bg_request_scan(struct w + scan_suspend_time, interval); + } + +- if (priv->scan_request->n_ssids) { ++ if (priv->is_internal_short_scan) { ++ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); ++ } else if (priv->scan_request->n_ssids) { + int i, p = 0; + IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); + for (i = 0; i < priv->scan_request->n_ssids; i++) { +@@ -753,24 +861,38 @@ static void iwl_bg_request_scan(struct w + rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; + rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; + scan->rx_chain = cpu_to_le16(rx_chain); +- cmd_len = iwl_fill_probe_req(priv, +- (struct ieee80211_mgmt *)scan->data, +- priv->scan_request->ie, +- priv->scan_request->ie_len, +- IWL_MAX_SCAN_SIZE - sizeof(*scan)); ++ if (!priv->is_internal_short_scan) { ++ cmd_len = iwl_fill_probe_req(priv, ++ (struct ieee80211_mgmt *)scan->data, ++ priv->scan_request->ie, ++ priv->scan_request->ie_len, ++ IWL_MAX_SCAN_SIZE - sizeof(*scan)); ++ } else { ++ cmd_len = iwl_fill_probe_req(priv, ++ (struct ieee80211_mgmt *)scan->data, ++ NULL, 0, ++ IWL_MAX_SCAN_SIZE - sizeof(*scan)); + ++ } + scan->tx_cmd.len = cpu_to_le16(cmd_len); +- + if (iwl_is_monitor_mode(priv)) + scan->filter_flags = RXON_FILTER_PROMISC_MSK; + + scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK | + RXON_FILTER_BCON_AWARE_MSK); + +- scan->channel_count = +- 
iwl_get_channels_for_scan(priv, band, is_active, n_probes, +- (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); +- ++ if (priv->is_internal_short_scan) { ++ scan->channel_count = ++ iwl_get_single_channel_for_scan(priv, band, ++ (void *)&scan->data[le16_to_cpu( ++ scan->tx_cmd.len)]); ++ } else { ++ scan->channel_count = ++ iwl_get_channels_for_scan(priv, band, ++ is_active, n_probes, ++ (void *)&scan->data[le16_to_cpu( ++ scan->tx_cmd.len)]); ++ } + if (scan->channel_count == 0) { + IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); + goto done; +@@ -831,7 +953,12 @@ void iwl_bg_scan_completed(struct work_s + + cancel_delayed_work(&priv->scan_check); + +- ieee80211_scan_completed(priv->hw, false); ++ if (!priv->is_internal_short_scan) ++ ieee80211_scan_completed(priv->hw, false); ++ else { ++ priv->is_internal_short_scan = false; ++ IWL_DEBUG_SCAN(priv, "internal short scan completed\n"); ++ } + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; diff --git a/iwlwifi_-add-internal-short-scan-support-for-3945.patch b/iwlwifi_-add-internal-short-scan-support-for-3945.patch new file mode 100644 index 0000000..db132ee --- /dev/null +++ b/iwlwifi_-add-internal-short-scan-support-for-3945.patch @@ -0,0 +1,85 @@ +commit 4f4d4088b05155d4904e29d5c00316395ce32f27 +Author: Wey-Yi Guy +Date: Wed Feb 24 08:28:30 2010 -0800 + + iwlwifi: add internal short scan support for 3945 + + Add internal short scan support for 3945 NIC, This allows 3945 NIC + to support radio reset request like the other series of NICs. + + Signed-off-by: Wey-Yi Guy + Signed-off-by: Reinette Chatre + +diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c +index dd33251..252df12 100644 +--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c ++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c +@@ -2799,7 +2799,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data) + .len = sizeof(struct iwl3945_scan_cmd), + .flags = CMD_SIZE_HUGE, + }; +- int rc = 0; + struct iwl3945_scan_cmd *scan; + struct ieee80211_conf *conf = NULL; + u8 n_probes = 0; +@@ -2827,7 +2826,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data) + if (test_bit(STATUS_SCAN_HW, &priv->status)) { + IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests " + "Ignoring second request.\n"); +- rc = -EIO; + goto done; + } + +@@ -2862,7 +2860,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data) + priv->scan = kmalloc(sizeof(struct iwl3945_scan_cmd) + + IWL_MAX_SCAN_SIZE, GFP_KERNEL); + if (!priv->scan) { +- rc = -ENOMEM; ++ IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n"); + goto done; + } + } +@@ -2905,7 +2903,9 @@ static void iwl3945_bg_request_scan(struct work_struct *data) + scan_suspend_time, interval); + } + +- if (priv->scan_request->n_ssids) { ++ if (priv->is_internal_short_scan) { ++ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); ++ } else if (priv->scan_request->n_ssids) { + int i, p = 0; + IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); + for (i = 0; i < priv->scan_request->n_ssids; i++) { +@@ -2952,13 +2952,20 @@ static void iwl3945_bg_request_scan(struct work_struct *data) + goto done; + } + +- scan->tx_cmd.len = cpu_to_le16( ++ if (!priv->is_internal_short_scan) { ++ scan->tx_cmd.len = cpu_to_le16( + iwl_fill_probe_req(priv, + (struct ieee80211_mgmt *)scan->data, + priv->scan_request->ie, + priv->scan_request->ie_len, + IWL_MAX_SCAN_SIZE - sizeof(*scan))); +- ++ } else { ++ scan->tx_cmd.len = cpu_to_le16( ++ iwl_fill_probe_req(priv, ++ 
(struct ieee80211_mgmt *)scan->data, ++ NULL, 0, ++ IWL_MAX_SCAN_SIZE - sizeof(*scan))); ++ } + /* select Rx antennas */ + scan->flags |= iwl3945_get_antenna_flags(priv); + +@@ -2980,8 +2987,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data) + scan->len = cpu_to_le16(cmd.len); + + set_bit(STATUS_SCAN_HW, &priv->status); +- rc = iwl_send_cmd_sync(priv, &cmd); +- if (rc) ++ if (iwl_send_cmd_sync(priv, &cmd)) + goto done; + + queue_delayed_work(priv->workqueue, &priv->scan_check, diff --git a/iwlwifi_-code-cleanup-for-connectivity-recovery.patch b/iwlwifi_-code-cleanup-for-connectivity-recovery.patch new file mode 100644 index 0000000..bb7a4fe --- /dev/null +++ b/iwlwifi_-code-cleanup-for-connectivity-recovery.patch @@ -0,0 +1,254 @@ +Backport of the following upstream commit... + +commit fa8f130c504223d25c116b3d23787f465dfb1317 +Author: Wey-Yi Guy +Date: Fri Mar 5 14:22:46 2010 -0800 + + iwlwifi: code cleanup for connectivity recovery + + Split the connectivity check and recovery routine into separated + functions based on the types + 1. iwl_good_ack_health() - check for ack count + 2. iwl_good_plcp_health() - check for plcp error + + Based on the type of errors being detected, different recovery methods + will be used to bring the system back to normal operational state. + + Because different NIC has different HW and uCode, the behavior is also + different; these functions thus now form part of the ops infrastructure, + so we can have more control on how to monitor and recover from error condition + case per device. + + Signed-off-by: Wey-Yi Guy + Signed-off-by: Reinette Chatre + +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c.orig 2010-04-13 14:57:34.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c 2010-04-13 15:01:00.000000000 -0400 +@@ -139,7 +139,8 @@ static struct iwl_lib_ops iwl1000_lib = + .set_ct_kill = iwl1000_set_ct_threshold, + }, + .recover_from_tx_stall = iwl_bg_monitor_recover, +- .recover_from_statistics = iwl_recover_from_statistics, ++ .check_plcp_health = iwl_good_plcp_health, ++ .check_ack_health = iwl_good_ack_health, + }; + + static struct iwl_ops iwl1000_ops = { +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c.orig 2010-04-13 14:57:34.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c 2010-04-13 15:02:23.000000000 -0400 +@@ -2213,7 +2213,7 @@ static struct iwl_lib_ops iwl4965_lib = + .temperature = iwl4965_temperature_calib, + .set_ct_kill = iwl4965_set_ct_threshold, + }, +- .recover_from_statistics = iwl_recover_from_statistics, ++ .check_plcp_health = iwl_good_plcp_health, + }; + + static struct iwl_ops iwl4965_ops = { +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c.orig 2010-04-13 14:57:34.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c 2010-04-13 15:01:00.000000000 -0400 +@@ -1507,7 +1507,8 @@ struct iwl_lib_ops iwl5000_lib = { + .set_ct_kill = iwl5000_set_ct_threshold, + }, + .recover_from_tx_stall = iwl_bg_monitor_recover, +- .recover_from_statistics = iwl_recover_from_statistics, ++ .check_plcp_health = 
iwl_good_plcp_health, ++ .check_ack_health = iwl_good_ack_health, + }; + + static struct iwl_lib_ops iwl5150_lib = { +@@ -1561,7 +1562,8 @@ static struct iwl_lib_ops iwl5150_lib = + .set_ct_kill = iwl5150_set_ct_threshold, + }, + .recover_from_tx_stall = iwl_bg_monitor_recover, +- .recover_from_statistics = iwl_recover_from_statistics, ++ .check_plcp_health = iwl_good_plcp_health, ++ .check_ack_health = iwl_good_ack_health, + }; + + static struct iwl_ops iwl5000_ops = { +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c.orig 2010-04-13 14:57:34.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c 2010-04-13 15:01:00.000000000 -0400 +@@ -251,7 +251,8 @@ static struct iwl_lib_ops iwl6000_lib = + .set_ct_kill = iwl6000_set_ct_threshold, + }, + .recover_from_tx_stall = iwl_bg_monitor_recover, +- .recover_from_statistics = iwl_recover_from_statistics, ++ .check_plcp_health = iwl_good_plcp_health, ++ .check_ack_health = iwl_good_ack_health, + }; + + static struct iwl_ops iwl6000_ops = { +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig 2010-04-13 14:57:34.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h 2010-04-13 15:04:33.000000000 -0400 +@@ -189,8 +189,11 @@ struct iwl_lib_ops { + struct iwl_temp_ops temp_ops; + /* recover from tx queue stall */ + void (*recover_from_tx_stall)(unsigned long data); +- /* recover from errors showed in statistics */ +- void (*recover_from_statistics)(struct iwl_priv *priv, ++ /* check for plcp health */ ++ bool (*check_plcp_health)(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt); ++ /* check for ack health */ ++ bool (*check_ack_health)(struct iwl_priv *priv, + struct iwl_rx_packet *pkt); + }; + +@@ -435,7 +438,9 @@ int iwl_tx_queue_reclaim(struct iwl_priv + /* Handlers */ + void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +-void iwl_recover_from_statistics(struct iwl_priv *priv, ++bool iwl_good_plcp_health(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt); ++bool iwl_good_ack_health(struct iwl_priv *priv, + struct iwl_rx_packet *pkt); + void iwl_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig 2010-04-13 14:57:34.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c 2010-04-13 15:01:00.000000000 -0400 +@@ -598,24 +598,18 @@ static void iwl_accumulative_statistics( + #define BA_TIMEOUT_CNT (5) + #define BA_TIMEOUT_MAX (16) + +-#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n" +-/* +- * This function checks for plcp error, ACK count ratios, aggregated BA +- * timeout retries. +- * - When the ACK count ratio is 0 and aggregated BA timeout retries is +- * exceeding the BA_TIMEOUT_MAX, it will recover the failure by resetting +- * the firmware. +- * - When the plcp error is exceeding the thresholds, it will reset the radio +- * to improve the throughput. ++/** ++ * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries. 
++ * ++ * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding ++ * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal ++ * operation state. + */ +-void iwl_recover_from_statistics(struct iwl_priv *priv, +- struct iwl_rx_packet *pkt) ++bool iwl_good_ack_health(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt) + { +- int combined_plcp_delta; +- unsigned int plcp_msec; +- unsigned long plcp_received_jiffies; +- int actual_ack_cnt_delta; +- int expected_ack_cnt_delta; ++ bool rc = true; ++ int actual_ack_cnt_delta, expected_ack_cnt_delta; + int ba_timeout_delta; + + actual_ack_cnt_delta = +@@ -638,13 +632,27 @@ void iwl_recover_from_statistics(struct + + IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n", + ba_timeout_delta); +- if ((actual_ack_cnt_delta == 0) && +- (ba_timeout_delta >= BA_TIMEOUT_MAX)) { +- IWL_DEBUG_RADIO(priv, +- "call iwl_force_reset(IWL_FW_RESET)\n"); +- iwl_force_reset(priv, IWL_FW_RESET); +- } ++ if (!actual_ack_cnt_delta && ++ (ba_timeout_delta >= BA_TIMEOUT_MAX)) ++ rc = false; + } ++ return rc; ++} ++EXPORT_SYMBOL(iwl_good_ack_health); ++ ++/** ++ * iwl_good_plcp_health - checks for plcp error. ++ * ++ * When the plcp error is exceeding the thresholds, reset the radio ++ * to improve the throughput. ++ */ ++bool iwl_good_plcp_health(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt) ++{ ++ bool rc = true; ++ int combined_plcp_delta; ++ unsigned int plcp_msec; ++ unsigned long plcp_received_jiffies; + + /* + * check for plcp_err and trigger radio reset if it exceeds +@@ -679,7 +687,8 @@ void iwl_recover_from_statistics(struct + * combined_plcp_delta, + * plcp_msec + */ +- IWL_DEBUG_RADIO(priv, PLCP_MSG, ++ IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, " ++ "%u, %u, %u, %u, %d, %u mSecs\n", + priv->cfg->plcp_delta_threshold, + le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err), + le32_to_cpu(priv->statistics.rx.ofdm.plcp_err), +@@ -687,15 +696,42 @@ void iwl_recover_from_statistics(struct + le32_to_cpu( + priv->statistics.rx.ofdm_ht.plcp_err), + combined_plcp_delta, plcp_msec); +- /* +- * Reset the RF radio due to the high plcp +- * error rate +- */ +- iwl_force_reset(priv, IWL_RF_RESET); ++ rc = false; ++ } ++ } ++ return rc; ++} ++EXPORT_SYMBOL(iwl_good_plcp_health); ++ ++static void iwl_recover_from_statistics(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt) ++{ ++ if (test_bit(STATUS_EXIT_PENDING, &priv->status)) ++ return; ++ if (iwl_is_associated(priv)) { ++ if (priv->cfg->ops->lib->check_ack_health) { ++ if (!priv->cfg->ops->lib->check_ack_health( ++ priv, pkt)) { ++ /* ++ * low ack count detected ++ * restart Firmware ++ */ ++ IWL_ERR(priv, "low ack count detected, " ++ "restart firmware\n"); ++ iwl_force_reset(priv, IWL_FW_RESET); ++ } ++ } else if (priv->cfg->ops->lib->check_plcp_health) { ++ if (!priv->cfg->ops->lib->check_plcp_health( ++ priv, pkt)) { ++ /* ++ * high plcp error detected ++ * reset Radio ++ */ ++ iwl_force_reset(priv, IWL_RF_RESET); ++ } + } + } + } +-EXPORT_SYMBOL(iwl_recover_from_statistics); + + void iwl_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +@@ -717,8 +753,7 @@ void iwl_rx_statistics(struct iwl_priv * + #ifdef CONFIG_IWLWIFI_DEBUG + iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); + #endif +- if (priv->cfg->ops->lib->recover_from_statistics) +- priv->cfg->ops->lib->recover_from_statistics(priv, pkt); ++ iwl_recover_from_statistics(priv, pkt); + + memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); + diff --git 
a/iwlwifi_-iwl_good_ack_health-only-apply-to-AGN-device.patch b/iwlwifi_-iwl_good_ack_health-only-apply-to-AGN-device.patch new file mode 100644 index 0000000..6072f0a --- /dev/null +++ b/iwlwifi_-iwl_good_ack_health-only-apply-to-AGN-device.patch @@ -0,0 +1,140 @@ +Backport of the following upstream commit... + +commit 872c8ddcbec06995c1c7caa3e41c921290a8b6df +Author: Wey-Yi Guy +Date: Tue Mar 16 10:46:31 2010 -0700 + + iwlwifi: iwl_good_ack_health() only apply to AGN device + + iwl_good_ack_health() check for expected and actual ack count which only + apply to aggregation mode. Move the function to iwlagn module. + + Reported-by: Chantry Xavier + Signed-off-by: Wey-Yi Guy + Signed-off-by: Reinette Chatre + +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c.orig 2010-04-13 15:15:47.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c 2010-04-13 15:20:00.000000000 -0400 +@@ -1316,6 +1316,52 @@ static void iwl_irq_tasklet(struct iwl_p + iwl_enable_interrupts(priv); + } + ++/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ ++#define ACK_CNT_RATIO (50) ++#define BA_TIMEOUT_CNT (5) ++#define BA_TIMEOUT_MAX (16) ++ ++/** ++ * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries. ++ * ++ * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding ++ * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal ++ * operation state. ++ */ ++bool iwl_good_ack_health(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt) ++{ ++ bool rc = true; ++ int actual_ack_cnt_delta, expected_ack_cnt_delta; ++ int ba_timeout_delta; ++ ++ actual_ack_cnt_delta = ++ le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) - ++ le32_to_cpu(priv->statistics.tx.actual_ack_cnt); ++ expected_ack_cnt_delta = ++ le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) - ++ le32_to_cpu(priv->statistics.tx.expected_ack_cnt); ++ ba_timeout_delta = ++ le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) - ++ le32_to_cpu(priv->statistics.tx.agg.ba_timeout); ++ if ((priv->agg_tids_count > 0) && ++ (expected_ack_cnt_delta > 0) && ++ (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta) ++ < ACK_CNT_RATIO) && ++ (ba_timeout_delta > BA_TIMEOUT_CNT)) { ++ IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d," ++ " expected_ack_cnt = %d\n", ++ actual_ack_cnt_delta, expected_ack_cnt_delta); ++ ++ IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n", ++ ba_timeout_delta); ++ if (!actual_ack_cnt_delta && ++ (ba_timeout_delta >= BA_TIMEOUT_MAX)) ++ rc = false; ++ } ++ return rc; ++} ++ + + /****************************************************************************** + * +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig 2010-04-13 15:15:47.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h 2010-04-13 15:18:13.000000000 -0400 +@@ -584,6 +584,8 @@ void iwl_disable_ict(struct iwl_priv *pr + int iwl_alloc_isr_ict(struct iwl_priv *priv); + void iwl_free_isr_ict(struct iwl_priv *priv); + irqreturn_t iwl_isr_ict(int irq, void *data); ++bool iwl_good_ack_health(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt); + + static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv) + { +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig 
linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig 2010-04-13 15:15:47.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c 2010-04-13 15:18:52.000000000 -0400 +@@ -593,53 +593,6 @@ static void iwl_accumulative_statistics( + + #define REG_RECALIB_PERIOD (60) + +-/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ +-#define ACK_CNT_RATIO (50) +-#define BA_TIMEOUT_CNT (5) +-#define BA_TIMEOUT_MAX (16) +- +-/** +- * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries. +- * +- * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding +- * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal +- * operation state. +- */ +-bool iwl_good_ack_health(struct iwl_priv *priv, +- struct iwl_rx_packet *pkt) +-{ +- bool rc = true; +- int actual_ack_cnt_delta, expected_ack_cnt_delta; +- int ba_timeout_delta; +- +- actual_ack_cnt_delta = +- le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) - +- le32_to_cpu(priv->statistics.tx.actual_ack_cnt); +- expected_ack_cnt_delta = +- le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) - +- le32_to_cpu(priv->statistics.tx.expected_ack_cnt); +- ba_timeout_delta = +- le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) - +- le32_to_cpu(priv->statistics.tx.agg.ba_timeout); +- if ((priv->agg_tids_count > 0) && +- (expected_ack_cnt_delta > 0) && +- (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta) +- < ACK_CNT_RATIO) && +- (ba_timeout_delta > BA_TIMEOUT_CNT)) { +- IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d," +- " expected_ack_cnt = %d\n", +- actual_ack_cnt_delta, expected_ack_cnt_delta); +- +- IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n", +- ba_timeout_delta); +- if (!actual_ack_cnt_delta && +- (ba_timeout_delta >= BA_TIMEOUT_MAX)) +- rc = false; +- } +- return rc; +-} +-EXPORT_SYMBOL(iwl_good_ack_health); +- + /** + * iwl_good_plcp_health - checks for plcp error. + * diff --git a/iwlwifi_-move-plcp-check-to-separated-function.patch b/iwlwifi_-move-plcp-check-to-separated-function.patch new file mode 100644 index 0000000..394eeb2 --- /dev/null +++ b/iwlwifi_-move-plcp-check-to-separated-function.patch @@ -0,0 +1,186 @@ +Backport of the following upstream commit... + +commit beac5498b792ed8420885ee23e8d4f2885ee2d13 +Author: Wey-Yi Guy +Date: Thu Mar 4 13:38:58 2010 -0800 + + iwlwifi: move plcp check to separated function + + Move the plcp error checking into stand alone function and pointed by ops + to accommodate devices not needing this recovery. 
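(Illustration, not part of the upstream commit: the per-device recovery dispatch that this patch and the "code cleanup for connectivity recovery" patch above converge on can be sketched in plain C as below. The hook names and the else-if ordering follow the hunks in this series; struct priv and struct pkt are simplified stand-ins for the real iwlwifi structures, and the firmware/radio resets are reduced to printouts.)

#include <stdbool.h>
#include <stdio.h>

struct priv;                            /* stand-in for struct iwl_priv */
struct pkt;                             /* stand-in for struct iwl_rx_packet */

struct lib_ops {
        /* a device that does not need a given check simply leaves the hook NULL */
        bool (*check_ack_health)(struct priv *priv, struct pkt *pkt);
        bool (*check_plcp_health)(struct priv *priv, struct pkt *pkt);
};

static void recover_from_statistics(struct priv *priv, struct pkt *pkt,
                                    const struct lib_ops *ops)
{
        if (ops->check_ack_health) {
                if (!ops->check_ack_health(priv, pkt)) {
                        /* low ack count: the driver reloads firmware (IWL_FW_RESET) */
                        printf("low ack count detected, restart firmware\n");
                }
        } else if (ops->check_plcp_health) {
                if (!ops->check_plcp_health(priv, pkt)) {
                        /* high plcp error rate: the driver resets the radio (IWL_RF_RESET) */
                        printf("plcp error rate too high, reset radio\n");
                }
        }
}

static bool ack_bad(struct priv *priv, struct pkt *pkt) { return false; }

int main(void)
{
        /* an AGN-style device wires up the ack check; 3945 would leave it NULL */
        struct lib_ops agn_ops = { .check_ack_health = ack_bad };
        recover_from_statistics(NULL, NULL, &agn_ops);  /* takes the firmware-reload path */
        return 0;
}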
+ + Signed-off-by: Trieu 'Andrew' Nguyen + Signed-off-by: Wey-Yi Guy + Signed-off-by: Reinette Chatre + +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c.orig 2010-04-13 14:40:14.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-1000.c 2010-04-13 14:40:43.000000000 -0400 +@@ -139,6 +139,7 @@ static struct iwl_lib_ops iwl1000_lib = + .set_ct_kill = iwl1000_set_ct_threshold, + }, + .recover_from_tx_stall = iwl_bg_monitor_recover, ++ .recover_from_statistics = iwl_recover_from_statistics, + }; + + static struct iwl_ops iwl1000_ops = { +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c.orig 2010-04-13 14:40:14.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-4965.c 2010-04-13 14:41:23.000000000 -0400 +@@ -2213,6 +2213,7 @@ static struct iwl_lib_ops iwl4965_lib = + .temperature = iwl4965_temperature_calib, + .set_ct_kill = iwl4965_set_ct_threshold, + }, ++ .recover_from_statistics = iwl_recover_from_statistics, + }; + + static struct iwl_ops iwl4965_ops = { +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c.orig 2010-04-13 14:40:14.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-5000.c 2010-04-13 14:40:43.000000000 -0400 +@@ -1507,6 +1507,7 @@ struct iwl_lib_ops iwl5000_lib = { + .set_ct_kill = iwl5000_set_ct_threshold, + }, + .recover_from_tx_stall = iwl_bg_monitor_recover, ++ .recover_from_statistics = iwl_recover_from_statistics, + }; + + static struct iwl_lib_ops iwl5150_lib = { +@@ -1560,6 +1561,7 @@ static struct iwl_lib_ops iwl5150_lib = + .set_ct_kill = iwl5150_set_ct_threshold, + }, + .recover_from_tx_stall = iwl_bg_monitor_recover, ++ .recover_from_statistics = iwl_recover_from_statistics, + }; + + static struct iwl_ops iwl5000_ops = { +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c.orig 2010-04-13 14:40:14.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-6000.c 2010-04-13 14:42:29.000000000 -0400 +@@ -251,6 +251,7 @@ static struct iwl_lib_ops iwl6000_lib = + .set_ct_kill = iwl6000_set_ct_threshold, + }, + .recover_from_tx_stall = iwl_bg_monitor_recover, ++ .recover_from_statistics = iwl_recover_from_statistics, + }; + + static struct iwl_ops iwl6000_ops = { +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig 2010-04-13 14:40:14.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h 2010-04-13 14:43:12.000000000 -0400 +@@ -189,6 +189,9 @@ struct iwl_lib_ops { + struct iwl_temp_ops temp_ops; + /* recover from tx queue stall */ + void (*recover_from_tx_stall)(unsigned long data); ++ /* recover from errors showed in statistics */ ++ void (*recover_from_statistics)(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt); + }; + + struct iwl_led_ops { +@@ -432,6 +435,8 @@ int iwl_tx_queue_reclaim(struct iwl_priv + /* Handlers */ + 
void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); ++void iwl_recover_from_statistics(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt); + void iwl_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); + void iwl_reply_statistics(struct iwl_priv *priv, +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig 2010-04-13 14:40:14.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c 2010-04-13 14:40:43.000000000 -0400 +@@ -594,28 +594,18 @@ static void iwl_accumulative_statistics( + #define REG_RECALIB_PERIOD (60) + + #define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n" +-void iwl_rx_statistics(struct iwl_priv *priv, +- struct iwl_rx_mem_buffer *rxb) ++/* ++ * This function checks for plcp error. ++ * - When the plcp error is exceeding the thresholds, it will reset the radio ++ * to improve the throughput. ++ */ ++void iwl_recover_from_statistics(struct iwl_priv *priv, ++ struct iwl_rx_packet *pkt) + { +- int change; +- struct iwl_rx_packet *pkt = rxb_addr(rxb); + int combined_plcp_delta; + unsigned int plcp_msec; + unsigned long plcp_received_jiffies; + +- IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", +- (int)sizeof(priv->statistics), +- le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); +- +- change = ((priv->statistics.general.temperature != +- pkt->u.stats.general.temperature) || +- ((priv->statistics.flag & +- STATISTICS_REPLY_FLG_HT40_MODE_MSK) != +- (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK))); +- +-#ifdef CONFIG_IWLWIFI_DEBUG +- iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); +-#endif + /* + * check for plcp_err and trigger radio reset if it exceeds + * the plcp error threshold plcp_delta. 
+@@ -636,11 +626,11 @@ void iwl_rx_statistics(struct iwl_priv * + le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err)); + + if ((combined_plcp_delta > 0) && +- ((combined_plcp_delta * 100) / plcp_msec) > ++ ((combined_plcp_delta * 100) / plcp_msec) > + priv->cfg->plcp_delta_threshold) { + /* +- * if plcp_err exceed the threshold, the following +- * data is printed in csv format: ++ * if plcp_err exceed the threshold, ++ * the following data is printed in csv format: + * Text: plcp_err exceeded %d, + * Received ofdm.plcp_err, + * Current ofdm.plcp_err, +@@ -655,9 +645,8 @@ void iwl_rx_statistics(struct iwl_priv * + le32_to_cpu(priv->statistics.rx.ofdm.plcp_err), + le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err), + le32_to_cpu( +- priv->statistics.rx.ofdm_ht.plcp_err), ++ priv->statistics.rx.ofdm_ht.plcp_err), + combined_plcp_delta, plcp_msec); +- + /* + * Reset the RF radio due to the high plcp + * error rate +@@ -665,6 +654,31 @@ void iwl_rx_statistics(struct iwl_priv * + iwl_force_reset(priv, IWL_RF_RESET); + } + } ++} ++EXPORT_SYMBOL(iwl_recover_from_statistics); ++ ++void iwl_rx_statistics(struct iwl_priv *priv, ++ struct iwl_rx_mem_buffer *rxb) ++{ ++ int change; ++ struct iwl_rx_packet *pkt = rxb_addr(rxb); ++ ++ ++ IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", ++ (int)sizeof(priv->statistics), ++ le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); ++ ++ change = ((priv->statistics.general.temperature != ++ pkt->u.stats.general.temperature) || ++ ((priv->statistics.flag & ++ STATISTICS_REPLY_FLG_HT40_MODE_MSK) != ++ (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK))); ++ ++#ifdef CONFIG_IWLWIFI_DEBUG ++ iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); ++#endif ++ if (priv->cfg->ops->lib->recover_from_statistics) ++ priv->cfg->ops->lib->recover_from_statistics(priv, pkt); + + memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); + diff --git a/iwlwifi_-multiple-force-reset-mode.patch b/iwlwifi_-multiple-force-reset-mode.patch new file mode 100644 index 0000000..e50971f --- /dev/null +++ b/iwlwifi_-multiple-force-reset-mode.patch @@ -0,0 +1,172 @@ +Backport of the following upstream commit... + +commit a93e7973d0983d22fcbe5f691244736211639fe7 +Author: Wey-Yi Guy +Date: Wed Feb 3 11:47:19 2010 -0800 + + iwlwifi: multiple force reset mode + + Provide the function to perform different type of uCode reset/reload operation. + When uCode detect error and can not fix itself, this iwl_force_reset() + function allow driver to perform the necessary reset/reload functions and help + to bring uCode back to normal operation state. 
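A condensed sketch of the new entry point follows (simplified from the
iwl-core.c hunk below; the real function also bails out while
STATUS_EXIT_PENDING is set and logs each request):

	#define IWL_DELAY_NEXT_FORCE_RESET (HZ*3)

	int iwl_force_reset(struct iwl_priv *priv, int mode)
	{
		/* throttle repeat requests: anything arriving within
		 * IWL_DELAY_NEXT_FORCE_RESET of the previous reset is
		 * rejected */
		if (priv->last_force_reset_jiffies &&
		    time_after(priv->last_force_reset_jiffies +
			       IWL_DELAY_NEXT_FORCE_RESET, jiffies))
			return -EAGAIN;

		switch (mode) {
		case IWL_RF_RESET:
			iwl_force_rf_reset(priv);	/* retune the radio */
			break;
		case IWL_FW_RESET:
			/* flag a firmware error and queue the restart work */
			set_bit(STATUS_FW_ERROR, &priv->status);
			clear_bit(STATUS_READY, &priv->status);
			queue_work(priv->workqueue, &priv->restart);
			break;
		default:
			return -EINVAL;
		}
		priv->last_force_reset_jiffies = jiffies;
		return 0;
	}

Repeat requests inside that three second window get -EAGAIN rather than
triggering another reset.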
+ + Currently only 2 type of force reset are available: + - reset radio + - reload firmware + + Signed-off-by: Wey-Yi Guy + Signed-off-by: Reinette Chatre + +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.c.orig 2010-04-13 13:36:35.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.c 2010-04-13 13:38:40.000000000 -0400 +@@ -3197,7 +3197,7 @@ void iwl_update_stats(struct iwl_priv *p + EXPORT_SYMBOL(iwl_update_stats); + #endif + +-void iwl_force_rf_reset(struct iwl_priv *priv) ++static void iwl_force_rf_reset(struct iwl_priv *priv) + { + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; +@@ -3219,7 +3219,47 @@ void iwl_force_rf_reset(struct iwl_priv + iwl_internal_short_hw_scan(priv); + return; + } +-EXPORT_SYMBOL(iwl_force_rf_reset); ++ ++#define IWL_DELAY_NEXT_FORCE_RESET (HZ*3) ++ ++int iwl_force_reset(struct iwl_priv *priv, int mode) ++{ ++ if (test_bit(STATUS_EXIT_PENDING, &priv->status)) ++ return -EINVAL; ++ ++ if (priv->last_force_reset_jiffies && ++ time_after(priv->last_force_reset_jiffies + ++ IWL_DELAY_NEXT_FORCE_RESET, jiffies)) { ++ IWL_DEBUG_INFO(priv, "force reset rejected\n"); ++ return -EAGAIN; ++ } ++ ++ IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode); ++ ++ switch (mode) { ++ case IWL_RF_RESET: ++ iwl_force_rf_reset(priv); ++ break; ++ case IWL_FW_RESET: ++ IWL_ERR(priv, "On demand firmware reload\n"); ++ /* Set the FW error flag -- cleared on iwl_down */ ++ set_bit(STATUS_FW_ERROR, &priv->status); ++ wake_up_interruptible(&priv->wait_command_queue); ++ /* ++ * Keep the restart process from trying to send host ++ * commands by clearing the INIT status bit ++ */ ++ clear_bit(STATUS_READY, &priv->status); ++ queue_work(priv->workqueue, &priv->restart); ++ break; ++ default: ++ IWL_DEBUG_INFO(priv, "invalid reset request.\n"); ++ return -EINVAL; ++ } ++ priv->last_force_reset_jiffies = jiffies; ++ ++ return 0; ++} + + #ifdef CONFIG_PM + +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h.orig 2010-04-13 13:36:50.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.h 2010-04-13 13:38:40.000000000 -0400 +@@ -501,7 +501,7 @@ int iwl_scan_cancel(struct iwl_priv *pri + int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); + int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req); + int iwl_internal_short_hw_scan(struct iwl_priv *priv); +-void iwl_force_rf_reset(struct iwl_priv *priv); ++int iwl_force_reset(struct iwl_priv *priv, int mode); + u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, + const u8 *ie, int ie_len, int left); + void iwl_setup_rx_scan_handlers(struct iwl_priv *priv); +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig 2010-04-13 13:36:50.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h 2010-04-13 13:38:40.000000000 -0400 +@@ -993,6 +993,11 @@ struct iwl_switch_rxon { + #define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100) + #define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255) + ++enum iwl_reset { ++ IWL_RF_RESET = 0, ++ IWL_FW_RESET, ++}; ++ + struct iwl_priv { + + 
/* ieee device used by generic ieee processing code */ +@@ -1024,6 +1029,9 @@ struct iwl_priv { + /* storing the jiffies when the plcp error rate is received */ + unsigned long plcp_jiffies; + ++ /* force reset */ ++ unsigned long last_force_reset_jiffies; ++ + /* we allocate array of iwl4965_channel_info for NIC's valid channels. + * Access via channel # using indirect index array */ + struct iwl_channel_info *channel_info; /* channel info array */ +@@ -1046,7 +1054,6 @@ struct iwl_priv { + unsigned long scan_start; + unsigned long scan_pass_start; + unsigned long scan_start_tsf; +- unsigned long last_internal_scan_jiffies; + void *scan; + int scan_bands; + struct cfg80211_scan_request *scan_request; +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c.orig 2010-04-13 13:36:50.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-rx.c 2010-04-13 13:38:40.000000000 -0400 +@@ -662,7 +662,7 @@ void iwl_rx_statistics(struct iwl_priv * + * Reset the RF radio due to the high plcp + * error rate + */ +- iwl_force_rf_reset(priv); ++ iwl_force_reset(priv, IWL_RF_RESET); + } + } + +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c.orig 2010-04-13 13:36:42.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-scan.c 2010-04-13 13:38:40.000000000 -0400 +@@ -254,8 +254,6 @@ static void iwl_rx_scan_complete_notif(s + priv->last_scan_jiffies = jiffies; + if (!priv->is_internal_short_scan) + priv->next_scan_jiffies = 0; +- else +- priv->last_internal_scan_jiffies = jiffies; + + IWL_DEBUG_INFO(priv, "Setting scan to off\n"); + +@@ -564,8 +562,6 @@ EXPORT_SYMBOL(iwl_mac_hw_scan); + * internal short scan, this function should only been called while associated. + * It will reset and tune the radio to prevent possible RF related problem + */ +-#define IWL_DELAY_NEXT_INTERNAL_SCAN (HZ*1) +- + int iwl_internal_short_hw_scan(struct iwl_priv *priv) + { + int ret = 0; +@@ -585,12 +581,6 @@ int iwl_internal_short_hw_scan(struct iw + ret = -EAGAIN; + goto out; + } +- if (priv->last_internal_scan_jiffies && +- time_after(priv->last_internal_scan_jiffies + +- IWL_DELAY_NEXT_INTERNAL_SCAN, jiffies)) { +- IWL_DEBUG_SCAN(priv, "internal scan rejected\n"); +- goto out; +- } + + priv->scan_bands = 0; + if (priv->band == IEEE80211_BAND_5GHZ) diff --git a/iwlwifi_-separated-time-check-for-different-type-of-force-reset.patch b/iwlwifi_-separated-time-check-for-different-type-of-force-reset.patch new file mode 100644 index 0000000..f708074 --- /dev/null +++ b/iwlwifi_-separated-time-check-for-different-type-of-force-reset.patch @@ -0,0 +1,122 @@ +Backport of the following upstream commit... + +commit 8a472da431998b7357e6dc562e79a3061ed56cad +Author: Wey-Yi Guy +Date: Thu Feb 18 22:03:06 2010 -0800 + + iwlwifi: separated time check for different type of force reset + + Use different timing duration check for different type of force reset, + force reset request can come from different source and based on + different reason; one type of reset request should not block other type of + reset request. + + Adding structure to keep track of different force reset request. + + Signed-off-by: Wey-Yi Guy + Signed-off-by: Reinette Chatre + Signed-off-by: John W. 
Linville + +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c.orig 2010-04-13 13:44:14.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-agn.c 2010-04-13 13:47:34.000000000 -0400 +@@ -3189,6 +3189,12 @@ static int iwl_init_drv(struct iwl_priv + + priv->iw_mode = NL80211_IFTYPE_STATION; + ++ /* initialize force reset */ ++ priv->force_reset[IWL_RF_RESET].reset_duration = ++ IWL_DELAY_NEXT_FORCE_RF_RESET; ++ priv->force_reset[IWL_FW_RESET].reset_duration = ++ IWL_DELAY_NEXT_FORCE_FW_RELOAD; ++ + /* Choose which receivers/antennas to use */ + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv); +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.c.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.c +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.c.orig 2010-04-13 13:44:14.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-core.c 2010-04-13 13:47:34.000000000 -0400 +@@ -3220,22 +3220,30 @@ static void iwl_force_rf_reset(struct iw + return; + } + +-#define IWL_DELAY_NEXT_FORCE_RESET (HZ*3) + + int iwl_force_reset(struct iwl_priv *priv, int mode) + { ++ struct iwl_force_reset *force_reset; ++ + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EINVAL; + +- if (priv->last_force_reset_jiffies && +- time_after(priv->last_force_reset_jiffies + +- IWL_DELAY_NEXT_FORCE_RESET, jiffies)) { ++ if (mode >= IWL_MAX_FORCE_RESET) { ++ IWL_DEBUG_INFO(priv, "invalid reset request.\n"); ++ return -EINVAL; ++ } ++ force_reset = &priv->force_reset[mode]; ++ force_reset->reset_request_count++; ++ if (force_reset->last_force_reset_jiffies && ++ time_after(force_reset->last_force_reset_jiffies + ++ force_reset->reset_duration, jiffies)) { + IWL_DEBUG_INFO(priv, "force reset rejected\n"); ++ force_reset->reset_reject_count++; + return -EAGAIN; + } +- ++ force_reset->reset_success_count++; ++ force_reset->last_force_reset_jiffies = jiffies; + IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode); +- + switch (mode) { + case IWL_RF_RESET: + iwl_force_rf_reset(priv); +@@ -3252,12 +3260,7 @@ int iwl_force_reset(struct iwl_priv *pri + clear_bit(STATUS_READY, &priv->status); + queue_work(priv->workqueue, &priv->restart); + break; +- default: +- IWL_DEBUG_INFO(priv, "invalid reset request.\n"); +- return -EINVAL; + } +- priv->last_force_reset_jiffies = jiffies; +- + return 0; + } + +diff -up linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h +--- linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h.orig 2010-04-13 13:45:06.000000000 -0400 ++++ linux-2.6.33.noarch/drivers/net/wireless/iwlwifi/iwl-dev.h 2010-04-13 13:47:34.000000000 -0400 +@@ -994,9 +994,21 @@ struct iwl_switch_rxon { + #define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200) + #define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255) + ++#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3) ++#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) ++ + enum iwl_reset { + IWL_RF_RESET = 0, + IWL_FW_RESET, ++ IWL_MAX_FORCE_RESET, ++}; ++ ++struct iwl_force_reset { ++ int reset_request_count; ++ int reset_success_count; ++ int reset_reject_count; ++ unsigned long reset_duration; ++ unsigned long last_force_reset_jiffies; + }; + + struct iwl_priv { +@@ -1031,7 +1043,7 @@ struct iwl_priv { + unsigned long plcp_jiffies; + + /* force reset */ 
+- unsigned long last_force_reset_jiffies; ++ struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET]; + + /* we allocate array of iwl4965_channel_info for NIC's valid channels. + * Access via channel # using indirect index array */ diff --git a/kernel.spec b/kernel.spec index 29198d9..6c46fe3 100644 --- a/kernel.spec +++ b/kernel.spec @@ -26,9 +26,9 @@ Summary: The Linux kernel # % define buildid .local ################################################################### -# The buildid can also be specified on the rpmbuild command line -# by adding --define="buildid .whatever". If both the specfile and -# the environment define a buildid they will be concatenated together. +# buildid can also be specified on the rpmbuild command line +# by adding --define="buildid .whatever". If both kinds of buildid +# are specified they will be concatenated together. %if 0%{?orig_buildid:1} %if 0%{?buildid:1} %global srpm_buildid %{buildid} @@ -48,21 +48,21 @@ Summary: The Linux kernel # reset this by hand to 1 (or to 0 and then use rpmdev-bumpspec). # scripts/rebase.sh should be made to do that for you, actually. # -%global baserelease 38 +%global baserelease 148 %global fedora_build %{baserelease} # base_sublevel is the kernel version we're starting with and patching # on top of -- for example, 2.6.22-rc7-git1 starts with a 2.6.21 base, # which yields a base_sublevel of 21. -%define base_sublevel 34 +%define base_sublevel 33 ## If this is a released kernel ## %if 0%{?released_kernel} # Do we have a -stable update to apply? -%define stable_update 4 +%define stable_update 6 # Is it a -stable RC? -%define stable_rc 1 +%define stable_rc 0 # Set rpm version accordingly %if 0%{?stable_update} %define stablerev .%{stable_update} @@ -100,6 +100,8 @@ Summary: The Linux kernel %define with_up %{?_without_up: 0} %{?!_without_up: 1} # kernel-smp (only valid for ppc 32-bit) %define with_smp %{?_without_smp: 0} %{?!_without_smp: 1} +# kernel-kdump +%define with_kdump %{?_without_kdump: 0} %{?!_without_kdump: 1} # kernel-debug %define with_debug %{?_without_debug: 0} %{?!_without_debug: 1} # kernel-doc @@ -118,6 +120,8 @@ Summary: The Linux kernel %define with_bootwrapper %{?_without_bootwrapper: 0} %{?!_without_bootwrapper: 1} # Want to build a the vsdo directories installed %define with_vdso_install %{?_without_vdso_install: 0} %{?!_without_vdso_install: 1} +# Use dracut instead of mkinitrd for initrd image generation +%define with_dracut %{?_without_dracut: 0} %{?!_without_dracut: 1} # Build the kernel-doc package, but don't fail the build if it botches. # Here "true" means "continue" and "false" means "fail the build". 
@@ -229,12 +233,14 @@ Summary: The Linux kernel # if requested, only build base kernel %if %{with_baseonly} %define with_smp 0 +%define with_kdump 0 %define with_debug 0 %endif # if requested, only build smp kernel %if %{with_smponly} %define with_up 0 +%define with_kdump 0 %define with_debug 0 %endif @@ -245,6 +251,8 @@ Summary: The Linux kernel %endif %define with_smp 0 %define with_pae 0 +%define with_xen 0 +%define with_kdump 0 %define with_perftool 0 %endif @@ -262,6 +270,8 @@ Summary: The Linux kernel %define with_smp 0 %endif +%define with_kdump 0 + # don't do debug builds on anything but i686 and x86_64 %ifnarch i686 x86_64 %define with_debug 0 @@ -321,12 +331,16 @@ Summary: The Linux kernel %ifarch s390x %define asmarch s390 %define hdrarch s390 -%define all_arch_configs kernel-%{version}-s390x.config +%define all_arch_configs kernel-%{version}-s390x*.config %define image_install_path boot %define make_target image %define kernel_image arch/s390/boot/image %endif +%ifarch sparc +# We only build sparc headers since we dont support sparc32 hardware +%endif + %ifarch sparc64 %define asmarch sparc %define all_arch_configs kernel-%{version}-sparc64*.config @@ -387,13 +401,15 @@ Summary: The Linux kernel # us use the previous build of that package -- it'll just be completely AWOL. # Which is a BadThing(tm). -# We only build kernel-headers on the following... +# We don't build a kernel on i386; we only do kernel-headers there, +# and we no longer build for 31bit s390. Same for 32bit sparc and arm. %define nobuildarches i386 s390 sparc %{arm} %ifarch %nobuildarches %define with_up 0 %define with_smp 0 %define with_pae 0 +%define with_kdump 0 %define with_debuginfo 0 %define with_perftool 0 %define _enable_debug_packages 0 @@ -423,6 +439,19 @@ Summary: The Linux kernel # %define package_conflicts initscripts < 7.23, udev < 063-6, iptables < 1.3.2-1, ipw2200-firmware < 2.4, iwl4965-firmware < 228.57.2, selinux-policy-targeted < 1.25.3-14, squashfs-tools < 4.0, wireless-tools < 29-3 +# +# The ld.so.conf.d file we install uses syntax older ldconfig's don't grok. +# +%define kernel_xen_conflicts glibc < 2.3.5-1, xen < 3.0.1 + +%define kernel_PAE_obsoletes kernel-smp < 2.6.17, kernel-xen <= 2.6.27-0.2.rc0.git6.fc10 +%define kernel_PAE_provides kernel-xen = %{rpmversion}-%{pkg_release} + +%ifarch x86_64 +%define kernel_obsoletes kernel-xen <= 2.6.27-0.2.rc0.git6.fc10 +%define kernel_provides kernel-xen = %{rpmversion}-%{pkg_release} +%endif + # We moved the drm include files into kernel-headers, make sure there's # a recent enough libdrm-devel on the system that doesn't have those. %define kernel_headers_conflicts libdrm-devel < 2.4.0-0.15 @@ -432,7 +461,11 @@ Summary: The Linux kernel # scripts use them. # %define kernel_prereq fileutils, module-init-tools, initscripts >= 8.11.1-1, grubby >= 7.0.10-1 +%if %{with_dracut} %define initrd_prereq dracut >= 001-7 +%else +%define initrd_prereq mkinitrd >= 6.0.61-1 +%endif # # This macro does requires, provides, conflicts, obsoletes for a kernel package. 
@@ -452,10 +485,7 @@ Requires(pre): %{initrd_prereq}\ %if %{with_firmware}\ Requires(pre): kernel-firmware >= %{rpmversion}-%{pkg_release}\ %else\ -Requires(pre): linux-firmware >= 20100806-2\ -%if %{with_perftool}\ -Requires(pre): elfutils-libs\ -%endif\ +Requires(pre): linux-firmware\ %endif\ Requires(post): /sbin/new-kernel-pkg\ Requires(preun): /sbin/new-kernel-pkg\ @@ -500,7 +530,7 @@ BuildRequires: xmlto, asciidoc BuildRequires: sparse >= 0.4.1 %endif %if %{with_perftool} -BuildRequires: elfutils-devel zlib-devel binutils-devel +BuildRequires: elfutils-libelf-devel zlib-devel binutils-devel libdwarf-devel %endif BuildConflicts: rhbuildsys(DiskFree) < 500Mb @@ -599,8 +629,8 @@ Patch05: linux-2.6-makefile-after_link.patch # revert upstream patches we get via other methods Patch09: linux-2.6-upstream-reverts.patch # Git trees. +Patch10: git-cpufreq.patch Patch11: git-bluetooth.patch -Patch12: git-cpufreq.patch # Standalone patches Patch20: linux-2.6-hotfixes.patch @@ -609,160 +639,234 @@ Patch21: linux-2.6-tracehook.patch Patch22: linux-2.6-utrace.patch Patch23: linux-2.6-utrace-ptrace.patch -Patch50: linux-2.6-x86-cfi_sections.patch - +Patch143: linux-2.6-g5-therm-shutdown.patch Patch144: linux-2.6-vio-modalias.patch Patch150: linux-2.6.29-sparc-IOC_TYPECHECK.patch Patch160: linux-2.6-execshield.patch -Patch200: linux-2.6-debug-sizeof-structs.patch -Patch201: linux-2.6-debug-nmi-timeout.patch -Patch202: linux-2.6-debug-taint-vm.patch -Patch203: linux-2.6-debug-vm-would-have-oomkilled.patch -Patch204: linux-2.6-debug-always-inline-kzalloc.patch - +Patch250: linux-2.6-debug-sizeof-structs.patch +Patch260: linux-2.6-debug-nmi-timeout.patch +Patch270: linux-2.6-debug-taint-vm.patch Patch300: linux-2.6-driver-level-usb-autosuspend.diff Patch303: linux-2.6-enable-btusb-autosuspend.patch Patch304: linux-2.6-usb-uvc-autosuspend.diff Patch305: linux-2.6-fix-btusb-autosuspend.patch - Patch310: linux-2.6-usb-wwan-update.patch +Patch340: linux-2.6-debug-vm-would-have-oomkilled.patch +Patch360: linux-2.6-debug-always-inline-kzalloc.patch Patch380: linux-2.6-defaults-pci_no_msi.patch -# enable ASPM +Patch381: linux-2.6-pciehp-update.patch +Patch382: linux-2.6-defaults-pciehp.patch Patch383: linux-2.6-defaults-aspm.patch -Patch384: pci-acpi-disable-aspm-if-no-osc.patch -Patch385: pci-aspm-dont-enable-too-early.patch - -# 2.6.34 bugfixes -Patch387: pci-fall-back-to-original-bios-bar-addresses.patch - Patch390: linux-2.6-defaults-acpi-video.patch Patch391: linux-2.6-acpi-video-dos.patch Patch392: linux-2.6-acpi-video-export-edid.patch -Patch393: acpi-ec-add-delay-before-write.patch - Patch450: linux-2.6-input-kill-stupid-messages.patch +Patch451: linux-2.6-input-fix-toshiba-hotkeys.patch Patch452: linux-2.6.30-no-pcspkr-modalias.patch Patch453: thinkpad-acpi-add-x100e.patch -Patch454: thinkpad-acpi-fix-backlight.patch +Patch454: linux-2.6-input-hid-quirk-egalax.patch +Patch455: linux-2.6-input-clickpad-support.patch +Patch456: thinkpad-acpi-fix-backlight.patch +Patch457: ntrig-backport.patch Patch460: linux-2.6-serial-460800.patch Patch470: die-floppy-die.patch Patch510: linux-2.6-silence-noise.patch -Patch520: pci-change-error-messages-to-kern-info.patch +Patch520: linux-2.6.30-hush-rom-warning.patch Patch530: linux-2.6-silence-fbcon-logo.patch Patch570: linux-2.6-selinux-mprotect-checks.patch Patch580: linux-2.6-sparc-selinux-mprotect-checks.patch +Patch581: linux-2.6-selinux-avtab-size.patch -Patch610: hda_intel-prealloc-4mb-dmabuffer.patch +Patch600: linux-2.6-acpi-sleep-live-sci-live.patch 
+Patch601: linux-2.6-acpi-indirect_fan_control.patch -Patch690: iwlwifi-add-internal-short-scan-support-for-3945.patch -Patch692: iwlwifi-move-plcp-check-to-separated-function.patch -Patch693: iwlwifi-Recover-TX-flow-failure.patch -Patch694: iwlwifi-code-cleanup-for-connectivity-recovery.patch -Patch695: iwlwifi-iwl_good_ack_health-only-apply-to-AGN-device.patch +Patch610: hda_intel-prealloc-4mb-dmabuffer.patch -Patch800: linux-2.6-crash-driver.patch +Patch670: linux-2.6-ata-quirk.patch -Patch900: linux-2.6-cantiga-iommu-gfx.patch +Patch681: linux-2.6-mac80211-age-scan-results-on-resume.patch -# crypto/ -Patch1200: crypto-add-async-hash-testing.patch +Patch800: linux-2.6-crash-driver.patch Patch1515: lirc-2.6.33.patch Patch1517: hdpvr-ir-enable.patch +Patch1520: crystalhd-2.6.34-staging.patch # virt + ksm patches -Patch1550: virtqueue-wrappers.patch +Patch1553: vhost_net-rollup.patch Patch1554: virt_console-rollup.patch -Patch1555: fix_xen_guest_on_old_EC2.patch - -# DRM -Patch1800: drm-next.patch -Patch1801: drm-revert-drm-fbdev-rework-output-polling-to-be-back-in-core.patch -Patch1802: revert-drm-kms-toggle-poll-around-switcheroo.patch -Patch1803: drm-encoder-disable.patch -# nouveau + drm fixes -Patch1815: drm-nouveau-updates.patch -Patch1816: drm-nouveau-race-fix.patch -Patch1817: drm-nouveau-nva3-noaccel.patch +Patch1555: virt_console-fix-race.patch +Patch1556: virt_console-fix-fix-race.patch +Patch1557: virt_console-rollup2.patch +Patch1558: vhost_net-rollup2.patch +# EC2 is running old xen hosts and wont upgrade so we have to work around it +Patch1559: fix_xen_guest_on_old_EC2.patch + +# fbdev x86-64 primary fix +Patch1700: linux-2.6-x86-64-fbdev-primary.patch + +Patch1800: drm-core-next.patch +# fix modeline for 1024x768@85 +Patch1801: drm-1024x768-85.patch + +# radeon kms backport +Patch1808: drm-radeon-evergreen.patch +Patch1809: drm-radeon-firemv-pciid.patch +Patch1810: drm-radeon-kms-fix-dual-link-dvi.patch +Patch1811: drm-radeon-fix-rs600-tlb.patch +Patch1812: drm-radeon-ss-fix.patch +Patch1813: drm-radeon-fix-shared-ddc-handling.patch +# nouveau fixes +# - these not until 2.6.34 +Patch1815: drm-nouveau-abi16.patch +Patch1816: drm-nouveau-updates.patch +# requires code that hasn't been merged upstream yet +Patch1817: drm-nouveau-acpi-edid-fallback.patch +Patch1818: drm-nouveau-drm-fixed-header.patch + +# drm fixes Patch1819: drm-intel-big-hammer.patch # intel drm is all merged upstream -Patch1820: drm-i915-fix-edp-panels.patch -Patch1821: i915-fix-crt-hotplug-regression.patch Patch1824: drm-intel-next.patch # make sure the lvds comes back on lid open Patch1825: drm-intel-make-lvds-work.patch -Patch1900: linux-2.6-intel-iommu-igfx.patch -# radeon +# disable iommu for gfx by default, just too broken +Patch1827: linux-2.6-intel-iommu-igfx.patch +# posted for upstream but not in an anholt tree yet +Patch1828: drm-intel-gen5-dither.patch +# thanks for the untested sdvo rework guys +Patch1829: drm-intel-sdvo-fix.patch +Patch1830: drm-intel-sdvo-fix-2.patch +# from 2.6.33.5 +Patch1840: drm-i915-use-pipe_control-instruction-on-ironlake-and-sandy-bridge.patch +Patch1841: drm-i915-fix-non-ironlake-965-class-crashes.patch +Patch1842: drm-i915-fix-edp-panels.patch +Patch1843: drm-i915-fix-hibernate-memory-corruption.patch +Patch1844: drm-i915-add-reclaimable-to-page-allocations.patch +Patch1845: drm-i915-make-G4X-style-PLL-search-more-permissive.patch +Patch1846: drm-intel-945gm-stability-fixes.patch + +Patch2100: linux-2.6-phylib-autoload.patch # linux1394 git patches Patch2200: 
linux-2.6-firewire-git-update.patch Patch2201: linux-2.6-firewire-git-pending.patch -Patch2400: linux-2.6-phylib-autoload.patch - # Quiet boot fixes # silence the ACPI blacklist code Patch2802: linux-2.6-silence-acpi-blacklist.patch +# Upstream V4L updates Patch2899: linux-2.6-v4l-dvb-fixes.patch Patch2900: linux-2.6-v4l-dvb-update.patch Patch2901: linux-2.6-v4l-dvb-experimental.patch + +# Rebase gspca to what will be in 2.6.34 +Patch2904: linux-2.6-v4l-dvb-rebase-gspca-to-latest.patch +# Some cherry picked fixes from v4l-dvb-next Patch2905: linux-2.6-v4l-dvb-gspca-fixes.patch -Patch2906: linux-2.6-v4l-dvb-uvcvideo-update.patch -Patch2910: linux-2.6-v4l-dvb-add-lgdt3304-support.patch -Patch2911: linux-2.6-v4l-dvb-add-kworld-a340-support.patch +# kworld ub435-q/340u usb atsc tuner support (still lingering +# in one of mkrufky's trees, pending push to v4l-dvb proper) +Patch2906: linux-2.6-v4l-dvb-add-lgdt3304-support.patch +Patch2907: linux-2.6-v4l-dvb-add-kworld-a340-support.patch # fs fixes - -Patch3012: btrfs-prohibit-a-operation-of-changing-acls-mask-when-noacl-mount-option-is-used.patch - +Patch3000: linux-2.6-btrfs-update.patch +Patch3002: btrfs-prohibit-a-operation-of-changing-acls-mask-when-noacl-mount-option-is-used.patch # NFSv4 +Patch3051: linux-2.6-nfs4-callback-hidden.patch + +Patch4000: linux-2.6-cpufreq-locking.patch # VIA Nano / VX8xx updates # patches headed upstream -Patch12005: linux-2.6-input-hid-quirk-egalax.patch +Patch12010: linux-2.6-dell-laptop-rfkill-fix.patch +Patch12013: linux-2.6-rfkill-all.patch +Patch12014: linux-2.6-x86-cfi_sections.patch -Patch12015: add-appleir-usb-driver.patch -Patch12016: disable-i8042-check-on-apple-mac.patch +Patch12015: add-appleir-driver.patch Patch12017: prevent-runtime-conntrack-changes.patch Patch12018: neuter_intel_microcode_load.patch Patch12019: linux-2.6-umh-refactor.patch -Patch12020: coredump-uid-pipe-check.patch -Patch12030: ssb_check_for_sprom.patch +# rhbz#533746 +Patch12021: ssb_check_for_sprom.patch + +# make p54pci usable on slower hardware +Patch12103: linux-2.6-p54pci.patch + +Patch12200: acpi-ec-add-delay-before-write.patch -Patch12035: quiet-prove_RCU-in-cgroups.patch +# patches from Intel to address intermittent firmware failures with iwlagn +Patch12404: iwlwifi_-add-function-to-reset_tune-radio-if-needed.patch +Patch12405: iwlwifi_-Logic-to-control-how-frequent-radio-should-be-reset-if-needed.patch +Patch12406: iwlwifi_-Tune-radio-to-prevent-unexpected-behavior.patch +Patch12407: iwlwifi_-multiple-force-reset-mode.patch +Patch12409: iwlwifi_-Adjusting-PLCP-error-threshold-for-1000-NIC.patch +Patch12410: iwlwifi_-separated-time-check-for-different-type-of-force-reset.patch +Patch12411: iwlwifi_-add-internal-short-scan-support-for-3945.patch +Patch12412: iwlwifi_-Recover-TX-flow-stall-due-to-stuck-queue.patch +Patch12413: iwlwifi_-move-plcp-check-to-separated-function.patch +Patch12414: iwlwifi_-Recover-TX-flow-failure.patch +Patch12415: iwlwifi_-code-cleanup-for-connectivity-recovery.patch +Patch12416: iwlwifi_-iwl_good_ack_health-only-apply-to-AGN-device.patch -Patch12040: iwlwifi-manage-QoS-by-mac-stack.patch -Patch12042: mac80211-explicitly-disable-enable-QoS.patch +Patch12500: alsa-usbmixer-add-possibility-to-remap-dB-values.patch -Patch12250: inotify-fix-inotify-oneshot-support.patch -Patch12260: inotify-send-IN_UNMOUNT-events.patch +# fix possible corruption with ssd +Patch12700: ext4-issue-discard-operation-before-releasing-blocks.patch -Patch12270: kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch 
+Patch12820: ibmvscsi-fix-DMA-API-misuse.patch -Patch12400: input-synaptics-relax-capability-id-checks-on-new-hardware.patch +Patch12830: disable-i8042-check-on-apple-mac.patch -Patch12410: cifs-fix-dns-resolver.patch -Patch12420: matroxfb-fix-font-corruption.patch -Patch12430: cred-dont-resurrect-dead-credentials.patch +Patch12850: crypto-aesni-kill-module_alias.patch -Patch12440: direct-io-move-aio_complete-into-end_io.patch -Patch12450: ext4-move-aio-completion-after-unwritten-extent-conversion.patch -Patch12460: xfs-move-aio-completion-after-unwritten-extent-conversion.patch +# automatically mount debugfs when perf needs it +Patch12851: perf-mount-debugfs-automatically.patch + +# iwlwifi: fix scan races +Patch12910: iwlwifi-fix-scan-races.patch +# iwlwifi: fix internal scan race +Patch12911: iwlwifi-fix-internal-scan-race.patch +# iwlwifi: recover_from_tx_stall +Patch12912: iwlwifi-recover_from_tx_stall.patch + +Patch12913: iwlwifi-manage-QoS-by-mac-stack.patch +Patch12914: mac80211-do-not-wipe-out-old-supported-rates.patch +Patch12915: mac80211-explicitly-disable-enable-QoS.patch +Patch12916: mac80211-fix-supported-rates-IE-if-AP-doesnt-give-us-its-rates.patch + +# Disable rt20xx and rt35xx chipset support in rt2800pci and rt2800usb +Patch13010: rt2x00-rt2800-Make-rt30xx-and-rt35xx-chipsets-configurable.patch + +# iwlwifi: cancel scan watchdog in iwl_bg_abort_scan +Patch13020: iwlwifi-cancel-scan-watchdog-in-iwl_bg_abort_scan.patch + +Patch13030: sched-fix-over-scheduling-bug.patch +Patch13040: ethtool-fix-buffer-overflow.patch +Patch13050: x86-debug-clear-reserved-bits-of-dr6.patch +Patch13060: x86-debug-send-sigtrap-for-user-icebp.patch + +Patch13070: cifs-fix-malicious-redirect-problem-in-the-dns-lookup-code.patch + +Patch13074: inotify-fix-inotify-oneshot-support.patch +Patch13076: inotify-send-IN_UNMOUNT-events.patch + +Patch13080: kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch %endif @@ -831,9 +935,10 @@ It provides the kernel source files common to all builds. Summary: Performance monitoring for the Linux kernel Group: Development/System License: GPLv2 +Requires: libdwarf %description -n perf -This package provides the supporting documentation for the perf tool -shipped in each kernel image subpackage. +This package provides the perf shell script, supporting documentation and +required libraries for the perf tool shipped in each kernel image subpackage. # # This macro creates a kernel--debuginfo package. @@ -939,6 +1044,14 @@ It should only be installed when trying to gather additional information on kernel bugs, as some of these options impact performance noticably. +%define variant_summary A minimal Linux kernel compiled for crash dumps +%kernel_variant_package kdump +%description kdump +This package includes a kdump version of the Linux kernel. It is +required only on machines which will use the kexec-based kernel crash dump +mechanism. + + %prep # do a few sanity-checks for --with *only builds %if %{with_baseonly} @@ -1122,7 +1235,7 @@ fi if [ -d linux-%{kversion}.%{_target_cpu} ]; then # Just in case we ctrl-c'd a prep already rm -rf deleteme.%{_target_cpu} - # Move away the stale away, and delete in background. + # Move away the stale away, and delete in background. 
mv linux-%{kversion}.%{_target_cpu} deleteme.%{_target_cpu} rm -rf deleteme.%{_target_cpu} & fi @@ -1179,8 +1292,8 @@ ApplyOptionalPatch linux-2.6-compile-fixes.patch # revert patches from upstream that conflict or that we get via other means ApplyOptionalPatch linux-2.6-upstream-reverts.patch -R -ApplyOptionalPatch git-bluetooth.patch -ApplyOptionalPatch git-cpufreq.patch +#ApplyPatch git-cpufreq.patch +ApplyPatch git-bluetooth.patch ApplyPatch linux-2.6-hotfixes.patch @@ -1191,6 +1304,7 @@ ApplyPatch linux-2.6-utrace-ptrace.patch # Architecture patches # x86(-64) +ApplyPatch linux-2.6-dell-laptop-rfkill-fix.patch ApplyPatch linux-2.6-x86-cfi_sections.patch # @@ -1200,6 +1314,9 @@ ApplyPatch linux-2.6-x86-cfi_sections.patch # # PowerPC # +### NOT (YET) UPSTREAM: +# Alleviate G5 thermal shutdown problems +ApplyPatch linux-2.6-g5-therm-shutdown.patch # Provide modalias in sysfs for vio devices ApplyPatch linux-2.6-vio-modalias.patch @@ -1222,17 +1339,22 @@ ApplyPatch linux-2.6-execshield.patch # xfs # btrfs -ApplyPatch btrfs-prohibit-a-operation-of-changing-acls-mask-when-noacl-mount-option-is-used.patch +ApplyPatch linux-2.6-btrfs-update.patch +ApplyPatch btrfs-prohibit-a-operation-of-changing-acls-mask-when-noacl-mount-option-is-used.patch # eCryptfs # NFSv4 +ApplyPatch linux-2.6-nfs4-callback-hidden.patch + +# CPUFREQ +ApplyPatch linux-2.6-cpufreq-locking.patch # USB -#ApplyPatch linux-2.6-driver-level-usb-autosuspend.diff -#ApplyPatch linux-2.6-enable-btusb-autosuspend.patch -#ApplyPatch linux-2.6-usb-uvc-autosuspend.diff +ApplyPatch linux-2.6-driver-level-usb-autosuspend.diff +ApplyPatch linux-2.6-enable-btusb-autosuspend.patch +ApplyPatch linux-2.6-usb-uvc-autosuspend.diff #ApplyPatch linux-2.6-fix-btusb-autosuspend.patch ApplyPatch linux-2.6-usb-wwan-update.patch @@ -1242,6 +1364,7 @@ ApplyPatch linux-2.6-usb-wwan-update.patch ApplyPatch linux-2.6-defaults-acpi-video.patch ApplyPatch linux-2.6-acpi-video-dos.patch ApplyPatch linux-2.6-acpi-video-export-edid.patch + ApplyPatch acpi-ec-add-delay-before-write.patch # Various low-impact patches to aid debugging. @@ -1254,22 +1377,22 @@ ApplyPatch linux-2.6-debug-always-inline-kzalloc.patch # # PCI # -# make default state of PCI MSI a config option +# disable message signaled interrupts ApplyPatch linux-2.6-defaults-pci_no_msi.patch +# update the pciehp driver +#ApplyPatch linux-2.6-pciehp-update.patch +# default to enabling passively listening for hotplug events +#ApplyPatch linux-2.6-defaults-pciehp.patch # enable ASPM by default on hardware we expect to work ApplyPatch linux-2.6-defaults-aspm.patch -# disable aspm if acpi doesn't provide an _OSC method -ApplyPatch pci-acpi-disable-aspm-if-no-osc.patch -# allow drivers to disable aspm at load time -ApplyPatch pci-aspm-dont-enable-too-early.patch -# fall back to original BIOS address when reassignment fails (KORG#16263) -ApplyPatch pci-fall-back-to-original-bios-bar-addresses.patch # # SCSI Bits. # # ACPI +ApplyPatch linux-2.6-acpi-sleep-live-sci-live.patch +ApplyPatch linux-2.6-acpi-indirect_fan_control.patch # ALSA ApplyPatch hda_intel-prealloc-4mb-dmabuffer.patch @@ -1283,18 +1406,23 @@ ApplyPatch linux-2.6-input-kill-stupid-messages.patch # stop floppy.ko from autoloading during udev... 
ApplyPatch die-floppy-die.patch +# Get away from having to poll Toshibas +#ApplyPatch linux-2.6-input-fix-toshiba-hotkeys.patch + ApplyPatch linux-2.6.30-no-pcspkr-modalias.patch ApplyPatch linux-2.6-input-hid-quirk-egalax.patch +ApplyPatch linux-2.6-input-clickpad-support.patch ApplyPatch thinkpad-acpi-add-x100e.patch ApplyPatch thinkpad-acpi-fix-backlight.patch +ApplyPatch ntrig-backport.patch # Allow to use 480600 baud on 16C950 UARTs ApplyPatch linux-2.6-serial-460800.patch # Silence some useless messages that still get printed with 'quiet' ApplyPatch linux-2.6-silence-noise.patch -ApplyPatch pci-change-error-messages-to-kern-info.patch +ApplyPatch linux-2.6.30-hush-rom-warning.patch # Make fbcon not show the penguins with 'quiet' ApplyPatch linux-2.6-silence-fbcon-logo.patch @@ -1302,123 +1430,180 @@ ApplyPatch linux-2.6-silence-fbcon-logo.patch # Fix the SELinux mprotect checks on executable mappings #ApplyPatch linux-2.6-selinux-mprotect-checks.patch # Fix SELinux for sparc -# FIXME: Can we drop this now? See updated linux-2.6-selinux-mprotect-checks.patch #ApplyPatch linux-2.6-sparc-selinux-mprotect-checks.patch +# Shirk size of memory allocation required to load policy. In 2.6.34 +ApplyPatch linux-2.6-selinux-avtab-size.patch # Changes to upstream defaults. -# /dev/crash driver. -ApplyPatch linux-2.6-crash-driver.patch -# Cantiga chipset b0rkage -ApplyPatch linux-2.6-cantiga-iommu-gfx.patch +# ia64 ata quirk +ApplyPatch linux-2.6-ata-quirk.patch -# crypto/ +# back-port scan result aging patches +#ApplyPatch linux-2.6-mac80211-age-scan-results-on-resume.patch -# Add async hash testing (a8f1a05) -ApplyPatch crypto-add-async-hash-testing.patch +# /dev/crash driver. +ApplyPatch linux-2.6-crash-driver.patch # http://www.lirc.org/ ApplyPatch lirc-2.6.33.patch # enable IR receiver on Hauppauge HD PVR (v4l-dvb merge pending) ApplyPatch hdpvr-ir-enable.patch +# Broadcom Crystal HD video decoder +ApplyPatch crystalhd-2.6.34-staging.patch # Assorted Virt Fixes -ApplyPatch virtqueue-wrappers.patch +ApplyPatch vhost_net-rollup.patch ApplyPatch virt_console-rollup.patch +ApplyPatch virt_console-fix-race.patch +ApplyPatch virt_console-fix-fix-race.patch +ApplyPatch virt_console-rollup2.patch +ApplyPatch vhost_net-rollup2.patch ApplyPatch fix_xen_guest_on_old_EC2.patch -ApplyPatch drm-next.patch -ApplyPatch drm-revert-drm-fbdev-rework-output-polling-to-be-back-in-core.patch -ApplyPatch revert-drm-kms-toggle-poll-around-switcheroo.patch -ApplyPatch drm-i915-fix-edp-panels.patch -ApplyPatch i915-fix-crt-hotplug-regression.patch -ApplyPatch drm-encoder-disable.patch +# fix x86-64 fbdev primary GPU selection +ApplyPatch linux-2.6-x86-64-fbdev-primary.patch + +ApplyPatch drm-core-next.patch +ApplyPatch drm-1024x768-85.patch # Nouveau DRM + drm fixes +ApplyPatch drm-radeon-evergreen.patch +ApplyPatch drm-radeon-firemv-pciid.patch +ApplyPatch drm-radeon-kms-fix-dual-link-dvi.patch +ApplyPatch drm-radeon-fix-rs600-tlb.patch +ApplyPatch drm-radeon-ss-fix.patch +ApplyPatch drm-radeon-fix-shared-ddc-handling.patch +ApplyPatch drm-nouveau-abi16.patch ApplyPatch drm-nouveau-updates.patch -ApplyPatch drm-nouveau-race-fix.patch -ApplyPatch drm-nouveau-nva3-noaccel.patch - +ApplyPatch drm-nouveau-acpi-edid-fallback.patch +ApplyPatch drm-nouveau-drm-fixed-header.patch +# pm broken on my thinkpad t60p - airlied ApplyPatch drm-intel-big-hammer.patch ApplyOptionalPatch drm-intel-next.patch ApplyPatch drm-intel-make-lvds-work.patch - ApplyPatch linux-2.6-intel-iommu-igfx.patch +ApplyPatch drm-intel-gen5-dither.patch 
+ApplyPatch drm-intel-sdvo-fix.patch +ApplyPatch drm-intel-sdvo-fix-2.patch +# from 2.6.33.5 +ApplyPatch drm-i915-use-pipe_control-instruction-on-ironlake-and-sandy-bridge.patch +ApplyPatch drm-i915-fix-non-ironlake-965-class-crashes.patch +ApplyPatch drm-i915-fix-edp-panels.patch +# hibernation memory corruption fixes +ApplyPatch drm-i915-fix-hibernate-memory-corruption.patch +ApplyPatch drm-i915-add-reclaimable-to-page-allocations.patch + +# RHBZ#572799 +ApplyPatch drm-i915-make-G4X-style-PLL-search-more-permissive.patch + +ApplyPatch drm-intel-945gm-stability-fixes.patch + +ApplyPatch linux-2.6-phylib-autoload.patch # linux1394 git patches -ApplyOptionalPatch linux-2.6-firewire-git-update.patch -ApplyOptionalPatch linux-2.6-firewire-git-pending.patch +#ApplyPatch linux-2.6-firewire-git-update.patch +#ApplyOptionalPatch linux-2.6-firewire-git-pending.patch # silence the ACPI blacklist code ApplyPatch linux-2.6-silence-acpi-blacklist.patch # V4L/DVB updates/fixes/experimental drivers -# apply if non-empty +# Upstream trees, applied only if non-empty ApplyOptionalPatch linux-2.6-v4l-dvb-fixes.patch ApplyOptionalPatch linux-2.6-v4l-dvb-update.patch ApplyOptionalPatch linux-2.6-v4l-dvb-experimental.patch +ApplyPatch linux-2.6-v4l-dvb-rebase-gspca-to-latest.patch ApplyPatch linux-2.6-v4l-dvb-gspca-fixes.patch -ApplyPatch linux-2.6-v4l-dvb-uvcvideo-update.patch ApplyPatch linux-2.6-v4l-dvb-add-lgdt3304-support.patch ApplyPatch linux-2.6-v4l-dvb-add-kworld-a340-support.patch -ApplyPatch linux-2.6-phylib-autoload.patch - # Patches headed upstream -ApplyPatch add-appleir-usb-driver.patch -ApplyPatch disable-i8042-check-on-apple-mac.patch +ApplyPatch linux-2.6-rfkill-all.patch + +# appleir remote controller +ApplyPatch add-appleir-driver.patch ApplyPatch neuter_intel_microcode_load.patch # Refactor UserModeHelper code & satisfy abrt recursion check request -#ApplyPatch linux-2.6-umh-refactor.patch -#ApplyPatch coredump-uid-pipe-check.patch +ApplyPatch linux-2.6-umh-refactor.patch + +ApplyPatch alsa-usbmixer-add-possibility-to-remap-dB-values.patch # rhbz#533746 -# awful, ugly conflicts between this patch and the 2.6.34.2 patch: -# ssb-handle-netbook-devices-where-the-sprom-address-is-changed.patch -#ApplyPatch ssb_check_for_sprom.patch +ApplyPatch ssb_check_for_sprom.patch + +# make p54pci usable on slower hardware +ApplyPatch linux-2.6-p54pci.patch + +# patches from Intel to address intermittent firmware failures with iwlagn +ApplyPatch iwlwifi_-add-function-to-reset_tune-radio-if-needed.patch +ApplyPatch iwlwifi_-Logic-to-control-how-frequent-radio-should-be-reset-if-needed.patch +ApplyPatch iwlwifi_-Tune-radio-to-prevent-unexpected-behavior.patch +ApplyPatch iwlwifi_-multiple-force-reset-mode.patch +ApplyPatch iwlwifi_-Adjusting-PLCP-error-threshold-for-1000-NIC.patch +ApplyPatch iwlwifi_-separated-time-check-for-different-type-of-force-reset.patch +ApplyPatch iwlwifi_-add-internal-short-scan-support-for-3945.patch +ApplyPatch iwlwifi_-Recover-TX-flow-stall-due-to-stuck-queue.patch +ApplyPatch iwlwifi_-move-plcp-check-to-separated-function.patch +ApplyPatch iwlwifi_-Recover-TX-flow-failure.patch +ApplyPatch iwlwifi_-code-cleanup-for-connectivity-recovery.patch +ApplyPatch iwlwifi_-iwl_good_ack_health-only-apply-to-AGN-device.patch + +# fix possible corruption with ssd +ApplyPatch ext4-issue-discard-operation-before-releasing-blocks.patch + +ApplyPatch ibmvscsi-fix-DMA-API-misuse.patch + +ApplyPatch disable-i8042-check-on-apple-mac.patch + +ApplyPatch crypto-aesni-kill-module_alias.patch -# 
iwlwifi fixes from F-13-2.6.33 -ApplyPatch iwlwifi-add-internal-short-scan-support-for-3945.patch -ApplyPatch iwlwifi-move-plcp-check-to-separated-function.patch -ApplyPatch iwlwifi-Recover-TX-flow-failure.patch -ApplyPatch iwlwifi-code-cleanup-for-connectivity-recovery.patch -ApplyPatch iwlwifi-iwl_good_ack_health-only-apply-to-AGN-device.patch +# automagically mount debugfs for perf +ApplyPatch perf-mount-debugfs-automatically.patch + +# iwlwifi: fix scan races +ApplyPatch iwlwifi-fix-scan-races.patch +# iwlwifi: fix internal scan race +ApplyPatch iwlwifi-fix-internal-scan-race.patch +# iwlwifi: recover_from_tx_stall +ApplyPatch iwlwifi-recover_from_tx_stall.patch # mac80211/iwlwifi fix connections to some APs (rhbz#558002) ApplyPatch mac80211-explicitly-disable-enable-QoS.patch ApplyPatch iwlwifi-manage-QoS-by-mac-stack.patch +ApplyPatch mac80211-do-not-wipe-out-old-supported-rates.patch +ApplyPatch mac80211-fix-supported-rates-IE-if-AP-doesnt-give-us-its-rates.patch -ApplyPatch quiet-prove_RCU-in-cgroups.patch - -# fix broken oneshot support and missing umount events (#607327) -ApplyPatch inotify-fix-inotify-oneshot-support.patch -ApplyPatch inotify-send-IN_UNMOUNT-events.patch +# Disable rt20xx and rt35xx chipset support in rt2800pci and rt2800usb +ApplyPatch rt2x00-rt2800-Make-rt30xx-and-rt35xx-chipsets-configurable.patch -# 610911 -ApplyPatch kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch +# iwlwifi: cancel scan watchdog in iwl_bg_abort_scan +ApplyPatch iwlwifi-cancel-scan-watchdog-in-iwl_bg_abort_scan.patch -# fix newer synaptics touchpads not being recognized -ApplyPatch input-synaptics-relax-capability-id-checks-on-new-hardware.patch +# fix performance problem with CGROUPS +ApplyPatch sched-fix-over-scheduling-bug.patch -# Remove __init and __exit attributes from resolver code -ApplyPatch cifs-fix-dns-resolver.patch +# CVE-2010-2478 +ApplyPatch ethtool-fix-buffer-overflow.patch -# RHBZ #617687 -ApplyPatch matroxfb-fix-font-corruption.patch +# BZ#609548 +ApplyPatch x86-debug-clear-reserved-bits-of-dr6.patch +ApplyPatch x86-debug-send-sigtrap-for-user-icebp.patch -# RHBZ #591015 -ApplyPatch cred-dont-resurrect-dead-credentials.patch +# CVE-2010-2524 +ApplyPatch cifs-fix-malicious-redirect-problem-in-the-dns-lookup-code.patch -# RHBZ #617699 -ApplyPatch direct-io-move-aio_complete-into-end_io.patch -ApplyPatch ext4-move-aio-completion-after-unwritten-extent-conversion.patch -ApplyPatch xfs-move-aio-completion-after-unwritten-extent-conversion.patch +# fix broken oneshot support and missing umount events (#607327) +ApplyPatch inotify-fix-inotify-oneshot-support.patch +ApplyPatch inotify-send-IN_UNMOUNT-events.patch +# RHBZ#610911 +ApplyPatch kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch # END OF PATCH APPLICATIONS @@ -1548,11 +1733,13 @@ BuildKernel() { mkdir -p $RPM_BUILD_ROOT/%{image_install_path} install -m 644 .config $RPM_BUILD_ROOT/boot/config-$KernelVer install -m 644 System.map $RPM_BUILD_ROOT/boot/System.map-$KernelVer - +%if %{with_dracut} # We estimate the size of the initramfs because rpm needs to take this size # into consideration when performing disk space calculations. 
(See bz #530778) dd if=/dev/zero of=$RPM_BUILD_ROOT/boot/initramfs-$KernelVer.img bs=1M count=20 - +%else + dd if=/dev/zero of=$RPM_BUILD_ROOT/boot/initrd-$KernelVer.img bs=1M count=5 +%endif if [ -f arch/$Arch/boot/zImage.stub ]; then cp arch/$Arch/boot/zImage.stub $RPM_BUILD_ROOT/%{image_install_path}/zImage.stub-$KernelVer || : fi @@ -1564,8 +1751,18 @@ BuildKernel() { # Override $(mod-fw) because we don't want it to install any firmware # We'll do that ourselves with 'make firmware_install' make -s ARCH=$Arch INSTALL_MOD_PATH=$RPM_BUILD_ROOT modules_install KERNELRELEASE=$KernelVer mod-fw= + %ifarch %{vdso_arches} make -s ARCH=$Arch INSTALL_MOD_PATH=$RPM_BUILD_ROOT vdso_install KERNELRELEASE=$KernelVer + if grep '^CONFIG_XEN=y$' .config >/dev/null; then + echo > ldconfig-kernel.conf "\ +# This directive teaches ldconfig to search in nosegneg subdirectories +# and cache the DSOs there with extra bit 0 set in their hwcap match +# fields. In Xen guest kernels, the vDSO tells the dynamic linker to +# search in nosegneg subdirectories and to match this extra hwcap bit +# in the ld.so.cache file. +hwcap 0 nosegneg" + fi if [ ! -s ldconfig-kernel.conf ]; then echo > ldconfig-kernel.conf "\ # Placeholder file, no vDSO hwcap entries used in this kernel." @@ -1720,6 +1917,10 @@ BuildKernel %make_target %kernel_image BuildKernel %make_target %kernel_image smp %endif +%if %{with_kdump} +BuildKernel vmlinux vmlinux kdump vmlinux +%endif + %if %{with_doc} # Make the HTML and man pages. make %{?_smp_mflags} htmldocs mandocs || %{doc_build_fail} @@ -1779,8 +1980,8 @@ xargs -0 --no-run-if-empty %{__install} -m 444 -t $man9dir $m ls $man9dir | grep -q '' || > $man9dir/BROKEN %endif # with_doc -# perf docs %if %{with_perf} +# perf docs mandir=$RPM_BUILD_ROOT%{_datadir}/man man1dir=$mandir/man1 pushd tools/perf/Documentation @@ -1792,15 +1993,14 @@ for d in *.1; do gzip $d; done popd -%endif # with_perf -# perf shell wrapper -%if %{with_perf} +# perf shell wrapper and examples mkdir -p $RPM_BUILD_ROOT/usr/sbin/ cp $RPM_SOURCE_DIR/perf $RPM_BUILD_ROOT/usr/sbin/perf chmod 0755 $RPM_BUILD_ROOT/usr/sbin/perf mkdir -p $RPM_BUILD_ROOT%{_datadir}/doc/perf -%endif +cp tools/perf/Documentation/examples.txt $RPM_BUILD_ROOT%{_datadir}/doc/perf +%endif # with_perf %if %{with_headers} # Install kernel headers @@ -1871,7 +2071,12 @@ fi\ # %define kernel_variant_posttrans() \ %{expand:%%posttrans %{?1}}\ +%{expand:\ +%if %{with_dracut}\ /sbin/new-kernel-pkg --package kernel%{?-v:-%{-v*}} --mkinitrd --dracut --depmod --update %{KVERREL}%{?-v:.%{-v*}} || exit $?\ +%else\ +/sbin/new-kernel-pkg --package kernel%{?-v:-%{-v*}} --mkinitrd --depmod --update %{KVERREL}%{?-v:.%{-v*}} || exit $?\ +%endif}\ /sbin/new-kernel-pkg --package kernel%{?1:-%{1}} --rpmposttrans %{KVERREL}%{?1:.%{1}} || exit $?\ %{nil} @@ -1912,18 +2117,22 @@ fi}\ %{nil} %kernel_variant_preun +%ifarch x86_64 +%kernel_variant_post -r (kernel-smp|kernel-xen) +%else %kernel_variant_post -r kernel-smp +%endif %kernel_variant_preun smp %kernel_variant_post -v smp %kernel_variant_preun PAE -%kernel_variant_post -v PAE -r (kernel|kernel-smp) +%kernel_variant_post -v PAE -r (kernel|kernel-smp|kernel-xen) %kernel_variant_preun debug %kernel_variant_post -v debug -%kernel_variant_post -v PAEdebug -r (kernel|kernel-smp) +%kernel_variant_post -v PAEdebug -r (kernel|kernel-smp|kernel-xen) %kernel_variant_preun PAEdebug if [ -x /sbin/ldconfig ] @@ -2005,7 +2214,11 @@ fi /etc/ld.so.conf.d/kernel-%{KVERREL}%{?2:.%{2}}.conf\ %endif\ 
/lib/modules/%{KVERREL}%{?2:.%{2}}/modules.*\ +%if %{with_dracut}\ %ghost /boot/initramfs-%{KVERREL}%{?2:.%{2}}.img\ +%else\ +%ghost /boot/initrd-%{KVERREL}%{?2:.%{2}}.img\ +%endif\ %{expand:%%files %{?2:%{2}-}devel}\ %defattr(-,root,root)\ %verify(not mtime) /usr/src/kernels/%{KVERREL}%{?2:.%{2}}\ @@ -2036,320 +2249,2327 @@ fi %kernel_variant_files %{with_debug} debug %kernel_variant_files %{with_pae} PAE %kernel_variant_files %{with_pae_debug} PAEdebug +%kernel_variant_files -k vmlinux %{with_kdump} kdump %changelog -* Tue Aug 10 2010 Chuck Ebbert 2.6.34.4-38.rc1 -- Linux 2.6.34.4-rc1 -- Fix up drm-next patch to apply on top of 2.6.34.4 - -* Tue Aug 10 2010 Chuck Ebbert 2.6.34.3-37 -- Linux 2.6.34.3 -- Disable AES-NI encryption until bugs can be sorted out (#622435) - -* Tue Aug 10 2010 Ben Skeggs 2.6.34.3-36.rc1 -- nouveau: disable accel on nva3/nva5/nva8 until it's fixed upstream -- rhbz#596330 - -* Sat Aug 07 2010 Chuck Ebbert 2.6.34.3-35.rc1 -- Linux 2.6.34.3-rc1 - -* Fri Aug 06 2010 Ben Skeggs 2.6.34.2-34 -- nouveau: fix inter-engine race when under memory pressure (rhbz#602956) -- Disable CONFIG_MULTICORE_RAID456 - -* Tue Aug 03 2010 Chuck Ebbert 2.6.34.2-33 -- Linux 2.6.34.2 -- Drop commented-out patches. -- Drop ancient linux-2.6-mac80211-age-scan-results-on-resume.patch -- Fix matroxfb font corruption (#617687) -- Don't resurrect dead task credentials (#591015) -- Fix "ext4 and xfs wrong data returned on read after write if - file size was changed with ftruncate" (#617699) - -* Sun Aug 01 2010 Chuck Ebbert 2.6.34.2-32.rc1 -- Linux 2.6.34.2-rc1 -- Comment out upstream merged patches: - pci-pm-do-not-use-native-pcie-pme-by-default.patch - linux-2.6-acpi-sleep-live-sci-live.patch (slightly different upstream patch) - drm-i915-make-G4X-style-PLL-search-more-permissive.patch - drm-intel-945gm-stability-fixes.patch - drm-radeon-fix-shared-ddc-handling.patch - drm-i915-add-reclaimable-to-page-allocations.patch - drm-i915-fix-hibernate-memory-corruption.patch - iwlwifi-Recover-TX-flow-stall-due-to-stuck-queue.patch - iwlwifi-recover_from_tx_stall.patch - mac80211-do-not-wipe-out-old-supported-rates.patch - mac80211-fix-supported-rates-IE-if-AP-doesnt-give-us-its-rates.patch - iwlwifi-cancel-scan-watchdog-in-iwl_bg_abort_scan.patch - ata-generic-handle-new-mbp-with-mcp89.patch - ata-generic-implement-ata-gen-flags.patch - x86-debug-send-sigtrap-for-user-icebp.patch - ethtool-fix-buffer-overflow.patch - sched-fix-over-scheduling-bug.patch - kbuild-fix-modpost-segfault.patch - acpica-00-linux-2.6.git-0f849d2cc6863c7874889ea60a871fb71399dd3f.patch - acpica-01-linux-2.6.git-a997ab332832519c2e292db13f509e4360495a5a.patch - acpica-02-linux-2.6.git-e4e9a735991c80fb0fc1bd4a13a93681c3c17ce0.patch - acpica-03-linux-2.6.git-fd247447c1d94a79d5cfc647430784306b3a8323.patch - acpica-04-linux-2.6.git-c9a8bbb7704cbf515c0fc68970abbe4e91d68521.patch - acpica-05-linux-2.6.git-ce43ace02320a3fb9614ddb27edc3a8700d68b26.patch - acpica-06-linux-2.6.git-9d3c752de65dbfa6e522f1d666deb0ac152ef367.patch - acpi-pm-do-not-enable-gpes-for-system-wakeup-in-advance.patch - cifs-fix-malicious-redirect-problem-in-the-dns-lookup-code.patch - usb-obey-the-sysfs-power-wakeup-setting.patch -- Fix up virtqueue-wrappers.patch to apply after 2.6.34.2 due to: - virtio_net-fix-oom-handling-on-tx.patch -- Revert -stable DRM patches already in our drm-next patch: - amd64-agp-probe-unknown-agp-devices-the-right-way.patch - i915-fix-lock-imbalance-on-error-path.patch - 
drm-i915-hold-the-spinlock-whilst-resetting-unpin_work-along-error-path.patch -- Fix up drm-next.patch to apply after 2.6.34.2 due to: - drm-i915-gen3-page-flipping-fixes.patch - drm-i915-don-t-queue-flips-during-a-flip-pending-event.patch -- Drop patches now upstream from linux-2.6-v4l-dvb-uvcvideo-update.patch: - V4L/DVB: uvcvideo: Add support for unbranded Arkmicro 18ec:3290 webcams - V4L/DVB: uvcvideo: Add support for V4L2_PIX_FMT_Y16 -- Temporarily comment out ssb_check_for_sprom.patch due to ugly conflicts with: - ssb-handle-netbook-devices-where-the-sprom-address-is-changed.patch - -* Sun Aug 01 2010 Chuck Ebbert 2.6.34.1-31 -- Backport initial dist-git fixes from master (377da6d08) -- Modify the prep stage so multiple trees can be prepped in a - single shared git directory. - -* Mon Jul 26 2010 Chuck Ebbert 2.6.34.1-30 -- usb-obey-the-sysfs-power-wakeup-setting.patch: - Restore ability of USB devices to wake the machine (#617559) - -* Thu Jul 22 2010 Chuck Ebbert 2.6.34.1-29 +* Sat Aug 14 2010 Chuck Ebbert 2.6.33.6-148 +- Add 2.6.33 branch to git repository. + +* Fri Jul 23 2010 Chuck Ebbert 2.6.33.6-147.2.4 +- inotify-fix-inotify-oneshot-support.patch, + inotify-send-IN_UNMOUNT-events.patch: + Fix broken oneshot support and missing umount events. (#607327) + +* Fri Jul 23 2010 Chuck Ebbert 2.6.33.6-147.2.3 +- drm-i915-add-reclaimable-to-page-allocations.patch: + Additional fix for hibernation memory corruption bugs. +- drm-intel-945gm-stability-fixes.patch: fix 945GM stability issues +- drm-i915-make-G4X-style-PLL-search-more-permissive.patch (#572799) +- drm-radeon-fix-shared-ddc-handling.patch (#593429) + +* Fri Jul 23 2010 Chuck Ebbert 2.6.33.6-147.2.2 +- kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch: + Fix crash in guest Python programs (#610911) + +* Fri Jul 23 2010 Chuck Ebbert 2.6.33.6-147.2.1 - cifs-fix-malicious-redirect-problem-in-the-dns-lookup-code.patch: Fix a malicious redirect problem in the DNS lookup code (CVE-2010-2524) -* Thu Jul 22 2010 Chuck Ebbert 2.6.34.1-28 -- input-synaptics-relax-capability-id-checks-on-new-hardware.patch: - Make mouse driver recognize newer synaptics hardware as touchpad. +* Tue Jul 06 2010 Jarod Wilson 2.6.33.6-147 +- Really make hdpvr i2c IR part register this time, so something can + actually be bound to it (like, say, lirc_zilog) -* Thu Jul 22 2010 Chuck Ebbert 2.6.34.1-27 -- ACPI GPE enable/disable patches: fix system powering back on - after shutdown (#613239) (and possibly #615858) +* Tue Jul 06 2010 Chuck Ebbert 2.6.33.6-146 +- x86-debug-send-sigtrap-for-user-icebp.patch, + x86-debug-clear-reserved-bits-of-dr6.patch (#609548) -* Thu Jul 22 2010 Jerome Glisse 2.6.34.1-26 -- radeon fix shared ddc handling (#593429) +* Tue Jul 06 2010 Chuck Ebbert 2.6.33.6-145 +- ethtool-fix-buffer-overflow.patch: ethtool buffer overflow (CVE-2010-2478) -* Thu Jul 22 2010 Chuck Ebbert 2.6.34.1-25 -- kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch: - Fix crash in guest Python programs (#610911) +* Tue Jul 06 2010 Chuck Ebbert 2.6.33.6-144 +- sched-fix-over-scheduling-bug.patch: fix scheduler bug with CGROUPS -* Wed Jul 21 2010 Chuck Ebbert 2.6.34.1-24 -- Drop crypto-aesni-kill-module_alias.patch; bug #571577 should - not be present in 2.6.34. 
+* Tue Jul 06 2010 Chuck Ebbert 2.6.33.6-143 +- Linux 2.6.33.6 -* Wed Jul 21 2010 Dave Airlie 2.6.34.1-23 -- drm-intel-945gm-stability-fixes.patch: fix 945GM stability issues +* Fri Jul 02 2010 Ben Skeggs 2.6.33.6-142.rc1 +- nouveau: fix connector ordering issues (rhbz#602492) -* Wed Jul 21 2010 Dave Airlie 2.6.34.1-22 -- double drop: its a revert on top of a revert. +* Fri Jul 02 2010 Chuck Ebbert 2.6.33.6-141.rc1 +- Linux 2.6.33.6-rc1 +- Drop patches merged upstream: + btrfs-should-add-permission-check-for-setfacl.patch (CVE-2010-2071) + iwlwifi-reset-card-during-probe.patch + iwlwifi-recalculate-average-tpt-if-not-current.patch + keys-find-keyring-by-name-can-gain-access-to-the-freed-keyring.patch + l2tp-fix-oops-in-pppol2tp_xmit.patch +- Revert DRM patches we already have: + drm-edid-fix-1024x768-85hz.patch + drm-i915-fix-82854-pci-id-and-treat-it-like-other-85x.patch +- Fix up usb-wwan-update.patch for upstream additions. -* Tue Jul 20 2010 Dave Airlie 2.6.34.1-21 -- drop drm revert, that can't possible cause the bug, but is causing another one. +* Fri Jul 02 2010 Dave Airlie 2.6.33.5-140 +- attempt to fix hibernate on Intel GPUs (kernel.org #13811) -* Mon Jul 19 2010 Chuck Ebbert 2.6.34.1-20 -- pci-fall-back-to-original-bios-bar-addresses.patch: - Fix 2.6.34 problems with assigning PCI addresses (KORG#16263) +* Wed Jun 30 2010 Kyle McMartin +- Disable MRST here too. -* Mon Jul 19 2010 Chuck Ebbert 2.6.34.1-19 -- drm-i915-add-reclaimable-to-page-allocations.patch: - Additional fix for hibernation memory corruption bugs. +* Mon Jun 28 2010 Chuck Ebbert 2.6.33.5-138 +- ppc64: enable active memory sharing and DLPAR memory remove (#607175) -* Sun Jul 18 2010 Chuck Ebbert 2.6.34.1-18 -- drm-i915-make-G4X-style-PLL-search-more-permissive.patch (#572799) +* Mon Jun 28 2010 Dave Airlie 2.6.33.5-137 +- i915: fix edp panels betterer. -* Sun Jul 18 2010 Hans de Goede 2.6.34.1-17 -- Fix inotify-fix-inotify-oneshot-support.patch so that it compiles -- Various small updates / fixes to the uvcvideo driver: - - Support dynamic menu controls (#576023) - - Fix the apple iSight camera not working (#600998) +* Fri Jun 25 2010 Dave Airlie 2.6.33.5-136 +- i915: fix edp on a number of notebooks (including whot's one) -* Fri Jul 16 2010 Chuck Ebbert 2.6.34.1-16 -- inotify-fix-inotify-oneshot-support.patch, - inotify-send-IN_UNMOUNT-events.patch: - Fix broken oneshot support and missing umount events. (#607327) +* Fri Jun 25 2010 Ben Skeggs 2.6.33.5-135 +- nouveau: backport important fixes from upstream +- Fixes unPOSTed detection + support nv4x multi-card (rhbz#607190) +- Various VBIOS parser fixes (potential culprit for many suspend bugs) +- Fixes memory detection on some GF8 IGPs, and boards with 4GiB VRAM +- Corrects various problems in the behaviour of GF8 dual-link TMDS + +* Wed Jun 23 2010 Kyle McMartin 2.6.33.5-134 +- l2tp: fix oops in pppol2tp_xmit (#607054) + +* Fri Jun 18 2010 Roland McGrath 2.6.33.5-133 +- make execshield respect PF_RANDOMIZE and ADDR_NO_RANDOMIZE (#220892) + +* Thu Jun 17 2010 Kyle McMartin +- make ghash-clmulni modular to get rid of early boot noise (rhbz#586954) + (not a /fix/ but it should at least quiet boot down a bit if you have + the cpu support) + +* Tue Jun 15 2010 John W. Linville 2.6.33.5-131 +- iwlwifi: cancel scan watchdog in iwl_bg_abort_scan (#590436) + +* Mon Jun 14 2010 Kyle McMartin 2.6.33.5-129 +- Add btrfs ACL fixes from CVE-2010-2071. 
+ +* Sun Jun 13 2010 Kyle McMartin 2.6.33.5-128 +- mac80211/iwlwifi fix connections to some APs (rhbz#558002) + patches from sgruszka@. + +* Fri Jun 11 2010 Justin M. Forbes 2.6.33.5-127 +- Disable xsave for so that kernel will boot on ancient EC2 hosts. + +* Fri Jun 11 2010 Kyle McMartin 2.6.33.5-126 +- ALSA: usbmixer - add possibility to remap dB values (rhbz#578131) + +* Fri Jun 11 2010 Kyle McMartin 2.6.33.5-124 +- Drop writeback patches, they appear to be able to cause oopses. + +* Wed Jun 09 2010 John W. Linville +- Disable rt20xx and rt35xx chipset support in rt2800 drivers (#570869) + +* Wed Jun 09 2010 David Woodhouse +- Include PHY modules in modules.networking (#602155) + +* Wed Jun 09 2010 Kyle McMartin 2.6.33.5-121 +- doc_build_fail FAIL. + +* Wed Jun 09 2010 Kyle McMartin 2.6.33.5-120 +- backport ntrig hid driver from git head. (rhbz#584593) + +* Mon Jun 07 2010 Matthew Garrett +- linux-2.6-acpi-indirect_fan_control.patch: fix some ACPI fans (rh#531916) + +* Mon Jun 07 2010 Ben Skeggs +- nouveau: fix iommu errors on GeForce 8 and newer chipsets (rh#561267) + +* Thu Jun 03 2010 Kyle McMartin +- But keep it for kernel-headers... + +* Thu Jun 03 2010 Dave Jones +- remove the 31bit s390 support again. -* Fri Jul 16 2010 Ben Skeggs 2.6.34.1-15 -- nouveau: fix lvds regression (#601002) -- nouveau: bring back acpi edid support, with fixes (#613284) -- nouveau: remove dcb1.5 quirk that breaks things (#595645) - -* Wed Jul 14 2010 Chuck Ebbert 2.6.34.1-14 -- Truncate the obsolete git bluetooth and firewire patches, use - ApplyOptionalPatch for bluetooth, cpufreq and firewire patches. - -* Wed Jul 14 2010 Chuck Ebbert 2.6.34.1-12 -- pci-pm-do-not-use-native-pcie-pme-by-default.patch: - fix PCIe hotplug interrupts firing continuously. (#613412) -- Update pci-acpi-disable-aspm-if-no-osc.patch so it works - with the above patch. -- Drop linux-2.6-defaults-pciehp.patch: pciehp_passive mode - does not exist anymore. - -* Tue Jul 13 2010 Ben Skeggs 2.6.34.1-11 -- nouveau: bring back patches lost from 2.6.34 update + add some more to - fix at least rhbz#532711 and rhbz#593046 -- remove patches relating to nouveau that are now unused - -* Mon Jul 12 2010 Dave Jones -- Remove a bunch of x86 options from config files that get set - automatically, and can't be overridden. - -* Fri Jul 09 2010 Chuck Ebbert 2.6.34.1-9 +* Tue Jun 01 2010 Jarod Wilson +- Wire up all s390{,x} bits to match RHEL6 kernel spec + +* Wed May 27 2010 Chuck Ebbert 2.6.33.5-112 +- CVE-2010-1437: keyrings: find_keyring_by_name() can gain the freed keyring + +* Wed May 27 2010 Chuck Ebbert 2.6.33.5-111 +- Linux 2.6.33.5 +- Drop patches merged in -stable: + iwlwifi_-check-for-aggregation-frame-and-queue.patch + iwlwifi_-clear-all-the-stop_queue-flag-after-load-firmware.patch + revert-ath9k_-fix-lockdep-warning-when-unloading-module.patch + btrfs-check-for-read-permission-on-src-file-in-clone-ioctl.patch +- Revert drm patch already in F-13: drm-i915-disable-fbc-on-915gm-and-945gm.patch +- Apply DRM patches from -stable on top of F-13 DRM updates: + drm-i915-use-pipe_control-instruction-on-ironlake-and-sandy-bridge.patch + drm-i915-fix-non-ironlake-965-class-crashes.patch + +* Thu May 27 2010 Ben Skeggs +- drm-nouveau-updates.patch: add nv50 gpio fix (rh#582621) + +* Wed May 26 2010 Adam Jackson +- linux-2.6-cantiga-iommu-gfx.patch: Drop, redundant. +- config-generic: Disable i830.ko, userspace will never load it. + +* Mon May 24 2010 John W. 
Linville +- iwlwifi: recover_from_tx_stall (#589777) + +* Thu May 20 2010 Chuck Ebbert 2.6.33.4-106 +- Remove "PatchNNNN" entries for dropped patches. +- More writeback fixes from block-2.6 tree (#593669) + +* Thu May 20 2010 Kyle McMartin +- kill some dead patches. + +* Wed May 19 2010 John W. Linville +- iwlwifi: fix scan races +- iwlwifi: fix internal scan race + +* Wed May 19 2010 Dave Airlie +- disable vmwgfx at request of vmware + +* Wed May 19 2010 Roland McGrath +- x86: put assembly CFI in .debug_frame + +* Tue May 18 2010 Kyle McMartin +- btrfs: check for read permission on src file in the clone ioctl + (rhbz#593226) + +* Mon May 17 2010 Matthew Garrett +- thinkpad-acpi-fix-backlight.patch: Fix backlight control on some recent + Thinkpads + +* Mon May 17 2010 Kyle McMartin 2.6.33.4-97 +- perf-mount-debugfs-automatically.patch (#570821) + +* Mon May 17 2010 Ben Skeggs 2.6.33.4-96 +- drm: fix edid modeline for 1024x768@85Hz (#582472) + +* Thu May 13 2010 Jarod Wilson 2.6.33.4-95 +- Enable support for kworld ub435-q and 340u usb atsc tuners + +* Thu May 13 2010 Peter Hutterer +- linux-2.6-input-clickpad-support.patch: add support for ClickPad + touchpads (#590835) + +* Wed May 12 2010 Chuck Ebbert 2.6.33.4-93 +- Linux 2.6.33.4 +- Drop patches merged upstream: + linux-2.6-pci-fixup-resume.patch + linux-2.6-tun-orphan_an_skb_on_tx.patch + libata-fix-accesses-at-LBA28-boundary.patch + linux-2.6-creds_are_invalid-race.patch + hugetlb-fix-infinite-loop-in-get-futex-key.patch + reiserfs-fix-permissions-on-reiserfs-priv.patch + ath9k-reorder-ieee80211_free_hw-behind-ath9k_uninit_.patch +- Revert -stable DRM patches we already have: + drm-i915-add-initial-bits-for-vga-modesetting-bringup-on-sandybridge.patch + drm-i915-fix-tiling-limits-for-i915-class-hw-v2.patch +- Fix up patches to apply on top of 2.6.33.4: + linux-2.6-p54pci.patch + vhost_net-rollup.patch + +* Wed May 12 2010 Roland McGrath +- utrace update (#590954) + +* Mon May 10 2010 Kyle McMartin +- don't link binutils against perf. sigh. stupid gpl versions. + +* Mon May 10 2010 Eric Paris +- reduce size of selinux poliy memory allocation (rhbz#590363) + +* Mon May 10 2010 Kyle McMartin - crypto-aesni-kill-module_alias.patch: kill MODULE_ALIAS to prevent aesni-intel from autoloading. -* Fri Jul 09 2010 Chuck Ebbert 2.6.34.1-8 -- iwlwifi: cancel scan watchdog in iwl_bg_abort_scan (#590436) +* Mon May 10 2010 Ben Skeggs +- add linux-2.6-input-hid-quirk-egalax.patch, missed from F-12, requested + by Peter Hutterer. -* Fri Jul 09 2010 Chuck Ebbert 2.6.34.1-7 -- Restore PowerPC VIO modalias patch; use the upstream version. -- Drop Mac G5 thermal shutdown patch, now upstream. +* Sun May 09 2010 Kyle McMartin +- fs-explicitly-pass-in-whether-sb-is-pinned-or-not.patch (rhbz#588930) -* Fri Jul 09 2010 Chuck Ebbert 2.6.34.1-6 -- Fix modpost segfault when building kernels. (#595915) +* Sat May 08 2010 Kyle McMartin +- Link perf against libbfd.a for name-demangling support. (rhbz#590289) -* Fri Jul 09 2010 Chuck Ebbert 2.6.34.1-5 -- pci-change-error-messages-to-kern-info.patch: - Use new upstream patch to silence more useless messages. +* Thu May 06 2010 Adam Jackson 2.6.33.3-85 +- drm-intel-next: Enable the display even harder (#587171) -* Fri Jul 09 2010 Chuck Ebbert 2.6.34.1-4 -- sched-fix-over-scheduling-bug.patch: fix scheduler bug with CGROUPS +* Wed May 5 2010 Kyle McMartin 2.6.33.3-84 +- CONFIG_HWMON=y => CONFIG_THERMAL_HWMON. Kconfig is worse than rabies. 
-* Fri Jul 09 2010 Chuck Ebbert 2.6.34.1-3 -- ethtool-fix-buffer-overflow.patch (CVE-2010-2478) +* Wed May 5 2010 Kyle McMartin 2.6.33.3-83 +- disable-i8042-check-on-apple-mac.patch: fix build on ppc. -* Fri Jul 09 2010 Chuck Ebbert 2.6.34.1-2 -- Copy fix for BZ#609548 from F-13 2.6.33 kernel. +* Tue May 4 2010 John W. Linville 2.6.33.3-82 +- iwlwifi: recalculate average tpt if not current (#588021) -* Fri Jul 09 2010 Chuck Ebbert 2.6.34.1-1 -- Initial commit of 2.6.34 for F-13 -- Previous history is in the branch private-f14-2_6_34 +* Tue May 4 2010 Kyle McMartin 2.6.33.3-81 +- disable-i8042-check-on-apple-mac.patch: avoid long delay or hang booting + on Intel Apple Macs. -* Wed Jul 07 2010 Chuck Ebbert -- pci-acpi-disable-aspm-if-no-osc.patch, pci-aspm-dont-enable-too-early.patch - PCI layer fixes for problems with hardware that doesn't support ASPM. +* Tue May 4 2010 Kyle McMartin 2.6.33.3-80 +- ibmvscsi-fix-DMA-API-misuse.patch (#579454) -* Wed Jul 07 2010 Chuck Ebbert -- attempt to fix hibernate on Intel GPUs (kernel.org #13811) (RHBZ#537494) +* Mon May 3 2010 Kyle McMartin 2.6.33.3-79 +- disable aesni. (#571577) -* Wed Jul 07 2010 Chuck Ebbert -- Let ata_generic handle SATA interface on new MacBook Pro (#608034) +* Fri Apr 30 2010 John W. Linville 2.6.33.3-78 +- ath9k: reorder ieee80211_free_hw behind ath9k_uninit_hw to avoid + oops (#586787) -* Tue Jul 06 2010 Chuck Ebbert -- Re-enable options: DYNAMIC_FTRACE, FUNCTION_TRACER and STACK_TRACER +* Fri Apr 30 2010 Kyle McMartin +- add-appleir-driver.patch: update from hadess, split out some other patches. +- git-bluetooth.patch: and put them in git-bluetooth, along with other fixes. -* Tue Jul 06 2010 Chuck Ebbert -- Linux 2.6.34.1 +* Thu Apr 29 2010 Adam Jackson +- drm-intel-sdvo-fix-2.patch: Require that the A/D bit of EDID match the + A/D-ness of the connector. (#584229) -* Thu Jul 01 2010 Chuck Ebbert -- Linux 2.6.34.1-rc1 +* Thu Apr 29 2010 Kyle McMartin +- add-appleir-usb-driver.patch: updates from hadess. + +* Thu Apr 29 2010 Ben Skeggs 2.6.33.3-73 +- nouveau: initial eDP support + DP suspend/resume fixes +- nouveau: fix monitor detection on certain chipsets with DP support +- nouveau: better CRTC PLL calculation on latest chipsets +- nouveau: send hotplug events down to userspace + +* Wed Apr 28 2010 John W. 
Linville 2.6.33.3-72 +- Revert "ath9k: fix lockdep warning when unloading module" + +* Tue Apr 27 2010 Chuck Ebbert 2.6.33.3-71 +- Linux 2.6.33.3 - Drop patches merged upstream: - btrfs-should-add-permission-check-for-setfacl.patch (CVE-2010-2071) - iwlwifi-recalculate-average-tpt-if-not-current.patch - iwlwifi-fix-internal-scan-race.patch + acpi-ec-allow-multibyte-access-to-ec.patch + acpi-ec-limit-burst-to-64-bit.patch + b43_-Allow-PIO-mode-to-be-selected-at-module-load.patch + b43_-fall-back-gracefully-to-PIO-mode-after-fatal-DMA-errors.patch + mac80211_-tear-down-all-agg-queues-when-restart_reconfig-hw.patch + iwlwifi_-clear-all-tx-queues-when-firmware-ready.patch + iwlwifi_-fix-scan-race.patch - Revert DRM patches we already have: - drm-i915-rebind-bo-if-currently-bound-with-incorrect-alignment.patch - drm-radeon-fix-the-r100-r200-ums-block-0-page-fix.patch - drm-radeon-r100-r200-ums-block-ability-for-userspace-app-to-trash-0-page-and-beyond.patch - drm-radeon-kms-atom-fix-typo-in-lvds-panel-info-parsing.patch - drm-radeon-kms-reset-ddc_bus-in-object-header-parsing.patch - drm-edid-fix-1024x768-85hz.patch - drm-i915-reject-bind_to_gtt-early-if-object-aperture.patch - drm-i915-fix-82854-pci-id-and-treat-it-like-other-85x.patch -- Revert broken -stable patch: - perf-fix-endianness-argument-compatibility-with-opt_boolean-and-introduce-opt_incr.patch + drm-radeon-kms-add-firemv-2400-pci-id.patch + drm-radeon-kms-fix-rs600-tlb-flush.patch + drm-edid-quirks-envision-en2028.patch + drm-return-enodev-if-the-inode-mapping-changes.patch + drm-remove-the-edid-blob-stored-in-the-edid-property-when-it-is-disconnected.patch + drm-edid-allow-certain-bogus-edids-to-hit-a-fixup-path-rather-than-fail.patch +- Fix up drm-core-next to apply after 2.6.33.3 -* Wed Jun 30 2010 Kyle McMartin -- Disable MRST on x86 here as well. +* Tue Apr 27 2010 Justin M. Forbes +- Orphan an skb on tx for tun/tap devices. -* Tue Jun 29 2010 Kyle McMartin -- i915-fix-crt-hotplug-regression.patch: copy from rawhide. +* Tue Apr 27 2010 Chuck Ebbert 2.6.33.2-68 +- Fix possible data corruption with ext4 mounted with -o discard -* Mon Jun 28 2010 Chuck Ebbert -- ppc64: enable active memory sharing and DLPAR memory remove (#607175) +* Mon Apr 26 2010 Chuck Ebbert +- hugetlb-fix-infinite-loop-in-get-futex-key.patch (F12#552557) +- reiserfs-fix-permissions-on-reiserfs-priv.patch (CVE-2010-1146) -* Mon Jun 28 2010 Chuck Ebbert -- Copy fix for BZ#220892 from F-13. +* Mon Apr 26 2010 Chuck Ebbert 2.6.33.2-66 +- Turn off debugging and enable debug kernel builds. -* Fri Jun 25 2010 Kyle McMartin -- drm-i915-fix-edp-panels.patch: copy from rawhide. +* Mon Apr 26 2010 Dave Jones +- Revert PCI changes from 2.6.33.2. + Possibly causing networking problems with some drivers. -* Mon Jun 21 2010 Dave Jones -- Disable workaround for obscure SMP pentium pro errata. - I miss the 1990s too, but it's time to move on. - If anyone actually needs this it would be better done using - the apply_alternatives infrastructure. +* Mon Apr 26 2010 Adam Jackson +- drm-intel-sdvo-fix.patch: Fix DDC bus selection for SDVO (#584229) -* Mon Jun 21 2010 Kyle McMartin -- drm-revert-drm-fbdev-rework-output-polling-to-be-back-in-core.patch - Revert eb1f8e4f, bisected by Nicolas Kaiser. Thanks! (rhbz#599190) - (If this works, will try to root-cause.) 
-- rebase previous patch on top of above reversion +* Thu Apr 22 2010 Hans de Goede +- Make p54pci wlan work on slower computers (#583623) -* Mon Jun 21 2010 Kyle McMartin -- revert-drm-kms-toggle-poll-around-switcheroo.patch (rhbz#599190) +* Thu Apr 22 2010 Matthew Garrett +- linux-2.6-pci-fixup-resume.patch: Make sure we enable power resources on D0 -* Thu Jun 17 2010 Kyle McMartin -- Suck in patch from Dave Miller in 2.6.35 to add async hash testing, - hopefully fixes error from previous commit. (But making it modular - is still a good idea.) +* Wed Apr 21 2010 Justin M. Forbes +- vhost-net fixes from upstream -* Thu Jun 17 2010 Kyle McMartin -- make ghash-clmulni modular to get rid of early boot noise (rhbz#586954) - (not a /fix/ but it should at least quiet boot down a bit if you have - the cpu support) +* Wed Apr 21 2010 Roland McGrath 2.6.33.2-60 +- fix race crash from bogus cred.c debugging code (#583843) -* Wed Jun 16 2010 Kyle McMartin -- Snag some more DRM commits into drm-next.patch that I missed the first - time. -- Fix up radeon_pm toggle to work with the upstream code. +* Wed Apr 21 2010 Matthew Garrett +- thinkpad-acpi-add-x100e.patch: Add EC path for Thinkpad X100 -* Tue Jun 15 2010 Prarit Bhargava -- Turn off CONFIG_I2O on x86. - It is broken on 64-bit address spaces (i686/PAE, x86_64), and frankly, I'm - having trouble finding anyone who actually uses it. +* Tue Apr 20 2010 Dave Airlie 2.6.33.2-57 +- drm-radeon-ss-fix.patch: backport spread spectrum fix (#571874) -* Tue Jun 15 2010 Kyle McMartin -- Fix build by nuking superfluous "%{expand" which was missing a - trailing '}'. You may now reward me with an array of alcoholic - beverages, I so richly deserve for spending roughly a full - day staring at the diff of the spec. +* Mon Apr 19 2010 Adam Jackson 2.6.33.2-56 +- drm-intel-next.patch: 2.6.34 as of today, plus anholt's for-linus tree as + of today, plus most of drm-intel-next except for the AGP/GTT split and a + broken TV detect fix. Tested on 945GM, GM45, and gen5. +- drm-intel-make-lvds-work.patch: Rebase to match. +- drm-intel-acpi-populate-didl.patch: Drop, merged in -intel-next +- drm-intel-gen5-dither.patch: Use better dither on gen5. -* Mon Jun 14 2010 Kyle McMartin -- btrfs ACL fixes from CVE-2010-2071. +* Mon Apr 19 2010 Matthew Garrett +- linux-2.6-acpi-sleep-live-sci-live.patch: Try harder to switch to ACPI mode -* Sun Jun 13 2010 Kyle McMartin -- remunge and reapply hdpvr-ir-enable +* Mon Apr 19 2010 Adam Jackson +- linux-2.6-intel-iommu-igfx.patch: Disable IOMMU for GFX by default, just too + broken. intel_iommu=igfx_on to turn it on. (Adel Gadllah) -* Sun Jun 13 2010 Kyle McMartin -- mac80211/iwlwifi fix connections to some APs (rhbz#558002) - patches from sgruszka@. +* Mon Apr 19 2010 Dave Airlie +- radeon: add rs600 + firemv pciid + dual-link fix -* Sun Jun 13 2010 Kyle McMartin -- Provide a knob to enable radeon_pm to allow users to test - that functionality. Add radeon.pm=1 to your kernel cmdline - in order to enable it. (It still defaults to off though.) +* Fri Apr 16 2010 John W. Linville +- Patches from Intel to address intermittent firmware failures with iwlagn -* Sun Jun 13 2010 Kyle McMartin -- Update drm-next to include fixes since 2.6.35-rc1. +* Fri Apr 16 2010 Adam Jackson +- drm-core-next.patch: Update EDID and other core bits to airlied's tree +- drm-nouveau-abi16.patch: Rediff to match -* Fri Jun 11 2010 Justin M. Forbes -- Disable xsave for so that kernel will boot on ancient EC2 hosts. 
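The radeon power-management knob noted in the removed 2.6.34 entry above is an ordinary module parameter, so, as a hedged illustration rather than anything taken from this spec, it can be switched on either from the kernel command line or through modprobe configuration:

    # boot loader kernel command line (honored for built-in or modular radeon):
    radeon.pm=1
    # or, for the modular driver, an options file (hypothetical path):
    echo "options radeon pm=1" > /etc/modprobe.d/radeon-pm.conf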
+* Fri Apr 16 2010 Ben Skeggs 2.6.33.2-49 +- nouveau: fix dereference-after-free bug (rh#575224) +- drm-nouveau-acpi-edid-fallback.patch: fix ppc build + potential crasher -* Wed Jun 09 2010 John W. Linville -- Disable rt20xx and rt35xx chipset support in rt2800 drivers (#570869) +* Thu Apr 15 2010 Eric Paris +- enable CONFIG_INTEL_TXT on x86_64 -* Wed Jun 09 2010 David Woodhouse -- Include PHY modules in modules.networking (#602155) +* Wed Apr 14 2010 David Woodhouse +- Fix autoloading of phy modules (#525966) + +* Wed Apr 14 2010 Chuck Ebbert 2.6.33.2-46 +- libata-fix-accesses-at-LBA28-boundary.patch + +* Tue Apr 13 2010 Justin M. Forbes +- virt_console: Fixes from upstream + +* Tue Apr 13 2010 Chuck Ebbert +- Fix ACPI errors on boot caused by EC burst mode patch (#581535) +- Re-enable ACPI EC delay patch (#579510) + +* Tue Apr 13 2010 Ben Skeggs +- drm-nouveau-acpi-edid-fallback.patch: fix oops on cards without _DSM method + +* Mon Apr 12 2010 Matthew Garrett +- linux-2.6-acpi-video-export-edid.patch: + drm-nouveau-acpi-edid-fallback.patch: Let nouveau get an EDID from ACPI + +* Fri Apr 09 2010 John W. Linville 2.6.33.2-41 +- b43: Allow PIO mode to be selected at module load +- b43: fall back gracefully to PIO mode after fatal DMA errors + +* Fri Apr 09 2010 Chuck Ebbert +- virt_console: fix a bug in the original race fix + +* Fri Apr 09 2010 Ben Skeggs +- nouveau: fixes from upstream + NVA3 support + +* Thu Apr 08 2010 Dave Airlie +- Backport radeon r800 modesetting support + +* Wed Apr 07 2010 Chuck Ebbert +- Disable async multicore RAID4/5/6 stripe processing (F12#575402) + +* Tue Apr 06 2010 Hans de Goede +- gspca-vc032x: Use YUYV output for OV7670 (#537332) + +* Mon Apr 05 2010 Chuck Ebbert +- Build eeepc-laptop driver for x86_64 (#565582) + +* Mon Apr 05 2010 Chuck Ebbert +- Linux 2.6.33.2 +- Dropped patches merged upstream: + coredump-uid-pipe-check.patch + iwlwifi-use-dma_alloc_coherent.patch + r8169-offical-fix-for-CVE-2009-4537.patch +- Dropped from drm-nouveau-updates.patch: + "drm/nouveau: report unknown connector state if lid closed" +- New sparc64 config option: + CONFIG_FB_XVR1000=y +- Reverted from upstream: + usb-qcserial-add-new-device-ids.patch: Already in wwan-update patch + +* Mon Apr 05 2010 Chuck Ebbert +- Comment out acpi-ec-add-delay-before-write.patch: breaks + boot on some machines. + +* Mon Apr 05 2010 Jarod Wilson 2.6.33.1-32 +- Fix oops in lirc_it87 driver (#579270) +- Support more imon 0xffdc key combinations + +* Sat Apr 03 2010 Chuck Ebbert +- Build all of the DVB frontend drivers instead of just the automatically + selected ones. (#578755) + +* Thu Apr 01 2010 Matthew Garrett +- drm-intel-acpi-populate-didl.patch: Fix brightness hotkeys on some machines +- linux-2.6-usb-wwan-update.patch: Update wwan code and fix qcserial + +* Wed Mar 31 2010 Matthew Garrett +- drm-intel-make-lvds-work.patch: Make sure LVDS gets turned back on + +* Tue Mar 30 2010 Chuck Ebbert +- Allow setting buildid on both command line and in the SRPM. + +* Tue Mar 30 2010 Chuck Ebbert 2.6.33.1-26 +- r8169-offical-fix-for-CVE-2009-4537.patch + +* Tue Mar 30 2010 Chuck Ebbert +- ACPI EC fixes pending upstream: + acpi-ec-add-delay-before-write.patch + acpi-ec-allow-multibyte-access-to-ec.patch + +* Tue Mar 30 2010 Dave Jones +- Fix broken locking in cpufreq. + +* Tue Mar 30 2010 John W. Linville 2.6.33.1-24 +- Avoid null pointer dereference introduced by 'ssb: check for sprom' (#577463) + +* Mon Mar 29 2010 John W. 
Linville 2.6.33.1-23 +- iwlwifi: reset card during probe (#557084) +- iwlwifi: use dma_alloc_coherent (#574146) + +* Mon Mar 29 2010 Ben Skeggs 2.6.33.1-22 +- nouveau: sync with nouveau upstream + +* Wed Mar 24 2010 Josef Bacik 2.6.33.1-21 +- Update btrfs so it includes the default subvolume stuff, for the rollback + feature + +* Mon Mar 22 2010 Jarod Wilson +- A few more imon driver button additions +- Fix minor init issue w/topseed 0x0008 mceusb transceivers + +* Fri Mar 19 2010 John W. Linville 2.6.33.1-19 +- ssb: check for sprom (#533746) + +* Fri Mar 19 2010 Jarod Wilson 2.6.33.1-18 +- Improve mouse button and pad handling on 0xffdc imon devices +- Add xmit support to topseed 0x0008 lirc_mceusb transceiver + +* Fri Mar 19 2010 David Woodhouse +- Apply fix for #538163 again (Cantiga shadow GTT chipset b0rkage). + +* Fri Mar 19 2010 Hans de Goede +- Cherry pick various webcam driver fixes + (#571188, #572302, #572373) + +* Thu Mar 18 2010 Neil Horman +- Disable TIPC protocol in config + +* Wed Mar 17 2010 Jarod Wilson +- lirc driver update: + * fix lirc_i2c on cx2341x hauppauge cards (#573675) + * fix null ptr deref in lirc_imon (#545599) + * fix lirc_zilog on cx2341x hauppauge cards + * adds a few new lirc_mceusb device ids +- imon input layer driver update, adds better support for 0xffdc + devices and handles failed key lookups better + +* Tue Mar 16 2010 Chuck Ebbert +- Linux 2.6.33.1 + +* Tue Mar 16 2010 Chuck Ebbert +- Add examples.txt to perf docs, require libdwarf with perf package. + (#568309, #569506) + +* Mon Mar 15 2010 Chuck Ebbert +- Linux 2.6.33.1-rc1 +- Drop merged patch: + x86-pci-prevent-mmconfig-memory-corruption.patch +- Revert V4l patch we already have: + v4l-dvb-13991-gspca_mr973010a-fix-cif-type-1-cameras-not-streaming-on-uhci-controllers.patch + +* Mon Mar 15 2010 Ben Skeggs +- nouveau: pull in more fixes from upstream + +* Sat Mar 06 2010 Kyle McMartin +- Add libdwarf dep if %with_perftool. + +* Fri Mar 05 2010 Kyle McMartin +- Fix race between hvc_close and hvc_remove. (rhbz#568621) + +* Thu Mar 04 2010 Kyle McMartin +- Enable CGROUP_DEBUG. + +* Mon Mar 01 2010 Dave Jones +- Don't own /usr/src/kernels any more, it's now owned by filesystem. (#569438) + +* Sat Feb 27 2010 Chuck Ebbert +- Add patch from the 2.6.33 stable queue to fix memory corruption + in the PCI MMCONFIG code. + +* Thu Feb 25 2010 Ben Skeggs +- nouveau: rebase to nouveau/linux-2.6 git + +* Wed Feb 24 2010 Chuck Ebbert +- Drop/clear obsolete V4L patches, use ApplyOptionalPatch +- Fix two typos in config-generic probably caused by vi users + +* Wed Feb 24 2010 Dave Jones +- Remove unnecessary redefinition of KEY_RFKILL from linux-2.6-rfkill-all.patch + +* Wed Feb 24 2010 Kyle McMartin 2.6.33-1 +- Linux 2.6.33 + +* Wed Feb 24 2010 Dave Jones 2.6.33-0.53.rc8.git9 +- 2.6.33-rc8-git9 +- dropped: drm-nouveau-old-vgaload.patch - merged upstream. +- dropped: drm-nouveau-gf8-igp.patch - merged upstream. + +* Tue Feb 23 2010 Ben Skeggs 2.6.33-0.52.rc8.git6 +- nouveau: bring to latest upstream, reorganise patches to be more sensible + +* Mon Feb 22 2010 Kyle McMartin +- coredump-uid-pipe-check.patch: commit it to a useful branch. 
+ +* Mon Feb 22 2010 Dave Jones 2.6.33-0.50.rc8.git6 +- 2.6.33-rc8-git6 + +* Sun Feb 21 2010 Hans de Goede +- Rebase gspca usb webcam driver + sub drivers to latest upstream, this + adds support for the following webcam bridge chipsets: benq, cpia1, sn9c2028; + and support for new devices and many bugfixes in other gspca-subdrivers + +* Fri Feb 19 2010 Kyle McMartin 2.6.33-0.48.rc8.git4 +- 2.6.33-rc8-git4 + +* Wed Feb 17 2010 Ben Skeggs 2.6.33-0.47.rc8.git1 +- nouveau: update to new kernel interface +- drm_nouveau_ucode.patch: drop, in linux-firmware now + +* Tue Feb 16 2010 Kyle McMartin 2.6.33-0.46.rc8.git1 +- 2.6.33-rc8-git1 +- virt_console-rollup.patch: fixes from linux-next from Amit. + +* Mon Feb 15 2010 Neil Horman +- Refactor usermodehelper code and change recursion check for abrt + with linux-2.6-umh-refactor.patch from -mm + fixes bz 557386 + +* Fri Feb 12 2010 Chuck Ebbert 2.6.33-0.44.rc8 +- 2.6.33-rc8 + +* Fri Feb 12 2010 Chuck Ebbert 2.6.33-0.43.rc7.git6 +- 2.6.33-rc7-git6 + +* Thu Feb 11 2010 Chuck Ebbert 2.6.33-0.42.rc7.git5 +- 2.6.33-rc7-git5 +- Drop merged patches: + fix-conntrack-bug-with-namespaces.patch + commit ad60a9154887bb6162e427b0969fefd2f34e94a6 from git-bluetooth.patch + +* Mon Feb 08 2010 Josh Boyer +- Drop ppc ps3_storage and imac-transparent bridge patches + +* Sat Feb 06 2010 Kyle McMartin 2.6.33-0.40.rc7.git0 +- Add libdwarf-devel to build deps so perf gets linked to it. + +* Sat Feb 06 2010 Kyle McMartin +- virt_console-rollup.patch, for feature F13/VirtioSerial, patches + are all targetted at 2.6.34 (and in linux-next.) + +* Sat Feb 06 2010 Kyle McMartin +- git-bluetooth.patch: selection of backports from next for hadess. + (rhbz#562245) + +* Sat Feb 06 2010 Kyle McMartin 2.6.33-0.36.rc7.git0 +- Linux 2.6.33-rc7 (oops, jumped the gun on -git6 I guess. :) + +* Sat Feb 06 2010 Kyle McMartin +- 2.6.33-rc6-git6 + +* Sat Feb 06 2010 Kyle McMartin +- Hack around delay loading microcode.ko, on intel, we don't split out + the firmware into cpuid specific versions (in fact, I don't know who does...) + so just patch out the request_firmware calls in microcode_intel.c, and + microcode_ctl.init will do the right thing. (fixes rhbz#560031) + (side note: I'll fix microcode_ctl to do one better at some point.) + +* Sat Feb 06 2010 Kyle McMartin +- Don't want linux-firmware if %with_firmware, yet. (Think F-11/F-12 2.6.33) + +* Fri Feb 05 2010 Peter Jones +- Move initrd creation to %%posttrans + Resolves: rhbz#557922 + +* Fri Feb 05 2010 Kyle McMartin +- If %with_firmware, continue with kernel-firmware, otherwise prereq on the + separate linux-firmware pkg. Thanks to dzickus for noticing. + +* Thu Feb 04 2010 Kyle McMartin 2.6.33-0.29.rc6.git4 +- 2.6.33-rc6-git4 + +* Wed Feb 03 2010 Kyle McMartin +- prevent-runtime-conntrack-changes.patch: fix another conntrack issue + identified by jcm. + +* Wed Feb 03 2010 Kyle McMartin +- fix-conntrack-bug-with-namespaces.patch: Patch for issue identified + by jcm. (Ref: http://lkml.org/lkml/2010/2/3/112) + +* Mon Feb 02 2010 Chuck Ebbert 2.6.33-0.26.rc6.git1 +- 2.6.33-rc6-git1 + +* Fri Jan 29 2010 Chuck Ebbert 2.6.33-0.25.rc6.git0 +- 2.6.33-rc6 + +* Wed Jan 27 2010 Roland McGrath 2.6.33-0.24.rc5.git1 +- Fix include/ copying for kernel-devel. + +* Mon Jan 25 2010 Kyle McMartin 2.6.33-0.23.rc5.git1 +- 2.6.33-rc5-git1 +- arm: MTD_PISMO is not set + +* Mon Jan 25 2010 Dave Jones +- Disable CONFIG_X86_CPU_DEBUG + +* Mon Jan 25 2010 Josh Boyer +- Turn off CONFIG_USB_FHCI_HCD. 
It doesn't build + +* Fri Jan 22 2010 Kyle McMartin 2.6.33-0.20.rc5.git0 +- 2.6.33-rc5 + +* Thu Jan 21 2010 Jarod Wilson +- Merge crystalhd powerpc build fix from airlied + +* Wed Jan 20 2010 Kyle McMartin 2.6.33-0.18.rc4.git7 +- 2.6.32-rc4-git7 +- dvb mantis drivers as modules + +* Wed Jan 20 2010 Kyle McMartin 2.6.33-0.17.rc4.git6 +- add appleir usb driver + +* Mon Jan 18 2010 Kyle McMartin 2.6.33-0.16.rc4.git6 +- 2.6.33-rc4-git6 +- execshield: rebase for mm_types.h reject + +* Mon Jan 18 2010 Kyle McMartin +- vhost_net-rollup.patch: https://fedoraproject.org/wiki/Features/VHostNet + from davem/net-next-2.6.git + +* Sat Jan 16 2010 Kyle McMartin 2.6.33-0.14.rc4.git3 +- DEBUG_STRICT_USER_COPY_CHECKS off for now, tickles issue in lirc_it87.c + +* Sat Jan 16 2010 Kyle McMartin 2.6.33-0.13.rc4.git3 +- 2.6.33-rc4-git3 + +* Thu Jan 14 2010 Steve Dickson +- Enabled the NFS4.1 (CONFIG_NFS_V4_1) kernel config + +* Wed Jan 13 2010 Kyle McMartin 2.6.33-0.11.rc4 +- Linux 2.6.33-rc4 + +* Wed Jan 13 2010 Kyle McMartin 2.6.33-0.10.rc3.git5 +- 2.6.33-rc3-git5 + +* Wed Jan 13 2010 Dave Airlie +- Add fbdev fix for multi-card primary console on x86-64 +- clean up all the drm- patches + +* Tue Jan 12 2010 Jarod Wilson +- Update lirc patch for 2.6.33 kfifo changes +- Add Broadcom Crystal HD video decoder driver from staging + +* Mon Jan 11 2010 Kyle McMartin +- include/asm is gone, kludge it for now. + +* Mon Jan 11 2010 Dave Jones +- Rebase exec-shield. + +* Mon Jan 11 2010 Kyle McMartin +- drop e1000 patch. + +* Mon Jan 11 2010 Kyle McMartin +- lirc broken due to kfifo mess. + +* Mon Jan 11 2010 Kyle McMartin +- drm-intel-big-hammer: fix IS_I855 macro. + +* Mon Jan 11 2010 Kyle McMartin +- Linux 2.6.33-rc3 +- utrace: rebased from roland's people page. +- via-hwmon-temp-sensor.patch: upstream. +- linux-2.6-defaults-alsa-hda-beep-off.patch: new config option supercedes. +- readd nouveau ctxprogs as firmware/ like it should be. +- linux-2.6-pci-cacheline-sizing.patch: upstream. +- linux-2.6-intel-agp-clear-gtt.patch: upstream. +- linux-2.6-nfsd4-proots.patch: upstream? +- rebased the rest. + +* Mon Jan 11 2010 Kyle McMartin 2.6.32.3-21 +- Linux 2.6.32.3 +- drm-intel-no-tv-hotplug.patch: re-add lost patch from F-12 + 2.6.31 (#522611, #544671) + +* Mon Jan 11 2010 Kyle McMartin 2.6.32.2-20 +- Re-enable ATM_HE (#545289) + +* Fri Jan 08 2010 Chuck Ebbert 2.6.32.2-19 +- Add another symbol to look for when generating modules.block + +* Thu Jan 07 2010 David Woodhouse 2.6.32.2-18 +- Drop kernel-firmware package now that it's packaged separately. + +* Mon Jan 04 2010 Dave Jones +- Drop some of the vm/spinlock taint patches. dump_stack() already does same. + +* Thu Dec 24 2009 Kyle McMartin 2.6.32.2-15 +- Add patch from dri-devel to fix vblanks on r600. + [http://marc.info/?l=dri-devel&m=126137027403059&w=2] + +* Fri Dec 18 2009 Kyle McMartin 2.6.32.2-14 +- Linux 2.6.32.2 +- dropped upstream patches. + +* Fri Dec 18 2009 Roland McGrath - 2.6.32.1-13 +- minor utrace update + +* Thu Dec 17 2009 Matthew Garrett 2.6.32.1-12 +- linux-2.6-driver-level-usb-autosuspend.diff: fix so it works properly... 
+- linux-2.6-fix-btusb-autosuspend.patch: avoid bluetooth connection drops +- linux-2.6-enable-btusb-autosuspend.patch: and default it to on +- linux-2.6-autoload-wmi.patch: autoload WMI drivers + +* Thu Dec 17 2009 Jarod Wilson 2.6.32.1-11 +- Split off onboard decode imon devices into pure input driver, + leaving lirc_imon for the ancient imon devices only +- Fix NULL ptr deref in lirc_serial (#543886) +- Assorted lirc_mceusb fixups suggested by Mauro +- Dropped compat ioctls from lirc_dev, main ioctls should now be + compatible between 32-bit and 64-bit (also at Mauro's suggestion) + +* Wed Dec 16 2009 Roland McGrath 2.6.32.1-10 +- utrace update, now testing the utrace-based ptrace! + +* Mon Dec 14 2009 Kyle McMartin 2.6.32.1-9 +- 2.6.32.1 +- ext4 patches and more... + +* Wed Dec 09 2009 Kyle McMartin 2.6.32-8 +- Add a patch off lkml from krh to fix perf when DEBUG_PERF_USE_VMALLOC + (rhbz#542791) +- Re-enable CONFIG_DEBUG_PERF_USE_VMALLOC on debug kernels. + +* Wed Dec 09 2009 Kyle McMartin 2.6.32-7 +- ext4-fix-insufficient-checks-in-EXT4_IOC_MOVE_EXT.patch: CVE-2009-4131 + fix insufficient permission checking which could result in arbitrary + data corruption by a local unprivileged user. + +* Tue Dec 08 2009 Chuck Ebbert 2.6.32-6 +- Copy fix for #540580 from F-12. + +* Tue Dec 08 2009 Kyle McMartin 2.6.32-5 +- new rpm changes: + - %{PACKAGE_VERSION} -> %{version} + - %{PACKAGE_RELEASE} -> %{release} + +* Tue Dec 08 2009 Kyle McMartin 2.6.32-4 +- Disable CONFIG_DEBUG_PERF_USE_VMALLOC for now, causes issues + on x86_64. (rhbz#542791) + +* Mon Dec 7 2009 Justin M. Forbes 2.6.32-3 +- Allow userspace to adjust kvmclock offset (#530389) + +* Mon Dec 7 2009 Steve Dickson 2.6.32-2 +- Updated the NFS4 pseudo root code to the latest release. + +* Thu Dec 03 2009 Kyle McMartin 2.6.32-1 +- Linux 2.6.32 + +* Wed Dec 02 2009 Kyle McMartin 2.6.32-0.65.rc8.git5 +- 2.6.32-rc8-git5 +- nuke 9p cachefiles fix, upstream. +- SLOW_WORK_PROC was renamed to SLOW_WORK_DEBUG, debugfs instead of procfs. + +* Wed Dec 02 2009 John W. Linville 2.6.32-0.64.rc8.git2 +- ath9k: add fixes suggested by upstream maintainer + +* Wed Dec 02 2009 David Woodhouse 2.6.32-0.63.rc8.git2 +- forward port IOMMU fixes from F-12 for HP BIOS brokenness +- Fix oops with intel_iommu=igfx_off +- agp/intel: Clear full GTT at startup + +* Wed Dec 02 2009 Dave Airlie 2.6.32-0.62.rc8.git2 +- forward port radeon fixes from F-12 + add radeon display port support + +* Mon Nov 30 2009 Kyle McMartin 2.6.32-0.61.rc8.git2 +- fix-9p-fscache.patch: fix build. + +* Mon Nov 30 2009 Kyle McMartin 2.6.32-0.60.rc8.git2 +- 2.6.32-rc8-git2 daily snapshot +- nuke include/generated nuke-age since the patch was reverted upstream +- config changes: + - generic: + +CONFIG_FSCACHE_OBJECT_LIST=y + +CONFIG_SLOW_WORK_PROC=y + +* Mon Nov 30 2009 Kyle McMartin +- drm-i915-fix-sync-to-vbl-when-vga-is-off.patch: add, (rhbz#541670) + +* Sun Nov 29 2009 Kyle McMartin +- linux-2.6-sysrq-c.patch: drop, was made consistent upstream. + +* Sat Nov 28 2009 Jarod Wilson 2.6.32-0.55.rc8.git1 +- add device name to lirc_zilog, fixes issues w/multiple target devices +- add lirc_imon pure input mode support for onboard decode devices + +* Fri Nov 27 2009 Dave Airlie 2.6.32-0.54.rc8.git1 +- attempt to put nouveau back - same patch as F-12 should work + +* Mon Nov 23 2009 Roland McGrath +- Install vmlinux.id file in kernel-devel rpm. 
+ +* Fri Nov 20 2009 Chuck Ebbert 2.6.32-0.52.rc8.git1 +- 2.6.32-rc8-git1 +- Enable CONFIG_MEMORY_HOTPLUG (and HOTREMOVE) on x86_64 + +* Thu Nov 19 2009 Kyle McMartin 2.6.32-0.51.rc7.git2 +- Oops, re-enable debug builds for rawhide... didn't mean to commit that. + +* Thu Nov 19 2009 Kyle McMartin 2.6.32-0.50.rc7.git2 +- Disable FUNCTION_TRACER and DYNAMIC_FTRACE in non-debug builds for + Fedora 13. Some pondering required to see if it's actually worth doing + though. Anecdotal evidence worth half as much as benchmarks. + STACK_TRACER selects FUNCTION_TRACER, so it has to go off too, sadly, + since it hooks every mcount to log the stack depth for the task. + +* Thu Nov 19 2009 Kyle McMartin 2.6.32-0.49.rc7.git2 +- 2.6.32-rc7-git2 + +* Mon Nov 16 2009 Dave Airlie 2.6.32-0.48.rc7.git1 +- backout gpg change now that koji is fixed + +* Sun Nov 15 2009 Chuck Ebbert 2.6.32-0.47.rc7.git1 +- Buildrequire gpg + +* Sun Nov 15 2009 Chuck Ebbert +- Fix oops in VIA Padlock driver. + +* Sun Nov 15 2009 Chuck Ebbert +- Linux 2.6.32-rc7-git1 + +* Fri Nov 13 2009 Chuck Ebbert +- Linux 2.6.32-rc7 + +* Thu Nov 05 2009 Jarod Wilson +- Add --with dbgonly rpmbuild option to build only debug kernels + +* Wed Nov 04 2009 Kyle McMartin +- Make JBD2_DEBUG a toggleable config option. + +* Wed Nov 04 2009 Kyle McMartin 2.6.32-0.39.rc6.git0 +- 2.6.32-rc6, fix for NULL ptr deref in cfg80211. + +* Mon Nov 02 2009 Kyle McMartin 2.6.32-0.39.rc5.git6 +- 2.6.32-rc5-git6 (with sandeen's reversion of "ext4: Remove journal_checksum + mount option and enable it by default") + +* Mon Nov 02 2009 Chuck Ebbert +- 2.6.32-rc5-git5 + +* Tue Oct 27 2009 John W. Linville +- Disable build of prism54 module + +* Tue Oct 27 2009 Dave Airlie +- Get dd command line args correct. + +* Mon Oct 26 2009 Dave Jones +- Make a 20MB initramfs file so rpm gets its diskspace calculations right. (#530778) + +* Sat Oct 23 2009 Chuck Ebbert +- 2.6.32-rc5-git3 +- Drop merged patch: + linux-2.6-virtio_blk-revert-QUEUE_FLAG_VIRT-addition.patch + +* Sat Oct 17 2009 Chuck Ebbert 2.6.32-0.33.rc5.git1 +- 2.6.32-rc5-git1 + +* Fri Oct 16 2009 Chuck Ebbert +- 2.6.32-rc5 +- New config option: CONFIG_VMXNET3=m + +* Wed Oct 14 2009 Chuck Ebbert +- 2.6.32-rc4-git4 + +* Wed Oct 14 2009 Steve Dickson +- Updated the NFS v4 pseudo root patch so it will apply +- Fixed hang during NFS installs (bz 528537) + +* Wed Oct 14 2009 Peter Jones +- Add scsi_register_device_handler to modules.block's symbol list so + we'll have scsi device handlers in installer images. + +* Tue Oct 13 2009 Kyle McMartin +- Always build perf docs, regardless of whether we build kernel-doc. + Seems rather unfair to not ship the manpages half the time. + Also, drop BuildRequires %if when not with_doc, the rules about %if + there are f*!&^ing complicated. + +* Tue Oct 13 2009 Kyle McMartin +- Build perf manpages properly. + +* Tue Oct 13 2009 Dave Airlie +- cleanup some of drm vga arb bits that are upstream + +* Mon Oct 12 2009 Jarod Wilson +- Merge lirc compile fixes into lirc patch +- Refresh lirc patch with additional irq handling fixage +- Fix IR transmit on port 1 of 1st-gen mceusb transceiver +- Support another mouse button variant on imon devices + +* Mon Oct 12 2009 Chuck Ebbert 2.6.32-0.24.rc4.git0 +- Last-minute USB fix from upstream. + +* Sun Oct 11 2009 Chuck Ebbert +- Fix lirc build after header changes. +- Fix bug in lirc interrupt processing. 
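The 20MB placeholder noted in the entry above (#530778) is the same pattern that appears in the spec hunks earlier in this patch: %install writes a zero-filled dummy image so RPM's disk-space accounting sees a realistically sized file, and the %files list marks the path %ghost so the real initramfs generated by new-kernel-pkg in %posttrans can replace it without being shipped as packaged content. A minimal sketch of that pattern, using an illustrative version string rather than the spec's macros:

    # %install: reserve space for the image dracut will build at install time
    dd if=/dev/zero of=$RPM_BUILD_ROOT/boot/initramfs-2.6.33.6-147.fc12.img bs=1M count=20
    # %files: own the path without shipping its real contents
    %ghost /boot/initramfs-2.6.33.6-147.fc12.img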
+ +* Sun Oct 11 2009 Chuck Ebbert +- Fix up multiple definition of debug options + (EXT4_DEBUG, DEBUG_FORCE_WEAK_PER_CPU) + +* Sun Oct 11 2009 Chuck Ebbert +- 2.6.32-rc4 +- New config options: + CONFIG_BE2ISCSI=m + CONFIG_SCSI_BFA_FC=m + CONFIG_USB_MUSB_HDRC is not set + +* Sun Oct 11 2009 Kyle McMartin +- 2.6.32-rc3-git3 + +* Thu Oct 08 2009 Ben Skeggs +- ppc: compile nvidiafb as a module only, nvidiafb+nouveau = bang! (rh#491308) + +* Wed Oct 07 2009 Dave Jones +- Enable FUNCTION_GRAPH_TRACER on x86-64. + +* Wed Oct 07 2009 Dave Jones +- Disable CONFIG_IRQSOFF_TRACER on srostedt's recommendation. + (Adds unwanted overhead when not in use). + +* Sun Oct 04 2009 Kyle McMartin 2.6.32-0.17.rc3.git0 +- 2.6.32-rc3 (bah, rebase script didn't catch it.) + +* Sun Oct 04 2009 Kyle McMartin +- 2.6.32-rc1-git7 +- [x86,x86_64] ACPI_PROCESSOR_AGGREGATOR=m + +* Mon Sep 28 2009 Kyle McMartin +- 2.6.32-rc1 +- rebased crash-driver patchset, ia64_ksyms.c conflicts. move x86 crash.h + file to the right place. +- full changelog forthcoming & to fedora-kernel-list. + +* Mon Sep 28 2009 Kyle McMartin +- sick of rejects. + +* Mon Sep 28 2009 Chuck Ebbert +- Fix up some items missing in make debug vs. make release, + rearrange so the options are in the same order. +- Add new debug options: + CONFIG_EXT4_DEBUG + CONFIG_DEBUG_FORCE_WEAK_PER_CPU + +* Sun Sep 27 2009 Kyle McMartin +- Must now make mrproper after each config pass, due to Kbuild + stashing away the $ARCH variable. + +* Sun Sep 27 2009 Kyle McMartin +- 2.6.31-git18 +- rebased: + - hdpvr-ir-enable.patch + - linux-2.6-build-nonintconfig.patch + - linux-2.6-debug-sizeof-structs.patch + - linux-2.6-debug-vm-would-have-oomkilled.patch + - linux-2.6-execshield.patch + - linux-2.6-makefile-after_link.patch + - linux-2.6-serial-460800.patch + - linux-2.6-utrace.patch + - via-hwmon-temp-sensor.patch +- merged: + - linux-2.6-tracehook.patch + - linux-2.6-die-closed-source-bios-muppets-die.patch + - linux-2.6-intel-iommu-updates.patch + - linux-2.6-ksm.patch + - linux-2.6-ksm-updates.patch + - linux-2.6-ksm-fix-munlock.patch + - linux-2.6-vga-arb.patch + - v4l-dvb-fix-cx25840-firmware-loading.patch + - linux-2.6-rtc-show-hctosys.patch + +* Fri Sep 18 2009 Dave Jones +- %ghost the dracut initramfs file. + +* Thu Sep 17 2009 Hans de Goede +- Now that we have %%post generation of dracut images we do not need to + Require dracut-kernel anymore + +* Thu Sep 17 2009 Chuck Ebbert +- Disable drm-nouveau too -- it won't build without other + drm updates. + +* Wed Sep 16 2009 Roland McGrath +- Remove workaround for gcc bug #521991, now fixed. + +* Tue Sep 15 2009 Kyle McMartin +- 2.6.31-git4 +- rebased: + - linux-2.6-execshield.patch: split paravirt_types.h + - linux-2.6-buildnonintconfig.patch +- disabled: + - ksm, drm. +- merged: + - linux-2.6-kvm-pvmmu-do-not-batch-pte-updates-from-interrupt-context.patch + - linux-2.6-kvm-vmx-check-cpl-before-emulating-debug-register-access.patch + - linux-2.6-use-__pa_symbol-to-calculate-address-of-C-symbol.patch + - linux-2.6-xen-stack-protector-fix.patch + - linux-2.6-bluetooth-autosuspend.diff + - hid-ignore-all-recent-imon-devices.patch +- config changes: + - arm: + - CONFIG_HIGHPTE off, seems safer this way. + - generic: + - RDS_RDMA/RDS_TCP=m + - SCSI_PMCRAID=m + - WLAN=y, CFG80211_DEFAULT_PS=y, NL80211_TESTMODE off. + - WL12XX=m + - B43_PHY_LP=y + - BT_MRVL=m + - new MISDN stuff modular. 
+ - sparc: + - enable PERF_COUNTERS & EVENT_PROFILE + - ppc: + - XILINX_EMACSLITE=m + +* Mon Sep 14 2009 Chuck Ebbert +- 2.6.31-git2 +- Drop merged patches: + sched-introduce-SCHED_RESET_ON_FORK-scheduling-policy-flag.patch + linux-2.6-nfs4-ver4opt.patch + linux-2.6-alsa-improve-hda-powerdown.patch + alsa-tell-user-that-stream-to-be-rewound-is-suspended.patch + linux-2.6-ahci-export-capabilities.patch +- New s390 config option: + CONFIG_SCLP_ASYNC=m +- New generic config options: + CONFIG_ATA_VERBOSE_ERROR=y + CONFIG_PATA_RDC=m + CONFIG_SOUND_OSS_CORE_PRECLAIM=y + CONFIG_SND_HDA_PATCH_LOADER=y + CONFIG_SND_HDA_CODEC_CIRRUS=y + CONFIG_OPROFILE_EVENT_MULTIPLEX=y + CONFIG_CRYPTO_VMAC=m + CONFIG_CRYPTO_GHASH=m +- New debug option: + CONFIG_DEBUG_CREDENTIALS=y in debug kernels + +* Mon Sep 14 2009 Steve Dickson +- Added support for -o v4 mount parsing + +* Fri Sep 11 2009 Dave Jones +- Apply NX/RO to modules + +* Fri Sep 11 2009 Dave Jones +- Mark kernel data section as NX + +* Fri Sep 11 2009 Ben Skeggs +- nouveau: bring in Matthew Garret's initial switchable graphics support + +* Fri Sep 11 2009 Ben Skeggs +- nouveau: fixed use of strap-based panel mode when required (rh#522649) +- nouveau: temporarily block accel on NVAC chipsets (rh#522361, rh#522575) + +* Thu Sep 10 2009 Matthew Garrett +- linux-2.6-ahci-export-capabilities.patch: Backport from upstream +- linux-2.6-rtc-show-hctosys.patch: Export the hctosys state of an rtc +- linux-2.6-rfkill-all.patch: Support for keys that toggle all rfkill state + +* Thu Sep 10 2009 Ben Skeggs +- drm-nouveau.patch: add some scaler-only modes for LVDS, GEM/TTM fixes + +* Wed Sep 09 2009 Dennis Gilmore 2.6.31-2 +- touch the dracut initrd file when using %%{with_dracut} + +* Wed Sep 09 2009 Chuck Ebbert 2.6.31-1 +- Linux 2.6.31 + +* Wed Sep 09 2009 Chuck Ebbert +- Enable VXpocket and PDaudioCF PCMCIA sound drivers. + +* Wed Sep 09 2009 Hans de Goede +- Move to %%post generation of dracut initrd, because of GPL issues surrounding + shipping a prebuild initrd +- Require grubby >= 7.0.4-1, for %%post generation + +* Wed Sep 9 2009 Steve Dickson +- Updated the NFS4 pseudo root code to the latest release. + +* Wed Sep 09 2009 Justin M. Forbes +- Revert virtio_blk to rotational mode. 
(#509383) + +* Wed Sep 09 2009 Dave Airlie 2.6.31-0.219.rc9.git +- uggh lost nouveau bits in page flip + +* Wed Sep 09 2009 Dave Airlie 2.6.31-0.218.rc9.git2 +- fix r600 oops with page flip patch (#520766) + +* Wed Sep 09 2009 Ben Skeggs +- drm-nouveau.patch: fix display resume on pre-G8x chips + +* Wed Sep 09 2009 Ben Skeggs +- drm-nouveau.patch: add getparam to know using tile_flags is ok for scanout + +* Wed Sep 09 2009 Chuck Ebbert +- 2.6.31-rc9-git2 + +* Wed Sep 9 2009 Roland McGrath 2.6.31-0.214.rc9.git1 +- compile with -fno-var-tracking-assignments, work around gcc bug #521991 + +* Wed Sep 09 2009 Dave Airlie 2.6.31-0.213.rc9.git1 +- fix two bugs in r600 kms, fencing + mobile lvds + +* Tue Sep 08 2009 Ben Skeggs 2.6.31-0.212.rc9.git1 +- drm-nouveau.patch: fix ppc build + +* Tue Sep 08 2009 Ben Skeggs 2.6.31-0.211.rc9.git1 +- drm-nouveau.patch: more misc fixes + +* Tue Sep 08 2009 Dave Airlie 2.6.31-0.210.rc9.git1 +- drm-page-flip.patch: rebase again + +* Tue Sep 08 2009 Dave Airlie 2.6.31-0.209.rc9.git1 +- drm-next.patch: fix r600 signal interruption return value + +* Tue Sep 08 2009 Ben Skeggs 2.6.31-0.208.rc9.git1 +- drm-nouveau.patch: latest upstream + rebase onto drm-next + +* Tue Sep 08 2009 Dave Airlie 2.6.31-0.207.rc9.git1 +- drm-vga-arb.patch: update to avoid lockdep + add r600 support + +* Tue Sep 08 2009 Dave Airlie 2.6.31-0.206.rc9.git1 +- drm: rebase to drm-next - r600 accel + kms should start working now + +* Mon Sep 07 2009 Chuck Ebbert 2.6.31-0.205.rc9.git1 +- 2.6.31-rc9-git1 +- Temporarily hack the drm-next patch so it still applies; the result + should still be safe to build. + +* Sat Sep 05 2009 Chuck Ebbert 2.6.31-0.204.rc9 +- 2.6.31-rc9 + +* Fri Sep 04 2009 Chuck Ebbert 2.6.31-0.203.rc8.git2 +- Fix kernel build errors when building firmware by removing the + .config file before that step and restoring it afterward. + +* Thu Sep 03 2009 Adam Jackson +- drm-ddc-caching-bug.patch: Empty the connector's mode list when it's + disconnected. + +* Thu Sep 03 2009 Jarod Wilson +- Update hdpvr and lirc_zilog drivers for 2.6.31 i2c + +* Thu Sep 03 2009 Justin M.Forbes +- Fix xen guest with stack protector. (#508120) +- Small kvm fixes. + +* Wed Sep 02 2009 Adam Jackson 2.6.31-0.199.rc8.git2 +- drm-intel-pm.patch: Disable by default, too flickery on too many machines. + Enable with i915.powersave=1. + +* Wed Sep 02 2009 Dave Jones +- Add missing scriptlet dependancy. (#520788) + +* Tue Sep 01 2009 Adam Jackson +- Make DRM less chatty about EDID failures. No one cares. + +* Tue Sep 01 2009 Chuck Ebbert +- 2.6.31-rc8-git2 +- Blank out drm-intel-next: entire contents are now upstream. + +* Tue Sep 01 2009 Dave Jones +- Make firmware buildarch noarch. (Suggested by drago01 on irc) + +* Tue Sep 01 2009 Jarod Wilson +- Fix up lirc_zilog to enable functional IR transmit and receive + on the Hauppauge HD PVR +- Fix audio on PVR-500 when used in same system as HVR-1800 (#480728) + +* Sun Aug 30 2009 Chuck Ebbert +- 2.6.31-rc8-git1 +- Drop linux-2.6-inotify-accounting.patch, merged upstream. 
+ +* Sun Aug 30 2009 Jarod Wilson +- fix lirc_imon oops on older devices w/o tx ctrl ep (#520008) + +* Fri Aug 28 2009 Eric Paris 2.6.31-0.190.rc8 +- fix inotify length accounting and send inotify events + +* Fri Aug 28 2009 David Woodhouse +- Enable Solos DSL driver + +* Fri Aug 28 2009 Chuck Ebbert +- 2.6.31-rc8 + +* Thu Aug 27 2009 Chuck Ebbert 2.6.31-0.185.rc7.git6 +- 2.6.31-rc7-git6 +- Drop patch merged upstream: + xen-fb-probe-fix.patch + +* Thu Aug 27 2009 Adam Jackson +- drm-rv710-ucode-fix.patch: Treat successful microcode load on RV710 as, + you know, success. (#519718) + +* Thu Aug 27 2009 Chuck Ebbert +- 2.6.31-rc7-git5 +- Drop patch linux-2.6-ima-leak.patch, now merged upstream. + +* Wed Aug 26 2009 Jarod Wilson +- Fix up hdpvr ir enable patch for use w/modular i2c (David Engel) + +* Wed Aug 26 2009 Eric Paris +- fix iint_cache leak in IMA code + drop the ima=0 patch + +* Wed Aug 26 2009 Justin M. Forbes +- Fix munlock with KSM (#516909) +- Re-enable KSM + +* Wed Aug 26 2009 Chuck Ebbert +- 2.6.31-rc7-git4 +- Drop patches merged upstream: + xen-x86-fix-stackprotect.patch + xen-x86-no-stackprotect.patch + +* Wed Aug 26 2009 Adam Jackson +- drm-intel-next.patch: Update, various output setup fixes. + +* Wed Aug 26 2009 David Woodhouse +- Make WiMAX modular (#512070) + +* Tue Aug 25 2009 Kyle McMartin +- allow-disabling-ima.diff: debugging patch... adds ima=0 kernel + param to disable initialization of IMA. + +* Tue Aug 25 2009 Ben Skeggs 2.6.31-0.174.rc7.git2 +- drm-nouveau.patch: upstream update, pre-nv50 tv-out + misc fixes + +* Tue Aug 25 2009 Chuck Ebbert 2.6.31-0.173.rc7.git2 +- Fix Xen boot (#508120) + +* Tue Aug 25 2009 Dave Airlie +- pull in drm-next tree + rebase around it + +* Mon Aug 24 2009 Chuck Ebbert +- 2.6.31-rc7-git2 + +* Mon Aug 24 2009 Chuck Ebbert +- 2.6.31-rc7-git1 + +* Sat Aug 22 2009 Chuck Ebbert +- 2.6.31-rc7 + +* Thu Aug 20 2009 Mark McLoughlin +- Disable LZMA for xen (#515831) + +* Thu Aug 20 2009 Chuck Ebbert +- 2.6.31-rc6-git5 +- Fix up drm-r600-kms.patch +- Drop fix-perf-make-man-failure.patch + +* Wed Aug 19 2009 Chuck Ebbert +- 2.6.31-rc6-git5 +- Revert linux-2.6-debug-vm-would-have-oomkilled.patch to v1.2 + because upstream changes to oom-kill.c were all reverted. + +* Tue Aug 18 2009 Kyle McMartin +- Fix up perf so that it builds docs now that they are fixed. +- with_docs disables perf docs too. be warned. (logic is that the + build deps are (mostly) the same, so if you don't want one, odds are...) + +* Tue Aug 18 2009 Dave Jones +- 2.6.31-rc6-git3 + +* Mon Aug 17 2009 Dave Jones 2.6.31-0.161.rc6.git2 +- 2.6.31-rc6-git2 + +* Mon Aug 17 2009 Chuck Ebbert +- Stop generating the (unused) ppc64-kdump.config file. + +* Mon Aug 17 2009 Jarod Wilson +- Add new lirc driver for built-in ENE0100 device on some laptops + +* Sun Aug 16 2009 Kyle McMartin 2.6.31-0.158.rc6 +- Improve the perf script so it prints something helpful if the + perf binary doesn't exist. + +* Sat Aug 15 2009 Dave Jones 2.6.31-0.157.rc6 +- Disable KSM patches on a hunch. Chasing the "encrypted VGs don't work" bug. + +* Fri Aug 14 2009 Dave Jones 2.6.31-0.155.rc6 +- 2.6.31-rc6 + +* Wed Aug 12 2009 Kyle McMartin +- fix perf. +- move perf to perf.$ver instead of perf-$ver... + +* Wed Aug 12 2009 Dennis Gilmore +- Obsolete kernel-smp on sparc64 +- Require grubby >= 7.0.2-1 since thats what introduces the dracut options we use + +* Wed Aug 12 2009 Kristian Høgsberg +- Fix drm-page-flip.patch to not break radeon kms and to not reset + crtc offset into fb on flip. 
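Several entries above describe /usr/sbin/perf as a small wrapper that dispatches to a per-kernel-version binary (perf.$ver) and prints something helpful when no matching binary exists. The packaged script is copied from $RPM_SOURCE_DIR in the spec and is not shown in this patch, so the following is only a hedged sketch of such a dispatcher, with assumed paths:

    #!/bin/sh
    # run the perf build that matches the running kernel, if it is installed
    PERF="/usr/libexec/perf.$(uname -r)"
    if [ -x "$PERF" ]; then
        exec "$PERF" "$@"
    fi
    echo "perf: no perf binary installed for kernel $(uname -r)" >&2
    exit 1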
+ +* Wed Aug 12 2009 Adam Jackson +- Update drm-intel-next patch + +* Tue Aug 11 2009 Dennis Gilmore - 2.6.31-0.149.rc5.git3 +- disable building the -smp kernel on sparc64 +- disable building kernel-perf on sparc64 syscalls not supported + +* Tue Aug 11 2009 Eric Paris +- Enable config IMA + +* Tue Aug 11 2009 Ben Skeggs +- nouveau: various cleanups and fixes + more sanity checking in dma paths + +* Mon Aug 10 2009 Jarod Wilson +- Add new device ID to lirc_mceusb (#512483) +- Fix some lockdep false positives +- Add support for setting and enabling iMON clock via sysfs +- Add tunable pad threshold support to lirc_imon +- Add new pseudo-IR protocl to lirc_imon for universals w/o a pad +- Fix mouse device support on older iMON devices + +* Mon Aug 10 2009 David Woodhouse 2.6.31-0.145.rc5.git3 +- Merge latest Intel IOMMU fixes and BIOS workarounds, re-enable by default. + +* Sun Aug 09 2009 Kyle McMartin +- btusb autosuspend: fix build on !CONFIG_PM by stubbing out + suspend/resume methods. + +* Sat Aug 08 2009 Dennis Gilmore 2.6.31-0.141.rc5.git3 +- disable kgdb on sparc64 uni-processor kernel +- set max cpus to 256 on sparc64 +- enable AT keyboard on sparc64 + +* Fri Aug 07 2009 Justin M. Forbes +- Apply KSM updates from upstream + +* Fri Aug 07 2009 Hans de Goede +- When building a dracut generic initrd tell new-kernel-pkg to use that + instead of running mkinitrd + +* Fri Aug 07 2009 Dave Airlie 2.6.31-0.139.rc5.git3 +- drm-r600-kms.patch - update r600 KMS +- drm-radeon-fixes.patch - patches for queue to Linus + +* Thu Aug 06 2009 Justin M. Forbes 2.6.31-0.138.rc5.git3 +- Fix kvm virtio_blk errors (#514901) + +* Thu Aug 06 2009 Adam Jackson +- Hush DRM vblank warnings, they're constant (and harmless) under DRI2. + +* Thu Aug 06 2009 Dave Airlie 2.6.31.0.134.rc5.git3 +- fixup vga arb warning at startup and handover between gpus + +* Thu Aug 06 2009 Kyle McMartin 2.6.31.0.133.rc5.git3 +- die-floppy-die.patch: it's the 21st century, let's not rely on + steam powered technology. + +* Wed Aug 05 2009 Dave Airlie 2.6.31.0.132.rc5.git3 +- revert-ftrace-powerpc-snafu.patch - fix ppc build + +* Wed Aug 05 2009 Ben Skeggs +- nouveau: respect nomodeset + +* Wed Aug 05 2009 Chuck Ebbert +- Fix /usr/sbin/perf script. (#515494) + +* Wed Aug 05 2009 Dave Jones +- Fix shift in pci cacheline size printk. + +* Wed Aug 05 2009 Dave Airlie 2.6.31.0.128.rc5.git3 +- 2.6.31-rc5-git3 +- drop cpufreq + set memory fixes + +* Wed Aug 05 2009 Dave Airlie +- Add Jeromes initial r600 kms work. +- rebase arb patch + +* Tue Aug 04 2009 Kyle McMartin +- alsa-tell-user-that-stream-to-be-rewound-is-suspended.patch: apply patch + destined for 2.6.32, requested by Lennart. + +* Tue Aug 04 2009 Ben Skeggs +- nouveau: more code share between nv50/ +- update VGA arb patches again + +* Mon Aug 03 2009 Adam Jackson +- Update intel drm from anholt's tree +- Rebase drm-intel-pm.patch to match +- Drop gen3 fb hack, merged +- Drop previous watermark setup change + +* Mon Aug 03 2009 Dave Jones 2.6.31-0.122.rc5.git2 +- 2.6.31-rc5-git2 + +* Mon Aug 03 2009 Adam Jackson +- (Attempt to) fix watermark setup on Intel 9xx parts. 
+ +* Mon Aug 03 2009 Jarod Wilson +- make usbhid driver ignore all recent SoundGraph iMON devices, so the + lirc_imon driver can grab them instead + +* Mon Aug 03 2009 Dave Airlie +- update VGA arb patches + +* Sat Aug 01 2009 David Woodhouse 2.6.31-0.118.rc5 +- Fix boot failures on ppc32 (#514010, #505071) + +* Fri Jul 31 2009 Kyle McMartin 2.6.31-0.117.rc5 +- Linux 2.6.31-rc5 + +* Fri Jul 31 2009 Matthew Garrett +- linux-2.6-dell-laptop-rfkill-fix.patch: Fix up Dell rfkill + +* Fri Jul 31 2009 Ben Skeggs +- nouveau: build against 2.6.31-rc4-git6, fix script parsing on some G8x chips + +* Thu Jul 30 2009 Chuck Ebbert +- Linux 2.6.31-rc4-git6 + New config item: CONFIG_BATTERY_DS2782 is not set +- Add last-minute set_memory_wc() fix from LKML. + +* Thu Jul 30 2009 Matthew Garrett +- drm-intel-pm.patch: Don't reclock external outputs. Increase the reduced + clock slightly to avoid upsetting some hardware. Disable renderclock + adjustment for the moment - it's breaking on some hardware. + +* Thu Jul 30 2009 Ben Skeggs +- nouveau: another DCB 1.5 entry, G80 corruption fixes, small +- fix VGA ARB + kms + +* Wed Jul 29 2009 Dave Jones +- Add support for dracut. (Harald Hoyer) + +* Wed Jul 29 2009 Ben Skeggs +- drm-nouveau.patch: nv50/nva0 tiled scanout fixes, nv40 kms fixes + +* Wed Jul 29 2009 Chuck Ebbert +- Linux 2.6.31-rc4-git3 +- Drop linux-2.6-ecryptfs-overflow-fixes.patch, merged upstream now. + +* Wed Jul 29 2009 Dave Airlie +- update VGA arb patches + +* Tue Jul 28 2009 Adam Jackson +- Remove the pcspkr modalias. If you're still living in 1994, load it + by hand. + +* Tue Jul 28 2009 Eric Sandeen 2.6.31-0.102.rc4.git2 +- Fix eCryptfs overflow issues (CVE-2009-2406, CVE-2009-2407) + +* Tue Jul 28 2009 Kyle McMartin 2.6.31-0.101.rc4.git2 +- 2.6.31-rc4-git2 +- rebase linux-2.6-fix-usb-serial-autosuspend.diff +- config changes: + - USB_GSPCA_SN9C20X=m (_EVDEV=y) + +* Tue Jul 28 2009 Ben Skeggs +- drm-nouveau.patch: cleanup userspace API, various bugfixes. + Looks worse than it is, register macros got cleaned up, which + touches pretty much everywhere.. + +* Mon Jul 27 2009 Adam Jackson +- Warn quieter about not finding PCI bus parents for ROM BARs, they're + not usually needed and there's nothing you can do about it anyway. + +* Mon Jul 27 2009 Matthew Garrett +- linux-2.6-alsa-improve-hda-powerdown.patch - attempt to reduce audio glitches + caused by HDA powerdown +- disable CONFIG_DEBUG_KOBJECT again for now, since it produces huge dmesg spew + +* Mon Jul 27 2009 Dave Airlie +- update vga arb code + +* Mon Jul 27 2009 Matthew Garrett +- drm-intel-pm.patch - Add runtime PM for Intel graphics + +* Fri Jul 24 2009 Kristian Høgsberg +- Add drm-page-flip.patch to support vsynced page flipping on intel + chipsets. +- Really add patch. +- Fix patch to not break nouveau. + +* Fri Jul 24 2009 Chuck Ebbert +- Enable CONFIG_DEBUG_KOBJECT in debug kernels. (#513606) + +* Thu Jul 23 2009 Kyle McMartin +- perf BuildRequires binutils-devel now. + +* Thu Jul 23 2009 Justin M. Forbes +- Add KSM support + +* Thu Jul 23 2009 Kyle McMartin 2.6.31-0.87.rc4 +- Linux 2.6.31-rc4 +- config changes: + - USB_CDC_PHONET=m [all] + - EVENT_PROFILE=y [i386, x86_64, powerpc, s390] + +* Wed Jul 22 2009 Tom "spot" Callaway +- We have to override the new %%install behavior because, well... the kernel is special. 
+ +* Wed Jul 22 2009 Dave Jones +- 2.6.31-rc3-git5 + +* Wed Jul 22 2009 Ben Skeggs 2.6.31-0.82.rc3.git4 +- Enable KMS for nouveau + +* Wed Jul 22 2009 Ben Skeggs +- Update nouveau from upstream (initial suspend/resume + misc bugfixes) + +* Mon Jul 20 2009 Adam Jackson +- Disable VGA arbiter patches for a moment + +* Mon Jul 20 2009 Adam Jackson +- Revive 4k framebuffers for intel gen3 + +* Mon Jul 20 2009 Dave Jones 2.6.31-0.78.rc3.git4 +- Enable CONFIG_RTC_HCTOSYS (#489494) + +* Mon Jul 20 2009 Dave Jones 2.6.31-0.77.rc3.git4 +- Don't build 586 kernels any more. + +* Sun Jul 19 2009 Dave Jones 2.6.31-0.75.rc3.git4 +- build a 'full' package on i686 (Bill Nottingham) + +* Sun Jul 19 2009 Dave Jones 2.6.31-0.74.rc3.git4 +- 2.6.31-rc3-git4 + +* Sat Jul 18 2009 Matthew Garrett +- linux-2.6-driver-level-usb-autosuspend.diff - allow drivers to enable autopm +- linux-2.6-fix-usb-serial-autosuspend.diff - fix generic usb-serial autopm +- linux-2.6-qcserial-autosuspend.diff - enable autopm by default on qcserial +- linux-2.6-bluetooth-autosuspend.diff - enable autopm by default on btusb +- linux-2.6-usb-uvc-autosuspend.diff - enable autopm by default on uvc + +* Thu Jul 16 2009 Chuck Ebbert +- 2.6.31-rc3-git3 + +* Thu Jul 16 2009 Matthew Garrett +- linux-2.6-defaults-aspm.patch - default ASPM to on for PCIe >= 1.1 hardware + +* Thu Jul 16 2009 Dave Airlie 2.6.31-0.69.rc3 +- linux-2.6-vga-arb.patch - add VGA arbiter. +- drm-vga-arb.patch - add VGA arbiter support to drm + +* Tue Jul 14 2009 Kyle McMartin 2.6.31-0.68-rc3 +- 2.6.31-rc3 +- config changes: + - RTL8192SU is not set, (staging) + +* Mon Jul 13 2009 Kyle McMartin 2.6.31-0.67.rc2.git9 +- 2.6.31-rc2-git9 +- config changes: + - BLK_DEV_OSD=m + +* Mon Jul 13 2009 Ben Skeggs +- drm-nouveau.patch: update from upstream + +* Fri Jul 10 2009 Chuck Ebbert +- 2.6.31-rc2-git6 +- Drop dmadebug-spinlock patch -- merged upstream. + +* Fri Jul 10 2009 Dave Jones 2.6.31-0.64.rc2.git5 +- Don't jump through hoops that ppc powerbooks have to on sensible systems + in cpufreq_suspend. + +* Fri Jul 10 2009 Dave Jones +- 2.6.31-rc2-git5 + +* Thu Jul 09 2009 Dave Jones 2.6.31-0.62.rc2.git4 +- Use correct spinlock initialization in dma-debug + +* Thu Jul 09 2009 Chuck Ebbert 2.6.31-0.61.rc2.git4 +- 2.6.31-rc2-git4 + +* Thu Jul 09 2009 Jarod Wilson +- Enable IR receiver on the Hauppauge HD PVR +- Trim the changelog, axing everything before 2.6.29 (see cvs + if you still really want to see that far back) + +* Wed Jul 08 2009 Dave Jones +- Enable a bunch of debugging options that were missed somehow. + +* Wed Jul 08 2009 Kyle McMartin +- Bump NR_CPUS on x86_64 to 512. + +* Wed Jul 08 2009 Adam Jackson +- drm-no-gem-on-i8xx.patch: Drop, intel 2D driver requires GEM now. This + should be entertaining. + +* Wed Jul 08 2009 Kyle McMartin +- First cut of /usr/sbin/perf wrapper script and 'perf' + subpackage. + +* Wed Jul 08 2009 Kyle McMartin 2.6.31-0.54.rc2.git2 +- Rebase and re-apply all the Fedora-specific linux-2.6-debug-* + patches. +- Cull a bunch of upstreamed patches from the spec. + +* Wed Jul 08 2009 Steve Dickson +- Added NFSD v4 dynamic pseudo root patch which allows + NFS v3 exports to be mounted by v4 clients. + +* Tue Jul 07 2009 Jarod Wilson +- See if we can't make lirc_streamzap behave better... (#508952) + +* Tue Jul 07 2009 Chuck Ebbert 2.6.31-0.47.rc2.git2 +- 2.6.31-rc2-git2 + +* Tue Jul 07 2009 Jarod Wilson +- Make lirc_i2c actually work with 2.6.31 i2c + +* Mon Jul 06 2009 Chuck Ebbert +- Use LZMA for kernel compression on X86. 
+ +* Mon Jul 06 2009 Jarod Wilson +- Hack up lirc_i2c and lirc_zilog to compile with 2.6.31 i2c + changes. The drivers might not actually be functional now, but + at least they compile again. Will fix later, if need be... + +* Sat Jul 04 2009 Dave Jones 2.6.31-0.42.rc2 +- 2.6.31-rc2 + +* Sat Jul 04 2009 Chuck Ebbert +- 2.6.31-rc1-git11 + +* Fri Jul 03 2009 Hans de Goede +- Disable v4l1 ov511 and quickcam_messenger drivers (obsoleted by + v4l2 gspca subdrivers) + +* Thu Jul 02 2009 Kyle McMartin 2.6.31-0.39.rc1.git9 +- 2.6.31-rc1-git9 +- linux-2.6-dm-fix-exstore-search.patch: similar patch merged upstream. + +* Tue Jun 30 2009 Chuck Ebbert 2.6.31-0.38.rc1.git7 +- 2.6.31-rc1-git7 + +* Tue Jun 30 2009 Dave Jones 2.6.31-0.37.rc1.git5 +- Disable kmemleak. Way too noisy, and not finding any real bugs. + +* Tue Jun 30 2009 Ben Skeggs +- drm-nouveau.patch: match upstream + +* Mon Jun 29 2009 Chuck Ebbert 2.6.31-0.35.rc1.git5 +- 2.6.31-rc1-git5 +- CONFIG_LEDS_LP3944=m + +* Mon Jun 29 2009 Chuck Ebbert +- Try to fix the dm overlay bug for real (#505121) + +* Sat Jun 27 2009 Ben Skeggs 2.6.31-0.33.rc1.git2 +- drm-nouveau.patch: fix conflicts from 2.6.31-rc1-git2 + +* Fri Jun 26 2009 Dave Jones 2.6.31-0.31.rc1.git2 +- Further improvements to kmemleak + +* Fri Jun 26 2009 Dave Jones 2.6.31-0.30.rc1.git2 +- 2.6.31-rc1-git2 + +* Fri Jun 26 2009 Ben Skeggs +- drm-nouveau.patch: latest upstream + reenable + +* Thu Jun 25 2009 Dave Jones 2.6.31-0.29.rc1 +- Make kmemleak scan process stacks by default. + Should reduce false positives (which does also increase false negatives, + but that's at least less noisy) + +* Wed Jun 24 2009 Kyle McMartin 2.6.31-0.28.rc1 +- 2.6.31-rc1 +- linux-2.6-utrace.patch: rebase on kernel/Makefile changes +- config changes: + - generic: + - CONFIG_DM_LOG_USERSPACE=m + - CONFIG_DM_MULTIPATH_QL=m + - CONFIG_DM_MULTIPATH_ST=m + - CONFIG_BATTERY_MAX17040=m + - CONFIG_I2C_DESIGNWARE is off (depends on clk.h) + +* Wed Jun 24 2009 Kyle McMartin +- Move perf to /usr/libexec/perf-$KernelVer. + +* Wed Jun 24 2009 Kyle McMartin +- config changes: + - generic: + - CONFIG_SCSI_DEBUG=m (was off, requested by davidz) + +* Wed Jun 24 2009 Dave Jones 2.6.31-0.22.rc0.git22 +- 2.6.30-git22 + +* Tue Jun 23 2009 Dave Jones 2.6.31-0.22.rc0.git20 +- 2.6.30-git20 + +* Mon Jun 22 2009 Kyle McMartin 2.6.31-0.24.rc0.git18 +- Enable tools/perf, installed as /bin/perf-$KernelVer. Docs and a /bin/perf + wrapper come next if this builds ok. + +* Mon Jun 22 2009 Kyle McMartin +- sched-introduce-SCHED_RESET_ON_FORK-scheduling-policy-flag.patch: pull in + two fixes from Mike Galbraith from tip.git + +* Sun Jun 21 2009 Dave Jones 2.6.31-0.21.rc0.git18 +- Add patch to possibly fix the pktlen problem on via-velocity. + +* Sun Jun 21 2009 Dave Jones 2.6.31-0.20.rc0.git18 +- 2.6.30-git18 + VIA crypto & mmc patches now upstream. + +* Sun Jun 21 2009 Dave Jones +- Determine cacheline sizes in a generic manner. + +* Sun Jun 21 2009 Chuck Ebbert 2.6.31-0.18.rc0.git17 +- 2.6.30-git17 +- Config changes: + - powerpc32-generic + CONFIG_PERF_COUNTERS=y + - generic + CONFIG_KEYBOARD_LM8323 is not set + CONFIG_MOUSE_SYNAPTICS_I2C=m + CONFIG_TOUCHSCREEN_EETI=m + CONFIG_TOUCHSCREEN_W90X900=m +- Dropped agp-set_memory_ucwb.patch, all fixed upstream now. + +* Sat Jun 20 2009 Kyle McMartin 2.6.31.0.17.rc0.git15 +- config changes: + - ppc generic: + - CONFIG_PPC_DISABLE_WERROR=y (switched... chrp fails otherwise, stack + frame size.) 
+ +* Sat Jun 20 2009 Kyle McMartin 2.6.31.0.16.rc0.git15 +- 2.6.30-git15 +- config changes: + - generic: + - CONFIG_LBDAF=y + - staging: + - CONFIG_USB_SERIAL_QUATECH2 is not set + - CONFIG_VT6655 is not set + - CONFIG_USB_CPC is not set + - CONFIG_RDC_17F3101X is not set + - CONFIG_FB_UDL is not set + - ppc32: + - CONFIG_KMETER1=y + - ppc generic: + - CONFIG_PPC_DISABLE_WERROR is not set +- lirc disabled due to i2c detach_client removal. + +* Sat Jun 20 2009 Kyle McMartin +- sched-introduce-SCHED_RESET_ON_FORK-scheduling-policy-flag.patch: add, + queued in tip/sched/core (ca94c442535a44d508c99a77e54f21a59f4fc462) + +* Fri Jun 19 2009 Kyle McMartin 2.6.31.0.15.rc0.git14 +- Fix up ptrace, hopefully. Builds on x86_64 at least. + +* Fri Jun 19 2009 Chuck Ebbert +- linux-2.6-tip.git-203abd67b75f7714ce98ab0cdbd6cfd7ad79dec4.patch + Fixes oops on boot with qemu (#507007) + +* Fri Jun 19 2009 Kyle McMartin 2.6.31-0.13.rc0.git14 +- 2.6.30-git14 + +* Fri Jun 19 2009 Chuck Ebbert +- Fix up the via-sdmmc and via-hwmon-temp-sensor patches. +- Drop VIA Padlock patches merged upstream: + via-rng-enable-64bit.patch + via-padlock-10-enable-64bit.patch + via-padlock-20-add-x86-dependency.patch + +* Thu Jun 18 2009 Kyle McMartin 2.6.31-0.11.rc0.git13 +- 2.6.30-git13 +- config changes: + - arm: + - CONFIG_UACCESS_WITH_MEMCPY is not set + - i686-PAE: + - CONFIG_XEN_DEV_EVTCHN=m + - CONFIG_XEN_SYS_HYPERVISOR=y + - ia64: + - CONFIG_RCU_FANOUT=64 + - nodebug: + - CONFIG_DEBUG_KMEMLEAK is not set + - CONFIG_DEBUG_KMEMLEAK_TEST=m + - powerpc: + - CONFIG_CAN_SJA1000_OF_PLATFORM=m + - CONFIG_PPC_EMULATED_STATS=y + - CONFIG_SWIOTLB=y + - CONFIG_RDS is not set (broken on ppc32) + - powerpc32: + - CONFIG_RCU_FANOUT=32 + - powerpc64: + - CONFIG_RCU_FANOUT=64 + - CONFIG_PERF_COUNTERS=y + - s390x: + - CONFIG_RCU_FANOUT=64 + - CONFIG_SECCOMP=y + - CONFIG_PM=y + - CONFIG_HIBERNATION=y + - CONFIG_PM_STD_PARTITION="/dev/jokes" + - sparc64: + - CONFIG_RCU_FANOUT=64 + - x86: + - CONFIG_RCU_FANOUT=32 + - CONFIG_IOMMU_STRESS is not set + - CONFIG_PERF_COUNTERS=y + - CONFIG_X86_OLD_MCE is not set + - CONFIG_X86_MCE_INTEL=y + - CONFIG_X86_MCE_AMD=y + - CONFIG_X86_ANCIENT_MCE is not set + - CONFIG_X86_MCE_INJECT is not set + - x86_64: + - CONFIG_EDAC_AMD64=m + - CONFIG_EDAC_AMD64_ERROR_INJECTION is not set + - CONFIG_XEN_DEV_EVTCHN=m + - CONFIG_XEN_SYS_HYPERVISOR=y + - CONFIG_RCU_FANOUT=64 + - CONFIG_IOMMU_STRESS is not set + - CONFIG_PERF_COUNTERS=y + - CONFIG_X86_MCE_INJECT is not set + - generic: + - CONFIG_RCU_FANOUT=32 + - CONFIG_MMC_SDHCI_PLTFM=m + - CONFIG_MMC_CB710=m + - CONFIG_CB710_CORE=m + - CONFIG_CB710_DEBUG is not set + - CONFIG_SCSI_MVSAS_DEBUG is not set + - CONFIG_SCSI_BNX2_ISCSI=m + - CONFIG_NETFILTER_XT_MATCH_OSF=m + - CONFIG_RFKILL_INPUT=y (used to be =m, which was invalid) + - CONFIG_DE2104X_DSL=0 + - CONFIG_KS8842 is not set + - CONFIG_CFG80211_DEBUGFS=y + - CONFIG_MAC80211_DEFAULT_PS=y + - CONFIG_IWM=m + - CONFIG_IWM_DEBUG is not set + - CONFIG_RT2800USB=m + - CONFIG_CAN_DEV=m + - CONFIG_CAN_CALC_BITTIMING=y + - CONFIG_CAN_SJA1000=m + - CONFIG_CAN_SJA1000_PLATFORM=m + - CONFIG_CAN_EMS_PCI=m + - CONFIG_CAN_KVASER_PCI=m + - CONFIG_EEPROM_MAX6875=m + - CONFIG_SENSORS_TMP401=m + - CONFIG_MEDIA_SUPPORT=m + - CONFIG_SND_CTXFI=m + - CONFIG_SND_LX6464ES=m + - CONFIG_SND_HDA_CODEC_CA0110=y + - CONFIG_USB_XHCI_HCD=m + - CONFIG_USB_XHCI_HCD_DEBUGGING is not set + - CONFIG_DRAGONRISE_FF=y (used to be =m) + - CONFIG_GREENASIA_FF=y (used to be =m) + - CONFIG_SMARTJOYPLUS_FF=y (used to be =m) + - CONFIG_USB_NET_INT51X1=m + - 
CONFIG_CUSE=m + - CONFIG_FUNCTION_PROFILER=y + - CONFIG_RING_BUFFER_BENCHMARK=m + - CONFIG_REGULATOR_USERSPACE_CONSUMER=m + - CONFIG_REGULATOR_MAX1586=m + - CONFIG_REGULATOR_LP3971=m + - CONFIG_RCU_FANOUT_EXACT is not set + - CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 + - CONFIG_FSNOTIFY=y + - CONFIG_IEEE802154=m + - CONFIG_IEEE802154_DRIVERS=m + - CONFIG_IEEE802154_FAKEHARD=m + - CONFIG_CNIC=m + +* Wed Jun 17 2009 Jarod Wilson +- New lirc_imon hotness, update 2: + * support dual-interface devices with a single lirc device + * directional pad functions as an input device mouse + * touchscreen devices finally properly supported + * support for using MCE/RC-6 protocol remotes + * fix oops in RF remote association code (F10 bug #475496) + * fix re-enabling case/panel buttons and/or knobs +- Add some misc additional lirc_mceusb2 transceiver IDs +- Add missing unregister_chrdev_region() call to lirc_dev exit +- Add it8720 support to lirc_it87 + +* Tue Jun 16 2009 Chuck Ebbert +- Update via-sdmmc driver + +* Mon Jun 15 2009 Jarod Wilson +- Update lirc patches w/new imon hotness + +* Fri Jun 12 2009 Chuck Ebbert +- Update VIA temp sensor and mmc drivers. + +* Fri Jun 12 2009 John W. Linville 2.6.30-6 +- neigh: fix state transition INCOMPLETE->FAILED via Netlink request +- enable CONFIG_ARPD (used by OpenNHRP) + +* Wed Jun 10 2009 Chuck Ebbert +- VIA Nano updates: + Enable Padlock AES encryption and random number generator on x86-64 + Add via-sdmmc and via-cputemp drivers + +* Wed Jun 10 2009 Kyle McMartin 2.6.30-1 +- Linux 2.6.30 rebase. + +* Tue Jun 09 2009 John W. Linville +- Clean-up some wireless bits in config-generic + +* Tue Jun 09 2009 Chuck Ebbert +- Add support for ACPI P-states on VIA processors. +- Disable the e_powersaver driver. + +* Tue Jun 09 2009 Chuck Ebbert +- Linux 2.6.30-rc8-git6 + +* Fri Jun 05 2009 Chuck Ebbert +- Linux 2.6.30-rc8-git1 + +* Wed Jun 03 2009 Kyle McMartin +- Linux 2.6.30-rc8 + +* Tue Jun 2 2009 Roland McGrath +- utrace update (fixes stap PR10185) + +* Tue Jun 02 2009 Dave Jones +- For reasons unknown, RT2X00 driver was being built-in. + Make it modular. + +* Tue Jun 02 2009 Dave Jones +- 2.6.30-rc7-git5 + +* Sat May 30 2009 Dave Jones +- 2.6.30-rc7-git4 + +* Thu May 28 2009 Dave Jones +- 2.6.30-rc7-git2 + +* Tue May 26 2009 Dave Jones +- Various cpufreq patches from git. + +* Tue May 26 2009 Dave Jones +- 2.6.30-rc7-git1 + +* Mon May 25 2009 Kyle McMartin +- rds-only-on-64-bit-or-x86.patch: drop patch, issue is fixed upstream. + +* Sat May 23 2009 Dave Jones +- 2.6.30-rc7 + +* Thu May 21 2009 Dave Jones +- 2.6.30-rc6-git6 + +* Wed May 20 2009 Chuck Ebbert +- Enable Divas (formerly Eicon) ISDN drivers on x86_64. (#480837) + +* Wed May 20 2009 Dave Jones +- 2.6.30-rc6-git5 + +* Mon May 18 2009 Dave Jones +- 2.6.30-rc6-git3 + +* Sun May 17 2009 Dave Jones +- 2.6.30-rc6-git2 + +* Sat May 16 2009 Dave Jones +- 2.6.30-rc6 + +* Mon May 11 2009 Kyle McMartin +- Linux 2.6.30-rc5-git1 + +* Fri May 08 2009 Kyle McMartin +- Linux 2.6.30-rc5 + +* Fri May 08 2009 Kyle McMartin +- Linux 2.6.30-rc4-git4 + +* Wed May 06 2009 Kyle McMartin +- Linux 2.6.30-rc4-git3 +- linux-2.6-cdrom-door-status.patch: merged upstream. +- linux-2.6-iwl3945-remove-useless-exports.patch: merged upstream. 
+- linux-2.6-utrace.patch: rebase against changes to fs/proc/array.c +- USB_NET_CDC_EEM=m + +* Fri May 01 2009 Eric Sandeen +- Fix ext4 corruption on partial write into prealloc block + +* Thu Apr 30 2009 Kyle McMartin +- 2.6.30-rc4 + +* Wed Apr 29 2009 Dave Jones +- 2.6.30-rc3-git6 + +* Tue Apr 28 2009 Dave Jones +- 2.6.30-rc3-git4 + +* Tue Apr 28 2009 Chuck Ebbert +- Make the kernel-vanilla package buildable again. +- Allow building with older versions of RPM. + +* Tue Apr 28 2009 Neil Horman +- Backport missing snmp stats (bz 492391) + +* Tue Apr 28 2009 Chuck Ebbert 2.6.30-0.72.rc3.git3 +- Drop unused exports from the iwl3945 driver. + +* Tue Apr 28 2009 Chuck Ebbert +- Linux 2.6.30-rc3-git3 + +* Mon Apr 27 2009 Dave Jones +- 2.6.30-rc3-git2 + +* Sun Apr 26 2009 Chuck Ebbert 2.6.30-0.68.rc3.git1 +- Linux 2.6.30-rc3-git1 + +* Wed Apr 22 2009 Dave Jones 2.6.30-0.67.rc3 +- Disable SYSFS_DEPRECATED on ia64 + +* Wed Apr 22 2009 Kyle McMartin +- Linux 2.6.30-rc3 +- PROC_VMCORE=y: Exports the dump image of crashed + kernel in ELF format + +* Wed Apr 22 2009 Neil Horman +- Enable RELOCATABLE and CRASH_DUMP for powerpc64 +- With this we can remove the -kdump build variant +- for the ppc64 arch + +* Tue Apr 21 2009 Chuck Ebbert +- Don't include the modules.*.bin files in the RPM package. + +* Tue Apr 21 2009 Dave Jones +- 2.6.30-rc2-git7 + +* Mon Apr 20 2009 Dave Jones +- Various s390x config tweaks. (#496596, #496601, #496605, #496607) + +* Mon Apr 20 2009 Dave Jones +- 2.6.30-rc2-git6 + +* Sat Apr 18 2009 Chuck Ebbert +- Set CONFIG_UEVENT_HELPER_PATH to the empty string (#496296) + +* Fri Apr 17 2009 Dave Jones +- 2.6.30-rc2-git3 + +* Thu Apr 16 2009 Kyle McMartin 2.6.30-0.58.rc2.git1 +- 2.6.30-rc2-git1 + +* Wed Apr 15 2009 Kyle McMartin 2.6.30-0.57.rc2 +- 2.6.30-rc2 + +* Tue Apr 14 2009 Kyle McMartin +- 2.6.30-rc1-git7 +- CONFIG_TOUCHSCREEN_AD7879_I2C=m +- CONFIG_STRIP_ASM_SYMS=y, off for -debug + +* Mon Apr 13 2009 Kyle McMartin +- ppc-fix-parport_pc.patch: add from linuxppc-dev@ + +* Mon Apr 13 2009 Kyle McMartin +- execshield: fix build (load_user_cs_desc is 32-bit only in tlb.c) + +* Sun Apr 12 2009 Kyle McMartin +- 2.6.30-rc1-git5 +- revert-fix-modules_install-via-nfs.patch: reverted upstream + +* Thu Apr 09 2009 Kyle McMartin +- actually drop utrace-ftrace from srpm. + +* Thu Apr 09 2009 Kyle McMartin +- 2.6.30-rc1-git2 +- CONFIG_IGBVF=m +- CONFIG_NETFILTER_XT_TARGET_LED=m + +* Thu Apr 09 2009 Dave Jones +- Bring back the /dev/crash driver. (#492803) + +* Wed Apr 08 2009 Dave Jones +- disable MMIOTRACE in non-debug builds (#494584) + +* Wed Apr 08 2009 Kyle McMartin 2.6.30-0.44.rc1 +- 2.6.30-rc1 +- linux-2.6-hwmon-atk0110.patch: drop +- CONFIG_DETECT_HUNG_TASK=y +- # CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set + +* Tue Apr 7 2009 Roland McGrath +- utrace update, drop unfinished utrace-ftrace + +* Tue Apr 07 2009 Kyle McMartin +- Linux 2.6.29-git15 +- EXT3_DEFAULTS_TO_ORDERED on for now. +- X86_X2APIC enabled. +- LEDS_LP5521, LEDS_BD2802 off... look not generally relevant. +- LIBFCOE on. 
+ +* Tue Apr 07 2009 Dave Jones +- Enable CONFIG_CIFS_STATS (#494545) + +* Mon Apr 06 2009 Kyle McMartin +- linux-2.6-execshield.patch: rebase for 2.6.30 + +* Mon Apr 06 2009 Kyle McMartin +- Linux 2.6.29-git13 +- drop patches merged upstream: + - fix-ppc-debug_kmap_atomic.patch + - fix-staging-at76.patch + - linux-2.6-acpi-video-didl-intel-outputs.patch + - linux-2.6-acpi-strict-resources.patch + - linux-2.6-sony-laptop-rfkill.patch + - linux-2.6-btrfs-fix-umount-hang.patch + - linux-2.6-fiemap-header-install.patch + - linux-2.6-debug-dma-api.patch + - dma-api-debug-fixes.patch + - linux-2.6-ext4-flush-on-close.patch + - linux-2.6-relatime-by-default.patch + - linux-2.6-pci-sysfs-remove-id.patch + - linux-2.6-scsi-cpqarray-set-master.patch + - alsa-rewrite-hw_ptr-updaters.patch + - alsa-pcm-always-reset-invalid-position.patch + - alsa-pcm-fix-delta-calc-at-overlap.patch + - alsa-pcm-safer-boundary-checks.patch + - linux-2.6-input-hid-extra-gamepad.patch + - linux-2.6-ipw2x00-age-scan-results-on-resume.patch + - linux-2.6-dropwatch-protocol.patch + - linux-2.6-net-fix-gro-bug.patch + - linux-2.6-net-fix-another-gro-bug.patch + - linux-2.6-net-xfrm-fix-spin-unlock.patch + - linux-2.6.29-pat-change-is_linear_pfn_mapping-to-not-use-vm_pgoff.patch + - linux-2.6.29-pat-pci-change-prot-for-inherit.patch + +* Thu Apr 02 2009 Josef Bacik +- linux-2.6-btrfs-fix-umount-hang.patch: fix umount hang on btrfs + +* Thu Apr 02 2009 Kyle McMartin +- fix-ppc-debug_kmap_atomic.patch: fix build failures on ppc. + +* Thu Apr 02 2009 Kyle McMartin +- Linux 2.6.29-git9 + +* Tue Mar 31 2009 Kyle McMartin +- rds-only-on-64-bit-or-x86.patch: add +- at76-netdev_ops.patch: add + +* Tue Mar 31 2009 Kyle McMartin +- Linux 2.6.29-git8 +- linux-2.6-net-fix-another-gro-bug.patch: upstream. + +* Tue Mar 31 2009 Eric Sandeen +- add fiemap.h to kernel-headers +- build ext4 (and jbd2 and crc16) into the kernel + +* Tue Mar 31 2009 Kyle McMartin +- Linux 2.6.29-git7 +- fix-staging-at76.patch: pull patch from linux-wireless to fix... + +* Mon Mar 30 2009 Kyle McMartin 2.6.30-0.28.rc0.git6 +- Linux 2.6.29-git6 +- Bunch of stuff disabled, most merged, some needs rebasing. + +* Mon Mar 30 2009 Chuck Ebbert +- Make the .shared-srctree file a list so more than two checkouts + can share source files. + +* Mon Mar 30 2009 Chuck Ebbert +- Separate PAT fixes that are headed for -stable from our out-of-tree ones. + +* Mon Mar 30 2009 Dave Jones +- Make io schedulers selectable at boot time again. (#492817) + +* Mon Mar 30 2009 Dave Jones +- Add a strict-devmem=0 boot argument (#492803) + +* Mon Mar 30 2009 Adam Jackson +- linux-2.6.29-pat-fixes.patch: Fix PAT/GTT interaction + +* Mon Mar 30 2009 Mauro Carvalho Chehab +- some fixes of troubles caused by v4l2 subdev conversion + +* Mon Mar 30 2009 Mark McLoughlin 2.6.29-21 +- Fix guest->remote network stall with virtio/GSO (#490266) + +* Mon Mar 30 2009 Ben Skeggs +- drm-nouveau.patch + - rewrite nouveau PCI(E) GART functions, should fix rh#492492 + - kms: kernel option to allow dual-link dvi + - modinfo descriptions for module parameters + +* Sun Mar 29 2009 Mauro Carvalho Chehab +- more v4l/dvb updates: v4l subdev conversion and some driver improvements + +* Sun Mar 29 2009 Chuck Ebbert +- More fixes for ALSA hardware pointer updating. 
+ +* Sat Mar 28 2009 Mauro Carvalho Chehab +- linux-2.6-revert-dvb-net-kabi-change.patch: attempt to fix dvb net breakage +- update v4l fixes patch to reflect what's ready for 2.6.30 +- update v4l devel patch to reflect what will be kept on linux-next for a while + +* Fri Mar 27 2009 Chuck Ebbert 2.6.29-16 +- Fix 2.6.29 networking lockups. +- Fix locking in net/xfrm/xfrm_state.c (#489764) + +* Fri Mar 27 2009 Ben Skeggs +- drm-nouveau.patch: do nothing for dac_{prepare,commit}, it's useless + and breaks some things in strange ways. + +* Fri Mar 27 2009 Ben Skeggs +- nv50: clear 0x1900/8 on init, possible fix for rh#492240 +- forcibly disable GEM also if KMS requested where not supported +- inform the user if we disable KMS because of it not being supported + +* Thu Mar 26 2009 Matthew Garrett +- linux-2.6-relatime-by-default.patch: Backport relatime code from 2.6.30 + +* Thu Mar 26 2009 Dave Jones +- Check for modesetting enabled before forcing mode on 915. (#490336) + +* Thu Mar 26 2009 Dave Jones +- Set kernel-PAE as default in grub. (#487578) + +* Thu Mar 26 2009 Dave Jones +- Enable CONFIG_MOUSE_PS2_ELANTECH (#492163) + +* Thu Mar 26 2009 Kyle McMartin +- linux-2.6-v4l-pvrusb2-fixes.patch: fix build for uncle steve. + +* Thu Mar 26 2009 Mauro Carvalho Chehab +- Move all 2.6.30 stuff into linux-2.6-v4l-dvb-fixes.patch, in + preparation for upstream pull; +- Added two new drivers: gspca sq905c and DVB Intel ce6230 +- Updated to the latest v4l-dvb drivers. + +* Wed Mar 25 2009 Mauro Carvalho Chehab +- remove duplicated Cinergy T2 entry at config-generic + +* Wed Mar 25 2009 Neil Horman +- Add dropmonitor/dropwatch protocol from 2.6.30 + +* Wed Mar 25 2009 Kyle McMartin +- alsa-rewrite-hw_ptr-updaters.patch: snd_pcm_update_hw_ptr() tries to + detect the unexpected hwptr jumps more strictly to avoid the position + mess-up, which often results in the bad quality I/O with pulseaudio. + +* Wed Mar 25 2009 Ben Skeggs +- drm-nouveau.patch: idle channels better before destroying them + +* Tue Mar 24 2009 Kyle McMartin +- Disable DMAR by default until suspend & resume is fixed. + +* Tue Mar 24 2009 Josef Bacik +- fsync replay fixes for btrfs +* Mon Mar 23 2009 Dave Jones +- 2.6.29 ### # The following Emacs magic makes C-c C-e use UTC dates. 
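Note on the strict-devmem=0 boot argument mentioned in the changelog above (Mon Mar 30 2009, #492803): for readers unfamiliar with how such a switch is usually wired up, the sketch below shows the generic __setup() pattern for an early kernel command-line option. This is an illustration only, not the actual Fedora patch; the strict_devmem flag, the set_strict_devmem() handler, and the /dev/mem range check that would consult the flag are all assumed for the example.

/*
 * Illustrative sketch only -- not the Fedora patch itself.  Shows the
 * usual __setup() idiom for a boot argument such as "strict-devmem=0".
 * The strict_devmem flag and its consumer (a range_is_allowed()-style
 * check in the /dev/mem code) are assumptions made for this example.
 */
#include <linux/init.h>
#include <linux/kernel.h>

static int strict_devmem = 1;           /* default: keep /dev/mem restricted */

static int __init set_strict_devmem(char *arg)
{
        if (arg && *arg == '0') {
                printk(KERN_INFO "Opening up restricted /dev/mem access\n");
                strict_devmem = 0;
        }
        return 1;                       /* option consumed */
}
__setup("strict-devmem=", set_strict_devmem);

Booting with strict-devmem=0 would then clear the flag during early command-line parsing, before any userspace opens /dev/mem; __setup() handlers run from start_kernel(), which is why such an override needs no module or sysfs knob.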
diff --git a/kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch b/kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch index eefdda5..8638178 100644 --- a/kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch +++ b/kvm-mmu-fix-conflict-access-permissions-in-direct-sp.patch @@ -1,9 +1,7 @@ +From 567b1bbf982637ce3f0ac8597af91ae8106648c8 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong -Date: Wed, 30 Jun 2010 08:02:45 +0000 (+0800) -Subject: KVM: MMU: fix conflict access permissions in direct sp -X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=6aa0b9dec5d6dde26ea17b0b5be8fccfe19df3c9 - -KVM: MMU: fix conflict access permissions in direct sp +Date: Wed, 30 Jun 2010 16:02:45 +0800 +Subject: [PATCH] KVM: MMU: fix conflict access permissions in direct sp In no-direct mapping, we mark sp is 'direct' when we mapping the guest's larger page, but its access is encoded form upper page-struct @@ -34,12 +32,14 @@ Signed-off-by: Xiao Guangrong Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- + arch/x86/kvm/paging_tmpl.h | 1 + + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h -index 89d66ca..2331bdc 100644 +index ede2131..b473c0f 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h -@@ -342,6 +342,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, +@@ -336,6 +336,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, /* advance table_gfn when emulating 1gb pages with 4k */ if (delta == 0) table_gfn += PT_INDEX(addr, level); @@ -47,3 +47,6 @@ index 89d66ca..2331bdc 100644 } else { direct = 0; table_gfn = gw->table_gfn[level - 2]; +-- +1.7.1 + diff --git a/linux-2.6-acpi-indirect_fan_control.patch b/linux-2.6-acpi-indirect_fan_control.patch new file mode 100644 index 0000000..88e2818 --- /dev/null +++ b/linux-2.6-acpi-indirect_fan_control.patch @@ -0,0 +1,48 @@ +commit 0c99c5288eb9b1bbc9684b0ec0fd7efc578749b3 +Author: Zhang Rui +Date: Thu Dec 17 16:02:08 2009 +0800 + + ACPI: Disable explicit power state retrieval on fans + + If the ACPI power state can be got both directly and indirectly, + we prefer to get it indirectly. + + https://bugzilla.redhat.com/show_bug.cgi?id=531916 describes a + system with a _PSC method for the fan that always returns "on". + There's no benefit in us always requesting the state of the fan + when performing transitions - we want to do everything we can + to ensure that the fan turns on when it should do, not risk + hardware damage by believing the hardware when it tells us the + fan is already on. Given that the Leading Other OS(tm) works fine + on this machine, it seems likely that it behaves in much this way. + + inspired-by: Matthew Garrett + Signed-off-by: Zhang Rui + Signed-off-by: Len Brown + +diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c +index cf761b9..ae9226d 100644 +--- a/drivers/acpi/bus.c ++++ b/drivers/acpi/bus.c +@@ -190,16 +190,16 @@ int acpi_bus_get_power(acpi_handle handle, int *state) + * Get the device's power state either directly (via _PSC) or + * indirectly (via power resources). 
+ */ +- if (device->power.flags.explicit_get) { ++ if (device->power.flags.power_resources) { ++ result = acpi_power_get_inferred_state(device); ++ if (result) ++ return result; ++ } else if (device->power.flags.explicit_get) { + status = acpi_evaluate_integer(device->handle, "_PSC", + NULL, &psc); + if (ACPI_FAILURE(status)) + return -ENODEV; + device->power.state = (int)psc; +- } else if (device->power.flags.power_resources) { +- result = acpi_power_get_inferred_state(device); +- if (result) +- return result; + } + + *state = device->power.state; diff --git a/linux-2.6-acpi-sleep-live-sci-live.patch b/linux-2.6-acpi-sleep-live-sci-live.patch new file mode 100644 index 0000000..5d4239c --- /dev/null +++ b/linux-2.6-acpi-sleep-live-sci-live.patch @@ -0,0 +1,51 @@ +commit 7ba0dea4158155a68b833982199691dbc2d4e6dc +Author: Matthew Garrett +Date: Mon Apr 19 16:51:39 2010 -0400 + + acpi: Fall back to manually changing SCI_EN + + The ACPI spec tells us that the ACPI SCI_EN bit is under hardware control + and shouldn't be touched by the OS. It seems that the Leading Other OS + ignores this and some machines expect this behaviour. We have a blacklist + for these, but given that we're able to detect the failure case and the + alternative to breaking the spec is letting the machine crash and burn, + let's try falling back when we know the alternative is a mostly-dead + machine. + + Signed-off-by: Matthew Garrett + +diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c +index f74834a..79df8d4 100644 +--- a/drivers/acpi/sleep.c ++++ b/drivers/acpi/sleep.c +@@ -227,6 +227,7 @@ static int acpi_suspend_begin(suspend_state_t pm_state) + static int acpi_suspend_enter(suspend_state_t pm_state) + { + acpi_status status = AE_OK; ++ acpi_status enable_status = AE_OK; + unsigned long flags = 0; + u32 acpi_state = acpi_target_sleep_state; + +@@ -254,10 +255,19 @@ static int acpi_suspend_enter(suspend_state_t pm_state) + } + + /* If ACPI is not enabled by the BIOS, we need to enable it here. */ +- if (set_sci_en_on_resume) ++ if (!set_sci_en_on_resume) ++ enable_status = acpi_enable(); ++ ++ if (set_sci_en_on_resume || enable_status == AE_NO_HARDWARE_RESPONSE) ++ /* If we're still in legacy mode then we have a problem. The ++ * spec tells us that this bit is under hardware control, but ++ * there's no plausible way that the OS can transition back to ++ * legacy mode so our choices here are to either ignore the ++ * spec or crash and burn horribly. The latter doesn't seem ++ * like it's ever going to be the preferable choice, so let's ++ * live dangerously. 
++ */ + acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); +- else +- acpi_enable(); + + /* Reprogram control registers and execute _BFS */ + acpi_leave_sleep_state_prep(acpi_state); diff --git a/linux-2.6-acpi-video-export-edid.patch b/linux-2.6-acpi-video-export-edid.patch index 52ddd87..3721b02 100644 --- a/linux-2.6-acpi-video-export-edid.patch +++ b/linux-2.6-acpi-video-export-edid.patch @@ -1,26 +1,15 @@ -From 023f5b2d1fdad6ffe33a204a4e76e38edba9d9e5 Mon Sep 17 00:00:00 2001 -From: Kyle McMartin -Date: Thu, 20 May 2010 08:59:58 -0400 -Subject: linux-2.6-acpi-video-export-edid.patch - ---- - drivers/acpi/video.c | 110 ++++++++++++++++++++++++++++++++++++++++++++++---- - include/acpi/video.h | 16 +++++++ - 2 files changed, 118 insertions(+), 8 deletions(-) - -diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c -index a0c93b3..4b8bda1 100644 ---- a/drivers/acpi/video.c -+++ b/drivers/acpi/video.c -@@ -45,6 +45,7 @@ +diff -up linux-2.6.33.noarch/drivers/acpi/video.c.orig linux-2.6.33.noarch/drivers/acpi/video.c +--- linux-2.6.33.noarch/drivers/acpi/video.c.orig 2010-04-12 13:42:53.637966231 -0400 ++++ linux-2.6.33.noarch/drivers/acpi/video.c 2010-04-12 13:43:07.290978352 -0400 +@@ -43,6 +43,7 @@ + #include #include #include - #include +#include #define PREFIX "ACPI: " -@@ -65,11 +66,6 @@ +@@ -63,11 +64,6 @@ #define MAX_NAME_LEN 20 @@ -32,7 +21,7 @@ index a0c93b3..4b8bda1 100644 #define _COMPONENT ACPI_VIDEO_COMPONENT ACPI_MODULE_NAME("video"); -@@ -1748,11 +1744,27 @@ acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id +@@ -1731,11 +1727,27 @@ acpi_video_get_device_attr(struct acpi_v } static int @@ -61,7 +50,7 @@ index a0c93b3..4b8bda1 100644 struct acpi_video_device *data; struct acpi_video_device_attrib* attribute; -@@ -1797,8 +1809,25 @@ acpi_video_bus_get_one_device(struct acpi_device *device, +@@ -1780,8 +1792,25 @@ acpi_video_bus_get_one_device(struct acp } if(attribute->bios_can_detect) data->flags.bios = 1; @@ -89,7 +78,7 @@ index a0c93b3..4b8bda1 100644 acpi_video_device_bind(video, data); acpi_video_device_find_cap(data); -@@ -2032,6 +2061,71 @@ out: +@@ -2015,6 +2044,71 @@ out: return result; } @@ -161,10 +150,9 @@ index a0c93b3..4b8bda1 100644 static int acpi_video_bus_get_devices(struct acpi_video_bus *video, struct acpi_device *device) -diff --git a/include/acpi/video.h b/include/acpi/video.h -index cf7be3d..551793c 100644 ---- a/include/acpi/video.h -+++ b/include/acpi/video.h +diff -up linux-2.6.33.noarch/include/acpi/video.h.orig linux-2.6.33.noarch/include/acpi/video.h +--- linux-2.6.33.noarch/include/acpi/video.h.orig 2010-04-12 13:42:53.638966251 -0400 ++++ linux-2.6.33.noarch/include/acpi/video.h 2010-04-12 13:43:07.292841571 -0400 @@ -1,12 +1,28 @@ #ifndef __ACPI_VIDEO_H #define __ACPI_VIDEO_H @@ -194,6 +182,3 @@ index cf7be3d..551793c 100644 #endif #endif --- -1.7.0.1 - diff --git a/linux-2.6-ata-quirk.patch b/linux-2.6-ata-quirk.patch new file mode 100644 index 0000000..32096d4 --- /dev/null +++ b/linux-2.6-ata-quirk.patch @@ -0,0 +1,58 @@ +--- linux-2.6.20/arch/ia64/kernel/quirks.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.20_fix/arch/ia64/kernel/quirks.c 2007-02-13 13:56:34.000000000 -0500 +@@ -0,0 +1,45 @@ ++/* ++ * This file contains work-arounds for ia64 platform bugs. ++ */ ++#include ++ ++/* ++ * quirk_intel_ide_controller: If an ide/ata controller is ++ * at legacy mode, BIOS might initiates BAR(bar 0~3 and 5) ++ * with incorrect value. This quirk will reset the incorrect ++ * value to 0. 
++ */ ++static void __devinit quirk_intel_ide_controller(struct pci_dev *dev) ++{ ++ unsigned int pos; ++ struct resource *res; ++ int fixed = 0; ++ u8 tmp8; ++ ++ if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE) ++ return; ++ ++ /* TODO: What if one channel is in native mode ... */ ++ pci_read_config_byte(dev, PCI_CLASS_PROG, &tmp8); ++ if ((tmp8 & 5) == 5) ++ return; ++ ++ for( pos = 0; pos < 6; pos ++ ) { ++ res = &dev->resource[pos]; ++ if (!(res->flags & (IORESOURCE_IO | IORESOURCE_MEM))) ++ continue; ++ ++ if (!res->start && res->end) { ++ res->start = res->end = 0; ++ res->flags = 0; ++ fixed = 1; ++ } ++ } ++ if (fixed) ++ printk(KERN_WARNING ++ "PCI device %s: BIOS resource configuration fixed.\n", ++ pci_name(dev)); ++} ++ ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_11, quirk_intel_ide_controller); ++ +--- linux-2.6.21.noarch/arch/ia64/kernel/Makefile~ 2007-05-27 23:23:36.000000000 -0400 ++++ linux-2.6.21.noarch/arch/ia64/kernel/Makefile 2007-05-27 23:23:48.000000000 -0400 +@@ -33,6 +33,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o + obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o + obj-$(CONFIG_AUDIT) += audit.o + obj-$(CONFIG_PCI_MSI) += msi_ia64.o ++obj-$(CONFIG_PCI) += quirks.o + mca_recovery-y += mca_drv.o mca_drv_asm.o + obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o + diff --git a/linux-2.6-btrfs-update.patch b/linux-2.6-btrfs-update.patch new file mode 100644 index 0000000..c3325a2 --- /dev/null +++ b/linux-2.6-btrfs-update.patch @@ -0,0 +1,2819 @@ +diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h +index 3f1f50d..7a4dee1 100644 +--- a/fs/btrfs/btrfs_inode.h ++++ b/fs/btrfs/btrfs_inode.h +@@ -153,6 +153,11 @@ struct btrfs_inode { + unsigned ordered_data_close:1; + unsigned dummy_inode:1; + ++ /* ++ * always compress this one file ++ */ ++ unsigned force_compress:1; ++ + struct inode vfs_inode; + }; + +diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c +index a11a320..28b92a7 100644 +--- a/fs/btrfs/compression.c ++++ b/fs/btrfs/compression.c +@@ -478,7 +478,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, + goto next; + } + +- page = alloc_page(mapping_gfp_mask(mapping) | GFP_NOFS); ++ page = alloc_page(mapping_gfp_mask(mapping) & ~__GFP_FS); + if (!page) + break; + +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 8b5cfdd..0af2e38 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -373,11 +373,13 @@ struct btrfs_super_block { + * ones specified below then we will fail to mount + */ + #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0) ++#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (2ULL << 0) + + #define BTRFS_FEATURE_COMPAT_SUPP 0ULL + #define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL + #define BTRFS_FEATURE_INCOMPAT_SUPP \ +- BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF ++ (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \ ++ BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL) + + /* + * A leaf is full of items. offset and size tell us where to find +@@ -1182,7 +1184,6 @@ struct btrfs_root { + #define BTRFS_INODE_NOATIME (1 << 9) + #define BTRFS_INODE_DIRSYNC (1 << 10) + +- + /* some macros to generate set/get funcs for the struct fields. 
This + * assumes there is a lefoo_to_cpu for every type, so lets make a simple + * one for u8: +@@ -1842,7 +1843,7 @@ BTRFS_SETGET_STACK_FUNCS(super_num_devices, struct btrfs_super_block, + BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block, + compat_flags, 64); + BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block, +- compat_flags, 64); ++ compat_ro_flags, 64); + BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block, + incompat_flags, 64); + BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block, +@@ -2310,7 +2311,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, + u32 min_type); + + int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput); +-int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end); ++int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, ++ struct extent_state **cached_state); + int btrfs_writepages(struct address_space *mapping, + struct writeback_control *wbc); + int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, +@@ -2335,7 +2337,7 @@ int btrfs_init_cachep(void); + void btrfs_destroy_cachep(void); + long btrfs_ioctl_trans_end(struct file *file); + struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, +- struct btrfs_root *root); ++ struct btrfs_root *root, int *was_new); + int btrfs_commit_write(struct file *file, struct page *page, + unsigned from, unsigned to); + struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, +@@ -2386,7 +2388,6 @@ void btrfs_sysfs_del_super(struct btrfs_fs_info *root); + ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size); + + /* super.c */ +-u64 btrfs_parse_size(char *str); + int btrfs_parse_options(struct btrfs_root *root, char *options); + int btrfs_sync_fs(struct super_block *sb, int wait); + +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 2b59201..11d0ad3 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -263,13 +263,15 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, + static int verify_parent_transid(struct extent_io_tree *io_tree, + struct extent_buffer *eb, u64 parent_transid) + { ++ struct extent_state *cached_state = NULL; + int ret; + + if (!parent_transid || btrfs_header_generation(eb) == parent_transid) + return 0; + +- lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS); +- if (extent_buffer_uptodate(io_tree, eb) && ++ lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, ++ 0, &cached_state, GFP_NOFS); ++ if (extent_buffer_uptodate(io_tree, eb, cached_state) && + btrfs_header_generation(eb) == parent_transid) { + ret = 0; + goto out; +@@ -282,10 +284,10 @@ static int verify_parent_transid(struct extent_io_tree *io_tree, + (unsigned long long)btrfs_header_generation(eb)); + } + ret = 1; +- clear_extent_buffer_uptodate(io_tree, eb); ++ clear_extent_buffer_uptodate(io_tree, eb, &cached_state); + out: +- unlock_extent(io_tree, eb->start, eb->start + eb->len - 1, +- GFP_NOFS); ++ unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1, ++ &cached_state, GFP_NOFS); + return ret; + } + +@@ -901,7 +903,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, + root->highest_objectid = 0; + root->name = NULL; + root->in_sysfs = 0; +- root->inode_tree.rb_node = NULL; ++ root->inode_tree = RB_ROOT; + + INIT_LIST_HEAD(&root->dirty_list); + INIT_LIST_HEAD(&root->orphan_list); +@@ -1673,7 +1675,7 @@ struct btrfs_root 
*open_ctree(struct super_block *sb, + insert_inode_hash(fs_info->btree_inode); + + spin_lock_init(&fs_info->block_group_cache_lock); +- fs_info->block_group_cache_tree.rb_node = NULL; ++ fs_info->block_group_cache_tree = RB_ROOT; + + extent_io_tree_init(&fs_info->freed_extents[0], + fs_info->btree_inode->i_mapping, GFP_NOFS); +@@ -2497,7 +2499,8 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid) + int ret; + struct inode *btree_inode = buf->first_page->mapping->host; + +- ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf); ++ ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf, ++ NULL); + if (!ret) + return ret; + +diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c +index ba5c3fd..951ef09 100644 +--- a/fs/btrfs/export.c ++++ b/fs/btrfs/export.c +@@ -95,7 +95,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, + btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); + key.offset = 0; + +- inode = btrfs_iget(sb, &key, root); ++ inode = btrfs_iget(sb, &key, root, NULL); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto fail; +@@ -223,7 +223,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child) + + key.type = BTRFS_INODE_ITEM_KEY; + key.offset = 0; +- dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root)); ++ dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL)); + if (!IS_ERR(dentry)) + dentry->d_op = &btrfs_dentry_operations; + return dentry; +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index 559f724..1727b26 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -6561,6 +6561,7 @@ static noinline int invalidate_extent_cache(struct btrfs_root *root, + struct btrfs_key key; + struct inode *inode = NULL; + struct btrfs_file_extent_item *fi; ++ struct extent_state *cached_state = NULL; + u64 num_bytes; + u64 skip_objectid = 0; + u32 nritems; +@@ -6589,12 +6590,14 @@ static noinline int invalidate_extent_cache(struct btrfs_root *root, + } + num_bytes = btrfs_file_extent_num_bytes(leaf, fi); + +- lock_extent(&BTRFS_I(inode)->io_tree, key.offset, +- key.offset + num_bytes - 1, GFP_NOFS); ++ lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset, ++ key.offset + num_bytes - 1, 0, &cached_state, ++ GFP_NOFS); + btrfs_drop_extent_cache(inode, key.offset, + key.offset + num_bytes - 1, 1); +- unlock_extent(&BTRFS_I(inode)->io_tree, key.offset, +- key.offset + num_bytes - 1, GFP_NOFS); ++ unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset, ++ key.offset + num_bytes - 1, &cached_state, ++ GFP_NOFS); + cond_resched(); + } + iput(inode); +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index b177ed3..c99121a 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -104,8 +104,8 @@ void extent_io_exit(void) + void extent_io_tree_init(struct extent_io_tree *tree, + struct address_space *mapping, gfp_t mask) + { +- tree->state.rb_node = NULL; +- tree->buffer.rb_node = NULL; ++ tree->state = RB_ROOT; ++ tree->buffer = RB_ROOT; + tree->ops = NULL; + tree->dirty_bytes = 0; + spin_lock_init(&tree->lock); +@@ -513,7 +513,10 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + u64 last_end; + int err; + int set = 0; ++ int clear = 0; + ++ if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY)) ++ clear = 1; + again: + if (!prealloc && (mask & __GFP_WAIT)) { + prealloc = alloc_extent_state(mask); +@@ -524,14 +527,20 @@ again: + spin_lock(&tree->lock); + if (cached_state) { + cached = *cached_state; +- *cached_state = NULL; 
+- cached_state = NULL; ++ ++ if (clear) { ++ *cached_state = NULL; ++ cached_state = NULL; ++ } ++ + if (cached && cached->tree && cached->start == start) { +- atomic_dec(&cached->refs); ++ if (clear) ++ atomic_dec(&cached->refs); + state = cached; + goto hit_next; + } +- free_extent_state(cached); ++ if (clear) ++ free_extent_state(cached); + } + /* + * this search will find the extents that end after +@@ -946,11 +955,11 @@ int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, + } + + int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, +- gfp_t mask) ++ struct extent_state **cached_state, gfp_t mask) + { + return set_extent_bit(tree, start, end, + EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE, +- 0, NULL, NULL, mask); ++ 0, NULL, cached_state, mask); + } + + int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, +@@ -984,10 +993,11 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, + } + + static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, +- u64 end, gfp_t mask) ++ u64 end, struct extent_state **cached_state, ++ gfp_t mask) + { + return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, +- NULL, mask); ++ cached_state, mask); + } + + int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end) +@@ -1171,7 +1181,8 @@ out: + * 1 is returned if we find something, 0 if nothing was in the tree + */ + static noinline u64 find_delalloc_range(struct extent_io_tree *tree, +- u64 *start, u64 *end, u64 max_bytes) ++ u64 *start, u64 *end, u64 max_bytes, ++ struct extent_state **cached_state) + { + struct rb_node *node; + struct extent_state *state; +@@ -1203,8 +1214,11 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree, + *end = state->end; + goto out; + } +- if (!found) ++ if (!found) { + *start = state->start; ++ *cached_state = state; ++ atomic_inc(&state->refs); ++ } + found++; + *end = state->end; + cur_start = state->end + 1; +@@ -1336,10 +1350,11 @@ again: + delalloc_start = *start; + delalloc_end = 0; + found = find_delalloc_range(tree, &delalloc_start, &delalloc_end, +- max_bytes); ++ max_bytes, &cached_state); + if (!found || delalloc_end <= *start) { + *start = delalloc_start; + *end = delalloc_end; ++ free_extent_state(cached_state); + return found; + } + +@@ -1722,7 +1737,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err) + } + + if (!uptodate) { +- clear_extent_uptodate(tree, start, end, GFP_NOFS); ++ clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS); + ClearPageUptodate(page); + SetPageError(page); + } +@@ -1750,7 +1765,8 @@ static void end_bio_extent_writepage(struct bio *bio, int err) + static void end_bio_extent_readpage(struct bio *bio, int err) + { + int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); +- struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; ++ struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1; ++ struct bio_vec *bvec = bio->bi_io_vec; + struct extent_io_tree *tree; + u64 start; + u64 end; +@@ -1773,7 +1789,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) + else + whole_page = 0; + +- if (--bvec >= bio->bi_io_vec) ++ if (++bvec <= bvec_end) + prefetchw(&bvec->bv_page->flags); + + if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { +@@ -1818,7 +1834,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) + } + check_page_locked(tree, page); + } +- } while (bvec >= bio->bi_io_vec); ++ } while (bvec <= bvec_end); + + bio_put(bio); + } +@@ 
-2704,6 +2720,7 @@ int extent_readpages(struct extent_io_tree *tree, + int extent_invalidatepage(struct extent_io_tree *tree, + struct page *page, unsigned long offset) + { ++ struct extent_state *cached_state = NULL; + u64 start = ((u64)page->index << PAGE_CACHE_SHIFT); + u64 end = start + PAGE_CACHE_SIZE - 1; + size_t blocksize = page->mapping->host->i_sb->s_blocksize; +@@ -2712,12 +2729,12 @@ int extent_invalidatepage(struct extent_io_tree *tree, + if (start > end) + return 0; + +- lock_extent(tree, start, end, GFP_NOFS); ++ lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS); + wait_on_page_writeback(page); + clear_extent_bit(tree, start, end, + EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, +- 1, 1, NULL, GFP_NOFS); ++ 1, 1, &cached_state, GFP_NOFS); + return 0; + } + +@@ -2920,16 +2937,17 @@ sector_t extent_bmap(struct address_space *mapping, sector_t iblock, + get_extent_t *get_extent) + { + struct inode *inode = mapping->host; ++ struct extent_state *cached_state = NULL; + u64 start = iblock << inode->i_blkbits; + sector_t sector = 0; + size_t blksize = (1 << inode->i_blkbits); + struct extent_map *em; + +- lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1, +- GFP_NOFS); ++ lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1, ++ 0, &cached_state, GFP_NOFS); + em = get_extent(inode, NULL, 0, start, blksize, 0); +- unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1, +- GFP_NOFS); ++ unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, ++ start + blksize - 1, &cached_state, GFP_NOFS); + if (!em || IS_ERR(em)) + return 0; + +@@ -2951,6 +2969,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, + u32 flags = 0; + u64 disko = 0; + struct extent_map *em = NULL; ++ struct extent_state *cached_state = NULL; + int end = 0; + u64 em_start = 0, em_len = 0; + unsigned long emflags; +@@ -2959,8 +2978,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, + if (len == 0) + return -EINVAL; + +- lock_extent(&BTRFS_I(inode)->io_tree, start, start + len, +- GFP_NOFS); ++ lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, ++ &cached_state, GFP_NOFS); + em = get_extent(inode, NULL, 0, off, max - off, 0); + if (!em) + goto out; +@@ -3023,8 +3042,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, + out_free: + free_extent_map(em); + out: +- unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len, +- GFP_NOFS); ++ unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len, ++ &cached_state, GFP_NOFS); + return ret; + } + +@@ -3264,7 +3283,8 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree, + } + + int clear_extent_buffer_uptodate(struct extent_io_tree *tree, +- struct extent_buffer *eb) ++ struct extent_buffer *eb, ++ struct extent_state **cached_state) + { + unsigned long i; + struct page *page; +@@ -3274,7 +3294,7 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree, + clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); + + clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, +- GFP_NOFS); ++ cached_state, GFP_NOFS); + for (i = 0; i < num_pages; i++) { + page = extent_buffer_page(eb, i); + if (page) +@@ -3334,7 +3354,8 @@ int extent_range_uptodate(struct extent_io_tree *tree, + } + + int extent_buffer_uptodate(struct extent_io_tree *tree, +- struct extent_buffer *eb) ++ struct extent_buffer *eb, ++ struct extent_state *cached_state) + { + int ret = 0; + unsigned long num_pages; +@@ 
-3346,7 +3367,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree, + return 1; + + ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1, +- EXTENT_UPTODATE, 1, NULL); ++ EXTENT_UPTODATE, 1, cached_state); + if (ret) + return ret; + +diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h +index 36de250..bbab481 100644 +--- a/fs/btrfs/extent_io.h ++++ b/fs/btrfs/extent_io.h +@@ -163,6 +163,8 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); + int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, + int bits, struct extent_state **cached, gfp_t mask); + int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); ++int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, ++ struct extent_state **cached, gfp_t mask); + int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask); + int extent_read_full_page(struct extent_io_tree *tree, struct page *page, +@@ -196,7 +198,7 @@ int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, + int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start, + u64 end, gfp_t mask); + int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, +- gfp_t mask); ++ struct extent_state **cached_state, gfp_t mask); + int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask); + int find_first_extent_bit(struct extent_io_tree *tree, u64 start, +@@ -281,9 +283,11 @@ int test_extent_buffer_dirty(struct extent_io_tree *tree, + int set_extent_buffer_uptodate(struct extent_io_tree *tree, + struct extent_buffer *eb); + int clear_extent_buffer_uptodate(struct extent_io_tree *tree, +- struct extent_buffer *eb); ++ struct extent_buffer *eb, ++ struct extent_state **cached_state); + int extent_buffer_uptodate(struct extent_io_tree *tree, +- struct extent_buffer *eb); ++ struct extent_buffer *eb, ++ struct extent_state *cached_state); + int map_extent_buffer(struct extent_buffer *eb, unsigned long offset, + unsigned long min_len, char **token, char **map, + unsigned long *map_start, +diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c +index 428fcac..28d87ba 100644 +--- a/fs/btrfs/extent_map.c ++++ b/fs/btrfs/extent_map.c +@@ -35,7 +35,7 @@ void extent_map_exit(void) + */ + void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask) + { +- tree->map.rb_node = NULL; ++ tree->map = RB_ROOT; + rwlock_init(&tree->lock); + } + +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c +index 6ed434a..ee3323c 100644 +--- a/fs/btrfs/file.c ++++ b/fs/btrfs/file.c +@@ -123,7 +123,8 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans, + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); + + end_of_last_block = start_pos + num_bytes - 1; +- err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block); ++ err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, ++ NULL); + if (err) + return err; + +@@ -753,6 +754,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file, + loff_t pos, unsigned long first_index, + unsigned long last_index, size_t write_bytes) + { ++ struct extent_state *cached_state = NULL; + int i; + unsigned long index = pos >> PAGE_CACHE_SHIFT; + struct inode *inode = fdentry(file)->d_inode; +@@ -781,16 +783,18 @@ again: + } + if (start_pos < inode->i_size) { + struct btrfs_ordered_extent *ordered; +- lock_extent(&BTRFS_I(inode)->io_tree, +- start_pos, last_pos - 1, GFP_NOFS); ++ 
lock_extent_bits(&BTRFS_I(inode)->io_tree, ++ start_pos, last_pos - 1, 0, &cached_state, ++ GFP_NOFS); + ordered = btrfs_lookup_first_ordered_extent(inode, + last_pos - 1); + if (ordered && + ordered->file_offset + ordered->len > start_pos && + ordered->file_offset < last_pos) { + btrfs_put_ordered_extent(ordered); +- unlock_extent(&BTRFS_I(inode)->io_tree, +- start_pos, last_pos - 1, GFP_NOFS); ++ unlock_extent_cached(&BTRFS_I(inode)->io_tree, ++ start_pos, last_pos - 1, ++ &cached_state, GFP_NOFS); + for (i = 0; i < num_pages; i++) { + unlock_page(pages[i]); + page_cache_release(pages[i]); +@@ -802,12 +806,13 @@ again: + if (ordered) + btrfs_put_ordered_extent(ordered); + +- clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos, ++ clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, + last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC | +- EXTENT_DO_ACCOUNTING, ++ EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, + GFP_NOFS); +- unlock_extent(&BTRFS_I(inode)->io_tree, +- start_pos, last_pos - 1, GFP_NOFS); ++ unlock_extent_cached(&BTRFS_I(inode)->io_tree, ++ start_pos, last_pos - 1, &cached_state, ++ GFP_NOFS); + } + for (i = 0; i < num_pages; i++) { + clear_page_dirty_for_io(pages[i]); +diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c +index cb2849f..dd831ed 100644 +--- a/fs/btrfs/free-space-cache.c ++++ b/fs/btrfs/free-space-cache.c +@@ -870,7 +870,7 @@ __btrfs_return_cluster_to_free_space( + tree_insert_offset(&block_group->free_space_offset, + entry->offset, &entry->offset_index, 0); + } +- cluster->root.rb_node = NULL; ++ cluster->root = RB_ROOT; + + out: + spin_unlock(&cluster->lock); +@@ -1355,7 +1355,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) + { + spin_lock_init(&cluster->lock); + spin_lock_init(&cluster->refill_lock); +- cluster->root.rb_node = NULL; ++ cluster->root = RB_ROOT; + cluster->max_size = 0; + cluster->points_to_bitmap = false; + INIT_LIST_HEAD(&cluster->block_group_list); +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index c41db6d..02bb099 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -379,7 +379,8 @@ again: + * change at any time if we discover bad compression ratios. 
+ */ + if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) && +- btrfs_test_opt(root, COMPRESS)) { ++ (btrfs_test_opt(root, COMPRESS) || ++ (BTRFS_I(inode)->force_compress))) { + WARN_ON(pages); + pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); + +@@ -483,8 +484,10 @@ again: + nr_pages_ret = 0; + + /* flag the file so we don't compress in the future */ +- if (!btrfs_test_opt(root, FORCE_COMPRESS)) ++ if (!btrfs_test_opt(root, FORCE_COMPRESS) && ++ !(BTRFS_I(inode)->force_compress)) { + BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; ++ } + } + if (will_compress) { + *num_added += 1; +@@ -570,8 +573,8 @@ retry: + unsigned long nr_written = 0; + + lock_extent(io_tree, async_extent->start, +- async_extent->start + +- async_extent->ram_size - 1, GFP_NOFS); ++ async_extent->start + ++ async_extent->ram_size - 1, GFP_NOFS); + + /* allocate blocks */ + ret = cow_file_range(inode, async_cow->locked_page, +@@ -1211,7 +1214,8 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, + else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) + ret = run_delalloc_nocow(inode, locked_page, start, end, + page_started, 0, nr_written); +- else if (!btrfs_test_opt(root, COMPRESS)) ++ else if (!btrfs_test_opt(root, COMPRESS) && ++ !(BTRFS_I(inode)->force_compress)) + ret = cow_file_range(inode, locked_page, start, end, + page_started, nr_written, 1); + else +@@ -1508,12 +1512,13 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, + return 0; + } + +-int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end) ++int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, ++ struct extent_state **cached_state) + { + if ((end & (PAGE_CACHE_SIZE - 1)) == 0) + WARN_ON(1); + return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, +- GFP_NOFS); ++ cached_state, GFP_NOFS); + } + + /* see btrfs_writepage_start_hook for details on why this is required */ +@@ -1526,6 +1531,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work) + { + struct btrfs_writepage_fixup *fixup; + struct btrfs_ordered_extent *ordered; ++ struct extent_state *cached_state = NULL; + struct page *page; + struct inode *inode; + u64 page_start; +@@ -1544,7 +1550,8 @@ again: + page_start = page_offset(page); + page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; + +- lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS); ++ lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0, ++ &cached_state, GFP_NOFS); + + /* already ordered? 
We're done */ + if (PagePrivate2(page)) +@@ -1552,17 +1559,18 @@ again: + + ordered = btrfs_lookup_ordered_extent(inode, page_start); + if (ordered) { +- unlock_extent(&BTRFS_I(inode)->io_tree, page_start, +- page_end, GFP_NOFS); ++ unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, ++ page_end, &cached_state, GFP_NOFS); + unlock_page(page); + btrfs_start_ordered_extent(inode, ordered, 1); + goto again; + } + +- btrfs_set_extent_delalloc(inode, page_start, page_end); ++ btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); + ClearPageChecked(page); + out: +- unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS); ++ unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, ++ &cached_state, GFP_NOFS); + out_page: + unlock_page(page); + page_cache_release(page); +@@ -1691,14 +1699,14 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) + struct btrfs_trans_handle *trans; + struct btrfs_ordered_extent *ordered_extent = NULL; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; ++ struct extent_state *cached_state = NULL; + int compressed = 0; + int ret; + +- ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1); ++ ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, ++ end - start + 1); + if (!ret) + return 0; +- +- ordered_extent = btrfs_lookup_ordered_extent(inode, start); + BUG_ON(!ordered_extent); + + if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { +@@ -1713,9 +1721,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) + goto out; + } + +- lock_extent(io_tree, ordered_extent->file_offset, +- ordered_extent->file_offset + ordered_extent->len - 1, +- GFP_NOFS); ++ lock_extent_bits(io_tree, ordered_extent->file_offset, ++ ordered_extent->file_offset + ordered_extent->len - 1, ++ 0, &cached_state, GFP_NOFS); + + trans = btrfs_join_transaction(root, 1); + +@@ -1742,9 +1750,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) + ordered_extent->len); + BUG_ON(ret); + } +- unlock_extent(io_tree, ordered_extent->file_offset, +- ordered_extent->file_offset + ordered_extent->len - 1, +- GFP_NOFS); ++ unlock_extent_cached(io_tree, ordered_extent->file_offset, ++ ordered_extent->file_offset + ++ ordered_extent->len - 1, &cached_state, GFP_NOFS); ++ + add_pending_csums(trans, inode, ordered_extent->file_offset, + &ordered_extent->list); + +@@ -2153,7 +2162,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) + found_key.objectid = found_key.offset; + found_key.type = BTRFS_INODE_ITEM_KEY; + found_key.offset = 0; +- inode = btrfs_iget(root->fs_info->sb, &found_key, root); ++ inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); + if (IS_ERR(inode)) + break; + +@@ -3081,6 +3090,7 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from) + struct btrfs_root *root = BTRFS_I(inode)->root; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct btrfs_ordered_extent *ordered; ++ struct extent_state *cached_state = NULL; + char *kaddr; + u32 blocksize = root->sectorsize; + pgoff_t index = from >> PAGE_CACHE_SHIFT; +@@ -3127,12 +3137,14 @@ again: + } + wait_on_page_writeback(page); + +- lock_extent(io_tree, page_start, page_end, GFP_NOFS); ++ lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, ++ GFP_NOFS); + set_page_extent_mapped(page); + + ordered = btrfs_lookup_ordered_extent(inode, page_start); + if (ordered) { +- unlock_extent(io_tree, page_start, page_end, 
GFP_NOFS); ++ unlock_extent_cached(io_tree, page_start, page_end, ++ &cached_state, GFP_NOFS); + unlock_page(page); + page_cache_release(page); + btrfs_start_ordered_extent(inode, ordered, 1); +@@ -3140,13 +3152,15 @@ again: + goto again; + } + +- clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, ++ clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, + EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, +- GFP_NOFS); ++ 0, 0, &cached_state, GFP_NOFS); + +- ret = btrfs_set_extent_delalloc(inode, page_start, page_end); ++ ret = btrfs_set_extent_delalloc(inode, page_start, page_end, ++ &cached_state); + if (ret) { +- unlock_extent(io_tree, page_start, page_end, GFP_NOFS); ++ unlock_extent_cached(io_tree, page_start, page_end, ++ &cached_state, GFP_NOFS); + goto out_unlock; + } + +@@ -3159,7 +3173,8 @@ again: + } + ClearPageChecked(page); + set_page_dirty(page); +- unlock_extent(io_tree, page_start, page_end, GFP_NOFS); ++ unlock_extent_cached(io_tree, page_start, page_end, &cached_state, ++ GFP_NOFS); + + out_unlock: + if (ret) +@@ -3177,6 +3192,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) + struct btrfs_root *root = BTRFS_I(inode)->root; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct extent_map *em; ++ struct extent_state *cached_state = NULL; + u64 mask = root->sectorsize - 1; + u64 hole_start = (inode->i_size + mask) & ~mask; + u64 block_end = (size + mask) & ~mask; +@@ -3192,11 +3208,13 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) + struct btrfs_ordered_extent *ordered; + btrfs_wait_ordered_range(inode, hole_start, + block_end - hole_start); +- lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); ++ lock_extent_bits(io_tree, hole_start, block_end - 1, 0, ++ &cached_state, GFP_NOFS); + ordered = btrfs_lookup_ordered_extent(inode, hole_start); + if (!ordered) + break; +- unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); ++ unlock_extent_cached(io_tree, hole_start, block_end - 1, ++ &cached_state, GFP_NOFS); + btrfs_put_ordered_extent(ordered); + } + +@@ -3241,7 +3259,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) + break; + } + +- unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); ++ unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state, ++ GFP_NOFS); + return err; + } + +@@ -3639,6 +3658,7 @@ static noinline void init_btrfs_i(struct inode *inode) + bi->index_cnt = (u64)-1; + bi->last_unlink_trans = 0; + bi->ordered_data_close = 0; ++ bi->force_compress = 0; + extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS); + extent_io_tree_init(&BTRFS_I(inode)->io_tree, + inode->i_mapping, GFP_NOFS); +@@ -3687,7 +3707,7 @@ static struct inode *btrfs_iget_locked(struct super_block *s, + * Returns in *is_new if the inode was read from disk + */ + struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, +- struct btrfs_root *root) ++ struct btrfs_root *root, int *new) + { + struct inode *inode; + +@@ -3702,6 +3722,8 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, + + inode_tree_add(inode); + unlock_new_inode(inode); ++ if (new) ++ *new = 1; + } + + return inode; +@@ -3754,7 +3776,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) + return NULL; + + if (location.type == BTRFS_INODE_ITEM_KEY) { +- inode = btrfs_iget(dir->i_sb, &location, root); ++ inode = btrfs_iget(dir->i_sb, &location, root, NULL); + return inode; + } + +@@ -3769,7 +3791,7 @@ struct inode 
*btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) + else + inode = new_simple_dir(dir->i_sb, &location, sub_root); + } else { +- inode = btrfs_iget(dir->i_sb, &location, sub_root); ++ inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL); + } + srcu_read_unlock(&root->fs_info->subvol_srcu, index); + +@@ -4501,7 +4523,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) + err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); + if (err) { + err = -ENOSPC; +- goto out_unlock; ++ goto out_fail; + } + + inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, +@@ -4979,6 +5001,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset) + { + struct extent_io_tree *tree; + struct btrfs_ordered_extent *ordered; ++ struct extent_state *cached_state = NULL; + u64 page_start = page_offset(page); + u64 page_end = page_start + PAGE_CACHE_SIZE - 1; + +@@ -4997,7 +5020,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset) + btrfs_releasepage(page, GFP_NOFS); + return; + } +- lock_extent(tree, page_start, page_end, GFP_NOFS); ++ lock_extent_bits(tree, page_start, page_end, 0, &cached_state, ++ GFP_NOFS); + ordered = btrfs_lookup_ordered_extent(page->mapping->host, + page_offset(page)); + if (ordered) { +@@ -5008,7 +5032,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset) + clear_extent_bit(tree, page_start, page_end, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0, +- NULL, GFP_NOFS); ++ &cached_state, GFP_NOFS); + /* + * whoever cleared the private bit is responsible + * for the finish_ordered_io +@@ -5018,11 +5042,13 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset) + page_start, page_end); + } + btrfs_put_ordered_extent(ordered); +- lock_extent(tree, page_start, page_end, GFP_NOFS); ++ cached_state = NULL; ++ lock_extent_bits(tree, page_start, page_end, 0, &cached_state, ++ GFP_NOFS); + } + clear_extent_bit(tree, page_start, page_end, + EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | +- EXTENT_DO_ACCOUNTING, 1, 1, NULL, GFP_NOFS); ++ EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS); + __btrfs_releasepage(page, GFP_NOFS); + + ClearPageChecked(page); +@@ -5055,6 +5081,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) + struct btrfs_root *root = BTRFS_I(inode)->root; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct btrfs_ordered_extent *ordered; ++ struct extent_state *cached_state = NULL; + char *kaddr; + unsigned long zero_start; + loff_t size; +@@ -5093,7 +5120,8 @@ again: + } + wait_on_page_writeback(page); + +- lock_extent(io_tree, page_start, page_end, GFP_NOFS); ++ lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, ++ GFP_NOFS); + set_page_extent_mapped(page); + + /* +@@ -5102,7 +5130,8 @@ again: + */ + ordered = btrfs_lookup_ordered_extent(inode, page_start); + if (ordered) { +- unlock_extent(io_tree, page_start, page_end, GFP_NOFS); ++ unlock_extent_cached(io_tree, page_start, page_end, ++ &cached_state, GFP_NOFS); + unlock_page(page); + btrfs_start_ordered_extent(inode, ordered, 1); + btrfs_put_ordered_extent(ordered); +@@ -5116,13 +5145,15 @@ again: + * is probably a better way to do this, but for now keep consistent with + * prepare_pages in the normal write path. 
+ */ +- clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, ++ clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, + EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, +- GFP_NOFS); ++ 0, 0, &cached_state, GFP_NOFS); + +- ret = btrfs_set_extent_delalloc(inode, page_start, page_end); ++ ret = btrfs_set_extent_delalloc(inode, page_start, page_end, ++ &cached_state); + if (ret) { +- unlock_extent(io_tree, page_start, page_end, GFP_NOFS); ++ unlock_extent_cached(io_tree, page_start, page_end, ++ &cached_state, GFP_NOFS); + ret = VM_FAULT_SIGBUS; + btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE); + goto out_unlock; +@@ -5148,7 +5179,7 @@ again: + BTRFS_I(inode)->last_trans = root->fs_info->generation; + BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; + +- unlock_extent(io_tree, page_start, page_end, GFP_NOFS); ++ unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); + + out_unlock: + btrfs_unreserve_metadata_for_delalloc(root, inode, 1); +@@ -5827,6 +5858,7 @@ stop_trans: + static long btrfs_fallocate(struct inode *inode, int mode, + loff_t offset, loff_t len) + { ++ struct extent_state *cached_state = NULL; + u64 cur_offset; + u64 last_byte; + u64 alloc_start; +@@ -5865,16 +5897,17 @@ static long btrfs_fallocate(struct inode *inode, int mode, + /* the extent lock is ordered inside the running + * transaction + */ +- lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, +- GFP_NOFS); ++ lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, ++ locked_end, 0, &cached_state, GFP_NOFS); + ordered = btrfs_lookup_first_ordered_extent(inode, + alloc_end - 1); + if (ordered && + ordered->file_offset + ordered->len > alloc_start && + ordered->file_offset < alloc_end) { + btrfs_put_ordered_extent(ordered); +- unlock_extent(&BTRFS_I(inode)->io_tree, +- alloc_start, locked_end, GFP_NOFS); ++ unlock_extent_cached(&BTRFS_I(inode)->io_tree, ++ alloc_start, locked_end, ++ &cached_state, GFP_NOFS); + /* + * we can't wait on the range with the transaction + * running or with the extent lock held +@@ -5916,8 +5949,8 @@ static long btrfs_fallocate(struct inode *inode, int mode, + break; + } + } +- unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, +- GFP_NOFS); ++ unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, ++ &cached_state, GFP_NOFS); + + btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode, + alloc_end - alloc_start); +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index 645a179..2845c6c 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -48,6 +48,7 @@ + #include "print-tree.h" + #include "volumes.h" + #include "locking.h" ++#include "ctree.h" + + /* Mask out flags that are inappropriate for the given type of inode. 
*/
+ static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
+@@ -474,7 +475,79 @@ out_unlock:
+ 	return error;
+ }
+ 
+-static int btrfs_defrag_file(struct file *file)
++static int should_defrag_range(struct inode *inode, u64 start, u64 len,
++			       int thresh, u64 *last_len, u64 *skip,
++			       u64 *defrag_end)
++{
++	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
++	struct extent_map *em = NULL;
++	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
++	int ret = 1;
++
++
++	if (thresh == 0)
++		thresh = 256 * 1024;
++
++	/*
++	 * make sure that once we start defragging an extent, we keep on
++	 * defragging it
++	 */
++	if (start < *defrag_end)
++		return 1;
++
++	*skip = 0;
++
++	/*
++	 * hopefully we have this extent in the tree already, try without
++	 * the full extent lock
++	 */
++	read_lock(&em_tree->lock);
++	em = lookup_extent_mapping(em_tree, start, len);
++	read_unlock(&em_tree->lock);
++
++	if (!em) {
++		/* get the big lock and read metadata off disk */
++		lock_extent(io_tree, start, start + len - 1, GFP_NOFS);
++		em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
++		unlock_extent(io_tree, start, start + len - 1, GFP_NOFS);
++
++		if (!em)
++			return 0;
++	}
++
++	/* this will cover holes, and inline extents */
++	if (em->block_start >= EXTENT_MAP_LAST_BYTE)
++		ret = 0;
++
++	/*
++	 * we hit a real extent, if it is big don't bother defragging it again
++	 */
++	if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh)
++		ret = 0;
++
++	/*
++	 * last_len ends up being a counter of how many bytes we've defragged.
++	 * every time we choose not to defrag an extent, we reset *last_len
++	 * so that the next tiny extent will force a defrag.
++	 *
++	 * The end result of this is that tiny extents before a single big
++	 * extent will force at least part of that big extent to be defragged.
++ */ ++ if (ret) { ++ *last_len += len; ++ *defrag_end = extent_map_end(em); ++ } else { ++ *last_len = 0; ++ *skip = extent_map_end(em); ++ *defrag_end = 0; ++ } ++ ++ free_extent_map(em); ++ return ret; ++} ++ ++static int btrfs_defrag_file(struct file *file, ++ struct btrfs_ioctl_defrag_range_args *range) + { + struct inode *inode = fdentry(file)->d_inode; + struct btrfs_root *root = BTRFS_I(inode)->root; +@@ -486,37 +559,96 @@ static int btrfs_defrag_file(struct file *file) + unsigned long total_read = 0; + u64 page_start; + u64 page_end; ++ u64 last_len = 0; ++ u64 skip = 0; ++ u64 defrag_end = 0; + unsigned long i; + int ret; + +- ret = btrfs_check_data_free_space(root, inode, inode->i_size); +- if (ret) +- return -ENOSPC; ++ if (inode->i_size == 0) ++ return 0; ++ ++ if (range->start + range->len > range->start) { ++ last_index = min_t(u64, inode->i_size - 1, ++ range->start + range->len - 1) >> PAGE_CACHE_SHIFT; ++ } else { ++ last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT; ++ } ++ ++ i = range->start >> PAGE_CACHE_SHIFT; ++ while (i <= last_index) { ++ if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, ++ PAGE_CACHE_SIZE, ++ range->extent_thresh, ++ &last_len, &skip, ++ &defrag_end)) { ++ unsigned long next; ++ /* ++ * the should_defrag function tells us how much to skip ++ * bump our counter by the suggested amount ++ */ ++ next = (skip + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; ++ i = max(i + 1, next); ++ continue; ++ } + +- mutex_lock(&inode->i_mutex); +- last_index = inode->i_size >> PAGE_CACHE_SHIFT; +- for (i = 0; i <= last_index; i++) { + if (total_read % ra_pages == 0) { + btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i, + min(last_index, i + ra_pages - 1)); + } + total_read++; ++ mutex_lock(&inode->i_mutex); ++ if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) ++ BTRFS_I(inode)->force_compress = 1; ++ ++ ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE); ++ if (ret) { ++ ret = -ENOSPC; ++ break; ++ } ++ ++ ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1); ++ if (ret) { ++ btrfs_free_reserved_data_space(root, inode, ++ PAGE_CACHE_SIZE); ++ ret = -ENOSPC; ++ break; ++ } + again: ++ if (inode->i_size == 0 || ++ i > ((inode->i_size - 1) >> PAGE_CACHE_SHIFT)) { ++ ret = 0; ++ goto err_reservations; ++ } ++ + page = grab_cache_page(inode->i_mapping, i); + if (!page) +- goto out_unlock; ++ goto err_reservations; ++ + if (!PageUptodate(page)) { + btrfs_readpage(NULL, page); + lock_page(page); + if (!PageUptodate(page)) { + unlock_page(page); + page_cache_release(page); +- goto out_unlock; ++ goto err_reservations; + } + } + ++ if (page->mapping != inode->i_mapping) { ++ unlock_page(page); ++ page_cache_release(page); ++ goto again; ++ } ++ + wait_on_page_writeback(page); + ++ if (PageDirty(page)) { ++ btrfs_free_reserved_data_space(root, inode, ++ PAGE_CACHE_SIZE); ++ goto loop_unlock; ++ } ++ + page_start = (u64)page->index << PAGE_CACHE_SHIFT; + page_end = page_start + PAGE_CACHE_SIZE - 1; + lock_extent(io_tree, page_start, page_end, GFP_NOFS); +@@ -537,18 +669,54 @@ again: + * page if it is dirtied again later + */ + clear_page_dirty_for_io(page); ++ clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, ++ page_end, EXTENT_DIRTY | EXTENT_DELALLOC | ++ EXTENT_DO_ACCOUNTING, GFP_NOFS); + +- btrfs_set_extent_delalloc(inode, page_start, page_end); ++ btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); ++ ClearPageChecked(page); + set_page_dirty(page); + unlock_extent(io_tree, page_start, page_end, GFP_NOFS); ++ ++loop_unlock: 
+ unlock_page(page); + page_cache_release(page); ++ mutex_unlock(&inode->i_mutex); ++ ++ btrfs_unreserve_metadata_for_delalloc(root, inode, 1); + balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1); ++ i++; ++ } ++ ++ if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) ++ filemap_flush(inode->i_mapping); ++ ++ if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) { ++ /* the filemap_flush will queue IO into the worker threads, but ++ * we have to make sure the IO is actually started and that ++ * ordered extents get created before we return ++ */ ++ atomic_inc(&root->fs_info->async_submit_draining); ++ while (atomic_read(&root->fs_info->nr_async_submits) || ++ atomic_read(&root->fs_info->async_delalloc_pages)) { ++ wait_event(root->fs_info->async_submit_wait, ++ (atomic_read(&root->fs_info->nr_async_submits) == 0 && ++ atomic_read(&root->fs_info->async_delalloc_pages) == 0)); ++ } ++ atomic_dec(&root->fs_info->async_submit_draining); ++ ++ mutex_lock(&inode->i_mutex); ++ BTRFS_I(inode)->force_compress = 0; ++ mutex_unlock(&inode->i_mutex); + } + +-out_unlock: +- mutex_unlock(&inode->i_mutex); + return 0; ++ ++err_reservations: ++ mutex_unlock(&inode->i_mutex); ++ btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE); ++ btrfs_unreserve_metadata_for_delalloc(root, inode, 1); ++ return ret; + } + + static noinline int btrfs_ioctl_resize(struct btrfs_root *root, +@@ -608,7 +776,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, + mod = 1; + sizestr++; + } +- new_size = btrfs_parse_size(sizestr); ++ new_size = memparse(sizestr, NULL); + if (new_size == 0) { + ret = -EINVAL; + goto out_unlock; +@@ -743,6 +911,327 @@ out: + return ret; + } + ++static noinline int key_in_sk(struct btrfs_key *key, ++ struct btrfs_ioctl_search_key *sk) ++{ ++ struct btrfs_key test; ++ int ret; ++ ++ test.objectid = sk->min_objectid; ++ test.type = sk->min_type; ++ test.offset = sk->min_offset; ++ ++ ret = btrfs_comp_cpu_keys(key, &test); ++ if (ret < 0) ++ return 0; ++ ++ test.objectid = sk->max_objectid; ++ test.type = sk->max_type; ++ test.offset = sk->max_offset; ++ ++ ret = btrfs_comp_cpu_keys(key, &test); ++ if (ret > 0) ++ return 0; ++ return 1; ++} ++ ++static noinline int copy_to_sk(struct btrfs_root *root, ++ struct btrfs_path *path, ++ struct btrfs_key *key, ++ struct btrfs_ioctl_search_key *sk, ++ char *buf, ++ unsigned long *sk_offset, ++ int *num_found) ++{ ++ u64 found_transid; ++ struct extent_buffer *leaf; ++ struct btrfs_ioctl_search_header sh; ++ unsigned long item_off; ++ unsigned long item_len; ++ int nritems; ++ int i; ++ int slot; ++ int found = 0; ++ int ret = 0; ++ ++ leaf = path->nodes[0]; ++ slot = path->slots[0]; ++ nritems = btrfs_header_nritems(leaf); ++ ++ if (btrfs_header_generation(leaf) > sk->max_transid) { ++ i = nritems; ++ goto advance_key; ++ } ++ found_transid = btrfs_header_generation(leaf); ++ ++ for (i = slot; i < nritems; i++) { ++ item_off = btrfs_item_ptr_offset(leaf, i); ++ item_len = btrfs_item_size_nr(leaf, i); ++ ++ if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE) ++ item_len = 0; ++ ++ if (sizeof(sh) + item_len + *sk_offset > ++ BTRFS_SEARCH_ARGS_BUFSIZE) { ++ ret = 1; ++ goto overflow; ++ } ++ ++ btrfs_item_key_to_cpu(leaf, key, i); ++ if (!key_in_sk(key, sk)) ++ continue; ++ ++ sh.objectid = key->objectid; ++ sh.offset = key->offset; ++ sh.type = key->type; ++ sh.len = item_len; ++ sh.transid = found_transid; ++ ++ /* copy search result header */ ++ memcpy(buf + *sk_offset, &sh, sizeof(sh)); ++ *sk_offset += sizeof(sh); ++ ++ if (item_len) { ++ char 
*p = buf + *sk_offset;
++			/* copy the item */
++			read_extent_buffer(leaf, p,
++					   item_off, item_len);
++			*sk_offset += item_len;
++		}
++		found++;
++
++		if (*num_found >= sk->nr_items)
++			break;
++	}
++advance_key:
++	ret = 0;
++	if (key->offset < (u64)-1 && key->offset < sk->max_offset)
++		key->offset++;
++	else if (key->type < (u8)-1 && key->type < sk->max_type) {
++		key->offset = 0;
++		key->type++;
++	} else if (key->objectid < (u64)-1 && key->objectid < sk->max_objectid) {
++		key->offset = 0;
++		key->type = 0;
++		key->objectid++;
++	} else
++		ret = 1;
++overflow:
++	*num_found += found;
++	return ret;
++}
++
++static noinline int search_ioctl(struct inode *inode,
++				 struct btrfs_ioctl_search_args *args)
++{
++	struct btrfs_root *root;
++	struct btrfs_key key;
++	struct btrfs_key max_key;
++	struct btrfs_path *path;
++	struct btrfs_ioctl_search_key *sk = &args->key;
++	struct btrfs_fs_info *info = BTRFS_I(inode)->root->fs_info;
++	int ret;
++	int num_found = 0;
++	unsigned long sk_offset = 0;
++
++	path = btrfs_alloc_path();
++	if (!path)
++		return -ENOMEM;
++
++	if (sk->tree_id == 0) {
++		/* search the root of the inode that was passed */
++		root = BTRFS_I(inode)->root;
++	} else {
++		key.objectid = sk->tree_id;
++		key.type = BTRFS_ROOT_ITEM_KEY;
++		key.offset = (u64)-1;
++		root = btrfs_read_fs_root_no_name(info, &key);
++		if (IS_ERR(root)) {
++			printk(KERN_ERR "could not find root %llu\n",
++			       sk->tree_id);
++			btrfs_free_path(path);
++			return -ENOENT;
++		}
++	}
++
++	key.objectid = sk->min_objectid;
++	key.type = sk->min_type;
++	key.offset = sk->min_offset;
++
++	max_key.objectid = sk->max_objectid;
++	max_key.type = sk->max_type;
++	max_key.offset = sk->max_offset;
++
++	path->keep_locks = 1;
++
++	while(1) {
++		ret = btrfs_search_forward(root, &key, &max_key, path, 0,
++					   sk->min_transid);
++		if (ret != 0) {
++			if (ret > 0)
++				ret = 0;
++			goto err;
++		}
++		ret = copy_to_sk(root, path, &key, sk, args->buf,
++				 &sk_offset, &num_found);
++		btrfs_release_path(root, path);
++		if (ret || num_found >= sk->nr_items)
++			break;
++
++	}
++	ret = 0;
++err:
++	sk->nr_items = num_found;
++	btrfs_free_path(path);
++	return ret;
++}
++
++static noinline int btrfs_ioctl_tree_search(struct file *file,
++					    void __user *argp)
++{
++	struct btrfs_ioctl_search_args *args;
++	struct inode *inode;
++	int ret;
++
++	if (!capable(CAP_SYS_ADMIN))
++		return -EPERM;
++
++	args = kmalloc(sizeof(*args), GFP_KERNEL);
++	if (!args)
++		return -ENOMEM;
++
++	if (copy_from_user(args, argp, sizeof(*args))) {
++		kfree(args);
++		return -EFAULT;
++	}
++	inode = fdentry(file)->d_inode;
++	ret = search_ioctl(inode, args);
++	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
++		ret = -EFAULT;
++	kfree(args);
++	return ret;
++}
++
++/*
++ * Search INODE_REFs to identify path name of 'dirid' directory
++ * in a 'tree_id' tree, and store the path name in 'name'.
++ */ ++static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, ++ u64 tree_id, u64 dirid, char *name) ++{ ++ struct btrfs_root *root; ++ struct btrfs_key key; ++ char *ptr; ++ int ret = -1; ++ int slot; ++ int len; ++ int total_len = 0; ++ struct btrfs_inode_ref *iref; ++ struct extent_buffer *l; ++ struct btrfs_path *path; ++ ++ if (dirid == BTRFS_FIRST_FREE_OBJECTID) { ++ name[0]='\0'; ++ return 0; ++ } ++ ++ path = btrfs_alloc_path(); ++ if (!path) ++ return -ENOMEM; ++ ++ ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX]; ++ ++ key.objectid = tree_id; ++ key.type = BTRFS_ROOT_ITEM_KEY; ++ key.offset = (u64)-1; ++ root = btrfs_read_fs_root_no_name(info, &key); ++ if (IS_ERR(root)) { ++ printk(KERN_ERR "could not find root %llu\n", tree_id); ++ ret = -ENOENT; ++ goto out; ++ } ++ ++ key.objectid = dirid; ++ key.type = BTRFS_INODE_REF_KEY; ++ key.offset = (u64)-1; ++ ++ while(1) { ++ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); ++ if (ret < 0) ++ goto out; ++ ++ l = path->nodes[0]; ++ slot = path->slots[0]; ++ if (ret > 0 && slot > 0) ++ slot--; ++ btrfs_item_key_to_cpu(l, &key, slot); ++ ++ if (ret > 0 && (key.objectid != dirid || ++ key.type != BTRFS_INODE_REF_KEY)) { ++ ret = -ENOENT; ++ goto out; ++ } ++ ++ iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref); ++ len = btrfs_inode_ref_name_len(l, iref); ++ ptr -= len + 1; ++ total_len += len + 1; ++ if (ptr < name) ++ goto out; ++ ++ *(ptr + len) = '/'; ++ read_extent_buffer(l, ptr,(unsigned long)(iref + 1), len); ++ ++ if (key.offset == BTRFS_FIRST_FREE_OBJECTID) ++ break; ++ ++ btrfs_release_path(root, path); ++ key.objectid = key.offset; ++ key.offset = (u64)-1; ++ dirid = key.objectid; ++ ++ } ++ if (ptr < name) ++ goto out; ++ memcpy(name, ptr, total_len); ++ name[total_len]='\0'; ++ ret = 0; ++out: ++ btrfs_free_path(path); ++ return ret; ++} ++ ++static noinline int btrfs_ioctl_ino_lookup(struct file *file, ++ void __user *argp) ++{ ++ struct btrfs_ioctl_ino_lookup_args *args; ++ struct inode *inode; ++ int ret; ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ args = kmalloc(sizeof(*args), GFP_KERNEL); ++ if (copy_from_user(args, argp, sizeof(*args))) { ++ kfree(args); ++ return -EFAULT; ++ } ++ inode = fdentry(file)->d_inode; ++ ++ if (args->treeid == 0) ++ args->treeid = BTRFS_I(inode)->root->root_key.objectid; ++ ++ ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info, ++ args->treeid, args->objectid, ++ args->name); ++ ++ if (ret == 0 && copy_to_user(argp, args, sizeof(*args))) ++ ret = -EFAULT; ++ ++ kfree(args); ++ return ret; ++} ++ + static noinline int btrfs_ioctl_snap_destroy(struct file *file, + void __user *arg) + { +@@ -849,10 +1338,11 @@ out: + return err; + } + +-static int btrfs_ioctl_defrag(struct file *file) ++static int btrfs_ioctl_defrag(struct file *file, void __user *argp) + { + struct inode *inode = fdentry(file)->d_inode; + struct btrfs_root *root = BTRFS_I(inode)->root; ++ struct btrfs_ioctl_defrag_range_args *range; + int ret; + + ret = mnt_want_write(file->f_path.mnt); +@@ -873,7 +1363,30 @@ static int btrfs_ioctl_defrag(struct file *file) + ret = -EINVAL; + goto out; + } +- btrfs_defrag_file(file); ++ ++ range = kzalloc(sizeof(*range), GFP_KERNEL); ++ if (!range) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ if (argp) { ++ if (copy_from_user(range, argp, ++ sizeof(*range))) { ++ ret = -EFAULT; ++ kfree(range); ++ } ++ /* compression requires us to start the IO */ ++ if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) { ++ range->flags |= BTRFS_DEFRAG_RANGE_START_IO; 
++ range->extent_thresh = (u32)-1; ++ } ++ } else { ++ /* the rest are all set to zero by kzalloc */ ++ range->len = (u64)-1; ++ } ++ btrfs_defrag_file(file, range); ++ kfree(range); + break; + } + out: +@@ -1274,6 +1787,157 @@ out: + return ret; + } + ++static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) ++{ ++ struct inode *inode = fdentry(file)->d_inode; ++ struct btrfs_root *root = BTRFS_I(inode)->root; ++ struct btrfs_root *new_root; ++ struct btrfs_dir_item *di; ++ struct btrfs_trans_handle *trans; ++ struct btrfs_path *path; ++ struct btrfs_key location; ++ struct btrfs_disk_key disk_key; ++ struct btrfs_super_block *disk_super; ++ u64 features; ++ u64 objectid = 0; ++ u64 dir_id; ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ if (copy_from_user(&objectid, argp, sizeof(objectid))) ++ return -EFAULT; ++ ++ if (!objectid) ++ objectid = root->root_key.objectid; ++ ++ location.objectid = objectid; ++ location.type = BTRFS_ROOT_ITEM_KEY; ++ location.offset = (u64)-1; ++ ++ new_root = btrfs_read_fs_root_no_name(root->fs_info, &location); ++ if (IS_ERR(new_root)) ++ return PTR_ERR(new_root); ++ ++ if (btrfs_root_refs(&new_root->root_item) == 0) ++ return -ENOENT; ++ ++ path = btrfs_alloc_path(); ++ if (!path) ++ return -ENOMEM; ++ path->leave_spinning = 1; ++ ++ trans = btrfs_start_transaction(root, 1); ++ if (!trans) { ++ btrfs_free_path(path); ++ return -ENOMEM; ++ } ++ ++ dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); ++ di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path, ++ dir_id, "default", 7, 1); ++ if (!di) { ++ btrfs_free_path(path); ++ btrfs_end_transaction(trans, root); ++ printk(KERN_ERR "Umm, you don't have the default dir item, " ++ "this isn't going to work\n"); ++ return -ENOENT; ++ } ++ ++ btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key); ++ btrfs_set_dir_item_key(path->nodes[0], di, &disk_key); ++ btrfs_mark_buffer_dirty(path->nodes[0]); ++ btrfs_free_path(path); ++ ++ disk_super = &root->fs_info->super_copy; ++ features = btrfs_super_incompat_flags(disk_super); ++ if (!(features & BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL)) { ++ features |= BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL; ++ btrfs_set_super_incompat_flags(disk_super, features); ++ } ++ btrfs_end_transaction(trans, root); ++ ++ return 0; ++} ++ ++long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) ++{ ++ struct btrfs_ioctl_space_args space_args; ++ struct btrfs_ioctl_space_info space; ++ struct btrfs_ioctl_space_info *dest; ++ struct btrfs_ioctl_space_info *dest_orig; ++ struct btrfs_ioctl_space_info *user_dest; ++ struct btrfs_space_info *info; ++ int alloc_size; ++ int ret = 0; ++ int slot_count = 0; ++ ++ if (copy_from_user(&space_args, ++ (struct btrfs_ioctl_space_args __user *)arg, ++ sizeof(space_args))) ++ return -EFAULT; ++ ++ /* first we count slots */ ++ rcu_read_lock(); ++ list_for_each_entry_rcu(info, &root->fs_info->space_info, list) ++ slot_count++; ++ rcu_read_unlock(); ++ ++ /* space_slots == 0 means they are asking for a count */ ++ if (space_args.space_slots == 0) { ++ space_args.total_spaces = slot_count; ++ goto out; ++ } ++ alloc_size = sizeof(*dest) * slot_count; ++ /* we generally have at most 6 or so space infos, one for each raid ++ * level. 
So, a whole page should be more than enough for everyone ++ */ ++ if (alloc_size > PAGE_CACHE_SIZE) ++ return -ENOMEM; ++ ++ space_args.total_spaces = 0; ++ dest = kmalloc(alloc_size, GFP_NOFS); ++ if (!dest) ++ return -ENOMEM; ++ dest_orig = dest; ++ ++ /* now we have a buffer to copy into */ ++ rcu_read_lock(); ++ list_for_each_entry_rcu(info, &root->fs_info->space_info, list) { ++ /* make sure we don't copy more than we allocated ++ * in our buffer ++ */ ++ if (slot_count == 0) ++ break; ++ slot_count--; ++ ++ /* make sure userland has enough room in their buffer */ ++ if (space_args.total_spaces >= space_args.space_slots) ++ break; ++ ++ space.flags = info->flags; ++ space.total_bytes = info->total_bytes; ++ space.used_bytes = info->bytes_used; ++ memcpy(dest, &space, sizeof(space)); ++ dest++; ++ space_args.total_spaces++; ++ } ++ rcu_read_unlock(); ++ ++ user_dest = (struct btrfs_ioctl_space_info *) ++ (arg + sizeof(struct btrfs_ioctl_space_args)); ++ ++ if (copy_to_user(user_dest, dest_orig, alloc_size)) ++ ret = -EFAULT; ++ ++ kfree(dest_orig); ++out: ++ if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args))) ++ ret = -EFAULT; ++ ++ return ret; ++} ++ + /* + * there are many ways the trans_start and trans_end ioctls can lead + * to deadlocks. They should only be used by applications that +@@ -1320,8 +1984,12 @@ long btrfs_ioctl(struct file *file, unsigned int + return btrfs_ioctl_snap_create(file, argp, 1); + case BTRFS_IOC_SNAP_DESTROY: + return btrfs_ioctl_snap_destroy(file, argp); ++ case BTRFS_IOC_DEFAULT_SUBVOL: ++ return btrfs_ioctl_default_subvol(file, argp); + case BTRFS_IOC_DEFRAG: +- return btrfs_ioctl_defrag(file); ++ return btrfs_ioctl_defrag(file, NULL); ++ case BTRFS_IOC_DEFRAG_RANGE: ++ return btrfs_ioctl_defrag(file, argp); + case BTRFS_IOC_RESIZE: + return btrfs_ioctl_resize(root, argp); + case BTRFS_IOC_ADD_DEV: +@@ -1338,6 +2006,12 @@ long btrfs_ioctl(struct file *file, unsigned int + return btrfs_ioctl_trans_start(file); + case BTRFS_IOC_TRANS_END: + return btrfs_ioctl_trans_end(file); ++ case BTRFS_IOC_TREE_SEARCH: ++ return btrfs_ioctl_tree_search(file, argp); ++ case BTRFS_IOC_INO_LOOKUP: ++ return btrfs_ioctl_ino_lookup(file, argp); ++ case BTRFS_IOC_SPACE_INFO: ++ return btrfs_ioctl_space_info(root, argp); + case BTRFS_IOC_SYNC: + btrfs_sync_fs(file->f_dentry->d_sb, 1); + return 0; +diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h +index bc49914..424694a 100644 +--- a/fs/btrfs/ioctl.h ++++ b/fs/btrfs/ioctl.h +@@ -30,12 +30,114 @@ struct btrfs_ioctl_vol_args { + char name[BTRFS_PATH_NAME_MAX + 1]; + }; + ++#define BTRFS_INO_LOOKUP_PATH_MAX 4080 ++struct btrfs_ioctl_ino_lookup_args { ++ __u64 treeid; ++ __u64 objectid; ++ char name[BTRFS_INO_LOOKUP_PATH_MAX]; ++}; ++ ++struct btrfs_ioctl_search_key { ++ /* which root are we searching. 
0 is the tree of tree roots */ ++ __u64 tree_id; ++ ++ /* keys returned will be >= min and <= max */ ++ __u64 min_objectid; ++ __u64 max_objectid; ++ ++ /* keys returned will be >= min and <= max */ ++ __u64 min_offset; ++ __u64 max_offset; ++ ++ /* max and min transids to search for */ ++ __u64 min_transid; ++ __u64 max_transid; ++ ++ /* keys returned will be >= min and <= max */ ++ __u32 min_type; ++ __u32 max_type; ++ ++ /* ++ * how many items did userland ask for, and how many are we ++ * returning ++ */ ++ __u32 nr_items; ++ ++ /* align to 64 bits */ ++ __u32 unused; ++ ++ /* some extra for later */ ++ __u64 unused1; ++ __u64 unused2; ++ __u64 unused3; ++ __u64 unused4; ++}; ++ ++struct btrfs_ioctl_search_header { ++ __u64 transid; ++ __u64 objectid; ++ __u64 offset; ++ __u32 type; ++ __u32 len; ++}; ++ ++#define BTRFS_SEARCH_ARGS_BUFSIZE (4096 - sizeof(struct btrfs_ioctl_search_key)) ++/* ++ * the buf is an array of search headers where ++ * each header is followed by the actual item ++ * the type field is expanded to 32 bits for alignment ++ */ ++struct btrfs_ioctl_search_args { ++ struct btrfs_ioctl_search_key key; ++ char buf[BTRFS_SEARCH_ARGS_BUFSIZE]; ++}; ++ + struct btrfs_ioctl_clone_range_args { + __s64 src_fd; + __u64 src_offset, src_length; + __u64 dest_offset; + }; + ++/* flags for the defrag range ioctl */ ++#define BTRFS_DEFRAG_RANGE_COMPRESS 1 ++#define BTRFS_DEFRAG_RANGE_START_IO 2 ++ ++struct btrfs_ioctl_defrag_range_args { ++ /* start of the defrag operation */ ++ __u64 start; ++ ++ /* number of bytes to defrag, use (u64)-1 to say all */ ++ __u64 len; ++ ++ /* ++ * flags for the operation, which can include turning ++ * on compression for this one defrag ++ */ ++ __u64 flags; ++ ++ /* ++ * any extent bigger than this will be considered ++ * already defragged. 
Use 0 to take the kernel default ++ * Use 1 to say every single extent must be rewritten ++ */ ++ __u32 extent_thresh; ++ ++ /* spare for later */ ++ __u32 unused[5]; ++}; ++ ++struct btrfs_ioctl_space_info { ++ __u64 flags; ++ __u64 total_bytes; ++ __u64 used_bytes; ++}; ++ ++struct btrfs_ioctl_space_args { ++ __u64 space_slots; ++ __u64 total_spaces; ++ struct btrfs_ioctl_space_info spaces[0]; ++}; ++ + #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \ + struct btrfs_ioctl_vol_args) + #define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \ +@@ -67,4 +169,13 @@ struct btrfs_ioctl_clone_range_args { + struct btrfs_ioctl_vol_args) + #define BTRFS_IOC_SNAP_DESTROY _IOW(BTRFS_IOCTL_MAGIC, 15, \ + struct btrfs_ioctl_vol_args) ++#define BTRFS_IOC_DEFRAG_RANGE _IOW(BTRFS_IOCTL_MAGIC, 16, \ ++ struct btrfs_ioctl_defrag_range_args) ++#define BTRFS_IOC_TREE_SEARCH _IOWR(BTRFS_IOCTL_MAGIC, 17, \ ++ struct btrfs_ioctl_search_args) ++#define BTRFS_IOC_INO_LOOKUP _IOWR(BTRFS_IOCTL_MAGIC, 18, \ ++ struct btrfs_ioctl_ino_lookup_args) ++#define BTRFS_IOC_DEFAULT_SUBVOL _IOW(BTRFS_IOCTL_MAGIC, 19, u64) ++#define BTRFS_IOC_SPACE_INFO _IOWR(BTRFS_IOCTL_MAGIC, 20, \ ++ struct btrfs_ioctl_space_args) + #endif +diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c +index 5c2a9e7..a8ffecd 100644 +--- a/fs/btrfs/ordered-data.c ++++ b/fs/btrfs/ordered-data.c +@@ -174,7 +174,6 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, + if (!entry) + return -ENOMEM; + +- mutex_lock(&tree->mutex); + entry->file_offset = file_offset; + entry->start = start; + entry->len = len; +@@ -190,16 +189,17 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, + INIT_LIST_HEAD(&entry->list); + INIT_LIST_HEAD(&entry->root_extent_list); + ++ spin_lock(&tree->lock); + node = tree_insert(&tree->tree, file_offset, + &entry->rb_node); + BUG_ON(node); ++ spin_unlock(&tree->lock); + + spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); + list_add_tail(&entry->root_extent_list, + &BTRFS_I(inode)->root->fs_info->ordered_extents); + spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); + +- mutex_unlock(&tree->mutex); + BUG_ON(node); + return 0; + } +@@ -216,9 +216,9 @@ int btrfs_add_ordered_sum(struct inode *inode, + struct btrfs_ordered_inode_tree *tree; + + tree = &BTRFS_I(inode)->ordered_tree; +- mutex_lock(&tree->mutex); ++ spin_lock(&tree->lock); + list_add_tail(&sum->list, &entry->list); +- mutex_unlock(&tree->mutex); ++ spin_unlock(&tree->lock); + return 0; + } + +@@ -232,15 +232,16 @@ int btrfs_add_ordered_sum(struct inode *inode, + * to make sure this function only returns 1 once for a given ordered extent. 
+ */ + int btrfs_dec_test_ordered_pending(struct inode *inode, ++ struct btrfs_ordered_extent **cached, + u64 file_offset, u64 io_size) + { + struct btrfs_ordered_inode_tree *tree; + struct rb_node *node; +- struct btrfs_ordered_extent *entry; ++ struct btrfs_ordered_extent *entry = NULL; + int ret; + + tree = &BTRFS_I(inode)->ordered_tree; +- mutex_lock(&tree->mutex); ++ spin_lock(&tree->lock); + node = tree_search(tree, file_offset); + if (!node) { + ret = 1; +@@ -264,7 +265,11 @@ int btrfs_dec_test_ordered_pending(struct inode *inode, + else + ret = 1; + out: +- mutex_unlock(&tree->mutex); ++ if (!ret && cached && entry) { ++ *cached = entry; ++ atomic_inc(&entry->refs); ++ } ++ spin_unlock(&tree->lock); + return ret == 0; + } + +@@ -291,7 +296,7 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) + + /* + * remove an ordered extent from the tree. No references are dropped +- * and you must wake_up entry->wait. You must hold the tree mutex ++ * and you must wake_up entry->wait. You must hold the tree lock + * while you call this function. + */ + static int __btrfs_remove_ordered_extent(struct inode *inode, +@@ -340,9 +345,9 @@ int btrfs_remove_ordered_extent(struct inode *inode, + int ret; + + tree = &BTRFS_I(inode)->ordered_tree; +- mutex_lock(&tree->mutex); ++ spin_lock(&tree->lock); + ret = __btrfs_remove_ordered_extent(inode, entry); +- mutex_unlock(&tree->mutex); ++ spin_unlock(&tree->lock); + wake_up(&entry->wait); + + return ret; +@@ -567,7 +572,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, + struct btrfs_ordered_extent *entry = NULL; + + tree = &BTRFS_I(inode)->ordered_tree; +- mutex_lock(&tree->mutex); ++ spin_lock(&tree->lock); + node = tree_search(tree, file_offset); + if (!node) + goto out; +@@ -578,7 +583,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, + if (entry) + atomic_inc(&entry->refs); + out: +- mutex_unlock(&tree->mutex); ++ spin_unlock(&tree->lock); + return entry; + } + +@@ -594,7 +599,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset) + struct btrfs_ordered_extent *entry = NULL; + + tree = &BTRFS_I(inode)->ordered_tree; +- mutex_lock(&tree->mutex); ++ spin_lock(&tree->lock); + node = tree_search(tree, file_offset); + if (!node) + goto out; +@@ -602,7 +607,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset) + entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); + atomic_inc(&entry->refs); + out: +- mutex_unlock(&tree->mutex); ++ spin_unlock(&tree->lock); + return entry; + } + +@@ -629,7 +634,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, + else + offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize); + +- mutex_lock(&tree->mutex); ++ spin_lock(&tree->lock); + disk_i_size = BTRFS_I(inode)->disk_i_size; + + /* truncate file */ +@@ -735,7 +740,7 @@ out: + */ + if (ordered) + __btrfs_remove_ordered_extent(inode, ordered); +- mutex_unlock(&tree->mutex); ++ spin_unlock(&tree->lock); + if (ordered) + wake_up(&ordered->wait); + return ret; +@@ -762,7 +767,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, + if (!ordered) + return 1; + +- mutex_lock(&tree->mutex); ++ spin_lock(&tree->lock); + list_for_each_entry_reverse(ordered_sum, &ordered->list, list) { + if (disk_bytenr >= ordered_sum->bytenr) { + num_sectors = ordered_sum->len / sectorsize; +@@ -777,7 +782,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, + } + } + out: +- 
mutex_unlock(&tree->mutex); ++ spin_unlock(&tree->lock); + btrfs_put_ordered_extent(ordered); + return ret; + } +diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h +index 1fe1282..c82f76a 100644 +--- a/fs/btrfs/ordered-data.h ++++ b/fs/btrfs/ordered-data.h +@@ -21,7 +21,7 @@ + + /* one of these per inode */ + struct btrfs_ordered_inode_tree { +- struct mutex mutex; ++ spinlock_t lock; + struct rb_root tree; + struct rb_node *last; + }; +@@ -128,8 +128,8 @@ static inline int btrfs_ordered_sum_size(struct btrfs_root *root, + static inline void + btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t) + { +- mutex_init(&t->mutex); +- t->tree.rb_node = NULL; ++ spin_lock_init(&t->lock); ++ t->tree = RB_ROOT; + t->last = NULL; + } + +@@ -137,7 +137,8 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry); + int btrfs_remove_ordered_extent(struct inode *inode, + struct btrfs_ordered_extent *entry); + int btrfs_dec_test_ordered_pending(struct inode *inode, +- u64 file_offset, u64 io_size); ++ struct btrfs_ordered_extent **cached, ++ u64 file_offset, u64 io_size); + int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, + u64 start, u64 len, u64 disk_len, int tyep); + int btrfs_add_ordered_sum(struct inode *inode, +diff --git a/fs/btrfs/ref-cache.h b/fs/btrfs/ref-cache.h +index bc283ad..e2a55cb 100644 +--- a/fs/btrfs/ref-cache.h ++++ b/fs/btrfs/ref-cache.h +@@ -52,7 +52,7 @@ static inline size_t btrfs_leaf_ref_size(int nr_extents) + + static inline void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree) + { +- tree->root.rb_node = NULL; ++ tree->root = RB_ROOT; + INIT_LIST_HEAD(&tree->list); + spin_lock_init(&tree->lock); + } +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c +index ab7ab53..0b23942 100644 +--- a/fs/btrfs/relocation.c ++++ b/fs/btrfs/relocation.c +@@ -170,14 +170,14 @@ struct async_merge { + + static void mapping_tree_init(struct mapping_tree *tree) + { +- tree->rb_root.rb_node = NULL; ++ tree->rb_root = RB_ROOT; + spin_lock_init(&tree->lock); + } + + static void backref_cache_init(struct backref_cache *cache) + { + int i; +- cache->rb_root.rb_node = NULL; ++ cache->rb_root = RB_ROOT; + for (i = 0; i < BTRFS_MAX_LEVEL; i++) + INIT_LIST_HEAD(&cache->pending[i]); + spin_lock_init(&cache->lock); +@@ -2659,7 +2659,7 @@ static int relocate_file_extent_cluster(struct inode *inode, + EXTENT_BOUNDARY, GFP_NOFS); + nr++; + } +- btrfs_set_extent_delalloc(inode, page_start, page_end); ++ btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); + + set_page_dirty(page); + dirty_page++; +@@ -3487,7 +3487,7 @@ static struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, + key.objectid = objectid; + key.type = BTRFS_INODE_ITEM_KEY; + key.offset = 0; +- inode = btrfs_iget(root->fs_info->sb, &key, root); ++ inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); + BUG_ON(IS_ERR(inode) || is_bad_inode(inode)); + BTRFS_I(inode)->index_cnt = group->key.objectid; + +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c +index 8a1ea6e..9ac612e 100644 +--- a/fs/btrfs/super.c ++++ b/fs/btrfs/super.c +@@ -63,10 +63,10 @@ static void btrfs_put_super(struct super_block *sb) + } + + enum { +- Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow, +- Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, +- Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, +- Opt_compress, Opt_compress_force, Opt_notreelog, Opt_ratio, ++ Opt_degraded, Opt_subvol, Opt_subvolid, Opt_device, Opt_nodatasum, ++ 
Opt_nodatacow, Opt_max_extent, Opt_max_inline, Opt_alloc_start, ++ Opt_nobarrier, Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, ++ Opt_noacl, Opt_compress, Opt_compress_force, Opt_notreelog, Opt_ratio, + Opt_flushoncommit, + Opt_discard, Opt_err, + }; +@@ -74,6 +74,7 @@ enum { + static match_table_t tokens = { + {Opt_degraded, "degraded"}, + {Opt_subvol, "subvol=%s"}, ++ {Opt_subvolid, "subvolid=%d"}, + {Opt_device, "device=%s"}, + {Opt_nodatasum, "nodatasum"}, + {Opt_nodatacow, "nodatacow"}, +@@ -95,31 +96,6 @@ static match_table_t tokens = { + {Opt_err, NULL}, + }; + +-u64 btrfs_parse_size(char *str) +-{ +- u64 res; +- int mult = 1; +- char *end; +- char last; +- +- res = simple_strtoul(str, &end, 10); +- +- last = end[0]; +- if (isalpha(last)) { +- last = tolower(last); +- switch (last) { +- case 'g': +- mult *= 1024; +- case 'm': +- mult *= 1024; +- case 'k': +- mult *= 1024; +- } +- res = res * mult; +- } +- return res; +-} +- + /* + * Regular mount options parser. Everything that is needed only when + * reading in a new superblock is parsed here. +@@ -128,7 +104,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) + { + struct btrfs_fs_info *info = root->fs_info; + substring_t args[MAX_OPT_ARGS]; +- char *p, *num; ++ char *p, *num, *orig; + int intarg; + int ret = 0; + +@@ -143,6 +119,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) + if (!options) + return -ENOMEM; + ++ orig = options; + + while ((p = strsep(&options, ",")) != NULL) { + int token; +@@ -156,6 +133,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) + btrfs_set_opt(info->mount_opt, DEGRADED); + break; + case Opt_subvol: ++ case Opt_subvolid: + case Opt_device: + /* + * These are parsed by btrfs_parse_early_options +@@ -213,7 +191,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) + case Opt_max_extent: + num = match_strdup(&args[0]); + if (num) { +- info->max_extent = btrfs_parse_size(num); ++ info->max_extent = memparse(num, NULL); + kfree(num); + + info->max_extent = max_t(u64, +@@ -225,7 +203,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) + case Opt_max_inline: + num = match_strdup(&args[0]); + if (num) { +- info->max_inline = btrfs_parse_size(num); ++ info->max_inline = memparse(num, NULL); + kfree(num); + + if (info->max_inline) { +@@ -240,7 +218,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) + case Opt_alloc_start: + num = match_strdup(&args[0]); + if (num) { +- info->alloc_start = btrfs_parse_size(num); ++ info->alloc_start = memparse(num, NULL); + kfree(num); + printk(KERN_INFO + "btrfs: allocations start at %llu\n", +@@ -280,7 +258,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) + } + } + out: +- kfree(options); ++ kfree(orig); + return ret; + } + +@@ -291,12 +269,13 @@ out: + * only when we need to allocate a new super block. 
+ */ + static int btrfs_parse_early_options(const char *options, fmode_t flags, +- void *holder, char **subvol_name, ++ void *holder, char **subvol_name, u64 *subvol_objectid, + struct btrfs_fs_devices **fs_devices) + { + substring_t args[MAX_OPT_ARGS]; + char *opts, *p; + int error = 0; ++ int intarg; + + if (!options) + goto out; +@@ -319,6 +298,18 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, + case Opt_subvol: + *subvol_name = match_strdup(&args[0]); + break; ++ case Opt_subvolid: ++ intarg = 0; ++ error = match_int(&args[0], &intarg); ++ if (!error) { ++ /* we want the original fs_tree */ ++ if (!intarg) ++ *subvol_objectid = ++ BTRFS_FS_TREE_OBJECTID; ++ else ++ *subvol_objectid = intarg; ++ } ++ break; + case Opt_device: + error = btrfs_scan_one_device(match_strdup(&args[0]), + flags, holder, fs_devices); +@@ -346,6 +337,110 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, + return error; + } + ++static struct dentry *get_default_root(struct super_block *sb, ++ u64 subvol_objectid) ++{ ++ struct btrfs_root *root = sb->s_fs_info; ++ struct btrfs_root *new_root; ++ struct btrfs_dir_item *di; ++ struct btrfs_path *path; ++ struct btrfs_key location; ++ struct inode *inode; ++ struct dentry *dentry; ++ u64 dir_id; ++ int new = 0; ++ ++ /* ++ * We have a specific subvol we want to mount, just setup location and ++ * go look up the root. ++ */ ++ if (subvol_objectid) { ++ location.objectid = subvol_objectid; ++ location.type = BTRFS_ROOT_ITEM_KEY; ++ location.offset = (u64)-1; ++ goto find_root; ++ } ++ ++ path = btrfs_alloc_path(); ++ if (!path) ++ return ERR_PTR(-ENOMEM); ++ path->leave_spinning = 1; ++ ++ /* ++ * Find the "default" dir item which points to the root item that we ++ * will mount by default if we haven't been given a specific subvolume ++ * to mount. ++ */ ++ dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); ++ di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0); ++ if (!di) { ++ /* ++ * Ok the default dir item isn't there. This is weird since ++ * it's always been there, but don't freak out, just try and ++ * mount to root most subvolume. ++ */ ++ btrfs_free_path(path); ++ dir_id = BTRFS_FIRST_FREE_OBJECTID; ++ new_root = root->fs_info->fs_root; ++ goto setup_root; ++ } ++ ++ btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); ++ btrfs_free_path(path); ++ ++find_root: ++ new_root = btrfs_read_fs_root_no_name(root->fs_info, &location); ++ if (IS_ERR(new_root)) ++ return ERR_PTR(PTR_ERR(new_root)); ++ ++ if (btrfs_root_refs(&new_root->root_item) == 0) ++ return ERR_PTR(-ENOENT); ++ ++ dir_id = btrfs_root_dirid(&new_root->root_item); ++setup_root: ++ location.objectid = dir_id; ++ location.type = BTRFS_INODE_ITEM_KEY; ++ location.offset = 0; ++ ++ inode = btrfs_iget(sb, &location, new_root, &new); ++ if (!inode) ++ return ERR_PTR(-ENOMEM); ++ ++ /* ++ * If we're just mounting the root most subvol put the inode and return ++ * a reference to the dentry. We will have already gotten a reference ++ * to the inode in btrfs_fill_super so we're good to go. ++ */ ++ if (!new && sb->s_root->d_inode == inode) { ++ iput(inode); ++ return dget(sb->s_root); ++ } ++ ++ if (new) { ++ const struct qstr name = { .name = "/", .len = 1 }; ++ ++ /* ++ * New inode, we need to make the dentry a sibling of s_root so ++ * everything gets cleaned up properly on unmount. 
++ */ ++ dentry = d_alloc(sb->s_root, &name); ++ if (!dentry) { ++ iput(inode); ++ return ERR_PTR(-ENOMEM); ++ } ++ d_splice_alias(inode, dentry); ++ } else { ++ /* ++ * We found the inode in cache, just find a dentry for it and ++ * put the reference to the inode we just got. ++ */ ++ dentry = d_find_alias(inode); ++ iput(inode); ++ } ++ ++ return dentry; ++} ++ + static int btrfs_fill_super(struct super_block *sb, + struct btrfs_fs_devices *fs_devices, + void *data, int silent) +@@ -379,7 +474,7 @@ static int btrfs_fill_super(struct super_block *sb, + key.objectid = BTRFS_FIRST_FREE_OBJECTID; + key.type = BTRFS_INODE_ITEM_KEY; + key.offset = 0; +- inode = btrfs_iget(sb, &key, tree_root->fs_info->fs_root); ++ inode = btrfs_iget(sb, &key, tree_root->fs_info->fs_root, NULL); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto fail_close; +@@ -391,12 +486,6 @@ static int btrfs_fill_super(struct super_block *sb, + err = -ENOMEM; + goto fail_close; + } +-#if 0 +- /* this does the super kobj at the same time */ +- err = btrfs_sysfs_add_super(tree_root->fs_info); +- if (err) +- goto fail_close; +-#endif + + sb->s_root = root_dentry; + +@@ -488,19 +577,22 @@ static int btrfs_test_super(struct super_block *s, void *data) + static int btrfs_get_sb(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data, struct vfsmount *mnt) + { +- char *subvol_name = NULL; + struct block_device *bdev = NULL; + struct super_block *s; + struct dentry *root; + struct btrfs_fs_devices *fs_devices = NULL; + fmode_t mode = FMODE_READ; ++ char *subvol_name = NULL; ++ u64 subvol_objectid = 0; + int error = 0; ++ int found = 0; + + if (!(flags & MS_RDONLY)) + mode |= FMODE_WRITE; + + error = btrfs_parse_early_options(data, mode, fs_type, +- &subvol_name, &fs_devices); ++ &subvol_name, &subvol_objectid, ++ &fs_devices); + if (error) + return error; + +@@ -529,6 +621,7 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, + goto error_close_devices; + } + ++ found = 1; + btrfs_close_devices(fs_devices); + } else { + char b[BDEVNAME_SIZE]; +@@ -546,25 +639,35 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, + s->s_flags |= MS_ACTIVE; + } + +- if (!strcmp(subvol_name, ".")) +- root = dget(s->s_root); +- else { +- mutex_lock(&s->s_root->d_inode->i_mutex); +- root = lookup_one_len(subvol_name, s->s_root, ++ root = get_default_root(s, subvol_objectid); ++ if (IS_ERR(root)) { ++ error = PTR_ERR(root); ++ deactivate_locked_super(s); ++ goto error; ++ } ++ /* if they gave us a subvolume name bind mount into that */ ++ if (strcmp(subvol_name, ".")) { ++ struct dentry *new_root; ++ mutex_lock(&root->d_inode->i_mutex); ++ new_root = lookup_one_len(subvol_name, root, + strlen(subvol_name)); +- mutex_unlock(&s->s_root->d_inode->i_mutex); ++ mutex_unlock(&root->d_inode->i_mutex); + +- if (IS_ERR(root)) { ++ if (IS_ERR(new_root)) { + deactivate_locked_super(s); +- error = PTR_ERR(root); +- goto error_free_subvol_name; ++ error = PTR_ERR(new_root); ++ dput(root); ++ goto error_close_devices; + } +- if (!root->d_inode) { ++ if (!new_root->d_inode) { + dput(root); ++ dput(new_root); + deactivate_locked_super(s); + error = -ENXIO; +- goto error_free_subvol_name; ++ goto error_close_devices; + } ++ dput(root); ++ root = new_root; + } + + mnt->mnt_sb = s; +@@ -579,6 +682,7 @@ error_close_devices: + btrfs_close_devices(fs_devices); + error_free_subvol_name: + kfree(subvol_name); ++error: + return error; + } + +@@ -623,14 +727,37 @@ static int btrfs_statfs(struct dentry *dentry, 
struct kstatfs *buf) + { + struct btrfs_root *root = btrfs_sb(dentry->d_sb); + struct btrfs_super_block *disk_super = &root->fs_info->super_copy; ++ struct list_head *head = &root->fs_info->space_info; ++ struct btrfs_space_info *found; ++ u64 total_used = 0; ++ u64 data_used = 0; + int bits = dentry->d_sb->s_blocksize_bits; + __be32 *fsid = (__be32 *)root->fs_info->fsid; + ++ rcu_read_lock(); ++ list_for_each_entry_rcu(found, head, list) { ++ if (found->flags & (BTRFS_BLOCK_GROUP_DUP| ++ BTRFS_BLOCK_GROUP_RAID10| ++ BTRFS_BLOCK_GROUP_RAID1)) { ++ total_used += found->bytes_used; ++ if (found->flags & BTRFS_BLOCK_GROUP_DATA) ++ data_used += found->bytes_used; ++ else ++ data_used += found->total_bytes; ++ } ++ ++ total_used += found->bytes_used; ++ if (found->flags & BTRFS_BLOCK_GROUP_DATA) ++ data_used += found->bytes_used; ++ else ++ data_used += found->total_bytes; ++ } ++ rcu_read_unlock(); ++ + buf->f_namelen = BTRFS_NAME_LEN; + buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits; +- buf->f_bfree = buf->f_blocks - +- (btrfs_super_bytes_used(disk_super) >> bits); +- buf->f_bavail = buf->f_bfree; ++ buf->f_bfree = buf->f_blocks - (total_used >> bits); ++ buf->f_bavail = buf->f_blocks - (data_used >> bits); + buf->f_bsize = dentry->d_sb->s_blocksize; + buf->f_type = BTRFS_SUPER_MAGIC; + +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c +index b2acc79..2d654c1 100644 +--- a/fs/btrfs/transaction.c ++++ b/fs/btrfs/transaction.c +@@ -69,7 +69,7 @@ static noinline int join_transaction(struct btrfs_root *root) + cur_trans->commit_done = 0; + cur_trans->start_time = get_seconds(); + +- cur_trans->delayed_refs.root.rb_node = NULL; ++ cur_trans->delayed_refs.root = RB_ROOT; + cur_trans->delayed_refs.num_entries = 0; + cur_trans->delayed_refs.num_heads_ready = 0; + cur_trans->delayed_refs.num_heads = 0; +@@ -997,13 +997,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, + + mutex_unlock(&root->fs_info->trans_mutex); + +- if (flush_on_commit) { ++ if (flush_on_commit || snap_pending) { + btrfs_start_delalloc_inodes(root, 1); + ret = btrfs_wait_ordered_extents(root, 0, 1); + BUG_ON(ret); +- } else if (snap_pending) { +- ret = btrfs_wait_ordered_extents(root, 0, 1); +- BUG_ON(ret); + } + + /* +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index 4a9434b..1255fcc 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -445,7 +445,7 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root, + key.objectid = objectid; + key.type = BTRFS_INODE_ITEM_KEY; + key.offset = 0; +- inode = btrfs_iget(root->fs_info->sb, &key, root); ++ inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); + if (IS_ERR(inode)) { + inode = NULL; + } else if (is_bad_inode(inode)) { +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index 41ecbb2..9df8e3f 100644 +--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -256,13 +256,13 @@ loop_lock: + wake_up(&fs_info->async_submit_wait); + + BUG_ON(atomic_read(&cur->bi_cnt) == 0); +- submit_bio(cur->bi_rw, cur); +- num_run++; +- batch_run++; + + if (bio_rw_flagged(cur, BIO_RW_SYNCIO)) + num_sync_run++; + ++ submit_bio(cur->bi_rw, cur); ++ num_run++; ++ batch_run++; + if (need_resched()) { + if (num_sync_run) { + blk_run_backing_dev(bdi, NULL); +@@ -325,16 +325,6 @@ loop_lock: + num_sync_run = 0; + blk_run_backing_dev(bdi, NULL); + } +- +- cond_resched(); +- if (again) +- goto loop; +- +- spin_lock(&device->io_lock); +- if (device->pending_bios.head || device->pending_sync_bios.head) +- goto loop_lock; +- 
spin_unlock(&device->io_lock); +- + /* + * IO has already been through a long path to get here. Checksumming, + * async helper threads, perhaps compression. We've done a pretty +@@ -346,6 +336,16 @@ loop_lock: + * cared about found its way down here. + */ + blk_run_backing_dev(bdi, NULL); ++ ++ cond_resched(); ++ if (again) ++ goto loop; ++ ++ spin_lock(&device->io_lock); ++ if (device->pending_bios.head || device->pending_sync_bios.head) ++ goto loop_lock; ++ spin_unlock(&device->io_lock); ++ + done: + return 0; + } +@@ -365,6 +365,7 @@ static noinline int device_list_add(const char *path, + struct btrfs_device *device; + struct btrfs_fs_devices *fs_devices; + u64 found_transid = btrfs_super_generation(disk_super); ++ char *name; + + fs_devices = find_fsid(disk_super->fsid); + if (!fs_devices) { +@@ -411,6 +412,12 @@ static noinline int device_list_add(const char *path, + + device->fs_devices = fs_devices; + fs_devices->num_devices++; ++ } else if (strcmp(device->name, path)) { ++ name = kstrdup(path, GFP_NOFS); ++ if (!name) ++ return -ENOMEM; ++ kfree(device->name); ++ device->name = name; + } + + if (found_transid > fs_devices->latest_trans) { +@@ -592,7 +599,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, + goto error_close; + + disk_super = (struct btrfs_super_block *)bh->b_data; +- devid = le64_to_cpu(disk_super->dev_item.devid); ++ devid = btrfs_stack_device_id(&disk_super->dev_item); + if (devid != device->devid) + goto error_brelse; + +@@ -694,7 +701,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, + goto error_close; + } + disk_super = (struct btrfs_super_block *)bh->b_data; +- devid = le64_to_cpu(disk_super->dev_item.devid); ++ devid = btrfs_stack_device_id(&disk_super->dev_item); + transid = btrfs_super_generation(disk_super); + if (disk_super->label[0]) + printk(KERN_INFO "device label %s ", disk_super->label); +@@ -1187,7 +1194,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) + goto error_close; + } + disk_super = (struct btrfs_super_block *)bh->b_data; +- devid = le64_to_cpu(disk_super->dev_item.devid); ++ devid = btrfs_stack_device_id(&disk_super->dev_item); + dev_uuid = disk_super->dev_item.uuid; + device = btrfs_find_device(root, devid, dev_uuid, + disk_super->fsid); diff --git a/linux-2.6-cantiga-iommu-gfx.patch b/linux-2.6-cantiga-iommu-gfx.patch deleted file mode 100644 index a18e38b..0000000 --- a/linux-2.6-cantiga-iommu-gfx.patch +++ /dev/null @@ -1,26 +0,0 @@ -diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c -index 4173125..baa32a0 100644 ---- a/drivers/pci/intel-iommu.c -+++ b/drivers/pci/intel-iommu.c -@@ -340,7 +340,7 @@ int dmar_disabled = 0; - int dmar_disabled = 1; - #endif /*CONFIG_DMAR_DEFAULT_ON*/ - --static int __initdata dmar_map_gfx = 1; -+static int dmar_map_gfx = 1; - static int dmar_forcedac; - static int intel_iommu_strict; - -@@ -3728,6 +3728,12 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev) - */ - printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n"); - rwbf_quirk = 1; -+ -+ /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */ -+ if (dev->revision == 0x07) { -+ printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n"); -+ dmar_map_gfx = 0; -+ } - } - - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); diff --git a/linux-2.6-cpufreq-locking.patch b/linux-2.6-cpufreq-locking.patch new file mode 100644 index 0000000..3580289 --- /dev/null +++ b/linux-2.6-cpufreq-locking.patch @@ -0,0 +1,98 
@@ +Date: Thu, 4 Mar 2010 03:23:46 -0500 +From: Amerigo Wang +To: linux-kernel@vger.kernel.org +Cc: Dave Jones , cpufreq@vger.kernel.org, + Amerigo Wang , + Johannes Berg , akpm@linux-foundation.org, + Venkatesh Pallipadi +Subject: [CPUFREQ] fix a lockdep warning + +There is no need to do sysfs_remove_link() or kobject_put() etc. +when policy_rwsem_write is held, move them after releasing the lock. + +This fixes the lockdep warning: + +halt/4071 is trying to acquire lock: + (s_active){++++.+}, at: [] .sysfs_addrm_finish+0x58/0xc0 + +but task is already holding lock: + (&per_cpu(cpu_policy_rwsem, cpu)){+.+.+.}, at: [] .lock_policy_rwsem_write+0x84/0xf4 + +Reported-by: Benjamin Herrenschmidt +Signed-off-by: WANG Cong +Cc: Johannes Berg +Cc: Venkatesh Pallipadi +Signed-off-by: Dave Jones + +--- +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c +index 67bc2ec..e196084 100644 +--- a/drivers/cpufreq/cpufreq.c ++++ b/drivers/cpufreq/cpufreq.c +@@ -1113,6 +1113,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) + unsigned int cpu = sys_dev->id; + unsigned long flags; + struct cpufreq_policy *data; ++ struct kobject *kobj; ++ struct completion *cmp; + #ifdef CONFIG_SMP + struct sys_device *cpu_sys_dev; + unsigned int j; +@@ -1141,10 +1143,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) + dprintk("removing link\n"); + cpumask_clear_cpu(cpu, data->cpus); + spin_unlock_irqrestore(&cpufreq_driver_lock, flags); +- sysfs_remove_link(&sys_dev->kobj, "cpufreq"); ++ kobj = &sys_dev->kobj; + cpufreq_cpu_put(data); + cpufreq_debug_enable_ratelimit(); + unlock_policy_rwsem_write(cpu); ++ sysfs_remove_link(kobj, "cpufreq"); + return 0; + } + #endif +@@ -1181,7 +1184,10 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) + data->governor->name, CPUFREQ_NAME_LEN); + #endif + cpu_sys_dev = get_cpu_sysdev(j); +- sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq"); ++ kobj = &cpu_sys_dev->kobj; ++ unlock_policy_rwsem_write(cpu); ++ sysfs_remove_link(kobj, "cpufreq"); ++ lock_policy_rwsem_write(cpu); + cpufreq_cpu_put(data); + } + } +@@ -1192,19 +1198,22 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) + if (cpufreq_driver->target) + __cpufreq_governor(data, CPUFREQ_GOV_STOP); + +- kobject_put(&data->kobj); ++ kobj = &data->kobj; ++ cmp = &data->kobj_unregister; ++ unlock_policy_rwsem_write(cpu); ++ kobject_put(kobj); + + /* we need to make sure that the underlying kobj is actually + * not referenced anymore by anybody before we proceed with + * unloading. 
+ */ + dprintk("waiting for dropping of refcount\n"); +- wait_for_completion(&data->kobj_unregister); ++ wait_for_completion(cmp); + dprintk("wait complete\n"); + ++ lock_policy_rwsem_write(cpu); + if (cpufreq_driver->exit) + cpufreq_driver->exit(data); +- + unlock_policy_rwsem_write(cpu); + + free_cpumask_var(data->related_cpus); + + +-- +To unsubscribe from this list: send the line "unsubscribe cpufreq" in +the body of a message to majordomo@vger.kernel.org +More majordomo info at http://vger.kernel.org/majordomo-info.html + diff --git a/linux-2.6-crash-driver.patch b/linux-2.6-crash-driver.patch index 7b518bb..5669f7a 100644 --- a/linux-2.6-crash-driver.patch +++ b/linux-2.6-crash-driver.patch @@ -1,21 +1,3 @@ -From df42d15cd28f468ecd4c30465b98a53cce90617c Mon Sep 17 00:00:00 2001 -From: Kyle McMartin -Date: Tue, 30 Mar 2010 00:16:25 -0400 -Subject: dev-crash-driver.patch - ---- - arch/ia64/include/asm/crash.h | 90 +++++++++++++++++++++++++++++ - arch/ia64/kernel/ia64_ksyms.c | 3 + - arch/x86/include/asm/crash.h | 75 ++++++++++++++++++++++++ - arch/x86/mm/ioremap.c | 2 + - drivers/char/Kconfig | 2 + - drivers/char/Makefile | 2 + - drivers/char/crash.c | 128 +++++++++++++++++++++++++++++++++++++++++ - 7 files changed, 302 insertions(+), 0 deletions(-) - create mode 100644 arch/ia64/include/asm/crash.h - create mode 100644 arch/x86/include/asm/crash.h - create mode 100644 drivers/char/crash.c - diff --git a/arch/ia64/include/asm/crash.h b/arch/ia64/include/asm/crash.h new file mode 100644 index 0000000..541af84 @@ -113,7 +95,7 @@ index 0000000..541af84 + +#endif /* _ASM_IA64_CRASH_H */ diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c -index 7f4a0ed..552fe24 100644 +index 14d39e3..cf3d040 100644 --- a/arch/ia64/kernel/ia64_ksyms.c +++ b/arch/ia64/kernel/ia64_ksyms.c @@ -84,6 +84,9 @@ EXPORT_SYMBOL(ia64_save_scratch_fpregs); @@ -208,23 +190,22 @@ index 0000000..dfcc006 + +#endif /* _ASM_I386_CRASH_H */ diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c -index 5eb1ba7..3e525d2 100644 +index 334e63c..8e1ccbc 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c -@@ -24,6 +24,8 @@ - - #include "physaddr.h" - +@@ -60,6 +60,7 @@ int page_is_ram(unsigned long pagenr) + } + return 0; + } +EXPORT_SYMBOL_GPL(page_is_ram); -+ + /* * Fix up the linear direct mapping of the kernel to avoid cache attribute - * conflicts. diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig -index 3141dd3..153658c 100644 +index 08a6f50..8bc5e9a 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig -@@ -471,6 +471,8 @@ config LEGACY_PTYS +@@ -484,6 +484,8 @@ config LEGACY_PTYS security. This option enables these legacy devices; on most systems, it is safe to say N. 
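/*
 * The cpufreq patch above avoids the reported lockdep inversion by capturing
 * the kobject/sysfs pointers while policy_rwsem_write is held and doing the
 * blocking teardown (sysfs_remove_link(), kobject_put(), wait_for_completion())
 * only after the lock is dropped.  A rough user-space analogue of that
 * "capture under the lock, release after unlock" pattern, with made-up names:
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct policy {
	pthread_mutex_t lock;
	char *sysfs_link;               /* stand-in for the kobject/sysfs state */
};

static void slow_release(char *link)    /* may block, like kobject_put() */
{
	printf("removing %s\n", link);
	free(link);
}

static void remove_policy(struct policy *p)
{
	char *link;

	pthread_mutex_lock(&p->lock);
	link = p->sysfs_link;           /* capture under the lock ... */
	p->sysfs_link = NULL;
	pthread_mutex_unlock(&p->lock);

	slow_release(link);             /* ... tear down after dropping it */
}

int main(void)
{
	struct policy p = { PTHREAD_MUTEX_INITIALIZER, strdup("cpufreq") };

	remove_policy(&p);
	return 0;
}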
@@ -234,10 +215,10 @@ index 3141dd3..153658c 100644 config LEGACY_PTY_COUNT int "Maximum number of legacy PTY in use" diff --git a/drivers/char/Makefile b/drivers/char/Makefile -index f957edf..604c418 100644 +index 19a79dd..0bee860 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile -@@ -111,6 +111,8 @@ obj-$(CONFIG_PS3_FLASH) += ps3flash.o +@@ -112,6 +112,8 @@ obj-$(CONFIG_PS3_FLASH) += ps3flash.o obj-$(CONFIG_JS_RTC) += js-rtc.o js-rtc-y = rtc.o @@ -380,6 +361,3 @@ index 0000000..e5437de +module_exit(crash_cleanup_module); + +MODULE_LICENSE("GPL"); --- -1.7.0.1 - diff --git a/linux-2.6-debug-nmi-timeout.patch b/linux-2.6-debug-nmi-timeout.patch index f54d26a..15249d5 100644 --- a/linux-2.6-debug-nmi-timeout.patch +++ b/linux-2.6-debug-nmi-timeout.patch @@ -1,7 +1,7 @@ -From 542dee6f43067fa0101b53925aadf1d08c997cd4 Mon Sep 17 00:00:00 2001 -From: Kyle McMartin -Date: Mon, 29 Mar 2010 23:40:27 -0400 -Subject: linux-2.6-debug-nmi-timeout +From c2dcc88ee3aca407471246c38c11a100cca39076 Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Mon, 11 Jan 2010 08:20:51 -0500 +Subject: linux-2.6-debug-nmi-timeout.patch --- arch/x86/kernel/apic/nmi.c | 2 +- @@ -9,23 +9,23 @@ Subject: linux-2.6-debug-nmi-timeout 2 files changed, 9 insertions(+), 1 deletions(-) diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c -index 8aa65ad..ba7d55e 100644 +index 0159a69..2bbca2b 100644 --- a/arch/x86/kernel/apic/nmi.c +++ b/arch/x86/kernel/apic/nmi.c @@ -439,7 +439,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) * wait a few IRQs (5 seconds) before doing the oops ... */ - __this_cpu_inc(alert_counter); -- if (__this_cpu_read(alert_counter) == 5 * nmi_hz) -+ if (__this_cpu_read(alert_counter) == CONFIG_DEBUG_NMI_TIMEOUT * nmi_hz) + __this_cpu_inc(per_cpu_var(alert_counter)); +- if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz) ++ if (__this_cpu_read(per_cpu_var(alert_counter)) == CONFIG_DEBUG_NMI_TIMEOUT * nmi_hz) /* * die_nmi will return ONLY if NOTIFY_STOP happens.. */ diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug -index 1fafb4b..963e78b 100644 +index 25c3ed5..6d67975 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug -@@ -254,6 +254,14 @@ config SCHEDSTATS +@@ -253,6 +253,14 @@ config SCHEDSTATS application, you can say N to avoid the very slight overhead this adds. @@ -41,5 +41,5 @@ index 1fafb4b..963e78b 100644 bool "Collect kernel timers statistics" depends on DEBUG_KERNEL && PROC_FS -- -1.7.0.1 +1.6.5.2 diff --git a/linux-2.6-defaults-pci_no_msi.patch b/linux-2.6-defaults-pci_no_msi.patch index 9f49321..fad6a53 100644 --- a/linux-2.6-defaults-pci_no_msi.patch +++ b/linux-2.6-defaults-pci_no_msi.patch @@ -1,21 +1,6 @@ -From 14bdd0d36f5284108468bb73afd50726b07c7a84 Mon Sep 17 00:00:00 2001 -From: Kyle McMartin -Date: Mon, 29 Mar 2010 23:43:49 -0400 -Subject: linux-2.6-defaults-pci_no_msi - ---- - Documentation/kernel-parameters.txt | 3 +++ - drivers/pci/Kconfig | 12 ++++++++++++ - drivers/pci/msi.c | 9 +++++++++ - drivers/pci/pci.c | 2 ++ - drivers/pci/pci.h | 2 ++ - 5 files changed, 28 insertions(+), 0 deletions(-) - -diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt -index e4cbca5..8154a0f 100644 ---- a/Documentation/kernel-parameters.txt -+++ b/Documentation/kernel-parameters.txt -@@ -1911,6 +1911,9 @@ and is between 256 and 4096 characters. 
It is defined in the file +--- linux-2.6.30.noarch/Documentation/kernel-parameters.txt~ 2009-06-24 14:25:04.000000000 -0400 ++++ linux-2.6.30.noarch/Documentation/kernel-parameters.txt 2009-06-24 14:25:32.000000000 -0400 +@@ -1811,6 +1811,9 @@ and is between 256 and 4096 characters. check_enable_amd_mmconf [X86] check for and enable properly configured MMIO access to PCI config space on AMD family 10h CPU @@ -26,7 +11,7 @@ index e4cbca5..8154a0f 100644 enabled, this kernel boot option can be used to disable the use of MSI interrupts system-wide. diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig -index 7858a11..b12fcad 100644 +index 2a4501d..209758c 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -21,6 +21,18 @@ config PCI_MSI @@ -45,14 +30,14 @@ index 7858a11..b12fcad 100644 + + If you don't know what to do here, say N. + - config PCI_DEBUG - bool "PCI Debugging" - depends on PCI && DEBUG_KERNEL + config PCI_LEGACY + bool "Enable deprecated pci_find_* API" + depends on PCI diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c -index f9cf317..6b0539a 100644 +index 896a15d..53df583 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c -@@ -22,7 +22,11 @@ +@@ -23,7 +23,11 @@ #include "pci.h" #include "msi.h" @@ -64,7 +49,7 @@ index f9cf317..6b0539a 100644 /* Arch hooks */ -@@ -836,6 +840,11 @@ int pci_msi_enabled(void) +@@ -786,6 +790,11 @@ int pci_msi_enabled(void) } EXPORT_SYMBOL(pci_msi_enabled); @@ -77,10 +62,10 @@ index f9cf317..6b0539a 100644 { INIT_LIST_HEAD(&dev->msi_list); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c -index 1531f3a..3cb332b 100644 +index 17bd932..e9bc9fe 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c -@@ -2983,6 +2983,8 @@ static int __init pci_setup(char *str) +@@ -2393,6 +2393,8 @@ static int __init pci_setup(char *str) if (*str && (str = pcibios_setup(str)) && *str) { if (!strcmp(str, "nomsi")) { pci_no_msi(); @@ -90,10 +75,10 @@ index 1531f3a..3cb332b 100644 pci_no_aer(); } else if (!strcmp(str, "nodomains")) { diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h -index 4eb10f4..caa051e 100644 +index 26ddf78..85efe81 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h -@@ -122,9 +122,11 @@ extern unsigned int pci_pm_d3_delay; +@@ -111,9 +111,11 @@ extern unsigned int pci_pm_d3_delay; #ifdef CONFIG_PCI_MSI void pci_no_msi(void); @@ -105,6 +90,3 @@ index 4eb10f4..caa051e 100644 static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } #endif --- -1.7.0.1 - diff --git a/linux-2.6-defaults-pciehp.patch b/linux-2.6-defaults-pciehp.patch new file mode 100644 index 0000000..07f2670 --- /dev/null +++ b/linux-2.6-defaults-pciehp.patch @@ -0,0 +1,13 @@ +diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c +index e7f3c9e..4f2b037 100644 +--- a/drivers/pci/hotplug/pciehp_core.c ++++ b/drivers/pci/hotplug/pciehp_core.c +@@ -41,7 +41,7 @@ int pciehp_debug; + int pciehp_poll_mode; + int pciehp_poll_time; + int pciehp_force; +-int pciehp_passive; ++int pciehp_passive=1; + struct workqueue_struct *pciehp_wq; + + #define DRIVER_VERSION "0.4" diff --git a/linux-2.6-dell-laptop-rfkill-fix.patch b/linux-2.6-dell-laptop-rfkill-fix.patch new file mode 100644 index 0000000..911179a --- /dev/null +++ b/linux-2.6-dell-laptop-rfkill-fix.patch @@ -0,0 +1,319 @@ +From caca5447fae9bfc87ab7d4af664d8bc95db6904d Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Mon, 11 Jan 2010 08:08:02 -0500 +Subject: linux-2.6-dell-laptop-rfkill-fix.patch + +--- + drivers/input/input.c | 91 ++++++++++++++++++++++++++++----- + 
drivers/platform/x86/dell-laptop.c | 100 ++++++++++++++++++++++++++++++++++++ + include/linux/input.h | 5 ++ + 3 files changed, 183 insertions(+), 13 deletions(-) + +diff --git a/drivers/input/input.c b/drivers/input/input.c +index ab06071..1911c3a 100644 +--- a/drivers/input/input.c ++++ b/drivers/input/input.c +@@ -90,19 +90,26 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz) + */ + static void input_pass_event(struct input_dev *dev, + unsigned int type, unsigned int code, int value) +-{ +- struct input_handle *handle; ++ ++{ struct input_handle *handle; + + rcu_read_lock(); + + handle = rcu_dereference(dev->grab); +- if (handle) ++ if (handle) { + handle->handler->event(handle, type, code, value); +- else +- list_for_each_entry_rcu(handle, &dev->h_list, d_node) +- if (handle->open) +- handle->handler->event(handle, +- type, code, value); ++ goto out; ++ } ++ ++ handle = rcu_dereference(dev->filter); ++ if (handle && handle->handler->filter(handle, type, code, value)) ++ goto out; ++ ++ list_for_each_entry_rcu(handle, &dev->h_list, d_node) ++ if (handle->open) ++ handle->handler->event(handle, ++ type, code, value); ++out: + rcu_read_unlock(); + } + +@@ -383,12 +390,15 @@ int input_grab_device(struct input_handle *handle) + } + EXPORT_SYMBOL(input_grab_device); + +-static void __input_release_device(struct input_handle *handle) ++static void __input_release_device(struct input_handle *handle, bool filter) + { + struct input_dev *dev = handle->dev; + +- if (dev->grab == handle) { +- rcu_assign_pointer(dev->grab, NULL); ++ if (handle == (filter ? dev->filter : dev->grab)) { ++ if (filter) ++ rcu_assign_pointer(dev->filter, NULL); ++ else ++ rcu_assign_pointer(dev->grab, NULL); + /* Make sure input_pass_event() notices that grab is gone */ + synchronize_rcu(); + +@@ -412,12 +422,65 @@ void input_release_device(struct input_handle *handle) + struct input_dev *dev = handle->dev; + + mutex_lock(&dev->mutex); +- __input_release_device(handle); ++ __input_release_device(handle, false); + mutex_unlock(&dev->mutex); + } + EXPORT_SYMBOL(input_release_device); + + /** ++ * input_filter_device - allow input events to be filtered from higher layers ++ * @handle: input handle that wants to filter the device ++ * ++ * When a device is filtered by an input handle all events generated by ++ * the device are to this handle. If the filter function returns true then ++ * the event is discarded rather than being passed to any other input handles, ++ * otherwise it is passed to them as normal. Grabs will be handled before ++ * filters, so a grabbed device will not deliver events to a filter function. ++ */ ++int input_filter_device(struct input_handle *handle) ++{ ++ struct input_dev *dev = handle->dev; ++ int retval; ++ ++ retval = mutex_lock_interruptible(&dev->mutex); ++ if (retval) ++ return retval; ++ ++ if (dev->filter) { ++ retval = -EBUSY; ++ goto out; ++ } ++ ++ rcu_assign_pointer(dev->filter, handle); ++ synchronize_rcu(); ++ ++ out: ++ mutex_unlock(&dev->mutex); ++ return retval; ++} ++EXPORT_SYMBOL(input_filter_device); ++ ++/** ++ * input_unfilter_device - removes a filter from a device ++ * @handle: input handle that owns the device ++ * ++ * Removes the filter from a device so that other input handles can ++ * start receiving unfiltered input events. Upon release all handlers ++ * attached to the device have their start() method called so they ++ * have a change to synchronize device state with the rest of the ++ * system. 
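/*
 * Sketch of the filter idea documented in the input_filter_device() kerneldoc
 * above: a filter handler sees each event before the ordinary handlers and,
 * by returning true, consumes it so nothing else receives it (dell_input_filter()
 * uses this to swallow the wireless-toggle key).  Plain C model, not kernel
 * code; the numeric codes follow the usual EV_KEY/KEY_WLAN values.
 */
#include <stdbool.h>
#include <stdio.h>

typedef bool (*filter_fn)(int type, int code, int value);
typedef void (*handler_fn)(int type, int code, int value);

static bool wlan_filter(int type, int code, int value)
{
	return type == 1 /* EV_KEY */ && code == 238 /* KEY_WLAN */ && value == 1;
}

static void ordinary_handler(int type, int code, int value)
{
	printf("event %d/%d/%d delivered\n", type, code, value);
}

static void pass_event(filter_fn filter, handler_fn handler,
		       int type, int code, int value)
{
	if (filter && filter(type, code, value))
		return;                 /* filtered: no other handler sees it */
	handler(type, code, value);
}

int main(void)
{
	pass_event(wlan_filter, ordinary_handler, 1, 238, 1);  /* swallowed */
	pass_event(wlan_filter, ordinary_handler, 1, 30, 1);   /* delivered */
	return 0;
}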
++ */ ++void input_unfilter_device(struct input_handle *handle) ++{ ++ struct input_dev *dev = handle->dev; ++ ++ mutex_lock(&dev->mutex); ++ __input_release_device(handle, true); ++ mutex_unlock(&dev->mutex); ++} ++EXPORT_SYMBOL(input_unfilter_device); ++ ++/** + * input_open_device - open input device + * @handle: handle through which device is being accessed + * +@@ -490,7 +553,9 @@ void input_close_device(struct input_handle *handle) + + mutex_lock(&dev->mutex); + +- __input_release_device(handle); ++ /* Release both grabs and filters */ ++ __input_release_device(handle, false); ++ __input_release_device(handle, true); + + if (!--dev->users && dev->close) + dev->close(dev); +diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c +index 3780994..25247be 100644 +--- a/drivers/platform/x86/dell-laptop.c ++++ b/drivers/platform/x86/dell-laptop.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + #include "../../firmware/dcdbas.h" + + #define BRIGHTNESS_TOKEN 0x7d +@@ -214,6 +215,16 @@ static const struct rfkill_ops dell_rfkill_ops = { + .query = dell_rfkill_query, + }; + ++static void dell_rfkill_update(void) ++{ ++ if (wifi_rfkill) ++ dell_rfkill_query(wifi_rfkill, (void *)1); ++ if (bluetooth_rfkill) ++ dell_rfkill_query(bluetooth_rfkill, (void *)2); ++ if (wwan_rfkill) ++ dell_rfkill_query(wwan_rfkill, (void *)3); ++} ++ + static int __init dell_setup_rfkill(void) + { + struct calling_interface_buffer buffer; +@@ -338,6 +349,90 @@ static struct backlight_ops dell_ops = { + .update_status = dell_send_intensity, + }; + ++static const struct input_device_id dell_input_ids[] = { ++ { ++ .bustype = 0x11, ++ .vendor = 0x01, ++ .product = 0x01, ++ .version = 0xab41, ++ .flags = INPUT_DEVICE_ID_MATCH_BUS | ++ INPUT_DEVICE_ID_MATCH_VENDOR | ++ INPUT_DEVICE_ID_MATCH_PRODUCT | ++ INPUT_DEVICE_ID_MATCH_VERSION ++ }, ++ { }, ++}; ++ ++static bool dell_input_filter(struct input_handle *handle, unsigned int type, ++ unsigned int code, int value) ++{ ++ if (type == EV_KEY && code == KEY_WLAN && value == 1) { ++ dell_rfkill_update(); ++ return 1; ++ } ++ ++ return 0; ++} ++ ++static void dell_input_event(struct input_handle *handle, unsigned int type, ++ unsigned int code, int value) ++{ ++} ++ ++static int dell_input_connect(struct input_handler *handler, ++ struct input_dev *dev, ++ const struct input_device_id *id) ++{ ++ struct input_handle *handle; ++ int error; ++ ++ handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); ++ if (!handle) ++ return -ENOMEM; ++ ++ handle->dev = dev; ++ handle->handler = handler; ++ handle->name = "dell-laptop"; ++ ++ error = input_register_handle(handle); ++ if (error) ++ goto err_free_handle; ++ ++ error = input_open_device(handle); ++ if (error) ++ goto err_unregister_handle; ++ ++ error = input_filter_device(handle); ++ if (error) ++ goto err_close_handle; ++ ++ return 0; ++ ++err_close_handle: ++ input_close_device(handle); ++err_unregister_handle: ++ input_unregister_handle(handle); ++err_free_handle: ++ kfree(handle); ++ return error; ++} ++ ++static void dell_input_disconnect(struct input_handle *handle) ++{ ++ input_close_device(handle); ++ input_unregister_handle(handle); ++ kfree(handle); ++} ++ ++static struct input_handler dell_input_handler = { ++ .name = "dell-laptop", ++ .filter = dell_input_filter, ++ .event = dell_input_event, ++ .connect = dell_input_connect, ++ .disconnect = dell_input_disconnect, ++ .id_table = dell_input_ids, ++}; ++ + static int __init dell_init(void) + { + struct 
calling_interface_buffer buffer; +@@ -373,6 +468,10 @@ static int __init dell_init(void) + goto fail_rfkill; + } + ++ if (input_register_handler(&dell_input_handler)) ++ printk(KERN_INFO ++ "dell-laptop: Could not register input filter\n"); ++ + #ifdef CONFIG_ACPI + /* In the event of an ACPI backlight being available, don't + * register the platform controller. +@@ -426,6 +525,7 @@ static void __exit dell_exit(void) + { + backlight_device_unregister(dell_backlight_device); + dell_cleanup_rfkill(); ++ input_unregister_handler(&dell_input_handler); + } + + module_init(dell_init); +diff --git a/include/linux/input.h b/include/linux/input.h +index 7be8a65..7d49094 100644 +--- a/include/linux/input.h ++++ b/include/linux/input.h +@@ -1127,6 +1127,7 @@ struct input_dev { + int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value); + + struct input_handle *grab; ++ struct input_handle *filter; + + spinlock_t event_lock; + struct mutex mutex; +@@ -1227,6 +1228,7 @@ struct input_handler { + void *private; + + void (*event)(struct input_handle *handle, unsigned int type, unsigned int code, int value); ++ bool (*filter)(struct input_handle *handle, unsigned int type, unsigned int code, int value); + int (*connect)(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id); + void (*disconnect)(struct input_handle *handle); + void (*start)(struct input_handle *handle); +@@ -1307,6 +1309,9 @@ void input_unregister_handle(struct input_handle *); + int input_grab_device(struct input_handle *); + void input_release_device(struct input_handle *); + ++int input_filter_device(struct input_handle *); ++void input_unfilter_device(struct input_handle *); ++ + int input_open_device(struct input_handle *); + void input_close_device(struct input_handle *); + +-- +1.6.5.2 + diff --git a/linux-2.6-execshield.patch b/linux-2.6-execshield.patch index f2409c2..b6ee042 100644 --- a/linux-2.6-execshield.patch +++ b/linux-2.6-execshield.patch @@ -1,12 +1,3 @@ -From 5006dd0fae6126c149868102c100cd90a20ef2e3 Mon Sep 17 00:00:00 2001 -From: Kyle McMartin -Date: Mon, 29 Mar 2010 23:20:18 -0400 -Subject: execshield - -cebbert@redhat.com: added fix for bz#220892 - -diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h -index 617bd56..526248d 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -5,6 +5,7 @@ @@ -53,8 +44,6 @@ index 617bd56..526248d 100644 +#endif /* CONFIG_X86_32 */ + #endif /* _ASM_X86_DESC_H */ -diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h -index 80a1dee..8314c66 100644 --- a/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h @@ -7,12 +7,19 @@ @@ -77,8 +66,6 @@ index 80a1dee..8314c66 100644 } mm_context_t; #ifdef CONFIG_SMP -diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h -index 5653f43..55dadb2 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -289,6 +289,12 @@ static inline void set_ldt(const void *addr, unsigned entries) @@ -94,8 +81,6 @@ index 5653f43..55dadb2 100644 static inline void store_gdt(struct desc_ptr *dtr) { PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr); -diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h -index db9ef55..19c2793 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -118,6 +118,9 @@ struct pv_cpu_ops { @@ -108,8 +93,6 @@ index db9ef55..19c2793 100644 unsigned long (*store_tr)(void); void (*load_tls)(struct 
thread_struct *t, unsigned int cpu); #ifdef CONFIG_X86_64 -diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h -index b753ea5..4893156 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -162,6 +162,9 @@ static inline int hlt_works(int cpu) @@ -122,8 +105,6 @@ index b753ea5..4893156 100644 extern void cpu_detect(struct cpuinfo_x86 *c); extern struct pt_regs *idle_regs(struct pt_regs *); -diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c -index 4868e4a..6c8d2ca 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -802,6 +802,20 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) @@ -147,8 +128,6 @@ index 4868e4a..6c8d2ca 100644 /* If the model name is still unset, do table lookup. */ if (!c->x86_model_id[0]) { const char *p; -diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c -index 1db183e..238b97d 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -345,6 +345,9 @@ struct pv_cpu_ops pv_cpu_ops = { @@ -161,11 +140,9 @@ index 1db183e..238b97d 100644 .load_gdt = native_load_gdt, .load_idt = native_load_idt, .store_gdt = native_store_gdt, -diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c -index f6c6266..8ac2589 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c -@@ -251,7 +251,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, +@@ -257,7 +257,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) { @@ -176,7 +153,7 @@ index f6c6266..8ac2589 100644 regs->fs = 0; set_fs(USER_DS); regs->ds = __USER_DS; -@@ -260,6 +263,11 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) +@@ -266,6 +269,11 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) regs->cs = __USER_CS; regs->ip = new_ip; regs->sp = new_sp; @@ -188,7 +165,7 @@ index f6c6266..8ac2589 100644 /* * Free the old FP and other extended state */ -@@ -319,6 +327,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) +@@ -325,6 +333,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) if (preload_fpu) prefetch(next->xstate); @@ -198,7 +175,7 @@ index f6c6266..8ac2589 100644 /* * Reload esp0. 
*/ -@@ -412,3 +423,40 @@ unsigned long get_wchan(struct task_struct *p) +@@ -418,3 +429,40 @@ unsigned long get_wchan(struct task_struct *p) return 0; } @@ -239,8 +216,6 @@ index f6c6266..8ac2589 100644 + mm->context.exec_limit = 0; + set_user_cs(&mm->context.user_cs, 0); +} -diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c -index 1168e44..c452918 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -115,6 +115,76 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err) @@ -350,7 +325,7 @@ index 1168e44..c452918 100644 tsk->thread.error_code = error_code; tsk->thread.trap_no = 13; -@@ -863,19 +956,37 @@ do_device_not_available(struct pt_regs *regs, long error_code) +@@ -860,19 +953,37 @@ do_device_not_available(struct pt_regs *regs, long error_code) } #ifdef CONFIG_X86_32 @@ -397,8 +372,6 @@ index 1168e44..c452918 100644 } #endif -diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c -index 1dab519..360f39d 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -124,13 +124,16 @@ static unsigned long mmap_legacy_base(void) @@ -419,8 +392,6 @@ index 1dab519..360f39d 100644 mm->unmap_area = arch_unmap_area_topdown; } } -diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c -index a3250aa..e0d9cce 100644 --- a/arch/x86/mm/setup_nx.c +++ b/arch/x86/mm/setup_nx.c @@ -1,3 +1,4 @@ @@ -447,8 +418,6 @@ index a3250aa..e0d9cce 100644 printk(KERN_NOTICE "Notice: NX (Execute Disable) protection " "missing in CPU or disabled in BIOS!\n"); } else { -diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c -index 426f3a1..e0286b1 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -6,6 +6,7 @@ @@ -472,8 +441,6 @@ index 426f3a1..e0286b1 100644 /* * orig_rax contains the negated interrupt vector. * Use that to determine where the sender put the data. 
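/*
 * Toy model (not kernel code) of the exec-limit bookkeeping the exec-shield
 * patch adds to the 32-bit mm context above: the context records the highest
 * end address of any executable mapping, and the user CS segment limit is
 * derived from it.  Adding an executable range only ever raises the recorded
 * limit; arch_flush_exec_range() resets it to zero.  Rebuilding the limit
 * when an executable mapping goes away (a VMA rescan in the real patch) is
 * omitted here.
 */
#include <stdio.h>

struct mm_model {
	unsigned long exec_limit;       /* like mm->context.exec_limit */
};

static void add_exec_range(struct mm_model *mm, unsigned long end)
{
	if (end > mm->exec_limit)
		mm->exec_limit = end;   /* grow-only */
}

static void flush_exec_range(struct mm_model *mm)
{
	mm->exec_limit = 0;
}

int main(void)
{
	struct mm_model mm = { 0 };

	add_exec_range(&mm, 0x08050000UL);
	add_exec_range(&mm, 0x08048000UL);      /* lower end: no change */
	printf("exec limit: %#lx\n", mm.exec_limit);
	flush_exec_range(&mm);
	printf("after flush: %#lx\n", mm.exec_limit);
	return 0;
}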
-diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c -index 02b442e..957bb67 100644 --- a/arch/x86/vdso/vdso32-setup.c +++ b/arch/x86/vdso/vdso32-setup.c @@ -331,7 +331,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) @@ -485,11 +452,9 @@ index 02b442e..957bb67 100644 if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; -diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c -index b607239..e426a3f 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c -@@ -334,6 +334,24 @@ static void xen_set_ldt(const void *addr, unsigned entries) +@@ -333,6 +333,24 @@ static void xen_set_ldt(const void *addr, unsigned entries) xen_mc_issue(PARAVIRT_LAZY_CPU); } @@ -514,7 +479,7 @@ index b607239..e426a3f 100644 static void xen_load_gdt(const struct desc_ptr *dtr) { unsigned long va = dtr->address; -@@ -960,6 +978,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = { +@@ -959,6 +977,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = { .load_tr_desc = paravirt_nop, .set_ldt = xen_set_ldt, @@ -524,11 +489,9 @@ index b607239..e426a3f 100644 .load_gdt = xen_load_gdt, .load_idt = xen_load_idt, .load_tls = xen_load_tls, -diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c -index 535e763..d114af6 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c -@@ -74,7 +74,7 @@ static struct linux_binfmt elf_format = { +@@ -73,7 +73,7 @@ static struct linux_binfmt elf_format = { .hasvdso = 1 }; @@ -537,7 +500,7 @@ index 535e763..d114af6 100644 static int set_brk(unsigned long start, unsigned long end) { -@@ -701,6 +701,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) +@@ -721,6 +721,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) break; } @@ -549,7 +512,7 @@ index 535e763..d114af6 100644 /* Some simple consistency checks for the interpreter */ if (elf_interpreter) { retval = -ELIBBAD; -@@ -717,6 +722,15 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) +@@ -740,6 +745,15 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) if (retval) goto out_free_dentry; @@ -565,7 +528,7 @@ index 535e763..d114af6 100644 /* OK, This is the point of no return */ current->flags &= ~PF_FORKNOEXEC; current->mm->def_flags = def_flags; -@@ -724,7 +738,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) +@@ -747,7 +761,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) /* Do this immediately, since STACK_TOP as used in setup_arg_pages may depend on the personality. 
*/ SET_PERSONALITY(loc->elf_ex); @@ -575,7 +538,7 @@ index 535e763..d114af6 100644 current->personality |= READ_IMPLIES_EXEC; if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) -@@ -890,7 +905,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) +@@ -912,7 +927,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) interpreter, &interp_map_addr, load_bias); @@ -584,11 +547,9 @@ index 535e763..d114af6 100644 /* * load_elf_interp() returns relocation * adjustment -diff --git a/include/linux/mm.h b/include/linux/mm.h -index e70f21b..44e6d63 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h -@@ -1259,7 +1259,13 @@ extern int install_special_mapping(struct mm_struct *mm, +@@ -1148,7 +1148,13 @@ extern int install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long flags, struct page **pages); @@ -603,11 +564,9 @@ index e70f21b..44e6d63 100644 extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, -diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h -index b8bb9a6..f478e39 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h -@@ -227,6 +227,9 @@ struct mm_struct { +@@ -209,6 +209,9 @@ struct mm_struct { unsigned long (*get_unmapped_area) (struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); @@ -617,8 +576,6 @@ index b8bb9a6..f478e39 100644 void (*unmap_area) (struct mm_struct *mm, unsigned long addr); #endif unsigned long mmap_base; /* base of mmap area */ -diff --git a/include/linux/resource.h b/include/linux/resource.h -index f1e914e..d2aef9a 100644 --- a/include/linux/resource.h +++ b/include/linux/resource.h @@ -53,8 +53,11 @@ struct rlimit { @@ -634,8 +591,6 @@ index f1e914e..d2aef9a 100644 /* * GPG2 wants 64kB of mlocked memory, to make sure pass phrases -diff --git a/include/linux/sched.h b/include/linux/sched.h -index dad7f66..c5a3948 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -102,6 +102,9 @@ struct fs_struct; @@ -648,7 +603,7 @@ index dad7f66..c5a3948 100644 /* * List of flags we want to share for kernel threads, * if only because they are not used by them anyway. 
-@@ -390,6 +393,10 @@ extern void arch_pick_mmap_layout(struct mm_struct *mm); +@@ -382,6 +385,10 @@ extern void arch_pick_mmap_layout(struct mm_struct *mm); extern unsigned long arch_get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); @@ -659,11 +614,9 @@ index dad7f66..c5a3948 100644 extern unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, -diff --git a/kernel/sysctl.c b/kernel/sysctl.c -index 8686b0f..a4fad81 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c -@@ -99,6 +99,26 @@ extern int sysctl_nr_open_min, sysctl_nr_open_max; +@@ -87,6 +87,26 @@ extern int sysctl_nr_open_min, sysctl_nr_open_max; #ifndef CONFIG_MMU extern int sysctl_nr_trim_pages; #endif @@ -687,10 +640,10 @@ index 8686b0f..a4fad81 100644 +} +__setup("exec-shield=", setup_exec_shield); + - #ifdef CONFIG_BLOCK - extern int blk_iopoll_enabled; - #endif -@@ -400,6 +420,14 @@ static struct ctl_table kern_table[] = { + #ifdef CONFIG_RCU_TORTURE_TEST + extern int rcutorture_runnable; + #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ +@@ -410,6 +430,14 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, @@ -705,10 +658,8 @@ index 8686b0f..a4fad81 100644 #ifdef CONFIG_PROC_SYSCTL { .procname = "tainted", -diff --git a/mm/mmap.c b/mm/mmap.c -index 75557c6..8173284 100644 ---- a/mm/mmap.c -+++ b/mm/mmap.c +--- b/mm/mmap.c ++++ linux-2.6.33.noarch/mm/mmap.c @@ -28,6 +28,7 @@ #include #include @@ -736,7 +687,7 @@ index 75557c6..8173284 100644 static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end); -@@ -388,6 +401,8 @@ static inline void +@@ -388,6 +401,8 @@ __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node *rb_parent) { @@ -745,7 +696,7 @@ index 75557c6..8173284 100644 if (prev) { vma->vm_next = prev->vm_next; prev->vm_next = vma; -@@ -489,6 +504,8 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -490,6 +505,8 @@ rb_erase(&vma->vm_rb, &mm->mm_rb); if (mm->mmap_cache == vma) mm->mmap_cache = prev; @@ -754,16 +705,16 @@ index 75557c6..8173284 100644 } /* -@@ -798,6 +815,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, +@@ -797,6 +814,8 @@ } else /* cases 2, 5, 7 */ - err = vma_adjust(prev, prev->vm_start, + vma_adjust(prev, prev->vm_start, end, prev->vm_pgoff, NULL); + if (prev->vm_flags & VM_EXEC) + arch_add_exec_range(mm, prev->vm_end); - if (err) - return NULL; return prev; -@@ -952,7 +971,8 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + } + +@@ -947,7 +966,8 @@ /* Obtain the address to map to. we verify (or select) it and ensure * that it represents a valid section of the address space. 
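/*
 * The sysctl.c hunk above wires exec-shield up in two places: __setup("exec-shield=", ...)
 * parses "exec-shield=N" from the kernel command line via get_option(), and the
 * same integer (default (1<<0), i.e. 1) is also exposed through an added
 * kern_table sysctl entry.  A small user-space sketch of that "name=value"
 * command-line parsing, with made-up helper names:
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int exec_shield = 1;             /* default, as in the patch */

/* crude stand-in for the kernel's __setup()/get_option() machinery */
static void parse_cmdline_param(const char *param)
{
	const char *prefix = "exec-shield=";

	if (strncmp(param, prefix, strlen(prefix)) == 0)
		exec_shield = atoi(param + strlen(prefix));
}

int main(void)
{
	parse_cmdline_param("exec-shield=0");
	printf("exec_shield = %d\n", exec_shield);
	return 0;
}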
*/ @@ -773,7 +724,7 @@ index 75557c6..8173284 100644 if (addr & ~PAGE_MASK) return addr; -@@ -1504,8 +1524,8 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) +@@ -1479,8 +1499,8 @@ } unsigned long @@ -784,7 +735,7 @@ index 75557c6..8173284 100644 { unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); -@@ -1518,7 +1538,11 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, +@@ -1493,7 +1513,11 @@ if (len > TASK_SIZE) return -ENOMEM; @@ -797,7 +748,7 @@ index 75557c6..8173284 100644 if (file && file->f_op && file->f_op->get_unmapped_area) get_area = file->f_op->get_unmapped_area; addr = get_area(file, addr, len, pgoff, flags); -@@ -1532,8 +1556,83 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, +@@ -1507,8 +1531,83 @@ return arch_rebalance_pgtables(addr, len); } @@ -882,33 +833,31 @@ index 75557c6..8173284 100644 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) -@@ -1608,6 +1707,16 @@ out: +@@ -1583,6 +1682,14 @@ return prev ? prev->vm_next : vma; } +static int over_stack_limit(unsigned long sz) +{ -+ struct rlimit *rlim = current->signal->rlim; -+ + if (sz < EXEC_STACK_BIAS) + return 0; + return (sz - EXEC_STACK_BIAS) > -+ ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur); ++ current->signal->rlim[RLIMIT_STACK].rlim_cur; +} + /* * Verify that the stack growth is acceptable and * update accounting. This is shared with both the -@@ -1624,7 +1733,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns +@@ -1599,7 +1706,7 @@ return -ENOMEM; /* Stack limit test */ -- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) +- if (size > rlim[RLIMIT_STACK].rlim_cur) + if (over_stack_limit(size)) return -ENOMEM; /* mlock limit tests */ -@@ -1936,10 +2045,14 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, +@@ -1906,10 +2013,14 @@ if (new->vm_ops && new->vm_ops->open) new->vm_ops->open(new); @@ -916,16 +865,16 @@ index 75557c6..8173284 100644 + if (new_below) { + unsigned long old_end = vma->vm_end; + - err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + + vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + ((addr - new->vm_start) >> PAGE_SHIFT), new); - else + if (vma->vm_flags & VM_EXEC) + arch_remove_exec_range(mm, old_end); + } else - err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); + vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); - /* Success. 
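/*
 * Sketch (not kernel code) of the biased stack-limit check added above:
 * over_stack_limit() subtracts a fixed EXEC_STACK_BIAS before comparing
 * against the RLIMIT_STACK soft limit, so the first EXEC_STACK_BIAS bytes of
 * stack never count against the limit.  The bias value below is a made-up
 * placeholder; the real constant is defined by the patch.
 */
#include <stdio.h>

#define EXEC_STACK_BIAS (64UL * 1024)   /* placeholder value, see the patch */

static int over_stack_limit(unsigned long size, unsigned long rlimit_cur)
{
	if (size < EXEC_STACK_BIAS)
		return 0;
	return (size - EXEC_STACK_BIAS) > rlimit_cur;
}

int main(void)
{
	unsigned long rlim = 8UL * 1024 * 1024;         /* 8 MiB soft limit */

	printf("%d\n", over_stack_limit(rlim + 1, rlim));                   /* within bias: 0 */
	printf("%d\n", over_stack_limit(rlim + EXEC_STACK_BIAS + 1, rlim)); /* over: 1 */
	return 0;
}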
*/ -@@ -2223,6 +2336,7 @@ void exit_mmap(struct mm_struct *mm) + return 0; +@@ -2176,6 +2287,7 @@ free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0); tlb_finish_mmu(tlb, 0, end); @@ -933,8 +882,6 @@ index 75557c6..8173284 100644 /* * Walk the list again, actually closing and freeing it, -diff --git a/mm/mprotect.c b/mm/mprotect.c -index 8bc969d..3c9b4fc 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -26,9 +26,14 @@ @@ -971,11 +918,9 @@ index 8bc969d..3c9b4fc 100644 mmu_notifier_invalidate_range_start(mm, start, end); if (is_vm_hugetlb_page(vma)) hugetlb_change_protection(vma, start, end, vma->vm_page_prot); -diff --git a/mm/mremap.c b/mm/mremap.c -index e9c75ef..0a5379f 100644 --- a/mm/mremap.c +++ b/mm/mremap.c -@@ -488,10 +488,10 @@ unsigned long do_mremap(unsigned long addr, +@@ -485,10 +485,10 @@ unsigned long do_mremap(unsigned long addr, if (vma->vm_flags & VM_MAYSHARE) map_flags |= MAP_SHARED; @@ -988,6 +933,3 @@ index e9c75ef..0a5379f 100644 if (new_addr & ~PAGE_MASK) { ret = new_addr; goto out; --- -1.7.0.1 - diff --git a/linux-2.6-firewire-git-pending.patch b/linux-2.6-firewire-git-pending.patch index e69de29..e05471f 100644 --- a/linux-2.6-firewire-git-pending.patch +++ b/linux-2.6-firewire-git-pending.patch @@ -0,0 +1,4 @@ +# +# Patches under review and/or pending inclusion in the linux1394-git +# tree (and/or in by the time your read this), which we want... +# diff --git a/linux-2.6-firewire-git-update.patch b/linux-2.6-firewire-git-update.patch index e69de29..6858081 100644 --- a/linux-2.6-firewire-git-update.patch +++ b/linux-2.6-firewire-git-update.patch @@ -0,0 +1,3682 @@ +linux1394-2.6.git tree vs. linus v2.6.29-rc3-git1 on 20090130 by jarod + +--- + firewire-git/drivers/firewire/fw-card.c | 68 - + firewire-git/drivers/firewire/fw-cdev.c | 1014 +++++++++++++++++-------- + firewire-git/drivers/firewire/fw-device.c | 43 - + firewire-git/drivers/firewire/fw-device.h | 7 + firewire-git/drivers/firewire/fw-iso.c | 225 ++++- + firewire-git/drivers/firewire/fw-ohci.c | 236 ++--- + firewire-git/drivers/firewire/fw-sbp2.c | 57 - + firewire-git/drivers/firewire/fw-topology.c | 28 + firewire-git/drivers/firewire/fw-topology.h | 19 + firewire-git/drivers/firewire/fw-transaction.c | 151 +-- + firewire-git/drivers/firewire/fw-transaction.h | 125 --- + include/linux/firewire-cdev.h | 170 +++- + 12 files changed, 1359 insertions(+), 784 deletions(-) + +diff -Naurp linux-2.6-git/drivers/firewire/fw-card.c firewire-git/drivers/firewire/fw-card.c +--- linux-2.6-git/drivers/firewire/fw-card.c 2009-01-30 13:39:02.989651512 -0500 ++++ firewire-git/drivers/firewire/fw-card.c 2009-01-30 13:35:51.859771884 -0500 +@@ -63,8 +63,7 @@ static int descriptor_count; + #define BIB_CMC ((1) << 30) + #define BIB_IMC ((1) << 31) + +-static u32 * +-generate_config_rom(struct fw_card *card, size_t *config_rom_length) ++static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length) + { + struct fw_descriptor *desc; + static u32 config_rom[256]; +@@ -128,8 +127,7 @@ generate_config_rom(struct fw_card *card + return config_rom; + } + +-static void +-update_config_roms(void) ++static void update_config_roms(void) + { + struct fw_card *card; + u32 *config_rom; +@@ -141,8 +139,7 @@ update_config_roms(void) + } + } + +-int +-fw_core_add_descriptor(struct fw_descriptor *desc) ++int fw_core_add_descriptor(struct fw_descriptor *desc) + { + size_t i; + +@@ -171,8 +168,7 @@ fw_core_add_descriptor(struct fw_descrip + return 0; + } + +-void +-fw_core_remove_descriptor(struct fw_descriptor *desc) ++void 
fw_core_remove_descriptor(struct fw_descriptor *desc) + { + mutex_lock(&card_mutex); + +@@ -189,8 +185,7 @@ static const char gap_count_table[] = { + 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40 + }; + +-void +-fw_schedule_bm_work(struct fw_card *card, unsigned long delay) ++void fw_schedule_bm_work(struct fw_card *card, unsigned long delay) + { + int scheduled; + +@@ -200,8 +195,7 @@ fw_schedule_bm_work(struct fw_card *card + fw_card_put(card); + } + +-static void +-fw_card_bm_work(struct work_struct *work) ++static void fw_card_bm_work(struct work_struct *work) + { + struct fw_card *card = container_of(work, struct fw_card, work.work); + struct fw_device *root_device; +@@ -371,17 +365,16 @@ fw_card_bm_work(struct work_struct *work + fw_card_put(card); + } + +-static void +-flush_timer_callback(unsigned long data) ++static void flush_timer_callback(unsigned long data) + { + struct fw_card *card = (struct fw_card *)data; + + fw_flush_transactions(card); + } + +-void +-fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, +- struct device *device) ++void fw_card_initialize(struct fw_card *card, ++ const struct fw_card_driver *driver, ++ struct device *device) + { + static atomic_t index = ATOMIC_INIT(-1); + +@@ -406,9 +399,8 @@ fw_card_initialize(struct fw_card *card, + } + EXPORT_SYMBOL(fw_card_initialize); + +-int +-fw_card_add(struct fw_card *card, +- u32 max_receive, u32 link_speed, u64 guid) ++int fw_card_add(struct fw_card *card, ++ u32 max_receive, u32 link_speed, u64 guid) + { + u32 *config_rom; + size_t length; +@@ -435,23 +427,20 @@ EXPORT_SYMBOL(fw_card_add); + * dummy driver just fails all IO. + */ + +-static int +-dummy_enable(struct fw_card *card, u32 *config_rom, size_t length) ++static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length) + { + BUG(); + return -1; + } + +-static int +-dummy_update_phy_reg(struct fw_card *card, int address, +- int clear_bits, int set_bits) ++static int dummy_update_phy_reg(struct fw_card *card, int address, ++ int clear_bits, int set_bits) + { + return -ENODEV; + } + +-static int +-dummy_set_config_rom(struct fw_card *card, +- u32 *config_rom, size_t length) ++static int dummy_set_config_rom(struct fw_card *card, ++ u32 *config_rom, size_t length) + { + /* + * We take the card out of card_list before setting the dummy +@@ -461,27 +450,23 @@ dummy_set_config_rom(struct fw_card *car + return -1; + } + +-static void +-dummy_send_request(struct fw_card *card, struct fw_packet *packet) ++static void dummy_send_request(struct fw_card *card, struct fw_packet *packet) + { + packet->callback(packet, card, -ENODEV); + } + +-static void +-dummy_send_response(struct fw_card *card, struct fw_packet *packet) ++static void dummy_send_response(struct fw_card *card, struct fw_packet *packet) + { + packet->callback(packet, card, -ENODEV); + } + +-static int +-dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet) ++static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet) + { + return -ENOENT; + } + +-static int +-dummy_enable_phys_dma(struct fw_card *card, +- int node_id, int generation) ++static int dummy_enable_phys_dma(struct fw_card *card, ++ int node_id, int generation) + { + return -ENODEV; + } +@@ -496,16 +481,14 @@ static struct fw_card_driver dummy_drive + .enable_phys_dma = dummy_enable_phys_dma, + }; + +-void +-fw_card_release(struct kref *kref) ++void fw_card_release(struct kref *kref) + { + struct fw_card *card = container_of(kref, struct fw_card, kref); + + 
complete(&card->done); + } + +-void +-fw_core_remove_card(struct fw_card *card) ++void fw_core_remove_card(struct fw_card *card) + { + card->driver->update_phy_reg(card, 4, + PHY_LINK_ACTIVE | PHY_CONTENDER, 0); +@@ -529,8 +512,7 @@ fw_core_remove_card(struct fw_card *card + } + EXPORT_SYMBOL(fw_core_remove_card); + +-int +-fw_core_initiate_bus_reset(struct fw_card *card, int short_reset) ++int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset) + { + int reg = short_reset ? 5 : 1; + int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET; +diff -Naurp linux-2.6-git/drivers/firewire/fw-cdev.c firewire-git/drivers/firewire/fw-cdev.c +--- linux-2.6-git/drivers/firewire/fw-cdev.c 2008-11-04 11:19:19.000000000 -0500 ++++ firewire-git/drivers/firewire/fw-cdev.c 2009-01-30 13:35:51.860646788 -0500 +@@ -18,87 +18,162 @@ + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +-#include +-#include +-#include +-#include ++#include ++#include + #include +-#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include + #include + #include ++#include + #include +-#include +-#include +-#include +-#include +-#include ++#include ++#include ++#include ++ + #include + #include +-#include "fw-transaction.h" +-#include "fw-topology.h" ++ + #include "fw-device.h" ++#include "fw-topology.h" ++#include "fw-transaction.h" ++ ++struct client { ++ u32 version; ++ struct fw_device *device; ++ ++ spinlock_t lock; ++ bool in_shutdown; ++ struct idr resource_idr; ++ struct list_head event_list; ++ wait_queue_head_t wait; ++ u64 bus_reset_closure; ++ ++ struct fw_iso_context *iso_context; ++ u64 iso_closure; ++ struct fw_iso_buffer buffer; ++ unsigned long vm_start; + +-struct client; +-struct client_resource { + struct list_head link; +- void (*release)(struct client *client, struct client_resource *r); +- u32 handle; ++ struct kref kref; ++}; ++ ++static inline void client_get(struct client *client) ++{ ++ kref_get(&client->kref); ++} ++ ++static void client_release(struct kref *kref) ++{ ++ struct client *client = container_of(kref, struct client, kref); ++ ++ fw_device_put(client->device); ++ kfree(client); ++} ++ ++static void client_put(struct client *client) ++{ ++ kref_put(&client->kref, client_release); ++} ++ ++struct client_resource; ++typedef void (*client_resource_release_fn_t)(struct client *, ++ struct client_resource *); ++struct client_resource { ++ client_resource_release_fn_t release; ++ int handle; ++}; ++ ++struct address_handler_resource { ++ struct client_resource resource; ++ struct fw_address_handler handler; ++ __u64 closure; ++ struct client *client; ++}; ++ ++struct outbound_transaction_resource { ++ struct client_resource resource; ++ struct fw_transaction transaction; ++}; ++ ++struct inbound_transaction_resource { ++ struct client_resource resource; ++ struct fw_request *request; ++ void *data; ++ size_t length; + }; + ++struct descriptor_resource { ++ struct client_resource resource; ++ struct fw_descriptor descriptor; ++ u32 data[0]; ++}; ++ ++struct iso_resource { ++ struct client_resource resource; ++ struct client *client; ++ /* Schedule work and access todo only with client->lock held. 
*/ ++ struct delayed_work work; ++ enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC, ++ ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo; ++ int generation; ++ u64 channels; ++ s32 bandwidth; ++ struct iso_resource_event *e_alloc, *e_dealloc; ++}; ++ ++static void schedule_iso_resource(struct iso_resource *); ++static void release_iso_resource(struct client *, struct client_resource *); ++ + /* + * dequeue_event() just kfree()'s the event, so the event has to be +- * the first field in the struct. ++ * the first field in a struct XYZ_event. + */ +- + struct event { + struct { void *data; size_t size; } v[2]; + struct list_head link; + }; + +-struct bus_reset { ++struct bus_reset_event { + struct event event; + struct fw_cdev_event_bus_reset reset; + }; + +-struct response { ++struct outbound_transaction_event { + struct event event; +- struct fw_transaction transaction; + struct client *client; +- struct client_resource resource; ++ struct outbound_transaction_resource r; + struct fw_cdev_event_response response; + }; + +-struct iso_interrupt { ++struct inbound_transaction_event { + struct event event; +- struct fw_cdev_event_iso_interrupt interrupt; ++ struct fw_cdev_event_request request; + }; + +-struct client { +- u32 version; +- struct fw_device *device; +- spinlock_t lock; +- u32 resource_handle; +- struct list_head resource_list; +- struct list_head event_list; +- wait_queue_head_t wait; +- u64 bus_reset_closure; +- +- struct fw_iso_context *iso_context; +- u64 iso_closure; +- struct fw_iso_buffer buffer; +- unsigned long vm_start; ++struct iso_interrupt_event { ++ struct event event; ++ struct fw_cdev_event_iso_interrupt interrupt; ++}; + +- struct list_head link; ++struct iso_resource_event { ++ struct event event; ++ struct fw_cdev_event_iso_resource resource; + }; + +-static inline void __user * +-u64_to_uptr(__u64 value) ++static inline void __user *u64_to_uptr(__u64 value) + { + return (void __user *)(unsigned long)value; + } + +-static inline __u64 +-uptr_to_u64(void __user *ptr) ++static inline __u64 uptr_to_u64(void __user *ptr) + { + return (__u64)(unsigned long)ptr; + } +@@ -107,7 +182,6 @@ static int fw_device_op_open(struct inod + { + struct fw_device *device; + struct client *client; +- unsigned long flags; + + device = fw_device_get_by_devt(inode->i_rdev); + if (device == NULL) +@@ -125,16 +199,17 @@ static int fw_device_op_open(struct inod + } + + client->device = device; +- INIT_LIST_HEAD(&client->event_list); +- INIT_LIST_HEAD(&client->resource_list); + spin_lock_init(&client->lock); ++ idr_init(&client->resource_idr); ++ INIT_LIST_HEAD(&client->event_list); + init_waitqueue_head(&client->wait); ++ kref_init(&client->kref); + + file->private_data = client; + +- spin_lock_irqsave(&device->card->lock, flags); ++ mutex_lock(&device->client_list_mutex); + list_add_tail(&client->link, &device->client_list); +- spin_unlock_irqrestore(&device->card->lock, flags); ++ mutex_unlock(&device->client_list_mutex); + + return 0; + } +@@ -150,68 +225,69 @@ static void queue_event(struct client *c + event->v[1].size = size1; + + spin_lock_irqsave(&client->lock, flags); +- list_add_tail(&event->link, &client->event_list); ++ if (client->in_shutdown) ++ kfree(event); ++ else ++ list_add_tail(&event->link, &client->event_list); + spin_unlock_irqrestore(&client->lock, flags); + + wake_up_interruptible(&client->wait); + } + +-static int +-dequeue_event(struct client *client, char __user *buffer, size_t count) ++static int dequeue_event(struct client *client, ++ char __user *buffer, 
size_t count) + { +- unsigned long flags; + struct event *event; + size_t size, total; +- int i, retval; ++ int i, ret; + +- retval = wait_event_interruptible(client->wait, +- !list_empty(&client->event_list) || +- fw_device_is_shutdown(client->device)); +- if (retval < 0) +- return retval; ++ ret = wait_event_interruptible(client->wait, ++ !list_empty(&client->event_list) || ++ fw_device_is_shutdown(client->device)); ++ if (ret < 0) ++ return ret; + + if (list_empty(&client->event_list) && + fw_device_is_shutdown(client->device)) + return -ENODEV; + +- spin_lock_irqsave(&client->lock, flags); +- event = container_of(client->event_list.next, struct event, link); ++ spin_lock_irq(&client->lock); ++ event = list_first_entry(&client->event_list, struct event, link); + list_del(&event->link); +- spin_unlock_irqrestore(&client->lock, flags); ++ spin_unlock_irq(&client->lock); + + total = 0; + for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) { + size = min(event->v[i].size, count - total); + if (copy_to_user(buffer + total, event->v[i].data, size)) { +- retval = -EFAULT; ++ ret = -EFAULT; + goto out; + } + total += size; + } +- retval = total; ++ ret = total; + + out: + kfree(event); + +- return retval; ++ return ret; + } + +-static ssize_t +-fw_device_op_read(struct file *file, +- char __user *buffer, size_t count, loff_t *offset) ++static ssize_t fw_device_op_read(struct file *file, char __user *buffer, ++ size_t count, loff_t *offset) + { + struct client *client = file->private_data; + + return dequeue_event(client, buffer, count); + } + +-/* caller must hold card->lock so that node pointers can be dereferenced here */ +-static void +-fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, +- struct client *client) ++static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, ++ struct client *client) + { + struct fw_card *card = client->device->card; + ++ spin_lock_irq(&card->lock); ++ + event->closure = client->bus_reset_closure; + event->type = FW_CDEV_EVENT_BUS_RESET; + event->generation = client->device->generation; +@@ -220,39 +296,49 @@ fill_bus_reset_event(struct fw_cdev_even + event->bm_node_id = 0; /* FIXME: We don't track the BM. 
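The comment above dequeue_event() is the layout contract for every event type in this file: the generic struct event must be the first member of each *_event container, so freeing through the embedded pointer frees the whole container. A minimal stand-alone sketch of that rule, with plain calloc()/free() standing in for the kernel allocator and made-up field layouts:

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

/* Stand-ins for the kernel structures; the field layouts are illustrative only. */
struct event {
        void *data0, *data1;
        size_t size0, size1;
};

struct bus_reset_event {
        struct event event;        /* must stay the first member */
        unsigned int generation;   /* payload that rides along with it */
};

int main(void)
{
        struct bus_reset_event *e = calloc(1, sizeof(*e));
        struct event *generic = &e->event;

        /* Because 'event' sits at offset 0, a pointer to the embedded event
         * and a pointer to the container are the same address, so releasing
         * the generic pointer releases the whole container. That is the
         * property dequeue_event() depends on when it frees events. */
        assert(offsetof(struct bus_reset_event, event) == 0);
        assert((void *)generic == (void *)e);

        free(generic);
        return 0;
}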
*/ + event->irm_node_id = card->irm_node->node_id; + event->root_node_id = card->root_node->node_id; ++ ++ spin_unlock_irq(&card->lock); + } + +-static void +-for_each_client(struct fw_device *device, +- void (*callback)(struct client *client)) ++static void for_each_client(struct fw_device *device, ++ void (*callback)(struct client *client)) + { +- struct fw_card *card = device->card; + struct client *c; +- unsigned long flags; +- +- spin_lock_irqsave(&card->lock, flags); + ++ mutex_lock(&device->client_list_mutex); + list_for_each_entry(c, &device->client_list, link) + callback(c); ++ mutex_unlock(&device->client_list_mutex); ++} ++ ++static int schedule_reallocations(int id, void *p, void *data) ++{ ++ struct client_resource *r = p; + +- spin_unlock_irqrestore(&card->lock, flags); ++ if (r->release == release_iso_resource) ++ schedule_iso_resource(container_of(r, ++ struct iso_resource, resource)); ++ return 0; + } + +-static void +-queue_bus_reset_event(struct client *client) ++static void queue_bus_reset_event(struct client *client) + { +- struct bus_reset *bus_reset; ++ struct bus_reset_event *e; + +- bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC); +- if (bus_reset == NULL) { ++ e = kzalloc(sizeof(*e), GFP_KERNEL); ++ if (e == NULL) { + fw_notify("Out of memory when allocating bus reset event\n"); + return; + } + +- fill_bus_reset_event(&bus_reset->reset, client); ++ fill_bus_reset_event(&e->reset, client); ++ ++ queue_event(client, &e->event, ++ &e->reset, sizeof(e->reset), NULL, 0); + +- queue_event(client, &bus_reset->event, +- &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0); ++ spin_lock_irq(&client->lock); ++ idr_for_each(&client->resource_idr, schedule_reallocations, client); ++ spin_unlock_irq(&client->lock); + } + + void fw_device_cdev_update(struct fw_device *device) +@@ -274,11 +360,11 @@ static int ioctl_get_info(struct client + { + struct fw_cdev_get_info *get_info = buffer; + struct fw_cdev_event_bus_reset bus_reset; +- struct fw_card *card = client->device->card; + unsigned long ret = 0; + + client->version = get_info->version; + get_info->version = FW_CDEV_VERSION; ++ get_info->card = client->device->card->index; + + down_read(&fw_device_rwsem); + +@@ -300,49 +386,61 @@ static int ioctl_get_info(struct client + client->bus_reset_closure = get_info->bus_reset_closure; + if (get_info->bus_reset != 0) { + void __user *uptr = u64_to_uptr(get_info->bus_reset); +- unsigned long flags; + +- spin_lock_irqsave(&card->lock, flags); + fill_bus_reset_event(&bus_reset, client); +- spin_unlock_irqrestore(&card->lock, flags); +- + if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset))) + return -EFAULT; + } + +- get_info->card = card->index; +- + return 0; + } + +-static void +-add_client_resource(struct client *client, struct client_resource *resource) ++static int add_client_resource(struct client *client, ++ struct client_resource *resource, gfp_t gfp_mask) + { + unsigned long flags; ++ int ret; ++ ++ retry: ++ if (idr_pre_get(&client->resource_idr, gfp_mask | __GFP_ZERO) == 0) ++ return -ENOMEM; + + spin_lock_irqsave(&client->lock, flags); +- list_add_tail(&resource->link, &client->resource_list); +- resource->handle = client->resource_handle++; ++ if (client->in_shutdown) ++ ret = -ECANCELED; ++ else ++ ret = idr_get_new(&client->resource_idr, resource, ++ &resource->handle); ++ if (ret >= 0) { ++ client_get(client); ++ if (resource->release == release_iso_resource) ++ schedule_iso_resource(container_of(resource, ++ struct iso_resource, resource)); ++ } + 
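From user space, the get_info path above is how a client learns the card index and obtains a first snapshot of the bus topology before any bus reset event arrives. A hedged sketch, assuming the FW_CDEV_IOC_GET_INFO ioctl and the structure layouts from <linux/firewire-cdev.h> as extended by this patch series; /dev/fw0 is only an example device node:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

int main(void)
{
        struct fw_cdev_event_bus_reset reset;
        struct fw_cdev_get_info info;
        int fd = open("/dev/fw0", O_RDWR);   /* example path */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&info, 0, sizeof(info));
        memset(&reset, 0, sizeof(reset));
        info.version = FW_CDEV_VERSION;
        info.bus_reset = (__u64)(unsigned long)&reset; /* request a topology snapshot */
        info.bus_reset_closure = 0;

        if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) < 0) {
                perror("FW_CDEV_IOC_GET_INFO");
                close(fd);
                return 1;
        }

        printf("card %u, generation %u, local node 0x%x, IRM node 0x%x\n",
               info.card, reset.generation, reset.local_node_id,
               reset.irm_node_id);
        close(fd);
        return 0;
}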
spin_unlock_irqrestore(&client->lock, flags); ++ ++ if (ret == -EAGAIN) ++ goto retry; ++ ++ return ret < 0 ? ret : 0; + } + +-static int +-release_client_resource(struct client *client, u32 handle, +- struct client_resource **resource) ++static int release_client_resource(struct client *client, u32 handle, ++ client_resource_release_fn_t release, ++ struct client_resource **resource) + { + struct client_resource *r; +- unsigned long flags; + +- spin_lock_irqsave(&client->lock, flags); +- list_for_each_entry(r, &client->resource_list, link) { +- if (r->handle == handle) { +- list_del(&r->link); +- break; +- } +- } +- spin_unlock_irqrestore(&client->lock, flags); ++ spin_lock_irq(&client->lock); ++ if (client->in_shutdown) ++ r = NULL; ++ else ++ r = idr_find(&client->resource_idr, handle); ++ if (r && r->release == release) ++ idr_remove(&client->resource_idr, handle); ++ spin_unlock_irq(&client->lock); + +- if (&r->link == &client->resource_list) ++ if (!(r && r->release == release)) + return -EINVAL; + + if (resource) +@@ -350,203 +448,242 @@ release_client_resource(struct client *c + else + r->release(client, r); + ++ client_put(client); ++ + return 0; + } + +-static void +-release_transaction(struct client *client, struct client_resource *resource) ++static void release_transaction(struct client *client, ++ struct client_resource *resource) + { +- struct response *response = +- container_of(resource, struct response, resource); ++ struct outbound_transaction_resource *r = container_of(resource, ++ struct outbound_transaction_resource, resource); + +- fw_cancel_transaction(client->device->card, &response->transaction); ++ fw_cancel_transaction(client->device->card, &r->transaction); + } + +-static void +-complete_transaction(struct fw_card *card, int rcode, +- void *payload, size_t length, void *data) ++static void complete_transaction(struct fw_card *card, int rcode, ++ void *payload, size_t length, void *data) + { +- struct response *response = data; +- struct client *client = response->client; ++ struct outbound_transaction_event *e = data; ++ struct fw_cdev_event_response *rsp = &e->response; ++ struct client *client = e->client; + unsigned long flags; +- struct fw_cdev_event_response *r = &response->response; + +- if (length < r->length) +- r->length = length; ++ if (length < rsp->length) ++ rsp->length = length; + if (rcode == RCODE_COMPLETE) +- memcpy(r->data, payload, r->length); ++ memcpy(rsp->data, payload, rsp->length); + + spin_lock_irqsave(&client->lock, flags); +- list_del(&response->resource.link); ++ /* ++ * 1. If called while in shutdown, the idr tree must be left untouched. ++ * The idr handle will be removed and the client reference will be ++ * dropped later. ++ * 2. If the call chain was release_client_resource -> ++ * release_transaction -> complete_transaction (instead of a normal ++ * conclusion of the transaction), i.e. if this resource was already ++ * unregistered from the idr, the client reference will be dropped ++ * by release_client_resource and we must not drop it here. 
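complete_transaction() above is where an outbound request turns into an FW_CDEV_EVENT_RESPONSE that the client collects with read(). For illustration, a user-space sketch of that round trip, assuming the send-request ioctl and event structures from <linux/firewire-cdev.h>; tcode 0x4 and the Config ROM offset are the standard IEEE 1394 values, and a real client would pass the current bus generation rather than 0:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

#define CONFIG_ROM_FIRST_QUADLET 0xfffff0000400ULL /* CSR space, start of Config ROM */

int main(void)
{
        union {
                struct fw_cdev_event_common common;
                struct fw_cdev_event_response response;
                char buf[256];
        } u;
        struct fw_cdev_send_request rq;
        int fd = open("/dev/fw0", O_RDWR);   /* example path */

        if (fd < 0)
                return 1;

        memset(&rq, 0, sizeof(rq));
        rq.tcode = 0x4;                      /* TCODE_READ_QUADLET_REQUEST */
        rq.length = 4;
        rq.offset = CONFIG_ROM_FIRST_QUADLET;
        rq.closure = 0xdeadbeef;             /* echoed back in the response event */
        rq.generation = 0;                   /* a real client uses the current generation */

        if (ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &rq) < 0)
                return 1;

        /* Each read() hands back exactly one queued event. */
        if (read(fd, &u, sizeof(u)) < 0)
                return 1;

        if (u.common.type == FW_CDEV_EVENT_RESPONSE)
                printf("rcode %u, %u byte(s) of payload\n",
                       u.response.rcode, u.response.length);
        close(fd);
        return 0;
}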
++ */ ++ if (!client->in_shutdown && ++ idr_find(&client->resource_idr, e->r.resource.handle)) { ++ idr_remove(&client->resource_idr, e->r.resource.handle); ++ /* Drop the idr's reference */ ++ client_put(client); ++ } + spin_unlock_irqrestore(&client->lock, flags); + +- r->type = FW_CDEV_EVENT_RESPONSE; +- r->rcode = rcode; ++ rsp->type = FW_CDEV_EVENT_RESPONSE; ++ rsp->rcode = rcode; + + /* +- * In the case that sizeof(*r) doesn't align with the position of the ++ * In the case that sizeof(*rsp) doesn't align with the position of the + * data, and the read is short, preserve an extra copy of the data + * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless + * for short reads and some apps depended on it, this is both safe + * and prudent for compatibility. + */ +- if (r->length <= sizeof(*r) - offsetof(typeof(*r), data)) +- queue_event(client, &response->event, r, sizeof(*r), +- r->data, r->length); ++ if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data)) ++ queue_event(client, &e->event, rsp, sizeof(*rsp), ++ rsp->data, rsp->length); + else +- queue_event(client, &response->event, r, sizeof(*r) + r->length, ++ queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, + NULL, 0); ++ ++ /* Drop the transaction callback's reference */ ++ client_put(client); + } + +-static int ioctl_send_request(struct client *client, void *buffer) ++static int init_request(struct client *client, ++ struct fw_cdev_send_request *request, ++ int destination_id, int speed) + { +- struct fw_device *device = client->device; +- struct fw_cdev_send_request *request = buffer; +- struct response *response; ++ struct outbound_transaction_event *e; ++ int ret; + +- /* What is the biggest size we'll accept, really? */ +- if (request->length > 4096) +- return -EINVAL; ++ if (request->length > 4096 || request->length > 512 << speed) ++ return -EIO; + +- response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL); +- if (response == NULL) ++ e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL); ++ if (e == NULL) + return -ENOMEM; + +- response->client = client; +- response->response.length = request->length; +- response->response.closure = request->closure; ++ e->client = client; ++ e->response.length = request->length; ++ e->response.closure = request->closure; + + if (request->data && +- copy_from_user(response->response.data, ++ copy_from_user(e->response.data, + u64_to_uptr(request->data), request->length)) { +- kfree(response); +- return -EFAULT; ++ ret = -EFAULT; ++ goto failed; + } + +- response->resource.release = release_transaction; +- add_client_resource(client, &response->resource); +- +- fw_send_request(device->card, &response->transaction, +- request->tcode & 0x1f, +- device->node->node_id, +- request->generation, +- device->max_speed, +- request->offset, +- response->response.data, request->length, +- complete_transaction, response); ++ e->r.resource.release = release_transaction; ++ ret = add_client_resource(client, &e->r.resource, GFP_KERNEL); ++ if (ret < 0) ++ goto failed; ++ ++ /* Get a reference for the transaction callback */ ++ client_get(client); ++ ++ fw_send_request(client->device->card, &e->r.transaction, ++ request->tcode & 0x1f, destination_id, ++ request->generation, speed, request->offset, ++ e->response.data, request->length, ++ complete_transaction, e); + + if (request->data) + return sizeof(request) + request->length; + else + return sizeof(request); ++ failed: ++ kfree(e); ++ ++ return ret; + } + +-struct address_handler { +- struct 
fw_address_handler handler; +- __u64 closure; +- struct client *client; +- struct client_resource resource; +-}; ++static int ioctl_send_request(struct client *client, void *buffer) ++{ ++ struct fw_cdev_send_request *request = buffer; + +-struct request { +- struct fw_request *request; +- void *data; +- size_t length; +- struct client_resource resource; +-}; ++ switch (request->tcode) { ++ case TCODE_WRITE_QUADLET_REQUEST: ++ case TCODE_WRITE_BLOCK_REQUEST: ++ case TCODE_READ_QUADLET_REQUEST: ++ case TCODE_READ_BLOCK_REQUEST: ++ case TCODE_LOCK_MASK_SWAP: ++ case TCODE_LOCK_COMPARE_SWAP: ++ case TCODE_LOCK_FETCH_ADD: ++ case TCODE_LOCK_LITTLE_ADD: ++ case TCODE_LOCK_BOUNDED_ADD: ++ case TCODE_LOCK_WRAP_ADD: ++ case TCODE_LOCK_VENDOR_DEPENDENT: ++ break; ++ default: ++ return -EINVAL; ++ } + +-struct request_event { +- struct event event; +- struct fw_cdev_event_request request; +-}; ++ return init_request(client, request, client->device->node->node_id, ++ client->device->max_speed); ++} + +-static void +-release_request(struct client *client, struct client_resource *resource) ++static void release_request(struct client *client, ++ struct client_resource *resource) + { +- struct request *request = +- container_of(resource, struct request, resource); ++ struct inbound_transaction_resource *r = container_of(resource, ++ struct inbound_transaction_resource, resource); + +- fw_send_response(client->device->card, request->request, ++ fw_send_response(client->device->card, r->request, + RCODE_CONFLICT_ERROR); +- kfree(request); ++ kfree(r); + } + +-static void +-handle_request(struct fw_card *card, struct fw_request *r, +- int tcode, int destination, int source, +- int generation, int speed, +- unsigned long long offset, +- void *payload, size_t length, void *callback_data) +-{ +- struct address_handler *handler = callback_data; +- struct request *request; +- struct request_event *e; +- struct client *client = handler->client; ++static void handle_request(struct fw_card *card, struct fw_request *request, ++ int tcode, int destination, int source, ++ int generation, int speed, ++ unsigned long long offset, ++ void *payload, size_t length, void *callback_data) ++{ ++ struct address_handler_resource *handler = callback_data; ++ struct inbound_transaction_resource *r; ++ struct inbound_transaction_event *e; ++ int ret; + +- request = kmalloc(sizeof(*request), GFP_ATOMIC); ++ r = kmalloc(sizeof(*r), GFP_ATOMIC); + e = kmalloc(sizeof(*e), GFP_ATOMIC); +- if (request == NULL || e == NULL) { +- kfree(request); +- kfree(e); +- fw_send_response(card, r, RCODE_CONFLICT_ERROR); +- return; +- } +- +- request->request = r; +- request->data = payload; +- request->length = length; ++ if (r == NULL || e == NULL) ++ goto failed; + +- request->resource.release = release_request; +- add_client_resource(client, &request->resource); ++ r->request = request; ++ r->data = payload; ++ r->length = length; ++ ++ r->resource.release = release_request; ++ ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC); ++ if (ret < 0) ++ goto failed; + + e->request.type = FW_CDEV_EVENT_REQUEST; + e->request.tcode = tcode; + e->request.offset = offset; + e->request.length = length; +- e->request.handle = request->resource.handle; ++ e->request.handle = r->resource.handle; + e->request.closure = handler->closure; + +- queue_event(client, &e->event, ++ queue_event(handler->client, &e->event, + &e->request, sizeof(e->request), payload, length); ++ return; ++ ++ failed: ++ kfree(r); ++ kfree(e); ++ fw_send_response(card, 
request, RCODE_CONFLICT_ERROR); + } + +-static void +-release_address_handler(struct client *client, +- struct client_resource *resource) ++static void release_address_handler(struct client *client, ++ struct client_resource *resource) + { +- struct address_handler *handler = +- container_of(resource, struct address_handler, resource); ++ struct address_handler_resource *r = ++ container_of(resource, struct address_handler_resource, resource); + +- fw_core_remove_address_handler(&handler->handler); +- kfree(handler); ++ fw_core_remove_address_handler(&r->handler); ++ kfree(r); + } + + static int ioctl_allocate(struct client *client, void *buffer) + { + struct fw_cdev_allocate *request = buffer; +- struct address_handler *handler; ++ struct address_handler_resource *r; + struct fw_address_region region; ++ int ret; + +- handler = kmalloc(sizeof(*handler), GFP_KERNEL); +- if (handler == NULL) ++ r = kmalloc(sizeof(*r), GFP_KERNEL); ++ if (r == NULL) + return -ENOMEM; + + region.start = request->offset; + region.end = request->offset + request->length; +- handler->handler.length = request->length; +- handler->handler.address_callback = handle_request; +- handler->handler.callback_data = handler; +- handler->closure = request->closure; +- handler->client = client; +- +- if (fw_core_add_address_handler(&handler->handler, ®ion) < 0) { +- kfree(handler); +- return -EBUSY; ++ r->handler.length = request->length; ++ r->handler.address_callback = handle_request; ++ r->handler.callback_data = r; ++ r->closure = request->closure; ++ r->client = client; ++ ++ ret = fw_core_add_address_handler(&r->handler, ®ion); ++ if (ret < 0) { ++ kfree(r); ++ return ret; + } + +- handler->resource.release = release_address_handler; +- add_client_resource(client, &handler->resource); +- request->handle = handler->resource.handle; ++ r->resource.release = release_address_handler; ++ ret = add_client_resource(client, &r->resource, GFP_KERNEL); ++ if (ret < 0) { ++ release_address_handler(client, &r->resource); ++ return ret; ++ } ++ request->handle = r->resource.handle; + + return 0; + } +@@ -555,18 +692,22 @@ static int ioctl_deallocate(struct clien + { + struct fw_cdev_deallocate *request = buffer; + +- return release_client_resource(client, request->handle, NULL); ++ return release_client_resource(client, request->handle, ++ release_address_handler, NULL); + } + + static int ioctl_send_response(struct client *client, void *buffer) + { + struct fw_cdev_send_response *request = buffer; + struct client_resource *resource; +- struct request *r; ++ struct inbound_transaction_resource *r; + +- if (release_client_resource(client, request->handle, &resource) < 0) ++ if (release_client_resource(client, request->handle, ++ release_request, &resource) < 0) + return -EINVAL; +- r = container_of(resource, struct request, resource); ++ ++ r = container_of(resource, struct inbound_transaction_resource, ++ resource); + if (request->length < r->length) + r->length = request->length; + if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) +@@ -588,85 +729,84 @@ static int ioctl_initiate_bus_reset(stru + return fw_core_initiate_bus_reset(client->device->card, short_reset); + } + +-struct descriptor { +- struct fw_descriptor d; +- struct client_resource resource; +- u32 data[0]; +-}; +- + static void release_descriptor(struct client *client, + struct client_resource *resource) + { +- struct descriptor *descriptor = +- container_of(resource, struct descriptor, resource); ++ struct descriptor_resource *r = ++ 
container_of(resource, struct descriptor_resource, resource); + +- fw_core_remove_descriptor(&descriptor->d); +- kfree(descriptor); ++ fw_core_remove_descriptor(&r->descriptor); ++ kfree(r); + } + + static int ioctl_add_descriptor(struct client *client, void *buffer) + { + struct fw_cdev_add_descriptor *request = buffer; +- struct descriptor *descriptor; +- int retval; ++ struct descriptor_resource *r; ++ int ret; + + if (request->length > 256) + return -EINVAL; + +- descriptor = +- kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL); +- if (descriptor == NULL) ++ r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL); ++ if (r == NULL) + return -ENOMEM; + +- if (copy_from_user(descriptor->data, ++ if (copy_from_user(r->data, + u64_to_uptr(request->data), request->length * 4)) { +- kfree(descriptor); +- return -EFAULT; ++ ret = -EFAULT; ++ goto failed; + } + +- descriptor->d.length = request->length; +- descriptor->d.immediate = request->immediate; +- descriptor->d.key = request->key; +- descriptor->d.data = descriptor->data; +- +- retval = fw_core_add_descriptor(&descriptor->d); +- if (retval < 0) { +- kfree(descriptor); +- return retval; ++ r->descriptor.length = request->length; ++ r->descriptor.immediate = request->immediate; ++ r->descriptor.key = request->key; ++ r->descriptor.data = r->data; ++ ++ ret = fw_core_add_descriptor(&r->descriptor); ++ if (ret < 0) ++ goto failed; ++ ++ r->resource.release = release_descriptor; ++ ret = add_client_resource(client, &r->resource, GFP_KERNEL); ++ if (ret < 0) { ++ fw_core_remove_descriptor(&r->descriptor); ++ goto failed; + } +- +- descriptor->resource.release = release_descriptor; +- add_client_resource(client, &descriptor->resource); +- request->handle = descriptor->resource.handle; ++ request->handle = r->resource.handle; + + return 0; ++ failed: ++ kfree(r); ++ ++ return ret; + } + + static int ioctl_remove_descriptor(struct client *client, void *buffer) + { + struct fw_cdev_remove_descriptor *request = buffer; + +- return release_client_resource(client, request->handle, NULL); ++ return release_client_resource(client, request->handle, ++ release_descriptor, NULL); + } + +-static void +-iso_callback(struct fw_iso_context *context, u32 cycle, +- size_t header_length, void *header, void *data) ++static void iso_callback(struct fw_iso_context *context, u32 cycle, ++ size_t header_length, void *header, void *data) + { + struct client *client = data; +- struct iso_interrupt *irq; ++ struct iso_interrupt_event *e; + +- irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC); +- if (irq == NULL) ++ e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC); ++ if (e == NULL) + return; + +- irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; +- irq->interrupt.closure = client->iso_closure; +- irq->interrupt.cycle = cycle; +- irq->interrupt.header_length = header_length; +- memcpy(irq->interrupt.header, header, header_length); +- queue_event(client, &irq->event, &irq->interrupt, +- sizeof(irq->interrupt) + header_length, NULL, 0); ++ e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; ++ e->interrupt.closure = client->iso_closure; ++ e->interrupt.cycle = cycle; ++ e->interrupt.header_length = header_length; ++ memcpy(e->interrupt.header, header, header_length); ++ queue_event(client, &e->event, &e->interrupt, ++ sizeof(e->interrupt) + header_length, NULL, 0); + } + + static int ioctl_create_iso_context(struct client *client, void *buffer) +@@ -871,6 +1011,237 @@ static int ioctl_get_cycle_timer(struct + return 0; + } + ++static void 
iso_resource_work(struct work_struct *work) ++{ ++ struct iso_resource_event *e; ++ struct iso_resource *r = ++ container_of(work, struct iso_resource, work.work); ++ struct client *client = r->client; ++ int generation, channel, bandwidth, todo; ++ bool skip, free, success; ++ ++ spin_lock_irq(&client->lock); ++ generation = client->device->generation; ++ todo = r->todo; ++ /* Allow 1000ms grace period for other reallocations. */ ++ if (todo == ISO_RES_ALLOC && ++ time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) { ++ if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3))) ++ client_get(client); ++ skip = true; ++ } else { ++ /* We could be called twice within the same generation. */ ++ skip = todo == ISO_RES_REALLOC && ++ r->generation == generation; ++ } ++ free = todo == ISO_RES_DEALLOC || ++ todo == ISO_RES_ALLOC_ONCE || ++ todo == ISO_RES_DEALLOC_ONCE; ++ r->generation = generation; ++ spin_unlock_irq(&client->lock); ++ ++ if (skip) ++ goto out; ++ ++ bandwidth = r->bandwidth; ++ ++ fw_iso_resource_manage(client->device->card, generation, ++ r->channels, &channel, &bandwidth, ++ todo == ISO_RES_ALLOC || ++ todo == ISO_RES_REALLOC || ++ todo == ISO_RES_ALLOC_ONCE); ++ /* ++ * Is this generation outdated already? As long as this resource sticks ++ * in the idr, it will be scheduled again for a newer generation or at ++ * shutdown. ++ */ ++ if (channel == -EAGAIN && ++ (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC)) ++ goto out; ++ ++ success = channel >= 0 || bandwidth > 0; ++ ++ spin_lock_irq(&client->lock); ++ /* ++ * Transit from allocation to reallocation, except if the client ++ * requested deallocation in the meantime. ++ */ ++ if (r->todo == ISO_RES_ALLOC) ++ r->todo = ISO_RES_REALLOC; ++ /* ++ * Allocation or reallocation failure? Pull this resource out of the ++ * idr and prepare for deletion, unless the client is shutting down. 
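From user space, the allocation state machine above is driven by a single ioctl plus events: the client asks once, and iso_resource_work() reports the initial result and every post-bus-reset reallocation through FW_CDEV_EVENT_ISO_RESOURCE_* events. A sketch under the assumption that the FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl and event definitions added elsewhere in this patch series are available in <linux/firewire-cdev.h>; the channel numbers and bandwidth figure are arbitrary examples:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

int main(void)
{
        union {
                struct fw_cdev_event_common common;
                struct fw_cdev_event_iso_resource iso_resource;
                char buf[256];
        } u;
        struct fw_cdev_allocate_iso_resource req;
        int fd = open("/dev/fw0", O_RDWR);   /* example path */

        if (fd < 0)
                return 1;

        memset(&req, 0, sizeof(req));
        req.channels = 1ULL << 35 | 1ULL << 36;  /* acceptable channels: 35 or 36 */
        req.bandwidth = 2400;                    /* IEEE 1394 bandwidth allocation units */
        req.closure = 0;

        if (ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &req) < 0)
                return 1;

        /* The worker answers asynchronously; the same event type also reports
         * the outcome of the automatic reallocation after each bus reset. */
        if (read(fd, &u, sizeof(u)) >= 0 &&
            u.common.type == FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED)
                printf("got channel %d, bandwidth %d, handle %u\n",
                       u.iso_resource.channel, u.iso_resource.bandwidth,
                       u.iso_resource.handle);
        close(fd);
        return 0;
}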
++ */ ++ if (r->todo == ISO_RES_REALLOC && !success && ++ !client->in_shutdown && ++ idr_find(&client->resource_idr, r->resource.handle)) { ++ idr_remove(&client->resource_idr, r->resource.handle); ++ client_put(client); ++ free = true; ++ } ++ spin_unlock_irq(&client->lock); ++ ++ if (todo == ISO_RES_ALLOC && channel >= 0) ++ r->channels = 1ULL << channel; ++ ++ if (todo == ISO_RES_REALLOC && success) ++ goto out; ++ ++ if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) { ++ e = r->e_alloc; ++ r->e_alloc = NULL; ++ } else { ++ e = r->e_dealloc; ++ r->e_dealloc = NULL; ++ } ++ e->resource.handle = r->resource.handle; ++ e->resource.channel = channel; ++ e->resource.bandwidth = bandwidth; ++ ++ queue_event(client, &e->event, ++ &e->resource, sizeof(e->resource), NULL, 0); ++ ++ if (free) { ++ cancel_delayed_work(&r->work); ++ kfree(r->e_alloc); ++ kfree(r->e_dealloc); ++ kfree(r); ++ } ++ out: ++ client_put(client); ++} ++ ++static void schedule_iso_resource(struct iso_resource *r) ++{ ++ client_get(r->client); ++ if (!schedule_delayed_work(&r->work, 0)) ++ client_put(r->client); ++} ++ ++static void release_iso_resource(struct client *client, ++ struct client_resource *resource) ++{ ++ struct iso_resource *r = ++ container_of(resource, struct iso_resource, resource); ++ ++ spin_lock_irq(&client->lock); ++ r->todo = ISO_RES_DEALLOC; ++ schedule_iso_resource(r); ++ spin_unlock_irq(&client->lock); ++} ++ ++static int init_iso_resource(struct client *client, ++ struct fw_cdev_allocate_iso_resource *request, int todo) ++{ ++ struct iso_resource_event *e1, *e2; ++ struct iso_resource *r; ++ int ret; ++ ++ if ((request->channels == 0 && request->bandwidth == 0) || ++ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL || ++ request->bandwidth < 0) ++ return -EINVAL; ++ ++ r = kmalloc(sizeof(*r), GFP_KERNEL); ++ e1 = kmalloc(sizeof(*e1), GFP_KERNEL); ++ e2 = kmalloc(sizeof(*e2), GFP_KERNEL); ++ if (r == NULL || e1 == NULL || e2 == NULL) { ++ ret = -ENOMEM; ++ goto fail; ++ } ++ ++ INIT_DELAYED_WORK(&r->work, iso_resource_work); ++ r->client = client; ++ r->todo = todo; ++ r->generation = -1; ++ r->channels = request->channels; ++ r->bandwidth = request->bandwidth; ++ r->e_alloc = e1; ++ r->e_dealloc = e2; ++ ++ e1->resource.closure = request->closure; ++ e1->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED; ++ e2->resource.closure = request->closure; ++ e2->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED; ++ ++ if (todo == ISO_RES_ALLOC) { ++ r->resource.release = release_iso_resource; ++ ret = add_client_resource(client, &r->resource, GFP_KERNEL); ++ if (ret < 0) ++ goto fail; ++ } else { ++ r->resource.release = NULL; ++ r->resource.handle = -1; ++ schedule_iso_resource(r); ++ } ++ request->handle = r->resource.handle; ++ ++ return 0; ++ fail: ++ kfree(r); ++ kfree(e1); ++ kfree(e2); ++ ++ return ret; ++} ++ ++static int ioctl_allocate_iso_resource(struct client *client, void *buffer) ++{ ++ struct fw_cdev_allocate_iso_resource *request = buffer; ++ ++ return init_iso_resource(client, request, ISO_RES_ALLOC); ++} ++ ++static int ioctl_deallocate_iso_resource(struct client *client, void *buffer) ++{ ++ struct fw_cdev_deallocate *request = buffer; ++ ++ return release_client_resource(client, request->handle, ++ release_iso_resource, NULL); ++} ++ ++static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer) ++{ ++ struct fw_cdev_allocate_iso_resource *request = buffer; ++ ++ return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE); ++} ++ ++static int 
ioctl_deallocate_iso_resource_once(struct client *client, void *buffer) ++{ ++ struct fw_cdev_allocate_iso_resource *request = buffer; ++ ++ return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE); ++} ++ ++static int ioctl_get_speed(struct client *client, void *buffer) ++{ ++ struct fw_cdev_get_speed *request = buffer; ++ ++ request->max_speed = client->device->max_speed; ++ ++ return 0; ++} ++ ++static int ioctl_send_broadcast_request(struct client *client, void *buffer) ++{ ++ struct fw_cdev_send_request *request = buffer; ++ ++ switch (request->tcode) { ++ case TCODE_WRITE_QUADLET_REQUEST: ++ case TCODE_WRITE_BLOCK_REQUEST: ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ /* Security policy: Only allow accesses to Units Space. */ ++ if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END) ++ return -EACCES; ++ ++ return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100); ++} ++ + static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { + ioctl_get_info, + ioctl_send_request, +@@ -885,13 +1256,19 @@ static int (* const ioctl_handlers[])(st + ioctl_start_iso, + ioctl_stop_iso, + ioctl_get_cycle_timer, ++ ioctl_allocate_iso_resource, ++ ioctl_deallocate_iso_resource, ++ ioctl_allocate_iso_resource_once, ++ ioctl_deallocate_iso_resource_once, ++ ioctl_get_speed, ++ ioctl_send_broadcast_request, + }; + +-static int +-dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) ++static int dispatch_ioctl(struct client *client, ++ unsigned int cmd, void __user *arg) + { + char buffer[256]; +- int retval; ++ int ret; + + if (_IOC_TYPE(cmd) != '#' || + _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) +@@ -903,9 +1280,9 @@ dispatch_ioctl(struct client *client, un + return -EFAULT; + } + +- retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer); +- if (retval < 0) +- return retval; ++ ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer); ++ if (ret < 0) ++ return ret; + + if (_IOC_DIR(cmd) & _IOC_READ) { + if (_IOC_SIZE(cmd) > sizeof(buffer) || +@@ -913,12 +1290,11 @@ dispatch_ioctl(struct client *client, un + return -EFAULT; + } + +- return retval; ++ return ret; + } + +-static long +-fw_device_op_ioctl(struct file *file, +- unsigned int cmd, unsigned long arg) ++static long fw_device_op_ioctl(struct file *file, ++ unsigned int cmd, unsigned long arg) + { + struct client *client = file->private_data; + +@@ -929,9 +1305,8 @@ fw_device_op_ioctl(struct file *file, + } + + #ifdef CONFIG_COMPAT +-static long +-fw_device_op_compat_ioctl(struct file *file, +- unsigned int cmd, unsigned long arg) ++static long fw_device_op_compat_ioctl(struct file *file, ++ unsigned int cmd, unsigned long arg) + { + struct client *client = file->private_data; + +@@ -947,7 +1322,7 @@ static int fw_device_op_mmap(struct file + struct client *client = file->private_data; + enum dma_data_direction direction; + unsigned long size; +- int page_count, retval; ++ int page_count, ret; + + if (fw_device_is_shutdown(client->device)) + return -ENODEV; +@@ -973,48 +1348,57 @@ static int fw_device_op_mmap(struct file + else + direction = DMA_FROM_DEVICE; + +- retval = fw_iso_buffer_init(&client->buffer, client->device->card, +- page_count, direction); +- if (retval < 0) +- return retval; ++ ret = fw_iso_buffer_init(&client->buffer, client->device->card, ++ page_count, direction); ++ if (ret < 0) ++ return ret; + +- retval = fw_iso_buffer_map(&client->buffer, vma); +- if (retval < 0) ++ ret = fw_iso_buffer_map(&client->buffer, vma); ++ if (ret < 0) + 
fw_iso_buffer_destroy(&client->buffer, client->device->card); + +- return retval; ++ return ret; ++} ++ ++static int shutdown_resource(int id, void *p, void *data) ++{ ++ struct client_resource *r = p; ++ struct client *client = data; ++ ++ r->release(client, r); ++ client_put(client); ++ ++ return 0; + } + + static int fw_device_op_release(struct inode *inode, struct file *file) + { + struct client *client = file->private_data; + struct event *e, *next_e; +- struct client_resource *r, *next_r; +- unsigned long flags; + +- if (client->buffer.pages) +- fw_iso_buffer_destroy(&client->buffer, client->device->card); ++ mutex_lock(&client->device->client_list_mutex); ++ list_del(&client->link); ++ mutex_unlock(&client->device->client_list_mutex); + + if (client->iso_context) + fw_iso_context_destroy(client->iso_context); + +- list_for_each_entry_safe(r, next_r, &client->resource_list, link) +- r->release(client, r); ++ if (client->buffer.pages) ++ fw_iso_buffer_destroy(&client->buffer, client->device->card); + +- /* +- * FIXME: We should wait for the async tasklets to stop +- * running before freeing the memory. +- */ ++ /* Freeze client->resource_idr and client->event_list */ ++ spin_lock_irq(&client->lock); ++ client->in_shutdown = true; ++ spin_unlock_irq(&client->lock); ++ ++ idr_for_each(&client->resource_idr, shutdown_resource, client); ++ idr_remove_all(&client->resource_idr); ++ idr_destroy(&client->resource_idr); + + list_for_each_entry_safe(e, next_e, &client->event_list, link) + kfree(e); + +- spin_lock_irqsave(&client->device->card->lock, flags); +- list_del(&client->link); +- spin_unlock_irqrestore(&client->device->card->lock, flags); +- +- fw_device_put(client->device); +- kfree(client); ++ client_put(client); + + return 0; + } +diff -Naurp linux-2.6-git/drivers/firewire/fw-device.c firewire-git/drivers/firewire/fw-device.c +--- linux-2.6-git/drivers/firewire/fw-device.c 2009-01-30 13:39:02.989651512 -0500 ++++ firewire-git/drivers/firewire/fw-device.c 2009-01-30 13:35:51.860646788 -0500 +@@ -27,8 +27,10 @@ + #include + #include + #include ++#include + #include + #include ++#include + #include + #include + #include "fw-transaction.h" +@@ -132,8 +134,7 @@ static int get_modalias(struct fw_unit * + vendor, model, specifier_id, version); + } + +-static int +-fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env) ++static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env) + { + struct fw_unit *unit = fw_unit(dev); + char modalias[64]; +@@ -191,8 +192,8 @@ struct config_rom_attribute { + u32 key; + }; + +-static ssize_t +-show_immediate(struct device *dev, struct device_attribute *dattr, char *buf) ++static ssize_t show_immediate(struct device *dev, ++ struct device_attribute *dattr, char *buf) + { + struct config_rom_attribute *attr = + container_of(dattr, struct config_rom_attribute, attr); +@@ -223,8 +224,8 @@ show_immediate(struct device *dev, struc + #define IMMEDIATE_ATTR(name, key) \ + { __ATTR(name, S_IRUGO, show_immediate, NULL), key } + +-static ssize_t +-show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf) ++static ssize_t show_text_leaf(struct device *dev, ++ struct device_attribute *dattr, char *buf) + { + struct config_rom_attribute *attr = + container_of(dattr, struct config_rom_attribute, attr); +@@ -293,10 +294,9 @@ static struct config_rom_attribute confi + TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION), + }; + +-static void +-init_fw_attribute_group(struct device *dev, +- struct device_attribute *attrs, 
+- struct fw_attribute_group *group) ++static void init_fw_attribute_group(struct device *dev, ++ struct device_attribute *attrs, ++ struct fw_attribute_group *group) + { + struct device_attribute *attr; + int i, j; +@@ -319,9 +319,8 @@ init_fw_attribute_group(struct device *d + dev->groups = group->groups; + } + +-static ssize_t +-modalias_show(struct device *dev, +- struct device_attribute *attr, char *buf) ++static ssize_t modalias_show(struct device *dev, ++ struct device_attribute *attr, char *buf) + { + struct fw_unit *unit = fw_unit(dev); + int length; +@@ -332,9 +331,8 @@ modalias_show(struct device *dev, + return length + 1; + } + +-static ssize_t +-rom_index_show(struct device *dev, +- struct device_attribute *attr, char *buf) ++static ssize_t rom_index_show(struct device *dev, ++ struct device_attribute *attr, char *buf) + { + struct fw_device *device = fw_device(dev->parent); + struct fw_unit *unit = fw_unit(dev); +@@ -349,8 +347,8 @@ static struct device_attribute fw_unit_a + __ATTR_NULL, + }; + +-static ssize_t +-config_rom_show(struct device *dev, struct device_attribute *attr, char *buf) ++static ssize_t config_rom_show(struct device *dev, ++ struct device_attribute *attr, char *buf) + { + struct fw_device *device = fw_device(dev); + size_t length; +@@ -363,8 +361,8 @@ config_rom_show(struct device *dev, stru + return length; + } + +-static ssize_t +-guid_show(struct device *dev, struct device_attribute *attr, char *buf) ++static ssize_t guid_show(struct device *dev, ++ struct device_attribute *attr, char *buf) + { + struct fw_device *device = fw_device(dev); + int ret; +@@ -383,8 +381,8 @@ static struct device_attribute fw_device + __ATTR_NULL, + }; + +-static int +-read_rom(struct fw_device *device, int generation, int index, u32 *data) ++static int read_rom(struct fw_device *device, ++ int generation, int index, u32 *data) + { + int rcode; + +@@ -1004,6 +1002,7 @@ void fw_node_event(struct fw_card *card, + device->node = fw_node_get(node); + device->node_id = node->node_id; + device->generation = card->generation; ++ mutex_init(&device->client_list_mutex); + INIT_LIST_HEAD(&device->client_list); + + /* +diff -Naurp linux-2.6-git/drivers/firewire/fw-device.h firewire-git/drivers/firewire/fw-device.h +--- linux-2.6-git/drivers/firewire/fw-device.h 2009-01-30 13:39:02.989651512 -0500 ++++ firewire-git/drivers/firewire/fw-device.h 2009-01-30 13:35:51.860646788 -0500 +@@ -23,6 +23,7 @@ + #include + #include + #include ++#include + #include + + enum fw_device_state { +@@ -64,7 +65,10 @@ struct fw_device { + bool cmc; + struct fw_card *card; + struct device device; ++ ++ struct mutex client_list_mutex; + struct list_head client_list; ++ + u32 *config_rom; + size_t config_rom_length; + int config_rom_retries; +@@ -176,8 +180,7 @@ struct fw_driver { + const struct fw_device_id *id_table; + }; + +-static inline struct fw_driver * +-fw_driver(struct device_driver *drv) ++static inline struct fw_driver *fw_driver(struct device_driver *drv) + { + return container_of(drv, struct fw_driver, driver); + } +diff -Naurp linux-2.6-git/drivers/firewire/fw-iso.c firewire-git/drivers/firewire/fw-iso.c +--- linux-2.6-git/drivers/firewire/fw-iso.c 2008-11-04 11:18:33.000000000 -0500 ++++ firewire-git/drivers/firewire/fw-iso.c 2009-01-30 13:35:51.860646788 -0500 +@@ -1,5 +1,7 @@ + /* +- * Isochronous IO functionality ++ * Isochronous I/O functionality: ++ * - Isochronous DMA context management ++ * - Isochronous bus resource management (channels, bandwidth), client side + * + * Copyright (C) 2006 
Kristian Hoegsberg + * +@@ -18,21 +20,25 @@ + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +-#include +-#include + #include +-#include ++#include ++#include ++#include + #include ++#include ++#include + +-#include "fw-transaction.h" + #include "fw-topology.h" +-#include "fw-device.h" ++#include "fw-transaction.h" + +-int +-fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, +- int page_count, enum dma_data_direction direction) ++/* ++ * Isochronous DMA context management ++ */ ++ ++int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, ++ int page_count, enum dma_data_direction direction) + { +- int i, j, retval = -ENOMEM; ++ int i, j; + dma_addr_t address; + + buffer->page_count = page_count; +@@ -69,19 +75,19 @@ fw_iso_buffer_init(struct fw_iso_buffer + kfree(buffer->pages); + out: + buffer->pages = NULL; +- return retval; ++ return -ENOMEM; + } + + int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma) + { + unsigned long uaddr; +- int i, retval; ++ int i, ret; + + uaddr = vma->vm_start; + for (i = 0; i < buffer->page_count; i++) { +- retval = vm_insert_page(vma, uaddr, buffer->pages[i]); +- if (retval) +- return retval; ++ ret = vm_insert_page(vma, uaddr, buffer->pages[i]); ++ if (ret) ++ return ret; + uaddr += PAGE_SIZE; + } + +@@ -105,14 +111,14 @@ void fw_iso_buffer_destroy(struct fw_iso + buffer->pages = NULL; + } + +-struct fw_iso_context * +-fw_iso_context_create(struct fw_card *card, int type, +- int channel, int speed, size_t header_size, +- fw_iso_callback_t callback, void *callback_data) ++struct fw_iso_context *fw_iso_context_create(struct fw_card *card, ++ int type, int channel, int speed, size_t header_size, ++ fw_iso_callback_t callback, void *callback_data) + { + struct fw_iso_context *ctx; + +- ctx = card->driver->allocate_iso_context(card, type, header_size); ++ ctx = card->driver->allocate_iso_context(card, ++ type, channel, header_size); + if (IS_ERR(ctx)) + return ctx; + +@@ -134,25 +140,186 @@ void fw_iso_context_destroy(struct fw_is + card->driver->free_iso_context(ctx); + } + +-int +-fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags) ++int fw_iso_context_start(struct fw_iso_context *ctx, ++ int cycle, int sync, int tags) + { + return ctx->card->driver->start_iso(ctx, cycle, sync, tags); + } + +-int +-fw_iso_context_queue(struct fw_iso_context *ctx, +- struct fw_iso_packet *packet, +- struct fw_iso_buffer *buffer, +- unsigned long payload) ++int fw_iso_context_queue(struct fw_iso_context *ctx, ++ struct fw_iso_packet *packet, ++ struct fw_iso_buffer *buffer, ++ unsigned long payload) + { + struct fw_card *card = ctx->card; + + return card->driver->queue_iso(ctx, packet, buffer, payload); + } + +-int +-fw_iso_context_stop(struct fw_iso_context *ctx) ++int fw_iso_context_stop(struct fw_iso_context *ctx) + { + return ctx->card->driver->stop_iso(ctx); + } ++ ++/* ++ * Isochronous bus resource management (channels, bandwidth), client side ++ */ ++ ++static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, ++ int bandwidth, bool allocate) ++{ ++ __be32 data[2]; ++ int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0; ++ ++ /* ++ * On a 1394a IRM with low contention, try < 1 is enough. ++ * On a 1394-1995 IRM, we need at least try < 2. ++ * Let's just do try < 5. ++ */ ++ for (try = 0; try < 5; try++) { ++ new = allocate ? 
old - bandwidth : old + bandwidth; ++ if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL) ++ break; ++ ++ data[0] = cpu_to_be32(old); ++ data[1] = cpu_to_be32(new); ++ switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, ++ irm_id, generation, SCODE_100, ++ CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE, ++ data, sizeof(data))) { ++ case RCODE_GENERATION: ++ /* A generation change frees all bandwidth. */ ++ return allocate ? -EAGAIN : bandwidth; ++ ++ case RCODE_COMPLETE: ++ if (be32_to_cpup(data) == old) ++ return bandwidth; ++ ++ old = be32_to_cpup(data); ++ /* Fall through. */ ++ } ++ } ++ ++ return -EIO; ++} ++ ++static int manage_channel(struct fw_card *card, int irm_id, int generation, ++ u32 channels_mask, u64 offset, bool allocate) ++{ ++ __be32 data[2], c, all, old; ++ int i, retry = 5; ++ ++ old = all = allocate ? cpu_to_be32(~0) : 0; ++ ++ for (i = 0; i < 32; i++) { ++ if (!(channels_mask & 1 << i)) ++ continue; ++ ++ c = cpu_to_be32(1 << (31 - i)); ++ if ((old & c) != (all & c)) ++ continue; ++ ++ data[0] = old; ++ data[1] = old ^ c; ++ switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, ++ irm_id, generation, SCODE_100, ++ offset, data, sizeof(data))) { ++ case RCODE_GENERATION: ++ /* A generation change frees all channels. */ ++ return allocate ? -EAGAIN : i; ++ ++ case RCODE_COMPLETE: ++ if (data[0] == old) ++ return i; ++ ++ old = data[0]; ++ ++ /* Is the IRM 1394a-2000 compliant? */ ++ if ((data[0] & c) == (data[1] & c)) ++ continue; ++ ++ /* 1394-1995 IRM, fall through to retry. */ ++ default: ++ if (retry--) ++ i--; ++ } ++ } ++ ++ return -EIO; ++} ++ ++static void deallocate_channel(struct fw_card *card, int irm_id, ++ int generation, int channel) ++{ ++ u32 mask; ++ u64 offset; ++ ++ mask = channel < 32 ? 1 << channel : 1 << (channel - 32); ++ offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI : ++ CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO; ++ ++ manage_channel(card, irm_id, generation, mask, offset, false); ++} ++ ++/** ++ * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth ++ * ++ * In parameters: card, generation, channels_mask, bandwidth, allocate ++ * Out parameters: channel, bandwidth ++ * This function blocks (sleeps) during communication with the IRM. ++ * ++ * Allocates or deallocates at most one channel out of channels_mask. ++ * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0. ++ * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for ++ * channel 0 and LSB for channel 63.) ++ * Allocates or deallocates as many bandwidth allocation units as specified. ++ * ++ * Returns channel < 0 if no channel was allocated or deallocated. ++ * Returns bandwidth = 0 if no bandwidth was allocated or deallocated. ++ * ++ * If generation is stale, deallocations succeed but allocations fail with ++ * channel = -EAGAIN. ++ * ++ * If channel allocation fails, no bandwidth will be allocated either. ++ * If bandwidth allocation fails, no channel will be allocated either. ++ * But deallocations of channel and bandwidth are tried independently ++ * of each other's success. 
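The kernel-doc above and manage_channel() use two different bit conventions: the channels_mask argument has the LSB standing for channel 0, while the IRM's CHANNELS_AVAILABLE registers are big-endian bitfields where the MSB stands for the lowest-numbered channel of that register, hence the 1 << (31 - i) term. A small stand-alone illustration of that mapping (host-side arithmetic only, no bus access):

#include <stdint.h>
#include <stdio.h>

/* Convert a host-convention mask (LSB = lowest channel of this register half)
 * into the IRM register convention (MSB = lowest channel), mirroring the
 * "1 << (31 - i)" computation in manage_channel(). */
static uint32_t to_irm_bit_order(uint32_t host_mask)
{
        uint32_t irm_mask = 0;
        int i;

        for (i = 0; i < 32; i++)
                if (host_mask & (uint32_t)1 << i)
                        irm_mask |= (uint32_t)1 << (31 - i);
        return irm_mask;
}

int main(void)
{
        uint64_t channels_mask = 1ULL << 0 | 1ULL << 35;        /* channels 0 and 35 */
        uint32_t channels_hi = (uint32_t)channels_mask;          /* channels 31...0   */
        uint32_t channels_lo = (uint32_t)(channels_mask >> 32);  /* channels 63...32  */

        printf("CHANNELS_AVAILABLE_HI mask: 0x%08x\n", to_irm_bit_order(channels_hi));
        printf("CHANNELS_AVAILABLE_LO mask: 0x%08x\n", to_irm_bit_order(channels_lo));
        return 0;
}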
++ */ ++void fw_iso_resource_manage(struct fw_card *card, int generation, ++ u64 channels_mask, int *channel, int *bandwidth, ++ bool allocate) ++{ ++ u32 channels_hi = channels_mask; /* channels 31...0 */ ++ u32 channels_lo = channels_mask >> 32; /* channels 63...32 */ ++ int irm_id, ret, c = -EINVAL; ++ ++ spin_lock_irq(&card->lock); ++ irm_id = card->irm_node->node_id; ++ spin_unlock_irq(&card->lock); ++ ++ if (channels_hi) ++ c = manage_channel(card, irm_id, generation, channels_hi, ++ CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate); ++ if (channels_lo && c < 0) { ++ c = manage_channel(card, irm_id, generation, channels_lo, ++ CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate); ++ if (c >= 0) ++ c += 32; ++ } ++ *channel = c; ++ ++ if (allocate && channels_mask != 0 && c < 0) ++ *bandwidth = 0; ++ ++ if (*bandwidth == 0) ++ return; ++ ++ ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate); ++ if (ret < 0) ++ *bandwidth = 0; ++ ++ if (allocate && ret < 0 && c >= 0) { ++ deallocate_channel(card, irm_id, generation, c); ++ *channel = ret; ++ } ++} +diff -Naurp linux-2.6-git/drivers/firewire/fw-ohci.c firewire-git/drivers/firewire/fw-ohci.c +--- linux-2.6-git/drivers/firewire/fw-ohci.c 2009-01-30 13:39:02.990772025 -0500 ++++ firewire-git/drivers/firewire/fw-ohci.c 2009-01-30 13:35:51.861646907 -0500 +@@ -205,6 +205,7 @@ struct fw_ohci { + + u32 it_context_mask; + struct iso_context *it_context_list; ++ u64 ir_context_channels; + u32 ir_context_mask; + struct iso_context *ir_context_list; + }; +@@ -441,9 +442,8 @@ static inline void flush_writes(const st + reg_read(ohci, OHCI1394_Version); + } + +-static int +-ohci_update_phy_reg(struct fw_card *card, int addr, +- int clear_bits, int set_bits) ++static int ohci_update_phy_reg(struct fw_card *card, int addr, ++ int clear_bits, int set_bits) + { + struct fw_ohci *ohci = fw_ohci(card); + u32 val, old; +@@ -658,8 +658,8 @@ static void ar_context_tasklet(unsigned + } + } + +-static int +-ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs) ++static int ar_context_init(struct ar_context *ctx, ++ struct fw_ohci *ohci, u32 regs) + { + struct ar_buffer ab; + +@@ -690,8 +690,7 @@ static void ar_context_run(struct ar_con + flush_writes(ctx->ohci); + } + +-static struct descriptor * +-find_branch_descriptor(struct descriptor *d, int z) ++static struct descriptor *find_branch_descriptor(struct descriptor *d, int z) + { + int b, key; + +@@ -751,8 +750,7 @@ static void context_tasklet(unsigned lon + * Allocate a new buffer and add it to the list of free buffers for this + * context. Must be called with ohci->lock held. 
+ */ +-static int +-context_add_buffer(struct context *ctx) ++static int context_add_buffer(struct context *ctx) + { + struct descriptor_buffer *desc; + dma_addr_t uninitialized_var(bus_addr); +@@ -781,9 +779,8 @@ context_add_buffer(struct context *ctx) + return 0; + } + +-static int +-context_init(struct context *ctx, struct fw_ohci *ohci, +- u32 regs, descriptor_callback_t callback) ++static int context_init(struct context *ctx, struct fw_ohci *ohci, ++ u32 regs, descriptor_callback_t callback) + { + ctx->ohci = ohci; + ctx->regs = regs; +@@ -814,8 +811,7 @@ context_init(struct context *ctx, struct + return 0; + } + +-static void +-context_release(struct context *ctx) ++static void context_release(struct context *ctx) + { + struct fw_card *card = &ctx->ohci->card; + struct descriptor_buffer *desc, *tmp; +@@ -827,8 +823,8 @@ context_release(struct context *ctx) + } + + /* Must be called with ohci->lock held */ +-static struct descriptor * +-context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus) ++static struct descriptor *context_get_descriptors(struct context *ctx, ++ int z, dma_addr_t *d_bus) + { + struct descriptor *d = NULL; + struct descriptor_buffer *desc = ctx->buffer_tail; +@@ -912,8 +908,8 @@ struct driver_data { + * Must always be called with the ochi->lock held to ensure proper + * generation handling and locking around packet queue manipulation. + */ +-static int +-at_context_queue_packet(struct context *ctx, struct fw_packet *packet) ++static int at_context_queue_packet(struct context *ctx, ++ struct fw_packet *packet) + { + struct fw_ohci *ohci = ctx->ohci; + dma_addr_t d_bus, uninitialized_var(payload_bus); +@@ -1095,8 +1091,8 @@ static int handle_at_packet(struct conte + #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) + #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) + +-static void +-handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) ++static void handle_local_rom(struct fw_ohci *ohci, ++ struct fw_packet *packet, u32 csr) + { + struct fw_packet response; + int tcode, length, i; +@@ -1122,8 +1118,8 @@ handle_local_rom(struct fw_ohci *ohci, s + fw_core_handle_response(&ohci->card, &response); + } + +-static void +-handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) ++static void handle_local_lock(struct fw_ohci *ohci, ++ struct fw_packet *packet, u32 csr) + { + struct fw_packet response; + int tcode, length, ext_tcode, sel; +@@ -1164,8 +1160,7 @@ handle_local_lock(struct fw_ohci *ohci, + fw_core_handle_response(&ohci->card, &response); + } + +-static void +-handle_local_request(struct context *ctx, struct fw_packet *packet) ++static void handle_local_request(struct context *ctx, struct fw_packet *packet) + { + u64 offset; + u32 csr; +@@ -1205,11 +1200,10 @@ handle_local_request(struct context *ctx + } + } + +-static void +-at_context_transmit(struct context *ctx, struct fw_packet *packet) ++static void at_context_transmit(struct context *ctx, struct fw_packet *packet) + { + unsigned long flags; +- int retval; ++ int ret; + + spin_lock_irqsave(&ctx->ohci->lock, flags); + +@@ -1220,10 +1214,10 @@ at_context_transmit(struct context *ctx, + return; + } + +- retval = at_context_queue_packet(ctx, packet); ++ ret = at_context_queue_packet(ctx, packet); + spin_unlock_irqrestore(&ctx->ohci->lock, flags); + +- if (retval < 0) ++ if (ret < 0) + packet->callback(packet, &ctx->ohci->card, packet->ack); + + } +@@ -1590,12 +1584,12 @@ static int ohci_enable(struct fw_card *c + return 0; + } + +-static int 
+-ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) ++static int ohci_set_config_rom(struct fw_card *card, ++ u32 *config_rom, size_t length) + { + struct fw_ohci *ohci; + unsigned long flags; +- int retval = -EBUSY; ++ int ret = -EBUSY; + __be32 *next_config_rom; + dma_addr_t uninitialized_var(next_config_rom_bus); + +@@ -1649,7 +1643,7 @@ ohci_set_config_rom(struct fw_card *card + + reg_write(ohci, OHCI1394_ConfigROMmap, + ohci->next_config_rom_bus); +- retval = 0; ++ ret = 0; + } + + spin_unlock_irqrestore(&ohci->lock, flags); +@@ -1661,13 +1655,13 @@ ohci_set_config_rom(struct fw_card *card + * controller could need to access it before the bus reset + * takes effect. + */ +- if (retval == 0) ++ if (ret == 0) + fw_core_initiate_bus_reset(&ohci->card, 1); + else + dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, + next_config_rom, next_config_rom_bus); + +- return retval; ++ return ret; + } + + static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) +@@ -1689,7 +1683,7 @@ static int ohci_cancel_packet(struct fw_ + struct fw_ohci *ohci = fw_ohci(card); + struct context *ctx = &ohci->at_request_ctx; + struct driver_data *driver_data = packet->driver_data; +- int retval = -ENOENT; ++ int ret = -ENOENT; + + tasklet_disable(&ctx->tasklet); + +@@ -1704,23 +1698,22 @@ static int ohci_cancel_packet(struct fw_ + driver_data->packet = NULL; + packet->ack = RCODE_CANCELLED; + packet->callback(packet, &ohci->card, packet->ack); +- retval = 0; +- ++ ret = 0; + out: + tasklet_enable(&ctx->tasklet); + +- return retval; ++ return ret; + } + +-static int +-ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) ++static int ohci_enable_phys_dma(struct fw_card *card, ++ int node_id, int generation) + { + #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA + return 0; + #else + struct fw_ohci *ohci = fw_ohci(card); + unsigned long flags; +- int n, retval = 0; ++ int n, ret = 0; + + /* + * FIXME: Make sure this bitmask is cleared when we clear the busReset +@@ -1730,7 +1723,7 @@ ohci_enable_phys_dma(struct fw_card *car + spin_lock_irqsave(&ohci->lock, flags); + + if (ohci->generation != generation) { +- retval = -ESTALE; ++ ret = -ESTALE; + goto out; + } + +@@ -1748,12 +1741,12 @@ ohci_enable_phys_dma(struct fw_card *car + flush_writes(ohci); + out: + spin_unlock_irqrestore(&ohci->lock, flags); +- return retval; ++ ++ return ret; + #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ + } + +-static u64 +-ohci_get_bus_time(struct fw_card *card) ++static u64 ohci_get_bus_time(struct fw_card *card) + { + struct fw_ohci *ohci = fw_ohci(card); + u32 cycle_time; +@@ -1765,6 +1758,28 @@ ohci_get_bus_time(struct fw_card *card) + return bus_time; + } + ++static void copy_iso_headers(struct iso_context *ctx, void *p) ++{ ++ int i = ctx->header_length; ++ ++ if (i + ctx->base.header_size > PAGE_SIZE) ++ return; ++ ++ /* ++ * The iso header is byteswapped to little endian by ++ * the controller, but the remaining header quadlets ++ * are big endian. We want to present all the headers ++ * as big endian, so we have to swap the first quadlet. 
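copy_iso_headers(), introduced above, presents the header quadlets to the client in big-endian order even though the controller leaves the leading quadlets byte-reversed in the DMA buffer, which is why they are run through an unconditional __swab32() while everything else is copied through unchanged. A toy sketch of that presentation rule; the two-quadlet layout and the values are made up, only the single bswap_32() (a user-space counterpart of __swab32()) is the point:

#include <byteswap.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Pretend buffer: quadlet 0 stands for a value the controller stored
         * byte-reversed, quadlet 1 for one that is already in the order the
         * client expects. */
        uint32_t raw[2] = { 0x44332211, 0xdeadbeef };
        uint32_t fixed[2];

        fixed[0] = bswap_32(raw[0]);   /* unconditional byte swap, like __swab32() */
        fixed[1] = raw[1];             /* remaining quadlets are passed through */

        printf("0x%08x -> 0x%08x, 0x%08x unchanged\n", raw[0], fixed[0], fixed[1]);
        return 0;
}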
++ */ ++ if (ctx->base.header_size > 0) ++ *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); ++ if (ctx->base.header_size > 4) ++ *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p); ++ if (ctx->base.header_size > 8) ++ memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8); ++ ctx->header_length += ctx->base.header_size; ++} ++ + static int handle_ir_dualbuffer_packet(struct context *context, + struct descriptor *d, + struct descriptor *last) +@@ -1775,7 +1790,6 @@ static int handle_ir_dualbuffer_packet(s + __le32 *ir_header; + size_t header_length; + void *p, *end; +- int i; + + if (db->first_res_count != 0 && db->second_res_count != 0) { + if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) { +@@ -1788,25 +1802,14 @@ static int handle_ir_dualbuffer_packet(s + header_length = le16_to_cpu(db->first_req_count) - + le16_to_cpu(db->first_res_count); + +- i = ctx->header_length; + p = db + 1; + end = p + header_length; +- while (p < end && i + ctx->base.header_size <= PAGE_SIZE) { +- /* +- * The iso header is byteswapped to little endian by +- * the controller, but the remaining header quadlets +- * are big endian. We want to present all the headers +- * as big endian, so we have to swap the first +- * quadlet. +- */ +- *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); +- memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4); +- i += ctx->base.header_size; ++ while (p < end) { ++ copy_iso_headers(ctx, p); + ctx->excess_bytes += + (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff; +- p += ctx->base.header_size + 4; ++ p += max(ctx->base.header_size, (size_t)8); + } +- ctx->header_length = i; + + ctx->excess_bytes -= le16_to_cpu(db->second_req_count) - + le16_to_cpu(db->second_res_count); +@@ -1832,7 +1835,6 @@ static int handle_ir_packet_per_buffer(s + struct descriptor *pd; + __le32 *ir_header; + void *p; +- int i; + + for (pd = d; pd <= last; pd++) { + if (pd->transfer_status) +@@ -1842,21 +1844,8 @@ static int handle_ir_packet_per_buffer(s + /* Descriptor(s) not done yet, stop iteration */ + return 0; + +- i = ctx->header_length; +- p = last + 1; +- +- if (ctx->base.header_size > 0 && +- i + ctx->base.header_size <= PAGE_SIZE) { +- /* +- * The iso header is byteswapped to little endian by +- * the controller, but the remaining header quadlets +- * are big endian. We want to present all the headers +- * as big endian, so we have to swap the first quadlet. 
+- */ +- *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); +- memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4); +- ctx->header_length += ctx->base.header_size; +- } ++ p = last + 1; ++ copy_iso_headers(ctx, p); + + if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { + ir_header = (__le32 *) p; +@@ -1888,21 +1877,24 @@ static int handle_it_packet(struct conte + return 1; + } + +-static struct fw_iso_context * +-ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) ++static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, ++ int type, int channel, size_t header_size) + { + struct fw_ohci *ohci = fw_ohci(card); + struct iso_context *ctx, *list; + descriptor_callback_t callback; ++ u64 *channels, dont_care = ~0ULL; + u32 *mask, regs; + unsigned long flags; +- int index, retval = -ENOMEM; ++ int index, ret = -ENOMEM; + + if (type == FW_ISO_CONTEXT_TRANSMIT) { ++ channels = &dont_care; + mask = &ohci->it_context_mask; + list = ohci->it_context_list; + callback = handle_it_packet; + } else { ++ channels = &ohci->ir_context_channels; + mask = &ohci->ir_context_mask; + list = ohci->ir_context_list; + if (ohci->use_dualbuffer) +@@ -1912,9 +1904,11 @@ ohci_allocate_iso_context(struct fw_card + } + + spin_lock_irqsave(&ohci->lock, flags); +- index = ffs(*mask) - 1; +- if (index >= 0) ++ index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; ++ if (index >= 0) { ++ *channels &= ~(1ULL << channel); + *mask &= ~(1 << index); ++ } + spin_unlock_irqrestore(&ohci->lock, flags); + + if (index < 0) +@@ -1932,8 +1926,8 @@ ohci_allocate_iso_context(struct fw_card + if (ctx->header == NULL) + goto out; + +- retval = context_init(&ctx->context, ohci, regs, callback); +- if (retval < 0) ++ ret = context_init(&ctx->context, ohci, regs, callback); ++ if (ret < 0) + goto out_with_header; + + return &ctx->base; +@@ -1945,7 +1939,7 @@ ohci_allocate_iso_context(struct fw_card + *mask |= 1 << index; + spin_unlock_irqrestore(&ohci->lock, flags); + +- return ERR_PTR(retval); ++ return ERR_PTR(ret); + } + + static int ohci_start_iso(struct fw_iso_context *base, +@@ -2024,16 +2018,16 @@ static void ohci_free_iso_context(struct + } else { + index = ctx - ohci->ir_context_list; + ohci->ir_context_mask |= 1 << index; ++ ohci->ir_context_channels |= 1ULL << base->channel; + } + + spin_unlock_irqrestore(&ohci->lock, flags); + } + +-static int +-ohci_queue_iso_transmit(struct fw_iso_context *base, +- struct fw_iso_packet *packet, +- struct fw_iso_buffer *buffer, +- unsigned long payload) ++static int ohci_queue_iso_transmit(struct fw_iso_context *base, ++ struct fw_iso_packet *packet, ++ struct fw_iso_buffer *buffer, ++ unsigned long payload) + { + struct iso_context *ctx = container_of(base, struct iso_context, base); + struct descriptor *d, *last, *pd; +@@ -2128,11 +2122,10 @@ ohci_queue_iso_transmit(struct fw_iso_co + return 0; + } + +-static int +-ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, +- struct fw_iso_packet *packet, +- struct fw_iso_buffer *buffer, +- unsigned long payload) ++static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, ++ struct fw_iso_packet *packet, ++ struct fw_iso_buffer *buffer, ++ unsigned long payload) + { + struct iso_context *ctx = container_of(base, struct iso_context, base); + struct db_descriptor *db = NULL; +@@ -2151,11 +2144,11 @@ ohci_queue_iso_receive_dualbuffer(struct + z = 2; + + /* +- * The OHCI controller puts the status word in the header +- * buffer too, so we need 4 extra 
bytes per packet. ++ * The OHCI controller puts the isochronous header and trailer in the ++ * buffer, so we need at least 8 bytes. + */ + packet_count = p->header_length / ctx->base.header_size; +- header_size = packet_count * (ctx->base.header_size + 4); ++ header_size = packet_count * max(ctx->base.header_size, (size_t)8); + + /* Get header size in number of descriptors. */ + header_z = DIV_ROUND_UP(header_size, sizeof(*d)); +@@ -2173,7 +2166,8 @@ ohci_queue_iso_receive_dualbuffer(struct + db = (struct db_descriptor *) d; + db->control = cpu_to_le16(DESCRIPTOR_STATUS | + DESCRIPTOR_BRANCH_ALWAYS); +- db->first_size = cpu_to_le16(ctx->base.header_size + 4); ++ db->first_size = ++ cpu_to_le16(max(ctx->base.header_size, (size_t)8)); + if (p->skip && rest == p->payload_length) { + db->control |= cpu_to_le16(DESCRIPTOR_WAIT); + db->first_req_count = db->first_size; +@@ -2208,11 +2202,10 @@ ohci_queue_iso_receive_dualbuffer(struct + return 0; + } + +-static int +-ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, +- struct fw_iso_packet *packet, +- struct fw_iso_buffer *buffer, +- unsigned long payload) ++static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, ++ struct fw_iso_packet *packet, ++ struct fw_iso_buffer *buffer, ++ unsigned long payload) + { + struct iso_context *ctx = container_of(base, struct iso_context, base); + struct descriptor *d = NULL, *pd = NULL; +@@ -2223,11 +2216,11 @@ ohci_queue_iso_receive_packet_per_buffer + int page, offset, packet_count, header_size, payload_per_buffer; + + /* +- * The OHCI controller puts the status word in the +- * buffer too, so we need 4 extra bytes per packet. ++ * The OHCI controller puts the isochronous header and trailer in the ++ * buffer, so we need at least 8 bytes. + */ + packet_count = p->header_length / ctx->base.header_size; +- header_size = ctx->base.header_size + 4; ++ header_size = max(ctx->base.header_size, (size_t)8); + + /* Get header size in number of descriptors. 
*/ + header_z = DIV_ROUND_UP(header_size, sizeof(*d)); +@@ -2286,29 +2279,27 @@ ohci_queue_iso_receive_packet_per_buffer + return 0; + } + +-static int +-ohci_queue_iso(struct fw_iso_context *base, +- struct fw_iso_packet *packet, +- struct fw_iso_buffer *buffer, +- unsigned long payload) ++static int ohci_queue_iso(struct fw_iso_context *base, ++ struct fw_iso_packet *packet, ++ struct fw_iso_buffer *buffer, ++ unsigned long payload) + { + struct iso_context *ctx = container_of(base, struct iso_context, base); + unsigned long flags; +- int retval; ++ int ret; + + spin_lock_irqsave(&ctx->context.ohci->lock, flags); + if (base->type == FW_ISO_CONTEXT_TRANSMIT) +- retval = ohci_queue_iso_transmit(base, packet, buffer, payload); ++ ret = ohci_queue_iso_transmit(base, packet, buffer, payload); + else if (ctx->context.ohci->use_dualbuffer) +- retval = ohci_queue_iso_receive_dualbuffer(base, packet, +- buffer, payload); ++ ret = ohci_queue_iso_receive_dualbuffer(base, packet, ++ buffer, payload); + else +- retval = ohci_queue_iso_receive_packet_per_buffer(base, packet, +- buffer, +- payload); ++ ret = ohci_queue_iso_receive_packet_per_buffer(base, packet, ++ buffer, payload); + spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); + +- return retval; ++ return ret; + } + + static const struct fw_card_driver ohci_driver = { +@@ -2357,8 +2348,8 @@ static void ohci_pmac_off(struct pci_dev + #define ohci_pmac_off(dev) + #endif /* CONFIG_PPC_PMAC */ + +-static int __devinit +-pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) ++static int __devinit pci_probe(struct pci_dev *dev, ++ const struct pci_device_id *ent) + { + struct fw_ohci *ohci; + u32 bus_options, max_receive, link_speed, version; +@@ -2440,6 +2431,7 @@ pci_probe(struct pci_dev *dev, const str + ohci->it_context_list = kzalloc(size, GFP_KERNEL); + + reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); ++ ohci->ir_context_channels = ~0ULL; + ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); + reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); + size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask); +diff -Naurp linux-2.6-git/drivers/firewire/fw-sbp2.c firewire-git/drivers/firewire/fw-sbp2.c +--- linux-2.6-git/drivers/firewire/fw-sbp2.c 2009-01-30 13:39:02.991771976 -0500 ++++ firewire-git/drivers/firewire/fw-sbp2.c 2009-01-30 13:35:51.861646907 -0500 +@@ -392,20 +392,18 @@ static const struct { + } + }; + +-static void +-free_orb(struct kref *kref) ++static void free_orb(struct kref *kref) + { + struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref); + + kfree(orb); + } + +-static void +-sbp2_status_write(struct fw_card *card, struct fw_request *request, +- int tcode, int destination, int source, +- int generation, int speed, +- unsigned long long offset, +- void *payload, size_t length, void *callback_data) ++static void sbp2_status_write(struct fw_card *card, struct fw_request *request, ++ int tcode, int destination, int source, ++ int generation, int speed, ++ unsigned long long offset, ++ void *payload, size_t length, void *callback_data) + { + struct sbp2_logical_unit *lu = callback_data; + struct sbp2_orb *orb; +@@ -451,9 +449,8 @@ sbp2_status_write(struct fw_card *card, + fw_send_response(card, request, RCODE_COMPLETE); + } + +-static void +-complete_transaction(struct fw_card *card, int rcode, +- void *payload, size_t length, void *data) ++static void complete_transaction(struct fw_card *card, int rcode, ++ void *payload, size_t length, void *data) + { + struct sbp2_orb *orb = 
data; + unsigned long flags; +@@ -482,9 +479,8 @@ complete_transaction(struct fw_card *car + kref_put(&orb->kref, free_orb); + } + +-static void +-sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, +- int node_id, int generation, u64 offset) ++static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, ++ int node_id, int generation, u64 offset) + { + struct fw_device *device = fw_device(lu->tgt->unit->device.parent); + unsigned long flags; +@@ -531,8 +527,8 @@ static int sbp2_cancel_orbs(struct sbp2_ + return retval; + } + +-static void +-complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) ++static void complete_management_orb(struct sbp2_orb *base_orb, ++ struct sbp2_status *status) + { + struct sbp2_management_orb *orb = + container_of(base_orb, struct sbp2_management_orb, base); +@@ -542,10 +538,9 @@ complete_management_orb(struct sbp2_orb + complete(&orb->done); + } + +-static int +-sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, +- int generation, int function, int lun_or_login_id, +- void *response) ++static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, ++ int generation, int function, ++ int lun_or_login_id, void *response) + { + struct fw_device *device = fw_device(lu->tgt->unit->device.parent); + struct sbp2_management_orb *orb; +@@ -652,9 +647,8 @@ static void sbp2_agent_reset(struct sbp2 + &d, sizeof(d)); + } + +-static void +-complete_agent_reset_write_no_wait(struct fw_card *card, int rcode, +- void *payload, size_t length, void *data) ++static void complete_agent_reset_write_no_wait(struct fw_card *card, ++ int rcode, void *payload, size_t length, void *data) + { + kfree(data); + } +@@ -1299,8 +1293,7 @@ static void sbp2_unmap_scatterlist(struc + sizeof(orb->page_table), DMA_TO_DEVICE); + } + +-static unsigned int +-sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) ++static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) + { + int sam_status; + +@@ -1337,8 +1330,8 @@ sbp2_status_to_sense_data(u8 *sbp2_statu + } + } + +-static void +-complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) ++static void complete_command_orb(struct sbp2_orb *base_orb, ++ struct sbp2_status *status) + { + struct sbp2_command_orb *orb = + container_of(base_orb, struct sbp2_command_orb, base); +@@ -1384,9 +1377,8 @@ complete_command_orb(struct sbp2_orb *ba + orb->done(orb->cmd); + } + +-static int +-sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device, +- struct sbp2_logical_unit *lu) ++static int sbp2_map_scatterlist(struct sbp2_command_orb *orb, ++ struct fw_device *device, struct sbp2_logical_unit *lu) + { + struct scatterlist *sg = scsi_sglist(orb->cmd); + int i, n; +@@ -1584,9 +1576,8 @@ static int sbp2_scsi_abort(struct scsi_c + * This is the concatenation of target port identifier and logical unit + * identifier as per SAM-2...SAM-4 annex A. 
+ */ +-static ssize_t +-sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr, +- char *buf) ++static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, ++ struct device_attribute *attr, char *buf) + { + struct scsi_device *sdev = to_scsi_device(dev); + struct sbp2_logical_unit *lu; +diff -Naurp linux-2.6-git/drivers/firewire/fw-topology.c firewire-git/drivers/firewire/fw-topology.c +--- linux-2.6-git/drivers/firewire/fw-topology.c 2009-01-30 13:39:02.991771976 -0500 ++++ firewire-git/drivers/firewire/fw-topology.c 2009-01-30 13:35:51.862647087 -0500 +@@ -314,9 +314,8 @@ typedef void (*fw_node_callback_t)(struc + struct fw_node * node, + struct fw_node * parent); + +-static void +-for_each_fw_node(struct fw_card *card, struct fw_node *root, +- fw_node_callback_t callback) ++static void for_each_fw_node(struct fw_card *card, struct fw_node *root, ++ fw_node_callback_t callback) + { + struct list_head list; + struct fw_node *node, *next, *child, *parent; +@@ -349,9 +348,8 @@ for_each_fw_node(struct fw_card *card, s + fw_node_put(node); + } + +-static void +-report_lost_node(struct fw_card *card, +- struct fw_node *node, struct fw_node *parent) ++static void report_lost_node(struct fw_card *card, ++ struct fw_node *node, struct fw_node *parent) + { + fw_node_event(card, node, FW_NODE_DESTROYED); + fw_node_put(node); +@@ -360,9 +358,8 @@ report_lost_node(struct fw_card *card, + card->bm_retries = 0; + } + +-static void +-report_found_node(struct fw_card *card, +- struct fw_node *node, struct fw_node *parent) ++static void report_found_node(struct fw_card *card, ++ struct fw_node *node, struct fw_node *parent) + { + int b_path = (node->phy_speed == SCODE_BETA); + +@@ -415,8 +412,7 @@ static void move_tree(struct fw_node *no + * found, lost or updated. Update the nodes in the card topology tree + * as we go. 
+ */ +-static void +-update_tree(struct fw_card *card, struct fw_node *root) ++static void update_tree(struct fw_card *card, struct fw_node *root) + { + struct list_head list0, list1; + struct fw_node *node0, *node1, *next1; +@@ -497,8 +493,8 @@ update_tree(struct fw_card *card, struct + } + } + +-static void +-update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count) ++static void update_topology_map(struct fw_card *card, ++ u32 *self_ids, int self_id_count) + { + int node_count; + +@@ -510,10 +506,8 @@ update_topology_map(struct fw_card *card + fw_compute_block_crc(card->topology_map); + } + +-void +-fw_core_handle_bus_reset(struct fw_card *card, +- int node_id, int generation, +- int self_id_count, u32 * self_ids) ++void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, ++ int self_id_count, u32 *self_ids) + { + struct fw_node *local_node; + unsigned long flags; +diff -Naurp linux-2.6-git/drivers/firewire/fw-topology.h firewire-git/drivers/firewire/fw-topology.h +--- linux-2.6-git/drivers/firewire/fw-topology.h 2008-11-04 11:18:33.000000000 -0500 ++++ firewire-git/drivers/firewire/fw-topology.h 2009-01-30 13:35:51.862647087 -0500 +@@ -19,6 +19,11 @@ + #ifndef __fw_topology_h + #define __fw_topology_h + ++#include ++#include ++ ++#include ++ + enum { + FW_NODE_CREATED, + FW_NODE_UPDATED, +@@ -51,26 +56,22 @@ struct fw_node { + struct fw_node *ports[0]; + }; + +-static inline struct fw_node * +-fw_node_get(struct fw_node *node) ++static inline struct fw_node *fw_node_get(struct fw_node *node) + { + atomic_inc(&node->ref_count); + + return node; + } + +-static inline void +-fw_node_put(struct fw_node *node) ++static inline void fw_node_put(struct fw_node *node) + { + if (atomic_dec_and_test(&node->ref_count)) + kfree(node); + } + +-void +-fw_destroy_nodes(struct fw_card *card); +- +-int +-fw_compute_block_crc(u32 *block); ++struct fw_card; ++void fw_destroy_nodes(struct fw_card *card); + ++int fw_compute_block_crc(u32 *block); + + #endif /* __fw_topology_h */ +diff -Naurp linux-2.6-git/drivers/firewire/fw-transaction.c firewire-git/drivers/firewire/fw-transaction.c +--- linux-2.6-git/drivers/firewire/fw-transaction.c 2009-01-30 13:39:02.991771976 -0500 ++++ firewire-git/drivers/firewire/fw-transaction.c 2009-01-30 13:35:51.862647087 -0500 +@@ -64,10 +64,9 @@ + #define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23)) + #define PHY_IDENTIFIER(id) ((id) << 30) + +-static int +-close_transaction(struct fw_transaction *transaction, +- struct fw_card *card, int rcode, +- u32 *payload, size_t length) ++static int close_transaction(struct fw_transaction *transaction, ++ struct fw_card *card, int rcode, ++ u32 *payload, size_t length) + { + struct fw_transaction *t; + unsigned long flags; +@@ -94,9 +93,8 @@ close_transaction(struct fw_transaction + * Only valid for transactions that are potentially pending (ie have + * been sent). + */ +-int +-fw_cancel_transaction(struct fw_card *card, +- struct fw_transaction *transaction) ++int fw_cancel_transaction(struct fw_card *card, ++ struct fw_transaction *transaction) + { + /* + * Cancel the packet transmission if it's still queued. 
That +@@ -116,9 +114,8 @@ fw_cancel_transaction(struct fw_card *ca + } + EXPORT_SYMBOL(fw_cancel_transaction); + +-static void +-transmit_complete_callback(struct fw_packet *packet, +- struct fw_card *card, int status) ++static void transmit_complete_callback(struct fw_packet *packet, ++ struct fw_card *card, int status) + { + struct fw_transaction *t = + container_of(packet, struct fw_transaction, packet); +@@ -151,8 +148,7 @@ transmit_complete_callback(struct fw_pac + } + } + +-static void +-fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, ++static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, + int destination_id, int source_id, int generation, int speed, + unsigned long long offset, void *payload, size_t length) + { +@@ -247,12 +243,10 @@ fw_fill_request(struct fw_packet *packet + * @param callback_data pointer to arbitrary data, which will be + * passed to the callback + */ +-void +-fw_send_request(struct fw_card *card, struct fw_transaction *t, +- int tcode, int destination_id, int generation, int speed, +- unsigned long long offset, +- void *payload, size_t length, +- fw_transaction_callback_t callback, void *callback_data) ++void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, ++ int destination_id, int generation, int speed, ++ unsigned long long offset, void *payload, size_t length, ++ fw_transaction_callback_t callback, void *callback_data) + { + unsigned long flags; + int tlabel; +@@ -322,8 +316,8 @@ static void transaction_callback(struct + * Returns the RCODE. + */ + int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, +- int generation, int speed, unsigned long long offset, +- void *data, size_t length) ++ int generation, int speed, unsigned long long offset, ++ void *data, size_t length) + { + struct transaction_callback_data d; + struct fw_transaction t; +@@ -399,9 +393,8 @@ void fw_flush_transactions(struct fw_car + } + } + +-static struct fw_address_handler * +-lookup_overlapping_address_handler(struct list_head *list, +- unsigned long long offset, size_t length) ++static struct fw_address_handler *lookup_overlapping_address_handler( ++ struct list_head *list, unsigned long long offset, size_t length) + { + struct fw_address_handler *handler; + +@@ -414,9 +407,8 @@ lookup_overlapping_address_handler(struc + return NULL; + } + +-static struct fw_address_handler * +-lookup_enclosing_address_handler(struct list_head *list, +- unsigned long long offset, size_t length) ++static struct fw_address_handler *lookup_enclosing_address_handler( ++ struct list_head *list, unsigned long long offset, size_t length) + { + struct fw_address_handler *handler; + +@@ -449,36 +441,44 @@ const struct fw_address_region fw_unit_s + #endif /* 0 */ + + /** +- * Allocate a range of addresses in the node space of the OHCI +- * controller. When a request is received that falls within the +- * specified address range, the specified callback is invoked. The +- * parameters passed to the callback give the details of the +- * particular request. ++ * fw_core_add_address_handler - register for incoming requests ++ * @handler: callback ++ * @region: region in the IEEE 1212 node space address range ++ * ++ * region->start, ->end, and handler->length have to be quadlet-aligned. ++ * ++ * When a request is received that falls within the specified address range, ++ * the specified callback is invoked. The parameters passed to the callback ++ * give the details of the particular request. 
+ * + * Return value: 0 on success, non-zero otherwise. + * The start offset of the handler's address region is determined by + * fw_core_add_address_handler() and is returned in handler->offset. +- * The offset is quadlet-aligned. + */ +-int +-fw_core_add_address_handler(struct fw_address_handler *handler, +- const struct fw_address_region *region) ++int fw_core_add_address_handler(struct fw_address_handler *handler, ++ const struct fw_address_region *region) + { + struct fw_address_handler *other; + unsigned long flags; + int ret = -EBUSY; + ++ if (region->start & 0xffff000000000003ULL || ++ region->end & 0xffff000000000003ULL || ++ region->start >= region->end || ++ handler->length & 3 || ++ handler->length == 0) ++ return -EINVAL; ++ + spin_lock_irqsave(&address_handler_lock, flags); + +- handler->offset = roundup(region->start, 4); ++ handler->offset = region->start; + while (handler->offset + handler->length <= region->end) { + other = + lookup_overlapping_address_handler(&address_handler_list, + handler->offset, + handler->length); + if (other != NULL) { +- handler->offset = +- roundup(other->offset + other->length, 4); ++ handler->offset += other->length; + } else { + list_add_tail(&handler->link, &address_handler_list); + ret = 0; +@@ -493,12 +493,7 @@ fw_core_add_address_handler(struct fw_ad + EXPORT_SYMBOL(fw_core_add_address_handler); + + /** +- * Deallocate a range of addresses allocated with fw_allocate. This +- * will call the associated callback one last time with a the special +- * tcode TCODE_DEALLOCATE, to let the client destroy the registered +- * callback data. For convenience, the callback parameters offset and +- * length are set to the start and the length respectively for the +- * deallocated region, payload is set to NULL. 
++ * fw_core_remove_address_handler - unregister an address handler + */ + void fw_core_remove_address_handler(struct fw_address_handler *handler) + { +@@ -518,9 +513,8 @@ struct fw_request { + u32 data[0]; + }; + +-static void +-free_response_callback(struct fw_packet *packet, +- struct fw_card *card, int status) ++static void free_response_callback(struct fw_packet *packet, ++ struct fw_card *card, int status) + { + struct fw_request *request; + +@@ -528,9 +522,8 @@ free_response_callback(struct fw_packet + kfree(request); + } + +-void +-fw_fill_response(struct fw_packet *response, u32 *request_header, +- int rcode, void *payload, size_t length) ++void fw_fill_response(struct fw_packet *response, u32 *request_header, ++ int rcode, void *payload, size_t length) + { + int tcode, tlabel, extended_tcode, source, destination; + +@@ -588,8 +581,7 @@ fw_fill_response(struct fw_packet *respo + } + EXPORT_SYMBOL(fw_fill_response); + +-static struct fw_request * +-allocate_request(struct fw_packet *p) ++static struct fw_request *allocate_request(struct fw_packet *p) + { + struct fw_request *request; + u32 *data, length; +@@ -649,8 +641,8 @@ allocate_request(struct fw_packet *p) + return request; + } + +-void +-fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) ++void fw_send_response(struct fw_card *card, ++ struct fw_request *request, int rcode) + { + /* unified transaction or broadcast transaction: don't respond */ + if (request->ack != ACK_PENDING || +@@ -670,8 +662,7 @@ fw_send_response(struct fw_card *card, s + } + EXPORT_SYMBOL(fw_send_response); + +-void +-fw_core_handle_request(struct fw_card *card, struct fw_packet *p) ++void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) + { + struct fw_address_handler *handler; + struct fw_request *request; +@@ -719,8 +710,7 @@ fw_core_handle_request(struct fw_card *c + } + EXPORT_SYMBOL(fw_core_handle_request); + +-void +-fw_core_handle_response(struct fw_card *card, struct fw_packet *p) ++void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) + { + struct fw_transaction *t; + unsigned long flags; +@@ -793,12 +783,10 @@ static const struct fw_address_region to + { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP, + .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, }; + +-static void +-handle_topology_map(struct fw_card *card, struct fw_request *request, +- int tcode, int destination, int source, +- int generation, int speed, +- unsigned long long offset, +- void *payload, size_t length, void *callback_data) ++static void handle_topology_map(struct fw_card *card, struct fw_request *request, ++ int tcode, int destination, int source, int generation, ++ int speed, unsigned long long offset, ++ void *payload, size_t length, void *callback_data) + { + int i, start, end; + __be32 *map; +@@ -832,12 +820,10 @@ static const struct fw_address_region re + { .start = CSR_REGISTER_BASE, + .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; + +-static void +-handle_registers(struct fw_card *card, struct fw_request *request, +- int tcode, int destination, int source, +- int generation, int speed, +- unsigned long long offset, +- void *payload, size_t length, void *callback_data) ++static void handle_registers(struct fw_card *card, struct fw_request *request, ++ int tcode, int destination, int source, int generation, ++ int speed, unsigned long long offset, ++ void *payload, size_t length, void *callback_data) + { + int reg = offset & ~CSR_REGISTER_BASE; + unsigned long long bus_time; +@@ -939,11 +925,11 @@ static 
struct fw_descriptor model_id_des + + static int __init fw_core_init(void) + { +- int retval; ++ int ret; + +- retval = bus_register(&fw_bus_type); +- if (retval < 0) +- return retval; ++ ret = bus_register(&fw_bus_type); ++ if (ret < 0) ++ return ret; + + fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops); + if (fw_cdev_major < 0) { +@@ -951,19 +937,10 @@ static int __init fw_core_init(void) + return fw_cdev_major; + } + +- retval = fw_core_add_address_handler(&topology_map, +- &topology_map_region); +- BUG_ON(retval < 0); +- +- retval = fw_core_add_address_handler(®isters, +- ®isters_region); +- BUG_ON(retval < 0); +- +- /* Add the vendor textual descriptor. */ +- retval = fw_core_add_descriptor(&vendor_id_descriptor); +- BUG_ON(retval < 0); +- retval = fw_core_add_descriptor(&model_id_descriptor); +- BUG_ON(retval < 0); ++ fw_core_add_address_handler(&topology_map, &topology_map_region); ++ fw_core_add_address_handler(®isters, ®isters_region); ++ fw_core_add_descriptor(&vendor_id_descriptor); ++ fw_core_add_descriptor(&model_id_descriptor); + + return 0; + } +diff -Naurp linux-2.6-git/drivers/firewire/fw-transaction.h firewire-git/drivers/firewire/fw-transaction.h +--- linux-2.6-git/drivers/firewire/fw-transaction.h 2009-01-30 13:39:02.992772636 -0500 ++++ firewire-git/drivers/firewire/fw-transaction.h 2009-01-30 13:35:51.862647087 -0500 +@@ -82,14 +82,14 @@ + #define CSR_SPEED_MAP 0x2000 + #define CSR_SPEED_MAP_END 0x3000 + ++#define BANDWIDTH_AVAILABLE_INITIAL 4915 + #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) + #define BROADCAST_CHANNEL_VALID (1 << 30) + + #define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args) + #define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args) + +-static inline void +-fw_memcpy_from_be32(void *_dst, void *_src, size_t size) ++static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size) + { + u32 *dst = _dst; + __be32 *src = _src; +@@ -99,8 +99,7 @@ fw_memcpy_from_be32(void *_dst, void *_s + dst[i] = be32_to_cpu(src[i]); + } + +-static inline void +-fw_memcpy_to_be32(void *_dst, void *_src, size_t size) ++static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size) + { + fw_memcpy_from_be32(_dst, _src, size); + } +@@ -125,8 +124,7 @@ typedef void (*fw_packet_callback_t)(str + struct fw_card *card, int status); + + typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, +- void *data, +- size_t length, ++ void *data, size_t length, + void *callback_data); + + /* +@@ -141,12 +139,6 @@ typedef void (*fw_address_callback_t)(st + void *data, size_t length, + void *callback_data); + +-typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle, +- int node_id, int generation, +- u32 *self_ids, +- int self_id_count, +- void *callback_data); +- + struct fw_packet { + int speed; + int generation; +@@ -187,12 +179,6 @@ struct fw_transaction { + void *callback_data; + }; + +-static inline struct fw_packet * +-fw_packet(struct list_head *l) +-{ +- return list_entry(l, struct fw_packet, link); +-} +- + struct fw_address_handler { + u64 offset; + size_t length; +@@ -201,7 +187,6 @@ struct fw_address_handler { + struct list_head link; + }; + +- + struct fw_address_region { + u64 start; + u64 end; +@@ -315,10 +300,8 @@ struct fw_iso_packet { + struct fw_iso_context; + + typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, +- u32 cycle, +- size_t header_length, +- void *header, +- void *data); ++ u32 cycle, size_t header_length, ++ void *header, void 
*data); + + /* + * An iso buffer is just a set of pages mapped for DMA in the +@@ -344,36 +327,25 @@ struct fw_iso_context { + void *callback_data; + }; + +-int +-fw_iso_buffer_init(struct fw_iso_buffer *buffer, +- struct fw_card *card, +- int page_count, +- enum dma_data_direction direction); +-int +-fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); +-void +-fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); +- +-struct fw_iso_context * +-fw_iso_context_create(struct fw_card *card, int type, +- int channel, int speed, size_t header_size, +- fw_iso_callback_t callback, void *callback_data); +- +-void +-fw_iso_context_destroy(struct fw_iso_context *ctx); +- +-int +-fw_iso_context_queue(struct fw_iso_context *ctx, +- struct fw_iso_packet *packet, +- struct fw_iso_buffer *buffer, +- unsigned long payload); +- +-int +-fw_iso_context_start(struct fw_iso_context *ctx, +- int cycle, int sync, int tags); ++int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, ++ int page_count, enum dma_data_direction direction); ++int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); ++void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); ++ ++struct fw_iso_context *fw_iso_context_create(struct fw_card *card, ++ int type, int channel, int speed, size_t header_size, ++ fw_iso_callback_t callback, void *callback_data); ++int fw_iso_context_queue(struct fw_iso_context *ctx, ++ struct fw_iso_packet *packet, ++ struct fw_iso_buffer *buffer, ++ unsigned long payload); ++int fw_iso_context_start(struct fw_iso_context *ctx, ++ int cycle, int sync, int tags); ++int fw_iso_context_stop(struct fw_iso_context *ctx); ++void fw_iso_context_destroy(struct fw_iso_context *ctx); + +-int +-fw_iso_context_stop(struct fw_iso_context *ctx); ++void fw_iso_resource_manage(struct fw_card *card, int generation, ++ u64 channels_mask, int *channel, int *bandwidth, bool allocate); + + struct fw_card_driver { + /* +@@ -415,7 +387,7 @@ struct fw_card_driver { + + struct fw_iso_context * + (*allocate_iso_context)(struct fw_card *card, +- int type, size_t header_size); ++ int type, int channel, size_t header_size); + void (*free_iso_context)(struct fw_iso_context *ctx); + + int (*start_iso)(struct fw_iso_context *ctx, +@@ -429,24 +401,18 @@ struct fw_card_driver { + int (*stop_iso)(struct fw_iso_context *ctx); + }; + +-int +-fw_core_initiate_bus_reset(struct fw_card *card, int short_reset); ++int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset); + +-void +-fw_send_request(struct fw_card *card, struct fw_transaction *t, ++void fw_send_request(struct fw_card *card, struct fw_transaction *t, + int tcode, int destination_id, int generation, int speed, + unsigned long long offset, void *data, size_t length, + fw_transaction_callback_t callback, void *callback_data); +- +-int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, +- int generation, int speed, unsigned long long offset, +- void *data, size_t length); +- + int fw_cancel_transaction(struct fw_card *card, + struct fw_transaction *transaction); +- + void fw_flush_transactions(struct fw_card *card); +- ++int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, ++ int generation, int speed, unsigned long long offset, ++ void *data, size_t length); + void fw_send_phy_config(struct fw_card *card, + int node_id, int generation, int gap_count); + +@@ -454,29 +420,18 @@ void fw_send_phy_config(struct fw_card * + * Called 
by the topology code to inform the device code of node + * activity; found, lost, or updated nodes. + */ +-void +-fw_node_event(struct fw_card *card, struct fw_node *node, int event); ++void fw_node_event(struct fw_card *card, struct fw_node *node, int event); + + /* API used by card level drivers */ + +-void +-fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, +- struct device *device); +-int +-fw_card_add(struct fw_card *card, +- u32 max_receive, u32 link_speed, u64 guid); +- +-void +-fw_core_remove_card(struct fw_card *card); +- +-void +-fw_core_handle_bus_reset(struct fw_card *card, +- int node_id, int generation, +- int self_id_count, u32 *self_ids); +-void +-fw_core_handle_request(struct fw_card *card, struct fw_packet *request); +- +-void +-fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); ++void fw_card_initialize(struct fw_card *card, ++ const struct fw_card_driver *driver, struct device *device); ++int fw_card_add(struct fw_card *card, ++ u32 max_receive, u32 link_speed, u64 guid); ++void fw_core_remove_card(struct fw_card *card); ++void fw_core_handle_bus_reset(struct fw_card *card, int node_id, ++ int generation, int self_id_count, u32 *self_ids); ++void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); ++void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); + + #endif /* __fw_transaction_h */ +--- linux-2.6-git/include/linux/firewire-cdev.h 2008-11-04 11:19:21.000000000 -0500 ++++ firewire-git/include/linux/firewire-cdev.h 2009-01-30 13:35:54.327647015 -0500 +@@ -25,10 +25,12 @@ + #include + #include + +-#define FW_CDEV_EVENT_BUS_RESET 0x00 +-#define FW_CDEV_EVENT_RESPONSE 0x01 +-#define FW_CDEV_EVENT_REQUEST 0x02 +-#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 ++#define FW_CDEV_EVENT_BUS_RESET 0x00 ++#define FW_CDEV_EVENT_RESPONSE 0x01 ++#define FW_CDEV_EVENT_REQUEST 0x02 ++#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 ++#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04 ++#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05 + + /** + * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types +@@ -136,7 +138,24 @@ struct fw_cdev_event_request { + * This event is sent when the controller has completed an &fw_cdev_iso_packet + * with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers + * stripped of all packets up until and including the interrupt packet are +- * returned in the @header field. ++ * returned in the @header field. The amount of header data per packet is as ++ * specified at iso context creation by &fw_cdev_create_iso_context.header_size. ++ * ++ * In version 1 of this ABI, header data consisted of the 1394 isochronous ++ * packet header, followed by quadlets from the packet payload if ++ * &fw_cdev_create_iso_context.header_size > 4. ++ * ++ * In version 2 of this ABI, header data consist of the 1394 isochronous ++ * packet header, followed by a timestamp quadlet if ++ * &fw_cdev_create_iso_context.header_size > 4, followed by quadlets from the ++ * packet payload if &fw_cdev_create_iso_context.header_size > 8. ++ * ++ * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2. ++ * ++ * Format of 1394 iso packet header: 16 bits len, 2 bits tag, 6 bits channel, ++ * 4 bits tcode, 4 bits sy, in big endian byte order. Format of timestamp: ++ * 16 bits invalid, 3 bits cycleSeconds, 13 bits cycleCount, in big endian byte ++ * order. 
+ */ + struct fw_cdev_event_iso_interrupt { + __u64 closure; +@@ -147,12 +166,44 @@ struct fw_cdev_event_iso_interrupt { + }; + + /** ++ * struct fw_cdev_event_iso_resource - Iso resources were allocated or freed ++ * @closure: See &fw_cdev_event_common; ++ * set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl ++ * @type: %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or ++ * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED ++ * @handle: Reference by which an allocated resource can be deallocated ++ * @channel: Isochronous channel which was (de)allocated, if any ++ * @bandwidth: Bandwidth allocation units which were (de)allocated, if any ++ * ++ * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event is sent after an isochronous ++ * resource was allocated at the IRM. The client has to check @channel and ++ * @bandwidth for whether the allocation actually succeeded. ++ * ++ * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event is sent after an isochronous ++ * resource was deallocated at the IRM. It is also sent when automatic ++ * reallocation after a bus reset failed. ++ * ++ * @channel is <0 if no channel was (de)allocated or if reallocation failed. ++ * @bandwidth is 0 if no bandwidth was (de)allocated or if reallocation failed. ++ */ ++struct fw_cdev_event_iso_resource { ++ __u64 closure; ++ __u32 type; ++ __u32 handle; ++ __s32 channel; ++ __s32 bandwidth; ++}; ++ ++/** + * union fw_cdev_event - Convenience union of fw_cdev_event_ types + * @common: Valid for all types + * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET + * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE + * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST + * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT ++ * @iso_resource: Valid if @common.type == ++ * %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or ++ * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED + * + * Convenience union for userspace use. Events could be read(2) into an + * appropriately aligned char buffer and then cast to this union for further +@@ -163,13 +214,15 @@ struct fw_cdev_event_iso_interrupt { + * not fit will be discarded so that the next read(2) will return a new event. 
+ */ + union fw_cdev_event { +- struct fw_cdev_event_common common; +- struct fw_cdev_event_bus_reset bus_reset; +- struct fw_cdev_event_response response; +- struct fw_cdev_event_request request; +- struct fw_cdev_event_iso_interrupt iso_interrupt; ++ struct fw_cdev_event_common common; ++ struct fw_cdev_event_bus_reset bus_reset; ++ struct fw_cdev_event_response response; ++ struct fw_cdev_event_request request; ++ struct fw_cdev_event_iso_interrupt iso_interrupt; ++ struct fw_cdev_event_iso_resource iso_resource; + }; + ++/* available since kernel version 2.6.22 */ + #define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info) + #define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request) + #define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate) +@@ -178,18 +231,29 @@ union fw_cdev_event { + #define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset) + #define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor) + #define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor) +- + #define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context) + #define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso) + #define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso) + #define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso) ++ ++/* available since kernel version 2.6.24 */ + #define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer) + +-/* FW_CDEV_VERSION History +- * +- * 1 Feb 18, 2007: Initial version. ++/* available since kernel version 2.6.30 */ ++#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE _IOWR('#', 0x0d, struct fw_cdev_allocate_iso_resource) ++#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE _IOW('#', 0x0e, struct fw_cdev_deallocate) ++#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x0f, struct fw_cdev_allocate_iso_resource) ++#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x10, struct fw_cdev_allocate_iso_resource) ++#define FW_CDEV_IOC_GET_SPEED _IOR('#', 0x11, struct fw_cdev_get_speed) ++#define FW_CDEV_IOC_SEND_BROADCAST_REQUEST _IOW('#', 0x12, struct fw_cdev_send_request) ++ ++/* ++ * FW_CDEV_VERSION History ++ * 1 (2.6.22) - initial version ++ * 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if ++ * &fw_cdev_create_iso_context.header_size is 8 or more + */ +-#define FW_CDEV_VERSION 1 ++#define FW_CDEV_VERSION 2 + + /** + * struct fw_cdev_get_info - General purpose information ioctl +@@ -201,7 +265,7 @@ union fw_cdev_event { + * case, @rom_length is updated with the actual length of the + * configuration ROM. + * @rom: If non-zero, address of a buffer to be filled by a copy of the +- * local node's configuration ROM ++ * device's configuration ROM + * @bus_reset: If non-zero, address of a buffer to be filled by a + * &struct fw_cdev_event_bus_reset with the current state + * of the bus. This does not cause a bus reset to happen. +@@ -229,7 +293,7 @@ struct fw_cdev_get_info { + * Send a request to the device. This ioctl implements all outgoing requests. + * Both quadlet and block request specify the payload as a pointer to the data + * in the @data field. Once the transaction completes, the kernel writes an +- * &fw_cdev_event_request event back. The @closure field is passed back to ++ * &fw_cdev_event_response event back. The @closure field is passed back to + * user space in the response event. 
+ */ + struct fw_cdev_send_request { +@@ -284,9 +348,9 @@ struct fw_cdev_allocate { + }; + + /** +- * struct fw_cdev_deallocate - Free an address range allocation +- * @handle: Handle to the address range, as returned by the kernel when the +- * range was allocated ++ * struct fw_cdev_deallocate - Free a CSR address range or isochronous resource ++ * @handle: Handle to the address range or iso resource, as returned by the ++ * kernel when the range or resource was allocated + */ + struct fw_cdev_deallocate { + __u32 handle; +@@ -370,6 +434,9 @@ struct fw_cdev_remove_descriptor { + * + * If a context was successfully created, the kernel writes back a handle to the + * context, which must be passed in for subsequent operations on that context. ++ * ++ * Note that the effect of a @header_size > 4 depends on ++ * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt. + */ + struct fw_cdev_create_iso_context { + __u32 type; +@@ -473,10 +540,73 @@ struct fw_cdev_stop_iso { + * The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer + * and also the system clock. This allows to express the receive time of an + * isochronous packet as a system time with microsecond accuracy. ++ * ++ * @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and ++ * 12 bits cycleOffset, in host byte order. + */ + struct fw_cdev_get_cycle_timer { + __u64 local_time; + __u32 cycle_timer; + }; + ++/** ++ * struct fw_cdev_allocate_iso_resource - (De)allocate a channel or bandwidth ++ * @closure: Passed back to userspace in correponding iso resource events ++ * @channels: Isochronous channels of which one is to be (de)allocated ++ * @bandwidth: Isochronous bandwidth units to be (de)allocated ++ * @handle: Handle to the allocation, written by the kernel (only valid in ++ * case of %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctls) ++ * ++ * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl initiates allocation of an ++ * isochronous channel and/or of isochronous bandwidth at the isochronous ++ * resource manager (IRM). Only one of the channels specified in @channels is ++ * allocated. An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED is sent after ++ * communication with the IRM, indicating success or failure in the event data. ++ * The kernel will automatically reallocate the resources after bus resets. ++ * Should a reallocation fail, an %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event ++ * will be sent. The kernel will also automatically deallocate the resources ++ * when the file descriptor is closed. ++ * ++ * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE ioctl can be used to initiate ++ * deallocation of resources which were allocated as described above. ++ * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation. ++ * ++ * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE ioctl is a variant of allocation ++ * without automatic re- or deallocation. ++ * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event concludes this operation, ++ * indicating success or failure in its data. ++ * ++ * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE ioctl works like ++ * %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE except that resources are freed ++ * instead of allocated. ++ * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation. ++ * ++ * To summarize, %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE allocates iso resources ++ * for the lifetime of the fd or handle. ++ * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources ++ * for the duration of a bus generation. 
++ * ++ * @channels is a host-endian bitfield with the least significant bit ++ * representing channel 0 and the most significant bit representing channel 63: ++ * 1ULL << c for each channel c that is a candidate for (de)allocation. ++ * ++ * @bandwidth is expressed in bandwidth allocation units, i.e. the time to send ++ * one quadlet of data (payload or header data) at speed S1600. ++ */ ++struct fw_cdev_allocate_iso_resource { ++ __u64 closure; ++ __u64 channels; ++ __u32 bandwidth; ++ __u32 handle; ++}; ++ ++/** ++ * struct fw_cdev_get_speed - Query maximum speed to or from this device ++ * @max_speed: Speed code; minimum of the device's link speed, the local node's ++ * link speed, and all PHY port speeds between the two links ++ */ ++struct fw_cdev_get_speed { ++ __u32 max_speed; ++}; ++ + #endif /* _LINUX_FIREWIRE_CDEV_H */ diff --git a/linux-2.6-fix-usb-serial-autosuspend.diff b/linux-2.6-fix-usb-serial-autosuspend.diff new file mode 100644 index 0000000..c58dfd6 --- /dev/null +++ b/linux-2.6-fix-usb-serial-autosuspend.diff @@ -0,0 +1,49 @@ +commit 3b8e1210f0a558145ba87eddb20f7b104676d6f6 +Author: Oliber Neukum +Date: Sat Jul 18 07:19:04 2009 +0200 + + usb: fix counter logic in opening serial converters + + the usage counter must be increased only after autoresumption + +diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c +index 99188c9..3d1a756 100644 +--- a/drivers/usb/serial/usb-serial.c ++++ b/drivers/usb/serial/usb-serial.c +@@ -216,16 +216,15 @@ static int serial_open (struct tty_struct *tty, struct file *filp) + goto bailout_port_put; + } + +- ++port->port.count; +- + /* set up our port structure making the tty driver + * remember our port object, and us it */ + tty->driver_data = port; + tty_port_tty_set(&port->port, tty); + + /* If the console is attached, the device is already open */ +- if (port->port.count == 1 && !port->console) { ++ if (!port->port.count && !port->console) { + first = 1; ++ + /* lock this module before we call it + * this may fail, which means we must bail out, + * safe because we are called with BKL held */ +@@ -242,6 +241,8 @@ static int serial_open (struct tty_struct *tty, struct file *filp) + if (retval) + goto bailout_module_put; + ++ ++port->port.count; ++ + /* only call the device specific open if this + * is the first time the port is opened */ + retval = serial->type->open(tty, port, filp); +@@ -249,6 +250,8 @@ static int serial_open (struct tty_struct *tty, struct file *filp) + goto bailout_interface_put; + mutex_unlock(&serial->disc_mutex); + set_bit(ASYNCB_INITIALIZED, &port->port.flags); ++ } else { ++ ++port->port.count; + } + mutex_unlock(&port->mutex); + /* Now do the correct tty layer semantics */ diff --git a/linux-2.6-g5-therm-shutdown.patch b/linux-2.6-g5-therm-shutdown.patch new file mode 100644 index 0000000..1471ef1 --- /dev/null +++ b/linux-2.6-g5-therm-shutdown.patch @@ -0,0 +1,70 @@ +--- linux-2.6.15/drivers/macintosh/therm_pm72.c.orig 2006-04-02 21:34:48.000000000 +0100 ++++ linux-2.6.15/drivers/macintosh/therm_pm72.c 2006-04-02 22:33:27.000000000 +0100 +@@ -924,10 +925,16 @@ static void do_monitor_cpu_combined(void + printk(KERN_WARNING "Warning ! Temperature way above maximum (%d) !\n", + temp_combi >> 16); + state0->overtemp += CPU_MAX_OVERTEMP / 4; +- } else if (temp_combi > (state0->mpu.tmax << 16)) ++ } else if (temp_combi > (state0->mpu.tmax << 16)) { + state0->overtemp++; +- else ++ printk(KERN_WARNING "Temperature %d above max %d. 
overtemp %d\n", ++ temp_combi >> 16, state0->mpu.tmax, state0->overtemp); ++ } else { ++ if (state0->overtemp) ++ printk(KERN_WARNING "Temperature back down to %d\n", ++ temp_combi >> 16); + state0->overtemp = 0; ++ } + if (state0->overtemp >= CPU_MAX_OVERTEMP) + critical_state = 1; + if (state0->overtemp > 0) { +@@ -999,10 +1015,16 @@ static void do_monitor_cpu_split(struct + " (%d) !\n", + state->index, temp >> 16); + state->overtemp += CPU_MAX_OVERTEMP / 4; +- } else if (temp > (state->mpu.tmax << 16)) ++ } else if (temp > (state->mpu.tmax << 16)) { + state->overtemp++; +- else ++ printk(KERN_WARNING "CPU %d temperature %d above max %d. overtemp %d\n", ++ state->index, temp >> 16, state->mpu.tmax, state->overtemp); ++ } else { ++ if (state->overtemp) ++ printk(KERN_WARNING "CPU %d temperature back down to %d\n", ++ state->index, temp >> 16); + state->overtemp = 0; ++ } + if (state->overtemp >= CPU_MAX_OVERTEMP) + critical_state = 1; + if (state->overtemp > 0) { +@@ -1061,10 +1097,16 @@ static void do_monitor_cpu_rack(struct c + " (%d) !\n", + state->index, temp >> 16); + state->overtemp = CPU_MAX_OVERTEMP / 4; +- } else if (temp > (state->mpu.tmax << 16)) ++ } else if (temp > (state->mpu.tmax << 16)) { + state->overtemp++; +- else ++ printk(KERN_WARNING "CPU %d temperature %d above max %d. overtemp %d\n", ++ state->index, temp >> 16, state->mpu.tmax, state->overtemp); ++ } else { ++ if (state->overtemp) ++ printk(KERN_WARNING "CPU %d temperature back down to %d\n", ++ state->index, temp >> 16); + state->overtemp = 0; ++ } + if (state->overtemp >= CPU_MAX_OVERTEMP) + critical_state = 1; + if (state->overtemp > 0) { +--- linux-2.6.15/drivers/macintosh/therm_pm72.h~ 2006-01-03 03:21:10.000000000 +0000 ++++ linux-2.6.15/drivers/macintosh/therm_pm72.h 2006-04-02 22:25:58.000000000 +0100 +@@ -243,7 +243,7 @@ struct dimm_pid_state + #define CPU_TEMP_HISTORY_SIZE 2 + #define CPU_POWER_HISTORY_SIZE 10 + #define CPU_PID_INTERVAL 1 +-#define CPU_MAX_OVERTEMP 30 ++#define CPU_MAX_OVERTEMP 90 + + #define CPUA_PUMP_RPM_INDEX 7 + #define CPUB_PUMP_RPM_INDEX 8 diff --git a/linux-2.6-hotfixes.patch b/linux-2.6-hotfixes.patch index 06b57a1..e69de29 100644 --- a/linux-2.6-hotfixes.patch +++ b/linux-2.6-hotfixes.patch @@ -1,13 +0,0 @@ -fixes: -implicit declaration of function kzalloc - ---- linux-2.6.34.noarch/drivers/usb/serial/qcserial.c~ 2010-06-08 15:19:41.000000000 -0400 -+++ linux-2.6.34.noarch/drivers/usb/serial/qcserial.c 2010-06-08 15:19:47.000000000 -0400 -@@ -11,6 +11,7 @@ - * - */ - -+#include - #include - #include - #include diff --git a/linux-2.6-input-clickpad-support.patch b/linux-2.6-input-clickpad-support.patch new file mode 100644 index 0000000..c61756e --- /dev/null +++ b/linux-2.6-input-clickpad-support.patch @@ -0,0 +1,137 @@ +From 5f57d67da87332a9a1ba8fa7a33bf0680e1c76e7 Mon Sep 17 00:00:00 2001 +From: Takashi Iwai +Date: Mon, 19 Apr 2010 10:37:21 -0700 +Subject: [PATCH] Input: Add support of Synaptics Clickpad device + +The new type of touchpads can be detected via a new query command +0x0c. The clickpad flags are in cap[0]:4 and cap[1]:0 bits. + +When the device is detected, the driver now reports only the left +button as the supported buttons so that X11 driver can detect that +the device is Clickpad. A Clickpad device gives the button events +only as the middle button. The kernel driver morphs to the left +button. The real handling of Clickpad is done rather in X driver +side. 
+ +Signed-off-by: Takashi Iwai +Signed-off-by: Dmitry Torokhov +--- + drivers/input/mouse/synaptics.c | 35 ++++++++++++++++++++++++++++++----- + drivers/input/mouse/synaptics.h | 4 ++++ + 2 files changed, 34 insertions(+), 5 deletions(-) + +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c +index d3f5243..9ab9ff0 100644 +--- a/drivers/input/mouse/synaptics.c ++++ b/drivers/input/mouse/synaptics.c +@@ -136,7 +136,8 @@ static int synaptics_capability(struct psmouse *psmouse) + if (synaptics_send_cmd(psmouse, SYN_QUE_CAPABILITIES, cap)) + return -1; + priv->capabilities = (cap[0] << 16) | (cap[1] << 8) | cap[2]; +- priv->ext_cap = 0; ++ priv->ext_cap = priv->ext_cap_0c = 0; ++ + if (!SYN_CAP_VALID(priv->capabilities)) + return -1; + +@@ -149,7 +150,7 @@ static int synaptics_capability(struct psmouse *psmouse) + if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 1) { + if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_CAPAB, cap)) { + printk(KERN_ERR "Synaptics claims to have extended capabilities," +- " but I'm not able to read them."); ++ " but I'm not able to read them.\n"); + } else { + priv->ext_cap = (cap[0] << 16) | (cap[1] << 8) | cap[2]; + +@@ -161,6 +162,16 @@ static int synaptics_capability(struct psmouse *psmouse) + priv->ext_cap &= 0xff0fff; + } + } ++ ++ if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 4) { ++ if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_CAPAB_0C, cap)) { ++ printk(KERN_ERR "Synaptics claims to have extended capability 0x0c," ++ " but I'm not able to read it.\n"); ++ } else { ++ priv->ext_cap_0c = (cap[0] << 16) | (cap[1] << 8) | cap[2]; ++ } ++ } ++ + return 0; + } + +@@ -347,7 +358,15 @@ static void synaptics_parse_hw_state(unsigned char buf[], struct synaptics_data + hw->left = (buf[0] & 0x01) ? 1 : 0; + hw->right = (buf[0] & 0x02) ? 1 : 0; + +- if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities)) { ++ if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { ++ /* ++ * Clickpad's button is transmitted as middle button, ++ * however, since it is primary button, we will report ++ * it as BTN_LEFT. ++ */ ++ hw->left = ((buf[0] ^ buf[3]) & 0x01) ? 1 : 0; ++ ++ } else if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities)) { + hw->middle = ((buf[0] ^ buf[3]) & 0x01) ? 1 : 0; + if (hw->w == 2) + hw->scroll = (signed char)(buf[1]); +@@ -592,6 +611,12 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv) + + dev->absres[ABS_X] = priv->x_res; + dev->absres[ABS_Y] = priv->y_res; ++ ++ if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { ++ /* Clickpads report only left button */ ++ __clear_bit(BTN_RIGHT, dev->keybit); ++ __clear_bit(BTN_MIDDLE, dev->keybit); ++ } + } + + static void synaptics_disconnect(struct psmouse *psmouse) +@@ -696,10 +721,10 @@ int synaptics_init(struct psmouse *psmouse) + + priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? 
SYN_NEWABS : SYN_OLDABS; + +- printk(KERN_INFO "Synaptics Touchpad, model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx\n", ++ printk(KERN_INFO "Synaptics Touchpad, model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx\n", + SYN_ID_MODEL(priv->identity), + SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity), +- priv->model_id, priv->capabilities, priv->ext_cap); ++ priv->model_id, priv->capabilities, priv->ext_cap, priv->ext_cap_0c); + + set_input_params(psmouse->dev, priv); + +diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h +index f0f40a3..ae37c5d 100644 +--- a/drivers/input/mouse/synaptics.h ++++ b/drivers/input/mouse/synaptics.h +@@ -18,6 +18,7 @@ + #define SYN_QUE_SERIAL_NUMBER_SUFFIX 0x07 + #define SYN_QUE_RESOLUTION 0x08 + #define SYN_QUE_EXT_CAPAB 0x09 ++#define SYN_QUE_EXT_CAPAB_0C 0x0c + + /* synatics modes */ + #define SYN_BIT_ABSOLUTE_MODE (1 << 7) +@@ -48,6 +49,8 @@ + #define SYN_CAP_VALID(c) ((((c) & 0x00ff00) >> 8) == 0x47) + #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) + #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) ++#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) ++#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100100) + + /* synaptics modes query bits */ + #define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) +@@ -96,6 +99,7 @@ struct synaptics_data { + unsigned long int model_id; /* Model-ID */ + unsigned long int capabilities; /* Capabilities */ + unsigned long int ext_cap; /* Extended Capabilities */ ++ unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */ + unsigned long int identity; /* Identification */ + int x_res; /* X resolution in units/mm */ + int y_res; /* Y resolution in units/mm */ +-- +1.7.0.1 + diff --git a/linux-2.6-input-fix-toshiba-hotkeys.patch b/linux-2.6-input-fix-toshiba-hotkeys.patch new file mode 100644 index 0000000..74558e6 --- /dev/null +++ b/linux-2.6-input-fix-toshiba-hotkeys.patch @@ -0,0 +1,278 @@ +commit 61a2aa30877a6e2be1d3fb3a71385e1f741819d7 +Author: Matthew Garrett +Date: Fri Mar 6 00:25:45 2009 +0000 + + toshiba-acpi: Add support for hotkey notifications + + Calling the ENAB method on Toshiba laptops results in notifications being + sent when laptop hotkeys are pressed. This patch simply calls that method + and sets up an input device if it's successful. 
+ + Signed-off-by: Matthew Garrett + +diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c +index 40e60fc..604f9fa 100644 +--- a/drivers/platform/x86/toshiba_acpi.c ++++ b/drivers/platform/x86/toshiba_acpi.c +@@ -46,6 +46,7 @@ + #include + #include + #include ++#include + + #include + +@@ -62,9 +63,10 @@ MODULE_LICENSE("GPL"); + + /* Toshiba ACPI method paths */ + #define METHOD_LCD_BRIGHTNESS "\\_SB_.PCI0.VGA_.LCD_._BCM" +-#define METHOD_HCI_1 "\\_SB_.VALD.GHCI" +-#define METHOD_HCI_2 "\\_SB_.VALZ.GHCI" ++#define TOSH_INTERFACE_1 "\\_SB_.VALD" ++#define TOSH_INTERFACE_2 "\\_SB_.VALZ" + #define METHOD_VIDEO_OUT "\\_SB_.VALX.DSSX" ++#define GHCI_METHOD ".GHCI" + + /* Toshiba HCI interface definitions + * +@@ -116,6 +118,36 @@ static const struct acpi_device_id toshiba_device_ids[] = { + }; + MODULE_DEVICE_TABLE(acpi, toshiba_device_ids); + ++struct key_entry { ++ char type; ++ u16 code; ++ u16 keycode; ++}; ++ ++enum {KE_KEY, KE_END}; ++ ++static struct key_entry toshiba_acpi_keymap[] = { ++ {KE_KEY, 0x101, KEY_MUTE}, ++ {KE_KEY, 0x13b, KEY_COFFEE}, ++ {KE_KEY, 0x13c, KEY_BATTERY}, ++ {KE_KEY, 0x13d, KEY_SLEEP}, ++ {KE_KEY, 0x13e, KEY_SUSPEND}, ++ {KE_KEY, 0x13f, KEY_SWITCHVIDEOMODE}, ++ {KE_KEY, 0x140, KEY_BRIGHTNESSDOWN}, ++ {KE_KEY, 0x141, KEY_BRIGHTNESSUP}, ++ {KE_KEY, 0x142, KEY_WLAN}, ++ {KE_KEY, 0x143, KEY_PROG1}, ++ {KE_KEY, 0xb05, KEY_PROG2}, ++ {KE_KEY, 0xb06, KEY_WWW}, ++ {KE_KEY, 0xb07, KEY_MAIL}, ++ {KE_KEY, 0xb30, KEY_STOP}, ++ {KE_KEY, 0xb31, KEY_PREVIOUSSONG}, ++ {KE_KEY, 0xb32, KEY_NEXTSONG}, ++ {KE_KEY, 0xb33, KEY_PLAYPAUSE}, ++ {KE_KEY, 0xb5a, KEY_MEDIA}, ++ {KE_END, 0, 0}, ++}; ++ + /* utility + */ + +@@ -252,6 +284,8 @@ struct toshiba_acpi_dev { + struct platform_device *p_dev; + struct rfkill *rfk_dev; + struct input_polled_dev *poll_dev; ++ struct input_dev *hotkey_dev; ++ acpi_handle handle; + + const char *bt_name; + const char *rfk_name; +@@ -702,6 +736,154 @@ static struct backlight_ops toshiba_backlight_data = { + .update_status = set_lcd_status, + }; + ++static struct key_entry *toshiba_acpi_get_entry_by_scancode(int code) ++{ ++ struct key_entry *key; ++ ++ for (key = toshiba_acpi_keymap; key->type != KE_END; key++) ++ if (code == key->code) ++ return key; ++ ++ return NULL; ++} ++ ++static struct key_entry *toshiba_acpi_get_entry_by_keycode(int code) ++{ ++ struct key_entry *key; ++ ++ for (key = toshiba_acpi_keymap; key->type != KE_END; key++) ++ if (code == key->keycode && key->type == KE_KEY) ++ return key; ++ ++ return NULL; ++} ++ ++static int toshiba_acpi_getkeycode(struct input_dev *dev, int scancode, ++ int *keycode) ++{ ++ struct key_entry *key = toshiba_acpi_get_entry_by_scancode(scancode); ++ ++ if (key && key->type == KE_KEY) { ++ *keycode = key->keycode; ++ return 0; ++ } ++ ++ return -EINVAL; ++} ++ ++static int toshiba_acpi_setkeycode(struct input_dev *dev, int scancode, ++ int keycode) ++{ ++ struct key_entry *key; ++ int old_keycode; ++ ++ if (keycode < 0 || keycode > KEY_MAX) ++ return -EINVAL; ++ ++ key = toshiba_acpi_get_entry_by_scancode(scancode); ++ if (key && key->type == KE_KEY) { ++ old_keycode = key->keycode; ++ key->keycode = keycode; ++ set_bit(keycode, dev->keybit); ++ if (!toshiba_acpi_get_entry_by_keycode(old_keycode)) ++ clear_bit(old_keycode, dev->keybit); ++ return 0; ++ } ++ ++ return -EINVAL; ++} ++ ++static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *data) ++{ ++ u32 hci_result, value; ++ struct key_entry *key; ++ ++ if (event != 0x80) ++ return; ++ do { ++ 
hci_read1(HCI_SYSTEM_EVENT, &value, &hci_result); ++ if (hci_result == HCI_SUCCESS) { ++ if (value == 0x100) ++ continue; ++ else if (value & 0x80) { ++ key = toshiba_acpi_get_entry_by_scancode ++ (value & ~0x80); ++ if (!key) { ++ printk(MY_INFO "Unknown key %x\n", ++ value & ~0x80); ++ continue; ++ } ++ input_report_key(toshiba_acpi.hotkey_dev, ++ key->keycode, 1); ++ input_sync(toshiba_acpi.hotkey_dev); ++ input_report_key(toshiba_acpi.hotkey_dev, ++ key->keycode, 0); ++ input_sync(toshiba_acpi.hotkey_dev); ++ } ++ } else if (hci_result == HCI_NOT_SUPPORTED) { ++ /* This is a workaround for an unresolved issue on ++ * some machines where system events sporadically ++ * become disabled. */ ++ hci_write1(HCI_SYSTEM_EVENT, 1, &hci_result); ++ printk(MY_NOTICE "Re-enabled hotkeys\n"); ++ } ++ } while (hci_result != HCI_EMPTY); ++} ++ ++static int toshiba_acpi_setup_keyboard(char *device) ++{ ++ acpi_status status; ++ acpi_handle handle; ++ int result; ++ const struct key_entry *key; ++ ++ status = acpi_get_handle(NULL, device, &handle); ++ if (ACPI_FAILURE(status)) { ++ printk(MY_INFO "Unable to get notification device\n"); ++ return -ENODEV; ++ } ++ ++ toshiba_acpi.handle = handle; ++ ++ status = acpi_evaluate_object(handle, "ENAB", NULL, NULL); ++ if (ACPI_FAILURE(status)) { ++ printk(MY_INFO "Unable to enable hotkeys\n"); ++ return -ENODEV; ++ } ++ ++ status = acpi_install_notify_handler (handle, ACPI_DEVICE_NOTIFY, ++ toshiba_acpi_notify, NULL); ++ if (ACPI_FAILURE(status)) { ++ printk(MY_INFO "Unable to install hotkey notification\n"); ++ return -ENODEV; ++ } ++ ++ toshiba_acpi.hotkey_dev = input_allocate_device(); ++ if (!toshiba_acpi.hotkey_dev) { ++ printk(MY_INFO "Unable to register input device\n"); ++ return -ENOMEM; ++ } ++ ++ toshiba_acpi.hotkey_dev->name = "Toshiba input device"; ++ toshiba_acpi.hotkey_dev->phys = device; ++ toshiba_acpi.hotkey_dev->id.bustype = BUS_HOST; ++ toshiba_acpi.hotkey_dev->getkeycode = toshiba_acpi_getkeycode; ++ toshiba_acpi.hotkey_dev->setkeycode = toshiba_acpi_setkeycode; ++ ++ for (key = toshiba_acpi_keymap; key->type != KE_END; key++) { ++ set_bit(EV_KEY, toshiba_acpi.hotkey_dev->evbit); ++ set_bit(key->keycode, toshiba_acpi.hotkey_dev->keybit); ++ } ++ ++ result = input_register_device(toshiba_acpi.hotkey_dev); ++ if (result) { ++ printk(MY_INFO "Unable to register input device\n"); ++ return result; ++ } ++ ++ return 0; ++} ++ + static void toshiba_acpi_exit(void) + { + if (toshiba_acpi.poll_dev) { +@@ -709,12 +891,18 @@ static void toshiba_acpi_exit(void) + input_free_polled_device(toshiba_acpi.poll_dev); + } + ++ if (toshiba_acpi.hotkey_dev) ++ input_unregister_device(toshiba_acpi.hotkey_dev); ++ + if (toshiba_acpi.rfk_dev) + rfkill_unregister(toshiba_acpi.rfk_dev); + + if (toshiba_backlight_device) + backlight_device_unregister(toshiba_backlight_device); + ++ acpi_remove_notify_handler(toshiba_acpi.handle, ACPI_DEVICE_NOTIFY, ++ toshiba_acpi_notify); ++ + remove_device(); + + if (toshiba_proc_dir) +@@ -738,11 +926,15 @@ static int __init toshiba_acpi_init(void) + return -ENODEV; + + /* simple device detection: look for HCI method */ +- if (is_valid_acpi_path(METHOD_HCI_1)) +- method_hci = METHOD_HCI_1; +- else if (is_valid_acpi_path(METHOD_HCI_2)) +- method_hci = METHOD_HCI_2; +- else ++ if (is_valid_acpi_path(TOSH_INTERFACE_1 GHCI_METHOD)) { ++ method_hci = TOSH_INTERFACE_1 GHCI_METHOD; ++ if (toshiba_acpi_setup_keyboard(TOSH_INTERFACE_1)) ++ printk(MY_INFO "Unable to activate hotkeys\n"); ++ } else if (is_valid_acpi_path(TOSH_INTERFACE_2 
GHCI_METHOD)) { ++ method_hci = TOSH_INTERFACE_2 GHCI_METHOD; ++ if (toshiba_acpi_setup_keyboard(TOSH_INTERFACE_2)) ++ printk(MY_INFO "Unable to activate hotkeys\n"); ++ } else + return -ENODEV; + + printk(MY_INFO "Toshiba Laptop ACPI Extras version %s\n", diff --git a/linux-2.6-input-kill-stupid-messages.patch b/linux-2.6-input-kill-stupid-messages.patch index cc1dd74..2e63146 100644 --- a/linux-2.6-input-kill-stupid-messages.patch +++ b/linux-2.6-input-kill-stupid-messages.patch @@ -1,32 +1,17 @@ -From b2c6d55b2351152696aafb8c9bf3ec8968acf77c Mon Sep 17 00:00:00 2001 -From: Kyle McMartin -Date: Mon, 29 Mar 2010 23:59:58 -0400 -Subject: linux-2.6-input-kill-stupid-messages - ---- - drivers/input/keyboard/atkbd.c | 5 +++++ - 1 files changed, 5 insertions(+), 0 deletions(-) - -diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c -index d358ef8..38db098 100644 ---- a/drivers/input/keyboard/atkbd.c -+++ b/drivers/input/keyboard/atkbd.c -@@ -425,11 +426,15 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data, - goto out; - case ATKBD_RET_ACK: - case ATKBD_RET_NAK: +--- linux-2.6.21.noarch/drivers/input/keyboard/atkbd.c~ 2007-07-06 10:51:04.000000000 -0400 ++++ linux-2.6.21.noarch/drivers/input/keyboard/atkbd.c 2007-07-06 10:51:33.000000000 -0400 +@@ -409,10 +409,14 @@ static irqreturn_t atkbd_interrupt(struc + goto out; + case ATKBD_RET_ACK: + case ATKBD_RET_NAK: +#if 0 -+ /* Quite a few key switchers and other tools trigger this -+ * and it confuses people who can do nothing about it */ - if (printk_ratelimit()) - dev_warn(&serio->dev, - "Spurious %s on %s. " - "Some program might be trying access hardware directly.\n", - data == ATKBD_RET_ACK ? "ACK" : "NAK", serio->phys); ++ /* Quite a few key switchers and other tools trigger this and it confuses ++ people who can do nothing about it */ + if (printk_ratelimit()) + printk(KERN_WARNING "atkbd.c: Spurious %s on %s. " + "Some program might be trying access hardware directly.\n", + data == ATKBD_RET_ACK ? "ACK" : "NAK", serio->phys); +#endif - goto out; - case ATKBD_RET_ERR: - atkbd->err_count++; --- -1.7.0.1 - + goto out; + case ATKBD_RET_ERR: + atkbd->err_count++; diff --git a/linux-2.6-intel-iommu-igfx.patch b/linux-2.6-intel-iommu-igfx.patch index 44fd141..f5888d1 100644 --- a/linux-2.6-intel-iommu-igfx.patch +++ b/linux-2.6-intel-iommu-igfx.patch @@ -49,7 +49,7 @@ index 4173125..8f36786 100644 int dmar_disabled = 1; #endif /*CONFIG_DMAR_DEFAULT_ON*/ --static int dmar_map_gfx = 1; +-static int __initdata dmar_map_gfx = 1; +/* disabled by default; causes way too many issues */ +static int dmar_map_gfx = 0; static int dmar_forcedac; diff --git a/linux-2.6-mac80211-age-scan-results-on-resume.patch b/linux-2.6-mac80211-age-scan-results-on-resume.patch new file mode 100644 index 0000000..d9e9631 --- /dev/null +++ b/linux-2.6-mac80211-age-scan-results-on-resume.patch @@ -0,0 +1,181 @@ +Backport of "cfg80211: age scan results on resume" by Dan Williams. + +diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h +index 23c0ab7..0432eb6 100644 +--- a/include/net/cfg80211.h ++++ b/include/net/cfg80211.h +@@ -450,6 +450,9 @@ struct ieee80211_channel; + * wireless extensions but this is subject to reevaluation as soon as this + * code is used more widely and we have a first user without wext. 
+ * ++ * @suspend: wiphy device needs to be suspended ++ * @resume: wiphy device needs to be resumed ++ * + * @add_virtual_intf: create a new virtual interface with the given name, + * must set the struct wireless_dev's iftype. + * +@@ -499,6 +502,9 @@ struct ieee80211_channel; + * @set_channel: Set channel + */ + struct cfg80211_ops { ++ int (*suspend)(struct wiphy *wiphy); ++ int (*resume)(struct wiphy *wiphy); ++ + int (*add_virtual_intf)(struct wiphy *wiphy, char *name, + enum nl80211_iftype type, u32 *flags, + struct vif_params *params); +diff --git a/include/net/wireless.h b/include/net/wireless.h +index 21c5d96..ae2d34d 100644 +--- a/include/net/wireless.h ++++ b/include/net/wireless.h +@@ -220,6 +220,9 @@ struct wiphy { + /* dir in debugfs: ieee80211/ */ + struct dentry *debugfsdir; + ++ /* time spent in suspend, in seconds */ ++ unsigned long suspend_duration; ++ + char priv[0] __attribute__((__aligned__(NETDEV_ALIGN))); + }; + +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c +index 9d4e4d8..691183e 100644 +--- a/net/mac80211/cfg.c ++++ b/net/mac80211/cfg.c +@@ -1141,6 +1141,32 @@ static int ieee80211_set_channel(struct wiphy *wiphy, + return ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); + } + ++#ifdef CONFIG_PM ++static int ieee80211_suspend(struct wiphy *wiphy) ++{ ++ return 0; ++} ++ ++static int ieee80211_resume(struct wiphy *wiphy) ++{ ++ struct ieee80211_local *local = wiphy_priv(wiphy); ++ unsigned long age_jiffies; ++ struct ieee80211_bss *bss; ++ ++ age_jiffies = msecs_to_jiffies(wiphy->suspend_duration * MSEC_PER_SEC); ++ spin_lock_bh(&local->bss_lock); ++ list_for_each_entry(bss, &local->bss_list, list) { ++ bss->last_update -= age_jiffies; ++ } ++ spin_unlock_bh(&local->bss_lock); ++ ++ return 0; ++} ++#else ++#define ieee80211_suspend NULL ++#define ieee80211_resume NULL ++#endif ++ + struct cfg80211_ops mac80211_config_ops = { + .add_virtual_intf = ieee80211_add_iface, + .del_virtual_intf = ieee80211_del_iface, +@@ -1169,4 +1195,6 @@ struct cfg80211_ops mac80211_config_ops = { + .change_bss = ieee80211_change_bss, + .set_txq_params = ieee80211_set_txq_params, + .set_channel = ieee80211_set_channel, ++ .suspend = ieee80211_suspend, ++ .resume = ieee80211_resume, + }; +diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c +index f5c7c33..eb43ff5 100644 +--- a/net/mac80211/scan.c ++++ b/net/mac80211/scan.c +@@ -745,6 +745,15 @@ static void ieee80211_scan_add_ies(struct iw_request_info *info, + } + } + ++static inline unsigned int elapsed_jiffies_msecs(unsigned long start) ++{ ++ unsigned long end = jiffies; ++ ++ if (end >= start) ++ return jiffies_to_msecs(end - start); ++ ++ return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1); ++} + + static char * + ieee80211_scan_result(struct ieee80211_local *local, +@@ -857,8 +866,8 @@ ieee80211_scan_result(struct ieee80211_local *local, + &iwe, buf); + memset(&iwe, 0, sizeof(iwe)); + iwe.cmd = IWEVCUSTOM; +- sprintf(buf, " Last beacon: %dms ago", +- jiffies_to_msecs(jiffies - bss->last_update)); ++ sprintf(buf, " Last beacon: %ums ago", ++ elapsed_jiffies_msecs(bss->last_update)); + iwe.u.data.length = strlen(buf); + current_ev = iwe_stream_add_point(info, current_ev, + end_buf, &iwe, buf); +diff --git a/net/wireless/core.h b/net/wireless/core.h +index f7fb9f4..a4031a9 100644 +--- a/net/wireless/core.h ++++ b/net/wireless/core.h +@@ -41,6 +41,8 @@ struct cfg80211_registered_device { + struct mutex devlist_mtx; + struct list_head netdev_list; + ++ unsigned long suspend_at; ++ + /* must be last because 
of the way we do wiphy_priv(), + * and it should at least be aligned to NETDEV_ALIGN */ + struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN))); +diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c +index 79a3828..dc92564 100644 +--- a/net/wireless/sysfs.c ++++ b/net/wireless/sysfs.c +@@ -55,6 +55,39 @@ static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env) + } + #endif + ++static int wiphy_suspend(struct device *dev, pm_message_t state) ++{ ++ struct cfg80211_registered_device *rdev = dev_to_rdev(dev); ++ int ret = 0; ++ ++ rdev->wiphy.suspend_duration = 0; ++ rdev->suspend_at = get_seconds(); ++ ++ if (rdev->ops->suspend) { ++ rtnl_lock(); ++ ret = rdev->ops->suspend(&rdev->wiphy); ++ rtnl_unlock(); ++ } ++ ++ return ret; ++} ++ ++static int wiphy_resume(struct device *dev) ++{ ++ struct cfg80211_registered_device *rdev = dev_to_rdev(dev); ++ int ret = 0; ++ ++ rdev->wiphy.suspend_duration = get_seconds() - rdev->suspend_at; ++ ++ if (rdev->ops->resume) { ++ rtnl_lock(); ++ ret = rdev->ops->resume(&rdev->wiphy); ++ rtnl_unlock(); ++ } ++ ++ return ret; ++} ++ + struct class ieee80211_class = { + .name = "ieee80211", + .owner = THIS_MODULE, +@@ -63,6 +96,8 @@ struct class ieee80211_class = { + #ifdef CONFIG_HOTPLUG + .dev_uevent = wiphy_uevent, + #endif ++ .suspend = wiphy_suspend, ++ .resume = wiphy_resume, + }; + + int wiphy_sysfs_init(void) diff --git a/linux-2.6-nfs4-callback-hidden.patch b/linux-2.6-nfs4-callback-hidden.patch new file mode 100644 index 0000000..8fc2368 --- /dev/null +++ b/linux-2.6-nfs4-callback-hidden.patch @@ -0,0 +1,20 @@ +Author: Steve Dickson +Date: Tue Oct 13 15:59:57 EDT 2009 + +To avoid hangs in the svc_unregister(), on version 4 mounts +(and unmounts), when rpcbind is not running, make the nfs4 callback +program an 'hidden' service by setting the 'vs_hidden' flag in the +nfs4_callback_version structure. + +Signed-off-by: Steve Dickson + +diff -up linux-2.6.31.x86_64/fs/nfs/callback_xdr.c.orig linux-2.6.31.x86_64/fs/nfs/callback_xdr.c +--- linux-2.6.31.x86_64/fs/nfs/callback_xdr.c.orig 2009-09-09 18:13:59.000000000 -0400 ++++ linux-2.6.31.x86_64/fs/nfs/callback_xdr.c 2009-10-13 15:40:19.000000000 -0400 +@@ -716,5 +716,6 @@ struct svc_version nfs4_callback_version + .vs_proc = nfs4_callback_procedures1, + .vs_xdrsize = NFS4_CALLBACK_XDRSIZE, + .vs_dispatch = NULL, ++ .vs_hidden = 1, + }; + diff --git a/linux-2.6-p54pci.patch b/linux-2.6-p54pci.patch new file mode 100644 index 0000000..116cd6f --- /dev/null +++ b/linux-2.6-p54pci.patch @@ -0,0 +1,513 @@ +From: Christian Lamparter +Date: Sun, 17 Jan 2010 22:19:25 +0000 (+0100) +Subject: p54pci: move tx cleanup into tasklet +X-Git-Tag: master-2010-01-19~9 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Flinville%2Fwireless-next-2.6.git;a=commitdiff_plain;h=d713804c6032b95cd3035014e16fadebb9655c6f + +p54pci: move tx cleanup into tasklet + +This patch moves the tx cleanup routines out of the critical +interrupt context and into the (previously known as rx) tasklet. + +The main goal of this operation is to remove the extensive +usage of spin_lock_irqsaves in the generic p54common library. + +The next step would be to modify p54usb to do the +rx processing inside a tasklet (just like usbnet). + +Signed-off-by: Christian Lamparter +Signed-off-by: John W. 
Linville +--- + +diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c +index 4bf4c21..48cae48 100644 +--- a/drivers/net/wireless/p54/p54pci.c ++++ b/drivers/net/wireless/p54/p54pci.c +@@ -234,25 +234,26 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, + p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf); + } + +-/* caller must hold priv->lock */ + static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index, + int ring_index, struct p54p_desc *ring, u32 ring_limit, +- void **tx_buf) ++ struct sk_buff **tx_buf) + { ++ unsigned long flags; + struct p54p_priv *priv = dev->priv; + struct p54p_ring_control *ring_control = priv->ring_control; + struct p54p_desc *desc; ++ struct sk_buff *skb; + u32 idx, i; + + i = (*index) % ring_limit; + (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]); + idx %= ring_limit; + ++ spin_lock_irqsave(&priv->lock, flags); + while (i != idx) { + desc = &ring[i]; +- if (tx_buf[i]) +- if (FREE_AFTER_TX((struct sk_buff *) tx_buf[i])) +- p54_free_skb(dev, tx_buf[i]); ++ ++ skb = tx_buf[i]; + tx_buf[i] = NULL; + + pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), +@@ -263,17 +264,32 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index, + desc->len = 0; + desc->flags = 0; + ++ if (skb && FREE_AFTER_TX(skb)) { ++ spin_unlock_irqrestore(&priv->lock, flags); ++ p54_free_skb(dev, skb); ++ spin_lock_irqsave(&priv->lock, flags); ++ } ++ + i++; + i %= ring_limit; + } ++ spin_unlock_irqrestore(&priv->lock, flags); + } + +-static void p54p_rx_tasklet(unsigned long dev_id) ++static void p54p_tasklet(unsigned long dev_id) + { + struct ieee80211_hw *dev = (struct ieee80211_hw *)dev_id; + struct p54p_priv *priv = dev->priv; + struct p54p_ring_control *ring_control = priv->ring_control; + ++ p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt, ++ ARRAY_SIZE(ring_control->tx_mgmt), ++ priv->tx_buf_mgmt); ++ ++ p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data, ++ ARRAY_SIZE(ring_control->tx_data), ++ priv->tx_buf_data); ++ + p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt, + ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt); + +@@ -288,38 +304,24 @@ static irqreturn_t p54p_interrupt(int irq, void *dev_id) + { + struct ieee80211_hw *dev = dev_id; + struct p54p_priv *priv = dev->priv; +- struct p54p_ring_control *ring_control = priv->ring_control; + __le32 reg; + + spin_lock(&priv->lock); + reg = P54P_READ(int_ident); + if (unlikely(reg == cpu_to_le32(0xFFFFFFFF))) { +- spin_unlock(&priv->lock); +- return IRQ_HANDLED; ++ goto out; + } +- + P54P_WRITE(int_ack, reg); + + reg &= P54P_READ(int_enable); + +- if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)) { +- p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, +- 3, ring_control->tx_mgmt, +- ARRAY_SIZE(ring_control->tx_mgmt), +- priv->tx_buf_mgmt); +- +- p54p_check_tx_ring(dev, &priv->tx_idx_data, +- 1, ring_control->tx_data, +- ARRAY_SIZE(ring_control->tx_data), +- priv->tx_buf_data); +- +- tasklet_schedule(&priv->rx_tasklet); +- +- } else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT)) ++ if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)) ++ tasklet_schedule(&priv->tasklet); ++ else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT)) + complete(&priv->boot_comp); + ++out: + spin_unlock(&priv->lock); +- + return reg ? 
IRQ_HANDLED : IRQ_NONE; + } + +@@ -368,7 +370,7 @@ static void p54p_stop(struct ieee80211_hw *dev) + unsigned int i; + struct p54p_desc *desc; + +- tasklet_kill(&priv->rx_tasklet); ++ tasklet_kill(&priv->tasklet); + + P54P_WRITE(int_enable, cpu_to_le32(0)); + P54P_READ(int_enable); +@@ -559,7 +561,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev, + priv->common.tx = p54p_tx; + + spin_lock_init(&priv->lock); +- tasklet_init(&priv->rx_tasklet, p54p_rx_tasklet, (unsigned long)dev); ++ tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev); + + err = request_firmware(&priv->firmware, "isl3886pci", + &priv->pdev->dev); +diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h +index fbb6839..2feead6 100644 +--- a/drivers/net/wireless/p54/p54pci.h ++++ b/drivers/net/wireless/p54/p54pci.h +@@ -92,7 +92,7 @@ struct p54p_priv { + struct p54_common common; + struct pci_dev *pdev; + struct p54p_csr __iomem *map; +- struct tasklet_struct rx_tasklet; ++ struct tasklet_struct tasklet; + const struct firmware *firmware; + spinlock_t lock; + struct p54p_ring_control *ring_control; +@@ -101,8 +101,8 @@ struct p54p_priv { + u32 rx_idx_mgmt, tx_idx_mgmt; + struct sk_buff *rx_buf_data[8]; + struct sk_buff *rx_buf_mgmt[4]; +- void *tx_buf_data[32]; +- void *tx_buf_mgmt[4]; ++ struct sk_buff *tx_buf_data[32]; ++ struct sk_buff *tx_buf_mgmt[4]; + struct completion boot_comp; + }; + +From: Christian Lamparter +Date: Fri, 22 Jan 2010 07:01:11 +0000 (+0100) +Subject: p54pci: revise tx locking +X-Git-Tag: master-2010-01-22~1 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Flinville%2Fwireless-next-2.6.git;a=commitdiff_plain;h=b92f7d30830a319148df2943b7565989494e5ad1 + +p54pci: revise tx locking + +This patch continues the effort which began with: +"[PATCH] p54pci: move tx cleanup into tasklet". + +Thanks to these changes, p54pci's interrupt & tx +cleanup routines can be made lock-less. + +Signed-off-by: Christian Lamparter +Signed-off-by: John W. 
Linville +--- + +diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c +index 48cae48..bda29c0 100644 +--- a/drivers/net/wireless/p54/p54pci.c ++++ b/drivers/net/wireless/p54/p54pci.c +@@ -238,7 +238,6 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index, + int ring_index, struct p54p_desc *ring, u32 ring_limit, + struct sk_buff **tx_buf) + { +- unsigned long flags; + struct p54p_priv *priv = dev->priv; + struct p54p_ring_control *ring_control = priv->ring_control; + struct p54p_desc *desc; +@@ -249,7 +248,6 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index, + (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]); + idx %= ring_limit; + +- spin_lock_irqsave(&priv->lock, flags); + while (i != idx) { + desc = &ring[i]; + +@@ -264,16 +262,12 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index, + desc->len = 0; + desc->flags = 0; + +- if (skb && FREE_AFTER_TX(skb)) { +- spin_unlock_irqrestore(&priv->lock, flags); ++ if (skb && FREE_AFTER_TX(skb)) + p54_free_skb(dev, skb); +- spin_lock_irqsave(&priv->lock, flags); +- } + + i++; + i %= ring_limit; + } +- spin_unlock_irqrestore(&priv->lock, flags); + } + + static void p54p_tasklet(unsigned long dev_id) +@@ -306,7 +300,6 @@ static irqreturn_t p54p_interrupt(int irq, void *dev_id) + struct p54p_priv *priv = dev->priv; + __le32 reg; + +- spin_lock(&priv->lock); + reg = P54P_READ(int_ident); + if (unlikely(reg == cpu_to_le32(0xFFFFFFFF))) { + goto out; +@@ -321,15 +314,14 @@ static irqreturn_t p54p_interrupt(int irq, void *dev_id) + complete(&priv->boot_comp); + + out: +- spin_unlock(&priv->lock); + return reg ? IRQ_HANDLED : IRQ_NONE; + } + + static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb) + { ++ unsigned long flags; + struct p54p_priv *priv = dev->priv; + struct p54p_ring_control *ring_control = priv->ring_control; +- unsigned long flags; + struct p54p_desc *desc; + dma_addr_t mapping; + u32 device_idx, idx, i; +@@ -370,14 +362,14 @@ static void p54p_stop(struct ieee80211_hw *dev) + unsigned int i; + struct p54p_desc *desc; + +- tasklet_kill(&priv->tasklet); +- + P54P_WRITE(int_enable, cpu_to_le32(0)); + P54P_READ(int_enable); + udelay(10); + + free_irq(priv->pdev->irq, dev); + ++ tasklet_kill(&priv->tasklet); ++ + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); + + for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) { +From: Quintin Pitts +Date: Fri, 9 Apr 2010 19:37:38 +0000 (+0200) +Subject: p54pci: prevent stuck rx-ring on slow system +X-Git-Tag: master-2010-04-12~9 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Flinville%2Fwireless-next-2.6.git;a=commitdiff_plain;h=5988f385b4cffa9ca72c5be0188e5f4c9ef46d82 + +p54pci: prevent stuck rx-ring on slow system + +This patch fixes an old problem, which - under certain +circumstances - could cause the device to become +unresponsive. + +most of p54pci's rx-ring management is implemented in just +two distinct standalone functions. p54p_check_rx_ring takes +care of processing incoming data, while p54p_refill_rx_ring +tries to replenish all depleted communication buffers. + +This has always worked fine on my fast machine, but +now I know there is a hidden race... + +The most likely candidate here is ring_control->device_idx. +Quintin Pitts had already analyzed the culprit and posted +a patch back in Oct 2009. But sadly, no one's picked up on this. +( https://patchwork.kernel.org/patch/53079/ [2 & 3] ). 
+This patch does the same way, except that it also prioritize +rx data processing, simply because tx routines *can* wait. + +Reported-by: Sean Young +Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=11386 +Reported-by: Quintin Pitts +Signed-off-by: Quintin Pitts +Signed-off-by: Christian Lamparter +Signed-off-by: John W. Linville +--- + +diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c +index ed4bdff..aa29663 100644 +--- a/drivers/net/wireless/p54/p54pci.c ++++ b/drivers/net/wireless/p54/p54pci.c +@@ -131,7 +131,7 @@ static int p54p_upload_firmware(struct ieee80211_hw *dev) + + static void p54p_refill_rx_ring(struct ieee80211_hw *dev, + int ring_index, struct p54p_desc *ring, u32 ring_limit, +- struct sk_buff **rx_buf) ++ struct sk_buff **rx_buf, u32 index) + { + struct p54p_priv *priv = dev->priv; + struct p54p_ring_control *ring_control = priv->ring_control; +@@ -139,7 +139,7 @@ static void p54p_refill_rx_ring(struct ieee80211_hw *dev, + + idx = le32_to_cpu(ring_control->host_idx[ring_index]); + limit = idx; +- limit -= le32_to_cpu(ring_control->device_idx[ring_index]); ++ limit -= le32_to_cpu(index); + limit = ring_limit - limit; + + i = idx % ring_limit; +@@ -231,7 +231,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, + i %= ring_limit; + } + +- p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf); ++ p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index); + } + + static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index, +@@ -276,14 +276,6 @@ static void p54p_tasklet(unsigned long dev_id) + struct p54p_priv *priv = dev->priv; + struct p54p_ring_control *ring_control = priv->ring_control; + +- p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt, +- ARRAY_SIZE(ring_control->tx_mgmt), +- priv->tx_buf_mgmt); +- +- p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data, +- ARRAY_SIZE(ring_control->tx_data), +- priv->tx_buf_data); +- + p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt, + ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt); + +@@ -292,6 +284,14 @@ static void p54p_tasklet(unsigned long dev_id) + + wmb(); + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); ++ ++ p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt, ++ ARRAY_SIZE(ring_control->tx_mgmt), ++ priv->tx_buf_mgmt); ++ ++ p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data, ++ ARRAY_SIZE(ring_control->tx_data), ++ priv->tx_buf_data); + } + + static irqreturn_t p54p_interrupt(int irq, void *dev_id) +@@ -444,10 +444,10 @@ static int p54p_open(struct ieee80211_hw *dev) + priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0; + + p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data, +- ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data); ++ ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data, 0); + + p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt, +- ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt); ++ ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt, 0); + + P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma)); + P54P_READ(ring_control_base); +From: Christian Lamparter +Date: Thu, 15 Apr 2010 12:17:07 +0000 (+0200) +Subject: p54pci: fix serious sparse warning +X-Git-Tag: master-2010-04-16~117 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Flinville%2Fwireless-next-2.6.git;a=commitdiff_plain;h=103823db62ffca028c7a214c80266519d2ea7d8d + +p54pci: fix serious sparse warning 
+ +This patch fixes a bug which was just recently introduced by +("p54pci: prevent stuck rx-ring on slow system"). + +make M=drivers/net/wireless/p54 C=2 CF=-D__CHECK_ENDIAN__ + CHECK drivers/net/wireless/p54/p54pci.c +drivers/net/wireless/p54/p54pci.c:143:11: warning: cast to restricted __le32 + CC [M] drivers/net/wireless/p54/p54pci.o + +Reported-by: Johannes Berg +Signed-off-by: Christian Lamparter +Signed-off-by: John W. Linville +--- + +diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c +index aa29663..0a516c8 100644 +--- a/drivers/net/wireless/p54/p54pci.c ++++ b/drivers/net/wireless/p54/p54pci.c +@@ -139,7 +139,7 @@ static void p54p_refill_rx_ring(struct ieee80211_hw *dev, + + idx = le32_to_cpu(ring_control->host_idx[ring_index]); + limit = idx; +- limit -= le32_to_cpu(index); ++ limit -= index; + limit = ring_limit - limit; + + i = idx % ring_limit; +Subject: +[PATCH 2/2] p54pci: fix regression from prevent stuck rx-ring on slow system +From: +Christian Lamparter +Date: +Thu, 22 Apr 2010 19:52:43 +0200 +To: +linux-wireless@vger.kernel.org +CC: +linville@tuxdriver.com, hdegoede@redhat.com + +From: Hans de Goede + +This patch fixes a recently introduced use-after-free regression +from "p54pci: prevent stuck rx-ring on slow system". + +Hans de Goede reported a use-after-free regression: +> >BUG: unable to handle kernel paging request at 6b6b6b6b +> >IP: [] p54p_check_tx_ring+0x84/0xb1 [p54pci] +> >*pde = 00000000 +> >Oops: 0000 [#1] SMP +> >EIP: 0060:[] EFLAGS: 00010286 CPU: 0 +> >EIP is at p54p_check_tx_ring+0x84/0xb1 [p54pci] +> >EAX: 6b6b6b6b EBX: df10b170 ECX: 00000003 EDX: 00000001 +> >ESI: dc471500 EDI: d8acaeb0 EBP: c098be9c ESP: c098be84 +> > DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 +> >Process swapper (pid: 0, ti=c098a000 task=c09ccfe0 task.ti=c098a000) +> >Call Trace: +> > [] ? p54p_tasklet+0xaa/0xb5 [p54pci] +> > [] ? tasklet_action+0x78/0xcb +> > [] ? __do_softirq+0xbc/0x173 + +Quote from comment #17: +"The problem is the innocent looking moving of the tx processing to + after the rx processing in the tasklet. Quoting from the changelog: + This patch does it the same way, except that it also prioritize + rx data processing, simply because tx routines *can* wait. + + This is causing an issue with us referencing already freed memory, + because some skb's we transmit, we immediately receive back, such + as those for reading the eeprom (*) and getting stats. + + What can happen because of the moving of the tx processing to after + the rx processing is that when the tasklet first runs after doing a + special skb tx (such as eeprom) we've already received the answer + to it. + + Then the rx processing ends up calling p54_find_and_unlink_skb to + find the matching tx skb for the just received special rx skb and + frees the tx skb. + + Then after the processing of the rx skb answer, and thus freeing + the tx skb, we go process the completed tx ring entires, and then + dereference the free-ed skb, to see if it should free free-ed by + p54p_check_tx_ring()." 
+ +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=583623 +Bug-Identified-by: Hans de Goede +Signed-off-by: Hans de Goede +Signed-off-by: Christian Lamparter +--- +diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c +index ca42ccb..07c4528 100644 +--- a/drivers/net/wireless/p54/p54pci.c ++++ b/drivers/net/wireless/p54/p54pci.c +@@ -277,6 +277,14 @@ static void p54p_tasklet(unsigned long dev_id) + struct p54p_priv *priv = dev->priv; + struct p54p_ring_control *ring_control = priv->ring_control; + ++ p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt, ++ ARRAY_SIZE(ring_control->tx_mgmt), ++ priv->tx_buf_mgmt); ++ ++ p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data, ++ ARRAY_SIZE(ring_control->tx_data), ++ priv->tx_buf_data); ++ + p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt, + ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt); + +@@ -285,14 +293,6 @@ static void p54p_tasklet(unsigned long dev_id) + + wmb(); + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); +- +- p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt, +- ARRAY_SIZE(ring_control->tx_mgmt), +- priv->tx_buf_mgmt); +- +- p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data, +- ARRAY_SIZE(ring_control->tx_data), +- priv->tx_buf_data); + } + + static irqreturn_t p54p_interrupt(int irq, void *dev_id) diff --git a/linux-2.6-pciehp-update.patch b/linux-2.6-pciehp-update.patch new file mode 100644 index 0000000..38ec797 --- /dev/null +++ b/linux-2.6-pciehp-update.patch @@ -0,0 +1,147 @@ +diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h +index b2801a7..c9f18f9 100644 +--- a/drivers/pci/hotplug/pciehp.h ++++ b/drivers/pci/hotplug/pciehp.h +@@ -224,6 +224,10 @@ static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev) + { + u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | + OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); ++ if (pciehp_force) { ++ dev_info(&dev->dev, "Bypassing BIOS check for pciehp\n"); ++ return 0; ++ } + return acpi_get_hp_hw_control_from_firmware(dev, flags); + } + +diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c +index 39cf248..ab6b016 100644 +--- a/drivers/pci/hotplug/pciehp_core.c ++++ b/drivers/pci/hotplug/pciehp_core.c +@@ -41,6 +41,7 @@ int pciehp_debug; + int pciehp_poll_mode; + int pciehp_poll_time; + int pciehp_force; ++int pciehp_passive; + struct workqueue_struct *pciehp_wq; + + #define DRIVER_VERSION "0.4" +@@ -50,15 +51,18 @@ struct workqueue_struct *pciehp_wq; + MODULE_AUTHOR(DRIVER_AUTHOR); + MODULE_DESCRIPTION(DRIVER_DESC); + MODULE_LICENSE("GPL"); ++MODULE_ALIAS("acpi*:PNP0A08:*"); + + module_param(pciehp_debug, bool, 0644); + module_param(pciehp_poll_mode, bool, 0644); + module_param(pciehp_poll_time, int, 0644); + module_param(pciehp_force, bool, 0644); ++module_param(pciehp_passive, bool, 0644); + MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not"); + MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); + MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); + MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing"); ++MODULE_PARM_DESC(pciehp_passive, "Listen for pciehp events, even if _OSC and OSHP are missing"); + + #define PCIE_MODULE_NAME "pciehp" + +@@ -85,6 +89,13 @@ static struct hotplug_slot_ops pciehp_hotplug_slot_ops = { + .get_cur_bus_speed = get_cur_bus_speed, + }; + ++static 
struct hotplug_slot_ops pciehp_passive_hotplug_slot_ops = { ++ .owner = THIS_MODULE, ++ .get_adapter_status = get_adapter_status, ++ .get_max_bus_speed = get_max_bus_speed, ++ .get_cur_bus_speed = get_cur_bus_speed, ++}; ++ + /* + * Check the status of the Electro Mechanical Interlock (EMI) + */ +@@ -212,7 +223,11 @@ static int init_slots(struct controller *ctrl) + hotplug_slot->info = info; + hotplug_slot->private = slot; + hotplug_slot->release = &release_slot; +- hotplug_slot->ops = &pciehp_hotplug_slot_ops; ++ if (pciehp_passive && ++ pciehp_get_hp_hw_control_from_firmware(ctrl->pci_dev)) ++ hotplug_slot->ops = &pciehp_passive_hotplug_slot_ops; ++ else ++ hotplug_slot->ops = &pciehp_hotplug_slot_ops; + slot->hotplug_slot = hotplug_slot; + snprintf(name, SLOT_NAME_SIZE, "%u", slot->number); + +@@ -407,11 +422,7 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_ + u8 value; + struct pci_dev *pdev = dev->port; + +- if (pciehp_force) +- dev_info(&dev->device, +- "Bypassing BIOS check for pciehp use on %s\n", +- pci_name(pdev)); +- else if (pciehp_get_hp_hw_control_from_firmware(pdev)) ++ if (!pciehp_passive && pciehp_get_hp_hw_control_from_firmware(pdev)) + goto err_out_none; + + ctrl = pcie_init(dev); +@@ -436,7 +447,7 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_ + t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); + t_slot->hpc_ops->get_adapter_status(t_slot, &value); + if (value) { +- if (pciehp_force) ++ if (pciehp_force || pciehp_passive) + pciehp_enable_slot(t_slot); + } else { + /* Power off slot if not occupied */ +@@ -474,8 +485,11 @@ static int pciehp_suspend (struct pcie_device *dev, pm_message_t state) + + static int pciehp_resume (struct pcie_device *dev) + { ++ struct pci_dev *pdev = dev->port; + dev_info(&dev->device, "%s ENTRY\n", __func__); +- if (pciehp_force) { ++ ++ if (pciehp_force || (pciehp_passive && ++ pciehp_get_hp_hw_control_from_firmware(pdev))) { + struct controller *ctrl = get_service_data(dev); + struct slot *t_slot; + u8 status; +diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c +index fead63c..12640bd 100644 +--- a/drivers/pci/hotplug/pciehp_ctrl.c ++++ b/drivers/pci/hotplug/pciehp_ctrl.c +@@ -185,7 +185,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot) + * before taking any action that relies on power having been + * removed from the slot/adapter. + */ +- msleep(1000); ++ if (PWR_LED(ctrl) || ATTN_LED(ctrl)) ++ msleep(1000); + + if (PWR_LED(ctrl)) + pslot->hpc_ops->green_led_off(pslot); +@@ -288,16 +289,16 @@ static int remove_board(struct slot *p_slot) + } + } + +- /* +- * After turning power off, we must wait for at least 1 second +- * before taking any action that relies on power having been +- * removed from the slot/adapter. +- */ +- msleep(1000); +- +- if (PWR_LED(ctrl)) ++ if (PWR_LED(ctrl)) { ++ /* ++ * After turning power off, we must wait for at least 1 second ++ * before taking any action that relies on power having been ++ * removed from the slot/adapter. 
++ */ ++ msleep(1000); + /* turn off Green LED */ + p_slot->hpc_ops->green_led_off(p_slot); ++ } + + return 0; + } diff --git a/linux-2.6-qcserial-autosuspend.diff b/linux-2.6-qcserial-autosuspend.diff new file mode 100644 index 0000000..5660145 --- /dev/null +++ b/linux-2.6-qcserial-autosuspend.diff @@ -0,0 +1,23 @@ +commit b2bcfa17349e5a6a01170b5269ee261dbd762a0c +Author: Matthew Garrett +Date: Sat Jul 18 14:43:36 2009 +0100 + + usb: enable autosuspend by default on qcserial + + All qcserial hardware supports autosuspend properly. Enable it by default. + + Signed-off-by: Matthew Garrett + +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c +index 7528b8d..959a176 100644 +--- a/drivers/usb/serial/qcserial.c ++++ b/drivers/usb/serial/qcserial.c +@@ -74,6 +74,8 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) + ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber; + dbg("This Interface = %d", ifnum); + ++ usb_device_autosuspend_enable(serial->dev); ++ + switch (nintf) { + case 1: + /* QDL mode */ diff --git a/linux-2.6-rfkill-all.patch b/linux-2.6-rfkill-all.patch new file mode 100644 index 0000000..e353d22 --- /dev/null +++ b/linux-2.6-rfkill-all.patch @@ -0,0 +1,52 @@ +From 06d4bc456d4a43fc79288b576a7ff8a15109a3fd Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Mon, 11 Jan 2010 08:47:44 -0500 +Subject: linux-2.6-rfkill-all.patch + +--- + include/linux/rfkill.h | 2 +- + net/rfkill/input.c | 8 ++++++++ + 3 files changed, 11 insertions(+), 1 deletions(-) + +diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h +index 97059d0..4f82326 100644 +--- a/include/linux/rfkill.h ++++ b/include/linux/rfkill.h +@@ -29,7 +29,7 @@ + /** + * enum rfkill_type - type of rfkill switch. + * +- * @RFKILL_TYPE_ALL: toggles all switches (userspace only) ++ * @RFKILL_TYPE_ALL: toggles all switches (requests only - not a switch type) + * @RFKILL_TYPE_WLAN: switch is on a 802.11 wireless network device. + * @RFKILL_TYPE_BLUETOOTH: switch is on a bluetooth device. + * @RFKILL_TYPE_UWB: switch is on a ultra wideband device. 
+diff --git a/net/rfkill/input.c b/net/rfkill/input.c +index a7295ad..3713d7e 100644 +--- a/net/rfkill/input.c ++++ b/net/rfkill/input.c +@@ -212,6 +212,9 @@ static void rfkill_event(struct input_handle *handle, unsigned int type, + case KEY_WIMAX: + rfkill_schedule_toggle(RFKILL_TYPE_WIMAX); + break; ++ case KEY_RFKILL: ++ rfkill_schedule_toggle(RFKILL_TYPE_ALL); ++ break; + } + } else if (type == EV_SW && code == SW_RFKILL_ALL) + rfkill_schedule_evsw_rfkillall(data); +@@ -295,6 +298,11 @@ static const struct input_device_id rfkill_ids[] = { + .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, + }, + { ++ .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, ++ .evbit = { BIT_MASK(EV_KEY) }, ++ .keybit = { [BIT_WORD(KEY_RFKILL)] = BIT_MASK(KEY_RFKILL) }, ++ }, ++ { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT, + .evbit = { BIT(EV_SW) }, + .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) }, +-- +1.6.5.2 + diff --git a/linux-2.6-selinux-avtab-size.patch b/linux-2.6-selinux-avtab-size.patch new file mode 100644 index 0000000..59d635f --- /dev/null +++ b/linux-2.6-selinux-avtab-size.patch @@ -0,0 +1,30 @@ +commit 6c9ff1013b7a21099da838eeef7c3f23ee347957 +Author: Stephen Smalley +Date: Mon Mar 15 10:42:11 2010 -0400 + + SELinux: Reduce max avtab size to avoid page allocation failures + + Reduce MAX_AVTAB_HASH_BITS so that the avtab allocation is an order 2 + allocation rather than an order 4 allocation on x86_64. This + addresses reports of page allocation failures: + http://marc.info/?l=selinux&m=126757230625867&w=2 + https://bugzilla.redhat.com/show_bug.cgi?id=570433 + + Reported-by: Russell Coker + Signed-off-by: Stephen D. Smalley + Acked-by: Eric Paris + Signed-off-by: James Morris + +diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h +index 8da6a84..cd4f734 100644 +--- a/security/selinux/ss/avtab.h ++++ b/security/selinux/ss/avtab.h +@@ -82,7 +82,7 @@ struct avtab_node *avtab_search_node_next(struct avtab_node *node, int specified + void avtab_cache_init(void); + void avtab_cache_destroy(void); + +-#define MAX_AVTAB_HASH_BITS 13 ++#define MAX_AVTAB_HASH_BITS 11 + #define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS) + #define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1) + #define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS diff --git a/linux-2.6-selinux-mprotect-checks.patch b/linux-2.6-selinux-mprotect-checks.patch index 010a63c..1752525 100644 --- a/linux-2.6-selinux-mprotect-checks.patch +++ b/linux-2.6-selinux-mprotect-checks.patch @@ -5,83 +5,17 @@ ie, Fedora has a new enough toolchain, and has been rebuilt, so we don't need the ifdefs. Other distros don't/haven't, and this patch would break them if pushed upstream. 
- -Subject: [Fwd: Re: [PATCH] Disable execmem for sparc] -From: Stephen Smalley -To: Dave Jones -Date: Wed, 28 Apr 2010 16:04:56 -0400 -Message-Id: <1272485096.6013.326.camel@moss-pluto.epoch.ncsc.mil> - --------- Forwarded Message -------- -From: Stephen Smalley -To: David Miller -Cc: tcallawa@redhat.com, dennis@ausil.us, sparclinux@vger.kernel.org, dgilmore@redhat.com, jmorris@namei.org, eparis@parisplace.org -Subject: Re: [PATCH] Disable execmem for sparc -Date: Wed, 28 Apr 2010 15:57:57 -0400 - -On Tue, 2010-04-27 at 11:47 -0700, David Miller wrote: -> From: "Tom \"spot\" Callaway" -> Date: Tue, 27 Apr 2010 14:20:21 -0400 -> -> > [root@apollo ~]$ cat /proc/2174/maps -> > 00010000-00014000 r-xp 00000000 fd:00 15466577 -> > /sbin/mingetty -> > 00022000-00024000 rwxp 00002000 fd:00 15466577 -> > /sbin/mingetty -> > 00024000-00046000 rwxp 00000000 00:00 0 -> > [heap] -> -> SELINUX probably barfs on the executable heap, the PLT is in the HEAP -> just like powerpc32 and that's why VM_DATA_DEFAULT_FLAGS has to set -> both executable and writable. -> -> You also can't remove the CONFIG_PPC32 ifdefs in selinux, since -> because of the VM_DATA_DEFAULT_FLAGS setting used still in that arch, -> the heap will always have executable permission, just like sparc does. -> You have to support those binaries forever, whether you like it or not. -> -> Let's just replace the CONFIG_PPC32 ifdef in SELINUX with CONFIG_PPC32 -> || CONFIG_SPARC as in Tom's original patch and let's be done with -> this. -> -> In fact I would go through all the arch/ header files and check the -> VM_DATA_DEFAULT_FLAGS settings and add the necessary new ifdefs to the -> SELINUX code so that other platforms don't have the pain of having to -> go through this process too. - -To avoid maintaining per-arch ifdefs, it seems that we could just -directly use (VM_DATA_DEFAULT_FLAGS & VM_EXEC) as the basis for deciding -whether to enable or disable these checks. VM_DATA_DEFAULT_FLAGS isn't -constant on some architectures but instead depends on -current->personality, but we want this applied uniformly. So we'll just -use the initial task state to determine whether or not to enable these -checks. - -Signed-off-by: Stephen Smalley - -diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c -index ebee467..a03fd74 100644 ---- a/security/selinux/hooks.c -+++ b/security/selinux/hooks.c -@@ -2999,13 +2999,15 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd, - return file_has_perm(cred, file, av); - } - -+static int default_noexec; -+ - static int file_map_prot_check(struct file *file, unsigned long prot, int shared) - { +--- linux-2.6.26.noarch/security/selinux/hooks.c~ 2008-09-25 14:11:17.000000000 -0400 ++++ linux-2.6.26.noarch/security/selinux/hooks.c 2008-09-25 14:12:17.000000000 -0400 +@@ -3018,7 +3018,6 @@ static int file_map_prot_check(struct fi const struct cred *cred = current_cred(); int rc = 0; -#ifndef CONFIG_PPC32 -- if ((prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) { -+ if (default_noexec && -+ (prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) { + if ((prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) { /* * We are making executable an anonymous mapping or a - * private file mapping that will also be writable. 
-@@ -3015,7 +3017,6 @@ static int file_map_prot_check(struct file *file, unsigned long prot, int shared +@@ -3029,7 +3028,6 @@ static int file_map_prot_check(struct fi if (rc) goto error; } @@ -89,18 +23,15 @@ index ebee467..a03fd74 100644 if (file) { /* read access is always possible with a mapping */ -@@ -3076,8 +3077,8 @@ static int selinux_file_mprotect(struct vm_area_struct *vma, +@@ -3024,7 +3022,6 @@ static int selinux_file_mprotect(struct if (selinux_checkreqprot) prot = reqprot; -#ifndef CONFIG_PPC32 -- if ((prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) { -+ if (default_noexec && -+ (prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) { - int rc = 0; + if ((prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) { + rc = 0; if (vma->vm_start >= vma->vm_mm->start_brk && - vma->vm_end <= vma->vm_mm->brk) { -@@ -3099,7 +3100,6 @@ static int selinux_file_mprotect(struct vm_area_struct *vma, +@@ -3049,7 +3046,6 @@ static int selinux_file_mprotect(struct if (rc) return rc; } @@ -108,17 +39,3 @@ index ebee467..a03fd74 100644 return file_map_prot_check(vma->vm_file, prot, vma->vm_flags&VM_SHARED); } -@@ -5662,6 +5662,8 @@ static __init int selinux_init(void) - /* Set the security state for the initial task. */ - cred_init_security(); - -+ default_noexec = !(VM_DATA_DEFAULT_FLAGS & VM_EXEC); -+ - sel_inode_cache = kmem_cache_create("selinux_inode_security", - sizeof(struct inode_security_struct), - 0, SLAB_PANIC, NULL); - --- -Stephen Smalley -National Security Agency - diff --git a/linux-2.6-tracehook.patch b/linux-2.6-tracehook.patch index bfed531..b8c54ad 100644 --- a/linux-2.6-tracehook.patch +++ b/linux-2.6-tracehook.patch @@ -20,10 +20,10 @@ Signed-off-by: Roland McGrath 5 files changed, 10 insertions(+), 8 deletions(-) diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h -index e1fb607..0d84f1e 100644 +index 56f2d63..4802e2a 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h -@@ -105,6 +105,7 @@ extern int ptrace_traceme(void); +@@ -85,6 +85,7 @@ extern int ptrace_traceme(void); extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); extern int ptrace_attach(struct task_struct *tsk); @@ -32,10 +32,10 @@ index e1fb607..0d84f1e 100644 extern void ptrace_disable(struct task_struct *); extern int ptrace_check_attach(struct task_struct *task, int kill); diff --git a/include/linux/sched.h b/include/linux/sched.h -index 2b7b81d..82e4494 100644 +index 78efe7c..9a2b557 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -2034,6 +2034,7 @@ extern int kill_pgrp(struct pid *pid, in +@@ -2095,6 +2095,7 @@ extern int kill_pgrp(struct pid *pid, in extern int kill_pid(struct pid *pid, int sig, int priv); extern int kill_proc_info(int, struct siginfo *, pid_t); extern int do_notify_parent(struct task_struct *, int); @@ -93,10 +93,10 @@ index 10db010..c78b2f4 100644 /** diff --git a/kernel/ptrace.c b/kernel/ptrace.c -index 42ad8ae..067f120 100644 +index 23bd09c..b7c1d32 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c -@@ -272,7 +272,7 @@ static int ignoring_children(struct sigh +@@ -271,7 +271,7 @@ static int ignoring_children(struct sigh * reap it now, in that case we must also wake up sub-threads sleeping in * do_wait(). 
*/ @@ -106,10 +106,10 @@ index 42ad8ae..067f120 100644 __ptrace_unlink(p); diff --git a/kernel/signal.c b/kernel/signal.c -index dbd7fe0..5122b80 100644 +index 934ae5e..9551cec 100644 --- a/kernel/signal.c +++ b/kernel/signal.c -@@ -1515,7 +1515,7 @@ int do_notify_parent(struct task_struct +@@ -1498,7 +1498,7 @@ int do_notify_parent(struct task_struct return ret; } @@ -118,7 +118,7 @@ index dbd7fe0..5122b80 100644 { struct siginfo info; unsigned long flags; -@@ -1785,7 +1785,7 @@ static int do_signal_stop(int signr) +@@ -1768,7 +1768,7 @@ static int do_signal_stop(int signr) static int ptrace_signal(int signr, siginfo_t *info, struct pt_regs *regs, void *cookie) { diff --git a/linux-2.6-upstream-reverts.patch b/linux-2.6-upstream-reverts.patch index 600fb93..197189a 100644 --- a/linux-2.6-upstream-reverts.patch +++ b/linux-2.6-upstream-reverts.patch @@ -1,1148 +1,1665 @@ -From 468f0b44ce4b002ca7d9260f802a341854752c02 Mon Sep 17 00:00:00 2001 -From: Chris Wilson -Date: Thu, 27 May 2010 13:18:13 +0100 -Subject: drm/i915: Hold the spinlock whilst resetting unpin_work along error path -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit +From 5ce8ba7c9279a63f99e1f131602580472b8af968 Mon Sep 17 00:00:00 2001 +From: Adam Jackson +Date: Thu, 15 Apr 2010 14:03:30 -0400 +Subject: drm/i915: Fix 82854 PCI ID, and treat it like other 85X -From: Chris Wilson +From: Adam Jackson -commit 468f0b44ce4b002ca7d9260f802a341854752c02 upstream. +commit 5ce8ba7c9279a63f99e1f131602580472b8af968 upstream. -Delay taking the mutex until we need to and ensure that we hold the -spinlock when resetting unpin_work on the error path. Also defer the -debugging print messages until after we have released the spinlock. +pci.ids and the datasheet both say it's 358e, not 35e8. 
-Signed-off-by: Chris Wilson -Cc: Jesse Barnes -Cc: Kristian Høgsberg -Reviewed-by: Jesse Barnes +Signed-off-by: Adam Jackson Signed-off-by: Eric Anholt Signed-off-by: Greg Kroah-Hartman --- - drivers/gpu/drm/i915/intel_display.c | 20 ++++++++++++-------- - 1 file changed, 12 insertions(+), 8 deletions(-) + drivers/gpu/drm/i915/i915_drv.c | 5 +++-- + drivers/gpu/drm/i915/i915_drv.h | 3 ++- + 2 files changed, 5 insertions(+), 3 deletions(-) ---- a/drivers/gpu/drm/i915/intel_display.c -+++ b/drivers/gpu/drm/i915/intel_display.c -@@ -4239,8 +4239,6 @@ static int intel_crtc_page_flip(struct d - if (work == NULL) - return -ENOMEM; - -- mutex_lock(&dev->struct_mutex); -- - work->event = event; - work->dev = crtc->dev; - intel_fb = to_intel_framebuffer(crtc->fb); -@@ -4250,10 +4248,10 @@ static int intel_crtc_page_flip(struct d - /* We borrow the event spin lock for protecting unpin_work */ - spin_lock_irqsave(&dev->event_lock, flags); - if (intel_crtc->unpin_work) { -- DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); - spin_unlock_irqrestore(&dev->event_lock, flags); - kfree(work); -- mutex_unlock(&dev->struct_mutex); -+ -+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); - return -EBUSY; - } - intel_crtc->unpin_work = work; -@@ -4262,13 +4260,19 @@ static int intel_crtc_page_flip(struct d - intel_fb = to_intel_framebuffer(fb); - obj = intel_fb->obj; +--- a/drivers/gpu/drm/i915/i915_drv.c ++++ b/drivers/gpu/drm/i915/i915_drv.c +@@ -68,7 +68,8 @@ const static struct intel_device_info in + }; -+ mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(dev, obj); - if (ret != 0) { -- DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", -- to_intel_bo(obj)); -- kfree(work); -- intel_crtc->unpin_work = NULL; - mutex_unlock(&dev->struct_mutex); -+ -+ spin_lock_irqsave(&dev->event_lock, flags); -+ intel_crtc->unpin_work = NULL; -+ spin_unlock_irqrestore(&dev->event_lock, flags); -+ -+ kfree(work); -+ -+ DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", -+ to_intel_bo(obj)); - return ret; - } + const static struct intel_device_info intel_i85x_info = { +- .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, ++ .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, ++ .cursor_needs_physical = 1, + }; -From f953c9353f5fe6e98fa7f32f51060a74d845b5f8 Mon Sep 17 00:00:00 2001 -From: Daniel J Blueman -Date: Mon, 17 May 2010 14:23:52 +0100 -Subject: i915: fix lock imbalance on error path... 
+ const static struct intel_device_info intel_i865g_info = { +@@ -140,7 +141,7 @@ const static struct pci_device_id pciidl + INTEL_VGA_DEVICE(0x3577, &intel_i830_info), + INTEL_VGA_DEVICE(0x2562, &intel_845g_info), + INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), +- INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info), ++ INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), + INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), + INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), + INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -175,6 +175,7 @@ struct intel_overlay; + struct intel_device_info { + u8 is_mobile : 1; + u8 is_i8xx : 1; ++ u8 is_i85x : 1; + u8 is_i915g : 1; + u8 is_i9xx : 1; + u8 is_i945gm : 1; +@@ -1027,7 +1028,7 @@ extern int i915_wait_ring(struct drm_dev + + #define IS_I830(dev) ((dev)->pci_device == 0x3577) + #define IS_845G(dev) ((dev)->pci_device == 0x2562) +-#define IS_I85X(dev) ((dev)->pci_device == 0x3582) ++#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) + #define IS_I865G(dev) ((dev)->pci_device == 0x2572) + #define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx) + #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) +From 61dd98fad58f945ed720ba132681acb58fcee015 Mon Sep 17 00:00:00 2001 +From: Adam Jackson +Date: Thu, 13 May 2010 14:55:28 -0400 +Subject: drm/edid: Fix 1024x768@85Hz -From: Daniel J Blueman +From: Adam Jackson -commit f953c9353f5fe6e98fa7f32f51060a74d845b5f8 upstream. +commit 61dd98fad58f945ed720ba132681acb58fcee015 upstream. -While investigating Intel i5 Arrandale GPU lockups with -rc4, I -noticed a lock imbalance. +Having hsync both start and end on pixel 1072 ain't gonna work very +well. Matches the X server's list. -Signed-off-by: Daniel J Blueman +Signed-off-by: Adam Jackson +Tested-By: Michael Tokarev +Signed-off-by: Dave Airlie +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/drm_edid.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/gpu/drm/drm_edid.c ++++ b/drivers/gpu/drm/drm_edid.c +@@ -334,7 +334,7 @@ static struct drm_display_mode drm_dmt_m + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1024x768@85Hz */ + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, +- 1072, 1376, 0, 768, 769, 772, 808, 0, ++ 1168, 1376, 0, 768, 769, 772, 808, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1152x864@75Hz */ + { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, +From 8d06a1e1e9c69244f08beb7d17146483f9dcd120 Mon Sep 17 00:00:00 2001 +From: Robert Hooker +Date: Fri, 19 Mar 2010 15:13:27 -0400 +Subject: drm/i915: Disable FBC on 915GM and 945GM. + +From: Robert Hooker + +commit 8d06a1e1e9c69244f08beb7d17146483f9dcd120 upstream. + +It is causing hangs after a suspend/resume cycle with the default +powersave=1 module option on these chipsets since 2.6.32-rc. + +BugLink: http://bugs.launchpad.net/bugs/492392 +Signed-off-by: Robert Hooker +Acked-by: Jesse Barnes Signed-off-by: Eric Anholt Signed-off-by: Greg Kroah-Hartman --- - drivers/gpu/drm/i915/i915_drv.c | 1 + - 1 file changed, 1 insertion(+) + drivers/gpu/drm/i915/i915_drv.c | 4 ++-- + drivers/gpu/drm/i915/intel_display.c | 2 +- + 2 files changed, 3 insertions(+), 3 deletions(-) --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c -@@ -341,6 +341,7 @@ int i965_reset(struct drm_device *dev, u - } - } else { - DRM_ERROR("Error occurred. 
Don't know how to reset this chip.\n"); -+ mutex_unlock(&dev->struct_mutex); - return -ENODEV; - } +@@ -79,14 +79,14 @@ const static struct intel_device_info in + .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, + }; + const static struct intel_device_info intel_i915gm_info = { +- .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, ++ .is_i9xx = 1, .is_mobile = 1, + .cursor_needs_physical = 1, + }; + const static struct intel_device_info intel_i945g_info = { + .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, + }; + const static struct intel_device_info intel_i945gm_info = { +- .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, ++ .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, + .has_hotplug = 1, .cursor_needs_physical = 1, + }; -From 6fd024893911dcb51b4a0aa71971db5ba38f7071 Mon Sep 17 00:00:00 2001 -From: Ben Hutchings -Date: Wed, 24 Mar 2010 03:36:31 +0000 -Subject: amd64-agp: Probe unknown AGP devices the right way +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -4683,7 +4683,7 @@ static void intel_init_display(struct dr + dev_priv->display.fbc_enabled = g4x_fbc_enabled; + dev_priv->display.enable_fbc = g4x_enable_fbc; + dev_priv->display.disable_fbc = g4x_disable_fbc; +- } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) { ++ } else if (IS_I965GM(dev)) { + dev_priv->display.fbc_enabled = i8xx_fbc_enabled; + dev_priv->display.enable_fbc = i8xx_enable_fbc; + dev_priv->display.disable_fbc = i8xx_disable_fbc; +From 1918ad77f7f908ed67cf37c505c6ad4ac52f1ecf Mon Sep 17 00:00:00 2001 +From: Jesse Barnes +Date: Fri, 23 Apr 2010 09:32:23 -0700 +Subject: drm/i915: fix non-Ironlake 965 class crashes +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit -From: Ben Hutchings +From: Jesse Barnes -commit 6fd024893911dcb51b4a0aa71971db5ba38f7071 upstream. +commit 1918ad77f7f908ed67cf37c505c6ad4ac52f1ecf upstream. -The current initialisation code probes 'unsupported' AGP devices -simply by calling its own probe function. It does not lock these -devices or even check whether another driver is already bound to -them. +My PIPE_CONTROL fix (just sent via Eric's tree) was buggy; I was +testing a whole set of patches together and missed a conversion to the +new HAS_PIPE_CONTROL macro, which will cause breakage on non-Ironlake +965 class chips. Fortunately, the fix is trivial and has been tested. -We must use the device core to manage this. So if the specific -device id table didn't match anything and agp_try_unsupported=1, -switch the device id table and call driver_attach() again. +Be sure to use the HAS_PIPE_CONTROL macro in i915_get_gem_seqno, or +we'll end up reading the wrong graphics memory, likely causing hangs, +crashes, or worse. 
-Signed-off-by: Ben Hutchings -Signed-off-by: Dave Airlie +Reported-by: Zdenek Kabelac +Reported-by: Toralf Förster +Tested-by: Toralf Förster +Signed-off-by: Jesse Barnes +Signed-off-by: Linus Torvalds +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/i915/i915_gem.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -1785,7 +1785,7 @@ i915_get_gem_seqno(struct drm_device *de + { + drm_i915_private_t *dev_priv = dev->dev_private; + +- if (IS_I965G(dev)) ++ if (HAS_PIPE_CONTROL(dev)) + return ((volatile u32 *)(dev_priv->seqno_page))[0]; + else + return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); +From e552eb7038a36d9b18860f525aa02875e313fe16 Mon Sep 17 00:00:00 2001 +From: Jesse Barnes +Date: Wed, 21 Apr 2010 11:39:23 -0700 +Subject: drm/i915: use PIPE_CONTROL instruction on Ironlake and Sandy Bridge + +From: Jesse Barnes + +commit e552eb7038a36d9b18860f525aa02875e313fe16 upstream. + +Since 965, the hardware has supported the PIPE_CONTROL command, which +provides fine grained GPU cache flushing control. On recent chipsets, +this instruction is required for reliable interrupt and sequence number +reporting in the driver. + +So add support for this instruction, including workarounds, on Ironlake +and Sandy Bridge hardware. + +https://bugs.freedesktop.org/show_bug.cgi?id=27108 + +Signed-off-by: Jesse Barnes +Tested-by: Chris Wilson +Signed-off-by: Eric Anholt Signed-off-by: Greg Kroah-Hartman --- - drivers/char/agp/amd64-agp.c | 27 +++++++++++++++------------ - 1 file changed, 15 insertions(+), 12 deletions(-) - ---- a/drivers/char/agp/amd64-agp.c -+++ b/drivers/char/agp/amd64-agp.c -@@ -499,6 +499,10 @@ static int __devinit agp_amd64_probe(str - u8 cap_ptr; - int err; - -+ /* The Highlander principle */ -+ if (agp_bridges_found) -+ return -ENODEV; + drivers/gpu/drm/i915/i915_drv.h | 4 + + drivers/gpu/drm/i915/i915_gem.c | 145 ++++++++++++++++++++++++++++++++++++---- + drivers/gpu/drm/i915/i915_irq.c | 8 +- + drivers/gpu/drm/i915/i915_reg.h | 11 +++ + 4 files changed, 152 insertions(+), 16 deletions(-) + +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -206,11 +206,14 @@ typedef struct drm_i915_private { + + drm_dma_handle_t *status_page_dmah; + void *hw_status_page; ++ void *seqno_page; + dma_addr_t dma_status_page; + uint32_t counter; + unsigned int status_gfx_addr; ++ unsigned int seqno_gfx_addr; + drm_local_map_t hws_map; + struct drm_gem_object *hws_obj; ++ struct drm_gem_object *seqno_obj; + struct drm_gem_object *pwrctx; + + struct resource mch_res; +@@ -1090,6 +1093,7 @@ extern int i915_wait_ring(struct drm_dev + + #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ + IS_GEN6(dev)) ++#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev)) + + #define PRIMARY_RINGBUFFER_SIZE (128*1024) + +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -1559,6 +1559,13 @@ i915_gem_object_move_to_inactive(struct + i915_verify_inactive(dev, __FILE__, __LINE__); + } + ++#define PIPE_CONTROL_FLUSH(addr) \ ++ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ ++ PIPE_CONTROL_DEPTH_STALL); \ ++ OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ ++ OUT_RING(0); \ ++ OUT_RING(0); \ + - cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); - if (!cap_ptr) - return -ENODEV; -@@ -562,6 +566,8 @@ static void __devexit agp_amd64_remove(s - amd64_aperture_sizes[bridge->aperture_size_idx].size); - agp_remove_bridge(bridge); - agp_put_bridge(bridge); + 
/** + * Creates a new sequence number, emitting a write of it to the status page + * plus an interrupt, which will trigger i915_user_interrupt_handler. +@@ -1593,13 +1600,47 @@ i915_add_request(struct drm_device *dev, + if (dev_priv->mm.next_gem_seqno == 0) + dev_priv->mm.next_gem_seqno++; + +- BEGIN_LP_RING(4); +- OUT_RING(MI_STORE_DWORD_INDEX); +- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); +- OUT_RING(seqno); ++ if (HAS_PIPE_CONTROL(dev)) { ++ u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; + +- OUT_RING(MI_USER_INTERRUPT); +- ADVANCE_LP_RING(); ++ /* ++ * Workaround qword write incoherence by flushing the ++ * PIPE_NOTIFY buffers out to memory before requesting ++ * an interrupt. ++ */ ++ BEGIN_LP_RING(32); ++ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | ++ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); ++ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); ++ OUT_RING(seqno); ++ OUT_RING(0); ++ PIPE_CONTROL_FLUSH(scratch_addr); ++ scratch_addr += 128; /* write to separate cachelines */ ++ PIPE_CONTROL_FLUSH(scratch_addr); ++ scratch_addr += 128; ++ PIPE_CONTROL_FLUSH(scratch_addr); ++ scratch_addr += 128; ++ PIPE_CONTROL_FLUSH(scratch_addr); ++ scratch_addr += 128; ++ PIPE_CONTROL_FLUSH(scratch_addr); ++ scratch_addr += 128; ++ PIPE_CONTROL_FLUSH(scratch_addr); ++ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | ++ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | ++ PIPE_CONTROL_NOTIFY); ++ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); ++ OUT_RING(seqno); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ } else { ++ BEGIN_LP_RING(4); ++ OUT_RING(MI_STORE_DWORD_INDEX); ++ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); ++ OUT_RING(seqno); + -+ agp_bridges_found--; - } ++ OUT_RING(MI_USER_INTERRUPT); ++ ADVANCE_LP_RING(); ++ } - #ifdef CONFIG_PM -@@ -709,6 +715,11 @@ static struct pci_device_id agp_amd64_pc + DRM_DEBUG_DRIVER("%d\n", seqno); - MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table); +@@ -1744,7 +1785,10 @@ i915_get_gem_seqno(struct drm_device *de + { + drm_i915_private_t *dev_priv = dev->dev_private; -+static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = { -+ { PCI_DEVICE_CLASS(0, 0) }, -+ { } -+}; -+ - static struct pci_driver agp_amd64_pci_driver = { - .name = "agpgart-amd64", - .id_table = agp_amd64_pci_table, -@@ -734,7 +745,6 @@ int __init agp_amd64_init(void) - return err; - - if (agp_bridges_found == 0) { -- struct pci_dev *dev; - if (!agp_try_unsupported && !agp_try_unsupported_boot) { - printk(KERN_INFO PFX "No supported AGP bridge found.\n"); - #ifdef MODULE -@@ -750,17 +760,10 @@ int __init agp_amd64_init(void) - return -ENODEV; - - /* Look for any AGP bridge */ -- dev = NULL; -- err = -ENODEV; -- for_each_pci_dev(dev) { -- if (!pci_find_capability(dev, PCI_CAP_ID_AGP)) -- continue; -- /* Only one bridge supported right now */ -- if (agp_amd64_probe(dev, NULL) == 0) { -- err = 0; -- break; -- } -- } -+ agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table; -+ err = driver_attach(&agp_amd64_pci_driver.driver); -+ if (err == 0 && agp_bridges_found == 0) -+ err = -ENODEV; - } - return err; +- return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); ++ if (IS_I965G(dev)) ++ return ((volatile u32 *)(dev_priv->seqno_page))[0]; ++ else ++ return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); } -From c05556421742eb47f80301767653a4bcb19de9de Mon Sep 17 00:00:00 2001 -From: Ian Munsie -Date: Tue, 13 Apr 2010 18:37:33 +1000 -Subject: perf: Fix endianness argument compatibility with OPT_BOOLEAN() and introduce 
OPT_INCR() - -From: Ian Munsie - -commit c05556421742eb47f80301767653a4bcb19de9de upstream. - -Parsing an option from the command line with OPT_BOOLEAN on a -bool data type would not work on a big-endian machine due to the -manner in which the boolean was being cast into an int and -incremented. For example, running 'perf probe --list' on a -PowerPC machine would fail to properly set the list_events bool -and would therefore print out the usage information and -terminate. - -This patch makes OPT_BOOLEAN work as expected with a bool -datatype. For cases where the original OPT_BOOLEAN was -intentionally being used to increment an int each time it was -passed in on the command line, this patch introduces OPT_INCR -with the old behaviour of OPT_BOOLEAN (the verbose variable is -currently the only such example of this). - -I have reviewed every use of OPT_BOOLEAN to verify that a true -C99 bool was passed. Where integers were used, I verified that -they were only being used for boolean logic and changed them to -bools to ensure that they would not be mistakenly used as ints. -The major exception was the verbose variable which now uses -OPT_INCR instead of OPT_BOOLEAN. - -Signed-off-by: Ian Munsie -Acked-by: David S. Miller -Cc: -Cc: Git development list -Cc: Ian Munsie -Cc: Peter Zijlstra -Cc: Paul Mackerras -Cc: Arnaldo Carvalho de Melo -Cc: KOSAKI Motohiro -Cc: Hitoshi Mitake -Cc: Rusty Russell -Cc: Frederic Weisbecker -Cc: Eric B Munson -Cc: Valdis.Kletnieks@vt.edu -Cc: WANG Cong -Cc: Thiago Farina -Cc: Masami Hiramatsu -Cc: Xiao Guangrong -Cc: Jaswinder Singh Rajput -Cc: Arjan van de Ven -Cc: OGAWA Hirofumi -Cc: Mike Galbraith -Cc: Tom Zanussi -Cc: Anton Blanchard -Cc: John Kacur -Cc: Li Zefan -Cc: Steven Rostedt -LKML-Reference: <1271147857-11604-1-git-send-email-imunsie@au.ibm.com> -Signed-off-by: Ingo Molnar -Signed-off-by: Greg Kroah-Hartman - ---- - tools/perf/bench/mem-memcpy.c | 2 +- - tools/perf/bench/sched-messaging.c | 4 ++-- - tools/perf/builtin-annotate.c | 8 ++++---- - tools/perf/builtin-buildid-cache.c | 2 +- - tools/perf/builtin-buildid-list.c | 4 ++-- - tools/perf/builtin-diff.c | 4 ++-- - tools/perf/builtin-help.c | 2 +- - tools/perf/builtin-lock.c | 2 +- - tools/perf/builtin-probe.c | 2 +- - tools/perf/builtin-record.c | 24 ++++++++++++------------ - tools/perf/builtin-report.c | 6 +++--- - tools/perf/builtin-sched.c | 6 +++--- - tools/perf/builtin-stat.c | 10 +++++----- - tools/perf/builtin-timechart.c | 2 +- - tools/perf/builtin-top.c | 14 +++++++------- - tools/perf/builtin-trace.c | 2 +- - tools/perf/util/debug.c | 2 +- - tools/perf/util/debug.h | 3 ++- - tools/perf/util/parse-options.c | 6 ++++++ - tools/perf/util/parse-options.h | 4 +++- - tools/perf/util/trace-event-parse.c | 2 +- - tools/perf/util/trace-event.h | 3 ++- - 22 files changed, 62 insertions(+), 52 deletions(-) - ---- a/tools/perf/bench/mem-memcpy.c -+++ b/tools/perf/bench/mem-memcpy.c -@@ -24,7 +24,7 @@ - - static const char *length_str = "1MB"; - static const char *routine = "default"; --static int use_clock = 0; -+static bool use_clock = false; - static int clock_fd; - - static const struct option options[] = { ---- a/tools/perf/bench/sched-messaging.c -+++ b/tools/perf/bench/sched-messaging.c -@@ -31,9 +31,9 @@ - - #define DATASIZE 100 - --static int use_pipes = 0; -+static bool use_pipes = false; - static unsigned int loops = 100; --static unsigned int thread_mode = 0; -+static bool thread_mode = false; - static unsigned int num_groups = 10; - - struct sender_context { ---- 
a/tools/perf/builtin-annotate.c -+++ b/tools/perf/builtin-annotate.c -@@ -29,11 +29,11 @@ - - static char const *input_name = "perf.data"; - --static int force; -+static bool force; - --static int full_paths; -+static bool full_paths; - --static int print_line; -+static bool print_line; - - struct sym_hist { - u64 sum; -@@ -584,7 +584,7 @@ static const struct option options[] = { - OPT_STRING('s', "symbol", &sym_hist_filter, "symbol", - "symbol to annotate"), - OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), -- OPT_BOOLEAN('v', "verbose", &verbose, -+ OPT_INCR('v', "verbose", &verbose, - "be more verbose (show symbol address, etc)"), - OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, - "dump raw trace in ASCII"), ---- a/tools/perf/builtin-buildid-cache.c -+++ b/tools/perf/builtin-buildid-cache.c -@@ -27,7 +27,7 @@ static const struct option buildid_cache - "file list", "file(s) to add"), - OPT_STRING('r', "remove", &remove_name_list_str, "file list", - "file(s) to remove"), -- OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose"), -+ OPT_INCR('v', "verbose", &verbose, "be more verbose"), - OPT_END() - }; ---- a/tools/perf/builtin-buildid-list.c -+++ b/tools/perf/builtin-buildid-list.c -@@ -16,7 +16,7 @@ - #include "util/symbol.h" - - static char const *input_name = "perf.data"; --static int force; -+static bool force; - static bool with_hits; - - static const char * const buildid_list_usage[] = { -@@ -29,7 +29,7 @@ static const struct option options[] = { - OPT_STRING('i', "input", &input_name, "file", - "input file name"), - OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), -- OPT_BOOLEAN('v', "verbose", &verbose, -+ OPT_INCR('v', "verbose", &verbose, - "be more verbose"), - OPT_END() - }; ---- a/tools/perf/builtin-diff.c -+++ b/tools/perf/builtin-diff.c -@@ -19,7 +19,7 @@ - static char const *input_old = "perf.data.old", - *input_new = "perf.data"; - static char diff__default_sort_order[] = "dso,symbol"; --static int force; -+static bool force; - static bool show_displacement; - - static int perf_session__add_hist_entry(struct perf_session *self, -@@ -188,7 +188,7 @@ static const char * const diff_usage[] = - }; - - static const struct option options[] = { -- OPT_BOOLEAN('v', "verbose", &verbose, -+ OPT_INCR('v', "verbose", &verbose, - "be more verbose (show symbol address, etc)"), - OPT_BOOLEAN('m', "displacement", &show_displacement, - "Show position displacement relative to baseline"), ---- a/tools/perf/builtin-help.c -+++ b/tools/perf/builtin-help.c -@@ -29,7 +29,7 @@ enum help_format { - HELP_FORMAT_WEB, - }; + /** +@@ -4576,6 +4620,49 @@ i915_gem_idle(struct drm_device *dev) + return 0; + } --static int show_all = 0; -+static bool show_all = false; - static enum help_format help_format = HELP_FORMAT_MAN; - static struct option builtin_help_options[] = { - OPT_BOOLEAN('a', "all", &show_all, "print all available commands"), ---- a/tools/perf/builtin-lock.c -+++ b/tools/perf/builtin-lock.c -@@ -744,7 +744,7 @@ static const char * const lock_usage[] = - - static const struct option lock_options[] = { - OPT_STRING('i', "input", &input_name, "file", "input file name"), -- OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), -+ OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), - OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), - OPT_END() - }; ---- a/tools/perf/builtin-probe.c -+++ b/tools/perf/builtin-probe.c -@@ -162,7 +162,7 @@ static const char * const probe_usage[] - }; 
++/* ++ * 965+ support PIPE_CONTROL commands, which provide finer grained control ++ * over cache flushing. ++ */ ++static int ++i915_gem_init_pipe_control(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ int ret; ++ ++ obj = drm_gem_object_alloc(dev, 4096); ++ if (obj == NULL) { ++ DRM_ERROR("Failed to allocate seqno page\n"); ++ ret = -ENOMEM; ++ goto err; ++ } ++ obj_priv = obj->driver_private; ++ obj_priv->agp_type = AGP_USER_CACHED_MEMORY; ++ ++ ret = i915_gem_object_pin(obj, 4096); ++ if (ret) ++ goto err_unref; ++ ++ dev_priv->seqno_gfx_addr = obj_priv->gtt_offset; ++ dev_priv->seqno_page = kmap(obj_priv->pages[0]); ++ if (dev_priv->seqno_page == NULL) ++ goto err_unpin; ++ ++ dev_priv->seqno_obj = obj; ++ memset(dev_priv->seqno_page, 0, PAGE_SIZE); ++ ++ return 0; ++ ++err_unpin: ++ i915_gem_object_unpin(obj); ++err_unref: ++ drm_gem_object_unreference(obj); ++err: ++ return ret; ++} ++ + static int + i915_gem_init_hws(struct drm_device *dev) + { +@@ -4593,7 +4680,8 @@ i915_gem_init_hws(struct drm_device *dev + obj = drm_gem_object_alloc(dev, 4096); + if (obj == NULL) { + DRM_ERROR("Failed to allocate status page\n"); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto err; + } + obj_priv = obj->driver_private; + obj_priv->agp_type = AGP_USER_CACHED_MEMORY; +@@ -4601,7 +4689,7 @@ i915_gem_init_hws(struct drm_device *dev + ret = i915_gem_object_pin(obj, 4096); + if (ret != 0) { + drm_gem_object_unreference(obj); +- return ret; ++ goto err_unref; + } - static const struct option options[] = { -- OPT_BOOLEAN('v', "verbose", &verbose, -+ OPT_INCR('v', "verbose", &verbose, - "be more verbose (show parsed arguments, etc)"), - #ifndef NO_DWARF_SUPPORT - OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, ---- a/tools/perf/builtin-record.c -+++ b/tools/perf/builtin-record.c -@@ -39,19 +39,19 @@ static int output; - static const char *output_name = "perf.data"; - static int group = 0; - static unsigned int realtime_prio = 0; --static int raw_samples = 0; --static int system_wide = 0; -+static bool raw_samples = false; -+static bool system_wide = false; - static int profile_cpu = -1; - static pid_t target_pid = -1; - static pid_t child_pid = -1; --static int inherit = 1; --static int force = 0; --static int append_file = 0; --static int call_graph = 0; --static int inherit_stat = 0; --static int no_samples = 0; --static int sample_address = 0; --static int multiplex = 0; -+static bool inherit = true; -+static bool force = false; -+static bool append_file = false; -+static bool call_graph = false; -+static bool inherit_stat = false; -+static bool no_samples = false; -+static bool sample_address = false; -+static bool multiplex = false; - static int multiplex_fd = -1; - - static long samples = 0; -@@ -451,7 +451,7 @@ static int __cmd_record(int argc, const - rename(output_name, oldname); - } - } else { -- append_file = 0; -+ append_file = false; + dev_priv->status_gfx_addr = obj_priv->gtt_offset; +@@ -4610,10 +4698,16 @@ i915_gem_init_hws(struct drm_device *dev + if (dev_priv->hw_status_page == NULL) { + DRM_ERROR("Failed to map status page.\n"); + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); +- i915_gem_object_unpin(obj); +- drm_gem_object_unreference(obj); +- return -EINVAL; ++ ret = -EINVAL; ++ goto err_unpin; } ++ ++ if (HAS_PIPE_CONTROL(dev)) { ++ ret = i915_gem_init_pipe_control(dev); ++ if (ret) ++ goto err_unpin; ++ } ++ + dev_priv->hws_obj = obj; + 
memset(dev_priv->hw_status_page, 0, PAGE_SIZE); + I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); +@@ -4621,6 +4715,30 @@ i915_gem_init_hws(struct drm_device *dev + DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); - flags = O_CREAT|O_RDWR; -@@ -676,7 +676,7 @@ static const struct option options[] = { - "number of mmap data pages"), - OPT_BOOLEAN('g', "call-graph", &call_graph, - "do call-graph (stack chain/backtrace) recording"), -- OPT_BOOLEAN('v', "verbose", &verbose, -+ OPT_INCR('v', "verbose", &verbose, - "be more verbose (show counter open errors, etc)"), - OPT_BOOLEAN('s', "stat", &inherit_stat, - "per thread counts"), ---- a/tools/perf/builtin-report.c -+++ b/tools/perf/builtin-report.c -@@ -33,11 +33,11 @@ - - static char const *input_name = "perf.data"; - --static int force; -+static bool force; - static bool hide_unresolved; - static bool dont_use_callchains; - --static int show_threads; -+static bool show_threads; - static struct perf_read_values show_threads_values; - - static char default_pretty_printing_style[] = "normal"; -@@ -400,7 +400,7 @@ static const char * const report_usage[] - static const struct option options[] = { - OPT_STRING('i', "input", &input_name, "file", - "input file name"), -- OPT_BOOLEAN('v', "verbose", &verbose, -+ OPT_INCR('v', "verbose", &verbose, - "be more verbose (show symbol address, etc)"), - OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, - "dump raw trace in ASCII"), ---- a/tools/perf/builtin-sched.c -+++ b/tools/perf/builtin-sched.c -@@ -1790,7 +1790,7 @@ static const char * const sched_usage[] - static const struct option sched_options[] = { - OPT_STRING('i', "input", &input_name, "file", - "input file name"), -- OPT_BOOLEAN('v', "verbose", &verbose, -+ OPT_INCR('v', "verbose", &verbose, - "be more verbose (show symbol address, etc)"), - OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, - "dump raw trace in ASCII"), -@@ -1805,7 +1805,7 @@ static const char * const latency_usage[ - static const struct option latency_options[] = { - OPT_STRING('s', "sort", &sort_order, "key[,key2...]", - "sort by key(s): runtime, switch, avg, max"), -- OPT_BOOLEAN('v', "verbose", &verbose, -+ OPT_INCR('v', "verbose", &verbose, - "be more verbose (show symbol address, etc)"), - OPT_INTEGER('C', "CPU", &profile_cpu, - "CPU to profile on"), -@@ -1822,7 +1822,7 @@ static const char * const replay_usage[] - static const struct option replay_options[] = { - OPT_INTEGER('r', "repeat", &replay_repeat, - "repeat the workload replay N times (-1: infinite)"), -- OPT_BOOLEAN('v', "verbose", &verbose, -+ OPT_INCR('v', "verbose", &verbose, - "be more verbose (show symbol address, etc)"), - OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, - "dump raw trace in ASCII"), ---- a/tools/perf/builtin-stat.c -+++ b/tools/perf/builtin-stat.c -@@ -66,16 +66,16 @@ static struct perf_event_attr default_at + return 0; ++ ++err_unpin: ++ i915_gem_object_unpin(obj); ++err_unref: ++ drm_gem_object_unreference(obj); ++err: ++ return 0; ++} ++ ++static void ++i915_gem_cleanup_pipe_control(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ ++ obj = dev_priv->seqno_obj; ++ obj_priv = obj->driver_private; ++ kunmap(obj_priv->pages[0]); ++ i915_gem_object_unpin(obj); ++ drm_gem_object_unreference(obj); ++ dev_priv->seqno_obj = NULL; ++ ++ dev_priv->seqno_page = NULL; + } - }; + static void +@@ -4644,6 +4762,9 @@ i915_gem_cleanup_hws(struct drm_device * + 
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); + dev_priv->hw_status_page = NULL; --static int system_wide = 0; -+static bool system_wide = false; - static unsigned int nr_cpus = 0; - static int run_idx = 0; - - static int run_count = 1; --static int inherit = 1; --static int scale = 1; -+static bool inherit = true; -+static bool scale = true; - static pid_t target_pid = -1; - static pid_t child_pid = -1; --static int null_run = 0; -+static bool null_run = false; - - static int fd[MAX_NR_CPUS][MAX_COUNTERS]; - -@@ -494,7 +494,7 @@ static const struct option options[] = { - "system-wide collection from all CPUs"), - OPT_BOOLEAN('c', "scale", &scale, - "scale/normalize counters"), -- OPT_BOOLEAN('v', "verbose", &verbose, -+ OPT_INCR('v', "verbose", &verbose, - "be more verbose (show counter open errors, etc)"), - OPT_INTEGER('r', "repeat", &run_count, - "repeat command and print average + stddev (max: 100)"), ---- a/tools/perf/builtin-timechart.c -+++ b/tools/perf/builtin-timechart.c -@@ -43,7 +43,7 @@ static u64 turbo_frequency; - - static u64 first_time, last_time; - --static int power_only; -+static bool power_only; - - - struct per_pid; ---- a/tools/perf/builtin-top.c -+++ b/tools/perf/builtin-top.c -@@ -57,7 +57,7 @@ - - static int fd[MAX_NR_CPUS][MAX_COUNTERS]; - --static int system_wide = 0; -+static bool system_wide = false; - - static int default_interval = 0; - -@@ -65,18 +65,18 @@ static int count_filter = 5; - static int print_entries; - - static int target_pid = -1; --static int inherit = 0; -+static bool inherit = false; - static int profile_cpu = -1; - static int nr_cpus = 0; - static unsigned int realtime_prio = 0; --static int group = 0; -+static bool group = false; - static unsigned int page_size; - static unsigned int mmap_pages = 16; - static int freq = 1000; /* 1 KHz */ - - static int delay_secs = 2; --static int zero = 0; --static int dump_symtab = 0; -+static bool zero = false; -+static bool dump_symtab = false; - - static bool hide_kernel_symbols = false; - static bool hide_user_symbols = false; -@@ -839,7 +839,7 @@ static void handle_keypress(int c) - display_weighted = ~display_weighted; - break; - case 'z': -- zero = ~zero; -+ zero = !zero; - break; - default: - break; -@@ -1296,7 +1296,7 @@ static const struct option options[] = { - "display this many functions"), - OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols, - "hide user symbols"), -- OPT_BOOLEAN('v', "verbose", &verbose, -+ OPT_INCR('v', "verbose", &verbose, - "be more verbose (show counter open errors, etc)"), - OPT_END() - }; ---- a/tools/perf/builtin-trace.c -+++ b/tools/perf/builtin-trace.c -@@ -505,7 +505,7 @@ static const char * const trace_usage[] - static const struct option options[] = { - OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, - "dump raw trace in ASCII"), -- OPT_BOOLEAN('v', "verbose", &verbose, -+ OPT_INCR('v', "verbose", &verbose, - "be more verbose (show symbol address, etc)"), - OPT_BOOLEAN('L', "Latency", &latency_format, - "show latency attributes (irqs/preemption disabled, etc)"), ---- a/tools/perf/util/debug.c -+++ b/tools/perf/util/debug.c -@@ -12,7 +12,7 @@ - #include "util.h" - - int verbose = 0; --int dump_trace = 0; -+bool dump_trace = false; - - int eprintf(int level, const char *fmt, ...) 
- { ---- a/tools/perf/util/debug.h -+++ b/tools/perf/util/debug.h -@@ -2,10 +2,11 @@ - #ifndef __PERF_DEBUG_H - #define __PERF_DEBUG_H - -+#include - #include "event.h" - - extern int verbose; --extern int dump_trace; -+extern bool dump_trace; - - int eprintf(int level, - const char *fmt, ...) __attribute__((format(printf, 2, 3))); ---- a/tools/perf/util/parse-options.c -+++ b/tools/perf/util/parse-options.c -@@ -49,6 +49,7 @@ static int get_value(struct parse_opt_ct - break; - /* FALLTHROUGH */ - case OPTION_BOOLEAN: -+ case OPTION_INCR: - case OPTION_BIT: - case OPTION_SET_INT: - case OPTION_SET_PTR: -@@ -73,6 +74,10 @@ static int get_value(struct parse_opt_ct - return 0; - - case OPTION_BOOLEAN: -+ *(bool *)opt->value = unset ? false : true; -+ return 0; ++ if (HAS_PIPE_CONTROL(dev)) ++ i915_gem_cleanup_pipe_control(dev); + -+ case OPTION_INCR: - *(int *)opt->value = unset ? 0 : *(int *)opt->value + 1; - return 0; - -@@ -478,6 +483,7 @@ int usage_with_options_internal(const ch - case OPTION_GROUP: - case OPTION_BIT: - case OPTION_BOOLEAN: -+ case OPTION_INCR: - case OPTION_SET_INT: - case OPTION_SET_PTR: - case OPTION_LONG: ---- a/tools/perf/util/parse-options.h -+++ b/tools/perf/util/parse-options.h -@@ -8,7 +8,8 @@ enum parse_opt_type { - OPTION_GROUP, - /* options with no arguments */ - OPTION_BIT, -- OPTION_BOOLEAN, /* _INCR would have been a better name */ -+ OPTION_BOOLEAN, -+ OPTION_INCR, - OPTION_SET_INT, - OPTION_SET_PTR, - /* options with arguments (usually) */ -@@ -95,6 +96,7 @@ struct option { - #define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) } - #define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (b) } - #define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = (v), .help = (h) } -+#define OPT_INCR(s, l, v, h) { .type = OPTION_INCR, .short_name = (s), .long_name = (l), .value = (v), .help = (h) } - #define OPT_SET_INT(s, l, v, h, i) { .type = OPTION_SET_INT, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (i) } - #define OPT_SET_PTR(s, l, v, h, p) { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) } - #define OPT_INTEGER(s, l, v, h) { .type = OPTION_INTEGER, .short_name = (s), .long_name = (l), .value = (v), .help = (h) } ---- a/tools/perf/util/trace-event-parse.c -+++ b/tools/perf/util/trace-event-parse.c -@@ -40,7 +40,7 @@ int header_page_size_size; - int header_page_data_offset; - int header_page_data_size; - --int latency_format; -+bool latency_format; - - static char *input_buf; - static unsigned long long input_buf_ptr; ---- a/tools/perf/util/trace-event.h -+++ b/tools/perf/util/trace-event.h -@@ -1,6 +1,7 @@ - #ifndef __PERF_TRACE_EVENTS_H - #define __PERF_TRACE_EVENTS_H - -+#include - #include "parse-events.h" - - #define __unused __attribute__((unused)) -@@ -241,7 +242,7 @@ extern int header_page_size_size; - extern int header_page_data_offset; - extern int header_page_data_size; - --extern int latency_format; -+extern bool latency_format; - - int parse_header_page(char *buf, unsigned long size); - int trace_parse_common_type(void *data); -From 6e0032f0ae4440e75256bee11b163552cae21962 Mon Sep 17 00:00:00 2001 -From: Karsten Wiese -Date: Sat, 27 Mar 2010 22:48:33 +0100 -Subject: drm/i915: Don't touch PORT_HOTPLUG_EN in intel_dp_detect() - -From: Karsten Wiese - -commit 6e0032f0ae4440e75256bee11b163552cae21962 upstream. 
- -PORT_HOTPLUG_EN has allready been setup in i915_driver_irq_postinstall(), -when intel_dp_detect() runs. - -Delete the DP[BCD]_HOTPLUG_INT_EN defines, they are not referenced anymore. - -I found this while searching for a fix for - https://bugzilla.redhat.com/show_bug.cgi?id=528312 - -Signed-off-by: Karsten Wiese + /* Write high address into HWS_PGA when disabling. */ + I915_WRITE(HWS_PGA, 0x1ffff000); + } +--- a/drivers/gpu/drm/i915/i915_irq.c ++++ b/drivers/gpu/drm/i915/i915_irq.c +@@ -297,7 +297,7 @@ irqreturn_t ironlake_irq_handler(struct + READ_BREADCRUMB(dev_priv); + } + +- if (gt_iir & GT_USER_INTERRUPT) { ++ if (gt_iir & GT_PIPE_NOTIFY) { + u32 seqno = i915_get_gem_seqno(dev); + dev_priv->mm.irq_gem_seqno = seqno; + trace_i915_gem_request_complete(dev, seqno); +@@ -738,7 +738,7 @@ void i915_user_irq_get(struct drm_device + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { + if (HAS_PCH_SPLIT(dev)) +- ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); ++ ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); + else + i915_enable_irq(dev_priv, I915_USER_INTERRUPT); + } +@@ -754,7 +754,7 @@ void i915_user_irq_put(struct drm_device + BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); + if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { + if (HAS_PCH_SPLIT(dev)) +- ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); ++ ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); + else + i915_disable_irq(dev_priv, I915_USER_INTERRUPT); + } +@@ -1034,7 +1034,7 @@ static int ironlake_irq_postinstall(stru + /* enable kind of interrupts always enabled */ + u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | + DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; +- u32 render_mask = GT_USER_INTERRUPT; ++ u32 render_mask = GT_PIPE_NOTIFY; + u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | + SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; + +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -210,6 +210,16 @@ + #define ASYNC_FLIP (1<<22) + #define DISPLAY_PLANE_A (0<<20) + #define DISPLAY_PLANE_B (1<<20) ++#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2) ++#define PIPE_CONTROL_QW_WRITE (1<<14) ++#define PIPE_CONTROL_DEPTH_STALL (1<<13) ++#define PIPE_CONTROL_WC_FLUSH (1<<12) ++#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */ ++#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */ ++#define PIPE_CONTROL_ISP_DIS (1<<9) ++#define PIPE_CONTROL_NOTIFY (1<<8) ++#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ ++#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */ + + /* + * Fence registers +@@ -2111,6 +2121,7 @@ + #define DEIER 0x4400c + + /* GT interrupt */ ++#define GT_PIPE_NOTIFY (1 << 4) + #define GT_SYNC_STATUS (1 << 2) + #define GT_USER_INTERRUPT (1 << 0) + +From c36a2a6de59e4a141a68b7575de837d3b0bd96b3 Mon Sep 17 00:00:00 2001 +From: Daniel Vetter +Date: Sat, 17 Apr 2010 15:12:03 +0200 +Subject: drm/i915: fix tiling limits for i915 class hw v2 + +From: Daniel Vetter + +commit c36a2a6de59e4a141a68b7575de837d3b0bd96b3 upstream. + +Current code is definitely crap: Largest pitch allowed spills into +the TILING_Y bit of the fence registers ... :( + +I've rewritten the limits check under the assumption that 3rd gen hw +has a 3d pitch limit of 8kb (like 2nd gen). This is supported by an +otherwise totally misleading XXX comment. 
+ +This bug mostly resulted in tiling-corrupted pixmaps because the kernel +allowed too wide buffers to be tiled. Bug brought to the light by the +xf86-video-intel 2.11 release because that unconditionally enabled +tiling for pixmaps, relying on the kernel to check things. Tiling for +the framebuffer was not affected because the ddx does some additional +checks there ensure the buffer is within hw-limits. + +v2: Instead of computing the value that would be written into the +hw fence registers and then checking the limits simply check whether +the stride is above the 8kb limit. To better document the hw, add +some WARN_ONs in i915_write_fence_reg like I've done for the i830 +case (using the right limits). + +Signed-off-by: Daniel Vetter +Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=27449 +Tested-by: Alexander Lam Signed-off-by: Eric Anholt Signed-off-by: Greg Kroah-Hartman --- - drivers/gpu/drm/i915/intel_dp.c | 10 ---------- - 1 file changed, 10 deletions(-) + drivers/gpu/drm/i915/i915_gem.c | 6 ++++++ + drivers/gpu/drm/i915/i915_gem_tiling.c | 22 +++++++++------------- + drivers/gpu/drm/i915/i915_reg.h | 2 +- + 3 files changed, 16 insertions(+), 14 deletions(-) ---- a/drivers/gpu/drm/i915/intel_dp.c -+++ b/drivers/gpu/drm/i915/intel_dp.c -@@ -1180,16 +1180,6 @@ intel_dp_detect(struct drm_connector *co - if (HAS_PCH_SPLIT(dev)) - return ironlake_dp_detect(connector); - -- temp = I915_READ(PORT_HOTPLUG_EN); -- -- I915_WRITE(PORT_HOTPLUG_EN, -- temp | -- DPB_HOTPLUG_INT_EN | -- DPC_HOTPLUG_INT_EN | -- DPD_HOTPLUG_INT_EN); -- -- POSTING_READ(PORT_HOTPLUG_EN); +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -2316,6 +2316,12 @@ static void i915_write_fence_reg(struct + pitch_val = obj_priv->stride / tile_width; + pitch_val = ffs(pitch_val) - 1; + ++ if (obj_priv->tiling_mode == I915_TILING_Y && ++ HAS_128_BYTE_Y_TILING(dev)) ++ WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); ++ else ++ WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); ++ + val = obj_priv->gtt_offset; + if (obj_priv->tiling_mode == I915_TILING_Y) + val |= 1 << I830_FENCE_TILING_Y_SHIFT; +--- a/drivers/gpu/drm/i915/i915_gem_tiling.c ++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c +@@ -357,21 +357,17 @@ i915_tiling_ok(struct drm_device *dev, i + * reg, so dont bother to check the size */ + if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) + return false; +- } else if (IS_I9XX(dev)) { +- uint32_t pitch_val = ffs(stride / tile_width) - 1; - - switch (dp_priv->output_reg) { - case DP_B: - bit = DPB_HOTPLUG_INT_STATUS; -From 9908ff736adf261e749b4887486a32ffa209304c Mon Sep 17 00:00:00 2001 -From: Chris Wilson -Date: Sat, 15 May 2010 09:57:03 +0100 -Subject: drm/i915: Kill dangerous pending-flip debugging - -From: Chris Wilson - -commit 9908ff736adf261e749b4887486a32ffa209304c upstream. +- /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB) +- * instead of 4 (2KB) on 945s. 
+- */ +- if (pitch_val > I915_FENCE_MAX_PITCH_VAL || +- size > (I830_FENCE_MAX_SIZE_VAL << 20)) ++ } else if (IS_GEN3(dev) || IS_GEN2(dev)) { ++ if (stride > 8192) + return false; +- } else { +- uint32_t pitch_val = ffs(stride / tile_width) - 1; + +- if (pitch_val > I830_FENCE_MAX_PITCH_VAL || +- size > (I830_FENCE_MAX_SIZE_VAL << 19)) +- return false; ++ if (IS_GEN3(dev)) { ++ if (size > I830_FENCE_MAX_SIZE_VAL << 20) ++ return false; ++ } else { ++ if (size > I830_FENCE_MAX_SIZE_VAL << 19) ++ return false; ++ } + } + + /* 965+ just needs multiples of tile width */ +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -221,7 +221,7 @@ + #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) + #define I830_FENCE_PITCH_SHIFT 4 + #define I830_FENCE_REG_VALID (1<<0) +-#define I915_FENCE_MAX_PITCH_VAL 0x10 ++#define I915_FENCE_MAX_PITCH_VAL 4 + #define I830_FENCE_MAX_PITCH_VAL 6 + #define I830_FENCE_MAX_SIZE_VAL (1<<8) + +From bad720ff3e8e47a04bd88d9bbc8317e7d7e049d3 Mon Sep 17 00:00:00 2001 +From: Eric Anholt +Date: Thu, 22 Oct 2009 16:11:14 -0700 +Subject: drm/i915: Add initial bits for VGA modesetting bringup on Sandybridge. -We can, by virtue of a vblank interrupt firing in the middle of setting -up the unpin work (i.e. after we set the unpin_work field and before we -write to the ringbuffer) enter intel_finish_page_flip() prior to -receiving the pending flip notification. Therefore we can expect to hit -intel_finish_page_flip() under normal circumstances without a pending flip -and even without installing the pending_flip_obj. This is exacerbated by -aperture thrashing whilst binding the framebuffer +From: Eric Anholt -References: +commit bad720ff3e8e47a04bd88d9bbc8317e7d7e049d3 upstream. - Bug 28079 - "glresize" causes kernel panic in intel_finish_page_flip. - https://bugs.freedesktop.org/show_bug.cgi?id=28079 +[needed for stable as it's just a bunch of macros that other drm patches +need, it changes no code functionality besides adding support for a new +device type. 
- gregkh] -Reported-by: Nick Bowler -Signed-off-by: Chris Wilson -Cc: Jesse Barnes -Reviewed-by: Jesse Barnes Signed-off-by: Eric Anholt Signed-off-by: Greg Kroah-Hartman --- - drivers/gpu/drm/i915/intel_display.c | 6 ------ - 1 file changed, 6 deletions(-) - + drivers/gpu/drm/i915/i915_debugfs.c | 2 - + drivers/gpu/drm/i915/i915_dma.c | 16 ++++++--- + drivers/gpu/drm/i915/i915_drv.h | 26 ++++++++++++++- + drivers/gpu/drm/i915/i915_gem.c | 2 - + drivers/gpu/drm/i915/i915_gem_tiling.c | 2 - + drivers/gpu/drm/i915/i915_irq.c | 18 +++++----- + drivers/gpu/drm/i915/intel_bios.c | 3 + + drivers/gpu/drm/i915/intel_crt.c | 14 ++++---- + drivers/gpu/drm/i915/intel_display.c | 56 ++++++++++++++++----------------- + drivers/gpu/drm/i915/intel_lvds.c | 2 - + drivers/gpu/drm/i915/intel_overlay.c | 2 - + include/drm/drm_pciids.h | 1 + 12 files changed, 88 insertions(+), 56 deletions(-) + +--- a/drivers/gpu/drm/i915/i915_debugfs.c ++++ b/drivers/gpu/drm/i915/i915_debugfs.c +@@ -162,7 +162,7 @@ static int i915_interrupt_info(struct se + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + +- if (!IS_IRONLAKE(dev)) { ++ if (!HAS_PCH_SPLIT(dev)) { + seq_printf(m, "Interrupt enable: %08x\n", + I915_READ(IER)); + seq_printf(m, "Interrupt identity: %08x\n", +--- a/drivers/gpu/drm/i915/i915_dma.c ++++ b/drivers/gpu/drm/i915/i915_dma.c +@@ -978,15 +978,21 @@ static int i915_probe_agp(struct drm_dev + * Some of the preallocated space is taken by the GTT + * and popup. GTT is 1K per MB of aperture size, and popup is 4K. + */ +- if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev)) ++ if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) + overhead = 4096; + else + overhead = (*aperture_size / 1024) + 4096; + + switch (tmp & INTEL_GMCH_GMS_MASK) { + case INTEL_855_GMCH_GMS_DISABLED: +- DRM_ERROR("video memory is disabled\n"); +- return -1; ++ /* XXX: This is what my A1 silicon has. 
*/ ++ if (IS_GEN6(dev)) { ++ stolen = 64 * 1024 * 1024; ++ } else { ++ DRM_ERROR("video memory is disabled\n"); ++ return -1; ++ } ++ break; + case INTEL_855_GMCH_GMS_STOLEN_1M: + stolen = 1 * 1024 * 1024; + break; +@@ -1064,7 +1070,7 @@ static unsigned long i915_gtt_to_phys(st + int gtt_offset, gtt_size; + + if (IS_I965G(dev)) { +- if (IS_G4X(dev) || IS_IRONLAKE(dev)) { ++ if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { + gtt_offset = 2*1024*1024; + gtt_size = 2*1024*1024; + } else { +@@ -1445,7 +1451,7 @@ int i915_driver_load(struct drm_device * + + dev->driver->get_vblank_counter = i915_get_vblank_counter; + dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ +- if (IS_G4X(dev) || IS_IRONLAKE(dev)) { ++ if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { + dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ + dev->driver->get_vblank_counter = gm45_get_vblank_counter; + } +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -1026,7 +1026,7 @@ extern int i915_wait_ring(struct drm_dev + #define IS_845G(dev) ((dev)->pci_device == 0x2562) + #define IS_I85X(dev) ((dev)->pci_device == 0x3582) + #define IS_I865G(dev) ((dev)->pci_device == 0x2572) +-#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx) ++#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx) + #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) + #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) + #define IS_I945G(dev) ((dev)->pci_device == 0x2772) +@@ -1045,8 +1045,29 @@ extern int i915_wait_ring(struct drm_dev + #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) + #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) + ++#define IS_GEN3(dev) (IS_I915G(dev) || \ ++ IS_I915GM(dev) || \ ++ IS_I945G(dev) || \ ++ IS_I945GM(dev) || \ ++ IS_G33(dev) || \ ++ IS_PINEVIEW(dev)) ++#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \ ++ (dev)->pci_device == 0x2982 || \ ++ (dev)->pci_device == 0x2992 || \ ++ (dev)->pci_device == 0x29A2 || \ ++ (dev)->pci_device == 0x2A02 || \ ++ (dev)->pci_device == 0x2A12 || \ ++ (dev)->pci_device == 0x2E02 || \ ++ (dev)->pci_device == 0x2E12 || \ ++ (dev)->pci_device == 0x2E22 || \ ++ (dev)->pci_device == 0x2E32 || \ ++ (dev)->pci_device == 0x2A42 || \ ++ (dev)->pci_device == 0x2E42) ++ + #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) + ++#define IS_GEN6(dev) ((dev)->pci_device == 0x0102) ++ + /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte + * rows, which changed the alignment requirements and fence programming. 
+ */ +@@ -1067,6 +1088,9 @@ extern int i915_wait_ring(struct drm_dev + #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) + #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) + ++#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ ++ IS_GEN6(dev)) ++ + #define PRIMARY_RINGBUFFER_SIZE (128*1024) + + #endif +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -1819,7 +1819,7 @@ i915_do_wait_request(struct drm_device * + return -EIO; + + if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + ier = I915_READ(DEIER) | I915_READ(GTIER); + else + ier = I915_READ(IER); +--- a/drivers/gpu/drm/i915/i915_gem_tiling.c ++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c +@@ -209,7 +209,7 @@ i915_gem_detect_bit_6_swizzle(struct drm + uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; + bool need_disable; + +- if (IS_IRONLAKE(dev)) { ++ if (IS_IRONLAKE(dev) || IS_GEN6(dev)) { + /* On Ironlake whatever DRAM config, GPU always do + * same swizzling setup. + */ +--- a/drivers/gpu/drm/i915/i915_irq.c ++++ b/drivers/gpu/drm/i915/i915_irq.c +@@ -576,7 +576,7 @@ irqreturn_t i915_driver_irq_handler(DRM_ + + atomic_inc(&dev_priv->irq_received); + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + return ironlake_irq_handler(dev); + + iir = I915_READ(IIR); +@@ -737,7 +737,7 @@ void i915_user_irq_get(struct drm_device + + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); + else + i915_enable_irq(dev_priv, I915_USER_INTERRUPT); +@@ -753,7 +753,7 @@ void i915_user_irq_put(struct drm_device + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); + if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); + else + i915_disable_irq(dev_priv, I915_USER_INTERRUPT); +@@ -861,7 +861,7 @@ int i915_enable_vblank(struct drm_device + return -EINVAL; + + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + ironlake_enable_display_irq(dev_priv, (pipe == 0) ? + DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); + else if (IS_I965G(dev)) +@@ -883,7 +883,7 @@ void i915_disable_vblank(struct drm_devi + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + ironlake_disable_display_irq(dev_priv, (pipe == 0) ? + DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); + else +@@ -897,7 +897,7 @@ void i915_enable_interrupt (struct drm_d + { + struct drm_i915_private *dev_priv = dev->dev_private; + +- if (!IS_IRONLAKE(dev)) ++ if (!HAS_PCH_SPLIT(dev)) + opregion_enable_asle(dev); + dev_priv->irq_enabled = 1; + } +@@ -1076,7 +1076,7 @@ void i915_driver_irq_preinstall(struct d + INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); + INIT_WORK(&dev_priv->error_work, i915_error_work_func); + +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + ironlake_irq_preinstall(dev); + return; + } +@@ -1108,7 +1108,7 @@ int i915_driver_irq_postinstall(struct d + + dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + return ironlake_irq_postinstall(dev); + + /* Unmask the interrupts that we always want on. 
*/ +@@ -1196,7 +1196,7 @@ void i915_driver_irq_uninstall(struct dr + + dev_priv->vblank_pipe = 0; + +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + ironlake_irq_uninstall(dev); + return; + } +--- a/drivers/gpu/drm/i915/intel_bios.c ++++ b/drivers/gpu/drm/i915/intel_bios.c +@@ -247,6 +247,7 @@ static void + parse_general_features(struct drm_i915_private *dev_priv, + struct bdb_header *bdb) + { ++ struct drm_device *dev = dev_priv->dev; + struct bdb_general_features *general; + + /* Set sensible defaults in case we can't find the general block */ +@@ -263,7 +264,7 @@ parse_general_features(struct drm_i915_p + if (IS_I85X(dev_priv->dev)) + dev_priv->lvds_ssc_freq = + general->ssc_freq ? 66 : 48; +- else if (IS_IRONLAKE(dev_priv->dev)) ++ else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev)) + dev_priv->lvds_ssc_freq = + general->ssc_freq ? 100 : 120; + else +--- a/drivers/gpu/drm/i915/intel_crt.c ++++ b/drivers/gpu/drm/i915/intel_crt.c +@@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_en + struct drm_i915_private *dev_priv = dev->dev_private; + u32 temp, reg; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + reg = PCH_ADPA; + else + reg = ADPA; +@@ -113,7 +113,7 @@ static void intel_crt_mode_set(struct dr + else + dpll_md_reg = DPLL_B_MD; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + adpa_reg = PCH_ADPA; + else + adpa_reg = ADPA; +@@ -122,7 +122,7 @@ static void intel_crt_mode_set(struct dr + * Disable separate mode multiplier used when cloning SDVO to CRT + * XXX this needs to be adjusted when we really are cloning + */ +- if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { ++ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { + dpll_md = I915_READ(dpll_md_reg); + I915_WRITE(dpll_md_reg, + dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); +@@ -136,11 +136,11 @@ static void intel_crt_mode_set(struct dr + + if (intel_crtc->pipe == 0) { + adpa |= ADPA_PIPE_A_SELECT; +- if (!IS_IRONLAKE(dev)) ++ if (!HAS_PCH_SPLIT(dev)) + I915_WRITE(BCLRPAT_A, 0); + } else { + adpa |= ADPA_PIPE_B_SELECT; +- if (!IS_IRONLAKE(dev)) ++ if (!HAS_PCH_SPLIT(dev)) + I915_WRITE(BCLRPAT_B, 0); + } + +@@ -202,7 +202,7 @@ static bool intel_crt_detect_hotplug(str + u32 hotplug_en; + int i, tries = 0; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + return intel_ironlake_crt_detect_hotplug(connector); + + /* +@@ -524,7 +524,7 @@ void intel_crt_init(struct drm_device *d + &intel_output->enc); + + /* Set up the DDC bus. */ +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + i2c_reg = PCH_GPIOA; + else { + i2c_reg = GPIOA; --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c -@@ -4155,12 +4155,6 @@ void intel_finish_page_flip(struct drm_d - spin_lock_irqsave(&dev->event_lock, flags); - work = intel_crtc->unpin_work; - if (work == NULL || !work->pending) { -- if (work && !work->pending) { -- obj_priv = to_intel_bo(work->pending_flip_obj); -- DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", -- obj_priv, -- atomic_read(&obj_priv->pending_flip)); -- } - spin_unlock_irqrestore(&dev->event_lock, flags); - return; +@@ -232,7 +232,7 @@ struct intel_limit { + #define G4X_P2_DISPLAY_PORT_FAST 10 + #define G4X_P2_DISPLAY_PORT_LIMIT 0 + +-/* Ironlake */ ++/* Ironlake / Sandybridge */ + /* as we calculate clock using (register_value + 2) for + N/M1/M2, so here the range value for them is (actual_value-2). 
+ */ +@@ -690,7 +690,7 @@ static const intel_limit_t *intel_limit( + struct drm_device *dev = crtc->dev; + const intel_limit_t *limit; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + limit = intel_ironlake_limit(crtc); + else if (IS_G4X(dev)) { + limit = intel_g4x_limit(crtc); +@@ -1366,7 +1366,7 @@ intel_pipe_set_base(struct drm_crtc *crt + dspcntr &= ~DISPPLANE_TILED; } -From ac0c6b5ad3b3b513e1057806d4b7627fcc0ecc27 Mon Sep 17 00:00:00 2001 -From: Chris Wilson -Date: Thu, 27 May 2010 13:18:18 +0100 -Subject: drm/i915: Rebind bo if currently bound with incorrect alignment. + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + /* must disable */ + dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; + +@@ -1427,7 +1427,7 @@ static void i915_disable_vga (struct drm + u8 sr1; + u32 vga_reg; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + vga_reg = CPU_VGACNTRL; + else + vga_reg = VGACNTRL; +@@ -2111,7 +2111,7 @@ static bool intel_crtc_mode_fixup(struct + struct drm_display_mode *adjusted_mode) + { + struct drm_device *dev = crtc->dev; +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + /* FDI link clock is fixed at 2.7G */ + if (mode->clock * 3 > 27000 * 4) + return MODE_CLOCK_HIGH; +@@ -2967,7 +2967,7 @@ static int intel_crtc_mode_set(struct dr + refclk / 1000); + } else if (IS_I9XX(dev)) { + refclk = 96000; +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + refclk = 120000; /* 120Mhz refclk */ + } else { + refclk = 48000; +@@ -3025,7 +3025,7 @@ static int intel_crtc_mode_set(struct dr + } + + /* FDI link */ +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + int lane, link_bw, bpp; + /* eDP doesn't require FDI link, so just set DP M/N + according to current link config */ +@@ -3102,7 +3102,7 @@ static int intel_crtc_mode_set(struct dr + * PCH B stepping, previous chipset stepping should be + * ignoring this setting. 
+ */ +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + temp = I915_READ(PCH_DREF_CONTROL); + /* Always enable nonspread source */ + temp &= ~DREF_NONSPREAD_SOURCE_MASK; +@@ -3149,7 +3149,7 @@ static int intel_crtc_mode_set(struct dr + reduced_clock.m2; + } + +- if (!IS_IRONLAKE(dev)) ++ if (!HAS_PCH_SPLIT(dev)) + dpll = DPLL_VGA_MODE_DIS; + + if (IS_I9XX(dev)) { +@@ -3162,7 +3162,7 @@ static int intel_crtc_mode_set(struct dr + sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; +- else if (IS_IRONLAKE(dev)) ++ else if (HAS_PCH_SPLIT(dev)) + dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; + } + if (is_dp) +@@ -3174,7 +3174,7 @@ static int intel_crtc_mode_set(struct dr + else { + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; + /* also FPA1 */ +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; + if (IS_G4X(dev) && has_reduced_clock) + dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; +@@ -3193,7 +3193,7 @@ static int intel_crtc_mode_set(struct dr + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; + break; + } +- if (IS_I965G(dev) && !IS_IRONLAKE(dev)) ++ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) + dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); + } else { + if (is_lvds) { +@@ -3227,7 +3227,7 @@ static int intel_crtc_mode_set(struct dr + + /* Ironlake's plane is forced to pipe, bit 24 is to + enable color space conversion */ +- if (!IS_IRONLAKE(dev)) { ++ if (!HAS_PCH_SPLIT(dev)) { + if (pipe == 0) + dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; + else +@@ -3254,14 +3254,14 @@ static int intel_crtc_mode_set(struct dr + + + /* Disable the panel fitter if it was on our pipe */ +- if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe) ++ if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe) + I915_WRITE(PFIT_CONTROL, 0); + + DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); + drm_mode_debug_printmodeline(mode); + + /* assign to Ironlake registers */ +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + fp_reg = pch_fp_reg; + dpll_reg = pch_dpll_reg; + } +@@ -3282,7 +3282,7 @@ static int intel_crtc_mode_set(struct dr + if (is_lvds) { + u32 lvds; + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + lvds_reg = PCH_LVDS; + + lvds = I915_READ(lvds_reg); +@@ -3328,7 +3328,7 @@ static int intel_crtc_mode_set(struct dr + /* Wait for the clocks to stabilize. */ + udelay(150); + +- if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { ++ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { + if (is_sdvo) { + sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; + I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | +@@ -3375,14 +3375,14 @@ static int intel_crtc_mode_set(struct dr + /* pipesrc and dspsize control the size that is scaled from, which should + * always be the user's requested size. 
+ */ +- if (!IS_IRONLAKE(dev)) { ++ if (!HAS_PCH_SPLIT(dev)) { + I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | + (mode->hdisplay - 1)); + I915_WRITE(dsppos_reg, 0); + } + I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); + +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); + I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); + I915_WRITE(link_m1_reg, m_n.link_m); +@@ -3403,7 +3403,7 @@ static int intel_crtc_mode_set(struct dr + + intel_wait_for_vblank(dev); + +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + /* enable address swizzle for tiling buffer */ + temp = I915_READ(DISP_ARB_CTL); + I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); +@@ -3438,7 +3438,7 @@ void intel_crtc_load_lut(struct drm_crtc + return; + + /* use legacy palette for Ironlake */ +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A : + LGC_PALETTE_B; + +@@ -3922,7 +3922,7 @@ static void intel_increase_pllclock(stru + int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; + int dpll = I915_READ(dpll_reg); + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + return; + + if (!dev_priv->lvds_downclock_avail) +@@ -3961,7 +3961,7 @@ static void intel_decrease_pllclock(stru + int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; + int dpll = I915_READ(dpll_reg); + +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + return; + + if (!dev_priv->lvds_downclock_avail) +@@ -4382,7 +4382,7 @@ static void intel_setup_outputs(struct d + if (IS_MOBILE(dev) && !IS_I830(dev)) + intel_lvds_init(dev); + +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + int found; + + if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) +@@ -4451,7 +4451,7 @@ static void intel_setup_outputs(struct d + DRM_DEBUG_KMS("probing DP_D\n"); + intel_dp_init(dev, DP_D); + } +- } else if (IS_I8XX(dev)) ++ } else if (IS_GEN2(dev)) + intel_dvo_init(dev); + + if (SUPPORTS_TV(dev)) +@@ -4599,7 +4599,7 @@ void intel_init_clock_gating(struct drm_ + * Disable clock gating reported to work incorrectly according to the + * specs, but enable as much else as we can. + */ +- if (IS_IRONLAKE(dev)) { ++ if (HAS_PCH_SPLIT(dev)) { + return; + } else if (IS_G4X(dev)) { + uint32_t dspclk_gate; +@@ -4672,7 +4672,7 @@ static void intel_init_display(struct dr + struct drm_i915_private *dev_priv = dev->dev_private; + + /* We always want a DPMS function */ +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + dev_priv->display.dpms = ironlake_crtc_dpms; + else + dev_priv->display.dpms = i9xx_crtc_dpms; +@@ -4715,7 +4715,7 @@ static void intel_init_display(struct dr + i830_get_display_clock_speed; + + /* For FIFO watermark updates */ +- if (IS_IRONLAKE(dev)) ++ if (HAS_PCH_SPLIT(dev)) + dev_priv->display.update_wm = NULL; + else if (IS_G4X(dev)) + dev_priv->display.update_wm = g4x_update_wm; +--- a/drivers/gpu/drm/i915/intel_lvds.c ++++ b/drivers/gpu/drm/i915/intel_lvds.c +@@ -661,7 +661,7 @@ static enum drm_connector_status intel_l + /* ACPI lid methods were generally unreliable in this generation, so + * don't even bother. 
+ */ +- if (IS_I8XX(dev)) ++ if (IS_GEN2(dev)) + return connector_status_connected; + + if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) +--- a/drivers/gpu/drm/i915/intel_overlay.c ++++ b/drivers/gpu/drm/i915/intel_overlay.c +@@ -172,7 +172,7 @@ struct overlay_registers { + #define OFC_UPDATE 0x1 + + #define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev)) +-#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev)) ++#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev)) + + + static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay) +From 79b9517a33a283c5d9db875c263670ed1e055f7e Mon Sep 17 00:00:00 2001 +From: Dave Airlie +Date: Mon, 19 Apr 2010 17:54:31 +1000 +Subject: drm/radeon/kms: add FireMV 2400 PCI ID. -From: Chris Wilson +From: Dave Airlie -commit ac0c6b5ad3b3b513e1057806d4b7627fcc0ecc27 upstream. +commit 79b9517a33a283c5d9db875c263670ed1e055f7e upstream. -Whilst pinning the buffer, check that that its current alignment -matches the requested alignment. If it does not, rebind. +This is an M24/X600 chip. -This should clear up any final render errors whilst resuming, -for reference: +From RH# 581927 - Bug 27070 - [i915] Page table errors with empty ringbuffer - https://bugs.freedesktop.org/show_bug.cgi?id=27070 +Signed-off-by: Dave Airlie +Signed-off-by: Greg Kroah-Hartman - Bug 15502 - render error detected, EIR: 0x00000010 - https://bugzilla.kernel.org/show_bug.cgi?id=15502 +--- + include/drm/drm_pciids.h | 1 + + 1 file changed, 1 insertion(+) - Bug 13844 - i915 error: "render error detected" - https://bugzilla.kernel.org/show_bug.cgi?id=13844 +--- a/include/drm/drm_pciids.h ++++ b/include/drm/drm_pciids.h +@@ -6,6 +6,7 @@ + {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ + {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \ +From 30f69f3fb20bd719b5e1bf879339914063d38f47 Mon Sep 17 00:00:00 2001 +From: Jerome Glisse +Date: Fri, 16 Apr 2010 18:46:35 +0200 +Subject: drm/radeon/kms: fix rs600 tlb flush -Signed-off-by: Chris Wilson -Signed-off-by: Eric Anholt +From: Jerome Glisse + +commit 30f69f3fb20bd719b5e1bf879339914063d38f47 upstream. + +Typo in in flush leaded to no flush of the RS600 tlb which +ultimately leaded to massive system ram corruption, with +this patch everythings seems to work properly. 
+ +Signed-off-by: Jerome Glisse +Signed-off-by: Dave Airlie Signed-off-by: Greg Kroah-Hartman --- - drivers/gpu/drm/i915/i915_gem.c | 11 +++++++++++ - 1 file changed, 11 insertions(+) + drivers/gpu/drm/radeon/rs600.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) ---- a/drivers/gpu/drm/i915/i915_gem.c -+++ b/drivers/gpu/drm/i915/i915_gem.c -@@ -4239,6 +4239,17 @@ i915_gem_object_pin(struct drm_gem_objec - int ret; +--- a/drivers/gpu/drm/radeon/rs600.c ++++ b/drivers/gpu/drm/radeon/rs600.c +@@ -175,7 +175,7 @@ void rs600_gart_tlb_flush(struct radeon_ + WREG32_MC(R_000100_MC_PT0_CNTL, tmp); - i915_verify_inactive(dev, __FILE__, __LINE__); -+ -+ if (obj_priv->gtt_space != NULL) { -+ if (alignment == 0) -+ alignment = i915_gem_get_gtt_alignment(obj); -+ if (obj_priv->gtt_offset & (alignment - 1)) { -+ ret = i915_gem_object_unbind(obj); -+ if (ret) -+ return ret; -+ } -+ } -+ - if (obj_priv->gtt_space == NULL) { - ret = i915_gem_object_bind_to_gtt(obj, alignment); - if (ret) -From cf22f20ade30f8c03955324aaf27b1049e182600 Mon Sep 17 00:00:00 2001 -From: Dave Airlie -Date: Sat, 29 May 2010 06:50:37 +1000 -Subject: drm/radeon: fix the r100/r200 ums block 0 page fix + tmp = RREG32_MC(R_000100_MC_PT0_CNTL); +- tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1); ++ tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1); + WREG32_MC(R_000100_MC_PT0_CNTL, tmp); + + tmp = RREG32_MC(R_000100_MC_PT0_CNTL); +From ba1163de2f74d624e7b0e530c4104c98ede0045a Mon Sep 17 00:00:00 2001 +From: Adam Jackson +Date: Tue, 6 Apr 2010 16:11:00 +0000 +Subject: drm/edid/quirks: Envision EN2028 -From: Dave Airlie +From: Adam Jackson -commit cf22f20ade30f8c03955324aaf27b1049e182600 upstream. +commit ba1163de2f74d624e7b0e530c4104c98ede0045a upstream. -airlied -> brown paper bag. +Claims 1280x1024 preferred, physically 1600x1200 -I blame Hi-5 or the Wiggles for lowering my IQ, move the fix inside some -brackets instead of breaking everything in site. +cf. http://bugzilla.redhat.com/530399 +Signed-off-by: Adam Jackson Signed-off-by: Dave Airlie Signed-off-by: Greg Kroah-Hartman --- - drivers/gpu/drm/radeon/radeon_state.c | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) + drivers/gpu/drm/drm_edid.c | 2 ++ + 1 file changed, 2 insertions(+) ---- a/drivers/gpu/drm/radeon/radeon_state.c -+++ b/drivers/gpu/drm/radeon/radeon_state.c -@@ -900,9 +900,10 @@ static void radeon_cp_dispatch_clear(str - flags |= RADEON_FRONT; - } - if (flags & (RADEON_DEPTH|RADEON_STENCIL)) { -- if (!dev_priv->have_z_offset) -+ if (!dev_priv->have_z_offset) { - printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n"); -- flags &= ~(RADEON_DEPTH | RADEON_STENCIL); -+ flags &= ~(RADEON_DEPTH | RADEON_STENCIL); -+ } - } +--- a/drivers/gpu/drm/drm_edid.c ++++ b/drivers/gpu/drm/drm_edid.c +@@ -85,6 +85,8 @@ static struct edid_quirk { - if (flags & (RADEON_FRONT | RADEON_BACK)) { -From 10b06122afcc78468bd1d009633cb71e528acdc5 Mon Sep 17 00:00:00 2001 -From: Jerome Glisse -Date: Fri, 21 May 2010 18:48:54 +0200 -Subject: drm/radeon/kms: release AGP bridge at suspend - -From: Jerome Glisse + /* Envision Peripherals, Inc. 
EN-7100e */ + { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH }, ++ /* Envision EN2028 */ ++ { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 }, + + /* Funai Electronics PM36B */ + { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | +From da58405860b992d2bb21ebae5d685fe3204dd3f0 Mon Sep 17 00:00:00 2001 +From: Chris Wilson +Date: Thu, 18 Mar 2010 11:56:54 +0000 +Subject: drm: Return ENODEV if the inode mapping changes -commit 10b06122afcc78468bd1d009633cb71e528acdc5 upstream. +From: Chris Wilson -I think it's good to release the AGP bridge at suspend -and reacquire it at resume. Also fix : -https://bugzilla.kernel.org/show_bug.cgi?id=15969 +commit da58405860b992d2bb21ebae5d685fe3204dd3f0 upstream. + +Replace a BUG_ON with an error code in the event that the inode mapping +changes between calls to drm_open. This may happen for instance if udev +is loaded subsequent to the original opening of the device: + +[ 644.291870] kernel BUG at drivers/gpu/drm/drm_fops.c:146! +[ 644.291876] invalid opcode: 0000 [#1] SMP +[ 644.291882] last sysfs file: /sys/kernel/uevent_seqnum +[ 644.291888] +[ 644.291895] Pid: 7276, comm: lt-cairo-test-s Not tainted 2.6.34-rc1 #2 N150/N210/N220 /N150/N210/N220 +[ 644.291903] EIP: 0060:[] EFLAGS: 00210283 CPU: 0 +[ 644.291912] EIP is at drm_open+0x4b1/0x4e2 +[ 644.291918] EAX: f72d8d18 EBX: f790a400 ECX: f73176b8 EDX: 00000000 +[ 644.291923] ESI: f790a414 EDI: f790a414 EBP: f647ae20 ESP: f647adfc +[ 644.291929] DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068 +[ 644.291937] Process lt-cairo-test-s (pid: 7276, ti=f647a000 task=f73f5c80 task.ti=f647a000) +[ 644.291941] Stack: +[ 644.291945] 00000000 f7bb7400 00000080 f6451100 f73176b8 f6479214 f6451100 f73176b8 +[ 644.291957] <0> c1297ce0 f647ae34 c11c6c04 f73176b8 f7949800 00000000 f647ae54 c1080ac5 +[ 644.291969] <0> f7949800 f6451100 00000000 f6451100 f73176b8 f6452780 f647ae70 c107d1e6 +[ 644.291982] Call Trace: +[ 644.291991] [] ? drm_stub_open+0x8a/0xb8 +[ 644.292000] [] ? chrdev_open+0xef/0x106 +[ 644.292008] [] ? __dentry_open+0xd4/0x1a6 +[ 644.292015] [] ? nameidata_to_filp+0x31/0x45 +[ 644.292022] [] ? chrdev_open+0x0/0x106 +[ 644.292030] [] ? do_last+0x346/0x423 +[ 644.292037] [] ? do_filp_open+0x190/0x415 +[ 644.292046] [] ? handle_mm_fault+0x214/0x710 +[ 644.292053] [] ? do_sys_open+0x4d/0xe9 +[ 644.292061] [] ? do_page_fault+0x211/0x23f +[ 644.292068] [] ? sys_open+0x23/0x2b +[ 644.292075] [] ? 
sysenter_do_call+0x12/0x26 +[ 644.292079] Code: 89 f0 89 55 dc e8 8d 96 0a 00 8b 45 e0 8b 55 dc 83 78 04 01 75 28 8b 83 18 02 00 00 85 c0 74 0f 8b 4d ec 3b 81 ac 00 00 00 74 13 <0f> 0b eb fe 8b 4d ec 8b 81 ac 00 00 00 89 83 18 02 00 00 89 f0 +[ 644.292143] EIP: [] drm_open+0x4b1/0x4e2 SS:ESP 0068:f647adfc +[ 644.292175] ---[ end trace 2ddd476af89a60fa ]--- -Signed-off-by: Jerome Glisse +Signed-off-by: Chris Wilson Signed-off-by: Dave Airlie Signed-off-by: Greg Kroah-Hartman --- - drivers/gpu/drm/radeon/radeon.h | 1 + - drivers/gpu/drm/radeon/radeon_agp.c | 5 +++++ - drivers/gpu/drm/radeon/radeon_device.c | 2 ++ - 3 files changed, 8 insertions(+) - ---- a/drivers/gpu/drm/radeon/radeon.h -+++ b/drivers/gpu/drm/radeon/radeon.h -@@ -566,6 +566,7 @@ typedef int (*radeon_packet3_check_t)(st - */ - int radeon_agp_init(struct radeon_device *rdev); - void radeon_agp_resume(struct radeon_device *rdev); -+void radeon_agp_suspend(struct radeon_device *rdev); - void radeon_agp_fini(struct radeon_device *rdev); - - ---- a/drivers/gpu/drm/radeon/radeon_agp.c -+++ b/drivers/gpu/drm/radeon/radeon_agp.c -@@ -270,3 +270,8 @@ void radeon_agp_fini(struct radeon_devic + drivers/gpu/drm/drm_fops.c | 16 +++++++++------- + 1 file changed, 9 insertions(+), 7 deletions(-) + +--- a/drivers/gpu/drm/drm_fops.c ++++ b/drivers/gpu/drm/drm_fops.c +@@ -140,14 +140,16 @@ int drm_open(struct inode *inode, struct + spin_unlock(&dev->count_lock); } - #endif - } -+ -+void radeon_agp_suspend(struct radeon_device *rdev) -+{ -+ radeon_agp_fini(rdev); -+} ---- a/drivers/gpu/drm/radeon/radeon_device.c -+++ b/drivers/gpu/drm/radeon/radeon_device.c -@@ -748,6 +748,8 @@ int radeon_suspend_kms(struct drm_device - /* evict remaining vram memory */ - radeon_bo_evict_vram(rdev); + out: +- mutex_lock(&dev->struct_mutex); +- if (minor->type == DRM_MINOR_LEGACY) { +- BUG_ON((dev->dev_mapping != NULL) && +- (dev->dev_mapping != inode->i_mapping)); +- if (dev->dev_mapping == NULL) +- dev->dev_mapping = inode->i_mapping; ++ if (!retcode) { ++ mutex_lock(&dev->struct_mutex); ++ if (minor->type == DRM_MINOR_LEGACY) { ++ if (dev->dev_mapping == NULL) ++ dev->dev_mapping = inode->i_mapping; ++ else if (dev->dev_mapping != inode->i_mapping) ++ retcode = -ENODEV; ++ } ++ mutex_unlock(&dev->struct_mutex); + } +- mutex_unlock(&dev->struct_mutex); -+ radeon_agp_suspend(rdev); -+ - pci_save_state(dev->pdev); - if (state.event == PM_EVENT_SUSPEND) { - /* Shut down the device */ -From 1ff26a3604d0292988d4cade0e49ba9918dbfd46 Mon Sep 17 00:00:00 2001 -From: Alex Deucher -Date: Tue, 18 May 2010 00:23:15 -0400 -Subject: drm/radeon/kms/atom: fix typo in LVDS panel info parsing + return retcode; + } +From 725398322d05486109375fbb85c3404108881e17 Mon Sep 17 00:00:00 2001 +From: Zhao Yakui +Date: Thu, 4 Mar 2010 08:25:55 +0000 +Subject: drm: remove the EDID blob stored in the EDID property when it is disconnected -From: Alex Deucher +From: Zhao Yakui -commit 1ff26a3604d0292988d4cade0e49ba9918dbfd46 upstream. +commit 725398322d05486109375fbb85c3404108881e17 upstream. -Fixes LVDS issues on some laptops; notably laptops with -2048x1536 panels. +Now the EDID property will be updated when the corresponding EDID can be +obtained from the external display device. But after the external device +is plugged-out, the EDID property is not updated. In such case we still +get the corresponding EDID property although it is already detected as +disconnected. 
-Signed-off-by: Alex Deucher -Signed-off-by: Dave Airlie -Signed-off-by: Greg Kroah-Hartman +https://bugs.freedesktop.org/show_bug.cgi?id=26743 ---- - drivers/gpu/drm/radeon/radeon_atombios.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/gpu/drm/radeon/radeon_atombios.c -+++ b/drivers/gpu/drm/radeon/radeon_atombios.c -@@ -1173,7 +1173,7 @@ struct radeon_encoder_atom_dig *radeon_a - lvds->native_mode.vtotal = lvds->native_mode.vdisplay + - le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time); - lvds->native_mode.vsync_start = lvds->native_mode.vdisplay + -- le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); -+ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset); - lvds->native_mode.vsync_end = lvds->native_mode.vsync_start + - le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); - lvds->panel_pwr_delay = -From 2bfcc0fc698d550689ef020c73b2d977b73e728c Mon Sep 17 00:00:00 2001 -From: Alex Deucher -Date: Tue, 18 May 2010 19:26:46 -0400 -Subject: drm/radeon/kms: reset ddc_bus in object header parsing - -From: Alex Deucher - -commit 2bfcc0fc698d550689ef020c73b2d977b73e728c upstream. - -Some LVDS connectors don't have a ddc bus, so reset the -ddc bus to invalid before parsing the next connector -to avoid using stale ddc bus data. Should fix -fdo bug 28164. - -Signed-off-by: Alex Deucher +Signed-off-by: Zhao Yakui +Signed-off-by: Zhenyu Wang Signed-off-by: Dave Airlie Signed-off-by: Greg Kroah-Hartman --- - drivers/gpu/drm/radeon/radeon_atombios.c | 5 ++--- - 1 file changed, 2 insertions(+), 3 deletions(-) - ---- a/drivers/gpu/drm/radeon/radeon_atombios.c -+++ b/drivers/gpu/drm/radeon/radeon_atombios.c -@@ -530,6 +530,8 @@ bool radeon_get_atom_connector_info_from - } - - /* look up gpio for ddc, hpd */ -+ ddc_bus.valid = false; -+ hpd.hpd = RADEON_HPD_NONE; - if ((le16_to_cpu(path->usDeviceTag) & - (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) { - for (j = 0; j < con_obj->ucNumberOfObjects; j++) { -@@ -585,9 +587,6 @@ bool radeon_get_atom_connector_info_from - break; - } - } -- } else { -- hpd.hpd = RADEON_HPD_NONE; -- ddc_bus.valid = false; - } - - /* needed for aux chan transactions */ -From 61dd98fad58f945ed720ba132681acb58fcee015 Mon Sep 17 00:00:00 2001 -From: Adam Jackson -Date: Thu, 13 May 2010 14:55:28 -0400 -Subject: drm/edid: Fix 1024x768@85Hz + drivers/gpu/drm/drm_crtc_helper.c | 1 + + 1 file changed, 1 insertion(+) -From: Adam Jackson +--- a/drivers/gpu/drm/drm_crtc_helper.c ++++ b/drivers/gpu/drm/drm_crtc_helper.c +@@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_mo + if (connector->status == connector_status_disconnected) { + DRM_DEBUG_KMS("%s is disconnected\n", + drm_get_connector_name(connector)); ++ drm_mode_connector_update_edid_property(connector, NULL); + goto prune; + } + +From 44fef22416886a04d432043f741a6faf2c6ffefd Mon Sep 17 00:00:00 2001 +From: Ben Skeggs +Date: Thu, 18 Feb 2010 09:12:09 +1000 +Subject: drm/edid: allow certain bogus edids to hit a fixup path rather than fail -commit 61dd98fad58f945ed720ba132681acb58fcee015 upstream. +From: Ben Skeggs -Having hsync both start and end on pixel 1072 ain't gonna work very -well. Matches the X server's list. +commit 44fef22416886a04d432043f741a6faf2c6ffefd upstream. 
-Signed-off-by: Adam Jackson -Tested-By: Michael Tokarev +Signed-off-by: Ben Skeggs Signed-off-by: Dave Airlie Signed-off-by: Greg Kroah-Hartman --- - drivers/gpu/drm/drm_edid.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) + drivers/gpu/drm/drm_edid.c | 9 --------- + 1 file changed, 9 deletions(-) --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c -@@ -335,7 +335,7 @@ static struct drm_display_mode drm_dmt_m - DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1024x768@85Hz */ - { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, -- 1072, 1376, 0, 768, 769, 772, 808, 0, -+ 1168, 1376, 0, 768, 769, 772, 808, 0, - DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - /* 1152x864@75Hz */ - { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, -From 45737447ed160faaba036c0709226bf9057f7b72 Mon Sep 17 00:00:00 2001 -From: Alex Deucher -Date: Thu, 20 May 2010 11:26:11 -0400 -Subject: drm/radeon/kms: don't default display priority to high on rs4xx +@@ -707,15 +707,6 @@ static struct drm_display_mode *drm_mode + mode->vsync_end = mode->vsync_start + vsync_pulse_width; + mode->vtotal = mode->vdisplay + vblank; + +- /* perform the basic check for the detailed timing */ +- if (mode->hsync_end > mode->htotal || +- mode->vsync_end > mode->vtotal) { +- drm_mode_destroy(dev, mode); +- DRM_DEBUG_KMS("Incorrect detailed timing. " +- "Sync is beyond the blank.\n"); +- return NULL; +- } +- + /* Some EDIDs have bogus h/vtotal values */ + if (mode->hsync_end > mode->htotal) + mode->htotal = mode->hsync_end + 1; +From 0725e95ea56698774e893edb7e7276b1d6890954 Mon Sep 17 00:00:00 2001 +From: Bernhard Rosenkraenzer +Date: Wed, 10 Mar 2010 12:36:43 +0100 +Subject: USB: qcserial: add new device ids -From: Alex Deucher +From: Bernhard Rosenkraenzer -commit 45737447ed160faaba036c0709226bf9057f7b72 upstream. +commit 0725e95ea56698774e893edb7e7276b1d6890954 upstream. -Seems to cause issues with the sound hardware. Fixes kernel -bug 15982: -https://bugzilla.kernel.org/show_bug.cgi?id=15982 +This patch adds various USB device IDs for Gobi 2000 devices, as found in the +drivers available at https://www.codeaurora.org/wiki/GOBI_Releases -Signed-off-by: Alex Deucher -Signed-off-by: Dave Airlie +Signed-off-by: Bernhard Rosenkraenzer Signed-off-by: Greg Kroah-Hartman --- - drivers/gpu/drm/radeon/radeon_display.c | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - ---- a/drivers/gpu/drm/radeon/radeon_display.c -+++ b/drivers/gpu/drm/radeon/radeon_display.c -@@ -978,8 +978,11 @@ void radeon_update_display_priority(stru - /* set display priority to high for r3xx, rv515 chips - * this avoids flickering due to underflow to the - * display controllers during heavy acceleration. -+ * Don't force high on rs4xx igp chips as it seems to -+ * affect the sound card. See kernel bug 15982. 
- */ -- if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) -+ if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) && -+ !(rdev->flags & RADEON_IS_IGP)) - rdev->disp_priority = 2; - else - rdev->disp_priority = 0; -From 654fc6073f68efa3b6c466825749e73e7fbb92cd Mon Sep 17 00:00:00 2001 -From: Chris Wilson -Date: Thu, 27 May 2010 13:18:21 +0100 -Subject: drm/i915: Reject bind_to_gtt() early if object > aperture + drivers/usb/serial/qcserial.c | 29 +++++++++++++++++++++++++++++ + 1 file changed, 29 insertions(+) + +--- a/drivers/usb/serial/qcserial.c ++++ b/drivers/usb/serial/qcserial.c +@@ -47,6 +47,35 @@ static struct usb_device_id id_table[] = + {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ + {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ + {USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ ++ {USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */ ++ {USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ ++ {USB_DEVICE(0x05c6, 0x9224)}, /* Sony Gobi 2000 QDL device (N0279, VU730) */ ++ {USB_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ ++ {USB_DEVICE(0x05c6, 0x9244)}, /* Samsung Gobi 2000 QDL device (VL176) */ ++ {USB_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */ ++ {USB_DEVICE(0x03f0, 0x241d)}, /* HP Gobi 2000 QDL device (VP412) */ ++ {USB_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */ ++ {USB_DEVICE(0x05c6, 0x9214)}, /* Acer Gobi 2000 QDL device (VP413) */ ++ {USB_DEVICE(0x05c6, 0x9215)}, /* Acer Gobi 2000 Modem device (VP413) */ ++ {USB_DEVICE(0x05c6, 0x9264)}, /* Asus Gobi 2000 QDL device (VR305) */ ++ {USB_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */ ++ {USB_DEVICE(0x05c6, 0x9234)}, /* Top Global Gobi 2000 QDL device (VR306) */ ++ {USB_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */ ++ {USB_DEVICE(0x05c6, 0x9274)}, /* iRex Technologies Gobi 2000 QDL device (VR307) */ ++ {USB_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */ ++ {USB_DEVICE(0x1199, 0x9000)}, /* Sierra Wireless Gobi 2000 QDL device (VT773) */ ++ {USB_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x1199, 0x9004)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x1199, 0x9005)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x1199, 0x9006)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x1199, 0x9007)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */ ++ {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ + { } /* Terminating entry */ + }; + MODULE_DEVICE_TABLE(usb, id_table); +From 70136081fc67ea77d849f86fa323e5773c8e40ea Mon Sep 17 00:00:00 2001 +From: Theodore Kilgore +Date: Fri, 25 Dec 2009 05:15:10 -0300 +Subject: V4L/DVB (13991): gspca_mr973010a: Fix cif type 1 cameras not streaming on UHCI controllers -From: Chris Wilson +From: Theodore Kilgore 
-commit 654fc6073f68efa3b6c466825749e73e7fbb92cd upstream. +commit 70136081fc67ea77d849f86fa323e5773c8e40ea upstream. -If the object is bigger than the entire aperture, reject it early -before evicting everything in a vain attempt to find space. +If you read the mail to Oliver Neukum on the linux-usb list, then you know +that I found a cure for the mysterious problem that the MR97310a CIF "type +1" cameras have been freezing up and refusing to stream if hooked up to a +machine with a UHCI controller. -v2: Use E2BIG as suggested by Owain G. Ainsworth. +Namely, the cure is that if the camera is an mr97310a CIF type 1 camera, you +have to send it 0xa0, 0x00. Somehow, this is a timing reset command, or +such. It un-blocks whatever was previously stopping the CIF type 1 cameras +from working on the UHCI-based machines. -Signed-off-by: Chris Wilson -Signed-off-by: Eric Anholt +Signed-off-by: Theodore Kilgore +Signed-off-by: Hans de Goede +Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- - drivers/gpu/drm/i915/i915_gem.c | 8 ++++++++ - 1 file changed, 8 insertions(+) - ---- a/drivers/gpu/drm/i915/i915_gem.c -+++ b/drivers/gpu/drm/i915/i915_gem.c -@@ -2688,6 +2688,14 @@ i915_gem_object_bind_to_gtt(struct drm_g + drivers/media/video/gspca/mr97310a.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +--- a/drivers/media/video/gspca/mr97310a.c ++++ b/drivers/media/video/gspca/mr97310a.c +@@ -697,6 +697,12 @@ static int start_cif_cam(struct gspca_de + {0x13, 0x00, {0x01}, 1}, + {0, 0, {0}, 0} + }; ++ /* Without this command the cam won't work with USB-UHCI */ ++ gspca_dev->usb_buf[0] = 0x0a; ++ gspca_dev->usb_buf[1] = 0x00; ++ err_code = mr_write(gspca_dev, 2); ++ if (err_code < 0) ++ return err_code; + err_code = sensor_write_regs(gspca_dev, cif_sensor1_init_data, + ARRAY_SIZE(cif_sensor1_init_data)); + } +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index 315fea4..3245d33 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -2421,18 +2421,17 @@ EXPORT_SYMBOL_GPL(pci_reset_function); + */ + int pcix_get_max_mmrbc(struct pci_dev *dev) + { +- int err, cap; ++ int cap; + u32 stat; + + cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); + if (!cap) + return -EINVAL; + +- err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat); +- if (err) ++ if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) + return -EINVAL; + +- return (stat & PCI_X_STATUS_MAX_READ) >> 12; ++ return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21); + } + EXPORT_SYMBOL(pcix_get_max_mmrbc); + +@@ -2445,18 +2444,17 @@ EXPORT_SYMBOL(pcix_get_max_mmrbc); + */ + int pcix_get_mmrbc(struct pci_dev *dev) + { +- int ret, cap; +- u32 cmd; ++ int cap; ++ u16 cmd; + + cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); + if (!cap) return -EINVAL; + +- ret = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd); +- if (!ret) +- ret = 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2); ++ if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) ++ return -EINVAL; + +- return ret; ++ return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2); + } + EXPORT_SYMBOL(pcix_get_mmrbc); + +@@ -2471,28 +2469,27 @@ EXPORT_SYMBOL(pcix_get_mmrbc); + */ + int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) + { +- int cap, err = -EINVAL; +- u32 stat, cmd, v, o; ++ int cap; ++ u32 stat, v, o; ++ u16 cmd; + + if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc)) +- goto out; ++ return -EINVAL; + + v = ffs(mmrbc) - 10; + + cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); + if (!cap) +- goto out; ++ return -EINVAL; + +- err = pci_read_config_dword(dev, 
cap + PCI_X_STATUS, &stat); +- if (err) +- goto out; ++ if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) ++ return -EINVAL; + + if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21) + return -E2BIG; + +- err = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd); +- if (err) +- goto out; ++ if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) ++ return -EINVAL; + + o = (cmd & PCI_X_CMD_MAX_READ) >> 2; + if (o != v) { +@@ -2502,10 +2499,10 @@ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) + + cmd &= ~PCI_X_CMD_MAX_READ; + cmd |= v << 2; +- err = pci_write_config_dword(dev, cap + PCI_X_CMD, cmd); ++ if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd)) ++ return -EIO; } +-out: +- return err; ++ return 0; + } + EXPORT_SYMBOL(pcix_set_mmrbc); -+ /* If the object is bigger than the entire aperture, reject it early -+ * before evicting everything in a vain attempt to find space. -+ */ -+ if (obj->size > dev->gtt_total) { -+ DRM_ERROR("Attempting to bind an object larger than the aperture\n"); -+ return -E2BIG; -+ } -+ - search_free: - free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, - obj->size, alignment, 0); + diff --git a/linux-2.6-usb-wwan-update.patch b/linux-2.6-usb-wwan-update.patch index fbb5ad0..11c8d37 100644 --- a/linux-2.6-usb-wwan-update.patch +++ b/linux-2.6-usb-wwan-update.patch @@ -1,21 +1,12 @@ -diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig -index a0ecb42..71238de 100644 ---- a/drivers/usb/serial/Kconfig -+++ b/drivers/usb/serial/Kconfig -@@ -485,6 +485,7 @@ config USB_SERIAL_QCAUX - - config USB_SERIAL_QUALCOMM - tristate "USB Qualcomm Serial modem" -+ select USB_SERIAL_WWAN - help - Say Y here if you have a Qualcomm USB modem device. These are - usually wireless cellular modems. -@@ -576,8 +577,12 @@ config USB_SERIAL_XIRCOM +diff -up linux-2.6.33.noarch/drivers/usb/serial/Kconfig.orig linux-2.6.33.noarch/drivers/usb/serial/Kconfig +--- linux-2.6.33.noarch/drivers/usb/serial/Kconfig.orig 2010-02-24 13:52:17.000000000 -0500 ++++ linux-2.6.33.noarch/drivers/usb/serial/Kconfig 2010-04-01 12:40:00.838027293 -0400 +@@ -565,8 +565,12 @@ config USB_SERIAL_XIRCOM To compile this driver as a module, choose M here: the module will be called keyspan_pda. +config USB_SERIAL_WWAN -+ tristate ++ tristate + config USB_SERIAL_OPTION tristate "USB driver for GSM and CDMA modems" @@ -23,11 +14,10 @@ index a0ecb42..71238de 100644 help Say Y here if you have a GSM or CDMA modem that's connected to USB. 
-diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile -index 83c9e43..7928cf4 100644 ---- a/drivers/usb/serial/Makefile -+++ b/drivers/usb/serial/Makefile -@@ -52,6 +52,7 @@ obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o +diff -up linux-2.6.33.noarch/drivers/usb/serial/Makefile.orig linux-2.6.33.noarch/drivers/usb/serial/Makefile +--- linux-2.6.33.noarch/drivers/usb/serial/Makefile.orig 2010-02-24 13:52:17.000000000 -0500 ++++ linux-2.6.33.noarch/drivers/usb/serial/Makefile 2010-04-01 12:41:11.407997314 -0400 +@@ -51,6 +51,7 @@ obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += obj-$(CONFIG_USB_SERIAL_SIERRAWIRELESS) += sierra.o obj-$(CONFIG_USB_SERIAL_SPCP8X5) += spcp8x5.o obj-$(CONFIG_USB_SERIAL_SYMBOL) += symbolserial.o @@ -35,10 +25,9 @@ index 83c9e43..7928cf4 100644 obj-$(CONFIG_USB_SERIAL_TI) += ti_usb_3410_5052.o obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o -diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c -index 950cb31..10a9276 100644 ---- a/drivers/usb/serial/option.c -+++ b/drivers/usb/serial/option.c +diff -up linux-2.6.33.noarch/drivers/usb/serial/option.c.orig linux-2.6.33.noarch/drivers/usb/serial/option.c +--- linux-2.6.33.noarch/drivers/usb/serial/option.c.orig 2010-02-24 13:52:17.000000000 -0500 ++++ linux-2.6.33.noarch/drivers/usb/serial/option.c 2010-04-01 12:42:37.072122049 -0400 @@ -41,35 +41,14 @@ #include #include @@ -77,7 +66,7 @@ index 950cb31..10a9276 100644 /* Vendor and product IDs */ #define OPTION_VENDOR_ID 0x0AF0 #define OPTION_PRODUCT_COLT 0x5000 -@@ -746,22 +725,22 @@ static struct usb_serial_driver option_1port_device = { +@@ -677,22 +656,22 @@ static struct usb_serial_driver option_1 .id_table = option_ids, .num_ports = 1, .probe = option_probe, @@ -114,7 +103,7 @@ index 950cb31..10a9276 100644 #endif }; -@@ -774,13 +753,6 @@ static int debug; +@@ -705,12 +684,6 @@ static int debug; #define IN_BUFLEN 4096 #define OUT_BUFLEN 4096 @@ -122,42 +111,39 @@ index 950cb31..10a9276 100644 - spinlock_t susp_lock; - unsigned int suspended:1; - int in_flight; -- struct option_blacklist_info *blacklist_info; -}; - struct option_port_private { /* Input endpoints and buffer for this port */ struct urb *in_urbs[N_IN_URB]; -@@ -837,8 +809,7 @@ module_exit(option_exit); +@@ -767,216 +740,28 @@ module_exit(option_exit); static int option_probe(struct usb_serial *serial, const struct usb_device_id *id) { - struct option_intf_private *data; -- + struct usb_wwan_intf_private *data; + /* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */ if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID && serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 && -@@ -851,11 +822,13 @@ static int option_probe(struct usb_serial *serial, + serial->interface->cur_altsetting->desc.bInterfaceClass == 0x8) + return -ENODEV; + + /* Bandrich modem and AT command interface is 0xff */ + if ((serial->dev->descriptor.idVendor == BANDRICH_VENDOR_ID || + serial->dev->descriptor.idVendor == PIRELLI_VENDOR_ID) && serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff) return -ENODEV; - data = serial->private = kzalloc(sizeof(struct option_intf_private), GFP_KERNEL); + data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL); -+ if (!data) return -ENOMEM; + data->send_setup = option_send_setup; spin_lock_init(&data->susp_lock); -- data->blacklist_info = (struct option_blacklist_info*) id->driver_info; -+ data->private = (void *)id->driver_info; return 0; } -@@ 
-876,194 +849,6 @@ static enum option_blacklist_reason is_blacklisted(const u8 ifnum, - return OPTION_BLACKLIST_NONE; - } - -static void option_set_termios(struct tty_struct *tty, - struct usb_serial_port *port, struct ktermios *old_termios) -{ @@ -298,6 +284,7 @@ index 950cb31..10a9276 100644 - } else { - tty = tty_port_tty_get(&port->port); - if (urb->actual_length) { +- tty_buffer_request_room(tty, urb->actual_length); - tty_insert_flip_string(tty, data, urb->actual_length); - tty_flip_buffer_push(tty); - } else @@ -305,9 +292,9 @@ index 950cb31..10a9276 100644 - tty_kref_put(tty); - - /* Resubmit urb so we continue receiving */ -- if (status != -ESHUTDOWN) { +- if (port->port.count && status != -ESHUTDOWN) { - err = usb_submit_urb(urb, GFP_ATOMIC); -- if (err && err != -EPERM) +- if (err) - printk(KERN_ERR "%s: resubmit read urb failed. " - "(%d)", __func__, err); - else @@ -349,7 +336,7 @@ index 950cb31..10a9276 100644 static void option_instat_callback(struct urb *urb) { int err; -@@ -1120,183 +905,6 @@ static void option_instat_callback(struct urb *urb) +@@ -1026,183 +811,6 @@ static void option_instat_callback(struc } } @@ -533,28 +520,7 @@ index 950cb31..10a9276 100644 /** send RTS/DTR state to the port. * * This is exactly the same as SET_CONTROL_LINE_STATE from the PSTN -@@ -1305,15 +913,16 @@ static void option_setup_urbs(struct usb_serial *serial) - static int option_send_setup(struct usb_serial_port *port) - { - struct usb_serial *serial = port->serial; -- struct option_intf_private *intfdata = -- (struct option_intf_private *) serial->private; -+ struct usb_wwan_intf_private *intfdata = -+ (struct usb_wwan_intf_private *) serial->private; - struct option_port_private *portdata; - int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber; - int val = 0; - dbg("%s", __func__); - -- if (is_blacklisted(ifNum, intfdata->blacklist_info) == -- OPTION_BLACKLIST_SENDSETUP) { -+ if (is_blacklisted(ifNum, -+ (struct option_blacklist_info *) intfdata->private) -+ == OPTION_BLACKLIST_SENDSETUP) { - dbg("No send_setup on blacklisted interface #%d\n", ifNum); - return -EIO; - } -@@ -1330,224 +939,6 @@ static int option_send_setup(struct usb_serial_port *port) +@@ -1228,224 +836,6 @@ static int option_send_setup(struct usb_ 0x22, 0x21, val, ifNum, NULL, 0, USB_CTRL_SET_TIMEOUT); } @@ -731,7 +697,7 @@ index 950cb31..10a9276 100644 - for (i = 0; i < serial->num_ports; i++) { - port = serial->port[i]; - if (!port->interrupt_in_urb) { -- dbg("%s: No interrupt URB for port %d", __func__, i); +- dbg("%s: No interrupt URB for port %d\n", __func__, i); - continue; - } - err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO); @@ -779,11 +745,10 @@ index 950cb31..10a9276 100644 MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_VERSION(DRIVER_VERSION); -diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c -index 53a2d5a..9e55ef5 100644 ---- a/drivers/usb/serial/qcserial.c -+++ b/drivers/usb/serial/qcserial.c -@@ -15,6 +15,7 @@ +diff -up linux-2.6.33.noarch/drivers/usb/serial/qcserial.c.orig linux-2.6.33.noarch/drivers/usb/serial/qcserial.c +--- linux-2.6.33.noarch/drivers/usb/serial/qcserial.c.orig 2010-04-01 12:38:39.954019769 -0400 ++++ linux-2.6.33.noarch/drivers/usb/serial/qcserial.c 2010-04-01 12:41:29.346996823 -0400 +@@ -15,13 +15,14 @@ #include #include #include @@ -791,16 +756,53 @@ index 53a2d5a..9e55ef5 100644 #define DRIVER_AUTHOR "Qualcomm Inc" #define DRIVER_DESC "Qualcomm USB Serial driver" -@@ -76,6 +77,8 @@ static const struct 
usb_device_id id_table[] = { - {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ - {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */ - {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ + + static int debug; + +-static struct usb_device_id id_table[] = { ++static const struct usb_device_id id_table[] = { + {USB_DEVICE(0x05c6, 0x9211)}, /* Acer Gobi QDL device */ + {USB_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ + {USB_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ +@@ -47,6 +48,37 @@ static struct usb_device_id id_table[] = + {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ + {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ + {USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ ++ {USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */ ++ {USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ ++ {USB_DEVICE(0x05c6, 0x9224)}, /* Sony Gobi 2000 QDL device (N0279, VU730) */ ++ {USB_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ ++ {USB_DEVICE(0x05c6, 0x9244)}, /* Samsung Gobi 2000 QDL device (VL176) */ ++ {USB_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */ ++ {USB_DEVICE(0x03f0, 0x241d)}, /* HP Gobi 2000 QDL device (VP412) */ ++ {USB_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */ ++ {USB_DEVICE(0x05c6, 0x9214)}, /* Acer Gobi 2000 QDL device (VP413) */ ++ {USB_DEVICE(0x05c6, 0x9215)}, /* Acer Gobi 2000 Modem device (VP413) */ ++ {USB_DEVICE(0x05c6, 0x9264)}, /* Asus Gobi 2000 QDL device (VR305) */ ++ {USB_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */ ++ {USB_DEVICE(0x05c6, 0x9234)}, /* Top Global Gobi 2000 QDL device (VR306) */ ++ {USB_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */ ++ {USB_DEVICE(0x05c6, 0x9274)}, /* iRex Technologies Gobi 2000 QDL device (VR307) */ ++ {USB_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */ ++ {USB_DEVICE(0x1199, 0x9000)}, /* Sierra Wireless Gobi 2000 QDL device (VT773) */ ++ {USB_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x1199, 0x9004)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x1199, 0x9005)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x1199, 0x9006)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x1199, 0x9007)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ ++ {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */ ++ {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ + {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */ + {USB_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); -@@ -92,6 +95,8 @@ static struct usb_driver qcdriver = { +@@ -63,6 +95,8 @@ static struct usb_driver qcdriver = { static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) { @@ 
-809,7 +811,7 @@ index 53a2d5a..9e55ef5 100644 int retval = -ENODEV; __u8 nintf; __u8 ifnum; -@@ -100,33 +105,45 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) +@@ -71,33 +105,45 @@ static int qcprobe(struct usb_serial *se nintf = serial->dev->actconfig->desc.bNumInterfaces; dbg("Num Interfaces = %d", nintf); @@ -872,7 +874,7 @@ index 53a2d5a..9e55ef5 100644 case 4: /* Composite mode */ if (ifnum == 2) { -@@ -161,6 +178,18 @@ static struct usb_serial_driver qcdevice = { +@@ -132,6 +178,18 @@ static struct usb_serial_driver qcdevice .usb_driver = &qcdriver, .num_ports = 1, .probe = qcprobe, @@ -891,84 +893,9 @@ index 53a2d5a..9e55ef5 100644 }; static int __init qcinit(void) -diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h -new file mode 100644 -index 0000000..2be298a ---- /dev/null -+++ b/drivers/usb/serial/usb-wwan.h -@@ -0,0 +1,67 @@ -+/* -+ * Definitions for USB serial mobile broadband cards -+ */ -+ -+#ifndef __LINUX_USB_USB_WWAN -+#define __LINUX_USB_USB_WWAN -+ -+extern void usb_wwan_dtr_rts(struct usb_serial_port *port, int on); -+extern int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port); -+extern void usb_wwan_close(struct usb_serial_port *port); -+extern int usb_wwan_startup(struct usb_serial *serial); -+extern void usb_wwan_disconnect(struct usb_serial *serial); -+extern void usb_wwan_release(struct usb_serial *serial); -+extern int usb_wwan_write_room(struct tty_struct *tty); -+extern void usb_wwan_set_termios(struct tty_struct *tty, -+ struct usb_serial_port *port, -+ struct ktermios *old); -+extern int usb_wwan_tiocmget(struct tty_struct *tty, struct file *file); -+extern int usb_wwan_tiocmset(struct tty_struct *tty, struct file *file, -+ unsigned int set, unsigned int clear); -+extern int usb_wwan_send_setup(struct usb_serial_port *port); -+extern int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port, -+ const unsigned char *buf, int count); -+extern int usb_wwan_chars_in_buffer(struct tty_struct *tty); -+#ifdef CONFIG_PM -+extern int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message); -+extern int usb_wwan_resume(struct usb_serial *serial); -+#endif -+ -+/* per port private data */ -+ -+#define N_IN_URB 4 -+#define N_OUT_URB 4 -+#define IN_BUFLEN 4096 -+#define OUT_BUFLEN 4096 -+ -+struct usb_wwan_intf_private { -+ spinlock_t susp_lock; -+ unsigned int suspended:1; -+ int in_flight; -+ int (*send_setup) (struct usb_serial_port *port); -+ void *private; -+}; -+ -+struct usb_wwan_port_private { -+ /* Input endpoints and buffer for this port */ -+ struct urb *in_urbs[N_IN_URB]; -+ u8 *in_buffer[N_IN_URB]; -+ /* Output endpoints and buffer for this port */ -+ struct urb *out_urbs[N_OUT_URB]; -+ u8 *out_buffer[N_OUT_URB]; -+ unsigned long out_busy; /* Bit vector of URBs in use */ -+ int opened; -+ struct usb_anchor delayed; -+ -+ /* Settings for the port */ -+ int rts_state; /* Handshaking pins (outputs) */ -+ int dtr_state; -+ int cts_state; /* Handshaking pins (inputs) */ -+ int dsr_state; -+ int dcd_state; -+ int ri_state; -+ -+ unsigned long tx_start_time[N_OUT_URB]; -+}; -+ -+#endif /* __LINUX_USB_USB_WWAN */ -diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c -new file mode 100644 -index 0000000..1ccf852 ---- /dev/null -+++ b/drivers/usb/serial/usb_wwan.c +diff -up linux-2.6.33.noarch/drivers/usb/serial/usb_wwan.c.orig linux-2.6.33.noarch/drivers/usb/serial/usb_wwan.c +--- linux-2.6.33.noarch/drivers/usb/serial/usb_wwan.c.orig 2010-04-01 
12:39:41.531121118 -0400 ++++ linux-2.6.33.noarch/drivers/usb/serial/usb_wwan.c 2010-04-01 12:39:41.531121118 -0400 @@ -0,0 +1,665 @@ +/* + USB Driver layer for GSM modems @@ -998,7 +925,6 @@ index 0000000..1ccf852 +#include +#include +#include -+#include +#include +#include +#include @@ -1189,6 +1115,7 @@ index 0000000..1ccf852 + } else { + tty = tty_port_tty_get(&port->port); + if (urb->actual_length) { ++ tty_buffer_request_room(tty, urb->actual_length); + tty_insert_flip_string(tty, data, urb->actual_length); + tty_flip_buffer_push(tty); + } else @@ -1196,9 +1123,9 @@ index 0000000..1ccf852 + tty_kref_put(tty); + + /* Resubmit urb so we continue receiving */ -+ if (status != -ESHUTDOWN) { ++ if (port->port.count && status != -ESHUTDOWN) { + err = usb_submit_urb(urb, GFP_ATOMIC); -+ if (err && err != -EPERM) ++ if (err) + printk(KERN_ERR "%s: resubmit read urb failed. " + "(%d)", __func__, err); + else @@ -1582,7 +1509,7 @@ index 0000000..1ccf852 + for (i = 0; i < serial->num_ports; i++) { + port = serial->port[i]; + if (!port->interrupt_in_urb) { -+ dbg("%s: No interrupt URB for port %d", __func__, i); ++ dbg("%s: No interrupt URB for port %d\n", __func__, i); + continue; + } + err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO); @@ -1635,3 +1562,73 @@ index 0000000..1ccf852 + +module_param(debug, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "Debug messages"); +diff -up linux-2.6.33.noarch/drivers/usb/serial/usb-wwan.h.orig linux-2.6.33.noarch/drivers/usb/serial/usb-wwan.h +--- linux-2.6.33.noarch/drivers/usb/serial/usb-wwan.h.orig 2010-04-01 12:42:23.050997135 -0400 ++++ linux-2.6.33.noarch/drivers/usb/serial/usb-wwan.h 2010-04-01 12:42:19.341996684 -0400 +@@ -0,0 +1,66 @@ ++/* ++ * Definitions for USB serial mobile broadband cards ++ */ ++ ++#ifndef __LINUX_USB_USB_WWAN ++#define __LINUX_USB_USB_WWAN ++ ++extern void usb_wwan_dtr_rts(struct usb_serial_port *port, int on); ++extern int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port); ++extern void usb_wwan_close(struct usb_serial_port *port); ++extern int usb_wwan_startup(struct usb_serial *serial); ++extern void usb_wwan_disconnect(struct usb_serial *serial); ++extern void usb_wwan_release(struct usb_serial *serial); ++extern int usb_wwan_write_room(struct tty_struct *tty); ++extern void usb_wwan_set_termios(struct tty_struct *tty, ++ struct usb_serial_port *port, ++ struct ktermios *old); ++extern int usb_wwan_tiocmget(struct tty_struct *tty, struct file *file); ++extern int usb_wwan_tiocmset(struct tty_struct *tty, struct file *file, ++ unsigned int set, unsigned int clear); ++extern int usb_wwan_send_setup(struct usb_serial_port *port); ++extern int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port, ++ const unsigned char *buf, int count); ++extern int usb_wwan_chars_in_buffer(struct tty_struct *tty); ++#ifdef CONFIG_PM ++extern int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message); ++extern int usb_wwan_resume(struct usb_serial *serial); ++#endif ++ ++/* per port private data */ ++ ++#define N_IN_URB 4 ++#define N_OUT_URB 4 ++#define IN_BUFLEN 4096 ++#define OUT_BUFLEN 4096 ++ ++struct usb_wwan_intf_private { ++ spinlock_t susp_lock; ++ unsigned int suspended:1; ++ int in_flight; ++ int (*send_setup) (struct usb_serial_port *port); ++}; ++ ++struct usb_wwan_port_private { ++ /* Input endpoints and buffer for this port */ ++ struct urb *in_urbs[N_IN_URB]; ++ u8 *in_buffer[N_IN_URB]; ++ /* Output endpoints and buffer for this port */ ++ struct urb *out_urbs[N_OUT_URB]; 
++ u8 *out_buffer[N_OUT_URB]; ++ unsigned long out_busy; /* Bit vector of URBs in use */ ++ int opened; ++ struct usb_anchor delayed; ++ ++ /* Settings for the port */ ++ int rts_state; /* Handshaking pins (outputs) */ ++ int dtr_state; ++ int cts_state; /* Handshaking pins (inputs) */ ++ int dsr_state; ++ int dcd_state; ++ int ri_state; ++ ++ unsigned long tx_start_time[N_OUT_URB]; ++}; ++ ++#endif /* __LINUX_USB_USB_WWAN */ diff --git a/linux-2.6-utrace-ptrace.patch b/linux-2.6-utrace-ptrace.patch index a3d3674..3771682 100644 --- a/linux-2.6-utrace-ptrace.patch +++ b/linux-2.6-utrace-ptrace.patch @@ -14,16 +14,16 @@ Signed-off-by: Oleg Nesterov --- include/linux/ptrace.h | 2 +- kernel/Makefile | 1 + - kernel/ptrace-utrace.c | 1127 ++++++++++++++++++++++++++++++++++++++++++++++++ - kernel/ptrace.c | 654 ++++++++++++++-------------- + kernel/ptrace-utrace.c | 1080 ++++++++++++++++++++++++++++++++++++++++++++++++ + kernel/ptrace.c | 572 +++++++++++++------------- kernel/utrace.c | 16 + - 5 files changed, 1466 insertions(+), 334 deletions(-) + 5 files changed, 1378 insertions(+), 293 deletions(-) diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h -index 0d84f1e..102cb0f 100644 +index 4802e2a..03f8fc7 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h -@@ -99,7 +99,7 @@ +@@ -79,7 +79,7 @@ #include /* For unlikely. */ #include /* For struct task_struct. */ @@ -33,10 +33,10 @@ index 0d84f1e..102cb0f 100644 extern int ptrace_traceme(void); extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); diff --git a/kernel/Makefile b/kernel/Makefile -index 8bbb631..0cf7a15 100644 +index 8a0185e..30a118d 100644 --- a/kernel/Makefile +++ b/kernel/Makefile -@@ -71,6 +71,7 @@ obj-$(CONFIG_RESOURCE_COUNTERS) += res_c +@@ -70,6 +70,7 @@ obj-$(CONFIG_RESOURCE_COUNTERS) += res_c obj-$(CONFIG_STOP_MACHINE) += stop_machine.o obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o obj-$(CONFIG_UTRACE) += utrace.o @@ -46,10 +46,10 @@ index 8bbb631..0cf7a15 100644 obj-$(CONFIG_GCOV_KERNEL) += gcov/ diff --git a/kernel/ptrace-utrace.c b/kernel/ptrace-utrace.c new file mode 100644 -index ...86234ee 100644 +index ...ea419ee 100644 --- /dev/null +++ b/kernel/ptrace-utrace.c -@@ -0,0 +1,1127 @@ +@@ -0,0 +1,1080 @@ +/* + * linux/kernel/ptrace.c + * @@ -999,9 +999,6 @@ index ...86234ee 100644 + return 0; +} + -+extern int ptrace_regset(struct task_struct *task, int req, unsigned int type, -+ struct iovec *kiov); -+ +int ptrace_request(struct task_struct *child, long request, + long addr, long data) +{ @@ -1061,25 +1058,6 @@ index ...86234ee 100644 + ret = 0; + break; + -+ case PTRACE_GETREGSET: -+ case PTRACE_SETREGSET: -+ { -+ struct iovec kiov; -+ struct iovec __user *uiov = (struct iovec __user *) data; -+ -+ if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov))) -+ return -EFAULT; -+ -+ if (__get_user(kiov.iov_base, &uiov->iov_base) || -+ __get_user(kiov.iov_len, &uiov->iov_len)) -+ return -EFAULT; -+ -+ ret = ptrace_regset(child, request, addr, &kiov); -+ if (!ret) -+ ret = __put_user(kiov.iov_len, &uiov->iov_len); -+ break; -+ } -+ + default: + ret = ptrace_resume(child, engine, request, data); + break; @@ -1144,31 +1122,6 @@ index ...86234ee 100644 + &siginfo, true); + break; + -+ case PTRACE_GETREGSET: -+ case PTRACE_SETREGSET: -+ { -+ struct iovec kiov; -+ struct compat_iovec __user *uiov = -+ (struct compat_iovec __user *) datap; -+ compat_uptr_t ptr; -+ compat_size_t len; -+ -+ if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov))) -+ return -EFAULT; -+ -+ 
if (__get_user(ptr, &uiov->iov_base) || -+ __get_user(len, &uiov->iov_len)) -+ return -EFAULT; -+ -+ kiov.iov_base = compat_ptr(ptr); -+ kiov.iov_len = len; -+ -+ ret = ptrace_regset(child, request, addr, &kiov); -+ if (!ret) -+ ret = __put_user(kiov.iov_len, &uiov->iov_len); -+ break; -+ } -+ + default: + ret = ptrace_request(child, request, addr, data); + } @@ -1178,7 +1131,7 @@ index ...86234ee 100644 +} +#endif /* CONFIG_COMPAT */ diff --git a/kernel/ptrace.c b/kernel/ptrace.c -index 0ad4dc0..448b353 100644 +index a408bf7..4e87441 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -16,7 +16,6 @@ @@ -1189,9 +1142,9 @@ index 0ad4dc0..448b353 100644 #include #include #include -@@ -25,7 +24,327 @@ +@@ -24,7 +23,286 @@ + #include #include - #include +int __ptrace_may_access(struct task_struct *task, unsigned int mode) +{ @@ -1341,47 +1294,6 @@ index 0ad4dc0..448b353 100644 + return copied; +} + -+#ifdef CONFIG_HAVE_ARCH_TRACEHOOK -+ -+static const struct user_regset * -+find_regset(const struct user_regset_view *view, unsigned int type) -+{ -+ const struct user_regset *regset; -+ int n; -+ -+ for (n = 0; n < view->n; ++n) { -+ regset = view->regsets + n; -+ if (regset->core_note_type == type) -+ return regset; -+ } -+ -+ return NULL; -+} -+ -+int ptrace_regset(struct task_struct *task, int req, unsigned int type, -+ struct iovec *kiov) -+{ -+ const struct user_regset_view *view = task_user_regset_view(task); -+ const struct user_regset *regset = find_regset(view, type); -+ int regset_no; -+ -+ if (!regset || (kiov->iov_len % regset->size) != 0) -+ return -EINVAL; -+ -+ regset_no = regset - view->regsets; -+ kiov->iov_len = min(kiov->iov_len, -+ (__kernel_size_t) (regset->n * regset->size)); -+ -+ if (req == PTRACE_GETREGSET) -+ return copy_regset_to_user(task, view, regset_no, 0, -+ kiov->iov_len, kiov->iov_base); -+ else -+ return copy_regset_from_user(task, view, regset_no, 0, -+ kiov->iov_len, kiov->iov_base); -+} -+ -+#endif -+ +static struct task_struct *ptrace_get_task_struct(pid_t pid) +{ + struct task_struct *child; @@ -1400,7 +1312,7 @@ index 0ad4dc0..448b353 100644 +#ifndef arch_ptrace_attach +#define arch_ptrace_attach(child) do { } while (0) +#endif -+ + +SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data) +{ + struct task_struct *child; @@ -1446,7 +1358,7 @@ index 0ad4dc0..448b353 100644 + unlock_kernel(); + return ret; +} - ++ +int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data) +{ + unsigned long tmp; @@ -1517,10 +1429,27 @@ index 0ad4dc0..448b353 100644 /* * ptrace a task: make the debugger its new parent and * move it to the ptrace list. -@@ -119,61 +438,6 @@ int ptrace_check_attach(struct task_stru - return ret; - } - +@@ -101,76 +379,21 @@ int ptrace_check_attach(struct task_stru + /* + * child->sighand can't be NULL, release_task() + * does ptrace_unlink() before __exit_signal(). +- */ +- spin_lock_irq(&child->sighand->siglock); +- if (task_is_stopped(child)) +- child->state = TASK_TRACED; +- else if (!task_is_traced(child) && !kill) +- ret = -ESRCH; +- spin_unlock_irq(&child->sighand->siglock); +- } +- read_unlock(&tasklist_lock); +- +- if (!ret && !kill) +- ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH; +- +- /* All systems go.. 
*/ +- return ret; +-} +- -int __ptrace_may_access(struct task_struct *task, unsigned int mode) -{ - const struct cred *cred = current_cred(), *tcred; @@ -1558,7 +1487,16 @@ index 0ad4dc0..448b353 100644 - - return security_ptrace_access_check(task, mode); -} -- ++ */ ++ spin_lock_irq(&child->sighand->siglock); ++ if (task_is_stopped(child)) ++ child->state = TASK_TRACED; ++ else if (!task_is_traced(child) && !kill) ++ ret = -ESRCH; ++ spin_unlock_irq(&child->sighand->siglock); ++ } ++ read_unlock(&tasklist_lock); + -bool ptrace_may_access(struct task_struct *task, unsigned int mode) -{ - int err; @@ -1567,19 +1505,21 @@ index 0ad4dc0..448b353 100644 - task_unlock(task); - return !err; -} -- ++ if (!ret && !kill) ++ ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH; + -/* - * For experimental use of utrace, exclude ptrace on the same task. - */ -static inline bool exclude_ptrace(struct task_struct *task) -{ - return unlikely(!!task_utrace_flags(task)); --} -- ++ /* All systems go.. */ ++ return ret; + } + int ptrace_attach(struct task_struct *task) - { - int retval; -@@ -197,8 +461,6 @@ int ptrace_attach(struct task_struct *ta +@@ -196,8 +419,6 @@ int ptrace_attach(struct task_struct *ta task_lock(task); retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); @@ -1588,42 +1528,20 @@ index 0ad4dc0..448b353 100644 task_unlock(task); if (retval) goto unlock_creds; -@@ -226,87 +488,33 @@ out: - return retval; - } +@@ -235,9 +456,6 @@ int ptrace_traceme(void) + { + int ret = -EPERM; --/** -- * ptrace_traceme -- helper for PTRACE_TRACEME -- * -- * Performs checks and sets PT_PTRACED. -- * Should be used by all ptrace implementations for PTRACE_TRACEME. -- */ --int ptrace_traceme(void) --{ -- int ret = -EPERM; -- - if (exclude_ptrace(current)) /* XXX locking */ - return -EBUSY; - -- write_lock_irq(&tasklist_lock); -- /* Are we already being traced? */ -- if (!current->ptrace) { -- ret = security_ptrace_traceme(current->parent); -- /* -- * Check PF_EXITING to ensure ->real_parent has not passed -- * exit_ptrace(). Otherwise we don't report the error but -- * pretend ->real_parent untraces us right after return. -- */ -- if (!ret && !(current->real_parent->flags & PF_EXITING)) { -- current->ptrace = PT_PTRACED; -- __ptrace_link(current, current->real_parent); -- } -- } -- write_unlock_irq(&tasklist_lock); -- -- return ret; --} -- + write_lock_irq(&tasklist_lock); + /* Are we already being traced? */ + if (!current->ptrace) { +@@ -257,57 +475,6 @@ int ptrace_traceme(void) + return ret; + } + -/* - * Called with irqs disabled, returns true if childs should reap themselves. - */ @@ -1651,18 +1569,11 @@ index 0ad4dc0..448b353 100644 - * children self-reap, then this child was prevented by ptrace and we must - * reap it now, in that case we must also wake up sub-threads sleeping in - * do_wait(). -+/** -+ * ptrace_traceme -- helper for PTRACE_TRACEME -+ * -+ * Performs checks and sets PT_PTRACED. -+ * Should be used by all ptrace implementations for PTRACE_TRACEME. - */ +- */ -bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) -+int ptrace_traceme(void) - { +-{ - __ptrace_unlink(p); -+ int ret = -EPERM; - +- - if (p->exit_state == EXIT_ZOMBIE) { - if (!task_detached(p) && thread_group_empty(p)) { - if (!same_thread_group(p->real_parent, tracer)) @@ -1676,28 +1587,16 @@ index 0ad4dc0..448b353 100644 - /* Mark it as in the process of being reaped. */ - p->exit_state = EXIT_DEAD; - return true; -+ write_lock_irq(&tasklist_lock); -+ /* Are we already being traced? 
*/ -+ if (!current->ptrace) { -+ ret = security_ptrace_traceme(current->parent); -+ /* -+ * Check PF_EXITING to ensure ->real_parent has not passed -+ * exit_ptrace(). Otherwise we don't report the error but -+ * pretend ->real_parent untraces us right after return. -+ */ -+ if (!ret && !(current->real_parent->flags & PF_EXITING)) { -+ current->ptrace = PT_PTRACED; -+ __ptrace_link(current, current->real_parent); - } - } -+ write_unlock_irq(&tasklist_lock); - +- } +- } +- - return false; -+ return ret; - } - +-} +- int ptrace_detach(struct task_struct *child, unsigned int data) -@@ -362,56 +570,6 @@ void exit_ptrace(struct task_struct *tra + { + bool dead = false; +@@ -361,56 +528,6 @@ void exit_ptrace(struct task_struct *tra } } @@ -1754,55 +1653,7 @@ index 0ad4dc0..448b353 100644 static int ptrace_setoptions(struct task_struct *child, long data) { child->ptrace &= ~PT_TRACE_MASK; -@@ -526,47 +683,6 @@ static int ptrace_resume(struct task_str - return 0; - } - --#ifdef CONFIG_HAVE_ARCH_TRACEHOOK -- --static const struct user_regset * --find_regset(const struct user_regset_view *view, unsigned int type) --{ -- const struct user_regset *regset; -- int n; -- -- for (n = 0; n < view->n; ++n) { -- regset = view->regsets + n; -- if (regset->core_note_type == type) -- return regset; -- } -- -- return NULL; --} -- --static int ptrace_regset(struct task_struct *task, int req, unsigned int type, -- struct iovec *kiov) --{ -- const struct user_regset_view *view = task_user_regset_view(task); -- const struct user_regset *regset = find_regset(view, type); -- int regset_no; -- -- if (!regset || (kiov->iov_len % regset->size) != 0) -- return -EINVAL; -- -- regset_no = regset - view->regsets; -- kiov->iov_len = min(kiov->iov_len, -- (__kernel_size_t) (regset->n * regset->size)); -- -- if (req == PTRACE_GETREGSET) -- return copy_regset_to_user(task, view, regset_no, 0, -- kiov->iov_len, kiov->iov_base); -- else -- return copy_regset_from_user(task, view, regset_no, 0, -- kiov->iov_len, kiov->iov_base); --} -- --#endif -- - int ptrace_request(struct task_struct *child, long request, - long addr, long data) - { -@@ -656,93 +772,7 @@ int ptrace_request(struct task_struct *c +@@ -594,93 +710,7 @@ int ptrace_request(struct task_struct *c return ret; } @@ -1896,7 +1747,7 @@ index 0ad4dc0..448b353 100644 int compat_ptrace_request(struct task_struct *child, compat_long_t request, compat_ulong_t addr, compat_ulong_t data) { -@@ -820,47 +850,5 @@ int compat_ptrace_request(struct task_st +@@ -732,47 +762,5 @@ int compat_ptrace_request(struct task_st return ret; } diff --git a/linux-2.6-utrace.patch b/linux-2.6-utrace.patch index c95ffdb..a7d0adc 100644 --- a/linux-2.6-utrace.patch +++ b/linux-2.6-utrace.patch @@ -648,10 +648,10 @@ index ...e149f49 100644 + + diff --git a/fs/proc/array.c b/fs/proc/array.c -index 885ab55..b4d0b8a 100644 +index 13b5d07..cda9489 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c -@@ -81,6 +81,7 @@ +@@ -82,6 +82,7 @@ #include #include #include @@ -659,7 +659,7 @@ index 885ab55..b4d0b8a 100644 #include #include -@@ -192,6 +193,8 @@ static inline void task_state(struct seq +@@ -194,6 +195,8 @@ static inline void task_state(struct seq cred->uid, cred->euid, cred->suid, cred->fsuid, cred->gid, cred->egid, cred->sgid, cred->fsgid); @@ -669,10 +669,10 @@ index 885ab55..b4d0b8a 100644 if (p->files) fdt = files_fdtable(p->files); diff --git a/include/linux/sched.h b/include/linux/sched.h -index 82e4494..8461a2d 100644 +index 9a2b557..47379c1 100644 --- a/include/linux/sched.h +++ 
b/include/linux/sched.h -@@ -1352,6 +1352,11 @@ struct task_struct { +@@ -1412,6 +1412,11 @@ struct task_struct { #endif seccomp_t seccomp; @@ -1621,7 +1621,7 @@ index ...f251efe 100644 + +#endif /* linux/utrace.h */ diff --git a/init/Kconfig b/init/Kconfig -index eb77e8c..b849517 100644 +index d95ca7c..6d52a08 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -320,6 +320,15 @@ config AUDIT_TREE @@ -1641,10 +1641,10 @@ index eb77e8c..b849517 100644 choice diff --git a/kernel/Makefile b/kernel/Makefile -index a987aa1..8bbb631 100644 +index 864ff75..8a0185e 100644 --- a/kernel/Makefile +++ b/kernel/Makefile -@@ -70,6 +70,7 @@ obj-$(CONFIG_IKCONFIG) += configs.o +@@ -69,6 +69,7 @@ obj-$(CONFIG_IKCONFIG) += configs.o obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o obj-$(CONFIG_STOP_MACHINE) += stop_machine.o obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o @@ -1653,10 +1653,10 @@ index a987aa1..8bbb631 100644 obj-$(CONFIG_AUDITSYSCALL) += auditsc.o obj-$(CONFIG_GCOV_KERNEL) += gcov/ diff --git a/kernel/fork.c b/kernel/fork.c -index 4c14942..16030e3 100644 +index f88bd98..2b6820c 100644 --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -161,6 +161,7 @@ void free_task(struct task_struct *tsk) +@@ -153,6 +153,7 @@ void free_task(struct task_struct *tsk) free_thread_info(tsk->stack); rt_mutex_debug_task_free(tsk); ftrace_graph_exit_task(tsk); @@ -1664,7 +1664,7 @@ index 4c14942..16030e3 100644 free_task_struct(tsk); } EXPORT_SYMBOL(free_task); -@@ -1000,6 +1001,8 @@ static struct task_struct *copy_process( +@@ -1023,6 +1024,8 @@ static struct task_struct *copy_process( if (!p) goto fork_out; @@ -1674,7 +1674,7 @@ index 4c14942..16030e3 100644 rt_mutex_init_task(p); diff --git a/kernel/ptrace.c b/kernel/ptrace.c -index 067f120..0ad4dc0 100644 +index b7c1d32..a408bf7 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -16,6 +16,7 @@ @@ -1685,7 +1685,7 @@ index 067f120..0ad4dc0 100644 #include #include #include -@@ -165,6 +166,14 @@ bool ptrace_may_access(struct task_struc +@@ -164,6 +165,14 @@ bool ptrace_may_access(struct task_struc return !err; } @@ -1700,7 +1700,7 @@ index 067f120..0ad4dc0 100644 int ptrace_attach(struct task_struct *task) { int retval; -@@ -188,6 +197,8 @@ int ptrace_attach(struct task_struct *ta +@@ -187,6 +196,8 @@ int ptrace_attach(struct task_struct *ta task_lock(task); retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); @@ -1709,7 +1709,7 @@ index 067f120..0ad4dc0 100644 task_unlock(task); if (retval) goto unlock_creds; -@@ -225,6 +236,9 @@ int ptrace_traceme(void) +@@ -224,6 +235,9 @@ int ptrace_traceme(void) { int ret = -EPERM; diff --git a/linux-2.6-v4l-dvb-add-kworld-a340-support.patch b/linux-2.6-v4l-dvb-add-kworld-a340-support.patch index 0c7d241..95c912a 100644 --- a/linux-2.6-v4l-dvb-add-kworld-a340-support.patch +++ b/linux-2.6-v4l-dvb-add-kworld-a340-support.patch @@ -1,7 +1,4 @@ -From c34c78838f02693a70808e38309629e85aa50266 Mon Sep 17 00:00:00 2001 -From: Jarod Wilson -Date: Thu, 20 May 2010 10:03:13 -0400 -Subject: [PATCH] dvb: add support for kworld 340u and ub435-q to em28xx-dvb +[PATCH] dvb: add support for kworld 340u and ub435-q to em28xx-dvb This adds support for the KWorld PlusTV 340U and KWorld UB345-Q ATSC sticks, which are really the same device. The sticks have an eMPIA @@ -15,17 +12,12 @@ rather than the current lgdt3304 driver, as its severely lacking in functionality by comparison (see said patch for details). 
Signed-off-by: Jarod Wilson ---- - drivers/media/video/em28xx/em28xx-cards.c | 28 ++++++++++++++++++++++++ - drivers/media/video/em28xx/em28xx-dvb.c | 33 +++++++++++++++++++++++++++++ - drivers/media/video/em28xx/em28xx.h | 1 + - 3 files changed, 62 insertions(+), 0 deletions(-) -diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c -index b0fb083..6312e76 100644 ---- a/drivers/media/video/em28xx/em28xx-cards.c -+++ b/drivers/media/video/em28xx/em28xx-cards.c -@@ -158,6 +158,22 @@ static struct em28xx_reg_seq evga_indtube_digital[] = { +--- +diff -r 14021dfc00f3 drivers/media/video/em28xx/em28xx-cards.c +--- a/drivers/media/video/em28xx/em28xx-cards.c Thu Feb 11 23:11:30 2010 -0200 ++++ b/drivers/media/video/em28xx/em28xx-cards.c Thu Feb 18 16:47:17 2010 -0500 +@@ -170,6 +170,22 @@ { -1, -1, -1, -1}, }; @@ -48,7 +40,7 @@ index b0fb083..6312e76 100644 /* Pinnacle Hybrid Pro eb1a:2881 */ static struct em28xx_reg_seq pinnacle_hybrid_pro_analog[] = { {EM28XX_R08_GPIO, 0xfd, ~EM_GPIO_4, 10}, -@@ -1649,6 +1665,16 @@ struct em28xx_board em28xx_boards[] = { +@@ -1703,6 +1719,16 @@ .tuner_gpio = reddo_dvb_c_usb_box, .has_dvb = 1, }, @@ -65,7 +57,7 @@ index b0fb083..6312e76 100644 }; const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards); -@@ -1768,6 +1794,8 @@ struct usb_device_id em28xx_id_table[] = { +@@ -1826,6 +1852,8 @@ .driver_info = EM2820_BOARD_IODATA_GVMVP_SZ }, { USB_DEVICE(0xeb1a, 0x50a6), .driver_info = EM2860_BOARD_GADMEI_UTV330 }, @@ -74,10 +66,9 @@ index b0fb083..6312e76 100644 { }, }; MODULE_DEVICE_TABLE(usb, em28xx_id_table); -diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c -index bcd3c37..ce8a9ee 100644 ---- a/drivers/media/video/em28xx/em28xx-dvb.c -+++ b/drivers/media/video/em28xx/em28xx-dvb.c +diff -r 14021dfc00f3 drivers/media/video/em28xx/em28xx-dvb.c +--- a/drivers/media/video/em28xx/em28xx-dvb.c Thu Feb 11 23:11:30 2010 -0200 ++++ b/drivers/media/video/em28xx/em28xx-dvb.c Thu Feb 18 16:47:17 2010 -0500 @@ -30,11 +30,13 @@ #include "tuner-simple.h" @@ -92,7 +83,7 @@ index bcd3c37..ce8a9ee 100644 MODULE_DESCRIPTION("driver for em28xx based DVB cards"); MODULE_AUTHOR("Mauro Carvalho Chehab "); -@@ -231,6 +233,18 @@ static struct lgdt330x_config em2880_lgdt3303_dev = { +@@ -231,6 +233,18 @@ .demod_chip = LGDT3303, }; @@ -111,7 +102,7 @@ index bcd3c37..ce8a9ee 100644 static struct zl10353_config em28xx_zl10353_with_xc3028 = { .demod_address = (0x1e >> 1), .no_tuner = 1, -@@ -247,6 +261,17 @@ static struct s5h1409_config em28xx_s5h1409_with_xc3028 = { +@@ -247,6 +261,17 @@ .mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK }; @@ -129,7 +120,7 @@ index bcd3c37..ce8a9ee 100644 static struct zl10353_config em28xx_zl10353_xc3028_no_i2c_gate = { .demod_address = (0x1e >> 1), .no_tuner = 1, -@@ -570,6 +595,14 @@ static int dvb_init(struct em28xx *dev) +@@ -569,6 +594,14 @@ } } break; @@ -144,18 +135,14 @@ index bcd3c37..ce8a9ee 100644 default: em28xx_errdev("/2: The frontend of your DVB/ATSC card" " isn't supported yet\n"); -diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h -index ba6fe5d..9f3fdad 100644 ---- a/drivers/media/video/em28xx/em28xx.h -+++ b/drivers/media/video/em28xx/em28xx.h +diff -r 14021dfc00f3 drivers/media/video/em28xx/em28xx.h +--- a/drivers/media/video/em28xx/em28xx.h Thu Feb 11 23:11:30 2010 -0200 ++++ b/drivers/media/video/em28xx/em28xx.h Thu Feb 18 16:47:17 2010 -0500 @@ -112,6 +112,7 @@ + #define 
EM2861_BOARD_GADMEI_UTV330PLUS 72 #define EM2870_BOARD_REDDO_DVB_C_USB_BOX 73 #define EM2800_BOARD_VC211A 74 - #define EM2882_BOARD_DIKOM_DK300 75 +#define EM2870_BOARD_KWORLD_A340 76 /* Limits minimum and default number of buffers */ #define EM28XX_MIN_BUF 4 --- -1.7.0.1 - diff --git a/linux-2.6-v4l-dvb-gspca-fixes.patch b/linux-2.6-v4l-dvb-gspca-fixes.patch index dbf1b56..2677c02 100644 --- a/linux-2.6-v4l-dvb-gspca-fixes.patch +++ b/linux-2.6-v4l-dvb-gspca-fixes.patch @@ -73,8 +73,8 @@ index dce5ef8..d2a4902 100644 -#include +#ifdef CONFIG_INPUT #include - #include #endif + @@ -54,6 +51,9 @@ MODULE_LICENSE("GPL"); #define SENSOR_HV7131R 10 #define SENSOR_MT9VPRB 20 @@ -108,7 +108,7 @@ index dce5ef8..d2a4902 100644 - struct gspca_dev *gspca_dev = (struct gspca_dev *)data; - struct sd *sd = (struct sd *) gspca_dev; - -- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait); +- DECLARE_WAIT_QUEUE_HEAD(wait); - set_freezable(); - for (;;) { - if (kthread_should_stop()) @@ -2295,6 +2295,139 @@ Signed-off-by: Hans de Goede }; static const struct sd_desc *sd_desc[2] = { &sd_desc_12a, + +gspca: make usb id 0461:0815 get handled by the right driver + +From: John Ellson + +The 0461:0815 camera is spca561 based not spca508 + +Priority: high + +Signed-off-by: John Ellson +Signed-off-by: Hans de Goede + +diff -r 6687077521f7 -r 649d692c7bc1 linux/drivers/media/video/gspca/spca508.c +--- a/drivers/media/video/gspca/spca508.c Thu Mar 11 10:12:39 2010 +0100 ++++ b/drivers/media/video/gspca/spca508.c Wed Mar 17 14:22:58 2010 +0100 +@@ -1559,7 +1559,6 @@ + static const __devinitdata struct usb_device_id device_table[] = { + {USB_DEVICE(0x0130, 0x0130), .driver_info = HamaUSBSightcam}, + {USB_DEVICE(0x041e, 0x4018), .driver_info = CreativeVista}, +- {USB_DEVICE(0x0461, 0x0815), .driver_info = MicroInnovationIC200}, + {USB_DEVICE(0x0733, 0x0110), .driver_info = ViewQuestVQ110}, + {USB_DEVICE(0x0af9, 0x0010), .driver_info = HamaUSBSightcam}, + {USB_DEVICE(0x0af9, 0x0011), .driver_info = HamaUSBSightcam2}, +diff -r 6687077521f7 -r 649d692c7bc1 linux/drivers/media/video/gspca/spca561.c +--- a/drivers/media/video/gspca/spca561.c Thu Mar 11 10:12:39 2010 +0100 ++++ b/drivers/media/video/gspca/spca561.c Wed Mar 17 14:22:58 2010 +0100 +@@ -1096,6 +1096,7 @@ + {USB_DEVICE(0x041e, 0x401a), .driver_info = Rev072A}, + {USB_DEVICE(0x041e, 0x403b), .driver_info = Rev012A}, + {USB_DEVICE(0x0458, 0x7004), .driver_info = Rev072A}, ++ {USB_DEVICE(0x0461, 0x0815), .driver_info = Rev072A}, + {USB_DEVICE(0x046d, 0x0928), .driver_info = Rev012A}, + {USB_DEVICE(0x046d, 0x0929), .driver_info = Rev012A}, + {USB_DEVICE(0x046d, 0x092a), .driver_info = Rev012A}, +From: Jean-François Moine +Date: Thu, 18 Feb 2010 07:12:06 +0000 (-0300) +Subject: V4L/DVB: gspca - sonixj: Add vertical flip control for sensor hv7131r +X-Git-Url: http://git.linuxtv.org/jfrancois/gspca.git?a=commitdiff_plain;h=f344792e0c1dd7e0029b7dd92612ff002646634e + +V4L/DVB: gspca - sonixj: Add vertical flip control for sensor hv7131r + +Signed-off-by: Jean-François Moine +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c +index 4ece110..fea8075 100644 +--- a/drivers/media/video/gspca/sonixj.c ++++ b/drivers/media/video/gspca/sonixj.c +@@ -282,7 +282,7 @@ static const struct ctrl sd_ctrls[] = { + static __u32 ctrl_dis[] = { + (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX) | + (1 << AUTOGAIN_IDX), /* SENSOR_ADCM1700 0 */ +- (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX), ++ (1 << 
INFRARED_IDX) | (1 << FREQ_IDX), + /* SENSOR_HV7131R 1 */ + (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX), + /* SENSOR_MI0360 2 */ +@@ -1780,23 +1780,34 @@ static void setautogain(struct gspca_dev *gspca_dev) + sd->ag_cnt = -1; + } + +-/* ov7630/ov7648 only */ ++/* hv7131r/ov7630/ov7648 only */ + static void setvflip(struct sd *sd) + { + u8 comn; + + if (sd->gspca_dev.ctrl_dis & (1 << VFLIP_IDX)) + return; +- if (sd->sensor == SENSOR_OV7630) { ++ switch (sd->sensor) { ++ case SENSOR_HV7131R: ++ comn = 0x18; /* clkdiv = 1, ablcen = 1 */ ++ if (sd->vflip) ++ comn |= 0x01; ++ i2c_w1(&sd->gspca_dev, 0x01, comn); /* sctra */ ++ break; ++ case SENSOR_OV7630: + comn = 0x02; + if (!sd->vflip) + comn |= 0x80; +- } else { ++ i2c_w1(&sd->gspca_dev, 0x75, comn); ++ break; ++ default: ++/* case SENSOR_OV7648: */ + comn = 0x06; + if (sd->vflip) + comn |= 0x80; ++ i2c_w1(&sd->gspca_dev, 0x75, comn); ++ break; + } +- i2c_w1(&sd->gspca_dev, 0x75, comn); + } + + static void setsharpness(struct sd *sd) +From: Jean-François Moine +Date: Thu, 18 Feb 2010 17:56:33 +0000 (-0300) +Subject: V4L/DVB: gspca - sonixj: Set the vertical flip at capture start for all sensors +X-Git-Url: http://git.linuxtv.org/jfrancois/gspca.git?a=commitdiff_plain;h=648399060d0e1dea17e6450e1cfa7629c5aa704b + +V4L/DVB: gspca - sonixj: Set the vertical flip at capture start for all sensors + +Signed-off-by: Jean-François Moine +Signed-off-by: Mauro Carvalho Chehab +--- + +diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c +index fea8075..2ccddcd 100644 +--- a/drivers/media/video/gspca/sonixj.c ++++ b/drivers/media/video/gspca/sonixj.c +@@ -2082,7 +2082,6 @@ static int sd_start(struct gspca_dev *gspca_dev) + reg17 = 0x64; /* 640 MCKSIZE */ + break; + case SENSOR_OV7630: +- setvflip(sd); + reg17 = 0xe2; + reg1 = 0x44; + break; +@@ -2154,11 +2153,7 @@ static int sd_start(struct gspca_dev *gspca_dev) + reg_w1(gspca_dev, 0x17, reg17); + reg_w1(gspca_dev, 0x01, reg1); + +- switch (sd->sensor) { +- case SENSOR_OV7630: +- setvflip(sd); +- break; +- } ++ setvflip(sd); + setbrightness(gspca_dev); + setcontrast(gspca_dev); + setautogain(gspca_dev); From: Jean-François Moine Date: Wed, 17 Mar 2010 18:25:32 +0000 (-0300) Subject: V4L/DVB: gspca - sonixj: More static const and better array initialization diff --git a/linux-2.6-v4l-dvb-rebase-gspca-to-latest.patch b/linux-2.6-v4l-dvb-rebase-gspca-to-latest.patch new file mode 100644 index 0000000..aed3aec --- /dev/null +++ b/linux-2.6-v4l-dvb-rebase-gspca-to-latest.patch @@ -0,0 +1,15766 @@ +From 1adf4b1e78d961522e4d598dd82456630458e617 Mon Sep 17 00:00:00 2001 +From: Hans de Goede +Date: Sat, 20 Feb 2010 21:40:38 +0100 +Subject: [PATCH] Rebase gspca webcamdrivers to latest upstream + +This adds drivers for benq, cpia1 and sn9c2028 bridges + support for +more devices and many bugfixes in other drivers. + +This also adds support for the button found on most cameras in most +gspca sub drivers. 
+--- + drivers/media/video/gspca/Kconfig | 46 +- + drivers/media/video/gspca/Makefile | 8 + + drivers/media/video/gspca/benq.c | 322 ++++ + drivers/media/video/gspca/coarse_expo_autogain.h | 116 ++ + drivers/media/video/gspca/conex.c | 4 +- + drivers/media/video/gspca/cpia1.c | 2022 ++++++++++++++++++++ + drivers/media/video/gspca/etoms.c | 10 +- + drivers/media/video/gspca/gl860/gl860.c | 10 +- + drivers/media/video/gspca/gspca.c | 252 +++- + drivers/media/video/gspca/gspca.h | 30 +- + drivers/media/video/gspca/m5602/m5602_mt9m111.c | 16 +- + drivers/media/video/gspca/m5602/m5602_ov7660.c | 102 +- + drivers/media/video/gspca/m5602/m5602_ov7660.h | 2 +- + drivers/media/video/gspca/m5602/m5602_ov9650.c | 2 +- + drivers/media/video/gspca/m5602/m5602_po1030.c | 14 +- + drivers/media/video/gspca/m5602/m5602_s5k4aa.c | 2 +- + drivers/media/video/gspca/m5602/m5602_s5k83a.c | 2 +- + drivers/media/video/gspca/mars.c | 40 +- + drivers/media/video/gspca/mr97310a.c | 226 ++- + drivers/media/video/gspca/ov519.c | 170 ++- + drivers/media/video/gspca/ov534.c | 1253 ++----------- + drivers/media/video/gspca/ov534_9.c | 1477 ++++++++++++++ + drivers/media/video/gspca/pac207.c | 25 +- + drivers/media/video/gspca/pac7302.c | 432 ++--- + drivers/media/video/gspca/pac7311.c | 272 ++-- + drivers/media/video/gspca/pac_common.h | 9 +- + drivers/media/video/gspca/sn9c2028.c | 757 ++++++++ + drivers/media/video/gspca/sn9c2028.h | 51 + + drivers/media/video/gspca/sn9c20x.c | 33 +- + drivers/media/video/gspca/sonixb.c | 481 ++++-- + drivers/media/video/gspca/sonixj.c | 423 ++++- + drivers/media/video/gspca/spca500.c | 21 +- + drivers/media/video/gspca/spca501.c | 27 +- + drivers/media/video/gspca/spca505.c | 2 +- + drivers/media/video/gspca/spca506.c | 4 +- + drivers/media/video/gspca/spca508.c | 48 +- + drivers/media/video/gspca/spca561.c | 35 +- + drivers/media/video/gspca/sq905c.c | 45 +- + drivers/media/video/gspca/stk014.c | 2 +- + drivers/media/video/gspca/stv0680.c | 36 +- + drivers/media/video/gspca/stv06xx/stv06xx.c | 32 +- + drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c | 22 + + drivers/media/video/gspca/sunplus.c | 71 +- + drivers/media/video/gspca/t613.c | 102 +- + drivers/media/video/gspca/tv8532.c | 8 +- + drivers/media/video/gspca/vc032x.c | 1163 +++++++++++- + drivers/media/video/gspca/zc3xx.c | 712 ++++---- + include/linux/videodev2.h | 2 + + 48 files changed, 8692 insertions(+), 2249 deletions(-) + create mode 100644 drivers/media/video/gspca/benq.c + create mode 100644 drivers/media/video/gspca/coarse_expo_autogain.h + create mode 100644 drivers/media/video/gspca/cpia1.c + create mode 100644 drivers/media/video/gspca/ov534_9.c + create mode 100644 drivers/media/video/gspca/sn9c2028.c + create mode 100644 drivers/media/video/gspca/sn9c2028.h + +diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig +index 609d65b..e0060c1 100644 +--- a/drivers/media/video/gspca/Kconfig ++++ b/drivers/media/video/gspca/Kconfig +@@ -21,6 +21,15 @@ source "drivers/media/video/gspca/m5602/Kconfig" + source "drivers/media/video/gspca/stv06xx/Kconfig" + source "drivers/media/video/gspca/gl860/Kconfig" + ++config USB_GSPCA_BENQ ++ tristate "Benq USB Camera Driver" ++ depends on VIDEO_V4L2 && USB_GSPCA ++ help ++ Say Y here if you want support for the Benq DC E300 camera. ++ ++ To compile this driver as a module, choose M here: the ++ module will be called gspca_benq. 
++ + config USB_GSPCA_CONEX + tristate "Conexant Camera Driver" + depends on VIDEO_V4L2 && USB_GSPCA +@@ -30,6 +39,17 @@ config USB_GSPCA_CONEX + To compile this driver as a module, choose M here: the + module will be called gspca_conex. + ++config USB_GSPCA_CPIA1 ++ tristate "cpia CPiA (version 1) Camera Driver" ++ depends on VIDEO_V4L2 && USB_GSPCA ++ help ++ Say Y here if you want support for USB cameras based on the cpia ++ CPiA chip. Note that you need atleast version 0.6.4 of libv4l for ++ applications to understand the videoformat generated by this driver. ++ ++ To compile this driver as a module, choose M here: the ++ module will be called gspca_cpia1. ++ + config USB_GSPCA_ETOMS + tristate "Etoms USB Camera Driver" + depends on VIDEO_V4L2 && USB_GSPCA +@@ -86,15 +106,25 @@ config USB_GSPCA_OV519 + module will be called gspca_ov519. + + config USB_GSPCA_OV534 +- tristate "OV534 USB Camera Driver" ++ tristate "OV534 OV772x USB Camera Driver" + depends on VIDEO_V4L2 && USB_GSPCA + help +- Say Y here if you want support for cameras based on the OV534 chip. +- (e.g. Sony Playstation EYE) ++ Say Y here if you want support for cameras based on the OV534 chip ++ and sensor OV772x (e.g. Sony Playstation EYE) + + To compile this driver as a module, choose M here: the + module will be called gspca_ov534. + ++config USB_GSPCA_OV534_9 ++ tristate "OV534 OV965x USB Camera Driver" ++ depends on VIDEO_V4L2 && USB_GSPCA ++ help ++ Say Y here if you want support for cameras based on the OV534 chip ++ and sensor OV965x (e.g. Hercules Dualpix) ++ ++ To compile this driver as a module, choose M here: the ++ module will be called gspca_ov534_9. ++ + config USB_GSPCA_PAC207 + tristate "Pixart PAC207 USB Camera Driver" + depends on VIDEO_V4L2 && USB_GSPCA +@@ -122,6 +152,16 @@ config USB_GSPCA_PAC7311 + To compile this driver as a module, choose M here: the + module will be called gspca_pac7311. + ++config USB_GSPCA_SN9C2028 ++ tristate "SONIX Dual-Mode USB Camera Driver" ++ depends on VIDEO_V4L2 && USB_GSPCA ++ help ++ Say Y here if you want streaming support for Sonix SN9C2028 cameras. ++ These are supported as stillcams in libgphoto2/camlibs/sonix. ++ ++ To compile this driver as a module, choose M here: the ++ module will be called gspca_sn9c2028. 
++ + config USB_GSPCA_SN9C20X + tristate "SN9C20X USB Camera Driver" + depends on VIDEO_V4L2 && USB_GSPCA +diff --git a/drivers/media/video/gspca/Makefile b/drivers/media/video/gspca/Makefile +index ff2c727..6e4cf1c 100644 +--- a/drivers/media/video/gspca/Makefile ++++ b/drivers/media/video/gspca/Makefile +@@ -1,5 +1,7 @@ + obj-$(CONFIG_USB_GSPCA) += gspca_main.o ++obj-$(CONFIG_USB_GSPCA_BENQ) += gspca_benq.o + obj-$(CONFIG_USB_GSPCA_CONEX) += gspca_conex.o ++obj-$(CONFIG_USB_GSPCA_CPIA1) += gspca_cpia1.o + obj-$(CONFIG_USB_GSPCA_ETOMS) += gspca_etoms.o + obj-$(CONFIG_USB_GSPCA_FINEPIX) += gspca_finepix.o + obj-$(CONFIG_USB_GSPCA_JEILINJ) += gspca_jeilinj.o +@@ -7,9 +9,11 @@ obj-$(CONFIG_USB_GSPCA_MARS) += gspca_mars.o + obj-$(CONFIG_USB_GSPCA_MR97310A) += gspca_mr97310a.o + obj-$(CONFIG_USB_GSPCA_OV519) += gspca_ov519.o + obj-$(CONFIG_USB_GSPCA_OV534) += gspca_ov534.o ++obj-$(CONFIG_USB_GSPCA_OV534_9) += gspca_ov534_9.o + obj-$(CONFIG_USB_GSPCA_PAC207) += gspca_pac207.o + obj-$(CONFIG_USB_GSPCA_PAC7302) += gspca_pac7302.o + obj-$(CONFIG_USB_GSPCA_PAC7311) += gspca_pac7311.o ++obj-$(CONFIG_USB_GSPCA_SN9C2028) += gspca_sn9c2028.o + obj-$(CONFIG_USB_GSPCA_SN9C20X) += gspca_sn9c20x.o + obj-$(CONFIG_USB_GSPCA_SONIXB) += gspca_sonixb.o + obj-$(CONFIG_USB_GSPCA_SONIXJ) += gspca_sonixj.o +@@ -30,7 +34,9 @@ obj-$(CONFIG_USB_GSPCA_VC032X) += gspca_vc032x.o + obj-$(CONFIG_USB_GSPCA_ZC3XX) += gspca_zc3xx.o + + gspca_main-objs := gspca.o ++gspca_benq-objs := benq.o + gspca_conex-objs := conex.o ++gspca_cpia1-objs := cpia1.o + gspca_etoms-objs := etoms.o + gspca_finepix-objs := finepix.o + gspca_jeilinj-objs := jeilinj.o +@@ -38,9 +44,11 @@ gspca_mars-objs := mars.o + gspca_mr97310a-objs := mr97310a.o + gspca_ov519-objs := ov519.o + gspca_ov534-objs := ov534.o ++gspca_ov534_9-objs := ov534_9.o + gspca_pac207-objs := pac207.o + gspca_pac7302-objs := pac7302.o + gspca_pac7311-objs := pac7311.o ++gspca_sn9c2028-objs := sn9c2028.o + gspca_sn9c20x-objs := sn9c20x.o + gspca_sonixb-objs := sonixb.o + gspca_sonixj-objs := sonixj.o +diff --git a/drivers/media/video/gspca/benq.c b/drivers/media/video/gspca/benq.c +new file mode 100644 +index 0000000..43ac4af +--- /dev/null ++++ b/drivers/media/video/gspca/benq.c +@@ -0,0 +1,322 @@ ++/* ++ * Benq DC E300 subdriver ++ * ++ * Copyright (C) 2009 Jean-Francois Moine (http://moinejf.free.fr) ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++#define MODULE_NAME "benq" ++ ++#include "gspca.h" ++ ++MODULE_AUTHOR("Jean-Francois Moine "); ++MODULE_DESCRIPTION("Benq DC E300 USB Camera Driver"); ++MODULE_LICENSE("GPL"); ++ ++/* specific webcam descriptor */ ++struct sd { ++ struct gspca_dev gspca_dev; /* !! 
must be the first item */ ++}; ++ ++/* V4L2 controls supported by the driver */ ++static const struct ctrl sd_ctrls[] = { ++}; ++ ++static const struct v4l2_pix_format vga_mode[] = { ++ {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, ++ .bytesperline = 320, ++ .sizeimage = 320 * 240 * 3 / 8 + 590, ++ .colorspace = V4L2_COLORSPACE_JPEG}, ++}; ++ ++static void sd_isoc_irq(struct urb *urb); ++ ++/* -- write a register -- */ ++static void reg_w(struct gspca_dev *gspca_dev, ++ u16 value, u16 index) ++{ ++ struct usb_device *dev = gspca_dev->dev; ++ int ret; ++ ++ if (gspca_dev->usb_err < 0) ++ return; ++ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ++ 0x02, ++ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ++ value, ++ index, ++ NULL, ++ 0, ++ 500); ++ if (ret < 0) { ++ PDEBUG(D_ERR, "reg_w err %d", ret); ++ gspca_dev->usb_err = ret; ++ } ++} ++ ++/* this function is called at probe time */ ++static int sd_config(struct gspca_dev *gspca_dev, ++ const struct usb_device_id *id) ++{ ++ gspca_dev->cam.cam_mode = vga_mode; ++ gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode); ++ gspca_dev->cam.no_urb_create = 1; ++ gspca_dev->cam.reverse_alts = 1; ++ return 0; ++} ++ ++/* this function is called at probe and resume time */ ++static int sd_init(struct gspca_dev *gspca_dev) ++{ ++ return 0; ++} ++ ++static int sd_isoc_init(struct gspca_dev *gspca_dev) ++{ ++ int ret; ++ ++ ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, ++ gspca_dev->nbalt - 1); ++ if (ret < 0) { ++ err("usb_set_interface failed"); ++ return ret; ++ } ++/* reg_w(gspca_dev, 0x0003, 0x0002); */ ++ return 0; ++} ++ ++/* -- start the camera -- */ ++static int sd_start(struct gspca_dev *gspca_dev) ++{ ++ struct urb *urb; ++ int i, n; ++ ++ /* create 4 URBs - 2 on endpoint 0x83 and 2 on 0x082 */ ++#if MAX_NURBS < 4 ++#error "Not enough URBs in the gspca table" ++#endif ++#define SD_PKT_SZ 64 ++#define SD_NPKT 32 ++ for (n = 0; n < 4; n++) { ++ urb = usb_alloc_urb(SD_NPKT, GFP_KERNEL); ++ if (!urb) { ++ err("usb_alloc_urb failed"); ++ return -ENOMEM; ++ } ++ gspca_dev->urb[n] = urb; ++ urb->transfer_buffer = usb_buffer_alloc(gspca_dev->dev, ++ SD_PKT_SZ * SD_NPKT, ++ GFP_KERNEL, ++ &urb->transfer_dma); ++ ++ if (urb->transfer_buffer == NULL) { ++ err("usb_buffer_alloc failed"); ++ return -ENOMEM; ++ } ++ urb->dev = gspca_dev->dev; ++ urb->context = gspca_dev; ++ urb->transfer_buffer_length = SD_PKT_SZ * SD_NPKT; ++ urb->pipe = usb_rcvisocpipe(gspca_dev->dev, ++ n & 1 ? 
0x82 : 0x83); ++ urb->transfer_flags = URB_ISO_ASAP ++ | URB_NO_TRANSFER_DMA_MAP; ++ urb->interval = 1; ++ urb->complete = sd_isoc_irq; ++ urb->number_of_packets = SD_NPKT; ++ for (i = 0; i < SD_NPKT; i++) { ++ urb->iso_frame_desc[i].length = SD_PKT_SZ; ++ urb->iso_frame_desc[i].offset = SD_PKT_SZ * i; ++ } ++ } ++ ++ return gspca_dev->usb_err; ++} ++ ++static void sd_stopN(struct gspca_dev *gspca_dev) ++{ ++ reg_w(gspca_dev, 0x003c, 0x0003); ++ reg_w(gspca_dev, 0x003c, 0x0004); ++ reg_w(gspca_dev, 0x003c, 0x0005); ++ reg_w(gspca_dev, 0x003c, 0x0006); ++ reg_w(gspca_dev, 0x003c, 0x0007); ++ usb_set_interface(gspca_dev->dev, gspca_dev->iface, gspca_dev->nbalt - 1); ++} ++ ++static void sd_pkt_scan(struct gspca_dev *gspca_dev, ++ u8 *data, /* isoc packet */ ++ int len) /* iso packet length */ ++{ ++ /* unused */ ++} ++ ++/* reception of an URB */ ++static void sd_isoc_irq(struct urb *urb) ++{ ++ struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context; ++ struct urb *urb0; ++ u8 *data; ++ int i, st; ++ ++ PDEBUG(D_PACK, "sd isoc irq"); ++ if (!gspca_dev->streaming) ++ return; ++ if (urb->status != 0) { ++ if (urb->status == -ESHUTDOWN) ++ return; /* disconnection */ ++#ifdef CONFIG_PM ++ if (gspca_dev->frozen) ++ return; ++#endif ++ PDEBUG(D_ERR|D_PACK, "urb status: %d", urb->status); ++ return; ++ } ++ ++ /* if this is a control URN (ep 0x83), wait */ ++ if (urb == gspca_dev->urb[0] || urb == gspca_dev->urb[2]) ++ return; ++ ++ /* scan both received URBs */ ++ if (urb == gspca_dev->urb[1]) ++ urb0 = gspca_dev->urb[0]; ++ else ++ urb0 = gspca_dev->urb[2]; ++ for (i = 0; i < urb->number_of_packets; i++) { ++ ++ /* check the packet status and length */ ++ if (urb0->iso_frame_desc[i].actual_length != SD_PKT_SZ ++ || urb->iso_frame_desc[i].actual_length != SD_PKT_SZ) { ++ PDEBUG(D_ERR, "ISOC bad lengths %d / %d", ++ urb0->iso_frame_desc[i].actual_length, ++ urb->iso_frame_desc[i].actual_length); ++ gspca_dev->last_packet_type = DISCARD_PACKET; ++ continue; ++ } ++ st = urb0->iso_frame_desc[i].status; ++ if (st == 0) ++ st = urb->iso_frame_desc[i].status; ++ if (st) { ++ PDEBUG(D_ERR, ++ "ISOC data error: [%d] status=%d", ++ i, st); ++ gspca_dev->last_packet_type = DISCARD_PACKET; ++ continue; ++ } ++ ++ /* ++ * The images are received in URBs of different endpoints ++ * (0x83 and 0x82). ++ * Image pieces in URBs of ep 0x83 are continuated in URBs of ++ * ep 0x82 of the same index. ++ * The packets in the URBs of endpoint 0x83 start with: ++ * - 80 ba/bb 00 00 = start of image followed by 'ff d8' ++ * - 04 ba/bb oo oo = image piece ++ * where 'oo oo' is the image offset ++ (not cheked) ++ * - (other -> bad frame) ++ * The images are JPEG encoded with full header and ++ * normal ff escape. ++ * The end of image ('ff d9') may occur in any URB. 
++ * (not cheked) ++ */ ++ data = (u8 *) urb0->transfer_buffer ++ + urb0->iso_frame_desc[i].offset; ++ if (data[0] == 0x80 && (data[1] & 0xfe) == 0xba) { ++ ++ /* new image */ ++ gspca_frame_add(gspca_dev, LAST_PACKET, ++ NULL, 0); ++ gspca_frame_add(gspca_dev, FIRST_PACKET, ++ data + 4, SD_PKT_SZ - 4); ++ } else if (data[0] == 0x04 && (data[1] & 0xfe) == 0xba) { ++ gspca_frame_add(gspca_dev, INTER_PACKET, ++ data + 4, SD_PKT_SZ - 4); ++ } else { ++ gspca_dev->last_packet_type = DISCARD_PACKET; ++ continue; ++ } ++ data = (u8 *) urb->transfer_buffer ++ + urb->iso_frame_desc[i].offset; ++ gspca_frame_add(gspca_dev, INTER_PACKET, ++ data, SD_PKT_SZ); ++ } ++ ++ /* resubmit the URBs */ ++ st = usb_submit_urb(urb0, GFP_ATOMIC); ++ if (st < 0) ++ PDEBUG(D_ERR|D_PACK, "usb_submit_urb(0) ret %d", st); ++ st = usb_submit_urb(urb, GFP_ATOMIC); ++ if (st < 0) ++ PDEBUG(D_ERR|D_PACK, "usb_submit_urb() ret %d", st); ++} ++ ++/* sub-driver description */ ++static const struct sd_desc sd_desc = { ++ .name = MODULE_NAME, ++ .ctrls = sd_ctrls, ++ .nctrls = ARRAY_SIZE(sd_ctrls), ++ .config = sd_config, ++ .init = sd_init, ++ .isoc_init = sd_isoc_init, ++ .start = sd_start, ++ .stopN = sd_stopN, ++ .pkt_scan = sd_pkt_scan, ++}; ++ ++/* -- module initialisation -- */ ++static const __devinitdata struct usb_device_id device_table[] = { ++ {USB_DEVICE(0x04a5, 0x3035)}, ++ {} ++}; ++MODULE_DEVICE_TABLE(usb, device_table); ++ ++/* -- device connect -- */ ++static int sd_probe(struct usb_interface *intf, ++ const struct usb_device_id *id) ++{ ++ return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), ++ THIS_MODULE); ++} ++ ++static struct usb_driver sd_driver = { ++ .name = MODULE_NAME, ++ .id_table = device_table, ++ .probe = sd_probe, ++ .disconnect = gspca_disconnect, ++#ifdef CONFIG_PM ++ .suspend = gspca_suspend, ++ .resume = gspca_resume, ++#endif ++}; ++ ++/* -- module insert / remove -- */ ++static int __init sd_mod_init(void) ++{ ++ int ret; ++ ++ ret = usb_register(&sd_driver); ++ if (ret < 0) ++ return ret; ++ info("registered"); ++ return 0; ++} ++static void __exit sd_mod_exit(void) ++{ ++ usb_deregister(&sd_driver); ++ info("deregistered"); ++} ++ ++module_init(sd_mod_init); ++module_exit(sd_mod_exit); +diff --git a/drivers/media/video/gspca/coarse_expo_autogain.h b/drivers/media/video/gspca/coarse_expo_autogain.h +new file mode 100644 +index 0000000..1cb9d94 +--- /dev/null ++++ b/drivers/media/video/gspca/coarse_expo_autogain.h +@@ -0,0 +1,116 @@ ++/* ++ * Auto gain algorithm for camera's with a coarse exposure control ++ * ++ * Copyright (C) 2010 Hans de Goede ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++/* Autogain + exposure algorithm for cameras with a coarse exposure control ++ (usually this means we can only control the clockdiv to change exposure) ++ As changing the clockdiv so that the fps drops from 30 to 15 fps for ++ example, will lead to a huge exposure change (it effectively doubles), ++ this algorithm normally tries to only adjust the gain (between 40 and ++ 80 %) and if that does not help, only then changes exposure. This leads ++ to a much more stable image then using the knee algorithm which at ++ certain points of the knee graph will only try to adjust exposure, ++ which leads to oscilating as one exposure step is huge. ++ ++ Note this assumes that the sd struct for the cam in question has ++ exp_too_high_cnt and exp_too_high_cnt int members for use by this function. ++ ++ Returns 0 if no changes were made, 1 if the gain and or exposure settings ++ where changed. */ ++static int gspca_coarse_grained_expo_autogain(struct gspca_dev *gspca_dev, ++ int avg_lum, int desired_avg_lum, int deadzone) ++{ ++ int i, steps, gain, orig_gain, exposure, orig_exposure; ++ int gain_low, gain_high; ++ const struct ctrl *gain_ctrl = NULL; ++ const struct ctrl *exposure_ctrl = NULL; ++ struct sd *sd = (struct sd *) gspca_dev; ++ int retval = 0; ++ ++ for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) { ++ if (gspca_dev->ctrl_dis & (1 << i)) ++ continue; ++ if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_GAIN) ++ gain_ctrl = &gspca_dev->sd_desc->ctrls[i]; ++ if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_EXPOSURE) ++ exposure_ctrl = &gspca_dev->sd_desc->ctrls[i]; ++ } ++ if (!gain_ctrl || !exposure_ctrl) { ++ PDEBUG(D_ERR, "Error: gspca_coarse_grained_expo_autogain " ++ "called on cam without gain or exposure"); ++ return 0; ++ } ++ ++ if (gain_ctrl->get(gspca_dev, &gain) || ++ exposure_ctrl->get(gspca_dev, &exposure)) ++ return 0; ++ ++ orig_gain = gain; ++ orig_exposure = exposure; ++ gain_low = ++ (gain_ctrl->qctrl.maximum - gain_ctrl->qctrl.minimum) / 5 * 2; ++ gain_low += gain_ctrl->qctrl.minimum; ++ gain_high = ++ (gain_ctrl->qctrl.maximum - gain_ctrl->qctrl.minimum) / 5 * 4; ++ gain_high += gain_ctrl->qctrl.minimum; ++ ++ /* If we are of a multiple of deadzone, do multiple steps to reach the ++ desired lumination fast (with the risc of a slight overshoot) */ ++ steps = (desired_avg_lum - avg_lum) / deadzone; ++ ++ PDEBUG(D_FRAM, "autogain: lum: %d, desired: %d, steps: %d", ++ avg_lum, desired_avg_lum, steps); ++ ++ if ((gain + steps) > gain_high && ++ sd->exposure < exposure_ctrl->qctrl.maximum) { ++ gain = gain_high; ++ sd->exp_too_low_cnt++; ++ } else if ((gain + steps) < gain_low && ++ sd->exposure > exposure_ctrl->qctrl.minimum) { ++ gain = gain_low; ++ sd->exp_too_high_cnt++; ++ } else { ++ gain += steps; ++ if (gain > gain_ctrl->qctrl.maximum) ++ gain = gain_ctrl->qctrl.maximum; ++ else if (gain < gain_ctrl->qctrl.minimum) ++ gain = gain_ctrl->qctrl.minimum; ++ sd->exp_too_high_cnt = 0; ++ sd->exp_too_low_cnt = 0; ++ } ++ ++ if (sd->exp_too_high_cnt > 3) { ++ exposure--; ++ sd->exp_too_high_cnt = 0; ++ } else if (sd->exp_too_low_cnt > 3) { ++ exposure++; ++ sd->exp_too_low_cnt = 0; ++ } ++ ++ if (gain != orig_gain) { ++ gain_ctrl->set(gspca_dev, gain); ++ retval = 1; ++ } ++ if (exposure != orig_exposure) { ++ exposure_ctrl->set(gspca_dev, 
exposure); ++ retval = 1; ++ } ++ ++ return retval; ++} +diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c +index c98b5d6..19fe6b2 100644 +--- a/drivers/media/video/gspca/conex.c ++++ b/drivers/media/video/gspca/conex.c +@@ -52,7 +52,7 @@ static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + { + { + .id = V4L2_CID_BRIGHTNESS, +@@ -1032,7 +1032,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev, + } + + /* sub-driver description */ +-static struct sd_desc sd_desc = { ++static const struct sd_desc sd_desc = { + .name = MODULE_NAME, + .ctrls = sd_ctrls, + .nctrls = ARRAY_SIZE(sd_ctrls), +diff --git a/drivers/media/video/gspca/cpia1.c b/drivers/media/video/gspca/cpia1.c +new file mode 100644 +index 0000000..7afdc58 +--- /dev/null ++++ b/drivers/media/video/gspca/cpia1.c +@@ -0,0 +1,2022 @@ ++/* ++ * cpia CPiA (1) gspca driver ++ * ++ * Copyright (C) 2010 Hans de Goede ++ * ++ * This module is adapted from the in kernel v4l1 cpia driver which is : ++ * ++ * (C) Copyright 1999-2000 Peter Pregler ++ * (C) Copyright 1999-2000 Scott J. Bertin ++ * (C) Copyright 1999-2000 Johannes Erdfelt ++ * (C) Copyright 2000 STMicroelectronics ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++#define MODULE_NAME "cpia1" ++ ++#include "gspca.h" ++ ++MODULE_AUTHOR("Hans de Goede "); ++MODULE_DESCRIPTION("Vision CPiA"); ++MODULE_LICENSE("GPL"); ++ ++/* constant value's */ ++#define MAGIC_0 0x19 ++#define MAGIC_1 0x68 ++#define DATA_IN 0xC0 ++#define DATA_OUT 0x40 ++#define VIDEOSIZE_QCIF 0 /* 176x144 */ ++#define VIDEOSIZE_CIF 1 /* 352x288 */ ++#define SUBSAMPLE_420 0 ++#define SUBSAMPLE_422 1 ++#define YUVORDER_YUYV 0 ++#define YUVORDER_UYVY 1 ++#define NOT_COMPRESSED 0 ++#define COMPRESSED 1 ++#define NO_DECIMATION 0 ++#define DECIMATION_ENAB 1 ++#define EOI 0xff /* End Of Image */ ++#define EOL 0xfd /* End Of Line */ ++#define FRAME_HEADER_SIZE 64 ++ ++/* Image grab modes */ ++#define CPIA_GRAB_SINGLE 0 ++#define CPIA_GRAB_CONTINEOUS 1 ++ ++/* Compression parameters */ ++#define CPIA_COMPRESSION_NONE 0 ++#define CPIA_COMPRESSION_AUTO 1 ++#define CPIA_COMPRESSION_MANUAL 2 ++#define CPIA_COMPRESSION_TARGET_QUALITY 0 ++#define CPIA_COMPRESSION_TARGET_FRAMERATE 1 ++ ++/* Return offsets for GetCameraState */ ++#define SYSTEMSTATE 0 ++#define GRABSTATE 1 ++#define STREAMSTATE 2 ++#define FATALERROR 3 ++#define CMDERROR 4 ++#define DEBUGFLAGS 5 ++#define VPSTATUS 6 ++#define ERRORCODE 7 ++ ++/* SystemState */ ++#define UNINITIALISED_STATE 0 ++#define PASS_THROUGH_STATE 1 ++#define LO_POWER_STATE 2 ++#define HI_POWER_STATE 3 ++#define WARM_BOOT_STATE 4 ++ ++/* GrabState */ ++#define GRAB_IDLE 0 ++#define GRAB_ACTIVE 1 ++#define GRAB_DONE 2 ++ ++/* StreamState */ ++#define STREAM_NOT_READY 0 ++#define STREAM_READY 1 ++#define STREAM_OPEN 2 ++#define STREAM_PAUSED 3 ++#define STREAM_FINISHED 4 ++ ++/* Fatal Error, CmdError, and DebugFlags */ ++#define CPIA_FLAG 1 ++#define SYSTEM_FLAG 2 ++#define INT_CTRL_FLAG 4 ++#define PROCESS_FLAG 8 ++#define COM_FLAG 16 ++#define VP_CTRL_FLAG 32 ++#define CAPTURE_FLAG 64 ++#define DEBUG_FLAG 128 ++ ++/* VPStatus */ ++#define VP_STATE_OK 0x00 ++ ++#define VP_STATE_FAILED_VIDEOINIT 0x01 ++#define VP_STATE_FAILED_AECACBINIT 0x02 ++#define VP_STATE_AEC_MAX 0x04 ++#define VP_STATE_ACB_BMAX 0x08 ++ ++#define VP_STATE_ACB_RMIN 0x10 ++#define VP_STATE_ACB_GMIN 0x20 ++#define VP_STATE_ACB_RMAX 0x40 ++#define VP_STATE_ACB_GMAX 0x80 ++ ++/* default (minimum) compensation values */ ++#define COMP_RED 220 ++#define COMP_GREEN1 214 ++#define COMP_GREEN2 COMP_GREEN1 ++#define COMP_BLUE 230 ++ ++/* exposure status */ ++#define EXPOSURE_VERY_LIGHT 0 ++#define EXPOSURE_LIGHT 1 ++#define EXPOSURE_NORMAL 2 ++#define EXPOSURE_DARK 3 ++#define EXPOSURE_VERY_DARK 4 ++ ++#define CPIA_MODULE_CPIA (0 << 5) ++#define CPIA_MODULE_SYSTEM (1 << 5) ++#define CPIA_MODULE_VP_CTRL (5 << 5) ++#define CPIA_MODULE_CAPTURE (6 << 5) ++#define CPIA_MODULE_DEBUG (7 << 5) ++ ++#define INPUT (DATA_IN << 8) ++#define OUTPUT (DATA_OUT << 8) ++ ++#define CPIA_COMMAND_GetCPIAVersion (INPUT | CPIA_MODULE_CPIA | 1) ++#define CPIA_COMMAND_GetPnPID (INPUT | CPIA_MODULE_CPIA | 2) ++#define CPIA_COMMAND_GetCameraStatus (INPUT | CPIA_MODULE_CPIA | 3) ++#define CPIA_COMMAND_GotoHiPower (OUTPUT | CPIA_MODULE_CPIA | 4) ++#define CPIA_COMMAND_GotoLoPower (OUTPUT | CPIA_MODULE_CPIA | 5) ++#define CPIA_COMMAND_GotoSuspend (OUTPUT | CPIA_MODULE_CPIA | 7) ++#define CPIA_COMMAND_GotoPassThrough (OUTPUT | CPIA_MODULE_CPIA | 8) ++#define CPIA_COMMAND_ModifyCameraStatus (OUTPUT | 
CPIA_MODULE_CPIA | 10) ++ ++#define CPIA_COMMAND_ReadVCRegs (INPUT | CPIA_MODULE_SYSTEM | 1) ++#define CPIA_COMMAND_WriteVCReg (OUTPUT | CPIA_MODULE_SYSTEM | 2) ++#define CPIA_COMMAND_ReadMCPorts (INPUT | CPIA_MODULE_SYSTEM | 3) ++#define CPIA_COMMAND_WriteMCPort (OUTPUT | CPIA_MODULE_SYSTEM | 4) ++#define CPIA_COMMAND_SetBaudRate (OUTPUT | CPIA_MODULE_SYSTEM | 5) ++#define CPIA_COMMAND_SetECPTiming (OUTPUT | CPIA_MODULE_SYSTEM | 6) ++#define CPIA_COMMAND_ReadIDATA (INPUT | CPIA_MODULE_SYSTEM | 7) ++#define CPIA_COMMAND_WriteIDATA (OUTPUT | CPIA_MODULE_SYSTEM | 8) ++#define CPIA_COMMAND_GenericCall (OUTPUT | CPIA_MODULE_SYSTEM | 9) ++#define CPIA_COMMAND_I2CStart (OUTPUT | CPIA_MODULE_SYSTEM | 10) ++#define CPIA_COMMAND_I2CStop (OUTPUT | CPIA_MODULE_SYSTEM | 11) ++#define CPIA_COMMAND_I2CWrite (OUTPUT | CPIA_MODULE_SYSTEM | 12) ++#define CPIA_COMMAND_I2CRead (INPUT | CPIA_MODULE_SYSTEM | 13) ++ ++#define CPIA_COMMAND_GetVPVersion (INPUT | CPIA_MODULE_VP_CTRL | 1) ++#define CPIA_COMMAND_ResetFrameCounter (INPUT | CPIA_MODULE_VP_CTRL | 2) ++#define CPIA_COMMAND_SetColourParams (OUTPUT | CPIA_MODULE_VP_CTRL | 3) ++#define CPIA_COMMAND_SetExposure (OUTPUT | CPIA_MODULE_VP_CTRL | 4) ++#define CPIA_COMMAND_SetColourBalance (OUTPUT | CPIA_MODULE_VP_CTRL | 6) ++#define CPIA_COMMAND_SetSensorFPS (OUTPUT | CPIA_MODULE_VP_CTRL | 7) ++#define CPIA_COMMAND_SetVPDefaults (OUTPUT | CPIA_MODULE_VP_CTRL | 8) ++#define CPIA_COMMAND_SetApcor (OUTPUT | CPIA_MODULE_VP_CTRL | 9) ++#define CPIA_COMMAND_SetFlickerCtrl (OUTPUT | CPIA_MODULE_VP_CTRL | 10) ++#define CPIA_COMMAND_SetVLOffset (OUTPUT | CPIA_MODULE_VP_CTRL | 11) ++#define CPIA_COMMAND_GetColourParams (INPUT | CPIA_MODULE_VP_CTRL | 16) ++#define CPIA_COMMAND_GetColourBalance (INPUT | CPIA_MODULE_VP_CTRL | 17) ++#define CPIA_COMMAND_GetExposure (INPUT | CPIA_MODULE_VP_CTRL | 18) ++#define CPIA_COMMAND_SetSensorMatrix (OUTPUT | CPIA_MODULE_VP_CTRL | 19) ++#define CPIA_COMMAND_ColourBars (OUTPUT | CPIA_MODULE_VP_CTRL | 25) ++#define CPIA_COMMAND_ReadVPRegs (INPUT | CPIA_MODULE_VP_CTRL | 30) ++#define CPIA_COMMAND_WriteVPReg (OUTPUT | CPIA_MODULE_VP_CTRL | 31) ++ ++#define CPIA_COMMAND_GrabFrame (OUTPUT | CPIA_MODULE_CAPTURE | 1) ++#define CPIA_COMMAND_UploadFrame (OUTPUT | CPIA_MODULE_CAPTURE | 2) ++#define CPIA_COMMAND_SetGrabMode (OUTPUT | CPIA_MODULE_CAPTURE | 3) ++#define CPIA_COMMAND_InitStreamCap (OUTPUT | CPIA_MODULE_CAPTURE | 4) ++#define CPIA_COMMAND_FiniStreamCap (OUTPUT | CPIA_MODULE_CAPTURE | 5) ++#define CPIA_COMMAND_StartStreamCap (OUTPUT | CPIA_MODULE_CAPTURE | 6) ++#define CPIA_COMMAND_EndStreamCap (OUTPUT | CPIA_MODULE_CAPTURE | 7) ++#define CPIA_COMMAND_SetFormat (OUTPUT | CPIA_MODULE_CAPTURE | 8) ++#define CPIA_COMMAND_SetROI (OUTPUT | CPIA_MODULE_CAPTURE | 9) ++#define CPIA_COMMAND_SetCompression (OUTPUT | CPIA_MODULE_CAPTURE | 10) ++#define CPIA_COMMAND_SetCompressionTarget (OUTPUT | CPIA_MODULE_CAPTURE | 11) ++#define CPIA_COMMAND_SetYUVThresh (OUTPUT | CPIA_MODULE_CAPTURE | 12) ++#define CPIA_COMMAND_SetCompressionParams (OUTPUT | CPIA_MODULE_CAPTURE | 13) ++#define CPIA_COMMAND_DiscardFrame (OUTPUT | CPIA_MODULE_CAPTURE | 14) ++#define CPIA_COMMAND_GrabReset (OUTPUT | CPIA_MODULE_CAPTURE | 15) ++ ++#define CPIA_COMMAND_OutputRS232 (OUTPUT | CPIA_MODULE_DEBUG | 1) ++#define CPIA_COMMAND_AbortProcess (OUTPUT | CPIA_MODULE_DEBUG | 4) ++#define CPIA_COMMAND_SetDramPage (OUTPUT | CPIA_MODULE_DEBUG | 5) ++#define CPIA_COMMAND_StartDramUpload (OUTPUT | CPIA_MODULE_DEBUG | 6) ++#define CPIA_COMMAND_StartDummyDtream (OUTPUT | 
CPIA_MODULE_DEBUG | 8) ++#define CPIA_COMMAND_AbortStream (OUTPUT | CPIA_MODULE_DEBUG | 9) ++#define CPIA_COMMAND_DownloadDRAM (OUTPUT | CPIA_MODULE_DEBUG | 10) ++#define CPIA_COMMAND_Null (OUTPUT | CPIA_MODULE_DEBUG | 11) ++ ++#define ROUND_UP_EXP_FOR_FLICKER 15 ++ ++/* Constants for automatic frame rate adjustment */ ++#define MAX_EXP 302 ++#define MAX_EXP_102 255 ++#define LOW_EXP 140 ++#define VERY_LOW_EXP 70 ++#define TC 94 ++#define EXP_ACC_DARK 50 ++#define EXP_ACC_LIGHT 90 ++#define HIGH_COMP_102 160 ++#define MAX_COMP 239 ++#define DARK_TIME 3 ++#define LIGHT_TIME 3 ++ ++#define FIRMWARE_VERSION(x, y) (sd->params.version.firmwareVersion == (x) && \ ++ sd->params.version.firmwareRevision == (y)) ++ ++/* Developer's Guide Table 5 p 3-34 ++ * indexed by [mains][sensorFps.baserate][sensorFps.divisor]*/ ++static u8 flicker_jumps[2][2][4] = ++{ { { 76, 38, 19, 9 }, { 92, 46, 23, 11 } }, ++ { { 64, 32, 16, 8 }, { 76, 38, 19, 9} } ++}; ++ ++struct cam_params { ++ struct { ++ u8 firmwareVersion; ++ u8 firmwareRevision; ++ u8 vcVersion; ++ u8 vcRevision; ++ } version; ++ struct { ++ u16 vendor; ++ u16 product; ++ u16 deviceRevision; ++ } pnpID; ++ struct { ++ u8 vpVersion; ++ u8 vpRevision; ++ u16 cameraHeadID; ++ } vpVersion; ++ struct { ++ u8 systemState; ++ u8 grabState; ++ u8 streamState; ++ u8 fatalError; ++ u8 cmdError; ++ u8 debugFlags; ++ u8 vpStatus; ++ u8 errorCode; ++ } status; ++ struct { ++ u8 brightness; ++ u8 contrast; ++ u8 saturation; ++ } colourParams; ++ struct { ++ u8 gainMode; ++ u8 expMode; ++ u8 compMode; ++ u8 centreWeight; ++ u8 gain; ++ u8 fineExp; ++ u8 coarseExpLo; ++ u8 coarseExpHi; ++ u8 redComp; ++ u8 green1Comp; ++ u8 green2Comp; ++ u8 blueComp; ++ } exposure; ++ struct { ++ u8 balanceMode; ++ u8 redGain; ++ u8 greenGain; ++ u8 blueGain; ++ } colourBalance; ++ struct { ++ u8 divisor; ++ u8 baserate; ++ } sensorFps; ++ struct { ++ u8 gain1; ++ u8 gain2; ++ u8 gain4; ++ u8 gain8; ++ } apcor; ++ struct { ++ u8 disabled; ++ u8 flickerMode; ++ u8 coarseJump; ++ u8 allowableOverExposure; ++ } flickerControl; ++ struct { ++ u8 gain1; ++ u8 gain2; ++ u8 gain4; ++ u8 gain8; ++ } vlOffset; ++ struct { ++ u8 mode; ++ u8 decimation; ++ } compression; ++ struct { ++ u8 frTargeting; ++ u8 targetFR; ++ u8 targetQ; ++ } compressionTarget; ++ struct { ++ u8 yThreshold; ++ u8 uvThreshold; ++ } yuvThreshold; ++ struct { ++ u8 hysteresis; ++ u8 threshMax; ++ u8 smallStep; ++ u8 largeStep; ++ u8 decimationHysteresis; ++ u8 frDiffStepThresh; ++ u8 qDiffStepThresh; ++ u8 decimationThreshMod; ++ } compressionParams; ++ struct { ++ u8 videoSize; /* CIF/QCIF */ ++ u8 subSample; ++ u8 yuvOrder; ++ } format; ++ struct { /* Intel QX3 specific data */ ++ u8 qx3_detected; /* a QX3 is present */ ++ u8 toplight; /* top light lit , R/W */ ++ u8 bottomlight; /* bottom light lit, R/W */ ++ u8 button; /* snapshot button pressed (R/O) */ ++ u8 cradled; /* microscope is in cradle (R/O) */ ++ } qx3; ++ struct { ++ u8 colStart; /* skip first 8*colStart pixels */ ++ u8 colEnd; /* finish at 8*colEnd pixels */ ++ u8 rowStart; /* skip first 4*rowStart lines */ ++ u8 rowEnd; /* finish at 4*rowEnd lines */ ++ } roi; ++ u8 ecpTiming; ++ u8 streamStartLine; ++}; ++ ++/* specific webcam descriptor */ ++struct sd { ++ struct gspca_dev gspca_dev; /* !! 
must be the first item */ ++ struct cam_params params; /* camera settings */ ++ ++ atomic_t cam_exposure; ++ atomic_t fps; ++ int exposure_count; ++ u8 exposure_status; ++ u8 mainsFreq; /* 0 = 50hz, 1 = 60hz */ ++ u8 first_frame; ++ u8 freq; ++}; ++ ++/* V4L2 controls supported by the driver */ ++static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val); ++static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val); ++static int sd_setsaturation(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getsaturation(struct gspca_dev *gspca_dev, __s32 *val); ++static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val); ++static int sd_setcomptarget(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getcomptarget(struct gspca_dev *gspca_dev, __s32 *val); ++ ++static struct ctrl sd_ctrls[] = { ++ { ++ { ++ .id = V4L2_CID_BRIGHTNESS, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Brightness", ++ .minimum = 0, ++ .maximum = 100, ++ .step = 1, ++#define BRIGHTNESS_DEF 50 ++ .default_value = BRIGHTNESS_DEF, ++ .flags = 0, ++ }, ++ .set = sd_setbrightness, ++ .get = sd_getbrightness, ++ }, ++ { ++ { ++ .id = V4L2_CID_CONTRAST, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Contrast", ++ .minimum = 0, ++ .maximum = 96, ++ .step = 8, ++#define CONTRAST_DEF 48 ++ .default_value = CONTRAST_DEF, ++ }, ++ .set = sd_setcontrast, ++ .get = sd_getcontrast, ++ }, ++ { ++ { ++ .id = V4L2_CID_SATURATION, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Saturation", ++ .minimum = 0, ++ .maximum = 100, ++ .step = 1, ++#define SATURATION_DEF 50 ++ .default_value = SATURATION_DEF, ++ }, ++ .set = sd_setsaturation, ++ .get = sd_getsaturation, ++ }, ++ { ++ { ++ .id = V4L2_CID_POWER_LINE_FREQUENCY, ++ .type = V4L2_CTRL_TYPE_MENU, ++ .name = "Light frequency filter", ++ .minimum = 0, ++ .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */ ++ .step = 1, ++#define FREQ_DEF 1 ++ .default_value = FREQ_DEF, ++ }, ++ .set = sd_setfreq, ++ .get = sd_getfreq, ++ }, ++ { ++ { ++#define V4L2_CID_COMP_TARGET V4L2_CID_PRIVATE_BASE ++ .id = V4L2_CID_COMP_TARGET, ++ .type = V4L2_CTRL_TYPE_MENU, ++ .name = "Compression Target", ++ .minimum = 0, ++ .maximum = 1, ++ .step = 1, ++#define COMP_TARGET_DEF CPIA_COMPRESSION_TARGET_QUALITY ++ .default_value = COMP_TARGET_DEF, ++ }, ++ .set = sd_setcomptarget, ++ .get = sd_getcomptarget, ++ }, ++}; ++ ++static const struct v4l2_pix_format mode[] = { ++ {160, 120, V4L2_PIX_FMT_CPIA1, V4L2_FIELD_NONE, ++ /* The sizeimage is trial and error, as with low framerates ++ the camera will pad out usb frames, making the image ++ data larger then strictly necessary */ ++ .bytesperline = 160, ++ .sizeimage = 65536, ++ .colorspace = V4L2_COLORSPACE_SRGB, ++ .priv = 3}, ++ {176, 144, V4L2_PIX_FMT_CPIA1, V4L2_FIELD_NONE, ++ .bytesperline = 172, ++ .sizeimage = 65536, ++ .colorspace = V4L2_COLORSPACE_SRGB, ++ .priv = 2}, ++ {320, 240, V4L2_PIX_FMT_CPIA1, V4L2_FIELD_NONE, ++ .bytesperline = 320, ++ .sizeimage = 262144, ++ .colorspace = V4L2_COLORSPACE_SRGB, ++ .priv = 1}, ++ {352, 288, V4L2_PIX_FMT_CPIA1, V4L2_FIELD_NONE, ++ .bytesperline = 352, ++ .sizeimage = 262144, ++ .colorspace = V4L2_COLORSPACE_SRGB, ++ .priv = 0}, ++}; ++ ++/********************************************************************** ++ * ++ * General functions ++ * ++ 
**********************************************************************/ ++ ++static int cpia_usb_transferCmd(struct gspca_dev *gspca_dev, u8 *command) ++{ ++ u8 requesttype; ++ unsigned int pipe; ++ int ret, databytes = command[6] | (command[7] << 8); ++ /* Sometimes we see spurious EPIPE errors */ ++ int retries = 3; ++ ++ if (command[0] == DATA_IN) { ++ pipe = usb_rcvctrlpipe(gspca_dev->dev, 0); ++ requesttype = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE; ++ } else if (command[0] == DATA_OUT) { ++ pipe = usb_sndctrlpipe(gspca_dev->dev, 0); ++ requesttype = USB_TYPE_VENDOR | USB_RECIP_DEVICE; ++ } else { ++ PDEBUG(D_ERR, "Unexpected first byte of command: %x", ++ command[0]); ++ return -EINVAL; ++ } ++ ++retry: ++ ret = usb_control_msg(gspca_dev->dev, pipe, ++ command[1], ++ requesttype, ++ command[2] | (command[3] << 8), ++ command[4] | (command[5] << 8), ++ gspca_dev->usb_buf, databytes, 1000); ++ ++ if (ret < 0) ++ PDEBUG(D_ERR, "usb_control_msg %02x, error %d", command[1], ++ ret); ++ ++ if (ret == -EPIPE && retries > 0) { ++ retries--; ++ goto retry; ++ } ++ ++ return (ret < 0) ? ret : 0; ++} ++ ++/* send an arbitrary command to the camera */ ++static int do_command(struct gspca_dev *gspca_dev, u16 command, ++ u8 a, u8 b, u8 c, u8 d) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ int ret, datasize; ++ u8 cmd[8]; ++ ++ switch (command) { ++ case CPIA_COMMAND_GetCPIAVersion: ++ case CPIA_COMMAND_GetPnPID: ++ case CPIA_COMMAND_GetCameraStatus: ++ case CPIA_COMMAND_GetVPVersion: ++ case CPIA_COMMAND_GetColourParams: ++ case CPIA_COMMAND_GetColourBalance: ++ case CPIA_COMMAND_GetExposure: ++ datasize = 8; ++ break; ++ case CPIA_COMMAND_ReadMCPorts: ++ case CPIA_COMMAND_ReadVCRegs: ++ datasize = 4; ++ break; ++ default: ++ datasize = 0; ++ break; ++ } ++ ++ cmd[0] = command >> 8; ++ cmd[1] = command & 0xff; ++ cmd[2] = a; ++ cmd[3] = b; ++ cmd[4] = c; ++ cmd[5] = d; ++ cmd[6] = datasize; ++ cmd[7] = 0; ++ ++ ret = cpia_usb_transferCmd(gspca_dev, cmd); ++ if (ret) ++ return ret; ++ ++ switch (command) { ++ case CPIA_COMMAND_GetCPIAVersion: ++ sd->params.version.firmwareVersion = gspca_dev->usb_buf[0]; ++ sd->params.version.firmwareRevision = gspca_dev->usb_buf[1]; ++ sd->params.version.vcVersion = gspca_dev->usb_buf[2]; ++ sd->params.version.vcRevision = gspca_dev->usb_buf[3]; ++ break; ++ case CPIA_COMMAND_GetPnPID: ++ sd->params.pnpID.vendor = ++ gspca_dev->usb_buf[0] | (gspca_dev->usb_buf[1] << 8); ++ sd->params.pnpID.product = ++ gspca_dev->usb_buf[2] | (gspca_dev->usb_buf[3] << 8); ++ sd->params.pnpID.deviceRevision = ++ gspca_dev->usb_buf[4] | (gspca_dev->usb_buf[5] << 8); ++ break; ++ case CPIA_COMMAND_GetCameraStatus: ++ sd->params.status.systemState = gspca_dev->usb_buf[0]; ++ sd->params.status.grabState = gspca_dev->usb_buf[1]; ++ sd->params.status.streamState = gspca_dev->usb_buf[2]; ++ sd->params.status.fatalError = gspca_dev->usb_buf[3]; ++ sd->params.status.cmdError = gspca_dev->usb_buf[4]; ++ sd->params.status.debugFlags = gspca_dev->usb_buf[5]; ++ sd->params.status.vpStatus = gspca_dev->usb_buf[6]; ++ sd->params.status.errorCode = gspca_dev->usb_buf[7]; ++ break; ++ case CPIA_COMMAND_GetVPVersion: ++ sd->params.vpVersion.vpVersion = gspca_dev->usb_buf[0]; ++ sd->params.vpVersion.vpRevision = gspca_dev->usb_buf[1]; ++ sd->params.vpVersion.cameraHeadID = ++ gspca_dev->usb_buf[2] | (gspca_dev->usb_buf[3] << 8); ++ break; ++ case CPIA_COMMAND_GetColourParams: ++ sd->params.colourParams.brightness = gspca_dev->usb_buf[0]; ++ sd->params.colourParams.contrast = 
gspca_dev->usb_buf[1]; ++ sd->params.colourParams.saturation = gspca_dev->usb_buf[2]; ++ break; ++ case CPIA_COMMAND_GetColourBalance: ++ sd->params.colourBalance.redGain = gspca_dev->usb_buf[0]; ++ sd->params.colourBalance.greenGain = gspca_dev->usb_buf[1]; ++ sd->params.colourBalance.blueGain = gspca_dev->usb_buf[2]; ++ break; ++ case CPIA_COMMAND_GetExposure: ++ sd->params.exposure.gain = gspca_dev->usb_buf[0]; ++ sd->params.exposure.fineExp = gspca_dev->usb_buf[1]; ++ sd->params.exposure.coarseExpLo = gspca_dev->usb_buf[2]; ++ sd->params.exposure.coarseExpHi = gspca_dev->usb_buf[3]; ++ sd->params.exposure.redComp = gspca_dev->usb_buf[4]; ++ sd->params.exposure.green1Comp = gspca_dev->usb_buf[5]; ++ sd->params.exposure.green2Comp = gspca_dev->usb_buf[6]; ++ sd->params.exposure.blueComp = gspca_dev->usb_buf[7]; ++ break; ++ ++ case CPIA_COMMAND_ReadMCPorts: ++ if (!sd->params.qx3.qx3_detected) ++ break; ++ /* test button press */ ++ sd->params.qx3.button = ((gspca_dev->usb_buf[1] & 0x02) == 0); ++ if (sd->params.qx3.button) { ++ /* button pressed - unlock the latch */ ++ do_command(gspca_dev, CPIA_COMMAND_WriteMCPort, ++ 3, 0xDF, 0xDF, 0); ++ do_command(gspca_dev, CPIA_COMMAND_WriteMCPort, ++ 3, 0xFF, 0xFF, 0); ++ } ++ ++ /* test whether microscope is cradled */ ++ sd->params.qx3.cradled = ((gspca_dev->usb_buf[2] & 0x40) == 0); ++ break; ++ } ++ ++ return 0; ++} ++ ++/* send a command to the camera with an additional data transaction */ ++static int do_command_extended(struct gspca_dev *gspca_dev, u16 command, ++ u8 a, u8 b, u8 c, u8 d, ++ u8 e, u8 f, u8 g, u8 h, ++ u8 i, u8 j, u8 k, u8 l) ++{ ++ u8 cmd[8]; ++ ++ cmd[0] = command >> 8; ++ cmd[1] = command & 0xff; ++ cmd[2] = a; ++ cmd[3] = b; ++ cmd[4] = c; ++ cmd[5] = d; ++ cmd[6] = 8; ++ cmd[7] = 0; ++ gspca_dev->usb_buf[0] = e; ++ gspca_dev->usb_buf[1] = f; ++ gspca_dev->usb_buf[2] = g; ++ gspca_dev->usb_buf[3] = h; ++ gspca_dev->usb_buf[4] = i; ++ gspca_dev->usb_buf[5] = j; ++ gspca_dev->usb_buf[6] = k; ++ gspca_dev->usb_buf[7] = l; ++ ++ return cpia_usb_transferCmd(gspca_dev, cmd); ++} ++ ++/* find_over_exposure ++ * Finds a suitable value of OverExposure for use with SetFlickerCtrl ++ * Some calculation is required because this value changes with the brightness ++ * set with SetColourParameters ++ * ++ * Parameters: Brightness - last brightness value set with SetColourParameters ++ * ++ * Returns: OverExposure value to use with SetFlickerCtrl ++ */ ++#define FLICKER_MAX_EXPOSURE 250 ++#define FLICKER_ALLOWABLE_OVER_EXPOSURE 146 ++#define FLICKER_BRIGHTNESS_CONSTANT 59 ++static int find_over_exposure(int brightness) ++{ ++ int MaxAllowableOverExposure, OverExposure; ++ ++ MaxAllowableOverExposure = FLICKER_MAX_EXPOSURE - brightness - ++ FLICKER_BRIGHTNESS_CONSTANT; ++ ++ if (MaxAllowableOverExposure < FLICKER_ALLOWABLE_OVER_EXPOSURE) ++ OverExposure = MaxAllowableOverExposure; ++ else ++ OverExposure = FLICKER_ALLOWABLE_OVER_EXPOSURE; ++ ++ return OverExposure; ++} ++#undef FLICKER_MAX_EXPOSURE ++#undef FLICKER_ALLOWABLE_OVER_EXPOSURE ++#undef FLICKER_BRIGHTNESS_CONSTANT ++ ++/* initialise cam_data structure */ ++static void reset_camera_params(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ struct cam_params *params = &sd->params; ++ ++ /* The following parameter values are the defaults from ++ * "Software Developer's Guide for CPiA Cameras". Any changes ++ * to the defaults are noted in comments. 
*/ ++ params->colourParams.brightness = BRIGHTNESS_DEF; ++ params->colourParams.contrast = CONTRAST_DEF; ++ params->colourParams.saturation = SATURATION_DEF; ++ params->exposure.gainMode = 4; ++ params->exposure.expMode = 2; /* AEC */ ++ params->exposure.compMode = 1; ++ params->exposure.centreWeight = 1; ++ params->exposure.gain = 0; ++ params->exposure.fineExp = 0; ++ params->exposure.coarseExpLo = 185; ++ params->exposure.coarseExpHi = 0; ++ params->exposure.redComp = COMP_RED; ++ params->exposure.green1Comp = COMP_GREEN1; ++ params->exposure.green2Comp = COMP_GREEN2; ++ params->exposure.blueComp = COMP_BLUE; ++ params->colourBalance.balanceMode = 2; /* ACB */ ++ params->colourBalance.redGain = 32; ++ params->colourBalance.greenGain = 6; ++ params->colourBalance.blueGain = 92; ++ params->apcor.gain1 = 0x18; ++ params->apcor.gain2 = 0x16; ++ params->apcor.gain4 = 0x24; ++ params->apcor.gain8 = 0x34; ++ params->flickerControl.flickerMode = 0; ++ params->flickerControl.disabled = 1; ++ ++ params->flickerControl.coarseJump = ++ flicker_jumps[sd->mainsFreq] ++ [params->sensorFps.baserate] ++ [params->sensorFps.divisor]; ++ params->flickerControl.allowableOverExposure = ++ find_over_exposure(params->colourParams.brightness); ++ params->vlOffset.gain1 = 20; ++ params->vlOffset.gain2 = 24; ++ params->vlOffset.gain4 = 26; ++ params->vlOffset.gain8 = 26; ++ params->compressionParams.hysteresis = 3; ++ params->compressionParams.threshMax = 11; ++ params->compressionParams.smallStep = 1; ++ params->compressionParams.largeStep = 3; ++ params->compressionParams.decimationHysteresis = 2; ++ params->compressionParams.frDiffStepThresh = 5; ++ params->compressionParams.qDiffStepThresh = 3; ++ params->compressionParams.decimationThreshMod = 2; ++ /* End of default values from Software Developer's Guide */ ++ ++ /* Set Sensor FPS to 15fps. This seems better than 30fps ++ * for indoor lighting. 
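++ * (Assumed mapping, not stated here: baserate 1 selects the 30 fps sensor
++ * clock and each divisor step halves it, so divisor 1 gives 15 fps.)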
*/ ++ params->sensorFps.divisor = 1; ++ params->sensorFps.baserate = 1; ++ ++ params->yuvThreshold.yThreshold = 6; /* From windows driver */ ++ params->yuvThreshold.uvThreshold = 6; /* From windows driver */ ++ ++ params->format.subSample = SUBSAMPLE_420; ++ params->format.yuvOrder = YUVORDER_YUYV; ++ ++ params->compression.mode = CPIA_COMPRESSION_AUTO; ++ params->compression.decimation = NO_DECIMATION; ++ ++ params->compressionTarget.frTargeting = COMP_TARGET_DEF; ++ params->compressionTarget.targetFR = 15; /* From windows driver */ ++ params->compressionTarget.targetQ = 5; /* From windows driver */ ++ ++ params->qx3.qx3_detected = 0; ++ params->qx3.toplight = 0; ++ params->qx3.bottomlight = 0; ++ params->qx3.button = 0; ++ params->qx3.cradled = 0; ++} ++ ++static void printstatus(struct cam_params *params) ++{ ++ PDEBUG(D_PROBE, "status: %02x %02x %02x %02x %02x %02x %02x %02x", ++ params->status.systemState, params->status.grabState, ++ params->status.streamState, params->status.fatalError, ++ params->status.cmdError, params->status.debugFlags, ++ params->status.vpStatus, params->status.errorCode); ++} ++ ++static int goto_low_power(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ int ret; ++ ++ ret = do_command(gspca_dev, CPIA_COMMAND_GotoLoPower, 0, 0, 0, 0); ++ if (ret) ++ return ret; ++ ++ do_command(gspca_dev, CPIA_COMMAND_GetCameraStatus, 0, 0, 0, 0); ++ if (ret) ++ return ret; ++ ++ if (sd->params.status.systemState != LO_POWER_STATE) { ++ if (sd->params.status.systemState != WARM_BOOT_STATE) { ++ PDEBUG(D_ERR, ++ "unexpected state after lo power cmd: %02x", ++ sd->params.status.systemState); ++ printstatus(&sd->params); ++ } ++ return -EIO; ++ } ++ ++ PDEBUG(D_CONF, "camera now in LOW power state"); ++ return 0; ++} ++ ++static int goto_high_power(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ int ret; ++ ++ ret = do_command(gspca_dev, CPIA_COMMAND_GotoHiPower, 0, 0, 0, 0); ++ if (ret) ++ return ret; ++ ++ msleep_interruptible(40); /* windows driver does it too */ ++ ++ if (signal_pending(current)) ++ return -EINTR; ++ ++ do_command(gspca_dev, CPIA_COMMAND_GetCameraStatus, 0, 0, 0, 0); ++ if (ret) ++ return ret; ++ ++ if (sd->params.status.systemState != HI_POWER_STATE) { ++ PDEBUG(D_ERR, "unexpected state after hi power cmd: %02x", ++ sd->params.status.systemState); ++ printstatus(&sd->params); ++ return -EIO; ++ } ++ ++ PDEBUG(D_CONF, "camera now in HIGH power state"); ++ return 0; ++} ++ ++static int get_version_information(struct gspca_dev *gspca_dev) ++{ ++ int ret; ++ ++ /* GetCPIAVersion */ ++ ret = do_command(gspca_dev, CPIA_COMMAND_GetCPIAVersion, 0, 0, 0, 0); ++ if (ret) ++ return ret; ++ ++ /* GetPnPID */ ++ return do_command(gspca_dev, CPIA_COMMAND_GetPnPID, 0, 0, 0, 0); ++} ++ ++static int save_camera_state(struct gspca_dev *gspca_dev) ++{ ++ int ret; ++ ++ ret = do_command(gspca_dev, CPIA_COMMAND_GetColourBalance, 0, 0, 0, 0); ++ if (ret) ++ return ret; ++ ++ return do_command(gspca_dev, CPIA_COMMAND_GetExposure, 0, 0, 0, 0); ++} ++ ++int command_setformat(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ int ret; ++ ++ ret = do_command(gspca_dev, CPIA_COMMAND_SetFormat, ++ sd->params.format.videoSize, ++ sd->params.format.subSample, ++ sd->params.format.yuvOrder, 0); ++ if (ret) ++ return ret; ++ ++ return do_command(gspca_dev, CPIA_COMMAND_SetROI, ++ sd->params.roi.colStart, sd->params.roi.colEnd, ++ sd->params.roi.rowStart, sd->params.roi.rowEnd); ++} ++ ++int 
command_setcolourparams(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ return do_command(gspca_dev, CPIA_COMMAND_SetColourParams, ++ sd->params.colourParams.brightness, ++ sd->params.colourParams.contrast, ++ sd->params.colourParams.saturation, 0); ++} ++ ++int command_setapcor(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ return do_command(gspca_dev, CPIA_COMMAND_SetApcor, ++ sd->params.apcor.gain1, ++ sd->params.apcor.gain2, ++ sd->params.apcor.gain4, ++ sd->params.apcor.gain8); ++} ++ ++int command_setvloffset(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ return do_command(gspca_dev, CPIA_COMMAND_SetVLOffset, ++ sd->params.vlOffset.gain1, ++ sd->params.vlOffset.gain2, ++ sd->params.vlOffset.gain4, ++ sd->params.vlOffset.gain8); ++} ++ ++int command_setexposure(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ int ret; ++ ++ ret = do_command_extended(gspca_dev, CPIA_COMMAND_SetExposure, ++ sd->params.exposure.gainMode, ++ 1, ++ sd->params.exposure.compMode, ++ sd->params.exposure.centreWeight, ++ sd->params.exposure.gain, ++ sd->params.exposure.fineExp, ++ sd->params.exposure.coarseExpLo, ++ sd->params.exposure.coarseExpHi, ++ sd->params.exposure.redComp, ++ sd->params.exposure.green1Comp, ++ sd->params.exposure.green2Comp, ++ sd->params.exposure.blueComp); ++ if (ret) ++ return ret; ++ ++ if (sd->params.exposure.expMode != 1) { ++ ret = do_command_extended(gspca_dev, CPIA_COMMAND_SetExposure, ++ 0, ++ sd->params.exposure.expMode, ++ 0, 0, ++ sd->params.exposure.gain, ++ sd->params.exposure.fineExp, ++ sd->params.exposure.coarseExpLo, ++ sd->params.exposure.coarseExpHi, ++ 0, 0, 0, 0); ++ } ++ ++ return ret; ++} ++ ++int command_setcolourbalance(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ if (sd->params.colourBalance.balanceMode == 1) { ++ int ret; ++ ++ ret = do_command(gspca_dev, CPIA_COMMAND_SetColourBalance, ++ 1, ++ sd->params.colourBalance.redGain, ++ sd->params.colourBalance.greenGain, ++ sd->params.colourBalance.blueGain); ++ if (ret) ++ return ret; ++ ++ return do_command(gspca_dev, CPIA_COMMAND_SetColourBalance, ++ 3, 0, 0, 0); ++ } ++ if (sd->params.colourBalance.balanceMode == 2) { ++ return do_command(gspca_dev, CPIA_COMMAND_SetColourBalance, ++ 2, 0, 0, 0); ++ } ++ if (sd->params.colourBalance.balanceMode == 3) { ++ return do_command(gspca_dev, CPIA_COMMAND_SetColourBalance, ++ 3, 0, 0, 0); ++ } ++ ++ return -EINVAL; ++} ++ ++int command_setcompressiontarget(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ return do_command(gspca_dev, CPIA_COMMAND_SetCompressionTarget, ++ sd->params.compressionTarget.frTargeting, ++ sd->params.compressionTarget.targetFR, ++ sd->params.compressionTarget.targetQ, 0); ++} ++ ++int command_setyuvtresh(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ return do_command(gspca_dev, CPIA_COMMAND_SetYUVThresh, ++ sd->params.yuvThreshold.yThreshold, ++ sd->params.yuvThreshold.uvThreshold, 0, 0); ++} ++ ++int command_setcompressionparams(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ return do_command_extended(gspca_dev, ++ CPIA_COMMAND_SetCompressionParams, ++ 0, 0, 0, 0, ++ sd->params.compressionParams.hysteresis, ++ sd->params.compressionParams.threshMax, ++ sd->params.compressionParams.smallStep, ++ sd->params.compressionParams.largeStep, ++ 
sd->params.compressionParams.decimationHysteresis, ++ sd->params.compressionParams.frDiffStepThresh, ++ sd->params.compressionParams.qDiffStepThresh, ++ sd->params.compressionParams.decimationThreshMod); ++} ++ ++int command_setcompression(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ return do_command(gspca_dev, CPIA_COMMAND_SetCompression, ++ sd->params.compression.mode, ++ sd->params.compression.decimation, 0, 0); ++} ++ ++int command_setsensorfps(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ return do_command(gspca_dev, CPIA_COMMAND_SetSensorFPS, ++ sd->params.sensorFps.divisor, ++ sd->params.sensorFps.baserate, 0, 0); ++} ++ ++int command_setflickerctrl(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ return do_command(gspca_dev, CPIA_COMMAND_SetFlickerCtrl, ++ sd->params.flickerControl.flickerMode, ++ sd->params.flickerControl.coarseJump, ++ sd->params.flickerControl.allowableOverExposure, ++ 0); ++} ++ ++int command_setecptiming(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ return do_command(gspca_dev, CPIA_COMMAND_SetECPTiming, ++ sd->params.ecpTiming, 0, 0, 0); ++} ++ ++int command_pause(struct gspca_dev *gspca_dev) ++{ ++ return do_command(gspca_dev, CPIA_COMMAND_EndStreamCap, 0, 0, 0, 0); ++} ++ ++int command_resume(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ return do_command(gspca_dev, CPIA_COMMAND_InitStreamCap, ++ 0, sd->params.streamStartLine, 0, 0); ++} ++ ++int command_setlights(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ int ret, p1, p2; ++ ++ if (!sd->params.qx3.qx3_detected) ++ return 0; ++ ++ p1 = (sd->params.qx3.bottomlight == 0) << 1; ++ p2 = (sd->params.qx3.toplight == 0) << 3; ++ ++ ret = do_command(gspca_dev, CPIA_COMMAND_WriteVCReg, ++ 0x90, 0x8F, 0x50, 0); ++ if (ret) ++ return ret; ++ ++ return do_command(gspca_dev, CPIA_COMMAND_WriteMCPort, 2, 0, ++ p1 | p2 | 0xE0, 0); ++} ++ ++static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply) ++{ ++ /* Everything in here is from the Windows driver */ ++/* define for compgain calculation */ ++#if 0 /* keep */ ++#define COMPGAIN(base, curexp, newexp) \ ++ (u8) ((((float) base - 128.0) * ((float) curexp / (float) newexp)) + 128.5) ++#define EXP_FROM_COMP(basecomp, curcomp, curexp) \ ++ (u16)((float)curexp * (float)(u8)(curcomp + 128) / \ ++ (float)(u8)(basecomp - 128)) ++#else ++ /* equivalent functions without floating point math */ ++#define COMPGAIN(base, curexp, newexp) \ ++ (u8)(128 + (((u32)(2*(base-128)*curexp + newexp)) / (2 * newexp))) ++#define EXP_FROM_COMP(basecomp, curcomp, curexp) \ ++ (u16)(((u32)(curexp * (u8)(curcomp + 128)) / (u8)(basecomp - 128))) ++#endif ++ ++ struct sd *sd = (struct sd *) gspca_dev; ++ int currentexp = sd->params.exposure.coarseExpLo + ++ sd->params.exposure.coarseExpHi * 256; ++ int ret, startexp; ++ ++ if (on) { ++ int cj = sd->params.flickerControl.coarseJump; ++ sd->params.flickerControl.flickerMode = 1; ++ sd->params.flickerControl.disabled = 0; ++ if (sd->params.exposure.expMode != 2) { ++ sd->params.exposure.expMode = 2; ++ sd->exposure_status = EXPOSURE_NORMAL; ++ } ++ currentexp = currentexp << sd->params.exposure.gain; ++ sd->params.exposure.gain = 0; ++ /* round down current exposure to nearest value */ ++ startexp = (currentexp + ROUND_UP_EXP_FOR_FLICKER) / cj; ++ if (startexp < 1) ++ startexp = 1; ++ startexp = (startexp * cj) - 1; ++ if 
(FIRMWARE_VERSION(1, 2)) ++ while (startexp > MAX_EXP_102) ++ startexp -= cj; ++ else ++ while (startexp > MAX_EXP) ++ startexp -= cj; ++ sd->params.exposure.coarseExpLo = startexp & 0xff; ++ sd->params.exposure.coarseExpHi = startexp >> 8; ++ if (currentexp > startexp) { ++ if (currentexp > (2 * startexp)) ++ currentexp = 2 * startexp; ++ sd->params.exposure.redComp = ++ COMPGAIN(COMP_RED, currentexp, startexp); ++ sd->params.exposure.green1Comp = ++ COMPGAIN(COMP_GREEN1, currentexp, startexp); ++ sd->params.exposure.green2Comp = ++ COMPGAIN(COMP_GREEN2, currentexp, startexp); ++ sd->params.exposure.blueComp = ++ COMPGAIN(COMP_BLUE, currentexp, startexp); ++ } else { ++ sd->params.exposure.redComp = COMP_RED; ++ sd->params.exposure.green1Comp = COMP_GREEN1; ++ sd->params.exposure.green2Comp = COMP_GREEN2; ++ sd->params.exposure.blueComp = COMP_BLUE; ++ } ++ if (FIRMWARE_VERSION(1, 2)) ++ sd->params.exposure.compMode = 0; ++ else ++ sd->params.exposure.compMode = 1; ++ ++ sd->params.apcor.gain1 = 0x18; ++ sd->params.apcor.gain2 = 0x18; ++ sd->params.apcor.gain4 = 0x16; ++ sd->params.apcor.gain8 = 0x14; ++ } else { ++ sd->params.flickerControl.flickerMode = 0; ++ sd->params.flickerControl.disabled = 1; ++ /* Average equivalent coarse for each comp channel */ ++ startexp = EXP_FROM_COMP(COMP_RED, ++ sd->params.exposure.redComp, currentexp); ++ startexp += EXP_FROM_COMP(COMP_GREEN1, ++ sd->params.exposure.green1Comp, currentexp); ++ startexp += EXP_FROM_COMP(COMP_GREEN2, ++ sd->params.exposure.green2Comp, currentexp); ++ startexp += EXP_FROM_COMP(COMP_BLUE, ++ sd->params.exposure.blueComp, currentexp); ++ startexp = startexp >> 2; ++ while (startexp > MAX_EXP && sd->params.exposure.gain < ++ sd->params.exposure.gainMode - 1) { ++ startexp = startexp >> 1; ++ ++sd->params.exposure.gain; ++ } ++ if (FIRMWARE_VERSION(1, 2) && startexp > MAX_EXP_102) ++ startexp = MAX_EXP_102; ++ if (startexp > MAX_EXP) ++ startexp = MAX_EXP; ++ sd->params.exposure.coarseExpLo = startexp & 0xff; ++ sd->params.exposure.coarseExpHi = startexp >> 8; ++ sd->params.exposure.redComp = COMP_RED; ++ sd->params.exposure.green1Comp = COMP_GREEN1; ++ sd->params.exposure.green2Comp = COMP_GREEN2; ++ sd->params.exposure.blueComp = COMP_BLUE; ++ sd->params.exposure.compMode = 1; ++ sd->params.apcor.gain1 = 0x18; ++ sd->params.apcor.gain2 = 0x16; ++ sd->params.apcor.gain4 = 0x24; ++ sd->params.apcor.gain8 = 0x34; ++ } ++ sd->params.vlOffset.gain1 = 20; ++ sd->params.vlOffset.gain2 = 24; ++ sd->params.vlOffset.gain4 = 26; ++ sd->params.vlOffset.gain8 = 26; ++ ++ if (apply) { ++ ret = command_setexposure(gspca_dev); ++ if (ret) ++ return ret; ++ ++ ret = command_setapcor(gspca_dev); ++ if (ret) ++ return ret; ++ ++ ret = command_setvloffset(gspca_dev); ++ if (ret) ++ return ret; ++ ++ ret = command_setflickerctrl(gspca_dev); ++ if (ret) ++ return ret; ++ } ++ ++ return 0; ++#undef EXP_FROM_COMP ++#undef COMPGAIN ++} ++ ++/* monitor the exposure and adjust the sensor frame rate if needed */ ++static void monitor_exposure(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ u8 exp_acc, bcomp, gain, coarseL, cmd[8]; ++ int ret, light_exp, dark_exp, very_dark_exp; ++ int old_exposure, new_exposure, framerate; ++ int setfps = 0, setexp = 0, setflicker = 0; ++ ++ /* get necessary stats and register settings from camera */ ++ /* do_command can't handle this, so do it ourselves */ ++ cmd[0] = CPIA_COMMAND_ReadVPRegs >> 8; ++ cmd[1] = CPIA_COMMAND_ReadVPRegs & 0xff; ++ cmd[2] = 30; ++ cmd[3] = 4; ++ cmd[4] = 9; 
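++ /* Same 8-byte layout cpia_usb_transferCmd() expects from do_command():
++  * cmd[0..1] select the CPiA procedure (ReadVPRegs), cmd[2..5] are sent
++  * as wValue/wIndex (here the four VP register addresses 30, 4, 9, 8),
++  * and cmd[6..7] give the little-endian length of the reply that comes
++  * back in gspca_dev->usb_buf. */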
++ cmd[5] = 8; ++ cmd[6] = 8; ++ cmd[7] = 0; ++ ret = cpia_usb_transferCmd(gspca_dev, cmd); ++ if (ret) { ++ PDEBUG(D_ERR, "ReadVPRegs(30,4,9,8) - failed: %d", ret); ++ return; ++ } ++ exp_acc = gspca_dev->usb_buf[0]; ++ bcomp = gspca_dev->usb_buf[1]; ++ gain = gspca_dev->usb_buf[2]; ++ coarseL = gspca_dev->usb_buf[3]; ++ ++ light_exp = sd->params.colourParams.brightness + ++ TC - 50 + EXP_ACC_LIGHT; ++ if (light_exp > 255) ++ light_exp = 255; ++ dark_exp = sd->params.colourParams.brightness + ++ TC - 50 - EXP_ACC_DARK; ++ if (dark_exp < 0) ++ dark_exp = 0; ++ very_dark_exp = dark_exp / 2; ++ ++ old_exposure = sd->params.exposure.coarseExpHi * 256 + ++ sd->params.exposure.coarseExpLo; ++ ++ if (!sd->params.flickerControl.disabled) { ++ /* Flicker control on */ ++ int max_comp = FIRMWARE_VERSION(1, 2) ? MAX_COMP : ++ HIGH_COMP_102; ++ bcomp += 128; /* decode */ ++ if (bcomp >= max_comp && exp_acc < dark_exp) { ++ /* dark */ ++ if (exp_acc < very_dark_exp) { ++ /* very dark */ ++ if (sd->exposure_status == EXPOSURE_VERY_DARK) ++ ++sd->exposure_count; ++ else { ++ sd->exposure_status = ++ EXPOSURE_VERY_DARK; ++ sd->exposure_count = 1; ++ } ++ } else { ++ /* just dark */ ++ if (sd->exposure_status == EXPOSURE_DARK) ++ ++sd->exposure_count; ++ else { ++ sd->exposure_status = EXPOSURE_DARK; ++ sd->exposure_count = 1; ++ } ++ } ++ } else if (old_exposure <= LOW_EXP || exp_acc > light_exp) { ++ /* light */ ++ if (old_exposure <= VERY_LOW_EXP) { ++ /* very light */ ++ if (sd->exposure_status == EXPOSURE_VERY_LIGHT) ++ ++sd->exposure_count; ++ else { ++ sd->exposure_status = ++ EXPOSURE_VERY_LIGHT; ++ sd->exposure_count = 1; ++ } ++ } else { ++ /* just light */ ++ if (sd->exposure_status == EXPOSURE_LIGHT) ++ ++sd->exposure_count; ++ else { ++ sd->exposure_status = EXPOSURE_LIGHT; ++ sd->exposure_count = 1; ++ } ++ } ++ } else { ++ /* not dark or light */ ++ sd->exposure_status = EXPOSURE_NORMAL; ++ } ++ } else { ++ /* Flicker control off */ ++ if (old_exposure >= MAX_EXP && exp_acc < dark_exp) { ++ /* dark */ ++ if (exp_acc < very_dark_exp) { ++ /* very dark */ ++ if (sd->exposure_status == EXPOSURE_VERY_DARK) ++ ++sd->exposure_count; ++ else { ++ sd->exposure_status = ++ EXPOSURE_VERY_DARK; ++ sd->exposure_count = 1; ++ } ++ } else { ++ /* just dark */ ++ if (sd->exposure_status == EXPOSURE_DARK) ++ ++sd->exposure_count; ++ else { ++ sd->exposure_status = EXPOSURE_DARK; ++ sd->exposure_count = 1; ++ } ++ } ++ } else if (old_exposure <= LOW_EXP || exp_acc > light_exp) { ++ /* light */ ++ if (old_exposure <= VERY_LOW_EXP) { ++ /* very light */ ++ if (sd->exposure_status == EXPOSURE_VERY_LIGHT) ++ ++sd->exposure_count; ++ else { ++ sd->exposure_status = ++ EXPOSURE_VERY_LIGHT; ++ sd->exposure_count = 1; ++ } ++ } else { ++ /* just light */ ++ if (sd->exposure_status == EXPOSURE_LIGHT) ++ ++sd->exposure_count; ++ else { ++ sd->exposure_status = EXPOSURE_LIGHT; ++ sd->exposure_count = 1; ++ } ++ } ++ } else { ++ /* not dark or light */ ++ sd->exposure_status = EXPOSURE_NORMAL; ++ } ++ } ++ ++ framerate = atomic_read(&sd->fps); ++ if (framerate > 30 || framerate < 1) ++ framerate = 1; ++ ++ if (!sd->params.flickerControl.disabled) { ++ /* Flicker control on */ ++ if ((sd->exposure_status == EXPOSURE_VERY_DARK || ++ sd->exposure_status == EXPOSURE_DARK) && ++ sd->exposure_count >= DARK_TIME * framerate && ++ sd->params.sensorFps.divisor < 3) { ++ ++ /* dark for too long */ ++ ++sd->params.sensorFps.divisor; ++ setfps = 1; ++ ++ sd->params.flickerControl.coarseJump = ++ flicker_jumps[sd->mainsFreq] ++ 
[sd->params.sensorFps.baserate] ++ [sd->params.sensorFps.divisor]; ++ setflicker = 1; ++ ++ new_exposure = sd->params.flickerControl.coarseJump-1; ++ while (new_exposure < old_exposure / 2) ++ new_exposure += ++ sd->params.flickerControl.coarseJump; ++ sd->params.exposure.coarseExpLo = new_exposure & 0xff; ++ sd->params.exposure.coarseExpHi = new_exposure >> 8; ++ setexp = 1; ++ sd->exposure_status = EXPOSURE_NORMAL; ++ PDEBUG(D_CONF, "Automatically decreasing sensor_fps"); ++ ++ } else if ((sd->exposure_status == EXPOSURE_VERY_LIGHT || ++ sd->exposure_status == EXPOSURE_LIGHT) && ++ sd->exposure_count >= LIGHT_TIME * framerate && ++ sd->params.sensorFps.divisor > 0) { ++ ++ /* light for too long */ ++ int max_exp = FIRMWARE_VERSION(1, 2) ? MAX_EXP_102 : ++ MAX_EXP; ++ --sd->params.sensorFps.divisor; ++ setfps = 1; ++ ++ sd->params.flickerControl.coarseJump = ++ flicker_jumps[sd->mainsFreq] ++ [sd->params.sensorFps.baserate] ++ [sd->params.sensorFps.divisor]; ++ setflicker = 1; ++ ++ new_exposure = sd->params.flickerControl.coarseJump-1; ++ while (new_exposure < 2 * old_exposure && ++ new_exposure + ++ sd->params.flickerControl.coarseJump < max_exp) ++ new_exposure += ++ sd->params.flickerControl.coarseJump; ++ sd->params.exposure.coarseExpLo = new_exposure & 0xff; ++ sd->params.exposure.coarseExpHi = new_exposure >> 8; ++ setexp = 1; ++ sd->exposure_status = EXPOSURE_NORMAL; ++ PDEBUG(D_CONF, "Automatically increasing sensor_fps"); ++ } ++ } else { ++ /* Flicker control off */ ++ if ((sd->exposure_status == EXPOSURE_VERY_DARK || ++ sd->exposure_status == EXPOSURE_DARK) && ++ sd->exposure_count >= DARK_TIME * framerate && ++ sd->params.sensorFps.divisor < 3) { ++ ++ /* dark for too long */ ++ ++sd->params.sensorFps.divisor; ++ setfps = 1; ++ ++ if (sd->params.exposure.gain > 0) { ++ --sd->params.exposure.gain; ++ setexp = 1; ++ } ++ sd->exposure_status = EXPOSURE_NORMAL; ++ PDEBUG(D_CONF, "Automatically decreasing sensor_fps"); ++ ++ } else if ((sd->exposure_status == EXPOSURE_VERY_LIGHT || ++ sd->exposure_status == EXPOSURE_LIGHT) && ++ sd->exposure_count >= LIGHT_TIME * framerate && ++ sd->params.sensorFps.divisor > 0) { ++ ++ /* light for too long */ ++ --sd->params.sensorFps.divisor; ++ setfps = 1; ++ ++ if (sd->params.exposure.gain < ++ sd->params.exposure.gainMode - 1) { ++ ++sd->params.exposure.gain; ++ setexp = 1; ++ } ++ sd->exposure_status = EXPOSURE_NORMAL; ++ PDEBUG(D_CONF, "Automatically increasing sensor_fps"); ++ } ++ } ++ ++ if (setexp) ++ command_setexposure(gspca_dev); ++ ++ if (setfps) ++ command_setsensorfps(gspca_dev); ++ ++ if (setflicker) ++ command_setflickerctrl(gspca_dev); ++} ++ ++/*-----------------------------------------------------------------*/ ++/* if flicker is switched off, this function switches it back on.It checks, ++ however, that conditions are suitable before restarting it. ++ This should only be called for firmware version 1.2. 
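++   (If the camera's reported exposure no longer sits on a valid flicker
++   exposure, flicker control is assumed to have been dropped and is marked
++   disabled; once the programmed coarse exposure is comfortably above
++   coarseJump again, set_flicker() is called to switch it back on.)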
++ ++ It also adjust the colour balance when an exposure step is detected - as ++ long as flicker is running ++*/ ++static void restart_flicker(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ int cam_exposure, old_exp; ++ ++ if (!FIRMWARE_VERSION(1, 2)) ++ return; ++ ++ cam_exposure = atomic_read(&sd->cam_exposure); ++ ++ if (sd->params.flickerControl.flickerMode == 0 || ++ cam_exposure == 0) ++ return; ++ ++ old_exp = sd->params.exposure.coarseExpLo + ++ sd->params.exposure.coarseExpHi*256; ++ /* ++ see how far away camera exposure is from a valid ++ flicker exposure value ++ */ ++ cam_exposure %= sd->params.flickerControl.coarseJump; ++ if (!sd->params.flickerControl.disabled && ++ cam_exposure <= sd->params.flickerControl.coarseJump - 3) { ++ /* Flicker control auto-disabled */ ++ sd->params.flickerControl.disabled = 1; ++ } ++ ++ if (sd->params.flickerControl.disabled && ++ old_exp > sd->params.flickerControl.coarseJump + ++ ROUND_UP_EXP_FOR_FLICKER) { ++ /* exposure is now high enough to switch ++ flicker control back on */ ++ set_flicker(gspca_dev, 1, 1); ++ } ++} ++ ++/* this function is called at probe time */ ++static int sd_config(struct gspca_dev *gspca_dev, ++ const struct usb_device_id *id) ++{ ++ struct cam *cam; ++ ++ reset_camera_params(gspca_dev); ++ ++ PDEBUG(D_PROBE, "cpia CPiA camera detected (vid/pid 0x%04X:0x%04X)", ++ id->idVendor, id->idProduct); ++ ++ cam = &gspca_dev->cam; ++ cam->cam_mode = mode; ++ cam->nmodes = ARRAY_SIZE(mode); ++ ++ sd_setfreq(gspca_dev, FREQ_DEF); ++ ++ return 0; ++} ++ ++/* -- start the camera -- */ ++static int sd_start(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ int priv, ret; ++ ++ /* Start the camera in low power mode */ ++ if (goto_low_power(gspca_dev)) { ++ if (sd->params.status.systemState != WARM_BOOT_STATE) { ++ PDEBUG(D_ERR, "unexpected systemstate: %02x", ++ sd->params.status.systemState); ++ printstatus(&sd->params); ++ return -ENODEV; ++ } ++ ++ /* FIXME: this is just dirty trial and error */ ++ ret = goto_high_power(gspca_dev); ++ if (ret) ++ return ret; ++ ++ ret = do_command(gspca_dev, CPIA_COMMAND_DiscardFrame, ++ 0, 0, 0, 0); ++ if (ret) ++ return ret; ++ ++ ret = goto_low_power(gspca_dev); ++ if (ret) ++ return ret; ++ } ++ ++ /* procedure described in developer's guide p3-28 */ ++ ++ /* Check the firmware version. 
*/ ++ sd->params.version.firmwareVersion = 0; ++ get_version_information(gspca_dev); ++ if (sd->params.version.firmwareVersion != 1) { ++ PDEBUG(D_ERR, "only firmware version 1 is supported (got: %d)", ++ sd->params.version.firmwareVersion); ++ return -ENODEV; ++ } ++ ++ /* A bug in firmware 1-02 limits gainMode to 2 */ ++ if (sd->params.version.firmwareRevision <= 2 && ++ sd->params.exposure.gainMode > 2) { ++ sd->params.exposure.gainMode = 2; ++ } ++ ++ /* set QX3 detected flag */ ++ sd->params.qx3.qx3_detected = (sd->params.pnpID.vendor == 0x0813 && ++ sd->params.pnpID.product == 0x0001); ++ ++ /* The fatal error checking should be done after ++ * the camera powers up (developer's guide p 3-38) */ ++ ++ /* Set streamState before transition to high power to avoid bug ++ * in firmware 1-02 */ ++ ret = do_command(gspca_dev, CPIA_COMMAND_ModifyCameraStatus, ++ STREAMSTATE, 0, STREAM_NOT_READY, 0); ++ if (ret) ++ return ret; ++ ++ /* GotoHiPower */ ++ ret = goto_high_power(gspca_dev); ++ if (ret) ++ return ret; ++ ++ /* Check the camera status */ ++ ret = do_command(gspca_dev, CPIA_COMMAND_GetCameraStatus, 0, 0, 0, 0); ++ if (ret) ++ return ret; ++ ++ if (sd->params.status.fatalError) { ++ PDEBUG(D_ERR, "fatal_error: %04x, vp_status: %04x", ++ sd->params.status.fatalError, ++ sd->params.status.vpStatus); ++ return -EIO; ++ } ++ ++ /* VPVersion can't be retrieved before the camera is in HiPower, ++ * so get it here instead of in get_version_information. */ ++ ret = do_command(gspca_dev, CPIA_COMMAND_GetVPVersion, 0, 0, 0, 0); ++ if (ret) ++ return ret; ++ ++ /* Determine video mode settings */ ++ sd->params.streamStartLine = 120; ++ ++ priv = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; ++ if (priv & 0x01) { /* crop */ ++ sd->params.roi.colStart = 2; ++ sd->params.roi.rowStart = 6; ++ } else { ++ sd->params.roi.colStart = 0; ++ sd->params.roi.rowStart = 0; ++ } ++ ++ if (priv & 0x02) { /* quarter */ ++ sd->params.format.videoSize = VIDEOSIZE_QCIF; ++ sd->params.roi.colStart /= 2; ++ sd->params.roi.rowStart /= 2; ++ sd->params.streamStartLine /= 2; ++ } else ++ sd->params.format.videoSize = VIDEOSIZE_CIF; ++ ++ sd->params.roi.colEnd = sd->params.roi.colStart + ++ (gspca_dev->width >> 3); ++ sd->params.roi.rowEnd = sd->params.roi.rowStart + ++ (gspca_dev->height >> 2); ++ ++ /* And now set the camera to a known state */ ++ ret = do_command(gspca_dev, CPIA_COMMAND_SetGrabMode, ++ CPIA_GRAB_CONTINEOUS, 0, 0, 0); ++ if (ret) ++ return ret; ++ /* We start with compression disabled, as we need one uncompressed ++ frame to handle later compressed frames */ ++ ret = do_command(gspca_dev, CPIA_COMMAND_SetCompression, ++ CPIA_COMPRESSION_NONE, ++ NO_DECIMATION, 0, 0); ++ if (ret) ++ return ret; ++ ret = command_setcompressiontarget(gspca_dev); ++ if (ret) ++ return ret; ++ ret = command_setcolourparams(gspca_dev); ++ if (ret) ++ return ret; ++ ret = command_setformat(gspca_dev); ++ if (ret) ++ return ret; ++ ret = command_setyuvtresh(gspca_dev); ++ if (ret) ++ return ret; ++ ret = command_setecptiming(gspca_dev); ++ if (ret) ++ return ret; ++ ret = command_setcompressionparams(gspca_dev); ++ if (ret) ++ return ret; ++ ret = command_setexposure(gspca_dev); ++ if (ret) ++ return ret; ++ ret = command_setcolourbalance(gspca_dev); ++ if (ret) ++ return ret; ++ ret = command_setsensorfps(gspca_dev); ++ if (ret) ++ return ret; ++ ret = command_setapcor(gspca_dev); ++ if (ret) ++ return ret; ++ ret = command_setflickerctrl(gspca_dev); ++ if (ret) ++ return ret; ++ ret = command_setvloffset(gspca_dev); 
++ if (ret) ++ return ret; ++ ++ /* Start stream */ ++ ret = command_resume(gspca_dev); ++ if (ret) ++ return ret; ++ ++ /* Wait 6 frames before turning compression on for the sensor to get ++ all settings and AEC/ACB to settle */ ++ sd->first_frame = 6; ++ sd->exposure_status = EXPOSURE_NORMAL; ++ sd->exposure_count = 0; ++ atomic_set(&sd->cam_exposure, 0); ++ atomic_set(&sd->fps, 0); ++ ++ return 0; ++} ++ ++static void sd_stopN(struct gspca_dev *gspca_dev) ++{ ++ command_pause(gspca_dev); ++ ++ /* save camera state for later open (developers guide ch 3.5.3) */ ++ save_camera_state(gspca_dev); ++ ++ /* GotoLoPower */ ++ goto_low_power(gspca_dev); ++ ++ /* Update the camera status */ ++ do_command(gspca_dev, CPIA_COMMAND_GetCameraStatus, 0, 0, 0, 0); ++} ++ ++/* this function is called at probe and resume time */ ++static int sd_init(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ int ret; ++ ++ /* Start / Stop the camera to make sure we are talking to ++ a supported camera, and to get some information from it ++ to print. */ ++ ret = sd_start(gspca_dev); ++ if (ret) ++ return ret; ++ ++ sd_stopN(gspca_dev); ++ ++ PDEBUG(D_PROBE, "CPIA Version: %d.%02d (%d.%d)", ++ sd->params.version.firmwareVersion, ++ sd->params.version.firmwareRevision, ++ sd->params.version.vcVersion, ++ sd->params.version.vcRevision); ++ PDEBUG(D_PROBE, "CPIA PnP-ID: %04x:%04x:%04x", ++ sd->params.pnpID.vendor, sd->params.pnpID.product, ++ sd->params.pnpID.deviceRevision); ++ PDEBUG(D_PROBE, "VP-Version: %d.%d %04x", ++ sd->params.vpVersion.vpVersion, ++ sd->params.vpVersion.vpRevision, ++ sd->params.vpVersion.cameraHeadID); ++ ++ return 0; ++} ++ ++static void sd_pkt_scan(struct gspca_dev *gspca_dev, ++ u8 *data, ++ int len) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ /* Check for SOF */ ++ if (len >= 64 && ++ data[0] == MAGIC_0 && data[1] == MAGIC_1 && ++ data[16] == sd->params.format.videoSize && ++ data[17] == sd->params.format.subSample && ++ data[18] == sd->params.format.yuvOrder && ++ data[24] == sd->params.roi.colStart && ++ data[25] == sd->params.roi.colEnd && ++ data[26] == sd->params.roi.rowStart && ++ data[27] == sd->params.roi.rowEnd) { ++ struct gspca_frame *frame = gspca_get_i_frame(gspca_dev); ++ ++ atomic_set(&sd->cam_exposure, data[39] * 2); ++ atomic_set(&sd->fps, data[41]); ++ ++ if (frame == NULL) { ++ gspca_dev->last_packet_type = DISCARD_PACKET; ++ return; ++ } ++ ++ /* Check for proper EOF for last frame */ ++ if ((frame->data_end - frame->data) > 4 && ++ frame->data_end[-4] == 0xff && ++ frame->data_end[-3] == 0xff && ++ frame->data_end[-2] == 0xff && ++ frame->data_end[-1] == 0xff) ++ gspca_frame_add(gspca_dev, LAST_PACKET, ++ NULL, 0); ++ ++ gspca_frame_add(gspca_dev, FIRST_PACKET, data, len); ++ return; ++ } ++ ++ gspca_frame_add(gspca_dev, INTER_PACKET, data, len); ++} ++ ++static void sd_dq_callback(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ /* Set the normal compression settings once we have captured a ++ few uncompressed frames (and AEC has hopefully settled) */ ++ if (sd->first_frame) { ++ sd->first_frame--; ++ if (sd->first_frame == 0) ++ command_setcompression(gspca_dev); ++ } ++ ++ /* Switch flicker control back on if it got turned off */ ++ restart_flicker(gspca_dev); ++ ++ /* If AEC is enabled, monitor the exposure and ++ adjust the sensor frame rate if needed */ ++ if (sd->params.exposure.expMode == 2) ++ monitor_exposure(gspca_dev); ++ ++ /* Update our knowledge of the camera state */ ++ 
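++ /* GetExposure refreshes sd->params.exposure from the camera so the next
++  * monitor_exposure()/restart_flicker() pass works with current values;
++  * on a QX3, ReadMCPorts below also picks up the microscope button and
++  * cradle state. */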
do_command(gspca_dev, CPIA_COMMAND_GetExposure, 0, 0, 0, 0); ++ if (sd->params.qx3.qx3_detected) ++ do_command(gspca_dev, CPIA_COMMAND_ReadMCPorts, 0, 0, 0, 0); ++} ++ ++static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ int ret; ++ ++ sd->params.colourParams.brightness = val; ++ sd->params.flickerControl.allowableOverExposure = ++ find_over_exposure(sd->params.colourParams.brightness); ++ if (gspca_dev->streaming) { ++ ret = command_setcolourparams(gspca_dev); ++ if (ret) ++ return ret; ++ return command_setflickerctrl(gspca_dev); ++ } ++ return 0; ++} ++ ++static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->params.colourParams.brightness; ++ return 0; ++} ++ ++static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ sd->params.colourParams.contrast = val; ++ if (gspca_dev->streaming) ++ return command_setcolourparams(gspca_dev); ++ ++ return 0; ++} ++ ++static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->params.colourParams.contrast; ++ return 0; ++} ++ ++static int sd_setsaturation(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ sd->params.colourParams.saturation = val; ++ if (gspca_dev->streaming) ++ return command_setcolourparams(gspca_dev); ++ ++ return 0; ++} ++ ++static int sd_getsaturation(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->params.colourParams.saturation; ++ return 0; ++} ++ ++static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ int on; ++ ++ switch (val) { ++ case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */ ++ on = 0; ++ break; ++ case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */ ++ on = 1; ++ sd->mainsFreq = 0; ++ break; ++ case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */ ++ on = 1; ++ sd->mainsFreq = 1; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ sd->freq = val; ++ sd->params.flickerControl.coarseJump = ++ flicker_jumps[sd->mainsFreq] ++ [sd->params.sensorFps.baserate] ++ [sd->params.sensorFps.divisor]; ++ ++ return set_flicker(gspca_dev, on, gspca_dev->streaming); ++} ++ ++static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->freq; ++ return 0; ++} ++ ++static int sd_setcomptarget(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ sd->params.compressionTarget.frTargeting = val; ++ if (gspca_dev->streaming) ++ return command_setcompressiontarget(gspca_dev); ++ ++ return 0; ++} ++ ++static int sd_getcomptarget(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->params.compressionTarget.frTargeting; ++ return 0; ++} ++ ++static int sd_querymenu(struct gspca_dev *gspca_dev, ++ struct v4l2_querymenu *menu) ++{ ++ switch (menu->id) { ++ case V4L2_CID_POWER_LINE_FREQUENCY: ++ switch (menu->index) { ++ case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */ ++ strcpy((char *) menu->name, "NoFliker"); ++ return 0; ++ case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */ ++ strcpy((char *) menu->name, "50 Hz"); ++ return 0; ++ case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */ ++ strcpy((char *) menu->name, "60 Hz"); ++ return 0; ++ } ++ break; ++ case 
V4L2_CID_COMP_TARGET: ++ switch (menu->index) { ++ case CPIA_COMPRESSION_TARGET_QUALITY: ++ strcpy((char *) menu->name, "Quality"); ++ return 0; ++ case CPIA_COMPRESSION_TARGET_FRAMERATE: ++ strcpy((char *) menu->name, "Framerate"); ++ return 0; ++ } ++ break; ++ } ++ return -EINVAL; ++} ++ ++/* sub-driver description */ ++static const struct sd_desc sd_desc = { ++ .name = MODULE_NAME, ++ .ctrls = sd_ctrls, ++ .nctrls = ARRAY_SIZE(sd_ctrls), ++ .config = sd_config, ++ .init = sd_init, ++ .start = sd_start, ++ .stopN = sd_stopN, ++ .dq_callback = sd_dq_callback, ++ .pkt_scan = sd_pkt_scan, ++ .querymenu = sd_querymenu, ++}; ++ ++/* -- module initialisation -- */ ++static const __devinitdata struct usb_device_id device_table[] = { ++ {USB_DEVICE(0x0553, 0x0002)}, ++ {USB_DEVICE(0x0813, 0x0001)}, ++ {} ++}; ++MODULE_DEVICE_TABLE(usb, device_table); ++ ++/* -- device connect -- */ ++static int sd_probe(struct usb_interface *intf, ++ const struct usb_device_id *id) ++{ ++ return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), ++ THIS_MODULE); ++} ++ ++static struct usb_driver sd_driver = { ++ .name = MODULE_NAME, ++ .id_table = device_table, ++ .probe = sd_probe, ++ .disconnect = gspca_disconnect, ++#ifdef CONFIG_PM ++ .suspend = gspca_suspend, ++ .resume = gspca_resume, ++#endif ++}; ++ ++/* -- module insert / remove -- */ ++static int __init sd_mod_init(void) ++{ ++ int ret; ++ ret = usb_register(&sd_driver); ++ if (ret < 0) ++ return ret; ++ PDEBUG(D_PROBE, "registered"); ++ return 0; ++} ++static void __exit sd_mod_exit(void) ++{ ++ usb_deregister(&sd_driver); ++ PDEBUG(D_PROBE, "deregistered"); ++} ++ ++module_init(sd_mod_init); ++module_exit(sd_mod_exit); +diff --git a/drivers/media/video/gspca/etoms.c b/drivers/media/video/gspca/etoms.c +index fdf4c0e..6ac9eac 100644 +--- a/drivers/media/video/gspca/etoms.c ++++ b/drivers/media/video/gspca/etoms.c +@@ -52,7 +52,7 @@ static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + { + { + .id = V4L2_CID_BRIGHTNESS, +@@ -706,6 +706,12 @@ static void Et_setgainG(struct gspca_dev *gspca_dev, __u8 gain) + + i2c_w(gspca_dev, PAS106_REG13, &i2cflags, 1, 3); + i2c_w(gspca_dev, PAS106_REG0e, &gain, 1, 1); ++#if 0 ++ i2c_w(gspca_dev, 0x09, &gain, 1, 1); ++ i2c_w(gspca_dev, 0x0a, &gain, 1, 1); ++ i2c_w(gspca_dev, 0x0b, &gain, 1, 1); ++ i2c_w(gspca_dev, 0x0c, &gain, 1, 1); ++#endif + } + } + +@@ -851,7 +857,7 @@ static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val) + } + + /* sub-driver description */ +-static struct sd_desc sd_desc = { ++static const struct sd_desc sd_desc = { + .name = MODULE_NAME, + .ctrls = sd_ctrls, + .nctrls = ARRAY_SIZE(sd_ctrls), +diff --git a/drivers/media/video/gspca/gl860/gl860.c b/drivers/media/video/gspca/gl860/gl860.c +index 4878c8f..9e42476 100644 +--- a/drivers/media/video/gspca/gl860/gl860.c ++++ b/drivers/media/video/gspca/gl860/gl860.c +@@ -161,7 +161,7 @@ static int gl860_build_control_table(struct gspca_dev *gspca_dev) + + /*==================== sud-driver structure initialisation =================*/ + +-static struct sd_desc sd_desc_mi1320 = { ++static const struct sd_desc sd_desc_mi1320 = { + .name = MODULE_NAME, + .ctrls = sd_ctrls_mi1320, + .nctrls = GL860_NCTRLS, +@@ -174,7 +174,7 @@ static struct sd_desc sd_desc_mi1320 = { + .dq_callback = sd_callback, + }; + +-static 
struct sd_desc sd_desc_mi2020 = { ++static const struct sd_desc sd_desc_mi2020 = { + .name = MODULE_NAME, + .ctrls = sd_ctrls_mi2020, + .nctrls = GL860_NCTRLS, +@@ -187,7 +187,7 @@ static struct sd_desc sd_desc_mi2020 = { + .dq_callback = sd_callback, + }; + +-static struct sd_desc sd_desc_mi2020b = { ++static const struct sd_desc sd_desc_mi2020b = { + .name = MODULE_NAME, + .ctrls = sd_ctrls_mi2020b, + .nctrls = GL860_NCTRLS, +@@ -200,7 +200,7 @@ static struct sd_desc sd_desc_mi2020b = { + .dq_callback = sd_callback, + }; + +-static struct sd_desc sd_desc_ov2640 = { ++static const struct sd_desc sd_desc_ov2640 = { + .name = MODULE_NAME, + .ctrls = sd_ctrls_ov2640, + .nctrls = GL860_NCTRLS, +@@ -213,7 +213,7 @@ static struct sd_desc sd_desc_ov2640 = { + .dq_callback = sd_callback, + }; + +-static struct sd_desc sd_desc_ov9655 = { ++static const struct sd_desc sd_desc_ov9655 = { + .name = MODULE_NAME, + .ctrls = sd_ctrls_ov9655, + .nctrls = GL860_NCTRLS, +diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c +index bd6214d..85c966e 100644 +--- a/drivers/media/video/gspca/gspca.c ++++ b/drivers/media/video/gspca/gspca.c +@@ -3,6 +3,9 @@ + * + * Copyright (C) 2008-2009 Jean-Francois Moine (http://moinejf.free.fr) + * ++ * Camera button input handling by Márton Németh ++ * Copyright (C) 2009-2010 Márton Németh ++ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your +@@ -37,6 +40,9 @@ + + #include "gspca.h" + ++#include ++#include ++ + /* global values */ + #define DEF_NURBS 3 /* default number of URBs */ + #if DEF_NURBS > MAX_NURBS +@@ -47,7 +53,7 @@ MODULE_AUTHOR("Jean-Francois Moine "); + MODULE_DESCRIPTION("GSPCA USB Camera Driver"); + MODULE_LICENSE("GPL"); + +-#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 8, 0) ++#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 9, 0) + + #ifdef GSPCA_DEBUG + int gspca_debug = D_ERR | D_PROBE; +@@ -104,15 +110,184 @@ static const struct vm_operations_struct gspca_vm_ops = { + .close = gspca_vm_close, + }; + ++/* ++ * Input and interrupt endpoint handling functions ++ */ ++#ifdef CONFIG_INPUT ++static void int_irq(struct urb *urb) ++{ ++ struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context; ++ int ret; ++ ++ ret = urb->status; ++ switch (ret) { ++ case 0: ++ if (gspca_dev->sd_desc->int_pkt_scan(gspca_dev, ++ urb->transfer_buffer, urb->actual_length) < 0) { ++ PDEBUG(D_ERR, "Unknown packet received"); ++ } ++ break; ++ ++ case -ENOENT: ++ case -ECONNRESET: ++ case -ENODEV: ++ case -ESHUTDOWN: ++ /* Stop is requested either by software or hardware is gone, ++ * keep the ret value non-zero and don't resubmit later. 
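++ * -ENOENT and -ECONNRESET follow a usb_kill_urb()/usb_unlink_urb(),
++ * -ENODEV and -ESHUTDOWN mean the device or host controller is gone.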
++ */ ++ break; ++ ++ default: ++ PDEBUG(D_ERR, "URB error %i, resubmitting", urb->status); ++ urb->status = 0; ++ ret = 0; ++ } ++ ++ if (ret == 0) { ++ ret = usb_submit_urb(urb, GFP_ATOMIC); ++ if (ret < 0) ++ PDEBUG(D_ERR, "Resubmit URB failed with error %i", ret); ++ } ++} ++ ++static int gspca_input_connect(struct gspca_dev *dev) ++{ ++ struct input_dev *input_dev; ++ int err = 0; ++ ++ dev->input_dev = NULL; ++ if (dev->sd_desc->int_pkt_scan || dev->sd_desc->other_input) { ++ input_dev = input_allocate_device(); ++ if (!input_dev) ++ return -ENOMEM; ++ ++ usb_make_path(dev->dev, dev->phys, sizeof(dev->phys)); ++ strlcat(dev->phys, "/input0", sizeof(dev->phys)); ++ ++ input_dev->name = dev->sd_desc->name; ++ input_dev->phys = dev->phys; ++ ++ usb_to_input_id(dev->dev, &input_dev->id); ++ ++ input_dev->evbit[0] = BIT_MASK(EV_KEY); ++ input_dev->keybit[BIT_WORD(KEY_CAMERA)] = BIT_MASK(KEY_CAMERA); ++ input_dev->dev.parent = &dev->dev->dev; ++ err = input_register_device(input_dev); ++ if (err) { ++ PDEBUG(D_ERR, "Input device registration failed " ++ "with error %i", err); ++ input_dev->dev.parent = NULL; ++ input_free_device(input_dev); ++ } else { ++ dev->input_dev = input_dev; ++ } ++ } ++ ++ return err; ++} ++ ++static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev, ++ struct usb_endpoint_descriptor *ep) ++{ ++ unsigned int buffer_len; ++ int interval; ++ struct urb *urb; ++ struct usb_device *dev; ++ void *buffer = NULL; ++ int ret = -EINVAL; ++ ++ buffer_len = ep->wMaxPacketSize; ++ interval = ep->bInterval; ++ PDEBUG(D_PROBE, "found int in endpoint: 0x%x, " ++ "buffer_len=%u, interval=%u", ++ ep->bEndpointAddress, buffer_len, interval); ++ ++ dev = gspca_dev->dev; ++ ++ urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (!urb) { ++ ret = -ENOMEM; ++ goto error; ++ } ++ ++ buffer = usb_buffer_alloc(dev, ep->wMaxPacketSize, ++ GFP_KERNEL, &urb->transfer_dma); ++ if (!buffer) { ++ ret = -ENOMEM; ++ goto error_buffer; ++ } ++ usb_fill_int_urb(urb, dev, ++ usb_rcvintpipe(dev, ep->bEndpointAddress), ++ buffer, buffer_len, ++ int_irq, (void *)gspca_dev, interval); ++ gspca_dev->int_urb = urb; ++ ret = usb_submit_urb(urb, GFP_KERNEL); ++ if (ret < 0) { ++ PDEBUG(D_ERR, "submit URB failed with error %i", ret); ++ goto error_submit; ++ } ++ return ret; ++ ++error_submit: ++ usb_buffer_free(dev, ++ urb->transfer_buffer_length, ++ urb->transfer_buffer, ++ urb->transfer_dma); ++error_buffer: ++ usb_free_urb(urb); ++error: ++ return ret; ++} ++ ++static void gspca_input_create_urb(struct gspca_dev *gspca_dev) ++{ ++ struct usb_interface *intf; ++ struct usb_host_interface *intf_desc; ++ struct usb_endpoint_descriptor *ep; ++ int i; ++ ++ if (gspca_dev->sd_desc->int_pkt_scan) { ++ intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface); ++ intf_desc = intf->cur_altsetting; ++ for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) { ++ ep = &intf_desc->endpoint[i].desc; ++ if (usb_endpoint_dir_in(ep) && ++ usb_endpoint_xfer_int(ep)) { ++ ++ alloc_and_submit_int_urb(gspca_dev, ep); ++ break; ++ } ++ } ++ } ++} ++ ++static void gspca_input_destroy_urb(struct gspca_dev *gspca_dev) ++{ ++ struct urb *urb; ++ ++ urb = gspca_dev->int_urb; ++ if (urb) { ++ gspca_dev->int_urb = NULL; ++ usb_kill_urb(urb); ++ usb_buffer_free(gspca_dev->dev, ++ urb->transfer_buffer_length, ++ urb->transfer_buffer, ++ urb->transfer_dma); ++ usb_free_urb(urb); ++ } ++} ++#else ++#define gspca_input_connect(gspca_dev) 0 ++#define gspca_input_create_urb(gspca_dev) ++#define gspca_input_destroy_urb(gspca_dev) ++#endif ++ + /* 
get the current input frame buffer */ + struct gspca_frame *gspca_get_i_frame(struct gspca_dev *gspca_dev) + { + struct gspca_frame *frame; +- int i; + +- i = gspca_dev->fr_i; +- i = gspca_dev->fr_queue[i]; +- frame = &gspca_dev->frame[i]; ++ frame = gspca_dev->cur_frame; + if ((frame->v4l2_buf.flags & BUF_ALL_FLAGS) + != V4L2_BUF_FLAG_QUEUED) + return NULL; +@@ -486,11 +661,13 @@ static struct usb_host_endpoint *get_ep(struct gspca_dev *gspca_dev) + i, ep->desc.bEndpointAddress); + gspca_dev->alt = i; /* memorize the current alt setting */ + if (gspca_dev->nbalt > 1) { ++ gspca_input_destroy_urb(gspca_dev); + ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, i); + if (ret < 0) { + err("set alt %d err %d", i, ret); +- return NULL; ++ ep = NULL; + } ++ gspca_input_create_urb(gspca_dev); + } + return ep; + } +@@ -534,26 +711,22 @@ static int create_urbs(struct gspca_dev *gspca_dev, + nurbs = 1; + } + +- gspca_dev->nurbs = nurbs; + for (n = 0; n < nurbs; n++) { + urb = usb_alloc_urb(npkt, GFP_KERNEL); + if (!urb) { + err("usb_alloc_urb failed"); +- destroy_urbs(gspca_dev); + return -ENOMEM; + } ++ gspca_dev->urb[n] = urb; + urb->transfer_buffer = usb_buffer_alloc(gspca_dev->dev, + bsize, + GFP_KERNEL, + &urb->transfer_dma); + + if (urb->transfer_buffer == NULL) { +- usb_free_urb(urb); +- err("usb_buffer_urb failed"); +- destroy_urbs(gspca_dev); ++ err("usb_buffer_alloc failed"); + return -ENOMEM; + } +- gspca_dev->urb[n] = urb; + urb->dev = gspca_dev->dev; + urb->context = gspca_dev; + urb->transfer_buffer_length = bsize; +@@ -585,6 +758,7 @@ static int create_urbs(struct gspca_dev *gspca_dev, + static int gspca_init_transfer(struct gspca_dev *gspca_dev) + { + struct usb_host_endpoint *ep; ++ struct urb *urb; + int n, ret; + + if (mutex_lock_interruptible(&gspca_dev->usb_lock)) +@@ -595,6 +769,8 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev) + goto out; + } + ++ gspca_dev->usb_err = 0; ++ + /* set the higher alternate setting and + * loop until urb submit succeeds */ + if (gspca_dev->cam.reverse_alts) +@@ -613,10 +789,15 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev) + goto out; + } + for (;;) { +- PDEBUG(D_STREAM, "init transfer alt %d", gspca_dev->alt); +- ret = create_urbs(gspca_dev, ep); +- if (ret < 0) +- goto out; ++ if (!gspca_dev->cam.no_urb_create) { ++ PDEBUG(D_STREAM, "init transfer alt %d", ++ gspca_dev->alt); ++ ret = create_urbs(gspca_dev, ep); ++ if (ret < 0) { ++ destroy_urbs(gspca_dev); ++ goto out; ++ } ++ } + + /* clear the bulk endpoint */ + if (gspca_dev->cam.bulk) +@@ -636,8 +817,11 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev) + break; + + /* submit the URBs */ +- for (n = 0; n < gspca_dev->nurbs; n++) { +- ret = usb_submit_urb(gspca_dev->urb[n], GFP_KERNEL); ++ for (n = 0; n < MAX_NURBS; n++) { ++ urb = gspca_dev->urb[n]; ++ if (urb == NULL) ++ break; ++ ret = usb_submit_urb(urb, GFP_KERNEL); + if (ret < 0) + break; + } +@@ -694,7 +878,9 @@ static void gspca_stream_off(struct gspca_dev *gspca_dev) + if (gspca_dev->sd_desc->stopN) + gspca_dev->sd_desc->stopN(gspca_dev); + destroy_urbs(gspca_dev); ++ gspca_input_destroy_urb(gspca_dev); + gspca_set_alt0(gspca_dev); ++ gspca_input_create_urb(gspca_dev); + } + + /* always call stop0 to free the subdriver's resources */ +@@ -2060,11 +2246,12 @@ int gspca_dev_probe(struct usb_interface *intf, + PDEBUG(D_ERR, "Too many config"); + return -ENODEV; + } ++ ++ /* the USB video interface must be the first one */ + interface = &intf->cur_altsetting->desc; +- if 
(interface->bInterfaceNumber > 0) { +- PDEBUG(D_ERR, "intf != 0"); ++ if (dev->config->desc.bNumInterfaces != 1 && ++ interface->bInterfaceNumber != 0) + return -ENODEV; +- } + + /* create the device */ + if (dev_size < sizeof *gspca_dev) +@@ -2096,6 +2283,10 @@ int gspca_dev_probe(struct usb_interface *intf, + goto out; + gspca_set_default_mode(gspca_dev); + ++ ret = gspca_input_connect(gspca_dev); ++ if (ret) ++ goto out; ++ + mutex_init(&gspca_dev->usb_lock); + mutex_init(&gspca_dev->read_lock); + mutex_init(&gspca_dev->queue_lock); +@@ -2116,8 +2307,15 @@ int gspca_dev_probe(struct usb_interface *intf, + + usb_set_intfdata(intf, gspca_dev); + PDEBUG(D_PROBE, "%s created", video_device_node_name(&gspca_dev->vdev)); ++ ++ gspca_input_create_urb(gspca_dev); ++ + return 0; + out: ++#ifdef CONFIG_INPUT ++ if (gspca_dev->input_dev) ++ input_unregister_device(gspca_dev->input_dev); ++#endif + kfree(gspca_dev->usb_buf); + kfree(gspca_dev); + return ret; +@@ -2133,6 +2331,7 @@ EXPORT_SYMBOL(gspca_dev_probe); + void gspca_disconnect(struct usb_interface *intf) + { + struct gspca_dev *gspca_dev = usb_get_intfdata(intf); ++ struct input_dev *input_dev; + + PDEBUG(D_PROBE, "%s disconnect", + video_device_node_name(&gspca_dev->vdev)); +@@ -2144,6 +2343,13 @@ void gspca_disconnect(struct usb_interface *intf) + wake_up_interruptible(&gspca_dev->wq); + } + ++ gspca_input_destroy_urb(gspca_dev); ++ input_dev = gspca_dev->input_dev; ++ if (input_dev) { ++ gspca_dev->input_dev = NULL; ++ input_unregister_device(input_dev); ++ } ++ + /* the device is freed at exit of this function */ + gspca_dev->dev = NULL; + mutex_unlock(&gspca_dev->usb_lock); +@@ -2169,6 +2375,7 @@ int gspca_suspend(struct usb_interface *intf, pm_message_t message) + if (gspca_dev->sd_desc->stopN) + gspca_dev->sd_desc->stopN(gspca_dev); + destroy_urbs(gspca_dev); ++ gspca_input_destroy_urb(gspca_dev); + gspca_set_alt0(gspca_dev); + if (gspca_dev->sd_desc->stop0) + gspca_dev->sd_desc->stop0(gspca_dev); +@@ -2182,6 +2389,7 @@ int gspca_resume(struct usb_interface *intf) + + gspca_dev->frozen = 0; + gspca_dev->sd_desc->init(gspca_dev); ++ gspca_input_create_urb(gspca_dev); + if (gspca_dev->streaming) + return gspca_init_transfer(gspca_dev); + return 0; +@@ -2205,6 +2413,8 @@ int gspca_auto_gain_n_exposure(struct gspca_dev *gspca_dev, int avg_lum, + int retval = 0; + + for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) { ++ if (gspca_dev->ctrl_dis & (1 << i)) ++ continue; + if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_GAIN) + gain_ctrl = &gspca_dev->sd_desc->ctrls[i]; + if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_EXPOSURE) +diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h +index 59c7941..02c696a 100644 +--- a/drivers/media/video/gspca/gspca.h ++++ b/drivers/media/video/gspca/gspca.h +@@ -48,26 +48,27 @@ extern int gspca_debug; + + /* used to list framerates supported by a camera mode (resolution) */ + struct framerates { +- int *rates; ++ const u8 *rates; + int nrates; + }; + + /* device information - set at probe time */ + struct cam { +- int bulk_size; /* buffer size when image transfer by bulk */ + const struct v4l2_pix_format *cam_mode; /* size nmodes */ +- char nmodes; + const struct framerates *mode_framerates; /* must have size nmode, + * just like cam_mode */ +- __u8 bulk_nurbs; /* number of URBs in bulk mode ++ u32 bulk_size; /* buffer size when image transfer by bulk */ ++ u32 input_flags; /* value for ENUM_INPUT status flags */ ++ u8 nmodes; /* size of cam_mode */ ++ u8 
no_urb_create; /* don't create transfer URBs */ ++ u8 bulk_nurbs; /* number of URBs in bulk mode + * - cannot be > MAX_NURBS + * - when 0 and bulk_size != 0 means + * 1 URB and submit done by subdriver */ + u8 bulk; /* image transfer by 0:isoc / 1:bulk */ + u8 npkt; /* number of packets in an ISOC message + * 0 is the default value: 32 packets */ +- u32 input_flags; /* value for ENUM_INPUT status flags */ +- char reverse_alts; /* Alt settings are in high to low order */ ++ u8 reverse_alts; /* Alt settings are in high to low order */ + }; + + struct gspca_dev; +@@ -90,6 +91,9 @@ typedef int (*cam_qmnu_op) (struct gspca_dev *, + typedef void (*cam_pkt_op) (struct gspca_dev *gspca_dev, + u8 *data, + int len); ++typedef int (*cam_int_pkt_op) (struct gspca_dev *gspca_dev, ++ u8 *data, ++ int len); + + struct ctrl { + struct v4l2_queryctrl qctrl; +@@ -125,6 +129,12 @@ struct sd_desc { + cam_reg_op get_register; + #endif + cam_ident_op get_chip_ident; ++#ifdef CONFIG_INPUT ++ cam_int_pkt_op int_pkt_scan; ++ /* other_input makes the gspca core create gspca_dev->input even when ++ int_pkt_scan is NULL, for cams with non interrupt driven buttons */ ++ u8 other_input; ++#endif + }; + + /* packet types when moving from iso buf to frame buf */ +@@ -147,6 +157,10 @@ struct gspca_dev { + struct module *module; /* subdriver handling the device */ + struct usb_device *dev; + struct file *capt_file; /* file doing video capture */ ++#ifdef CONFIG_INPUT ++ struct input_dev *input_dev; ++ char phys[64]; /* physical device path */ ++#endif + + struct cam cam; /* device information */ + const struct sd_desc *sd_desc; /* subdriver description */ +@@ -156,6 +170,9 @@ struct gspca_dev { + #define USB_BUF_SZ 64 + __u8 *usb_buf; /* buffer for USB exchanges */ + struct urb *urb[MAX_NURBS]; ++#ifdef CONFIG_INPUT ++ struct urb *int_urb; ++#endif + + __u8 *frbuf; /* buffer for nframes */ + struct gspca_frame frame[GSPCA_MAX_FRAMES]; +@@ -187,7 +204,6 @@ struct gspca_dev { + char users; /* number of opens */ + char present; /* device connected */ + char nbufread; /* number of buffers for read() */ +- char nurbs; /* number of allocated URBs */ + char memory; /* memory type (V4L2_MEMORY_xxx) */ + __u8 iface; /* USB interface number */ + __u8 alt; /* USB alternate setting */ +diff --git a/drivers/media/video/gspca/m5602/m5602_mt9m111.c b/drivers/media/video/gspca/m5602/m5602_mt9m111.c +index 8d071df..de5425a 100644 +--- a/drivers/media/video/gspca/m5602/m5602_mt9m111.c ++++ b/drivers/media/video/gspca/m5602/m5602_mt9m111.c +@@ -36,6 +36,18 @@ static int mt9m111_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val); + static int mt9m111_set_red_balance(struct gspca_dev *gspca_dev, __s32 val); + + static struct v4l2_pix_format mt9m111_modes[] = { ++#if 0 ++ { ++ 320, ++ 240, ++ V4L2_PIX_FMT_SBGGR8, ++ V4L2_FIELD_NONE, ++ .sizeimage = 320 * 240, ++ .bytesperline = 320, ++ .colorspace = V4L2_COLORSPACE_SRGB, ++ .priv = 0 ++ }, ++#endif + { + 640, + 480, +@@ -48,7 +60,7 @@ static struct v4l2_pix_format mt9m111_modes[] = { + } + }; + +-const static struct ctrl mt9m111_ctrls[] = { ++static const struct ctrl mt9m111_ctrls[] = { + #define VFLIP_IDX 0 + { + { +@@ -171,7 +183,7 @@ int mt9m111_probe(struct sd *sd) + return -ENODEV; + } + +- info("Probing for a mt9m111 sensor"); ++ PDEBUG(D_PROBE, "Probing for a mt9m111 sensor"); + + /* Do the preinit */ + for (i = 0; i < ARRAY_SIZE(preinit_mt9m111); i++) { +diff --git a/drivers/media/video/gspca/m5602/m5602_ov7660.c b/drivers/media/video/gspca/m5602/m5602_ov7660.c +index 
2a28b74..99fef65 100644 +--- a/drivers/media/video/gspca/m5602/m5602_ov7660.c ++++ b/drivers/media/video/gspca/m5602/m5602_ov7660.c +@@ -20,6 +20,12 @@ + + static int ov7660_get_gain(struct gspca_dev *gspca_dev, __s32 *val); + static int ov7660_set_gain(struct gspca_dev *gspca_dev, __s32 val); ++#if 0 ++static int ov7660_get_blue_gain(struct gspca_dev *gspca_dev, __s32 *val); ++static int ov7660_set_blue_gain(struct gspca_dev *gspca_dev, __s32 val); ++static int ov7660_get_red_gain(struct gspca_dev *gspca_dev, __s32 *val); ++static int ov7660_set_red_gain(struct gspca_dev *gspca_dev, __s32 val); ++#endif + static int ov7660_get_auto_white_balance(struct gspca_dev *gspca_dev, + __s32 *val); + static int ov7660_set_auto_white_balance(struct gspca_dev *gspca_dev, +@@ -33,7 +39,7 @@ static int ov7660_set_hflip(struct gspca_dev *gspca_dev, __s32 val); + static int ov7660_get_vflip(struct gspca_dev *gspca_dev, __s32 *val); + static int ov7660_set_vflip(struct gspca_dev *gspca_dev, __s32 val); + +-const static struct ctrl ov7660_ctrls[] = { ++static const struct ctrl ov7660_ctrls[] = { + #define GAIN_IDX 1 + { + { +@@ -50,7 +56,39 @@ const static struct ctrl ov7660_ctrls[] = { + .get = ov7660_get_gain + }, + #define BLUE_BALANCE_IDX 2 ++#if 0 ++ { ++ { ++ .id = V4L2_CID_BLUE_BALANCE, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "blue balance", ++ .minimum = 0x00, ++ .maximum = 0x7f, ++ .step = 0x1, ++ .default_value = OV7660_DEFAULT_BLUE_GAIN, ++ .flags = V4L2_CTRL_FLAG_SLIDER ++ }, ++ .set = ov7660_set_blue_gain, ++ .get = ov7660_get_blue_gain ++ }, ++#endif + #define RED_BALANCE_IDX 3 ++#if 0 ++ { ++ { ++ .id = V4L2_CID_RED_BALANCE, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "red balance", ++ .minimum = 0x00, ++ .maximum = 0x7f, ++ .step = 0x1, ++ .default_value = OV7660_DEFAULT_RED_GAIN, ++ .flags = V4L2_CTRL_FLAG_SLIDER ++ }, ++ .set = ov7660_set_red_gain, ++ .get = ov7660_get_red_gain ++ }, ++#endif + #define AUTO_WHITE_BALANCE_IDX 4 + { + { +@@ -247,6 +285,17 @@ int ov7660_init(struct sd *sd) + sensor_settings[AUTO_EXPOSURE_IDX]); + if (err < 0) + return err; ++#if 0 ++ err = ov7660_set_blue_gain(&sd->gspca_dev, ++ sensor_settings[BLUE_BALANCE_IDX]); ++ if (err < 0) ++ return err; ++ ++ err = ov7660_set_red_gain(&sd->gspca_dev, ++ sensor_settings[RED_BALANCE_IDX]); ++ if (err < 0) ++ return err; ++#endif + err = ov7660_set_hflip(&sd->gspca_dev, + sensor_settings[HFLIP_IDX]); + if (err < 0) +@@ -301,6 +350,57 @@ static int ov7660_set_gain(struct gspca_dev *gspca_dev, __s32 val) + return err; + } + ++#if 0 ++static int ov7660_get_blue_gain(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ s32 *sensor_settings = sd->sensor_priv; ++ ++ *val = sensor_settings[BLUE_BALANCE_IDX]; ++ PDEBUG(D_V4L2, "Read blue balance %d", *val); ++ return 0; ++} ++ ++static int ov7660_set_blue_gain(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ int err; ++ u8 i2c_data; ++ struct sd *sd = (struct sd *) gspca_dev; ++ s32 *sensor_settings = sd->sensor_priv; ++ ++ PDEBUG(D_V4L2, "Setting blue balance to %d", val); ++ ++ sensor_settings[BLUE_BALANCE_IDX] = val; ++ ++ err = m5602_write_sensor(sd, OV7660_BLUE_GAIN, &i2c_data, 1); ++ return err; ++} ++ ++static int ov7660_get_red_gain(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ s32 *sensor_settings = sd->sensor_priv; ++ ++ *val = sensor_settings[RED_BALANCE_IDX]; ++ PDEBUG(D_V4L2, "Read red balance %d", *val); ++ return 0; ++} ++ ++static int ov7660_set_red_gain(struct gspca_dev 
*gspca_dev, __s32 val) ++{ ++ int err; ++ u8 i2c_data; ++ struct sd *sd = (struct sd *) gspca_dev; ++ s32 *sensor_settings = sd->sensor_priv; ++ ++ PDEBUG(D_V4L2, "Setting red balance to %d", val); ++ ++ sensor_settings[RED_BALANCE_IDX] = val; ++ ++ err = m5602_write_sensor(sd, OV7660_RED_GAIN, &i2c_data, 1); ++ return err; ++} ++#endif + + static int ov7660_get_auto_white_balance(struct gspca_dev *gspca_dev, + __s32 *val) +diff --git a/drivers/media/video/gspca/m5602/m5602_ov7660.h b/drivers/media/video/gspca/m5602/m5602_ov7660.h +index f5588eb..4d9dcf2 100644 +--- a/drivers/media/video/gspca/m5602/m5602_ov7660.h ++++ b/drivers/media/video/gspca/m5602/m5602_ov7660.h +@@ -94,7 +94,7 @@ int ov7660_start(struct sd *sd); + int ov7660_stop(struct sd *sd); + void ov7660_disconnect(struct sd *sd); + +-const static struct m5602_sensor ov7660 = { ++static const struct m5602_sensor ov7660 = { + .name = "ov7660", + .i2c_slave_id = 0x42, + .i2c_regW = 1, +diff --git a/drivers/media/video/gspca/m5602/m5602_ov9650.c b/drivers/media/video/gspca/m5602/m5602_ov9650.c +index 923cdd5..069ba00 100644 +--- a/drivers/media/video/gspca/m5602/m5602_ov9650.c ++++ b/drivers/media/video/gspca/m5602/m5602_ov9650.c +@@ -307,7 +307,7 @@ int ov9650_probe(struct sd *sd) + return -ENODEV; + } + +- info("Probing for an ov9650 sensor"); ++ PDEBUG(D_PROBE, "Probing for an ov9650 sensor"); + + /* Run the pre-init before probing the sensor */ + for (i = 0; i < ARRAY_SIZE(preinit_ov9650) && !err; i++) { +diff --git a/drivers/media/video/gspca/m5602/m5602_po1030.c b/drivers/media/video/gspca/m5602/m5602_po1030.c +index 8d74d80..03b8a3a 100644 +--- a/drivers/media/video/gspca/m5602/m5602_po1030.c ++++ b/drivers/media/video/gspca/m5602/m5602_po1030.c +@@ -42,6 +42,18 @@ static int po1030_get_auto_exposure(struct gspca_dev *gspca_dev, + __s32 *val); + + static struct v4l2_pix_format po1030_modes[] = { ++#if 0 ++ { ++ 320, ++ 240, ++ V4L2_PIX_FMT_SBGGR8, ++ V4L2_FIELD_NONE, ++ .sizeimage = 320 * 240, ++ .bytesperline = 320, ++ .colorspace = V4L2_COLORSPACE_SRGB, ++ .priv = 2 ++ }, ++#endif + { + 640, + 480, +@@ -205,7 +217,7 @@ int po1030_probe(struct sd *sd) + return -ENODEV; + } + +- info("Probing for a po1030 sensor"); ++ PDEBUG(D_PROBE, "Probing for a po1030 sensor"); + + /* Run the pre-init to actually probe the unit */ + for (i = 0; i < ARRAY_SIZE(preinit_po1030); i++) { +diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c +index 1b536f7..da0a38c 100644 +--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c ++++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c +@@ -248,7 +248,7 @@ int s5k4aa_probe(struct sd *sd) + return -ENODEV; + } + +- info("Probing for a s5k4aa sensor"); ++ PDEBUG(D_PROBE, "Probing for a s5k4aa sensor"); + + /* Preinit the sensor */ + for (i = 0; i < ARRAY_SIZE(preinit_s5k4aa) && !err; i++) { +diff --git a/drivers/media/video/gspca/m5602/m5602_s5k83a.c b/drivers/media/video/gspca/m5602/m5602_s5k83a.c +index 6b89f33..fbd9154 100644 +--- a/drivers/media/video/gspca/m5602/m5602_s5k83a.c ++++ b/drivers/media/video/gspca/m5602/m5602_s5k83a.c +@@ -143,7 +143,7 @@ int s5k83a_probe(struct sd *sd) + return -ENODEV; + } + +- info("Probing for a s5k83a sensor"); ++ PDEBUG(D_PROBE, "Probing for a s5k83a sensor"); + + /* Preinit the sensor */ + for (i = 0; i < ARRAY_SIZE(preinit_s5k83a) && !err; i++) { +diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c +index 9cf8d68..a08cd2b 100644 +--- a/drivers/media/video/gspca/mars.c ++++ 
b/drivers/media/video/gspca/mars.c +@@ -54,7 +54,7 @@ static int sd_getgamma(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + { + { + .id = V4L2_CID_BRIGHTNESS, +@@ -232,8 +232,13 @@ static int sd_start(struct gspca_dev *gspca_dev) + /* data[9]= 0x56; * reg 8, 24MHz, 2:1 scale down */ + /* else */ + data[9] = 0x52; /* reg 8, 24MHz, no scale down */ ++#if 1 + /*jfm: from win trace*/ + data[10] = 0x18; ++#else ++ data[10] = 0x5d; /* reg 9, I2C device address ++ * [for PAS5101 (0x40)] [for MI (0x5d)] */ ++#endif + + err_code = reg_w(gspca_dev, 11); + if (err_code < 0) +@@ -260,6 +265,7 @@ static int sd_start(struct gspca_dev *gspca_dev) + /* auto dark-gain */ + data[0] = 0x5e; /* address */ + data[1] = 0; /* reg 94, Y Gain (auto) */ ++#if 1 + /*jfm: from win trace*/ + /* reg 0x5f/0x60 (LE) = saturation */ + /* h (60): xxxx x100 +@@ -268,12 +274,40 @@ static int sd_start(struct gspca_dev *gspca_dev) + data[3] = ((sd->colors >> 2) & 0xf8) | 0x04; + data[4] = sd->brightness; /* reg 0x61 = brightness */ + data[5] = 0x00; ++#else ++ data[2] = 0; /* reg 95, UV Gain (1.75) */ ++ data[3] = 0x78; /* reg 96, Y Gain/UV Gain/disable ++ * auto dark-gain */ ++ switch (gspca_dev->width) { ++/* case 1280: */ ++/* data[4] = 154; ++ * reg 97, %3 shadow point (unit: 256 pixel) */ ++/* data[5] = 51; ++ * reg 98, %1 highlight point ++ * (uint: 256 pixel) */ ++/* break; */ ++ default: ++/* case 640: */ ++ data[4] = 36; /* reg 97, %3 shadow point ++ * (unit: 256 pixel) */ ++ data[5] = 12; /* reg 98, %1 highlight point ++ * (uint: 256 pixel) */ ++ break; ++ case 320: ++ data[4] = 9; /* reg 97, %3 shadow point ++ * (unit: 256 pixel) */ ++ data[5] = 3; /* reg 98, %1 highlight point ++ * (uint: 256 pixel) */ ++ break; ++ } ++#endif + + err_code = reg_w(gspca_dev, 6); + if (err_code < 0) + return err_code; + + data[0] = 0x67; ++#if 1 + /*jfm: from win trace*/ + data[1] = sd->sharpness * 4 + 3; + data[2] = 0x14; +@@ -293,6 +327,10 @@ static int sd_start(struct gspca_dev *gspca_dev) + data[1] = 0x07; + err_code = reg_w(gspca_dev, 2); + /*jfm: win trace - many writes here to reg 0x64*/ ++#else ++ data[1] = 0x13; /* reg 103, first pixel B, disable sharpness */ ++ err_code = reg_w(gspca_dev, 2); ++#endif + if (err_code < 0) + return err_code; + +diff --git a/drivers/media/video/gspca/mr97310a.c b/drivers/media/video/gspca/mr97310a.c +index 9154870..33744e7 100644 +--- a/drivers/media/video/gspca/mr97310a.c ++++ b/drivers/media/video/gspca/mr97310a.c +@@ -57,6 +57,14 @@ + #define MR97310A_GAIN_MAX 31 + #define MR97310A_GAIN_DEFAULT 25 + ++#define MR97310A_CONTRAST_MIN 0 ++#define MR97310A_CONTRAST_MAX 31 ++#define MR97310A_CONTRAST_DEFAULT 23 ++ ++#define MR97310A_CS_GAIN_MIN 0 ++#define MR97310A_CS_GAIN_MAX 0x7ff ++#define MR97310A_CS_GAIN_DEFAULT 0x110 ++ + #define MR97310A_MIN_CLOCKDIV_MIN 3 + #define MR97310A_MIN_CLOCKDIV_MAX 8 + #define MR97310A_MIN_CLOCKDIV_DEFAULT 3 +@@ -82,7 +90,8 @@ struct sd { + + int brightness; + u16 exposure; +- u8 gain; ++ u32 gain; ++ u8 contrast; + u8 min_clockdiv; + }; + +@@ -98,6 +107,8 @@ static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val); ++static int 
sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val); ++static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val); + static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setmin_clockdiv(struct gspca_dev *gspca_dev, __s32 val); +@@ -105,11 +116,13 @@ static int sd_getmin_clockdiv(struct gspca_dev *gspca_dev, __s32 *val); + static void setbrightness(struct gspca_dev *gspca_dev); + static void setexposure(struct gspca_dev *gspca_dev); + static void setgain(struct gspca_dev *gspca_dev); ++static void setcontrast(struct gspca_dev *gspca_dev); + + /* V4L2 controls supported by the driver */ +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + /* Separate brightness control description for Argus QuickClix as it has +- different limits from the other mr97310a cameras */ ++ * different limits from the other mr97310a cameras, and separate gain ++ * control for Sakar CyberPix camera. */ + { + #define NORM_BRIGHTNESS_IDX 0 + { +@@ -171,7 +184,37 @@ static struct ctrl sd_ctrls[] = { + .get = sd_getgain, + }, + { +-#define MIN_CLOCKDIV_IDX 4 ++#define SAKAR_CS_GAIN_IDX 4 ++ { ++ .id = V4L2_CID_GAIN, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Gain", ++ .minimum = MR97310A_CS_GAIN_MIN, ++ .maximum = MR97310A_CS_GAIN_MAX, ++ .step = 1, ++ .default_value = MR97310A_CS_GAIN_DEFAULT, ++ .flags = 0, ++ }, ++ .set = sd_setgain, ++ .get = sd_getgain, ++ }, ++ { ++#define CONTRAST_IDX 5 ++ { ++ .id = V4L2_CID_CONTRAST, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Contrast", ++ .minimum = MR97310A_CONTRAST_MIN, ++ .maximum = MR97310A_CONTRAST_MAX, ++ .step = 1, ++ .default_value = MR97310A_CONTRAST_DEFAULT, ++ .flags = 0, ++ }, ++ .set = sd_setcontrast, ++ .get = sd_getcontrast, ++ }, ++ { ++#define MIN_CLOCKDIV_IDX 6 + { + .id = V4L2_CID_PRIVATE_BASE, + .type = V4L2_CTRL_TYPE_INTEGER, +@@ -327,7 +370,6 @@ static int zero_the_pointer(struct gspca_dev *gspca_dev) + if (err_code < 0) + return err_code; + +- err_code = mr_write(gspca_dev, 1); + data[0] = 0x19; + data[1] = 0x51; + err_code = mr_write(gspca_dev, 2); +@@ -437,6 +479,7 @@ static int sd_config(struct gspca_dev *gspca_dev, + { + struct sd *sd = (struct sd *) gspca_dev; + struct cam *cam; ++ int gain_default = MR97310A_GAIN_DEFAULT; + int err_code; + + cam = &gspca_dev->cam; +@@ -460,12 +503,14 @@ static int sd_config(struct gspca_dev *gspca_dev, + if (err_code < 0) + return err_code; + ++ /* Now, the query for sensor type. */ ++ err_code = cam_get_response16(gspca_dev, 0x07, 1); ++ if (err_code < 0) ++ return err_code; ++ + if (id->idProduct == 0x0110 || id->idProduct == 0x010e) { + sd->cam_type = CAM_TYPE_CIF; + cam->nmodes--; +- err_code = cam_get_response16(gspca_dev, 0x06, 1); +- if (err_code < 0) +- return err_code; + /* + * All but one of the known CIF cameras share the same USB ID, + * but two different init routines are in use, and the control +@@ -473,12 +518,12 @@ static int sd_config(struct gspca_dev *gspca_dev, + * of the two known varieties is connected! + * + * A list of known CIF cameras follows. They all report either +- * 0002 for type 0 or 0003 for type 1. ++ * 0200 for type 0 or 0300 for type 1. + * If you have another to report, please do + * + * Name sd->sensor_type reported by + * +- * Sakar Spy-shot 0 T. Kilgore ++ * Sakar 56379 Spy-shot 0 T. Kilgore + * Innovage 0 T. Kilgore + * Vivitar Mini 0 H. De Goede + * Vivitar Mini 0 E. 
Rodriguez +@@ -487,7 +532,7 @@ static int sd_config(struct gspca_dev *gspca_dev, + * Philips dig. keych. 1 T. Kilgore + * Trust Spyc@m 100 1 A. Jacobs + */ +- switch (gspca_dev->usb_buf[1]) { ++ switch (gspca_dev->usb_buf[0]) { + case 2: + sd->sensor_type = 0; + break; +@@ -504,20 +549,19 @@ static int sd_config(struct gspca_dev *gspca_dev, + } else { + sd->cam_type = CAM_TYPE_VGA; + +- err_code = cam_get_response16(gspca_dev, 0x07, 1); +- if (err_code < 0) +- return err_code; +- + /* +- * Here is a table of the responses to the previous command +- * from the known MR97310A VGA cameras. ++ * Here is a table of the responses to the query for sensor ++ * type, from the known MR97310A VGA cameras. Six different ++ * cameras of which five share the same USB ID. + * + * Name gspca_dev->usb_buf[] sd->sensor_type + * sd->do_lcd_stop + * Aiptek Pencam VGA+ 0300 0 1 +- * ION digital 0350 0 1 ++ * ION digital 0300 0 1 + * Argus DC-1620 0450 1 0 + * Argus QuickClix 0420 1 1 ++ * Sakar 77379 Digital 0350 0 1 ++ * Sakar 1638x CyberPix 0120 0 2 + * + * Based upon these results, we assume default settings + * and then correct as necessary, as follows. +@@ -527,10 +571,12 @@ static int sd_config(struct gspca_dev *gspca_dev, + sd->sensor_type = 1; + sd->do_lcd_stop = 0; + sd->adj_colors = 0; +- if ((gspca_dev->usb_buf[0] != 0x03) && ++ if (gspca_dev->usb_buf[0] == 0x01) { ++ sd->sensor_type = 2; ++ } else if ((gspca_dev->usb_buf[0] != 0x03) && + (gspca_dev->usb_buf[0] != 0x04)) { + PDEBUG(D_ERR, "Unknown VGA Sensor id Byte 0: %02x", +- gspca_dev->usb_buf[1]); ++ gspca_dev->usb_buf[0]); + PDEBUG(D_ERR, "Defaults assumed, may not work"); + PDEBUG(D_ERR, "Please report this"); + } +@@ -560,7 +606,7 @@ static int sd_config(struct gspca_dev *gspca_dev, + PDEBUG(D_PROBE, "MR97310A VGA camera detected, sensor: %d", + sd->sensor_type); + } +- /* Stop streaming as we've started it to probe the sensor type. */ ++ /* Stop streaming as we've started it only to probe the sensor type. 
*/ + sd_stopN(gspca_dev); + + if (force_sensor_type != -1) { +@@ -574,9 +620,13 @@ static int sd_config(struct gspca_dev *gspca_dev, + /* No brightness for sensor_type 0 */ + if (sd->sensor_type == 0) + gspca_dev->ctrl_dis = (1 << NORM_BRIGHTNESS_IDX) | +- (1 << ARGUS_QC_BRIGHTNESS_IDX); ++ (1 << ARGUS_QC_BRIGHTNESS_IDX) | ++ (1 << CONTRAST_IDX) | ++ (1 << SAKAR_CS_GAIN_IDX); + else + gspca_dev->ctrl_dis = (1 << ARGUS_QC_BRIGHTNESS_IDX) | ++ (1 << CONTRAST_IDX) | ++ (1 << SAKAR_CS_GAIN_IDX) | + (1 << MIN_CLOCKDIV_IDX); + } else { + /* All controls need to be disabled if VGA sensor_type is 0 */ +@@ -585,17 +635,30 @@ static int sd_config(struct gspca_dev *gspca_dev, + (1 << ARGUS_QC_BRIGHTNESS_IDX) | + (1 << EXPOSURE_IDX) | + (1 << GAIN_IDX) | ++ (1 << CONTRAST_IDX) | ++ (1 << SAKAR_CS_GAIN_IDX) | + (1 << MIN_CLOCKDIV_IDX); +- else if (sd->do_lcd_stop) ++ else if (sd->sensor_type == 2) { ++ gspca_dev->ctrl_dis = (1 << NORM_BRIGHTNESS_IDX) | ++ (1 << ARGUS_QC_BRIGHTNESS_IDX) | ++ (1 << GAIN_IDX) | ++ (1 << MIN_CLOCKDIV_IDX); ++ gain_default = MR97310A_CS_GAIN_DEFAULT; ++ } else if (sd->do_lcd_stop) + /* Argus QuickClix has different brightness limits */ +- gspca_dev->ctrl_dis = (1 << NORM_BRIGHTNESS_IDX); ++ gspca_dev->ctrl_dis = (1 << NORM_BRIGHTNESS_IDX) | ++ (1 << CONTRAST_IDX) | ++ (1 << SAKAR_CS_GAIN_IDX); + else +- gspca_dev->ctrl_dis = (1 << ARGUS_QC_BRIGHTNESS_IDX); ++ gspca_dev->ctrl_dis = (1 << ARGUS_QC_BRIGHTNESS_IDX) | ++ (1 << CONTRAST_IDX) | ++ (1 << SAKAR_CS_GAIN_IDX); + } + + sd->brightness = MR97310A_BRIGHTNESS_DEFAULT; + sd->exposure = MR97310A_EXPOSURE_DEFAULT; +- sd->gain = MR97310A_GAIN_DEFAULT; ++ sd->gain = gain_default; ++ sd->contrast = MR97310A_CONTRAST_DEFAULT; + sd->min_clockdiv = MR97310A_MIN_CLOCKDIV_DEFAULT; + + return 0; +@@ -697,6 +760,12 @@ static int start_cif_cam(struct gspca_dev *gspca_dev) + {0x13, 0x00, {0x01}, 1}, + {0, 0, {0}, 0} + }; ++ /* Without this command the cam won't work with USB-UHCI */ ++ gspca_dev->usb_buf[0] = 0x0a; ++ gspca_dev->usb_buf[1] = 0x00; ++ err_code = mr_write(gspca_dev, 2); ++ if (err_code < 0) ++ return err_code; + err_code = sensor_write_regs(gspca_dev, cif_sensor1_init_data, + ARRAY_SIZE(cif_sensor1_init_data)); + } +@@ -717,6 +786,10 @@ static int start_vga_cam(struct gspca_dev *gspca_dev) + data[5] = 0x00; + data[10] = 0x91; + } ++ if (sd->sensor_type == 2) { ++ data[5] = 0x00; ++ data[10] = 0x18; ++ } + + switch (gspca_dev->width) { + case 160: +@@ -731,6 +804,10 @@ static int start_vga_cam(struct gspca_dev *gspca_dev) + data[4] = 0x78; /* reg 3, V size/4 */ + data[6] = 0x04; /* reg 5, H start */ + data[8] = 0x03; /* reg 7, V start */ ++ if (sd->sensor_type == 2) { ++ data[6] = 2; ++ data[8] = 1; ++ } + if (sd->do_lcd_stop) + data[8] = 0x04; /* Bayer tile shifted */ + break; +@@ -753,7 +830,6 @@ static int start_vga_cam(struct gspca_dev *gspca_dev) + return err_code; + + if (!sd->sensor_type) { +- /* The only known sensor_type 0 cam is the Argus DC-1620 */ + const struct sensor_w_data vga_sensor0_init_data[] = { + {0x01, 0x00, {0x0c, 0x00, 0x04}, 3}, + {0x14, 0x00, {0x01, 0xe4, 0x02, 0x84}, 4}, +@@ -764,7 +840,7 @@ static int start_vga_cam(struct gspca_dev *gspca_dev) + }; + err_code = sensor_write_regs(gspca_dev, vga_sensor0_init_data, + ARRAY_SIZE(vga_sensor0_init_data)); +- } else { /* sd->sensor_type = 1 */ ++ } else if (sd->sensor_type == 1) { + const struct sensor_w_data color_adj[] = { + {0x02, 0x00, {0x06, 0x59, 0x0c, 0x16, 0x00, + /* adjusted blue, green, red gain correct +@@ -802,6 +878,48 @@ static int 
start_vga_cam(struct gspca_dev *gspca_dev) + + err_code = sensor_write_regs(gspca_dev, vga_sensor1_init_data, + ARRAY_SIZE(vga_sensor1_init_data)); ++ } else { /* sensor type == 2 */ ++ const struct sensor_w_data vga_sensor2_init_data[] = { ++ ++ {0x01, 0x00, {0x48}, 1}, ++ {0x02, 0x00, {0x22}, 1}, ++ /* Reg 3 msb and 4 is lsb of the exposure setting*/ ++ {0x05, 0x00, {0x10}, 1}, ++ {0x06, 0x00, {0x00}, 1}, ++ {0x07, 0x00, {0x00}, 1}, ++ {0x08, 0x00, {0x00}, 1}, ++ {0x09, 0x00, {0x00}, 1}, ++ /* The following are used in the gain control ++ * which is BTW completely borked in the OEM driver ++ * The values for each color go from 0 to 0x7ff ++ *{0x0a, 0x00, {0x01}, 1}, green1 gain msb ++ *{0x0b, 0x00, {0x10}, 1}, green1 gain lsb ++ *{0x0c, 0x00, {0x01}, 1}, red gain msb ++ *{0x0d, 0x00, {0x10}, 1}, red gain lsb ++ *{0x0e, 0x00, {0x01}, 1}, blue gain msb ++ *{0x0f, 0x00, {0x10}, 1}, blue gain lsb ++ *{0x10, 0x00, {0x01}, 1}, green2 gain msb ++ *{0x11, 0x00, {0x10}, 1}, green2 gain lsb ++ */ ++ {0x12, 0x00, {0x00}, 1}, ++ {0x13, 0x00, {0x04}, 1}, /* weird effect on colors */ ++ {0x14, 0x00, {0x00}, 1}, ++ {0x15, 0x00, {0x06}, 1}, ++ {0x16, 0x00, {0x01}, 1}, ++ {0x17, 0x00, {0xe2}, 1}, /* vertical alignment */ ++ {0x18, 0x00, {0x02}, 1}, ++ {0x19, 0x00, {0x82}, 1}, /* don't mess with */ ++ {0x1a, 0x00, {0x00}, 1}, ++ {0x1b, 0x00, {0x20}, 1}, ++ /* {0x1c, 0x00, {0x17}, 1}, contrast control */ ++ {0x1d, 0x00, {0x80}, 1}, /* moving causes a mess */ ++ {0x1e, 0x00, {0x08}, 1}, /* moving jams the camera */ ++ {0x1f, 0x00, {0x0c}, 1}, ++ {0x20, 0x00, {0x00}, 1}, ++ {0, 0, {0}, 0} ++ }; ++ err_code = sensor_write_regs(gspca_dev, vga_sensor2_init_data, ++ ARRAY_SIZE(vga_sensor2_init_data)); + } + return err_code; + } +@@ -834,6 +952,7 @@ static int sd_start(struct gspca_dev *gspca_dev) + return err_code; + + setbrightness(gspca_dev); ++ setcontrast(gspca_dev); + setexposure(gspca_dev); + setgain(gspca_dev); + +@@ -893,7 +1012,7 @@ static void setbrightness(struct gspca_dev *gspca_dev) + static void setexposure(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +- int exposure; ++ int exposure = MR97310A_EXPOSURE_DEFAULT; + u8 buf[2]; + + if (gspca_dev->ctrl_dis & (1 << EXPOSURE_IDX)) +@@ -905,6 +1024,11 @@ static void setexposure(struct gspca_dev *gspca_dev) + exposure = (sd->exposure * 9267) / 10000 + 300; + sensor_write1(gspca_dev, 3, exposure >> 4); + sensor_write1(gspca_dev, 4, exposure & 0x0f); ++ } else if (sd->sensor_type == 2) { ++ exposure = sd->exposure; ++ exposure >>= 3; ++ sensor_write1(gspca_dev, 3, exposure >> 8); ++ sensor_write1(gspca_dev, 4, exposure & 0xff); + } else { + /* We have both a clock divider and an exposure register. 
+ We first calculate the clock divider, as that determines +@@ -943,17 +1067,34 @@ static void setexposure(struct gspca_dev *gspca_dev) + static void setgain(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; ++ u8 gainreg; + +- if (gspca_dev->ctrl_dis & (1 << GAIN_IDX)) ++ if ((gspca_dev->ctrl_dis & (1 << GAIN_IDX)) && ++ (gspca_dev->ctrl_dis & (1 << SAKAR_CS_GAIN_IDX))) + return; + +- if (sd->cam_type == CAM_TYPE_CIF && sd->sensor_type == 1) { ++ if (sd->cam_type == CAM_TYPE_CIF && sd->sensor_type == 1) + sensor_write1(gspca_dev, 0x0e, sd->gain); +- } else { ++ else if (sd->cam_type == CAM_TYPE_VGA && sd->sensor_type == 2) ++ for (gainreg = 0x0a; gainreg < 0x11; gainreg += 2) { ++ sensor_write1(gspca_dev, gainreg, sd->gain >> 8); ++ sensor_write1(gspca_dev, gainreg + 1, sd->gain & 0xff); ++ } ++ else + sensor_write1(gspca_dev, 0x10, sd->gain); +- } + } + ++static void setcontrast(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ if (gspca_dev->ctrl_dis & (1 << CONTRAST_IDX)) ++ return; ++ ++ sensor_write1(gspca_dev, 0x1c, sd->contrast); ++} ++ ++ + static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val) + { + struct sd *sd = (struct sd *) gspca_dev; +@@ -1008,6 +1149,25 @@ static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val) + return 0; + } + ++static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ sd->contrast = val; ++ if (gspca_dev->streaming) ++ setcontrast(gspca_dev); ++ return 0; ++} ++ ++ ++static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->contrast; ++ return 0; ++} ++ + static int sd_setmin_clockdiv(struct gspca_dev *gspca_dev, __s32 val) + { + struct sd *sd = (struct sd *) gspca_dev; +diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c +index b4f9657..16fabc2 100644 +--- a/drivers/media/video/gspca/ov519.c ++++ b/drivers/media/video/gspca/ov519.c +@@ -38,6 +38,7 @@ + */ + #define MODULE_NAME "ov519" + ++#include + #include "gspca.h" + + MODULE_AUTHOR("Jean-Francois Moine "); +@@ -70,6 +71,9 @@ struct sd { + char invert_led; + #define BRIDGE_INVERT_LED 8 + ++ char snapshot_pressed; ++ char snapshot_needs_reset; ++ + /* Determined by sensor type */ + __u8 sif; + +@@ -99,10 +103,12 @@ struct sd { + #define SEN_OV66308AF 5 + #define SEN_OV7610 6 + #define SEN_OV7620 7 +-#define SEN_OV7640 8 +-#define SEN_OV7670 9 +-#define SEN_OV76BE 10 +-#define SEN_OV8610 11 ++#define SEN_OV7620AE 8 ++#define SEN_OV7640 9 ++#define SEN_OV7648 10 ++#define SEN_OV7670 11 ++#define SEN_OV76BE 12 ++#define SEN_OV8610 13 + + u8 sensor_addr; + int sensor_width; +@@ -139,6 +145,7 @@ static void setautobrightness(struct sd *sd); + static void setfreq(struct sd *sd); + + static const struct ctrl sd_ctrls[] = { ++#define BRIGHTNESS_IDX 0 + { + { + .id = V4L2_CID_BRIGHTNESS, +@@ -153,6 +160,7 @@ static const struct ctrl sd_ctrls[] = { + .set = sd_setbrightness, + .get = sd_getbrightness, + }, ++#define CONTRAST_IDX 1 + { + { + .id = V4L2_CID_CONTRAST, +@@ -167,6 +175,7 @@ static const struct ctrl sd_ctrls[] = { + .set = sd_setcontrast, + .get = sd_getcontrast, + }, ++#define COLOR_IDX 2 + { + { + .id = V4L2_CID_SATURATION, +@@ -2030,6 +2039,10 @@ static int ov511_i2c_w(struct sd *sd, __u8 reg, __u8 value) + + if ((rc & 2) == 0) /* Ack? 
*/ + break; ++#if 0 ++ /* I2C abort */ ++ reg_w(sd, R511_I2C_CTL, 0x10); ++#endif + if (--retries < 0) { + PDEBUG(D_USBO, "i2c write retries exhausted"); + return -1; +@@ -2554,7 +2567,7 @@ static int ov7xx0_configure(struct sd *sd) + /* I don't know what's different about the 76BE yet. */ + if (i2c_r(sd, 0x15) & 1) { + PDEBUG(D_PROBE, "Sensor is an OV7620AE"); +- sd->sensor = SEN_OV7620; ++ sd->sensor = SEN_OV7620AE; + } else { + PDEBUG(D_PROBE, "Sensor is an OV76BE"); + sd->sensor = SEN_OV76BE; +@@ -2588,7 +2601,7 @@ static int ov7xx0_configure(struct sd *sd) + break; + case 0x48: + PDEBUG(D_PROBE, "Sensor is an OV7648"); +- sd->sensor = SEN_OV7640; /* FIXME */ ++ sd->sensor = SEN_OV7648; + break; + default: + PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low); +@@ -2680,6 +2693,36 @@ static void ov51x_led_control(struct sd *sd, int on) + } + } + ++static void sd_reset_snapshot(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ if (!sd->snapshot_needs_reset) ++ return; ++ ++ /* Note it is important that we clear sd->snapshot_needs_reset, ++ before actually clearing the snapshot state in the bridge ++ otherwise we might race with the pkt_scan interrupt handler */ ++ sd->snapshot_needs_reset = 0; ++ ++ switch (sd->bridge) { ++ case BRIDGE_OV511: ++ case BRIDGE_OV511PLUS: ++ reg_w(sd, R51x_SYS_SNAP, 0x02); ++ reg_w(sd, R51x_SYS_SNAP, 0x00); ++ break; ++ case BRIDGE_OV518: ++ case BRIDGE_OV518PLUS: ++ reg_w(sd, R51x_SYS_SNAP, 0x02); /* Reset */ ++ reg_w(sd, R51x_SYS_SNAP, 0x01); /* Enable */ ++ break; ++ case BRIDGE_OV519: ++ reg_w(sd, R51x_SYS_RESET, 0x40); ++ reg_w(sd, R51x_SYS_RESET, 0x00); ++ break; ++ } ++} ++ + static int ov51x_upload_quan_tables(struct sd *sd) + { + const unsigned char yQuanTable511[] = { +@@ -3115,7 +3158,11 @@ static int sd_config(struct gspca_dev *gspca_dev, + (1 << OV7670_FREQ_IDX); + } + sd->quality = QUALITY_DEF; +- if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7670) ++ if (sd->sensor == SEN_OV7640 || ++ sd->sensor == SEN_OV7648) ++ gspca_dev->ctrl_dis |= (1 << AUTOBRIGHT_IDX) | ++ (1 << CONTRAST_IDX); ++ if (sd->sensor == SEN_OV7670) + gspca_dev->ctrl_dis |= 1 << AUTOBRIGHT_IDX; + /* OV8610 Frequency filter control should work but needs testing */ + if (sd->sensor == SEN_OV8610) +@@ -3169,10 +3216,12 @@ static int sd_init(struct gspca_dev *gspca_dev) + return -EIO; + break; + case SEN_OV7620: ++ case SEN_OV7620AE: + if (write_i2c_regvals(sd, norm_7620, ARRAY_SIZE(norm_7620))) + return -EIO; + break; + case SEN_OV7640: ++ case SEN_OV7648: + if (write_i2c_regvals(sd, norm_7640, ARRAY_SIZE(norm_7640))) + return -EIO; + break; +@@ -3246,7 +3295,9 @@ static int ov511_mode_init_regs(struct sd *sd) + /* Note once the FIXME's in mode_init_ov_sensor_regs() are fixed + for more sensors we need to do this for them too */ + case SEN_OV7620: ++ case SEN_OV7620AE: + case SEN_OV7640: ++ case SEN_OV7648: + case SEN_OV76BE: + if (sd->gspca_dev.width == 320) + interlaced = 1; +@@ -3377,7 +3428,7 @@ static int ov518_mode_init_regs(struct sd *sd) + + if (sd->bridge == BRIDGE_OV518PLUS) { + switch (sd->sensor) { +- case SEN_OV7620: ++ case SEN_OV7620AE: + if (sd->gspca_dev.width == 320) { + reg_w(sd, 0x20, 0x00); + reg_w(sd, 0x21, 0x19); +@@ -3386,6 +3437,10 @@ static int ov518_mode_init_regs(struct sd *sd) + reg_w(sd, 0x21, 0x1f); + } + break; ++ case SEN_OV7620: ++ reg_w(sd, 0x20, 0x00); ++ reg_w(sd, 0x21, 0x19); ++ break; + default: + reg_w(sd, 0x21, 0x19); + } +@@ -3488,7 +3543,8 @@ static int ov519_mode_init_regs(struct sd *sd) + if 
(write_regvals(sd, mode_init_519, + ARRAY_SIZE(mode_init_519))) + return -EIO; +- if (sd->sensor == SEN_OV7640) { ++ if (sd->sensor == SEN_OV7640 || ++ sd->sensor == SEN_OV7648) { + /* Select 8-bit input mode */ + reg_w_mask(sd, OV519_R20_DFR, 0x10, 0x10); + } +@@ -3503,6 +3559,9 @@ static int ov519_mode_init_regs(struct sd *sd) + if (sd->sensor == SEN_OV7670 && + sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv) + reg_w(sd, OV519_R12_X_OFFSETL, 0x04); ++ else if (sd->sensor == SEN_OV7648 && ++ sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv) ++ reg_w(sd, OV519_R12_X_OFFSETL, 0x01); + else + reg_w(sd, OV519_R12_X_OFFSETL, 0x00); + reg_w(sd, OV519_R13_X_OFFSETH, 0x00); +@@ -3520,6 +3579,7 @@ static int ov519_mode_init_regs(struct sd *sd) + sd->clockdiv = 0; + switch (sd->sensor) { + case SEN_OV7640: ++ case SEN_OV7648: + switch (sd->frame_rate) { + default: + /* case 30: */ +@@ -3637,6 +3697,14 @@ static int mode_init_ov_sensor_regs(struct sd *sd) + case SEN_OV8610: + /* For OV8610 qvga means qsvga */ + i2c_w_mask(sd, OV7610_REG_COM_C, qvga ? (1 << 5) : 0, 1 << 5); ++#if 0 ++ /* FIXME: Does this improve the image quality or frame rate? */ ++ i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20); ++ i2c_w(sd, 0x24, 0x10); ++ i2c_w(sd, 0x25, qvga ? 0x40 : 0x8a); ++ i2c_w(sd, 0x2f, qvga ? 0x30 : 0xb0); ++ i2c_w(sd, 0x35, qvga ? 0x1c : 0x9c); ++#endif + i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */ + i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */ + i2c_w_mask(sd, 0x2d, 0x00, 0x40); /* from windrv 090403 */ +@@ -3644,11 +3712,19 @@ static int mode_init_ov_sensor_regs(struct sd *sd) + break; + case SEN_OV7610: + i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); ++#if 0 ++ /* FIXME: Does this improve the image quality or frame rate? */ ++ i2c_w_mask(sd, 0x28, qvga?0x00:0x20, 0x20); ++ i2c_w(sd, 0x24, 0x10); ++ i2c_w(sd, 0x25, qvga?0x40:0x8a); ++ i2c_w(sd, 0x2f, qvga?0x30:0xb0); ++#endif + i2c_w(sd, 0x35, qvga?0x1e:0x9e); + i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */ + i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */ + break; + case SEN_OV7620: ++ case SEN_OV7620AE: + case SEN_OV76BE: + i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); + i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20); +@@ -3663,13 +3739,16 @@ static int mode_init_ov_sensor_regs(struct sd *sd) + i2c_w(sd, 0x35, qvga ? 0x1e : 0x9e); + break; + case SEN_OV7640: ++ case SEN_OV7648: + i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); + i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20); +-/* i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a); */ +-/* i2c_w(sd, 0x25, qvga ? 0x30 : 0x60); */ +-/* i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40); */ +-/* i2c_w_mask(sd, 0x67, qvga ? 0xf0 : 0x90, 0xf0); */ +-/* i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20); */ ++ /* Setting this undocumented bit in qvga mode removes a very ++ annoying vertical shaking of the image */ ++ i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40); ++ /* Unknown */ ++ i2c_w_mask(sd, 0x67, qvga ? 0xf0 : 0x90, 0xf0); ++ /* Allow higher automatic gain (to allow higher framerates) */ ++ i2c_w_mask(sd, 0x74, qvga ? 
0x20 : 0x00, 0x20); + i2c_w_mask(sd, 0x12, 0x04, 0x04); /* AWB: 1 */ + break; + case SEN_OV7670: +@@ -3795,11 +3874,13 @@ static int set_ov_sensor_window(struct sd *sd) + } + break; + case SEN_OV7620: ++ case SEN_OV7620AE: + hwsbase = 0x2f; /* From 7620.SET (spec is wrong) */ + hwebase = 0x2f; + vwsbase = vwebase = 0x05; + break; + case SEN_OV7640: ++ case SEN_OV7648: + hwsbase = 0x1a; + hwebase = 0x1a; + vwsbase = vwebase = 0x03; +@@ -3893,6 +3974,12 @@ static int sd_start(struct gspca_dev *gspca_dev) + setautobrightness(sd); + setfreq(sd); + ++ /* Force clear snapshot state in case the snapshot button was ++ pressed while we weren't streaming */ ++ sd->snapshot_needs_reset = 1; ++ sd_reset_snapshot(gspca_dev); ++ sd->snapshot_pressed = 0; ++ + ret = ov51x_restart(sd); + if (ret < 0) + goto out; +@@ -3919,6 +4006,34 @@ static void sd_stop0(struct gspca_dev *gspca_dev) + w9968cf_stop0(sd); + } + ++static void ov51x_handle_button(struct gspca_dev *gspca_dev, u8 state) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ if (sd->snapshot_pressed != state) { ++#ifdef CONFIG_INPUT ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, state); ++ input_sync(gspca_dev->input_dev); ++#endif ++ if (state) ++ sd->snapshot_needs_reset = 1; ++ ++ sd->snapshot_pressed = state; ++ } else { ++ /* On the ov511 / ov519 we need to reset the button state ++ multiple times, as resetting does not work as long as the ++ button stays pressed */ ++ switch (sd->bridge) { ++ case BRIDGE_OV511: ++ case BRIDGE_OV511PLUS: ++ case BRIDGE_OV519: ++ if (state) ++ sd->snapshot_needs_reset = 1; ++ break; ++ } ++ } ++} ++ + static void ov511_pkt_scan(struct gspca_dev *gspca_dev, + u8 *in, /* isoc packet */ + int len) /* iso packet length */ +@@ -3940,6 +4055,7 @@ static void ov511_pkt_scan(struct gspca_dev *gspca_dev, + */ + if (!(in[0] | in[1] | in[2] | in[3] | in[4] | in[5] | in[6] | in[7]) && + (in[8] & 0x08)) { ++ ov51x_handle_button(gspca_dev, (in[8] >> 2) & 1); + if (in[8] & 0x80) { + /* Frame end */ + if ((in[9] + 1) * 8 != gspca_dev->width || +@@ -3977,6 +4093,7 @@ static void ov518_pkt_scan(struct gspca_dev *gspca_dev, + /* A false positive here is likely, until OVT gives me + * the definitive SOF/EOF format */ + if ((!(data[0] | data[1] | data[2] | data[3] | data[5])) && data[6]) { ++ ov51x_handle_button(gspca_dev, (data[6] >> 1) & 1); + gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); + gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0); + sd->packet_nr = 0; +@@ -4024,6 +4141,9 @@ static void ov519_pkt_scan(struct gspca_dev *gspca_dev, + if (data[0] == 0xff && data[1] == 0xff && data[2] == 0xff) { + switch (data[3]) { + case 0x50: /* start of frame */ ++ /* Don't check the button state here, as the state ++ usually (always ?) changes at EOF and checking it ++ here leads to unnecessary snapshot state resets. 
*/ + #define HDRSZ 16 + data += HDRSZ; + len -= HDRSZ; +@@ -4035,6 +4155,7 @@ static void ov519_pkt_scan(struct gspca_dev *gspca_dev, + gspca_dev->last_packet_type = DISCARD_PACKET; + return; + case 0x51: /* end of frame */ ++ ov51x_handle_button(gspca_dev, data[11] & 1); + if (data[9] != 0) + gspca_dev->last_packet_type = DISCARD_PACKET; + gspca_frame_add(gspca_dev, LAST_PACKET, +@@ -4103,9 +4224,11 @@ static void setbrightness(struct gspca_dev *gspca_dev) + case SEN_OV6630: + case SEN_OV66308AF: + case SEN_OV7640: ++ case SEN_OV7648: + i2c_w(sd, OV7610_REG_BRT, val); + break; + case SEN_OV7620: ++ case SEN_OV7620AE: + /* 7620 doesn't like manual changes when in auto mode */ + if (!sd->autobrightness) + i2c_w(sd, OV7610_REG_BRT, val); +@@ -4142,7 +4265,8 @@ static void setcontrast(struct gspca_dev *gspca_dev) + i2c_w(sd, 0x64, ctab[val >> 5]); + break; + } +- case SEN_OV7620: { ++ case SEN_OV7620: ++ case SEN_OV7620AE: { + static const __u8 ctab[] = { + 0x01, 0x05, 0x09, 0x11, 0x15, 0x35, 0x37, 0x57, + 0x5b, 0xa5, 0xa7, 0xc7, 0xc9, 0xcf, 0xef, 0xff +@@ -4152,10 +4276,6 @@ static void setcontrast(struct gspca_dev *gspca_dev) + i2c_w(sd, 0x64, ctab[val >> 4]); + break; + } +- case SEN_OV7640: +- /* Use gain control instead. */ +- i2c_w(sd, OV7610_REG_GAIN, val >> 2); +- break; + case SEN_OV7670: + /* check that this isn't just the same as ov7610 */ + i2c_w(sd, OV7670_REG_CONTRAS, val >> 1); +@@ -4179,6 +4299,7 @@ static void setcolors(struct gspca_dev *gspca_dev) + i2c_w(sd, OV7610_REG_SAT, val); + break; + case SEN_OV7620: ++ case SEN_OV7620AE: + /* Use UV gamma control instead. Bits 0 & 7 are reserved. */ + /* rc = ov_i2c_write(sd->dev, 0x62, (val >> 9) & 0x7e); + if (rc < 0) +@@ -4186,6 +4307,7 @@ static void setcolors(struct gspca_dev *gspca_dev) + i2c_w(sd, OV7610_REG_SAT, val); + break; + case SEN_OV7640: ++ case SEN_OV7648: + i2c_w(sd, OV7610_REG_SAT, val & 0xf0); + break; + case SEN_OV7670: +@@ -4198,7 +4320,8 @@ static void setcolors(struct gspca_dev *gspca_dev) + + static void setautobrightness(struct sd *sd) + { +- if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7670 || ++ if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7648 || ++ sd->sensor == SEN_OV7670 || + sd->sensor == SEN_OV2610 || sd->sensor == SEN_OV3610) + return; + +@@ -4475,9 +4598,13 @@ static const struct sd_desc sd_desc = { + .stopN = sd_stopN, + .stop0 = sd_stop0, + .pkt_scan = sd_pkt_scan, ++ .dq_callback = sd_reset_snapshot, + .querymenu = sd_querymenu, + .get_jcomp = sd_get_jcomp, + .set_jcomp = sd_set_jcomp, ++#ifdef CONFIG_INPUT ++ .other_input = 1, ++#endif + }; + + /* -- module initialisation -- */ +@@ -4494,7 +4621,8 @@ static const __devinitdata struct usb_device_id device_table[] = { + .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, + {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 }, + {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 }, +- {USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 }, ++ {USB_DEVICE(0x054c, 0x0155), ++ .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, + {USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 }, + {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 }, + {USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 }, +diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c +index 0a6b8f0..957e05e 100644 +--- a/drivers/media/video/gspca/ov534.c ++++ b/drivers/media/video/gspca/ov534.c +@@ -1,5 +1,5 @@ + /* +- * ov534 gspca driver ++ * ov534-ov772x gspca driver + * + * Copyright (C) 2008 Antonio Ospite + 
* Copyright (C) 2008 Jim Paris +@@ -68,12 +68,7 @@ struct sd { + s8 sharpness; + u8 hflip; + u8 vflip; +- u8 satur; +- u8 lightfreq; + +- u8 sensor; +-#define SENSOR_OV772X 0 +-#define SENSOR_OV965X 1 + }; + + /* V4L2 controls supported by the driver */ +@@ -101,12 +96,8 @@ static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val); +-static int sd_setsatur(struct gspca_dev *gspca_dev, __s32 val); +-static int sd_getsatur(struct gspca_dev *gspca_dev, __s32 *val); +-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val); +-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls_ov772x[] = { ++static const struct ctrl sd_ctrls[] = { + { /* 0 */ + { + .id = V4L2_CID_BRIGHTNESS, +@@ -115,8 +106,8 @@ static struct ctrl sd_ctrls_ov772x[] = { + .minimum = 0, + .maximum = 255, + .step = 1, +-#define BRIGHTNESS_77_DEF 20 +- .default_value = BRIGHTNESS_77_DEF, ++#define BRIGHTNESS_DEF 20 ++ .default_value = BRIGHTNESS_DEF, + }, + .set = sd_setbrightness, + .get = sd_getbrightness, +@@ -129,8 +120,8 @@ static struct ctrl sd_ctrls_ov772x[] = { + .minimum = 0, + .maximum = 255, + .step = 1, +-#define CONTRAST_77_DEF 37 +- .default_value = CONTRAST_77_DEF, ++#define CONTRAST_DEF 37 ++ .default_value = CONTRAST_DEF, + }, + .set = sd_setcontrast, + .get = sd_getcontrast, +@@ -157,8 +148,8 @@ static struct ctrl sd_ctrls_ov772x[] = { + .minimum = 0, + .maximum = 255, + .step = 1, +-#define EXPO_77_DEF 120 +- .default_value = EXPO_77_DEF, ++#define EXPO_DEF 120 ++ .default_value = EXPO_DEF, + }, + .set = sd_setexposure, + .get = sd_getexposure, +@@ -213,13 +204,13 @@ static struct ctrl sd_ctrls_ov772x[] = { + .minimum = 0, + .maximum = 1, + .step = 1, +-#define AUTOGAIN_77_DEF 0 +- .default_value = AUTOGAIN_77_DEF, ++#define AUTOGAIN_DEF 0 ++ .default_value = AUTOGAIN_DEF, + }, + .set = sd_setautogain, + .get = sd_getautogain, + }, +-#define AWB_77_IDX 8 ++#define AWB_IDX 8 + { /* 8 */ + { + .id = V4L2_CID_AUTO_WHITE_BALANCE, +@@ -242,8 +233,8 @@ static struct ctrl sd_ctrls_ov772x[] = { + .minimum = 0, + .maximum = 63, + .step = 1, +-#define SHARPNESS_77_DEF 0 +- .default_value = SHARPNESS_77_DEF, ++#define SHARPNESS_DEF 0 ++ .default_value = SHARPNESS_DEF, + }, + .set = sd_setsharpness, + .get = sd_getsharpness, +@@ -277,107 +268,6 @@ static struct ctrl sd_ctrls_ov772x[] = { + .get = sd_getvflip, + }, + }; +-static struct ctrl sd_ctrls_ov965x[] = { +- { /* 0 */ +- { +- .id = V4L2_CID_BRIGHTNESS, +- .type = V4L2_CTRL_TYPE_INTEGER, +- .name = "Brightness", +- .minimum = 0, +- .maximum = 15, +- .step = 1, +-#define BRIGHTNESS_96_DEF 7 +- .default_value = BRIGHTNESS_96_DEF, +- }, +- .set = sd_setbrightness, +- .get = sd_getbrightness, +- }, +- { /* 1 */ +- { +- .id = V4L2_CID_CONTRAST, +- .type = V4L2_CTRL_TYPE_INTEGER, +- .name = "Contrast", +- .minimum = 0, +- .maximum = 15, +- .step = 1, +-#define CONTRAST_96_DEF 3 +- .default_value = CONTRAST_96_DEF, +- }, +- .set = sd_setcontrast, +- .get = sd_getcontrast, +- }, +- { /* 2 */ +- { +- .id = V4L2_CID_AUTOGAIN, +- .type = V4L2_CTRL_TYPE_BOOLEAN, +- .name = "Autogain", +- .minimum = 0, +- .maximum = 1, +- .step = 1, +-#define AUTOGAIN_96_DEF 1 +- .default_value = AUTOGAIN_96_DEF, +- }, +- .set = sd_setautogain, +- .get = sd_getautogain, +- }, +-#define EXPO_96_IDX 3 +- { /* 3 */ +- { +- .id = 
V4L2_CID_EXPOSURE, +- .type = V4L2_CTRL_TYPE_INTEGER, +- .name = "Exposure", +- .minimum = 0, +- .maximum = 3, +- .step = 1, +-#define EXPO_96_DEF 0 +- .default_value = EXPO_96_DEF, +- }, +- .set = sd_setexposure, +- .get = sd_getexposure, +- }, +- { /* 4 */ +- { +- .id = V4L2_CID_SHARPNESS, +- .type = V4L2_CTRL_TYPE_INTEGER, +- .name = "Sharpness", +- .minimum = -1, /* -1 = auto */ +- .maximum = 4, +- .step = 1, +-#define SHARPNESS_96_DEF -1 +- .default_value = SHARPNESS_96_DEF, +- }, +- .set = sd_setsharpness, +- .get = sd_getsharpness, +- }, +- { /* 5 */ +- { +- .id = V4L2_CID_SATURATION, +- .type = V4L2_CTRL_TYPE_INTEGER, +- .name = "Saturation", +- .minimum = 0, +- .maximum = 4, +- .step = 1, +-#define SATUR_DEF 2 +- .default_value = SATUR_DEF, +- }, +- .set = sd_setsatur, +- .get = sd_getsatur, +- }, +- { +- { +- .id = V4L2_CID_POWER_LINE_FREQUENCY, +- .type = V4L2_CTRL_TYPE_MENU, +- .name = "Light frequency filter", +- .minimum = 0, +- .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */ +- .step = 1, +-#define FREQ_DEF 0 +- .default_value = FREQ_DEF, +- }, +- .set = sd_setfreq, +- .get = sd_getfreq, +- }, +-}; + + static const struct v4l2_pix_format ov772x_mode[] = { + {320, 240, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE, +@@ -392,35 +282,21 @@ static const struct v4l2_pix_format ov772x_mode[] = { + .priv = 0}, + }; + +-static const struct v4l2_pix_format ov965x_mode[] = { +- {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, +- .bytesperline = 320, +- .sizeimage = 320 * 240 * 3 / 8 + 590, +- .colorspace = V4L2_COLORSPACE_JPEG, +- .priv = 4}, +- {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, +- .bytesperline = 640, +- .sizeimage = 640 * 480 * 3 / 8 + 590, +- .colorspace = V4L2_COLORSPACE_JPEG, +- .priv = 3}, +- {800, 600, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, +- .bytesperline = 800, +- .sizeimage = 800 * 600 * 3 / 8 + 590, +- .colorspace = V4L2_COLORSPACE_JPEG, +- .priv = 2}, +- {1024, 768, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, +- .bytesperline = 1024, +- .sizeimage = 1024 * 768 * 3 / 8 + 590, +- .colorspace = V4L2_COLORSPACE_JPEG, +- .priv = 1}, +- {1280, 1024, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, +- .bytesperline = 1280, +- .sizeimage = 1280 * 1024 * 3 / 8 + 590, +- .colorspace = V4L2_COLORSPACE_JPEG, +- .priv = 0}, ++static const u8 qvga_rates[] = {125, 100, 75, 60, 50, 40, 30}; ++static const u8 vga_rates[] = {60, 50, 40, 30, 15}; ++ ++static const struct framerates ov772x_framerates[] = { ++ { /* 320x240 */ ++ .rates = qvga_rates, ++ .nrates = ARRAY_SIZE(qvga_rates), ++ }, ++ { /* 640x480 */ ++ .rates = vga_rates, ++ .nrates = ARRAY_SIZE(vga_rates), ++ }, + }; + +-static const u8 bridge_init_ov772x[][2] = { ++static const u8 bridge_init[][2] = { + { 0xc2, 0x0c }, + { 0x88, 0xf8 }, + { 0xc3, 0x69 }, +@@ -478,7 +354,7 @@ static const u8 bridge_init_ov772x[][2] = { + { 0xc1, 0x3c }, + { 0xc2, 0x0c }, + }; +-static const u8 sensor_init_ov772x[][2] = { ++static const u8 sensor_init[][2] = { + { 0x12, 0x80 }, + { 0x11, 0x01 }, + /*fixme: better have a delay?*/ +@@ -571,7 +447,7 @@ static const u8 sensor_init_ov772x[][2] = { + { 0x8e, 0x00 }, /* De-noise threshold */ + { 0x0c, 0xd0 } + }; +-static const u8 bridge_start_ov772x_vga[][2] = { ++static const u8 bridge_start_vga[][2] = { + {0x1c, 0x00}, + {0x1d, 0x40}, + {0x1d, 0x02}, +@@ -582,7 +458,7 @@ static const u8 bridge_start_ov772x_vga[][2] = { + {0xc0, 0x50}, + {0xc1, 0x3c}, + }; +-static const u8 sensor_start_ov772x_vga[][2] = { ++static const u8 sensor_start_vga[][2] = { + {0x12, 0x00}, + {0x17, 0x26}, + {0x18, 0xa0}, +@@ -592,7 +468,7 @@ static const u8 
sensor_start_ov772x_vga[][2] = { + {0x2c, 0xf0}, + {0x65, 0x20}, + }; +-static const u8 bridge_start_ov772x_qvga[][2] = { ++static const u8 bridge_start_qvga[][2] = { + {0x1c, 0x00}, + {0x1d, 0x40}, + {0x1d, 0x02}, +@@ -603,7 +479,7 @@ static const u8 bridge_start_ov772x_qvga[][2] = { + {0xc0, 0x28}, + {0xc1, 0x1e}, + }; +-static const u8 sensor_start_ov772x_qvga[][2] = { ++static const u8 sensor_start_qvga[][2] = { + {0x12, 0x40}, + {0x17, 0x3f}, + {0x18, 0x50}, +@@ -614,571 +490,6 @@ static const u8 sensor_start_ov772x_qvga[][2] = { + {0x65, 0x2f}, + }; + +-static const u8 bridge_init_ov965x[][2] = { +- {0x88, 0xf8}, +- {0x89, 0xff}, +- {0x76, 0x03}, +- {0x92, 0x03}, +- {0x95, 0x10}, +- {0xe2, 0x00}, +- {0xe7, 0x3e}, +- {0x8d, 0x1c}, +- {0x8e, 0x00}, +- {0x8f, 0x00}, +- {0x1f, 0x00}, +- {0xc3, 0xf9}, +- {0x89, 0xff}, +- {0x88, 0xf8}, +- {0x76, 0x03}, +- {0x92, 0x01}, +- {0x93, 0x18}, +- {0x1c, 0x0a}, +- {0x1d, 0x48}, +- {0xc0, 0x50}, +- {0xc1, 0x3c}, +- {0x34, 0x05}, +- {0xc2, 0x0c}, +- {0xc3, 0xf9}, +- {0x34, 0x05}, +- {0xe7, 0x2e}, +- {0x31, 0xf9}, +- {0x35, 0x02}, +- {0xd9, 0x10}, +- {0x25, 0x42}, +- {0x94, 0x11}, +-}; +- +-static const u8 sensor_init_ov965x[][2] = { +- {0x12, 0x80}, /* com7 - SSCB reset */ +- {0x00, 0x00}, /* gain */ +- {0x01, 0x80}, /* blue */ +- {0x02, 0x80}, /* red */ +- {0x03, 0x1b}, /* vref */ +- {0x04, 0x03}, /* com1 - exposure low bits */ +- {0x0b, 0x57}, /* ver */ +- {0x0e, 0x61}, /* com5 */ +- {0x0f, 0x42}, /* com6 */ +- {0x11, 0x00}, /* clkrc */ +- {0x12, 0x02}, /* com7 - 15fps VGA YUYV */ +- {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ +- {0x14, 0x28}, /* com9 */ +- {0x16, 0x24}, /* reg16 */ +- {0x17, 0x1d}, /* hstart*/ +- {0x18, 0xbd}, /* hstop */ +- {0x19, 0x01}, /* vstrt */ +- {0x1a, 0x81}, /* vstop*/ +- {0x1e, 0x04}, /* mvfp */ +- {0x24, 0x3c}, /* aew */ +- {0x25, 0x36}, /* aeb */ +- {0x26, 0x71}, /* vpt */ +- {0x27, 0x08}, /* bbias */ +- {0x28, 0x08}, /* gbbias */ +- {0x29, 0x15}, /* gr com */ +- {0x2a, 0x00}, /* exhch */ +- {0x2b, 0x00}, /* exhcl */ +- {0x2c, 0x08}, /* rbias */ +- {0x32, 0xff}, /* href */ +- {0x33, 0x00}, /* chlf */ +- {0x34, 0x3f}, /* aref1 */ +- {0x35, 0x00}, /* aref2 */ +- {0x36, 0xf8}, /* aref3 */ +- {0x38, 0x72}, /* adc2 */ +- {0x39, 0x57}, /* aref4 */ +- {0x3a, 0x80}, /* tslb - yuyv */ +- {0x3b, 0xc4}, /* com11 - night mode 1/4 frame rate */ +- {0x3d, 0x99}, /* com13 */ +- {0x3f, 0xc1}, /* edge */ +- {0x40, 0xc0}, /* com15 */ +- {0x41, 0x40}, /* com16 */ +- {0x42, 0xc0}, /* com17 */ +- {0x43, 0x0a}, /* rsvd */ +- {0x44, 0xf0}, +- {0x45, 0x46}, +- {0x46, 0x62}, +- {0x47, 0x2a}, +- {0x48, 0x3c}, +- {0x4a, 0xfc}, +- {0x4b, 0xfc}, +- {0x4c, 0x7f}, +- {0x4d, 0x7f}, +- {0x4e, 0x7f}, +- {0x4f, 0x98}, /* matrix */ +- {0x50, 0x98}, +- {0x51, 0x00}, +- {0x52, 0x28}, +- {0x53, 0x70}, +- {0x54, 0x98}, +- {0x58, 0x1a}, /* matrix coef sign */ +- {0x59, 0x85}, /* AWB control */ +- {0x5a, 0xa9}, +- {0x5b, 0x64}, +- {0x5c, 0x84}, +- {0x5d, 0x53}, +- {0x5e, 0x0e}, +- {0x5f, 0xf0}, /* AWB blue limit */ +- {0x60, 0xf0}, /* AWB red limit */ +- {0x61, 0xf0}, /* AWB green limit */ +- {0x62, 0x00}, /* lcc1 */ +- {0x63, 0x00}, /* lcc2 */ +- {0x64, 0x02}, /* lcc3 */ +- {0x65, 0x16}, /* lcc4 */ +- {0x66, 0x01}, /* lcc5 */ +- {0x69, 0x02}, /* hv */ +- {0x6b, 0x5a}, /* dbvl */ +- {0x6c, 0x04}, +- {0x6d, 0x55}, +- {0x6e, 0x00}, +- {0x6f, 0x9d}, +- {0x70, 0x21}, /* dnsth */ +- {0x71, 0x78}, +- {0x72, 0x00}, /* poidx */ +- {0x73, 0x01}, /* pckdv */ +- {0x74, 0x3a}, /* xindx */ +- {0x75, 0x35}, /* yindx */ +- {0x76, 0x01}, +- {0x77, 0x02}, +- 
{0x7a, 0x12}, /* gamma curve */ +- {0x7b, 0x08}, +- {0x7c, 0x16}, +- {0x7d, 0x30}, +- {0x7e, 0x5e}, +- {0x7f, 0x72}, +- {0x80, 0x82}, +- {0x81, 0x8e}, +- {0x82, 0x9a}, +- {0x83, 0xa4}, +- {0x84, 0xac}, +- {0x85, 0xb8}, +- {0x86, 0xc3}, +- {0x87, 0xd6}, +- {0x88, 0xe6}, +- {0x89, 0xf2}, +- {0x8a, 0x03}, +- {0x8c, 0x89}, /* com19 */ +- {0x14, 0x28}, /* com9 */ +- {0x90, 0x7d}, +- {0x91, 0x7b}, +- {0x9d, 0x03}, /* lcc6 */ +- {0x9e, 0x04}, /* lcc7 */ +- {0x9f, 0x7a}, +- {0xa0, 0x79}, +- {0xa1, 0x40}, /* aechm */ +- {0xa4, 0x50}, /* com21 */ +- {0xa5, 0x68}, /* com26 */ +- {0xa6, 0x4a}, /* AWB green */ +- {0xa8, 0xc1}, /* refa8 */ +- {0xa9, 0xef}, /* refa9 */ +- {0xaa, 0x92}, +- {0xab, 0x04}, +- {0xac, 0x80}, /* black level control */ +- {0xad, 0x80}, +- {0xae, 0x80}, +- {0xaf, 0x80}, +- {0xb2, 0xf2}, +- {0xb3, 0x20}, +- {0xb4, 0x20}, /* ctrlb4 */ +- {0xb5, 0x00}, +- {0xb6, 0xaf}, +- {0xbb, 0xae}, +- {0xbc, 0x7f}, /* ADC channel offsets */ +- {0xdb, 0x7f}, +- {0xbe, 0x7f}, +- {0xbf, 0x7f}, +- {0xc0, 0xe2}, +- {0xc1, 0xc0}, +- {0xc2, 0x01}, +- {0xc3, 0x4e}, +- {0xc6, 0x85}, +- {0xc7, 0x80}, /* com24 */ +- {0xc9, 0xe0}, +- {0xca, 0xe8}, +- {0xcb, 0xf0}, +- {0xcc, 0xd8}, +- {0xcd, 0xf1}, +- {0x4f, 0x98}, /* matrix */ +- {0x50, 0x98}, +- {0x51, 0x00}, +- {0x52, 0x28}, +- {0x53, 0x70}, +- {0x54, 0x98}, +- {0x58, 0x1a}, +- {0xff, 0x41}, /* read 41, write ff 00 */ +- {0x41, 0x40}, /* com16 */ +- +- {0xc5, 0x03}, /* 60 Hz banding filter */ +- {0x6a, 0x02}, /* 50 Hz banding filter */ +- +- {0x12, 0x62}, /* com7 - 30fps VGA YUV */ +- {0x36, 0xfa}, /* aref3 */ +- {0x69, 0x0a}, /* hv */ +- {0x8c, 0x89}, /* com22 */ +- {0x14, 0x28}, /* com9 */ +- {0x3e, 0x0c}, +- {0x41, 0x40}, /* com16 */ +- {0x72, 0x00}, +- {0x73, 0x00}, +- {0x74, 0x3a}, +- {0x75, 0x35}, +- {0x76, 0x01}, +- {0xc7, 0x80}, +- {0x03, 0x12}, /* vref */ +- {0x17, 0x16}, /* hstart */ +- {0x18, 0x02}, /* hstop */ +- {0x19, 0x01}, /* vstrt */ +- {0x1a, 0x3d}, /* vstop */ +- {0x32, 0xff}, /* href */ +- {0xc0, 0xaa}, +-}; +- +-static const u8 bridge_init_ov965x_2[][2] = { +- {0x94, 0xaa}, +- {0xf1, 0x60}, +- {0xe5, 0x04}, +- {0xc0, 0x50}, +- {0xc1, 0x3c}, +- {0x8c, 0x00}, +- {0x8d, 0x1c}, +- {0x34, 0x05}, +- +- {0xc2, 0x0c}, +- {0xc3, 0xf9}, +- {0xda, 0x01}, +- {0x50, 0x00}, +- {0x51, 0xa0}, +- {0x52, 0x3c}, +- {0x53, 0x00}, +- {0x54, 0x00}, +- {0x55, 0x00}, +- {0x57, 0x00}, +- {0x5c, 0x00}, +- {0x5a, 0xa0}, +- {0x5b, 0x78}, +- {0x35, 0x02}, +- {0xd9, 0x10}, +- {0x94, 0x11}, +-}; +- +-static const u8 sensor_init_ov965x_2[][2] = { +- {0x3b, 0xc4}, +- {0x1e, 0x04}, /* mvfp */ +- {0x13, 0xe0}, /* com8 */ +- {0x00, 0x00}, /* gain */ +- {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ +- {0x11, 0x03}, /* clkrc */ +- {0x6b, 0x5a}, /* dblv */ +- {0x6a, 0x05}, +- {0xc5, 0x07}, +- {0xa2, 0x4b}, +- {0xa3, 0x3e}, +- {0x2d, 0x00}, +- {0xff, 0x42}, /* read 42, write ff 00 */ +- {0x42, 0xc0}, /* com17 */ +- {0x2d, 0x00}, +- {0xff, 0x42}, /* read 42, write ff 00 */ +- {0x42, 0xc1}, /* com17 */ +-/* sharpness */ +- {0x3f, 0x01}, +- {0xff, 0x42}, /* read 42, write ff 00 */ +- {0x42, 0xc1}, /* com17 */ +-/* saturation */ +- {0x4f, 0x98}, /* matrix */ +- {0x50, 0x98}, +- {0x51, 0x00}, +- {0x52, 0x28}, +- {0x53, 0x70}, +- {0x54, 0x98}, +- {0x58, 0x1a}, +- {0xff, 0x41}, /* read 41, write ff 00 */ +- {0x41, 0x40}, /* com16 */ +-/* contrast */ +- {0x56, 0x40}, +-/* brightness */ +- {0x55, 0x8f}, +-/* expo */ +- {0x10, 0x25}, /* aech - exposure high bits */ +- {0xff, 0x13}, /* read 13, write ff 00 */ +- {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ +-}; 
+- +-static const u8 sensor_start_ov965x_1_vga[][2] = { /* same for qvga */ +- {0x12, 0x62}, /* com7 - 30fps VGA YUV */ +- {0x36, 0xfa}, /* aref3 */ +- {0x69, 0x0a}, /* hv */ +- {0x8c, 0x89}, /* com22 */ +- {0x14, 0x28}, /* com9 */ +- {0x3e, 0x0c}, /* com14 */ +- {0x41, 0x40}, /* com16 */ +- {0x72, 0x00}, +- {0x73, 0x00}, +- {0x74, 0x3a}, +- {0x75, 0x35}, +- {0x76, 0x01}, +- {0xc7, 0x80}, /* com24 */ +- {0x03, 0x12}, /* vref */ +- {0x17, 0x16}, /* hstart */ +- {0x18, 0x02}, /* hstop */ +- {0x19, 0x01}, /* vstrt */ +- {0x1a, 0x3d}, /* vstop */ +- {0x32, 0xff}, /* href */ +- {0xc0, 0xaa}, +-}; +- +-static const u8 sensor_start_ov965x_1_svga[][2] = { +- {0x12, 0x02}, /* com7 - YUYV - VGA 15 full resolution */ +- {0x36, 0xf8}, /* aref3 */ +- {0x69, 0x02}, /* hv */ +- {0x8c, 0x0d}, /* com22 */ +- {0x3e, 0x0c}, /* com14 */ +- {0x41, 0x40}, /* com16 */ +- {0x72, 0x00}, +- {0x73, 0x01}, +- {0x74, 0x3a}, +- {0x75, 0x35}, +- {0x76, 0x01}, +- {0xc7, 0x80}, /* com24 */ +- {0x03, 0x1b}, /* vref */ +- {0x17, 0x1d}, /* hstart */ +- {0x18, 0xbd}, /* hstop */ +- {0x19, 0x01}, /* vstrt */ +- {0x1a, 0x81}, /* vstop */ +- {0x32, 0xff}, /* href */ +- {0xc0, 0xe2}, +-}; +- +-static const u8 sensor_start_ov965x_1_xga[][2] = { +- {0x12, 0x02}, /* com7 */ +- {0x36, 0xf8}, /* aref3 */ +- {0x69, 0x02}, /* hv */ +- {0x8c, 0x89}, /* com22 */ +- {0x14, 0x28}, /* com9 */ +- {0x3e, 0x0c}, /* com14 */ +- {0x41, 0x40}, /* com16 */ +- {0x72, 0x00}, +- {0x73, 0x01}, +- {0x74, 0x3a}, +- {0x75, 0x35}, +- {0x76, 0x01}, +- {0xc7, 0x80}, /* com24 */ +- {0x03, 0x1b}, /* vref */ +- {0x17, 0x1d}, /* hstart */ +- {0x18, 0xbd}, /* hstop */ +- {0x19, 0x01}, /* vstrt */ +- {0x1a, 0x81}, /* vstop */ +- {0x32, 0xff}, /* href */ +- {0xc0, 0xe2}, +-}; +- +-static const u8 sensor_start_ov965x_1_sxga[][2] = { +- {0x12, 0x02}, /* com7 */ +- {0x36, 0xf8}, /* aref3 */ +- {0x69, 0x02}, /* hv */ +- {0x8c, 0x89}, /* com22 */ +- {0x14, 0x28}, /* com9 */ +- {0x3e, 0x0c}, /* com14 */ +- {0x41, 0x40}, /* com16 */ +- {0x72, 0x00}, +- {0x73, 0x01}, +- {0x74, 0x3a}, +- {0x75, 0x35}, +- {0x76, 0x01}, +- {0xc7, 0x80}, /* com24 */ +- {0x03, 0x1b}, /* vref */ +- {0x17, 0x1d}, /* hstart */ +- {0x18, 0x02}, /* hstop */ +- {0x19, 0x01}, /* vstrt */ +- {0x1a, 0x81}, /* vstop */ +- {0x32, 0xff}, /* href */ +- {0xc0, 0xe2}, +-}; +- +-static const u8 bridge_start_ov965x_qvga[][2] = { +- {0x94, 0xaa}, +- {0xf1, 0x60}, +- {0xe5, 0x04}, +- {0xc0, 0x50}, +- {0xc1, 0x3c}, +- {0x8c, 0x00}, +- {0x8d, 0x1c}, +- {0x34, 0x05}, +- +- {0xc2, 0x4c}, +- {0xc3, 0xf9}, +- {0xda, 0x00}, +- {0x50, 0x00}, +- {0x51, 0xa0}, +- {0x52, 0x78}, +- {0x53, 0x00}, +- {0x54, 0x00}, +- {0x55, 0x00}, +- {0x57, 0x00}, +- {0x5c, 0x00}, +- {0x5a, 0x50}, +- {0x5b, 0x3c}, +- {0x35, 0x02}, +- {0xd9, 0x10}, +- {0x94, 0x11}, +-}; +- +-static const u8 bridge_start_ov965x_vga[][2] = { +- {0x94, 0xaa}, +- {0xf1, 0x60}, +- {0xe5, 0x04}, +- {0xc0, 0x50}, +- {0xc1, 0x3c}, +- {0x8c, 0x00}, +- {0x8d, 0x1c}, +- {0x34, 0x05}, +- {0xc2, 0x0c}, +- {0xc3, 0xf9}, +- {0xda, 0x01}, +- {0x50, 0x00}, +- {0x51, 0xa0}, +- {0x52, 0x3c}, +- {0x53, 0x00}, +- {0x54, 0x00}, +- {0x55, 0x00}, +- {0x57, 0x00}, +- {0x5c, 0x00}, +- {0x5a, 0xa0}, +- {0x5b, 0x78}, +- {0x35, 0x02}, +- {0xd9, 0x10}, +- {0x94, 0x11}, +-}; +- +-static const u8 bridge_start_ov965x_svga[][2] = { +- {0x94, 0xaa}, +- {0xf1, 0x60}, +- {0xe5, 0x04}, +- {0xc0, 0xa0}, +- {0xc1, 0x80}, +- {0x8c, 0x00}, +- {0x8d, 0x1c}, +- {0x34, 0x05}, +- {0xc2, 0x4c}, +- {0xc3, 0xf9}, +- {0x50, 0x00}, +- {0x51, 0x40}, +- {0x52, 0x00}, +- {0x53, 0x00}, +- {0x54, 0x00}, +- {0x55, 
0x88}, +- {0x57, 0x00}, +- {0x5c, 0x00}, +- {0x5a, 0xc8}, +- {0x5b, 0x96}, +- {0x35, 0x02}, +- {0xd9, 0x10}, +- {0xda, 0x00}, +- {0x94, 0x11}, +-}; +- +-static const u8 bridge_start_ov965x_xga[][2] = { +- {0x94, 0xaa}, +- {0xf1, 0x60}, +- {0xe5, 0x04}, +- {0xc0, 0xa0}, +- {0xc1, 0x80}, +- {0x8c, 0x00}, +- {0x8d, 0x1c}, +- {0x34, 0x05}, +- {0xc2, 0x4c}, +- {0xc3, 0xf9}, +- {0x50, 0x00}, +- {0x51, 0x40}, +- {0x52, 0x00}, +- {0x53, 0x00}, +- {0x54, 0x00}, +- {0x55, 0x88}, +- {0x57, 0x00}, +- {0x5c, 0x01}, +- {0x5a, 0x00}, +- {0x5b, 0xc0}, +- {0x35, 0x02}, +- {0xd9, 0x10}, +- {0xda, 0x01}, +- {0x94, 0x11}, +-}; +- +-static const u8 bridge_start_ov965x_sxga[][2] = { +- {0x94, 0xaa}, +- {0xf1, 0x60}, +- {0xe5, 0x04}, +- {0xc0, 0xa0}, +- {0xc1, 0x80}, +- {0x8c, 0x00}, +- {0x8d, 0x1c}, +- {0x34, 0x05}, +- {0xc2, 0x0c}, +- {0xc3, 0xf9}, +- {0xda, 0x00}, +- {0x35, 0x02}, +- {0xd9, 0x10}, +- {0x94, 0x11}, +-}; +- +-static const u8 sensor_start_ov965x_2_qvga[][2] = { +- {0x3b, 0xe4}, /* com11 - night mode 1/4 frame rate */ +- {0x1e, 0x04}, /* mvfp */ +- {0x13, 0xe0}, /* com8 */ +- {0x00, 0x00}, +- {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ +- {0x11, 0x01}, /* clkrc */ +- {0x6b, 0x5a}, /* dblv */ +- {0x6a, 0x02}, /* 50 Hz banding filter */ +- {0xc5, 0x03}, /* 60 Hz banding filter */ +- {0xa2, 0x96}, /* bd50 */ +- {0xa3, 0x7d}, /* bd60 */ +- +- {0xff, 0x13}, /* read 13, write ff 00 */ +- {0x13, 0xe7}, +- {0x3a, 0x80}, /* tslb - yuyv */ +-}; +- +-static const u8 sensor_start_ov965x_2_vga[][2] = { +- {0x3b, 0xc4}, /* com11 - night mode 1/4 frame rate */ +- {0x1e, 0x04}, /* mvfp */ +- {0x13, 0xe0}, /* com8 */ +- {0x00, 0x00}, +- {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ +- {0x11, 0x03}, /* clkrc */ +- {0x6b, 0x5a}, /* dblv */ +- {0x6a, 0x05}, /* 50 Hz banding filter */ +- {0xc5, 0x07}, /* 60 Hz banding filter */ +- {0xa2, 0x4b}, /* bd50 */ +- {0xa3, 0x3e}, /* bd60 */ +- +- {0x2d, 0x00}, /* advfl */ +-}; +- +-static const u8 sensor_start_ov965x_2_svga[][2] = { /* same for xga */ +- {0x3b, 0xc4}, /* com11 - night mode 1/4 frame rate */ +- {0x1e, 0x04}, /* mvfp */ +- {0x13, 0xe0}, /* com8 */ +- {0x00, 0x00}, +- {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ +- {0x11, 0x01}, /* clkrc */ +- {0x6b, 0x5a}, /* dblv */ +- {0x6a, 0x0c}, /* 50 Hz banding filter */ +- {0xc5, 0x0f}, /* 60 Hz banding filter */ +- {0xa2, 0x4e}, /* bd50 */ +- {0xa3, 0x41}, /* bd60 */ +-}; +- +-static const u8 sensor_start_ov965x_2_sxga[][2] = { +- {0x13, 0xe0}, /* com8 */ +- {0x00, 0x00}, +- {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ +- {0x3b, 0xc4}, /* com11 - night mode 1/4 frame rate */ +- {0x1e, 0x04}, /* mvfp */ +- {0x11, 0x01}, /* clkrc */ +- {0x6b, 0x5a}, /* dblv */ +- {0x6a, 0x0c}, /* 50 Hz banding filter */ +- {0xc5, 0x0f}, /* 60 Hz banding filter */ +- {0xa2, 0x4e}, /* bd50 */ +- {0xa3, 0x41}, /* bd60 */ +-}; +- + static void ov534_reg_write(struct gspca_dev *gspca_dev, u16 reg, u8 val) + { + struct usb_device *udev = gspca_dev->dev; +@@ -1360,14 +671,14 @@ static void set_frame_rate(struct gspca_dev *gspca_dev) + PDEBUG(D_PROBE, "frame_rate: %d", r->fps); + } + +-static void setbrightness_77(struct gspca_dev *gspca_dev) ++static void setbrightness(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + + sccb_reg_write(gspca_dev, 0x9B, sd->brightness); + } + +-static void setcontrast_77(struct gspca_dev *gspca_dev) ++static void setcontrast(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + +@@ -1401,7 +712,7 @@ static void 
setgain(struct gspca_dev *gspca_dev) + sccb_reg_write(gspca_dev, 0x00, val); + } + +-static void setexposure_77(struct gspca_dev *gspca_dev) ++static void setexposure(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + u8 val; +@@ -1432,7 +743,7 @@ static void sethue(struct gspca_dev *gspca_dev) + sccb_reg_write(gspca_dev, 0x01, sd->hue); + } + +-static void setautogain_77(struct gspca_dev *gspca_dev) ++static void setautogain(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + +@@ -1457,7 +768,7 @@ static void setawb(struct gspca_dev *gspca_dev) + sccb_reg_write(gspca_dev, 0x63, 0xaa); /* AWB off */ + } + +-static void setsharpness_77(struct gspca_dev *gspca_dev) ++static void setsharpness(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + u8 val; +@@ -1491,132 +802,6 @@ static void setvflip(struct gspca_dev *gspca_dev) + sccb_reg_read(gspca_dev, 0x0c) & 0x7f); + } + +-/* ov965x specific controls */ +-static void setbrightness_96(struct gspca_dev *gspca_dev) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- u8 val; +- +- val = sd->brightness; +- if (val < 8) +- val = 15 - val; /* f .. 8 */ +- else +- val = val - 8; /* 0 .. 7 */ +- sccb_reg_write(gspca_dev, 0x55, /* brtn - brightness adjustment */ +- 0x0f | (val << 4)); +-} +- +-static void setcontrast_96(struct gspca_dev *gspca_dev) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- +- sccb_reg_write(gspca_dev, 0x56, /* cnst1 - contrast 1 ctrl coeff */ +- sd->contrast << 4); +-} +- +-static void setexposure_96(struct gspca_dev *gspca_dev) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- u8 val; +- static const u8 expo[4] = {0x00, 0x25, 0x38, 0x5e}; +- +- sccb_reg_write(gspca_dev, 0x10, /* aec[9:2] */ +- expo[sd->exposure]); +- val = sccb_reg_read(gspca_dev, 0x13); /* com8 */ +- sccb_reg_write(gspca_dev, 0xff, 0x00); +- sccb_reg_write(gspca_dev, 0x13, val); +- val = sccb_reg_read(gspca_dev, 0xa1); /* aech */ +- sccb_reg_write(gspca_dev, 0xff, 0x00); +- sccb_reg_write(gspca_dev, 0xa1, val & 0xe0); /* aec[15:10] = 0 */ +-} +- +-static void setsharpness_96(struct gspca_dev *gspca_dev) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- s8 val; +- +- val = sd->sharpness; +- if (val < 0) { /* auto */ +- val = sccb_reg_read(gspca_dev, 0x42); /* com17 */ +- sccb_reg_write(gspca_dev, 0xff, 0x00); +- sccb_reg_write(gspca_dev, 0x42, val | 0x40); +- /* Edge enhancement strength auto adjust */ +- return; +- } +- if (val != 0) +- val = 1 << (val - 1); +- sccb_reg_write(gspca_dev, 0x3f, /* edge - edge enhance. 
factor */ +- val); +- val = sccb_reg_read(gspca_dev, 0x42); /* com17 */ +- sccb_reg_write(gspca_dev, 0xff, 0x00); +- sccb_reg_write(gspca_dev, 0x42, val & 0xbf); +-} +- +-static void setautogain_96(struct gspca_dev *gspca_dev) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- u8 val; +- +-/*fixme: should adjust agc/awb/aec by different controls */ +- val = sd->autogain; +- val = sccb_reg_read(gspca_dev, 0x13); /* com8 */ +- sccb_reg_write(gspca_dev, 0xff, 0x00); +- if (sd->autogain) +- val |= 0x05; /* agc & aec */ +- else +- val &= 0xfa; +- sccb_reg_write(gspca_dev, 0x13, val); +-} +- +-static void setsatur(struct gspca_dev *gspca_dev) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- u8 val1, val2, val3; +- static const u8 matrix[5][2] = { +- {0x14, 0x38}, +- {0x1e, 0x54}, +- {0x28, 0x70}, +- {0x32, 0x8c}, +- {0x48, 0x90} +- }; +- +- val1 = matrix[sd->satur][0]; +- val2 = matrix[sd->satur][1]; +- val3 = val1 + val2; +- sccb_reg_write(gspca_dev, 0x4f, val3); /* matrix coeff */ +- sccb_reg_write(gspca_dev, 0x50, val3); +- sccb_reg_write(gspca_dev, 0x51, 0x00); +- sccb_reg_write(gspca_dev, 0x52, val1); +- sccb_reg_write(gspca_dev, 0x53, val2); +- sccb_reg_write(gspca_dev, 0x54, val3); +- sccb_reg_write(gspca_dev, 0x58, 0x1a); /* mtxs - coeff signs */ +- val1 = sccb_reg_read(gspca_dev, 0x41); /* com16 */ +- sccb_reg_write(gspca_dev, 0xff, 0x00); +- sccb_reg_write(gspca_dev, 0x41, val1); +-} +- +-static void setfreq(struct gspca_dev *gspca_dev) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- u8 val; +- +- val = sccb_reg_read(gspca_dev, 0x13); /* com8 */ +- sccb_reg_write(gspca_dev, 0xff, 0x00); +- if (sd->lightfreq == 0) { +- sccb_reg_write(gspca_dev, 0x13, val & 0xdf); +- return; +- } +- sccb_reg_write(gspca_dev, 0x13, val | 0x20); +- +- val = sccb_reg_read(gspca_dev, 0x42); /* com17 */ +- sccb_reg_write(gspca_dev, 0xff, 0x00); +- if (sd->lightfreq == 1) +- val |= 0x01; +- else +- val &= 0xfe; +- sccb_reg_write(gspca_dev, 0x42, val); +-} +- + /* this function is called at probe time */ + static int sd_config(struct gspca_dev *gspca_dev, + const struct usb_device_id *id) +@@ -1624,77 +809,50 @@ static int sd_config(struct gspca_dev *gspca_dev, + struct sd *sd = (struct sd *) gspca_dev; + struct cam *cam; + +- sd->sensor = id->driver_info; +- + cam = &gspca_dev->cam; + +- if (sd->sensor == SENSOR_OV772X) { +- cam->cam_mode = ov772x_mode; +- cam->nmodes = ARRAY_SIZE(ov772x_mode); ++ cam->cam_mode = ov772x_mode; ++ cam->nmodes = ARRAY_SIZE(ov772x_mode); ++ cam->mode_framerates = ov772x_framerates; + +- cam->bulk = 1; +- cam->bulk_size = 16384; +- cam->bulk_nurbs = 2; +- } else { /* ov965x */ +- cam->cam_mode = ov965x_mode; +- cam->nmodes = ARRAY_SIZE(ov965x_mode); +- } ++ cam->bulk = 1; ++ cam->bulk_size = 16384; ++ cam->bulk_nurbs = 2; + + sd->frame_rate = 30; + +- if (sd->sensor == SENSOR_OV772X) { +- sd->brightness = BRIGHTNESS_77_DEF; +- sd->contrast = CONTRAST_77_DEF; +- sd->gain = GAIN_DEF; +- sd->exposure = EXPO_77_DEF; +- sd->redblc = RED_BALANCE_DEF; +- sd->blueblc = BLUE_BALANCE_DEF; +- sd->hue = HUE_DEF; +-#if AUTOGAIN_77_DEF != 0 +- sd->autogain = AUTOGAIN_77_DEF; ++ sd->brightness = BRIGHTNESS_DEF; ++ sd->contrast = CONTRAST_DEF; ++ sd->gain = GAIN_DEF; ++ sd->exposure = EXPO_DEF; ++ sd->redblc = RED_BALANCE_DEF; ++ sd->blueblc = BLUE_BALANCE_DEF; ++ sd->hue = HUE_DEF; ++#if AUTOGAIN_DEF != 0 ++ sd->autogain = AUTOGAIN_DEF; + #else +- gspca_dev->ctrl_inac |= (1 << AWB_77_IDX); ++ gspca_dev->ctrl_inac |= (1 << AWB_IDX); + #endif + #if AWB_DEF != 0 +- sd->awb = AWB_DEF ++ sd->awb 
= AWB_DEF + #endif +-#if SHARPNESS_77_DEF != 0 +- sd->sharpness = SHARPNESS_77_DEF; ++#if SHARPNESS_DEF != 0 ++ sd->sharpness = SHARPNESS_DEF; + #endif + #if HFLIP_DEF != 0 +- sd->hflip = HFLIP_DEF; ++ sd->hflip = HFLIP_DEF; + #endif + #if VFLIP_DEF != 0 +- sd->vflip = VFLIP_DEF; +-#endif +- } else { +- sd->brightness = BRIGHTNESS_96_DEF; +- sd->contrast = CONTRAST_96_DEF; +-#if AUTOGAIN_96_DEF != 0 +- sd->autogain = AUTOGAIN_96_DEF; +- gspca_dev->ctrl_inac |= (1 << EXPO_96_IDX); ++ sd->vflip = VFLIP_DEF; + #endif +-#if EXPO_96_DEF != 0 +- sd->exposure = EXPO_96_DEF; +-#endif +-#if SHARPNESS_96_DEF != 0 +- sd->sharpness = SHARPNESS_96_DEF; +-#endif +- sd->satur = SATUR_DEF; +- sd->lightfreq = FREQ_DEF; +- } ++ + return 0; + } + + /* this function is called at probe and resume time */ + static int sd_init(struct gspca_dev *gspca_dev) + { +- struct sd *sd = (struct sd *) gspca_dev; + u16 sensor_id; +- static const u8 sensor_addr[2] = { +- 0x42, /* 0 SENSOR_OV772X */ +- 0x60, /* 1 SENSOR_OV965X */ +- }; + + /* reset bridge */ + ov534_reg_write(gspca_dev, 0xe7, 0x3a); +@@ -1702,8 +860,7 @@ static int sd_init(struct gspca_dev *gspca_dev) + msleep(100); + + /* initialize the sensor address */ +- ov534_reg_write(gspca_dev, OV534_REG_ADDRESS, +- sensor_addr[sd->sensor]); ++ ov534_reg_write(gspca_dev, OV534_REG_ADDRESS, 0x42); + + /* reset sensor */ + sccb_reg_write(gspca_dev, 0x12, 0x80); +@@ -1717,64 +874,46 @@ static int sd_init(struct gspca_dev *gspca_dev) + PDEBUG(D_PROBE, "Sensor ID: %04x", sensor_id); + + /* initialize */ +- switch (sd->sensor) { +- case SENSOR_OV772X: +- reg_w_array(gspca_dev, bridge_init_ov772x, +- ARRAY_SIZE(bridge_init_ov772x)); +- ov534_set_led(gspca_dev, 1); +- sccb_w_array(gspca_dev, sensor_init_ov772x, +- ARRAY_SIZE(sensor_init_ov772x)); +- ov534_reg_write(gspca_dev, 0xe0, 0x09); +- ov534_set_led(gspca_dev, 0); +- set_frame_rate(gspca_dev); +- break; +- default: +-/* case SENSOR_OV965X: */ +- reg_w_array(gspca_dev, bridge_init_ov965x, +- ARRAY_SIZE(bridge_init_ov965x)); +- sccb_w_array(gspca_dev, sensor_init_ov965x, +- ARRAY_SIZE(sensor_init_ov965x)); +- reg_w_array(gspca_dev, bridge_init_ov965x_2, +- ARRAY_SIZE(bridge_init_ov965x_2)); +- sccb_w_array(gspca_dev, sensor_init_ov965x_2, +- ARRAY_SIZE(sensor_init_ov965x_2)); +- ov534_reg_write(gspca_dev, 0xe0, 0x00); +- ov534_reg_write(gspca_dev, 0xe0, 0x01); +- ov534_set_led(gspca_dev, 0); +- ov534_reg_write(gspca_dev, 0xe0, 0x00); +- } ++ reg_w_array(gspca_dev, bridge_init, ++ ARRAY_SIZE(bridge_init)); ++ ov534_set_led(gspca_dev, 1); ++ sccb_w_array(gspca_dev, sensor_init, ++ ARRAY_SIZE(sensor_init)); ++ ov534_reg_write(gspca_dev, 0xe0, 0x09); ++ ov534_set_led(gspca_dev, 0); ++ set_frame_rate(gspca_dev); + + return 0; + } + +-static int sd_start_ov772x(struct gspca_dev *gspca_dev) ++static int sd_start(struct gspca_dev *gspca_dev) + { + int mode; + + mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; + if (mode != 0) { /* 320x240 */ +- reg_w_array(gspca_dev, bridge_start_ov772x_qvga, +- ARRAY_SIZE(bridge_start_ov772x_qvga)); +- sccb_w_array(gspca_dev, sensor_start_ov772x_qvga, +- ARRAY_SIZE(sensor_start_ov772x_qvga)); ++ reg_w_array(gspca_dev, bridge_start_qvga, ++ ARRAY_SIZE(bridge_start_qvga)); ++ sccb_w_array(gspca_dev, sensor_start_qvga, ++ ARRAY_SIZE(sensor_start_qvga)); + } else { /* 640x480 */ +- reg_w_array(gspca_dev, bridge_start_ov772x_vga, +- ARRAY_SIZE(bridge_start_ov772x_vga)); +- sccb_w_array(gspca_dev, sensor_start_ov772x_vga, +- ARRAY_SIZE(sensor_start_ov772x_vga)); ++ reg_w_array(gspca_dev, 
bridge_start_vga, ++ ARRAY_SIZE(bridge_start_vga)); ++ sccb_w_array(gspca_dev, sensor_start_vga, ++ ARRAY_SIZE(sensor_start_vga)); + } + set_frame_rate(gspca_dev); + +- setautogain_77(gspca_dev); ++ setautogain(gspca_dev); + setawb(gspca_dev); + setgain(gspca_dev); + setredblc(gspca_dev); + setblueblc(gspca_dev); + sethue(gspca_dev); +- setexposure_77(gspca_dev); +- setbrightness_77(gspca_dev); +- setcontrast_77(gspca_dev); +- setsharpness_77(gspca_dev); ++ setexposure(gspca_dev); ++ setbrightness(gspca_dev); ++ setcontrast(gspca_dev); ++ setsharpness(gspca_dev); + setvflip(gspca_dev); + sethflip(gspca_dev); + +@@ -1783,81 +922,12 @@ static int sd_start_ov772x(struct gspca_dev *gspca_dev) + return 0; + } + +-static int sd_start_ov965x(struct gspca_dev *gspca_dev) +-{ +- int mode; +- +- mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; +- switch (mode) { +- default: +-/* case 4: * 320x240 */ +- sccb_w_array(gspca_dev, sensor_start_ov965x_1_vga, +- ARRAY_SIZE(sensor_start_ov965x_1_vga)); +- reg_w_array(gspca_dev, bridge_start_ov965x_qvga, +- ARRAY_SIZE(bridge_start_ov965x_qvga)); +- sccb_w_array(gspca_dev, sensor_start_ov965x_2_qvga, +- ARRAY_SIZE(sensor_start_ov965x_2_qvga)); +- break; +- case 3: /* 640x480 */ +- sccb_w_array(gspca_dev, sensor_start_ov965x_1_vga, +- ARRAY_SIZE(sensor_start_ov965x_1_vga)); +- reg_w_array(gspca_dev, bridge_start_ov965x_vga, +- ARRAY_SIZE(bridge_start_ov965x_vga)); +- sccb_w_array(gspca_dev, sensor_start_ov965x_2_vga, +- ARRAY_SIZE(sensor_start_ov965x_2_vga)); +- break; +- case 2: /* 800x600 */ +- sccb_w_array(gspca_dev, sensor_start_ov965x_1_svga, +- ARRAY_SIZE(sensor_start_ov965x_1_svga)); +- reg_w_array(gspca_dev, bridge_start_ov965x_svga, +- ARRAY_SIZE(bridge_start_ov965x_svga)); +- sccb_w_array(gspca_dev, sensor_start_ov965x_2_svga, +- ARRAY_SIZE(sensor_start_ov965x_2_svga)); +- break; +- case 1: /* 1024x768 */ +- sccb_w_array(gspca_dev, sensor_start_ov965x_1_xga, +- ARRAY_SIZE(sensor_start_ov965x_1_xga)); +- reg_w_array(gspca_dev, bridge_start_ov965x_xga, +- ARRAY_SIZE(bridge_start_ov965x_xga)); +- sccb_w_array(gspca_dev, sensor_start_ov965x_2_svga, +- ARRAY_SIZE(sensor_start_ov965x_2_svga)); +- break; +- case 0: /* 1280x1024 */ +- sccb_w_array(gspca_dev, sensor_start_ov965x_1_sxga, +- ARRAY_SIZE(sensor_start_ov965x_1_sxga)); +- reg_w_array(gspca_dev, bridge_start_ov965x_sxga, +- ARRAY_SIZE(bridge_start_ov965x_sxga)); +- sccb_w_array(gspca_dev, sensor_start_ov965x_2_sxga, +- ARRAY_SIZE(sensor_start_ov965x_2_sxga)); +- break; +- } +- setfreq(gspca_dev); +- setautogain_96(gspca_dev); +- setbrightness_96(gspca_dev); +- setcontrast_96(gspca_dev); +- setexposure_96(gspca_dev); +- setsharpness_96(gspca_dev); +- setsatur(gspca_dev); +- +- ov534_reg_write(gspca_dev, 0xe0, 0x00); +- ov534_reg_write(gspca_dev, 0xe0, 0x00); +- ov534_set_led(gspca_dev, 1); +- return 0; +-} +- +-static void sd_stopN_ov772x(struct gspca_dev *gspca_dev) ++static void sd_stopN(struct gspca_dev *gspca_dev) + { + ov534_reg_write(gspca_dev, 0xe0, 0x09); + ov534_set_led(gspca_dev, 0); + } + +-static void sd_stopN_ov965x(struct gspca_dev *gspca_dev) +-{ +- ov534_reg_write(gspca_dev, 0xe0, 0x01); +- ov534_set_led(gspca_dev, 0); +- ov534_reg_write(gspca_dev, 0xe0, 0x00); +-} +- + /* Values for bmHeaderInfo (Video and Still Image Payload Headers, 2.4.3.3) */ + #define UVC_STREAM_EOH (1 << 7) + #define UVC_STREAM_ERR (1 << 6) +@@ -1875,11 +945,9 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, + __u32 this_pts; + u16 this_fid; + int remaining_len = len; +- int payload_len; + +- 
payload_len = gspca_dev->cam.bulk ? 2048 : 2040; + do { +- len = min(remaining_len, payload_len); ++ len = min(remaining_len, 2048); + + /* Payloads are prefixed with a UVC-style header. We + consider a frame to start when the FID toggles, or the PTS +@@ -1918,7 +986,17 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, + data + 12, len - 12); + /* If this packet is marked as EOF, end the frame */ + } else if (data[1] & UVC_STREAM_EOF) { ++ struct gspca_frame *frame; ++ + sd->last_pts = 0; ++ frame = gspca_get_i_frame(gspca_dev); ++ if (frame == NULL) ++ goto discard; ++ if (frame->data_end - frame->data + (len - 12) != ++ gspca_dev->width * gspca_dev->height * 2) { ++ PDEBUG(D_PACK, "wrong sized frame"); ++ goto discard; ++ } + gspca_frame_add(gspca_dev, LAST_PACKET, + data + 12, len - 12); + } else { +@@ -1965,12 +1043,8 @@ static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val) + struct sd *sd = (struct sd *) gspca_dev; + + sd->exposure = val; +- if (gspca_dev->streaming) { +- if (sd->sensor == SENSOR_OV772X) +- setexposure_77(gspca_dev); +- else +- setexposure_96(gspca_dev); +- } ++ if (gspca_dev->streaming) ++ setexposure(gspca_dev); + return 0; + } + +@@ -1987,12 +1061,8 @@ static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val) + struct sd *sd = (struct sd *) gspca_dev; + + sd->brightness = val; +- if (gspca_dev->streaming) { +- if (sd->sensor == SENSOR_OV772X) +- setbrightness_77(gspca_dev); +- else +- setbrightness_96(gspca_dev); +- } ++ if (gspca_dev->streaming) ++ setbrightness(gspca_dev); + return 0; + } + +@@ -2009,12 +1079,8 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val) + struct sd *sd = (struct sd *) gspca_dev; + + sd->contrast = val; +- if (gspca_dev->streaming) { +- if (sd->sensor == SENSOR_OV772X) +- setcontrast_77(gspca_dev); +- else +- setcontrast_96(gspca_dev); +- } ++ if (gspca_dev->streaming) ++ setcontrast(gspca_dev); + return 0; + } + +@@ -2026,41 +1092,6 @@ static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val) + return 0; + } + +-static int sd_setsatur(struct gspca_dev *gspca_dev, __s32 val) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- +- sd->satur = val; +- if (gspca_dev->streaming) +- setsatur(gspca_dev); +- return 0; +-} +- +-static int sd_getsatur(struct gspca_dev *gspca_dev, __s32 *val) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- +- *val = sd->satur; +- return 0; +-} +-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- +- sd->lightfreq = val; +- if (gspca_dev->streaming) +- setfreq(gspca_dev); +- return 0; +-} +- +-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val) +-{ +- struct sd *sd = (struct sd *) gspca_dev; +- +- *val = sd->lightfreq; +- return 0; +-} +- + static int sd_setredblc(struct gspca_dev *gspca_dev, __s32 val) + { + struct sd *sd = (struct sd *) gspca_dev; +@@ -2122,22 +1153,14 @@ static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val) + sd->autogain = val; + + if (gspca_dev->streaming) { +- if (sd->sensor == SENSOR_OV772X) { +- +- /* the auto white balance control works only +- * when auto gain is set */ +- if (val) +- gspca_dev->ctrl_inac &= ~(1 << AWB_77_IDX); +- else +- gspca_dev->ctrl_inac |= (1 << AWB_77_IDX); +- setautogain_77(gspca_dev); +- } else { +- if (val) +- gspca_dev->ctrl_inac |= (1 << EXPO_96_IDX); +- else +- gspca_dev->ctrl_inac &= ~(1 << EXPO_96_IDX); +- setautogain_96(gspca_dev); +- } ++ ++ /* the auto white balance control works only ++ * when auto gain is set */ ++ 
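The merged handler above (continuing just below) keeps the ov772x rule that automatic white balance is only meaningful while auto gain is enabled, so it flips the AWB bit in gspca's control-inactive mask before touching the sensor. A compact sketch of that dependent-control bookkeeping; the AWB_IDX value here is illustrative, the real one follows from the driver's control table ordering:

#include <stdint.h>
#include <stdbool.h>

#define AWB_IDX 9       /* illustrative index, not the driver's actual value */

/* Mirror of the ctrl_inac update in sd_setautogain(): grey out the AWB
 * control whenever auto gain is switched off, re-enable it otherwise. */
static uint32_t update_ctrl_inactive(uint32_t ctrl_inac, bool autogain)
{
        if (autogain)
                ctrl_inac &= ~(1u << AWB_IDX);
        else
                ctrl_inac |= (1u << AWB_IDX);
        return ctrl_inac;
}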
if (val) ++ gspca_dev->ctrl_inac &= ~(1 << AWB_IDX); ++ else ++ gspca_dev->ctrl_inac |= (1 << AWB_IDX); ++ setautogain(gspca_dev); + } + return 0; + } +@@ -2173,12 +1196,8 @@ static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val) + struct sd *sd = (struct sd *) gspca_dev; + + sd->sharpness = val; +- if (gspca_dev->streaming) { +- if (sd->sensor == SENSOR_OV772X) +- setsharpness_77(gspca_dev); +- else +- setsharpness_96(gspca_dev); +- } ++ if (gspca_dev->streaming) ++ setsharpness(gspca_dev); + return 0; + } + +@@ -2257,7 +1276,7 @@ static int sd_set_streamparm(struct gspca_dev *gspca_dev, + + /* Set requested framerate */ + sd->frame_rate = tpf->denominator / tpf->numerator; +- if (gspca_dev->streaming && sd->sensor == SENSOR_OV772X) ++ if (gspca_dev->streaming) + set_frame_rate(gspca_dev); + + /* Return the actual framerate */ +@@ -2267,57 +1286,23 @@ static int sd_set_streamparm(struct gspca_dev *gspca_dev, + return 0; + } + +-static int sd_querymenu(struct gspca_dev *gspca_dev, +- struct v4l2_querymenu *menu) +-{ +- switch (menu->id) { +- case V4L2_CID_POWER_LINE_FREQUENCY: +- switch (menu->index) { +- case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */ +- strcpy((char *) menu->name, "NoFliker"); +- return 0; +- case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */ +- strcpy((char *) menu->name, "50 Hz"); +- return 0; +- case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */ +- strcpy((char *) menu->name, "60 Hz"); +- return 0; +- } +- break; +- } +- return -EINVAL; +-} +- + /* sub-driver description */ +-static const struct sd_desc sd_desc_ov772x = { ++static const struct sd_desc sd_desc = { + .name = MODULE_NAME, +- .ctrls = sd_ctrls_ov772x, +- .nctrls = ARRAY_SIZE(sd_ctrls_ov772x), ++ .ctrls = sd_ctrls, ++ .nctrls = ARRAY_SIZE(sd_ctrls), + .config = sd_config, + .init = sd_init, +- .start = sd_start_ov772x, +- .stopN = sd_stopN_ov772x, ++ .start = sd_start, ++ .stopN = sd_stopN, + .pkt_scan = sd_pkt_scan, + .get_streamparm = sd_get_streamparm, + .set_streamparm = sd_set_streamparm, + }; + +-static const struct sd_desc sd_desc_ov965x = { +- .name = MODULE_NAME, +- .ctrls = sd_ctrls_ov965x, +- .nctrls = ARRAY_SIZE(sd_ctrls_ov965x), +- .config = sd_config, +- .init = sd_init, +- .start = sd_start_ov965x, +- .stopN = sd_stopN_ov965x, +- .pkt_scan = sd_pkt_scan, +- .querymenu = sd_querymenu, +-}; +- + /* -- module initialisation -- */ + static const __devinitdata struct usb_device_id device_table[] = { +- {USB_DEVICE(0x06f8, 0x3003), .driver_info = SENSOR_OV965X}, +- {USB_DEVICE(0x1415, 0x2000), .driver_info = SENSOR_OV772X}, ++ {USB_DEVICE(0x1415, 0x2000)}, + {} + }; + +@@ -2326,11 +1311,7 @@ MODULE_DEVICE_TABLE(usb, device_table); + /* -- device connect -- */ + static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) + { +- return gspca_dev_probe(intf, id, +- id->driver_info == SENSOR_OV772X +- ? 
&sd_desc_ov772x +- : &sd_desc_ov965x, +- sizeof(struct sd), ++ return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), + THIS_MODULE); + } + +diff --git a/drivers/media/video/gspca/ov534_9.c b/drivers/media/video/gspca/ov534_9.c +new file mode 100644 +index 0000000..bbe5a03 +--- /dev/null ++++ b/drivers/media/video/gspca/ov534_9.c +@@ -0,0 +1,1477 @@ ++/* ++ * ov534-ov965x gspca driver ++ * ++ * Copyright (C) 2009-2010 Jean-Francois Moine http://moinejf.free.fr ++ * Copyright (C) 2008 Antonio Ospite ++ * Copyright (C) 2008 Jim Paris ++ * ++ * Based on a prototype written by Mark Ferrell ++ * USB protocol reverse engineered by Jim Paris ++ * https://jim.sh/svn/jim/devl/playstation/ps3/eye/test/ ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++#define MODULE_NAME "ov534_9" ++ ++#include "gspca.h" ++ ++#define OV534_REG_ADDRESS 0xf1 /* sensor address */ ++#define OV534_REG_SUBADDR 0xf2 ++#define OV534_REG_WRITE 0xf3 ++#define OV534_REG_READ 0xf4 ++#define OV534_REG_OPERATION 0xf5 ++#define OV534_REG_STATUS 0xf6 ++ ++#define OV534_OP_WRITE_3 0x37 ++#define OV534_OP_WRITE_2 0x33 ++#define OV534_OP_READ_2 0xf9 ++ ++#define CTRL_TIMEOUT 500 ++ ++MODULE_AUTHOR("Jean-Francois Moine "); ++MODULE_DESCRIPTION("GSPCA/OV534_9 USB Camera Driver"); ++MODULE_LICENSE("GPL"); ++ ++/* specific webcam descriptor */ ++struct sd { ++ struct gspca_dev gspca_dev; /* !! 
must be the first item */ ++ __u32 last_pts; ++ u8 last_fid; ++ ++ u8 brightness; ++ u8 contrast; ++ u8 autogain; ++ u8 exposure; ++ s8 sharpness; ++ u8 satur; ++ u8 freq; ++}; ++ ++/* V4L2 controls supported by the driver */ ++static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val); ++static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val); ++static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val); ++static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val); ++static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val); ++static int sd_setsatur(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getsatur(struct gspca_dev *gspca_dev, __s32 *val); ++static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val); ++ ++static const struct ctrl sd_ctrls[] = { ++ { /* 0 */ ++ { ++ .id = V4L2_CID_BRIGHTNESS, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Brightness", ++ .minimum = 0, ++ .maximum = 15, ++ .step = 1, ++#define BRIGHTNESS_DEF 7 ++ .default_value = BRIGHTNESS_DEF, ++ }, ++ .set = sd_setbrightness, ++ .get = sd_getbrightness, ++ }, ++ { /* 1 */ ++ { ++ .id = V4L2_CID_CONTRAST, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Contrast", ++ .minimum = 0, ++ .maximum = 15, ++ .step = 1, ++#define CONTRAST_DEF 3 ++ .default_value = CONTRAST_DEF, ++ }, ++ .set = sd_setcontrast, ++ .get = sd_getcontrast, ++ }, ++ { /* 2 */ ++ { ++ .id = V4L2_CID_AUTOGAIN, ++ .type = V4L2_CTRL_TYPE_BOOLEAN, ++ .name = "Autogain", ++ .minimum = 0, ++ .maximum = 1, ++ .step = 1, ++#define AUTOGAIN_DEF 1 ++ .default_value = AUTOGAIN_DEF, ++ }, ++ .set = sd_setautogain, ++ .get = sd_getautogain, ++ }, ++#define EXPO_IDX 3 ++ { /* 3 */ ++ { ++ .id = V4L2_CID_EXPOSURE, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Exposure", ++ .minimum = 0, ++ .maximum = 3, ++ .step = 1, ++#define EXPO_DEF 0 ++ .default_value = EXPO_DEF, ++ }, ++ .set = sd_setexposure, ++ .get = sd_getexposure, ++ }, ++ { /* 4 */ ++ { ++ .id = V4L2_CID_SHARPNESS, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Sharpness", ++ .minimum = -1, /* -1 = auto */ ++ .maximum = 4, ++ .step = 1, ++#define SHARPNESS_DEF -1 ++ .default_value = SHARPNESS_DEF, ++ }, ++ .set = sd_setsharpness, ++ .get = sd_getsharpness, ++ }, ++ { /* 5 */ ++ { ++ .id = V4L2_CID_SATURATION, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Saturation", ++ .minimum = 0, ++ .maximum = 4, ++ .step = 1, ++#define SATUR_DEF 2 ++ .default_value = SATUR_DEF, ++ }, ++ .set = sd_setsatur, ++ .get = sd_getsatur, ++ }, ++ { ++ { ++ .id = V4L2_CID_POWER_LINE_FREQUENCY, ++ .type = V4L2_CTRL_TYPE_MENU, ++ .name = "Light frequency filter", ++ .minimum = 0, ++ .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */ ++ .step = 1, ++#define FREQ_DEF 0 ++ .default_value = FREQ_DEF, ++ }, ++ .set = sd_setfreq, ++ .get = sd_getfreq, ++ }, ++}; ++ ++static const struct v4l2_pix_format ov965x_mode[] = { ++#define QVGA_MODE 0 ++ {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, ++ .bytesperline = 320, ++ .sizeimage = 320 * 240 * 3 / 8 + 590, ++ .colorspace = V4L2_COLORSPACE_JPEG}, ++#define VGA_MODE 1 ++ {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, ++ 
.bytesperline = 640, ++ .sizeimage = 640 * 480 * 3 / 8 + 590, ++ .colorspace = V4L2_COLORSPACE_JPEG}, ++#define SVGA_MODE 2 ++ {800, 600, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, ++ .bytesperline = 800, ++ .sizeimage = 800 * 600 * 3 / 8 + 590, ++ .colorspace = V4L2_COLORSPACE_JPEG}, ++#define XGA_MODE 3 ++ {1024, 768, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, ++ .bytesperline = 1024, ++ .sizeimage = 1024 * 768 * 3 / 8 + 590, ++ .colorspace = V4L2_COLORSPACE_JPEG}, ++#define SXGA_MODE 4 ++ {1280, 1024, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, ++ .bytesperline = 1280, ++ .sizeimage = 1280 * 1024 * 3 / 8 + 590, ++ .colorspace = V4L2_COLORSPACE_JPEG}, ++}; ++ ++static const u8 bridge_init[][2] = { ++ {0x88, 0xf8}, ++ {0x89, 0xff}, ++ {0x76, 0x03}, ++ {0x92, 0x03}, ++ {0x95, 0x10}, ++ {0xe2, 0x00}, ++ {0xe7, 0x3e}, ++ {0x8d, 0x1c}, ++ {0x8e, 0x00}, ++ {0x8f, 0x00}, ++ {0x1f, 0x00}, ++ {0xc3, 0xf9}, ++ {0x89, 0xff}, ++ {0x88, 0xf8}, ++ {0x76, 0x03}, ++ {0x92, 0x01}, ++ {0x93, 0x18}, ++ {0x1c, 0x0a}, ++ {0x1d, 0x48}, ++ {0xc0, 0x50}, ++ {0xc1, 0x3c}, ++ {0x34, 0x05}, ++ {0xc2, 0x0c}, ++ {0xc3, 0xf9}, ++ {0x34, 0x05}, ++ {0xe7, 0x2e}, ++ {0x31, 0xf9}, ++ {0x35, 0x02}, ++ {0xd9, 0x10}, ++ {0x25, 0x42}, ++ {0x94, 0x11}, ++}; ++ ++static const u8 sensor_init[][2] = { ++ {0x12, 0x80}, /* com7 - SSCB reset */ ++ {0x00, 0x00}, /* gain */ ++ {0x01, 0x80}, /* blue */ ++ {0x02, 0x80}, /* red */ ++ {0x03, 0x1b}, /* vref */ ++ {0x04, 0x03}, /* com1 - exposure low bits */ ++ {0x0b, 0x57}, /* ver */ ++ {0x0e, 0x61}, /* com5 */ ++ {0x0f, 0x42}, /* com6 */ ++ {0x11, 0x00}, /* clkrc */ ++ {0x12, 0x02}, /* com7 - 15fps VGA YUYV */ ++ {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ ++ {0x14, 0x28}, /* com9 */ ++ {0x16, 0x24}, /* reg16 */ ++ {0x17, 0x1d}, /* hstart*/ ++ {0x18, 0xbd}, /* hstop */ ++ {0x19, 0x01}, /* vstrt */ ++ {0x1a, 0x81}, /* vstop*/ ++ {0x1e, 0x04}, /* mvfp */ ++ {0x24, 0x3c}, /* aew */ ++ {0x25, 0x36}, /* aeb */ ++ {0x26, 0x71}, /* vpt */ ++ {0x27, 0x08}, /* bbias */ ++ {0x28, 0x08}, /* gbbias */ ++ {0x29, 0x15}, /* gr com */ ++ {0x2a, 0x00}, /* exhch */ ++ {0x2b, 0x00}, /* exhcl */ ++ {0x2c, 0x08}, /* rbias */ ++ {0x32, 0xff}, /* href */ ++ {0x33, 0x00}, /* chlf */ ++ {0x34, 0x3f}, /* aref1 */ ++ {0x35, 0x00}, /* aref2 */ ++ {0x36, 0xf8}, /* aref3 */ ++ {0x38, 0x72}, /* adc2 */ ++ {0x39, 0x57}, /* aref4 */ ++ {0x3a, 0x80}, /* tslb - yuyv */ ++ {0x3b, 0xc4}, /* com11 - night mode 1/4 frame rate */ ++ {0x3d, 0x99}, /* com13 */ ++ {0x3f, 0xc1}, /* edge */ ++ {0x40, 0xc0}, /* com15 */ ++ {0x41, 0x40}, /* com16 */ ++ {0x42, 0xc0}, /* com17 */ ++ {0x43, 0x0a}, /* rsvd */ ++ {0x44, 0xf0}, ++ {0x45, 0x46}, ++ {0x46, 0x62}, ++ {0x47, 0x2a}, ++ {0x48, 0x3c}, ++ {0x4a, 0xfc}, ++ {0x4b, 0xfc}, ++ {0x4c, 0x7f}, ++ {0x4d, 0x7f}, ++ {0x4e, 0x7f}, ++ {0x4f, 0x98}, /* matrix */ ++ {0x50, 0x98}, ++ {0x51, 0x00}, ++ {0x52, 0x28}, ++ {0x53, 0x70}, ++ {0x54, 0x98}, ++ {0x58, 0x1a}, /* matrix coef sign */ ++ {0x59, 0x85}, /* AWB control */ ++ {0x5a, 0xa9}, ++ {0x5b, 0x64}, ++ {0x5c, 0x84}, ++ {0x5d, 0x53}, ++ {0x5e, 0x0e}, ++ {0x5f, 0xf0}, /* AWB blue limit */ ++ {0x60, 0xf0}, /* AWB red limit */ ++ {0x61, 0xf0}, /* AWB green limit */ ++ {0x62, 0x00}, /* lcc1 */ ++ {0x63, 0x00}, /* lcc2 */ ++ {0x64, 0x02}, /* lcc3 */ ++ {0x65, 0x16}, /* lcc4 */ ++ {0x66, 0x01}, /* lcc5 */ ++ {0x69, 0x02}, /* hv */ ++ {0x6b, 0x5a}, /* dbvl */ ++ {0x6c, 0x04}, ++ {0x6d, 0x55}, ++ {0x6e, 0x00}, ++ {0x6f, 0x9d}, ++ {0x70, 0x21}, /* dnsth */ ++ {0x71, 0x78}, ++ {0x72, 0x00}, /* poidx */ ++ {0x73, 0x01}, /* pckdv */ ++ {0x74, 0x3a}, /* xindx */ 
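The ov965x_mode[] table above sizes each JPEG buffer as width * height * 3 / 8 plus a 590-byte margin, presumably headroom for the JPEG header. A quick check of that arithmetic with the table's own dimensions:

#include <stdio.h>

int main(void)
{
        /* .sizeimage = width * height * 3 / 8 + 590, as in ov965x_mode[] */
        static const unsigned dims[5][2] = {
                {320, 240}, {640, 480}, {800, 600}, {1024, 768}, {1280, 1024}
        };
        for (int i = 0; i < 5; i++) {
                unsigned w = dims[i][0], h = dims[i][1];
                printf("%ux%u -> %u bytes\n", w, h, w * h * 3 / 8 + 590);
        }
        return 0;       /* prints 29390 for 320x240 up to 492110 for 1280x1024 */
}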
++ {0x75, 0x35}, /* yindx */ ++ {0x76, 0x01}, ++ {0x77, 0x02}, ++ {0x7a, 0x12}, /* gamma curve */ ++ {0x7b, 0x08}, ++ {0x7c, 0x16}, ++ {0x7d, 0x30}, ++ {0x7e, 0x5e}, ++ {0x7f, 0x72}, ++ {0x80, 0x82}, ++ {0x81, 0x8e}, ++ {0x82, 0x9a}, ++ {0x83, 0xa4}, ++ {0x84, 0xac}, ++ {0x85, 0xb8}, ++ {0x86, 0xc3}, ++ {0x87, 0xd6}, ++ {0x88, 0xe6}, ++ {0x89, 0xf2}, ++ {0x8a, 0x03}, ++ {0x8c, 0x89}, /* com19 */ ++ {0x14, 0x28}, /* com9 */ ++ {0x90, 0x7d}, ++ {0x91, 0x7b}, ++ {0x9d, 0x03}, /* lcc6 */ ++ {0x9e, 0x04}, /* lcc7 */ ++ {0x9f, 0x7a}, ++ {0xa0, 0x79}, ++ {0xa1, 0x40}, /* aechm */ ++ {0xa4, 0x50}, /* com21 */ ++ {0xa5, 0x68}, /* com26 */ ++ {0xa6, 0x4a}, /* AWB green */ ++ {0xa8, 0xc1}, /* refa8 */ ++ {0xa9, 0xef}, /* refa9 */ ++ {0xaa, 0x92}, ++ {0xab, 0x04}, ++ {0xac, 0x80}, /* black level control */ ++ {0xad, 0x80}, ++ {0xae, 0x80}, ++ {0xaf, 0x80}, ++ {0xb2, 0xf2}, ++ {0xb3, 0x20}, ++ {0xb4, 0x20}, /* ctrlb4 */ ++ {0xb5, 0x00}, ++ {0xb6, 0xaf}, ++ {0xbb, 0xae}, ++ {0xbc, 0x7f}, /* ADC channel offsets */ ++ {0xdb, 0x7f}, ++ {0xbe, 0x7f}, ++ {0xbf, 0x7f}, ++ {0xc0, 0xe2}, ++ {0xc1, 0xc0}, ++ {0xc2, 0x01}, ++ {0xc3, 0x4e}, ++ {0xc6, 0x85}, ++ {0xc7, 0x80}, /* com24 */ ++ {0xc9, 0xe0}, ++ {0xca, 0xe8}, ++ {0xcb, 0xf0}, ++ {0xcc, 0xd8}, ++ {0xcd, 0xf1}, ++ {0x4f, 0x98}, /* matrix */ ++ {0x50, 0x98}, ++ {0x51, 0x00}, ++ {0x52, 0x28}, ++ {0x53, 0x70}, ++ {0x54, 0x98}, ++ {0x58, 0x1a}, ++ {0xff, 0x41}, /* read 41, write ff 00 */ ++ {0x41, 0x40}, /* com16 */ ++ ++ {0xc5, 0x03}, /* 60 Hz banding filter */ ++ {0x6a, 0x02}, /* 50 Hz banding filter */ ++ ++ {0x12, 0x62}, /* com7 - 30fps VGA YUV */ ++ {0x36, 0xfa}, /* aref3 */ ++ {0x69, 0x0a}, /* hv */ ++ {0x8c, 0x89}, /* com22 */ ++ {0x14, 0x28}, /* com9 */ ++ {0x3e, 0x0c}, ++ {0x41, 0x40}, /* com16 */ ++ {0x72, 0x00}, ++ {0x73, 0x00}, ++ {0x74, 0x3a}, ++ {0x75, 0x35}, ++ {0x76, 0x01}, ++ {0xc7, 0x80}, ++ {0x03, 0x12}, /* vref */ ++ {0x17, 0x16}, /* hstart */ ++ {0x18, 0x02}, /* hstop */ ++ {0x19, 0x01}, /* vstrt */ ++ {0x1a, 0x3d}, /* vstop */ ++ {0x32, 0xff}, /* href */ ++ {0xc0, 0xaa}, ++}; ++ ++static const u8 bridge_init_2[][2] = { ++ {0x94, 0xaa}, ++ {0xf1, 0x60}, ++ {0xe5, 0x04}, ++ {0xc0, 0x50}, ++ {0xc1, 0x3c}, ++ {0x8c, 0x00}, ++ {0x8d, 0x1c}, ++ {0x34, 0x05}, ++ ++ {0xc2, 0x0c}, ++ {0xc3, 0xf9}, ++ {0xda, 0x01}, ++ {0x50, 0x00}, ++ {0x51, 0xa0}, ++ {0x52, 0x3c}, ++ {0x53, 0x00}, ++ {0x54, 0x00}, ++ {0x55, 0x00}, ++ {0x57, 0x00}, ++ {0x5c, 0x00}, ++ {0x5a, 0xa0}, ++ {0x5b, 0x78}, ++ {0x35, 0x02}, ++ {0xd9, 0x10}, ++ {0x94, 0x11}, ++}; ++ ++static const u8 sensor_init_2[][2] = { ++ {0x3b, 0xc4}, ++ {0x1e, 0x04}, /* mvfp */ ++ {0x13, 0xe0}, /* com8 */ ++ {0x00, 0x00}, /* gain */ ++ {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ ++ {0x11, 0x03}, /* clkrc */ ++ {0x6b, 0x5a}, /* dblv */ ++ {0x6a, 0x05}, ++ {0xc5, 0x07}, ++ {0xa2, 0x4b}, ++ {0xa3, 0x3e}, ++ {0x2d, 0x00}, ++ {0xff, 0x42}, /* read 42, write ff 00 */ ++ {0x42, 0xc0}, /* com17 */ ++ {0x2d, 0x00}, ++ {0xff, 0x42}, /* read 42, write ff 00 */ ++ {0x42, 0xc1}, /* com17 */ ++/* sharpness */ ++ {0x3f, 0x01}, ++ {0xff, 0x42}, /* read 42, write ff 00 */ ++ {0x42, 0xc1}, /* com17 */ ++/* saturation */ ++ {0x4f, 0x98}, /* matrix */ ++ {0x50, 0x98}, ++ {0x51, 0x00}, ++ {0x52, 0x28}, ++ {0x53, 0x70}, ++ {0x54, 0x98}, ++ {0x58, 0x1a}, ++ {0xff, 0x41}, /* read 41, write ff 00 */ ++ {0x41, 0x40}, /* com16 */ ++/* contrast */ ++ {0x56, 0x40}, ++/* brightness */ ++ {0x55, 0x8f}, ++/* expo */ ++ {0x10, 0x25}, /* aech - exposure high bits */ ++ {0xff, 0x13}, /* read 13, write ff 00 */ ++ {0x13, 
0xe7}, /* com8 - everything (AGC, AWB and AEC) */ ++}; ++ ++static const u8 sensor_start_1_vga[][2] = { /* same for qvga */ ++ {0x12, 0x62}, /* com7 - 30fps VGA YUV */ ++ {0x36, 0xfa}, /* aref3 */ ++ {0x69, 0x0a}, /* hv */ ++ {0x8c, 0x89}, /* com22 */ ++ {0x14, 0x28}, /* com9 */ ++ {0x3e, 0x0c}, /* com14 */ ++ {0x41, 0x40}, /* com16 */ ++ {0x72, 0x00}, ++ {0x73, 0x00}, ++ {0x74, 0x3a}, ++ {0x75, 0x35}, ++ {0x76, 0x01}, ++ {0xc7, 0x80}, /* com24 */ ++ {0x03, 0x12}, /* vref */ ++ {0x17, 0x16}, /* hstart */ ++ {0x18, 0x02}, /* hstop */ ++ {0x19, 0x01}, /* vstrt */ ++ {0x1a, 0x3d}, /* vstop */ ++ {0x32, 0xff}, /* href */ ++ {0xc0, 0xaa}, ++}; ++ ++static const u8 sensor_start_1_svga[][2] = { ++ {0x12, 0x02}, /* com7 - YUYV - VGA 15 full resolution */ ++ {0x36, 0xf8}, /* aref3 */ ++ {0x69, 0x02}, /* hv */ ++ {0x8c, 0x0d}, /* com22 */ ++ {0x3e, 0x0c}, /* com14 */ ++ {0x41, 0x40}, /* com16 */ ++ {0x72, 0x00}, ++ {0x73, 0x01}, ++ {0x74, 0x3a}, ++ {0x75, 0x35}, ++ {0x76, 0x01}, ++ {0xc7, 0x80}, /* com24 */ ++ {0x03, 0x1b}, /* vref */ ++ {0x17, 0x1d}, /* hstart */ ++ {0x18, 0xbd}, /* hstop */ ++ {0x19, 0x01}, /* vstrt */ ++ {0x1a, 0x81}, /* vstop */ ++ {0x32, 0xff}, /* href */ ++ {0xc0, 0xe2}, ++}; ++ ++static const u8 sensor_start_1_xga[][2] = { ++ {0x12, 0x02}, /* com7 */ ++ {0x36, 0xf8}, /* aref3 */ ++ {0x69, 0x02}, /* hv */ ++ {0x8c, 0x89}, /* com22 */ ++ {0x14, 0x28}, /* com9 */ ++ {0x3e, 0x0c}, /* com14 */ ++ {0x41, 0x40}, /* com16 */ ++ {0x72, 0x00}, ++ {0x73, 0x01}, ++ {0x74, 0x3a}, ++ {0x75, 0x35}, ++ {0x76, 0x01}, ++ {0xc7, 0x80}, /* com24 */ ++ {0x03, 0x1b}, /* vref */ ++ {0x17, 0x1d}, /* hstart */ ++ {0x18, 0xbd}, /* hstop */ ++ {0x19, 0x01}, /* vstrt */ ++ {0x1a, 0x81}, /* vstop */ ++ {0x32, 0xff}, /* href */ ++ {0xc0, 0xe2}, ++}; ++ ++static const u8 sensor_start_1_sxga[][2] = { ++ {0x12, 0x02}, /* com7 */ ++ {0x36, 0xf8}, /* aref3 */ ++ {0x69, 0x02}, /* hv */ ++ {0x8c, 0x89}, /* com22 */ ++ {0x14, 0x28}, /* com9 */ ++ {0x3e, 0x0c}, /* com14 */ ++ {0x41, 0x40}, /* com16 */ ++ {0x72, 0x00}, ++ {0x73, 0x01}, ++ {0x74, 0x3a}, ++ {0x75, 0x35}, ++ {0x76, 0x01}, ++ {0xc7, 0x80}, /* com24 */ ++ {0x03, 0x1b}, /* vref */ ++ {0x17, 0x1d}, /* hstart */ ++ {0x18, 0x02}, /* hstop */ ++ {0x19, 0x01}, /* vstrt */ ++ {0x1a, 0x81}, /* vstop */ ++ {0x32, 0xff}, /* href */ ++ {0xc0, 0xe2}, ++}; ++ ++static const u8 bridge_start_qvga[][2] = { ++ {0x94, 0xaa}, ++ {0xf1, 0x60}, ++ {0xe5, 0x04}, ++ {0xc0, 0x50}, ++ {0xc1, 0x3c}, ++ {0x8c, 0x00}, ++ {0x8d, 0x1c}, ++ {0x34, 0x05}, ++ ++ {0xc2, 0x4c}, ++ {0xc3, 0xf9}, ++ {0xda, 0x00}, ++ {0x50, 0x00}, ++ {0x51, 0xa0}, ++ {0x52, 0x78}, ++ {0x53, 0x00}, ++ {0x54, 0x00}, ++ {0x55, 0x00}, ++ {0x57, 0x00}, ++ {0x5c, 0x00}, ++ {0x5a, 0x50}, ++ {0x5b, 0x3c}, ++ {0x35, 0x02}, ++ {0xd9, 0x10}, ++ {0x94, 0x11}, ++}; ++ ++static const u8 bridge_start_vga[][2] = { ++ {0x94, 0xaa}, ++ {0xf1, 0x60}, ++ {0xe5, 0x04}, ++ {0xc0, 0x50}, ++ {0xc1, 0x3c}, ++ {0x8c, 0x00}, ++ {0x8d, 0x1c}, ++ {0x34, 0x05}, ++ {0xc2, 0x0c}, ++ {0xc3, 0xf9}, ++ {0xda, 0x01}, ++ {0x50, 0x00}, ++ {0x51, 0xa0}, ++ {0x52, 0x3c}, ++ {0x53, 0x00}, ++ {0x54, 0x00}, ++ {0x55, 0x00}, ++ {0x57, 0x00}, ++ {0x5c, 0x00}, ++ {0x5a, 0xa0}, ++ {0x5b, 0x78}, ++ {0x35, 0x02}, ++ {0xd9, 0x10}, ++ {0x94, 0x11}, ++}; ++ ++static const u8 bridge_start_svga[][2] = { ++ {0x94, 0xaa}, ++ {0xf1, 0x60}, ++ {0xe5, 0x04}, ++ {0xc0, 0xa0}, ++ {0xc1, 0x80}, ++ {0x8c, 0x00}, ++ {0x8d, 0x1c}, ++ {0x34, 0x05}, ++ {0xc2, 0x4c}, ++ {0xc3, 0xf9}, ++ {0x50, 0x00}, ++ {0x51, 0x40}, ++ {0x52, 0x00}, ++ {0x53, 0x00}, ++ {0x54, 0x00}, ++ 
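The init and start sequences above are plain {register, value} byte pairs, with one convention worth spelling out: an entry whose register byte is 0xff is not a write at all. As the inline comments ("read 41, write ff 00") indicate, it tells the walker to read back the register named in the value byte and then issue a write of ff 00, which is how sccb_w_array() further down in this file consumes the tables. A reduced user-space sketch of that walker; fake_write(), fake_read() and play_table() are stand-ins for the driver's own helpers:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the driver's sccb_write()/sccb_read() helpers. */
static void fake_write(uint8_t reg, uint8_t val)
{
        printf("write %02x <- %02x\n", reg, val);
}

static uint8_t fake_read(uint8_t reg)
{
        printf("read  %02x\n", reg);
        return 0;
}

/* Walk a {reg, val} table the way sccb_w_array() does: a 0xff entry means
 * "read back register <val>, then write ff 00" instead of a plain write. */
static void play_table(const uint8_t (*data)[2], size_t len)
{
        while (len--) {
                if ((*data)[0] != 0xff) {
                        fake_write((*data)[0], (*data)[1]);
                } else {
                        fake_read((*data)[1]);
                        fake_write(0xff, 0x00);
                }
                data++;
        }
}

int main(void)
{
        static const uint8_t seq[][2] = {
                {0x13, 0xe0},           /* plain write */
                {0xff, 0x41},           /* sentinel: read 0x41, write ff 00 */
                {0x41, 0x40},
        };
        play_table(seq, sizeof(seq) / sizeof(seq[0]));
        return 0;
}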
{0x55, 0x88}, ++ {0x57, 0x00}, ++ {0x5c, 0x00}, ++ {0x5a, 0xc8}, ++ {0x5b, 0x96}, ++ {0x35, 0x02}, ++ {0xd9, 0x10}, ++ {0xda, 0x00}, ++ {0x94, 0x11}, ++}; ++ ++static const u8 bridge_start_xga[][2] = { ++ {0x94, 0xaa}, ++ {0xf1, 0x60}, ++ {0xe5, 0x04}, ++ {0xc0, 0xa0}, ++ {0xc1, 0x80}, ++ {0x8c, 0x00}, ++ {0x8d, 0x1c}, ++ {0x34, 0x05}, ++ {0xc2, 0x4c}, ++ {0xc3, 0xf9}, ++ {0x50, 0x00}, ++ {0x51, 0x40}, ++ {0x52, 0x00}, ++ {0x53, 0x00}, ++ {0x54, 0x00}, ++ {0x55, 0x88}, ++ {0x57, 0x00}, ++ {0x5c, 0x01}, ++ {0x5a, 0x00}, ++ {0x5b, 0xc0}, ++ {0x35, 0x02}, ++ {0xd9, 0x10}, ++ {0xda, 0x01}, ++ {0x94, 0x11}, ++}; ++ ++static const u8 bridge_start_sxga[][2] = { ++ {0x94, 0xaa}, ++ {0xf1, 0x60}, ++ {0xe5, 0x04}, ++ {0xc0, 0xa0}, ++ {0xc1, 0x80}, ++ {0x8c, 0x00}, ++ {0x8d, 0x1c}, ++ {0x34, 0x05}, ++ {0xc2, 0x0c}, ++ {0xc3, 0xf9}, ++ {0xda, 0x00}, ++ {0x35, 0x02}, ++ {0xd9, 0x10}, ++ {0x94, 0x11}, ++}; ++ ++static const u8 sensor_start_2_qvga[][2] = { ++ {0x3b, 0xe4}, /* com11 - night mode 1/4 frame rate */ ++ {0x1e, 0x04}, /* mvfp */ ++ {0x13, 0xe0}, /* com8 */ ++ {0x00, 0x00}, ++ {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ ++ {0x11, 0x01}, /* clkrc */ ++ {0x6b, 0x5a}, /* dblv */ ++ {0x6a, 0x02}, /* 50 Hz banding filter */ ++ {0xc5, 0x03}, /* 60 Hz banding filter */ ++ {0xa2, 0x96}, /* bd50 */ ++ {0xa3, 0x7d}, /* bd60 */ ++ ++ {0xff, 0x13}, /* read 13, write ff 00 */ ++ {0x13, 0xe7}, ++ {0x3a, 0x80}, /* tslb - yuyv */ ++}; ++ ++static const u8 sensor_start_2_vga[][2] = { ++ {0x3b, 0xc4}, /* com11 - night mode 1/4 frame rate */ ++ {0x1e, 0x04}, /* mvfp */ ++ {0x13, 0xe0}, /* com8 */ ++ {0x00, 0x00}, ++ {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ ++ {0x11, 0x03}, /* clkrc */ ++ {0x6b, 0x5a}, /* dblv */ ++ {0x6a, 0x05}, /* 50 Hz banding filter */ ++ {0xc5, 0x07}, /* 60 Hz banding filter */ ++ {0xa2, 0x4b}, /* bd50 */ ++ {0xa3, 0x3e}, /* bd60 */ ++ ++ {0x2d, 0x00}, /* advfl */ ++}; ++ ++static const u8 sensor_start_2_svga[][2] = { /* same for xga */ ++ {0x3b, 0xc4}, /* com11 - night mode 1/4 frame rate */ ++ {0x1e, 0x04}, /* mvfp */ ++ {0x13, 0xe0}, /* com8 */ ++ {0x00, 0x00}, ++ {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ ++ {0x11, 0x01}, /* clkrc */ ++ {0x6b, 0x5a}, /* dblv */ ++ {0x6a, 0x0c}, /* 50 Hz banding filter */ ++ {0xc5, 0x0f}, /* 60 Hz banding filter */ ++ {0xa2, 0x4e}, /* bd50 */ ++ {0xa3, 0x41}, /* bd60 */ ++}; ++ ++static const u8 sensor_start_2_sxga[][2] = { ++ {0x13, 0xe0}, /* com8 */ ++ {0x00, 0x00}, ++ {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ ++ {0x3b, 0xc4}, /* com11 - night mode 1/4 frame rate */ ++ {0x1e, 0x04}, /* mvfp */ ++ {0x11, 0x01}, /* clkrc */ ++ {0x6b, 0x5a}, /* dblv */ ++ {0x6a, 0x0c}, /* 50 Hz banding filter */ ++ {0xc5, 0x0f}, /* 60 Hz banding filter */ ++ {0xa2, 0x4e}, /* bd50 */ ++ {0xa3, 0x41}, /* bd60 */ ++}; ++ ++static void reg_w_i(struct gspca_dev *gspca_dev, u16 reg, u8 val) ++{ ++ struct usb_device *udev = gspca_dev->dev; ++ int ret; ++ ++ if (gspca_dev->usb_err < 0) ++ return; ++ gspca_dev->usb_buf[0] = val; ++ ret = usb_control_msg(udev, ++ usb_sndctrlpipe(udev, 0), ++ 0x01, ++ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ++ 0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT); ++ if (ret < 0) { ++ PDEBUG(D_ERR, "reg_w failed %d", ret); ++ gspca_dev->usb_err = ret; ++ } ++} ++ ++static void reg_w(struct gspca_dev *gspca_dev, u16 reg, u8 val) ++{ ++ PDEBUG(D_USBO, "reg_w [%04x] = %02x", reg, val); ++ reg_w_i(gspca_dev, reg, val); ++} ++ ++static u8 reg_r(struct gspca_dev *gspca_dev, u16 reg) ++{ ++ struct 
usb_device *udev = gspca_dev->dev; ++ int ret; ++ ++ if (gspca_dev->usb_err < 0) ++ return 0; ++ ret = usb_control_msg(udev, ++ usb_rcvctrlpipe(udev, 0), ++ 0x01, ++ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ++ 0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT); ++ PDEBUG(D_USBI, "reg_r [%04x] -> %02x", reg, gspca_dev->usb_buf[0]); ++ if (ret < 0) { ++ PDEBUG(D_ERR, "reg_r err %d", ret); ++ gspca_dev->usb_err = ret; ++ } ++ return gspca_dev->usb_buf[0]; ++} ++ ++static int sccb_check_status(struct gspca_dev *gspca_dev) ++{ ++ u8 data; ++ int i; ++ ++ for (i = 0; i < 5; i++) { ++ data = reg_r(gspca_dev, OV534_REG_STATUS); ++ ++ switch (data) { ++ case 0x00: ++ return 1; ++ case 0x04: ++ return 0; ++ case 0x03: ++ break; ++ default: ++ PDEBUG(D_USBI|D_USBO, ++ "sccb status 0x%02x, attempt %d/5", ++ data, i + 1); ++ } ++ } ++ return 0; ++} ++ ++static void sccb_write(struct gspca_dev *gspca_dev, u8 reg, u8 val) ++{ ++ PDEBUG(D_USBO, "sccb_write [%02x] = %02x", reg, val); ++ reg_w_i(gspca_dev, OV534_REG_SUBADDR, reg); ++ reg_w_i(gspca_dev, OV534_REG_WRITE, val); ++ reg_w_i(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_3); ++ ++ if (!sccb_check_status(gspca_dev)) ++ PDEBUG(D_ERR, "sccb_write failed"); ++} ++ ++static u8 sccb_read(struct gspca_dev *gspca_dev, u16 reg) ++{ ++ reg_w(gspca_dev, OV534_REG_SUBADDR, reg); ++ reg_w(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_2); ++ if (!sccb_check_status(gspca_dev)) ++ PDEBUG(D_ERR, "sccb_read failed 1"); ++ ++ reg_w(gspca_dev, OV534_REG_OPERATION, OV534_OP_READ_2); ++ if (!sccb_check_status(gspca_dev)) ++ PDEBUG(D_ERR, "sccb_read failed 2"); ++ ++ return reg_r(gspca_dev, OV534_REG_READ); ++} ++ ++/* output a bridge sequence (reg - val) */ ++static void reg_w_array(struct gspca_dev *gspca_dev, ++ const u8 (*data)[2], int len) ++{ ++ while (--len >= 0) { ++ reg_w(gspca_dev, (*data)[0], (*data)[1]); ++ data++; ++ } ++} ++ ++/* output a sensor sequence (reg - val) */ ++static void sccb_w_array(struct gspca_dev *gspca_dev, ++ const u8 (*data)[2], int len) ++{ ++ while (--len >= 0) { ++ if ((*data)[0] != 0xff) { ++ sccb_write(gspca_dev, (*data)[0], (*data)[1]); ++ } else { ++ sccb_read(gspca_dev, (*data)[1]); ++ sccb_write(gspca_dev, 0xff, 0x00); ++ } ++ data++; ++ } ++} ++ ++/* Two bits control LED: 0x21 bit 7 and 0x23 bit 7. ++ * (direction and output)? */ ++static void set_led(struct gspca_dev *gspca_dev, int status) ++{ ++ u8 data; ++ ++ PDEBUG(D_CONF, "led status: %d", status); ++ ++ data = reg_r(gspca_dev, 0x21); ++ data |= 0x80; ++ reg_w(gspca_dev, 0x21, data); ++ ++ data = reg_r(gspca_dev, 0x23); ++ if (status) ++ data |= 0x80; ++ else ++ data &= ~0x80; ++ ++ reg_w(gspca_dev, 0x23, data); ++ ++ if (!status) { ++ data = reg_r(gspca_dev, 0x21); ++ data &= ~0x80; ++ reg_w(gspca_dev, 0x21, data); ++ } ++} ++ ++static void setbrightness(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ u8 val; ++ ++ val = sd->brightness; ++ if (val < 8) ++ val = 15 - val; /* f .. 8 */ ++ else ++ val = val - 8; /* 0 .. 
7 */ ++ sccb_write(gspca_dev, 0x55, /* brtn - brightness adjustment */ ++ 0x0f | (val << 4)); ++} ++ ++static void setcontrast(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ sccb_write(gspca_dev, 0x56, /* cnst1 - contrast 1 ctrl coeff */ ++ sd->contrast << 4); ++} ++ ++static void setautogain(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ u8 val; ++ ++/*fixme: should adjust agc/awb/aec by different controls */ ++ val = sd->autogain; ++ val = sccb_read(gspca_dev, 0x13); /* com8 */ ++ sccb_write(gspca_dev, 0xff, 0x00); ++ if (sd->autogain) ++ val |= 0x05; /* agc & aec */ ++ else ++ val &= 0xfa; ++ sccb_write(gspca_dev, 0x13, val); ++} ++ ++static void setexposure(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ u8 val; ++ static const u8 expo[4] = {0x00, 0x25, 0x38, 0x5e}; ++ ++ sccb_write(gspca_dev, 0x10, /* aec[9:2] */ ++ expo[sd->exposure]); ++ ++ val = sccb_read(gspca_dev, 0x13); /* com8 */ ++ sccb_write(gspca_dev, 0xff, 0x00); ++ sccb_write(gspca_dev, 0x13, val); ++ ++ val = sccb_read(gspca_dev, 0xa1); /* aech */ ++ sccb_write(gspca_dev, 0xff, 0x00); ++ sccb_write(gspca_dev, 0xa1, val & 0xe0); /* aec[15:10] = 0 */ ++} ++ ++static void setsharpness(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ s8 val; ++ ++ val = sd->sharpness; ++ if (val < 0) { /* auto */ ++ val = sccb_read(gspca_dev, 0x42); /* com17 */ ++ sccb_write(gspca_dev, 0xff, 0x00); ++ sccb_write(gspca_dev, 0x42, val | 0x40); ++ /* Edge enhancement strength auto adjust */ ++ return; ++ } ++ if (val != 0) ++ val = 1 << (val - 1); ++ sccb_write(gspca_dev, 0x3f, /* edge - edge enhance. factor */ ++ val); ++ val = sccb_read(gspca_dev, 0x42); /* com17 */ ++ sccb_write(gspca_dev, 0xff, 0x00); ++ sccb_write(gspca_dev, 0x42, val & 0xbf); ++} ++ ++static void setsatur(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ u8 val1, val2, val3; ++ static const u8 matrix[5][2] = { ++ {0x14, 0x38}, ++ {0x1e, 0x54}, ++ {0x28, 0x70}, ++ {0x32, 0x8c}, ++ {0x48, 0x90} ++ }; ++ ++ val1 = matrix[sd->satur][0]; ++ val2 = matrix[sd->satur][1]; ++ val3 = val1 + val2; ++ sccb_write(gspca_dev, 0x4f, val3); /* matrix coeff */ ++ sccb_write(gspca_dev, 0x50, val3); ++ sccb_write(gspca_dev, 0x51, 0x00); ++ sccb_write(gspca_dev, 0x52, val1); ++ sccb_write(gspca_dev, 0x53, val2); ++ sccb_write(gspca_dev, 0x54, val3); ++ sccb_write(gspca_dev, 0x58, 0x1a); /* mtxs - coeff signs */ ++ ++ val1 = sccb_read(gspca_dev, 0x41); /* com16 */ ++ sccb_write(gspca_dev, 0xff, 0x00); ++ sccb_write(gspca_dev, 0x41, val1); ++} ++ ++static void setfreq(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ u8 val; ++ ++ val = sccb_read(gspca_dev, 0x13); /* com8 */ ++ sccb_write(gspca_dev, 0xff, 0x00); ++ if (sd->freq == 0) { ++ sccb_write(gspca_dev, 0x13, val & 0xdf); ++ return; ++ } ++ sccb_write(gspca_dev, 0x13, val | 0x20); ++ ++ val = sccb_read(gspca_dev, 0x42); /* com17 */ ++ sccb_write(gspca_dev, 0xff, 0x00); ++ if (sd->freq == 1) ++ val |= 0x01; ++ else ++ val &= 0xfe; ++ sccb_write(gspca_dev, 0x42, val); ++} ++ ++/* this function is called at probe time */ ++static int sd_config(struct gspca_dev *gspca_dev, ++ const struct usb_device_id *id) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ struct cam *cam; ++ ++ cam = &gspca_dev->cam; ++ ++ cam->cam_mode = ov965x_mode; ++ cam->nmodes = ARRAY_SIZE(ov965x_mode); ++ ++ sd->brightness = BRIGHTNESS_DEF; ++ sd->contrast = CONTRAST_DEF; ++#if 
AUTOGAIN_DEF != 0 ++ sd->autogain = AUTOGAIN_DEF; ++ gspca_dev->ctrl_inac |= (1 << EXPO_IDX); ++#endif ++#if EXPO_DEF != 0 ++ sd->exposure = EXPO_DEF; ++#endif ++#if SHARPNESS_DEF != 0 ++ sd->sharpness = SHARPNESS_DEF; ++#endif ++ sd->satur = SATUR_DEF; ++ sd->freq = FREQ_DEF; ++ ++ return 0; ++} ++ ++/* this function is called at probe and resume time */ ++static int sd_init(struct gspca_dev *gspca_dev) ++{ ++ u16 sensor_id; ++ ++ /* reset bridge */ ++ reg_w(gspca_dev, 0xe7, 0x3a); ++ reg_w(gspca_dev, 0xe0, 0x08); ++ msleep(100); ++ ++ /* initialize the sensor address */ ++ reg_w(gspca_dev, OV534_REG_ADDRESS, 0x60); ++ ++ /* reset sensor */ ++ sccb_write(gspca_dev, 0x12, 0x80); ++ msleep(10); ++ ++ /* probe the sensor */ ++ sccb_read(gspca_dev, 0x0a); ++ sensor_id = sccb_read(gspca_dev, 0x0a) << 8; ++ sccb_read(gspca_dev, 0x0b); ++ sensor_id |= sccb_read(gspca_dev, 0x0b); ++ PDEBUG(D_PROBE, "Sensor ID: %04x", sensor_id); ++ ++ /* initialize */ ++ reg_w_array(gspca_dev, bridge_init, ++ ARRAY_SIZE(bridge_init)); ++ sccb_w_array(gspca_dev, sensor_init, ++ ARRAY_SIZE(sensor_init)); ++ reg_w_array(gspca_dev, bridge_init_2, ++ ARRAY_SIZE(bridge_init_2)); ++ sccb_w_array(gspca_dev, sensor_init_2, ++ ARRAY_SIZE(sensor_init_2)); ++ reg_w(gspca_dev, 0xe0, 0x00); ++ reg_w(gspca_dev, 0xe0, 0x01); ++ set_led(gspca_dev, 0); ++ reg_w(gspca_dev, 0xe0, 0x00); ++ ++ return gspca_dev->usb_err; ++} ++ ++static int sd_start(struct gspca_dev *gspca_dev) ++{ ++ switch (gspca_dev->curr_mode) { ++ case QVGA_MODE: /* 320x240 */ ++ sccb_w_array(gspca_dev, sensor_start_1_vga, ++ ARRAY_SIZE(sensor_start_1_vga)); ++ reg_w_array(gspca_dev, bridge_start_qvga, ++ ARRAY_SIZE(bridge_start_qvga)); ++ sccb_w_array(gspca_dev, sensor_start_2_qvga, ++ ARRAY_SIZE(sensor_start_2_qvga)); ++ break; ++ case VGA_MODE: /* 640x480 */ ++ sccb_w_array(gspca_dev, sensor_start_1_vga, ++ ARRAY_SIZE(sensor_start_1_vga)); ++ reg_w_array(gspca_dev, bridge_start_vga, ++ ARRAY_SIZE(bridge_start_vga)); ++ sccb_w_array(gspca_dev, sensor_start_2_vga, ++ ARRAY_SIZE(sensor_start_2_vga)); ++ break; ++ case SVGA_MODE: /* 800x600 */ ++ sccb_w_array(gspca_dev, sensor_start_1_svga, ++ ARRAY_SIZE(sensor_start_1_svga)); ++ reg_w_array(gspca_dev, bridge_start_svga, ++ ARRAY_SIZE(bridge_start_svga)); ++ sccb_w_array(gspca_dev, sensor_start_2_svga, ++ ARRAY_SIZE(sensor_start_2_svga)); ++ break; ++ case XGA_MODE: /* 1024x768 */ ++ sccb_w_array(gspca_dev, sensor_start_1_xga, ++ ARRAY_SIZE(sensor_start_1_xga)); ++ reg_w_array(gspca_dev, bridge_start_xga, ++ ARRAY_SIZE(bridge_start_xga)); ++ sccb_w_array(gspca_dev, sensor_start_2_svga, ++ ARRAY_SIZE(sensor_start_2_svga)); ++ break; ++ default: ++/* case SXGA_MODE: * 1280x1024 */ ++ sccb_w_array(gspca_dev, sensor_start_1_sxga, ++ ARRAY_SIZE(sensor_start_1_sxga)); ++ reg_w_array(gspca_dev, bridge_start_sxga, ++ ARRAY_SIZE(bridge_start_sxga)); ++ sccb_w_array(gspca_dev, sensor_start_2_sxga, ++ ARRAY_SIZE(sensor_start_2_sxga)); ++ break; ++ } ++ setfreq(gspca_dev); ++ setautogain(gspca_dev); ++ setbrightness(gspca_dev); ++ setcontrast(gspca_dev); ++ setexposure(gspca_dev); ++ setsharpness(gspca_dev); ++ setsatur(gspca_dev); ++ ++ reg_w(gspca_dev, 0xe0, 0x00); ++ reg_w(gspca_dev, 0xe0, 0x00); ++ set_led(gspca_dev, 1); ++ return gspca_dev->usb_err; ++} ++ ++static void sd_stopN(struct gspca_dev *gspca_dev) ++{ ++ reg_w(gspca_dev, 0xe0, 0x01); ++ set_led(gspca_dev, 0); ++ reg_w(gspca_dev, 0xe0, 0x00); ++} ++ ++/* Values for bmHeaderInfo (Video and Still Image Payload Headers, 2.4.3.3) */ ++#define UVC_STREAM_EOH (1 << 7) 
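The bridge prefixes every payload with a 12-byte UVC-style header: byte 0 is the header length (always 12), byte 1 is the bmHeaderInfo flag byte whose bits are named by UVC_STREAM_EOH above and the remaining UVC_STREAM_* defines just below, and bytes 2..5 carry a little-endian presentation timestamp when the PTS bit is set. A stand-alone sketch of the header validation that sd_pkt_scan() performs further down; struct payload_hdr and the function name are illustrative only:

#include <stdint.h>
#include <stdbool.h>

#define UVC_STREAM_ERR (1 << 6)
#define UVC_STREAM_PTS (1 << 2)
#define UVC_STREAM_EOF (1 << 1)
#define UVC_STREAM_FID (1 << 0)

struct payload_hdr {                    /* illustrative, not a driver type */
        uint32_t pts;
        uint8_t fid;
        bool eof;
};

/* Accept a payload only when its 12-byte header is well formed, error free
 * and carries a PTS, mirroring the checks in sd_pkt_scan(). */
static bool parse_payload_header(const uint8_t *data, int len,
                                 struct payload_hdr *out)
{
        if (len < 12 || data[0] != 12)
                return false;                   /* "bad header" */
        if (data[1] & UVC_STREAM_ERR)
                return false;                   /* "payload error" */
        if (!(data[1] & UVC_STREAM_PTS))
                return false;                   /* "PTS not present" */
        out->pts = (uint32_t)data[5] << 24 | (uint32_t)data[4] << 16 |
                   (uint32_t)data[3] << 8 | data[2];
        out->fid = data[1] & UVC_STREAM_FID;
        out->eof = (data[1] & UVC_STREAM_EOF) != 0;
        return true;
}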
++#define UVC_STREAM_ERR (1 << 6) ++#define UVC_STREAM_STI (1 << 5) ++#define UVC_STREAM_RES (1 << 4) ++#define UVC_STREAM_SCR (1 << 3) ++#define UVC_STREAM_PTS (1 << 2) ++#define UVC_STREAM_EOF (1 << 1) ++#define UVC_STREAM_FID (1 << 0) ++ ++static void sd_pkt_scan(struct gspca_dev *gspca_dev, ++ u8 *data, int len) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ __u32 this_pts; ++ u8 this_fid; ++ int remaining_len = len; ++ ++ do { ++ len = min(remaining_len, 2040); ++ ++ /* Payloads are prefixed with a UVC-style header. We ++ consider a frame to start when the FID toggles, or the PTS ++ changes. A frame ends when EOF is set, and we've received ++ the correct number of bytes. */ ++ ++ /* Verify UVC header. Header length is always 12 */ ++ if (data[0] != 12 || len < 12) { ++ PDEBUG(D_PACK, "bad header"); ++ goto discard; ++ } ++ ++ /* Check errors */ ++ if (data[1] & UVC_STREAM_ERR) { ++ PDEBUG(D_PACK, "payload error"); ++ goto discard; ++ } ++ ++ /* Extract PTS and FID */ ++ if (!(data[1] & UVC_STREAM_PTS)) { ++ PDEBUG(D_PACK, "PTS not present"); ++ goto discard; ++ } ++ this_pts = (data[5] << 24) | (data[4] << 16) ++ | (data[3] << 8) | data[2]; ++ this_fid = data[1] & UVC_STREAM_FID; ++ ++ /* If PTS or FID has changed, start a new frame. */ ++ if (this_pts != sd->last_pts || this_fid != sd->last_fid) { ++ if (gspca_dev->last_packet_type == INTER_PACKET) ++ gspca_frame_add(gspca_dev, LAST_PACKET, ++ NULL, 0); ++ sd->last_pts = this_pts; ++ sd->last_fid = this_fid; ++ gspca_frame_add(gspca_dev, FIRST_PACKET, ++ data + 12, len - 12); ++ /* If this packet is marked as EOF, end the frame */ ++ } else if (data[1] & UVC_STREAM_EOF) { ++ sd->last_pts = 0; ++ gspca_frame_add(gspca_dev, LAST_PACKET, ++ data + 12, len - 12); ++ } else { ++ ++ /* Add the data from this payload */ ++ gspca_frame_add(gspca_dev, INTER_PACKET, ++ data + 12, len - 12); ++ } ++ ++ /* Done this payload */ ++ goto scan_next; ++ ++discard: ++ /* Discard data until a new frame starts. 
*/ ++ gspca_dev->last_packet_type = DISCARD_PACKET; ++ ++scan_next: ++ remaining_len -= len; ++ data += len; ++ } while (remaining_len > 0); ++} ++ ++/* controls */ ++static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ sd->brightness = val; ++ if (gspca_dev->streaming) ++ setbrightness(gspca_dev); ++ return gspca_dev->usb_err; ++} ++ ++static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->brightness; ++ return 0; ++} ++ ++static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ sd->contrast = val; ++ if (gspca_dev->streaming) ++ setcontrast(gspca_dev); ++ return gspca_dev->usb_err; ++} ++ ++static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->contrast; ++ return 0; ++} ++ ++static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ sd->autogain = val; ++ ++ if (gspca_dev->streaming) { ++ if (val) ++ gspca_dev->ctrl_inac |= (1 << EXPO_IDX); ++ else ++ gspca_dev->ctrl_inac &= ~(1 << EXPO_IDX); ++ setautogain(gspca_dev); ++ } ++ return gspca_dev->usb_err; ++} ++ ++static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->autogain; ++ return 0; ++} ++ ++static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ sd->exposure = val; ++ if (gspca_dev->streaming) ++ setexposure(gspca_dev); ++ return gspca_dev->usb_err; ++} ++ ++static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->exposure; ++ return 0; ++} ++ ++static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ sd->sharpness = val; ++ if (gspca_dev->streaming) ++ setsharpness(gspca_dev); ++ return gspca_dev->usb_err; ++} ++ ++static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->sharpness; ++ return 0; ++} ++ ++static int sd_setsatur(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ sd->satur = val; ++ if (gspca_dev->streaming) ++ setsatur(gspca_dev); ++ return gspca_dev->usb_err; ++} ++ ++static int sd_getsatur(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->satur; ++ return 0; ++} ++static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ sd->freq = val; ++ if (gspca_dev->streaming) ++ setfreq(gspca_dev); ++ return gspca_dev->usb_err; ++} ++ ++static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->freq; ++ return 0; ++} ++ ++static int sd_querymenu(struct gspca_dev *gspca_dev, ++ struct v4l2_querymenu *menu) ++{ ++ switch (menu->id) { ++ case V4L2_CID_POWER_LINE_FREQUENCY: ++ switch (menu->index) { ++ case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */ ++ strcpy((char *) menu->name, "NoFliker"); ++ return 0; ++ case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */ ++ strcpy((char *) menu->name, "50 Hz"); ++ return 0; ++ case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */ ++ strcpy((char *) menu->name, "60 Hz"); ++ return 
0; ++ } ++ break; ++ } ++ return -EINVAL; ++} ++ ++/* sub-driver description */ ++static const struct sd_desc sd_desc = { ++ .name = MODULE_NAME, ++ .ctrls = sd_ctrls, ++ .nctrls = ARRAY_SIZE(sd_ctrls), ++ .config = sd_config, ++ .init = sd_init, ++ .start = sd_start, ++ .stopN = sd_stopN, ++ .pkt_scan = sd_pkt_scan, ++ .querymenu = sd_querymenu, ++}; ++ ++/* -- module initialisation -- */ ++static const __devinitdata struct usb_device_id device_table[] = { ++ {USB_DEVICE(0x06f8, 0x3003)}, ++ {} ++}; ++ ++MODULE_DEVICE_TABLE(usb, device_table); ++ ++/* -- device connect -- */ ++static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) ++{ ++ return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), ++ THIS_MODULE); ++} ++ ++static struct usb_driver sd_driver = { ++ .name = MODULE_NAME, ++ .id_table = device_table, ++ .probe = sd_probe, ++ .disconnect = gspca_disconnect, ++#ifdef CONFIG_PM ++ .suspend = gspca_suspend, ++ .resume = gspca_resume, ++#endif ++}; ++ ++/* -- module insert / remove -- */ ++static int __init sd_mod_init(void) ++{ ++ int ret; ++ ++ ret = usb_register(&sd_driver); ++ if (ret < 0) ++ return ret; ++ PDEBUG(D_PROBE, "registered"); ++ return 0; ++} ++ ++static void __exit sd_mod_exit(void) ++{ ++ usb_deregister(&sd_driver); ++ PDEBUG(D_PROBE, "deregistered"); ++} ++ ++module_init(sd_mod_init); ++module_exit(sd_mod_exit); +diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c +index 4706a82..0c87c34 100644 +--- a/drivers/media/video/gspca/pac207.c ++++ b/drivers/media/video/gspca/pac207.c +@@ -25,6 +25,7 @@ + + #define MODULE_NAME "pac207" + ++#include + #include "gspca.h" + + MODULE_AUTHOR("Hans de Goede "); +@@ -77,7 +78,7 @@ static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + #define SD_BRIGHTNESS 0 + { + { +@@ -495,6 +496,25 @@ static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val) + return 0; + } + ++#ifdef CONFIG_INPUT ++static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, ++ u8 *data, /* interrupt packet data */ ++ int len) /* interrput packet length */ ++{ ++ int ret = -EINVAL; ++ ++ if (len == 2 && data[0] == 0x5a && data[1] == 0x5a) { ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); ++ input_sync(gspca_dev->input_dev); ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); ++ input_sync(gspca_dev->input_dev); ++ ret = 0; ++ } ++ ++ return ret; ++} ++#endif ++ + /* sub-driver description */ + static const struct sd_desc sd_desc = { + .name = MODULE_NAME, +@@ -506,6 +526,9 @@ static const struct sd_desc sd_desc = { + .stopN = sd_stopN, + .dq_callback = pac207_do_auto_gain, + .pkt_scan = sd_pkt_scan, ++#ifdef CONFIG_INPUT ++ .int_pkt_scan = sd_int_pkt_scan, ++#endif + }; + + /* -- module initialisation -- */ +diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c +index de0b66c..7722f7a 100644 +--- a/drivers/media/video/gspca/pac7302.c ++++ b/drivers/media/video/gspca/pac7302.c +@@ -4,7 +4,9 @@ + * + * V4L2 by Jean-Francois Moine + * +- * Separated from Pixart PAC7311 library by M�rton N�meth ++ * Separated from Pixart PAC7311 library by Márton Németh ++ * Camera button input handling by Márton Németh ++ * Copyright (C) 2009-2010 Márton Németh + * + * This program is free software; you can redistribute it and/or modify + * it 
under the terms of the GNU General Public License as published by +@@ -22,33 +24,26 @@ + */ + + /* Some documentation about various registers as determined by trial and error. +- When the register addresses differ between the 7202 and the 7311 the 2 +- different addresses are written as 7302addr/7311addr, when one of the 2 +- addresses is a - sign that register description is not valid for the +- matching IC. + + Register page 1: + + Address Description +- -/0x08 Unknown compressor related, must always be 8 except when not +- in 640x480 resolution and page 4 reg 2 <= 3 then set it to 9 ! +- -/0x1b Auto white balance related, bit 0 is AWB enable (inverted) +- bits 345 seem to toggle per color gains on/off (inverted) + 0x78 Global control, bit 6 controls the LED (inverted) +- -/0x80 JPEG compression ratio ? Best not touched + +- Register page 3/4: ++ Register page 3: + + Address Description +- 0x02 Clock divider 2-63, fps =~ 60 / val. Must be a multiple of 3 on ++ 0x02 Clock divider 3-63, fps = 90 / val. Must be a multiple of 3 on + the 7302, so one of 3, 6, 9, ..., except when between 6 and 12? +- -/0x0f Master gain 1-245, low value = high gain +- 0x10/- Master gain 0-31 +- -/0x10 Another gain 0-15, limited influence (1-2x gain I guess) ++ 0x03 Variable framerate ctrl reg2==3: 0 -> ~30 fps, 255 -> ~22fps ++ 0x04 Another var framerate ctrl reg2==3, reg3==0: 0 -> ~30 fps, ++ 63 -> ~27 fps, the 2 msb's must always be 1 !! ++ 0x05 Another var framerate ctrl reg2==3, reg3==0, reg4==0xc0: ++ 1 -> ~30 fps, 2 -> ~20 fps ++ 0x0e Exposure bits 0-7, 0-448, 0 = use full frame time ++ 0x0f Exposure bit 8, 0-448, 448 = no exposure at all ++ 0x10 Master gain 0-31 + 0x21 Bitfield: 0-1 unused, 2-3 vflip/hflip, 4-5 unknown, 6-7 unused +- -/0x27 Seems to toggle various gains on / off, Setting bit 7 seems to +- completely disable the analog amplification block. Set to 0x68 +- for max gain, 0x14 for minimal gain. 
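The reworked notes above pin down page 3 of the 7302: register 0x02 is a clock divider with fps = 90 / val and val a multiple of 3 (so 3 gives 30 fps, 6 gives 15 fps, 9 gives 10 fps), while registers 0x0e and 0x0f hold a 9-bit exposure value (bits 0-7, then bit 8) in the range 0-448. A small sketch of how such values could be derived; the register numbers come from the comment block, the function names and clamping are illustrative:

#include <stdint.h>

/* fps = 90 / reg02, with reg02 restricted to multiples of 3 in 3..63. */
static uint8_t pac7302_clock_divider(unsigned fps)
{
        unsigned div = 90 / (fps ? fps : 1);

        div -= div % 3;                 /* round down to a multiple of 3 */
        if (div < 3)
                div = 3;
        if (div > 63)
                div = 63;
        return div;
}

/* The 9-bit exposure (0..448) is split over two registers on page 3:
 * reg 0x0e gets bits 0-7, reg 0x0f gets bit 8. */
static void pac7302_split_exposure(unsigned expo, uint8_t *reg0e, uint8_t *reg0f)
{
        if (expo > 448)
                expo = 448;
        *reg0e = expo & 0xff;
        *reg0f = expo >> 8;
}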
+ + The registers are accessed in the following functions: + +@@ -68,6 +63,7 @@ + + #define MODULE_NAME "pac7302" + ++#include + #include + #include "gspca.h" + +@@ -86,8 +82,8 @@ struct sd { + unsigned char red_balance; + unsigned char blue_balance; + unsigned char gain; +- unsigned char exposure; + unsigned char autogain; ++ unsigned short exposure; + __u8 hflip; + __u8 vflip; + u8 flags; +@@ -124,8 +120,7 @@ static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls[] = { +-/* This control is pac7302 only */ ++static const struct ctrl sd_ctrls[] = { + { + { + .id = V4L2_CID_BRIGHTNESS, +@@ -141,7 +136,6 @@ static struct ctrl sd_ctrls[] = { + .set = sd_setbrightness, + .get = sd_getbrightness, + }, +-/* This control is for both the 7302 and the 7311 */ + { + { + .id = V4L2_CID_CONTRAST, +@@ -157,7 +151,6 @@ static struct ctrl sd_ctrls[] = { + .set = sd_setcontrast, + .get = sd_getcontrast, + }, +-/* This control is pac7302 only */ + { + { + .id = V4L2_CID_SATURATION, +@@ -215,7 +208,6 @@ static struct ctrl sd_ctrls[] = { + .set = sd_setbluebalance, + .get = sd_getbluebalance, + }, +-/* All controls below are for both the 7302 and the 7311 */ + { + { + .id = V4L2_CID_GAIN, +@@ -238,11 +230,10 @@ static struct ctrl sd_ctrls[] = { + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Exposure", + .minimum = 0, +-#define EXPOSURE_MAX 255 +- .maximum = EXPOSURE_MAX, ++ .maximum = 1023, + .step = 1, +-#define EXPOSURE_DEF 16 /* 32 ms / 30 fps */ +-#define EXPOSURE_KNEE 50 /* 100 ms / 10 fps */ ++#define EXPOSURE_DEF 66 /* 33 ms / 30 fps */ ++#define EXPOSURE_KNEE 133 /* 66 ms / 15 fps */ + .default_value = EXPOSURE_DEF, + }, + .set = sd_setexposure, +@@ -301,7 +292,6 @@ static const struct v4l2_pix_format vga_mode[] = { + }; + + #define LOAD_PAGE3 255 +-#define LOAD_PAGE4 254 + #define END_OF_SEQUENCE 0 + + /* pac 7302 */ +@@ -379,7 +369,7 @@ static const __u8 start_7302[] = { + #define SKIP 0xaa + /* page 3 - the value SKIP says skip the index - see reg_w_page() */ + static const __u8 page3_7302[] = { +- 0x90, 0x40, 0x03, 0x50, 0xc2, 0x01, 0x14, 0x16, ++ 0x90, 0x40, 0x03, 0x00, 0xc0, 0x01, 0x14, 0x16, + 0x14, 0x12, 0x00, 0x00, 0x00, 0x02, 0x33, 0x00, + 0x0f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x47, 0x01, 0xb3, 0x01, 0x00, +@@ -388,7 +378,7 @@ static const __u8 page3_7302[] = { + 0xa4, 0xb8, 0xe0, 0x2a, 0xf6, 0x00, 0x00, 0x00, + 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xfc, 0x00, 0xf2, 0x1f, 0x04, 0x00, 0x00, +- 0x00, 0x00, 0x00, 0xc0, 0xc0, 0x10, 0x00, 0x00, ++ SKIP, 0x00, 0x00, 0xc0, 0xc0, 0x10, 0x00, 0x00, + 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x40, 0xff, 0x03, 0x19, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +@@ -401,12 +391,14 @@ static const __u8 page3_7302[] = { + 0x00 + }; + +-static int reg_w_buf(struct gspca_dev *gspca_dev, ++static void reg_w_buf(struct gspca_dev *gspca_dev, + __u8 index, + const char *buffer, int len) + { + int ret; + ++ if (gspca_dev->usb_err < 0) ++ return; + memcpy(gspca_dev->usb_buf, buffer, len); + ret = usb_control_msg(gspca_dev->dev, + usb_sndctrlpipe(gspca_dev->dev, 0), +@@ -415,20 +407,44 @@ static int reg_w_buf(struct gspca_dev *gspca_dev, + 0, /* value */ + index, gspca_dev->usb_buf, len, + 500); +- if (ret < 0) ++ if (ret < 0) { + PDEBUG(D_ERR, "reg_w_buf(): " + "Failed to write registers to index 0x%x, 
error %i", + index, ret); +- return ret; ++ gspca_dev->usb_err = ret; ++ } + } + ++#if 0 /* not used */ ++static __u8 reg_r(struct gspca_dev *gspca_dev, ++ __u8 index) ++{ ++ int ret; ++ ++ ret = usb_control_msg(gspca_dev->dev, ++ usb_rcvctrlpipe(gspca_dev->dev, 0), ++ 0, /* request */ ++ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ++ 0, /* value */ ++ index, gspca_dev->usb_buf, 1, ++ 500); ++ if (ret < 0) ++ PDEBUG(D_ERR, "reg_r(): " ++ "Failed to read register from index 0x%x, error %i", ++ index, ret); ++ ++ return gspca_dev->usb_buf[0]; ++} ++#endif + +-static int reg_w(struct gspca_dev *gspca_dev, ++static void reg_w(struct gspca_dev *gspca_dev, + __u8 index, + __u8 value) + { + int ret; + ++ if (gspca_dev->usb_err < 0) ++ return; + gspca_dev->usb_buf[0] = value; + ret = usb_control_msg(gspca_dev->dev, + usb_sndctrlpipe(gspca_dev->dev, 0), +@@ -436,32 +452,32 @@ static int reg_w(struct gspca_dev *gspca_dev, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0, index, gspca_dev->usb_buf, 1, + 500); +- if (ret < 0) ++ if (ret < 0) { + PDEBUG(D_ERR, "reg_w(): " + "Failed to write register to index 0x%x, value 0x%x, error %i", + index, value, ret); +- return ret; ++ gspca_dev->usb_err = ret; ++ } + } + +-static int reg_w_seq(struct gspca_dev *gspca_dev, ++static void reg_w_seq(struct gspca_dev *gspca_dev, + const __u8 *seq, int len) + { +- int ret = 0; + while (--len >= 0) { +- if (0 <= ret) +- ret = reg_w(gspca_dev, seq[0], seq[1]); ++ reg_w(gspca_dev, seq[0], seq[1]); + seq += 2; + } +- return ret; + } + + /* load the beginning of a page */ +-static int reg_w_page(struct gspca_dev *gspca_dev, ++static void reg_w_page(struct gspca_dev *gspca_dev, + const __u8 *page, int len) + { + int index; + int ret = 0; + ++ if (gspca_dev->usb_err < 0) ++ return; + for (index = 0; index < len; index++) { + if (page[index] == SKIP) /* skip this index */ + continue; +@@ -477,56 +493,47 @@ static int reg_w_page(struct gspca_dev *gspca_dev, + "Failed to write register to index 0x%x, " + "value 0x%x, error %i", + index, page[index], ret); ++ gspca_dev->usb_err = ret; + break; + } + } +- return ret; + } + + /* output a variable sequence */ +-static int reg_w_var(struct gspca_dev *gspca_dev, ++static void reg_w_var(struct gspca_dev *gspca_dev, + const __u8 *seq, +- const __u8 *page3, unsigned int page3_len, +- const __u8 *page4, unsigned int page4_len) ++ const __u8 *page3, unsigned int page3_len) + { + int index, len; +- int ret = 0; + + for (;;) { + index = *seq++; + len = *seq++; + switch (len) { + case END_OF_SEQUENCE: +- return ret; +- case LOAD_PAGE4: +- ret = reg_w_page(gspca_dev, page4, page4_len); +- break; ++ return; + case LOAD_PAGE3: +- ret = reg_w_page(gspca_dev, page3, page3_len); ++ reg_w_page(gspca_dev, page3, page3_len); + break; + default: + if (len > USB_BUF_SZ) { + PDEBUG(D_ERR|D_STREAM, + "Incorrect variable sequence"); +- return -EINVAL; ++ return; + } + while (len > 0) { + if (len < 8) { +- ret = reg_w_buf(gspca_dev, ++ reg_w_buf(gspca_dev, + index, seq, len); +- if (ret < 0) +- return ret; + seq += len; + break; + } +- ret = reg_w_buf(gspca_dev, index, seq, 8); ++ reg_w_buf(gspca_dev, index, seq, 8); + seq += 8; + index += 8; + len -= 8; + } + } +- if (ret < 0) +- return ret; + } + /* not reached */ + } +@@ -560,11 +567,10 @@ static int sd_config(struct gspca_dev *gspca_dev, + } + + /* This function is used by pac7302 only */ +-static int setbrightcont(struct gspca_dev *gspca_dev) ++static void setbrightcont(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) 
gspca_dev; + int i, v; +- int ret; + static const __u8 max[10] = + {0x29, 0x33, 0x42, 0x5a, 0x6e, 0x80, 0x9f, 0xbb, + 0xd4, 0xec}; +@@ -572,7 +578,7 @@ static int setbrightcont(struct gspca_dev *gspca_dev) + {0x35, 0x33, 0x33, 0x2f, 0x2a, 0x25, 0x1e, 0x17, + 0x11, 0x0b}; + +- ret = reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ ++ reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ + for (i = 0; i < 10; i++) { + v = max[i]; + v += (sd->brightness - BRIGHTNESS_MAX) +@@ -582,136 +588,121 @@ static int setbrightcont(struct gspca_dev *gspca_dev) + v = 0; + else if (v > 0xff) + v = 0xff; +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0xa2 + i, v); ++ reg_w(gspca_dev, 0xa2 + i, v); + } +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0xdc, 0x01); +- return ret; ++ reg_w(gspca_dev, 0xdc, 0x01); + } + + /* This function is used by pac7302 only */ +-static int setcolors(struct gspca_dev *gspca_dev) ++static void setcolors(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + int i, v; +- int ret; + static const int a[9] = + {217, -212, 0, -101, 170, -67, -38, -315, 355}; + static const int b[9] = + {19, 106, 0, 19, 106, 1, 19, 106, 1}; + +- ret = reg_w(gspca_dev, 0xff, 0x03); /* page 3 */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x11, 0x01); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ ++ reg_w(gspca_dev, 0xff, 0x03); /* page 3 */ ++ reg_w(gspca_dev, 0x11, 0x01); ++ reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ + for (i = 0; i < 9; i++) { + v = a[i] * sd->colors / COLOR_MAX + b[i]; +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x0f + 2 * i, (v >> 8) & 0x07); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x0f + 2 * i + 1, v); ++ reg_w(gspca_dev, 0x0f + 2 * i, (v >> 8) & 0x07); ++ reg_w(gspca_dev, 0x0f + 2 * i + 1, v); + } +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0xdc, 0x01); ++ reg_w(gspca_dev, 0xdc, 0x01); + PDEBUG(D_CONF|D_STREAM, "color: %i", sd->colors); +- return ret; + } + +-static int setwhitebalance(struct gspca_dev *gspca_dev) ++static void setwhitebalance(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +- int ret; + +- ret = reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0xc6, sd->white_balance); ++ reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ ++ reg_w(gspca_dev, 0xc6, sd->white_balance); + +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0xdc, 0x01); ++ reg_w(gspca_dev, 0xdc, 0x01); + PDEBUG(D_CONF|D_STREAM, "white_balance: %i", sd->white_balance); +- return ret; + } + +-static int setredbalance(struct gspca_dev *gspca_dev) ++static void setredbalance(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +- int ret; + +- ret = reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0xc5, sd->red_balance); ++ reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ ++ reg_w(gspca_dev, 0xc5, sd->red_balance); + +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0xdc, 0x01); ++ reg_w(gspca_dev, 0xdc, 0x01); + PDEBUG(D_CONF|D_STREAM, "red_balance: %i", sd->red_balance); +- return ret; + } + +-static int setbluebalance(struct gspca_dev *gspca_dev) ++static void setbluebalance(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +- int ret; + +- ret = reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0xc7, sd->blue_balance); ++ reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ ++ reg_w(gspca_dev, 0xc7, sd->blue_balance); + +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0xdc, 0x01); ++ reg_w(gspca_dev, 0xdc, 0x01); + PDEBUG(D_CONF|D_STREAM, 
"blue_balance: %i", sd->blue_balance); +- return ret; + } + +-static int setgain(struct gspca_dev *gspca_dev) ++static void setgain(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +- int ret; + +- ret = reg_w(gspca_dev, 0xff, 0x03); /* page 3 */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x10, sd->gain >> 3); ++ reg_w(gspca_dev, 0xff, 0x03); /* page 3 */ ++ reg_w(gspca_dev, 0x10, sd->gain >> 3); + + /* load registers to sensor (Bit 0, auto clear) */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x11, 0x01); +- return ret; ++ reg_w(gspca_dev, 0x11, 0x01); + } + +-static int setexposure(struct gspca_dev *gspca_dev) ++static void setexposure(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +- int ret; +- __u8 reg; +- +- /* register 2 of frame 3/4 contains the clock divider configuring the +- no fps according to the formula: 60 / reg. sd->exposure is the +- desired exposure time in ms. */ +- reg = 120 * sd->exposure / 1000; +- if (reg < 2) +- reg = 2; +- else if (reg > 63) +- reg = 63; +- +- /* On the pac7302 reg2 MUST be a multiple of 3, so round it to +- the nearest multiple of 3, except when between 6 and 12? */ +- if (reg < 6 || reg > 12) +- reg = ((reg + 1) / 3) * 3; +- ret = reg_w(gspca_dev, 0xff, 0x03); /* page 3 */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x02, reg); ++ __u8 clockdiv; ++ __u16 exposure; ++ ++ /* register 2 of frame 3 contains the clock divider configuring the ++ no fps according to the formula: 90 / reg. sd->exposure is the ++ desired exposure time in 0.5 ms. */ ++ clockdiv = (90 * sd->exposure + 1999) / 2000; ++ ++ /* Note clockdiv = 3 also works, but when running at 30 fps, depending ++ on the scene being recorded, the camera switches to another ++ quantization table for certain JPEG blocks, and we don't know how ++ to decompress these blocks. So we cap the framerate at 15 fps */ ++ if (clockdiv < 6) ++ clockdiv = 6; ++ else if (clockdiv > 63) ++ clockdiv = 63; ++ ++ /* reg2 MUST be a multiple of 3, except when between 6 and 12? ++ Always round up, otherwise we cannot get the desired frametime ++ using the partial frame time exposure control */ ++ if (clockdiv < 6 || clockdiv > 12) ++ clockdiv = ((clockdiv + 2) / 3) * 3; ++ ++ /* frame exposure time in ms = 1000 * clockdiv / 90 -> ++ exposure = (sd->exposure / 2) * 448 / (1000 * clockdiv / 90) */ ++ exposure = (sd->exposure * 45 * 448) / (1000 * clockdiv); ++ /* 0 = use full frametime, 448 = no exposure, reverse it */ ++ exposure = 448 - exposure; ++ ++ reg_w(gspca_dev, 0xff, 0x03); /* page 3 */ ++ reg_w(gspca_dev, 0x02, clockdiv); ++ reg_w(gspca_dev, 0x0e, exposure & 0xff); ++ reg_w(gspca_dev, 0x0f, exposure >> 8); + + /* load registers to sensor (Bit 0, auto clear) */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x11, 0x01); +- return ret; ++ reg_w(gspca_dev, 0x11, 0x01); + } + +-static int sethvflip(struct gspca_dev *gspca_dev) ++static void sethvflip(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +- int ret; + u8 data, hflip, vflip; + + hflip = sd->hflip; +@@ -721,48 +712,37 @@ static int sethvflip(struct gspca_dev *gspca_dev) + if (sd->flags & FL_VFLIP) + vflip = !vflip; + +- ret = reg_w(gspca_dev, 0xff, 0x03); /* page 3 */ ++ reg_w(gspca_dev, 0xff, 0x03); /* page 3 */ + data = (hflip ? 0x08 : 0x00) | (vflip ? 
0x04 : 0x00); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x21, data); ++ reg_w(gspca_dev, 0x21, data); ++ + /* load registers to sensor (Bit 0, auto clear) */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x11, 0x01); +- return ret; ++ reg_w(gspca_dev, 0x11, 0x01); + } + + /* this function is called at probe and resume time for pac7302 */ + static int sd_init(struct gspca_dev *gspca_dev) + { +- return reg_w_seq(gspca_dev, init_7302, sizeof(init_7302)/2); ++ reg_w_seq(gspca_dev, init_7302, sizeof(init_7302)/2); ++ return gspca_dev->usb_err; + } + + static int sd_start(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +- int ret = 0; + + sd->sof_read = 0; + +- ret = reg_w_var(gspca_dev, start_7302, +- page3_7302, sizeof(page3_7302), +- NULL, 0); +- if (0 <= ret) +- ret = setbrightcont(gspca_dev); +- if (0 <= ret) +- ret = setcolors(gspca_dev); +- if (0 <= ret) +- ret = setwhitebalance(gspca_dev); +- if (0 <= ret) +- ret = setredbalance(gspca_dev); +- if (0 <= ret) +- ret = setbluebalance(gspca_dev); +- if (0 <= ret) +- ret = setgain(gspca_dev); +- if (0 <= ret) +- ret = setexposure(gspca_dev); +- if (0 <= ret) +- ret = sethvflip(gspca_dev); ++ reg_w_var(gspca_dev, start_7302, ++ page3_7302, sizeof(page3_7302)); ++ setbrightcont(gspca_dev); ++ setcolors(gspca_dev); ++ setwhitebalance(gspca_dev); ++ setredbalance(gspca_dev); ++ setbluebalance(gspca_dev); ++ setgain(gspca_dev); ++ setexposure(gspca_dev); ++ sethvflip(gspca_dev); + + /* only resolution 640x480 is supported for pac7302 */ + +@@ -771,34 +751,27 @@ static int sd_start(struct gspca_dev *gspca_dev) + atomic_set(&sd->avg_lum, -1); + + /* start stream */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0xff, 0x01); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x78, 0x01); ++ reg_w(gspca_dev, 0xff, 0x01); ++ reg_w(gspca_dev, 0x78, 0x01); + +- return ret; ++ return gspca_dev->usb_err; + } + + static void sd_stopN(struct gspca_dev *gspca_dev) + { +- int ret; + + /* stop stream */ +- ret = reg_w(gspca_dev, 0xff, 0x01); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x78, 0x00); ++ reg_w(gspca_dev, 0xff, 0x01); ++ reg_w(gspca_dev, 0x78, 0x00); + } + + /* called on streamoff with alt 0 and on disconnect for pac7302 */ + static void sd_stop0(struct gspca_dev *gspca_dev) + { +- int ret; +- + if (!gspca_dev->present) + return; +- ret = reg_w(gspca_dev, 0xff, 0x01); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x78, 0x40); ++ reg_w(gspca_dev, 0xff, 0x01); ++ reg_w(gspca_dev, 0x78, 0x40); + } + + /* Include pac common sof detection functions */ +@@ -808,22 +781,13 @@ static void do_autogain(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + int avg_lum = atomic_read(&sd->avg_lum); +- int desired_lum, deadzone; ++ int desired_lum; ++ const int deadzone = 30; + + if (avg_lum == -1) + return; + +- desired_lum = 270 + sd->brightness * 4; +- /* Hack hack, with the 7202 the first exposure step is +- pretty large, so if we're about to make the first +- exposure increase make the deadzone large to avoid +- oscilating */ +- if (desired_lum > avg_lum && sd->gain == GAIN_DEF && +- sd->exposure > EXPOSURE_DEF && +- sd->exposure < 42) +- deadzone = 90; +- else +- deadzone = 30; ++ desired_lum = 270 + sd->brightness; + + if (sd->autogain_ignore_frames > 0) + sd->autogain_ignore_frames--; +@@ -947,7 +911,7 @@ static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val) + sd->brightness = val; + if (gspca_dev->streaming) + setbrightcont(gspca_dev); +- return 0; ++ return gspca_dev->usb_err; + } + + static int 
sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val) +@@ -966,7 +930,7 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val) + if (gspca_dev->streaming) { + setbrightcont(gspca_dev); + } +- return 0; ++ return gspca_dev->usb_err; + } + + static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val) +@@ -984,7 +948,7 @@ static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val) + sd->colors = val; + if (gspca_dev->streaming) + setcolors(gspca_dev); +- return 0; ++ return gspca_dev->usb_err; + } + + static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val) +@@ -998,14 +962,11 @@ static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val) + static int sd_setwhitebalance(struct gspca_dev *gspca_dev, __s32 val) + { + struct sd *sd = (struct sd *) gspca_dev; +- int ret = 0; + + sd->white_balance = val; + if (gspca_dev->streaming) +- ret = setwhitebalance(gspca_dev); +- if (0 <= ret) +- ret = 0; +- return ret; ++ setwhitebalance(gspca_dev); ++ return gspca_dev->usb_err; + } + + static int sd_getwhitebalance(struct gspca_dev *gspca_dev, __s32 *val) +@@ -1019,14 +980,11 @@ static int sd_getwhitebalance(struct gspca_dev *gspca_dev, __s32 *val) + static int sd_setredbalance(struct gspca_dev *gspca_dev, __s32 val) + { + struct sd *sd = (struct sd *) gspca_dev; +- int ret = 0; + + sd->red_balance = val; + if (gspca_dev->streaming) +- ret = setredbalance(gspca_dev); +- if (0 <= ret) +- ret = 0; +- return ret; ++ setredbalance(gspca_dev); ++ return gspca_dev->usb_err; + } + + static int sd_getredbalance(struct gspca_dev *gspca_dev, __s32 *val) +@@ -1040,14 +998,11 @@ static int sd_getredbalance(struct gspca_dev *gspca_dev, __s32 *val) + static int sd_setbluebalance(struct gspca_dev *gspca_dev, __s32 val) + { + struct sd *sd = (struct sd *) gspca_dev; +- int ret = 0; + + sd->blue_balance = val; + if (gspca_dev->streaming) +- ret = setbluebalance(gspca_dev); +- if (0 <= ret) +- ret = 0; +- return ret; ++ setbluebalance(gspca_dev); ++ return gspca_dev->usb_err; + } + + static int sd_getbluebalance(struct gspca_dev *gspca_dev, __s32 *val) +@@ -1065,7 +1020,7 @@ static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val) + sd->gain = val; + if (gspca_dev->streaming) + setgain(gspca_dev); +- return 0; ++ return gspca_dev->usb_err; + } + + static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val) +@@ -1083,7 +1038,7 @@ static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val) + sd->exposure = val; + if (gspca_dev->streaming) + setexposure(gspca_dev); +- return 0; ++ return gspca_dev->usb_err; + } + + static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val) +@@ -1114,7 +1069,7 @@ static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val) + } + } + +- return 0; ++ return gspca_dev->usb_err; + } + + static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val) +@@ -1132,7 +1087,7 @@ static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val) + sd->hflip = val; + if (gspca_dev->streaming) + sethvflip(gspca_dev); +- return 0; ++ return gspca_dev->usb_err; + } + + static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val) +@@ -1150,7 +1105,7 @@ static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val) + sd->vflip = val; + if (gspca_dev->streaming) + sethvflip(gspca_dev); +- return 0; ++ return gspca_dev->usb_err; + } + + static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val) +@@ -1165,7 +1120,6 @@ static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val) + static int sd_dbg_s_register(struct gspca_dev 
*gspca_dev, + struct v4l2_dbg_register *reg) + { +- int ret = -EINVAL; + __u8 index; + __u8 value; + +@@ -1185,14 +1139,12 @@ static int sd_dbg_s_register(struct gspca_dev *gspca_dev, + /* Note that there shall be no access to other page + by any other function between the page swith and + the actual register write */ +- ret = reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, index, value); ++ reg_w(gspca_dev, 0xff, 0x00); /* page 0 */ ++ reg_w(gspca_dev, index, value); + +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0xdc, 0x01); ++ reg_w(gspca_dev, 0xdc, 0x01); + } +- return ret; ++ return gspca_dev->usb_err; + } + + static int sd_chip_ident(struct gspca_dev *gspca_dev, +@@ -1210,8 +1162,39 @@ static int sd_chip_ident(struct gspca_dev *gspca_dev, + } + #endif + ++#ifdef CONFIG_INPUT ++static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, ++ u8 *data, /* interrupt packet data */ ++ int len) /* interrput packet length */ ++{ ++ int ret = -EINVAL; ++ u8 data0, data1; ++ ++ if (len == 2) { ++ data0 = data[0]; ++ data1 = data[1]; ++ if ((data0 == 0x00 && data1 == 0x11) || ++ (data0 == 0x22 && data1 == 0x33) || ++ (data0 == 0x44 && data1 == 0x55) || ++ (data0 == 0x66 && data1 == 0x77) || ++ (data0 == 0x88 && data1 == 0x99) || ++ (data0 == 0xaa && data1 == 0xbb) || ++ (data0 == 0xcc && data1 == 0xdd) || ++ (data0 == 0xee && data1 == 0xff)) { ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); ++ input_sync(gspca_dev->input_dev); ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); ++ input_sync(gspca_dev->input_dev); ++ ret = 0; ++ } ++ } ++ ++ return ret; ++} ++#endif ++ + /* sub-driver description for pac7302 */ +-static struct sd_desc sd_desc = { ++static const struct sd_desc sd_desc = { + .name = MODULE_NAME, + .ctrls = sd_ctrls, + .nctrls = ARRAY_SIZE(sd_ctrls), +@@ -1226,6 +1209,9 @@ static struct sd_desc sd_desc = { + .set_register = sd_dbg_s_register, + .get_chip_ident = sd_chip_ident, + #endif ++#ifdef CONFIG_INPUT ++ .int_pkt_scan = sd_int_pkt_scan, ++#endif + }; + + /* -- module initialisation -- */ +diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c +index 42cfcdf..322140d 100644 +--- a/drivers/media/video/gspca/pac7311.c ++++ b/drivers/media/video/gspca/pac7311.c +@@ -51,6 +51,7 @@ + + #define MODULE_NAME "pac7311" + ++#include + #include "gspca.h" + + MODULE_AUTHOR("Thomas Kaiser thomas@kaiser-linux.li"); +@@ -88,7 +89,7 @@ static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + /* This control is for both the 7302 and the 7311 */ + { + { +@@ -200,7 +201,6 @@ static const struct v4l2_pix_format vga_mode[] = { + .priv = 0}, + }; + +-#define LOAD_PAGE3 255 + #define LOAD_PAGE4 254 + #define END_OF_SEQUENCE 0 + +@@ -259,12 +259,14 @@ static const __u8 page4_7311[] = { + 0x23, 0x28, 0x04, 0x11, 0x00, 0x00 + }; + +-static int reg_w_buf(struct gspca_dev *gspca_dev, ++static void reg_w_buf(struct gspca_dev *gspca_dev, + __u8 index, + const char *buffer, int len) + { + int ret; + ++ if (gspca_dev->usb_err < 0) ++ return; + memcpy(gspca_dev->usb_buf, buffer, len); + ret = usb_control_msg(gspca_dev->dev, + usb_sndctrlpipe(gspca_dev->dev, 0), +@@ -273,20 +275,44 @@ static int reg_w_buf(struct gspca_dev *gspca_dev, + 0, /* value */ + index, gspca_dev->usb_buf, len, + 500); +- if (ret < 0) 
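As with the pac7302 changes above, the pac7311 register helpers below stop returning an error code and instead latch the first USB error into gspca_dev->usb_err; once that field is negative every later helper returns early, and the entry points (sd_init, sd_start, the sd_set*() control handlers) just report gspca_dev->usb_err at the end. A self-contained sketch of the pattern (generic C, not the gspca API; toy_dev, fake_usb_write and the other names are made up for illustration):

#include <stdio.h>

/* Toy model of the "latch the first error" style used in this patch:
 * low-level writers become void, remember the first failure, and turn
 * into no-ops afterwards; the caller checks the latched error once. */
struct toy_dev {
	int usb_err;			/* 0, or the first negative error code */
};

static int fake_usb_write(int reg, int value)
{
	return reg == 0x42 ? -5 : 0;	/* pretend register 0x42 always fails */
}

static void reg_write(struct toy_dev *dev, int reg, int value)
{
	int ret;

	if (dev->usb_err < 0)		/* an earlier write failed: do nothing */
		return;
	ret = fake_usb_write(reg, value);
	if (ret < 0)
		dev->usb_err = ret;	/* remember only the first error */
}

static int configure(struct toy_dev *dev)
{
	reg_write(dev, 0xff, 0x04);	/* no "if (0 <= ret)" chains needed */
	reg_write(dev, 0x42, 0x01);	/* fails, latches the error */
	reg_write(dev, 0x11, 0x01);	/* silently skipped */
	return dev->usb_err;		/* single check at the entry point */
}

int main(void)
{
	struct toy_dev dev = { .usb_err = 0 };

	printf("configure() returned %d\n", configure(&dev));
	return 0;
}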
++ if (ret < 0) { + PDEBUG(D_ERR, "reg_w_buf(): " + "Failed to write registers to index 0x%x, error %i", + index, ret); +- return ret; ++ gspca_dev->usb_err = ret; ++ } + } + ++#if 0 /* not used */ ++static __u8 reg_r(struct gspca_dev *gspca_dev, ++ __u8 index) ++{ ++ int ret; ++ ++ ret = usb_control_msg(gspca_dev->dev, ++ usb_rcvctrlpipe(gspca_dev->dev, 0), ++ 0, /* request */ ++ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ++ 0, /* value */ ++ index, gspca_dev->usb_buf, 1, ++ 500); ++ if (ret < 0) ++ PDEBUG(D_ERR, "reg_r(): " ++ "Failed to read register from index 0x%x, error %i", ++ index, ret); + +-static int reg_w(struct gspca_dev *gspca_dev, ++ return gspca_dev->usb_buf[0]; ++} ++#endif ++ ++static void reg_w(struct gspca_dev *gspca_dev, + __u8 index, + __u8 value) + { + int ret; + ++ if (gspca_dev->usb_err < 0) ++ return; + gspca_dev->usb_buf[0] = value; + ret = usb_control_msg(gspca_dev->dev, + usb_sndctrlpipe(gspca_dev->dev, 0), +@@ -294,32 +320,32 @@ static int reg_w(struct gspca_dev *gspca_dev, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0, index, gspca_dev->usb_buf, 1, + 500); +- if (ret < 0) ++ if (ret < 0) { + PDEBUG(D_ERR, "reg_w(): " + "Failed to write register to index 0x%x, value 0x%x, error %i", + index, value, ret); +- return ret; ++ gspca_dev->usb_err = ret; ++ } + } + +-static int reg_w_seq(struct gspca_dev *gspca_dev, ++static void reg_w_seq(struct gspca_dev *gspca_dev, + const __u8 *seq, int len) + { +- int ret = 0; + while (--len >= 0) { +- if (0 <= ret) +- ret = reg_w(gspca_dev, seq[0], seq[1]); ++ reg_w(gspca_dev, seq[0], seq[1]); + seq += 2; + } +- return ret; + } + + /* load the beginning of a page */ +-static int reg_w_page(struct gspca_dev *gspca_dev, ++static void reg_w_page(struct gspca_dev *gspca_dev, + const __u8 *page, int len) + { + int index; + int ret = 0; + ++ if (gspca_dev->usb_err < 0) ++ return; + for (index = 0; index < len; index++) { + if (page[index] == SKIP) /* skip this index */ + continue; +@@ -335,56 +361,47 @@ static int reg_w_page(struct gspca_dev *gspca_dev, + "Failed to write register to index 0x%x, " + "value 0x%x, error %i", + index, page[index], ret); ++ gspca_dev->usb_err = ret; + break; + } + } +- return ret; + } + + /* output a variable sequence */ +-static int reg_w_var(struct gspca_dev *gspca_dev, ++static void reg_w_var(struct gspca_dev *gspca_dev, + const __u8 *seq, +- const __u8 *page3, unsigned int page3_len, + const __u8 *page4, unsigned int page4_len) + { + int index, len; +- int ret = 0; + + for (;;) { + index = *seq++; + len = *seq++; + switch (len) { + case END_OF_SEQUENCE: +- return ret; ++ return; + case LOAD_PAGE4: +- ret = reg_w_page(gspca_dev, page4, page4_len); +- break; +- case LOAD_PAGE3: +- ret = reg_w_page(gspca_dev, page3, page3_len); ++ reg_w_page(gspca_dev, page4, page4_len); + break; + default: + if (len > USB_BUF_SZ) { + PDEBUG(D_ERR|D_STREAM, + "Incorrect variable sequence"); +- return -EINVAL; ++ return; + } + while (len > 0) { + if (len < 8) { +- ret = reg_w_buf(gspca_dev, ++ reg_w_buf(gspca_dev, + index, seq, len); +- if (ret < 0) +- return ret; + seq += len; + break; + } +- ret = reg_w_buf(gspca_dev, index, seq, 8); ++ reg_w_buf(gspca_dev, index, seq, 8); + seq += 8; + index += 8; + len -= 8; + } + } +- if (ret < 0) +- return ret; + } + /* not reached */ + } +@@ -412,46 +429,36 @@ static int sd_config(struct gspca_dev *gspca_dev, + } + + /* This function is used by pac7311 only */ +-static int setcontrast(struct gspca_dev *gspca_dev) ++static void setcontrast(struct gspca_dev 
*gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +- int ret; + +- ret = reg_w(gspca_dev, 0xff, 0x04); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x10, sd->contrast >> 4); ++ reg_w(gspca_dev, 0xff, 0x04); ++ reg_w(gspca_dev, 0x10, sd->contrast >> 4); + /* load registers to sensor (Bit 0, auto clear) */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x11, 0x01); +- return ret; ++ reg_w(gspca_dev, 0x11, 0x01); + } + +-static int setgain(struct gspca_dev *gspca_dev) ++static void setgain(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + int gain = GAIN_MAX - sd->gain; +- int ret; + + if (gain < 1) + gain = 1; + else if (gain > 245) + gain = 245; +- ret = reg_w(gspca_dev, 0xff, 0x04); /* page 4 */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x0e, 0x00); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x0f, gain); ++ reg_w(gspca_dev, 0xff, 0x04); /* page 4 */ ++ reg_w(gspca_dev, 0x0e, 0x00); ++ reg_w(gspca_dev, 0x0f, gain); + + /* load registers to sensor (Bit 0, auto clear) */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x11, 0x01); +- return ret; ++ reg_w(gspca_dev, 0x11, 0x01); + } + +-static int setexposure(struct gspca_dev *gspca_dev) ++static void setexposure(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +- int ret; + __u8 reg; + + /* register 2 of frame 3/4 contains the clock divider configuring the +@@ -463,94 +470,72 @@ static int setexposure(struct gspca_dev *gspca_dev) + else if (reg > 63) + reg = 63; + +- ret = reg_w(gspca_dev, 0xff, 0x04); /* page 4 */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x02, reg); ++ reg_w(gspca_dev, 0xff, 0x04); /* page 4 */ ++ reg_w(gspca_dev, 0x02, reg); ++ + /* Page 1 register 8 must always be 0x08 except when not in + 640x480 mode and Page3/4 reg 2 <= 3 then it must be 9 */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0xff, 0x01); ++ reg_w(gspca_dev, 0xff, 0x01); + if (gspca_dev->cam.cam_mode[(int)gspca_dev->curr_mode].priv && + reg <= 3) { +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x08, 0x09); ++ reg_w(gspca_dev, 0x08, 0x09); + } else { +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x08, 0x08); ++ reg_w(gspca_dev, 0x08, 0x08); + } + + /* load registers to sensor (Bit 0, auto clear) */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x11, 0x01); +- return ret; ++ reg_w(gspca_dev, 0x11, 0x01); + } + +-static int sethvflip(struct gspca_dev *gspca_dev) ++static void sethvflip(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +- int ret; + __u8 data; + +- ret = reg_w(gspca_dev, 0xff, 0x04); /* page 4 */ ++ reg_w(gspca_dev, 0xff, 0x04); /* page 4 */ + data = (sd->hflip ? 0x04 : 0x00) | (sd->vflip ? 
0x08 : 0x00); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x21, data); ++ reg_w(gspca_dev, 0x21, data); ++ + /* load registers to sensor (Bit 0, auto clear) */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x11, 0x01); +- return ret; ++ reg_w(gspca_dev, 0x11, 0x01); + } + + /* this function is called at probe and resume time for pac7311 */ + static int sd_init(struct gspca_dev *gspca_dev) + { +- return reg_w_seq(gspca_dev, init_7311, sizeof(init_7311)/2); ++ reg_w_seq(gspca_dev, init_7311, sizeof(init_7311)/2); ++ return gspca_dev->usb_err; + } + + static int sd_start(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +- int ret; + + sd->sof_read = 0; + +- ret = reg_w_var(gspca_dev, start_7311, +- NULL, 0, ++ reg_w_var(gspca_dev, start_7311, + page4_7311, sizeof(page4_7311)); +- if (0 <= ret) +- ret = setcontrast(gspca_dev); +- if (0 <= ret) +- ret = setgain(gspca_dev); +- if (0 <= ret) +- ret = setexposure(gspca_dev); +- if (0 <= ret) +- ret = sethvflip(gspca_dev); ++ setcontrast(gspca_dev); ++ setgain(gspca_dev); ++ setexposure(gspca_dev); ++ sethvflip(gspca_dev); + + /* set correct resolution */ + switch (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) { + case 2: /* 160x120 pac7311 */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0xff, 0x01); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x17, 0x20); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x87, 0x10); ++ reg_w(gspca_dev, 0xff, 0x01); ++ reg_w(gspca_dev, 0x17, 0x20); ++ reg_w(gspca_dev, 0x87, 0x10); + break; + case 1: /* 320x240 pac7311 */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0xff, 0x01); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x17, 0x30); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x87, 0x11); ++ reg_w(gspca_dev, 0xff, 0x01); ++ reg_w(gspca_dev, 0x17, 0x30); ++ reg_w(gspca_dev, 0x87, 0x11); + break; + case 0: /* 640x480 */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0xff, 0x01); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x17, 0x00); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x87, 0x12); ++ reg_w(gspca_dev, 0xff, 0x01); ++ reg_w(gspca_dev, 0x17, 0x00); ++ reg_w(gspca_dev, 0x87, 0x12); + break; + } + +@@ -559,37 +544,24 @@ static int sd_start(struct gspca_dev *gspca_dev) + atomic_set(&sd->avg_lum, -1); + + /* start stream */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0xff, 0x01); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x78, 0x05); ++ reg_w(gspca_dev, 0xff, 0x01); ++ reg_w(gspca_dev, 0x78, 0x05); + +- return ret; ++ return gspca_dev->usb_err; + } + + static void sd_stopN(struct gspca_dev *gspca_dev) + { +- int ret; +- +- ret = reg_w(gspca_dev, 0xff, 0x04); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x27, 0x80); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x28, 0xca); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x29, 0x53); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x2a, 0x0e); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0xff, 0x01); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x3e, 0x20); +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_6=LED */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_6=LED */ +- if (0 <= ret) +- ret = reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_6=LED */ ++ reg_w(gspca_dev, 0xff, 0x04); ++ reg_w(gspca_dev, 0x27, 0x80); ++ reg_w(gspca_dev, 0x28, 0xca); ++ reg_w(gspca_dev, 0x29, 0x53); ++ reg_w(gspca_dev, 0x2a, 0x0e); ++ reg_w(gspca_dev, 0xff, 0x01); ++ reg_w(gspca_dev, 0x3e, 0x20); ++ reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_6=LED */ ++ reg_w(gspca_dev, 0x78, 0x44); /* 
Bit_0=start stream, Bit_6=LED */ ++ reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_6=LED */ + } + + /* called on streamoff with alt 0 and on disconnect for 7311 */ +@@ -734,7 +706,7 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val) + if (gspca_dev->streaming) { + setcontrast(gspca_dev); + } +- return 0; ++ return gspca_dev->usb_err; + } + + static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val) +@@ -752,7 +724,7 @@ static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val) + sd->gain = val; + if (gspca_dev->streaming) + setgain(gspca_dev); +- return 0; ++ return gspca_dev->usb_err; + } + + static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val) +@@ -770,7 +742,7 @@ static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val) + sd->exposure = val; + if (gspca_dev->streaming) + setexposure(gspca_dev); +- return 0; ++ return gspca_dev->usb_err; + } + + static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val) +@@ -801,7 +773,7 @@ static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val) + } + } + +- return 0; ++ return gspca_dev->usb_err; + } + + static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val) +@@ -819,7 +791,7 @@ static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val) + sd->hflip = val; + if (gspca_dev->streaming) + sethvflip(gspca_dev); +- return 0; ++ return gspca_dev->usb_err; + } + + static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val) +@@ -837,7 +809,7 @@ static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val) + sd->vflip = val; + if (gspca_dev->streaming) + sethvflip(gspca_dev); +- return 0; ++ return gspca_dev->usb_err; + } + + static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val) +@@ -848,8 +820,39 @@ static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val) + return 0; + } + ++#ifdef CONFIG_INPUT ++static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, ++ u8 *data, /* interrupt packet data */ ++ int len) /* interrupt packet length */ ++{ ++ int ret = -EINVAL; ++ u8 data0, data1; ++ ++ if (len == 2) { ++ data0 = data[0]; ++ data1 = data[1]; ++ if ((data0 == 0x00 && data1 == 0x11) || ++ (data0 == 0x22 && data1 == 0x33) || ++ (data0 == 0x44 && data1 == 0x55) || ++ (data0 == 0x66 && data1 == 0x77) || ++ (data0 == 0x88 && data1 == 0x99) || ++ (data0 == 0xaa && data1 == 0xbb) || ++ (data0 == 0xcc && data1 == 0xdd) || ++ (data0 == 0xee && data1 == 0xff)) { ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); ++ input_sync(gspca_dev->input_dev); ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); ++ input_sync(gspca_dev->input_dev); ++ ret = 0; ++ } ++ } ++ ++ return ret; ++} ++#endif ++ + /* sub-driver description for pac7311 */ +-static struct sd_desc sd_desc = { ++static const struct sd_desc sd_desc = { + .name = MODULE_NAME, + .ctrls = sd_ctrls, + .nctrls = ARRAY_SIZE(sd_ctrls), +@@ -860,6 +863,9 @@ static struct sd_desc sd_desc = { + .stop0 = sd_stop0, + .pkt_scan = sd_pkt_scan, + .dq_callback = do_autogain, ++#ifdef CONFIG_INPUT ++ .int_pkt_scan = sd_int_pkt_scan, ++#endif + }; + + /* -- module initialisation -- */ +diff --git a/drivers/media/video/gspca/pac_common.h b/drivers/media/video/gspca/pac_common.h +index 20f67d9..8462a7c 100644 +--- a/drivers/media/video/gspca/pac_common.h ++++ b/drivers/media/video/gspca/pac_common.h +@@ -24,11 +24,10 @@ + */ + + /* We calculate the autogain at the end of the transfer of a frame, at this +- moment a frame with the old settings is being transmitted, and a frame is +- being captured with the old 
settings. So if we adjust the autogain we must +- ignore atleast the 2 next frames for the new settings to come into effect +- before doing any other adjustments */ +-#define PAC_AUTOGAIN_IGNORE_FRAMES 3 ++ moment a frame with the old settings is being captured and transmitted. So ++ if we adjust the gain or exposure we must ignore atleast the next frame for ++ the new settings to come into effect before doing any other adjustments. */ ++#define PAC_AUTOGAIN_IGNORE_FRAMES 2 + + static const unsigned char pac_sof_marker[5] = + { 0xff, 0xff, 0x00, 0xff, 0x96 }; +diff --git a/drivers/media/video/gspca/sn9c2028.c b/drivers/media/video/gspca/sn9c2028.c +new file mode 100644 +index 0000000..dda5fd4 +--- /dev/null ++++ b/drivers/media/video/gspca/sn9c2028.c +@@ -0,0 +1,757 @@ ++/* ++ * SN9C2028 library ++ * ++ * Copyright (C) 2009 Theodore Kilgore ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++#define MODULE_NAME "sn9c2028" ++ ++#include "gspca.h" ++ ++MODULE_AUTHOR("Theodore Kilgore"); ++MODULE_DESCRIPTION("Sonix SN9C2028 USB Camera Driver"); ++MODULE_LICENSE("GPL"); ++ ++/* specific webcam descriptor */ ++struct sd { ++ struct gspca_dev gspca_dev; /* !! must be the first item */ ++ u8 sof_read; ++ u16 model; ++}; ++ ++struct init_command { ++ unsigned char instruction[6]; ++ unsigned char to_read; /* length to read. 
0 means no reply requested */ ++}; ++ ++/* V4L2 controls supported by the driver */ ++static struct ctrl sd_ctrls[] = { ++}; ++ ++/* How to change the resolution of any of the VGA cams is unknown */ ++static const struct v4l2_pix_format vga_mode[] = { ++ {640, 480, V4L2_PIX_FMT_SN9C2028, V4L2_FIELD_NONE, ++ .bytesperline = 640, ++ .sizeimage = 640 * 480 * 3 / 4, ++ .colorspace = V4L2_COLORSPACE_SRGB, ++ .priv = 0}, ++}; ++ ++/* No way to change the resolution of the CIF cams is known */ ++static const struct v4l2_pix_format cif_mode[] = { ++ {352, 288, V4L2_PIX_FMT_SN9C2028, V4L2_FIELD_NONE, ++ .bytesperline = 352, ++ .sizeimage = 352 * 288 * 3 / 4, ++ .colorspace = V4L2_COLORSPACE_SRGB, ++ .priv = 0}, ++}; ++ ++/* the bytes to write are in gspca_dev->usb_buf */ ++static int sn9c2028_command(struct gspca_dev *gspca_dev, u8 *command) ++{ ++ int rc; ++ ++ PDEBUG(D_USBO, "sending command %02x%02x%02x%02x%02x%02x", command[0], ++ command[1], command[2], command[3], command[4], command[5]); ++ ++ memcpy(gspca_dev->usb_buf, command, 6); ++ rc = usb_control_msg(gspca_dev->dev, ++ usb_sndctrlpipe(gspca_dev->dev, 0), ++ USB_REQ_GET_CONFIGURATION, ++ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, ++ 2, 0, gspca_dev->usb_buf, 6, 500); ++ if (rc < 0) { ++ PDEBUG(D_ERR, "command write [%02x] error %d", ++ gspca_dev->usb_buf[0], rc); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static int sn9c2028_read1(struct gspca_dev *gspca_dev) ++{ ++ int rc; ++ ++ rc = usb_control_msg(gspca_dev->dev, ++ usb_rcvctrlpipe(gspca_dev->dev, 0), ++ USB_REQ_GET_STATUS, ++ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, ++ 1, 0, gspca_dev->usb_buf, 1, 500); ++ if (rc != 1) { ++ PDEBUG(D_ERR, "read1 error %d", rc); ++ return (rc < 0) ? rc : -EIO; ++ } ++ PDEBUG(D_USBI, "read1 response %02x", gspca_dev->usb_buf[0]); ++ return gspca_dev->usb_buf[0]; ++} ++ ++static int sn9c2028_read4(struct gspca_dev *gspca_dev, u8 *reading) ++{ ++ int rc; ++ rc = usb_control_msg(gspca_dev->dev, ++ usb_rcvctrlpipe(gspca_dev->dev, 0), ++ USB_REQ_GET_STATUS, ++ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, ++ 4, 0, gspca_dev->usb_buf, 4, 500); ++ if (rc != 4) { ++ PDEBUG(D_ERR, "read4 error %d", rc); ++ return (rc < 0) ? rc : -EIO; ++ } ++ memcpy(reading, gspca_dev->usb_buf, 4); ++ PDEBUG(D_USBI, "read4 response %02x%02x%02x%02x", reading[0], ++ reading[1], reading[2], reading[3]); ++ return rc; ++} ++ ++static int sn9c2028_long_command(struct gspca_dev *gspca_dev, u8 *command) ++{ ++ int i, status; ++ __u8 reading[4]; ++ ++ status = sn9c2028_command(gspca_dev, command); ++ if (status < 0) ++ return status; ++ ++ status = -1; ++ for (i = 0; i < 256 && status < 2; i++) ++ status = sn9c2028_read1(gspca_dev); ++ if (status != 2) { ++ PDEBUG(D_ERR, "long command status read error %d", status); ++ return (status < 0) ? 
status : -EIO; ++ } ++ ++ memset(reading, 0, 4); ++ status = sn9c2028_read4(gspca_dev, reading); ++ if (status < 0) ++ return status; ++ ++ /* in general, the first byte of the response is the first byte of ++ * the command, or'ed with 8 */ ++ status = sn9c2028_read1(gspca_dev); ++ if (status < 0) ++ return status; ++ ++ return 0; ++} ++ ++static int sn9c2028_short_command(struct gspca_dev *gspca_dev, u8 *command) ++{ ++ int err_code; ++ ++ err_code = sn9c2028_command(gspca_dev, command); ++ if (err_code < 0) ++ return err_code; ++ ++ err_code = sn9c2028_read1(gspca_dev); ++ if (err_code < 0) ++ return err_code; ++ ++ return 0; ++} ++ ++/* this function is called at probe time */ ++static int sd_config(struct gspca_dev *gspca_dev, ++ const struct usb_device_id *id) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ struct cam *cam = &gspca_dev->cam; ++ ++ PDEBUG(D_PROBE, "SN9C2028 camera detected (vid/pid 0x%04X:0x%04X)", ++ id->idVendor, id->idProduct); ++ ++ sd->model = id->idProduct; ++ ++ switch (sd->model) { ++ case 0x7005: ++ PDEBUG(D_PROBE, "Genius Smart 300 camera"); ++ break; ++ case 0x8000: ++ PDEBUG(D_PROBE, "DC31VC"); ++ break; ++ case 0x8001: ++ PDEBUG(D_PROBE, "Spy camera"); ++ break; ++ case 0x8003: ++ PDEBUG(D_PROBE, "CIF camera"); ++ break; ++ case 0x8008: ++ PDEBUG(D_PROBE, "Mini-Shotz ms-350 camera"); ++ break; ++ case 0x800a: ++ PDEBUG(D_PROBE, "Vivitar 3350b type camera"); ++ cam->input_flags = V4L2_IN_ST_VFLIP | V4L2_IN_ST_HFLIP; ++ break; ++ } ++ ++ switch (sd->model) { ++ case 0x8000: ++ case 0x8001: ++ case 0x8003: ++ cam->cam_mode = cif_mode; ++ cam->nmodes = ARRAY_SIZE(cif_mode); ++ break; ++ default: ++ cam->cam_mode = vga_mode; ++ cam->nmodes = ARRAY_SIZE(vga_mode); ++ } ++ return 0; ++} ++ ++/* this function is called at probe and resume time */ ++static int sd_init(struct gspca_dev *gspca_dev) ++{ ++ int status = -1; ++ ++ sn9c2028_read1(gspca_dev); ++ sn9c2028_read1(gspca_dev); ++ status = sn9c2028_read1(gspca_dev); ++ ++ return (status < 0) ? 
status : 0; ++} ++ ++static int run_start_commands(struct gspca_dev *gspca_dev, ++ struct init_command *cam_commands, int n) ++{ ++ int i, err_code = -1; ++ ++ for (i = 0; i < n; i++) { ++ switch (cam_commands[i].to_read) { ++ case 4: ++ err_code = sn9c2028_long_command(gspca_dev, ++ cam_commands[i].instruction); ++ break; ++ case 1: ++ err_code = sn9c2028_short_command(gspca_dev, ++ cam_commands[i].instruction); ++ break; ++ case 0: ++ err_code = sn9c2028_command(gspca_dev, ++ cam_commands[i].instruction); ++ break; ++ } ++ if (err_code < 0) ++ return err_code; ++ } ++ return 0; ++} ++ ++static int start_spy_cam(struct gspca_dev *gspca_dev) ++{ ++ struct init_command spy_start_commands[] = { ++ {{0x0c, 0x01, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x20, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x21, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x22, 0x01, 0x04, 0x00, 0x00}, 4}, ++ {{0x13, 0x23, 0x01, 0x03, 0x00, 0x00}, 4}, ++ {{0x13, 0x24, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x25, 0x01, 0x16, 0x00, 0x00}, 4}, /* width 352 */ ++ {{0x13, 0x26, 0x01, 0x12, 0x00, 0x00}, 4}, /* height 288 */ ++ /* {{0x13, 0x27, 0x01, 0x28, 0x00, 0x00}, 4}, */ ++ {{0x13, 0x27, 0x01, 0x68, 0x00, 0x00}, 4}, ++ {{0x13, 0x28, 0x01, 0x09, 0x00, 0x00}, 4}, /* red gain ?*/ ++ /* {{0x13, 0x28, 0x01, 0x00, 0x00, 0x00}, 4}, */ ++ {{0x13, 0x29, 0x01, 0x00, 0x00, 0x00}, 4}, ++ /* {{0x13, 0x29, 0x01, 0x0c, 0x00, 0x00}, 4}, */ ++ {{0x13, 0x2a, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x2b, 0x01, 0x00, 0x00, 0x00}, 4}, ++ /* {{0x13, 0x2c, 0x01, 0x02, 0x00, 0x00}, 4}, */ ++ {{0x13, 0x2c, 0x01, 0x02, 0x00, 0x00}, 4}, ++ {{0x13, 0x2d, 0x01, 0x02, 0x00, 0x00}, 4}, ++ /* {{0x13, 0x2e, 0x01, 0x09, 0x00, 0x00}, 4}, */ ++ {{0x13, 0x2e, 0x01, 0x09, 0x00, 0x00}, 4}, ++ {{0x13, 0x2f, 0x01, 0x07, 0x00, 0x00}, 4}, ++ {{0x12, 0x34, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x34, 0x01, 0xa1, 0x00, 0x00}, 4}, ++ {{0x13, 0x35, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x02, 0x06, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x03, 0x13, 0x00, 0x00, 0x00}, 4}, /*don't mess with*/ ++ /*{{0x11, 0x04, 0x06, 0x00, 0x00, 0x00}, 4}, observed */ ++ {{0x11, 0x04, 0x00, 0x00, 0x00, 0x00}, 4}, /* brighter */ ++ /*{{0x11, 0x05, 0x65, 0x00, 0x00, 0x00}, 4}, observed */ ++ {{0x11, 0x05, 0x00, 0x00, 0x00, 0x00}, 4}, /* brighter */ ++ {{0x11, 0x06, 0xb1, 0x00, 0x00, 0x00}, 4}, /* observed */ ++ {{0x11, 0x07, 0x00, 0x00, 0x00, 0x00}, 4}, ++ /*{{0x11, 0x08, 0x06, 0x00, 0x00, 0x00}, 4}, observed */ ++ {{0x11, 0x08, 0x0b, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x09, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x0a, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x0b, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x0c, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x0d, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x0e, 0x04, 0x00, 0x00, 0x00}, 4}, ++ /* {{0x11, 0x0f, 0x00, 0x00, 0x00, 0x00}, 4}, */ ++ /* brightness or gain. 0 is default. 
4 is good ++ * indoors at night with incandescent lighting */ ++ {{0x11, 0x0f, 0x04, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x10, 0x06, 0x00, 0x00, 0x00}, 4}, /*hstart or hoffs*/ ++ {{0x11, 0x11, 0x06, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x12, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x14, 0x02, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x13, 0x01, 0x00, 0x00, 0x00}, 4}, ++ /* {{0x1b, 0x02, 0x06, 0x00, 0x00, 0x00}, 1}, observed */ ++ {{0x1b, 0x02, 0x11, 0x00, 0x00, 0x00}, 1}, /* brighter */ ++ /* {{0x1b, 0x13, 0x01, 0x00, 0x00, 0x00}, 1}, observed */ ++ {{0x1b, 0x13, 0x11, 0x00, 0x00, 0x00}, 1}, ++ {{0x20, 0x34, 0xa1, 0x00, 0x00, 0x00}, 1}, /* compresses */ ++ /* Camera should start to capture now. */ ++ }; ++ ++ return run_start_commands(gspca_dev, spy_start_commands, ++ ARRAY_SIZE(spy_start_commands)); ++} ++ ++static int start_cif_cam(struct gspca_dev *gspca_dev) ++{ ++ struct init_command cif_start_commands[] = { ++ {{0x0c, 0x01, 0x00, 0x00, 0x00, 0x00}, 4}, ++ /* The entire sequence below seems redundant */ ++ /* {{0x13, 0x20, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x21, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x22, 0x01, 0x06, 0x00, 0x00}, 4}, ++ {{0x13, 0x23, 0x01, 0x02, 0x00, 0x00}, 4}, ++ {{0x13, 0x24, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x25, 0x01, 0x16, 0x00, 0x00}, 4}, width? ++ {{0x13, 0x26, 0x01, 0x12, 0x00, 0x00}, 4}, height? ++ {{0x13, 0x27, 0x01, 0x68, 0x00, 0x00}, 4}, subsample? ++ {{0x13, 0x28, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x29, 0x01, 0x20, 0x00, 0x00}, 4}, ++ {{0x13, 0x2a, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x2b, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x2c, 0x01, 0x02, 0x00, 0x00}, 4}, ++ {{0x13, 0x2d, 0x01, 0x03, 0x00, 0x00}, 4}, ++ {{0x13, 0x2e, 0x01, 0x0f, 0x00, 0x00}, 4}, ++ {{0x13, 0x2f, 0x01, 0x0c, 0x00, 0x00}, 4}, ++ {{0x12, 0x34, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x34, 0x01, 0xa1, 0x00, 0x00}, 4}, ++ {{0x13, 0x35, 0x01, 0x00, 0x00, 0x00}, 4},*/ ++ {{0x1b, 0x21, 0x00, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x17, 0x00, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x19, 0x00, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x02, 0x06, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x03, 0x5a, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x04, 0x27, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x05, 0x01, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x12, 0x14, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x13, 0x00, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x14, 0x00, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x15, 0x00, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x16, 0x00, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x77, 0xa2, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x06, 0x0f, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x07, 0x14, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x08, 0x0f, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x09, 0x10, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x0e, 0x00, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x0f, 0x00, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x12, 0x07, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x10, 0x1f, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x11, 0x01, 0x00, 0x00, 0x00}, 1}, ++ {{0x13, 0x25, 0x01, 0x16, 0x00, 0x00}, 1}, /* width/8 */ ++ {{0x13, 0x26, 0x01, 0x12, 0x00, 0x00}, 1}, /* height/8 */ ++ /* {{0x13, 0x27, 0x01, 0x68, 0x00, 0x00}, 4}, subsample? ++ * {{0x13, 0x28, 0x01, 0x1e, 0x00, 0x00}, 4}, does nothing ++ * {{0x13, 0x27, 0x01, 0x20, 0x00, 0x00}, 4}, */ ++ /* {{0x13, 0x29, 0x01, 0x22, 0x00, 0x00}, 4}, ++ * causes subsampling ++ * but not a change in the resolution setting! 
*/ ++ {{0x13, 0x2c, 0x01, 0x02, 0x00, 0x00}, 4}, ++ {{0x13, 0x2d, 0x01, 0x01, 0x00, 0x00}, 4}, ++ {{0x13, 0x2e, 0x01, 0x08, 0x00, 0x00}, 4}, ++ {{0x13, 0x2f, 0x01, 0x06, 0x00, 0x00}, 4}, ++ {{0x13, 0x28, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x1b, 0x04, 0x6d, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x05, 0x03, 0x00, 0x00, 0x00}, 1}, ++ {{0x20, 0x36, 0x06, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x0e, 0x01, 0x00, 0x00, 0x00}, 1}, ++ {{0x12, 0x27, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x1b, 0x0f, 0x00, 0x00, 0x00, 0x00}, 1}, ++ {{0x20, 0x36, 0x05, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x10, 0x0f, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x02, 0x06, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x11, 0x01, 0x00, 0x00, 0x00}, 1}, ++ {{0x20, 0x34, 0xa1, 0x00, 0x00, 0x00}, 1},/* use compression */ ++ /* Camera should start to capture now. */ ++ }; ++ ++ return run_start_commands(gspca_dev, cif_start_commands, ++ ARRAY_SIZE(cif_start_commands)); ++} ++ ++static int start_ms350_cam(struct gspca_dev *gspca_dev) ++{ ++ struct init_command ms350_start_commands[] = { ++ {{0x0c, 0x01, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x16, 0x01, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x20, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x21, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x22, 0x01, 0x04, 0x00, 0x00}, 4}, ++ {{0x13, 0x23, 0x01, 0x03, 0x00, 0x00}, 4}, ++ {{0x13, 0x24, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x25, 0x01, 0x16, 0x00, 0x00}, 4}, ++ {{0x13, 0x26, 0x01, 0x12, 0x00, 0x00}, 4}, ++ {{0x13, 0x27, 0x01, 0x28, 0x00, 0x00}, 4}, ++ {{0x13, 0x28, 0x01, 0x09, 0x00, 0x00}, 4}, ++ {{0x13, 0x29, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x2a, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x2b, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x2c, 0x01, 0x02, 0x00, 0x00}, 4}, ++ {{0x13, 0x2d, 0x01, 0x03, 0x00, 0x00}, 4}, ++ {{0x13, 0x2e, 0x01, 0x0f, 0x00, 0x00}, 4}, ++ {{0x13, 0x2f, 0x01, 0x0c, 0x00, 0x00}, 4}, ++ {{0x12, 0x34, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x34, 0x01, 0xa1, 0x00, 0x00}, 4}, ++ {{0x13, 0x35, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x00, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x01, 0x70, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x02, 0x05, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x03, 0x5d, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x04, 0x07, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x05, 0x25, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x06, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x07, 0x09, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x08, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x09, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x0b, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x0c, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x0d, 0x0c, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x0e, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x0f, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x10, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x11, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x12, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x13, 0x63, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x15, 0x70, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x18, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x11, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x25, 0x01, 0x28, 0x00, 0x00}, 4}, /* width */ ++ {{0x13, 0x26, 0x01, 0x1e, 0x00, 0x00}, 4}, /* height */ ++ {{0x13, 0x28, 0x01, 0x09, 0x00, 0x00}, 4}, /* vstart? */ ++ {{0x13, 0x27, 0x01, 0x28, 0x00, 0x00}, 4}, ++ {{0x13, 0x29, 0x01, 0x40, 0x00, 0x00}, 4}, /* hstart? 
*/ ++ {{0x13, 0x2c, 0x01, 0x02, 0x00, 0x00}, 4}, ++ {{0x13, 0x2d, 0x01, 0x03, 0x00, 0x00}, 4}, ++ {{0x13, 0x2e, 0x01, 0x0f, 0x00, 0x00}, 4}, ++ {{0x13, 0x2f, 0x01, 0x0c, 0x00, 0x00}, 4}, ++ {{0x1b, 0x02, 0x05, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x11, 0x01, 0x00, 0x00, 0x00}, 1}, ++ {{0x20, 0x18, 0x00, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x02, 0x0a, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x11, 0x01, 0x00, 0x00, 0x00}, 0}, ++ /* Camera should start to capture now. */ ++ }; ++ ++ return run_start_commands(gspca_dev, ms350_start_commands, ++ ARRAY_SIZE(ms350_start_commands)); ++} ++ ++static int start_genius_cam(struct gspca_dev *gspca_dev) ++{ ++ struct init_command genius_start_commands[] = { ++ {{0x0c, 0x01, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x16, 0x01, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x10, 0x00, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x25, 0x01, 0x16, 0x00, 0x00}, 4}, ++ {{0x13, 0x26, 0x01, 0x12, 0x00, 0x00}, 4}, ++ /* "preliminary" width and height settings */ ++ {{0x13, 0x28, 0x01, 0x0e, 0x00, 0x00}, 4}, ++ {{0x13, 0x27, 0x01, 0x20, 0x00, 0x00}, 4}, ++ {{0x13, 0x29, 0x01, 0x22, 0x00, 0x00}, 4}, ++ {{0x13, 0x2c, 0x01, 0x02, 0x00, 0x00}, 4}, ++ {{0x13, 0x2d, 0x01, 0x02, 0x00, 0x00}, 4}, ++ {{0x13, 0x2e, 0x01, 0x09, 0x00, 0x00}, 4}, ++ {{0x13, 0x2f, 0x01, 0x07, 0x00, 0x00}, 4}, ++ {{0x11, 0x20, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x21, 0x2d, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x22, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x23, 0x03, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x10, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x11, 0x64, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x12, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x13, 0x91, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x14, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x15, 0x20, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x16, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x17, 0x60, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x20, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x21, 0x2d, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x22, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x23, 0x03, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x25, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x26, 0x02, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x27, 0x88, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x30, 0x38, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x31, 0x2a, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x32, 0x2a, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x33, 0x2a, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x34, 0x02, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x5b, 0x0a, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x25, 0x01, 0x28, 0x00, 0x00}, 4}, /* real width */ ++ {{0x13, 0x26, 0x01, 0x1e, 0x00, 0x00}, 4}, /* real height */ ++ {{0x13, 0x28, 0x01, 0x0e, 0x00, 0x00}, 4}, ++ {{0x13, 0x27, 0x01, 0x20, 0x00, 0x00}, 4}, ++ {{0x13, 0x29, 0x01, 0x62, 0x00, 0x00}, 4}, ++ {{0x13, 0x2c, 0x01, 0x02, 0x00, 0x00}, 4}, ++ {{0x13, 0x2d, 0x01, 0x03, 0x00, 0x00}, 4}, ++ {{0x13, 0x2e, 0x01, 0x0f, 0x00, 0x00}, 4}, ++ {{0x13, 0x2f, 0x01, 0x0c, 0x00, 0x00}, 4}, ++ {{0x11, 0x20, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x21, 0x2a, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x22, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x23, 0x28, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x10, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x11, 0x04, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x12, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x13, 0x03, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x14, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x15, 0xe0, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x16, 0x02, 0x00, 0x00, 0x00}, 4}, ++ {{0x11, 0x17, 0x80, 0x00, 0x00, 0x00}, 4}, ++ {{0x1c, 0x20, 0x00, 0x2a, 0x00, 0x00}, 1}, ++ {{0x1c, 0x20, 0x00, 0x2a, 0x00, 0x00}, 1}, ++ {{0x20, 0x34, 
0xa1, 0x00, 0x00, 0x00}, 0} ++ /* Camera should start to capture now. */ ++ }; ++ ++ return run_start_commands(gspca_dev, genius_start_commands, ++ ARRAY_SIZE(genius_start_commands)); ++} ++ ++static int start_vivitar_cam(struct gspca_dev *gspca_dev) ++{ ++ struct init_command vivitar_start_commands[] = { ++ {{0x0c, 0x01, 0x00, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x20, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x21, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x22, 0x01, 0x01, 0x00, 0x00}, 4}, ++ {{0x13, 0x23, 0x01, 0x01, 0x00, 0x00}, 4}, ++ {{0x13, 0x24, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x25, 0x01, 0x28, 0x00, 0x00}, 4}, ++ {{0x13, 0x26, 0x01, 0x1e, 0x00, 0x00}, 4}, ++ {{0x13, 0x27, 0x01, 0x20, 0x00, 0x00}, 4}, ++ {{0x13, 0x28, 0x01, 0x0a, 0x00, 0x00}, 4}, ++ /* ++ * Above is changed from OEM 0x0b. Fixes Bayer tiling. ++ * Presumably gives a vertical shift of one row. ++ */ ++ {{0x13, 0x29, 0x01, 0x20, 0x00, 0x00}, 4}, ++ /* Above seems to do horizontal shift. */ ++ {{0x13, 0x2a, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x2b, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x2c, 0x01, 0x02, 0x00, 0x00}, 4}, ++ {{0x13, 0x2d, 0x01, 0x03, 0x00, 0x00}, 4}, ++ {{0x13, 0x2e, 0x01, 0x0f, 0x00, 0x00}, 4}, ++ {{0x13, 0x2f, 0x01, 0x0c, 0x00, 0x00}, 4}, ++ /* Above three commands seem to relate to brightness. */ ++ {{0x12, 0x34, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x13, 0x34, 0x01, 0xa1, 0x00, 0x00}, 4}, ++ {{0x13, 0x35, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x1b, 0x12, 0x80, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x01, 0x77, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x02, 0x3a, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x12, 0x78, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x13, 0x00, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x14, 0x80, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x15, 0x34, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x1b, 0x04, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x20, 0x44, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x23, 0xee, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x26, 0xa0, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x27, 0x9a, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x28, 0xa0, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x29, 0x30, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x2a, 0x80, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x2b, 0x00, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x2f, 0x3d, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x30, 0x24, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x32, 0x86, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x60, 0xa9, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x61, 0x42, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x65, 0x00, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x69, 0x38, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x6f, 0x88, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x70, 0x0b, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x71, 0x00, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x74, 0x21, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x75, 0x86, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x76, 0x00, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x7d, 0xf3, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x17, 0x1c, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x18, 0xc0, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x19, 0x05, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x1a, 0xf6, 0x00, 0x00, 0x00}, 1}, ++ /* {{0x13, 0x25, 0x01, 0x28, 0x00, 0x00}, 4}, ++ {{0x13, 0x26, 0x01, 0x1e, 0x00, 0x00}, 4}, ++ {{0x13, 0x28, 0x01, 0x0b, 0x00, 0x00}, 4}, */ ++ {{0x20, 0x36, 0x06, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x10, 0x26, 0x00, 0x00, 0x00}, 1}, ++ {{0x12, 0x27, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x1b, 0x76, 0x03, 0x00, 0x00, 0x00}, 1}, ++ {{0x20, 0x36, 0x05, 0x00, 0x00, 0x00}, 1}, ++ {{0x1b, 0x00, 0x3f, 0x00, 0x00, 0x00}, 1}, ++ /* Above is brightness; OEM driver setting is 0x10 */ ++ {{0x12, 0x27, 0x01, 0x00, 0x00, 0x00}, 4}, ++ {{0x20, 
0x29, 0x30, 0x00, 0x00, 0x00}, 1}, ++ {{0x20, 0x34, 0xa1, 0x00, 0x00, 0x00}, 1} ++ }; ++ ++ return run_start_commands(gspca_dev, vivitar_start_commands, ++ ARRAY_SIZE(vivitar_start_commands)); ++} ++ ++static int sd_start(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ int err_code; ++ ++ sd->sof_read = 0; ++ ++ switch (sd->model) { ++ case 0x7005: ++ err_code = start_genius_cam(gspca_dev); ++ break; ++ case 0x8001: ++ err_code = start_spy_cam(gspca_dev); ++ break; ++ case 0x8003: ++ err_code = start_cif_cam(gspca_dev); ++ break; ++ case 0x8008: ++ err_code = start_ms350_cam(gspca_dev); ++ break; ++ case 0x800a: ++ err_code = start_vivitar_cam(gspca_dev); ++ break; ++ default: ++ PDEBUG(D_ERR, "Starting unknown camera, please report this"); ++ return -ENXIO; ++ } ++ ++ return err_code; ++} ++ ++static void sd_stopN(struct gspca_dev *gspca_dev) ++{ ++ int result; ++ __u8 data[6]; ++ ++ result = sn9c2028_read1(gspca_dev); ++ if (result < 0) ++ PDEBUG(D_ERR, "Camera Stop read failed"); ++ ++ memset(data, 0, 6); ++ data[0] = 0x14; ++ result = sn9c2028_command(gspca_dev, data); ++ if (result < 0) ++ PDEBUG(D_ERR, "Camera Stop command failed"); ++} ++ ++/* Include sn9c2028 sof detection functions */ ++#include "sn9c2028.h" ++ ++static void sd_pkt_scan(struct gspca_dev *gspca_dev, ++ __u8 *data, /* isoc packet */ ++ int len) /* iso packet length */ ++{ ++ unsigned char *sof; ++ ++ sof = sn9c2028_find_sof(gspca_dev, data, len); ++ if (sof) { ++ int n; ++ ++ /* finish decoding current frame */ ++ n = sof - data; ++ if (n > sizeof sn9c2028_sof_marker) ++ n -= sizeof sn9c2028_sof_marker; ++ else ++ n = 0; ++ gspca_frame_add(gspca_dev, LAST_PACKET, data, n); ++ /* Start next frame. */ ++ gspca_frame_add(gspca_dev, FIRST_PACKET, ++ sn9c2028_sof_marker, sizeof sn9c2028_sof_marker); ++ len -= sof - data; ++ data = sof; ++ } ++ gspca_frame_add(gspca_dev, INTER_PACKET, data, len); ++} ++ ++/* sub-driver description */ ++static const struct sd_desc sd_desc = { ++ .name = MODULE_NAME, ++ .ctrls = sd_ctrls, ++ .nctrls = ARRAY_SIZE(sd_ctrls), ++ .config = sd_config, ++ .init = sd_init, ++ .start = sd_start, ++ .stopN = sd_stopN, ++ .pkt_scan = sd_pkt_scan, ++}; ++ ++/* -- module initialisation -- */ ++static const __devinitdata struct usb_device_id device_table[] = { ++ {USB_DEVICE(0x0458, 0x7005)}, /* Genius Smart 300, version 2 */ ++ /* The Genius Smart is untested. I can't find an owner ! 
*/ ++ /* {USB_DEVICE(0x0c45, 0x8000)}, DC31VC, Don't know this camera */ ++ {USB_DEVICE(0x0c45, 0x8001)}, /* Wild Planet digital spy cam */ ++ {USB_DEVICE(0x0c45, 0x8003)}, /* Several small CIF cameras */ ++ /* {USB_DEVICE(0x0c45, 0x8006)}, Unknown VGA camera */ ++ {USB_DEVICE(0x0c45, 0x8008)}, /* Mini-Shotz ms-350 */ ++ {USB_DEVICE(0x0c45, 0x800a)}, /* Vivicam 3350B */ ++ {} ++}; ++MODULE_DEVICE_TABLE(usb, device_table); ++ ++/* -- device connect -- */ ++static int sd_probe(struct usb_interface *intf, ++ const struct usb_device_id *id) ++{ ++ return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), ++ THIS_MODULE); ++} ++ ++static struct usb_driver sd_driver = { ++ .name = MODULE_NAME, ++ .id_table = device_table, ++ .probe = sd_probe, ++ .disconnect = gspca_disconnect, ++#ifdef CONFIG_PM ++ .suspend = gspca_suspend, ++ .resume = gspca_resume, ++#endif ++}; ++ ++/* -- module insert / remove -- */ ++static int __init sd_mod_init(void) ++{ ++ int ret; ++ ++ ret = usb_register(&sd_driver); ++ if (ret < 0) ++ return ret; ++ PDEBUG(D_PROBE, "registered"); ++ return 0; ++} ++ ++static void __exit sd_mod_exit(void) ++{ ++ usb_deregister(&sd_driver); ++ PDEBUG(D_PROBE, "deregistered"); ++} ++ ++module_init(sd_mod_init); ++module_exit(sd_mod_exit); +diff --git a/drivers/media/video/gspca/sn9c2028.h b/drivers/media/video/gspca/sn9c2028.h +new file mode 100644 +index 0000000..8fd1d3e +--- /dev/null ++++ b/drivers/media/video/gspca/sn9c2028.h +@@ -0,0 +1,51 @@ ++/* ++ * SN9C2028 common functions ++ * ++ * Copyright (C) 2009 Theodore Kilgore ++ * ++ * Based closely upon the file gspca/pac_common.h ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++static const unsigned char sn9c2028_sof_marker[5] = ++ { 0xff, 0xff, 0x00, 0xc4, 0xc4 }; ++ ++static unsigned char *sn9c2028_find_sof(struct gspca_dev *gspca_dev, ++ unsigned char *m, int len) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ int i; ++ ++ /* Search for the SOF marker (fixed part) in the header */ ++ for (i = 0; i < len; i++) { ++ if (m[i] == sn9c2028_sof_marker[sd->sof_read]) { ++ sd->sof_read++; ++ if (sd->sof_read == sizeof(sn9c2028_sof_marker)) { ++ PDEBUG(D_FRAM, ++ "SOF found, bytes to analyze: %u." 
++ " Frame starts at byte #%u", ++ len, i + 1); ++ sd->sof_read = 0; ++ return m + i + 1; ++ } ++ } else { ++ sd->sof_read = 0; ++ } ++ } ++ ++ return NULL; ++} +diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c +index 0ca1c06..4a1bc08 100644 +--- a/drivers/media/video/gspca/sn9c20x.c ++++ b/drivers/media/video/gspca/sn9c20x.c +@@ -129,7 +129,7 @@ static int sd_getexposure(struct gspca_dev *gspca_dev, s32 *val); + static int sd_setautoexposure(struct gspca_dev *gspca_dev, s32 val); + static int sd_getautoexposure(struct gspca_dev *gspca_dev, s32 *val); + +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + { + #define BRIGHTNESS_IDX 0 + { +@@ -1506,36 +1506,36 @@ static int set_cmatrix(struct gspca_dev *gspca_dev) + struct sd *sd = (struct sd *) gspca_dev; + s32 hue_coord, hue_index = 180 + sd->hue; + u8 cmatrix[21]; +- memset(cmatrix, 0, 21); + ++ memset(cmatrix, 0, sizeof cmatrix); + cmatrix[2] = (sd->contrast * 0x25 / 0x100) + 0x26; + cmatrix[0] = 0x13 + (cmatrix[2] - 0x26) * 0x13 / 0x25; + cmatrix[4] = 0x07 + (cmatrix[2] - 0x26) * 0x07 / 0x25; + cmatrix[18] = sd->brightness - 0x80; + + hue_coord = (hsv_red_x[hue_index] * sd->saturation) >> 8; +- cmatrix[6] = (unsigned char)(hue_coord & 0xff); +- cmatrix[7] = (unsigned char)((hue_coord >> 8) & 0x0f); ++ cmatrix[6] = hue_coord; ++ cmatrix[7] = (hue_coord >> 8) & 0x0f; + + hue_coord = (hsv_red_y[hue_index] * sd->saturation) >> 8; +- cmatrix[8] = (unsigned char)(hue_coord & 0xff); +- cmatrix[9] = (unsigned char)((hue_coord >> 8) & 0x0f); ++ cmatrix[8] = hue_coord; ++ cmatrix[9] = (hue_coord >> 8) & 0x0f; + + hue_coord = (hsv_green_x[hue_index] * sd->saturation) >> 8; +- cmatrix[10] = (unsigned char)(hue_coord & 0xff); +- cmatrix[11] = (unsigned char)((hue_coord >> 8) & 0x0f); ++ cmatrix[10] = hue_coord; ++ cmatrix[11] = (hue_coord >> 8) & 0x0f; + + hue_coord = (hsv_green_y[hue_index] * sd->saturation) >> 8; +- cmatrix[12] = (unsigned char)(hue_coord & 0xff); +- cmatrix[13] = (unsigned char)((hue_coord >> 8) & 0x0f); ++ cmatrix[12] = hue_coord; ++ cmatrix[13] = (hue_coord >> 8) & 0x0f; + + hue_coord = (hsv_blue_x[hue_index] * sd->saturation) >> 8; +- cmatrix[14] = (unsigned char)(hue_coord & 0xff); +- cmatrix[15] = (unsigned char)((hue_coord >> 8) & 0x0f); ++ cmatrix[14] = hue_coord; ++ cmatrix[15] = (hue_coord >> 8) & 0x0f; + + hue_coord = (hsv_blue_y[hue_index] * sd->saturation) >> 8; +- cmatrix[16] = (unsigned char)(hue_coord & 0xff); +- cmatrix[17] = (unsigned char)((hue_coord >> 8) & 0x0f); ++ cmatrix[16] = hue_coord; ++ cmatrix[17] = (hue_coord >> 8) & 0x0f; + + return reg_w(gspca_dev, 0x10e1, cmatrix, 21); + } +@@ -2015,6 +2015,7 @@ static int sd_config(struct gspca_dev *gspca_dev, + default: + cam->cam_mode = vga_mode; + cam->nmodes = ARRAY_SIZE(vga_mode); ++ break; + } + + sd->old_step = 0; +@@ -2319,7 +2320,7 @@ static void do_autogain(struct gspca_dev *gspca_dev, u16 avg_lum) + } + } + if (avg_lum > MAX_AVG_LUM) { +- if (sd->gain >= 1) { ++ if (sd->gain > 0) { + sd->gain--; + set_gain(gspca_dev); + } +@@ -2347,7 +2348,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, + { + struct sd *sd = (struct sd *) gspca_dev; + int avg_lum; +- static unsigned char frame_header[] = ++ static u8 frame_header[] = + {0xff, 0xff, 0x00, 0xc4, 0xc4, 0x96}; + if (len == 64 && memcmp(data, frame_header, 6) == 0) { + avg_lum = ((data[35] >> 2) & 3) | +diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c +index ddff2b5..3830973 100644 +--- 
a/drivers/media/video/gspca/sonixb.c ++++ b/drivers/media/video/gspca/sonixb.c +@@ -42,6 +42,7 @@ Reg Use + + #define MODULE_NAME "sonixb" + ++#include + #include "gspca.h" + + MODULE_AUTHOR("Michel Xhaard "); +@@ -53,9 +54,11 @@ struct sd { + struct gspca_dev gspca_dev; /* !! must be the first item */ + atomic_t avg_lum; + int prev_avg_lum; ++ int exp_too_low_cnt; ++ int exp_too_high_cnt; + ++ unsigned short exposure; + unsigned char gain; +- unsigned char exposure; + unsigned char brightness; + unsigned char autogain; + unsigned char autogain_ignore_frames; +@@ -73,8 +76,9 @@ struct sd { + #define SENSOR_OV7630 2 + #define SENSOR_PAS106 3 + #define SENSOR_PAS202 4 +-#define SENSOR_TAS5110 5 +-#define SENSOR_TAS5130CXX 6 ++#define SENSOR_TAS5110C 5 ++#define SENSOR_TAS5110D 6 ++#define SENSOR_TAS5130CXX 7 + __u8 reg11; + }; + +@@ -95,13 +99,15 @@ struct sensor_data { + /* sensor_data flags */ + #define F_GAIN 0x01 /* has gain */ + #define F_SIF 0x02 /* sif or vga */ ++#define F_COARSE_EXPO 0x04 /* exposure control is coarse */ + + /* priv field of struct v4l2_pix_format flags (do not use low nibble!) */ + #define MODE_RAW 0x10 /* raw bayer mode */ + #define MODE_REDUCED_SIF 0x20 /* vga mode (320x240 / 160x120) on sif cam */ + + /* ctrl_dis helper macros */ +-#define NO_EXPO ((1 << EXPOSURE_IDX) | (1 << AUTOGAIN_IDX)) ++#define NO_EXPO ((1 << EXPOSURE_IDX) | (1 << COARSE_EXPOSURE_IDX) | \ ++ (1 << AUTOGAIN_IDX)) + #define NO_FREQ (1 << FREQ_IDX) + #define NO_BRIGHTNESS (1 << BRIGHTNESS_IDX) + +@@ -127,11 +133,10 @@ struct sensor_data { + } + + /* We calculate the autogain at the end of the transfer of a frame, at this +- moment a frame with the old settings is being transmitted, and a frame is +- being captured with the old settings. So if we adjust the autogain we must +- ignore atleast the 2 next frames for the new settings to come into effect +- before doing any other adjustments */ +-#define AUTOGAIN_IGNORE_FRAMES 3 ++ moment a frame with the old settings is being captured and transmitted. So ++ if we adjust the gain or exposure we must ignore atleast the next frame for ++ the new settings to come into effect before doing any other adjustments. 
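The reduced AUTOGAIN_IGNORE_FRAMES value relies on the observation in the comment above: a gain or exposure change only shows up in the frame after the one currently being captured, so the loop must skip the frames that still carry stale settings before it judges brightness again. Below is a minimal, self-contained model of that settle counter. It is illustrative only: the struct and function names, the fake luminance samples, and the simple gain step are not from the driver (the real code calls gspca_auto_gain_n_exposure() or the coarse-grained variant); the target and deadzone values are the VGA numbers the driver uses further down.

/*
 * Minimal model of the settle counter behind AUTOGAIN_IGNORE_FRAMES.
 * Standalone sketch, not driver code.
 */
#include <stdio.h>

#define AUTOGAIN_IGNORE_FRAMES 1	/* frames captured with stale settings */

struct agc {
	int gain;		/* current (abstract) sensor gain */
	int ignore_frames;	/* frames left to skip after an adjustment */
};

/* Called once per completed frame with its average luminance. */
static void agc_frame_done(struct agc *a, int avg_lum, int target, int deadzone)
{
	if (a->ignore_frames > 0) {
		a->ignore_frames--;	/* this frame still used old settings */
		return;
	}
	if (avg_lum < target - deadzone)
		a->gain++;
	else if (avg_lum > target + deadzone)
		a->gain--;
	else
		return;			/* inside the deadzone: leave it alone */
	/* the new gain only takes effect one frame later */
	a->ignore_frames = AUTOGAIN_IGNORE_FRAMES;
}

int main(void)
{
	struct agc a = { .gain = 10, .ignore_frames = 0 };
	int lum[] = { 9000, 9000, 12000, 17500, 18200 };	/* made-up samples */

	for (unsigned i = 0; i < sizeof lum / sizeof lum[0]; i++) {
		agc_frame_done(&a, lum[i], 18000, 1500);
		printf("frame %u: lum %5d -> gain %d (skip %d)\n",
		       i, lum[i], a.gain, a.ignore_frames);
	}
	return 0;
}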
*/ ++#define AUTOGAIN_IGNORE_FRAMES 1 + + /* V4L2 controls supported by the driver */ + static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val); +@@ -145,7 +150,7 @@ static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + #define BRIGHTNESS_IDX 0 + { + { +@@ -171,7 +176,7 @@ static struct ctrl sd_ctrls[] = { + .maximum = 255, + .step = 1, + #define GAIN_DEF 127 +-#define GAIN_KNEE 200 ++#define GAIN_KNEE 230 + .default_value = GAIN_DEF, + }, + .set = sd_setgain, +@@ -183,10 +188,10 @@ static struct ctrl sd_ctrls[] = { + .id = V4L2_CID_EXPOSURE, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Exposure", +-#define EXPOSURE_DEF 16 /* 32 ms / 30 fps */ +-#define EXPOSURE_KNEE 50 /* 100 ms / 10 fps */ ++#define EXPOSURE_DEF 66 /* 33 ms / 30 fps (except on PASXXX) */ ++#define EXPOSURE_KNEE 200 /* 100 ms / 10 fps (except on PASXXX) */ + .minimum = 0, +- .maximum = 255, ++ .maximum = 1023, + .step = 1, + .default_value = EXPOSURE_DEF, + .flags = 0, +@@ -194,7 +199,23 @@ static struct ctrl sd_ctrls[] = { + .set = sd_setexposure, + .get = sd_getexposure, + }, +-#define AUTOGAIN_IDX 3 ++#define COARSE_EXPOSURE_IDX 3 ++ { ++ { ++ .id = V4L2_CID_EXPOSURE, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Exposure", ++#define COARSE_EXPOSURE_DEF 2 /* 30 fps */ ++ .minimum = 2, ++ .maximum = 15, ++ .step = 1, ++ .default_value = COARSE_EXPOSURE_DEF, ++ .flags = 0, ++ }, ++ .set = sd_setexposure, ++ .get = sd_getexposure, ++ }, ++#define AUTOGAIN_IDX 4 + { + { + .id = V4L2_CID_AUTOGAIN, +@@ -210,7 +231,7 @@ static struct ctrl sd_ctrls[] = { + .set = sd_setautogain, + .get = sd_getautogain, + }, +-#define FREQ_IDX 4 ++#define FREQ_IDX 5 + { + { + .id = V4L2_CID_POWER_LINE_FREQUENCY, +@@ -219,7 +240,7 @@ static struct ctrl sd_ctrls[] = { + .minimum = 0, + .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */ + .step = 1, +-#define FREQ_DEF 1 ++#define FREQ_DEF 0 + .default_value = FREQ_DEF, + }, + .set = sd_setfreq, +@@ -297,10 +318,18 @@ static const __u8 hv7131_sensor_init[][8] = { + {0xa0, 0x11, 0x30, 0x10, 0x0e, 0x28, 0x00, 0x15}, + }; + static const __u8 initOv6650[] = { ++#if 1 + 0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, + 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x01, 0x0a, 0x16, 0x12, 0x68, 0x8b, + 0x10, 0x1d, 0x10, 0x02, 0x02, 0x09, 0x07 ++#else ++/* old version? 
*/ ++ 0x64, 0x44, 0x28, 0x00, 0x00, 0x00, 0x00, 0x10, ++ 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x01, 0x01, 0x0a, 0x14, 0x0f, 0x68, 0x8b, ++ 0x10, 0x1d, 0x10, 0x01, 0x01, 0x07, 0x06 ++#endif + }; + static const __u8 ov6650_sensor_init[][8] = + { +@@ -333,6 +362,24 @@ static const __u8 ov6650_sensor_init[][8] = + /* Some more unknown stuff */ + {0xa0, 0x60, 0x68, 0x04, 0x68, 0xd8, 0xa4, 0x10}, + {0xd0, 0x60, 0x17, 0x24, 0xd6, 0x04, 0x94, 0x10}, /* Clipreg */ ++#if 0 ++ /* HDG, don't change registers 0x2d, 0x32 & 0x33 from their reset ++ defaults, doing so mucks up the framerate, where as the defaults ++ seem to work good, the combinations below have been observed ++ under windows and are kept for future reference */ ++ {0xa0, 0x60, 0x2d, 0x0a, 0x99, 0x04, 0x94, 0x16}, ++ {0xa0, 0x60, 0x32, 0x00, 0x99, 0x04, 0x94, 0x16}, ++ {0xa0, 0x60, 0x33, 0x40, 0x99, 0x04, 0x94, 0x16}, ++ {0xa0, 0x60, 0x2d, 0x2a, 0x99, 0x04, 0x94, 0x15}, ++ {0xa0, 0x60, 0x2d, 0x2b, 0x99, 0x04, 0x94, 0x16}, ++ {0xa0, 0x60, 0x32, 0x00, 0x99, 0x04, 0x94, 0x16}, ++ {0xa0, 0x60, 0x33, 0x00, 0x99, 0x04, 0x94, 0x16}, ++ {0xa0, 0x60, 0x2d, 0x2b, 0x99, 0x04, 0x94, 0x16}, ++ {0xa0, 0x60, 0x32, 0x00, 0x99, 0x04, 0x94, 0x16}, ++ /* Low Light (Enabled: 0x32 0x1 | Disabled: 0x32 0x00) */ ++ {0xa0, 0x60, 0x33, 0x29, 0x99, 0x04, 0x94, 0x16}, ++ /* Low Ligth (Enabled: 0x33 0x13 | Disabled: 0x33 0x29) */ ++#endif + }; + + static const __u8 initOv7630[] = { +@@ -341,11 +388,15 @@ static const __u8 initOv7630[] = { + 0x00, 0x01, 0x01, 0x0a, /* r11 .. r14 */ + 0x28, 0x1e, /* H & V sizes r15 .. r16 */ + 0x68, COMP2, MCK_INIT1, /* r17 .. r19 */ ++#if 1 + 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c /* r1a .. r1f */ ++#else /* jfm from win */ ++ 0x1d, 0x10, 0x06, 0x01, 0x00, 0x03 /* r1a .. r1f */ ++#endif + }; + static const __u8 initOv7630_3[] = { + 0x44, 0x44, 0x00, 0x1a, 0x20, 0x20, 0x20, 0x80, /* r01 .. r08 */ +- 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, /* r09 .. r10 */ ++ 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* r09 .. r10 */ + 0x00, 0x02, 0x01, 0x0a, /* r11 .. r14 */ + 0x28, 0x1e, /* H & V sizes r15 .. r16 */ + 0x68, 0x8f, MCK_INIT1, /* r17 .. r19 */ +@@ -387,6 +438,30 @@ static const __u8 initPas106[] = { + 0x18, 0x10, 0x02, 0x02, 0x09, 0x07 + }; + /* compression 0x86 mckinit1 0x2b */ ++ ++/* "Known" PAS106B registers: ++ 0x02 clock divider ++ 0x03 Variable framerate bits 4-11 ++ 0x04 Var framerate bits 0-3, one must leave the 4 msb's at 0 !! ++ The variable framerate control must never be set lower then 300, ++ which sets the framerate at 90 / reg02, otherwise vsync is lost. ++ 0x05 Shutter Time Line Offset, this can be used as an exposure control: ++ 0 = use full frame time, 255 = no exposure at all ++ Note this may never be larger then "var-framerate control" / 2 - 2. ++ When var-framerate control is < 514, no exposure is reached at the max ++ allowed value for the framerate control value, rather then at 255. 
++ 0x06 Shutter Time Pixel Offset, like reg05 this influences exposure, but ++ only a very little bit, leave at 0xcd ++ 0x07 offset sign bit (bit0 1 > negative offset) ++ 0x08 offset ++ 0x09 Blue Gain ++ 0x0a Green1 Gain ++ 0x0b Green2 Gain ++ 0x0c Red Gain ++ 0x0e Global gain ++ 0x13 Write 1 to commit settings to sensor ++*/ ++ + static const __u8 pas106_sensor_init[][8] = { + /* Pixel Clock Divider 6 */ + { 0xa1, 0x40, 0x02, 0x04, 0x00, 0x00, 0x00, 0x14 }, +@@ -433,37 +508,55 @@ static const __u8 initPas202[] = { + 0x44, 0x44, 0x21, 0x30, 0x00, 0x00, 0x00, 0x80, 0x40, 0x00, 0x00, 0x00, + 0x00, 0x00, + 0x00, 0x00, 0x00, 0x06, 0x03, 0x0a, +- 0x28, 0x1e, 0x28, 0x89, 0x20, ++ 0x28, 0x1e, 0x20, 0x89, 0x20, + 0x00, 0x00, 0x02, 0x03, 0x0f, 0x0c + }; ++ ++/* "Known" PAS202BCB registers: ++ 0x02 clock divider ++ 0x04 Variable framerate bits 6-11 (*) ++ 0x05 Var framerate bits 0-5, one must leave the 2 msb's at 0 !! ++ 0x07 Blue Gain ++ 0x08 Green Gain ++ 0x09 Red Gain ++ 0x0b offset sign bit (bit0 1 > negative offset) ++ 0x0c offset ++ 0x0e Unknown image is slightly brighter when bit 0 is 0, if reg0f is 0 too, ++ leave at 1 otherwise we get a jump in our exposure control ++ 0x0f Exposure 0-255, 0 = use full frame time, 255 = no exposure at all ++ 0x10 Master gain 0 - 31 ++ 0x11 write 1 to apply changes ++ (*) The variable framerate control must never be set lower then 500 ++ which sets the framerate at 30 / reg02, otherwise vsync is lost. ++*/ + static const __u8 pas202_sensor_init[][8] = { +- {0xa0, 0x40, 0x02, 0x03, 0x00, 0x00, 0x00, 0x10}, ++ /* Set the clock divider to 4 -> 30 / 4 = 7.5 fps, we would like ++ to set it lower, but for some reason the bridge starts missing ++ vsync's then */ ++ {0xa0, 0x40, 0x02, 0x04, 0x00, 0x00, 0x00, 0x10}, + {0xd0, 0x40, 0x04, 0x07, 0x34, 0x00, 0x09, 0x10}, + {0xd0, 0x40, 0x08, 0x01, 0x00, 0x00, 0x01, 0x10}, +- {0xd0, 0x40, 0x0C, 0x00, 0x0C, 0x00, 0x32, 0x10}, ++ {0xd0, 0x40, 0x0C, 0x00, 0x0C, 0x01, 0x32, 0x10}, + {0xd0, 0x40, 0x10, 0x00, 0x01, 0x00, 0x63, 0x10}, + {0xa0, 0x40, 0x15, 0x70, 0x01, 0x00, 0x63, 0x10}, + {0xa0, 0x40, 0x18, 0x00, 0x01, 0x00, 0x63, 0x10}, + {0xa0, 0x40, 0x11, 0x01, 0x01, 0x00, 0x63, 0x10}, + {0xa0, 0x40, 0x03, 0x56, 0x01, 0x00, 0x63, 0x10}, + {0xa0, 0x40, 0x11, 0x01, 0x01, 0x00, 0x63, 0x10}, +- {0xb0, 0x40, 0x04, 0x07, 0x2a, 0x00, 0x63, 0x10}, +- {0xb0, 0x40, 0x0e, 0x00, 0x3d, 0x00, 0x63, 0x10}, +- +- {0xa0, 0x40, 0x11, 0x01, 0x3d, 0x00, 0x63, 0x16}, +- {0xa0, 0x40, 0x10, 0x08, 0x3d, 0x00, 0x63, 0x15}, +- {0xa0, 0x40, 0x02, 0x04, 0x3d, 0x00, 0x63, 0x16}, +- {0xa0, 0x40, 0x11, 0x01, 0x3d, 0x00, 0x63, 0x16}, +- {0xb0, 0x40, 0x0e, 0x00, 0x31, 0x00, 0x63, 0x16}, +- {0xa0, 0x40, 0x11, 0x01, 0x31, 0x00, 0x63, 0x16}, +- {0xa0, 0x40, 0x10, 0x0e, 0x31, 0x00, 0x63, 0x15}, +- {0xa0, 0x40, 0x11, 0x01, 0x31, 0x00, 0x63, 0x16}, + }; + +-static const __u8 initTas5110[] = { ++static const __u8 initTas5110c[] = { + 0x44, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x11, 0x00, 0x00, 0x00, + 0x00, 0x00, +- 0x00, 0x01, 0x00, 0x45, 0x09, 0x0a, ++ 0x00, 0x00, 0x00, 0x45, 0x09, 0x0a, ++ 0x16, 0x12, 0x60, 0x86, 0x2b, ++ 0x14, 0x0a, 0x02, 0x02, 0x09, 0x07 ++}; ++/* Same as above, except a different hstart */ ++static const __u8 initTas5110d[] = { ++ 0x44, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x11, 0x00, 0x00, 0x00, ++ 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x41, 0x09, 0x0a, + 0x16, 0x12, 0x60, 0x86, 0x2b, + 0x14, 0x0a, 0x02, 0x02, 0x09, 0x07 + }; +@@ -476,7 +569,7 @@ static const __u8 tas5110_sensor_init[][8] = { + static const __u8 initTas5130[] = { + 0x04, 
0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x11, 0x00, 0x00, 0x00, + 0x00, 0x00, +- 0x00, 0x01, 0x00, 0x68, 0x0c, 0x0a, ++ 0x00, 0x00, 0x00, 0x68, 0x0c, 0x0a, + 0x28, 0x1e, 0x60, COMP, MCK_INIT, + 0x18, 0x10, 0x04, 0x03, 0x11, 0x0c + }; +@@ -493,12 +586,14 @@ SENS(initHv7131, NULL, hv7131_sensor_init, NULL, NULL, 0, NO_EXPO|NO_FREQ, 0), + SENS(initOv6650, NULL, ov6650_sensor_init, NULL, NULL, F_GAIN|F_SIF, 0, 0x60), + SENS(initOv7630, initOv7630_3, ov7630_sensor_init, NULL, ov7630_sensor_init_3, + F_GAIN, 0, 0x21), +-SENS(initPas106, NULL, pas106_sensor_init, NULL, NULL, F_SIF, NO_EXPO|NO_FREQ, ++SENS(initPas106, NULL, pas106_sensor_init, NULL, NULL, F_GAIN|F_SIF, NO_FREQ, + 0), +-SENS(initPas202, initPas202, pas202_sensor_init, NULL, NULL, 0, +- NO_EXPO|NO_FREQ, 0), +-SENS(initTas5110, NULL, tas5110_sensor_init, NULL, NULL, F_GAIN|F_SIF, +- NO_BRIGHTNESS|NO_FREQ, 0), ++SENS(initPas202, initPas202, pas202_sensor_init, NULL, NULL, F_GAIN, ++ NO_FREQ, 0), ++SENS(initTas5110c, NULL, tas5110_sensor_init, NULL, NULL, ++ F_GAIN|F_SIF|F_COARSE_EXPO, NO_BRIGHTNESS|NO_FREQ, 0), ++SENS(initTas5110d, NULL, tas5110_sensor_init, NULL, NULL, ++ F_GAIN|F_SIF|F_COARSE_EXPO, NO_BRIGHTNESS|NO_FREQ, 0), + SENS(initTas5130, NULL, tas5130_sensor_init, NULL, NULL, 0, NO_EXPO|NO_FREQ, + 0), + }; +@@ -587,42 +682,28 @@ static void setbrightness(struct gspca_dev *gspca_dev) + goto err; + break; + } +- case SENSOR_PAS106: { +- __u8 i2c1[] = +- {0xa1, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14}; +- +- i2c1[3] = sd->brightness >> 3; +- i2c1[2] = 0x0e; +- if (i2c_w(gspca_dev, i2c1) < 0) +- goto err; +- i2c1[3] = 0x01; +- i2c1[2] = 0x13; +- if (i2c_w(gspca_dev, i2c1) < 0) +- goto err; +- break; +- } ++ case SENSOR_PAS106: + case SENSOR_PAS202: { +- /* __u8 i2cpexpo1[] = +- {0xb0, 0x40, 0x04, 0x07, 0x2a, 0x00, 0x63, 0x16}; */ +- __u8 i2cpexpo[] = +- {0xb0, 0x40, 0x0e, 0x01, 0xab, 0x00, 0x63, 0x16}; +- __u8 i2cp202[] = +- {0xa0, 0x40, 0x10, 0x0e, 0x31, 0x00, 0x63, 0x15}; +- static __u8 i2cpdoit[] = +- {0xa0, 0x40, 0x11, 0x01, 0x31, 0x00, 0x63, 0x16}; +- +- /* change reg 0x10 */ +- i2cpexpo[4] = 0xff - sd->brightness; +-/* if(i2c_w(gspca_dev,i2cpexpo1) < 0) +- goto err; */ +-/* if(i2c_w(gspca_dev,i2cpdoit) < 0) +- goto err; */ +- if (i2c_w(gspca_dev, i2cpexpo) < 0) +- goto err; +- if (i2c_w(gspca_dev, i2cpdoit) < 0) +- goto err; +- i2cp202[3] = sd->brightness >> 3; +- if (i2c_w(gspca_dev, i2cp202) < 0) ++ __u8 i2cpbright[] = ++ {0xb0, 0x40, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x16}; ++ __u8 i2cpdoit[] = ++ {0xa0, 0x40, 0x11, 0x01, 0x00, 0x00, 0x00, 0x16}; ++ ++ /* PAS106 uses reg 7 and 8 instead of b and c */ ++ if (sd->sensor == SENSOR_PAS106) { ++ i2cpbright[2] = 7; ++ i2cpdoit[2] = 0x13; ++ } ++ ++ if (sd->brightness < 127) { ++ /* change reg 0x0b, signreg */ ++ i2cpbright[3] = 0x01; ++ /* set reg 0x0c, offset */ ++ i2cpbright[4] = 127 - sd->brightness; ++ } else ++ i2cpbright[4] = sd->brightness - 127; ++ ++ if (i2c_w(gspca_dev, i2cpbright) < 0) + goto err; + if (i2c_w(gspca_dev, i2cpdoit) < 0) + goto err; +@@ -652,7 +733,8 @@ static void setsensorgain(struct gspca_dev *gspca_dev) + + switch (sd->sensor) { + +- case SENSOR_TAS5110: { ++ case SENSOR_TAS5110C: ++ case SENSOR_TAS5110D: { + __u8 i2c[] = + {0x30, 0x11, 0x02, 0x20, 0x70, 0x00, 0x00, 0x10}; + +@@ -674,6 +756,37 @@ static void setsensorgain(struct gspca_dev *gspca_dev) + goto err; + break; + } ++ case SENSOR_PAS106: ++ case SENSOR_PAS202: { ++ __u8 i2cpgain[] = ++ {0xa0, 0x40, 0x10, 0x00, 0x00, 0x00, 0x00, 0x15}; ++ __u8 i2cpcolorgain[] = ++ {0xc0, 0x40, 0x07, 0x00, 
0x00, 0x00, 0x00, 0x15}; ++ __u8 i2cpdoit[] = ++ {0xa0, 0x40, 0x11, 0x01, 0x00, 0x00, 0x00, 0x16}; ++ ++ /* PAS106 uses different regs (and has split green gains) */ ++ if (sd->sensor == SENSOR_PAS106) { ++ i2cpgain[2] = 0x0e; ++ i2cpcolorgain[0] = 0xd0; ++ i2cpcolorgain[2] = 0x09; ++ i2cpdoit[2] = 0x13; ++ } ++ ++ i2cpgain[3] = sd->gain >> 3; ++ i2cpcolorgain[3] = sd->gain >> 4; ++ i2cpcolorgain[4] = sd->gain >> 4; ++ i2cpcolorgain[5] = sd->gain >> 4; ++ i2cpcolorgain[6] = sd->gain >> 4; ++ ++ if (i2c_w(gspca_dev, i2cpgain) < 0) ++ goto err; ++ if (i2c_w(gspca_dev, i2cpcolorgain) < 0) ++ goto err; ++ if (i2c_w(gspca_dev, i2cpdoit) < 0) ++ goto err; ++ break; ++ } + } + return; + err: +@@ -684,19 +797,21 @@ static void setgain(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + __u8 gain; +- __u8 rgb_value; ++ __u8 buf[2] = { 0, 0 }; ++ ++ if (sensor_data[sd->sensor].flags & F_GAIN) { ++ /* Use the sensor gain to do the actual gain */ ++ setsensorgain(gspca_dev); ++ return; ++ } + + gain = sd->gain >> 4; + + /* red and blue gain */ +- rgb_value = gain << 4 | gain; +- reg_w(gspca_dev, 0x10, &rgb_value, 1); ++ buf[0] = gain << 4 | gain; + /* green gain */ +- rgb_value = gain; +- reg_w(gspca_dev, 0x11, &rgb_value, 1); +- +- if (sensor_data[sd->sensor].flags & F_GAIN) +- setsensorgain(gspca_dev); ++ buf[1] = gain; ++ reg_w(gspca_dev, 0x10, buf, 2); + } + + static void setexposure(struct gspca_dev *gspca_dev) +@@ -704,17 +819,12 @@ static void setexposure(struct gspca_dev *gspca_dev) + struct sd *sd = (struct sd *) gspca_dev; + + switch (sd->sensor) { +- case SENSOR_TAS5110: { +- __u8 reg; +- ++ case SENSOR_TAS5110C: ++ case SENSOR_TAS5110D: { + /* register 19's high nibble contains the sn9c10x clock divider + The high nibble configures the no fps according to the + formula: 60 / high_nibble. With a maximum of 30 fps */ +- reg = 120 * sd->exposure / 1000; +- if (reg < 2) +- reg = 2; +- else if (reg > 15) +- reg = 15; ++ __u8 reg = sd->exposure; + reg = (reg << 4) | 0x0b; + reg_w(gspca_dev, 0x19, ®, 1); + break; +@@ -750,20 +860,21 @@ static void setexposure(struct gspca_dev *gspca_dev) + } else + reg10_max = 0x41; + +- reg11 = (60 * sd->exposure + 999) / 1000; ++ reg11 = (15 * sd->exposure + 999) / 1000; + if (reg11 < 1) + reg11 = 1; + else if (reg11 > 16) + reg11 = 16; + +- /* In 640x480, if the reg11 has less than 3, the image is +- unstable (not enough bandwidth). */ +- if (gspca_dev->width == 640 && reg11 < 3) +- reg11 = 3; ++ /* In 640x480, if the reg11 has less than 4, the image is ++ unstable (the bridge goes into a higher compression mode ++ which we have not reverse engineered yet). 
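To make the TAS5110 coarse exposure arithmetic above concrete: the value of the new coarse exposure control (clamped to 2..15) lands in the high nibble of bridge register 0x19, and the frame rate follows the 60 / high_nibble rule, so the control really selects a frame period rather than a shutter time. The table below is a standalone sketch, not driver code; the 0x0b low nibble is simply the value the driver keeps in the rest of the register.

/* Worked example of the sn9c10x reg 0x19 clock divider used as a coarse
 * exposure control on the TAS5110C/D: fps = 60 / high_nibble. */
#include <stdio.h>

int main(void)
{
	for (int expo = 2; expo <= 15; expo++) {
		unsigned char reg19 = (expo << 4) | 0x0b;
		printf("coarse expo %2d -> reg 0x19 = 0x%02x -> %5.2f fps\n",
		       expo, reg19, 60.0 / expo);
	}
	return 0;	/* expo 2 gives 30 fps, expo 15 gives 4 fps */
}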
*/ ++ if (gspca_dev->width == 640 && reg11 < 4) ++ reg11 = 4; + + /* frame exposure time in ms = 1000 * reg11 / 30 -> +- reg10 = sd->exposure * 2 * reg10_max / (1000 * reg11 / 30) */ +- reg10 = (sd->exposure * 60 * reg10_max) / (1000 * reg11); ++ reg10 = (sd->exposure / 2) * reg10_max / (1000 * reg11 / 30) */ ++ reg10 = (sd->exposure * 15 * reg10_max) / (1000 * reg11); + + /* Don't allow this to get below 10 when using autogain, the + steps become very large (relatively) when below 10 causing +@@ -786,10 +897,85 @@ static void setexposure(struct gspca_dev *gspca_dev) + if (i2c_w(gspca_dev, i2c) == 0) + sd->reg11 = reg11; + else +- PDEBUG(D_ERR, "i2c error exposure"); ++ goto err; ++ break; ++ } ++ case SENSOR_PAS202: { ++ __u8 i2cpframerate[] = ++ {0xb0, 0x40, 0x04, 0x00, 0x00, 0x00, 0x00, 0x16}; ++ __u8 i2cpexpo[] = ++ {0xa0, 0x40, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x16}; ++ const __u8 i2cpdoit[] = ++ {0xa0, 0x40, 0x11, 0x01, 0x00, 0x00, 0x00, 0x16}; ++ int framerate_ctrl; ++ ++ /* The exposure knee for the autogain algorithm is 200 ++ (100 ms / 10 fps on other sensors), for values below this ++ use the control for setting the partial frame expose time, ++ above that use variable framerate. This way we run at max ++ framerate (640x480@7.5 fps, 320x240@10fps) until the knee ++ is reached. Using the variable framerate control above 200 ++ is better then playing around with both clockdiv + partial ++ frame exposure times (like we are doing with the ov chips), ++ as that sometimes leads to jumps in the exposure control, ++ which are bad for auto exposure. */ ++ if (sd->exposure < 200) { ++ i2cpexpo[3] = 255 - (sd->exposure * 255) / 200; ++ framerate_ctrl = 500; ++ } else { ++ /* The PAS202's exposure control goes from 0 - 4095, ++ but anything below 500 causes vsync issues, so scale ++ our 200-1023 to 500-4095 */ ++ framerate_ctrl = (sd->exposure - 200) * 1000 / 229 + ++ 500; ++ } ++ ++ i2cpframerate[3] = framerate_ctrl >> 6; ++ i2cpframerate[4] = framerate_ctrl & 0x3f; ++ if (i2c_w(gspca_dev, i2cpframerate) < 0) ++ goto err; ++ if (i2c_w(gspca_dev, i2cpexpo) < 0) ++ goto err; ++ if (i2c_w(gspca_dev, i2cpdoit) < 0) ++ goto err; ++ break; ++ } ++ case SENSOR_PAS106: { ++ __u8 i2cpframerate[] = ++ {0xb1, 0x40, 0x03, 0x00, 0x00, 0x00, 0x00, 0x14}; ++ __u8 i2cpexpo[] = ++ {0xa1, 0x40, 0x05, 0x00, 0x00, 0x00, 0x00, 0x14}; ++ const __u8 i2cpdoit[] = ++ {0xa1, 0x40, 0x13, 0x01, 0x00, 0x00, 0x00, 0x14}; ++ int framerate_ctrl; ++ ++ /* For values below 150 use partial frame exposure, above ++ that use framerate ctrl */ ++ if (sd->exposure < 150) { ++ i2cpexpo[3] = 150 - sd->exposure; ++ framerate_ctrl = 300; ++ } else { ++ /* The PAS106's exposure control goes from 0 - 4095, ++ but anything below 300 causes vsync issues, so scale ++ our 150-1023 to 300-4095 */ ++ framerate_ctrl = (sd->exposure - 150) * 1000 / 230 + ++ 300; ++ } ++ ++ i2cpframerate[3] = framerate_ctrl >> 4; ++ i2cpframerate[4] = framerate_ctrl & 0x0f; ++ if (i2c_w(gspca_dev, i2cpframerate) < 0) ++ goto err; ++ if (i2c_w(gspca_dev, i2cpexpo) < 0) ++ goto err; ++ if (i2c_w(gspca_dev, i2cpdoit) < 0) ++ goto err; + break; + } + } ++ return; ++err: ++ PDEBUG(D_ERR, "i2c error exposure"); + } + + static void setfreq(struct gspca_dev *gspca_dev) +@@ -823,30 +1009,43 @@ static void setfreq(struct gspca_dev *gspca_dev) + } + } + ++#include "coarse_expo_autogain.h" ++ + static void do_autogain(struct gspca_dev *gspca_dev) + { +- int deadzone, desired_avg_lum; ++ int deadzone, desired_avg_lum, result; + struct sd *sd = (struct sd *) gspca_dev; + int 
avg_lum = atomic_read(&sd->avg_lum); + +- if (avg_lum == -1) ++ if (avg_lum == -1 || !sd->autogain) + return; + ++ if (sd->autogain_ignore_frames > 0) { ++ sd->autogain_ignore_frames--; ++ return; ++ } ++ + /* SIF / VGA sensors have a different autoexposure area and thus + different avg_lum values for the same picture brightness */ + if (sensor_data[sd->sensor].flags & F_SIF) { +- deadzone = 1000; +- desired_avg_lum = 7000; ++ deadzone = 500; ++ /* SIF sensors tend to overexpose, so keep this small */ ++ desired_avg_lum = 5000; + } else { +- deadzone = 3000; +- desired_avg_lum = 23000; ++ deadzone = 1500; ++ desired_avg_lum = 18000; + } + +- if (sd->autogain_ignore_frames > 0) +- sd->autogain_ignore_frames--; +- else if (gspca_auto_gain_n_exposure(gspca_dev, avg_lum, +- sd->brightness * desired_avg_lum / 127, +- deadzone, GAIN_KNEE, EXPOSURE_KNEE)) { ++ if (sensor_data[sd->sensor].flags & F_COARSE_EXPO) ++ result = gspca_coarse_grained_expo_autogain(gspca_dev, avg_lum, ++ sd->brightness * desired_avg_lum / 127, ++ deadzone); ++ else ++ result = gspca_auto_gain_n_exposure(gspca_dev, avg_lum, ++ sd->brightness * desired_avg_lum / 127, ++ deadzone, GAIN_KNEE, EXPOSURE_KNEE); ++ ++ if (result) { + PDEBUG(D_FRAM, "autogain: gain changed: gain: %d expo: %d", + (int)sd->gain, (int)sd->exposure); + sd->autogain_ignore_frames = AUTOGAIN_IGNORE_FRAMES; +@@ -881,7 +1080,13 @@ static int sd_config(struct gspca_dev *gspca_dev, + + sd->brightness = BRIGHTNESS_DEF; + sd->gain = GAIN_DEF; +- sd->exposure = EXPOSURE_DEF; ++ if (sensor_data[sd->sensor].flags & F_COARSE_EXPO) { ++ sd->exposure = COARSE_EXPOSURE_DEF; ++ gspca_dev->ctrl_dis |= (1 << EXPOSURE_IDX); ++ } else { ++ sd->exposure = EXPOSURE_DEF; ++ gspca_dev->ctrl_dis |= (1 << COARSE_EXPOSURE_IDX); ++ } + if (gspca_dev->ctrl_dis & (1 << AUTOGAIN_IDX)) + sd->autogain = 0; /* Disable do_autogain callback */ + else +@@ -917,9 +1122,6 @@ static int sd_start(struct gspca_dev *gspca_dev) + reg12_19[6] = sn9c10x[0x18 - 1] | (mode << 4); + /* Special cases where reg 17 and or 19 value depends on mode */ + switch (sd->sensor) { +- case SENSOR_PAS202: +- reg12_19[5] = mode ? 0x24 : 0x20; +- break; + case SENSOR_TAS5130CXX: + /* probably not mode specific at all most likely the upper + nibble of 0x19 is exposure (clock divider) just as with +@@ -955,6 +1157,16 @@ static int sd_start(struct gspca_dev *gspca_dev) + sensor_data[sd->sensor].sensor_bridge_init_size[ + sd->bridge]); + ++ /* Mode specific sensor setup */ ++ switch (sd->sensor) { ++ case SENSOR_PAS202: { ++ const __u8 i2cpclockdiv[] = ++ {0xa0, 0x40, 0x02, 0x03, 0x00, 0x00, 0x00, 0x10}; ++ /* clockdiv from 4 to 3 (7.5 -> 10 fps) when in low res mode */ ++ if (mode) ++ i2c_w(gspca_dev, i2cpclockdiv); ++ } ++ } + /* H_size V_size 0x28, 0x1e -> 640x480. 0x16, 0x12 -> 352x288 */ + reg_w(gspca_dev, 0x15, ®12_19[3], 2); + /* compression register */ +@@ -985,6 +1197,8 @@ static int sd_start(struct gspca_dev *gspca_dev) + + sd->frames_to_drop = 0; + sd->autogain_ignore_frames = 0; ++ sd->exp_too_high_cnt = 0; ++ sd->exp_too_low_cnt = 0; + atomic_set(&sd->avg_lum, -1); + return 0; + } +@@ -1143,11 +1357,14 @@ static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val) + struct sd *sd = (struct sd *) gspca_dev; + + sd->autogain = val; ++ sd->exp_too_high_cnt = 0; ++ sd->exp_too_low_cnt = 0; ++ + /* when switching to autogain set defaults to make sure + we are on a valid point of the autogain gain / + exposure knee graph, and give this change time to + take effect before doing autogain. 
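The PAS202 exposure handling introduced above splits the 0-1023 control at a knee: below 200 only the partial-frame exposure register moves while the sensor stays at its maximum frame rate, and from 200 upward the 200..1023 range is stretched onto the usable 500..4095 span of the variable frame-rate control. The sketch below replays that mapping with the same constants as the code above; the helper name pas202_map is illustrative only and does not exist in the driver.

/* Standalone illustration of the PAS202 exposure mapping. */
#include <stdio.h>

static void pas202_map(int exposure, int *expo_reg, int *framerate_ctrl)
{
	if (exposure < 200) {
		*expo_reg = 255 - (exposure * 255) / 200; /* 255 = no exposure */
		*framerate_ctrl = 500;			  /* keep max frame rate */
	} else {
		*expo_reg = 0;				  /* full frame time */
		*framerate_ctrl = (exposure - 200) * 1000 / 229 + 500;
	}
}

int main(void)
{
	int samples[] = { 0, 66, 199, 200, 600, 1023 };

	for (unsigned i = 0; i < sizeof samples / sizeof samples[0]; i++) {
		int e, f;

		pas202_map(samples[i], &e, &f);
		printf("ctrl %4d -> partial expo reg %3d, framerate ctrl %4d\n",
		       samples[i], e, f);
	}
	return 0;	/* endpoints: 0 -> (255, 500), 1023 -> (0, ~4093) */
}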
*/ +- if (sd->autogain) { ++ if (sd->autogain && !(sensor_data[sd->sensor].flags & F_COARSE_EXPO)) { + sd->exposure = EXPOSURE_DEF; + sd->gain = GAIN_DEF; + if (gspca_dev->streaming) { +@@ -1207,6 +1424,25 @@ static int sd_querymenu(struct gspca_dev *gspca_dev, + return -EINVAL; + } + ++#ifdef CONFIG_INPUT ++static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, ++ u8 *data, /* interrupt packet data */ ++ int len) /* interrupt packet length */ ++{ ++ int ret = -EINVAL; ++ ++ if (len == 1 && data[0] == 1) { ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); ++ input_sync(gspca_dev->input_dev); ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); ++ input_sync(gspca_dev->input_dev); ++ ret = 0; ++ } ++ ++ return ret; ++} ++#endif ++ + /* sub-driver description */ + static const struct sd_desc sd_desc = { + .name = MODULE_NAME, +@@ -1219,6 +1455,9 @@ static const struct sd_desc sd_desc = { + .pkt_scan = sd_pkt_scan, + .querymenu = sd_querymenu, + .dq_callback = do_autogain, ++#ifdef CONFIG_INPUT ++ .int_pkt_scan = sd_int_pkt_scan, ++#endif + }; + + /* -- module initialisation -- */ +@@ -1227,21 +1466,21 @@ static const struct sd_desc sd_desc = { + + + static const struct usb_device_id device_table[] __devinitconst = { +- {USB_DEVICE(0x0c45, 0x6001), SB(TAS5110, 102)}, /* TAS5110C1B */ +- {USB_DEVICE(0x0c45, 0x6005), SB(TAS5110, 101)}, /* TAS5110C1B */ ++ {USB_DEVICE(0x0c45, 0x6001), SB(TAS5110C, 102)}, /* TAS5110C1B */ ++ {USB_DEVICE(0x0c45, 0x6005), SB(TAS5110C, 101)}, /* TAS5110C1B */ + #if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE +- {USB_DEVICE(0x0c45, 0x6007), SB(TAS5110, 101)}, /* TAS5110D */ ++ {USB_DEVICE(0x0c45, 0x6007), SB(TAS5110D, 101)}, /* TAS5110D */ ++#endif + {USB_DEVICE(0x0c45, 0x6009), SB(PAS106, 101)}, + {USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)}, +-#endif + {USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)}, + #if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE + {USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)}, + {USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)}, + {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)}, ++#endif + {USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)}, + {USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)}, +-#endif + {USB_DEVICE(0x0c45, 0x602c), SB(OV7630, 102)}, + {USB_DEVICE(0x0c45, 0x602d), SB(HV7131R, 102)}, + #if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE +diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c +index 0bd36a0..c5a2423 100644 +--- a/drivers/media/video/gspca/sonixj.c ++++ b/drivers/media/video/gspca/sonixj.c +@@ -21,6 +21,7 @@ + + #define MODULE_NAME "sonixj" + ++#include + #include "gspca.h" + #include "jpeg.h" + +@@ -45,6 +46,7 @@ struct sd { + u8 red; + u8 gamma; + u8 vflip; /* ov7630/ov7648 only */ ++ u8 sharpness; + u8 infrared; /* mt9v111 only */ + u8 freq; /* ov76xx only */ + u8 quality; /* image quality */ +@@ -64,16 +66,17 @@ struct sd { + #define BRIDGE_SN9C110 2 + #define BRIDGE_SN9C120 3 + u8 sensor; /* Type of image sensor chip */ +-#define SENSOR_HV7131R 0 +-#define SENSOR_MI0360 1 +-#define SENSOR_MO4000 2 +-#define SENSOR_MT9V111 3 +-#define SENSOR_OM6802 4 +-#define SENSOR_OV7630 5 +-#define SENSOR_OV7648 6 +-#define SENSOR_OV7660 7 +-#define SENSOR_PO1030 8 +-#define SENSOR_SP80708 9 ++#define SENSOR_ADCM1700 0 ++#define SENSOR_HV7131R 1 ++#define SENSOR_MI0360 2 ++#define SENSOR_MO4000 3 ++#define SENSOR_MT9V111 4 ++#define SENSOR_OM6802 5 ++#define SENSOR_OV7630 6 ++#define SENSOR_OV7648 7 ++#define SENSOR_OV7660 8 
++#define SENSOR_PO1030 9 ++#define SENSOR_SP80708 10 + u8 i2c_addr; + + u8 *jpeg_hdr; +@@ -96,12 +99,14 @@ static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val); ++static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setinfrared(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getinfrared(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + #define BRIGHTNESS_IDX 0 + { + { +@@ -225,8 +230,23 @@ static struct ctrl sd_ctrls[] = { + .set = sd_setvflip, + .get = sd_getvflip, + }, ++#define SHARPNESS_IDX 8 ++ { ++ { ++ .id = V4L2_CID_SHARPNESS, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Sharpness", ++ .minimum = 0, ++ .maximum = 255, ++ .step = 1, ++#define SHARPNESS_DEF 90 ++ .default_value = SHARPNESS_DEF, ++ }, ++ .set = sd_setsharpness, ++ .get = sd_getsharpness, ++ }, + /* mt9v111 only */ +-#define INFRARED_IDX 8 ++#define INFRARED_IDX 9 + { + { + .id = V4L2_CID_INFRARED, +@@ -242,7 +262,7 @@ static struct ctrl sd_ctrls[] = { + .get = sd_getinfrared, + }, + /* ov7630/ov7648/ov7660 only */ +-#define FREQ_IDX 9 ++#define FREQ_IDX 10 + { + { + .id = V4L2_CID_POWER_LINE_FREQUENCY, +@@ -261,28 +281,37 @@ static struct ctrl sd_ctrls[] = { + + /* table of the disabled controls */ + static __u32 ctrl_dis[] = { ++ (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX) | ++ (1 << AUTOGAIN_IDX), /* SENSOR_ADCM1700 0 */ + (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX), +- /* SENSOR_HV7131R 0 */ ++ /* SENSOR_HV7131R 1 */ + (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX), +- /* SENSOR_MI0360 1 */ ++ /* SENSOR_MI0360 2 */ + (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX), +- /* SENSOR_MO4000 2 */ ++ /* SENSOR_MO4000 3 */ + (1 << VFLIP_IDX) | (1 << FREQ_IDX), +- /* SENSOR_MT9V111 3 */ ++ /* SENSOR_MT9V111 4 */ + (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX), +- /* SENSOR_OM6802 4 */ ++ /* SENSOR_OM6802 5 */ + (1 << INFRARED_IDX), +- /* SENSOR_OV7630 5 */ ++ /* SENSOR_OV7630 6 */ + (1 << INFRARED_IDX), +- /* SENSOR_OV7648 6 */ ++ /* SENSOR_OV7648 7 */ + (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX), +- /* SENSOR_OV7660 7 */ ++ /* SENSOR_OV7660 8 */ + (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | +- (1 << FREQ_IDX), /* SENSOR_PO1030 8 */ ++ (1 << FREQ_IDX), /* SENSOR_PO1030 9 */ + (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | +- (1 << FREQ_IDX), /* SENSOR_SP80708 9 */ ++ (1 << FREQ_IDX), /* SENSOR_SP80708 10 */ + }; + ++static const struct v4l2_pix_format cif_mode[] = { ++ {352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, ++ .bytesperline = 352, ++ .sizeimage = 352 * 288 * 4 / 8 + 590, ++ .colorspace = V4L2_COLORSPACE_JPEG, ++ .priv = 0}, ++}; + static const struct v4l2_pix_format vga_mode[] = { + {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, + .bytesperline = 160, +@@ -302,6 +335,17 @@ static const struct v4l2_pix_format vga_mode[] = { + .priv = 0}, + }; + ++static const u8 sn_adcm1700[0x1c] = { ++/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ ++ 0x00, 0x43, 0x60, 0x00, 0x1a, 0x00, 0x00, 0x00, ++/* 
reg8 reg9 rega regb regc regd rege regf */ ++ 0x80, 0x51, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ ++ 0x03, 0x00, 0x05, 0x01, 0x05, 0x16, 0x12, 0x42, ++/* reg18 reg19 reg1a reg1b */ ++ 0x06, 0x00, 0x00, 0x00 ++}; ++ + /*Data from sn9c102p+hv7131r */ + static const u8 sn_hv7131[0x1c] = { + /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ +@@ -415,6 +459,7 @@ static const u8 sn_sp80708[0x1c] = { + + /* sequence specific to the sensors - !! index = SENSOR_xxx */ + static const u8 *sn_tb[] = { ++ sn_adcm1700, + sn_hv7131, + sn_mi0360, + sn_mo4000, +@@ -432,6 +477,11 @@ static const u8 gamma_def[17] = { + 0x00, 0x2d, 0x46, 0x5a, 0x6c, 0x7c, 0x8b, 0x99, + 0xa6, 0xb2, 0xbf, 0xca, 0xd5, 0xe0, 0xeb, 0xf5, 0xff + }; ++/* gamma for sensor ADCM1700 */ ++static const u8 gamma_spec_0[17] = { ++ 0x0f, 0x39, 0x5a, 0x74, 0x86, 0x95, 0xa6, 0xb4, ++ 0xbd, 0xc4, 0xcc, 0xd4, 0xd5, 0xde, 0xe4, 0xed, 0xf5 ++}; + /* gamma for sensors HV7131R and MT9V111 */ + static const u8 gamma_spec_1[17] = { + 0x08, 0x3a, 0x52, 0x65, 0x75, 0x83, 0x91, 0x9d, +@@ -450,6 +500,42 @@ static const u8 reg84[] = { + 0x3e, 0x00, 0xcd, 0x0f, 0xf7, 0x0f, /* VR VG VB */ + 0x00, 0x00, 0x00 /* YUV offsets */ + }; ++static const u8 adcm1700_sensor_init[][8] = { ++ {0xa0, 0x51, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xb0, 0x51, 0x04, 0x08, 0x00, 0x00, 0x00, 0x10}, /* reset */ ++ {0xdd, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ++ {0xb0, 0x51, 0x04, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {0xdd, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ++ {0xb0, 0x51, 0x0c, 0xe0, 0x2e, 0x00, 0x00, 0x10}, ++ {0xb0, 0x51, 0x10, 0x02, 0x02, 0x00, 0x00, 0x10}, ++ {0xb0, 0x51, 0x14, 0x0e, 0x0e, 0x00, 0x00, 0x10}, ++ {0xb0, 0x51, 0x1c, 0x00, 0x80, 0x00, 0x00, 0x10}, ++ {0xb0, 0x51, 0x20, 0x01, 0x00, 0x00, 0x00, 0x10}, ++ {0xdd, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ++ {0xb0, 0x51, 0x04, 0x04, 0x00, 0x00, 0x00, 0x10}, ++ {0xdd, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ++ {0xb0, 0x51, 0x04, 0x01, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x51, 0xfe, 0x10, 0x00, 0x00, 0x00, 0x10}, ++ {0xb0, 0x51, 0x14, 0x01, 0x00, 0x00, 0x00, 0x10}, ++ {0xb0, 0x51, 0x32, 0x00, 0x00, 0x00, 0x00, 0x10}, ++ {} ++}; ++static const u8 adcm1700_sensor_param1[][8] = { ++ {0xb0, 0x51, 0x26, 0xf9, 0x01, 0x00, 0x00, 0x10}, /* exposure? */ ++ {0xd0, 0x51, 0x1e, 0x8e, 0x8e, 0x8e, 0x8e, 0x10}, ++ ++ {0xa0, 0x51, 0xfe, 0x01, 0x00, 0x00, 0x00, 0x10}, ++ {0xb0, 0x51, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x51, 0xfe, 0x10, 0x00, 0x00, 0x00, 0x10}, ++ {0xb0, 0x51, 0x32, 0x00, 0x72, 0x00, 0x00, 0x10}, ++ {0xd0, 0x51, 0x1e, 0xbe, 0xd7, 0xe8, 0xbe, 0x10}, /* exposure? 
*/ ++ ++ {0xa0, 0x51, 0xfe, 0x01, 0x00, 0x00, 0x00, 0x10}, ++ {0xb0, 0x51, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10}, ++ {0xa0, 0x51, 0xfe, 0x10, 0x00, 0x00, 0x00, 0x10}, ++ {0xb0, 0x51, 0x32, 0x00, 0xa2, 0x00, 0x00, 0x10}, ++ {} ++}; + static const u8 hv7131r_sensor_init[][8] = { + {0xc1, 0x11, 0x01, 0x08, 0x01, 0x00, 0x00, 0x10}, + {0xb1, 0x11, 0x34, 0x17, 0x7f, 0x00, 0x00, 0x10}, +@@ -986,17 +1116,18 @@ static const u8 sp80708_sensor_param1[][8] = { + {} + }; + +-static const u8 (*sensor_init[10])[8] = { +- hv7131r_sensor_init, /* HV7131R 0 */ +- mi0360_sensor_init, /* MI0360 1 */ +- mo4000_sensor_init, /* MO4000 2 */ +- mt9v111_sensor_init, /* MT9V111 3 */ +- om6802_sensor_init, /* OM6802 4 */ +- ov7630_sensor_init, /* OV7630 5 */ +- ov7648_sensor_init, /* OV7648 6 */ +- ov7660_sensor_init, /* OV7660 7 */ +- po1030_sensor_init, /* PO1030 8 */ +- sp80708_sensor_init, /* SP80708 9 */ ++static const u8 (*sensor_init[11])[8] = { ++ adcm1700_sensor_init, /* ADCM1700 0 */ ++ hv7131r_sensor_init, /* HV7131R 1 */ ++ mi0360_sensor_init, /* MI0360 2 */ ++ mo4000_sensor_init, /* MO4000 3 */ ++ mt9v111_sensor_init, /* MT9V111 4 */ ++ om6802_sensor_init, /* OM6802 5 */ ++ ov7630_sensor_init, /* OV7630 6 */ ++ ov7648_sensor_init, /* OV7648 7 */ ++ ov7660_sensor_init, /* OV7660 8 */ ++ po1030_sensor_init, /* PO1030 9 */ ++ sp80708_sensor_init, /* SP80708 10 */ + }; + + /* read bytes to gspca_dev->usb_buf */ +@@ -1064,6 +1195,7 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val) + + PDEBUG(D_USBO, "i2c_w2 [%02x] = %02x", reg, val); + switch (sd->sensor) { ++ case SENSOR_ADCM1700: + case SENSOR_OM6802: /* i2c command = a0 (100 kHz) */ + gspca_dev->usb_buf[0] = 0x80 | (2 << 4); + break; +@@ -1110,6 +1242,7 @@ static void i2c_r(struct gspca_dev *gspca_dev, u8 reg, int len) + u8 mode[8]; + + switch (sd->sensor) { ++ case SENSOR_ADCM1700: + case SENSOR_OM6802: /* i2c command = 90 (100 kHz) */ + mode[0] = 0x80 | 0x10; + break; +@@ -1255,13 +1388,22 @@ static void bridge_init(struct gspca_dev *gspca_dev, + struct sd *sd = (struct sd *) gspca_dev; + const u8 *reg9a; + static const u8 reg9a_def[] = ++#if 1 + {0x00, 0x40, 0x20, 0x00, 0x00, 0x00}; ++#else ++ {0x00, 0x40, 0x20, 0x10, 0x00, 0x04}; ++#endif + static const u8 reg9a_spec[] = + {0x00, 0x40, 0x38, 0x30, 0x00, 0x20}; + static const u8 regd4[] = {0x60, 0x00, 0x00}; + +- reg_w1(gspca_dev, 0xf1, 0x00); ++ /* sensor clock already enabled in sd_init */ ++ /* reg_w1(gspca_dev, 0xf1, 0x00); */ ++#if 1 + reg_w1(gspca_dev, 0x01, sn9c1xx[1]); ++#else ++ reg_w1(gspca_dev, 0x01, 0x00); /*jfm: in some win traces*/ ++#endif + + /* configure gpio */ + reg_w(gspca_dev, 0x01, &sn9c1xx[1], 2); +@@ -1284,6 +1426,12 @@ static void bridge_init(struct gspca_dev *gspca_dev, + reg_w(gspca_dev, 0x03, &sn9c1xx[3], 0x0f); + + switch (sd->sensor) { ++ case SENSOR_ADCM1700: ++ reg_w1(gspca_dev, 0x01, 0x43); ++ reg_w1(gspca_dev, 0x17, 0x62); ++ reg_w1(gspca_dev, 0x01, 0x42); ++ reg_w1(gspca_dev, 0x01, 0x42); ++ break; + case SENSOR_MT9V111: + reg_w1(gspca_dev, 0x01, 0x61); + reg_w1(gspca_dev, 0x17, 0x61); +@@ -1357,14 +1513,19 @@ static int sd_config(struct gspca_dev *gspca_dev, + struct sd *sd = (struct sd *) gspca_dev; + struct cam *cam; + +- cam = &gspca_dev->cam; +- cam->cam_mode = vga_mode; +- cam->nmodes = ARRAY_SIZE(vga_mode); +- cam->npkt = 24; /* 24 packets per ISOC message */ +- + sd->bridge = id->driver_info >> 16; + sd->sensor = id->driver_info; + ++ cam = &gspca_dev->cam; ++ if (sd->sensor == SENSOR_ADCM1700) { ++ cam->cam_mode = cif_mode; ++ cam->nmodes = 
ARRAY_SIZE(cif_mode); ++ } else { ++ cam->cam_mode = vga_mode; ++ cam->nmodes = ARRAY_SIZE(vga_mode); ++ } ++ cam->npkt = 24; /* 24 packets per ISOC message */ ++ + sd->brightness = BRIGHTNESS_DEF; + sd->contrast = CONTRAST_DEF; + sd->colors = COLOR_DEF; +@@ -1374,6 +1535,14 @@ static int sd_config(struct gspca_dev *gspca_dev, + sd->autogain = AUTOGAIN_DEF; + sd->ag_cnt = -1; + sd->vflip = VFLIP_DEF; ++ switch (sd->sensor) { ++ case SENSOR_OM6802: ++ sd->sharpness = 0x10; ++ break; ++ default: ++ sd->sharpness = SHARPNESS_DEF; ++ break; ++ } + sd->infrared = INFRARED_DEF; + sd->freq = FREQ_DEF; + sd->quality = QUALITY_DEF; +@@ -1433,7 +1602,9 @@ static int sd_init(struct gspca_dev *gspca_dev) + break; + } + +- reg_w1(gspca_dev, 0xf1, 0x01); ++ /* Note we do not disable the sensor clock here (power saving mode), ++ as that also disables the button on the cam. */ ++ reg_w1(gspca_dev, 0xf1, 0x00); + + /* set the i2c address */ + sn9c1xx = sn_tb[sd->sensor]; +@@ -1543,6 +1724,10 @@ static void setbrightness(struct gspca_dev *gspca_dev) + + k2 = ((int) sd->brightness - 0x8000) >> 10; + switch (sd->sensor) { ++ case SENSOR_ADCM1700: ++ if (k2 > 0x1f) ++ k2 = 0; /* only positive Y offset */ ++ break; + case SENSOR_HV7131R: + expo = sd->brightness << 4; + if (expo > 0x002dc6c0) +@@ -1625,6 +1810,9 @@ static void setgamma(struct gspca_dev *gspca_dev) + }; + + switch (sd->sensor) { ++ case SENSOR_ADCM1700: ++ gamma_base = gamma_spec_0; ++ break; + case SENSOR_HV7131R: + case SENSOR_MT9V111: + gamma_base = gamma_spec_1; +@@ -1689,14 +1877,25 @@ static void setvflip(struct sd *sd) + i2c_w1(&sd->gspca_dev, 0x75, comn); + } + ++static void setsharpness(struct sd *sd) ++{ ++ reg_w1(&sd->gspca_dev, 0x99, sd->sharpness); ++} ++ + static void setinfrared(struct sd *sd) + { + if (sd->gspca_dev.ctrl_dis & (1 << INFRARED_IDX)) + return; + /*fixme: different sequence for StarCam Clip and StarCam 370i */ ++#if 1 + /* Clip */ + i2c_w1(&sd->gspca_dev, 0x02, /* gpio */ + sd->infrared ? 0x66 : 0x64); ++#else ++/* 370i */ ++ i2c_w1(&sd->gspca_dev, 0x02, /* gpio */ ++ sd->infrared ? 
0x55 : 0x54); ++#endif + } + + static void setfreq(struct gspca_dev *gspca_dev) +@@ -1804,6 +2007,8 @@ static int sd_start(struct gspca_dev *gspca_dev) + int mode; + static const u8 C0[] = { 0x2d, 0x2d, 0x3a, 0x05, 0x04, 0x3f }; + static const u8 CA[] = { 0x28, 0xd8, 0x14, 0xec }; ++ static const u8 CA_adcm1700[] = ++ { 0x14, 0xec, 0x0a, 0xf6 }; + static const u8 CE[] = { 0x32, 0xdd, 0x2d, 0xdd }; /* MI0360 */ + static const u8 CE_ov76xx[] = + { 0x32, 0xdd, 0x32, 0xdd }; +@@ -1824,6 +2029,9 @@ static int sd_start(struct gspca_dev *gspca_dev) + i2c_w_seq(gspca_dev, sensor_init[sd->sensor]); + + switch (sd->sensor) { ++ case SENSOR_ADCM1700: ++ reg2 = 0x60; ++ break; + case SENSOR_OM6802: + reg2 = 0x71; + break; +@@ -1842,17 +2050,28 @@ static int sd_start(struct gspca_dev *gspca_dev) + reg_w1(gspca_dev, 0x12, sn9c1xx[0x12]); + reg_w1(gspca_dev, 0x13, sn9c1xx[0x13]); + reg_w1(gspca_dev, 0x18, sn9c1xx[0x18]); +- reg_w1(gspca_dev, 0xd2, 0x6a); /* DC29 */ +- reg_w1(gspca_dev, 0xd3, 0x50); ++ if (sd->sensor == SENSOR_ADCM1700) { ++ reg_w1(gspca_dev, 0xd2, 0x3a); /* AE_H_SIZE = 116 */ ++ reg_w1(gspca_dev, 0xd3, 0x30); /* AE_V_SIZE = 96 */ ++ } else { ++ reg_w1(gspca_dev, 0xd2, 0x6a); /* AE_H_SIZE = 212 */ ++ reg_w1(gspca_dev, 0xd3, 0x50); /* AE_V_SIZE = 160 */ ++ } + reg_w1(gspca_dev, 0xc6, 0x00); + reg_w1(gspca_dev, 0xc7, 0x00); +- reg_w1(gspca_dev, 0xc8, 0x50); +- reg_w1(gspca_dev, 0xc9, 0x3c); ++ if (sd->sensor == SENSOR_ADCM1700) { ++ reg_w1(gspca_dev, 0xc8, 0x2c); /* AW_H_STOP = 352 */ ++ reg_w1(gspca_dev, 0xc9, 0x24); /* AW_V_STOP = 288 */ ++ } else { ++ reg_w1(gspca_dev, 0xc8, 0x50); /* AW_H_STOP = 640 */ ++ reg_w1(gspca_dev, 0xc9, 0x3c); /* AW_V_STOP = 480 */ ++ } + reg_w1(gspca_dev, 0x18, sn9c1xx[0x18]); + switch (sd->sensor) { + case SENSOR_MT9V111: + reg17 = 0xe0; + break; ++ case SENSOR_ADCM1700: + case SENSOR_OV7630: + reg17 = 0xe2; + break; +@@ -1863,44 +2082,39 @@ static int sd_start(struct gspca_dev *gspca_dev) + break; + } + reg_w1(gspca_dev, 0x17, reg17); +-/* set reg1 was here */ +- reg_w1(gspca_dev, 0x05, sn9c1xx[5]); /* red */ +- reg_w1(gspca_dev, 0x07, sn9c1xx[7]); /* green */ +- reg_w1(gspca_dev, 0x06, sn9c1xx[6]); /* blue */ ++ ++ reg_w1(gspca_dev, 0x05, 0x00); /* red */ ++ reg_w1(gspca_dev, 0x07, 0x00); /* green */ ++ reg_w1(gspca_dev, 0x06, 0x00); /* blue */ + reg_w1(gspca_dev, 0x14, sn9c1xx[0x14]); + + setgamma(gspca_dev); + ++/*fixme: 8 times with all zeroes and 1 or 2 times with normal values */ + for (i = 0; i < 8; i++) + reg_w(gspca_dev, 0x84, reg84, sizeof reg84); + switch (sd->sensor) { ++ case SENSOR_ADCM1700: ++ case SENSOR_OV7660: ++ case SENSOR_SP80708: ++ reg_w1(gspca_dev, 0x9a, 0x05); ++ break; + case SENSOR_MT9V111: + reg_w1(gspca_dev, 0x9a, 0x07); +- reg_w1(gspca_dev, 0x99, 0x59); +- break; +- case SENSOR_OM6802: +- reg_w1(gspca_dev, 0x9a, 0x08); +- reg_w1(gspca_dev, 0x99, 0x10); + break; + case SENSOR_OV7648: + reg_w1(gspca_dev, 0x9a, 0x0a); +- reg_w1(gspca_dev, 0x99, 0x60); +- break; +- case SENSOR_OV7660: +- case SENSOR_SP80708: +- reg_w1(gspca_dev, 0x9a, 0x05); +- reg_w1(gspca_dev, 0x99, 0x59); + break; + default: + reg_w1(gspca_dev, 0x9a, 0x08); +- reg_w1(gspca_dev, 0x99, 0x59); + break; + } ++ setsharpness(sd); + + reg_w(gspca_dev, 0x84, reg84, sizeof reg84); +- reg_w1(gspca_dev, 0x05, sn9c1xx[5]); /* red */ +- reg_w1(gspca_dev, 0x07, sn9c1xx[7]); /* green */ +- reg_w1(gspca_dev, 0x06, sn9c1xx[6]); /* blue */ ++ reg_w1(gspca_dev, 0x05, 0x20); /* red */ ++ reg_w1(gspca_dev, 0x07, 0x20); /* green */ ++ reg_w1(gspca_dev, 0x06, 0x20); /* blue */ + + init 
= NULL; + mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; +@@ -1917,6 +2135,11 @@ static int sd_start(struct gspca_dev *gspca_dev) + reg1 = 0x06; /* 640x480: clk 24Mhz, video trf enable */ + reg17 = 0x61; /* 0x:20: enable sensor clock */ + switch (sd->sensor) { ++ case SENSOR_ADCM1700: ++ init = adcm1700_sensor_param1; ++ reg1 = 0x46; ++ reg17 = 0xe2; ++ break; + case SENSOR_MO4000: + if (mode) { + /* reg1 = 0x46; * 320 clk 48Mhz 60fp/s */ +@@ -1986,8 +2209,12 @@ static int sd_start(struct gspca_dev *gspca_dev) + } + + reg_w(gspca_dev, 0xc0, C0, 6); +- reg_w(gspca_dev, 0xca, CA, 4); ++ if (sd->sensor == SENSOR_ADCM1700) ++ reg_w(gspca_dev, 0xca, CA_adcm1700, 4); ++ else ++ reg_w(gspca_dev, 0xca, CA, 4); + switch (sd->sensor) { ++ case SENSOR_ADCM1700: + case SENSOR_OV7630: + case SENSOR_OV7648: + case SENSOR_OV7660: +@@ -2056,7 +2300,8 @@ static void sd_stopN(struct gspca_dev *gspca_dev) + reg_w1(gspca_dev, 0x17, sn9c1xx[0x17]); + reg_w1(gspca_dev, 0x01, sn9c1xx[1]); + reg_w1(gspca_dev, 0x01, data); +- reg_w1(gspca_dev, 0xf1, 0x00); ++ /* Don't disable sensor clock as that disables the button on the cam */ ++ /* reg_w1(gspca_dev, 0xf1, 0x01); */ + } + + static void sd_stop0(struct gspca_dev *gspca_dev) +@@ -2288,6 +2533,24 @@ static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val) + return 0; + } + ++static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ sd->sharpness = val; ++ if (gspca_dev->streaming) ++ setsharpness(sd); ++ return 0; ++} ++ ++static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->sharpness; ++ return 0; ++} ++ + static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val) + { + struct sd *sd = (struct sd *) gspca_dev; +@@ -2391,6 +2654,25 @@ static int sd_querymenu(struct gspca_dev *gspca_dev, + return -EINVAL; + } + ++#ifdef CONFIG_INPUT ++static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, ++ u8 *data, /* interrupt packet data */ ++ int len) /* interrupt packet length */ ++{ ++ int ret = -EINVAL; ++ ++ if (len == 1 && data[0] == 1) { ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); ++ input_sync(gspca_dev->input_dev); ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); ++ input_sync(gspca_dev->input_dev); ++ ret = 0; ++ } ++ ++ return ret; ++} ++#endif ++ + /* sub-driver description */ + static const struct sd_desc sd_desc = { + .name = MODULE_NAME, +@@ -2406,6 +2688,9 @@ static const struct sd_desc sd_desc = { + .get_jcomp = sd_get_jcomp, + .set_jcomp = sd_set_jcomp, + .querymenu = sd_querymenu, ++#ifdef CONFIG_INPUT ++ .int_pkt_scan = sd_int_pkt_scan, ++#endif + }; + + /* -- module initialisation -- */ +@@ -2472,6 +2762,7 @@ static const __devinitdata struct usb_device_id device_table[] = { + /* {USB_DEVICE(0x0c45, 0x6142), BS(SN9C120, PO2030N)}, *sn9c120b*/ + {USB_DEVICE(0x0c45, 0x6143), BS(SN9C120, SP80708)}, /*sn9c120b*/ + {USB_DEVICE(0x0c45, 0x6148), BS(SN9C120, OM6802)}, /*sn9c120b*/ ++ {USB_DEVICE(0x0c45, 0x614a), BS(SN9C120, ADCM1700)}, /*sn9c120b*/ + {} + }; + MODULE_DEVICE_TABLE(usb, device_table); +diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c +index fe46868..29e1699 100644 +--- a/drivers/media/video/gspca/spca500.c ++++ b/drivers/media/video/gspca/spca500.c +@@ -68,7 +68,7 @@ static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val); + static int 
sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + { + { + .id = V4L2_CID_BRIGHTNESS, +@@ -153,6 +153,23 @@ static const struct v4l2_pix_format sif_mode[] = { + #define SPCA500_OFFSET_AUGPIO 12 + #define SPCA500_OFFSET_DATA 16 + ++#if 0 ++static const __u16 spca500_read_stats[][3] = { ++ {0x0c, 0x0000, 0x0000}, ++ {0x30, 0x03fd, 0x0001}, ++ /* possible values for following call: 0x01b3, 0x01e6, 0x01f7, 0x0218 */ ++ {0x30, 0x01b3, 0x0002}, ++ /* possible values for following call: 0x0000, 0x0001, 0x0002 */ ++ {0x30, 0x0000, 0x0003}, ++ {0x30, 0x003b, 0x0004}, ++ /* possible values for following call: 0x00aa, 0x00e0 */ ++ {0x30, 0x00e0, 0x0005}, ++ {0x30, 0x0001, 0x0006}, ++ {0x30, 0x0080, 0x0007}, ++ {0x30, 0x0004, 0x0000}, ++ {} ++}; ++#endif + + static const __u16 spca500_visual_defaults[][3] = { + {0x00, 0x0003, 0x816b}, /* SSI not active sync with vsync, +@@ -1047,7 +1064,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev, + } + + /* sub-driver description */ +-static struct sd_desc sd_desc = { ++static const struct sd_desc sd_desc = { + .name = MODULE_NAME, + .ctrls = sd_ctrls, + .nctrls = ARRAY_SIZE(sd_ctrls), +diff --git a/drivers/media/video/gspca/spca501.c b/drivers/media/video/gspca/spca501.c +index 6761a30..543f4db 100644 +--- a/drivers/media/video/gspca/spca501.c ++++ b/drivers/media/video/gspca/spca501.c +@@ -59,7 +59,7 @@ static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + #define MY_BRIGHTNESS 0 + { + { +@@ -1856,6 +1856,31 @@ static int reg_write(struct usb_device *dev, + return ret; + } + ++#if 0 ++/* returns: negative is error, pos or zero is data */ ++static int reg_read(struct gspca_dev *gspca_dev, ++ __u16 req, /* bRequest */ ++ __u16 index, /* wIndex */ ++ __u16 length) /* wLength (1 or 2 only) */ ++{ ++ int ret; ++ ++ gspca_dev->usb_buf[1] = 0; ++ ret = usb_control_msg(gspca_dev->dev, ++ usb_rcvctrlpipe(gspca_dev->dev, 0), ++ req, ++ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ++ 0, /* value */ ++ index, ++ gspca_dev->usb_buf, length, ++ 500); /* timeout */ ++ if (ret < 0) { ++ PDEBUG(D_ERR, "reg_read err %d", ret); ++ return -1; ++ } ++ return (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0]; ++} ++#endif + + static int write_vector(struct gspca_dev *gspca_dev, + const __u16 data[][3]) +diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c +index 0f9232f..c576eed 100644 +--- a/drivers/media/video/gspca/spca505.c ++++ b/drivers/media/video/gspca/spca505.c +@@ -42,7 +42,7 @@ struct sd { + static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + { + { + .id = V4L2_CID_BRIGHTNESS, +diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c +index 39257e4..89fec4c 100644 +--- a/drivers/media/video/gspca/spca506.c ++++ b/drivers/media/video/gspca/spca506.c +@@ -51,7 +51,7 @@ static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val); + static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls[] = { 
++static const struct ctrl sd_ctrls[] = { + #define SD_BRIGHTNESS 0 + { + { +@@ -673,7 +673,7 @@ static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val) + } + + /* sub-driver description */ +-static struct sd_desc sd_desc = { ++static const struct sd_desc sd_desc = { + .name = MODULE_NAME, + .ctrls = sd_ctrls, + .nctrls = ARRAY_SIZE(sd_ctrls), +diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c +index 4d8e6cf..b202e62 100644 +--- a/drivers/media/video/gspca/spca508.c ++++ b/drivers/media/video/gspca/spca508.c +@@ -45,7 +45,7 @@ struct sd { + static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + { + { + .id = V4L2_CID_BRIGHTNESS, +@@ -86,6 +86,26 @@ static const struct v4l2_pix_format sif_mode[] = { + }; + + /* Frame packet header offsets for the spca508 */ ++#if 0 ++#define SPCA508_OFFSET_ ++#define SPCA508_OFFSET_TYPE 1 ++#define SPCA508_IMAGE_TYPE_MASK 0x1f ++#define SPCA508_SNAPBIT 0x20 ++#define SPCA508_SNAPCTRL 0x40 ++#define SPCA508_OFFSET_COMPRESS 2 ++#define SPCA508_OFFSET_FRAMSEQ 8 ++#define SPCA508_OFFSET_GPIO 9 ++#define SPCA508_OFFSET_GLBINF 10 ++#define SPCA508_OFFSET_WIN1LUM 11 ++#define SPCA508_OFFSET_WIN2LUM 12 ++#define SPCA508_OFFSET_WIN1RG 13 ++#define SPCA508_OFFSET_WIN1BG 14 ++#define SPCA508_OFFSET_WIN2RG 15 ++#define SPCA508_OFFSET_WIN2BG 16 ++#define SPCA508_OFFSET_21 21 ++#define SPCA508_GAMMAEN 0x01 ++#define SPCA508_EDGEMODE 0x02 ++#endif + #define SPCA508_OFFSET_DATA 37 + + /* +@@ -423,7 +443,24 @@ static const u16 spca508_init_data[][2] = + {0x007d, 0x8800}, + /* READ { 0x0001, 0x8803 } -> 0000: 00 */ + ++#if 0 ++ /* experimental. dark version. */ ++ {0xba, 0x8705}, /* total pixel clocks per hsync cycle (L) */ ++ {0x00, 0x8706}, /* total pixel clocks per hsync cycle (H in 2:0) */ ++ {0x5a, 0x8707}, /* total pixel clocks per hsync blank period (L) */ ++#elif 0 ++ /* experimental. factory default. */ ++ {0x8e, 0x8705}, /* total pixel clocks per hsync cycle (L) */ ++ {0x03, 0x8706}, /* total pixel clocks per hsync cycle (H in 2:0) */ ++ {0x5a, 0x8707}, /* total pixel clocks per hsync blank period (L) */ ++#elif 0 ++ /* experimental. light. */ ++ {0xba, 0x8705}, /* total pixel clocks per hsync cycle (L) */ ++ {0x01, 0x8706}, /* total pixel clocks per hsync cycle (H in 2:0) */ ++ {0x10, 0x8707}, /* total pixel clocks per hsync blank period (L) */ ++#endif + ++#if 1 + /* This chunk is seemingly redundant with */ + /* earlier commands (A11 Coef...), but if I disable it, */ + /* the image appears too dark. Maybe there was some kind of */ +@@ -438,6 +475,7 @@ static const u16 spca508_init_data[][2] = + {0xffdc, 0x860f}, + {0x0039, 0x8610}, + {0x0018, 0x8657}, ++#endif + + {0x0000, 0x8508}, /* Disable compression. */ + /* Previous line was: +@@ -480,8 +518,16 @@ static const u16 spca508_init_data[][2] = + /* READ { 0x0001, 0x8803 } -> 0000: 00 */ + {0x0001, 0x8602}, /* optical black level for user settning = 1 */ + ++#if 0 ++ /* NOTE: Code like this case lets this driver (often) work */ ++ /* in 352x288 resolution, apparently by slowing down the */ ++ /* clock. 
*/ ++ ++ {0x002f, 0x8700}, /* Clock speed */ ++#else + /* Original: */ + {0x0023, 0x8700}, /* Clock speed 48Mhz/(3+2)/4= 2.4 Mhz */ ++#endif + {0x000f, 0x8602}, /* optical black level for user settning = 15 */ + + {0x0028, 0x8802}, +diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c +index 58c2f00..85eeb51 100644 +--- a/drivers/media/video/gspca/spca561.c ++++ b/drivers/media/video/gspca/spca561.c +@@ -190,16 +190,36 @@ static const __u16 rev72a_init_data2[][2] = { + {0x0002, 0x8201}, /* Output address for r/w serial EEPROM */ + {0x0008, 0x8200}, /* Clear valid bit for serial EEPROM */ + {0x0001, 0x8200}, /* OprMode to be executed by hardware */ ++#if 0 ++ {0x0007, 0x8201}, /* Output address for r/w serial EEPROM */ ++ {0x0008, 0x8200}, /* Clear valid bit for serial EEPROM */ ++ {0x0001, 0x8200}, /* OprMode to be executed by hardware */ ++ {0x0010, 0x8660}, /* Compensation memory stuff */ ++ {0x0018, 0x8660}, /* Compensation memory stuff */ ++#endif ++#if 1 + /* from ms-win */ + {0x0000, 0x8611}, /* R offset for white balance */ + {0x00fd, 0x8612}, /* Gr offset for white balance */ + {0x0003, 0x8613}, /* B offset for white balance */ ++#else ++ {0x0004, 0x8611}, /* R offset for white balance */ ++ {0x0004, 0x8612}, /* Gr offset for white balance */ ++ {0x0007, 0x8613}, /* B offset for white balance */ ++#endif + {0x0000, 0x8614}, /* Gb offset for white balance */ ++#if 1 + /* from ms-win */ + {0x0035, 0x8651}, /* R gain for white balance */ + {0x0040, 0x8652}, /* Gr gain for white balance */ + {0x005f, 0x8653}, /* B gain for white balance */ + {0x0040, 0x8654}, /* Gb gain for white balance */ ++#else ++ {0x008c, 0x8651}, /* R gain for white balance */ ++ {0x008c, 0x8652}, /* Gr gain for white balance */ ++ {0x00b5, 0x8653}, /* B gain for white balance */ ++ {0x008c, 0x8654}, /* Gb gain for white balance */ ++#endif + {0x0002, 0x8502}, /* Maximum average bit rate stuff */ + {0x0011, 0x8802}, + +@@ -224,6 +244,17 @@ static const u16 rev72a_init_sensor2[][2] = { + {0x0035, 0x0014}, + {} + }; ++#if 0 ++static const __u16 rev72a_init_data3[][2] = { ++ {0x0087, 0x8700}, /* overwrite by start */ ++ {0x0081, 0x8702}, ++ {0x0000, 0x8500}, ++/* {0x0010, 0x8500}, -- Previous line was this */ ++ {0x0002, 0x865b}, ++ {0x0003, 0x865c}, ++ {} ++}; ++#endif + + /******************** QC Express etch2 stuff ********************/ + static const __u16 Pb100_1map8300[][2] = { +@@ -922,7 +953,7 @@ static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val) + } + + /* control tables */ +-static struct ctrl sd_ctrls_12a[] = { ++static const struct ctrl sd_ctrls_12a[] = { + { + { + .id = V4L2_CID_HUE, +@@ -964,7 +995,7 @@ static struct ctrl sd_ctrls_12a[] = { + }, + }; + +-static struct ctrl sd_ctrls_72a[] = { ++static const struct ctrl sd_ctrls_72a[] = { + { + { + .id = V4L2_CID_HUE, +diff --git a/drivers/media/video/gspca/sq905c.c b/drivers/media/video/gspca/sq905c.c +index d70b156..e646620 100644 +--- a/drivers/media/video/gspca/sq905c.c ++++ b/drivers/media/video/gspca/sq905c.c +@@ -47,6 +47,7 @@ MODULE_LICENSE("GPL"); + + /* Commands. These go in the "value" slot. 
*/ + #define SQ905C_CLEAR 0xa0 /* clear everything */ ++#define SQ905C_GET_ID 0x14f4 /* Read version number */ + #define SQ905C_CAPTURE_LOW 0xa040 /* Starts capture at 160x120 */ + #define SQ905C_CAPTURE_MED 0x1440 /* Starts capture at 320x240 */ + #define SQ905C_CAPTURE_HI 0x2840 /* Starts capture at 320x240 */ +@@ -101,6 +102,26 @@ static int sq905c_command(struct gspca_dev *gspca_dev, u16 command, u16 index) + return 0; + } + ++static int sq905c_read(struct gspca_dev *gspca_dev, u16 command, u16 index, ++ int size) ++{ ++ int ret; ++ ++ ret = usb_control_msg(gspca_dev->dev, ++ usb_rcvctrlpipe(gspca_dev->dev, 0), ++ USB_REQ_SYNCH_FRAME, /* request */ ++ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ++ command, index, gspca_dev->usb_buf, size, ++ SQ905C_CMD_TIMEOUT); ++ if (ret < 0) { ++ PDEBUG(D_ERR, "%s: usb_control_msg failed (%d)", ++ __func__, ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ + /* This function is called as a workqueue function and runs whenever the camera + * is streaming data. Because it is a workqueue function it is allowed to sleep + * so we can use synchronous USB calls. To avoid possible collisions with other +@@ -183,13 +204,34 @@ static int sd_config(struct gspca_dev *gspca_dev, + { + struct cam *cam = &gspca_dev->cam; + struct sd *dev = (struct sd *) gspca_dev; ++ int ret; + + PDEBUG(D_PROBE, + "SQ9050 camera detected" + " (vid/pid 0x%04X:0x%04X)", id->idVendor, id->idProduct); ++ ++ ret = sq905c_command(gspca_dev, SQ905C_GET_ID, 0); ++ if (ret < 0) { ++ PDEBUG(D_ERR, "Get version command failed"); ++ return ret; ++ } ++ ++ ret = sq905c_read(gspca_dev, 0xf5, 0, 20); ++ if (ret < 0) { ++ PDEBUG(D_ERR, "Reading version command failed"); ++ return ret; ++ } ++ /* Note we leave out the usb id and the manufacturing date */ ++ PDEBUG(D_PROBE, ++ "SQ9050 ID string: %02x - %02x %02x %02x %02x %02x %02x", ++ gspca_dev->usb_buf[3], ++ gspca_dev->usb_buf[14], gspca_dev->usb_buf[15], ++ gspca_dev->usb_buf[16], gspca_dev->usb_buf[17], ++ gspca_dev->usb_buf[18], gspca_dev->usb_buf[19]); ++ + cam->cam_mode = sq905c_mode; + cam->nmodes = 2; +- if (id->idProduct == 0x9050) ++ if (gspca_dev->usb_buf[15] == 0) + cam->nmodes = 1; + /* We don't use the buffer gspca allocates so make it small. 
*/ + cam->bulk_size = 32; +@@ -258,6 +300,7 @@ static int sd_start(struct gspca_dev *gspca_dev) + static const __devinitdata struct usb_device_id device_table[] = { + {USB_DEVICE(0x2770, 0x905c)}, + {USB_DEVICE(0x2770, 0x9050)}, ++ {USB_DEVICE(0x2770, 0x9052)}, + {USB_DEVICE(0x2770, 0x913d)}, + {} + }; +diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c +index 2e29355..0fb5342 100644 +--- a/drivers/media/video/gspca/stk014.c ++++ b/drivers/media/video/gspca/stk014.c +@@ -53,7 +53,7 @@ static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + { + { + .id = V4L2_CID_BRIGHTNESS, +diff --git a/drivers/media/video/gspca/stv0680.c b/drivers/media/video/gspca/stv0680.c +index 2a69d7c..c3743c9 100644 +--- a/drivers/media/video/gspca/stv0680.c ++++ b/drivers/media/video/gspca/stv0680.c +@@ -45,7 +45,7 @@ struct sd { + }; + + /* V4L2 controls supported by the driver */ +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + }; + + static int stv_sndctrl(struct gspca_dev *gspca_dev, int set, u8 req, u16 val, +@@ -53,24 +53,28 @@ static int stv_sndctrl(struct gspca_dev *gspca_dev, int set, u8 req, u16 val, + { + int ret = -1; + u8 req_type = 0; ++ unsigned int pipe = 0; + + switch (set) { + case 0: /* 0xc1 */ + req_type = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT; ++ pipe = usb_rcvctrlpipe(gspca_dev->dev, 0); + break; + case 1: /* 0x41 */ + req_type = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT; ++ pipe = usb_sndctrlpipe(gspca_dev->dev, 0); + break; + case 2: /* 0x80 */ + req_type = USB_DIR_IN | USB_RECIP_DEVICE; ++ pipe = usb_rcvctrlpipe(gspca_dev->dev, 0); + break; + case 3: /* 0x40 */ + req_type = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE; ++ pipe = usb_sndctrlpipe(gspca_dev->dev, 0); + break; + } + +- ret = usb_control_msg(gspca_dev->dev, +- usb_rcvctrlpipe(gspca_dev->dev, 0), ++ ret = usb_control_msg(gspca_dev->dev, pipe, + req, req_type, + val, 0, gspca_dev->usb_buf, size, 500); + +@@ -138,6 +142,10 @@ static int sd_config(struct gspca_dev *gspca_dev, + struct sd *sd = (struct sd *) gspca_dev; + struct cam *cam = &gspca_dev->cam; + ++ /* Give the camera some time to settle, otherwise initialization will ++ fail on hotplug, and yes it really needs a full second. 
*/ ++ msleep(1000); ++ + /* ping camera to be sure STV0680 is present */ + if (stv_sndctrl(gspca_dev, 0, 0x88, 0x5678, 0x02) != 0x02 || + gspca_dev->usb_buf[0] != 0x56 || gspca_dev->usb_buf[1] != 0x78) { +@@ -169,6 +177,8 @@ static int sd_config(struct gspca_dev *gspca_dev, + PDEBUG(D_PROBE, "Camera supports CIF mode"); + if (gspca_dev->usb_buf[7] & 0x02) + PDEBUG(D_PROBE, "Camera supports VGA mode"); ++ if (gspca_dev->usb_buf[7] & 0x04) ++ PDEBUG(D_PROBE, "Camera supports QCIF mode"); + if (gspca_dev->usb_buf[7] & 0x08) + PDEBUG(D_PROBE, "Camera supports QVGA mode"); + +@@ -185,6 +195,19 @@ static int sd_config(struct gspca_dev *gspca_dev, + PDEBUG(D_PROBE, "Sensor ID is %i", + (gspca_dev->usb_buf[4]*16) + (gspca_dev->usb_buf[5]>>4)); + ++#if 0 /* The v4l1 driver used to do this but I don't think it is necessary */ ++ ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1); ++ if (ret < 0) { ++ PDEBUG(D_ERR, "Set alt 1 failed (%d)", ret); ++ return ret; ++ } ++ ++ if (stv_sndctrl(gspca_dev, 0, 0x85, 0, 0x10) != 0x10) ++ return stv0680_handle_error(gspca_dev, -EIO); ++ if (stv_sndctrl(gspca_dev, 0, 0x8d, 0, 0x08) != 0x08) ++ return stv0680_handle_error(gspca_dev, -EIO); ++ PDEBUG(D_PROBE, "Camera has %i pictures.", gspca_dev->usb_buf[3]); ++#endif + + ret = stv0680_get_video_mode(gspca_dev); + if (ret < 0) +@@ -220,6 +243,13 @@ static int sd_config(struct gspca_dev *gspca_dev, + cam->cam_mode = &sd->mode; + cam->nmodes = 1; + ++#if 0 /* The v4l1 driver used to do this but I don't think it is necessary */ ++ ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 0); ++ if (ret < 0) { ++ PDEBUG(D_ERR, "Set alt 0 failed (%d)", ret); ++ return ret; ++ } ++#endif + + ret = stv0680_set_video_mode(gspca_dev, sd->orig_mode); + if (ret < 0) +diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c +index 5d0241b..af73da3 100644 +--- a/drivers/media/video/gspca/stv06xx/stv06xx.c ++++ b/drivers/media/video/gspca/stv06xx/stv06xx.c +@@ -27,6 +27,7 @@ + * P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web + */ + ++#include <linux/input.h> + #include "stv06xx_sensor.h" + + MODULE_AUTHOR("Erik Andrén"); +@@ -219,6 +220,7 @@ static void stv06xx_dump_bridge(struct sd *sd) + info("Read 0x%x from address 0x%x", data, i); + } + ++ info("Testing stv06xx bridge registers for writability"); + for (i = 0x1400; i < 0x160f; i++) { + stv06xx_read_bridge(sd, i, &data); + buf = data; +@@ -229,7 +231,7 @@ static void stv06xx_dump_bridge(struct sd *sd) + info("Register 0x%x is read/write", i); + else if (data != buf) + info("Register 0x%x is read/write," +- "but only partially", i); ++ " but only partially", i); + else + info("Register 0x%x is read-only", i); + +@@ -426,6 +428,29 @@ frame_data: + } + } + ++#ifdef CONFIG_INPUT ++static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, ++ u8 *data, /* interrupt packet data */ ++ int len) /* interrupt packet length */ ++{ ++ int ret = -EINVAL; ++ ++ if (len == 1 && data[0] == 0x80) { ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); ++ input_sync(gspca_dev->input_dev); ++ ret = 0; ++ } ++ ++ if (len == 1 && data[0] == 0x88) { ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); ++ input_sync(gspca_dev->input_dev); ++ ret = 0; ++ } ++ ++ return ret; ++} ++#endif ++ + static int stv06xx_config(struct gspca_dev *gspca_dev, + const struct usb_device_id *id); + +@@ -436,7 +461,10 @@ static const struct sd_desc sd_desc = { + .init = stv06xx_init, + .start = stv06xx_start, + .stopN = stv06xx_stopN, +- .pkt_scan = 
stv06xx_pkt_scan ++ .pkt_scan = stv06xx_pkt_scan, ++#ifdef CONFIG_INPUT ++ .int_pkt_scan = sd_int_pkt_scan, ++#endif + }; + + /* This function is called at probe time */ +diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c b/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c +index 285221e..180f52c 100644 +--- a/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c ++++ b/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c +@@ -150,6 +150,28 @@ static const struct ctrl pb0100_ctrl[] = { + static struct v4l2_pix_format pb0100_mode[] = { + /* low res / subsample modes disabled as they are only half res horizontal, + halving the vertical resolution does not seem to work */ ++#if 0 ++ { ++ 160, ++ 120, ++ V4L2_PIX_FMT_SGRBG8, ++ V4L2_FIELD_NONE, ++ .sizeimage = 160 * 120 * 2, ++ .bytesperline = 160, ++ .colorspace = V4L2_COLORSPACE_SRGB, ++ .priv = PB0100_CROP_TO_VGA | PB0100_SUBSAMPLE ++ }, ++ { ++ 176, ++ 144, ++ V4L2_PIX_FMT_SGRBG8, ++ V4L2_FIELD_NONE, ++ .sizeimage = 176 * 144 * 2, ++ .bytesperline = 176, ++ .colorspace = V4L2_COLORSPACE_SRGB, ++ .priv = PB0100_SUBSAMPLE ++ }, ++#endif + { + 320, + 240, +diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c +index 306b7d7..378c4dd 100644 +--- a/drivers/media/video/gspca/sunplus.c ++++ b/drivers/media/video/gspca/sunplus.c +@@ -67,7 +67,7 @@ static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + { + { + .id = V4L2_CID_BRIGHTNESS, +@@ -207,6 +207,13 @@ static const struct cmd spca504_pccam600_init_data[] = { + {0x00, 0x0000, 0x21ad}, /* hue */ + {0x00, 0x001a, 0x21ae}, /* saturation */ + {0x00, 0x0002, 0x21a3}, /* gamma */ ++#if 0 ++ {0xb0, 0x0000, 0x0000}, /* reset auto exposure */ ++ {0x0c, 0x0000, 0x0000}, /* reset auto whiteness */ ++ {0x0c, 0x0004, 0x0000}, /* enable auto whiteness */ ++ {0x30, 0x020f, 0x0001}, /* exposure compensation */ ++ {0x30, 0x01f7, 0x0002}, /* whiteness balance */ ++#endif + {0x30, 0x0154, 0x0008}, + {0x30, 0x0004, 0x0006}, + {0x30, 0x0258, 0x0009}, +@@ -244,10 +251,20 @@ static const struct cmd spca504A_clicksmart420_init_data[] = { + {0x00, 0x0000, 0x21ad}, /* hue */ + {0x00, 0x001a, 0x21ae}, /* saturation */ + {0x00, 0x0002, 0x21a3}, /* gamma */ ++#if 1 + {0x30, 0x0004, 0x000a}, + {0xb0, 0x0001, 0x0000}, ++#endif + ++#if 0 ++ {0xb0, 0x0000, 0x0000}, /* reset auto exposure */ ++ {0x0c, 0x0000, 0x0000}, /* reset auto whiteness */ ++ {0x0c, 0x0004, 0x0000}, /* enable auto whiteness */ ++ {0x30, 0x020f, 0x0001}, /* exposure compensation */ ++ {0x30, 0x01f7, 0x0002}, /* whiteness balance */ ++#endif + ++#if 1 + {0xa1, 0x0080, 0x0001}, + {0x30, 0x0049, 0x0000}, + {0x30, 0x0060, 0x0005}, +@@ -257,6 +274,7 @@ static const struct cmd spca504A_clicksmart420_init_data[] = { + {0x00, 0x0013, 0x2301}, + {0x00, 0x0003, 0x2000}, + {0x00, 0x0000, 0x2000}, ++#endif + + }; + +@@ -267,6 +285,8 @@ static const struct cmd spca504A_clicksmart420_open_data[] = { + {0x06, 0x0000, 0x0000}, + {0x00, 0x0004, 0x2880}, + {0x00, 0x0001, 0x2881}, ++#if 0 ++/*jfm: overloaded by setup_qtable()*/ + /* look like setting a qTable */ + {0x00, 0x0006, 0x2800}, + {0x00, 0x0004, 0x2801}, +@@ -403,6 +423,7 @@ static const struct cmd spca504A_clicksmart420_open_data[] = { + {0x00, 0x0028, 0x287d}, + {0x00, 0x0028, 0x287e}, + {0x00, 0x0028, 0x287f}, ++#endif + + {0xa0, 0x0000, 
0x0503}, + }; +@@ -622,6 +643,20 @@ static void spca504_acknowledged_command(struct gspca_dev *gspca_dev, + PDEBUG(D_FRAM, "after wait 0x%04x", notdone); + } + ++static void spca504_read_info(struct gspca_dev *gspca_dev) ++{ ++ int i; ++ u8 info[6]; ++ ++ for (i = 0; i < 6; i++) ++ info[i] = reg_r_1(gspca_dev, i); ++ PDEBUG(D_STREAM, ++ "Read info: %d %d %d %d %d %d." ++ " Should be 1,0,2,2,0,0", ++ info[0], info[1], info[2], ++ info[3], info[4], info[5]); ++} ++ + static void spca504A_acknowledged_command(struct gspca_dev *gspca_dev, + u8 req, + u16 idx, u16 val, u16 endcode, u8 count) +@@ -881,8 +916,6 @@ static int sd_config(struct gspca_dev *gspca_dev, + static int sd_init(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +- int i; +- u8 info[6]; + + switch (sd->bridge) { + case BRIDGE_SPCA504B: +@@ -924,15 +957,8 @@ static int sd_init(struct gspca_dev *gspca_dev) + /* case BRIDGE_SPCA504: */ + PDEBUG(D_STREAM, "Opening SPCA504"); + if (sd->subtype == AiptekMiniPenCam13) { +- /*****************************/ +- for (i = 0; i < 6; i++) +- info[i] = reg_r_1(gspca_dev, i); +- PDEBUG(D_STREAM, +- "Read info: %d %d %d %d %d %d." +- " Should be 1,0,2,2,0,0", +- info[0], info[1], info[2], +- info[3], info[4], info[5]); +- /* spca504a aiptek */ ++ spca504_read_info(gspca_dev); ++ + /* Set AE AWB Banding Type 3-> 50Hz 2-> 60Hz */ + spca504A_acknowledged_command(gspca_dev, 0x24, + 8, 3, 0x9e, 1); +@@ -971,8 +997,6 @@ static int sd_start(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + int enable; +- int i; +- u8 info[6]; + + /* create the JPEG header */ + sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); +@@ -1008,14 +1032,8 @@ static int sd_start(struct gspca_dev *gspca_dev) + break; + case BRIDGE_SPCA504: + if (sd->subtype == AiptekMiniPenCam13) { +- for (i = 0; i < 6; i++) +- info[i] = reg_r_1(gspca_dev, i); +- PDEBUG(D_STREAM, +- "Read info: %d %d %d %d %d %d." +- " Should be 1,0,2,2,0,0", +- info[0], info[1], info[2], +- info[3], info[4], info[5]); +- /* spca504a aiptek */ ++ spca504_read_info(gspca_dev); ++ + /* Set AE AWB Banding Type 3-> 50Hz 2-> 60Hz */ + spca504A_acknowledged_command(gspca_dev, 0x24, + 8, 3, 0x9e, 1); +@@ -1026,13 +1044,7 @@ static int sd_start(struct gspca_dev *gspca_dev) + 0, 0, 0x9d, 1); + } else { + spca504_acknowledged_command(gspca_dev, 0x24, 8, 3); +- for (i = 0; i < 6; i++) +- info[i] = reg_r_1(gspca_dev, i); +- PDEBUG(D_STREAM, +- "Read info: %d %d %d %d %d %d." 
+- " Should be 1,0,2,2,0,0", +- info[0], info[1], info[2], +- info[3], info[4], info[5]); ++ spca504_read_info(gspca_dev); + spca504_acknowledged_command(gspca_dev, 0x24, 8, 3); + spca504_acknowledged_command(gspca_dev, 0x24, 0, 0); + } +@@ -1336,6 +1348,7 @@ static const __devinitdata struct usb_device_id device_table[] = { + {USB_DEVICE(0x04fc, 0x5330), BS(SPCA533, 0)}, + {USB_DEVICE(0x04fc, 0x5360), BS(SPCA536, 0)}, + {USB_DEVICE(0x04fc, 0xffff), BS(SPCA504B, 0)}, ++ {USB_DEVICE(0x052b, 0x1507), BS(SPCA533, MegapixV4)}, + {USB_DEVICE(0x052b, 0x1513), BS(SPCA533, MegapixV4)}, + {USB_DEVICE(0x052b, 0x1803), BS(SPCA533, MegaImageVI)}, + {USB_DEVICE(0x0546, 0x3155), BS(SPCA533, 0)}, +diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c +index 55ef6a7..07002d3 100644 +--- a/drivers/media/video/gspca/t613.c ++++ b/drivers/media/video/gspca/t613.c +@@ -52,6 +52,7 @@ struct sd { + #define SENSOR_OM6802 0 + #define SENSOR_OTHER 1 + #define SENSOR_TAS5130A 2 ++#define SENSOR_LT168G 3 /* must verify if this is the actual model */ + }; + + /* V4L2 controls supported by the driver */ +@@ -78,7 +79,7 @@ static int sd_geteffect(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_querymenu(struct gspca_dev *gspca_dev, + struct v4l2_querymenu *menu); + +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + { + { + .id = V4L2_CID_BRIGHTNESS, +@@ -306,6 +307,17 @@ static const u8 n4_tas5130a[] = { + 0xbe, 0x36, 0xbf, 0xff, 0xc2, 0x88, 0xc5, 0xc8, + 0xc6, 0xda + }; ++static const u8 n4_lt168g[] = { ++ 0x66, 0x01, 0x7f, 0x00, 0x80, 0x7c, 0x81, 0x28, ++ 0x83, 0x44, 0x84, 0x20, 0x86, 0x20, 0x8a, 0x70, ++ 0x8b, 0x58, 0x8c, 0x88, 0x8d, 0xa0, 0x8e, 0xb3, ++ 0x8f, 0x24, 0xa1, 0xb0, 0xa2, 0x38, 0xa5, 0x20, ++ 0xa6, 0x4a, 0xa8, 0xe8, 0xaf, 0x38, 0xb0, 0x68, ++ 0xb1, 0x44, 0xb2, 0x88, 0xbb, 0x86, 0xbd, 0x40, ++ 0xbe, 0x26, 0xc1, 0x05, 0xc2, 0x88, 0xc5, 0xc0, ++ 0xda, 0x8e, 0xdb, 0xca, 0xdc, 0xa8, 0xdd, 0x8c, ++ 0xde, 0x44, 0xdf, 0x0c, 0xe9, 0x80 ++}; + + static const struct additional_sensor_data sensor_data[] = { + { /* 0: OM6802 */ +@@ -317,16 +329,35 @@ static const struct additional_sensor_data sensor_data[] = { + .reg8e = 0x33, + .nset8 = {0xa8, 0xf0, 0xc6, 0x88, 0xc0, 0x00}, + .data1 = ++#if 1 + {0xc2, 0x28, 0x0f, 0x22, 0xcd, 0x27, 0x2c, 0x06, + 0xb3, 0xfc}, ++#else ++ {0xbb, 0x28, 0x10, 0x10, 0xbb, 0x28, 0x1e, 0x27, ++ 0xc8, 0xfc}, ++#endif + .data2 = ++#if 1 + {0x80, 0xff, 0xff, 0x80, 0xff, 0xff, 0x80, 0xff, + 0xff}, ++#else ++ {0x60, 0xa8, 0xe0, 0x60, 0xa8, 0xe0, 0x60, 0xa8, ++ 0xe0}, ++#endif + .data3 = ++#if 1 + {0x80, 0xff, 0xff, 0x80, 0xff, 0xff, 0x80, 0xff, + 0xff}, ++#else ++ {0x60, 0xa8, 0xe0, 0x60, 0xa8, 0xe0, 0x60, 0xa8, ++ 0xe0}, ++#endif + .data4 = /*Freq (50/60Hz). 
Splitted for test purpose */ ++#if 0 ++ {0x66, 0x8a, 0xa8, 0xe8}, ++#else + {0x66, 0xca, 0xa8, 0xf0}, ++#endif + .data5 = /* this could be removed later */ + {0x0c, 0x03, 0xab, 0x13, 0x81, 0x23}, + .stream = +@@ -365,21 +396,61 @@ static const struct additional_sensor_data sensor_data[] = { + .reg8e = 0xb4, + .nset8 = {0xa8, 0xf0, 0xc6, 0xda, 0xc0, 0x00}, + .data1 = ++#if 1 + {0xbb, 0x28, 0x10, 0x10, 0xbb, 0x28, 0x1e, 0x27, + 0xc8, 0xfc}, ++#else ++ {0xf3, 0xa1, 0x0a, 0x2f, 0xbb, 0x2e, 0x21, 0x68, ++ 0xea, 0xbf}, ++#endif + .data2 = ++#if 1 + {0x60, 0xa8, 0xe0, 0x60, 0xa8, 0xe0, 0x60, 0xa8, + 0xe0}, ++#else ++ {0x40, 0x80, 0xc0, 0x40, 0x80, 0xc0, 0x4d, 0x97, ++ 0xe4}, ++#endif + .data3 = ++#if 1 + {0x60, 0xa8, 0xe0, 0x60, 0xa8, 0xe0, 0x60, 0xa8, + 0xe0}, ++#else ++ {0x40, 0x80, 0xc0, 0x41, 0x81, 0xc3, 0x5c, 0x99, ++ 0xe0}, ++#endif + .data4 = /* Freq (50/60Hz). Splitted for test purpose */ ++#if 1 + {0x66, 0x00, 0xa8, 0xe8}, ++#else ++ {0x66, 0x40, 0xa8, 0xf0}, ++#endif + .data5 = ++#if 0 ++ {0x0c, 0x03, 0xab, 0x08, 0x81, 0x68}, ++#else + {0x0c, 0x03, 0xab, 0x10, 0x81, 0x20}, ++#endif + .stream = + {0x0b, 0x04, 0x0a, 0x40}, + }, ++ { /* 3: LT168G */ ++ .n3 = {0x61, 0xc2, 0x65, 0x68, 0x60, 0x00}, ++ .n4 = n4_lt168g, ++ .n4sz = sizeof n4_lt168g, ++ .reg80 = 0x7c, ++ .reg8e = 0xb3, ++ .nset8 = {0xa8, 0xf0, 0xc6, 0xba, 0xc0, 0x00}, ++ .data1 = {0xc0, 0x38, 0x08, 0x10, 0xc0, 0x30, 0x10, 0x40, ++ 0xb0, 0xf4}, ++ .data2 = {0x40, 0x80, 0xc0, 0x50, 0xa0, 0xf0, 0x53, 0xa6, ++ 0xff}, ++ .data3 = {0x40, 0x80, 0xc0, 0x50, 0xa0, 0xf0, 0x53, 0xa6, ++ 0xff}, ++ .data4 = {0x66, 0x41, 0xa8, 0xf0}, ++ .data5 = {0x0c, 0x03, 0xab, 0x4b, 0x81, 0x2b}, ++ .stream = {0x0b, 0x04, 0x0a, 0x28}, ++ }, + }; + + #define MAX_EFFECTS 7 +@@ -716,6 +787,10 @@ static int sd_init(struct gspca_dev *gspca_dev) + PDEBUG(D_PROBE, "sensor tas5130a"); + sd->sensor = SENSOR_TAS5130A; + break; ++ case 0x0802: ++ PDEBUG(D_PROBE, "sensor lt168g"); ++ sd->sensor = SENSOR_LT168G; ++ break; + case 0x0803: + PDEBUG(D_PROBE, "sensor 'other'"); + sd->sensor = SENSOR_OTHER; +@@ -758,6 +833,13 @@ static int sd_init(struct gspca_dev *gspca_dev) + reg_w_buf(gspca_dev, sensor->n3, sizeof sensor->n3); + reg_w_buf(gspca_dev, sensor->n4, sensor->n4sz); + ++ if (sd->sensor == SENSOR_LT168G) { ++ test_byte = reg_r(gspca_dev, 0x80); ++ PDEBUG(D_STREAM, "Reg 0x%02x = 0x%02x", 0x80, ++ test_byte); ++ reg_w(gspca_dev, 0x6c80); ++ } ++ + reg_w_ixbuf(gspca_dev, 0xd0, sensor->data1, sizeof sensor->data1); + reg_w_ixbuf(gspca_dev, 0xc7, sensor->data2, sizeof sensor->data2); + reg_w_ixbuf(gspca_dev, 0xe0, sensor->data3, sizeof sensor->data3); +@@ -782,6 +864,13 @@ static int sd_init(struct gspca_dev *gspca_dev) + reg_w_buf(gspca_dev, sensor->nset8, sizeof sensor->nset8); + reg_w_buf(gspca_dev, sensor->stream, sizeof sensor->stream); + ++ if (sd->sensor == SENSOR_LT168G) { ++ test_byte = reg_r(gspca_dev, 0x80); ++ PDEBUG(D_STREAM, "Reg 0x%02x = 0x%02x", 0x80, ++ test_byte); ++ reg_w(gspca_dev, 0x6c80); ++ } ++ + reg_w_ixbuf(gspca_dev, 0xd0, sensor->data1, sizeof sensor->data1); + reg_w_ixbuf(gspca_dev, 0xc7, sensor->data2, sizeof sensor->data2); + reg_w_ixbuf(gspca_dev, 0xe0, sensor->data3, sizeof sensor->data3); +@@ -888,6 +977,8 @@ static int sd_start(struct gspca_dev *gspca_dev) + case SENSOR_OM6802: + om6802_sensor_init(gspca_dev); + break; ++ case SENSOR_LT168G: ++ break; + case SENSOR_OTHER: + break; + default: +@@ -920,6 +1011,15 @@ static int sd_start(struct gspca_dev *gspca_dev) + if (sd->sensor == SENSOR_OM6802) + poll_sensor(gspca_dev); + ++#if 0 ++ 
/* restart on each start, just in case, sometimes regs goes wrong ++ * when using controls from app */ ++ setbrightness(gspca_dev); ++ setcontrast(gspca_dev); ++ setcolors(gspca_dev); ++ seteffect(gspca_dev); ++ setflip(gspca_dev); ++#endif + return 0; + } + +diff --git a/drivers/media/video/gspca/tv8532.c b/drivers/media/video/gspca/tv8532.c +index b74a3b6..0267396 100644 +--- a/drivers/media/video/gspca/tv8532.c ++++ b/drivers/media/video/gspca/tv8532.c +@@ -39,7 +39,7 @@ struct sd { + static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + { + { + .id = V4L2_CID_BRIGHTNESS, +@@ -228,6 +228,12 @@ static void tv_8532ReadRegisters(struct gspca_dev *gspca_dev) + 0 + }; + ++#if 0 ++ data = reg_r(gspca_dev, 0x0001); ++ PDEBUG(D_USBI, "register 0x01-> %x", data); ++ data = reg_r(gspca_dev, 0x0002); ++ PDEBUG(D_USBI, "register 0x02-> %x", data); ++#endif + i = 0; + do { + reg_r(gspca_dev, reg_tb[i]); +diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c +index 71921c8..e42e55a 100644 +--- a/drivers/media/video/gspca/vc032x.c ++++ b/drivers/media/video/gspca/vc032x.c +@@ -32,10 +32,13 @@ MODULE_LICENSE("GPL"); + struct sd { + struct gspca_dev gspca_dev; /* !! must be the first item */ + ++ u8 brightness; ++ u8 contrast; ++ u8 colors; + u8 hflip; + u8 vflip; + u8 lightfreq; +- u8 sharpness; ++ s8 sharpness; + + u8 image_offset; + +@@ -52,6 +55,7 @@ struct sd { + #define SENSOR_OV7670 6 + #define SENSOR_PO1200 7 + #define SENSOR_PO3130NC 8 ++#define SENSOR_POxxxx 9 + u8 flags; + #define FL_SAMSUNG 0x01 /* SamsungQ1 (2 sensors) */ + #define FL_HFLIP 0x02 /* mirrored by default */ +@@ -59,6 +63,12 @@ struct sd { + }; + + /* V4L2 controls supported by the driver */ ++static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val); ++static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val); ++static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val); ++static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val); + static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val); +@@ -68,9 +78,54 @@ static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { ++#define BRIGHTNESS_IDX 0 ++ { ++ { ++ .id = V4L2_CID_BRIGHTNESS, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Brightness", ++ .minimum = 0, ++ .maximum = 255, ++ .step = 1, ++#define BRIGHTNESS_DEF 128 ++ .default_value = BRIGHTNESS_DEF, ++ }, ++ .set = sd_setbrightness, ++ .get = sd_getbrightness, ++ }, ++#define CONTRAST_IDX 1 ++ { ++ { ++ .id = V4L2_CID_CONTRAST, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Contrast", ++ .minimum = 0, ++ .maximum = 255, ++ .step = 1, ++#define CONTRAST_DEF 127 ++ .default_value = CONTRAST_DEF, ++ }, ++ .set = sd_setcontrast, ++ .get = sd_getcontrast, ++ }, ++#define COLORS_IDX 2 ++ { ++ { ++ .id = V4L2_CID_SATURATION, ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .name = "Saturation", ++ .minimum = 1, ++ 
.maximum = 127, ++ .step = 1, ++#define COLOR_DEF 63 ++ .default_value = COLOR_DEF, ++ }, ++ .set = sd_setcolors, ++ .get = sd_getcolors, ++ }, + /* next 2 controls work with some sensors only */ +-#define HFLIP_IDX 0 ++#define HFLIP_IDX 3 + { + { + .id = V4L2_CID_HFLIP, +@@ -85,7 +140,7 @@ static struct ctrl sd_ctrls[] = { + .set = sd_sethflip, + .get = sd_gethflip, + }, +-#define VFLIP_IDX 1 ++#define VFLIP_IDX 4 + { + { + .id = V4L2_CID_VFLIP, +@@ -100,7 +155,7 @@ static struct ctrl sd_ctrls[] = { + .set = sd_setvflip, + .get = sd_getvflip, + }, +-#define LIGHTFREQ_IDX 2 ++#define LIGHTFREQ_IDX 5 + { + { + .id = V4L2_CID_POWER_LINE_FREQUENCY, +@@ -115,17 +170,16 @@ static struct ctrl sd_ctrls[] = { + .set = sd_setfreq, + .get = sd_getfreq, + }, +-/* po1200 only */ +-#define SHARPNESS_IDX 3 ++#define SHARPNESS_IDX 6 + { + { + .id = V4L2_CID_SHARPNESS, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Sharpness", +- .minimum = 0, ++ .minimum = -1, + .maximum = 2, + .step = 1, +-#define SHARPNESS_DEF 1 ++#define SHARPNESS_DEF -1 + .default_value = SHARPNESS_DEF, + }, + .set = sd_setsharpness, +@@ -133,6 +187,42 @@ static struct ctrl sd_ctrls[] = { + }, + }; + ++/* table of the disabled controls */ ++static u32 ctrl_dis[] = { ++/* SENSOR_HV7131R 0 */ ++ (1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX) ++ | (1 << HFLIP_IDX) | (1 << VFLIP_IDX) | (1 << LIGHTFREQ_IDX) ++ | (1 << SHARPNESS_IDX), ++/* SENSOR_MI0360 1 */ ++ (1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX) ++ | (1 << HFLIP_IDX) | (1 << VFLIP_IDX) | (1 << LIGHTFREQ_IDX) ++ | (1 << SHARPNESS_IDX), ++/* SENSOR_MI1310_SOC 2 */ ++ (1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX) ++ | (1 << LIGHTFREQ_IDX) | (1 << SHARPNESS_IDX), ++/* SENSOR_MI1320 3 */ ++ (1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX) ++ | (1 << LIGHTFREQ_IDX) | (1 << SHARPNESS_IDX), ++/* SENSOR_MI1320_SOC 4 */ ++ (1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX) ++ | (1 << LIGHTFREQ_IDX) | (1 << SHARPNESS_IDX), ++/* SENSOR_OV7660 5 */ ++ (1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX) ++ | (1 << LIGHTFREQ_IDX) | (1 << SHARPNESS_IDX), ++/* SENSOR_OV7670 6 */ ++ (1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX) ++ | (1 << SHARPNESS_IDX), ++/* SENSOR_PO1200 7 */ ++ (1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX) ++ | (1 << LIGHTFREQ_IDX), ++/* SENSOR_PO3130NC 8 */ ++ (1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX) ++ | (1 << HFLIP_IDX) | (1 << VFLIP_IDX) | (1 << LIGHTFREQ_IDX) ++ | (1 << SHARPNESS_IDX), ++/* SENSOR_POxxxx 9 */ ++ (1 << HFLIP_IDX) | (1 << VFLIP_IDX) | (1 << LIGHTFREQ_IDX), ++}; ++ + static const struct v4l2_pix_format vc0321_mode[] = { + {320, 240, V4L2_PIX_FMT_YVYU, V4L2_FIELD_NONE, + .bytesperline = 320, +@@ -163,16 +253,37 @@ static const struct v4l2_pix_format vc0323_mode[] = { + .priv = 2}, + }; + static const struct v4l2_pix_format bi_mode[] = { ++#if 0 /* JPEG vc0323 */ ++ {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, ++ .bytesperline = 320, ++ .sizeimage = 320 * 240 * 3 / 8 + 590, ++ .colorspace = V4L2_COLORSPACE_JPEG, ++ .priv = 5}, ++#endif + {320, 240, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE, + .bytesperline = 320, + .sizeimage = 320 * 240 * 2, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 2}, ++#if 0 /* JPEG vc0323 */ ++ {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, ++ .bytesperline = 640, ++ .sizeimage = 640 * 480 * 3 / 8 + 590, ++ .colorspace = V4L2_COLORSPACE_JPEG, ++ .priv = 4}, ++#endif + {640, 480, 
V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE, + .bytesperline = 640, + .sizeimage = 640 * 480 * 2, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 1}, ++#if 0 /* JPEG vc0323 */ ++ {1280, 1024, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, ++ .bytesperline = 1280, ++ .sizeimage = 1280 * 1024 * 1 / 4 + 590, ++ .colorspace = V4L2_COLORSPACE_JPEG, ++ .priv = 3}, ++#endif + {1280, 1024, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE, + .bytesperline = 1280, + .sizeimage = 1280 * 1024 * 2, +@@ -215,7 +326,7 @@ static const u8 mi0360_initVGA_JPG[][4] = { + {0xb3, 0x15, 0x00, 0xcc}, + {0xb3, 0x16, 0x02, 0xcc}, + {0xb3, 0x17, 0x7f, 0xcc}, +- {0xb3, 0x35, 0xdd, 0xcc}, ++ {0xb3, 0x35, 0xdd, 0xcc}, /* i2c add: 5d */ + {0xb3, 0x34, 0x02, 0xcc}, + {0xb3, 0x00, 0x25, 0xcc}, + {0xbc, 0x00, 0x71, 0xcc}, +@@ -435,7 +546,7 @@ static const u8 mi1310_socinitVGA_JPG[][4] = { + {0xb3, 0x08, 0x01, 0xcc}, + {0xb3, 0x09, 0x0c, 0xcc}, + {0xb3, 0x34, 0x02, 0xcc}, +- {0xb3, 0x35, 0xdd, 0xcc}, ++ {0xb3, 0x35, 0xdd, 0xcc}, /* i2c add: 5d */ + {0xb3, 0x02, 0x00, 0xcc}, + {0xb3, 0x03, 0x0a, 0xcc}, + {0xb3, 0x04, 0x05, 0xcc}, +@@ -681,7 +792,11 @@ static const u8 mi1310_soc_InitSXGA_JPG[][4] = { + {0xc8, 0x9f, 0x0b, 0xbb}, + {0x5b, 0x00, 0x01, 0xbb}, + {0xf0, 0x00, 0x00, 0xbb}, ++#if 1 + {0x20, 0x03, 0x02, 0xbb}, /* h/v flip */ ++#else ++ {0x20, 0x03, 0x03, 0xbb}, /* h/v flip */ ++#endif + {0xf0, 0x00, 0x01, 0xbb}, + {0x05, 0x00, 0x07, 0xbb}, + {0x34, 0x00, 0x00, 0xbb}, +@@ -860,7 +975,8 @@ static const u8 mi1320_initVGA_data[][4] = { + {0xb0, 0x16, 0x03, 0xcc}, {0xb3, 0x05, 0x00, 0xcc}, + {0xb3, 0x06, 0x00, 0xcc}, {0xb3, 0x08, 0x01, 0xcc}, + {0xb3, 0x09, 0x0c, 0xcc}, {0xb3, 0x34, 0x02, 0xcc}, +- {0xb3, 0x35, 0xc8, 0xcc}, {0xb3, 0x02, 0x00, 0xcc}, ++ {0xb3, 0x35, 0xc8, 0xcc}, /* i2c add: 48 */ ++ {0xb3, 0x02, 0x00, 0xcc}, + {0xb3, 0x03, 0x0a, 0xcc}, {0xb3, 0x04, 0x05, 0xcc}, + {0xb3, 0x20, 0x00, 0xcc}, {0xb3, 0x21, 0x00, 0xcc}, + {0xb3, 0x22, 0x03, 0xcc}, {0xb3, 0x23, 0xc0, 0xcc}, +@@ -901,7 +1017,8 @@ static const u8 mi1320_initVGA_data[][4] = { + {0xc3, 0x01, 0x03, 0xbb}, {0xc4, 0x00, 0x04, 0xbb}, + {0xf0, 0x00, 0x00, 0xbb}, {0x05, 0x01, 0x13, 0xbb}, + {0x06, 0x00, 0x11, 0xbb}, {0x07, 0x00, 0x85, 0xbb}, +- {0x08, 0x00, 0x27, 0xbb}, {0x20, 0x01, 0x03, 0xbb}, ++ {0x08, 0x00, 0x27, 0xbb}, ++ {0x20, 0x01, 0x00, 0xbb}, /* h/v flips - was 03 */ + {0x21, 0x80, 0x00, 0xbb}, {0x22, 0x0d, 0x0f, 0xbb}, + {0x24, 0x80, 0x00, 0xbb}, {0x59, 0x00, 0xff, 0xbb}, + {0xf0, 0x00, 0x02, 0xbb}, {0x39, 0x03, 0x0d, 0xbb}, +@@ -1012,7 +1129,7 @@ static const u8 mi1320_soc_InitVGA[][4] = { + {0xb3, 0x08, 0x01, 0xcc}, + {0xb3, 0x09, 0x0c, 0xcc}, + {0xb3, 0x34, 0x02, 0xcc}, +- {0xb3, 0x35, 0xc8, 0xcc}, ++ {0xb3, 0x35, 0xc8, 0xcc}, /* i2c add: 48 */ + {0xb3, 0x02, 0x00, 0xcc}, + {0xb3, 0x03, 0x0a, 0xcc}, + {0xb3, 0x04, 0x05, 0xcc}, +@@ -1103,6 +1220,123 @@ static const u8 mi1320_soc_InitVGA[][4] = { + {0xb3, 0x5c, 0x01, 0xcc}, + {} + }; ++#if 0 /* JPEG vc0323 */ ++static const u8 mi1320_soc_InitVGA_JPG[][4] = { ++ {0xb3, 0x01, 0x01, 0xcc}, ++ {0xb0, 0x03, 0x19, 0xcc}, ++ {0xb0, 0x04, 0x02, 0xcc}, ++ {0x00, 0x00, 0x30, 0xdd}, ++ {0xb3, 0x00, 0x64, 0xcc}, ++ {0xb3, 0x00, 0x67, 0xcc}, ++ {0xb3, 0x05, 0x01, 0xcc}, ++ {0xb3, 0x06, 0x01, 0xcc}, ++ {0xb3, 0x08, 0x01, 0xcc}, ++ {0xb3, 0x09, 0x0c, 0xcc}, ++ {0xb3, 0x34, 0x02, 0xcc}, ++ {0xb3, 0x35, 0xc8, 0xcc}, ++ {0xb3, 0x02, 0x00, 0xcc}, ++ {0xb3, 0x03, 0x0a, 0xcc}, ++ {0xb3, 0x04, 0x05, 0xcc}, ++ {0xb3, 0x20, 0x00, 0xcc}, ++ {0xb3, 0x21, 0x00, 0xcc}, ++ {0xb3, 0x22, 0x01, 0xcc}, ++ {0xb3, 0x23, 0xe0, 0xcc}, ++ {0xb3, 0x14, 0x00, 0xcc}, 
++ {0xb3, 0x15, 0x00, 0xcc}, ++ {0xb3, 0x16, 0x02, 0xcc}, ++ {0xb3, 0x17, 0x7f, 0xcc}, ++ {0xb3, 0x00, 0x67, 0xcc}, ++ {0xb8, 0x00, 0x00, 0xcc}, ++ {0xbc, 0x00, 0x71, 0xcc}, ++ {0xbc, 0x01, 0x01, 0xcc}, ++ {0xb3, 0x5c, 0x01, 0xcc}, ++ {0xf0, 0x00, 0x02, 0xbb}, ++ {0x00, 0x00, 0x10, 0xdd}, ++ {0xc8, 0x00, 0x00, 0xbb}, ++ {0x00, 0x00, 0x30, 0xdd}, ++ {0xf0, 0x00, 0x00, 0xbb}, ++ {0x00, 0x00, 0x10, 0xdd}, ++ {0x07, 0x00, 0xe0, 0xbb}, ++ {0x08, 0x00, 0x0b, 0xbb}, ++ {0x21, 0x00, 0x0c, 0xbb}, ++ {0x20, 0x01, 0x03, 0xbb}, /* h/v flip */ ++ {0xb6, 0x00, 0x00, 0xcc}, ++ {0xb6, 0x03, 0x02, 0xcc}, ++ {0xb6, 0x02, 0x80, 0xcc}, ++ {0xb6, 0x05, 0x01, 0xcc}, ++ {0xb6, 0x04, 0xe0, 0xcc}, ++ {0xb6, 0x12, 0xf8, 0xcc}, ++ {0xb6, 0x13, 0x05, 0xcc}, ++ {0xb6, 0x18, 0x02, 0xcc}, ++ {0xb6, 0x17, 0x58, 0xcc}, ++ {0xb6, 0x16, 0x00, 0xcc}, ++ {0xb6, 0x22, 0x12, 0xcc}, ++ {0xb6, 0x23, 0x0b, 0xcc}, ++ {0xbf, 0xc0, 0x39, 0xcc}, ++ {0xbf, 0xc1, 0x04, 0xcc}, ++ {0xbf, 0xcc, 0x00, 0xcc}, ++ {0xb3, 0x01, 0x41, 0xcc}, ++ {0xf0, 0x00, 0x00, 0xbb}, ++ {0x05, 0x01, 0x78, 0xbb}, ++ {0x06, 0x00, 0x11, 0xbb}, ++ {0x07, 0x01, 0x42, 0xbb}, ++ {0x08, 0x00, 0x11, 0xbb}, ++ {0x20, 0x01, 0x03, 0xbb}, /* h/v flip */ ++ {0x21, 0x80, 0x00, 0xbb}, ++ {0x22, 0x0d, 0x0f, 0xbb}, ++ {0x24, 0x80, 0x00, 0xbb}, ++ {0x59, 0x00, 0xff, 0xbb}, ++ {0xf0, 0x00, 0x02, 0xbb}, ++ {0x39, 0x03, 0xca, 0xbb}, ++ {0x3a, 0x06, 0x80, 0xbb}, ++ {0x3b, 0x01, 0x52, 0xbb}, ++ {0x3c, 0x05, 0x40, 0xbb}, ++ {0x57, 0x01, 0x9c, 0xbb}, ++ {0x58, 0x01, 0xee, 0xbb}, ++ {0x59, 0x00, 0xf0, 0xbb}, ++ {0x5a, 0x01, 0x20, 0xbb}, ++ {0x5c, 0x1d, 0x17, 0xbb}, ++ {0x5d, 0x22, 0x1c, 0xbb}, ++ {0x64, 0x1e, 0x1c, 0xbb}, ++ {0x5b, 0x00, 0x00, 0xbb}, ++ {0xf0, 0x00, 0x02, 0xbb}, ++ {0x22, 0xa0, 0x78, 0xbb}, ++ {0x23, 0xa0, 0x78, 0xbb}, ++ {0x24, 0x7f, 0x00, 0xbb}, ++ {0x28, 0xea, 0x02, 0xbb}, ++ {0x29, 0x86, 0x7a, 0xbb}, ++ {0x5e, 0x52, 0x4c, 0xbb}, ++ {0x5f, 0x20, 0x24, 0xbb}, ++ {0x60, 0x00, 0x02, 0xbb}, ++ {0x02, 0x00, 0xee, 0xbb}, ++ {0x03, 0x39, 0x23, 0xbb}, ++ {0x04, 0x07, 0x24, 0xbb}, ++ {0x09, 0x00, 0xc0, 0xbb}, ++ {0x0a, 0x00, 0x79, 0xbb}, ++ {0x0b, 0x00, 0x04, 0xbb}, ++ {0x0c, 0x00, 0x5c, 0xbb}, ++ {0x0d, 0x00, 0xd9, 0xbb}, ++ {0x0e, 0x00, 0x53, 0xbb}, ++ {0x0f, 0x00, 0x21, 0xbb}, ++ {0x10, 0x00, 0xa4, 0xbb}, ++ {0x11, 0x00, 0xe5, 0xbb}, ++ {0x15, 0x00, 0x00, 0xbb}, ++ {0x16, 0x00, 0x00, 0xbb}, ++ {0x17, 0x00, 0x00, 0xbb}, ++ {0x18, 0x00, 0x00, 0xbb}, ++ {0x19, 0x00, 0x00, 0xbb}, ++ {0x1a, 0x00, 0x00, 0xbb}, ++ {0x1b, 0x00, 0x00, 0xbb}, ++ {0x1c, 0x00, 0x00, 0xbb}, ++ {0x1d, 0x00, 0x00, 0xbb}, ++ {0x1e, 0x00, 0x00, 0xbb}, ++ {0xf0, 0x00, 0x01, 0xbb}, ++ {0x06, 0xe0, 0x0e, 0xbb}, ++ {0x06, 0x60, 0x0e, 0xbb}, ++ {0xb3, 0x5c, 0x01, 0xcc}, ++ {} ++}; ++#endif + static const u8 mi1320_soc_InitQVGA[][4] = { + {0xb3, 0x01, 0x01, 0xcc}, + {0xb0, 0x03, 0x19, 0xcc}, +@@ -1216,6 +1450,272 @@ static const u8 mi1320_soc_InitQVGA[][4] = { + {0xb3, 0x5c, 0x01, 0xcc}, + {} + }; ++#if 0 /* JPEG vc0323 */ ++static const u8 mi1320_soc_InitQVGA_JPG[][4] = { ++ {0xb3, 0x01, 0x01, 0xcc}, ++ {0xb0, 0x03, 0x19, 0xcc}, ++ {0xb0, 0x04, 0x02, 0xcc}, ++ {0x00, 0x00, 0x30, 0xdd}, ++ {0xb3, 0x00, 0x64, 0xcc}, ++ {0xb3, 0x00, 0x67, 0xcc}, ++ {0xb3, 0x05, 0x01, 0xcc}, ++ {0xb3, 0x06, 0x01, 0xcc}, ++ {0xb3, 0x08, 0x01, 0xcc}, ++ {0xb3, 0x09, 0x0c, 0xcc}, ++ {0xb3, 0x34, 0x02, 0xcc}, ++ {0xb3, 0x35, 0xc8, 0xcc}, ++ {0xb3, 0x02, 0x00, 0xcc}, ++ {0xb3, 0x03, 0x0a, 0xcc}, ++ {0xb3, 0x04, 0x05, 0xcc}, ++ {0xb3, 0x20, 0x00, 0xcc}, ++ {0xb3, 0x21, 0x00, 0xcc}, ++ {0xb3, 0x22, 0x01, 0xcc}, ++ {0xb3, 0x23, 0xe0, 
0xcc}, ++ {0xb3, 0x14, 0x00, 0xcc}, ++ {0xb3, 0x15, 0x00, 0xcc}, ++ {0xb3, 0x16, 0x02, 0xcc}, ++ {0xb3, 0x17, 0x7f, 0xcc}, ++ {0xb3, 0x00, 0x67, 0xcc}, ++ {0xb8, 0x00, 0x00, 0xcc}, ++ {0xbc, 0x00, 0xd1, 0xcc}, ++ {0xbc, 0x01, 0x01, 0xcc}, ++ {0xb3, 0x5c, 0x01, 0xcc}, ++ {0xf0, 0x00, 0x02, 0xbb}, ++ {0x00, 0x00, 0x10, 0xdd}, ++ {0xc8, 0x00, 0x00, 0xbb}, ++ {0x00, 0x00, 0x30, 0xdd}, ++ {0xf0, 0x00, 0x00, 0xbb}, ++ {0x00, 0x00, 0x10, 0xdd}, ++ {0x07, 0x00, 0xe0, 0xbb}, ++ {0x08, 0x00, 0x0b, 0xbb}, ++ {0x21, 0x00, 0x0c, 0xbb}, ++ {0x20, 0x01, 0x03, 0xbb}, /* h/v flip */ ++ {0xb6, 0x00, 0x00, 0xcc}, ++ {0xb6, 0x03, 0x01, 0xcc}, ++ {0xb6, 0x02, 0x40, 0xcc}, ++ {0xb6, 0x05, 0x00, 0xcc}, ++ {0xb6, 0x04, 0xf0, 0xcc}, ++ {0xb6, 0x12, 0xf8, 0xcc}, ++ {0xb6, 0x13, 0x05, 0xcc}, ++ {0xb6, 0x18, 0x00, 0xcc}, ++ {0xb6, 0x17, 0x96, 0xcc}, ++ {0xb6, 0x16, 0x00, 0xcc}, ++ {0xb6, 0x22, 0x12, 0xcc}, ++ {0xb6, 0x23, 0x0b, 0xcc}, ++ {0xbf, 0xc0, 0x39, 0xcc}, ++ {0xbf, 0xc1, 0x04, 0xcc}, ++ {0xbf, 0xcc, 0x00, 0xcc}, ++ {0xbc, 0x02, 0x18, 0xcc}, ++ {0xbc, 0x03, 0x50, 0xcc}, ++ {0xbc, 0x04, 0x18, 0xcc}, ++ {0xbc, 0x05, 0x00, 0xcc}, ++ {0xbc, 0x06, 0x00, 0xcc}, ++ {0xbc, 0x08, 0x30, 0xcc}, ++ {0xbc, 0x09, 0x40, 0xcc}, ++ {0xbc, 0x0a, 0x10, 0xcc}, ++ {0xbc, 0x0b, 0x00, 0xcc}, ++ {0xbc, 0x0c, 0x00, 0xcc}, ++ {0xb3, 0x01, 0x41, 0xcc}, ++ {0xf0, 0x00, 0x00, 0xbb}, ++ {0x05, 0x01, 0x78, 0xbb}, ++ {0x06, 0x00, 0x11, 0xbb}, ++ {0x07, 0x01, 0x42, 0xbb}, ++ {0x08, 0x00, 0x11, 0xbb}, ++ {0x20, 0x01, 0x03, 0xbb}, /* h/v flip */ ++ {0x21, 0x80, 0x00, 0xbb}, ++ {0x22, 0x0d, 0x0f, 0xbb}, ++ {0x24, 0x80, 0x00, 0xbb}, ++ {0x59, 0x00, 0xff, 0xbb}, ++ {0xf0, 0x00, 0x02, 0xbb}, ++ {0x39, 0x03, 0xca, 0xbb}, ++ {0x3a, 0x06, 0x80, 0xbb}, ++ {0x3b, 0x01, 0x52, 0xbb}, ++ {0x3c, 0x05, 0x40, 0xbb}, ++ {0x57, 0x01, 0x9c, 0xbb}, ++ {0x58, 0x01, 0xee, 0xbb}, ++ {0x59, 0x00, 0xf0, 0xbb}, ++ {0x5a, 0x01, 0x20, 0xbb}, ++ {0x5c, 0x1d, 0x17, 0xbb}, ++ {0x5d, 0x22, 0x1c, 0xbb}, ++ {0x64, 0x1e, 0x1c, 0xbb}, ++ {0x5b, 0x00, 0x00, 0xbb}, ++ {0xf0, 0x00, 0x02, 0xbb}, ++ {0x22, 0xa0, 0x78, 0xbb}, ++ {0x23, 0xa0, 0x78, 0xbb}, ++ {0x24, 0x7f, 0x00, 0xbb}, ++ {0x28, 0xea, 0x02, 0xbb}, ++ {0x29, 0x86, 0x7a, 0xbb}, ++ {0x5e, 0x52, 0x4c, 0xbb}, ++ {0x5f, 0x20, 0x24, 0xbb}, ++ {0x60, 0x00, 0x02, 0xbb}, ++ {0x02, 0x00, 0xee, 0xbb}, ++ {0x03, 0x39, 0x23, 0xbb}, ++ {0x04, 0x07, 0x24, 0xbb}, ++ {0x09, 0x00, 0xc0, 0xbb}, ++ {0x0a, 0x00, 0x79, 0xbb}, ++ {0x0b, 0x00, 0x04, 0xbb}, ++ {0x0c, 0x00, 0x5c, 0xbb}, ++ {0x0d, 0x00, 0xd9, 0xbb}, ++ {0x0e, 0x00, 0x53, 0xbb}, ++ {0x0f, 0x00, 0x21, 0xbb}, ++ {0x10, 0x00, 0xa4, 0xbb}, ++ {0x11, 0x00, 0xe5, 0xbb}, ++ {0x15, 0x00, 0x00, 0xbb}, ++ {0x16, 0x00, 0x00, 0xbb}, ++ {0x17, 0x00, 0x00, 0xbb}, ++ {0x18, 0x00, 0x00, 0xbb}, ++ {0x19, 0x00, 0x00, 0xbb}, ++ {0x1a, 0x00, 0x00, 0xbb}, ++ {0x1b, 0x00, 0x00, 0xbb}, ++ {0x1c, 0x00, 0x00, 0xbb}, ++ {0x1d, 0x00, 0x00, 0xbb}, ++ {0x1e, 0x00, 0x00, 0xbb}, ++ {0xf0, 0x00, 0x01, 0xbb}, ++ {0x06, 0xe0, 0x0e, 0xbb}, ++ {0x06, 0x60, 0x0e, 0xbb}, ++ {0xb3, 0x5c, 0x01, 0xcc}, ++ {} ++}; ++#endif ++#if 0 /* JPEG vc0323 */ ++static const u8 mi1320_soc_InitSXGA_JPG[][4] = { ++ {0xb3, 0x01, 0x01, 0xcc}, ++ {0xb0, 0x03, 0x19, 0xcc}, ++ {0xb0, 0x04, 0x02, 0xcc}, ++ {0x00, 0x00, 0x33, 0xdd}, ++ {0xb3, 0x00, 0x64, 0xcc}, ++ {0xb3, 0x00, 0x67, 0xcc}, ++ {0xb3, 0x05, 0x00, 0xcc}, ++ {0xb3, 0x06, 0x00, 0xcc}, ++ {0xb3, 0x08, 0x01, 0xcc}, ++ {0xb3, 0x09, 0x0c, 0xcc}, ++ {0xb3, 0x34, 0x02, 0xcc}, ++ {0xb3, 0x35, 0xc8, 0xcc}, ++ {0xb3, 0x02, 0x00, 0xcc}, ++ {0xb3, 0x03, 0x0a, 0xcc}, ++ {0xb3, 0x04, 0x05, 
0xcc}, ++ {0xb3, 0x20, 0x00, 0xcc}, ++ {0xb3, 0x21, 0x00, 0xcc}, ++ {0xb3, 0x22, 0x04, 0xcc}, ++ {0xb3, 0x23, 0x00, 0xcc}, ++ {0xb3, 0x14, 0x00, 0xcc}, ++ {0xb3, 0x15, 0x00, 0xcc}, ++ {0xb3, 0x16, 0x04, 0xcc}, ++ {0xb3, 0x17, 0xff, 0xcc}, ++ {0xb3, 0x00, 0x67, 0xcc}, ++ {0xbc, 0x00, 0x71, 0xcc}, ++ {0xbc, 0x01, 0x01, 0xcc}, ++ {0xf0, 0x00, 0x02, 0xbb}, ++ {0x00, 0x00, 0x30, 0xdd}, ++ {0xc8, 0x9f, 0x0b, 0xbb}, ++ {0x00, 0x00, 0x20, 0xdd}, ++ {0x5b, 0x00, 0x01, 0xbb}, ++ {0x00, 0x00, 0x20, 0xdd}, ++ {0xf0, 0x00, 0x00, 0xbb}, ++ {0x00, 0x00, 0x30, 0xdd}, ++ {0x20, 0x01, 0x03, 0xbb}, /* h/v flip */ ++ {0x00, 0x00, 0x20, 0xdd}, ++ {0xb6, 0x00, 0x00, 0xcc}, ++ {0xb6, 0x03, 0x05, 0xcc}, ++ {0xb6, 0x02, 0x00, 0xcc}, ++ {0xb6, 0x05, 0x04, 0xcc}, ++ {0xb6, 0x04, 0x00, 0xcc}, ++ {0xb6, 0x12, 0xf8, 0xcc}, ++ {0xb6, 0x13, 0x29, 0xcc}, ++ {0xb6, 0x18, 0x0a, 0xcc}, ++ {0xb6, 0x17, 0x00, 0xcc}, ++ {0xb6, 0x16, 0x00, 0xcc}, ++ {0xb6, 0x22, 0x12, 0xcc}, ++ {0xb6, 0x23, 0x0b, 0xcc}, ++ {0xbf, 0xc0, 0x39, 0xcc}, ++ {0xbf, 0xc1, 0x04, 0xcc}, ++ {0xbf, 0xcc, 0x00, 0xcc}, ++ {0xb3, 0x5c, 0x01, 0xcc}, ++ {0xb3, 0x01, 0x41, 0xcc}, ++ {0xf0, 0x00, 0x00, 0xbb}, ++ {0x05, 0x01, 0x78, 0xbb}, ++ {0x06, 0x00, 0x11, 0xbb}, ++ {0x07, 0x01, 0x42, 0xbb}, ++ {0x08, 0x00, 0x11, 0xbb}, ++ {0x20, 0x01, 0x03, 0xbb}, /* h/v flip */ ++ {0x21, 0x80, 0x00, 0xbb}, ++ {0x22, 0x0d, 0x0f, 0xbb}, ++ {0x24, 0x80, 0x00, 0xbb}, ++ {0x59, 0x00, 0xff, 0xbb}, ++ {0xf0, 0x00, 0x02, 0xbb}, ++ {0x39, 0x03, 0xca, 0xbb}, ++ {0x3a, 0x06, 0x80, 0xbb}, ++ {0x3b, 0x01, 0x52, 0xbb}, ++ {0x3c, 0x05, 0x40, 0xbb}, ++ {0x57, 0x01, 0x9c, 0xbb}, ++ {0x58, 0x01, 0xee, 0xbb}, ++ {0x59, 0x00, 0xf0, 0xbb}, ++ {0x5a, 0x01, 0x20, 0xbb}, ++ {0x5c, 0x1d, 0x17, 0xbb}, ++ {0x5d, 0x22, 0x1c, 0xbb}, ++ {0x64, 0x1e, 0x1c, 0xbb}, ++ {0x5b, 0x00, 0x00, 0xbb}, ++ {0xf0, 0x00, 0x02, 0xbb}, ++ {0x22, 0xa0, 0x78, 0xbb}, ++ {0x23, 0xa0, 0x78, 0xbb}, ++ {0x24, 0x7f, 0x00, 0xbb}, ++ {0x28, 0xea, 0x02, 0xbb}, ++ {0x29, 0x86, 0x7a, 0xbb}, ++ {0x5e, 0x52, 0x4c, 0xbb}, ++ {0x5f, 0x20, 0x24, 0xbb}, ++ {0x60, 0x00, 0x02, 0xbb}, ++ {0x02, 0x00, 0xee, 0xbb}, ++ {0x03, 0x39, 0x23, 0xbb}, ++ {0x04, 0x07, 0x24, 0xbb}, ++ {0x09, 0x00, 0xc0, 0xbb}, ++ {0x0a, 0x00, 0x79, 0xbb}, ++ {0x0b, 0x00, 0x04, 0xbb}, ++ {0x0c, 0x00, 0x5c, 0xbb}, ++ {0x0d, 0x00, 0xd9, 0xbb}, ++ {0x0e, 0x00, 0x53, 0xbb}, ++ {0x0f, 0x00, 0x21, 0xbb}, ++ {0x10, 0x00, 0xa4, 0xbb}, ++ {0x11, 0x00, 0xe5, 0xbb}, ++ {0x15, 0x00, 0x00, 0xbb}, ++ {0x16, 0x00, 0x00, 0xbb}, ++ {0x17, 0x00, 0x00, 0xbb}, ++ {0x18, 0x00, 0x00, 0xbb}, ++ {0x19, 0x00, 0x00, 0xbb}, ++ {0x1a, 0x00, 0x00, 0xbb}, ++ {0x1b, 0x00, 0x00, 0xbb}, ++ {0x1c, 0x00, 0x00, 0xbb}, ++ {0x1d, 0x00, 0x00, 0xbb}, ++ {0x1e, 0x00, 0x00, 0xbb}, ++ {0xf0, 0x00, 0x01, 0xbb}, ++ {0x06, 0xe0, 0x0e, 0xbb}, ++ {0x06, 0x60, 0x0e, 0xbb}, ++ {0xb3, 0x5c, 0x01, 0xcc}, ++ {0xf0, 0x00, 0x00, 0xbb}, ++ {0x05, 0x01, 0x13, 0xbb}, ++ {0x06, 0x00, 0x11, 0xbb}, ++ {0x07, 0x00, 0x85, 0xbb}, ++ {0x08, 0x00, 0x27, 0xbb}, ++ {0x20, 0x01, 0x03, 0xbb}, /* h/v flip */ ++ {0x21, 0x80, 0x00, 0xbb}, ++ {0x22, 0x0d, 0x0f, 0xbb}, ++ {0x24, 0x80, 0x00, 0xbb}, ++ {0x59, 0x00, 0xff, 0xbb}, ++ {0xf0, 0x00, 0x02, 0xbb}, ++ {0x39, 0x03, 0x0d, 0xbb}, ++ {0x3a, 0x06, 0x1b, 0xbb}, ++ {0x3b, 0x00, 0x95, 0xbb}, ++ {0x3c, 0x04, 0xdb, 0xbb}, ++ {0x57, 0x02, 0x00, 0xbb}, ++ {0x58, 0x02, 0x66, 0xbb}, ++ {0x59, 0x00, 0xff, 0xbb}, ++ {0x5a, 0x01, 0x33, 0xbb}, ++ {0x5c, 0x12, 0x0d, 0xbb}, ++ {0x5d, 0x16, 0x11, 0xbb}, ++ {0x64, 0x5e, 0x1c, 0xbb}, ++ {0x2f, 0x90, 0x00, 0xbb}, ++ {} ++}; ++#endif + static const u8 
mi1320_soc_InitSXGA[][4] = { + {0xb3, 0x01, 0x01, 0xcc}, + {0xb0, 0x03, 0x19, 0xcc}, +@@ -1359,7 +1859,8 @@ static const u8 po3130_initVGA_data[][4] = { + {0xb3, 0x23, 0xe8, 0xcc}, {0xb8, 0x08, 0xe8, 0xcc}, + {0xb3, 0x14, 0x00, 0xcc}, {0xb3, 0x15, 0x00, 0xcc}, + {0xb3, 0x16, 0x02, 0xcc}, {0xb3, 0x17, 0x7f, 0xcc}, +- {0xb3, 0x34, 0x01, 0xcc}, {0xb3, 0x35, 0xf6, 0xcc}, ++ {0xb3, 0x34, 0x01, 0xcc}, ++ {0xb3, 0x35, 0xf6, 0xcc}, /* i2c add: 76 */ + {0xb3, 0x00, 0x27, 0xcc}, {0xbc, 0x00, 0x71, 0xcc}, + {0xb8, 0x00, 0x21, 0xcc}, {0xb8, 0x27, 0x20, 0xcc}, + {0xb8, 0x01, 0x79, 0xcc}, {0xb8, 0x81, 0x09, 0xcc}, +@@ -1532,8 +2033,13 @@ static const u8 po3130_initQVGA_data[][4] = { + }; + + static const u8 hv7131r_gamma[17] = { ++#if 1 + 0x00, 0x13, 0x38, 0x59, 0x79, 0x92, 0xa7, 0xb9, 0xc8, + 0xd4, 0xdf, 0xe7, 0xee, 0xf4, 0xf9, 0xfc, 0xff ++#else ++ 0x04, 0x1a, 0x36, 0x55, 0x6f, 0x87, 0x9d, 0xb0, 0xc1, ++ 0xcf, 0xda, 0xe4, 0xec, 0xf3, 0xf8, 0xfd, 0xff ++#endif + }; + static const u8 hv7131r_matrix[9] = { + 0x5f, 0xec, 0xf5, 0xf1, 0x5a, 0xf5, 0xf1, 0xec, 0x63 +@@ -1561,7 +2067,7 @@ static const u8 hv7131r_initVGA_data[][4] = { + {0xb3, 0x16, 0x02, 0xcc}, + {0xb3, 0x17, 0x7f, 0xcc}, + {0xb3, 0x34, 0x01, 0xcc}, +- {0xb3, 0x35, 0x91, 0xcc}, ++ {0xb3, 0x35, 0x91, 0xcc}, /* i2c add: 11 */ + {0xb3, 0x00, 0x27, 0xcc}, + {0xbc, 0x00, 0x73, 0xcc}, + {0xb8, 0x00, 0x23, 0xcc}, +@@ -1747,7 +2253,8 @@ static const u8 ov7660_initVGA_data[][4] = { + {0xb3, 0x23, 0xe0, 0xcc}, {0xb3, 0x1d, 0x01, 0xcc}, + {0xb3, 0x1f, 0x02, 0xcc}, + {0xb3, 0x34, 0x01, 0xcc}, +- {0xb3, 0x35, 0xa1, 0xcc}, {0xb3, 0x00, 0x26, 0xcc}, ++ {0xb3, 0x35, 0xa1, 0xcc}, /* i2c add: 21 */ ++ {0xb3, 0x00, 0x26, 0xcc}, + {0xb8, 0x00, 0x33, 0xcc}, /* 13 */ + {0xb8, 0x01, 0x7d, 0xcc}, + {0xbc, 0x00, 0x73, 0xcc}, {0xb8, 0x81, 0x09, 0xcc}, +@@ -1883,7 +2390,8 @@ static const u8 ov7670_initVGA_JPG[][4] = { + {0x00, 0x00, 0x10, 0xdd}, + {0xb0, 0x04, 0x02, 0xcc}, {0x00, 0x00, 0x10, 0xdd}, + {0xb3, 0x00, 0x66, 0xcc}, {0xb3, 0x00, 0x67, 0xcc}, +- {0xb3, 0x35, 0xa1, 0xcc}, {0xb3, 0x34, 0x01, 0xcc}, ++ {0xb3, 0x35, 0xa1, 0xcc}, /* i2c add: 21 */ ++ {0xb3, 0x34, 0x01, 0xcc}, + {0xb3, 0x05, 0x01, 0xcc}, {0xb3, 0x06, 0x01, 0xcc}, + {0xb3, 0x08, 0x01, 0xcc}, {0xb3, 0x09, 0x0c, 0xcc}, + {0xb3, 0x02, 0x02, 0xcc}, {0xb3, 0x03, 0x1f, 0xcc}, +@@ -2143,14 +2651,26 @@ static const u8 ov7670_initQVGA_JPG[][4] = { + + /* PO1200 - values from usbvm326.inf and ms-win trace */ + static const u8 po1200_gamma[17] = { ++#if 1 + 0x00, 0x13, 0x38, 0x59, 0x79, 0x92, 0xa7, 0xb9, 0xc8, + 0xd4, 0xdf, 0xe7, 0xee, 0xf4, 0xf9, 0xfc, 0xff ++#else ++/*ms-win trace*/ ++ 0x01, 0x0b, 0x1e, 0x38, 0x51, 0x6b, 0x83, 0x9a, 0xaf, ++ 0xc1, 0xd0, 0xdd, 0xe8, 0xf2, 0xf9, 0xff, 0xff ++#endif + }; + static const u8 po1200_matrix[9] = { + 0x60, 0xf9, 0xe5, 0xe7, 0x50, 0x05, 0xf3, 0xe6, 0x5e + }; + static const u8 po1200_initVGA_data[][4] = { + {0xb0, 0x03, 0x19, 0xcc}, /* reset? 
*/ ++#if 0 ++ {0x00, 0x00, 0x64, 0xdd}, ++ {0xb3, 0x49, 0x11, 0xcc}, ++ {0x00, 0x00, 0x33, 0xdd}, ++/*read b349*/ ++#endif + {0xb0, 0x03, 0x19, 0xcc}, + /* {0x00, 0x00, 0x33, 0xdd}, */ + {0xb0, 0x04, 0x02, 0xcc}, +@@ -2181,7 +2701,7 @@ static const u8 po1200_initVGA_data[][4] = { + {0xb0, 0x54, 0x13, 0xcc}, + {0xb3, 0x00, 0x67, 0xcc}, + {0xb3, 0x34, 0x01, 0xcc}, +- {0xb3, 0x35, 0xdc, 0xcc}, ++ {0xb3, 0x35, 0xdc, 0xcc}, /* i2c add: 5c */ + {0x00, 0x03, 0x00, 0xaa}, + {0x00, 0x12, 0x05, 0xaa}, + {0x00, 0x13, 0x02, 0xaa}, +@@ -2346,7 +2866,12 @@ static const u8 po1200_initVGA_data[][4] = { + {0x00, 0xe2, 0x01, 0xaa}, + {0x00, 0xd6, 0x40, 0xaa}, + {0x00, 0xe4, 0x40, 0xaa}, ++#if 1 + {0x00, 0xa8, 0x8f, 0xaa}, ++#else ++/*modified later*/ ++ {0x00, 0xa8, 0x9f, 0xaa}, ++#endif + {0x00, 0xb4, 0x16, 0xaa}, + {0xb0, 0x02, 0x06, 0xcc}, + {0xb0, 0x18, 0x06, 0xcc}, +@@ -2383,6 +2908,10 @@ static const u8 po1200_initVGA_data[][4] = { + {0x00, 0x03, 0x00, 0xaa}, + {0x00, 0x95, 0x85, 0xaa}, + /*matrix*/ ++#if 0 ++ {0x00, 0x03, 0x00, 0xaa}, ++ {0x00, 0x61, 0xb8, 0xaa}, /* sharpness */ ++#endif + {0x00, 0x03, 0x00, 0xaa}, + {0x00, 0x4d, 0x20, 0xaa}, + {0xb8, 0x22, 0x40, 0xcc}, +@@ -2402,12 +2931,270 @@ static const u8 po1200_initVGA_data[][4] = { + {0x00, 0x46, 0x3c, 0xaa}, + {0x00, 0x00, 0x18, 0xdd}, + /*read bfff*/ ++#if 0 ++ {0x00, 0x03, 0x00, 0xaa}, ++ {0x00, 0x1e, 0x46, 0xaa}, /* h/v flip */ ++ {0x00, 0xa8, 0x8f, 0xaa}, ++#endif + {0x00, 0x03, 0x00, 0xaa}, + {0x00, 0xb4, 0x1c, 0xaa}, + {0x00, 0xb5, 0x92, 0xaa}, + {0x00, 0xb6, 0x39, 0xaa}, + {0x00, 0xb7, 0x24, 0xaa}, + /*write 89 0400 1415*/ ++ {} ++}; ++ ++static const u8 poxxxx_init_common[][4] = { ++ {0xb3, 0x00, 0x04, 0xcc}, ++ {0x00, 0x00, 0x10, 0xdd}, ++ {0xb3, 0x00, 0x64, 0xcc}, ++ {0x00, 0x00, 0x10, 0xdd}, ++ {0xb3, 0x00, 0x65, 0xcc}, ++ {0x00, 0x00, 0x10, 0xdd}, ++ {0xb3, 0x00, 0x67, 0xcc}, ++ {0xb0, 0x03, 0x09, 0xcc}, ++ {0xb3, 0x05, 0x00, 0xcc}, ++ {0xb3, 0x06, 0x00, 0xcc}, ++ {0xb3, 0x5c, 0x01, 0xcc}, ++ {0xb3, 0x08, 0x01, 0xcc}, ++ {0xb3, 0x09, 0x0c, 0xcc}, ++ {0xb3, 0x34, 0x01, 0xcc}, ++ {0xb3, 0x35, 0xf6, 0xcc}, /* i2c add: 76 */ ++ {0xb3, 0x02, 0xb0, 0xcc}, ++ {0xb3, 0x03, 0x18, 0xcc}, ++ {0xb3, 0x04, 0x15, 0xcc}, ++ {0xb3, 0x20, 0x00, 0xcc}, ++ {0xb3, 0x21, 0x00, 0xcc}, ++ {0xb3, 0x22, 0x04, 0xcc}, ++ {0xb3, 0x23, 0x00, 0xcc}, ++ {0xb3, 0x14, 0x00, 0xcc}, ++ {0xb3, 0x15, 0x00, 0xcc}, ++ {0xb3, 0x16, 0x04, 0xcc}, ++ {0xb3, 0x17, 0xff, 0xcc}, ++ {0xb3, 0x2c, 0x03, 0xcc}, ++ {0xb3, 0x2d, 0x56, 0xcc}, ++ {0xb3, 0x2e, 0x02, 0xcc}, ++ {0xb3, 0x2f, 0x0a, 0xcc}, ++ {0xb3, 0x40, 0x00, 0xcc}, ++ {0xb3, 0x41, 0x34, 0xcc}, ++ {0xb3, 0x42, 0x01, 0xcc}, ++ {0xb3, 0x43, 0xe0, 0xcc}, ++ {0xbc, 0x00, 0x71, 0xcc}, ++ {0xbc, 0x01, 0x01, 0xcc}, ++ {0xb3, 0x01, 0x41, 0xcc}, ++ {0xb3, 0x4d, 0x00, 0xcc}, ++ {0x00, 0x0b, 0x2a, 0xaa}, ++ {0x00, 0x0e, 0x03, 0xaa}, ++ {0x00, 0x0f, 0xea, 0xaa}, ++ {0x00, 0x12, 0x08, 0xaa}, ++ {0x00, 0x1e, 0x06, 0xaa}, ++ {0x00, 0x21, 0x00, 0xaa}, ++ {0x00, 0x31, 0x1f, 0xaa}, ++ {0x00, 0x33, 0x38, 0xaa}, ++ {0x00, 0x36, 0xc0, 0xaa}, ++ {0x00, 0x37, 0xc8, 0xaa}, ++ {0x00, 0x3b, 0x36, 0xaa}, ++ {0x00, 0x4b, 0xfe, 0xaa}, ++ {0x00, 0x4d, 0x2e, 0xaa}, ++ {0x00, 0x51, 0x1c, 0xaa}, ++ {0x00, 0x52, 0x01, 0xaa}, ++ {0x00, 0x55, 0x0a, 0xaa}, ++ {0x00, 0x56, 0x0a, 0xaa}, ++ {0x00, 0x57, 0x07, 0xaa}, ++ {0x00, 0x58, 0x07, 0xaa}, ++ {0x00, 0x59, 0x04, 0xaa}, ++ {0x00, 0x70, 0x68, 0xaa}, ++ {0x00, 0x71, 0x04, 0xaa}, ++ {0x00, 0x72, 0x10, 0xaa}, ++ {0x00, 0x80, 0x71, 0xaa}, ++ {0x00, 0x81, 0x08, 0xaa}, ++ {0x00, 0x82, 0x00, 0xaa}, ++ {0x00, 0x83, 
0x55, 0xaa}, ++ {0x00, 0x84, 0x06, 0xaa}, ++ {0x00, 0x85, 0x06, 0xaa}, ++ {0x00, 0x8b, 0x25, 0xaa}, ++ {0x00, 0x8c, 0x00, 0xaa}, ++ {0x00, 0x8d, 0x86, 0xaa}, ++ {0x00, 0x8e, 0x82, 0xaa}, ++ {0x00, 0x8f, 0x2d, 0xaa}, ++ {0x00, 0x90, 0x8b, 0xaa}, ++ {0x00, 0x91, 0x81, 0xaa}, ++ {0x00, 0x92, 0x81, 0xaa}, ++ {0x00, 0x93, 0x23, 0xaa}, ++ {0x00, 0xa3, 0x2a, 0xaa}, ++ {0x00, 0xa4, 0x03, 0xaa}, ++ {0x00, 0xa5, 0xea, 0xaa}, ++ {0x00, 0xb0, 0x68, 0xaa}, ++ {0x00, 0xbc, 0x04, 0xaa}, ++ {0x00, 0xbe, 0x3b, 0xaa}, ++ {0x00, 0x4e, 0x40, 0xaa}, ++ {0x00, 0x06, 0x04, 0xaa}, ++ {0x00, 0x07, 0x03, 0xaa}, ++ {0x00, 0xcd, 0x18, 0xaa}, ++ {0x00, 0x28, 0x03, 0xaa}, ++ {0x00, 0x29, 0xef, 0xaa}, ++/* reinit on alt 2 (qvga) or alt7 (vga) */ ++ {0xb3, 0x05, 0x00, 0xcc}, ++ {0xb3, 0x06, 0x00, 0xcc}, ++ {0xb8, 0x00, 0x01, 0xcc}, ++ ++ {0x00, 0x1d, 0x85, 0xaa}, ++ {0x00, 0x1e, 0xc6, 0xaa}, ++ {0x00, 0x00, 0x40, 0xdd}, ++ {0x00, 0x1d, 0x05, 0xaa}, ++ ++ {0x00, 0xd6, 0x22, 0xaa}, /* gamma 0 */ ++ {0x00, 0x73, 0x00, 0xaa}, ++ {0x00, 0x74, 0x0a, 0xaa}, ++ {0x00, 0x75, 0x16, 0xaa}, ++ {0x00, 0x76, 0x25, 0xaa}, ++ {0x00, 0x77, 0x34, 0xaa}, ++ {0x00, 0x78, 0x49, 0xaa}, ++ {0x00, 0x79, 0x5a, 0xaa}, ++ {0x00, 0x7a, 0x7f, 0xaa}, ++ {0x00, 0x7b, 0x9b, 0xaa}, ++ {0x00, 0x7c, 0xba, 0xaa}, ++ {0x00, 0x7d, 0xd4, 0xaa}, ++ {0x00, 0x7e, 0xea, 0xaa}, ++ ++ {0x00, 0xd6, 0x62, 0xaa}, /* gamma 1 */ ++ {0x00, 0x73, 0x00, 0xaa}, ++ {0x00, 0x74, 0x0a, 0xaa}, ++ {0x00, 0x75, 0x16, 0xaa}, ++ {0x00, 0x76, 0x25, 0xaa}, ++ {0x00, 0x77, 0x34, 0xaa}, ++ {0x00, 0x78, 0x49, 0xaa}, ++ {0x00, 0x79, 0x5a, 0xaa}, ++ {0x00, 0x7a, 0x7f, 0xaa}, ++ {0x00, 0x7b, 0x9b, 0xaa}, ++ {0x00, 0x7c, 0xba, 0xaa}, ++ {0x00, 0x7d, 0xd4, 0xaa}, ++ {0x00, 0x7e, 0xea, 0xaa}, ++ ++ {0x00, 0xd6, 0xa2, 0xaa}, /* gamma 2 */ ++ {0x00, 0x73, 0x00, 0xaa}, ++ {0x00, 0x74, 0x0a, 0xaa}, ++ {0x00, 0x75, 0x16, 0xaa}, ++ {0x00, 0x76, 0x25, 0xaa}, ++ {0x00, 0x77, 0x34, 0xaa}, ++ {0x00, 0x78, 0x49, 0xaa}, ++ {0x00, 0x79, 0x5a, 0xaa}, ++ {0x00, 0x7a, 0x7f, 0xaa}, ++ {0x00, 0x7b, 0x9b, 0xaa}, ++ {0x00, 0x7c, 0xba, 0xaa}, ++ {0x00, 0x7d, 0xd4, 0xaa}, ++ {0x00, 0x7e, 0xea, 0xaa}, ++ ++ {0x00, 0xaa, 0xff, 0xaa}, /* back light comp */ ++ {0x00, 0xc4, 0x03, 0xaa}, ++ {0x00, 0xc5, 0x19, 0xaa}, ++ {0x00, 0xc6, 0x03, 0xaa}, ++ {0x00, 0xc7, 0x91, 0xaa}, ++ {0x00, 0xc8, 0x01, 0xaa}, ++ {0x00, 0xc9, 0xdd, 0xaa}, ++ {0x00, 0xca, 0x02, 0xaa}, ++ {0x00, 0xcb, 0x37, 0xaa}, ++ ++/* read d1 */ ++ {0x00, 0xd1, 0x3c, 0xaa}, ++ {0x00, 0xb8, 0x28, 0xaa}, ++ {0x00, 0xb9, 0x1e, 0xaa}, ++ {0x00, 0xb6, 0x14, 0xaa}, ++ {0x00, 0xb7, 0x0f, 0xaa}, ++ {0x00, 0x5c, 0x10, 0xaa}, ++ {0x00, 0x5d, 0x18, 0xaa}, ++ {0x00, 0x5e, 0x24, 0xaa}, ++ {0x00, 0x5f, 0x24, 0xaa}, ++ {0x00, 0x86, 0x1a, 0xaa}, ++ {0x00, 0x60, 0x00, 0xaa}, ++ {0x00, 0x61, 0x1b, 0xaa}, ++ {0x00, 0x62, 0x30, 0xaa}, ++ {0x00, 0x63, 0x40, 0xaa}, ++ {0x00, 0x87, 0x1a, 0xaa}, ++ {0x00, 0x64, 0x00, 0xaa}, ++ {0x00, 0x65, 0x08, 0xaa}, ++ {0x00, 0x66, 0x10, 0xaa}, ++ {0x00, 0x67, 0x20, 0xaa}, ++ {0x00, 0x88, 0x10, 0xaa}, ++ {0x00, 0x68, 0x00, 0xaa}, ++ {0x00, 0x69, 0x08, 0xaa}, ++ {0x00, 0x6a, 0x0f, 0xaa}, ++ {0x00, 0x6b, 0x0f, 0xaa}, ++ {0x00, 0x89, 0x07, 0xaa}, ++ {0x00, 0xd5, 0x4c, 0xaa}, ++ {0x00, 0x0a, 0x00, 0xaa}, ++ {0x00, 0x0b, 0x2a, 0xaa}, ++ {0x00, 0x0e, 0x03, 0xaa}, ++ {0x00, 0x0f, 0xea, 0xaa}, ++ {0x00, 0xa2, 0x00, 0xaa}, ++ {0x00, 0xa3, 0x2a, 0xaa}, ++ {0x00, 0xa4, 0x03, 0xaa}, ++ {0x00, 0xa5, 0xea, 0xaa}, ++ {} ++}; ++static const u8 poxxxx_initVGA[][4] = { ++ {0x00, 0x20, 0x11, 0xaa}, ++ {0x00, 0x33, 0x38, 0xaa}, ++ {0x00, 0xbb, 0x0d, 0xaa}, ++ {0xb3, 
0x22, 0x01, 0xcc}, ++ {0xb3, 0x23, 0xe0, 0xcc}, ++ {0xb3, 0x16, 0x02, 0xcc}, ++ {0xb3, 0x17, 0x7f, 0xcc}, ++ {0xb3, 0x02, 0xb0, 0xcc}, ++ {0xb3, 0x06, 0x00, 0xcc}, ++ {0xb3, 0x5c, 0x01, 0xcc}, ++ {0x00, 0x04, 0x06, 0xaa}, ++ {0x00, 0x05, 0x3f, 0xaa}, ++ {0x00, 0x04, 0x00, 0xdd}, /* delay 1s */ ++ {} ++}; ++static const u8 poxxxx_initQVGA[][4] = { ++ {0x00, 0x20, 0x33, 0xaa}, ++ {0x00, 0x33, 0x38, 0xaa}, ++ {0x00, 0xbb, 0x0d, 0xaa}, ++ {0xb3, 0x22, 0x00, 0xcc}, ++ {0xb3, 0x23, 0xf0, 0xcc}, ++ {0xb3, 0x16, 0x01, 0xcc}, ++ {0xb3, 0x17, 0x3f, 0xcc}, ++ {0xb3, 0x02, 0xb0, 0xcc}, ++ {0xb3, 0x06, 0x01, 0xcc}, ++ {0xb3, 0x5c, 0x00, 0xcc}, ++ {0x00, 0x04, 0x06, 0xaa}, ++ {0x00, 0x05, 0x3f, 0xaa}, ++ {0x00, 0x04, 0x00, 0xdd}, /* delay 1s */ ++ {} ++}; ++static const u8 poxxxx_init_end_1[][4] = { ++ {0x00, 0x47, 0x25, 0xaa}, ++ {0x00, 0x48, 0x80, 0xaa}, ++ {0x00, 0x49, 0x1f, 0xaa}, ++ {0x00, 0x4a, 0x40, 0xaa}, ++ {0x00, 0x44, 0x40, 0xaa}, ++ {0x00, 0xab, 0x4a, 0xaa}, ++ {0x00, 0xb1, 0x00, 0xaa}, ++ {0x00, 0xb2, 0x04, 0xaa}, ++ {0x00, 0xb3, 0x08, 0xaa}, ++ {0x00, 0xb4, 0x0b, 0xaa}, ++ {0x00, 0xb5, 0x0d, 0xaa}, ++ {0x00, 0x59, 0x7e, 0xaa}, /* sharpness */ ++ {0x00, 0x16, 0x00, 0xaa}, /* white balance */ ++ {0x00, 0x18, 0x00, 0xaa}, ++#if 0 ++/* read d1 */ ++ {0x00, 0xd1, 0x3c, 0xaa}, ++ {0x00, 0x94, 0x46, 0xaa}, /* colors */ ++ {0x00, 0x95, 0x51, 0xaa}, ++ {0x00, 0x98, 0x88, 0xaa}, /* contrast */ ++ {0x00, 0x99, 0x93, 0xaa}, /* brightness */ ++#endif ++ {} ++}; ++static const u8 poxxxx_init_end_2[][4] = { ++ {0x00, 0x1d, 0x85, 0xaa}, ++ {0x00, 0x1e, 0x06, 0xaa}, ++ {0x00, 0x1d, 0x05, 0xaa}, ++ {} + }; + + struct sensor_info { +@@ -2420,33 +3207,89 @@ struct sensor_info { + u8 op; + }; + +-static const struct sensor_info sensor_info_data[] = { +-/* sensorId, I2cAdd, IdAdd, VpId, m1, m2, op */ ++/* probe values */ ++static const struct sensor_info vc0321_probe_data[] = { ++/* sensorId, I2cAdd, IdAdd, VpId, m1, m2, op */ ++/* 0 OV9640 */ + {-1, 0x80 | 0x30, 0x0a, 0x0000, 0x25, 0x24, 0x05}, ++/* 1 ICM108T (may respond on IdAdd == 0x83 - tested in vc032x_probe_sensor) */ + {-1, 0x80 | 0x20, 0x82, 0x0000, 0x24, 0x25, 0x01}, +-/* (tested in vc032x_probe_sensor) */ +-/* {-1, 0x80 | 0x20, 0x83, 0x0000, 0x24, 0x25, 0x01}, */ +- {SENSOR_PO3130NC, 0x80 | 0x76, 0x00, 0x3130, 0x24, 0x25, 0x01}, ++/* 2 PO2130 (may detect PO3130NC - tested in vc032x_probe_sensor)*/ ++ {-1, 0x80 | 0x76, 0x00, 0x0000, 0x24, 0x25, 0x01}, ++/* 3 MI1310 */ ++ {-1, 0x80 | 0x5d, 0x00, 0x0000, 0x24, 0x25, 0x01}, ++/* 4 MI360 - tested in vc032x_probe_sensor */ ++/* {SENSOR_MI0360, 0x80 | 0x5d, 0x00, 0x8243, 0x24, 0x25, 0x01}, */ ++/* 5 7131R */ ++ {SENSOR_HV7131R, 0x80 | 0x11, 0x00, 0x0209, 0x24, 0x25, 0x01}, ++/* 6 OV7649 */ ++ {-1, 0x80 | 0x21, 0x0a, 0x0000, 0x21, 0x20, 0x05}, ++/* 7 PAS302BCW */ ++ {-1, 0x80 | 0x40, 0x00, 0x0000, 0x20, 0x22, 0x05}, ++/* 8 OV7660 */ ++ {SENSOR_OV7660, 0x80 | 0x21, 0x0a, 0x7660, 0x26, 0x26, 0x05}, ++/* 9 PO3130NC - (tested in vc032x_probe_sensor) */ ++/* {SENSOR_PO3130NC, 0x80 | 0x76, 0x00, 0x3130, 0x24, 0x25, 0x01}, */ ++/* 10 PO1030KC */ ++ {-1, 0x80 | 0x6e, 0x00, 0x0000, 0x24, 0x25, 0x01}, ++/* 11 MI1310_SOC */ + {SENSOR_MI1310_SOC, 0x80 | 0x5d, 0x00, 0x143a, 0x24, 0x25, 0x01}, +-/* (tested in vc032x_probe_sensor) */ ++/* 12 OV9650 */ ++ {-1, 0x80 | 0x30, 0x0a, 0x0000, 0x25, 0x24, 0x05}, ++/* 13 S5K532 */ ++ {-1, 0x80 | 0x11, 0x39, 0x0000, 0x24, 0x25, 0x01}, ++/* 14 MI360_SOC - ??? 
*/ ++/* 15 PO1200N */ ++ {SENSOR_PO1200, 0x80 | 0x5c, 0x00, 0x1200, 0x67, 0x67, 0x01}, ++/* 16 PO3030K */ ++ {-1, 0x80 | 0x18, 0x00, 0x0000, 0x24, 0x25, 0x01}, ++/* 17 PO2030 */ ++ {-1, 0x80 | 0x6e, 0x00, 0x0000, 0x24, 0x25, 0x01}, ++/* ?? */ ++ {-1, 0x80 | 0x56, 0x01, 0x0000, 0x64, 0x67, 0x01}, ++ {SENSOR_MI1320, 0x80 | 0x48, 0x00, 0x148c, 0x64, 0x65, 0x01}, ++}; ++static const struct sensor_info vc0323_probe_data[] = { ++/* sensorId, I2cAdd, IdAdd, VpId, m1, m2, op */ ++/* 0 OV9640 */ ++ {-1, 0x80 | 0x30, 0x0a, 0x0000, 0x25, 0x24, 0x05}, ++/* 1 ICM108T (may respond on IdAdd == 0x83 - tested in vc032x_probe_sensor) */ ++ {-1, 0x80 | 0x20, 0x82, 0x0000, 0x24, 0x25, 0x01}, ++/* 2 PO2130 (may detect PO3130NC - tested in vc032x_probe_sensor)*/ ++ {-1, 0x80 | 0x76, 0x00, 0x0000, 0x24, 0x25, 0x01}, ++/* 3 MI1310 */ ++ {-1, 0x80 | 0x5d, 0x00, 0x0000, 0x24, 0x25, 0x01}, ++/* 4 MI360 - tested in vc032x_probe_sensor */ + /* {SENSOR_MI0360, 0x80 | 0x5d, 0x00, 0x8243, 0x24, 0x25, 0x01}, */ ++/* 5 7131R */ + {SENSOR_HV7131R, 0x80 | 0x11, 0x00, 0x0209, 0x24, 0x25, 0x01}, ++/* 6 OV7649 */ + {-1, 0x80 | 0x21, 0x0a, 0x0000, 0x21, 0x20, 0x05}, ++/* 7 PAS302BCW */ + {-1, 0x80 | 0x40, 0x00, 0x0000, 0x20, 0x22, 0x05}, ++/* 8 OV7660 */ + {SENSOR_OV7660, 0x80 | 0x21, 0x0a, 0x7660, 0x26, 0x26, 0x05}, +-/* {SENSOR_PO3130NC, 0x80 | 0x76, 0x00, 0x0000, 0x24, 0x25, 0x01}, */ ++/* 9 PO3130NC - (tested in vc032x_probe_sensor) */ ++/* {SENSOR_PO3130NC, 0x80 | 0x76, 0x00, 0x3130, 0x24, 0x25, 0x01}, */ ++/* 10 PO1030KC */ + {-1, 0x80 | 0x6e, 0x00, 0x0000, 0x24, 0x25, 0x01}, +-/* {SENSOR_MI1310_SOC, 0x80 | 0x5d, 0x00, 0x0000, 0x24, 0x25, 0x01}, */ +-/* {-1, 0x80 | 0x30, 0x0a, 0x0000, 0x25, 0x24, 0x05}, */ ++/* 11 MI1310_SOC */ ++ {SENSOR_MI1310_SOC, 0x80 | 0x5d, 0x00, 0x143a, 0x24, 0x25, 0x01}, ++/* 12 OV9650 */ ++ {-1, 0x80 | 0x30, 0x0a, 0x0000, 0x25, 0x24, 0x05}, ++/* 13 S5K532 */ + {-1, 0x80 | 0x11, 0x39, 0x0000, 0x24, 0x25, 0x01}, ++/* 14 MI360_SOC - ??? */ ++/* 15 PO1200N */ + {SENSOR_PO1200, 0x80 | 0x5c, 0x00, 0x1200, 0x67, 0x67, 0x01}, ++/* 16 ?? */ + {-1, 0x80 | 0x2d, 0x00, 0x0000, 0x65, 0x67, 0x01}, ++/* 17 PO2030 */ + {-1, 0x80 | 0x6e, 0x00, 0x0000, 0x24, 0x25, 0x01}, ++/* ?? */ + {-1, 0x80 | 0x56, 0x01, 0x0000, 0x64, 0x67, 0x01}, + {SENSOR_MI1320_SOC, 0x80 | 0x48, 0x00, 0x148c, 0x64, 0x67, 0x01}, +-/*fixme: previously detected?*/ +- {SENSOR_MI1320, 0x80 | 0x48, 0x00, 0x148c, 0x64, 0x65, 0x01}, +-/*fixme: not in the ms-win probe - may be found before?*/ ++/*fixme: not in the ms-win probe - may be found before? */ + {SENSOR_OV7670, 0x80 | 0x21, 0x0a, 0x7673, 0x66, 0x67, 0x05}, + }; + +@@ -2520,20 +3363,31 @@ static int vc032x_probe_sensor(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + struct usb_device *dev = gspca_dev->dev; +- int i; ++ int i, n; + u16 value; + const struct sensor_info *ptsensor_info; + + /*fixme: should also check the other sensor (back mi1320_soc, front mc501cb)*/ + if (sd->flags & FL_SAMSUNG) { + reg_w(dev, 0xa0, 0x01, 0xb301); ++#if 1 + reg_w(dev, 0x89, 0xf0ff, 0xffff); /* select the back sensor */ ++#else ++ reg_w(dev, 0x89, 0xf3ff, 0xffff); /* select the front sensor */ ++#endif + } + + reg_r(gspca_dev, 0xa1, 0xbfcf, 1); +- PDEBUG(D_PROBE, "check sensor header %02x", gspca_dev->usb_buf[0]); +- for (i = 0; i < ARRAY_SIZE(sensor_info_data); i++) { +- ptsensor_info = &sensor_info_data[i]; ++ PDEBUG(D_PROBE, "vc032%d check sensor header %02x", ++ sd->bridge == BRIDGE_VC0321 ? 
1 : 3, gspca_dev->usb_buf[0]); ++ if (sd->bridge == BRIDGE_VC0321) { ++ ptsensor_info = vc0321_probe_data; ++ n = ARRAY_SIZE(vc0321_probe_data); ++ } else { ++ ptsensor_info = vc0323_probe_data; ++ n = ARRAY_SIZE(vc0323_probe_data); ++ } ++ for (i = 0; i < n; i++) { + reg_w(dev, 0xa0, 0x02, 0xb334); + reg_w(dev, 0xa0, ptsensor_info->m1, 0xb300); + reg_w(dev, 0xa0, ptsensor_info->m2, 0xb300); +@@ -2551,13 +3405,15 @@ static int vc032x_probe_sensor(struct gspca_dev *gspca_dev) + return ptsensor_info->sensorId; + + switch (value) { ++ case 0x3130: ++ return SENSOR_PO3130NC; + case 0x7673: + return SENSOR_OV7670; + case 0x8243: + return SENSOR_MI0360; + } +-/*fixme: should return here*/ + } ++ ptsensor_info++; + } + return -1; + } +@@ -2619,7 +3475,7 @@ static void usb_exchange(struct gspca_dev *gspca_dev, + i2c_write(gspca_dev, data[i][0], &data[i][1], 2); + break; + case 0xdd: +- msleep(data[i][2] + 10); ++ msleep(data[i][1] * 256 + data[i][2] + 10); + break; + } + i++; +@@ -2627,6 +3483,16 @@ static void usb_exchange(struct gspca_dev *gspca_dev, + /*not reached*/ + } + ++#if 0 ++static void vc0321_reset(struct gspca_dev *gspca_dev) ++{ ++ reg_w(gspca_dev->dev, 0xa0, 0x00, 0xb04d); ++ reg_w(gspca_dev->dev, 0xa0, 0x01, 0xb301); ++ msleep(100); ++ reg_w(gspca_dev->dev, 0xa0, 0x01, 0xb003); ++ msleep(100); ++} ++#endif + + /* this function is called at probe time */ + static int sd_config(struct gspca_dev *gspca_dev, +@@ -2646,12 +3512,20 @@ static int sd_config(struct gspca_dev *gspca_dev, + 64, /* OV7670 6 */ + 128, /* PO1200 7 */ + 128, /* PO3130NC 8 */ ++ 128, /* POxxxx 9 */ + }; + + cam = &gspca_dev->cam; + sd->bridge = id->driver_info >> 8; + sd->flags = id->driver_info & 0xff; +- sensor = vc032x_probe_sensor(gspca_dev); ++#if 0 ++ vc0321_reset(gspca_dev); ++#endif ++ if (id->idVendor == 0x046d && ++ (id->idProduct == 0x0892 || id->idProduct == 0x0896)) ++ sensor = SENSOR_POxxxx; ++ else ++ sensor = vc032x_probe_sensor(gspca_dev); + switch (sensor) { + case -1: + PDEBUG(D_PROBE, "Unknown sensor..."); +@@ -2684,6 +3558,9 @@ static int sd_config(struct gspca_dev *gspca_dev, + case SENSOR_PO3130NC: + PDEBUG(D_PROBE, "Find Sensor PO3130NC"); + break; ++ case SENSOR_POxxxx: ++ PDEBUG(D_PROBE, "Sensor POxxxx"); ++ break; + } + sd->sensor = sensor; + +@@ -2712,28 +3589,19 @@ static int sd_config(struct gspca_dev *gspca_dev, + } + cam->npkt = npkt[sd->sensor]; + ++ sd->brightness = BRIGHTNESS_DEF; ++ sd->contrast = CONTRAST_DEF; ++ sd->colors = COLOR_DEF; + sd->hflip = HFLIP_DEF; + sd->vflip = VFLIP_DEF; +- if (sd->sensor == SENSOR_OV7670) +- sd->flags |= FL_HFLIP | FL_VFLIP; + sd->lightfreq = FREQ_DEF; +- if (sd->sensor != SENSOR_OV7670) +- gspca_dev->ctrl_dis = (1 << LIGHTFREQ_IDX); +- switch (sd->sensor) { +- case SENSOR_MI1310_SOC: +- case SENSOR_MI1320_SOC: +- case SENSOR_OV7660: +- case SENSOR_OV7670: +- case SENSOR_PO1200: +- break; +- default: +- gspca_dev->ctrl_dis = (1 << HFLIP_IDX) +- | (1 << VFLIP_IDX); +- break; +- } +- + sd->sharpness = SHARPNESS_DEF; + ++ gspca_dev->ctrl_dis = ctrl_dis[sd->sensor]; ++ ++ if (sd->sensor == SENSOR_OV7670) ++ sd->flags |= FL_HFLIP | FL_VFLIP; ++ + if (sd->bridge == BRIDGE_VC0321) { + reg_r(gspca_dev, 0x8a, 0, 3); + reg_w(dev, 0x87, 0x00, 0x0f0f); +@@ -2747,10 +3615,55 @@ static int sd_config(struct gspca_dev *gspca_dev, + /* this function is called at probe and resume time */ + static int sd_init(struct gspca_dev *gspca_dev) + { ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ if (sd->sensor == SENSOR_POxxxx) { ++ reg_r(gspca_dev, 0xa1, 0xb300, 1); 
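A note on the 0xdd entries (a minimal illustration, not part of the patch): in the vc032x 4-byte init tables the last byte selects the operation, and the usb_exchange() change above widens the 0xdd "delay" decoding to a 16-bit millisecond count taken from bytes 1 and 2. That is what makes entries such as {0x00, 0x04, 0x00, 0xdd} /* delay 1s */ in poxxxx_initVGA/poxxxx_initQVGA actually wait about a second; with the old one-byte decoding they would have slept only about 10 ms. The standalone decode_delay() helper and main() below are hypothetical, shown only to demonstrate the arithmetic.

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the patched msleep() call in usb_exchange(). */
static unsigned int decode_delay(const uint8_t e[4])
{
	return e[1] * 256 + e[2] + 10;	/* 16-bit count in e[1]:e[2], plus 10 ms margin */
}

int main(void)
{
	static const uint8_t delay_1s[4]   = {0x00, 0x04, 0x00, 0xdd};	/* "delay 1s" in poxxxx_initVGA */
	static const uint8_t delay_16ms[4] = {0x00, 0x00, 0x10, 0xdd};	/* settle delay in poxxxx_init_common */

	printf("%u ms\n", decode_delay(delay_1s));	/* prints 1034 */
	printf("%u ms\n", decode_delay(delay_16ms));	/* prints 26 */
	return 0;
}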
++ if (gspca_dev->usb_buf[0] != 0) { ++ reg_w(gspca_dev->dev, 0xa0, 0x26, 0xb300); ++ reg_w(gspca_dev->dev, 0xa0, 0x04, 0xb300); ++ reg_w(gspca_dev->dev, 0xa0, 0x00, 0xb300); ++ } ++ } + return 0; + } + +-/* some sensors only */ ++static void setbrightness(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ u8 data; ++ ++ if (gspca_dev->ctrl_dis & (1 << BRIGHTNESS_IDX)) ++ return; ++ data = sd->brightness; ++ if (data >= 0x80) ++ data &= 0x7f; ++ else ++ data = 0xff ^ data; ++ i2c_write(gspca_dev, 0x98, &data, 1); ++} ++ ++static void setcontrast(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ if (gspca_dev->ctrl_dis & (1 << CONTRAST_IDX)) ++ return; ++ i2c_write(gspca_dev, 0x99, &sd->contrast, 1); ++} ++ ++static void setcolors(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ u8 data; ++ ++ if (gspca_dev->ctrl_dis & (1 << COLORS_IDX)) ++ return; ++ data = sd->colors - (sd->colors >> 3) - 1; ++ i2c_write(gspca_dev, 0x94, &data, 1); ++ i2c_write(gspca_dev, 0x95, &sd->colors, 1); ++} ++ + static void sethvflip(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +@@ -2764,6 +3677,7 @@ static void sethvflip(struct gspca_dev *gspca_dev) + vflip = !vflip; + switch (sd->sensor) { + case SENSOR_MI1310_SOC: ++ case SENSOR_MI1320: + case SENSOR_MI1320_SOC: + data[0] = data[1] = 0; /* select page 0 */ + i2c_write(gspca_dev, 0xf0, data, 2); +@@ -2801,18 +3715,29 @@ static void setlightfreq(struct gspca_dev *gspca_dev) + usb_exchange(gspca_dev, ov7660_freq_tb[sd->lightfreq]); + } + +-/* po1200 only */ + static void setsharpness(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + u8 data; + +- if (sd->sensor != SENSOR_PO1200) +- return; +- data = 0; +- i2c_write(gspca_dev, 0x03, &data, 1); +- data = 0xb5 + sd->sharpness * 3; +- i2c_write(gspca_dev, 0x61, &data, 1); ++ switch (sd->sensor) { ++ case SENSOR_PO1200: ++ data = 0; ++ i2c_write(gspca_dev, 0x03, &data, 1); ++ if (sd->sharpness < 0) ++ data = 0x6a; ++ else ++ data = 0xb5 + sd->sharpness * 3; ++ i2c_write(gspca_dev, 0x61, &data, 1); ++ break; ++ case SENSOR_POxxxx: ++ if (sd->sharpness < 0) ++ data = 0x7e; /* def = max */ ++ else ++ data = 0x60 + sd->sharpness * 0x0f; ++ i2c_write(gspca_dev, 0x59, &data, 1); ++ break; ++ } + } + + static int sd_start(struct gspca_dev *gspca_dev) +@@ -2826,6 +3751,11 @@ static int sd_start(struct gspca_dev *gspca_dev) + mi1320_soc_InitSXGA, + mi1320_soc_InitVGA, + mi1320_soc_InitQVGA, ++#if 0 /* JPEG vc0323 */ ++ mi1320_soc_InitSXGA_JPG, ++ mi1320_soc_InitVGA_JPG, ++ mi1320_soc_InitQVGA_JPG ++#endif + }; + + /*fixme: back sensor only*/ +@@ -2922,12 +3852,27 @@ static int sd_start(struct gspca_dev *gspca_dev) + usb_exchange(gspca_dev, init); + init = po3130_rundata; + break; +- default: +-/* case SENSOR_PO1200: */ ++ case SENSOR_PO1200: + GammaT = po1200_gamma; + MatrixT = po1200_matrix; + init = po1200_initVGA_data; + break; ++ default: ++/* case SENSOR_POxxxx: */ ++ usb_exchange(gspca_dev, poxxxx_init_common); ++ if (mode) ++ init = poxxxx_initQVGA; ++ else ++ init = poxxxx_initVGA; ++ usb_exchange(gspca_dev, init); ++ reg_r(gspca_dev, 0x8c, 0x0000, 3); ++ reg_w(gspca_dev->dev, 0xa0, ++ gspca_dev->usb_buf[2] & 1 ? 
0 : 1, ++ 0xb35c); ++ msleep(300); ++/*fixme: i2c read 04 and 05*/ ++ init = poxxxx_init_end_1; ++ break; + } + usb_exchange(gspca_dev, init); + if (GammaT && MatrixT) { +@@ -2936,7 +3881,6 @@ static int sd_start(struct gspca_dev *gspca_dev) + put_tab_to_reg(gspca_dev, GammaT, 17, 0xb86c); + put_tab_to_reg(gspca_dev, MatrixT, 9, 0xb82c); + +- /* set the led on 0x0892 0x0896 */ + switch (sd->sensor) { + case SENSOR_PO1200: + case SENSOR_HV7131R: +@@ -2945,16 +3889,22 @@ static int sd_start(struct gspca_dev *gspca_dev) + case SENSOR_MI1310_SOC: + reg_w(gspca_dev->dev, 0x89, 0x058c, 0x0000); + break; +- default: +- if (!(sd->flags & FL_SAMSUNG)) +- reg_w(gspca_dev->dev, 0x89, 0xffff, 0xfdff); +- break; + } + msleep(100); + setsharpness(gspca_dev); + sethvflip(gspca_dev); + setlightfreq(gspca_dev); + } ++ if (sd->sensor == SENSOR_POxxxx) { ++ setcolors(gspca_dev); ++ setbrightness(gspca_dev); ++ setcontrast(gspca_dev); ++ ++ /* led on */ ++ msleep(80); ++ reg_w(gspca_dev->dev, 0x89, 0xffff, 0xfdff); ++ usb_exchange(gspca_dev, poxxxx_init_end_2); ++ } + return 0; + } + +@@ -2963,10 +3913,17 @@ static void sd_stopN(struct gspca_dev *gspca_dev) + struct usb_device *dev = gspca_dev->dev; + struct sd *sd = (struct sd *) gspca_dev; + +- if (sd->sensor == SENSOR_MI1310_SOC) ++ switch (sd->sensor) { ++ case SENSOR_MI1310_SOC: + reg_w(dev, 0x89, 0x058c, 0x00ff); +- else if (!(sd->flags & FL_SAMSUNG)) +- reg_w(dev, 0x89, 0xffff, 0xffff); ++ break; ++ case SENSOR_POxxxx: ++ return; ++ default: ++ if (!(sd->flags & FL_SAMSUNG)) ++ reg_w(dev, 0x89, 0xffff, 0xffff); ++ break; ++ } + reg_w(dev, 0xa0, 0x01, 0xb301); + reg_w(dev, 0xa0, 0x09, 0xb003); + } +@@ -2984,6 +3941,12 @@ static void sd_stop0(struct gspca_dev *gspca_dev) + reg_w(dev, 0x89, 0x058c, 0x00ff); + else if (!(sd->flags & FL_SAMSUNG)) + reg_w(dev, 0x89, 0xffff, 0xffff); ++ ++ if (sd->sensor == SENSOR_POxxxx) { ++ reg_w(dev, 0xa0, 0x26, 0xb300); ++ reg_w(dev, 0xa0, 0x04, 0xb300); ++ reg_w(dev, 0xa0, 0x00, 0xb300); ++ } + } + + static void sd_pkt_scan(struct gspca_dev *gspca_dev, +@@ -3020,6 +3983,60 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, + gspca_frame_add(gspca_dev, INTER_PACKET, data, len); + } + ++static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ sd->brightness = val; ++ if (gspca_dev->streaming) ++ setbrightness(gspca_dev); ++ return 0; ++} ++ ++static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->brightness; ++ return 0; ++} ++ ++static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ sd->contrast = val; ++ if (gspca_dev->streaming) ++ setcontrast(gspca_dev); ++ return 0; ++} ++ ++static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->contrast; ++ return 0; ++} ++ ++static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ sd->colors = val; ++ if (gspca_dev->streaming) ++ setcolors(gspca_dev); ++ return 0; ++} ++ ++static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val) ++{ ++ struct sd *sd = (struct sd *) gspca_dev; ++ ++ *val = sd->colors; ++ return 0; ++} ++ + static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val) + { + struct sd *sd = (struct sd *) gspca_dev; +diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c +index 
1a800fc..56fc686 100644 +--- a/drivers/media/video/gspca/zc3xx.c ++++ b/drivers/media/video/gspca/zc3xx.c +@@ -1,9 +1,8 @@ + /* +- * Z-Star/Vimicro zc301/zc302p/vc30x library +- * Copyright (C) 2004 2005 2006 Michel Xhaard +- * mxhaard@magic.fr ++ * Z-Star/Vimicro zc301/zc302p/vc30x library + * +- * V4L2 by Jean-Francois Moine ++ * Copyright (C) 2009-2010 Jean-Francois Moine ++ * Copyright (C) 2004 2005 2006 Michel Xhaard mxhaard@magic.fr + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -22,10 +21,11 @@ + + #define MODULE_NAME "zc3xx" + ++#include + #include "gspca.h" + #include "jpeg.h" + +-MODULE_AUTHOR("Michel Xhaard , " ++MODULE_AUTHOR("Jean-Francois Moine , " + "Serge A. Suchkov "); + MODULE_DESCRIPTION("GSPCA ZC03xx/VC3xx USB Camera Driver"); + MODULE_LICENSE("GPL"); +@@ -39,18 +39,18 @@ static int force_sensor = -1; + struct sd { + struct gspca_dev gspca_dev; /* !! must be the first item */ + +- __u8 brightness; +- __u8 contrast; +- __u8 gamma; +- __u8 autogain; +- __u8 lightfreq; +- __u8 sharpness; ++ u8 brightness; ++ u8 contrast; ++ u8 gamma; ++ u8 autogain; ++ u8 lightfreq; ++ u8 sharpness; + u8 quality; /* image quality */ + #define QUALITY_MIN 40 + #define QUALITY_MAX 60 + #define QUALITY_DEF 50 + +- signed char sensor; /* Type of image sensor chip */ ++ u8 sensor; /* Type of image sensor chip */ + /* !! values used in different tables */ + #define SENSOR_ADCM2700 0 + #define SENSOR_CS2102 1 +@@ -92,9 +92,8 @@ static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val); + static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val); + static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val); + +-static struct ctrl sd_ctrls[] = { ++static const struct ctrl sd_ctrls[] = { + #define BRIGHTNESS_IDX 0 +-#define SD_BRIGHTNESS 0 + { + { + .id = V4L2_CID_BRIGHTNESS, +@@ -103,26 +102,26 @@ static struct ctrl sd_ctrls[] = { + .minimum = 0, + .maximum = 255, + .step = 1, +- .default_value = 128, ++#define BRIGHTNESS_DEF 128 ++ .default_value = BRIGHTNESS_DEF, + }, + .set = sd_setbrightness, + .get = sd_getbrightness, + }, +-#define SD_CONTRAST 1 + { + { + .id = V4L2_CID_CONTRAST, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Contrast", + .minimum = 0, +- .maximum = 256, ++ .maximum = 255, + .step = 1, +- .default_value = 128, ++#define CONTRAST_DEF 128 ++ .default_value = CONTRAST_DEF, + }, + .set = sd_setcontrast, + .get = sd_getcontrast, + }, +-#define SD_GAMMA 2 + { + { + .id = V4L2_CID_GAMMA, +@@ -136,7 +135,6 @@ static struct ctrl sd_ctrls[] = { + .set = sd_setgamma, + .get = sd_getgamma, + }, +-#define SD_AUTOGAIN 3 + { + { + .id = V4L2_CID_AUTOGAIN, +@@ -145,13 +143,13 @@ static struct ctrl sd_ctrls[] = { + .minimum = 0, + .maximum = 1, + .step = 1, +- .default_value = 1, ++#define AUTOGAIN_DEF 1 ++ .default_value = AUTOGAIN_DEF, + }, + .set = sd_setautogain, + .get = sd_getautogain, + }, + #define LIGHTFREQ_IDX 4 +-#define SD_FREQ 4 + { + { + .id = V4L2_CID_POWER_LINE_FREQUENCY, +@@ -160,12 +158,12 @@ static struct ctrl sd_ctrls[] = { + .minimum = 0, + .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */ + .step = 1, +- .default_value = 1, ++#define FREQ_DEF 0 ++ .default_value = FREQ_DEF, + }, + .set = sd_setfreq, + .get = sd_getfreq, + }, +-#define SD_SHARPNESS 5 + { + { + .id = V4L2_CID_SHARPNESS, +@@ -174,7 +172,8 @@ static struct ctrl sd_ctrls[] = { + .minimum = 0, + .maximum = 3, + .step = 1, +- .default_value = 2, ++#define SHARPNESS_DEF 2 ++ .default_value = 
SHARPNESS_DEF, + }, + .set = sd_setsharpness, + .get = sd_getsharpness, +@@ -194,6 +193,19 @@ static const struct v4l2_pix_format vga_mode[] = { + .priv = 0}, + }; + ++static const struct v4l2_pix_format broken_vga_mode[] = { ++ {320, 232, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, ++ .bytesperline = 320, ++ .sizeimage = 320 * 232 * 4 / 8 + 590, ++ .colorspace = V4L2_COLORSPACE_JPEG, ++ .priv = 1}, ++ {640, 472, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, ++ .bytesperline = 640, ++ .sizeimage = 640 * 472 * 3 / 8 + 590, ++ .colorspace = V4L2_COLORSPACE_JPEG, ++ .priv = 0}, ++}; ++ + static const struct v4l2_pix_format sif_mode[] = { + {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, + .bytesperline = 176, +@@ -209,15 +221,19 @@ static const struct v4l2_pix_format sif_mode[] = { + + /* usb exchanges */ + struct usb_action { +- __u8 req; +- __u8 val; +- __u16 idx; ++ u8 req; ++ u8 val; ++ u16 idx; + }; + + static const struct usb_action adcm2700_Initial[] = { + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ + {0xa0, 0x04, ZC3XX_R002_CLOCKSELECT}, /* 00,02,04,cc */ ++#if 1 /*jfm*/ + {0xa0, 0x00, ZC3XX_R008_CLOCKSETTING}, /* 00,08,03,cc */ ++#else ++ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* 00,08,03,cc */ ++#endif + {0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,0a,cc */ + {0xa0, 0xd3, ZC3XX_R08B_I2CDEVICEADDR}, /* 00,8b,d3,cc */ + {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc */ +@@ -237,7 +253,11 @@ static const struct usb_action adcm2700_Initial[] = { + {0xbb, 0x00, 0x0400}, /* 04,00,00,bb */ + {0xdd, 0x00, 0x0010}, /* 00,00,10,dd */ + {0xbb, 0x0f, 0x140f}, /* 14,0f,0f,bb */ ++#if 1 /*jfm-mswin*/ + {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,37,cc */ ++#else ++ {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,37,cc */ ++#endif + {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0d,cc */ + {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, /* 01,89,06,cc */ + {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, /* 01,c5,03,cc */ +@@ -306,7 +326,11 @@ static const struct usb_action adcm2700_Initial[] = { + static const struct usb_action adcm2700_InitialScale[] = { + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ + {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, /* 00,02,10,cc */ ++#if 1 /*jfm*/ + {0xa0, 0x00, ZC3XX_R008_CLOCKSETTING}, /* 00,08,03,cc */ ++#else ++ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* 00,08,03,cc */ ++#endif + {0xa0, 0x0a, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,0a,cc */ + {0xa0, 0xd3, ZC3XX_R08B_I2CDEVICEADDR}, /* 00,8b,d3,cc */ + {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc */ +@@ -326,7 +350,11 @@ static const struct usb_action adcm2700_InitialScale[] = { + {0xbb, 0x00, 0x0400}, /* 04,00,00,bb */ + {0xdd, 0x00, 0x0010}, /* 00,00,10,dd */ + {0xbb, 0x0f, 0x140f}, /* 14,0f,0f,bb */ ++#if 1 /*jfm-mswin*/ + {0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,37,cc */ ++#else ++ {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,37,cc */ ++#endif + {0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0d,cc */ + {0xa0, 0x06, ZC3XX_R189_AWBSTATUS}, /* 01,89,06,cc */ + {0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE}, /* 01,c5,03,cc */ +@@ -354,9 +382,17 @@ static const struct usb_action adcm2700_InitialScale[] = { + {0xbb, 0x5f, 0x2090}, /* 20,5f,90,bb */ + {0xbb, 0x01, 0x8000}, /* 80,01,00,bb */ + {0xbb, 0x09, 0x8400}, /* 84,09,00,bb */ ++#if 1 /*jfm-mswin*/ + {0xbb, 0x86, 0x0002}, /* 00,88,02,bb */ ++#else ++ {0xbb, 0x88, 0x0002}, /* 00,88,02,bb */ ++#endif + {0xbb, 0xe6, 0x0401}, /* 04,e6,01,bb */ ++#if 1 /*jfm-mswin*/ + {0xbb, 0x86, 0x0802}, /* 08,88,02,bb */ ++#else ++ {0xbb, 0x88, 
0x0802}, /* 08,88,02,bb */ ++#endif + {0xbb, 0xe6, 0x0c01}, /* 0c,e6,01,bb */ + {0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,01,cc */ + {0xdd, 0x00, 0x0010}, /* 00,00,10,dd */ +@@ -421,7 +457,7 @@ static const struct usb_action adcm2700_NoFliker[] = { + {0xaa, 0xfe, 0x0010}, /* 00,fe,10,aa */ + {} + }; +-static const struct usb_action cs2102_Initial[] = { /* 320x240 */ ++static const struct usb_action cs2102_InitialScale[] = { /* 320x240 */ + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, + {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, + {0xa0, 0x00, ZC3XX_R010_CMOSSENSORSELECT}, +@@ -473,7 +509,7 @@ static const struct usb_action cs2102_Initial[] = { /* 320x240 */ + {} + }; + +-static const struct usb_action cs2102_InitialScale[] = { /* 640x480 */ ++static const struct usb_action cs2102_Initial[] = { /* 640x480 */ + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, + {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, + {0xa0, 0x00, ZC3XX_R010_CMOSSENSORSELECT}, +@@ -524,7 +560,7 @@ static const struct usb_action cs2102_InitialScale[] = { /* 640x480 */ + {0xa0, 0x00, 0x01ad}, + {} + }; +-static const struct usb_action cs2102_50HZ[] = { ++static const struct usb_action cs2102_50HZScale[] = { + {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, + {0xaa, 0x23, 0x0001}, + {0xaa, 0x24, 0x005f}, +@@ -546,7 +582,7 @@ static const struct usb_action cs2102_50HZ[] = { + {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, + {} + }; +-static const struct usb_action cs2102_50HZScale[] = { ++static const struct usb_action cs2102_50HZ[] = { + {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, + {0xaa, 0x23, 0x0000}, + {0xaa, 0x24, 0x00af}, +@@ -568,7 +604,7 @@ static const struct usb_action cs2102_50HZScale[] = { + {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, + {} + }; +-static const struct usb_action cs2102_60HZ[] = { ++static const struct usb_action cs2102_60HZScale[] = { + {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, + {0xaa, 0x23, 0x0001}, + {0xaa, 0x24, 0x0055}, +@@ -590,7 +626,7 @@ static const struct usb_action cs2102_60HZ[] = { + {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, + {} + }; +-static const struct usb_action cs2102_60HZScale[] = { ++static const struct usb_action cs2102_60HZ[] = { + {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, + {0xaa, 0x23, 0x0000}, + {0xaa, 0x24, 0x00aa}, +@@ -612,7 +648,7 @@ static const struct usb_action cs2102_60HZScale[] = { + {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, + {} + }; +-static const struct usb_action cs2102_NoFliker[] = { ++static const struct usb_action cs2102_NoFlikerScale[] = { + {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, + {0xaa, 0x23, 0x0001}, + {0xaa, 0x24, 0x005f}, +@@ -634,7 +670,7 @@ static const struct usb_action cs2102_NoFliker[] = { + {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, + {} + }; +-static const struct usb_action cs2102_NoFlikerScale[] = { ++static const struct usb_action cs2102_NoFliker[] = { + {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, + {0xaa, 0x23, 0x0000}, + {0xaa, 0x24, 0x00af}, +@@ -658,7 +694,7 @@ static const struct usb_action cs2102_NoFlikerScale[] = { + }; + + /* CS2102_KOCOM */ +-static const struct usb_action cs2102K_Initial[] = { ++static const struct usb_action cs2102K_InitialScale[] = { + {0xa0, 0x11, ZC3XX_R002_CLOCKSELECT}, + {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, + {0xa0, 0x08, ZC3XX_R010_CMOSSENSORSELECT}, +@@ -917,7 +953,7 @@ static const struct usb_action cs2102K_Initial[] = { + {} + }; + +-static const struct usb_action cs2102K_InitialScale[] = { ++static const struct usb_action cs2102K_Initial[] = { + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, + {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, + {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, +@@ -1495,7 
+1531,7 @@ static const struct usb_action gc0305_NoFliker[] = { + {} + }; + +-static const struct usb_action hdcs2020xb_Initial[] = { ++static const struct usb_action hdcs2020b_InitialScale[] = { + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, + {0xa0, 0x11, ZC3XX_R002_CLOCKSELECT}, + {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* qtable 0x05 */ +@@ -1627,7 +1663,7 @@ static const struct usb_action hdcs2020xb_Initial[] = { + {0xa0, 0x40, ZC3XX_R118_BGAIN}, + {} + }; +-static const struct usb_action hdcs2020xb_InitialScale[] = { ++static const struct usb_action hdcs2020b_Initial[] = { + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, + {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, + {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, +@@ -1819,7 +1855,7 @@ static const struct usb_action hdcs2020b_NoFliker[] = { + {} + }; + +-static const struct usb_action hv7131bxx_Initial[] = { /* 320x240 */ ++static const struct usb_action hv7131b_InitialScale[] = { /* 320x240 */ + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, + {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, + {0xa0, 0x00, ZC3XX_R010_CMOSSENSORSELECT}, +@@ -1866,7 +1902,7 @@ static const struct usb_action hv7131bxx_Initial[] = { /* 320x240 */ + {} + }; + +-static const struct usb_action hv7131bxx_InitialScale[] = { /* 640x480*/ ++static const struct usb_action hv7131b_Initial[] = { /* 640x480*/ + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, + {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, + {0xa0, 0x00, ZC3XX_R010_CMOSSENSORSELECT}, +@@ -2063,7 +2099,7 @@ static const struct usb_action hv7131b_NoFlikerScale[] = { /* 320x240 */ + {} + }; + +-static const struct usb_action hv7131cxx_Initial[] = { ++static const struct usb_action hv7131r_InitialScale[] = { + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, + {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, + {0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, +@@ -2157,7 +2193,7 @@ static const struct usb_action hv7131cxx_Initial[] = { + {} + }; + +-static const struct usb_action hv7131cxx_InitialScale[] = { ++static const struct usb_action hv7131r_Initial[] = { + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, + + {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, /* diff */ +@@ -2259,7 +2295,7 @@ static const struct usb_action hv7131cxx_InitialScale[] = { + {} + }; + +-static const struct usb_action icm105axx_Initial[] = { ++static const struct usb_action icm105a_InitialScale[] = { + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, + {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, + {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, +@@ -2436,7 +2472,7 @@ static const struct usb_action icm105axx_Initial[] = { + {} + }; + +-static const struct usb_action icm105axx_InitialScale[] = { ++static const struct usb_action icm105a_Initial[] = { + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, + {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, + {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, +@@ -2615,7 +2651,7 @@ static const struct usb_action icm105axx_InitialScale[] = { + {0xa0, 0x40, ZC3XX_R118_BGAIN}, + {} + }; +-static const struct usb_action icm105a_50HZ[] = { ++static const struct usb_action icm105a_50HZScale[] = { + {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ + {0xaa, 0x0d, 0x0003}, /* 00,0d,03,aa */ + {0xaa, 0x0c, 0x0020}, /* 00,0c,20,aa */ +@@ -2646,7 +2682,7 @@ static const struct usb_action icm105a_50HZ[] = { + {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */ + {} + }; +-static const struct usb_action icm105a_50HZScale[] = { ++static const struct usb_action icm105a_50HZ[] = { + {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ + {0xaa, 0x0d, 0x0003}, /* 00,0d,03,aa */ + {0xaa, 0x0c, 0x008c}, /* 00,0c,8c,aa */ +@@ -2679,7 +2715,7 @@ 
static const struct usb_action icm105a_50HZScale[] = { + {0xa0, 0xc0, ZC3XX_R1A8_DIGITALGAIN}, /* 01,a8,c0,cc */ + {} + }; +-static const struct usb_action icm105a_60HZ[] = { ++static const struct usb_action icm105a_60HZScale[] = { + {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ + {0xaa, 0x0d, 0x0003}, /* 00,0d,03,aa */ + {0xaa, 0x0c, 0x0004}, /* 00,0c,04,aa */ +@@ -2710,7 +2746,7 @@ static const struct usb_action icm105a_60HZ[] = { + {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */ + {} + }; +-static const struct usb_action icm105a_60HZScale[] = { ++static const struct usb_action icm105a_60HZ[] = { + {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ + {0xaa, 0x0d, 0x0003}, /* 00,0d,03,aa */ + {0xaa, 0x0c, 0x0008}, /* 00,0c,08,aa */ +@@ -2743,7 +2779,7 @@ static const struct usb_action icm105a_60HZScale[] = { + {0xa0, 0xc0, ZC3XX_R1A8_DIGITALGAIN}, /* 01,a8,c0,cc */ + {} + }; +-static const struct usb_action icm105a_NoFliker[] = { ++static const struct usb_action icm105a_NoFlikerScale[] = { + {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ + {0xaa, 0x0d, 0x0003}, /* 00,0d,03,aa */ + {0xaa, 0x0c, 0x0004}, /* 00,0c,04,aa */ +@@ -2774,7 +2810,7 @@ static const struct usb_action icm105a_NoFliker[] = { + {0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc */ + {} + }; +-static const struct usb_action icm105a_NoFlikerScale[] = { ++static const struct usb_action icm105a_NoFliker[] = { + {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ + {0xaa, 0x0d, 0x0003}, /* 00,0d,03,aa */ + {0xaa, 0x0c, 0x0004}, /* 00,0c,04,aa */ +@@ -2808,7 +2844,7 @@ static const struct usb_action icm105a_NoFlikerScale[] = { + {} + }; + +-static const struct usb_action MC501CB_InitialScale[] = { ++static const struct usb_action mc501cb_Initial[] = { + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ + {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, /* 00,02,00,cc */ + {0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,01,cc */ +@@ -2928,7 +2964,7 @@ static const struct usb_action MC501CB_InitialScale[] = { + {} + }; + +-static const struct usb_action MC501CB_Initial[] = { /* 320x240 */ ++static const struct usb_action mc501cb_InitialScale[] = { /* 320x240 */ + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ + {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, /* 00,02,10,cc */ + {0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,01,cc */ +@@ -3047,7 +3083,7 @@ static const struct usb_action MC501CB_Initial[] = { /* 320x240 */ + {} + }; + +-static const struct usb_action MC501CB_50HZ[] = { ++static const struct usb_action mc501cb_50HZScale[] = { + {0xaa, 0x03, 0x0003}, /* 00,03,03,aa */ + {0xaa, 0x10, 0x00fc}, /* 00,10,fc,aa */ + {0xaa, 0x36, 0x001d}, /* 00,36,1D,aa */ +@@ -3064,7 +3100,7 @@ static const struct usb_action MC501CB_50HZ[] = { + {} + }; + +-static const struct usb_action MC501CB_50HZScale[] = { ++static const struct usb_action mc501cb_50HZ[] = { + {0xaa, 0x03, 0x0003}, /* 00,03,03,aa */ + {0xaa, 0x10, 0x00fc}, /* 00,10,fc,aa */ + {0xaa, 0x36, 0x003a}, /* 00,36,3A,aa */ +@@ -3081,7 +3117,7 @@ static const struct usb_action MC501CB_50HZScale[] = { + {} + }; + +-static const struct usb_action MC501CB_60HZ[] = { ++static const struct usb_action mc501cb_60HZScale[] = { + {0xaa, 0x03, 0x0003}, /* 00,03,03,aa */ + {0xaa, 0x10, 0x00fc}, /* 00,10,fc,aa */ + {0xaa, 0x36, 0x0018}, /* 00,36,18,aa */ +@@ -3098,7 +3134,7 @@ static const struct usb_action MC501CB_60HZ[] = { + {} + }; + +-static const struct usb_action MC501CB_60HZScale[] = { ++static const struct usb_action mc501cb_60HZ[] 
= { + {0xaa, 0x03, 0x0003}, /* 00,03,03,aa */ + {0xaa, 0x10, 0x00fc}, /* 00,10,fc,aa */ + {0xaa, 0x36, 0x0030}, /* 00,36,30,aa */ +@@ -3115,7 +3151,7 @@ static const struct usb_action MC501CB_60HZScale[] = { + {} + }; + +-static const struct usb_action MC501CB_NoFliker[] = { ++static const struct usb_action mc501cb_NoFlikerScale[] = { + {0xaa, 0x03, 0x0003}, /* 00,03,03,aa */ + {0xaa, 0x10, 0x00fc}, /* 00,10,fc,aa */ + {0xaa, 0x36, 0x0018}, /* 00,36,18,aa */ +@@ -3132,7 +3168,7 @@ static const struct usb_action MC501CB_NoFliker[] = { + {} + }; + +-static const struct usb_action MC501CB_NoFlikerScale[] = { ++static const struct usb_action mc501cb_NoFliker[] = { + {0xaa, 0x03, 0x0003}, /* 00,03,03,aa */ + {0xaa, 0x10, 0x00fc}, /* 00,10,fc,aa */ + {0xaa, 0x36, 0x0030}, /* 00,36,30,aa */ +@@ -3144,11 +3180,16 @@ static const struct usb_action MC501CB_NoFlikerScale[] = { + {} + }; + +-/* from zs211.inf - HKR,%OV7620%,Initial - 640x480 */ +-static const struct usb_action OV7620_mode0[] = { ++/* from zs211.inf */ ++static const struct usb_action ov7620_Initial[] = { /* 640x480 */ + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ + {0xa0, 0x40, ZC3XX_R002_CLOCKSELECT}, /* 00,02,40,cc */ ++#if 1 /*jfm*/ + {0xa0, 0x00, ZC3XX_R008_CLOCKSETTING}, /* 00,08,00,cc */ ++#else ++ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* 00,08,00,cc */ ++ /* mx change? */ ++#endif + {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, /* 00,01,01,cc */ + {0xa0, 0x06, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,06,cc */ + {0xa0, 0x02, ZC3XX_R083_RGAINADDR}, /* 00,83,02,cc */ +@@ -3214,9 +3255,7 @@ static const struct usb_action OV7620_mode0[] = { + {0xa0, 0x50, ZC3XX_R1A8_DIGITALGAIN}, /* 01,a8,50,cc */ + {} + }; +- +-/* from zs211.inf - HKR,%OV7620%,InitialScale - 320x240 */ +-static const struct usb_action OV7620_mode1[] = { ++static const struct usb_action ov7620_InitialScale[] = { /* 320x240 */ + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ + {0xa0, 0x50, ZC3XX_R002_CLOCKSELECT}, /* 00,02,50,cc */ + {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, /* 00,08,00,cc */ +@@ -3287,9 +3326,7 @@ static const struct usb_action OV7620_mode1[] = { + {0xa0, 0x50, ZC3XX_R1A8_DIGITALGAIN}, /* 01,a8,50,cc */ + {} + }; +- +-/* from zs211.inf - HKR,%OV7620%\AE,50HZ */ +-static const struct usb_action OV7620_50HZ[] = { ++static const struct usb_action ov7620_50HZ[] = { + {0xaa, 0x13, 0x00a3}, /* 00,13,a3,aa */ + {0xdd, 0x00, 0x0100}, /* 00,01,00,dd */ + {0xaa, 0x2b, 0x0096}, /* 00,2b,96,aa */ +@@ -3307,9 +3344,7 @@ static const struct usb_action OV7620_50HZ[] = { + if mode0 (640x480) */ + {} + }; +- +-/* from zs211.inf - HKR,%OV7620%\AE,60HZ */ +-static const struct usb_action OV7620_60HZ[] = { ++static const struct usb_action ov7620_60HZ[] = { + {0xaa, 0x13, 0x00a3}, /* 00,13,a3,aa */ + /* (bug in zs211.inf) */ + {0xdd, 0x00, 0x0100}, /* 00,01,00,dd */ +@@ -3331,9 +3366,7 @@ static const struct usb_action OV7620_60HZ[] = { + {0xa1, 0x01, 0x0037}, */ + {} + }; +- +-/* from zs211.inf - HKR,%OV7620%\AE,NoFliker */ +-static const struct usb_action OV7620_NoFliker[] = { ++static const struct usb_action ov7620_NoFliker[] = { + {0xaa, 0x13, 0x00a3}, /* 00,13,a3,aa */ + /* (bug in zs211.inf) */ + {0xdd, 0x00, 0x0100}, /* 00,01,00,dd */ +@@ -3354,7 +3387,7 @@ static const struct usb_action OV7620_NoFliker[] = { + {} + }; + +-static const struct usb_action ov7630c_Initial[] = { ++static const struct usb_action ov7630c_InitialScale[] = { + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, + {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, + {0xa0, 0x01, 
ZC3XX_R000_SYSTEMCONTROL}, +@@ -3511,7 +3544,7 @@ static const struct usb_action ov7630c_Initial[] = { + {} + }; + +-static const struct usb_action ov7630c_InitialScale[] = { ++static const struct usb_action ov7630c_Initial[] = { + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, + {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, + {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, +@@ -3682,7 +3715,7 @@ static const struct usb_action pas106b_Initial_com[] = { + {} + }; + +-static const struct usb_action pas106b_Initial[] = { /* 176x144 */ ++static const struct usb_action pas106b_InitialScale[] = { /* 176x144 */ + /* JPEG control */ + {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, + /* Sream and Sensor specific */ +@@ -3800,7 +3833,7 @@ static const struct usb_action pas106b_Initial[] = { /* 176x144 */ + {} + }; + +-static const struct usb_action pas106b_InitialScale[] = { /* 352x288 */ ++static const struct usb_action pas106b_Initial[] = { /* 352x288 */ + /* JPEG control */ + {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, + /* Sream and Sensor specific */ +@@ -3972,10 +4005,10 @@ static const struct usb_action pas106b_NoFliker[] = { + {} + }; + +-/* from usbvm31b.inf */ ++/* from lvWIMv.inf 046d:08a2/:08aa 2007/06/03 */ + static const struct usb_action pas202b_Initial[] = { /* 640x480 */ + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ +- {0xa0, 0x00, ZC3XX_R008_CLOCKSETTING}, /* 00,08,00,cc */ ++ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, + {0xa0, 0x0e, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,0e,cc */ + {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, /* 00,02,00,cc */ + {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc */ +@@ -4000,7 +4033,7 @@ static const struct usb_action pas202b_Initial[] = { /* 640x480 */ + {0xaa, 0x09, 0x0006}, /* 00,09,06,aa */ + {0xaa, 0x0a, 0x0001}, /* 00,0a,01,aa */ + {0xaa, 0x0b, 0x0001}, /* 00,0b,01,aa */ +- {0xaa, 0x0c, 0x0008}, /* 00,0c,08,aa */ ++ {0xaa, 0x0c, 0x0006}, + {0xaa, 0x0d, 0x0000}, /* 00,0d,00,aa */ + {0xaa, 0x10, 0x0000}, /* 00,10,00,aa */ + {0xaa, 0x12, 0x0005}, /* 00,12,05,aa */ +@@ -4019,13 +4052,13 @@ static const struct usb_action pas202b_Initial[] = { /* 640x480 */ + }; + static const struct usb_action pas202b_InitialScale[] = { /* 320x240 */ + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ +- {0xa0, 0x00, ZC3XX_R008_CLOCKSETTING}, /* 00,08,00,cc */ ++ {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING}, + {0xa0, 0x0e, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,0e,cc */ + {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, /* 00,02,10,cc */ + {0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc */ + {0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, /* 00,04,80,cc */ + {0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, /* 00,05,01,cc */ +- {0xa0, 0xd0, ZC3XX_R006_FRAMEHEIGHTLOW}, /* 00,06,d0,cc */ ++ {0xa0, 0xe0, ZC3XX_R006_FRAMEHEIGHTLOW}, + {0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING}, /* 00,01,01,cc */ + {0xa0, 0x03, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,03,cc */ + {0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,01,cc */ +@@ -4035,7 +4068,7 @@ static const struct usb_action pas202b_InitialScale[] = { /* 320x240 */ + {0xa0, 0x08, ZC3XX_R11A_FIRSTYLOW}, /* 01,1a,08,cc */ + {0xa0, 0x02, ZC3XX_R11C_FIRSTXLOW}, /* 01,1c,02,cc */ + {0xa0, 0x01, ZC3XX_R09B_WINHEIGHTHIGH}, /* 00,9b,01,cc */ +- {0xa0, 0xd8, ZC3XX_R09C_WINHEIGHTLOW}, /* 00,9c,d8,cc */ ++ {0xa0, 0xe8, ZC3XX_R09C_WINHEIGHTLOW}, + {0xa0, 0x02, ZC3XX_R09D_WINWIDTHHIGH}, /* 00,9d,02,cc */ + {0xa0, 0x88, ZC3XX_R09E_WINWIDTHLOW}, /* 00,9e,88,cc */ + {0xaa, 0x02, 0x0002}, /* 00,02,02,aa */ +@@ -4044,7 +4077,7 @@ static const struct usb_action pas202b_InitialScale[] = 
{ /* 320x240 */ + {0xaa, 0x09, 0x0006}, /* 00,09,06,aa */ + {0xaa, 0x0a, 0x0001}, /* 00,0a,01,aa */ + {0xaa, 0x0b, 0x0001}, /* 00,0b,01,aa */ +- {0xaa, 0x0c, 0x0008}, /* 00,0c,08,aa */ ++ {0xaa, 0x0c, 0x0006}, + {0xaa, 0x0d, 0x0000}, /* 00,0d,00,aa */ + {0xaa, 0x10, 0x0000}, /* 00,10,00,aa */ + {0xaa, 0x12, 0x0005}, /* 00,12,05,aa */ +@@ -4059,6 +4092,8 @@ static const struct usb_action pas202b_InitialScale[] = { /* 320x240 */ + {0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* 02,50,08,cc */ + {0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* 03,01,08,cc */ + {0xa0, 0x70, ZC3XX_R18D_YTARGET}, /* 01,8d,70,cc */ ++ {0xa0, 0xff, ZC3XX_R097_WINYSTARTHIGH}, ++ {0xa0, 0xfe, ZC3XX_R098_WINYSTARTLOW}, + {} + }; + static const struct usb_action pas202b_50HZ[] = { +@@ -4066,22 +4101,22 @@ static const struct usb_action pas202b_50HZ[] = { + {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, /* 00,87,20,cc */ + {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, /* 00,88,21,cc */ + {0xaa, 0x20, 0x0002}, /* 00,20,02,aa */ +- {0xaa, 0x21, 0x0068}, /* 00,21,68,aa */ ++ {0xaa, 0x21, 0x001b}, + {0xaa, 0x03, 0x0044}, /* 00,03,44,aa */ +- {0xaa, 0x04, 0x0009}, /* 00,04,09,aa */ +- {0xaa, 0x05, 0x0028}, /* 00,05,28,aa */ ++ {0xaa, 0x04, 0x0008}, ++ {0xaa, 0x05, 0x001b}, + {0xaa, 0x0e, 0x0001}, /* 00,0e,01,aa */ + {0xaa, 0x0f, 0x0000}, /* 00,0f,00,aa */ +- {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,14,cc */ ++ {0xa0, 0x1c, ZC3XX_R1A9_DIGITALLIMITDIFF}, + {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */ + {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ +- {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,07,cc */ +- {0xa0, 0xd2, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,d2,cc */ ++ {0xa0, 0x02, ZC3XX_R191_EXPOSURELIMITMID}, ++ {0xa0, 0x1b, ZC3XX_R192_EXPOSURELIMITLOW}, + {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ + {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ + {0xa0, 0x4d, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,4d,cc */ +- {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */ +- {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */ ++ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, ++ {0xa0, 0x1b, ZC3XX_R18F_AEUNFREEZE}, + {0xa0, 0x44, ZC3XX_R01D_HSYNC_0}, /* 00,1d,44,cc */ + {0xa0, 0x6f, ZC3XX_R01E_HSYNC_1}, /* 00,1e,6f,cc */ + {0xa0, 0xad, ZC3XX_R01F_HSYNC_2}, /* 00,1f,ad,cc */ +@@ -4094,23 +4129,23 @@ static const struct usb_action pas202b_50HZScale[] = { + {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ + {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, /* 00,87,20,cc */ + {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, /* 00,88,21,cc */ +- {0xaa, 0x20, 0x0002}, /* 00,20,02,aa */ +- {0xaa, 0x21, 0x006c}, /* 00,21,6c,aa */ ++ {0xaa, 0x20, 0x0004}, ++ {0xaa, 0x21, 0x003d}, + {0xaa, 0x03, 0x0041}, /* 00,03,41,aa */ +- {0xaa, 0x04, 0x0009}, /* 00,04,09,aa */ +- {0xaa, 0x05, 0x002c}, /* 00,05,2c,aa */ ++ {0xaa, 0x04, 0x0010}, ++ {0xaa, 0x05, 0x003d}, + {0xaa, 0x0e, 0x0001}, /* 00,0e,01,aa */ + {0xaa, 0x0f, 0x0000}, /* 00,0f,00,aa */ +- {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,14,cc */ ++ {0xa0, 0x1c, ZC3XX_R1A9_DIGITALLIMITDIFF}, + {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */ + {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ +- {0xa0, 0x0f, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,0f,cc */ +- {0xa0, 0xbe, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,be,cc */ ++ {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID}, ++ {0xa0, 0x3d, ZC3XX_R192_EXPOSURELIMITLOW}, + {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ + {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ + {0xa0, 0x9b, 
ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,9b,cc */ +- {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */ +- {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */ ++ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, ++ {0xa0, 0x1b, ZC3XX_R18F_AEUNFREEZE}, + {0xa0, 0x41, ZC3XX_R01D_HSYNC_0}, /* 00,1d,41,cc */ + {0xa0, 0x6f, ZC3XX_R01E_HSYNC_1}, /* 00,1e,6f,cc */ + {0xa0, 0xad, ZC3XX_R01F_HSYNC_2}, /* 00,1f,ad,cc */ +@@ -4130,16 +4165,16 @@ static const struct usb_action pas202b_60HZ[] = { + {0xaa, 0x05, 0x0000}, /* 00,05,00,aa */ + {0xaa, 0x0e, 0x0001}, /* 00,0e,01,aa */ + {0xaa, 0x0f, 0x0000}, /* 00,0f,00,aa */ +- {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,14,cc */ ++ {0xa0, 0x1c, ZC3XX_R1A9_DIGITALLIMITDIFF}, + {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */ + {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ +- {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,07,cc */ +- {0xa0, 0xc0, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,c0,cc */ ++ {0xa0, 0x02, ZC3XX_R191_EXPOSURELIMITMID}, ++ {0xa0, 0x00, ZC3XX_R192_EXPOSURELIMITLOW}, + {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ + {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ + {0xa0, 0x40, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,40,cc */ +- {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */ +- {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */ ++ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, ++ {0xa0, 0x1b, ZC3XX_R18F_AEUNFREEZE}, + {0xa0, 0x45, ZC3XX_R01D_HSYNC_0}, /* 00,1d,45,cc */ + {0xa0, 0x8e, ZC3XX_R01E_HSYNC_1}, /* 00,1e,8e,cc */ + {0xa0, 0xc1, ZC3XX_R01F_HSYNC_2}, /* 00,1f,c1,cc */ +@@ -4152,23 +4187,23 @@ static const struct usb_action pas202b_60HZScale[] = { + {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ + {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, /* 00,87,20,cc */ + {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, /* 00,88,21,cc */ +- {0xaa, 0x20, 0x0002}, /* 00,20,02,aa */ +- {0xaa, 0x21, 0x0004}, /* 00,21,04,aa */ ++ {0xaa, 0x20, 0x0004}, ++ {0xaa, 0x21, 0x0008}, + {0xaa, 0x03, 0x0042}, /* 00,03,42,aa */ +- {0xaa, 0x04, 0x0008}, /* 00,04,08,aa */ +- {0xaa, 0x05, 0x0004}, /* 00,05,04,aa */ ++ {0xaa, 0x04, 0x0010}, ++ {0xaa, 0x05, 0x0008}, + {0xaa, 0x0e, 0x0001}, /* 00,0e,01,aa */ + {0xaa, 0x0f, 0x0000}, /* 00,0f,00,aa */ +- {0xa0, 0x14, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,14,cc */ ++ {0xa0, 0x1c, ZC3XX_R1A9_DIGITALLIMITDIFF}, + {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc */ + {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ +- {0xa0, 0x0f, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,0f,cc */ +- {0xa0, 0x9f, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,9f,cc */ ++ {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID}, ++ {0xa0, 0x08, ZC3XX_R192_EXPOSURELIMITLOW}, + {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ + {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ + {0xa0, 0x81, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,81,cc */ +- {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */ +- {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */ ++ {0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, ++ {0xa0, 0x1b, ZC3XX_R18F_AEUNFREEZE}, + {0xa0, 0x42, ZC3XX_R01D_HSYNC_0}, /* 00,1d,42,cc */ + {0xa0, 0x6f, ZC3XX_R01E_HSYNC_1}, /* 00,1e,6f,cc */ + {0xa0, 0xaf, ZC3XX_R01F_HSYNC_2}, /* 00,1f,af,cc */ +@@ -4182,22 +4217,22 @@ static const struct usb_action pas202b_NoFliker[] = { + {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, /* 00,87,20,cc */ + {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, /* 00,88,21,cc */ + {0xaa, 0x20, 0x0002}, /* 00,20,02,aa */ +- {0xaa, 0x21, 0x0020}, /* 00,21,20,aa */ ++ {0xaa, 0x21, 0x0006}, + {0xaa, 
0x03, 0x0040}, /* 00,03,40,aa */ + {0xaa, 0x04, 0x0008}, /* 00,04,08,aa */ +- {0xaa, 0x05, 0x0020}, /* 00,05,20,aa */ ++ {0xaa, 0x05, 0x0006}, + {0xaa, 0x0e, 0x0001}, /* 00,0e,01,aa */ + {0xaa, 0x0f, 0x0000}, /* 00,0f,00,aa */ + {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ +- {0xa0, 0x07, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,07,cc */ +- {0xa0, 0xf0, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,f0,cc */ ++ {0xa0, 0x02, ZC3XX_R191_EXPOSURELIMITMID}, ++ {0xa0, 0x06, ZC3XX_R192_EXPOSURELIMITLOW}, + {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ + {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ +- {0xa0, 0x02, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,02,cc */ ++ {0xa0, 0x01, ZC3XX_R197_ANTIFLICKERLOW}, + {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */ + {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */ + {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,00,cc */ +- {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,00,cc */ ++ {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, + {0xa0, 0x40, ZC3XX_R01D_HSYNC_0}, /* 00,1d,40,cc */ + {0xa0, 0x60, ZC3XX_R01E_HSYNC_1}, /* 00,1e,60,cc */ + {0xa0, 0x90, ZC3XX_R01F_HSYNC_2}, /* 00,1f,90,cc */ +@@ -4210,23 +4245,23 @@ static const struct usb_action pas202b_NoFlikerScale[] = { + {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* 00,19,00,cc */ + {0xa0, 0x20, ZC3XX_R087_EXPTIMEMID}, /* 00,87,20,cc */ + {0xa0, 0x21, ZC3XX_R088_EXPTIMELOW}, /* 00,88,21,cc */ +- {0xaa, 0x20, 0x0002}, /* 00,20,02,aa */ +- {0xaa, 0x21, 0x0010}, /* 00,21,10,aa */ ++ {0xaa, 0x20, 0x0004}, ++ {0xaa, 0x21, 0x000c}, + {0xaa, 0x03, 0x0040}, /* 00,03,40,aa */ +- {0xaa, 0x04, 0x0008}, /* 00,04,08,aa */ +- {0xaa, 0x05, 0x0010}, /* 00,05,10,aa */ ++ {0xaa, 0x04, 0x0010}, ++ {0xaa, 0x05, 0x000c}, + {0xaa, 0x0e, 0x0001}, /* 00,0e,01,aa */ + {0xaa, 0x0f, 0x0000}, /* 00,0f,00,aa */ + {0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc */ +- {0xa0, 0x0f, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,0f,cc */ +- {0xa0, 0xf0, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,f0,cc */ ++ {0xa0, 0x04, ZC3XX_R191_EXPOSURELIMITMID}, ++ {0xa0, 0x0c, ZC3XX_R192_EXPOSURELIMITLOW}, + {0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc */ + {0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc */ + {0xa0, 0x02, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,02,cc */ + {0xa0, 0x10, ZC3XX_R18C_AEFREEZE}, /* 01,8c,10,cc */ + {0xa0, 0x20, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,20,cc */ + {0xa0, 0x00, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,00,cc */ +- {0xa0, 0x00, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,00,cc */ ++ {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, + {0xa0, 0x40, ZC3XX_R01D_HSYNC_0}, /* 00,1d,40,cc */ + {0xa0, 0x60, ZC3XX_R01E_HSYNC_1}, /* 00,1e,60,cc */ + {0xa0, 0x90, ZC3XX_R01F_HSYNC_2}, /* 00,1f,90,cc */ +@@ -4713,8 +4748,8 @@ static const struct usb_action pb0330_NoFlikerScale[] = { + {} + }; + +-/* from oem9.inf - HKR,%PO2030%,Initial - 640x480 - (close to CS2102) */ +-static const struct usb_action PO2030_mode0[] = { ++/* from oem9.inf */ ++static const struct usb_action po2030_Initial[] = { /* 640x480 */ + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ + {0xa0, 0x04, ZC3XX_R002_CLOCKSELECT}, /* 00,02,04,cc */ + {0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,01,cc */ +@@ -4790,8 +4825,8 @@ static const struct usb_action PO2030_mode0[] = { + {} + }; + +-/* from oem9.inf - HKR,%PO2030%,InitialScale - 320x240 */ +-static const struct usb_action PO2030_mode1[] = { ++/* from oem9.inf */ ++static const struct usb_action po2030_InitialScale[] = { /* 320x240 */ + {0xa0, 0x01, 
ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */ + {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, /* 00,02,10,cc */ + {0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,01,cc */ +@@ -4867,7 +4902,7 @@ static const struct usb_action PO2030_mode1[] = { + {} + }; + +-static const struct usb_action PO2030_50HZ[] = { ++static const struct usb_action po2030_50HZ[] = { + {0xaa, 0x8d, 0x0008}, /* 00,8d,08,aa */ + {0xaa, 0x1a, 0x0001}, /* 00,1a,01,aa */ + {0xaa, 0x1b, 0x000a}, /* 00,1b,0a,aa */ +@@ -4889,7 +4924,7 @@ static const struct usb_action PO2030_50HZ[] = { + {} + }; + +-static const struct usb_action PO2030_60HZ[] = { ++static const struct usb_action po2030_60HZ[] = { + {0xaa, 0x8d, 0x0008}, /* 00,8d,08,aa */ + {0xaa, 0x1a, 0x0000}, /* 00,1a,00,aa */ + {0xaa, 0x1b, 0x00de}, /* 00,1b,de,aa */ +@@ -4912,7 +4947,7 @@ static const struct usb_action PO2030_60HZ[] = { + {} + }; + +-static const struct usb_action PO2030_NoFliker[] = { ++static const struct usb_action po2030_NoFliker[] = { + {0xa0, 0x02, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,02,cc */ + {0xaa, 0x8d, 0x000d}, /* 00,8d,0d,aa */ + {0xaa, 0x1a, 0x0000}, /* 00,1a,00,aa */ +@@ -4924,7 +4959,7 @@ static const struct usb_action PO2030_NoFliker[] = { + }; + + /* TEST */ +-static const struct usb_action tas5130CK_Initial[] = { ++static const struct usb_action tas5130cK_InitialScale[] = { + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, + {0xa0, 0x01, 0x003b}, + {0xa0, 0x0e, 0x003a}, +@@ -5127,7 +5162,7 @@ static const struct usb_action tas5130CK_Initial[] = { + {} + }; + +-static const struct usb_action tas5130CK_InitialScale[] = { ++static const struct usb_action tas5130cK_Initial[] = { + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, + {0xa0, 0x01, 0x003b}, + {0xa0, 0x0e, 0x003a}, +@@ -5560,7 +5595,7 @@ static const struct usb_action tas5130cxx_NoFlikerScale[] = { + {} + }; + +-static const struct usb_action tas5130c_vf0250_Initial[] = { ++static const struct usb_action tas5130c_vf0250_InitialScale[] = { + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc, */ + {0xa0, 0x02, ZC3XX_R008_CLOCKSETTING}, /* 00,08,02,cc, */ + {0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,01,cc, */ +@@ -5627,7 +5662,7 @@ static const struct usb_action tas5130c_vf0250_Initial[] = { + {} + }; + +-static const struct usb_action tas5130c_vf0250_InitialScale[] = { ++static const struct usb_action tas5130c_vf0250_Initial[] = { + {0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc, */ + {0xa0, 0x02, ZC3XX_R008_CLOCKSETTING}, /* 00,08,02,cc, */ + {0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,01,cc, */ +@@ -5692,8 +5727,7 @@ static const struct usb_action tas5130c_vf0250_InitialScale[] = { + {0xa0, 0x65, ZC3XX_R118_BGAIN}, /* 01,18,65,cc */ + {} + }; +-/* "50HZ" light frequency banding filter */ +-static const struct usb_action tas5130c_vf0250_50HZ[] = { ++static const struct usb_action tas5130c_vf0250_50HZScale[] = { + {0xaa, 0x82, 0x0000}, /* 00,82,00,aa */ + {0xaa, 0x83, 0x0001}, /* 00,83,01,aa */ + {0xaa, 0x84, 0x00aa}, /* 00,84,aa,aa */ +@@ -5717,8 +5751,7 @@ static const struct usb_action tas5130c_vf0250_50HZ[] = { + {} + }; + +-/* "50HZScale" light frequency banding filter */ +-static const struct usb_action tas5130c_vf0250_50HZScale[] = { ++static const struct usb_action tas5130c_vf0250_50HZ[] = { + {0xaa, 0x82, 0x0000}, /* 00,82,00,aa */ + {0xaa, 0x83, 0x0003}, /* 00,83,03,aa */ + {0xaa, 0x84, 0x0054}, /* 00,84,54,aa */ +@@ -5742,8 +5775,7 @@ static const struct usb_action tas5130c_vf0250_50HZScale[] = { + {} + }; + +-/* "60HZ" light frequency banding filter */ 
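The long series of renames above (and those that follow) is easier to read with the naming rule in mind: after this patch, *_Initial always holds the full-size start sequence and *_InitialScale the downscaled one, which matches the .priv value of the pixel formats (0 for the large mode, 1 for the small one, as in broken_vga_mode above). A minimal sketch of how such a pair can then be selected by the mode index follows; the foo_* names and the table layout are illustrative assumptions, not the driver's own code.

#include <stdio.h>

/* Same layout as the driver's struct usb_action (u8 req, u8 val, u16 idx). */
struct usb_action { unsigned char req, val; unsigned short idx; };

/* Dummy one-entry tables standing in for a real sensor's sequences. */
static const struct usb_action foo_Initial[]      = { {0xa0, 0x01, 0x0000}, {0, 0, 0} };
static const struct usb_action foo_InitialScale[] = { {0xa0, 0x10, 0x0002}, {0, 0, 0} };

/* Indexed by the format's .priv: 0 = full size, 1 = scaled. */
static const struct usb_action *foo_start_tb[2] = { foo_Initial, foo_InitialScale };

int main(void)
{
	int mode = 1;	/* e.g. the 320x240 entry, .priv = 1 */
	const struct usb_action *seq = foo_start_tb[mode];

	printf("first opcode: %02x %02x %04x\n", seq[0].req, seq[0].val, seq[0].idx);
	return 0;
}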
+-static const struct usb_action tas5130c_vf0250_60HZ[] = { ++static const struct usb_action tas5130c_vf0250_60HZScale[] = { + {0xaa, 0x82, 0x0000}, /* 00,82,00,aa */ + {0xaa, 0x83, 0x0001}, /* 00,83,01,aa */ + {0xaa, 0x84, 0x0062}, /* 00,84,62,aa */ +@@ -5767,8 +5799,7 @@ static const struct usb_action tas5130c_vf0250_60HZ[] = { + {} + }; + +-/* "60HZScale" light frequency banding ilter */ +-static const struct usb_action tas5130c_vf0250_60HZScale[] = { ++static const struct usb_action tas5130c_vf0250_60HZ[] = { + {0xaa, 0x82, 0x0000}, /* 00,82,00,aa */ + {0xaa, 0x83, 0x0002}, /* 00,83,02,aa */ + {0xaa, 0x84, 0x00c4}, /* 00,84,c4,aa */ +@@ -5792,8 +5823,7 @@ static const struct usb_action tas5130c_vf0250_60HZScale[] = { + {} + }; + +-/* "NoFliker" light frequency banding flter */ +-static const struct usb_action tas5130c_vf0250_NoFliker[] = { ++static const struct usb_action tas5130c_vf0250_NoFlikerScale[] = { + {0xa0, 0x0c, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0c,cc, */ + {0xaa, 0x82, 0x0000}, /* 00,82,00,aa */ + {0xaa, 0x83, 0x0000}, /* 00,83,00,aa */ +@@ -5815,8 +5845,7 @@ static const struct usb_action tas5130c_vf0250_NoFliker[] = { + {} + }; + +-/* "NoFlikerScale" light frequency banding filter */ +-static const struct usb_action tas5130c_vf0250_NoFlikerScale[] = { ++static const struct usb_action tas5130c_vf0250_NoFliker[] = { + {0xa0, 0x0c, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0c,cc, */ + {0xaa, 0x82, 0x0000}, /* 00,82,00,aa */ + {0xaa, 0x83, 0x0000}, /* 00,83,00,aa */ +@@ -5839,7 +5868,7 @@ static const struct usb_action tas5130c_vf0250_NoFlikerScale[] = { + }; + + static u8 reg_r_i(struct gspca_dev *gspca_dev, +- __u16 index) ++ u16 index) + { + usb_control_msg(gspca_dev->dev, + usb_rcvctrlpipe(gspca_dev->dev, 0), +@@ -5852,7 +5881,7 @@ static u8 reg_r_i(struct gspca_dev *gspca_dev, + } + + static u8 reg_r(struct gspca_dev *gspca_dev, +- __u16 index) ++ u16 index) + { + u8 ret; + +@@ -5862,8 +5891,8 @@ static u8 reg_r(struct gspca_dev *gspca_dev, + } + + static void reg_w_i(struct usb_device *dev, +- __u8 value, +- __u16 index) ++ u8 value, ++ u16 index) + { + usb_control_msg(dev, + usb_sndctrlpipe(dev, 0), +@@ -5874,18 +5903,18 @@ static void reg_w_i(struct usb_device *dev, + } + + static void reg_w(struct usb_device *dev, +- __u8 value, +- __u16 index) ++ u8 value, ++ u16 index) + { + PDEBUG(D_USBO, "reg w [%04x] = %02x", index, value); + reg_w_i(dev, value, index); + } + +-static __u16 i2c_read(struct gspca_dev *gspca_dev, +- __u8 reg) ++static u16 i2c_read(struct gspca_dev *gspca_dev, ++ u8 reg) + { +- __u8 retbyte; +- __u16 retval; ++ u8 retbyte; ++ u16 retval; + + reg_w_i(gspca_dev->dev, reg, 0x0092); + reg_w_i(gspca_dev->dev, 0x02, 0x0090); /* <- read command */ +@@ -5900,12 +5929,12 @@ static __u16 i2c_read(struct gspca_dev *gspca_dev, + return retval; + } + +-static __u8 i2c_write(struct gspca_dev *gspca_dev, +- __u8 reg, +- __u8 valL, +- __u8 valH) ++static u8 i2c_write(struct gspca_dev *gspca_dev, ++ u8 reg, ++ u8 valL, ++ u8 valH) + { +- __u8 retbyte; ++ u8 retbyte; + + reg_w_i(gspca_dev->dev, reg, 0x92); + reg_w_i(gspca_dev->dev, valL, 0x93); +@@ -5957,24 +5986,24 @@ static void setmatrix(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + int i; +- const __u8 *matrix; ++ const u8 *matrix; + static const u8 adcm2700_matrix[9] = + /* {0x66, 0xed, 0xed, 0xed, 0x66, 0xed, 0xed, 0xed, 0x66}; */ + /*ms-win*/ + {0x74, 0xed, 0xed, 0xed, 0x74, 0xed, 0xed, 0xed, 0x74}; +- static const __u8 gc0305_matrix[9] = ++ static const u8 gc0305_matrix[9] = + 
{0x50, 0xf8, 0xf8, 0xf8, 0x50, 0xf8, 0xf8, 0xf8, 0x50}; +- static const __u8 ov7620_matrix[9] = ++ static const u8 ov7620_matrix[9] = + {0x58, 0xf4, 0xf4, 0xf4, 0x58, 0xf4, 0xf4, 0xf4, 0x58}; +- static const __u8 pas202b_matrix[9] = ++ static const u8 pas202b_matrix[9] = + {0x4c, 0xf5, 0xff, 0xf9, 0x51, 0xf5, 0xfb, 0xed, 0x5f}; +- static const __u8 po2030_matrix[9] = ++ static const u8 po2030_matrix[9] = + {0x60, 0xf0, 0xf0, 0xf0, 0x60, 0xf0, 0xf0, 0xf0, 0x60}; + static const u8 tas5130c_matrix[9] = + {0x68, 0xec, 0xec, 0xec, 0x68, 0xec, 0xec, 0xec, 0x68}; +- static const __u8 vf0250_matrix[9] = ++ static const u8 vf0250_matrix[9] = + {0x7b, 0xea, 0xea, 0xea, 0x7b, 0xea, 0xea, 0xea, 0x7b}; +- static const __u8 *matrix_tb[SENSOR_MAX] = { ++ static const u8 *matrix_tb[SENSOR_MAX] = { + adcm2700_matrix, /* SENSOR_ADCM2700 0 */ + ov7620_matrix, /* SENSOR_CS2102 1 */ + NULL, /* SENSOR_CS2102K 2 */ +@@ -6006,11 +6035,12 @@ static void setmatrix(struct gspca_dev *gspca_dev) + static void setbrightness(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +- __u8 brightness; ++ u8 brightness; + + switch (sd->sensor) { + case SENSOR_GC0305: + case SENSOR_OV7620: ++ case SENSOR_PAS202B: + case SENSOR_PO2030: + return; + } +@@ -6034,7 +6064,7 @@ static void setsharpness(struct gspca_dev *gspca_dev) + struct sd *sd = (struct sd *) gspca_dev; + struct usb_device *dev = gspca_dev->dev; + int sharpness; +- static const __u8 sharpness_tb[][2] = { ++ static const u8 sharpness_tb[][2] = { + {0x02, 0x03}, + {0x04, 0x07}, + {0x08, 0x0f}, +@@ -6053,118 +6083,69 @@ static void setcontrast(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + struct usb_device *dev = gspca_dev->dev; +- const __u8 *Tgamma, *Tgradient; +- int g, i, k; +- static const __u8 kgamma_tb[16] = /* delta for contrast */ ++ const u8 *Tgamma; ++ int g, i, k, adj, gp; ++ u8 gr[16]; ++ static const u8 delta_tb[16] = /* delta for contrast */ + {0x15, 0x0d, 0x0a, 0x09, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08}; +- static const __u8 kgrad_tb[16] = +- {0x1b, 0x06, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, +- 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x04}; +- static const __u8 Tgamma_1[16] = ++ static const u8 gamma_tb[6][16] = { + {0x00, 0x00, 0x03, 0x0d, 0x1b, 0x2e, 0x45, 0x5f, +- 0x79, 0x93, 0xab, 0xc1, 0xd4, 0xe5, 0xf3, 0xff}; +- static const __u8 Tgradient_1[16] = +- {0x00, 0x01, 0x05, 0x0b, 0x10, 0x15, 0x18, 0x1a, +- 0x1a, 0x18, 0x16, 0x14, 0x12, 0x0f, 0x0d, 0x06}; +- static const __u8 Tgamma_2[16] = ++ 0x79, 0x93, 0xab, 0xc1, 0xd4, 0xe5, 0xf3, 0xff}, + {0x01, 0x0c, 0x1f, 0x3a, 0x53, 0x6d, 0x85, 0x9c, +- 0xb0, 0xc2, 0xd1, 0xde, 0xe9, 0xf2, 0xf9, 0xff}; +- static const __u8 Tgradient_2[16] = +- {0x05, 0x0f, 0x16, 0x1a, 0x19, 0x19, 0x17, 0x15, +- 0x12, 0x10, 0x0e, 0x0b, 0x09, 0x08, 0x06, 0x03}; +- static const __u8 Tgamma_3[16] = ++ 0xb0, 0xc2, 0xd1, 0xde, 0xe9, 0xf2, 0xf9, 0xff}, + {0x04, 0x16, 0x30, 0x4e, 0x68, 0x81, 0x98, 0xac, +- 0xbe, 0xcd, 0xda, 0xe4, 0xed, 0xf5, 0xfb, 0xff}; +- static const __u8 Tgradient_3[16] = +- {0x0c, 0x16, 0x1b, 0x1c, 0x19, 0x18, 0x15, 0x12, +- 0x10, 0x0d, 0x0b, 0x09, 0x08, 0x06, 0x05, 0x03}; +- static const __u8 Tgamma_4[16] = ++ 0xbe, 0xcd, 0xda, 0xe4, 0xed, 0xf5, 0xfb, 0xff}, + {0x13, 0x38, 0x59, 0x79, 0x92, 0xa7, 0xb9, 0xc8, +- 0xd4, 0xdf, 0xe7, 0xee, 0xf4, 0xf9, 0xfc, 0xff}; +- static const __u8 Tgradient_4[16] = +- {0x26, 0x22, 0x20, 0x1c, 0x16, 0x13, 0x10, 0x0d, +- 0x0b, 0x09, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02}; +- static const __u8 
Tgamma_5[16] = ++ 0xd4, 0xdf, 0xe7, 0xee, 0xf4, 0xf9, 0xfc, 0xff}, + {0x20, 0x4b, 0x6e, 0x8d, 0xa3, 0xb5, 0xc5, 0xd2, +- 0xdc, 0xe5, 0xec, 0xf2, 0xf6, 0xfa, 0xfd, 0xff}; +- static const __u8 Tgradient_5[16] = +- {0x37, 0x26, 0x20, 0x1a, 0x14, 0x10, 0x0e, 0x0b, +- 0x09, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x02}; +- static const __u8 Tgamma_6[16] = /* ?? was gamma 5 */ ++ 0xdc, 0xe5, 0xec, 0xf2, 0xf6, 0xfa, 0xfd, 0xff}, + {0x24, 0x44, 0x64, 0x84, 0x9d, 0xb2, 0xc4, 0xd3, +- 0xe0, 0xeb, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff}; +- static const __u8 Tgradient_6[16] = +- {0x18, 0x20, 0x20, 0x1c, 0x16, 0x13, 0x10, 0x0e, +- 0x0b, 0x09, 0x07, 0x00, 0x00, 0x00, 0x00, 0x01}; +- static const __u8 *gamma_tb[] = { +- NULL, Tgamma_1, Tgamma_2, +- Tgamma_3, Tgamma_4, Tgamma_5, Tgamma_6 +- }; +- static const __u8 *gradient_tb[] = { +- NULL, Tgradient_1, Tgradient_2, +- Tgradient_3, Tgradient_4, Tgradient_5, Tgradient_6 ++ 0xe0, 0xeb, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff}, + }; +-#ifdef GSPCA_DEBUG +- __u8 v[16]; +-#endif + +- Tgamma = gamma_tb[sd->gamma]; +- Tgradient = gradient_tb[sd->gamma]; ++ Tgamma = gamma_tb[sd->gamma - 1]; + +- k = (sd->contrast - 128) /* -128 / 128 */ +- * Tgamma[0]; +- PDEBUG(D_CONF, "gamma:%d contrast:%d gamma coeff: %d/128", +- sd->gamma, sd->contrast, k); ++ k = ((int) sd->contrast - 128); /* -128 / 128 */ ++ adj = 0; ++ gp = 0; + for (i = 0; i < 16; i++) { +- g = Tgamma[i] + kgamma_tb[i] * k / 128; ++ g = Tgamma[i] - delta_tb[i] * k / 128 - adj / 2; + if (g > 0xff) + g = 0xff; + else if (g <= 0) + g = 1; + reg_w(dev, g, 0x0120 + i); /* gamma */ +-#ifdef GSPCA_DEBUG +- if (gspca_debug & D_CONF) +- v[i] = g; +-#endif +- } +- PDEBUG(D_CONF, "tb: %02x %02x %02x %02x %02x %02x %02x %02x", +- v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]); +- PDEBUG(D_CONF, " %02x %02x %02x %02x %02x %02x %02x %02x", +- v[8], v[9], v[10], v[11], v[12], v[13], v[14], v[15]); +- for (i = 0; i < 16; i++) { +- g = Tgradient[i] - kgrad_tb[i] * k / 128; +- if (g > 0xff) +- g = 0xff; +- else if (g <= 0) { +- if (i != 15) +- g = 0; ++ if (k > 0) ++ adj--; ++ else ++ adj++; ++ ++ if (i != 0) { ++ if (gp == 0) ++ gr[i - 1] = 0; + else +- g = 1; ++ gr[i - 1] = g - gp; + } +- reg_w(dev, g, 0x0130 + i); /* gradient */ +-#ifdef GSPCA_DEBUG +- if (gspca_debug & D_CONF) +- v[i] = g; +-#endif ++ gp = g; + } +- PDEBUG(D_CONF, " %02x %02x %02x %02x %02x %02x %02x %02x", +- v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]); +- PDEBUG(D_CONF, " %02x %02x %02x %02x %02x %02x %02x %02x", +- v[8], v[9], v[10], v[11], v[12], v[13], v[14], v[15]); ++ gr[15] = gr[14] / 2; ++ for (i = 0; i < 16; i++) ++ reg_w(dev, gr[i], 0x0130 + i); /* gradient */ + } + + static void setquality(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + struct usb_device *dev = gspca_dev->dev; +- __u8 frxt; ++ u8 frxt; + + switch (sd->sensor) { + case SENSOR_ADCM2700: + case SENSOR_GC0305: + case SENSOR_HV7131B: + case SENSOR_OV7620: ++ case SENSOR_PAS202B: + case SENSOR_PO2030: + return; + } +@@ -6218,9 +6199,9 @@ static int setlightfreq(struct gspca_dev *gspca_dev) + hdcs2020b_50HZ, hdcs2020b_50HZ, + hdcs2020b_60HZ, hdcs2020b_60HZ}, + /* SENSOR_HV7131B 5 */ +- {hv7131b_NoFlikerScale, hv7131b_NoFliker, +- hv7131b_50HZScale, hv7131b_50HZ, +- hv7131b_60HZScale, hv7131b_60HZ}, ++ {hv7131b_NoFliker, hv7131b_NoFlikerScale, ++ hv7131b_50HZ, hv7131b_50HZScale, ++ hv7131b_60HZ, hv7131b_60HZScale}, + /* SENSOR_HV7131C 6 */ + {NULL, NULL, + NULL, NULL, +@@ -6230,17 +6211,17 @@ static int setlightfreq(struct gspca_dev *gspca_dev) + 
icm105a_50HZ, icm105a_50HZScale, + icm105a_60HZ, icm105a_60HZScale}, + /* SENSOR_MC501CB 8 */ +- {MC501CB_NoFliker, MC501CB_NoFlikerScale, +- MC501CB_50HZ, MC501CB_50HZScale, +- MC501CB_60HZ, MC501CB_60HZScale}, ++ {mc501cb_NoFliker, mc501cb_NoFlikerScale, ++ mc501cb_50HZ, mc501cb_50HZScale, ++ mc501cb_60HZ, mc501cb_60HZScale}, + /* SENSOR_MI0360SOC 9 */ +- {mi360soc_AENoFlikerScale, mi360soc_AENoFliker, +- mi360soc_AE50HZScale, mi360soc_AE50HZ, +- mi360soc_AE60HZScale, mi360soc_AE60HZ}, ++ {mi360soc_AENoFliker, mi360soc_AENoFlikerScale, ++ mi360soc_AE50HZ, mi360soc_AE50HZScale, ++ mi360soc_AE60HZ, mi360soc_AE60HZScale}, + /* SENSOR_OV7620 10 */ +- {OV7620_NoFliker, OV7620_NoFliker, +- OV7620_50HZ, OV7620_50HZ, +- OV7620_60HZ, OV7620_60HZ}, ++ {ov7620_NoFliker, ov7620_NoFliker, ++ ov7620_50HZ, ov7620_50HZ, ++ ov7620_60HZ, ov7620_60HZ}, + /* SENSOR_OV7630C 11 */ + {NULL, NULL, + NULL, NULL, +@@ -6258,17 +6239,17 @@ static int setlightfreq(struct gspca_dev *gspca_dev) + pb0330_50HZScale, pb0330_50HZ, + pb0330_60HZScale, pb0330_60HZ}, + /* SENSOR_PO2030 15 */ +- {PO2030_NoFliker, PO2030_NoFliker, +- PO2030_50HZ, PO2030_50HZ, +- PO2030_60HZ, PO2030_60HZ}, ++ {po2030_NoFliker, po2030_NoFliker, ++ po2030_50HZ, po2030_50HZ, ++ po2030_60HZ, po2030_60HZ}, + /* SENSOR_TAS5130CK 16 */ +- {tas5130cxx_NoFlikerScale, tas5130cxx_NoFliker, +- tas5130cxx_50HZScale, tas5130cxx_50HZ, +- tas5130cxx_60HZScale, tas5130cxx_60HZ}, ++ {tas5130cxx_NoFliker, tas5130cxx_NoFlikerScale, ++ tas5130cxx_50HZ, tas5130cxx_50HZScale, ++ tas5130cxx_60HZ, tas5130cxx_60HZScale}, + /* SENSOR_TAS5130CXX 17 */ +- {tas5130cxx_NoFlikerScale, tas5130cxx_NoFliker, +- tas5130cxx_50HZScale, tas5130cxx_50HZ, +- tas5130cxx_60HZScale, tas5130cxx_60HZ}, ++ {tas5130cxx_NoFliker, tas5130cxx_NoFlikerScale, ++ tas5130cxx_50HZ, tas5130cxx_50HZScale, ++ tas5130cxx_60HZ, tas5130cxx_60HZScale}, + /* SENSOR_TAS5130C_VF0250 18 */ + {tas5130c_vf0250_NoFliker, tas5130c_vf0250_NoFlikerScale, + tas5130c_vf0250_50HZ, tas5130c_vf0250_50HZScale, +@@ -6277,9 +6258,9 @@ static int setlightfreq(struct gspca_dev *gspca_dev) + + i = sd->lightfreq * 2; + mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; +- if (!mode) +- i++; /* 640x480 */ +- zc3_freq = freq_tb[(int) sd->sensor][i]; ++ if (mode) ++ i++; /* 320x240 */ ++ zc3_freq = freq_tb[sd->sensor][i]; + if (zc3_freq != NULL) { + usb_exchange(gspca_dev, zc3_freq); + switch (sd->sensor) { +@@ -6297,6 +6278,9 @@ static int setlightfreq(struct gspca_dev *gspca_dev) + reg_w(gspca_dev->dev, 0x44, 0x0002); + } + break; ++ case SENSOR_PAS202B: ++ reg_w(gspca_dev->dev, 0x00, 0x01a7); ++ break; + } + } + return 0; +@@ -6305,7 +6289,7 @@ static int setlightfreq(struct gspca_dev *gspca_dev) + static void setautogain(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; +- __u8 autoval; ++ u8 autoval; + + if (sd->autogain) + autoval = 0x42; +@@ -6333,6 +6317,12 @@ static void send_unknown(struct usb_device *dev, int sensor) + reg_w(dev, 0x02, 0x003b); + reg_w(dev, 0x00, 0x0038); + break; ++ case SENSOR_PAS202B: ++ reg_w(dev, 0x03, 0x003b); ++ reg_w(dev, 0x0c, 0x003a); ++ reg_w(dev, 0x0b, 0x0039); ++ reg_w(dev, 0x0b, 0x0038); ++ break; + } + } + +@@ -6349,7 +6339,7 @@ static void start_2wr_probe(struct usb_device *dev, int sensor) + + static int sif_probe(struct gspca_dev *gspca_dev) + { +- __u16 checkword; ++ u16 checkword; + + start_2wr_probe(gspca_dev->dev, 0x0f); /* PAS106 */ + reg_w(gspca_dev->dev, 0x08, 0x008d); +@@ -6392,6 +6382,7 @@ static int vga_2wr_probe(struct gspca_dev 
*gspca_dev) + } + + start_2wr_probe(dev, 0x08); /* HDCS2020 */ ++ i2c_write(gspca_dev, 0x1c, 0x00, 0x00); + i2c_write(gspca_dev, 0x15, 0xaa, 0x00); + retword = i2c_read(gspca_dev, 0x15); + if (retword != 0) +@@ -6410,7 +6401,11 @@ static int vga_2wr_probe(struct gspca_dev *gspca_dev) + return 0x0a; /* PB0330 ?? */ + + start_2wr_probe(dev, 0x0c); /* ICM105A */ ++#if 1 + i2c_write(gspca_dev, 0x01, 0x11, 0x00); ++#else ++ i2c_write(gspca_dev, 0x01, 0xaa, 0x00); ++#endif + retword = i2c_read(gspca_dev, 0x01); + if (retword != 0) + return 0x0c; /* ICM105A */ +@@ -6420,8 +6415,10 @@ static int vga_2wr_probe(struct gspca_dev *gspca_dev) + i2c_write(gspca_dev, 0x03, 0xaa, 0x00); + msleep(50); + retword = i2c_read(gspca_dev, 0x03); +- if (retword != 0) ++ if (retword != 0) { ++ send_unknown(dev, SENSOR_PAS202B); + return 0x0e; /* PAS202BCB */ ++ } + + start_2wr_probe(dev, 0x02); /* TAS5130C */ + i2c_write(gspca_dev, 0x01, 0xaa, 0x00); +@@ -6457,8 +6454,8 @@ ov_check: + } + + struct sensor_by_chipset_revision { +- __u16 revision; +- __u8 internal_sensor_id; ++ u16 revision; ++ u8 internal_sensor_id; + }; + static const struct sensor_by_chipset_revision chipset_revision_sensor[] = { + {0xc000, 0x12}, /* TAS5130C */ +@@ -6467,6 +6464,7 @@ static const struct sensor_by_chipset_revision chipset_revision_sensor[] = { + {0x8001, 0x13}, + {0x8000, 0x14}, /* CS2102K */ + {0x8400, 0x15}, /* TAS5130K */ ++ {0xe400, 0x15}, + }; + + static int vga_3wr_probe(struct gspca_dev *gspca_dev) +@@ -6474,7 +6472,7 @@ static int vga_3wr_probe(struct gspca_dev *gspca_dev) + struct sd *sd = (struct sd *) gspca_dev; + struct usb_device *dev = gspca_dev->dev; + int i; +- __u8 retbyte; ++ u8 retbyte; + u16 retword; + + /*fixme: lack of 8b=b3 (11,12)-> 10, 8b=e0 (14,15,16)-> 12 found in gspcav1*/ +@@ -6622,8 +6620,7 @@ static int sd_config(struct gspca_dev *gspca_dev, + struct sd *sd = (struct sd *) gspca_dev; + struct cam *cam; + int sensor; +- int vga = 1; /* 1: vga, 0: sif */ +- static const __u8 gamma[SENSOR_MAX] = { ++ static const u8 gamma[SENSOR_MAX] = { + 4, /* SENSOR_ADCM2700 0 */ + 4, /* SENSOR_CS2102 1 */ + 5, /* SENSOR_CS2102K 2 */ +@@ -6644,9 +6641,30 @@ static int sd_config(struct gspca_dev *gspca_dev, + 3, /* SENSOR_TAS5130CXX 17 */ + 3, /* SENSOR_TAS5130C_VF0250 18 */ + }; ++ static const u8 mode_tb[SENSOR_MAX] = { ++ 2, /* SENSOR_ADCM2700 0 */ ++ 1, /* SENSOR_CS2102 1 */ ++ 1, /* SENSOR_CS2102K 2 */ ++ 1, /* SENSOR_GC0305 3 */ ++ 1, /* SENSOR_HDCS2020b 4 */ ++ 1, /* SENSOR_HV7131B 5 */ ++ 1, /* SENSOR_HV7131C 6 */ ++ 1, /* SENSOR_ICM105A 7 */ ++ 2, /* SENSOR_MC501CB 8 */ ++ 1, /* SENSOR_MI0360SOC 9 */ ++ 2, /* SENSOR_OV7620 10 */ ++ 1, /* SENSOR_OV7630C 11 */ ++ 0, /* SENSOR_PAS106 12 */ ++ 1, /* SENSOR_PAS202B 13 */ ++ 1, /* SENSOR_PB0330 14 */ ++ 1, /* SENSOR_PO2030 15 */ ++ 1, /* SENSOR_TAS5130CK 16 */ ++ 1, /* SENSOR_TAS5130CXX 17 */ ++ 1, /* SENSOR_TAS5130C_VF0250 18 */ ++ }; + + /* define some sensors from the vendor/product */ +- sd->sharpness = 2; ++ sd->sharpness = SHARPNESS_DEF; + sd->sensor = id->driver_info; + sensor = zcxx_probeSensor(gspca_dev); + if (sensor >= 0) +@@ -6671,8 +6689,21 @@ static int sd_config(struct gspca_dev *gspca_dev, + } + break; + case 0: +- PDEBUG(D_PROBE, "Find Sensor HV7131B"); +- sd->sensor = SENSOR_HV7131B; ++ /* check the sensor type */ ++ sensor = i2c_read(gspca_dev, 0x00); ++ PDEBUG(D_PROBE, "Sensor hv7131 type %d", sensor); ++ switch (sensor) { ++ case 0: /* hv7131b */ ++ case 1: /* hv7131e */ ++ PDEBUG(D_PROBE, "Find Sensor HV7131B"); ++ sd->sensor = SENSOR_HV7131B; 
++ break; ++ default: ++/* case 2: * hv7131r */ ++ PDEBUG(D_PROBE, "Find Sensor HV7131R(c)"); ++ sd->sensor = SENSOR_HV7131C; ++ break; ++ } + break; + case 0x02: + PDEBUG(D_PROBE, "Sensor TAS5130C"); +@@ -6699,12 +6730,11 @@ static int sd_config(struct gspca_dev *gspca_dev, + case 0x0e: + PDEBUG(D_PROBE, "Find Sensor PAS202B"); + sd->sensor = SENSOR_PAS202B; +- sd->sharpness = 1; ++/* sd->sharpness = 1; */ + break; + case 0x0f: + PDEBUG(D_PROBE, "Find Sensor PAS106"); + sd->sensor = SENSOR_PAS106; +- vga = 0; /* SIF */ + break; + case 0x10: + case 0x12: +@@ -6770,31 +6800,42 @@ static int sd_config(struct gspca_dev *gspca_dev, + if (sensor < 0x20) { + if (sensor == -1 || sensor == 0x10 || sensor == 0x12) + reg_w(gspca_dev->dev, 0x02, 0x0010); ++#if 0 + else + reg_w(gspca_dev->dev, sensor & 0x0f, 0x0010); ++#endif + reg_r(gspca_dev, 0x0010); + } + + cam = &gspca_dev->cam; + /*fixme:test*/ + gspca_dev->nbalt--; +- if (vga) { +- cam->cam_mode = vga_mode; +- cam->nmodes = ARRAY_SIZE(vga_mode); +- } else { ++ switch (mode_tb[sd->sensor]) { ++ case 0: + cam->cam_mode = sif_mode; + cam->nmodes = ARRAY_SIZE(sif_mode); ++ break; ++ case 1: ++ cam->cam_mode = vga_mode; ++ cam->nmodes = ARRAY_SIZE(vga_mode); ++ break; ++ default: ++/* case 2: */ ++ cam->cam_mode = broken_vga_mode; ++ cam->nmodes = ARRAY_SIZE(broken_vga_mode); ++ break; + } +- sd->brightness = sd_ctrls[SD_BRIGHTNESS].qctrl.default_value; +- sd->contrast = sd_ctrls[SD_CONTRAST].qctrl.default_value; +- sd->gamma = gamma[(int) sd->sensor]; +- sd->autogain = sd_ctrls[SD_AUTOGAIN].qctrl.default_value; +- sd->lightfreq = sd_ctrls[SD_FREQ].qctrl.default_value; ++ sd->brightness = BRIGHTNESS_DEF; ++ sd->contrast = CONTRAST_DEF; ++ sd->gamma = gamma[sd->sensor]; ++ sd->autogain = AUTOGAIN_DEF; ++ sd->lightfreq = FREQ_DEF; + sd->quality = QUALITY_DEF; + + switch (sd->sensor) { + case SENSOR_GC0305: + case SENSOR_OV7620: ++ case SENSOR_PAS202B: + case SENSOR_PO2030: + gspca_dev->ctrl_dis = (1 << BRIGHTNESS_IDX); + break; +@@ -6805,14 +6846,13 @@ static int sd_config(struct gspca_dev *gspca_dev, + break; + } + +- /* switch the led off */ +- reg_w(gspca_dev->dev, 0x01, 0x0000); + return 0; + } + + /* this function is called at probe and resume time */ + static int sd_init(struct gspca_dev *gspca_dev) + { ++ /* switch off the led */ + reg_w(gspca_dev->dev, 0x01, 0x0000); + return 0; + } +@@ -6821,28 +6861,27 @@ static int sd_start(struct gspca_dev *gspca_dev) + { + struct sd *sd = (struct sd *) gspca_dev; + struct usb_device *dev = gspca_dev->dev; +- const struct usb_action *zc3_init; + int mode; + static const struct usb_action *init_tb[SENSOR_MAX][2] = { + {adcm2700_Initial, adcm2700_InitialScale}, /* 0 */ +- {cs2102_InitialScale, cs2102_Initial}, /* 1 */ +- {cs2102K_InitialScale, cs2102K_Initial}, /* 2 */ ++ {cs2102_Initial, cs2102_InitialScale}, /* 1 */ ++ {cs2102K_Initial, cs2102K_InitialScale}, /* 2 */ + {gc0305_Initial, gc0305_InitialScale}, /* 3 */ +- {hdcs2020xb_InitialScale, hdcs2020xb_Initial}, /* 4 */ +- {hv7131bxx_InitialScale, hv7131bxx_Initial}, /* 5 */ +- {hv7131cxx_InitialScale, hv7131cxx_Initial}, /* 6 */ +- {icm105axx_InitialScale, icm105axx_Initial}, /* 7 */ +- {MC501CB_InitialScale, MC501CB_Initial}, /* 8 */ ++ {hdcs2020b_Initial, hdcs2020b_InitialScale}, /* 4 */ ++ {hv7131b_Initial, hv7131b_InitialScale}, /* 5 */ ++ {hv7131r_Initial, hv7131r_InitialScale}, /* 6 */ ++ {icm105a_Initial, icm105a_InitialScale}, /* 7 */ ++ {mc501cb_Initial, mc501cb_InitialScale}, /* 8 */ + {mi0360soc_Initial, mi0360soc_InitialScale}, /* 9 */ +- 
{OV7620_mode0, OV7620_mode1}, /* 10 */ +- {ov7630c_InitialScale, ov7630c_Initial}, /* 11 */ +- {pas106b_InitialScale, pas106b_Initial}, /* 12 */ ++ {ov7620_Initial, ov7620_InitialScale}, /* 10 */ ++ {ov7630c_Initial, ov7630c_InitialScale}, /* 11 */ ++ {pas106b_Initial, pas106b_InitialScale}, /* 12 */ + {pas202b_Initial, pas202b_InitialScale}, /* 13 */ + {pb0330_Initial, pb0330_InitialScale}, /* 14 */ +- {PO2030_mode0, PO2030_mode1}, /* 15 */ +- {tas5130CK_InitialScale, tas5130CK_Initial}, /* 16 */ ++ {po2030_Initial, po2030_InitialScale}, /* 15 */ ++ {tas5130cK_Initial, tas5130cK_InitialScale}, /* 16 */ + {tas5130cxx_Initial, tas5130cxx_InitialScale}, /* 17 */ +- {tas5130c_vf0250_InitialScale, tas5130c_vf0250_Initial}, ++ {tas5130c_vf0250_Initial, tas5130c_vf0250_InitialScale}, + /* 18 */ + }; + +@@ -6854,8 +6893,7 @@ static int sd_start(struct gspca_dev *gspca_dev) + 0x21); /* JPEG 422 */ + jpeg_set_qual(sd->jpeg_hdr, sd->quality); + +- mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; +- zc3_init = init_tb[(int) sd->sensor][mode]; ++ mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; + switch (sd->sensor) { + case SENSOR_HV7131C: + zcxx_probeSensor(gspca_dev); +@@ -6864,7 +6902,7 @@ static int sd_start(struct gspca_dev *gspca_dev) + usb_exchange(gspca_dev, pas106b_Initial_com); + break; + } +- usb_exchange(gspca_dev, zc3_init); ++ usb_exchange(gspca_dev, init_tb[sd->sensor][mode]); + + switch (sd->sensor) { + case SENSOR_ADCM2700: +@@ -6883,6 +6921,11 @@ static int sd_start(struct gspca_dev *gspca_dev) + reg_w(dev, 0x02, 0x003b); + reg_w(dev, 0x00, 0x0038); + break; ++ case SENSOR_PAS202B: ++ reg_w(dev, 0x03, 0x003b); ++ reg_w(dev, 0x0c, 0x003a); ++ reg_w(dev, 0x0b, 0x0039); ++ break; + } + + setmatrix(gspca_dev); +@@ -6961,13 +7004,13 @@ static int sd_start(struct gspca_dev *gspca_dev) + switch (sd->sensor) { + case SENSOR_PO2030: + msleep(50); +- reg_r(gspca_dev, 0x0008); +- reg_r(gspca_dev, 0x0007); +- /*fall thru*/ +- case SENSOR_PAS202B: + reg_w(dev, 0x00, 0x0007); /* (from win traces) */ + reg_w(dev, 0x02, ZC3XX_R008_CLOCKSETTING); + break; ++ case SENSOR_PAS202B: ++ reg_w(dev, 0x32, 0x0007); /* (from win traces) */ ++ reg_w(dev, 0x02, ZC3XX_R008_CLOCKSETTING); ++ break; + } + return 0; + } +@@ -7165,6 +7208,22 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev, + return 0; + } + ++#ifdef CONFIG_INPUT ++static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, ++ u8 *data, /* interrupt packet data */ ++ int len) /* interrput packet length */ ++{ ++ if (len == 8 && data[4] == 1) { ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); ++ input_sync(gspca_dev->input_dev); ++ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); ++ input_sync(gspca_dev->input_dev); ++ } ++ ++ return 0; ++} ++#endif ++ + static const struct sd_desc sd_desc = { + .name = MODULE_NAME, + .ctrls = sd_ctrls, +@@ -7177,6 +7236,9 @@ static const struct sd_desc sd_desc = { + .querymenu = sd_querymenu, + .get_jcomp = sd_get_jcomp, + .set_jcomp = sd_set_jcomp, ++#ifdef CONFIG_INPUT ++ .int_pkt_scan = sd_int_pkt_scan, ++#endif + }; + + static const __devinitdata struct usb_device_id device_table[] = { +diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h +index d4962a7..3793d16 100644 +--- a/include/linux/videodev2.h ++++ b/include/linux/videodev2.h +@@ -350,6 +350,7 @@ struct v4l2_pix_format { + #define V4L2_PIX_FMT_MPEG v4l2_fourcc('M', 'P', 'E', 'G') /* MPEG-1/2/4 */ + + /* Vendor-specific formats */ ++#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* 
cpia1 YUV */ + #define V4L2_PIX_FMT_WNVA v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */ + #define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */ + #define V4L2_PIX_FMT_SN9C20X_I420 v4l2_fourcc('S', '9', '2', '0') /* SN9C20x YUV 4:2:0 */ +@@ -362,6 +363,7 @@ struct v4l2_pix_format { + #define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */ + #define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */ + #define V4L2_PIX_FMT_MR97310A v4l2_fourcc('M', '3', '1', '0') /* compressed BGGR bayer */ ++#define V4L2_PIX_FMT_SN9C2028 v4l2_fourcc('S', 'O', 'N', 'X') /* compressed GBRG bayer */ + #define V4L2_PIX_FMT_SQ905C v4l2_fourcc('9', '0', '5', 'C') /* compressed RGGB bayer */ + #define V4L2_PIX_FMT_PJPG v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */ + #define V4L2_PIX_FMT_OV511 v4l2_fourcc('O', '5', '1', '1') /* ov511 JPEG */ +-- +1.7.0 + diff --git a/linux-2.6-v4l-dvb-uvcvideo-update.patch b/linux-2.6-v4l-dvb-uvcvideo-update.patch deleted file mode 100644 index 4ae232e..0000000 --- a/linux-2.6-v4l-dvb-uvcvideo-update.patch +++ /dev/null @@ -1,557 +0,0 @@ -From: Laurent Pinchart -Date: Sat, 13 Mar 2010 21:12:15 +0000 (-0300) -Subject: V4L/DVB: uvcvideo: Add support for Packard Bell EasyNote MX52 integrated webcam -X-Git-Url: http://git.linuxtv.org/pinchartl/uvcvideo.git?a=commitdiff_plain;h=513640877c336551f7a4428eaff7a4eb0f42cb9e - -V4L/DVB: uvcvideo: Add support for Packard Bell EasyNote MX52 integrated webcam - -The camera requires the STREAM_NO_FID quirk. Add a corresponding entry -in the device IDs list. - -Signed-off-by: Laurent Pinchart -Signed-off-by: Mauro Carvalho Chehab ---- - -diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c -index a192c51..43892bf 100644 ---- a/drivers/media/video/uvc/uvc_driver.c -+++ b/drivers/media/video/uvc/uvc_driver.c -@@ -2104,6 +2104,15 @@ static struct usb_device_id uvc_ids[] = { - .bInterfaceSubClass = 1, - .bInterfaceProtocol = 0, - .driver_info = UVC_QUIRK_STREAM_NO_FID }, -+ /* Syntek (Packard Bell EasyNote MX52 */ -+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE -+ | USB_DEVICE_ID_MATCH_INT_INFO, -+ .idVendor = 0x174f, -+ .idProduct = 0x8a12, -+ .bInterfaceClass = USB_CLASS_VIDEO, -+ .bInterfaceSubClass = 1, -+ .bInterfaceProtocol = 0, -+ .driver_info = UVC_QUIRK_STREAM_NO_FID }, - /* Syntek (Asus F9SG) */ - { .match_flags = USB_DEVICE_ID_MATCH_DEVICE - | USB_DEVICE_ID_MATCH_INT_INFO, -From: Laurent Pinchart -Date: Wed, 31 Mar 2010 15:29:26 +0000 (-0300) -Subject: V4L/DVB: uvcvideo: Use POLLOUT and POLLWRNORM for output devices -X-Git-Url: http://git.linuxtv.org/pinchartl/uvcvideo.git?a=commitdiff_plain;h=a6eb7bc8e0eea78f96ad1b0f0195ec52b88c6a00 - -V4L/DVB: uvcvideo: Use POLLOUT and POLLWRNORM for output devices - -The V4L2 specification requires drivers to use the write events in the -file operations poll handler for output devices. The uvcvideo driver -erroneously used read events for all devices. Fix this. 
- -Signed-off-by: Laurent Pinchart -Signed-off-by: Mauro Carvalho Chehab ---- - -diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c -index 4a925a3..133c78d 100644 ---- a/drivers/media/video/uvc/uvc_queue.c -+++ b/drivers/media/video/uvc/uvc_queue.c -@@ -388,8 +388,12 @@ unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file, - - poll_wait(file, &buf->wait, wait); - if (buf->state == UVC_BUF_STATE_DONE || -- buf->state == UVC_BUF_STATE_ERROR) -- mask |= POLLIN | POLLRDNORM; -+ buf->state == UVC_BUF_STATE_ERROR) { -+ if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) -+ mask |= POLLIN | POLLRDNORM; -+ else -+ mask |= POLLOUT | POLLWRNORM; -+ } - - done: - mutex_unlock(&queue->mutex); -From: Laurent Pinchart -Date: Sun, 25 Apr 2010 19:23:24 +0000 (-0300) -Subject: V4L/DVB: uvcvideo: Flag relative controls as write-only -X-Git-Url: http://git.linuxtv.org/pinchartl/uvcvideo.git?a=commitdiff_plain;h=4ac25db7e7f6116213f1c03039df00b18466a0dc - -V4L/DVB: uvcvideo: Flag relative controls as write-only - -The UVC relative controls (exposure time, iris, focus, zoom, pan/tilt) -are write-only (despite the UVC specification stating that the GET_CUR -request is mandatory). Mark the controls as such, and report the related -V4L2 controls V4L2_CTRL_FLAG_WRITE_ONLY. - -Signed-off-by: Laurent Pinchart -Signed-off-by: Mauro Carvalho Chehab ---- - -diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c -index 3697d72..bf2a333 100644 ---- a/drivers/media/video/uvc/uvc_ctrl.c -+++ b/drivers/media/video/uvc/uvc_ctrl.c -@@ -216,8 +216,7 @@ static struct uvc_control_info uvc_ctrls[] = { - .selector = UVC_CT_EXPOSURE_TIME_RELATIVE_CONTROL, - .index = 4, - .size = 1, -- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR -- | UVC_CONTROL_RESTORE, -+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_RESTORE, - }, - { - .entity = UVC_GUID_UVC_CAMERA, -@@ -232,8 +231,9 @@ static struct uvc_control_info uvc_ctrls[] = { - .selector = UVC_CT_FOCUS_RELATIVE_CONTROL, - .index = 6, - .size = 2, -- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE -- | UVC_CONTROL_AUTO_UPDATE, -+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN -+ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES -+ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE, - }, - { - .entity = UVC_GUID_UVC_CAMERA, -@@ -248,8 +248,7 @@ static struct uvc_control_info uvc_ctrls[] = { - .selector = UVC_CT_IRIS_RELATIVE_CONTROL, - .index = 8, - .size = 1, -- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR -- | UVC_CONTROL_AUTO_UPDATE, -+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_AUTO_UPDATE, - }, - { - .entity = UVC_GUID_UVC_CAMERA, -@@ -264,8 +263,9 @@ static struct uvc_control_info uvc_ctrls[] = { - .selector = UVC_CT_ZOOM_RELATIVE_CONTROL, - .index = 10, - .size = 3, -- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE -- | UVC_CONTROL_AUTO_UPDATE, -+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN -+ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES -+ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE, - }, - { - .entity = UVC_GUID_UVC_CAMERA, -@@ -280,8 +280,9 @@ static struct uvc_control_info uvc_ctrls[] = { - .selector = UVC_CT_PANTILT_RELATIVE_CONTROL, - .index = 12, - .size = 4, -- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE -- | UVC_CONTROL_AUTO_UPDATE, -+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN -+ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES -+ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE, - }, - { - .entity = UVC_GUID_UVC_CAMERA, -@@ -296,8 +297,9 @@ static struct 
uvc_control_info uvc_ctrls[] = { - .selector = UVC_CT_ROLL_RELATIVE_CONTROL, - .index = 14, - .size = 2, -- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE -- | UVC_CONTROL_AUTO_UPDATE, -+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN -+ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES -+ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE, - }, - { - .entity = UVC_GUID_UVC_CAMERA, -@@ -841,6 +843,8 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain, - strlcpy(v4l2_ctrl->name, mapping->name, sizeof v4l2_ctrl->name); - v4l2_ctrl->flags = 0; - -+ if (!(ctrl->info->flags & UVC_CONTROL_GET_CUR)) -+ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY; - if (!(ctrl->info->flags & UVC_CONTROL_SET_CUR)) - v4l2_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY; - -From: Laurent Pinchart -Date: Mon, 5 Jul 2010 18:24:39 +0000 (+0200) -Subject: uvcvideo: Power line frequency control doesn't support GET_MIN/MAX/RES -X-Git-Url: http://git.linuxtv.org/pinchartl/uvcvideo.git?a=commitdiff_plain;h=2596b09e32b45967dbdbfe80a10fb51d9a6c6839 - -uvcvideo: Power line frequency control doesn't support GET_MIN/MAX/RES - -Issuing a GET_MIN request on the power line frequency control times out -on at least the Apple iSight. As the UVC specification doesn't list -GET_MIN/MAX/RES as supported on that control, remove them from the -uvc_ctrls array. - -Signed-off-by: Laurent Pinchart ---- - -diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c -index aa0720a..27a79f0 100644 ---- a/drivers/media/video/uvc/uvc_ctrl.c -+++ b/drivers/media/video/uvc/uvc_ctrl.c -@@ -122,8 +122,8 @@ static struct uvc_control_info uvc_ctrls[] = { - .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL, - .index = 10, - .size = 1, -- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE -- | UVC_CONTROL_RESTORE, -+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR -+ | UVC_CONTROL_GET_DEF | UVC_CONTROL_RESTORE, - }, - { - .entity = UVC_GUID_UVC_PROCESSING, -From: Martin Rubli -Date: Wed, 19 May 2010 22:51:56 +0000 (+0200) -Subject: uvcvideo: Add support for absolute pan/tilt controls -X-Git-Url: http://git.linuxtv.org/pinchartl/uvcvideo.git?a=commitdiff_plain;h=d3c2f664ec76aff14c3841c99e84cd78d7227f79 - -uvcvideo: Add support for absolute pan/tilt controls - -Signed-off-by: Martin Rubli ---- - -diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c -index aa0720a..5ec2f4a 100644 ---- a/drivers/media/video/uvc/uvc_ctrl.c -+++ b/drivers/media/video/uvc/uvc_ctrl.c -@@ -606,6 +606,26 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = { - .set = uvc_ctrl_set_zoom, - }, - { -+ .id = V4L2_CID_PAN_ABSOLUTE, -+ .name = "Pan (Absolute)", -+ .entity = UVC_GUID_UVC_CAMERA, -+ .selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL, -+ .size = 32, -+ .offset = 0, -+ .v4l2_type = V4L2_CTRL_TYPE_INTEGER, -+ .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, -+ }, -+ { -+ .id = V4L2_CID_TILT_ABSOLUTE, -+ .name = "Tilt (Absolute)", -+ .entity = UVC_GUID_UVC_CAMERA, -+ .selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL, -+ .size = 32, -+ .offset = 32, -+ .v4l2_type = V4L2_CTRL_TYPE_INTEGER, -+ .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, -+ }, -+ { - .id = V4L2_CID_PRIVACY, - .name = "Privacy", - .entity = UVC_GUID_UVC_CAMERA, -From: Hans de Goede -Date: Wed, 19 May 2010 23:15:00 +0000 (+0200) -Subject: uvcvideo: Make button controls work properly -X-Git-Url: http://git.linuxtv.org/pinchartl/uvcvideo.git?a=commitdiff_plain;h=2bd47ad4894bfaf1a97660b821cbc46439a614d6 - -uvcvideo: Make button controls work properly - -According to the v4l2 
spec, writing any value to a button control should -result in the action belonging to the button control being triggered. -UVC cams however want to see a 1 written, this patch fixes this by -overriding whatever value user space passed in with -1 (0xffffffff) when -the control is a button control. - -Signed-off-by: Hans de Goede ---- - -diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c -index 5ec2f4a..8bb825d 100644 ---- a/drivers/media/video/uvc/uvc_ctrl.c -+++ b/drivers/media/video/uvc/uvc_ctrl.c -@@ -698,6 +698,14 @@ static void uvc_set_le_value(struct uvc_control_mapping *mapping, - int offset = mapping->offset; - __u8 mask; - -+ /* According to the v4l2 spec, writing any value to a button control -+ * should result in the action belonging to the button control being -+ * triggered. UVC devices however want to see a 1 written -> override -+ * value. -+ */ -+ if (mapping->v4l2_type == V4L2_CTRL_TYPE_BUTTON) -+ value = -1; -+ - data += offset / 8; - offset &= 7; - -From: Laurent Pinchart -Date: Thu, 18 Feb 2010 19:38:52 +0000 (+0100) -Subject: uvcvideo: Support menu controls in the control mapping API -X-Git-Url: http://git.linuxtv.org/pinchartl/uvcvideo.git?a=commitdiff_plain;h=4930f2662e47d33e5baedac620da401a225bc3a8 - -uvcvideo: Support menu controls in the control mapping API - -The UVCIOC_CTRL_MAP ioctl doesn't support menu entries for menu -controls. As the uvc_xu_control_mapping structure has no reserved -fields, this can't be fixed while keeping ABI compatibility. - -Modify the UVCIOC_CTRL_MAP ioctl to add menu entries support, and define -UVCIOC_CTRL_MAP_OLD that supports the old ABI without any ability to add -menu controls. - -Signed-off-by: Laurent Pinchart ---- - -diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c -index 8bb825d..c88d72e 100644 ---- a/drivers/media/video/uvc/uvc_ctrl.c -+++ b/drivers/media/video/uvc/uvc_ctrl.c -@@ -1606,6 +1606,28 @@ void uvc_ctrl_cleanup_device(struct uvc_device *dev) - } - } - -+void uvc_ctrl_cleanup(void) -+{ -+ struct uvc_control_info *info; -+ struct uvc_control_info *ni; -+ struct uvc_control_mapping *mapping; -+ struct uvc_control_mapping *nm; -+ -+ list_for_each_entry_safe(info, ni, &uvc_driver.controls, list) { -+ if (!(info->flags & UVC_CONTROL_EXTENSION)) -+ continue; -+ -+ list_for_each_entry_safe(mapping, nm, &info->mappings, list) { -+ list_del(&mapping->list); -+ kfree(mapping->menu_info); -+ kfree(mapping); -+ } -+ -+ list_del(&info->list); -+ kfree(info); -+ } -+} -+ - void uvc_ctrl_init(void) - { - struct uvc_control_info *ctrl = uvc_ctrls; -diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c -index 838b56f..34818c1 100644 ---- a/drivers/media/video/uvc/uvc_driver.c -+++ b/drivers/media/video/uvc/uvc_driver.c -@@ -2261,6 +2261,7 @@ static int __init uvc_init(void) - static void __exit uvc_cleanup(void) - { - usb_deregister(&uvc_driver.driver); -+ uvc_ctrl_cleanup(); - } - - module_init(uvc_init); -diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c -index 7c9ab29..485a899 100644 ---- a/drivers/media/video/uvc/uvc_v4l2.c -+++ b/drivers/media/video/uvc/uvc_v4l2.c -@@ -29,6 +29,71 @@ - #include "uvcvideo.h" - - /* ------------------------------------------------------------------------ -+ * UVC ioctls -+ */ -+static int uvc_ioctl_ctrl_map(struct uvc_xu_control_mapping *xmap, int old) -+{ -+ struct uvc_control_mapping *map; -+ unsigned int size; -+ int ret; -+ -+ map = kzalloc(sizeof *map, 
GFP_KERNEL); -+ if (map == NULL) -+ return -ENOMEM; -+ -+ map->id = xmap->id; -+ memcpy(map->name, xmap->name, sizeof map->name); -+ memcpy(map->entity, xmap->entity, sizeof map->entity); -+ map->selector = xmap->selector; -+ map->size = xmap->size; -+ map->offset = xmap->offset; -+ map->v4l2_type = xmap->v4l2_type; -+ map->data_type = xmap->data_type; -+ -+ switch (xmap->v4l2_type) { -+ case V4L2_CTRL_TYPE_INTEGER: -+ case V4L2_CTRL_TYPE_BOOLEAN: -+ case V4L2_CTRL_TYPE_BUTTON: -+ break; -+ -+ case V4L2_CTRL_TYPE_MENU: -+ if (old) { -+ ret = -EINVAL; -+ goto done; -+ } -+ -+ size = xmap->menu_count * sizeof(*map->menu_info); -+ map->menu_info = kmalloc(size, GFP_KERNEL); -+ if (map->menu_info == NULL) { -+ ret = -ENOMEM; -+ goto done; -+ } -+ -+ if (copy_from_user(map->menu_info, xmap->menu_info, size)) { -+ ret = -EFAULT; -+ goto done; -+ } -+ -+ map->menu_count = xmap->menu_count; -+ break; -+ -+ default: -+ ret = -EINVAL; -+ goto done; -+ } -+ -+ ret = uvc_ctrl_add_mapping(map); -+ -+done: -+ if (ret < 0) { -+ kfree(map->menu_info); -+ kfree(map); -+ } -+ -+ return ret; -+} -+ -+/* ------------------------------------------------------------------------ - * V4L2 interface - */ - -@@ -974,7 +1039,8 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg) - info->flags = xinfo->flags; - - info->flags |= UVC_CONTROL_GET_MIN | UVC_CONTROL_GET_MAX | -- UVC_CONTROL_GET_RES | UVC_CONTROL_GET_DEF; -+ UVC_CONTROL_GET_RES | UVC_CONTROL_GET_DEF | -+ UVC_CONTROL_EXTENSION; - - ret = uvc_ctrl_add_info(info); - if (ret < 0) -@@ -982,32 +1048,12 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg) - break; - } - -+ case UVCIOC_CTRL_MAP_OLD: - case UVCIOC_CTRL_MAP: -- { -- struct uvc_xu_control_mapping *xmap = arg; -- struct uvc_control_mapping *map; -- - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - -- map = kzalloc(sizeof *map, GFP_KERNEL); -- if (map == NULL) -- return -ENOMEM; -- -- map->id = xmap->id; -- memcpy(map->name, xmap->name, sizeof map->name); -- memcpy(map->entity, xmap->entity, sizeof map->entity); -- map->selector = xmap->selector; -- map->size = xmap->size; -- map->offset = xmap->offset; -- map->v4l2_type = xmap->v4l2_type; -- map->data_type = xmap->data_type; -- -- ret = uvc_ctrl_add_mapping(map); -- if (ret < 0) -- kfree(map); -- break; -- } -+ return uvc_ioctl_ctrl_map(arg, cmd == UVCIOC_CTRL_MAP_OLD); - - case UVCIOC_CTRL_GET: - return uvc_xu_ctrl_query(chain, arg, 0); -diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h -index d1f8840..14f77e4 100644 ---- a/drivers/media/video/uvc/uvcvideo.h -+++ b/drivers/media/video/uvc/uvcvideo.h -@@ -27,6 +27,8 @@ - #define UVC_CONTROL_RESTORE (1 << 6) - /* Control can be updated by the camera. */ - #define UVC_CONTROL_AUTO_UPDATE (1 << 7) -+/* Control is an extension unit control. 
*/ -+#define UVC_CONTROL_EXTENSION (1 << 8) - - #define UVC_CONTROL_GET_RANGE (UVC_CONTROL_GET_CUR | UVC_CONTROL_GET_MIN | \ - UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES | \ -@@ -40,6 +42,15 @@ struct uvc_xu_control_info { - __u32 flags; - }; - -+struct uvc_menu_info { -+ __u32 value; -+ __u8 name[32]; -+}; -+ -+struct uvc_xu_control_mapping_old { -+ __u8 reserved[64]; -+}; -+ - struct uvc_xu_control_mapping { - __u32 id; - __u8 name[32]; -@@ -50,6 +61,11 @@ struct uvc_xu_control_mapping { - __u8 offset; - enum v4l2_ctrl_type v4l2_type; - __u32 data_type; -+ -+ struct uvc_menu_info __user *menu_info; -+ __u32 menu_count; -+ -+ __u32 reserved[4]; - }; - - struct uvc_xu_control { -@@ -60,6 +76,7 @@ struct uvc_xu_control { - }; - - #define UVCIOC_CTRL_ADD _IOW('U', 1, struct uvc_xu_control_info) -+#define UVCIOC_CTRL_MAP_OLD _IOWR('U', 2, struct uvc_xu_control_mapping_old) - #define UVCIOC_CTRL_MAP _IOWR('U', 2, struct uvc_xu_control_mapping) - #define UVCIOC_CTRL_GET _IOWR('U', 3, struct uvc_xu_control) - #define UVCIOC_CTRL_SET _IOW('U', 4, struct uvc_xu_control) -@@ -198,11 +215,6 @@ struct uvc_streaming_control { - __u8 bMaxVersion; - }; - --struct uvc_menu_info { -- __u32 value; -- __u8 name[32]; --}; -- - struct uvc_control_info { - struct list_head list; - struct list_head mappings; -@@ -625,6 +637,7 @@ extern int uvc_ctrl_init_device(struct uvc_device *dev); - extern void uvc_ctrl_cleanup_device(struct uvc_device *dev); - extern int uvc_ctrl_resume_device(struct uvc_device *dev); - extern void uvc_ctrl_init(void); -+extern void uvc_ctrl_cleanup(void); - - extern int uvc_ctrl_begin(struct uvc_video_chain *chain); - extern int __uvc_ctrl_commit(struct uvc_video_chain *chain, int rollback); -From: Laurent Pinchart -Date: Fri, 25 Jun 2010 07:58:43 +0000 (+0200) -Subject: uvcvideo: Add support for Manta MM-353 Plako -X-Git-Url: http://git.linuxtv.org/pinchartl/uvcvideo.git?a=commitdiff_plain;h=352e661e1f347390a86cf34bc5e41adbdd1caa41 - -uvcvideo: Add support for Manta MM-353 Plako - -The camera requires the PROBE_MINMAX quirk. 
Add a corresponding entry -in the device IDs list - -Signed-off-by: Laurent Pinchart ---- - -diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c -index 34818c1..1a89384 100644 ---- a/drivers/media/video/uvc/uvc_driver.c -+++ b/drivers/media/video/uvc/uvc_driver.c -@@ -2174,6 +2174,15 @@ static struct usb_device_id uvc_ids[] = { - .bInterfaceSubClass = 1, - .bInterfaceProtocol = 0, - .driver_info = UVC_QUIRK_PROBE_EXTRAFIELDS }, -+ /* Manta MM-353 Plako */ -+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE -+ | USB_DEVICE_ID_MATCH_INT_INFO, -+ .idVendor = 0x18ec, -+ .idProduct = 0x3188, -+ .bInterfaceClass = USB_CLASS_VIDEO, -+ .bInterfaceSubClass = 1, -+ .bInterfaceProtocol = 0, -+ .driver_info = UVC_QUIRK_PROBE_MINMAX }, - /* FSC WebCam V30S */ - { .match_flags = USB_DEVICE_ID_MATCH_DEVICE - | USB_DEVICE_ID_MATCH_INT_INFO, diff --git a/linux-2.6-vio-modalias.patch b/linux-2.6-vio-modalias.patch index 2d56d6e..057eac7 100644 --- a/linux-2.6-vio-modalias.patch +++ b/linux-2.6-vio-modalias.patch @@ -1,37 +1,8 @@ -From: Benjamin Herrenschmidt -Date: Wed, 7 Apr 2010 04:44:28 +0000 (+1000) -Subject: powerpc/vio: Add modalias support -X-Git-Tag: v2.6.35-rc1~450^2~88 -X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=578b7cd1518f8d1b17a7fb1671d3d756c9cb49f1 - -powerpc/vio: Add modalias support - -BenH: Added to vio_cmo_dev_attrs as well - -Provide a modalias entry for VIO devices in sysfs. I believe -this was another initrd generation bugfix for anaconda. -Signed-off-by: David Woodhouse -Signed-off-by: Benjamin Herrenschmidt ---- - diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c -index 8223717..2f57956 100644 +index f988672..12a0851 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c -@@ -958,9 +958,12 @@ viodev_cmo_rd_attr(allocated); - - static ssize_t name_show(struct device *, struct device_attribute *, char *); - static ssize_t devspec_show(struct device *, struct device_attribute *, char *); -+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, -+ char *buf); - static struct device_attribute vio_cmo_dev_attrs[] = { - __ATTR_RO(name), - __ATTR_RO(devspec), -+ __ATTR_RO(modalias), - __ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH, - viodev_cmo_desired_show, viodev_cmo_desired_set), - __ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL), -@@ -1320,9 +1323,27 @@ static ssize_t devspec_show(struct device *dev, +@@ -294,9 +294,27 @@ static ssize_t devspec_show(struct device *dev, return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none"); } diff --git a/linux-2.6-x86-64-fbdev-primary.patch b/linux-2.6-x86-64-fbdev-primary.patch new file mode 100644 index 0000000..b35096f --- /dev/null +++ b/linux-2.6-x86-64-fbdev-primary.patch @@ -0,0 +1,49 @@ +From cdd54d73203838f249291988d5f79e40fee00a05 Mon Sep 17 00:00:00 2001 +From: Dave Airlie +Date: Thu, 7 Jan 2010 16:59:06 +1000 +Subject: [PATCH] x86: allow fbdev primary video code on 64-bit. + +For some reason the 64-bit tree was doing this differently and +I can't see why it would need to. + +This correct behaviour when you have two GPUs plugged in and +32-bit put the console in one place and 64-bit in another. 
+ +Signed-off-by: Dave Airlie +--- + arch/x86/Makefile | 2 -- + arch/x86/include/asm/fb.h | 4 ---- + 2 files changed, 0 insertions(+), 6 deletions(-) + +diff --git a/arch/x86/Makefile b/arch/x86/Makefile +index 78b32be..0a43dc5 100644 +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -135,9 +135,7 @@ drivers-$(CONFIG_OPROFILE) += arch/x86/oprofile/ + # suspend and hibernation support + drivers-$(CONFIG_PM) += arch/x86/power/ + +-ifeq ($(CONFIG_X86_32),y) + drivers-$(CONFIG_FB) += arch/x86/video/ +-endif + + #### + # boot loader support. Several targets are kept for legacy purposes +diff --git a/arch/x86/include/asm/fb.h b/arch/x86/include/asm/fb.h +index 5301846..2519d06 100644 +--- a/arch/x86/include/asm/fb.h ++++ b/arch/x86/include/asm/fb.h +@@ -12,10 +12,6 @@ static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, + pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; + } + +-#ifdef CONFIG_X86_32 + extern int fb_is_primary_device(struct fb_info *info); +-#else +-static inline int fb_is_primary_device(struct fb_info *info) { return 0; } +-#endif + + #endif /* _ASM_X86_FB_H */ +-- +1.6.5.2 + diff --git a/linux-2.6.30-hush-rom-warning.patch b/linux-2.6.30-hush-rom-warning.patch new file mode 100644 index 0000000..3486ea7 --- /dev/null +++ b/linux-2.6.30-hush-rom-warning.patch @@ -0,0 +1,41 @@ +From fe97564a6c40c1dd1e760ea9543d63709907d577 Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Mon, 11 Jan 2010 08:31:34 -0500 +Subject: linux-2.6.30-hush-rom-warning.patch + +--- + drivers/pci/setup-res.c | 11 ++++++++--- + 1 files changed, 8 insertions(+), 3 deletions(-) + +diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c +index 7d678bb..5acac72 100644 +--- a/drivers/pci/setup-res.c ++++ b/drivers/pci/setup-res.c +@@ -95,6 +95,7 @@ int pci_claim_resource(struct pci_dev *dev, int resource) + struct resource *res = &dev->resource[resource]; + struct resource *root; + int err; ++ const char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge"; + + root = pci_find_parent_resource(dev, res); + if (!root) { +@@ -104,9 +105,13 @@ int pci_claim_resource(struct pci_dev *dev, int resource) + } + + err = request_resource(root, res); +- if (err) +- dev_err(&dev->dev, +- "address space collision: %pR already in use\n", res); ++ if (err) ++ if (resource == 6) /* KERN_INFO on ROM. */ ++ dev_info(&dev->dev, ++ "address space collision: %pR already in use\n", res); ++ else ++ dev_err(&dev->dev, ++ "address space collision: %pR already in use\n", res); + + return err; + } +-- +1.6.5.2 + diff --git a/lirc-2.6.33.patch b/lirc-2.6.33.patch index 0aab0c7..ea4f28d 100644 --- a/lirc-2.6.33.patch +++ b/lirc-2.6.33.patch @@ -1,133 +1,64 @@ - include/linux/lirc.h | 94 ++ +lirc drivers, 2010.04.05 + +Generated against linus/master from the linux-2.6-lirc.git tree at +http://git.kernel.org/?p=linux/kernel/git/jarod/linux-2.6-lirc.git;a=summary + +Also includes a pure input imon driver, which used to be part of lirc_imon... 
+ +Signed-off-by: Jarod Wilson + +--- + MAINTAINERS | 9 + drivers/input/Kconfig | 2 + drivers/input/Makefile | 2 + drivers/input/lirc/Kconfig | 116 ++ drivers/input/lirc/Makefile | 21 + - drivers/input/lirc/lirc_bt829.c | 383 ++++++ - drivers/input/lirc/lirc_dev.c | 736 ++++++++++ - drivers/input/lirc/lirc_dev.h | 225 +++ + drivers/input/lirc/lirc_bt829.c | 383 +++++ + drivers/input/lirc/lirc_dev.c | 850 +++++++++++ + drivers/input/lirc/lirc_dev.h | 228 +++ drivers/input/lirc/lirc_ene0100.c | 646 +++++++++ drivers/input/lirc/lirc_ene0100.h | 169 +++ - drivers/input/lirc/lirc_i2c.c | 536 ++++++++ - drivers/input/lirc/lirc_igorplugusb.c | 556 ++++++++ - drivers/input/lirc/lirc_imon.c | 1054 ++++++++++++++ - drivers/input/lirc/lirc_it87.c | 991 ++++++++++++++ + drivers/input/lirc/lirc_i2c.c | 536 +++++++ + drivers/input/lirc/lirc_igorplugusb.c | 555 ++++++++ + drivers/input/lirc/lirc_imon.c | 1053 ++++++++++++++ + drivers/input/lirc/lirc_it87.c | 1021 +++++++++++++ drivers/input/lirc/lirc_it87.h | 116 ++ - drivers/input/lirc/lirc_ite8709.c | 540 ++++++++ - drivers/input/lirc/lirc_mceusb.c | 1222 +++++++++++++++++ - drivers/input/lirc/lirc_parallel.c | 709 ++++++++++ + drivers/input/lirc/lirc_ite8709.c | 540 +++++++ + drivers/input/lirc/lirc_mceusb.c | 1385 ++++++++++++++++++ + drivers/input/lirc/lirc_parallel.c | 709 +++++++++ drivers/input/lirc/lirc_parallel.h | 26 + - drivers/input/lirc/lirc_sasem.c | 931 +++++++++++++ - drivers/input/lirc/lirc_serial.c | 1317 ++++++++++++++++++ + drivers/input/lirc/lirc_sasem.c | 931 ++++++++++++ + drivers/input/lirc/lirc_serial.c | 1317 +++++++++++++++++ drivers/input/lirc/lirc_sir.c | 1283 +++++++++++++++++ - drivers/input/lirc/lirc_streamzap.c | 794 +++++++++++ + drivers/input/lirc/lirc_streamzap.c | 821 +++++++++++ drivers/input/lirc/lirc_ttusbir.c | 397 ++++++ - drivers/input/lirc/lirc_zilog.c | 1396 +++++++++++++++++++ + drivers/input/lirc/lirc_zilog.c | 1388 ++++++++++++++++++ drivers/input/misc/Kconfig | 12 + drivers/input/misc/Makefile | 1 + - drivers/input/misc/imon.c | 2430 +++++++++++++++++++++++++++++++++ - 28 files changed, 16705 insertions(+), 0 deletions(-) + drivers/input/misc/imon.c | 2523 +++++++++++++++++++++++++++++++++ + include/linux/lirc.h | 159 +++ + 29 files changed, 17199 insertions(+), 0 deletions(-) -diff --git a/include/linux/lirc.h b/include/linux/lirc.h -new file mode 100644 -index 0000000..8ae64fa ---- /dev/null -+++ b/include/linux/lirc.h -@@ -0,0 +1,94 @@ -+/* -+ * lirc.h - linux infrared remote control header file -+ * last modified 2007/09/27 -+ */ -+ -+#ifndef _LINUX_LIRC_H -+#define _LINUX_LIRC_H -+ -+#include -+#include -+ -+#define PULSE_BIT 0x01000000 -+#define PULSE_MASK 0x00FFFFFF -+ -+/*** lirc compatible hardware features ***/ -+ -+#define LIRC_MODE2SEND(x) (x) -+#define LIRC_SEND2MODE(x) (x) -+#define LIRC_MODE2REC(x) ((x) << 16) -+#define LIRC_REC2MODE(x) ((x) >> 16) -+ -+#define LIRC_MODE_RAW 0x00000001 -+#define LIRC_MODE_PULSE 0x00000002 -+#define LIRC_MODE_MODE2 0x00000004 -+#define LIRC_MODE_LIRCCODE 0x00000010 -+ -+ -+#define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW) -+#define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE) -+#define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2) -+#define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE) -+ -+#define LIRC_CAN_SEND_MASK 0x0000003f -+ -+#define LIRC_CAN_SET_SEND_CARRIER 0x00000100 -+#define LIRC_CAN_SET_SEND_DUTY_CYCLE 0x00000200 -+#define LIRC_CAN_SET_TRANSMITTER_MASK 0x00000400 -+ -+#define LIRC_CAN_REC_RAW 
LIRC_MODE2REC(LIRC_MODE_RAW) -+#define LIRC_CAN_REC_PULSE LIRC_MODE2REC(LIRC_MODE_PULSE) -+#define LIRC_CAN_REC_MODE2 LIRC_MODE2REC(LIRC_MODE_MODE2) -+#define LIRC_CAN_REC_LIRCCODE LIRC_MODE2REC(LIRC_MODE_LIRCCODE) -+ -+#define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK) -+ -+#define LIRC_CAN_SET_REC_CARRIER (LIRC_CAN_SET_SEND_CARRIER << 16) -+#define LIRC_CAN_SET_REC_DUTY_CYCLE (LIRC_CAN_SET_SEND_DUTY_CYCLE << 16) -+ -+#define LIRC_CAN_SET_REC_DUTY_CYCLE_RANGE 0x40000000 -+#define LIRC_CAN_SET_REC_CARRIER_RANGE 0x80000000 -+#define LIRC_CAN_GET_REC_RESOLUTION 0x20000000 -+ -+#define LIRC_CAN_SEND(x) ((x)&LIRC_CAN_SEND_MASK) -+#define LIRC_CAN_REC(x) ((x)&LIRC_CAN_REC_MASK) -+ -+#define LIRC_CAN_NOTIFY_DECODE 0x01000000 -+ -+/*** IOCTL commands for lirc driver ***/ -+ -+#define LIRC_GET_FEATURES _IOR('i', 0x00000000, uint64_t) -+ -+#define LIRC_GET_SEND_MODE _IOR('i', 0x00000001, uint64_t) -+#define LIRC_GET_REC_MODE _IOR('i', 0x00000002, uint64_t) -+#define LIRC_GET_SEND_CARRIER _IOR('i', 0x00000003, uint32_t) -+#define LIRC_GET_REC_CARRIER _IOR('i', 0x00000004, uint32_t) -+#define LIRC_GET_SEND_DUTY_CYCLE _IOR('i', 0x00000005, uint32_t) -+#define LIRC_GET_REC_DUTY_CYCLE _IOR('i', 0x00000006, uint32_t) -+#define LIRC_GET_REC_RESOLUTION _IOR('i', 0x00000007, uint32_t) -+ -+/* code length in bits, currently only for LIRC_MODE_LIRCCODE */ -+#define LIRC_GET_LENGTH _IOR('i', 0x0000000f, uint64_t) -+ -+#define LIRC_SET_SEND_MODE _IOW('i', 0x00000011, uint64_t) -+#define LIRC_SET_REC_MODE _IOW('i', 0x00000012, uint64_t) -+/* Note: these can reset the according pulse_width */ -+#define LIRC_SET_SEND_CARRIER _IOW('i', 0x00000013, uint32_t) -+#define LIRC_SET_REC_CARRIER _IOW('i', 0x00000014, uint32_t) -+#define LIRC_SET_SEND_DUTY_CYCLE _IOW('i', 0x00000015, uint32_t) -+#define LIRC_SET_REC_DUTY_CYCLE _IOW('i', 0x00000016, uint32_t) -+#define LIRC_SET_TRANSMITTER_MASK _IOW('i', 0x00000017, uint32_t) -+ -+/* -+ * to set a range use -+ * LIRC_SET_REC_DUTY_CYCLE_RANGE/LIRC_SET_REC_CARRIER_RANGE with the -+ * lower bound first and later -+ * LIRC_SET_REC_DUTY_CYCLE/LIRC_SET_REC_CARRIER with the upper bound -+ */ -+ -+#define LIRC_SET_REC_DUTY_CYCLE_RANGE _IOW('i', 0x0000001e, uint32_t) -+#define LIRC_SET_REC_CARRIER_RANGE _IOW('i', 0x0000001f, uint32_t) -+ -+#define LIRC_NOTIFY_DECODE _IO('i', 0x00000020) -+ -+#endif +diff --git a/MAINTAINERS b/MAINTAINERS +index 47cc449..ae38439 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -3427,6 +3427,15 @@ F: arch/powerpc/platforms/pasemi/ + F: drivers/*/*pasemi* + F: drivers/*/*/*pasemi* + ++LINUX INFRARED REMOTE CONTROL DRIVERS (LIRC) ++P: Jarod Wilson ++M: jarod@redhat.com ++P: Christoph Bartelmus ++M: lirc@bartelmus.de ++W: http://www.lirc.org/ ++L: lirc-list@lists.sourceforge.net ++S: Maintained ++ + LINUX SECURITY MODULE (LSM) FRAMEWORK + M: Chris Wright + L: linux-security-module@vger.kernel.org diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig index 07c2cd4..ebc8743 100644 --- a/drivers/input/Kconfig @@ -691,10 +622,10 @@ index 0000000..0485884 +MODULE_PARM_DESC(debug, "Debug enabled or not"); diff --git a/drivers/input/lirc/lirc_dev.c b/drivers/input/lirc/lirc_dev.c new file mode 100644 -index 0000000..504e122 +index 0000000..cd5d75a --- /dev/null +++ b/drivers/input/lirc/lirc_dev.c -@@ -0,0 +1,736 @@ +@@ -0,0 +1,850 @@ +/* + * LIRC base driver + * @@ -733,6 +664,9 @@ index 0000000..504e122 +#include +#include +#include ++#ifdef CONFIG_COMPAT ++#include ++#endif + +#include +#include "lirc_dev.h" @@ -859,6 +793,9 @@ index 
0000000..504e122 + .write = lirc_dev_fop_write, + .poll = lirc_dev_fop_poll, + .ioctl = lirc_dev_fop_ioctl, ++#ifdef CONFIG_COMPAT ++ .compat_ioctl = lirc_dev_fop_compat_ioctl, ++#endif + .open = lirc_dev_fop_open, + .release = lirc_dev_fop_close, +}; @@ -1257,6 +1194,20 @@ index 0000000..504e122 + case LIRC_GET_LENGTH: + result = put_user(ir->d.code_length, (unsigned long *)arg); + break; ++ case LIRC_GET_MIN_TIMEOUT: ++ if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) || ++ ir->d.min_timeout == 0) ++ return -ENOSYS; ++ ++ result = put_user(ir->d.min_timeout, (int *) arg); ++ break; ++ case LIRC_GET_MAX_TIMEOUT: ++ if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) || ++ ir->d.max_timeout == 0) ++ return -ENOSYS; ++ ++ result = put_user(ir->d.max_timeout, (int *) arg); ++ break; + default: + result = -EINVAL; + } @@ -1268,6 +1219,100 @@ index 0000000..504e122 +} +EXPORT_SYMBOL(lirc_dev_fop_ioctl); + ++#ifdef CONFIG_COMPAT ++#define LIRC_GET_FEATURES_COMPAT32 _IOR('i', 0x00000000, __u32) ++ ++#define LIRC_GET_SEND_MODE_COMPAT32 _IOR('i', 0x00000001, __u32) ++#define LIRC_GET_REC_MODE_COMPAT32 _IOR('i', 0x00000002, __u32) ++ ++#define LIRC_GET_LENGTH_COMPAT32 _IOR('i', 0x0000000f, __u32) ++ ++#define LIRC_SET_SEND_MODE_COMPAT32 _IOW('i', 0x00000011, __u32) ++#define LIRC_SET_REC_MODE_COMPAT32 _IOW('i', 0x00000012, __u32) ++ ++long lirc_dev_fop_compat_ioctl(struct file *file, ++ unsigned int cmd32, ++ unsigned long arg) ++{ ++ mm_segment_t old_fs; ++ int ret; ++ unsigned long val; ++ unsigned int cmd; ++ ++ switch (cmd32) { ++ case LIRC_GET_FEATURES_COMPAT32: ++ case LIRC_GET_SEND_MODE_COMPAT32: ++ case LIRC_GET_REC_MODE_COMPAT32: ++ case LIRC_GET_LENGTH_COMPAT32: ++ case LIRC_SET_SEND_MODE_COMPAT32: ++ case LIRC_SET_REC_MODE_COMPAT32: ++ /* ++ * These commands expect (unsigned long *) arg ++ * but the 32-bit app supplied (__u32 *). ++ * Conversion is required. ++ */ ++ if (get_user(val, (__u32 *)compat_ptr(arg))) ++ return -EFAULT; ++ lock_kernel(); ++ /* ++ * tell lirc_dev_fop_ioctl that it's safe to use the pointer ++ * to val which is in kernel address space and not in ++ * user address space. ++ */ ++ old_fs = get_fs(); ++ set_fs(KERNEL_DS); ++ ++ cmd = _IOC(_IOC_DIR(cmd32), _IOC_TYPE(cmd32), _IOC_NR(cmd32), ++ (_IOC_TYPECHECK(unsigned long))); ++ ret = lirc_dev_fop_ioctl(file->f_path.dentry->d_inode, file, ++ cmd, (unsigned long)(&val)); ++ ++ set_fs(old_fs); ++ unlock_kernel(); ++ switch (cmd) { ++ case LIRC_GET_FEATURES: ++ case LIRC_GET_SEND_MODE: ++ case LIRC_GET_REC_MODE: ++ case LIRC_GET_LENGTH: ++ if (!ret && put_user(val, (__u32 *)compat_ptr(arg))) ++ return -EFAULT; ++ break; ++ } ++ return ret; ++ ++ case LIRC_GET_SEND_CARRIER: ++ case LIRC_GET_REC_CARRIER: ++ case LIRC_GET_SEND_DUTY_CYCLE: ++ case LIRC_GET_REC_DUTY_CYCLE: ++ case LIRC_GET_REC_RESOLUTION: ++ case LIRC_SET_SEND_CARRIER: ++ case LIRC_SET_REC_CARRIER: ++ case LIRC_SET_SEND_DUTY_CYCLE: ++ case LIRC_SET_REC_DUTY_CYCLE: ++ case LIRC_SET_TRANSMITTER_MASK: ++ case LIRC_SET_REC_DUTY_CYCLE_RANGE: ++ case LIRC_SET_REC_CARRIER_RANGE: ++ /* ++ * These commands expect (unsigned int *)arg ++ * so no problems here. Just handle the locking. 
++ */ ++ lock_kernel(); ++ cmd = cmd32; ++ ret = lirc_dev_fop_ioctl(file->f_path.dentry->d_inode, ++ file, cmd, arg); ++ unlock_kernel(); ++ return ret; ++ default: ++ /* unknown */ ++ printk(KERN_ERR "lirc_dev: %s(%s:%d): Unknown cmd %08x\n", ++ __func__, current->comm, current->pid, cmd32); ++ return -ENOIOCTLCMD; ++ } ++} ++EXPORT_SYMBOL(lirc_dev_fop_compat_ioctl); ++#endif ++ ++ +ssize_t lirc_dev_fop_read(struct file *file, + char *buffer, + size_t length, @@ -1433,10 +1478,10 @@ index 0000000..504e122 +MODULE_PARM_DESC(debug, "Enable debugging messages"); diff --git a/drivers/input/lirc/lirc_dev.h b/drivers/input/lirc/lirc_dev.h new file mode 100644 -index 0000000..99d0442 +index 0000000..56020e8 --- /dev/null +++ b/drivers/input/lirc/lirc_dev.h -@@ -0,0 +1,225 @@ +@@ -0,0 +1,228 @@ +/* + * LIRC base driver + * @@ -1458,6 +1503,7 @@ index 0000000..99d0442 +#include +#include +#include ++#include + +struct lirc_buffer { + wait_queue_head_t wait_poll; @@ -1571,6 +1617,8 @@ index 0000000..99d0442 + unsigned int chunk_size; + + void *data; ++ int min_timeout; ++ int max_timeout; + int (*add_to_buf) (void *data, struct lirc_buffer *buf); + struct lirc_buffer *rbuf; + int (*set_use_inc) (void *data); @@ -3033,10 +3081,10 @@ index 0000000..f3f8c2e +module_exit(lirc_i2c_exit); diff --git a/drivers/input/lirc/lirc_igorplugusb.c b/drivers/input/lirc/lirc_igorplugusb.c new file mode 100644 -index 0000000..599037d +index 0000000..d1c02c2 --- /dev/null +++ b/drivers/input/lirc/lirc_igorplugusb.c -@@ -0,0 +1,556 @@ +@@ -0,0 +1,555 @@ +/* + * lirc_igorplugusb - USB remote support for LIRC + * @@ -3086,7 +3134,6 @@ index 0000000..599037d +#include +#include +#include -+#include +#include + +#include @@ -3595,10 +3642,10 @@ index 0000000..599037d + diff --git a/drivers/input/lirc/lirc_imon.c b/drivers/input/lirc/lirc_imon.c new file mode 100644 -index 0000000..5bea43b +index 0000000..af5eec8 --- /dev/null +++ b/drivers/input/lirc/lirc_imon.c -@@ -0,0 +1,1054 @@ +@@ -0,0 +1,1053 @@ +/* + * lirc_imon.c: LIRC/VFD/LCD driver for SoundGraph iMON IR/VFD/LCD + * including the iMON PAD model @@ -3790,6 +3837,7 @@ index 0000000..5bea43b + +static void free_imon_context(struct imon_context *context) +{ ++ struct device *dev = context->driver->dev; + usb_free_urb(context->tx_urb); + usb_free_urb(context->rx_urb); + lirc_buffer_free(context->driver->rbuf); @@ -3797,7 +3845,7 @@ index 0000000..5bea43b + kfree(context->driver); + kfree(context); + -+ dev_dbg(context->driver->dev, "%s: iMON context freed\n", __func__); ++ dev_dbg(dev, "%s: iMON context freed\n", __func__); +} + +static void deregister_from_lirc(struct imon_context *context) @@ -4315,7 +4363,6 @@ index 0000000..5bea43b + struct urb *tx_urb = NULL; + struct lirc_driver *driver = NULL; + struct lirc_buffer *rbuf = NULL; -+ struct usb_interface *first_if; + struct device *dev = &interface->dev; + int ifnum; + int lirc_minor = 0; @@ -4327,10 +4374,16 @@ index 0000000..5bea43b + int vfd_proto_6p = 0; + int code_length; + struct imon_context *context = NULL; -+ struct imon_context *first_if_context = NULL; + int i; + u16 vendor, product; + ++ context = kzalloc(sizeof(struct imon_context), GFP_KERNEL); ++ if (!context) { ++ err("%s: kzalloc failed for context", __func__); ++ alloc_status = 1; ++ goto alloc_status_switch; ++ } ++ + /* + * Try to auto-detect the type of display if the user hasn't set + * it by hand via the display_type modparam. Default is VFD. 
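For reference, a minimal user-space sketch of the ioctl interface that the lirc_dev compat handler above has to bridge: LIRC_GET_FEATURES, LIRC_GET_SEND_MODE, LIRC_GET_REC_MODE and LIRC_GET_LENGTH all take a pointer to an unsigned long, so a 32-bit process hands the kernel a 4-byte object where a 64-bit kernel expects 8 bytes. The /dev/lirc0 node name and the <linux/lirc.h> include path are assumptions for illustration (they depend on how the headers and device nodes end up installed), not part of this patch.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/lirc.h>

int main(void)
{
        unsigned long features = 0, rec_mode = 0;
        int fd = open("/dev/lirc0", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/lirc0");
                return 1;
        }
        /* both ioctls pass an unsigned long by pointer */
        if (ioctl(fd, LIRC_GET_FEATURES, &features) == 0)
                printf("features:     0x%08lx\n", features);
        if (ioctl(fd, LIRC_GET_REC_MODE, &rec_mode) == 0)
                printf("receive mode: 0x%08lx\n", rec_mode);
        close(fd);
        return 0;
}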
@@ -4355,9 +4408,6 @@ index 0000000..5bea43b + /* prevent races probing devices w/multiple interfaces */ + mutex_lock(&driver_lock); + -+ first_if = usb_ifnum_to_if(usbdev, 0); -+ first_if_context = (struct imon_context *)usb_get_intfdata(first_if); -+ + /* + * Scan the endpoint list and set: + * first input endpoint = IR endpoint @@ -4401,7 +4451,8 @@ index 0000000..5bea43b + if (!ir_ep_found) { + err("%s: no valid input (IR) endpoint found.", __func__); + retval = -ENODEV; -+ goto exit; ++ alloc_status = 2; ++ goto alloc_status_switch; + } + + /* Determine if display requires 6 packets */ @@ -4413,12 +4464,6 @@ index 0000000..5bea43b + __func__, vfd_proto_6p); + } + -+ context = kzalloc(sizeof(struct imon_context), GFP_KERNEL); -+ if (!context) { -+ err("%s: kzalloc failed for context", __func__); -+ alloc_status = 1; -+ goto alloc_status_switch; -+ } + driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL); + if (!driver) { + err("%s: kzalloc failed for lirc_driver", __func__); @@ -4549,7 +4594,8 @@ index 0000000..5bea43b + kfree(context); + context = NULL; + case 1: -+ retval = -ENOMEM; ++ if (retval != -ENODEV) ++ retval = -ENOMEM; + break; + case 0: + retval = 0; @@ -4655,10 +4701,10 @@ index 0000000..5bea43b +module_exit(imon_exit); diff --git a/drivers/input/lirc/lirc_it87.c b/drivers/input/lirc/lirc_it87.c new file mode 100644 -index 0000000..c69662d +index 0000000..a899d00 --- /dev/null +++ b/drivers/input/lirc/lirc_it87.c -@@ -0,0 +1,991 @@ +@@ -0,0 +1,1021 @@ +/* + * LIRC driver for ITE IT8712/IT8705 CIR port + * @@ -4702,7 +4748,6 @@ index 0000000..c69662d +#include +#include +#include -+#include +#include +#include +#include @@ -4716,6 +4761,7 @@ index 0000000..c69662d +#include + +#include ++#include + +#include +#include "lirc_dev.h" @@ -4776,6 +4822,8 @@ index 0000000..c69662d +unsigned int rx_tail, rx_head; +static int tx_buf[WBUF_LEN]; + ++static struct pnp_driver it87_pnp_driver; ++ +/* SECTION: Prototypes */ + +/* Communication with user-space */ @@ -5583,20 +5631,32 @@ index 0000000..c69662d + return 0; +} + -+ -+static int __init lirc_it87_init(void) ++static int it87_probe(struct pnp_dev *pnp_dev, ++ const struct pnp_device_id *dev_id) +{ + int retval; + ++ driver.dev = &pnp_dev->dev; ++ + retval = init_chrdev(); + if (retval < 0) + return retval; ++ + retval = init_lirc_it87(); -+ if (retval) { -+ drop_chrdev(); -+ return retval; -+ } ++ if (retval) ++ goto init_lirc_it87_failed; ++ + return 0; ++ ++init_lirc_it87_failed: ++ drop_chrdev(); ++ ++ return retval; ++} ++ ++static int __init lirc_it87_init(void) ++{ ++ return pnp_register_driver(&it87_pnp_driver); +} + + @@ -5605,9 +5665,25 @@ index 0000000..c69662d + drop_hardware(); + drop_chrdev(); + drop_port(); ++ pnp_unregister_driver(&it87_pnp_driver); + printk(KERN_INFO LIRC_DRIVER_NAME ": Uninstalled.\n"); +} + ++/* SECTION: PNP for ITE8704/18 */ ++ ++static const struct pnp_device_id pnp_dev_table[] = { ++ {"ITE8704", 0}, ++ {} ++}; ++ ++MODULE_DEVICE_TABLE(pnp, pnp_dev_table); ++ ++static struct pnp_driver it87_pnp_driver = { ++ .name = LIRC_DRIVER_NAME, ++ .id_table = pnp_dev_table, ++ .probe = it87_probe, ++}; ++ +module_init(lirc_it87_init); +module_exit(lirc_it87_exit); + @@ -5774,7 +5850,7 @@ index 0000000..cf021c8 +/********************************* ITE IT87xx ************************/ diff --git a/drivers/input/lirc/lirc_ite8709.c b/drivers/input/lirc/lirc_ite8709.c new file mode 100644 -index 0000000..6210847 +index 0000000..4c3d3ad --- /dev/null +++ b/drivers/input/lirc/lirc_ite8709.c @@ -0,0 
+1,540 @@ @@ -6187,8 +6263,8 @@ index 0000000..6210847 + ite8709_dev->use_count = 0; + ite8709_dev->irq = pnp_irq(dev, 0); + ite8709_dev->io = pnp_port_start(dev, 2); -+ ite8709_dev->hardware_lock = __SPIN_LOCK_UNLOCKED( -+ ite8709_dev->hardware_lock); ++ ite8709_dev->hardware_lock = ++ __SPIN_LOCK_UNLOCKED(ite8709_dev->hardware_lock); + ite8709_dev->acc_pulse = 0; + ite8709_dev->acc_space = 0; + ite8709_dev->lastbit = 0; @@ -6320,10 +6396,10 @@ index 0000000..6210847 +MODULE_PARM_DESC(debug, "Enable debugging messages"); diff --git a/drivers/input/lirc/lirc_mceusb.c b/drivers/input/lirc/lirc_mceusb.c new file mode 100644 -index 0000000..8b404e2 +index 0000000..c0869d8 --- /dev/null +++ b/drivers/input/lirc/lirc_mceusb.c -@@ -0,0 +1,1222 @@ +@@ -0,0 +1,1385 @@ +/* + * LIRC driver for Windows Media Center Edition USB Infrared Transceivers + * @@ -6488,6 +6564,8 @@ index 0000000..8b404e2 + { USB_DEVICE(VENDOR_TOPSEED, 0x0008) }, + /* Topseed eHome Infrared Transceiver */ + { USB_DEVICE(VENDOR_TOPSEED, 0x000a) }, ++ /* Topseed eHome Infrared Transceiver */ ++ { USB_DEVICE(VENDOR_TOPSEED, 0x0011) }, + /* Ricavision internal Infrared Transceiver */ + { USB_DEVICE(VENDOR_RICAVISION, 0x0010) }, + /* Itron ione Libra Q-11 */ @@ -6532,6 +6610,12 @@ index 0000000..8b404e2 + { } +}; + ++static struct usb_device_id gen3_list[] = { ++ { USB_DEVICE(VENDOR_PINNACLE, 0x0225) }, ++ { USB_DEVICE(VENDOR_TOPSEED, 0x0008) }, ++ {} ++}; ++ +static struct usb_device_id pinnacle_list[] = { + { USB_DEVICE(VENDOR_PINNACLE, 0x0225) }, + {} @@ -6553,6 +6637,7 @@ index 0000000..8b404e2 + { USB_DEVICE(VENDOR_TOPSEED, 0x0007) }, + { USB_DEVICE(VENDOR_TOPSEED, 0x0008) }, + { USB_DEVICE(VENDOR_TOPSEED, 0x000a) }, ++ { USB_DEVICE(VENDOR_TOPSEED, 0x0011) }, + { USB_DEVICE(VENDOR_PINNACLE, 0x0225) }, + {} +}; @@ -6580,7 +6665,7 @@ index 0000000..8b404e2 + unsigned char is_pulse; + struct { + u32 connected:1; -+ u32 pinnacle:1; ++ u32 gen3:1; + u32 transmitter_mask_inverted:1; + u32 microsoft_gen1:1; + u32 reserved:28; @@ -6596,18 +6681,49 @@ index 0000000..8b404e2 + struct mutex dev_lock; +}; + -+/* init strings */ -+static char init1[] = {0x00, 0xff, 0xaa, 0xff, 0x0b}; -+static char init2[] = {0xff, 0x18}; -+ -+static char pin_init1[] = { 0x9f, 0x07}; -+static char pin_init2[] = { 0x9f, 0x13}; -+static char pin_init3[] = { 0x9f, 0x0d}; -+ -+static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf, int len) ++/* ++ * MCE Device Command Strings ++ * Device command responses vary from device to device... 
++ * - DEVICE_RESET resets the hardware to its default state ++ * - GET_REVISION fetches the hardware/software revision, common ++ * replies are ff 0b 45 ff 1b 08 and ff 0b 50 ff 1b 42 ++ * - GET_CARRIER_FREQ gets the carrier mode and frequency of the ++ * device, with replies in the form of 9f 06 MM FF, where MM is 0-3, ++ * meaning clk of 10000000, 2500000, 625000 or 156250, and FF is ++ * ((clk / frequency) - 1) ++ * - GET_RX_TIMEOUT fetches the receiver timeout in units of 50us, ++ * response in the form of 9f 0c msb lsb ++ * - GET_TX_BITMASK fetches the transmitter bitmask, replies in ++ * the form of 9f 08 bm, where bm is the bitmask ++ * - GET_RX_SENSOR fetches the RX sensor setting -- long-range ++ * general use one or short-range learning one, in the form of ++ * 9f 14 ss, where ss is either 01 for long-range or 02 for short ++ * - SET_CARRIER_FREQ sets a new carrier mode and frequency ++ * - SET_TX_BITMASK sets the transmitter bitmask ++ * - SET_RX_TIMEOUT sets the receiver timeout ++ * - SET_RX_SENSOR sets which receiver sensor to use ++ */ ++static char DEVICE_RESET[] = {0x00, 0xff, 0xaa}; ++static char GET_REVISION[] = {0xff, 0x0b}; ++static char GET_UNKNOWN[] = {0xff, 0x18}; ++static char GET_CARRIER_FREQ[] = {0x9f, 0x07}; ++static char GET_RX_TIMEOUT[] = {0x9f, 0x0d}; ++static char GET_TX_BITMASK[] = {0x9f, 0x13}; ++static char GET_RX_SENSOR[] = {0x9f, 0x15}; ++/* sub in desired values in lower byte or bytes for full command */ ++//static char SET_CARRIER_FREQ[] = {0x9f, 0x06, 0x00, 0x00}; ++//static char SET_TX_BITMASK[] = {0x9f, 0x08, 0x00}; ++//static char SET_RX_TIMEOUT[] = {0x9f, 0x0c, 0x00, 0x00}; ++//static char SET_RX_SENSOR[] = {0x9f, 0x14, 0x00}; ++ ++static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf, ++ int len, bool out) +{ + char codes[USB_BUFLEN * 3 + 1]; ++ char inout[9]; + int i; ++ u8 cmd, subcmd, data1, data2; ++ struct device *dev = ir->d->dev; + + if (len <= 0) + return; @@ -6618,7 +6734,108 @@ index 0000000..8b404e2 + for (i = 0; i < len && i < USB_BUFLEN; i++) + snprintf(codes + i * 3, 4, "%02x ", buf[i] & 0xFF); + -+ dev_info(ir->d->dev, "data received %s (length=%d)\n", codes, len); ++ dev_info(dev, "%sbound data: %s (length=%d)\n", ++ (out ? 
"out" : " in"), codes, len); ++ ++ if (out) ++ strcpy(inout, "Request\0"); ++ else ++ strcpy(inout, "Got\0"); ++ ++ cmd = buf[0] & 0xff; ++ subcmd = buf[1] & 0xff; ++ data1 = buf[2] & 0xff; ++ data2 = buf[3] & 0xff; ++ ++ switch (cmd) { ++ case 0x00: ++ if (subcmd == 0xff && data1 == 0xaa) ++ dev_info(dev, "Device reset requested\n"); ++ else ++ dev_info(dev, "Unknown command 0x%02x 0x%02x\n", ++ cmd, subcmd); ++ break; ++ case 0xff: ++ switch (subcmd) { ++ case 0x0b: ++ if (len == 2) ++ dev_info(dev, "Get hw/sw rev?\n"); ++ else ++ dev_info(dev, "hw/sw rev 0x%02x 0x%02x " ++ "0x%02x 0x%02x\n", data1, data2, ++ buf[4], buf[5]); ++ break; ++ case 0xaa: ++ dev_info(dev, "Device reset requested\n"); ++ break; ++ case 0xfe: ++ dev_info(dev, "Previous command not supported\n"); ++ break; ++ case 0x18: ++ case 0x1b: ++ default: ++ dev_info(dev, "Unknown command 0x%02x 0x%02x\n", ++ cmd, subcmd); ++ break; ++ } ++ break; ++ case 0x9f: ++ switch (subcmd) { ++ case 0x03: ++ dev_info(dev, "Ping\n"); ++ break; ++ case 0x04: ++ dev_info(dev, "Resp to 9f 05 of 0x%02x 0x%02x\n", ++ data1, data2); ++ break; ++ case 0x06: ++ dev_info(dev, "%s carrier mode and freq of 0x%02x 0x%02x\n", ++ inout, data1, data2); ++ break; ++ case 0x07: ++ dev_info(dev, "Get carrier mode and freq\n"); ++ break; ++ case 0x08: ++ dev_info(dev, "%s transmit blaster mask of 0x%02x\n", ++ inout, data1); ++ break; ++ case 0x0c: ++ /* value is in units of 50us, so x*50/100 or x/2 ms */ ++ dev_info(dev, "%s receive timeout of %d ms\n", ++ inout, ((data1 << 8) | data2) / 2); ++ break; ++ case 0x0d: ++ dev_info(dev, "Get receive timeout\n"); ++ break; ++ case 0x13: ++ dev_info(dev, "Get transmit blaster mask\n"); ++ break; ++ case 0x14: ++ dev_info(dev, "%s %s-range receive sensor in use\n", ++ inout, data1 == 0x02 ? "short" : "long"); ++ break; ++ case 0x15: ++ if (len == 2) ++ dev_info(dev, "Get receive sensor\n"); ++ else ++ dev_info(dev, "Received pulse count is %d\n", ++ ((data1 << 8) | data2)); ++ break; ++ case 0xfe: ++ dev_info(dev, "Error! 
Hardware is likely wedged...\n"); ++ break; ++ case 0x05: ++ case 0x09: ++ case 0x0f: ++ default: ++ dev_info(dev, "Unknown command 0x%02x 0x%02x\n", ++ cmd, subcmd); ++ break; ++ } ++ break; ++ default: ++ break; ++ } +} + +static void usb_async_callback(struct urb *urb, struct pt_regs *regs) @@ -6637,54 +6854,49 @@ index 0000000..8b404e2 + urb->status, len); + + if (debug) -+ mceusb_dev_printdata(ir, urb->transfer_buffer, len); ++ mceusb_dev_printdata(ir, urb->transfer_buffer, len, true); + } + +} + +/* request incoming or send outgoing usb packet - used to initialize remote */ -+static void request_packet_async(struct mceusb_dev *ir, -+ struct usb_endpoint_descriptor *ep, -+ unsigned char *data, int size, int urb_type) ++static void mce_request_packet(struct mceusb_dev *ir, ++ struct usb_endpoint_descriptor *ep, ++ unsigned char *data, int size, int urb_type) +{ + int res; + struct urb *async_urb; + unsigned char *async_buf; + -+ if (urb_type) { ++ if (urb_type == MCEUSB_OUTBOUND) { + async_urb = usb_alloc_urb(0, GFP_KERNEL); -+ if (unlikely(!async_urb)) ++ if (unlikely(!async_urb)) { ++ dev_err(ir->d->dev, "Error, couldn't allocate urb!\n"); + return; ++ } + + async_buf = kzalloc(size, GFP_KERNEL); + if (!async_buf) { ++ dev_err(ir->d->dev, "Error, couldn't allocate buf!\n"); + usb_free_urb(async_urb); + return; + } + -+ if (urb_type == MCEUSB_OUTBOUND) { -+ /* outbound data */ -+ usb_fill_int_urb(async_urb, ir->usbdev, -+ usb_sndintpipe(ir->usbdev, -+ ep->bEndpointAddress), -+ async_buf, size, -+ (usb_complete_t) usb_async_callback, -+ ir, ep->bInterval); -+ memcpy(async_buf, data, size); -+ } else { -+ /* inbound data */ -+ usb_fill_int_urb(async_urb, ir->usbdev, -+ usb_rcvintpipe(ir->usbdev, -+ ep->bEndpointAddress), -+ async_buf, size, -+ (usb_complete_t) usb_async_callback, -+ ir, ep->bInterval); -+ } ++ /* outbound data */ ++ usb_fill_int_urb(async_urb, ir->usbdev, ++ usb_sndintpipe(ir->usbdev, ep->bEndpointAddress), ++ async_buf, size, (usb_complete_t) usb_async_callback, ++ ir, ep->bInterval); ++ memcpy(async_buf, data, size); + -+ } else { ++ } else if (urb_type == MCEUSB_INBOUND) { + /* standard request */ + async_urb = ir->urb_in; + ir->send_flags = RECV_FLAG_IN_PROGRESS; ++ ++ } else { ++ dev_err(ir->d->dev, "Error! 
Unknown urb type %d\n", urb_type); ++ return; + } + + dev_dbg(ir->d->dev, "receive request called (size=%#x)\n", size); @@ -6700,6 +6912,16 @@ index 0000000..8b404e2 + dev_dbg(ir->d->dev, "receive request complete (res=%d)\n", res); +} + ++static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size) ++{ ++ mce_request_packet(ir, ir->usb_ep_out, data, size, MCEUSB_OUTBOUND); ++} ++ ++static void mce_sync_in(struct mceusb_dev *ir, unsigned char *data, int size) ++{ ++ mce_request_packet(ir, ir->usb_ep_in, data, size, MCEUSB_INBOUND); ++} ++ +static int unregister_from_lirc(struct mceusb_dev *ir) +{ + struct lirc_driver *d = ir->d; @@ -6894,7 +7116,7 @@ index 0000000..8b404e2 + buf_len = urb->actual_length; + + if (debug) -+ mceusb_dev_printdata(ir, urb->transfer_buffer, buf_len); ++ mceusb_dev_printdata(ir, urb->transfer_buffer, buf_len, false); + + if (ir->send_flags == RECV_FLAG_IN_PROGRESS) { + ir->send_flags = SEND_FLAG_COMPLETE; @@ -6992,8 +7214,7 @@ index 0000000..8b404e2 + cmdbuf[cmdcount++] = 0x80; + + /* Transmit the command to the mce device */ -+ request_packet_async(ir, ir->usb_ep_out, cmdbuf, -+ cmdcount, MCEUSB_OUTBOUND); ++ mce_async_out(ir, cmdbuf, cmdcount); + + /* + * The lircd gap calculation expects the write function to @@ -7034,9 +7255,7 @@ index 0000000..8b404e2 + ir->carrier_freq = carrier; + dev_dbg(ir->d->dev, "SET_CARRIER disabling carrier " + "modulation\n"); -+ request_packet_async(ir, ir->usb_ep_out, -+ cmdbuf, sizeof(cmdbuf), -+ MCEUSB_OUTBOUND); ++ mce_async_out(ir, cmdbuf, sizeof(cmdbuf)); + return carrier; + } + @@ -7050,9 +7269,7 @@ index 0000000..8b404e2 + "%d Hz\n", carrier); + + /* Transmit new carrier to mce device */ -+ request_packet_async(ir, ir->usb_ep_out, -+ cmdbuf, sizeof(cmdbuf), -+ MCEUSB_OUTBOUND); ++ mce_async_out(ir, cmdbuf, sizeof(cmdbuf)); + return carrier; + } + } @@ -7229,8 +7446,9 @@ index 0000000..8b404e2 + int i; + char buf[63], name[128] = ""; + int mem_failure = 0; -+ int is_pinnacle; -+ int is_microsoft_gen1; ++ bool is_gen3; ++ bool is_microsoft_gen1; ++ bool is_pinnacle; + + dev_dbg(&intf->dev, ": %s called\n", __func__); + @@ -7240,10 +7458,12 @@ index 0000000..8b404e2 + + idesc = intf->cur_altsetting; + -+ is_pinnacle = usb_match_id(intf, pinnacle_list) ? 1 : 0; ++ is_gen3 = usb_match_id(intf, gen3_list) ? 1 : 0; + + is_microsoft_gen1 = usb_match_id(intf, microsoft_gen1_list) ? 1 : 0; + ++ is_pinnacle = usb_match_id(intf, pinnacle_list) ? 1 : 0; ++ + /* step through the endpoints to find first bulk in and out endpoint */ + for (i = 0; i < idesc->desc.bNumEndpoints; ++i) { + ep = &idesc->endpoint[i].desc; @@ -7260,13 +7480,14 @@ index 0000000..8b404e2 + "found\n"); + ep_in = ep; + ep_in->bmAttributes = USB_ENDPOINT_XFER_INT; -+ if (is_pinnacle) ++ if (!is_pinnacle) + /* -+ * setting seems to 1 seem to cause issues with -+ * Pinnacle timing out on transfer. ++ * Ideally, we'd use what the device offers up, ++ * but that leads to non-functioning first and ++ * second-gen devices, and many devices have an ++ * invalid bInterval of 0. Pinnacle devices ++ * don't work witha bInterval of 1 though. + */ -+ ep_in->bInterval = ep->bInterval; -+ else + ep_in->bInterval = 1; + } + @@ -7282,13 +7503,14 @@ index 0000000..8b404e2 + "found\n"); + ep_out = ep; + ep_out->bmAttributes = USB_ENDPOINT_XFER_INT; -+ if (is_pinnacle) ++ if (!is_pinnacle) + /* -+ * setting seems to 1 seem to cause issues with -+ * Pinnacle timing out on transfer. 
++ * Ideally, we'd use what the device offers up, ++ * but that leads to non-functioning first and ++ * second-gen devices, and many devices have an ++ * invalid bInterval of 0. Pinnacle devices ++ * don't work witha bInterval of 1 though. + */ -+ ep_out->bInterval = ep->bInterval; -+ else + ep_out->bInterval = 1; + } + } @@ -7354,7 +7576,7 @@ index 0000000..8b404e2 + ir->len_in = maxp; + ir->overflow_len = 0; + ir->flags.connected = 0; -+ ir->flags.pinnacle = is_pinnacle; ++ ir->flags.gen3 = is_gen3; + ir->flags.microsoft_gen1 = is_microsoft_gen1; + ir->flags.transmitter_mask_inverted = + usb_match_id(intf, transmitter_mask_list) ? 0 : 1; @@ -7384,8 +7606,7 @@ index 0000000..8b404e2 + ir->urb_in->transfer_dma = ir->dma_in; + ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + -+ /* initialize device */ -+ if (ir->flags.pinnacle) { ++ if (is_pinnacle) { + int usbret; + + /* @@ -7396,46 +7617,64 @@ index 0000000..8b404e2 + * interpreted by the device and the host never does the + * completion routine + */ -+ + usbret = usb_reset_configuration(dev); + dev_info(ir->d->dev, "usb reset config ret %x\n", usbret); ++ } ++ ++ /* initialize device */ ++ if (ir->flags.gen3) { ++ mce_sync_in(ir, NULL, maxp); ++ ++ /* device reset */ ++ mce_async_out(ir, DEVICE_RESET, sizeof(DEVICE_RESET)); ++ mce_sync_in(ir, NULL, maxp); ++ ++ /* get the carrier and frequency */ ++ mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ)); ++ mce_sync_in(ir, NULL, maxp); ++ ++ /* get the transmitter bitmask */ ++ mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK)); ++ mce_sync_in(ir, NULL, maxp); ++ ++ /* get receiver timeout value */ ++ mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT)); ++ mce_sync_in(ir, NULL, maxp); ++ ++ /* get receiver sensor setting */ ++ mce_async_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR)); ++ mce_sync_in(ir, NULL, maxp); + -+ /* -+ * its possible we really should wait for a return -+ * for each of these... -+ */ -+ request_packet_async(ir, ep_in, NULL, maxp, MCEUSB_INBOUND); -+ request_packet_async(ir, ep_out, pin_init1, sizeof(pin_init1), -+ MCEUSB_OUTBOUND); -+ request_packet_async(ir, ep_in, NULL, maxp, MCEUSB_INBOUND); -+ request_packet_async(ir, ep_out, pin_init2, sizeof(pin_init2), -+ MCEUSB_OUTBOUND); -+ request_packet_async(ir, ep_in, NULL, maxp, MCEUSB_INBOUND); -+ request_packet_async(ir, ep_out, pin_init3, sizeof(pin_init3), -+ MCEUSB_OUTBOUND); + } else if (ir->flags.microsoft_gen1) { + /* original ms mce device requires some additional setup */ + mceusb_gen1_init(ir); ++ + } else { ++ mce_sync_in(ir, NULL, maxp); ++ mce_sync_in(ir, NULL, maxp); ++ ++ /* device reset */ ++ mce_async_out(ir, DEVICE_RESET, sizeof(DEVICE_RESET)); ++ mce_sync_in(ir, NULL, maxp); + -+ request_packet_async(ir, ep_in, NULL, maxp, MCEUSB_INBOUND); -+ request_packet_async(ir, ep_in, NULL, maxp, MCEUSB_INBOUND); -+ request_packet_async(ir, ep_out, init1, -+ sizeof(init1), MCEUSB_OUTBOUND); -+ request_packet_async(ir, ep_in, NULL, maxp, MCEUSB_INBOUND); -+ request_packet_async(ir, ep_out, init2, -+ sizeof(init2), MCEUSB_OUTBOUND); ++ /* get hw/sw revision? */ ++ mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION)); ++ mce_sync_in(ir, NULL, maxp); ++ ++ /* unknown what this actually returns... 
*/ ++ mce_async_out(ir, GET_UNKNOWN, sizeof(GET_UNKNOWN)); ++ mce_sync_in(ir, NULL, maxp); + } + + /* -+ * if we don't issue the correct number of receives (MCEUSB_INBOUND) ++ * if we don't issue the correct number of receives (mce_sync_in()) + * for each outbound, then the first few ir pulses will be interpreted + * by the usb_async_callback routine - we should ensure we have the + * right amount OR less - as the mceusb_dev_recv routine will handle + * the control packets OK - they start with 0x9f - but the async + * callback doesn't handle ir pulse packets + */ -+ request_packet_async(ir, ep_in, NULL, maxp, 0); ++ mce_sync_in(ir, NULL, maxp); + + usb_set_intfdata(intf, ir); + @@ -11844,10 +12083,10 @@ index 0000000..4a471d6 +MODULE_PARM_DESC(debug, "Enable debugging messages"); diff --git a/drivers/input/lirc/lirc_streamzap.c b/drivers/input/lirc/lirc_streamzap.c new file mode 100644 -index 0000000..f4374e8 +index 0000000..87db864 --- /dev/null +++ b/drivers/input/lirc/lirc_streamzap.c -@@ -0,0 +1,794 @@ +@@ -0,0 +1,821 @@ +/* + * Streamzap Remote Control driver + * @@ -11920,6 +12159,7 @@ index 0000000..f4374e8 + +#define STREAMZAP_PULSE_MASK 0xf0 +#define STREAMZAP_SPACE_MASK 0x0f ++#define STREAMZAP_TIMEOUT 0xff +#define STREAMZAP_RESOLUTION 256 + +/* number of samples buffered */ @@ -11990,6 +12230,7 @@ index 0000000..f4374e8 + struct timer_list flush_timer; + int flush; + int in_use; ++ int timeout_enabled; +}; + + @@ -12138,12 +12379,14 @@ index 0000000..f4374e8 + + deltv = sz->signal_start.tv_sec-sz->signal_last.tv_sec; + if (deltv > 15) { -+ tmp = PULSE_MASK; /* really long time */ ++ /* really long time */ ++ tmp = LIRC_SPACE(LIRC_VALUE_MASK); + } else { + tmp = (int) (deltv*1000000+ + sz->signal_start.tv_usec - + sz->signal_last.tv_usec); + tmp -= sz->sum; ++ tmp = LIRC_SPACE(tmp); + } + dprintk("ls %u", sz->driver->minor, tmp); + push(sz, (char *)&tmp); @@ -12155,7 +12398,7 @@ index 0000000..f4374e8 + pulse = ((int) value) * STREAMZAP_RESOLUTION; + pulse += STREAMZAP_RESOLUTION / 2; + sz->sum += pulse; -+ pulse |= PULSE_BIT; ++ pulse = LIRC_PULSE(pulse); + + dprintk("p %u", sz->driver->minor, pulse & PULSE_MASK); + push(sz, (char *)&pulse); @@ -12175,6 +12418,7 @@ index 0000000..f4374e8 + space = ((int) value)*STREAMZAP_RESOLUTION; + space += STREAMZAP_RESOLUTION/2; + sz->sum += space; ++ space = LIRC_SPACE(space); + dprintk("s %u", sz->driver->minor, space); + push(sz, (char *)&space); +} @@ -12243,9 +12487,16 @@ index 0000000..f4374e8 + sz->decoder_state = IgnorePulse; + break; + case FullSpace: -+ if (sz->buf_in[i] == 0xff) { ++ if (sz->buf_in[i] == STREAMZAP_TIMEOUT) { + sz->idle = 1; + stop_timer(sz); ++ if (sz->timeout_enabled) { ++ int timeout = ++ LIRC_TIMEOUT ++ (STREAMZAP_TIMEOUT * ++ STREAMZAP_RESOLUTION); ++ push(sz, (char *)&timeout); ++ } + flush_delay_buffer(sz); + } else + push_full_space(sz, sz->buf_in[i]); @@ -12375,8 +12626,12 @@ index 0000000..f4374e8 + sz->driver->minor = -1; + sz->driver->sample_rate = 0; + sz->driver->code_length = sizeof(int) * 8; -+ sz->driver->features = LIRC_CAN_REC_MODE2 | LIRC_CAN_GET_REC_RESOLUTION; ++ sz->driver->features = LIRC_CAN_REC_MODE2 | ++ LIRC_CAN_GET_REC_RESOLUTION | ++ LIRC_CAN_SET_REC_TIMEOUT; + sz->driver->data = sz; ++ sz->driver->min_timeout = STREAMZAP_TIMEOUT * STREAMZAP_RESOLUTION; ++ sz->driver->max_timeout = STREAMZAP_TIMEOUT * STREAMZAP_RESOLUTION; + sz->driver->rbuf = lirc_buf; + sz->delay_buf = delay_buf; + sz->driver->set_use_inc = &streamzap_use_inc; @@ -12507,18 +12762,29 @@ index 0000000..f4374e8 
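The streamzap samples above are now pushed as packed mode2 values rather than bare durations: LIRC_PULSE(), LIRC_SPACE() and LIRC_TIMEOUT() (from the include/linux/lirc.h added by this patch) keep a 24-bit duration in the low bytes and the sample type in the top byte. A rough sketch of how a reader could unpack them; the helper below is illustrative only, with the constants copied from that header.

#include <stdio.h>

#define LIRC_VALUE_MASK      0x00FFFFFF
#define LIRC_MODE2_MASK      0xFF000000
#define LIRC_MODE2_SPACE     0x00000000
#define LIRC_MODE2_PULSE     0x01000000
#define LIRC_MODE2_TIMEOUT   0x03000000

static void print_sample(unsigned int sample)
{
        unsigned int us = sample & LIRC_VALUE_MASK;

        switch (sample & LIRC_MODE2_MASK) {
        case LIRC_MODE2_PULSE:
                printf("pulse %u us\n", us);
                break;
        case LIRC_MODE2_SPACE:
                printf("space %u us\n", us);
                break;
        case LIRC_MODE2_TIMEOUT:
                printf("timeout after %u us\n", us);
                break;
        default:
                printf("other sample 0x%08x\n", sample);
                break;
        }
}

int main(void)
{
        /* e.g. LIRC_PULSE(1000) packs to 0x010003e8 */
        print_sample(0x010003e8);
        /* STREAMZAP_TIMEOUT * STREAMZAP_RESOLUTION = 0xff * 256 = 65280 us */
        print_sample(0x03000000 | 65280);
        return 0;
}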
+static int streamzap_ioctl(struct inode *node, struct file *filep, + unsigned int cmd, unsigned long arg) +{ -+ int result; ++ int result = 0; ++ int val; ++ struct usb_streamzap *sz = lirc_get_pdata(filep); + + switch (cmd) { + case LIRC_GET_REC_RESOLUTION: + result = put_user(STREAMZAP_RESOLUTION, (unsigned int *) arg); -+ if (result) -+ return result; ++ break; ++ case LIRC_SET_REC_TIMEOUT: ++ result = get_user(val, (int *)arg); ++ if (result == 0) { ++ if (val == STREAMZAP_TIMEOUT * STREAMZAP_RESOLUTION) ++ sz->timeout_enabled = 1; ++ else if (val == 0) ++ sz->timeout_enabled = 0; ++ else ++ result = -EINVAL; ++ } + break; + default: + return lirc_dev_fop_ioctl(node, filep, cmd, arg); + } -+ return 0; ++ return result; +} + +/** @@ -13047,10 +13313,10 @@ index 0000000..b0a4e8b +module_exit(ttusbir_exit_module); diff --git a/drivers/input/lirc/lirc_zilog.c b/drivers/input/lirc/lirc_zilog.c new file mode 100644 -index 0000000..3a5bc34 +index 0000000..9f73430 --- /dev/null +++ b/drivers/input/lirc/lirc_zilog.c -@@ -0,0 +1,1396 @@ +@@ -0,0 +1,1388 @@ +/* + * i2c IR lirc driver for devices with zilog IR processors + * @@ -14251,14 +14517,6 @@ index 0000000..3a5bc34 + dprintk("%s: adapter id=0x%x, client addr=0x%02x\n", + __func__, adap->id, client->addr); + -+ /* if this isn't an appropriate device, bail w/-ENODEV now */ -+ if (!(adap->id == I2C_HW_B_BT848 || -+#ifdef I2C_HW_B_HDPVR -+ adap->id == I2C_HW_B_HDPVR || -+#endif -+ adap->id == I2C_HW_B_CX2341X)) -+ goto out_nodev; -+ + /* + * The external IR receiver is at i2c address 0x71. + * The IR transmitter is at 0x70. @@ -14314,7 +14572,7 @@ index 0000000..3a5bc34 + memcpy(&ir->c_rx, client, sizeof(struct i2c_client)); + + ir->c_rx.addr = 0x71; -+ strncpy(ir->c_rx.name, ZILOG_HAUPPAUGE_IR_RX_NAME, ++ strlcpy(ir->c_rx.name, ZILOG_HAUPPAUGE_IR_RX_NAME, + I2C_NAME_SIZE); + + /* try to fire up polling thread */ @@ -14335,7 +14593,7 @@ index 0000000..3a5bc34 + if (have_tx) { + memcpy(&ir->c_tx, client, sizeof(struct i2c_client)); + ir->c_tx.addr = 0x70; -+ strncpy(ir->c_tx.name, ZILOG_HAUPPAUGE_IR_TX_NAME, ++ strlcpy(ir->c_tx.name, ZILOG_HAUPPAUGE_IR_TX_NAME, + I2C_NAME_SIZE); + ir->have_tx = 1; + } @@ -14448,10 +14706,10 @@ index 0000000..3a5bc34 +module_param(disable_tx, bool, 0644); +MODULE_PARM_DESC(disable_tx, "Disable the IR transmitter device"); diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig -index 16ec523..1196110 100644 +index 23140a3..7085225 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig -@@ -319,4 +319,16 @@ config INPUT_PCAP +@@ -340,4 +340,16 @@ config INPUT_PCAP To compile this driver as a module, choose M here: the module will be called pcap_keys. 
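To exercise the new streamzap timeout path from user space: the driver advertises LIRC_CAN_SET_REC_TIMEOUT with identical min_timeout and max_timeout, so the only values its LIRC_SET_REC_TIMEOUT handler above accepts are that one figure (0xff * 256 = 65280 us) or 0 to switch timeout packets off. A rough sketch of the call sequence, again assuming a /dev/lirc0 node and installed lirc headers:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/lirc.h>

int main(void)
{
        uint32_t timeout = 0;
        int fd = open("/dev/lirc0", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/lirc0");
                return 1;
        }
        /* for streamzap both bounds report the same value */
        if (ioctl(fd, LIRC_GET_MAX_TIMEOUT, &timeout) < 0)
                perror("LIRC_GET_MAX_TIMEOUT");
        else if (ioctl(fd, LIRC_SET_REC_TIMEOUT, &timeout) < 0)
                perror("LIRC_SET_REC_TIMEOUT");
        else
                printf("timeout reports enabled at %u us\n",
                       (unsigned int)timeout);
        close(fd);
        return 0;
}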
@@ -14469,10 +14727,10 @@ index 16ec523..1196110 100644 + endif diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile -index a8b8485..79358ff 100644 +index 7e95a5d..8918ce7 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile -@@ -13,6 +13,7 @@ obj-$(CONFIG_INPUT_CM109) += cm109.o +@@ -14,6 +14,7 @@ obj-$(CONFIG_INPUT_CM109) += cm109.o obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o @@ -14482,10 +14740,10 @@ index a8b8485..79358ff 100644 obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o diff --git a/drivers/input/misc/imon.c b/drivers/input/misc/imon.c new file mode 100644 -index 0000000..71223e2 +index 0000000..58a2130 --- /dev/null +++ b/drivers/input/misc/imon.c -@@ -0,0 +1,2430 @@ +@@ -0,0 +1,2523 @@ +/* + * imon.c: input and display driver for SoundGraph iMON IR/VFD/LCD + * @@ -14770,7 +15028,11 @@ index 0000000..71223e2 + .minor_base = DISPLAY_MINOR_BASE, +}; + -+/* standard imon remote key table */ ++/* ++ * standard imon remote key table, which isn't really entirely ++ * "standard", as different receivers decode the same key on the ++ * same remote to different hex codes... ugh. ++ */ +static const struct key_entry imon_remote_key_table[] = { + /* keys sorted mostly by frequency of use to optimize lookups */ + { KE_KEY, 0x2a8195b7, { KEY_REWIND } }, @@ -14797,10 +15059,14 @@ index 0000000..71223e2 + { KE_KEY, 0x2ba515b7, { KEY_RIGHT } }, + + { KE_KEY, 0x0200002c, { KEY_SPACE } }, /* Select/Space */ ++ { KE_KEY, 0x2a9315b7, { KEY_SPACE } }, /* Select/Space */ + { KE_KEY, 0x02000028, { KEY_ENTER } }, ++ { KE_KEY, 0x28a195b7, { KEY_ENTER } }, + { KE_KEY, 0x288195b7, { KEY_EXIT } }, + { KE_KEY, 0x02000029, { KEY_ESC } }, ++ { KE_KEY, 0x2bb715b7, { KEY_ESC } }, + { KE_KEY, 0x0200002a, { KEY_BACKSPACE } }, ++ { KE_KEY, 0x28a115b7, { KEY_BACKSPACE } }, + + { KE_KEY, 0x2b9595b7, { KEY_MUTE } }, + { KE_KEY, 0x28a395b7, { KEY_VOLUMEUP } }, @@ -14819,8 +15085,21 @@ index 0000000..71223e2 + { KE_KEY, 0x02000026, { KEY_NUMERIC_9 } }, + { KE_KEY, 0x02000027, { KEY_NUMERIC_0 } }, + ++ { KE_KEY, 0x28b595b7, { KEY_NUMERIC_1 } }, ++ { KE_KEY, 0x2bb195b7, { KEY_NUMERIC_2 } }, ++ { KE_KEY, 0x28b195b7, { KEY_NUMERIC_3 } }, ++ { KE_KEY, 0x2a8595b7, { KEY_NUMERIC_4 } }, ++ { KE_KEY, 0x299595b7, { KEY_NUMERIC_5 } }, ++ { KE_KEY, 0x2aa595b7, { KEY_NUMERIC_6 } }, ++ { KE_KEY, 0x2b9395b7, { KEY_NUMERIC_7 } }, ++ { KE_KEY, 0x2a8515b7, { KEY_NUMERIC_8 } }, ++ { KE_KEY, 0x2aa115b7, { KEY_NUMERIC_9 } }, ++ { KE_KEY, 0x2ba595b7, { KEY_NUMERIC_0 } }, ++ + { KE_KEY, 0x02200025, { KEY_NUMERIC_STAR } }, ++ { KE_KEY, 0x28b515b7, { KEY_NUMERIC_STAR } }, + { KE_KEY, 0x02200020, { KEY_NUMERIC_POUND } }, ++ { KE_KEY, 0x29a115b7, { KEY_NUMERIC_POUND } }, + + { KE_KEY, 0x2b8515b7, { KEY_VIDEO } }, + { KE_KEY, 0x299195b7, { KEY_AUDIO } }, @@ -14848,6 +15127,8 @@ index 0000000..71223e2 + { KE_KEY, 0x01020000, { BTN_RIGHT } }, + { KE_KEY, 0x01010080, { BTN_LEFT } }, + { KE_KEY, 0x01020080, { BTN_RIGHT } }, ++ { KE_KEY, 0x688301b7, { BTN_LEFT } }, ++ { KE_KEY, 0x688481b7, { BTN_RIGHT } }, + + { KE_KEY, 0x2a9395b7, { KEY_CYCLEWINDOWS } }, /* TaskSwitcher */ + { KE_KEY, 0x2b8395b7, { KEY_TIME } }, /* Timer */ @@ -14856,9 +15137,11 @@ index 0000000..71223e2 + { KE_KEY, 0x29b195b7, { KEY_EJECTCD } }, /* the one next to play */ + { KE_KEY, 0x299395b7, { KEY_EJECTCLOSECD } }, /* eject (by TaskSw) */ + -+ { KE_KEY, 0x02800000, { KEY_MENU } }, /* Left Menu */ ++ { KE_KEY, 0x02800000, { KEY_CONTEXT_MENU } }, /* Left 
Menu */ ++ { KE_KEY, 0x2b8195b7, { KEY_CONTEXT_MENU } }, /* Left Menu*/ + { KE_KEY, 0x02000065, { KEY_COMPOSE } }, /* RightMenu */ -+ { KE_KEY, 0x2ab195b7, { KEY_PROG1 } }, /* Go */ ++ { KE_KEY, 0x28b715b7, { KEY_COMPOSE } }, /* RightMenu */ ++ { KE_KEY, 0x2ab195b7, { KEY_PROG1 } }, /* Go or MultiMon */ + { KE_KEY, 0x29b715b7, { KEY_DASHBOARD } }, /* AppLauncher */ + { KE_END, 0 } +}; @@ -14866,33 +15149,43 @@ index 0000000..71223e2 +/* mce-mode imon mce remote key table */ +static const struct key_entry imon_mce_key_table[] = { + /* keys sorted mostly by frequency of use to optimize lookups */ -+ { KE_KEY, 0x800f8415, { KEY_REWIND } }, -+ { KE_KEY, 0x800f8414, { KEY_FASTFORWARD } }, -+ { KE_KEY, 0x800f841b, { KEY_PREVIOUS } }, -+ { KE_KEY, 0x800f841a, { KEY_NEXT } }, ++ { KE_KEY, 0x800ff415, { KEY_REWIND } }, ++ { KE_KEY, 0x800ff414, { KEY_FASTFORWARD } }, ++ { KE_KEY, 0x800ff41b, { KEY_PREVIOUS } }, ++ { KE_KEY, 0x800ff41a, { KEY_NEXT } }, + -+ { KE_KEY, 0x800f8416, { KEY_PLAY } }, -+ { KE_KEY, 0x800f8418, { KEY_PAUSE } }, -+ { KE_KEY, 0x800f8418, { KEY_PAUSE } }, -+ { KE_KEY, 0x800f8419, { KEY_STOP } }, -+ { KE_KEY, 0x800f8417, { KEY_RECORD } }, ++ { KE_KEY, 0x800ff416, { KEY_PLAY } }, ++ { KE_KEY, 0x800ff418, { KEY_PAUSE } }, ++ { KE_KEY, 0x800ff419, { KEY_STOP } }, ++ { KE_KEY, 0x800ff417, { KEY_RECORD } }, + + { KE_KEY, 0x02000052, { KEY_UP } }, + { KE_KEY, 0x02000051, { KEY_DOWN } }, + { KE_KEY, 0x02000050, { KEY_LEFT } }, + { KE_KEY, 0x0200004f, { KEY_RIGHT } }, + ++ { KE_KEY, 0x800ff41e, { KEY_UP } }, ++ { KE_KEY, 0x800ff41f, { KEY_DOWN } }, ++ { KE_KEY, 0x800ff420, { KEY_LEFT } }, ++ { KE_KEY, 0x800ff421, { KEY_RIGHT } }, ++ ++ /* 0x800ff40b also KEY_NUMERIC_POUND on some receivers */ ++ { KE_KEY, 0x800ff40b, { KEY_ENTER } }, + { KE_KEY, 0x02000028, { KEY_ENTER } }, -+/* the OK and Enter buttons decode to the same value ++/* the OK and Enter buttons decode to the same value on some remotes + { KE_KEY, 0x02000028, { KEY_OK } }, */ ++ { KE_KEY, 0x800ff422, { KEY_OK } }, + { KE_KEY, 0x0200002a, { KEY_EXIT } }, ++ { KE_KEY, 0x800ff423, { KEY_EXIT } }, + { KE_KEY, 0x02000029, { KEY_DELETE } }, ++ /* 0x800ff40a also KEY_NUMERIC_STAR on some receivers */ ++ { KE_KEY, 0x800ff40a, { KEY_DELETE } }, + -+ { KE_KEY, 0x800f840e, { KEY_MUTE } }, -+ { KE_KEY, 0x800f8410, { KEY_VOLUMEUP } }, -+ { KE_KEY, 0x800f8411, { KEY_VOLUMEDOWN } }, -+ { KE_KEY, 0x800f8412, { KEY_CHANNELUP } }, -+ { KE_KEY, 0x800f8413, { KEY_CHANNELDOWN } }, ++ { KE_KEY, 0x800ff40e, { KEY_MUTE } }, ++ { KE_KEY, 0x800ff410, { KEY_VOLUMEUP } }, ++ { KE_KEY, 0x800ff411, { KEY_VOLUMEDOWN } }, ++ { KE_KEY, 0x800ff412, { KEY_CHANNELUP } }, ++ { KE_KEY, 0x800ff413, { KEY_CHANNELDOWN } }, + + { KE_KEY, 0x0200001e, { KEY_NUMERIC_1 } }, + { KE_KEY, 0x0200001f, { KEY_NUMERIC_2 } }, @@ -14905,28 +15198,55 @@ index 0000000..71223e2 + { KE_KEY, 0x02000026, { KEY_NUMERIC_9 } }, + { KE_KEY, 0x02000027, { KEY_NUMERIC_0 } }, + ++ { KE_KEY, 0x800ff401, { KEY_NUMERIC_1 } }, ++ { KE_KEY, 0x800ff402, { KEY_NUMERIC_2 } }, ++ { KE_KEY, 0x800ff403, { KEY_NUMERIC_3 } }, ++ { KE_KEY, 0x800ff404, { KEY_NUMERIC_4 } }, ++ { KE_KEY, 0x800ff405, { KEY_NUMERIC_5 } }, ++ { KE_KEY, 0x800ff406, { KEY_NUMERIC_6 } }, ++ { KE_KEY, 0x800ff407, { KEY_NUMERIC_7 } }, ++ { KE_KEY, 0x800ff408, { KEY_NUMERIC_8 } }, ++ { KE_KEY, 0x800ff409, { KEY_NUMERIC_9 } }, ++ { KE_KEY, 0x800ff400, { KEY_NUMERIC_0 } }, ++ + { KE_KEY, 0x02200025, { KEY_NUMERIC_STAR } }, + { KE_KEY, 0x02200020, { KEY_NUMERIC_POUND } }, -+ -+ { KE_KEY, 0x800f8446, { KEY_TV } }, -+ { KE_KEY, 0x800f8447, 
{ KEY_AUDIO } }, -+ { KE_KEY, 0x800f8448, { KEY_PVR } }, /* RecordedTV */ -+ { KE_KEY, 0x800f8449, { KEY_CAMERA } }, -+ { KE_KEY, 0x800f844a, { KEY_VIDEO } }, -+ { KE_KEY, 0x800f8424, { KEY_DVD } }, -+ { KE_KEY, 0x800f8425, { KEY_TUNER } }, /* LiveTV */ -+ -+ { KE_KEY, 0x800f845b, { KEY_RED } }, -+ { KE_KEY, 0x800f845c, { KEY_GREEN } }, -+ { KE_KEY, 0x800f845d, { KEY_YELLOW } }, -+ { KE_KEY, 0x800f845e, { KEY_BLUE } }, -+ -+ { KE_KEY, 0x800f840f, { KEY_INFO } }, -+ { KE_KEY, 0x800f8426, { KEY_EPG } }, /* Guide */ -+ { KE_KEY, 0x800f845a, { KEY_SUBTITLE } }, /* Caption */ -+ -+ { KE_KEY, 0x800f840c, { KEY_POWER } }, -+ { KE_KEY, 0x800f840d, { KEY_PROG1 } }, /* Windows MCE button */ ++ /* 0x800ff41d also KEY_BLUE on some receivers */ ++ { KE_KEY, 0x800ff41d, { KEY_NUMERIC_STAR } }, ++ /* 0x800ff41c also KEY_PREVIOUS on some receivers */ ++ { KE_KEY, 0x800ff41c, { KEY_NUMERIC_POUND } }, ++ ++ { KE_KEY, 0x800ff446, { KEY_TV } }, ++ { KE_KEY, 0x800ff447, { KEY_AUDIO } }, /* My Music */ ++ { KE_KEY, 0x800ff448, { KEY_PVR } }, /* RecordedTV */ ++ { KE_KEY, 0x800ff449, { KEY_CAMERA } }, ++ { KE_KEY, 0x800ff44a, { KEY_VIDEO } }, ++ /* 0x800ff424 also KEY_MENU on some receivers */ ++ { KE_KEY, 0x800ff424, { KEY_DVD } }, ++ /* 0x800ff425 also KEY_GREEN on some receivers */ ++ { KE_KEY, 0x800ff425, { KEY_TUNER } }, /* LiveTV */ ++ { KE_KEY, 0x800ff450, { KEY_RADIO } }, ++ ++ { KE_KEY, 0x800ff44c, { KEY_LANGUAGE } }, ++ { KE_KEY, 0x800ff427, { KEY_ZOOM } }, /* Aspect */ ++ ++ { KE_KEY, 0x800ff45b, { KEY_RED } }, ++ { KE_KEY, 0x800ff45c, { KEY_GREEN } }, ++ { KE_KEY, 0x800ff45d, { KEY_YELLOW } }, ++ { KE_KEY, 0x800ff45e, { KEY_BLUE } }, ++ ++ { KE_KEY, 0x800ff466, { KEY_RED } }, ++ /* { KE_KEY, 0x800ff425, { KEY_GREEN } }, */ ++ { KE_KEY, 0x800ff468, { KEY_YELLOW } }, ++ /* { KE_KEY, 0x800ff41d, { KEY_BLUE } }, */ ++ ++ { KE_KEY, 0x800ff40f, { KEY_INFO } }, ++ { KE_KEY, 0x800ff426, { KEY_EPG } }, /* Guide */ ++ { KE_KEY, 0x800ff45a, { KEY_SUBTITLE } }, /* Caption/Teletext */ ++ { KE_KEY, 0x800ff44d, { KEY_TITLE } }, ++ ++ { KE_KEY, 0x800ff40c, { KEY_POWER } }, ++ { KE_KEY, 0x800ff40d, { KEY_PROG1 } }, /* Windows MCE button */ + { KE_END, 0 } + +}; @@ -14936,25 +15256,28 @@ index 0000000..71223e2 + u64 hw_code; + u16 keycode; +} imon_panel_key_table[] = { -+ { 0x000000000f000fee, KEY_PROG1 }, /* Go */ -+ { 0x000000001f000fee, KEY_AUDIO }, -+ { 0x0000000020000fee, KEY_VIDEO }, -+ { 0x0000000021000fee, KEY_CAMERA }, -+ { 0x0000000027000fee, KEY_DVD }, -+/* the TV key on my panel is broken, doesn't work under any OS -+ { 0x0000000000000fee, KEY_TV }, */ -+ { 0x0000000005000fee, KEY_PREVIOUS }, -+ { 0x0000000007000fee, KEY_REWIND }, -+ { 0x0000000004000fee, KEY_STOP }, -+ { 0x000000003c000fee, KEY_PLAYPAUSE }, -+ { 0x0000000008000fee, KEY_FASTFORWARD }, -+ { 0x0000000006000fee, KEY_NEXT }, -+ { 0x0000000100000fee, KEY_RIGHT }, -+ { 0x0000010000000fee, KEY_LEFT }, -+ { 0x000000003d000fee, KEY_SELECT }, -+ { 0x0001000000000fee, KEY_VOLUMEUP }, -+ { 0x0100000000000fee, KEY_VOLUMEDOWN }, -+ { 0x0000000001000fee, KEY_MUTE }, ++ { 0x000000000f00ffee, KEY_PROG1 }, /* Go */ ++ { 0x000000001f00ffee, KEY_AUDIO }, ++ { 0x000000002000ffee, KEY_VIDEO }, ++ { 0x000000002100ffee, KEY_CAMERA }, ++ { 0x000000002700ffee, KEY_DVD }, ++ { 0x000000002300ffee, KEY_TV }, ++ { 0x000000000500ffee, KEY_PREVIOUS }, ++ { 0x000000000700ffee, KEY_REWIND }, ++ { 0x000000000400ffee, KEY_STOP }, ++ { 0x000000003c00ffee, KEY_PLAYPAUSE }, ++ { 0x000000000800ffee, KEY_FASTFORWARD }, ++ { 0x000000000600ffee, KEY_NEXT }, ++ { 0x000000010000ffee, 
KEY_RIGHT }, ++ { 0x000001000000ffee, KEY_LEFT }, ++ { 0x000000003d00ffee, KEY_SELECT }, ++ { 0x000100000000ffee, KEY_VOLUMEUP }, ++ { 0x010000000000ffee, KEY_VOLUMEDOWN }, ++ { 0x000000000100ffee, KEY_MUTE }, ++ /* iMON Knob values */ ++ { 0x000100ffffffffee, KEY_VOLUMEUP }, ++ { 0x010000ffffffffee, KEY_VOLUMEDOWN }, ++ { 0x000008ffffffffee, KEY_MUTE }, +}; + +/* to prevent races between open() and disconnect(), probing, etc */ @@ -15612,6 +15935,33 @@ index 0000000..71223e2 +} + +/** ++ * mce/rc6 keypresses have no distinct release code, use timer ++ */ ++static void imon_mce_timeout(unsigned long data) ++{ ++ struct imon_context *ictx = (struct imon_context *)data; ++ ++ input_report_key(ictx->idev, ictx->last_keycode, 0); ++ input_sync(ictx->idev); ++} ++ ++/** ++ * report touchscreen input ++ */ ++static void imon_touch_display_timeout(unsigned long data) ++{ ++ struct imon_context *ictx = (struct imon_context *)data; ++ ++ if (!ictx->display_type == IMON_DISPLAY_TYPE_VGA) ++ return; ++ ++ input_report_abs(ictx->touch, ABS_X, ictx->touch_x); ++ input_report_abs(ictx->touch, ABS_Y, ictx->touch_y); ++ input_report_key(ictx->touch, BTN_TOUCH, 0x00); ++ input_sync(ictx->touch); ++} ++ ++/** + * iMON IR receivers support two different signal sets -- those used by + * the iMON remotes, and those used by the Windows MCE remotes (which is + * really just RC-6), but only one or the other at a time, as the signals @@ -15630,6 +15980,9 @@ index 0000000..71223e2 + ir_proto_packet[0] = 0x01; + ictx->ir_protocol = IMON_IR_PROTOCOL_MCE; + ictx->pad_mouse = 0; ++ init_timer(&ictx->itimer); ++ ictx->itimer.data = (unsigned long)ictx; ++ ictx->itimer.function = imon_mce_timeout; + break; + case IMON_IR_PROTOCOL_IMON: + dev_dbg(dev, "Configuring IR receiver for iMON protocol\n"); @@ -15784,12 +16137,22 @@ index 0000000..71223e2 + int i; + u32 code = be32_to_cpu(hw_code); + -+ for (i = 0; i < ARRAY_SIZE(imon_mce_key_table); i++) -+ if (imon_mce_key_table[i].code == code) -+ return i; ++#define MCE_KEY_MASK 0x7000 ++#define MCE_TOGGLE_BIT 0x8000 ++ ++ /* ++ * On some receivers, mce keys decode to 0x8000f04xx and 0x8000f84xx ++ * (the toggle bit flipping between alternating key presses), while ++ * on other receivers, we see 0x8000f74xx and 0x8000ff4xx. To keep ++ * the table trim, we always or in the bits to look up 0x8000ff4xx, ++ * but we can't or them into all codes, as some keys are decoded in ++ * a different way w/o the same use of the toggle bit... 
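++ * For example, after this normalization 0x800f0415, 0x800f7415 and
++ * 0x800f8415 (toggle bit set) all look up the same 0x800ff415 entry,
++ * KEY_REWIND in the table above.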
++ */ ++ if ((code >> 24) & 0x80) ++ code = code | MCE_KEY_MASK | MCE_TOGGLE_BIT; + + for (i = 0; i < ARRAY_SIZE(imon_mce_key_table); i++) -+ if (imon_mce_key_table[i].code == (code | 0x8000)) ++ if (imon_mce_key_table[i].code == code) + return i; + + return -1; @@ -15801,7 +16164,7 @@ index 0000000..71223e2 + u64 code = be64_to_cpu(hw_code); + + for (i = 0; i < ARRAY_SIZE(imon_panel_key_table); i++) -+ if (imon_panel_key_table[i].hw_code == (code | 0xfee)) ++ if (imon_panel_key_table[i].hw_code == (code | 0xffee)) + return i; + + return -1; @@ -15999,6 +16362,16 @@ index 0000000..71223e2 + if (ictx->ki == -1 && buf[0] == 0x02 && buf[3] == 0x00) + ictx->kc = ictx->last_keycode; + ++ /* mouse button release on (some) 0xffdc devices */ ++ else if (ictx->ki == -1 && buf[0] == 0x68 && buf[1] == 0x82 && ++ buf[2] == 0x81 && buf[3] == 0xb7) ++ ictx->kc = ictx->last_keycode; ++ ++ /* mouse button release on (some other) 0xffdc devices */ ++ else if (ictx->ki == -1 && buf[0] == 0x01 && buf[1] == 0x00 && ++ buf[2] == 0x81 && buf[3] == 0xb7) ++ ictx->kc = ictx->last_keycode; ++ + /* mce-specific button handling */ + else if (ksrc == IMON_BUTTON_MCE) { + /* initial press */ @@ -16046,7 +16419,7 @@ index 0000000..71223e2 + int offset = IMON_KEY_RELEASE_OFFSET; + u64 temp_key; + u64 panel_key = 0; -+ u32 remote_key; ++ u32 remote_key = 0; + struct input_dev *idev = NULL; + int press_type = 0; + int msec; @@ -16056,38 +16429,51 @@ index 0000000..71223e2 + + idev = ictx->idev; + ++ /* filter out junk data on the older 0xffdc imon devices */ ++ if ((buf[0] == 0xff) && (buf[7] == 0xff)) ++ return; ++ + /* Figure out what key was pressed */ + memcpy(&temp_key, buf, sizeof(temp_key)); + if (len == 8 && buf[7] == 0xee) { + ksrc = IMON_BUTTON_PANEL; + panel_key = le64_to_cpu(temp_key); + ki = imon_panel_key_lookup(panel_key); -+ kc = imon_panel_key_table[ki].keycode; ++ if (ki < 0) ++ kc = KEY_UNKNOWN; ++ else ++ kc = imon_panel_key_table[ki].keycode; + } else { + remote_key = (u32) (le64_to_cpu(temp_key) & 0xffffffff); + if (ictx->ir_protocol == IMON_IR_PROTOCOL_MCE) { + if (buf[0] == 0x80) + ksrc = IMON_BUTTON_MCE; + ki = imon_mce_key_lookup(remote_key); -+ kc = imon_mce_key_table[ki].keycode; ++ if (ki < 0) ++ kc = KEY_UNKNOWN; ++ else ++ kc = imon_mce_key_table[ki].keycode; + } else { + ki = imon_remote_key_lookup(remote_key); -+ kc = imon_remote_key_table[ki % offset].keycode; ++ if (ki < 0) ++ kc = KEY_UNKNOWN; ++ else ++ kc = imon_remote_key_table[ki % offset].keycode; + } + } + + /* keyboard/mouse mode toggle button */ + if (kc == KEY_KEYBOARD && ki < offset) { ++ ictx->last_keycode = kc; + if (!nomouse) { + ictx->pad_mouse = ~(ictx->pad_mouse) & 0x1; + dev_dbg(dev, "toggling to %s mode\n", + ictx->pad_mouse ? "mouse" : "keyboard"); ++ return; + } else { + ictx->pad_mouse = 0; -+ dev_dbg(dev, "mouse mode was disabled by modparam\n"); ++ dev_dbg(dev, "mouse mode disabled, passing key value\n"); + } -+ ictx->last_keycode = kc; -+ return; + } + + ictx->ki = ki; @@ -16124,6 +16510,9 @@ index 0000000..71223e2 + if (press_type < 0) + goto not_input_data; + ++ if (ictx->kc == KEY_UNKNOWN) ++ goto unknown_key; ++ + /* KEY_MUTE repeats from MCE and knob need to be suppressed */ + if ((ictx->kc == KEY_MUTE && ictx->kc == ictx->last_keycode) + && (buf[7] == 0xee || ksrc == IMON_BUTTON_MCE)) { @@ -16147,6 +16536,12 @@ index 0000000..71223e2 + + return; + ++unknown_key: ++ dev_info(dev, "%s: unknown keypress, code 0x%llx\n", __func__, ++ (panel_key ? 
be64_to_cpu(panel_key) : ++ be32_to_cpu(remote_key))); ++ return; ++ +not_input_data: + if (len != 8) { + dev_warn(dev, "imon %s: invalid incoming packet " @@ -16169,40 +16564,11 @@ index 0000000..71223e2 +} + +/** -+ * mce/rc6 keypresses have no distinct release code, use timer -+ */ -+static void imon_mce_timeout(unsigned long data) -+{ -+ struct imon_context *ictx = (struct imon_context *)data; -+ -+ input_report_key(ictx->idev, ictx->last_keycode, 0); -+ input_sync(ictx->idev); -+} -+ -+/** -+ * report touchscreen input -+ */ -+static void imon_touch_display_timeout(unsigned long data) -+{ -+ struct imon_context *ictx = (struct imon_context *)data; -+ -+ if (!ictx->display_type == IMON_DISPLAY_TYPE_VGA) -+ return; -+ -+ input_report_abs(ictx->touch, ABS_X, ictx->touch_x); -+ input_report_abs(ictx->touch, ABS_Y, ictx->touch_y); -+ input_report_key(ictx->touch, BTN_TOUCH, 0x00); -+ input_sync(ictx->touch); -+} -+ -+/** + * Callback function for USB core API: receive data + */ +static void usb_rx_callback_intf0(struct urb *urb) +{ + struct imon_context *ictx; -+ unsigned char *buf; -+ int len; + int intfnum = 0; + + if (!urb) @@ -16212,9 +16578,6 @@ index 0000000..71223e2 + if (!ictx) + return; + -+ buf = urb->transfer_buffer; -+ len = urb->actual_length; -+ + switch (urb->status) { + case -ENOENT: /* usbcore unlink successful! */ + return; @@ -16238,8 +16601,6 @@ index 0000000..71223e2 +static void usb_rx_callback_intf1(struct urb *urb) +{ + struct imon_context *ictx; -+ unsigned char *buf; -+ int len; + int intfnum = 1; + + if (!urb) @@ -16249,9 +16610,6 @@ index 0000000..71223e2 + if (!ictx) + return; + -+ buf = urb->transfer_buffer; -+ len = urb->actual_length; -+ + switch (urb->status) { + case -ENOENT: /* usbcore unlink successful! */ + return; @@ -16505,12 +16863,6 @@ index 0000000..71223e2 + + mutex_lock(&ictx->lock); + -+ if (ir_protocol == IMON_IR_PROTOCOL_MCE) { -+ init_timer(&ictx->itimer); -+ ictx->itimer.data = (unsigned long)ictx; -+ ictx->itimer.function = imon_mce_timeout; -+ } -+ + ictx->dev = dev; + ictx->usbdev_intf0 = usb_get_dev(interface_to_usbdev(intf)); + ictx->dev_present_intf0 = 1; @@ -16727,7 +17079,6 @@ index 0000000..71223e2 + first_if = usb_ifnum_to_if(usbdev, 0); + first_if_ctx = (struct imon_context *)usb_get_intfdata(first_if); + -+ + if (ifnum == 0) { + ictx = imon_init_intf0(interface); + if (!ictx) { @@ -16916,3 +17267,168 @@ index 0000000..71223e2 + +module_init(imon_init); +module_exit(imon_exit); +diff --git a/include/linux/lirc.h b/include/linux/lirc.h +new file mode 100644 +index 0000000..9ca6876 +--- /dev/null ++++ b/include/linux/lirc.h +@@ -0,0 +1,159 @@ ++/* ++ * lirc.h - linux infrared remote control header file ++ * last modified 2007/09/27 ++ */ ++ ++#ifndef _LINUX_LIRC_H ++#define _LINUX_LIRC_H ++ ++#include ++#include ++ ++/* */ ++#define PULSE_BIT 0x01000000 ++#define PULSE_MASK 0x00FFFFFF ++/* */ ++ ++#define LIRC_MODE2_SPACE 0x00000000 ++#define LIRC_MODE2_PULSE 0x01000000 ++#define LIRC_MODE2_FREQUENCY 0x02000000 ++#define LIRC_MODE2_TIMEOUT 0x03000000 ++ ++#define LIRC_VALUE_MASK 0x00FFFFFF ++#define LIRC_MODE2_MASK 0xFF000000 ++ ++#define LIRC_SPACE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_SPACE) ++#define LIRC_PULSE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_PULSE) ++#define LIRC_FREQUENCY(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_FREQUENCY) ++#define LIRC_TIMEOUT(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_TIMEOUT) ++ ++#define LIRC_VALUE(val) ((val)&LIRC_VALUE_MASK) ++#define LIRC_MODE2(val) ((val)&LIRC_MODE2_MASK) ++ ++#define 
LIRC_IS_SPACE(val) (LIRC_MODE2(val) == LIRC_MODE2_SPACE) ++#define LIRC_IS_PULSE(val) (LIRC_MODE2(val) == LIRC_MODE2_PULSE) ++#define LIRC_IS_FREQUENCY(val) (LIRC_MODE2(val) == LIRC_MODE2_FREQUENCY) ++#define LIRC_IS_TIMEOUT(val) (LIRC_MODE2(val) == LIRC_MODE2_TIMEOUT) ++ ++/*** lirc compatible hardware features ***/ ++ ++#define LIRC_MODE2SEND(x) (x) ++#define LIRC_SEND2MODE(x) (x) ++#define LIRC_MODE2REC(x) ((x) << 16) ++#define LIRC_REC2MODE(x) ((x) >> 16) ++ ++#define LIRC_MODE_RAW 0x00000001 ++#define LIRC_MODE_PULSE 0x00000002 ++#define LIRC_MODE_MODE2 0x00000004 ++#define LIRC_MODE_LIRCCODE 0x00000010 ++ ++ ++#define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW) ++#define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE) ++#define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2) ++#define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE) ++ ++#define LIRC_CAN_SEND_MASK 0x0000003f ++ ++#define LIRC_CAN_SET_SEND_CARRIER 0x00000100 ++#define LIRC_CAN_SET_SEND_DUTY_CYCLE 0x00000200 ++#define LIRC_CAN_SET_TRANSMITTER_MASK 0x00000400 ++ ++#define LIRC_CAN_REC_RAW LIRC_MODE2REC(LIRC_MODE_RAW) ++#define LIRC_CAN_REC_PULSE LIRC_MODE2REC(LIRC_MODE_PULSE) ++#define LIRC_CAN_REC_MODE2 LIRC_MODE2REC(LIRC_MODE_MODE2) ++#define LIRC_CAN_REC_LIRCCODE LIRC_MODE2REC(LIRC_MODE_LIRCCODE) ++ ++#define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK) ++ ++#define LIRC_CAN_SET_REC_CARRIER (LIRC_CAN_SET_SEND_CARRIER << 16) ++#define LIRC_CAN_SET_REC_DUTY_CYCLE (LIRC_CAN_SET_SEND_DUTY_CYCLE << 16) ++ ++#define LIRC_CAN_SET_REC_DUTY_CYCLE_RANGE 0x40000000 ++#define LIRC_CAN_SET_REC_CARRIER_RANGE 0x80000000 ++#define LIRC_CAN_GET_REC_RESOLUTION 0x20000000 ++#define LIRC_CAN_SET_REC_TIMEOUT 0x10000000 ++#define LIRC_CAN_SET_REC_FILTER 0x08000000 ++ ++#define LIRC_CAN_MEASURE_CARRIER 0x02000000 ++ ++#define LIRC_CAN_SEND(x) ((x)&LIRC_CAN_SEND_MASK) ++#define LIRC_CAN_REC(x) ((x)&LIRC_CAN_REC_MASK) ++ ++#define LIRC_CAN_NOTIFY_DECODE 0x01000000 ++ ++/*** IOCTL commands for lirc driver ***/ ++ ++#define LIRC_GET_FEATURES _IOR('i', 0x00000000, unsigned long) ++ ++#define LIRC_GET_SEND_MODE _IOR('i', 0x00000001, unsigned long) ++#define LIRC_GET_REC_MODE _IOR('i', 0x00000002, unsigned long) ++#define LIRC_GET_SEND_CARRIER _IOR('i', 0x00000003, unsigned int) ++#define LIRC_GET_REC_CARRIER _IOR('i', 0x00000004, unsigned int) ++#define LIRC_GET_SEND_DUTY_CYCLE _IOR('i', 0x00000005, unsigned int) ++#define LIRC_GET_REC_DUTY_CYCLE _IOR('i', 0x00000006, unsigned int) ++#define LIRC_GET_REC_RESOLUTION _IOR('i', 0x00000007, unsigned int) ++ ++#define LIRC_GET_MIN_TIMEOUT _IOR('i', 0x00000008, uint32_t) ++#define LIRC_GET_MAX_TIMEOUT _IOR('i', 0x00000009, uint32_t) ++ ++#define LIRC_GET_MIN_FILTER_PULSE _IOR('i', 0x0000000a, uint32_t) ++#define LIRC_GET_MAX_FILTER_PULSE _IOR('i', 0x0000000b, uint32_t) ++#define LIRC_GET_MIN_FILTER_SPACE _IOR('i', 0x0000000c, uint32_t) ++#define LIRC_GET_MAX_FILTER_SPACE _IOR('i', 0x0000000d, uint32_t) ++ ++/* code length in bits, currently only for LIRC_MODE_LIRCCODE */ ++#define LIRC_GET_LENGTH _IOR('i', 0x0000000f, unsigned long) ++ ++#define LIRC_SET_SEND_MODE _IOW('i', 0x00000011, unsigned long) ++#define LIRC_SET_REC_MODE _IOW('i', 0x00000012, unsigned long) ++/* Note: these can reset the according pulse_width */ ++#define LIRC_SET_SEND_CARRIER _IOW('i', 0x00000013, unsigned int) ++#define LIRC_SET_REC_CARRIER _IOW('i', 0x00000014, unsigned int) ++#define LIRC_SET_SEND_DUTY_CYCLE _IOW('i', 0x00000015, unsigned int) ++#define LIRC_SET_REC_DUTY_CYCLE _IOW('i', 
0x00000016, unsigned int) ++#define LIRC_SET_TRANSMITTER_MASK _IOW('i', 0x00000017, unsigned int) ++ ++/* ++ * when a timeout != 0 is set the driver will send a ++ * LIRC_MODE2_TIMEOUT data packet, otherwise LIRC_MODE2_TIMEOUT is ++ * never sent, timeout is disabled by default ++ */ ++#define LIRC_SET_REC_TIMEOUT _IOW('i', 0x00000018, uint32_t) ++ ++/* ++ * pulses shorter than this are filtered out by hardware (software ++ * emulation in lirc_dev?) ++ */ ++#define LIRC_SET_REC_FILTER_PULSE _IOW('i', 0x00000019, uint32_t) ++/* ++ * spaces shorter than this are filtered out by hardware (software ++ * emulation in lirc_dev?) ++ */ ++#define LIRC_SET_REC_FILTER_SPACE _IOW('i', 0x0000001a, uint32_t) ++/* ++ * if filter cannot be set independantly for pulse/space, this should ++ * be used ++ */ ++#define LIRC_SET_REC_FILTER _IOW('i', 0x0000001b, uint32_t) ++ ++/* ++ * to set a range use ++ * LIRC_SET_REC_DUTY_CYCLE_RANGE/LIRC_SET_REC_CARRIER_RANGE with the ++ * lower bound first and later ++ * LIRC_SET_REC_DUTY_CYCLE/LIRC_SET_REC_CARRIER with the upper bound ++ */ ++ ++#define LIRC_SET_REC_DUTY_CYCLE_RANGE _IOW('i', 0x0000001e, unsigned int) ++#define LIRC_SET_REC_CARRIER_RANGE _IOW('i', 0x0000001f, unsigned int) ++ ++#define LIRC_NOTIFY_DECODE _IO('i', 0x00000020) ++ ++/* ++ * from the next key press on the driver will send ++ * LIRC_MODE2_FREQUENCY packets ++ */ ++#define LIRC_MEASURE_CARRIER_ENABLE _IO('i', 0x00000021) ++#define LIRC_MEASURE_CARRIER_DISABLE _IO('i', 0x00000022) ++ ++#endif diff --git a/mac80211-do-not-wipe-out-old-supported-rates.patch b/mac80211-do-not-wipe-out-old-supported-rates.patch new file mode 100644 index 0000000..3c300df --- /dev/null +++ b/mac80211-do-not-wipe-out-old-supported-rates.patch @@ -0,0 +1,70 @@ +From: Stanislaw Gruszka +To: kernel@lists.fedoraproject.org, "John W. Linville" +Subject: [PATCH 3/4 2.6.33.y] mac80211: do not wip out old supported rates +Date: Fri, 11 Jun 2010 17:04:19 +0200 + +commit f0b058b61711ebf5be94d6865ca7b2c259b71d37 upstream. + +Use old supported rates, if some buggy AP do not provide +supported rates information element in managment frame. 
+ +Signed-off-by: Stanislaw Gruszka +--- + net/mac80211/scan.c | 21 +++++++++++---------- + 1 files changed, 11 insertions(+), 10 deletions(-) + +diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c +index bc17cf7..697dc54 100644 +--- a/net/mac80211/scan.c ++++ b/net/mac80211/scan.c +@@ -60,7 +60,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local, + bool beacon) + { + struct ieee80211_bss *bss; +- int clen; ++ int clen, srlen; + s32 signal = 0; + + if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) +@@ -92,23 +92,24 @@ ieee80211_bss_info_update(struct ieee80211_local *local, + if (bss->dtim_period == 0) + bss->dtim_period = 1; + +- bss->supp_rates_len = 0; ++ /* replace old supported rates if we get new values */ ++ srlen = 0; + if (elems->supp_rates) { +- clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; ++ clen = IEEE80211_MAX_SUPP_RATES; + if (clen > elems->supp_rates_len) + clen = elems->supp_rates_len; +- memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates, +- clen); +- bss->supp_rates_len += clen; ++ memcpy(bss->supp_rates, elems->supp_rates, clen); ++ srlen += clen; + } + if (elems->ext_supp_rates) { +- clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; ++ clen = IEEE80211_MAX_SUPP_RATES - srlen; + if (clen > elems->ext_supp_rates_len) + clen = elems->ext_supp_rates_len; +- memcpy(&bss->supp_rates[bss->supp_rates_len], +- elems->ext_supp_rates, clen); +- bss->supp_rates_len += clen; ++ memcpy(bss->supp_rates + srlen, elems->ext_supp_rates, clen); ++ srlen += clen; + } ++ if (srlen) ++ bss->supp_rates_len = srlen; + + bss->wmm_used = elems->wmm_param || elems->wmm_info; + +-- +1.6.2.5 + +_______________________________________________ +kernel mailing list +kernel@lists.fedoraproject.org +https://admin.fedoraproject.org/mailman/listinfo/kernel + diff --git a/mac80211-explicitly-disable-enable-QoS.patch b/mac80211-explicitly-disable-enable-QoS.patch index 3d00051..6bd4fcd 100644 --- a/mac80211-explicitly-disable-enable-QoS.patch +++ b/mac80211-explicitly-disable-enable-QoS.patch @@ -1,7 +1,7 @@ From: Stanislaw Gruszka To: kernel@lists.fedoraproject.org, "John W. Linville" -Subject: [PATCH 1/4 2.6.34.y] mac80211: explicitly disable/enable QoS -Date: Fri, 11 Jun 2010 17:05:11 +0200 +Subject: [PATCH 1/4 2.6.33.y] mac80211: explicitly disable/enable QoS +Date: Fri, 11 Jun 2010 17:04:17 +0200 commit e1b3ec1a2a336c328c336cfa5485a5f0484cc90d upstream. @@ -11,7 +11,6 @@ Disabling is needed for some APs, which do not support QoS, such we should send QoS frames to them. Signed-off-by: Stanislaw Gruszka -Signed-off-by: John W. Linville --- include/net/mac80211.h | 5 +++++ net/mac80211/mlme.c | 9 ++++++++- @@ -19,10 +18,10 @@ Signed-off-by: John W. Linville 3 files changed, 18 insertions(+), 1 deletions(-) diff --git a/include/net/mac80211.h b/include/net/mac80211.h -index 45d7d44..ea607d6 100644 +index f39b303..8c1f0ee 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h -@@ -580,11 +580,15 @@ struct ieee80211_rx_status { +@@ -577,11 +577,15 @@ struct ieee80211_rx_status { * may turn the device off as much as possible. Typically, this flag will * be set when an interface is set UP but not associated or scanning, but * it can also be unset in that case when monitor interfaces are active. 
@@ -38,7 +37,7 @@ index 45d7d44..ea607d6 100644 }; -@@ -609,6 +613,7 @@ enum ieee80211_conf_changed { +@@ -604,6 +608,7 @@ enum ieee80211_conf_changed { IEEE80211_CONF_CHANGE_CHANNEL = BIT(6), IEEE80211_CONF_CHANGE_RETRY_LIMITS = BIT(7), IEEE80211_CONF_CHANGE_IDLE = BIT(8), @@ -47,22 +46,22 @@ index 45d7d44..ea607d6 100644 /** diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c -index 875c8de..1b80e2b 100644 +index 1a209ac..950088d 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c -@@ -592,6 +592,9 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local, +@@ -798,6 +798,9 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local, int count; - u8 *pos, uapsd_queues = 0; + u8 *pos; + if (!local->ops->conf_tx) + return; + - if (local->hw.queues < 4) + if (!(ifmgd->flags & IEEE80211_STA_WMM_ENABLED)) return; -@@ -666,11 +669,15 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local, - params.aifs, params.cw_min, params.cw_max, params.txop, - params.uapsd); +@@ -856,11 +859,15 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local, + wiphy_name(local->hw.wiphy), queue, aci, acm, + params.aifs, params.cw_min, params.cw_max, params.txop); #endif - if (drv_conf_tx(local, queue, ¶ms) && local->ops->conf_tx) + if (drv_conf_tx(local, queue, ¶ms)) @@ -78,10 +77,10 @@ index 875c8de..1b80e2b 100644 static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/util.c b/net/mac80211/util.c -index 53af570..582f43a 100644 +index 27212e8..9e35dcb 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c -@@ -796,6 +796,11 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata) +@@ -795,6 +795,11 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata) drv_conf_tx(local, queue, &qparam); } diff --git a/mac80211-fix-supported-rates-IE-if-AP-doesnt-give-us-its-rates.patch b/mac80211-fix-supported-rates-IE-if-AP-doesnt-give-us-its-rates.patch new file mode 100644 index 0000000..87ea093 --- /dev/null +++ b/mac80211-fix-supported-rates-IE-if-AP-doesnt-give-us-its-rates.patch @@ -0,0 +1,123 @@ +Return-path: +X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on + bombadil.infradead.org +X-Spam-Level: +X-Spam-Status: No, score=-0.0 required=5.0 tests=T_RP_MATCHES_RCVD + autolearn=ham version=3.3.1 +Envelope-to: kyle@mcmartin.ca +Delivery-date: Fri, 11 Jun 2010 15:05:50 +0000 +Received: from bastion02.fedoraproject.org ([209.132.181.3] helo=bastion.fedoraproject.org) + by bombadil.infradead.org with esmtp (Exim 4.72 #1 (Red Hat Linux)) + id 1ON5np-0006qq-A4 + for kyle@mcmartin.ca; Fri, 11 Jun 2010 15:05:50 +0000 +Received: from lists.fedoraproject.org (collab1.vpn.fedoraproject.org [192.168.1.21]) + by bastion02.phx2.fedoraproject.org (Postfix) with ESMTP id C8AFE110FA4; + Fri, 11 Jun 2010 15:05:48 +0000 (UTC) +Received: from collab1.fedoraproject.org (localhost.localdomain [127.0.0.1]) + by lists.fedoraproject.org (Postfix) with ESMTP id 7CD7932677B; + Fri, 11 Jun 2010 15:05:48 +0000 (UTC) +X-Original-To: kernel@lists.fedoraproject.org +Delivered-To: kernel@lists.fedoraproject.org +Received: from smtp-mm2.fedoraproject.org (smtp-mm2.fedoraproject.org + [66.35.62.164]) + by lists.fedoraproject.org (Postfix) with ESMTP id EBB823267E8 + for ; + Fri, 11 Jun 2010 15:05:45 +0000 (UTC) +Received: from mx1.redhat.com (mx1.redhat.com [209.132.183.28]) + by smtp-mm2.fedoraproject.org (Postfix) with ESMTP id 5F31DE71E6 + for ; + Fri, 11 Jun 2010 15:05:45 +0000 (UTC) +Received: from 
int-mx01.intmail.prod.int.phx2.redhat.com + (int-mx01.intmail.prod.int.phx2.redhat.com [10.5.11.11]) + by mx1.redhat.com (8.13.8/8.13.8) with ESMTP id o5BF5ifi002333 + (version=TLSv1/SSLv3 cipher=DHE-RSA-AES256-SHA bits=256 verify=OK) + for ; Fri, 11 Jun 2010 11:05:45 -0400 +Received: from localhost (vpn-10-251.rdu.redhat.com [10.11.10.251]) + by int-mx01.intmail.prod.int.phx2.redhat.com (8.13.8/8.13.8) with ESMTP + id o5BF5h8Z029086; Fri, 11 Jun 2010 11:05:44 -0400 +From: Stanislaw Gruszka +To: kernel@lists.fedoraproject.org, "John W. Linville" +Subject: [PATCH 4/4 2.6.33.y] mac80211: fix supported rates IE if AP doesn't + give us it's rates +Date: Fri, 11 Jun 2010 17:04:20 +0200 +Message-Id: <1276268660-18830-4-git-send-email-sgruszka@redhat.com> +In-Reply-To: <1276268660-18830-3-git-send-email-sgruszka@redhat.com> +References: <1276268660-18830-1-git-send-email-sgruszka@redhat.com> + <1276268660-18830-2-git-send-email-sgruszka@redhat.com> + <1276268660-18830-3-git-send-email-sgruszka@redhat.com> +X-Scanned-By: MIMEDefang 2.67 on 10.5.11.11 +Cc: Stanislaw Gruszka +X-BeenThere: kernel@lists.fedoraproject.org +X-Mailman-Version: 2.1.9 +Precedence: list +List-Id: "Fedora kernel development." +List-Unsubscribe: , + +List-Archive: +List-Post: +List-Help: +List-Subscribe: , + +MIME-Version: 1.0 +Content-Type: text/plain; charset="us-ascii" +Content-Transfer-Encoding: 7bit +Sender: kernel-bounces@lists.fedoraproject.org +Errors-To: kernel-bounces@lists.fedoraproject.org +X-CRM114-Version: 20090807-BlameThorstenAndJenny ( TRE 0.7.6 (BSD) ) MR-646709E3 +X-CRM114-CacheID: sfid-20100611_110549_564657_0ED6FEC7 +X-CRM114-Status: GOOD ( 17.72 ) +Content-Length: 1846 + +commit 76f273640134f3eb8257179cd5b3bc6ba5fe4a96 upstream. + +If AP do not provide us supported rates before assiociation, send +all rates we are supporting instead of empty information element. + +Signed-off-by: Stanislaw Gruszka +--- + net/mac80211/mlme.c | 17 +++++++++++------ + 1 files changed, 11 insertions(+), 6 deletions(-) + +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c +index 950088d..aa90100 100644 +--- a/net/mac80211/mlme.c ++++ b/net/mac80211/mlme.c +@@ -270,12 +270,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata, + if (wk->bss->wmm_used) + wmm = 1; + +- /* get all rates supported by the device and the AP as +- * some APs don't like getting a superset of their rates +- * in the association request (e.g. D-Link DAP 1353 in +- * b-only mode) */ +- rates_len = ieee80211_compatible_rates(wk->bss, sband, &rates); +- + if ((wk->bss->cbss.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) && + (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)) + capab |= WLAN_CAPABILITY_SPECTRUM_MGMT; +@@ -310,6 +304,17 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata, + *pos++ = wk->ssid_len; + memcpy(pos, wk->ssid, wk->ssid_len); + ++ if (wk->bss->supp_rates_len) { ++ /* get all rates supported by the device and the AP as ++ * some APs don't like getting a superset of their rates ++ * in the association request (e.g. 
D-Link DAP 1353 in ++ * b-only mode) */ ++ rates_len = ieee80211_compatible_rates(wk->bss, sband, &rates); ++ } else { ++ rates = ~0; ++ rates_len = sband->n_bitrates; ++ } ++ + /* add all rates which were marked to be used above */ + supp_rates_len = rates_len; + if (supp_rates_len > 8) +-- +1.6.2.5 + +_______________________________________________ +kernel mailing list +kernel@lists.fedoraproject.org +https://admin.fedoraproject.org/mailman/listinfo/kernel + diff --git a/matroxfb-fix-font-corruption.patch b/matroxfb-fix-font-corruption.patch deleted file mode 100644 index a3f03a5..0000000 --- a/matroxfb-fix-font-corruption.patch +++ /dev/null @@ -1,23 +0,0 @@ -Fix incorrect use of memcpy_toio() in matroxfb that broke in 2.6.34. - -Signed-off-by: Ondrej Zary - ---- linux-2.6.35-rc2/drivers/video/matrox/matroxfb_base.h 2010-06-06 05:43:24.000000000 +0200 -+++ linux-2.6.35-rc3/drivers/video/matrox/matroxfb_base.h 2010-08-03 18:13:46.000000000 +0200 -@@ -151,13 +151,13 @@ static inline void mga_writel(vaddr_t va - static inline void mga_memcpy_toio(vaddr_t va, const void* src, int len) { - #if defined(__alpha__) || defined(__i386__) || defined(__x86_64__) - /* -- * memcpy_toio works for us if: -+ * iowrite32_rep works for us if: - * (1) Copies data as 32bit quantities, not byte after byte, - * (2) Performs LE ordered stores, and - * (3) It copes with unaligned source (destination is guaranteed to be page - * aligned and length is guaranteed to be multiple of 4). - */ -- memcpy_toio(va.vaddr, src, len); -+ iowrite32_rep(va.vaddr, src, len >> 2); - #else - u_int32_t __iomem* addr = va.vaddr; - - diff --git a/ntrig-backport.patch b/ntrig-backport.patch new file mode 100644 index 0000000..69ffd77 --- /dev/null +++ b/ntrig-backport.patch @@ -0,0 +1,512 @@ +commit 094693bee2b1fd8628846040f4a07cc4725b6011 +Author: Rafi Rubin +Date: Mon May 3 05:08:30 2010 -0400 + + HID: ntrig: Remove unused macro, TripleTap and QuadTap + + Removing the higher number taps. Their usage was incorrect + and even if correct they should not be used for a touch screen. + _MT_ events should be used to communicate multiple fingers. + + Signed-off-by: Rafi Rubin + Signed-off-by: Jiri Kosina + +commit e4ca1ed6c39920c6be60e26c8b1c61b82792906b +Author: Rafi Rubin +Date: Mon May 3 05:08:29 2010 -0400 + + HID: ntrig: TipSwitch for single touch mode touch. + + Include TipSwitch in the touch detection decision for some single touch + firmwares. Confidence and InRange are high for all finger events + including those used to indicate the finger is no longer in contact with + the sensor. + + Signed-off-by: Rafi Rubin + Signed-off-by: Jiri Kosina + +commit 2a85cd7c45cf219fe27e5f8c031f954fdec637fd +Author: Rafi Rubin +Date: Fri Apr 9 17:58:25 2010 -0400 + + HID: ntrig: Emit TOUCH with DOUBLETAP for single touch + + I squelched TipSwitch in a recent patch which resulted in the loss + of Touch events for single touch firmwares. This patch just puts Touch back + in for single touch, and bundles it with DoubleTap (like the multitouch code). + The two events are used to convey the same message to different drivers. + + Signed-off-by: Rafi Rubin + Signed-off-by: Jiri Kosina + +commit 811b16c09476e2a72736dd55f3fe62549c4d722c +Author: Jiri Kosina +Date: Wed Apr 7 12:10:29 2010 +0200 + + HID: ntrig: explain firmware quirk + + Commit 6549981bc54777c ("HID: fix N-trig touch panel with recent firmware") + adds a quirk that is needed for devices with more recent firmware so that + they become operational. 
+
+ As it's not directly obvious from the code why it is needed, a comment
+ is worthwhile.
+
+ Signed-off-by: Jiri Kosina
+
+commit aa604050185ad5b17e8bf838d73d76382ee3e3e5
+Author: Stephane Chatty
+Date: Tue Apr 6 22:22:58 2010 +0200
+
+ HID: fix N-trig touch panel with recent firmware
+
+ Added an init message that avoids device freeze with recent firmware.
+
+ Signed-off-by: Stephane Chatty
+ Tested-by: Rafi Rubin
+ Signed-off-by: Jiri Kosina
+
+commit 26196d5d0f6884f632e978103f435e2ba001a7ce
+Author: Rafi Rubin
+Date: Wed Mar 10 16:10:28 2010 +0100
+
+ HID: ntrig: fix touch events
+
+ This reinstates the lost unpressing of BTN_TOUCH. To prevent undesirable
+ touch toggles, this also deals with tip switch events.
+
+ Added a trap to prevent going out of bounds for hidinputs with empty reports.
+
+ Clear bits of unused buttons which result in misidentification.
+
+ Signed-off-by: Rafi Rubin
+ Signed-off-by: Jiri Kosina
+
+commit 04b1217771e454eea260d5cf8e66fa75cd7863bf
+Author: Rafi Rubin
+Date: Tue Feb 16 10:22:11 2010 -0500
+
+ HID: hid-ntrig: Single touch mode tap
+
+ Add DOUBLETAP to events emitted when in single touch only mode.
+
+ Users with a single touch firmware report not seeing the DOUBLETAP events; this
+ is a side effect of dropping old mapping for confidence. The confidence mapping
+ may be fine for singletouch mode but causes problems in multitouch mode.
+
+ Signed-off-by: Rafi Rubin
+ Signed-off-by: Jiri Kosina
+
+commit 52bd98e1c2e3b0587f8c45c15f3ff8af69353044
+Author: Rafi Rubin
+Date: Fri Feb 12 21:13:05 2010 -0500
+
+ HID: hid-ntrig: multitouch cleanup and fix
+
+ This cleans up the identification of multitouch groups and enables
+ the end of group sync.
+
+ Taps are now explicitly handled to adjust for the changes in the
+ event stream in multitouch mode. Added triple and quad tap for the
+ benefit of tools that recognize different tap types but do not have
+ full multi touch support.
+
+ This cleans up the behavior particularly for the latest firmware, which
+ didn't work particularly well with the older version of the driver.
+
+ In this form, when multitouch is active, both mt and st events will come out of
+ the "N-Trig MultiTouch" device. And when it's not, st events will come out of
+ "N-Trig Touchscreen".
+
+ Signed-off-by: Rafi Rubin
+ Signed-off-by: Jiri Kosina
+
+commit 9ff5ed6b935929019c459e4d853c3afe52a4d8f2
+Author: Rafi Rubin
+Date: Thu Feb 11 22:14:06 2010 -0500
+
+ HID: n-trig: remove unnecessary tool switching
+
+ With the pen and touch split apart, we no longer need to inject
+ additional tool switching events.
+
+ Signed-off-by: Rafi Rubin
+ Signed-off-by: Jiri Kosina
+
+commit 0336d54acf59a4ccd97fc9d983518c6d60639f5b
+Author: Rafi Rubin
+Date: Thu Feb 11 22:14:05 2010 -0500
+
+ HID: hid-ntrig add multi input quirk and clean up
+
+ Added a quirk to enable distinct input devices. The digitizer utilizes
+ three inputs to represent pen, multitouch and a normal touch screen.
+
+ With the Pen partitioned, it behaves well and does not need special
+ handling.
+
+ Also, I set names for the input devices to clarify the functions of the
+ various inputs.
+ + Signed-off-by: Rafi Rubin + Signed-off-by: Jiri Kosina + + drivers/hid/hid-ntrig.c | 229 ++++++++++++++++++++++++++++++++-------------- + 1 files changed, 159 insertions(+), 70 deletions(-) + +diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c +index 49ce69d..836a4ba 100644 +--- a/drivers/hid/hid-ntrig.c ++++ b/drivers/hid/hid-ntrig.c +@@ -1,8 +1,8 @@ + /* + * HID driver for N-Trig touchscreens + * +- * Copyright (c) 2008 Rafi Rubin +- * Copyright (c) 2009 Stephane Chatty ++ * Copyright (c) 2008-2010 Rafi Rubin ++ * Copyright (c) 2009-2010 Stephane Chatty + * + */ + +@@ -15,21 +15,27 @@ + + #include + #include ++#include ++#include "usbhid/usbhid.h" + #include + + #include "hid-ids.h" + + #define NTRIG_DUPLICATE_USAGES 0x001 + +-#define nt_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ +- EV_KEY, (c)) +- + struct ntrig_data { +- __s32 x, y, id, w, h; +- char reading_a_point, found_contact_id; +- char pen_active; +- char finger_active; +- char inverted; ++ /* Incoming raw values for a single contact */ ++ __u16 x, y, w, h; ++ __u16 id; ++ ++ bool tipswitch; ++ bool confidence; ++ bool first_contact_touch; ++ ++ bool reading_mt; ++ ++ __u8 mt_footer[4]; ++ __u8 mt_foot_count; + }; + + /* +@@ -42,8 +48,11 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi, + struct hid_field *field, struct hid_usage *usage, + unsigned long **bit, int *max) + { +- switch (usage->hid & HID_USAGE_PAGE) { ++ /* No special mappings needed for the pen and single touch */ ++ if (field->physical) ++ return 0; + ++ switch (usage->hid & HID_USAGE_PAGE) { + case HID_UP_GENDESK: + switch (usage->hid) { + case HID_GD_X: +@@ -66,18 +75,12 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi, + case HID_UP_DIGITIZER: + switch (usage->hid) { + /* we do not want to map these for now */ +- case HID_DG_CONTACTID: /* value is useless */ ++ case HID_DG_CONTACTID: /* Not trustworthy, squelch for now */ + case HID_DG_INPUTMODE: + case HID_DG_DEVICEINDEX: +- case HID_DG_CONTACTCOUNT: + case HID_DG_CONTACTMAX: + return -1; + +- /* original mapping by Rafi Rubin */ +- case HID_DG_CONFIDENCE: +- nt_map_key_clear(BTN_TOOL_DOUBLETAP); +- return 1; +- + /* width/height mapped on TouchMajor/TouchMinor/Orientation */ + case HID_DG_WIDTH: + hid_map_usage(hi, usage, bit, max, +@@ -104,6 +107,10 @@ static int ntrig_input_mapped(struct hid_device *hdev, struct hid_input *hi, + struct hid_field *field, struct hid_usage *usage, + unsigned long **bit, int *max) + { ++ /* No special mappings needed for the pen and single touch */ ++ if (field->physical) ++ return 0; ++ + if (usage->type == EV_KEY || usage->type == EV_REL + || usage->type == EV_ABS) + clear_bit(usage->code, *bit); +@@ -123,31 +130,34 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, + struct input_dev *input = field->hidinput->input; + struct ntrig_data *nd = hid_get_drvdata(hid); + ++ /* No special handling needed for the pen */ ++ if (field->application == HID_DG_PEN) ++ return 0; ++ + if (hid->claimed & HID_CLAIMED_INPUT) { + switch (usage->hid) { +- +- case HID_DG_INRANGE: +- if (field->application & 0x3) +- nd->pen_active = (value != 0); +- else +- nd->finger_active = (value != 0); +- return 0; +- +- case HID_DG_INVERT: +- nd->inverted = value; +- return 0; +- ++ case 0xff000001: ++ /* Tag indicating the start of a multitouch group */ ++ nd->reading_mt = 1; ++ nd->first_contact_touch = 0; ++ break; ++ case HID_DG_TIPSWITCH: ++ nd->tipswitch = value; ++ /* Prevent 
emission of touch until validated */ ++ return 1; ++ case HID_DG_CONFIDENCE: ++ nd->confidence = value; ++ break; + case HID_GD_X: + nd->x = value; +- nd->reading_a_point = 1; ++ /* Clear the contact footer */ ++ nd->mt_foot_count = 0; + break; + case HID_GD_Y: + nd->y = value; + break; + case HID_DG_CONTACTID: + nd->id = value; +- /* we receive this only when in multitouch mode */ +- nd->found_contact_id = 1; + break; + case HID_DG_WIDTH: + nd->w = value; +@@ -159,33 +169,17 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, + * report received in a finger event. We want + * to emit a normal (X, Y) position + */ +- if (!nd->found_contact_id) { +- if (nd->pen_active && nd->finger_active) { +- input_report_key(input, BTN_TOOL_DOUBLETAP, 0); +- input_report_key(input, BTN_TOOL_DOUBLETAP, 1); +- } +- input_event(input, EV_ABS, ABS_X, nd->x); +- input_event(input, EV_ABS, ABS_Y, nd->y); +- } +- break; +- case HID_DG_TIPPRESSURE: +- /* +- * when in single touch mode, this is the last +- * report received in a pen event. We want +- * to emit a normal (X, Y) position +- */ +- if (! nd->found_contact_id) { +- if (nd->pen_active && nd->finger_active) { +- input_report_key(input, +- nd->inverted ? BTN_TOOL_RUBBER : BTN_TOOL_PEN +- , 0); +- input_report_key(input, +- nd->inverted ? BTN_TOOL_RUBBER : BTN_TOOL_PEN +- , 1); +- } ++ if (!nd->reading_mt) { ++ /* ++ * TipSwitch indicates the presence of a ++ * finger in single touch mode. ++ */ ++ input_report_key(input, BTN_TOUCH, ++ nd->tipswitch); ++ input_report_key(input, BTN_TOOL_DOUBLETAP, ++ nd->tipswitch); + input_event(input, EV_ABS, ABS_X, nd->x); + input_event(input, EV_ABS, ABS_Y, nd->y); +- input_event(input, EV_ABS, ABS_PRESSURE, value); + } + break; + case 0xff000002: +@@ -195,10 +189,40 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, + * this usage tells if the contact point is real + * or a placeholder + */ +- if (!nd->reading_a_point || value != 1) ++ ++ /* Shouldn't get more than 4 footer packets, so skip */ ++ if (nd->mt_foot_count >= 4) + break; ++ ++ nd->mt_footer[nd->mt_foot_count++] = value; ++ ++ /* if the footer isn't complete break */ ++ if (nd->mt_foot_count != 4) ++ break; ++ ++ /* Pen activity signal, trigger end of touch. */ ++ if (nd->mt_footer[2]) { ++ nd->confidence = 0; ++ break; ++ } ++ ++ /* If the contact was invalid */ ++ if (!(nd->confidence && nd->mt_footer[0]) ++ || nd->w <= 250 ++ || nd->h <= 190) { ++ nd->confidence = 0; ++ break; ++ } ++ + /* emit a normal (X, Y) for the first point only */ + if (nd->id == 0) { ++ /* ++ * TipSwitch is superfluous in multitouch ++ * mode. The footer events tell us ++ * if there is a finger on the screen or ++ * not. 
++ */ ++ nd->first_contact_touch = nd->confidence; + input_event(input, EV_ABS, ABS_X, nd->x); + input_event(input, EV_ABS, ABS_Y, nd->y); + } +@@ -220,8 +244,21 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, + ABS_MT_TOUCH_MINOR, nd->w); + } + input_mt_sync(field->hidinput->input); +- nd->reading_a_point = 0; +- nd->found_contact_id = 0; ++ break; ++ ++ case HID_DG_CONTACTCOUNT: /* End of a multitouch group */ ++ if (!nd->reading_mt) ++ break; ++ ++ nd->reading_mt = 0; ++ ++ if (nd->first_contact_touch) { ++ input_report_key(input, BTN_TOOL_DOUBLETAP, 1); ++ input_report_key(input, BTN_TOUCH, 1); ++ } else { ++ input_report_key(input, BTN_TOOL_DOUBLETAP, 0); ++ input_report_key(input, BTN_TOUCH, 0); ++ } + break; + + default: +@@ -231,8 +268,8 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, + } + + /* we have handled the hidinput part, now remains hiddev */ +- if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) +- hid->hiddev_hid_event(hid, field, usage, value); ++ if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_hid_event) ++ hid->hiddev_hid_event(hid, field, usage, value); + + return 1; + } +@@ -241,23 +278,75 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id) + { + int ret; + struct ntrig_data *nd; ++ struct hid_input *hidinput; ++ struct input_dev *input; ++ struct hid_report *report; ++ ++ if (id->driver_data) ++ hdev->quirks |= HID_QUIRK_MULTI_INPUT; + + nd = kmalloc(sizeof(struct ntrig_data), GFP_KERNEL); + if (!nd) { + dev_err(&hdev->dev, "cannot allocate N-Trig data\n"); + return -ENOMEM; + } +- nd->reading_a_point = 0; +- nd->found_contact_id = 0; ++ ++ nd->reading_mt = 0; + hid_set_drvdata(hdev, nd); + + ret = hid_parse(hdev); +- if (!ret) +- ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); ++ if (ret) { ++ dev_err(&hdev->dev, "parse failed\n"); ++ goto err_free; ++ } ++ ++ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); ++ if (ret) { ++ dev_err(&hdev->dev, "hw start failed\n"); ++ goto err_free; ++ } ++ ++ ++ list_for_each_entry(hidinput, &hdev->inputs, list) { ++ if (hidinput->report->maxfield < 1) ++ continue; ++ ++ input = hidinput->input; ++ switch (hidinput->report->field[0]->application) { ++ case HID_DG_PEN: ++ input->name = "N-Trig Pen"; ++ break; ++ case HID_DG_TOUCHSCREEN: ++ /* These keys are redundant for fingers, clear them ++ * to prevent incorrect identification */ ++ __clear_bit(BTN_TOOL_PEN, input->keybit); ++ __clear_bit(BTN_TOOL_FINGER, input->keybit); ++ __clear_bit(BTN_0, input->keybit); ++ __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); ++ /* ++ * The physical touchscreen (single touch) ++ * input has a value for physical, whereas ++ * the multitouch only has logical input ++ * fields. ++ */ ++ input->name = ++ (hidinput->report->field[0] ++ ->physical) ? 
++ "N-Trig Touchscreen" : ++ "N-Trig MultiTouch"; ++ break; ++ } ++ } + +- if (ret) +- kfree (nd); ++ /* This is needed for devices with more recent firmware versions */ ++ report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0x0a]; ++ if (report) ++ usbhid_submit_report(hdev, report, USB_DIR_OUT); + ++ ++ return 0; ++err_free: ++ kfree(nd); + return ret; + } + +@@ -276,7 +365,7 @@ MODULE_DEVICE_TABLE(hid, ntrig_devices); + + static const struct hid_usage_id ntrig_grabbed_usages[] = { + { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, +- { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} ++ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1 } + }; + + static struct hid_driver ntrig_driver = { diff --git a/pci-acpi-disable-aspm-if-no-osc.patch b/pci-acpi-disable-aspm-if-no-osc.patch deleted file mode 100644 index 044f389..0000000 --- a/pci-acpi-disable-aspm-if-no-osc.patch +++ /dev/null @@ -1,53 +0,0 @@ -From: Matthew Garrett -Subject: ACPI: Disable ASPM if the platform won't provide _OSC control for PCIe - -ACPI: Disable ASPM if the platform won't provide _OSC control for PCIe - -The PCI SIG documentation for the _OSC OS/firmware handshaking interface -states: - -"If the _OSC control method is absent from the scope of a host bridge -device, then the operating system must not enable or attempt to use any -features defined in this section for the hierarchy originated by the host -bridge." - -The obvious interpretation of this is that the OS should not attempt to use -PCIe hotplug, PME or AER - however, the specification also notes that an -_OSC method is *required* for PCIe hierarchies, and experimental validation -with An Alternative OS indicates that it doesn't use any PCIe functionality -if the _OSC method is missing. That arguably means we shouldn't be using -MSI or extended config space, but right now our problems seem to be limited -to vendors being surprised when ASPM gets enabled on machines when other -OSs refuse to do so. So, for now, let's just disable ASPM if the _OSC -method doesn't exist or refuses to hand over PCIe capability control. 
- -Signed-off-by: Matthew Garrett ---- - -diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c -index 4eac593..1f67057 100644 ---- a/drivers/acpi/pci_root.c -+++ b/drivers/acpi/pci_root.c -@@ -33,6 +33,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -543,6 +544,14 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) - if (flags != base_flags) - acpi_pci_osc_support(root, flags); - -+ status = acpi_pci_osc_control_set(root->device->handle, -+ 0); -+ -+ if (status == AE_NOT_EXIST) { -+ printk(KERN_INFO "Unable to assume PCIe control: Disabling ASPM\n"); -+ pcie_no_aspm(); -+ } -+ - pci_acpi_add_bus_pm_notifier(device, root->bus); - if (device->wakeup.flags.run_wake) - device_set_run_wake(root->bus->bridge, true); diff --git a/pci-aspm-dont-enable-too-early.patch b/pci-aspm-dont-enable-too-early.patch deleted file mode 100644 index ea91a25..0000000 --- a/pci-aspm-dont-enable-too-early.patch +++ /dev/null @@ -1,50 +0,0 @@ -From: Matthew Garrett -Date: Wed, 9 Jun 2010 20:05:07 +0000 (-0400) -Subject: PCI: Don't enable aspm before drivers have had a chance to veto it -X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fjbarnes%2Fpci-2.6.git;a=commitdiff_plain;h=8f0b08c29f1df91315e48adce04462eb23671099 - -PCI: Don't enable aspm before drivers have had a chance to veto it - -The aspm code will currently set the configured aspm policy before drivers -have had an opportunity to indicate that their hardware doesn't support it. -Unfortunately, putting some hardware in L0 or L1 can result in the hardware -no longer responding to any requests, even after aspm is disabled. It makes -more sense to leave aspm policy at the BIOS defaults at initial setup time, -reconfiguring it after pci_enable_device() is called. This allows the -driver to blacklist individual devices beforehand. - -Reviewed-by: Kenji Kaneshige -Signed-off-by: Matthew Garrett -Signed-off-by: Jesse Barnes ---- - -diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c -index be53d98..7122281 100644 ---- a/drivers/pci/pcie/aspm.c -+++ b/drivers/pci/pcie/aspm.c -@@ -588,11 +588,23 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) - * update through pcie_aspm_cap_init(). - */ - pcie_aspm_cap_init(link, blacklist); -- pcie_config_aspm_path(link); - - /* Setup initial Clock PM state */ - pcie_clkpm_cap_init(link, blacklist); -- pcie_set_clkpm(link, policy_to_clkpm_state(link)); -+ -+ /* -+ * At this stage drivers haven't had an opportunity to change the -+ * link policy setting. Enabling ASPM on broken hardware can cripple -+ * it even before the driver has had a chance to disable ASPM, so -+ * default to a safe level right now. If we're enabling ASPM beyond -+ * the BIOS's expectation, we'll do so once pci_enable_device() is -+ * called. 
-+ */ -+ if (aspm_policy != POLICY_POWERSAVE) { -+ pcie_config_aspm_path(link); -+ pcie_set_clkpm(link, policy_to_clkpm_state(link)); -+ } -+ - unlock: - mutex_unlock(&aspm_lock); - out: diff --git a/pci-change-error-messages-to-kern-info.patch b/pci-change-error-messages-to-kern-info.patch deleted file mode 100644 index e0ce16c..0000000 --- a/pci-change-error-messages-to-kern-info.patch +++ /dev/null @@ -1,43 +0,0 @@ -From: Bjorn Helgaas -Date: Thu, 3 Jun 2010 19:47:18 +0000 (-0600) -Subject: PCI: change resource collision messages from KERN_ERR to KERN_INFO -X-Git-Tag: v2.6.35-rc3~4^2~3 -X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=f6d440daebd12be66ea1f834faf2966a49a07bd6 - -PCI: change resource collision messages from KERN_ERR to KERN_INFO - -We can often deal with PCI resource issues by moving devices around. In -that case, there's no point in alarming the user with messages like these. -There are many bug reports where the message itself is the only problem, -e.g., https://bugs.launchpad.net/ubuntu/+source/linux/+bug/413419 . - -Signed-off-by: Bjorn Helgaas -Signed-off-by: Jesse Barnes ---- - -diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c -index 17bed18..92379e2 100644 ---- a/drivers/pci/setup-res.c -+++ b/drivers/pci/setup-res.c -@@ -97,16 +97,16 @@ int pci_claim_resource(struct pci_dev *dev, int resource) - - root = pci_find_parent_resource(dev, res); - if (!root) { -- dev_err(&dev->dev, "no compatible bridge window for %pR\n", -- res); -+ dev_info(&dev->dev, "no compatible bridge window for %pR\n", -+ res); - return -EINVAL; - } - - conflict = request_resource_conflict(root, res); - if (conflict) { -- dev_err(&dev->dev, -- "address space collision: %pR conflicts with %s %pR\n", -- res, conflict->name, conflict); -+ dev_info(&dev->dev, -+ "address space collision: %pR conflicts with %s %pR\n", -+ res, conflict->name, conflict); - return -EBUSY; - } - diff --git a/pci-fall-back-to-original-bios-bar-addresses.patch b/pci-fall-back-to-original-bios-bar-addresses.patch deleted file mode 100644 index e65e0c1..0000000 --- a/pci-fall-back-to-original-bios-bar-addresses.patch +++ /dev/null @@ -1,103 +0,0 @@ -From: Bjorn Helgaas -Date: Thu, 15 Jul 2010 15:41:42 +0000 (-0600) -Subject: PCI: fall back to original BIOS BAR addresses -X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=58c84eda07560a6b75b03e8d3b26d6eddfc14011 - -PCI: fall back to original BIOS BAR addresses - -If we fail to assign resources to a PCI BAR, this patch makes us try the -original address from BIOS rather than leaving it disabled. - -Linux tries to make sure all PCI device BARs are inside the upstream -PCI host bridge or P2P bridge apertures, reassigning BARs if necessary. -Windows does similar reassignment. - -Before this patch, if we could not move a BAR into an aperture, we left -the resource unassigned, i.e., at address zero. Windows leaves such BARs -at the original BIOS addresses, and this patch makes Linux do the same. - -This is a bit ugly because we disable the resource long before we try to -reassign it, so we have to keep track of the BIOS BAR address somewhere. -For lack of a better place, I put it in the struct pci_dev. - -I think it would be cleaner to attempt the assignment immediately when the -claim fails, so we could easily remember the original address. 
But we -currently claim motherboard resources in the middle, after attempting to -claim PCI resources and before assigning new PCI resources, and changing -that is a fairly big job. - -Addresses https://bugzilla.kernel.org/show_bug.cgi?id=16263 - -Reported-by: Andrew -Tested-by: Andrew -Signed-off-by: Bjorn Helgaas -Signed-off-by: Jesse Barnes ---- - -diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c -index 6fdb3ec..5525309 100644 ---- a/arch/x86/pci/i386.c -+++ b/arch/x86/pci/i386.c -@@ -184,6 +184,7 @@ static void __init pcibios_allocate_resources(int pass) - idx, r, disabled, pass); - if (pci_claim_resource(dev, idx) < 0) { - /* We'll assign a new address later */ -+ dev->fw_addr[idx] = r->start; - r->end -= r->start; - r->start = 0; - } -diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c -index 92379e2..2aaa131 100644 ---- a/drivers/pci/setup-res.c -+++ b/drivers/pci/setup-res.c -@@ -156,6 +156,38 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, - pcibios_align_resource, dev); - } - -+ if (ret < 0 && dev->fw_addr[resno]) { -+ struct resource *root, *conflict; -+ resource_size_t start, end; -+ -+ /* -+ * If we failed to assign anything, let's try the address -+ * where firmware left it. That at least has a chance of -+ * working, which is better than just leaving it disabled. -+ */ -+ -+ if (res->flags & IORESOURCE_IO) -+ root = &ioport_resource; -+ else -+ root = &iomem_resource; -+ -+ start = res->start; -+ end = res->end; -+ res->start = dev->fw_addr[resno]; -+ res->end = res->start + size - 1; -+ dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n", -+ resno, res); -+ conflict = request_resource_conflict(root, res); -+ if (conflict) { -+ dev_info(&dev->dev, -+ "BAR %d: %pR conflicts with %s %pR\n", resno, -+ res, conflict->name, conflict); -+ res->start = start; -+ res->end = end; -+ } else -+ ret = 0; -+ } -+ - if (!ret) { - res->flags &= ~IORESOURCE_STARTALIGN; - dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); -diff --git a/include/linux/pci.h b/include/linux/pci.h -index 7cb0084..f26fda7 100644 ---- a/include/linux/pci.h -+++ b/include/linux/pci.h -@@ -288,6 +288,7 @@ struct pci_dev { - */ - unsigned int irq; - struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ -+ resource_size_t fw_addr[DEVICE_COUNT_RESOURCE]; /* FW-assigned addr */ - - /* These fields are used by common fixups */ - unsigned int transparent:1; /* Transparent PCI bridge */ diff --git a/perf-mount-debugfs-automatically.patch b/perf-mount-debugfs-automatically.patch new file mode 100644 index 0000000..caf6a25 --- /dev/null +++ b/perf-mount-debugfs-automatically.patch @@ -0,0 +1,93 @@ +From 29c52aa2300173dd45df04dae1f5acc81a2c93b1 Mon Sep 17 00:00:00 2001 +From: Xiao Guangrong +Date: Mon, 28 Dec 2009 16:47:12 +0800 +Subject: [PATCH] perf tools: Mount debugfs automatically + +Mount debugfs filesystem under '/sys/kernel/debug', if it's not +mounted. 
+ +Signed-off-by: Xiao Guangrong +Cc: Peter Zijlstra +Cc: Paul Mackerras +Cc: Frederic Weisbecker +Cc: Clark Williams +Cc: John Kacur +LKML-Reference: <4B387090.7080407@cn.fujitsu.com> +Signed-off-by: Ingo Molnar +--- + tools/perf/perf.c | 2 +- + tools/perf/util/debugfs.c | 16 +++++++--------- + tools/perf/util/debugfs.h | 2 +- + 3 files changed, 9 insertions(+), 11 deletions(-) + +diff --git a/tools/perf/perf.c b/tools/perf/perf.c +index 873e55f..fc89005 100644 +--- a/tools/perf/perf.c ++++ b/tools/perf/perf.c +@@ -388,7 +388,7 @@ static int run_argv(int *argcp, const char ***argv) + /* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */ + static void get_debugfs_mntpt(void) + { +- const char *path = debugfs_find_mountpoint(); ++ const char *path = debugfs_mount(NULL); + + if (path) + strncpy(debugfs_mntpt, path, sizeof(debugfs_mntpt)); +diff --git a/tools/perf/util/debugfs.c b/tools/perf/util/debugfs.c +index 06b73ee..1f805fd 100644 +--- a/tools/perf/util/debugfs.c ++++ b/tools/perf/util/debugfs.c +@@ -106,16 +106,14 @@ int debugfs_valid_entry(const char *path) + return 0; + } + +-/* mount the debugfs somewhere */ ++/* mount the debugfs somewhere if it's not mounted */ + +-int debugfs_mount(const char *mountpoint) ++char *debugfs_mount(const char *mountpoint) + { +- char mountcmd[128]; +- + /* see if it's already mounted */ + if (debugfs_find_mountpoint()) { + debugfs_premounted = 1; +- return 0; ++ return debugfs_mountpoint; + } + + /* if not mounted and no argument */ +@@ -127,13 +125,13 @@ int debugfs_mount(const char *mountpoint) + mountpoint = "/sys/kernel/debug"; + } + ++ if (mount(NULL, mountpoint, "debugfs", 0, NULL) < 0) ++ return NULL; ++ + /* save the mountpoint */ + strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint)); + +- /* mount it */ +- snprintf(mountcmd, sizeof(mountcmd), +- "/bin/mount -t debugfs debugfs %s", mountpoint); +- return system(mountcmd); ++ return debugfs_mountpoint; + } + + /* umount the debugfs */ +diff --git a/tools/perf/util/debugfs.h b/tools/perf/util/debugfs.h +index 3cd14f9..83a0287 100644 +--- a/tools/perf/util/debugfs.h ++++ b/tools/perf/util/debugfs.h +@@ -15,7 +15,7 @@ + extern const char *debugfs_find_mountpoint(void); + extern int debugfs_valid_mountpoint(const char *debugfs); + extern int debugfs_valid_entry(const char *path); +-extern int debugfs_mount(const char *mountpoint); ++extern char *debugfs_mount(const char *mountpoint); + extern int debugfs_umount(void); + extern int debugfs_write(const char *entry, const char *value); + extern int debugfs_read(const char *entry, char *buffer, size_t size); +-- +1.7.1 + diff --git a/quiet-prove_RCU-in-cgroups.patch b/quiet-prove_RCU-in-cgroups.patch deleted file mode 100644 index f043ef5..0000000 --- a/quiet-prove_RCU-in-cgroups.patch +++ /dev/null @@ -1,36 +0,0 @@ -diff --git a/kernel/softlockup.c b/kernel/softlockup.c -index 4b493f6..ada1fcd 100644 ---- a/kernel/softlockup.c -+++ b/kernel/softlockup.c -@@ -187,7 +187,9 @@ static int watchdog(void *__bind_cpu) - { - struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; - -+ rcu_read_lock(); - sched_setscheduler(current, SCHED_FIFO, ¶m); -+ rcu_read_unlock(); - - /* initialize timestamp */ - __touch_softlockup_watchdog(); -diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c -index 5a5ea2c..47ecc56 100644 ---- a/kernel/sched_fair.c -+++ b/kernel/sched_fair.c -@@ -1272,6 +1272,9 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync) - * effect of the currently running task 
from the load - * of the current CPU: - */ -+ -+ rcu_read_lock(); -+ - if (sync) { - tg = task_group(current); - weight = current->se.load.weight; -@@ -1298,6 +1301,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync) - 100*(this_load + effective_load(tg, this_cpu, weight, weight)) <= - imbalance*(load + effective_load(tg, prev_cpu, 0, weight)); - -+ rcu_read_unlock(); - /* - * If the currently running task will sleep within - * a reasonable amount of time then attract this newly diff --git a/revert-drm-kms-toggle-poll-around-switcheroo.patch b/revert-drm-kms-toggle-poll-around-switcheroo.patch deleted file mode 100644 index f83fc2f..0000000 --- a/revert-drm-kms-toggle-poll-around-switcheroo.patch +++ /dev/null @@ -1,65 +0,0 @@ -From 69b711c0c5e3d9cb3a5b9f741fb4cdc96b5739cb Mon Sep 17 00:00:00 2001 -From: Kyle McMartin -Subject: Revert "drm/kms: disable/enable poll around switcheroo on/off" - -This reverts commit fbf81762e385d3d45acad057b654d56972acf58c, mostly. ---- - drivers/gpu/drm/i915/i915_dma.c | 4 +--- - drivers/gpu/drm/nouveau/nouveau_state.c | 3 --- - drivers/gpu/drm/radeon/radeon_device.c | 2 -- - -diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c -index 59a2bf8..2df3286 100644 ---- a/drivers/gpu/drm/i915/i915_dma.c -+++ b/drivers/gpu/drm/i915/i915_dma.c -@@ -1320,14 +1320,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ - struct drm_device *dev = pci_get_drvdata(pdev); - pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; - if (state == VGA_SWITCHEROO_ON) { -- printk(KERN_INFO "i915: switched on\n"); -+ printk(KERN_INFO "i915: switched off\n"); - /* i915 resume handler doesn't set to D0 */ - pci_set_power_state(dev->pdev, PCI_D0); - i915_resume(dev); -- drm_kms_helper_poll_enable(dev); - } else { - printk(KERN_ERR "i915: switched off\n"); -- drm_kms_helper_poll_disable(dev); - i915_suspend(dev, pmm); - } - } -diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c -index b02a231..0c28266 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_state.c -+++ b/drivers/gpu/drm/nouveau/nouveau_state.c -@@ -376,15 +376,12 @@ out_err: - static void nouveau_switcheroo_set_state(struct pci_dev *pdev, - enum vga_switcheroo_state state) - { -- struct drm_device *dev = pci_get_drvdata(pdev); - pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; - if (state == VGA_SWITCHEROO_ON) { - printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); - nouveau_pci_resume(pdev); -- drm_kms_helper_poll_enable(dev); - } else { - printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); -- drm_kms_helper_poll_disable(dev); - nouveau_pci_suspend(pdev, pmm); - } - } -diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c -index f10faed..225a9f2 100644 ---- a/drivers/gpu/drm/radeon/radeon_device.c -+++ b/drivers/gpu/drm/radeon/radeon_device.c -@@ -546,10 +546,8 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero - /* don't suspend or resume card normally */ - rdev->powered_down = false; - radeon_resume_kms(dev); -- drm_kms_helper_poll_enable(dev); - } else { - printk(KERN_INFO "radeon: switched off\n"); -- drm_kms_helper_poll_disable(dev); - radeon_suspend_kms(dev, pmm); - /* don't suspend or resume card normally */ - rdev->powered_down = true; diff --git a/rt2x00-rt2800-Make-rt30xx-and-rt35xx-chipsets-configurable.patch b/rt2x00-rt2800-Make-rt30xx-and-rt35xx-chipsets-configurable.patch new file mode 100644 index 
0000000..39db169 --- /dev/null +++ b/rt2x00-rt2800-Make-rt30xx-and-rt35xx-chipsets-configurable.patch @@ -0,0 +1,559 @@ +commit de1ebdceb6a4fe1b7073b81d273285b7c8bed312 +Author: Gertjan van Wingerde +Date: Sun Feb 14 12:52:05 2010 +0100 + + rt2x00: rt2800 - Make rt30xx and rt35xx chipsets configurable. + + Support for rt30xx- and rt35xx-based devices is currently not functional + in rt2800pci and rt2800usb. + In order to not confuse users we shouldn't claim the PCI and USB device + ID's for these devices. However, to allow for testing it is good to still + have them available, although disabled by default. + Make support for these device configuration options that default to off. + + For rt2800usb a 3rd class of devices is added, which are the unknown + devices. For these devices it is known that they are either based on + rt28xx, rt30xx or rt35xx, but it is not known on what chipset exactly. + These devices are disabled by default as well, until it can be established + on what chipset exactly they are based. + + Signed-off-by: Gertjan van Wingerde + Signed-off-by: John W. Linville + +diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig +index 3ca824a..5239e08 100644 +--- a/drivers/net/wireless/rt2x00/Kconfig ++++ b/drivers/net/wireless/rt2x00/Kconfig +@@ -64,7 +64,7 @@ config RT2800PCI_SOC + default y + + config RT2800PCI +- tristate "Ralink rt2800 (PCI/PCMCIA) support (VERY EXPERIMENTAL)" ++ tristate "Ralink rt28xx/rt30xx/rt35xx (PCI/PCIe/PCMCIA) support (EXPERIMENTAL)" + depends on (RT2800PCI_PCI || RT2800PCI_SOC) && EXPERIMENTAL + select RT2800_LIB + select RT2X00_LIB_PCI if RT2800PCI_PCI +@@ -75,7 +75,7 @@ config RT2800PCI + select CRC_CCITT + select EEPROM_93CX6 + ---help--- +- This adds support for rt2800 wireless chipset family. ++ This adds support for rt2800/rt3000/rt3500 wireless chipset family. + Supported chips: RT2760, RT2790, RT2860, RT2880, RT2890 & RT3052 + + This driver is non-functional at the moment and is intended for +@@ -83,6 +83,32 @@ config RT2800PCI + + When compiled as a module, this driver will be called "rt2800pci.ko". + ++if RT2800PCI ++ ++config RT2800PCI_RT30XX ++ bool "rt2800pci - Include support for rt30xx (PCI/PCIe/PCMCIA) devices" ++ default n ++ ---help--- ++ This adds support for rt30xx wireless chipset family to the ++ rt2800pci driver. ++ Supported chips: RT3090, RT3091 & RT3092 ++ ++ Support for these devices is non-functional at the moment and is ++ intended for testers and developers. ++ ++config RT2800PCI_RT35XX ++ bool "rt2800pci - Include support for rt35xx (PCI/PCIe/PCMCIA) devices" ++ default n ++ ---help--- ++ This adds support for rt35xx wireless chipset family to the ++ rt2800pci driver. ++ Supported chips: RT3060, RT3062, RT3562, RT3592 ++ ++ Support for these devices is non-functional at the moment and is ++ intended for testers and developers. ++ ++endif ++ + config RT2500USB + tristate "Ralink rt2500 (USB) support" + depends on USB +@@ -126,6 +152,43 @@ config RT2800USB + + When compiled as a module, this driver will be called "rt2800usb.ko". + ++if RT2800USB ++ ++config RT2800USB_RT30XX ++ bool "rt2800usb - Include support for rt30xx (USB) devices" ++ default n ++ ---help--- ++ This adds support for rt30xx wireless chipset family to the ++ rt2800usb driver. ++ Supported chips: RT3070, RT3071 & RT3072 ++ ++ Support for these devices is non-functional at the moment and is ++ intended for testers and developers. 
++ ++config RT2800USB_RT35XX ++ bool "rt2800usb - Include support for rt35xx (USB) devices" ++ default n ++ ---help--- ++ This adds support for rt35xx wireless chipset family to the ++ rt2800usb driver. ++ Supported chips: RT3572 ++ ++ Support for these devices is non-functional at the moment and is ++ intended for testers and developers. ++ ++config RT2800USB_UNKNOWN ++ bool "rt2800usb - Include support for unknown (USB) devices" ++ default n ++ ---help--- ++ This adds support for rt2800 family devices that are known to ++ have a rt2800 family chipset, but for which the exact chipset ++ is unknown. ++ ++ Support status for these devices is unknown, and enabling these ++ devices may or may not work. ++ ++endif ++ + config RT2800_LIB + tristate + +diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c +index fc35105..0e4c417 100644 +--- a/drivers/net/wireless/rt2x00/rt2800pci.c ++++ b/drivers/net/wireless/rt2x00/rt2800pci.c +@@ -1185,7 +1185,10 @@ static const struct rt2x00_ops rt2800pci_ops = { + * RT2800pci module information. + */ + static struct pci_device_id rt2800pci_device_table[] = { +- { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) }, ++ { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) }, ++ { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) }, ++ { PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) }, ++ { PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1432, 0x7728), PCI_DEVICE_DATA(&rt2800pci_ops) }, +@@ -1193,18 +1196,19 @@ static struct pci_device_id rt2800pci_device_table[] = { + { PCI_DEVICE(0x1432, 0x7748), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1432, 0x7758), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1432, 0x7768), PCI_DEVICE_DATA(&rt2800pci_ops) }, +- { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) }, +- { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) }, +- { PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) }, +- { PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) }, +- { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) }, +- { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) }, ++ { PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) }, ++#ifdef CONFIG_RT2800PCI_RT30XX + { PCI_DEVICE(0x1814, 0x3090), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1814, 0x3091), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1814, 0x3092), PCI_DEVICE_DATA(&rt2800pci_ops) }, ++ { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) }, ++#endif ++#ifdef CONFIG_RT2800PCI_RT35XX ++ { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) }, ++ { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) }, +- { PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) }, ++#endif + { 0, } + }; + +diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c +index 79ea379..5e4ee20 100644 +--- a/drivers/net/wireless/rt2x00/rt2800usb.c ++++ b/drivers/net/wireless/rt2x00/rt2800usb.c +@@ -805,51 +805,27 @@ static struct usb_device_id rt2800usb_device_table[] = { + /* Abocom */ + { USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, + { 
USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, +- /* AirTies */ +- { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) }, +- /* Amigo */ +- { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Amit */ + { USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Askey */ + { USB_DEVICE(0x1690, 0x0740), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* ASUS */ + { USB_DEVICE(0x0b05, 0x1731), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* AzureWave */ + { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Belkin */ + { USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x050d, 0x815c), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Buffalo */ + { USB_DEVICE(0x0411, 0x00e8), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0411, 0x012e), USB_DEVICE_DATA(&rt2800usb_ops) }, +- /* Cisco */ +- { USB_DEVICE(0x167b, 0x4001), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Conceptronic */ + { USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x14b2, 0x3c08), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x14b2, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x14b2, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x14b2, 0x3c23), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x14b2, 0x3c25), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x14b2, 0x3c27), USB_DEVICE_DATA(&rt2800usb_ops) }, +@@ -858,157 +834,257 @@ static struct usb_device_id rt2800usb_device_table[] = { + { USB_DEVICE(0x07aa, 0x002f), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x07aa, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x07aa, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x07aa, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x07aa, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x18c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* D-Link */ + { USB_DEVICE(0x07d1, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Edimax */ ++ { USB_DEVICE(0x7392, 0x7717), 
USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* EnGenius */ ++ { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Gigabyte */ ++ { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Hawking */ ++ { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Linksys */ ++ { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Logitec */ ++ { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0789, 0x0164), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Motorola */ ++ { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* MSI */ ++ { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Philips */ ++ { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Planex */ ++ { USB_DEVICE(0x2019, 0xed06), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Ralink */ ++ { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Samsung */ ++ { USB_DEVICE(0x04e8, 0x2018), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Siemens */ ++ { USB_DEVICE(0x129b, 0x1828), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Sitecom */ ++ { USB_DEVICE(0x0df6, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0df6, 0x002b), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* SMC */ ++ { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x083a, 0x7512), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x083a, 0x7522), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Sparklan */ ++ { USB_DEVICE(0x15a9, 0x0006), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Sweex */ ++ { USB_DEVICE(0x177f, 0x0302), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* U-Media*/ ++ { USB_DEVICE(0x157e, 0x300e), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* ZCOM */ ++ { USB_DEVICE(0x0cde, 0x0022), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0cde, 0x0025), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Zinwell */ ++ { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Zyxel */ ++ { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) }, ++#ifdef CONFIG_RT2800USB_RT30XX ++ /* Abocom */ ++ { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* AirTies */ ++ { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* AzureWave */ ++ { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Conceptronic */ ++ { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Corega */ ++ { 
USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* D-Link */ + { USB_DEVICE(0x07d1, 0x3c0a), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Edimax */ + { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Encore */ + { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* EnGenius */ +- { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Gigabyte */ ++ { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* I-O DATA */ ++ { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* MSI */ ++ { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Pegatron */ ++ { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Planex */ ++ { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Quanta */ ++ { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Ralink */ ++ { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Sitecom */ ++ { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* SMC */ ++ { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Zinwell */ ++ { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) }, ++#endif ++#ifdef CONFIG_RT2800USB_RT35XX ++ /* Askey */ ++ { USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Cisco */ ++ { USB_DEVICE(0x167b, 0x4001), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* EnGenius */ ++ { USB_DEVICE(0x1740, 0x9801), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* I-O DATA */ ++ { USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Ralink */ ++ { USB_DEVICE(0x148f, 0x3370), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Sitecom */ ++ { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Zinwell */ ++ { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) }, ++#endif ++#ifdef CONFIG_RT2800USB_UNKNOWN ++ /* ++ * Unclear what kind of devices these are (they aren't supported by the ++ 
* vendor driver). ++ */ ++ /* Allwin */ ++ { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x8516, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Amigo */ ++ { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Askey */ ++ { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* ASUS */ ++ { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* AzureWave */ ++ { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Belkin */ ++ { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Buffalo */ ++ { USB_DEVICE(0x0411, 0x012e), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0411, 0x0148), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0411, 0x0150), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0411, 0x015d), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Conceptronic */ ++ { USB_DEVICE(0x14b2, 0x3c08), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x14b2, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Corega */ ++ { USB_DEVICE(0x07aa, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x07aa, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x18c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* D-Link */ ++ { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* Encore */ ++ { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ /* EnGenius */ + { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x1740, 0x9801), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Gemtek */ + { USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Gigabyte */ +- { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1044, 0x800c), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Hawking */ +- { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* I-O DATA */ +- { USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) }, + { 
USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* LevelOne */ + { USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Linksys */ +- { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x1737, 0x0078), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) }, +- /* Logitec */ +- { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0789, 0x0164), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Motorola */ +- { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* MSI */ +- { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Ovislink */ + { USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Para */ + { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Pegatron */ ++ { USB_DEVICE(0x05a6, 0x0101), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) }, +- /* Philips */ +- { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Planex */ +- { USB_DEVICE(0x2019, 0xed06), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Qcom */ + { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) }, +- /* Quanta */ +- { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) }, +- /* Ralink */ +- { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) }, +- /* Samsung */ +- { USB_DEVICE(0x04e8, 0x2018), USB_DEVICE_DATA(&rt2800usb_ops) }, +- /* Siemens */ +- { USB_DEVICE(0x129b, 0x1828), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Sitecom */ +- { USB_DEVICE(0x0df6, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0df6, 0x002b), 
USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x004a), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x004d), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* SMC */ +- { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x083a, 0x7512), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x083a, 0x7522), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) }, +- /* Sparklan */ +- { USB_DEVICE(0x15a9, 0x0006), USB_DEVICE_DATA(&rt2800usb_ops) }, ++ { USB_DEVICE(0x083a, 0xd522), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Sweex */ + { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x177f, 0x0302), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) }, +- /* U-Media*/ +- { USB_DEVICE(0x157e, 0x300e), USB_DEVICE_DATA(&rt2800usb_ops) }, +- /* ZCOM */ +- { USB_DEVICE(0x0cde, 0x0022), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x0cde, 0x0025), USB_DEVICE_DATA(&rt2800usb_ops) }, +- /* Zinwell */ +- { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) }, +- { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Zyxel */ +- { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) }, ++#endif + { 0, } + }; + diff --git a/sched-fix-over-scheduling-bug.patch b/sched-fix-over-scheduling-bug.patch new file mode 100644 index 0000000..b09c101 --- /dev/null +++ b/sched-fix-over-scheduling-bug.patch @@ -0,0 +1,60 @@ +From: Alex,Shi +Date: Thu, 17 Jun 2010 06:08:13 +0000 (+0800) +Subject: sched: Fix over-scheduling bug +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=3c93717cfa51316e4dbb471e7c0f9d243359d5f8 + +sched: Fix over-scheduling bug + +Commit e70971591 ("sched: Optimize unused cgroup configuration") introduced +an imbalanced scheduling bug. 
[[ in 2.6.32-rc1 ]] + +If we do not use CGROUP, function update_h_load won't update h_load. When the +system has a large number of tasks far more than logical CPU number, the +incorrect cfs_rq[cpu]->h_load value will cause load_balance() to pull too +many tasks to the local CPU from the busiest CPU. So the busiest CPU keeps +going in a round robin. That will hurt performance. + +The issue was found originally by a scientific calculation workload that +developed by Yanmin. With that commit, the workload performance drops +about 40%. + + CPU before after + + 00 : 2 : 7 + 01 : 1 : 7 + 02 : 11 : 6 + 03 : 12 : 7 + 04 : 6 : 6 + 05 : 11 : 7 + 06 : 10 : 6 + 07 : 12 : 7 + 08 : 11 : 6 + 09 : 12 : 6 + 10 : 1 : 6 + 11 : 1 : 6 + 12 : 6 : 6 + 13 : 2 : 6 + 14 : 2 : 6 + 15 : 1 : 6 + +Reviewed-by: Yanmin zhang +Signed-off-by: Alex Shi +Signed-off-by: Peter Zijlstra +LKML-Reference: <1276754893.9452.5442.camel@debian> +Signed-off-by: Ingo Molnar +--- + +diff --git a/kernel/sched.c b/kernel/sched.c +index 2aaceeb..6c9e7c8 100644 +--- a/kernel/sched.c ++++ b/kernel/sched.c +@@ -1657,9 +1657,6 @@ static void update_shares(struct sched_domain *sd) + + static void update_h_load(long cpu) + { +- if (root_task_group_empty()) +- return; +- + walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); + } + diff --git a/sources b/sources index 4e67ad1..cd05c3b 100644 --- a/sources +++ b/sources @@ -1,3 +1,2 @@ -10eebcb0178fb4540e2165bfd7efc7ad linux-2.6.34.tar.bz2 -53c4176b48a615b2837ed1a2db2f3c1f patch-2.6.34.3.bz2 -299d75a98315227989e8ce4e26661b08 patch-2.6.34.4-rc1.bz2 +c3883760b18d50e8d78819c54d579b00 linux-2.6.33.tar.bz2 +88390e48c301f9eaeb455d8c00cfda57 patch-2.6.33.6.bz2 diff --git a/ssb_check_for_sprom.patch b/ssb_check_for_sprom.patch index 9415e13..7df784f 100644 --- a/ssb_check_for_sprom.patch +++ b/ssb_check_for_sprom.patch @@ -1,4 +1,4 @@ -From 4d9d1ff88f920e9fcdde155c0a1366b7e0462d14 Mon Sep 17 00:00:00 2001 +From 380bed7aa858cbe2d4eeb783e2bed7d01828518d Mon Sep 17 00:00:00 2001 From: John W. Linville Date: Fri, 19 Mar 2010 14:58:01 -0400 Subject: [PATCH v4] ssb: do not read SPROM if it does not exist @@ -20,12 +20,16 @@ Cc: Larry Finger Cc: Michael Buesch Cc: stable@kernel.org --- +Version 4, move read of ChipCommon status register to ssb_chipcommon_init +Version 3, add missing semi-colon... :-( +Version 2, check the correct place for ChipCommon core revision... 
:-) + drivers/ssb/driver_chipcommon.c | 3 +++ drivers/ssb/pci.c | 3 +++ - drivers/ssb/sprom.c | 26 ++++++++++++++++++++++++++ + drivers/ssb/sprom.c | 22 ++++++++++++++++++++++ include/linux/ssb/ssb.h | 3 +++ include/linux/ssb/ssb_driver_chipcommon.h | 15 +++++++++++++++ - 5 files changed, 50 insertions(+), 0 deletions(-) + 5 files changed, 46 insertions(+), 0 deletions(-) diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c index 9681536..6cf288d 100644 @@ -42,10 +46,10 @@ index 9681536..6cf288d 100644 chipco_powercontrol_init(cc); ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST); diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c -index a8dbb06..89d7ab1 100644 +index 9e50896..2f7b16d 100644 --- a/drivers/ssb/pci.c +++ b/drivers/ssb/pci.c -@@ -621,6 +621,9 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, +@@ -620,6 +620,9 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, int err = -ENOMEM; u16 *buf; @@ -56,20 +60,16 @@ index a8dbb06..89d7ab1 100644 if (!buf) goto out; diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c -index f2f920f..c690f58 100644 +index d0e6762..55eb9b0 100644 --- a/drivers/ssb/sprom.c +++ b/drivers/ssb/sprom.c -@@ -176,3 +176,29 @@ const struct ssb_sprom *ssb_get_fallback_sprom(void) +@@ -175,3 +175,25 @@ const struct ssb_sprom *ssb_get_fallback_sprom(void) { return fallback_sprom; } + +bool ssb_is_sprom_available(struct ssb_bus *bus) +{ -+ /* some older devices don't have chipcommon, but they have sprom */ -+ if (!bus->chipco.dev) -+ return true; -+ + /* status register only exists on chipcomon rev >= 11 */ + if (bus->chipco.dev->id.revision < 11) + return true; @@ -151,5 +151,35 @@ index 4e27acf..2cdf249 100644 u16 fast_pwrup_delay; struct ssb_chipcommon_pmu pmu; -- -1.7.0.1 +1.6.2.5 + +From ec032742062ad1b01dfe75cfccdbc5b850837c23 Mon Sep 17 00:00:00 2001 +From: John W. Linville +Date: Tue, 30 Mar 2010 13:47:39 -0400 +Subject: [PATCH] ssb: avoid null ptr deref in ssb_is_sprom_available + +Some older devices don't have chipcommon, but they do have SPROM. + +Signed-off-by: John W. Linville +--- + drivers/ssb/sprom.c | 4 ++++ + 1 files changed, 4 insertions(+), 0 deletions(-) + +diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c +index 55eb9b0..874d8f1 100644 +--- a/drivers/ssb/sprom.c ++++ b/drivers/ssb/sprom.c +@@ -178,6 +178,10 @@ const struct ssb_sprom *ssb_get_fallback_sprom(void) + + bool ssb_is_sprom_available(struct ssb_bus *bus) + { ++ /* some older devices don't have chipcommon, but they have sprom */ ++ if (!bus->chipco.dev) ++ return true; ++ + /* status register only exists on chipcomon rev >= 11 */ + if (bus->chipco.dev->id.revision < 11) + return true; +-- +1.6.2.5 diff --git a/thinkpad-acpi-add-x100e.patch b/thinkpad-acpi-add-x100e.patch index 216fb89..9609ec4 100644 --- a/thinkpad-acpi-add-x100e.patch +++ b/thinkpad-acpi-add-x100e.patch @@ -1,11 +1,10 @@ diff -up linux-2.6.33.noarch/drivers/platform/x86/thinkpad_acpi.c.mjg linux-2.6.33.noarch/drivers/platform/x86/thinkpad_acpi.c ---- linux-2.6.33.noarch/drivers/platform/x86/thinkpad_acpi.c.mjg 2010-04-21 10:07:07.690036629 -0400 -+++ linux-2.6.33.noarch/drivers/platform/x86/thinkpad_acpi.c 2010-04-21 10:07:24.227030266 -0400 -@@ -507,6 +507,7 @@ TPACPI_HANDLE(ec, root, "\\_SB.PCI0.ISA. +--- linux-2.6.33.noarch/drivers/platform/x86/thinkpad_acpi.c.mjg 2010-04-21 10:05:14.125030722 -0400 ++++ linux-2.6.33.noarch/drivers/platform/x86/thinkpad_acpi.c 2010-04-21 10:05:17.732030163 -0400 +@@ -506,6 +506,7 @@ TPACPI_HANDLE(ec, root, "\\_SB.PCI0.ISA. 
"\\_SB.PCI0.ISA.EC", /* A21e, A2xm/p, T20-22, X20-21 */ "\\_SB.PCI0.AD4S.EC0", /* i1400, R30 */ "\\_SB.PCI0.ICH3.EC0", /* R31 */ + "\\_SB.PCI0.LPC0.EC", /* X100e */ "\\_SB.PCI0.LPC.EC", /* all others */ ); - diff --git a/thinkpad-acpi-fix-backlight.patch b/thinkpad-acpi-fix-backlight.patch index 5ed2544..3b38dc5 100644 --- a/thinkpad-acpi-fix-backlight.patch +++ b/thinkpad-acpi-fix-backlight.patch @@ -1,7 +1,7 @@ -diff -up linux-2.6.34.noarch/drivers/platform/x86/thinkpad_acpi.c.orig linux-2.6.34.noarch/drivers/platform/x86/thinkpad_acpi.c ---- linux-2.6.34.noarch/drivers/platform/x86/thinkpad_acpi.c.orig 2010-05-17 16:28:13.254200070 -0400 -+++ linux-2.6.34.noarch/drivers/platform/x86/thinkpad_acpi.c 2010-05-17 16:29:56.471200083 -0400 -@@ -3397,7 +3397,7 @@ static int __init hotkey_init(struct ibm +diff -ur linux-2.6.33.noarch.bak/drivers/platform/x86/thinkpad_acpi.c linux-2.6.33.noarch/drivers/platform/x86/thinkpad_acpi.c +--- linux-2.6.33.noarch.bak/drivers/platform/x86/thinkpad_acpi.c 2010-05-17 15:56:10.864200185 -0400 ++++ linux-2.6.33.noarch/drivers/platform/x86/thinkpad_acpi.c 2010-05-17 16:12:20.232074844 -0400 +@@ -3396,7 +3396,7 @@ /* update bright_acpimode... */ tpacpi_check_std_acpi_brightness_support(); @@ -10,7 +10,7 @@ diff -up linux-2.6.34.noarch/drivers/platform/x86/thinkpad_acpi.c.orig linux-2.6 printk(TPACPI_INFO "This ThinkPad has standard ACPI backlight " "brightness control, supported by the ACPI " -@@ -6189,26 +6189,24 @@ static int __init brightness_init(struct +@@ -6187,26 +6187,24 @@ * going to publish a backlight interface */ b = tpacpi_check_std_acpi_brightness_support(); diff --git a/vhost_net-rollup.patch b/vhost_net-rollup.patch new file mode 100644 index 0000000..816d6d6 --- /dev/null +++ b/vhost_net-rollup.patch @@ -0,0 +1,2542 @@ +commit fe512819010f71694384621b34ec32f8be678bbb +Author: Michael S. Tsirkin +Date: Thu Jan 14 06:17:27 2010 +0000 + + vhost_net: a kernel-level virtio server + + What it is: vhost net is a character device that can be used to reduce + the number of system calls involved in virtio networking. + Existing virtio net code is used in the guest without modification. + + There's similarity with vringfd, with some differences and reduced scope + - uses eventfd for signalling + - structures can be moved around in memory at any time (good for + migration, bug work-arounds in userspace) + - write logging is supported (good for migration) + - support memory table and not just an offset (needed for kvm) + + common virtio related code has been put in a separate file vhost.c and + can be made into a separate module if/when more backends appear. I used + Rusty's lguest.c as the source for developing this part : this supplied + me with witty comments I wouldn't be able to write myself. + + What it is not: vhost net is not a bus, and not a generic new system + call. No assumptions are made on how guest performs hypercalls. + Userspace hypervisors are supported as well as kvm. + + How it works: Basically, we connect virtio frontend (configured by + userspace) to a backend. The backend could be a network device, or a tap + device. Backend is also configured by userspace, including vlan/mac + etc. + + Status: This works for me, and I haven't see any crashes. + Compared to userspace, people reported improved latency (as I save up to + 4 system calls per packet), as well as better bandwidth and CPU + utilization. 
+ + Features that I plan to look at in the future: + - mergeable buffers + - zero copy + - scalability tuning: figure out the best threading model to use + + Note on RCU usage (this is also documented in vhost.h, near + private_pointer which is the value protected by this variant of RCU): + what is happening is that the rcu_dereference() is being used in a + workqueue item. The role of rcu_read_lock() is taken on by the start of + execution of the workqueue item, of rcu_read_unlock() by the end of + execution of the workqueue item, and of synchronize_rcu() by + flush_workqueue()/flush_work(). In the future we might need to apply + some gcc attribute or sparse annotation to the function passed to + INIT_WORK(). Paul's ack below is for this RCU usage. + + (Includes fixes by Alan Cox , + David L Stevens , + Chris Wright ) + + Acked-by: Rusty Russell + Acked-by: Arnd Bergmann + Acked-by: "Paul E. McKenney" + Signed-off-by: Michael S. Tsirkin + Signed-off-by: David S. Miller + (cherry picked from commit 3a4d5c94e959359ece6d6b55045c3f046677f55c) + +commit 7b6a72cf7f96d5c75ce91e826372c6d488ac1e18 +Author: Michael S. Tsirkin +Date: Thu Jan 14 06:17:18 2010 +0000 + + mm: export use_mm/unuse_mm to modules + + vhost net module wants to do copy to/from user from a kernel thread, + which needs use_mm. Export it to modules. + + Acked-by: Andrea Arcangeli + Acked-by: Andrew Morton + Signed-off-by: Michael S. Tsirkin + Signed-off-by: David S. Miller + (cherry picked from commit 5da779c34ccff5e1e617892b6c8bd8260fb1f04c) + +commit 24f4237bf319b27e7c28544425e2cd52abe9332c +Author: Michael S. Tsirkin +Date: Thu Jan 14 06:17:09 2010 +0000 + + tun: export underlying socket + + Tun device looks similar to a packet socket + in that both pass complete frames from/to userspace. + + This patch fills in enough fields in the socket underlying tun driver + to support sendmsg/recvmsg operations, and message flags + MSG_TRUNC and MSG_DONTWAIT, and exports access to this socket + to modules. Regular read/write behaviour is unchanged. + + This way, code using raw sockets to inject packets + into a physical device, can support injecting + packets into host network stack almost without modification. + + First user of this interface will be vhost virtualization + accelerator. + + Signed-off-by: Michael S. Tsirkin + Acked-by: Herbert Xu + Acked-by: David S. Miller + Signed-off-by: David S. Miller + (cherry picked from commit 05c2828c72c4eabf62376adfe27bd24797621f62) + +diff --git a/MAINTAINERS b/MAINTAINERS +index 3f59162..0b4c8be 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -5795,6 +5795,15 @@ S: Maintained + F: Documentation/filesystems/vfat.txt + F: fs/fat/ + ++VIRTIO HOST (VHOST) ++M: "Michael S. Tsirkin" ++L: kvm@vger.kernel.org ++L: virtualization@lists.osdl.org ++L: netdev@vger.kernel.org ++S: Maintained ++F: drivers/vhost/ ++F: include/linux/vhost.h ++ + VIA RHINE NETWORK DRIVER + M: Roger Luethi + S: Maintained +diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig +index ef3e7be..01c7579 100644 +--- a/arch/ia64/kvm/Kconfig ++++ b/arch/ia64/kvm/Kconfig +@@ -47,6 +47,7 @@ config KVM_INTEL + Provides support for KVM on Itanium 2 processors equipped with the VT + extensions. + ++source drivers/vhost/Kconfig + source drivers/virtio/Kconfig + + endif # VIRTUALIZATION +diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig +index 07703f7..e28841f 100644 +--- a/arch/powerpc/kvm/Kconfig ++++ b/arch/powerpc/kvm/Kconfig +@@ -75,6 +75,7 @@ config KVM_E500 + + If unsure, say N. 
+ ++source drivers/vhost/Kconfig + source drivers/virtio/Kconfig + + endif # VIRTUALIZATION +diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig +index 6ee55ae..a725158 100644 +--- a/arch/s390/kvm/Kconfig ++++ b/arch/s390/kvm/Kconfig +@@ -35,6 +35,7 @@ config KVM + + # OK, it's a little counter-intuitive to do this, but it puts it neatly under + # the virtualization menu. ++source drivers/vhost/Kconfig + source drivers/virtio/Kconfig + + endif # VIRTUALIZATION +diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig +index 4cd4983..3c4d010 100644 +--- a/arch/x86/kvm/Kconfig ++++ b/arch/x86/kvm/Kconfig +@@ -65,6 +65,7 @@ config KVM_AMD + + # OK, it's a little counter-intuitive to do this, but it puts it neatly under + # the virtualization menu. ++source drivers/vhost/Kconfig + source drivers/lguest/Kconfig + source drivers/virtio/Kconfig + +diff --git a/drivers/Makefile b/drivers/Makefile +index 6ee53c7..81e3659 100644 +--- a/drivers/Makefile ++++ b/drivers/Makefile +@@ -106,6 +106,7 @@ obj-$(CONFIG_HID) += hid/ + obj-$(CONFIG_PPC_PS3) += ps3/ + obj-$(CONFIG_OF) += of/ + obj-$(CONFIG_SSB) += ssb/ ++obj-$(CONFIG_VHOST_NET) += vhost/ + obj-$(CONFIG_VLYNQ) += vlynq/ + obj-$(CONFIG_STAGING) += staging/ + obj-y += platform/ +diff --git a/drivers/net/tun.c b/drivers/net/tun.c +index 2834a01..5adb3d1 100644 +--- a/drivers/net/tun.c ++++ b/drivers/net/tun.c +@@ -144,6 +144,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file) + err = 0; + tfile->tun = tun; + tun->tfile = tfile; ++ tun->socket.file = file; + dev_hold(tun->dev); + sock_hold(tun->socket.sk); + atomic_inc(&tfile->count); +@@ -158,6 +159,7 @@ static void __tun_detach(struct tun_struct *tun) + /* Detach from net device */ + netif_tx_lock_bh(tun->dev); + tun->tfile = NULL; ++ tun->socket.file = NULL; + netif_tx_unlock_bh(tun->dev); + + /* Drop read queue */ +@@ -387,7 +389,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) + /* Notify and wake up reader process */ + if (tun->flags & TUN_FASYNC) + kill_fasync(&tun->fasync, SIGIO, POLL_IN); +- wake_up_interruptible(&tun->socket.wait); ++ wake_up_interruptible_poll(&tun->socket.wait, POLLIN | ++ POLLRDNORM | POLLRDBAND); + return NETDEV_TX_OK; + + drop: +@@ -743,7 +746,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun, + len = min_t(int, skb->len, len); + + skb_copy_datagram_const_iovec(skb, 0, iv, total, len); +- total += len; ++ total += skb->len; + + tun->dev->stats.tx_packets++; + tun->dev->stats.tx_bytes += len; +@@ -751,34 +754,23 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun, + return total; + } + +-static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, +- unsigned long count, loff_t pos) ++static ssize_t tun_do_read(struct tun_struct *tun, ++ struct kiocb *iocb, const struct iovec *iv, ++ ssize_t len, int noblock) + { +- struct file *file = iocb->ki_filp; +- struct tun_file *tfile = file->private_data; +- struct tun_struct *tun = __tun_get(tfile); + DECLARE_WAITQUEUE(wait, current); + struct sk_buff *skb; +- ssize_t len, ret = 0; +- +- if (!tun) +- return -EBADFD; ++ ssize_t ret = 0; + + DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name); + +- len = iov_length(iv, count); +- if (len < 0) { +- ret = -EINVAL; +- goto out; +- } +- + add_wait_queue(&tun->socket.wait, &wait); + while (len) { + current->state = TASK_INTERRUPTIBLE; + + /* Read frames from the queue */ + if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) { +- if (file->f_flags & O_NONBLOCK) { ++ if 
(noblock) { + ret = -EAGAIN; + break; + } +@@ -805,6 +797,27 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, + current->state = TASK_RUNNING; + remove_wait_queue(&tun->socket.wait, &wait); + ++ return ret; ++} ++ ++static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, ++ unsigned long count, loff_t pos) ++{ ++ struct file *file = iocb->ki_filp; ++ struct tun_file *tfile = file->private_data; ++ struct tun_struct *tun = __tun_get(tfile); ++ ssize_t len, ret; ++ ++ if (!tun) ++ return -EBADFD; ++ len = iov_length(iv, count); ++ if (len < 0) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK); ++ ret = min_t(ssize_t, ret, len); + out: + tun_put(tun); + return ret; +@@ -847,7 +860,8 @@ static void tun_sock_write_space(struct sock *sk) + return; + + if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) +- wake_up_interruptible_sync(sk->sk_sleep); ++ wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT | ++ POLLWRNORM | POLLWRBAND); + + tun = tun_sk(sk)->tun; + kill_fasync(&tun->fasync, SIGIO, POLL_OUT); +@@ -858,6 +872,37 @@ static void tun_sock_destruct(struct sock *sk) + free_netdev(tun_sk(sk)->tun->dev); + } + ++static int tun_sendmsg(struct kiocb *iocb, struct socket *sock, ++ struct msghdr *m, size_t total_len) ++{ ++ struct tun_struct *tun = container_of(sock, struct tun_struct, socket); ++ return tun_get_user(tun, m->msg_iov, total_len, ++ m->msg_flags & MSG_DONTWAIT); ++} ++ ++static int tun_recvmsg(struct kiocb *iocb, struct socket *sock, ++ struct msghdr *m, size_t total_len, ++ int flags) ++{ ++ struct tun_struct *tun = container_of(sock, struct tun_struct, socket); ++ int ret; ++ if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) ++ return -EINVAL; ++ ret = tun_do_read(tun, iocb, m->msg_iov, total_len, ++ flags & MSG_DONTWAIT); ++ if (ret > total_len) { ++ m->msg_flags |= MSG_TRUNC; ++ ret = flags & MSG_TRUNC ? ret : total_len; ++ } ++ return ret; ++} ++ ++/* Ops structure to mimic raw sockets with tun */ ++static const struct proto_ops tun_socket_ops = { ++ .sendmsg = tun_sendmsg, ++ .recvmsg = tun_recvmsg, ++}; ++ + static struct proto tun_proto = { + .name = "tun", + .owner = THIS_MODULE, +@@ -986,6 +1031,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) + goto err_free_dev; + + init_waitqueue_head(&tun->socket.wait); ++ tun->socket.ops = &tun_socket_ops; + sock_init_data(&tun->socket, sk); + sk->sk_write_space = tun_sock_write_space; + sk->sk_sndbuf = INT_MAX; +@@ -1525,6 +1571,23 @@ static void tun_cleanup(void) + rtnl_link_unregister(&tun_link_ops); + } + ++/* Get an underlying socket object from tun file. Returns error unless file is ++ * attached to a device. The returned object works like a packet socket, it ++ * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for ++ * holding a reference to the file for as long as the socket is in use. 
*/ ++struct socket *tun_get_socket(struct file *file) ++{ ++ struct tun_struct *tun; ++ if (file->f_op != &tun_fops) ++ return ERR_PTR(-EINVAL); ++ tun = tun_get(file); ++ if (!tun) ++ return ERR_PTR(-EBADFD); ++ tun_put(tun); ++ return &tun->socket; ++} ++EXPORT_SYMBOL_GPL(tun_get_socket); ++ + module_init(tun_init); + module_exit(tun_cleanup); + MODULE_DESCRIPTION(DRV_DESCRIPTION); +diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig +new file mode 100644 +index 0000000..9f409f4 +--- /dev/null ++++ b/drivers/vhost/Kconfig +@@ -0,0 +1,11 @@ ++config VHOST_NET ++ tristate "Host kernel accelerator for virtio net (EXPERIMENTAL)" ++ depends on NET && EVENTFD && EXPERIMENTAL ++ ---help--- ++ This kernel module can be loaded in host kernel to accelerate ++ guest networking with virtio_net. Not to be confused with virtio_net ++ module itself which needs to be loaded in guest kernel. ++ ++ To compile this driver as a module, choose M here: the module will ++ be called vhost_net. ++ +diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile +new file mode 100644 +index 0000000..72dd020 +--- /dev/null ++++ b/drivers/vhost/Makefile +@@ -0,0 +1,2 @@ ++obj-$(CONFIG_VHOST_NET) += vhost_net.o ++vhost_net-y := vhost.o net.o +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c +new file mode 100644 +index 0000000..4c89283 +--- /dev/null ++++ b/drivers/vhost/net.c +@@ -0,0 +1,661 @@ ++/* Copyright (C) 2009 Red Hat, Inc. ++ * Author: Michael S. Tsirkin ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2. ++ * ++ * virtio-net server in host kernel. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "vhost.h" ++ ++/* Max number of bytes transferred before requeueing the job. ++ * Using this limit prevents one virtqueue from starving others. */ ++#define VHOST_NET_WEIGHT 0x80000 ++ ++enum { ++ VHOST_NET_VQ_RX = 0, ++ VHOST_NET_VQ_TX = 1, ++ VHOST_NET_VQ_MAX = 2, ++}; ++ ++enum vhost_net_poll_state { ++ VHOST_NET_POLL_DISABLED = 0, ++ VHOST_NET_POLL_STARTED = 1, ++ VHOST_NET_POLL_STOPPED = 2, ++}; ++ ++struct vhost_net { ++ struct vhost_dev dev; ++ struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX]; ++ struct vhost_poll poll[VHOST_NET_VQ_MAX]; ++ /* Tells us whether we are polling a socket for TX. ++ * We only do this when socket buffer fills up. ++ * Protected by tx vq lock. */ ++ enum vhost_net_poll_state tx_poll_state; ++}; ++ ++/* Pop first len bytes from iovec. Return number of segments used. 
*/ ++static int move_iovec_hdr(struct iovec *from, struct iovec *to, ++ size_t len, int iov_count) ++{ ++ int seg = 0; ++ size_t size; ++ while (len && seg < iov_count) { ++ size = min(from->iov_len, len); ++ to->iov_base = from->iov_base; ++ to->iov_len = size; ++ from->iov_len -= size; ++ from->iov_base += size; ++ len -= size; ++ ++from; ++ ++to; ++ ++seg; ++ } ++ return seg; ++} ++ ++/* Caller must have TX VQ lock */ ++static void tx_poll_stop(struct vhost_net *net) ++{ ++ if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED)) ++ return; ++ vhost_poll_stop(net->poll + VHOST_NET_VQ_TX); ++ net->tx_poll_state = VHOST_NET_POLL_STOPPED; ++} ++ ++/* Caller must have TX VQ lock */ ++static void tx_poll_start(struct vhost_net *net, struct socket *sock) ++{ ++ if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED)) ++ return; ++ vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file); ++ net->tx_poll_state = VHOST_NET_POLL_STARTED; ++} ++ ++/* Expects to be always run from workqueue - which acts as ++ * read-size critical section for our kind of RCU. */ ++static void handle_tx(struct vhost_net *net) ++{ ++ struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX]; ++ unsigned head, out, in, s; ++ struct msghdr msg = { ++ .msg_name = NULL, ++ .msg_namelen = 0, ++ .msg_control = NULL, ++ .msg_controllen = 0, ++ .msg_iov = vq->iov, ++ .msg_flags = MSG_DONTWAIT, ++ }; ++ size_t len, total_len = 0; ++ int err, wmem; ++ size_t hdr_size; ++ struct socket *sock = rcu_dereference(vq->private_data); ++ if (!sock) ++ return; ++ ++ wmem = atomic_read(&sock->sk->sk_wmem_alloc); ++ if (wmem >= sock->sk->sk_sndbuf) ++ return; ++ ++ use_mm(net->dev.mm); ++ mutex_lock(&vq->mutex); ++ vhost_disable_notify(vq); ++ ++ if (wmem < sock->sk->sk_sndbuf * 2) ++ tx_poll_stop(net); ++ hdr_size = vq->hdr_size; ++ ++ for (;;) { ++ head = vhost_get_vq_desc(&net->dev, vq, vq->iov, ++ ARRAY_SIZE(vq->iov), ++ &out, &in, ++ NULL, NULL); ++ /* Nothing new? Wait for eventfd to tell us they refilled. */ ++ if (head == vq->num) { ++ wmem = atomic_read(&sock->sk->sk_wmem_alloc); ++ if (wmem >= sock->sk->sk_sndbuf * 3 / 4) { ++ tx_poll_start(net, sock); ++ set_bit(SOCK_ASYNC_NOSPACE, &sock->flags); ++ break; ++ } ++ if (unlikely(vhost_enable_notify(vq))) { ++ vhost_disable_notify(vq); ++ continue; ++ } ++ break; ++ } ++ if (in) { ++ vq_err(vq, "Unexpected descriptor format for TX: " ++ "out %d, int %d\n", out, in); ++ break; ++ } ++ /* Skip header. TODO: support TSO. */ ++ s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out); ++ msg.msg_iovlen = out; ++ len = iov_length(vq->iov, out); ++ /* Sanity check */ ++ if (!len) { ++ vq_err(vq, "Unexpected header len for TX: " ++ "%zd expected %zd\n", ++ iov_length(vq->hdr, s), hdr_size); ++ break; ++ } ++ /* TODO: Check specific error and bomb out unless ENOBUFS? */ ++ err = sock->ops->sendmsg(NULL, sock, &msg, len); ++ if (unlikely(err < 0)) { ++ vhost_discard_vq_desc(vq); ++ tx_poll_start(net, sock); ++ break; ++ } ++ if (err != len) ++ pr_err("Truncated TX packet: " ++ " len %d != %zd\n", err, len); ++ vhost_add_used_and_signal(&net->dev, vq, head, 0); ++ total_len += len; ++ if (unlikely(total_len >= VHOST_NET_WEIGHT)) { ++ vhost_poll_queue(&vq->poll); ++ break; ++ } ++ } ++ ++ mutex_unlock(&vq->mutex); ++ unuse_mm(net->dev.mm); ++} ++ ++/* Expects to be always run from workqueue - which acts as ++ * read-size critical section for our kind of RCU. 
*/ ++static void handle_rx(struct vhost_net *net) ++{ ++ struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX]; ++ unsigned head, out, in, log, s; ++ struct vhost_log *vq_log; ++ struct msghdr msg = { ++ .msg_name = NULL, ++ .msg_namelen = 0, ++ .msg_control = NULL, /* FIXME: get and handle RX aux data. */ ++ .msg_controllen = 0, ++ .msg_iov = vq->iov, ++ .msg_flags = MSG_DONTWAIT, ++ }; ++ ++ struct virtio_net_hdr hdr = { ++ .flags = 0, ++ .gso_type = VIRTIO_NET_HDR_GSO_NONE ++ }; ++ ++ size_t len, total_len = 0; ++ int err; ++ size_t hdr_size; ++ struct socket *sock = rcu_dereference(vq->private_data); ++ if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue)) ++ return; ++ ++ use_mm(net->dev.mm); ++ mutex_lock(&vq->mutex); ++ vhost_disable_notify(vq); ++ hdr_size = vq->hdr_size; ++ ++ vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ? ++ vq->log : NULL; ++ ++ for (;;) { ++ head = vhost_get_vq_desc(&net->dev, vq, vq->iov, ++ ARRAY_SIZE(vq->iov), ++ &out, &in, ++ vq_log, &log); ++ /* OK, now we need to know about added descriptors. */ ++ if (head == vq->num) { ++ if (unlikely(vhost_enable_notify(vq))) { ++ /* They have slipped one in as we were ++ * doing that: check again. */ ++ vhost_disable_notify(vq); ++ continue; ++ } ++ /* Nothing new? Wait for eventfd to tell us ++ * they refilled. */ ++ break; ++ } ++ /* We don't need to be notified again. */ ++ if (out) { ++ vq_err(vq, "Unexpected descriptor format for RX: " ++ "out %d, int %d\n", ++ out, in); ++ break; ++ } ++ /* Skip header. TODO: support TSO/mergeable rx buffers. */ ++ s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, in); ++ msg.msg_iovlen = in; ++ len = iov_length(vq->iov, in); ++ /* Sanity check */ ++ if (!len) { ++ vq_err(vq, "Unexpected header len for RX: " ++ "%zd expected %zd\n", ++ iov_length(vq->hdr, s), hdr_size); ++ break; ++ } ++ err = sock->ops->recvmsg(NULL, sock, &msg, ++ len, MSG_DONTWAIT | MSG_TRUNC); ++ /* TODO: Check specific error and bomb out unless EAGAIN? */ ++ if (err < 0) { ++ vhost_discard_vq_desc(vq); ++ break; ++ } ++ /* TODO: Should check and handle checksum. 
*/ ++ if (err > len) { ++ pr_err("Discarded truncated rx packet: " ++ " len %d > %zd\n", err, len); ++ vhost_discard_vq_desc(vq); ++ continue; ++ } ++ len = err; ++ err = memcpy_toiovec(vq->hdr, (unsigned char *)&hdr, hdr_size); ++ if (err) { ++ vq_err(vq, "Unable to write vnet_hdr at addr %p: %d\n", ++ vq->iov->iov_base, err); ++ break; ++ } ++ len += hdr_size; ++ vhost_add_used_and_signal(&net->dev, vq, head, len); ++ if (unlikely(vq_log)) ++ vhost_log_write(vq, vq_log, log, len); ++ total_len += len; ++ if (unlikely(total_len >= VHOST_NET_WEIGHT)) { ++ vhost_poll_queue(&vq->poll); ++ break; ++ } ++ } ++ ++ mutex_unlock(&vq->mutex); ++ unuse_mm(net->dev.mm); ++} ++ ++static void handle_tx_kick(struct work_struct *work) ++{ ++ struct vhost_virtqueue *vq; ++ struct vhost_net *net; ++ vq = container_of(work, struct vhost_virtqueue, poll.work); ++ net = container_of(vq->dev, struct vhost_net, dev); ++ handle_tx(net); ++} ++ ++static void handle_rx_kick(struct work_struct *work) ++{ ++ struct vhost_virtqueue *vq; ++ struct vhost_net *net; ++ vq = container_of(work, struct vhost_virtqueue, poll.work); ++ net = container_of(vq->dev, struct vhost_net, dev); ++ handle_rx(net); ++} ++ ++static void handle_tx_net(struct work_struct *work) ++{ ++ struct vhost_net *net; ++ net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_TX].work); ++ handle_tx(net); ++} ++ ++static void handle_rx_net(struct work_struct *work) ++{ ++ struct vhost_net *net; ++ net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_RX].work); ++ handle_rx(net); ++} ++ ++static int vhost_net_open(struct inode *inode, struct file *f) ++{ ++ struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL); ++ int r; ++ if (!n) ++ return -ENOMEM; ++ n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick; ++ n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick; ++ r = vhost_dev_init(&n->dev, n->vqs, VHOST_NET_VQ_MAX); ++ if (r < 0) { ++ kfree(n); ++ return r; ++ } ++ ++ vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT); ++ vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN); ++ n->tx_poll_state = VHOST_NET_POLL_DISABLED; ++ ++ f->private_data = n; ++ ++ return 0; ++} ++ ++static void vhost_net_disable_vq(struct vhost_net *n, ++ struct vhost_virtqueue *vq) ++{ ++ if (!vq->private_data) ++ return; ++ if (vq == n->vqs + VHOST_NET_VQ_TX) { ++ tx_poll_stop(n); ++ n->tx_poll_state = VHOST_NET_POLL_DISABLED; ++ } else ++ vhost_poll_stop(n->poll + VHOST_NET_VQ_RX); ++} ++ ++static void vhost_net_enable_vq(struct vhost_net *n, ++ struct vhost_virtqueue *vq) ++{ ++ struct socket *sock = vq->private_data; ++ if (!sock) ++ return; ++ if (vq == n->vqs + VHOST_NET_VQ_TX) { ++ n->tx_poll_state = VHOST_NET_POLL_STOPPED; ++ tx_poll_start(n, sock); ++ } else ++ vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file); ++} ++ ++static struct socket *vhost_net_stop_vq(struct vhost_net *n, ++ struct vhost_virtqueue *vq) ++{ ++ struct socket *sock; ++ ++ mutex_lock(&vq->mutex); ++ sock = vq->private_data; ++ vhost_net_disable_vq(n, vq); ++ rcu_assign_pointer(vq->private_data, NULL); ++ mutex_unlock(&vq->mutex); ++ return sock; ++} ++ ++static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock, ++ struct socket **rx_sock) ++{ ++ *tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX); ++ *rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX); ++} ++ ++static void vhost_net_flush_vq(struct vhost_net *n, int index) ++{ ++ vhost_poll_flush(n->poll + index); ++ vhost_poll_flush(&n->dev.vqs[index].poll); ++} ++ 
++static void vhost_net_flush(struct vhost_net *n) ++{ ++ vhost_net_flush_vq(n, VHOST_NET_VQ_TX); ++ vhost_net_flush_vq(n, VHOST_NET_VQ_RX); ++} ++ ++static int vhost_net_release(struct inode *inode, struct file *f) ++{ ++ struct vhost_net *n = f->private_data; ++ struct socket *tx_sock; ++ struct socket *rx_sock; ++ ++ vhost_net_stop(n, &tx_sock, &rx_sock); ++ vhost_net_flush(n); ++ vhost_dev_cleanup(&n->dev); ++ if (tx_sock) ++ fput(tx_sock->file); ++ if (rx_sock) ++ fput(rx_sock->file); ++ /* We do an extra flush before freeing memory, ++ * since jobs can re-queue themselves. */ ++ vhost_net_flush(n); ++ kfree(n); ++ return 0; ++} ++ ++static struct socket *get_raw_socket(int fd) ++{ ++ struct { ++ struct sockaddr_ll sa; ++ char buf[MAX_ADDR_LEN]; ++ } uaddr; ++ int uaddr_len = sizeof uaddr, r; ++ struct socket *sock = sockfd_lookup(fd, &r); ++ if (!sock) ++ return ERR_PTR(-ENOTSOCK); ++ ++ /* Parameter checking */ ++ if (sock->sk->sk_type != SOCK_RAW) { ++ r = -ESOCKTNOSUPPORT; ++ goto err; ++ } ++ ++ r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, ++ &uaddr_len, 0); ++ if (r) ++ goto err; ++ ++ if (uaddr.sa.sll_family != AF_PACKET) { ++ r = -EPFNOSUPPORT; ++ goto err; ++ } ++ return sock; ++err: ++ fput(sock->file); ++ return ERR_PTR(r); ++} ++ ++static struct socket *get_tun_socket(int fd) ++{ ++ struct file *file = fget(fd); ++ struct socket *sock; ++ if (!file) ++ return ERR_PTR(-EBADF); ++ sock = tun_get_socket(file); ++ if (IS_ERR(sock)) ++ fput(file); ++ return sock; ++} ++ ++static struct socket *get_socket(int fd) ++{ ++ struct socket *sock; ++ /* special case to disable backend */ ++ if (fd == -1) ++ return NULL; ++ sock = get_raw_socket(fd); ++ if (!IS_ERR(sock)) ++ return sock; ++ sock = get_tun_socket(fd); ++ if (!IS_ERR(sock)) ++ return sock; ++ return ERR_PTR(-ENOTSOCK); ++} ++ ++static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) ++{ ++ struct socket *sock, *oldsock; ++ struct vhost_virtqueue *vq; ++ int r; ++ ++ mutex_lock(&n->dev.mutex); ++ r = vhost_dev_check_owner(&n->dev); ++ if (r) ++ goto err; ++ ++ if (index >= VHOST_NET_VQ_MAX) { ++ r = -ENOBUFS; ++ goto err; ++ } ++ vq = n->vqs + index; ++ mutex_lock(&vq->mutex); ++ ++ /* Verify that ring has been setup correctly. */ ++ if (!vhost_vq_access_ok(vq)) { ++ r = -EFAULT; ++ goto err; ++ } ++ sock = get_socket(fd); ++ if (IS_ERR(sock)) { ++ r = PTR_ERR(sock); ++ goto err; ++ } ++ ++ /* start polling new socket */ ++ oldsock = vq->private_data; ++ if (sock == oldsock) ++ goto done; ++ ++ vhost_net_disable_vq(n, vq); ++ rcu_assign_pointer(vq->private_data, sock); ++ vhost_net_enable_vq(n, vq); ++ mutex_unlock(&vq->mutex); ++done: ++ if (oldsock) { ++ vhost_net_flush_vq(n, index); ++ fput(oldsock->file); ++ } ++err: ++ mutex_unlock(&n->dev.mutex); ++ return r; ++} ++ ++static long vhost_net_reset_owner(struct vhost_net *n) ++{ ++ struct socket *tx_sock = NULL; ++ struct socket *rx_sock = NULL; ++ long err; ++ mutex_lock(&n->dev.mutex); ++ err = vhost_dev_check_owner(&n->dev); ++ if (err) ++ goto done; ++ vhost_net_stop(n, &tx_sock, &rx_sock); ++ vhost_net_flush(n); ++ err = vhost_dev_reset_owner(&n->dev); ++done: ++ mutex_unlock(&n->dev.mutex); ++ if (tx_sock) ++ fput(tx_sock->file); ++ if (rx_sock) ++ fput(rx_sock->file); ++ return err; ++} ++ ++static int vhost_net_set_features(struct vhost_net *n, u64 features) ++{ ++ size_t hdr_size = features & (1 << VHOST_NET_F_VIRTIO_NET_HDR) ? 
++ sizeof(struct virtio_net_hdr) : 0; ++ int i; ++ mutex_lock(&n->dev.mutex); ++ if ((features & (1 << VHOST_F_LOG_ALL)) && ++ !vhost_log_access_ok(&n->dev)) { ++ mutex_unlock(&n->dev.mutex); ++ return -EFAULT; ++ } ++ n->dev.acked_features = features; ++ smp_wmb(); ++ for (i = 0; i < VHOST_NET_VQ_MAX; ++i) { ++ mutex_lock(&n->vqs[i].mutex); ++ n->vqs[i].hdr_size = hdr_size; ++ mutex_unlock(&n->vqs[i].mutex); ++ } ++ vhost_net_flush(n); ++ mutex_unlock(&n->dev.mutex); ++ return 0; ++} ++ ++static long vhost_net_ioctl(struct file *f, unsigned int ioctl, ++ unsigned long arg) ++{ ++ struct vhost_net *n = f->private_data; ++ void __user *argp = (void __user *)arg; ++ u64 __user *featurep = argp; ++ struct vhost_vring_file backend; ++ u64 features; ++ int r; ++ switch (ioctl) { ++ case VHOST_NET_SET_BACKEND: ++ r = copy_from_user(&backend, argp, sizeof backend); ++ if (r < 0) ++ return r; ++ return vhost_net_set_backend(n, backend.index, backend.fd); ++ case VHOST_GET_FEATURES: ++ features = VHOST_FEATURES; ++ return copy_to_user(featurep, &features, sizeof features); ++ case VHOST_SET_FEATURES: ++ r = copy_from_user(&features, featurep, sizeof features); ++ if (r < 0) ++ return r; ++ if (features & ~VHOST_FEATURES) ++ return -EOPNOTSUPP; ++ return vhost_net_set_features(n, features); ++ case VHOST_RESET_OWNER: ++ return vhost_net_reset_owner(n); ++ default: ++ mutex_lock(&n->dev.mutex); ++ r = vhost_dev_ioctl(&n->dev, ioctl, arg); ++ vhost_net_flush(n); ++ mutex_unlock(&n->dev.mutex); ++ return r; ++ } ++} ++ ++#ifdef CONFIG_COMPAT ++static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl, ++ unsigned long arg) ++{ ++ return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg)); ++} ++#endif ++ ++const static struct file_operations vhost_net_fops = { ++ .owner = THIS_MODULE, ++ .release = vhost_net_release, ++ .unlocked_ioctl = vhost_net_ioctl, ++#ifdef CONFIG_COMPAT ++ .compat_ioctl = vhost_net_compat_ioctl, ++#endif ++ .open = vhost_net_open, ++}; ++ ++static struct miscdevice vhost_net_misc = { ++ VHOST_NET_MINOR, ++ "vhost-net", ++ &vhost_net_fops, ++}; ++ ++int vhost_net_init(void) ++{ ++ int r = vhost_init(); ++ if (r) ++ goto err_init; ++ r = misc_register(&vhost_net_misc); ++ if (r) ++ goto err_reg; ++ return 0; ++err_reg: ++ vhost_cleanup(); ++err_init: ++ return r; ++ ++} ++module_init(vhost_net_init); ++ ++void vhost_net_exit(void) ++{ ++ misc_deregister(&vhost_net_misc); ++ vhost_cleanup(); ++} ++module_exit(vhost_net_exit); ++ ++MODULE_VERSION("0.0.1"); ++MODULE_LICENSE("GPL v2"); ++MODULE_AUTHOR("Michael S. Tsirkin"); ++MODULE_DESCRIPTION("Host kernel accelerator for virtio net"); +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c +new file mode 100644 +index 0000000..c8c25db +--- /dev/null ++++ b/drivers/vhost/vhost.c +@@ -0,0 +1,1098 @@ ++/* Copyright (C) 2009 Red Hat, Inc. ++ * Copyright (C) 2006 Rusty Russell IBM Corporation ++ * ++ * Author: Michael S. Tsirkin ++ * ++ * Inspiration, some code, and most witty comments come from ++ * Documentation/lguest/lguest.c, by Rusty Russell ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2. ++ * ++ * Generic code for virtio server in host kernel. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include ++ ++#include "vhost.h" ++ ++enum { ++ VHOST_MEMORY_MAX_NREGIONS = 64, ++ VHOST_MEMORY_F_LOG = 0x1, ++}; ++ ++static struct workqueue_struct *vhost_workqueue; ++ ++static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh, ++ poll_table *pt) ++{ ++ struct vhost_poll *poll; ++ poll = container_of(pt, struct vhost_poll, table); ++ ++ poll->wqh = wqh; ++ add_wait_queue(wqh, &poll->wait); ++} ++ ++static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync, ++ void *key) ++{ ++ struct vhost_poll *poll; ++ poll = container_of(wait, struct vhost_poll, wait); ++ if (!((unsigned long)key & poll->mask)) ++ return 0; ++ ++ queue_work(vhost_workqueue, &poll->work); ++ return 0; ++} ++ ++/* Init poll structure */ ++void vhost_poll_init(struct vhost_poll *poll, work_func_t func, ++ unsigned long mask) ++{ ++ INIT_WORK(&poll->work, func); ++ init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup); ++ init_poll_funcptr(&poll->table, vhost_poll_func); ++ poll->mask = mask; ++} ++ ++/* Start polling a file. We add ourselves to file's wait queue. The caller must ++ * keep a reference to a file until after vhost_poll_stop is called. */ ++void vhost_poll_start(struct vhost_poll *poll, struct file *file) ++{ ++ unsigned long mask; ++ mask = file->f_op->poll(file, &poll->table); ++ if (mask) ++ vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); ++} ++ ++/* Stop polling a file. After this function returns, it becomes safe to drop the ++ * file reference. You must also flush afterwards. */ ++void vhost_poll_stop(struct vhost_poll *poll) ++{ ++ remove_wait_queue(poll->wqh, &poll->wait); ++} ++ ++/* Flush any work that has been scheduled. When calling this, don't hold any ++ * locks that are also used by the callback. */ ++void vhost_poll_flush(struct vhost_poll *poll) ++{ ++ flush_work(&poll->work); ++} ++ ++void vhost_poll_queue(struct vhost_poll *poll) ++{ ++ queue_work(vhost_workqueue, &poll->work); ++} ++ ++static void vhost_vq_reset(struct vhost_dev *dev, ++ struct vhost_virtqueue *vq) ++{ ++ vq->num = 1; ++ vq->desc = NULL; ++ vq->avail = NULL; ++ vq->used = NULL; ++ vq->last_avail_idx = 0; ++ vq->avail_idx = 0; ++ vq->last_used_idx = 0; ++ vq->used_flags = 0; ++ vq->used_flags = 0; ++ vq->log_used = false; ++ vq->log_addr = -1ull; ++ vq->hdr_size = 0; ++ vq->private_data = NULL; ++ vq->log_base = NULL; ++ vq->error_ctx = NULL; ++ vq->error = NULL; ++ vq->kick = NULL; ++ vq->call_ctx = NULL; ++ vq->call = NULL; ++} ++ ++long vhost_dev_init(struct vhost_dev *dev, ++ struct vhost_virtqueue *vqs, int nvqs) ++{ ++ int i; ++ dev->vqs = vqs; ++ dev->nvqs = nvqs; ++ mutex_init(&dev->mutex); ++ dev->log_ctx = NULL; ++ dev->log_file = NULL; ++ dev->memory = NULL; ++ dev->mm = NULL; ++ ++ for (i = 0; i < dev->nvqs; ++i) { ++ dev->vqs[i].dev = dev; ++ mutex_init(&dev->vqs[i].mutex); ++ vhost_vq_reset(dev, dev->vqs + i); ++ if (dev->vqs[i].handle_kick) ++ vhost_poll_init(&dev->vqs[i].poll, ++ dev->vqs[i].handle_kick, ++ POLLIN); ++ } ++ return 0; ++} ++ ++/* Caller should have device mutex */ ++long vhost_dev_check_owner(struct vhost_dev *dev) ++{ ++ /* Are you the owner? If not, I don't think you mean to do that */ ++ return dev->mm == current->mm ? 0 : -EPERM; ++} ++ ++/* Caller should have device mutex */ ++static long vhost_dev_set_owner(struct vhost_dev *dev) ++{ ++ /* Is there an owner already? 
*/ ++ if (dev->mm) ++ return -EBUSY; ++ /* No owner, become one */ ++ dev->mm = get_task_mm(current); ++ return 0; ++} ++ ++/* Caller should have device mutex */ ++long vhost_dev_reset_owner(struct vhost_dev *dev) ++{ ++ struct vhost_memory *memory; ++ ++ /* Restore memory to default empty mapping. */ ++ memory = kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL); ++ if (!memory) ++ return -ENOMEM; ++ ++ vhost_dev_cleanup(dev); ++ ++ memory->nregions = 0; ++ dev->memory = memory; ++ return 0; ++} ++ ++/* Caller should have device mutex */ ++void vhost_dev_cleanup(struct vhost_dev *dev) ++{ ++ int i; ++ for (i = 0; i < dev->nvqs; ++i) { ++ if (dev->vqs[i].kick && dev->vqs[i].handle_kick) { ++ vhost_poll_stop(&dev->vqs[i].poll); ++ vhost_poll_flush(&dev->vqs[i].poll); ++ } ++ if (dev->vqs[i].error_ctx) ++ eventfd_ctx_put(dev->vqs[i].error_ctx); ++ if (dev->vqs[i].error) ++ fput(dev->vqs[i].error); ++ if (dev->vqs[i].kick) ++ fput(dev->vqs[i].kick); ++ if (dev->vqs[i].call_ctx) ++ eventfd_ctx_put(dev->vqs[i].call_ctx); ++ if (dev->vqs[i].call) ++ fput(dev->vqs[i].call); ++ vhost_vq_reset(dev, dev->vqs + i); ++ } ++ if (dev->log_ctx) ++ eventfd_ctx_put(dev->log_ctx); ++ dev->log_ctx = NULL; ++ if (dev->log_file) ++ fput(dev->log_file); ++ dev->log_file = NULL; ++ /* No one will access memory at this point */ ++ kfree(dev->memory); ++ dev->memory = NULL; ++ if (dev->mm) ++ mmput(dev->mm); ++ dev->mm = NULL; ++} ++ ++static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) ++{ ++ u64 a = addr / VHOST_PAGE_SIZE / 8; ++ /* Make sure 64 bit math will not overflow. */ ++ if (a > ULONG_MAX - (unsigned long)log_base || ++ a + (unsigned long)log_base > ULONG_MAX) ++ return -EFAULT; ++ ++ return access_ok(VERIFY_WRITE, log_base + a, ++ (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8); ++} ++ ++/* Caller should have vq mutex and device mutex. */ ++static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem, ++ int log_all) ++{ ++ int i; ++ for (i = 0; i < mem->nregions; ++i) { ++ struct vhost_memory_region *m = mem->regions + i; ++ unsigned long a = m->userspace_addr; ++ if (m->memory_size > ULONG_MAX) ++ return 0; ++ else if (!access_ok(VERIFY_WRITE, (void __user *)a, ++ m->memory_size)) ++ return 0; ++ else if (log_all && !log_access_ok(log_base, ++ m->guest_phys_addr, ++ m->memory_size)) ++ return 0; ++ } ++ return 1; ++} ++ ++/* Can we switch to this memory table? */ ++/* Caller should have device mutex but not vq mutex */ ++static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem, ++ int log_all) ++{ ++ int i; ++ for (i = 0; i < d->nvqs; ++i) { ++ int ok; ++ mutex_lock(&d->vqs[i].mutex); ++ /* If ring is inactive, will check when it's enabled. */ ++ if (d->vqs[i].private_data) ++ ok = vq_memory_access_ok(d->vqs[i].log_base, mem, ++ log_all); ++ else ++ ok = 1; ++ mutex_unlock(&d->vqs[i].mutex); ++ if (!ok) ++ return 0; ++ } ++ return 1; ++} ++ ++static int vq_access_ok(unsigned int num, ++ struct vring_desc __user *desc, ++ struct vring_avail __user *avail, ++ struct vring_used __user *used) ++{ ++ return access_ok(VERIFY_READ, desc, num * sizeof *desc) && ++ access_ok(VERIFY_READ, avail, ++ sizeof *avail + num * sizeof *avail->ring) && ++ access_ok(VERIFY_WRITE, used, ++ sizeof *used + num * sizeof *used->ring); ++} ++ ++/* Can we log writes? 
*/ ++/* Caller should have device mutex but not vq mutex */ ++int vhost_log_access_ok(struct vhost_dev *dev) ++{ ++ return memory_access_ok(dev, dev->memory, 1); ++} ++ ++/* Verify access for write logging. */ ++/* Caller should have vq mutex and device mutex */ ++static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base) ++{ ++ return vq_memory_access_ok(log_base, vq->dev->memory, ++ vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) && ++ (!vq->log_used || log_access_ok(log_base, vq->log_addr, ++ sizeof *vq->used + ++ vq->num * sizeof *vq->used->ring)); ++} ++ ++/* Can we start vq? */ ++/* Caller should have vq mutex and device mutex */ ++int vhost_vq_access_ok(struct vhost_virtqueue *vq) ++{ ++ return vq_access_ok(vq->num, vq->desc, vq->avail, vq->used) && ++ vq_log_access_ok(vq, vq->log_base); ++} ++ ++static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) ++{ ++ struct vhost_memory mem, *newmem, *oldmem; ++ unsigned long size = offsetof(struct vhost_memory, regions); ++ long r; ++ r = copy_from_user(&mem, m, size); ++ if (r) ++ return r; ++ if (mem.padding) ++ return -EOPNOTSUPP; ++ if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS) ++ return -E2BIG; ++ newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL); ++ if (!newmem) ++ return -ENOMEM; ++ ++ memcpy(newmem, &mem, size); ++ r = copy_from_user(newmem->regions, m->regions, ++ mem.nregions * sizeof *m->regions); ++ if (r) { ++ kfree(newmem); ++ return r; ++ } ++ ++ if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) ++ return -EFAULT; ++ oldmem = d->memory; ++ rcu_assign_pointer(d->memory, newmem); ++ synchronize_rcu(); ++ kfree(oldmem); ++ return 0; ++} ++ ++static int init_used(struct vhost_virtqueue *vq, ++ struct vring_used __user *used) ++{ ++ int r = put_user(vq->used_flags, &used->flags); ++ if (r) ++ return r; ++ return get_user(vq->last_used_idx, &used->idx); ++} ++ ++static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) ++{ ++ struct file *eventfp, *filep = NULL, ++ *pollstart = NULL, *pollstop = NULL; ++ struct eventfd_ctx *ctx = NULL; ++ u32 __user *idxp = argp; ++ struct vhost_virtqueue *vq; ++ struct vhost_vring_state s; ++ struct vhost_vring_file f; ++ struct vhost_vring_addr a; ++ u32 idx; ++ long r; ++ ++ r = get_user(idx, idxp); ++ if (r < 0) ++ return r; ++ if (idx > d->nvqs) ++ return -ENOBUFS; ++ ++ vq = d->vqs + idx; ++ ++ mutex_lock(&vq->mutex); ++ ++ switch (ioctl) { ++ case VHOST_SET_VRING_NUM: ++ /* Resizing ring with an active backend? ++ * You don't want to do that. */ ++ if (vq->private_data) { ++ r = -EBUSY; ++ break; ++ } ++ r = copy_from_user(&s, argp, sizeof s); ++ if (r < 0) ++ break; ++ if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) { ++ r = -EINVAL; ++ break; ++ } ++ vq->num = s.num; ++ break; ++ case VHOST_SET_VRING_BASE: ++ /* Moving base with an active backend? ++ * You don't want to do that. */ ++ if (vq->private_data) { ++ r = -EBUSY; ++ break; ++ } ++ r = copy_from_user(&s, argp, sizeof s); ++ if (r < 0) ++ break; ++ if (s.num > 0xffff) { ++ r = -EINVAL; ++ break; ++ } ++ vq->last_avail_idx = s.num; ++ /* Forget the cached index value. 
*/ ++ vq->avail_idx = vq->last_avail_idx; ++ break; ++ case VHOST_GET_VRING_BASE: ++ s.index = idx; ++ s.num = vq->last_avail_idx; ++ r = copy_to_user(argp, &s, sizeof s); ++ break; ++ case VHOST_SET_VRING_ADDR: ++ r = copy_from_user(&a, argp, sizeof a); ++ if (r < 0) ++ break; ++ if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) { ++ r = -EOPNOTSUPP; ++ break; ++ } ++ /* For 32bit, verify that the top 32bits of the user ++ data are set to zero. */ ++ if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr || ++ (u64)(unsigned long)a.used_user_addr != a.used_user_addr || ++ (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) { ++ r = -EFAULT; ++ break; ++ } ++ if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) || ++ (a.used_user_addr & (sizeof *vq->used->ring - 1)) || ++ (a.log_guest_addr & (sizeof *vq->used->ring - 1))) { ++ r = -EINVAL; ++ break; ++ } ++ ++ /* We only verify access here if backend is configured. ++ * If it is not, we don't as size might not have been setup. ++ * We will verify when backend is configured. */ ++ if (vq->private_data) { ++ if (!vq_access_ok(vq->num, ++ (void __user *)(unsigned long)a.desc_user_addr, ++ (void __user *)(unsigned long)a.avail_user_addr, ++ (void __user *)(unsigned long)a.used_user_addr)) { ++ r = -EINVAL; ++ break; ++ } ++ ++ /* Also validate log access for used ring if enabled. */ ++ if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) && ++ !log_access_ok(vq->log_base, a.log_guest_addr, ++ sizeof *vq->used + ++ vq->num * sizeof *vq->used->ring)) { ++ r = -EINVAL; ++ break; ++ } ++ } ++ ++ r = init_used(vq, (struct vring_used __user *)(unsigned long) ++ a.used_user_addr); ++ if (r) ++ break; ++ vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG)); ++ vq->desc = (void __user *)(unsigned long)a.desc_user_addr; ++ vq->avail = (void __user *)(unsigned long)a.avail_user_addr; ++ vq->log_addr = a.log_guest_addr; ++ vq->used = (void __user *)(unsigned long)a.used_user_addr; ++ break; ++ case VHOST_SET_VRING_KICK: ++ r = copy_from_user(&f, argp, sizeof f); ++ if (r < 0) ++ break; ++ eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); ++ if (IS_ERR(eventfp)) ++ return PTR_ERR(eventfp); ++ if (eventfp != vq->kick) { ++ pollstop = filep = vq->kick; ++ pollstart = vq->kick = eventfp; ++ } else ++ filep = eventfp; ++ break; ++ case VHOST_SET_VRING_CALL: ++ r = copy_from_user(&f, argp, sizeof f); ++ if (r < 0) ++ break; ++ eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); ++ if (IS_ERR(eventfp)) ++ return PTR_ERR(eventfp); ++ if (eventfp != vq->call) { ++ filep = vq->call; ++ ctx = vq->call_ctx; ++ vq->call = eventfp; ++ vq->call_ctx = eventfp ? ++ eventfd_ctx_fileget(eventfp) : NULL; ++ } else ++ filep = eventfp; ++ break; ++ case VHOST_SET_VRING_ERR: ++ r = copy_from_user(&f, argp, sizeof f); ++ if (r < 0) ++ break; ++ eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); ++ if (IS_ERR(eventfp)) ++ return PTR_ERR(eventfp); ++ if (eventfp != vq->error) { ++ filep = vq->error; ++ vq->error = eventfp; ++ ctx = vq->error_ctx; ++ vq->error_ctx = eventfp ? 
++ eventfd_ctx_fileget(eventfp) : NULL; ++ } else ++ filep = eventfp; ++ break; ++ default: ++ r = -ENOIOCTLCMD; ++ } ++ ++ if (pollstop && vq->handle_kick) ++ vhost_poll_stop(&vq->poll); ++ ++ if (ctx) ++ eventfd_ctx_put(ctx); ++ if (filep) ++ fput(filep); ++ ++ if (pollstart && vq->handle_kick) ++ vhost_poll_start(&vq->poll, vq->kick); ++ ++ mutex_unlock(&vq->mutex); ++ ++ if (pollstop && vq->handle_kick) ++ vhost_poll_flush(&vq->poll); ++ return r; ++} ++ ++/* Caller must have device mutex */ ++long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg) ++{ ++ void __user *argp = (void __user *)arg; ++ struct file *eventfp, *filep = NULL; ++ struct eventfd_ctx *ctx = NULL; ++ u64 p; ++ long r; ++ int i, fd; ++ ++ /* If you are not the owner, you can become one */ ++ if (ioctl == VHOST_SET_OWNER) { ++ r = vhost_dev_set_owner(d); ++ goto done; ++ } ++ ++ /* You must be the owner to do anything else */ ++ r = vhost_dev_check_owner(d); ++ if (r) ++ goto done; ++ ++ switch (ioctl) { ++ case VHOST_SET_MEM_TABLE: ++ r = vhost_set_memory(d, argp); ++ break; ++ case VHOST_SET_LOG_BASE: ++ r = copy_from_user(&p, argp, sizeof p); ++ if (r < 0) ++ break; ++ if ((u64)(unsigned long)p != p) { ++ r = -EFAULT; ++ break; ++ } ++ for (i = 0; i < d->nvqs; ++i) { ++ struct vhost_virtqueue *vq; ++ void __user *base = (void __user *)(unsigned long)p; ++ vq = d->vqs + i; ++ mutex_lock(&vq->mutex); ++ /* If ring is inactive, will check when it's enabled. */ ++ if (vq->private_data && !vq_log_access_ok(vq, base)) ++ r = -EFAULT; ++ else ++ vq->log_base = base; ++ mutex_unlock(&vq->mutex); ++ } ++ break; ++ case VHOST_SET_LOG_FD: ++ r = get_user(fd, (int __user *)argp); ++ if (r < 0) ++ break; ++ eventfp = fd == -1 ? NULL : eventfd_fget(fd); ++ if (IS_ERR(eventfp)) { ++ r = PTR_ERR(eventfp); ++ break; ++ } ++ if (eventfp != d->log_file) { ++ filep = d->log_file; ++ ctx = d->log_ctx; ++ d->log_ctx = eventfp ? ++ eventfd_ctx_fileget(eventfp) : NULL; ++ } else ++ filep = eventfp; ++ for (i = 0; i < d->nvqs; ++i) { ++ mutex_lock(&d->vqs[i].mutex); ++ d->vqs[i].log_ctx = d->log_ctx; ++ mutex_unlock(&d->vqs[i].mutex); ++ } ++ if (ctx) ++ eventfd_ctx_put(ctx); ++ if (filep) ++ fput(filep); ++ break; ++ default: ++ r = vhost_set_vring(d, ioctl, argp); ++ break; ++ } ++done: ++ return r; ++} ++ ++static const struct vhost_memory_region *find_region(struct vhost_memory *mem, ++ __u64 addr, __u32 len) ++{ ++ struct vhost_memory_region *reg; ++ int i; ++ /* linear search is not brilliant, but we really have on the order of 6 ++ * regions in practice */ ++ for (i = 0; i < mem->nregions; ++i) { ++ reg = mem->regions + i; ++ if (reg->guest_phys_addr <= addr && ++ reg->guest_phys_addr + reg->memory_size - 1 >= addr) ++ return reg; ++ } ++ return NULL; ++} ++ ++/* TODO: This is really inefficient. We need something like get_user() ++ * (instruction directly accesses the data, with an exception table entry ++ * returning -EFAULT). See Documentation/x86/exception-tables.txt. 
++ */ ++static int set_bit_to_user(int nr, void __user *addr) ++{ ++ unsigned long log = (unsigned long)addr; ++ struct page *page; ++ void *base; ++ int bit = nr + (log % PAGE_SIZE) * 8; ++ int r; ++ r = get_user_pages_fast(log, 1, 1, &page); ++ if (r) ++ return r; ++ base = kmap_atomic(page, KM_USER0); ++ set_bit(bit, base); ++ kunmap_atomic(base, KM_USER0); ++ set_page_dirty_lock(page); ++ put_page(page); ++ return 0; ++} ++ ++static int log_write(void __user *log_base, ++ u64 write_address, u64 write_length) ++{ ++ int r; ++ if (!write_length) ++ return 0; ++ write_address /= VHOST_PAGE_SIZE; ++ for (;;) { ++ u64 base = (u64)(unsigned long)log_base; ++ u64 log = base + write_address / 8; ++ int bit = write_address % 8; ++ if ((u64)(unsigned long)log != log) ++ return -EFAULT; ++ r = set_bit_to_user(bit, (void __user *)(unsigned long)log); ++ if (r < 0) ++ return r; ++ if (write_length <= VHOST_PAGE_SIZE) ++ break; ++ write_length -= VHOST_PAGE_SIZE; ++ write_address += VHOST_PAGE_SIZE; ++ } ++ return r; ++} ++ ++int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, ++ unsigned int log_num, u64 len) ++{ ++ int i, r; ++ ++ /* Make sure data written is seen before log. */ ++ wmb(); ++ for (i = 0; i < log_num; ++i) { ++ u64 l = min(log[i].len, len); ++ r = log_write(vq->log_base, log[i].addr, l); ++ if (r < 0) ++ return r; ++ len -= l; ++ if (!len) ++ return 0; ++ } ++ if (vq->log_ctx) ++ eventfd_signal(vq->log_ctx, 1); ++ /* Length written exceeds what we have stored. This is a bug. */ ++ BUG(); ++ return 0; ++} ++ ++int translate_desc(struct vhost_dev *dev, u64 addr, u32 len, ++ struct iovec iov[], int iov_size) ++{ ++ const struct vhost_memory_region *reg; ++ struct vhost_memory *mem; ++ struct iovec *_iov; ++ u64 s = 0; ++ int ret = 0; ++ ++ rcu_read_lock(); ++ ++ mem = rcu_dereference(dev->memory); ++ while ((u64)len > s) { ++ u64 size; ++ if (ret >= iov_size) { ++ ret = -ENOBUFS; ++ break; ++ } ++ reg = find_region(mem, addr, len); ++ if (!reg) { ++ ret = -EFAULT; ++ break; ++ } ++ _iov = iov + ret; ++ size = reg->memory_size - addr + reg->guest_phys_addr; ++ _iov->iov_len = min((u64)len, size); ++ _iov->iov_base = (void *)(unsigned long) ++ (reg->userspace_addr + addr - reg->guest_phys_addr); ++ s += size; ++ addr += size; ++ ++ret; ++ } ++ ++ rcu_read_unlock(); ++ return ret; ++} ++ ++/* Each buffer in the virtqueues is actually a chain of descriptors. This ++ * function returns the next descriptor in the chain, ++ * or -1U if we're at the end. */ ++static unsigned next_desc(struct vring_desc *desc) ++{ ++ unsigned int next; ++ ++ /* If this descriptor says it doesn't chain, we're done. */ ++ if (!(desc->flags & VRING_DESC_F_NEXT)) ++ return -1U; ++ ++ /* Check they're not leading us off end of descriptors. */ ++ next = desc->next; ++ /* Make sure compiler knows to grab that: we don't want it changing! */ ++ /* We will use the result as an index in an array, so most ++ * architectures only need a compiler barrier here. 
*/ ++ read_barrier_depends(); ++ ++ return next; ++} ++ ++static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq, ++ struct iovec iov[], unsigned int iov_size, ++ unsigned int *out_num, unsigned int *in_num, ++ struct vhost_log *log, unsigned int *log_num, ++ struct vring_desc *indirect) ++{ ++ struct vring_desc desc; ++ unsigned int i = 0, count, found = 0; ++ int ret; ++ ++ /* Sanity check */ ++ if (indirect->len % sizeof desc) { ++ vq_err(vq, "Invalid length in indirect descriptor: " ++ "len 0x%llx not multiple of 0x%zx\n", ++ (unsigned long long)indirect->len, ++ sizeof desc); ++ return -EINVAL; ++ } ++ ++ ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect, ++ ARRAY_SIZE(vq->indirect)); ++ if (ret < 0) { ++ vq_err(vq, "Translation failure %d in indirect.\n", ret); ++ return ret; ++ } ++ ++ /* We will use the result as an address to read from, so most ++ * architectures only need a compiler barrier here. */ ++ read_barrier_depends(); ++ ++ count = indirect->len / sizeof desc; ++ /* Buffers are chained via a 16 bit next field, so ++ * we can have at most 2^16 of these. */ ++ if (count > USHORT_MAX + 1) { ++ vq_err(vq, "Indirect buffer length too big: %d\n", ++ indirect->len); ++ return -E2BIG; ++ } ++ ++ do { ++ unsigned iov_count = *in_num + *out_num; ++ if (++found > count) { ++ vq_err(vq, "Loop detected: last one at %u " ++ "indirect size %u\n", ++ i, count); ++ return -EINVAL; ++ } ++ if (memcpy_fromiovec((unsigned char *)&desc, vq->indirect, ++ sizeof desc)) { ++ vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", ++ i, (size_t)indirect->addr + i * sizeof desc); ++ return -EINVAL; ++ } ++ if (desc.flags & VRING_DESC_F_INDIRECT) { ++ vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n", ++ i, (size_t)indirect->addr + i * sizeof desc); ++ return -EINVAL; ++ } ++ ++ ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count, ++ iov_size - iov_count); ++ if (ret < 0) { ++ vq_err(vq, "Translation failure %d indirect idx %d\n", ++ ret, i); ++ return ret; ++ } ++ /* If this is an input descriptor, increment that count. */ ++ if (desc.flags & VRING_DESC_F_WRITE) { ++ *in_num += ret; ++ if (unlikely(log)) { ++ log[*log_num].addr = desc.addr; ++ log[*log_num].len = desc.len; ++ ++*log_num; ++ } ++ } else { ++ /* If it's an output descriptor, they're all supposed ++ * to come before any input descriptors. */ ++ if (*in_num) { ++ vq_err(vq, "Indirect descriptor " ++ "has out after in: idx %d\n", i); ++ return -EINVAL; ++ } ++ *out_num += ret; ++ } ++ } while ((i = next_desc(&desc)) != -1); ++ return 0; ++} ++ ++/* This looks in the virtqueue and for the first available buffer, and converts ++ * it to an iovec for convenient access. Since descriptors consist of some ++ * number of output then some number of input descriptors, it's actually two ++ * iovecs, but we pack them into one and note how many of each there were. ++ * ++ * This function returns the descriptor number found, or vq->num (which ++ * is never a valid descriptor number) if none was found. */ ++unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq, ++ struct iovec iov[], unsigned int iov_size, ++ unsigned int *out_num, unsigned int *in_num, ++ struct vhost_log *log, unsigned int *log_num) ++{ ++ struct vring_desc desc; ++ unsigned int i, head, found = 0; ++ u16 last_avail_idx; ++ int ret; ++ ++ /* Check it isn't doing very strange things with descriptor numbers. 
*/ ++ last_avail_idx = vq->last_avail_idx; ++ if (get_user(vq->avail_idx, &vq->avail->idx)) { ++ vq_err(vq, "Failed to access avail idx at %p\n", ++ &vq->avail->idx); ++ return vq->num; ++ } ++ ++ if ((u16)(vq->avail_idx - last_avail_idx) > vq->num) { ++ vq_err(vq, "Guest moved used index from %u to %u", ++ last_avail_idx, vq->avail_idx); ++ return vq->num; ++ } ++ ++ /* If there's nothing new since last we looked, return invalid. */ ++ if (vq->avail_idx == last_avail_idx) ++ return vq->num; ++ ++ /* Only get avail ring entries after they have been exposed by guest. */ ++ rmb(); ++ ++ /* Grab the next descriptor number they're advertising, and increment ++ * the index we've seen. */ ++ if (get_user(head, &vq->avail->ring[last_avail_idx % vq->num])) { ++ vq_err(vq, "Failed to read head: idx %d address %p\n", ++ last_avail_idx, ++ &vq->avail->ring[last_avail_idx % vq->num]); ++ return vq->num; ++ } ++ ++ /* If their number is silly, that's an error. */ ++ if (head >= vq->num) { ++ vq_err(vq, "Guest says index %u > %u is available", ++ head, vq->num); ++ return vq->num; ++ } ++ ++ /* When we start there are none of either input nor output. */ ++ *out_num = *in_num = 0; ++ if (unlikely(log)) ++ *log_num = 0; ++ ++ i = head; ++ do { ++ unsigned iov_count = *in_num + *out_num; ++ if (i >= vq->num) { ++ vq_err(vq, "Desc index is %u > %u, head = %u", ++ i, vq->num, head); ++ return vq->num; ++ } ++ if (++found > vq->num) { ++ vq_err(vq, "Loop detected: last one at %u " ++ "vq size %u head %u\n", ++ i, vq->num, head); ++ return vq->num; ++ } ++ ret = copy_from_user(&desc, vq->desc + i, sizeof desc); ++ if (ret) { ++ vq_err(vq, "Failed to get descriptor: idx %d addr %p\n", ++ i, vq->desc + i); ++ return vq->num; ++ } ++ if (desc.flags & VRING_DESC_F_INDIRECT) { ++ ret = get_indirect(dev, vq, iov, iov_size, ++ out_num, in_num, ++ log, log_num, &desc); ++ if (ret < 0) { ++ vq_err(vq, "Failure detected " ++ "in indirect descriptor at idx %d\n", i); ++ return vq->num; ++ } ++ continue; ++ } ++ ++ ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count, ++ iov_size - iov_count); ++ if (ret < 0) { ++ vq_err(vq, "Translation failure %d descriptor idx %d\n", ++ ret, i); ++ return vq->num; ++ } ++ if (desc.flags & VRING_DESC_F_WRITE) { ++ /* If this is an input descriptor, ++ * increment that count. */ ++ *in_num += ret; ++ if (unlikely(log)) { ++ log[*log_num].addr = desc.addr; ++ log[*log_num].len = desc.len; ++ ++*log_num; ++ } ++ } else { ++ /* If it's an output descriptor, they're all supposed ++ * to come before any input descriptors. */ ++ if (*in_num) { ++ vq_err(vq, "Descriptor has out after in: " ++ "idx %d\n", i); ++ return vq->num; ++ } ++ *out_num += ret; ++ } ++ } while ((i = next_desc(&desc)) != -1); ++ ++ /* On success, increment avail index. */ ++ vq->last_avail_idx++; ++ return head; ++} ++ ++/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */ ++void vhost_discard_vq_desc(struct vhost_virtqueue *vq) ++{ ++ vq->last_avail_idx--; ++} ++ ++/* After we've used one of their buffers, we tell them about it. We'll then ++ * want to notify the guest, using eventfd. */ ++int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) ++{ ++ struct vring_used_elem *used; ++ ++ /* The virtqueue contains a ring of used buffers. Get a pointer to the ++ * next entry in that used ring. 
*/ ++ used = &vq->used->ring[vq->last_used_idx % vq->num]; ++ if (put_user(head, &used->id)) { ++ vq_err(vq, "Failed to write used id"); ++ return -EFAULT; ++ } ++ if (put_user(len, &used->len)) { ++ vq_err(vq, "Failed to write used len"); ++ return -EFAULT; ++ } ++ /* Make sure buffer is written before we update index. */ ++ wmb(); ++ if (put_user(vq->last_used_idx + 1, &vq->used->idx)) { ++ vq_err(vq, "Failed to increment used idx"); ++ return -EFAULT; ++ } ++ if (unlikely(vq->log_used)) { ++ /* Make sure data is seen before log. */ ++ wmb(); ++ log_write(vq->log_base, vq->log_addr + sizeof *vq->used->ring * ++ (vq->last_used_idx % vq->num), ++ sizeof *vq->used->ring); ++ log_write(vq->log_base, vq->log_addr, sizeof *vq->used->ring); ++ if (vq->log_ctx) ++ eventfd_signal(vq->log_ctx, 1); ++ } ++ vq->last_used_idx++; ++ return 0; ++} ++ ++/* This actually signals the guest, using eventfd. */ ++void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) ++{ ++ __u16 flags = 0; ++ if (get_user(flags, &vq->avail->flags)) { ++ vq_err(vq, "Failed to get flags"); ++ return; ++ } ++ ++ /* If they don't want an interrupt, don't signal, unless empty. */ ++ if ((flags & VRING_AVAIL_F_NO_INTERRUPT) && ++ (vq->avail_idx != vq->last_avail_idx || ++ !vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY))) ++ return; ++ ++ /* Signal the Guest tell them we used something up. */ ++ if (vq->call_ctx) ++ eventfd_signal(vq->call_ctx, 1); ++} ++ ++/* And here's the combo meal deal. Supersize me! */ ++void vhost_add_used_and_signal(struct vhost_dev *dev, ++ struct vhost_virtqueue *vq, ++ unsigned int head, int len) ++{ ++ vhost_add_used(vq, head, len); ++ vhost_signal(dev, vq); ++} ++ ++/* OK, now we need to know about added descriptors. */ ++bool vhost_enable_notify(struct vhost_virtqueue *vq) ++{ ++ u16 avail_idx; ++ int r; ++ if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) ++ return false; ++ vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; ++ r = put_user(vq->used_flags, &vq->used->flags); ++ if (r) { ++ vq_err(vq, "Failed to enable notification at %p: %d\n", ++ &vq->used->flags, r); ++ return false; ++ } ++ /* They could have slipped one in as we were doing that: make ++ * sure it's written, then check again. */ ++ mb(); ++ r = get_user(avail_idx, &vq->avail->idx); ++ if (r) { ++ vq_err(vq, "Failed to check avail idx at %p: %d\n", ++ &vq->avail->idx, r); ++ return false; ++ } ++ ++ return avail_idx != vq->last_avail_idx; ++} ++ ++/* We don't need to be notified again. */ ++void vhost_disable_notify(struct vhost_virtqueue *vq) ++{ ++ int r; ++ if (vq->used_flags & VRING_USED_F_NO_NOTIFY) ++ return; ++ vq->used_flags |= VRING_USED_F_NO_NOTIFY; ++ r = put_user(vq->used_flags, &vq->used->flags); ++ if (r) ++ vq_err(vq, "Failed to enable notification at %p: %d\n", ++ &vq->used->flags, r); ++} ++ ++int vhost_init(void) ++{ ++ vhost_workqueue = create_singlethread_workqueue("vhost"); ++ if (!vhost_workqueue) ++ return -ENOMEM; ++ return 0; ++} ++ ++void vhost_cleanup(void) ++{ ++ destroy_workqueue(vhost_workqueue); ++} +diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h +new file mode 100644 +index 0000000..44591ba +--- /dev/null ++++ b/drivers/vhost/vhost.h +@@ -0,0 +1,161 @@ ++#ifndef _VHOST_H ++#define _VHOST_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct vhost_device; ++ ++enum { ++ /* Enough place for all fragments, head, and virtio net header. 
*/ ++ VHOST_NET_MAX_SG = MAX_SKB_FRAGS + 2, ++}; ++ ++/* Poll a file (eventfd or socket) */ ++/* Note: there's nothing vhost specific about this structure. */ ++struct vhost_poll { ++ poll_table table; ++ wait_queue_head_t *wqh; ++ wait_queue_t wait; ++ /* struct which will handle all actual work. */ ++ struct work_struct work; ++ unsigned long mask; ++}; ++ ++void vhost_poll_init(struct vhost_poll *poll, work_func_t func, ++ unsigned long mask); ++void vhost_poll_start(struct vhost_poll *poll, struct file *file); ++void vhost_poll_stop(struct vhost_poll *poll); ++void vhost_poll_flush(struct vhost_poll *poll); ++void vhost_poll_queue(struct vhost_poll *poll); ++ ++struct vhost_log { ++ u64 addr; ++ u64 len; ++}; ++ ++/* The virtqueue structure describes a queue attached to a device. */ ++struct vhost_virtqueue { ++ struct vhost_dev *dev; ++ ++ /* The actual ring of buffers. */ ++ struct mutex mutex; ++ unsigned int num; ++ struct vring_desc __user *desc; ++ struct vring_avail __user *avail; ++ struct vring_used __user *used; ++ struct file *kick; ++ struct file *call; ++ struct file *error; ++ struct eventfd_ctx *call_ctx; ++ struct eventfd_ctx *error_ctx; ++ struct eventfd_ctx *log_ctx; ++ ++ struct vhost_poll poll; ++ ++ /* The routine to call when the Guest pings us, or timeout. */ ++ work_func_t handle_kick; ++ ++ /* Last available index we saw. */ ++ u16 last_avail_idx; ++ ++ /* Caches available index value from user. */ ++ u16 avail_idx; ++ ++ /* Last index we used. */ ++ u16 last_used_idx; ++ ++ /* Used flags */ ++ u16 used_flags; ++ ++ /* Log writes to used structure. */ ++ bool log_used; ++ u64 log_addr; ++ ++ struct iovec indirect[VHOST_NET_MAX_SG]; ++ struct iovec iov[VHOST_NET_MAX_SG]; ++ struct iovec hdr[VHOST_NET_MAX_SG]; ++ size_t hdr_size; ++ /* We use a kind of RCU to access private pointer. ++ * All readers access it from workqueue, which makes it possible to ++ * flush the workqueue instead of synchronize_rcu. Therefore readers do ++ * not need to call rcu_read_lock/rcu_read_unlock: the beginning of ++ * work item execution acts instead of rcu_read_lock() and the end of ++ * work item execution acts instead of rcu_read_unlock(). ++ * Writers use virtqueue mutex. */ ++ void *private_data; ++ /* Log write descriptors */ ++ void __user *log_base; ++ struct vhost_log log[VHOST_NET_MAX_SG]; ++}; ++ ++struct vhost_dev { ++ /* Readers use RCU to access memory table pointer ++ * log base pointer and features.
++ * Writers use mutex below.*/ ++ struct vhost_memory *memory; ++ struct mm_struct *mm; ++ struct mutex mutex; ++ unsigned acked_features; ++ struct vhost_virtqueue *vqs; ++ int nvqs; ++ struct file *log_file; ++ struct eventfd_ctx *log_ctx; ++}; ++ ++long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs); ++long vhost_dev_check_owner(struct vhost_dev *); ++long vhost_dev_reset_owner(struct vhost_dev *); ++void vhost_dev_cleanup(struct vhost_dev *); ++long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg); ++int vhost_vq_access_ok(struct vhost_virtqueue *vq); ++int vhost_log_access_ok(struct vhost_dev *); ++ ++unsigned vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *, ++ struct iovec iov[], unsigned int iov_count, ++ unsigned int *out_num, unsigned int *in_num, ++ struct vhost_log *log, unsigned int *log_num); ++void vhost_discard_vq_desc(struct vhost_virtqueue *); ++ ++int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len); ++void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *); ++void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *, ++ unsigned int head, int len); ++void vhost_disable_notify(struct vhost_virtqueue *); ++bool vhost_enable_notify(struct vhost_virtqueue *); ++ ++int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, ++ unsigned int log_num, u64 len); ++ ++int vhost_init(void); ++void vhost_cleanup(void); ++ ++#define vq_err(vq, fmt, ...) do { \ ++ pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \ ++ if ((vq)->error_ctx) \ ++ eventfd_signal((vq)->error_ctx, 1);\ ++ } while (0) ++ ++enum { ++ VHOST_FEATURES = (1 << VIRTIO_F_NOTIFY_ON_EMPTY) | ++ (1 << VIRTIO_RING_F_INDIRECT_DESC) | ++ (1 << VHOST_F_LOG_ALL) | ++ (1 << VHOST_NET_F_VIRTIO_NET_HDR), ++}; ++ ++static inline int vhost_has_feature(struct vhost_dev *dev, int bit) ++{ ++ unsigned acked_features = rcu_dereference(dev->acked_features); ++ return acked_features & (1 << bit); ++} ++ ++#endif +diff --git a/include/linux/Kbuild b/include/linux/Kbuild +index 756f831..d930807 100644 +--- a/include/linux/Kbuild ++++ b/include/linux/Kbuild +@@ -362,6 +362,7 @@ unifdef-y += uio.h + unifdef-y += unistd.h + unifdef-y += usbdevice_fs.h + unifdef-y += utsname.h ++unifdef-y += vhost.h + unifdef-y += videodev2.h + unifdef-y += videodev.h + unifdef-y += virtio_config.h +diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h +index 3f5fd52..404abe0 100644 +--- a/include/linux/if_tun.h ++++ b/include/linux/if_tun.h +@@ -86,4 +86,18 @@ struct tun_filter { + __u8 addr[0][ETH_ALEN]; + }; + ++#ifdef __KERNEL__ ++#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) ++struct socket *tun_get_socket(struct file *); ++#else ++#include ++#include ++struct file; ++struct socket; ++static inline struct socket *tun_get_socket(struct file *f) ++{ ++ return ERR_PTR(-EINVAL); ++} ++#endif /* CONFIG_TUN */ ++#endif /* __KERNEL__ */ + #endif /* __IF_TUN_H */ +diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h +index adaf3c1..8b5f7cc 100644 +--- a/include/linux/miscdevice.h ++++ b/include/linux/miscdevice.h +@@ -30,6 +30,7 @@ + #define HPET_MINOR 228 + #define FUSE_MINOR 229 + #define KVM_MINOR 232 ++#define VHOST_NET_MINOR 233 + #define MISC_DYNAMIC_MINOR 255 + + struct device; +diff --git a/include/linux/vhost.h b/include/linux/vhost.h +new file mode 100644 +index 0000000..e847f1e +--- /dev/null ++++ b/include/linux/vhost.h +@@ -0,0 +1,130 @@ ++#ifndef _LINUX_VHOST_H ++#define _LINUX_VHOST_H ++/* Userspace 
interface for in-kernel virtio accelerators. */ ++ ++/* vhost is used to reduce the number of system calls involved in virtio. ++ * ++ * Existing virtio net code is used in the guest without modification. ++ * ++ * This header includes interface used by userspace hypervisor for ++ * device configuration. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++struct vhost_vring_state { ++ unsigned int index; ++ unsigned int num; ++}; ++ ++struct vhost_vring_file { ++ unsigned int index; ++ int fd; /* Pass -1 to unbind from file. */ ++ ++}; ++ ++struct vhost_vring_addr { ++ unsigned int index; ++ /* Option flags. */ ++ unsigned int flags; ++ /* Flag values: */ ++ /* Whether log address is valid. If set enables logging. */ ++#define VHOST_VRING_F_LOG 0 ++ ++ /* Start of array of descriptors (virtually contiguous) */ ++ __u64 desc_user_addr; ++ /* Used structure address. Must be 32 bit aligned */ ++ __u64 used_user_addr; ++ /* Available structure address. Must be 16 bit aligned */ ++ __u64 avail_user_addr; ++ /* Logging support. */ ++ /* Log writes to used structure, at offset calculated from specified ++ * address. Address must be 32 bit aligned. */ ++ __u64 log_guest_addr; ++}; ++ ++struct vhost_memory_region { ++ __u64 guest_phys_addr; ++ __u64 memory_size; /* bytes */ ++ __u64 userspace_addr; ++ __u64 flags_padding; /* No flags are currently specified. */ ++}; ++ ++/* All region addresses and sizes must be 4K aligned. */ ++#define VHOST_PAGE_SIZE 0x1000 ++ ++struct vhost_memory { ++ __u32 nregions; ++ __u32 padding; ++ struct vhost_memory_region regions[0]; ++}; ++ ++/* ioctls */ ++ ++#define VHOST_VIRTIO 0xAF ++ ++/* Features bitmask for forward compatibility. Transport bits are used for ++ * vhost specific features. */ ++#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64) ++#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64) ++ ++/* Set current process as the (exclusive) owner of this file descriptor. This ++ * must be called before any other vhost command. Further calls to ++ * VHOST_SET_OWNER fail until VHOST_RESET_OWNER is called. */ ++#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01) ++/* Give up ownership, and reset the device to default values. ++ * Allows subsequent call to VHOST_SET_OWNER to succeed. */ ++#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02) ++ ++/* Set up/modify memory layout */ ++#define VHOST_SET_MEM_TABLE _IOW(VHOST_VIRTIO, 0x03, struct vhost_memory) ++ ++/* Write logging setup. */ ++/* Memory writes can optionally be logged by setting bit at an offset ++ * (calculated from the physical address) from specified log base. ++ * The bit is set using an atomic 32 bit operation. */ ++/* Set base address for logging. */ ++#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64) ++/* Specify an eventfd file descriptor to signal on log write. */ ++#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int) ++ ++/* Ring setup. */ ++/* Set number of descriptors in ring. This parameter can not ++ * be modified while ring is running (bound to a device). */ ++#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state) ++/* Set addresses for the ring.
*/ ++#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr) ++/* Base value where queue looks for available descriptors */ ++#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state) ++/* Get accessor: reads index, writes value in num */ ++#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state) ++ ++/* The following ioctls use eventfd file descriptors to signal and poll ++ * for events. */ ++ ++/* Set eventfd to poll for added buffers */ ++#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file) ++/* Set eventfd to signal when buffers have been used */ ++#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file) ++/* Set eventfd to signal an error */ ++#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file) ++ ++/* VHOST_NET specific defines */ ++ ++/* Attach virtio net ring to a raw socket, or tap device. ++ * The socket must be already bound to an ethernet device, this device will be ++ * used for transmit. Pass fd -1 to unbind from the socket and the transmit ++ * device. This can be used to stop the ring (e.g. for migration). */ ++#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file) ++ ++/* Feature bits */ ++/* Log all write descriptors. Can be changed while device is active. */ ++#define VHOST_F_LOG_ALL 26 ++/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */ ++#define VHOST_NET_F_VIRTIO_NET_HDR 27 ++ ++#endif +diff --git a/mm/mmu_context.c b/mm/mmu_context.c +index ded9081..0777654 100644 +--- a/mm/mmu_context.c ++++ b/mm/mmu_context.c +@@ -5,6 +5,7 @@ + + #include + #include ++#include + #include + + #include +@@ -37,6 +38,7 @@ void use_mm(struct mm_struct *mm) + if (active_mm != mm) + mmdrop(active_mm); + } ++EXPORT_SYMBOL_GPL(use_mm); + + /* + * unuse_mm +@@ -56,3 +58,4 @@ void unuse_mm(struct mm_struct *mm) + enter_lazy_tlb(mm, tsk); + task_unlock(tsk); + } ++EXPORT_SYMBOL_GPL(unuse_mm); diff --git a/vhost_net-rollup2.patch b/vhost_net-rollup2.patch new file mode 100644 index 0000000..a05272a --- /dev/null +++ b/vhost_net-rollup2.patch @@ -0,0 +1,377 @@ +commit 17660f81243e998f36257881ac3ae61685bf91c1 +Author: Michael S. Tsirkin +Date: Thu Jan 21 01:28:45 2010 -0800 + + vhost: fix TUN=m VHOST_NET=y + + drivers/built-in.o: In function `get_tun_socket': + net.c:(.text+0x15436e): undefined reference to `tun_get_socket' + + If tun is a module, vhost must be a module, too. + If tun is built-in or disabled, vhost can be built-in. + + Note: TUN || !TUN might look a bit strange until you realize + that boolean logic rules do not apply for tristate variables. + + Reported-by: Randy Dunlap + Signed-off-by: Michael S. Tsirkin + Acked-by: Randy Dunlap + Signed-off-by: David S. Miller + +diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig +index 9f409f4..9e93553 100644 +--- a/drivers/vhost/Kconfig ++++ b/drivers/vhost/Kconfig +@@ -1,6 +1,6 @@ + config VHOST_NET + tristate "Host kernel accelerator for virtio net (EXPERIMENTAL)" +- depends on NET && EVENTFD && EXPERIMENTAL ++ depends on NET && EVENTFD && (TUN || !TUN) && EXPERIMENTAL + ---help--- + This kernel module can be loaded in host kernel to accelerate + guest networking with virtio_net. Not to be confused with virtio_net +commit 5659338c88963ea791118e5e11e314b24f90c3eb +Author: Michael S.
Tsirkin +Date: Mon Feb 1 07:21:02 2010 +0000 + + vhost-net: switch to smp barriers + + vhost-net only uses memory barriers to control SMP effects + (communication with userspace potentially running on a different CPU), + so it should use SMP barriers and not mandatory barriers for memory + access ordering, as suggested by Documentation/memory-barriers.txt + + Signed-off-by: Michael S. Tsirkin + Acked-by: Rusty Russell + Signed-off-by: David S. Miller + +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c +index c8c25db..6eb1525 100644 +--- a/drivers/vhost/vhost.c ++++ b/drivers/vhost/vhost.c +@@ -685,7 +685,7 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, + int i, r; + + /* Make sure data written is seen before log. */ +- wmb(); ++ smp_wmb(); + for (i = 0; i < log_num; ++i) { + u64 l = min(log[i].len, len); + r = log_write(vq->log_base, log[i].addr, l); +@@ -884,7 +884,7 @@ unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq, + return vq->num; + + /* Only get avail ring entries after they have been exposed by guest. */ +- rmb(); ++ smp_rmb(); + + /* Grab the next descriptor number they're advertising, and increment + * the index we've seen. */ +@@ -996,14 +996,14 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) + return -EFAULT; + } + /* Make sure buffer is written before we update index. */ +- wmb(); ++ smp_wmb(); + if (put_user(vq->last_used_idx + 1, &vq->used->idx)) { + vq_err(vq, "Failed to increment used idx"); + return -EFAULT; + } + if (unlikely(vq->log_used)) { + /* Make sure data is seen before log. */ +- wmb(); ++ smp_wmb(); + log_write(vq->log_base, vq->log_addr + sizeof *vq->used->ring * + (vq->last_used_idx % vq->num), + sizeof *vq->used->ring); +@@ -1060,7 +1060,7 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq) + } + /* They could have slipped one in as we were doing that: make + * sure it's written, then check again. */ +- mb(); ++ smp_mb(); + r = get_user(avail_idx, &vq->avail->idx); + if (r) { + vq_err(vq, "Failed to check avail idx at %p: %d\n", +commit 86e9424d7252bae5ad1c17b4b8088193e6b27cbe +Author: Michael S. Tsirkin +Date: Wed Feb 17 19:11:33 2010 +0200 + + vhost: logging thinko fix + + vhost was doing some complex math to get + offset to log at, and got it wrong by a couple of bytes, + while in fact it's simple: get address where we write, + subtract start of buffer, add log base. + + Do it this way. + + Reviewed-by: Juan Quintela + Signed-off-by: Michael S. Tsirkin + +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c +index 6eb1525..db21518 100644 +--- a/drivers/vhost/vhost.c ++++ b/drivers/vhost/vhost.c +@@ -1004,10 +1004,14 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) + if (unlikely(vq->log_used)) { + /* Make sure data is seen before log. */ + smp_wmb(); +- log_write(vq->log_base, vq->log_addr + sizeof *vq->used->ring * +- (vq->last_used_idx % vq->num), +- sizeof *vq->used->ring); +- log_write(vq->log_base, vq->log_addr, sizeof *vq->used->ring); ++ /* Log used ring entry write. */ ++ log_write(vq->log_base, ++ vq->log_addr + ((void *)used - (void *)vq->used), ++ sizeof *used); ++ /* Log used index update. */ ++ log_write(vq->log_base, ++ vq->log_addr + offsetof(struct vring_used, idx), ++ sizeof vq->used->idx); + if (vq->log_ctx) + eventfd_signal(vq->log_ctx, 1); + } +commit 73a99f083009d67d8e12603420e008d5c21b0b7d +Author: Michael S.
Tsirkin +Date: Tue Feb 23 11:23:45 2010 +0200 + + vhost: initialize log eventfd context pointer + + vq log eventfd context pointer needs to be initialized, otherwise + operation may fail or oops if log is enabled but log eventfd not set by + userspace. When log_ctx for device is created, it is copied to the vq. + This reset was missing. + + Reviewed-by: Juan Quintela + Signed-off-by: Michael S. Tsirkin + +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c +index db21518..6c31c0c 100644 +--- a/drivers/vhost/vhost.c ++++ b/drivers/vhost/vhost.c +@@ -121,6 +121,7 @@ static void vhost_vq_reset(struct vhost_dev *dev, + vq->kick = NULL; + vq->call_ctx = NULL; + vq->call = NULL; ++ vq->log_ctx = NULL; + } + + long vhost_dev_init(struct vhost_dev *dev, +commit d6db3f5c11dc7ed5712d5d5682aa34025ee5248e +Author: Michael S. Tsirkin +Date: Tue Feb 23 11:25:23 2010 +0200 + + vhost: fix get_user_pages_fast error handling + + get_user_pages_fast returns number of pages on success, negative value + on failure, but never 0. Fix vhost code to match this logic. + + Reviewed-by: Juan Quintela + Signed-off-by: Michael S. Tsirkin + +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c +index 6c31c0c..7cd55e0 100644 +--- a/drivers/vhost/vhost.c ++++ b/drivers/vhost/vhost.c +@@ -646,8 +646,9 @@ static int set_bit_to_user(int nr, void __user *addr) + int bit = nr + (log % PAGE_SIZE) * 8; + int r; + r = get_user_pages_fast(log, 1, 1, &page); +- if (r) ++ if (r < 0) + return r; ++ BUG_ON(r != 1); + base = kmap_atomic(page, KM_USER0); + set_bit(bit, base); + kunmap_atomic(base, KM_USER0); +commit 39286fa41a8b2c6a9c1f656a7b3c3efca95bc1b9 +Author: Sridhar Samudrala +Date: Sun Feb 28 19:39:16 2010 +0200 + + vhost-net: restart tx poll on sk_sndbuf full + + guest to remote communication with vhost net sometimes stops until + guest driver is restarted. This happens when we get guest kick precisely + when the backend send queue is full, as a result handle_tx() returns without + polling backend. This patch fixes this by restarting tx poll on this condition. + + Signed-off-by: Sridhar Samudrala + Signed-off-by: Michael S. Tsirkin + Tested-by: Tom Lendacky + +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c +index 91a324c..ad37da2 100644 +--- a/drivers/vhost/net.c ++++ b/drivers/vhost/net.c +@@ -114,8 +114,12 @@ static void handle_tx(struct vhost_net *net) + return; + + wmem = atomic_read(&sock->sk->sk_wmem_alloc); +- if (wmem >= sock->sk->sk_sndbuf) ++ if (wmem >= sock->sk->sk_sndbuf) { ++ mutex_lock(&vq->mutex); ++ tx_poll_start(net, sock); ++ mutex_unlock(&vq->mutex); + return; ++ } + + use_mm(net->dev.mm); + mutex_lock(&vq->mutex); +commit 1dace8c801ac531022bd31a7316a6b4351837617 +Author: Jeff Dike +Date: Thu Mar 4 16:10:14 2010 -0500 + + vhost: fix error path in vhost_net_set_backend + + An error could cause vhost_net_set_backend to exit without unlocking + vq->mutex. Fix this. + + Signed-off-by: Jeff Dike + Signed-off-by: Michael S. Tsirkin + +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c +index ad37da2..fcafb6b 100644 +--- a/drivers/vhost/net.c ++++ b/drivers/vhost/net.c +@@ -508,12 +508,12 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) + /* Verify that ring has been setup correctly. 
*/ + if (!vhost_vq_access_ok(vq)) { + r = -EFAULT; +- goto err; ++ goto err_vq; + } + sock = get_socket(fd); + if (IS_ERR(sock)) { + r = PTR_ERR(sock); +- goto err; ++ goto err_vq; + } + + /* start polling new socket */ +@@ -524,12 +524,14 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) + vhost_net_disable_vq(n, vq); + rcu_assign_pointer(vq->private_data, sock); + vhost_net_enable_vq(n, vq); +- mutex_unlock(&vq->mutex); + done: + if (oldsock) { + vhost_net_flush_vq(n, index); + fput(oldsock->file); + } ++ ++err_vq: ++ mutex_unlock(&vq->mutex); + err: + mutex_unlock(&n->dev.mutex); + return r; +commit 0e255572121180c900e24e33b87047abd8153cce +Author: Michael S. Tsirkin +Date: Mon Mar 8 23:24:22 2010 +0200 + + vhost: fix interrupt mitigation with raw sockets + + A thinko in code means we never trigger interrupt + mitigation. Fix this. + + Reported-by: Juan Quintela + Reported-by: Unai Uribarri + Signed-off-by: Michael S. Tsirkin + +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c +index fcafb6b..a6a88df 100644 +--- a/drivers/vhost/net.c ++++ b/drivers/vhost/net.c +@@ -125,7 +125,7 @@ static void handle_tx(struct vhost_net *net) + mutex_lock(&vq->mutex); + vhost_disable_notify(vq); + +- if (wmem < sock->sk->sk_sndbuf * 2) ++ if (wmem < sock->sk->sk_sndbuf / 2) + tx_poll_stop(net); + hdr_size = vq->hdr_size; + +commit 535297a6ae4c3b7a0562e71fac15c213eeec68e7 +Author: Michael S. Tsirkin +Date: Wed Mar 17 16:06:11 2010 +0200 + + vhost: fix error handling in vring ioctls + + Stanse found a locking problem in vhost_set_vring: + several returns from VHOST_SET_VRING_KICK, VHOST_SET_VRING_CALL, + VHOST_SET_VRING_ERR with the vq->mutex held. + Fix these up. + + Reported-by: Jiri Slaby + Acked-by: Laurent Chavey + Signed-off-by: Michael S. Tsirkin + +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c +index 7cd55e0..7bd7a1e 100644 +--- a/drivers/vhost/vhost.c ++++ b/drivers/vhost/vhost.c +@@ -476,8 +476,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) + if (r < 0) + break; + eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); +- if (IS_ERR(eventfp)) +- return PTR_ERR(eventfp); ++ if (IS_ERR(eventfp)) { ++ r = PTR_ERR(eventfp); ++ break; ++ } + if (eventfp != vq->kick) { + pollstop = filep = vq->kick; + pollstart = vq->kick = eventfp; +@@ -489,8 +491,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) + if (r < 0) + break; + eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); +- if (IS_ERR(eventfp)) +- return PTR_ERR(eventfp); ++ if (IS_ERR(eventfp)) { ++ r = PTR_ERR(eventfp); ++ break; ++ } + if (eventfp != vq->call) { + filep = vq->call; + ctx = vq->call_ctx; +@@ -505,8 +509,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) + if (r < 0) + break; + eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); +- if (IS_ERR(eventfp)) +- return PTR_ERR(eventfp); ++ if (IS_ERR(eventfp)) { ++ r = PTR_ERR(eventfp); ++ break; ++ } + if (eventfp != vq->error) { + filep = vq->error; + vq->error = eventfp; +commit 179b284e2fc0c638035843968f7d7ab8ab701525 +Author: Jeff Dike +Date: Wed Apr 7 09:59:10 2010 -0400 + + vhost-net: fix vq_memory_access_ok error checking + + vq_memory_access_ok needs to check whether mem == NULL + + Signed-off-by: Jeff Dike + Signed-off-by: Michael S. 
Tsirkin + +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c +index 7bd7a1e..b8e1127 100644 +--- a/drivers/vhost/vhost.c ++++ b/drivers/vhost/vhost.c +@@ -235,6 +235,10 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem, + int log_all) + { + int i; ++ ++ if (!mem) ++ return 0; ++ + for (i = 0; i < mem->nregions; ++i) { + struct vhost_memory_region *m = mem->regions + i; + unsigned long a = m->userspace_addr; diff --git a/virt_console-fix-fix-race.patch b/virt_console-fix-fix-race.patch new file mode 100644 index 0000000..816022c --- /dev/null +++ b/virt_console-fix-fix-race.patch @@ -0,0 +1,68 @@ +From: Anton Blanchard +Date: Tue, 6 Apr 2010 11:42:38 +0000 (+1000) +Subject: hvc_console: Fix race between hvc_close and hvc_remove +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=320718ee074acce5ffced6506cb51af1388942aa + +hvc_console: Fix race between hvc_close and hvc_remove + +I don't claim to understand the tty layer, but it seems like hvc_open and +hvc_close should be balanced in their kref reference counting. + +Right now we get a kref every call to hvc_open: + + if (hp->count++ > 0) { + tty_kref_get(tty); <----- here + spin_unlock_irqrestore(&hp->lock, flags); + hvc_kick(); + return 0; + } /* else count == 0 */ + + tty->driver_data = hp; + + hp->tty = tty_kref_get(tty); <------ or here if hp->count was 0 + +But hvc_close has: + + tty_kref_get(tty); + + if (--hp->count == 0) { +... + /* Put the ref obtained in hvc_open() */ + tty_kref_put(tty); +... + } + + tty_kref_put(tty); + +Since the outside kref get/put balance we only do a single kref_put when +count reaches 0. + +The patch below changes things to call tty_kref_put once for every +hvc_close call, and with that my machine boots fine. + +Signed-off-by: Anton Blanchard +Acked-by: Amit Shah +Signed-off-by: Rusty Russell +--- + +diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c +index d3890e8..35cca4c 100644 +--- a/drivers/char/hvc_console.c ++++ b/drivers/char/hvc_console.c +@@ -368,16 +368,12 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) + hp = tty->driver_data; + + spin_lock_irqsave(&hp->lock, flags); +- tty_kref_get(tty); + + if (--hp->count == 0) { + /* We are done with the tty pointer now. */ + hp->tty = NULL; + spin_unlock_irqrestore(&hp->lock, flags); + +- /* Put the ref obtained in hvc_open() */ +- tty_kref_put(tty); +- + if (hp->ops->notifier_del) + hp->ops->notifier_del(hp, hp->data); + diff --git a/virt_console-fix-race.patch b/virt_console-fix-race.patch new file mode 100644 index 0000000..344430c --- /dev/null +++ b/virt_console-fix-race.patch @@ -0,0 +1,168 @@ +From: Amit Shah +Date: Fri, 12 Mar 2010 06:23:15 +0000 (+0530) +Subject: hvc_console: Fix race between hvc_close and hvc_remove +X-Git-Tag: v2.6.34-rc2~6^2~3 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=e74d098c66543d0731de62eb747ccd5b636a6f4c + +hvc_console: Fix race between hvc_close and hvc_remove + +Alan pointed out a race in the code where hvc_remove is invoked. The +recent virtio_console work is the first user of hvc_remove(). + +Alan describes it thus: + +The hvc_console assumes that a close and remove call can't occur at the +same time. + +In addition tty_hangup(tty) is problematic as tty_hangup is asynchronous +itself.... + +So this can happen + + hvc_close hvc_remove + hung up ? 
- no + lock + tty = hp->tty + unlock + lock + hp->tty = NULL + unlock + notify del + kref_put the hvc struct + close completes + tty is destroyed + tty_hangup dead tty + tty->ops will be NULL + NULL->... + +This patch adds some tty krefs and also converts to using tty_vhangup(). + +Reported-by: Alan Cox +Signed-off-by: Amit Shah +CC: Alan Cox +CC: linuxppc-dev@ozlabs.org +CC: Rusty Russell +Signed-off-by: Greg Kroah-Hartman +--- + +diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c +index 465185f..ba55bba 100644 +--- a/drivers/char/hvc_console.c ++++ b/drivers/char/hvc_console.c +@@ -312,6 +312,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) + spin_lock_irqsave(&hp->lock, flags); + /* Check and then increment for fast path open. */ + if (hp->count++ > 0) { ++ tty_kref_get(tty); + spin_unlock_irqrestore(&hp->lock, flags); + hvc_kick(); + return 0; +@@ -319,7 +320,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) + + tty->driver_data = hp; + +- hp->tty = tty; ++ hp->tty = tty_kref_get(tty); + + spin_unlock_irqrestore(&hp->lock, flags); + +@@ -336,6 +337,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) + spin_lock_irqsave(&hp->lock, flags); + hp->tty = NULL; + spin_unlock_irqrestore(&hp->lock, flags); ++ tty_kref_put(tty); + tty->driver_data = NULL; + kref_put(&hp->kref, destroy_hvc_struct); + printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc); +@@ -363,13 +365,18 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) + return; + + hp = tty->driver_data; ++ + spin_lock_irqsave(&hp->lock, flags); ++ tty_kref_get(tty); + + if (--hp->count == 0) { + /* We are done with the tty pointer now. */ + hp->tty = NULL; + spin_unlock_irqrestore(&hp->lock, flags); + ++ /* Put the ref obtained in hvc_open() */ ++ tty_kref_put(tty); ++ + if (hp->ops->notifier_del) + hp->ops->notifier_del(hp, hp->data); + +@@ -389,6 +396,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) + spin_unlock_irqrestore(&hp->lock, flags); + } + ++ tty_kref_put(tty); + kref_put(&hp->kref, destroy_hvc_struct); + } + +@@ -424,10 +432,11 @@ static void hvc_hangup(struct tty_struct *tty) + spin_unlock_irqrestore(&hp->lock, flags); + + if (hp->ops->notifier_hangup) +- hp->ops->notifier_hangup(hp, hp->data); ++ hp->ops->notifier_hangup(hp, hp->data); + + while(temp_open_count) { + --temp_open_count; ++ tty_kref_put(tty); + kref_put(&hp->kref, destroy_hvc_struct); + } + } +@@ -592,7 +601,7 @@ int hvc_poll(struct hvc_struct *hp) + } + + /* No tty attached, just skip */ +- tty = hp->tty; ++ tty = tty_kref_get(hp->tty); + if (tty == NULL) + goto bail; + +@@ -672,6 +681,8 @@ int hvc_poll(struct hvc_struct *hp) + + tty_flip_buffer_push(tty); + } ++ if (tty) ++ tty_kref_put(tty); + + return poll_mask; + } +@@ -807,7 +818,7 @@ int hvc_remove(struct hvc_struct *hp) + struct tty_struct *tty; + + spin_lock_irqsave(&hp->lock, flags); +- tty = hp->tty; ++ tty = tty_kref_get(hp->tty); + + if (hp->index < MAX_NR_HVC_CONSOLES) + vtermnos[hp->index] = -1; +@@ -819,18 +830,18 @@ int hvc_remove(struct hvc_struct *hp) + /* + * We 'put' the instance that was grabbed when the kref instance + * was initialized using kref_init(). Let the last holder of this +- * kref cause it to be removed, which will probably be the tty_hangup ++ * kref cause it to be removed, which will probably be the tty_vhangup + * below. + */ + kref_put(&hp->kref, destroy_hvc_struct); + + /* +- * This function call will auto chain call hvc_hangup. 
The tty should +- * always be valid at this time unless a simultaneous tty close already +- * cleaned up the hvc_struct. ++ * This function call will auto chain call hvc_hangup. + */ +- if (tty) +- tty_hangup(tty); ++ if (tty) { ++ tty_vhangup(tty); ++ tty_kref_put(tty); ++ } + return 0; + } + EXPORT_SYMBOL_GPL(hvc_remove); diff --git a/virt_console-rollup.patch b/virt_console-rollup.patch index 57fd9b2..20b475d 100644 --- a/virt_console-rollup.patch +++ b/virt_console-rollup.patch @@ -1,420 +1,1390 @@ -diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c -index 48ce834..8c99bf1 100644 ---- a/drivers/char/virtio_console.c -+++ b/drivers/char/virtio_console.c -@@ -33,35 +33,6 @@ - #include - #include "hvc_console.h" +diff --git a/MAINTAINERS b/MAINTAINERS +index 03f38c1..3118dfa 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -2394,6 +2394,12 @@ L: linuxppc-dev@ozlabs.org + S: Odd Fixes + F: drivers/char/hvc_* + ++VIRTIO CONSOLE DRIVER ++M: Amit Shah ++L: virtualization@lists.linux-foundation.org ++S: Maintained ++F: drivers/char/virtio_console.c ++ + GSPCA FINEPIX SUBDRIVER + M: Frank Zago + L: linux-media@vger.kernel.org +diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig +index e023682..3141dd3 100644 +--- a/drivers/char/Kconfig ++++ b/drivers/char/Kconfig +@@ -666,6 +666,14 @@ config VIRTIO_CONSOLE + help + Virtio console for use with lguest and other hypervisors. + ++ Also serves as a general-purpose serial device for data ++ transfer between the guest and host. Character devices at ++ /dev/vportNpn will be created when corresponding ports are ++ found, where N is the device number and n is the port number ++ within that device. If specified by the host, a sysfs ++ attribute called 'name' will be populated with a name for ++ the port which can be used by udev scripts to create a ++ symlink to the device. + + config HVCS + tristate "IBM Hypervisor Virtual Console Server support" +diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c +index 0afc8b8..6913fc3 100644 +--- a/drivers/char/hvc_beat.c ++++ b/drivers/char/hvc_beat.c +@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt) + return cnt; + } --/* Moved here from .h file in order to disable MULTIPORT. */ --#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */ -- --struct virtio_console_multiport_conf { -- struct virtio_console_config config; -- /* max. number of ports this device can hold */ -- __u32 max_nr_ports; -- /* number of ports added so far */ -- __u32 nr_ports; --} __attribute__((packed)); -- --/* -- * A message that's passed between the Host and the Guest for a -- * particular port. -- */ --struct virtio_console_control { -- __u32 id; /* Port number */ -- __u16 event; /* The kind of control event (see below) */ -- __u16 value; /* Extra information for the key */ --}; -- --/* Some events for control messages */ --#define VIRTIO_CONSOLE_PORT_READY 0 --#define VIRTIO_CONSOLE_CONSOLE_PORT 1 --#define VIRTIO_CONSOLE_RESIZE 2 --#define VIRTIO_CONSOLE_PORT_OPEN 3 --#define VIRTIO_CONSOLE_PORT_NAME 4 --#define VIRTIO_CONSOLE_PORT_REMOVE 5 -- - /* - * This is a global struct for storing common data for all the devices - * this driver handles. 
-@@ -107,6 +78,9 @@ struct console { - /* The hvc device associated with this console port */ - struct hvc_struct *hvc; +-static struct hv_ops hvc_beat_get_put_ops = { ++static const struct hv_ops hvc_beat_get_put_ops = { + .get_chars = hvc_beat_get_chars, + .put_chars = hvc_beat_put_chars, + }; +diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c +index 416d342..4c3b59b 100644 +--- a/drivers/char/hvc_console.c ++++ b/drivers/char/hvc_console.c +@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index) + * console interfaces but can still be used as a tty device. This has to be + * static because kmalloc will not work during early console init. + */ +-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES]; ++static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES]; + static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] = + {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1}; + +@@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kref *kref) + * vty adapters do NOT get an hvc_instantiate() callback since they + * appear after early console init. + */ +-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops) ++int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops) + { + struct hvc_struct *hp; -+ /* The size of the console */ -+ struct winsize ws; -+ - /* - * This number identifies the number that we used to register - * with hvc in hvc_instantiate() and hvc_alloc(); this is the -@@ -139,7 +113,6 @@ struct ports_device { - * notification - */ - struct work_struct control_work; -- struct work_struct config_work; +@@ -748,8 +748,9 @@ static const struct tty_operations hvc_ops = { + .chars_in_buffer = hvc_chars_in_buffer, + }; - struct list_head ports; +-struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data, +- struct hv_ops *ops, int outbuf_size) ++struct hvc_struct *hvc_alloc(uint32_t vtermno, int data, ++ const struct hv_ops *ops, ++ int outbuf_size) + { + struct hvc_struct *hp; + int i; +diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h +index 10950ca..54381eb 100644 +--- a/drivers/char/hvc_console.h ++++ b/drivers/char/hvc_console.h +@@ -55,7 +55,7 @@ struct hvc_struct { + int outbuf_size; + int n_outbuf; + uint32_t vtermno; +- struct hv_ops *ops; ++ const struct hv_ops *ops; + int irq_requested; + int data; + struct winsize ws; +@@ -76,11 +76,12 @@ struct hv_ops { + }; -@@ -150,7 +123,7 @@ struct ports_device { - spinlock_t cvq_lock; + /* Register a vterm and a slot index for use as a console (console_init) */ +-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops); ++extern int hvc_instantiate(uint32_t vtermno, int index, ++ const struct hv_ops *ops); + + /* register a vterm for hvc tty operation (module_init or hotplug add) */ +-extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data, +- struct hv_ops *ops, int outbuf_size); ++extern struct hvc_struct * hvc_alloc(uint32_t vtermno, int data, ++ const struct hv_ops *ops, int outbuf_size); + /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */ + extern int hvc_remove(struct hvc_struct *hp); + +diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c +index 936d05b..fd02426 100644 +--- a/drivers/char/hvc_iseries.c ++++ b/drivers/char/hvc_iseries.c +@@ -197,7 +197,7 @@ done: + return sent; + } - /* The current config space is stored here */ -- struct virtio_console_multiport_conf config; -+ struct virtio_console_config config; +-static struct hv_ops hvc_get_put_ops = { ++static const 
struct hv_ops hvc_get_put_ops = { + .get_chars = get_chars, + .put_chars = put_chars, + .notifier_add = notifier_add_irq, +diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c +index fe62bd0..21681a8 100644 +--- a/drivers/char/hvc_iucv.c ++++ b/drivers/char/hvc_iucv.c +@@ -922,7 +922,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev) + + + /* HVC operations */ +-static struct hv_ops hvc_iucv_ops = { ++static const struct hv_ops hvc_iucv_ops = { + .get_chars = hvc_iucv_get_chars, + .put_chars = hvc_iucv_put_chars, + .notifier_add = hvc_iucv_notifier_add, +diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c +index 88590d0..61c4a61 100644 +--- a/drivers/char/hvc_rtas.c ++++ b/drivers/char/hvc_rtas.c +@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count) + return i; + } - /* The virtio device we're associated with */ - struct virtio_device *vdev; -@@ -189,6 +162,9 @@ struct port { - */ - spinlock_t inbuf_lock; +-static struct hv_ops hvc_rtas_get_put_ops = { ++static const struct hv_ops hvc_rtas_get_put_ops = { + .get_chars = hvc_rtas_read_console, + .put_chars = hvc_rtas_write_console, + }; +diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c +index bd63ba8..b0957e6 100644 +--- a/drivers/char/hvc_udbg.c ++++ b/drivers/char/hvc_udbg.c +@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count) + return i; + } + +-static struct hv_ops hvc_udbg_ops = { ++static const struct hv_ops hvc_udbg_ops = { + .get_chars = hvc_udbg_get, + .put_chars = hvc_udbg_put, + }; +diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c +index 10be343..27370e9 100644 +--- a/drivers/char/hvc_vio.c ++++ b/drivers/char/hvc_vio.c +@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count) + return got; + } + +-static struct hv_ops hvc_get_put_ops = { ++static const struct hv_ops hvc_get_put_ops = { + .get_chars = filtered_get_chars, + .put_chars = hvc_put_chars, + .notifier_add = notifier_add_irq, +diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c +index b1a7163..60446f8 100644 +--- a/drivers/char/hvc_xen.c ++++ b/drivers/char/hvc_xen.c +@@ -122,7 +122,7 @@ static int read_console(uint32_t vtermno, char *buf, int len) + return recv; + } -+ /* Protect the operations on the out_vq. */ -+ spinlock_t outvq_lock; +-static struct hv_ops hvc_ops = { ++static const struct hv_ops hvc_ops = { + .get_chars = read_console, + .put_chars = write_console, + .notifier_add = notifier_add_irq, +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c +index a035ae3..213373b 100644 +--- a/drivers/char/virtio_console.c ++++ b/drivers/char/virtio_console.c +@@ -1,18 +1,6 @@ +-/*D:300 +- * The Guest console driver +- * +- * Writing console drivers is one of the few remaining Dark Arts in Linux. +- * Fortunately for us, the path of virtual consoles has been well-trodden by +- * the PowerPC folks, who wrote "hvc_console.c" to generically support any +- * virtual console. We use that infrastructure which only requires us to write +- * the basic put_chars and get_chars functions and call the right register +- * functions. +- :*/ +- +-/*M:002 The console can be flooded: while the Guest is processing input the +- * Host can send more. Buffering in the Host could alleviate this, but it is a +- * difficult problem in general. 
:*/ +-/* Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation ++/* ++ * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation ++ * Copyright (C) 2009, 2010 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by +@@ -28,142 +16,694 @@ + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ ++#include ++#include ++#include + #include ++#include + #include ++#include ++#include ++#include ++#include + #include + #include ++#include ++#include + #include "hvc_console.h" + +-/*D:340 These represent our input and output console queues, and the virtio +- * operations for them. */ +-static struct virtqueue *in_vq, *out_vq; +-static struct virtio_device *vdev; ++/* ++ * This is a global struct for storing common data for all the devices ++ * this driver handles. ++ * ++ * Mainly, it has a linked list for all the consoles in one place so ++ * that callbacks from hvc for get_chars(), put_chars() work properly ++ * across multiple devices and multiple ports per device. ++ */ ++struct ports_driver_data { ++ /* Used for registering chardevs */ ++ struct class *class; ++ ++ /* Used for exporting per-port information to debugfs */ ++ struct dentry *debugfs_dir; + - /* The IO vqs for this port */ - struct virtqueue *in_vq, *out_vq; ++ /* Number of devices this driver is handling */ ++ unsigned int index; ++ ++ /* ++ * This is used to keep track of the number of hvc consoles ++ * spawned by this driver. This number is given as the first ++ * argument to hvc_alloc(). To correctly map an initial ++ * console spawned via hvc_instantiate to the console being ++ * hooked up via hvc_alloc, we need to pass the same vtermno. ++ * ++ * We also just assume the first console being initialised was ++ * the first one that got used as the initial console. ++ */ ++ unsigned int next_vtermno; ++ ++ /* All the console devices handled by this driver */ ++ struct list_head consoles; ++}; ++static struct ports_driver_data pdrvdata; ++ ++DEFINE_SPINLOCK(pdrvdata_lock); ++ ++/* This struct holds information that's relevant only for console ports */ ++struct console { ++ /* We'll place all consoles in a list in the pdrvdata struct */ ++ struct list_head list; ++ ++ /* The hvc device associated with this console port */ ++ struct hvc_struct *hvc; ++ ++ /* ++ * This number identifies the number that we used to register ++ * with hvc in hvc_instantiate() and hvc_alloc(); this is the ++ * number passed on by the hvc callbacks to us to ++ * differentiate between the other console ports handled by ++ * this driver ++ */ ++ u32 vtermno; ++}; ++ ++struct port_buffer { ++ char *buf; ++ ++ /* size of the buffer in *buf above */ ++ size_t size; ++ ++ /* used length of the buffer */ ++ size_t len; ++ /* offset in the buf from which to consume data */ ++ size_t offset; ++}; ++ ++/* ++ * This is a per-device struct that stores data common to all the ++ * ports for that device (vdev->priv). 
++ */ ++struct ports_device { ++ /* ++ * Workqueue handlers where we process deferred work after ++ * notification ++ */ ++ struct work_struct control_work; ++ struct work_struct config_work; ++ ++ struct list_head ports; ++ ++ /* To protect the list of ports */ ++ spinlock_t ports_lock; ++ ++ /* To protect the vq operations for the control channel */ ++ spinlock_t cvq_lock; ++ ++ /* The current config space is stored here */ ++ struct virtio_console_config config; ++ ++ /* The virtio device we're associated with */ ++ struct virtio_device *vdev; ++ ++ /* ++ * A couple of virtqueues for the control channel: one for ++ * guest->host transfers, one for host->guest transfers ++ */ ++ struct virtqueue *c_ivq, *c_ovq; ++ ++ /* Array of per-port IO virtqueues */ ++ struct virtqueue **in_vqs, **out_vqs; ++ ++ /* Used for numbering devices for sysfs and debugfs */ ++ unsigned int drv_index; ++ ++ /* Major number for this device. Ports will be created as minors. */ ++ int chr_major; ++}; ++ ++/* This struct holds the per-port data */ ++struct port { ++ /* Next port in the list, head is in the ports_device */ ++ struct list_head list; ++ ++ /* Pointer to the parent virtio_console device */ ++ struct ports_device *portdev; ++ ++ /* The current buffer from which data has to be fed to readers */ ++ struct port_buffer *inbuf; ++ ++ /* ++ * To protect the operations on the in_vq associated with this ++ * port. Has to be a spinlock because it can be called from ++ * interrupt context (get_char()). ++ */ ++ spinlock_t inbuf_lock; ++ ++ /* The IO vqs for this port */ ++ struct virtqueue *in_vq, *out_vq; ++ ++ /* File in the debugfs directory that exposes this port's information */ ++ struct dentry *debugfs_file; ++ ++ /* ++ * The entries in this struct will be valid if this port is ++ * hooked up to an hvc console ++ */ ++ struct console cons; ++ ++ /* Each port associates with a separate char device */ ++ struct cdev cdev; ++ struct device *dev; ++ ++ /* A waitqueue for poll() or blocking read operations */ ++ wait_queue_head_t waitqueue; ++ ++ /* The 'name' of the port that we expose via sysfs properties */ ++ char *name; ++ ++ /* The 'id' to identify the port with the Host */ ++ u32 id; ++ ++ /* Is the host device open */ ++ bool host_connected; ++ ++ /* We should allow only one process to open a port */ ++ bool guest_connected; ++}; ++ ++/* This is the very early arch-specified put chars function. 
*/ ++static int (*early_put_chars)(u32, const char *, int); ++ ++static struct port *find_port_by_vtermno(u32 vtermno) ++{ ++ struct port *port; ++ struct console *cons; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&pdrvdata_lock, flags); ++ list_for_each_entry(cons, &pdrvdata.consoles, list) { ++ if (cons->vtermno == vtermno) { ++ port = container_of(cons, struct port, cons); ++ goto out; ++ } ++ } ++ port = NULL; ++out: ++ spin_unlock_irqrestore(&pdrvdata_lock, flags); ++ return port; ++} ++ ++static struct port *find_port_by_id(struct ports_device *portdev, u32 id) ++{ ++ struct port *port; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&portdev->ports_lock, flags); ++ list_for_each_entry(port, &portdev->ports, list) ++ if (port->id == id) ++ goto out; ++ port = NULL; ++out: ++ spin_unlock_irqrestore(&portdev->ports_lock, flags); ++ ++ return port; ++} ++ ++static struct port *find_port_by_vq(struct ports_device *portdev, ++ struct virtqueue *vq) ++{ ++ struct port *port; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&portdev->ports_lock, flags); ++ list_for_each_entry(port, &portdev->ports, list) ++ if (port->in_vq == vq || port->out_vq == vq) ++ goto out; ++ port = NULL; ++out: ++ spin_unlock_irqrestore(&portdev->ports_lock, flags); ++ return port; ++} ++ ++static bool is_console_port(struct port *port) ++{ ++ if (port->cons.hvc) ++ return true; ++ return false; ++} ++ ++static inline bool use_multiport(struct ports_device *portdev) ++{ ++ /* ++ * This condition can be true when put_chars is called from ++ * early_init ++ */ ++ if (!portdev->vdev) ++ return 0; ++ return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT); ++} -@@ -214,6 +190,8 @@ struct port { - /* The 'id' to identify the port with the Host */ - u32 id; +-/* This is our input buffer, and how much data is left in it. */ +-static unsigned int in_len; +-static char *in, *inbuf; ++static void free_buf(struct port_buffer *buf) ++{ ++ kfree(buf->buf); ++ kfree(buf); ++} ++ ++static struct port_buffer *alloc_buf(size_t buf_size) ++{ ++ struct port_buffer *buf; -+ bool outvq_full; +-/* The operations for our console. */ +-static struct hv_ops virtio_cons; ++ buf = kmalloc(sizeof(*buf), GFP_KERNEL); ++ if (!buf) ++ goto fail; ++ buf->buf = kzalloc(buf_size, GFP_KERNEL); ++ if (!buf->buf) ++ goto free_buf; ++ buf->len = 0; ++ buf->offset = 0; ++ buf->size = buf_size; ++ return buf; ++ ++free_buf: ++ kfree(buf); ++fail: ++ return NULL; ++} + - /* Is the host device open */ - bool host_connected; ++/* Callers should take appropriate locks */ ++static void *get_inbuf(struct port *port) ++{ ++ struct port_buffer *buf; ++ struct virtqueue *vq; ++ unsigned int len; -@@ -403,22 +381,22 @@ out: - return ret; - } +-/* The hvc device */ +-static struct hvc_struct *hvc; ++ vq = port->in_vq; ++ buf = vq->vq_ops->get_buf(vq, &len); ++ if (buf) { ++ buf->len = len; ++ buf->offset = 0; ++ } ++ return buf; ++} --static ssize_t send_control_msg(struct port *port, unsigned int event, -- unsigned int value) -+static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, -+ unsigned int event, unsigned int value) +-/*D:310 The put_chars() callback is pretty straightforward. ++/* ++ * Create a scatter-gather list representing our input buffer and put ++ * it in the queue. + * +- * We turn the characters into a scatter-gather list, add it to the output +- * queue and then kick the Host. 
Then we sit here waiting for it to finish: +- * inefficient in theory, but in practice implementations will do it +- * immediately (lguest's Launcher does). */ +-static int put_chars(u32 vtermno, const char *buf, int count) ++ * Callers should take appropriate locks. ++ */ ++static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf) { struct scatterlist sg[1]; - struct virtio_console_control cpkt; - struct virtqueue *vq; ++ int ret; ++ ++ sg_init_one(sg, buf->buf, buf->size); ++ ++ ret = vq->vq_ops->add_buf(vq, sg, 0, 1, buf); ++ vq->vq_ops->kick(vq); ++ return ret; ++} ++ ++/* Discard any unread data this port has. Callers lockers. */ ++static void discard_port_data(struct port *port) ++{ ++ struct port_buffer *buf; ++ struct virtqueue *vq; unsigned int len; ++ int ret; -- if (!use_multiport(port->portdev)) -+ if (!use_multiport(portdev)) - return 0; - -- cpkt.id = port->id; -+ cpkt.id = port_id; - cpkt.event = event; - cpkt.value = value; - -- vq = port->portdev->c_ovq; -+ vq = portdev->c_ovq; +- /* This is a convenient routine to initialize a single-elem sg list */ +- sg_init_one(sg, buf, count); ++ vq = port->in_vq; ++ if (port->inbuf) ++ buf = port->inbuf; ++ else ++ buf = vq->vq_ops->get_buf(vq, &len); + +- /* add_buf wants a token to identify this buffer: we hand it any +- * non-NULL pointer, since there's only ever one buffer. */ +- if (out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, (void *)1) >= 0) { +- /* Tell Host to go! */ +- out_vq->vq_ops->kick(out_vq); +- /* Chill out until it's done with the buffer. */ +- while (!out_vq->vq_ops->get_buf(out_vq, &len)) +- cpu_relax(); ++ ret = 0; ++ while (buf) { ++ if (add_inbuf(vq, buf) < 0) { ++ ret++; ++ free_buf(buf); ++ } ++ buf = vq->vq_ops->get_buf(vq, &len); + } ++ port->inbuf = NULL; ++ if (ret) ++ dev_warn(port->dev, "Errors adding %d buffers back to vq\n", ++ ret); ++} - sg_init_one(sg, &cpkt, sizeof(cpkt)); - if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) { -@@ -429,15 +407,39 @@ static ssize_t send_control_msg(struct port *port, unsigned int event, - return 0; +- /* We're expected to return the amount of data we wrote: all of it. */ +- return count; ++static bool port_has_data(struct port *port) ++{ ++ unsigned long flags; ++ bool ret; ++ ++ spin_lock_irqsave(&port->inbuf_lock, flags); ++ if (port->inbuf) { ++ ret = true; ++ goto out; ++ } ++ port->inbuf = get_inbuf(port); ++ if (port->inbuf) { ++ ret = true; ++ goto out; ++ } ++ ret = false; ++out: ++ spin_unlock_irqrestore(&port->inbuf_lock, flags); ++ return ret; } --static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) +-/* Create a scatter-gather list representing our input buffer and put it in the +- * queue. */ +-static void add_inbuf(void) +static ssize_t send_control_msg(struct port *port, unsigned int event, + unsigned int value) -+{ -+ return __send_control_msg(port->portdev, port->id, event, value); -+} + { + struct scatterlist sg[1]; +- sg_init_one(sg, inbuf, PAGE_SIZE); ++ struct virtio_console_control cpkt; ++ struct virtqueue *vq; ++ int len; + -+/* Callers must take the port->outvq_lock */ -+static void reclaim_consumed_buffers(struct port *port) -+{ -+ void *buf; ++ if (!use_multiport(port->portdev)) ++ return 0; ++ ++ cpkt.id = port->id; ++ cpkt.event = event; ++ cpkt.value = value; ++ ++ vq = port->portdev->c_ovq; + +- /* We should always be able to add one buffer to an empty queue. 
*/ +- if (in_vq->vq_ops->add_buf(in_vq, sg, 0, 1, inbuf) < 0) +- BUG(); +- in_vq->vq_ops->kick(in_vq); ++ sg_init_one(sg, &cpkt, sizeof(cpkt)); ++ if (vq->vq_ops->add_buf(vq, sg, 1, 0, &cpkt) >= 0) { ++ vq->vq_ops->kick(vq); ++ while (!vq->vq_ops->get_buf(vq, &len)) ++ cpu_relax(); ++ } ++ return 0; + } + +-/*D:350 get_chars() is the callback from the hvc_console infrastructure when +- * an interrupt is received. +- * +- * Most of the code deals with the fact that the hvc_console() infrastructure +- * only asks us for 16 bytes at a time. We keep in_offset and in_used fields +- * for partially-filled buffers. */ +-static int get_chars(u32 vtermno, char *buf, int count) ++static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) + { +- /* If we don't have an input queue yet, we can't get input. */ +- BUG_ON(!in_vq); ++ struct scatterlist sg[1]; ++ struct virtqueue *out_vq; ++ ssize_t ret; + unsigned int len; + -+ while ((buf = virtqueue_get_buf(port->out_vq, &len))) { -+ kfree(buf); -+ port->outvq_full = false; ++ out_vq = port->out_vq; ++ ++ sg_init_one(sg, in_buf, in_count); ++ ret = out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, in_buf); ++ ++ /* Tell Host to go! */ ++ out_vq->vq_ops->kick(out_vq); ++ ++ if (ret < 0) { ++ len = 0; ++ goto fail; + } ++ ++ /* ++ * Wait till the host acknowledges it pushed out the data we ++ * sent. Also ensure we return to userspace the number of ++ * bytes that were successfully consumed by the host. ++ */ ++ while (!out_vq->vq_ops->get_buf(out_vq, &len)) ++ cpu_relax(); ++fail: ++ /* We're expected to return the amount of data we wrote */ ++ return len; +} + -+static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count, -+ bool nonblock) - { - struct scatterlist sg[1]; - struct virtqueue *out_vq; - ssize_t ret; ++/* ++ * Give out the data that's requested from the buffer that we have ++ * queued up. ++ */ ++static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count, ++ bool to_user) ++{ ++ struct port_buffer *buf; + unsigned long flags; - unsigned int len; - - out_vq = port->out_vq; - -+ spin_lock_irqsave(&port->outvq_lock, flags); + -+ reclaim_consumed_buffers(port); ++ if (!out_count || !port_has_data(port)) ++ return 0; + - sg_init_one(sg, in_buf, in_count); - ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf); - -@@ -446,14 +448,29 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) - - if (ret < 0) { - in_count = 0; -- goto fail; -+ goto done; - } ++ buf = port->inbuf; ++ out_count = min(out_count, buf->len - buf->offset); ++ ++ if (to_user) { ++ ssize_t ret; ++ ++ ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count); ++ if (ret) ++ return -EFAULT; ++ } else { ++ memcpy(out_buf, buf->buf + buf->offset, out_count); ++ } ++ ++ buf->offset += out_count; ++ ++ if (buf->offset == buf->len) { ++ /* ++ * We're done using all the data in this buffer. ++ * Re-queue so that the Host can send us more data. ++ */ ++ spin_lock_irqsave(&port->inbuf_lock, flags); ++ port->inbuf = NULL; ++ ++ if (add_inbuf(port->in_vq, buf) < 0) ++ dev_warn(port->dev, "failed add_buf\n"); ++ ++ spin_unlock_irqrestore(&port->inbuf_lock, flags); ++ } ++ /* Return the number of bytes actually copied */ ++ return out_count; ++} -- /* Wait till the host acknowledges it pushed out the data we sent. */ -+ if (ret == 0) -+ port->outvq_full = true; +- /* No buffer? Try to get one. 
*/ +- if (!in_len) { +- in = in_vq->vq_ops->get_buf(in_vq, &in_len); +- if (!in) ++/* The condition that must be true for polling to end */ ++static bool wait_is_over(struct port *port) ++{ ++ return port_has_data(port) || !port->host_connected; ++} + -+ if (nonblock) -+ goto done; ++static ssize_t port_fops_read(struct file *filp, char __user *ubuf, ++ size_t count, loff_t *offp) ++{ ++ struct port *port; ++ ssize_t ret; + ++ port = filp->private_data; ++ ++ if (!port_has_data(port)) { ++ /* ++ * If nothing's connected on the host just return 0 in ++ * case of list_empty; this tells the userspace app ++ * that there's no connection ++ */ ++ if (!port->host_connected) + return 0; ++ if (filp->f_flags & O_NONBLOCK) ++ return -EAGAIN; ++ ++ ret = wait_event_interruptible(port->waitqueue, ++ wait_is_over(port)); ++ if (ret < 0) ++ return ret; ++ } + /* -+ * Wait till the host acknowledges it pushed out the data we -+ * sent. This is done for ports in blocking mode or for data -+ * from the hvc_console; the tty operations are performed with -+ * spinlocks held so we can't sleep here. -+ */ - while (!virtqueue_get_buf(out_vq, &len)) - cpu_relax(); --fail: -- /* We're expected to return the amount of data we wrote */ -+done: -+ spin_unlock_irqrestore(&port->outvq_lock, flags); -+ /* -+ * We're expected to return the amount of data we wrote -- all -+ * of it ++ * We could've received a disconnection message while we were ++ * waiting for more data. ++ * ++ * This check is not clubbed in the if() statement above as we ++ * might receive some data as well as the host could get ++ * disconnected after we got woken up from our wait. So we ++ * really want to give off whatever data we have and only then ++ * check for host_connected. + */ - return in_count; - } - -@@ -503,9 +520,28 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count, - } - - /* The condition that must be true for polling to end */ --static bool wait_is_over(struct port *port) -+static bool will_read_block(struct port *port) ++ if (!port_has_data(port) && !port->host_connected) ++ return 0; ++ ++ return fill_readbuf(port, ubuf, count, true); ++} ++ ++static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, ++ size_t count, loff_t *offp) +{ -+ return !port_has_data(port) && port->host_connected; ++ struct port *port; ++ char *buf; ++ ssize_t ret; ++ ++ port = filp->private_data; ++ ++ count = min((size_t)(32 * 1024), count); ++ ++ buf = kmalloc(count, GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; ++ ++ ret = copy_from_user(buf, ubuf, count); ++ if (ret) { ++ ret = -EFAULT; ++ goto free_buf; + } + +- /* You want more than we have to give? Well, try wanting less! 
*/ +- if (in_len < count) +- count = in_len; ++ ret = send_buf(port, buf, count); ++free_buf: ++ kfree(buf); ++ return ret; +} + -+static bool will_write_block(struct port *port) - { -- return port_has_data(port) || !port->host_connected; -+ bool ret; ++static unsigned int port_fops_poll(struct file *filp, poll_table *wait) ++{ ++ struct port *port; ++ unsigned int ret; ++ ++ port = filp->private_data; ++ poll_wait(filp, &port->waitqueue, wait); + ++ ret = 0; ++ if (port->inbuf) ++ ret |= POLLIN | POLLRDNORM; ++ if (port->host_connected) ++ ret |= POLLOUT; + if (!port->host_connected) -+ return true; ++ ret |= POLLHUP; ++ ++ return ret; ++} ++ ++static int port_fops_release(struct inode *inode, struct file *filp) ++{ ++ struct port *port; ++ ++ port = filp->private_data; ++ ++ /* Notify host of port being closed */ ++ send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); ++ ++ spin_lock_irq(&port->inbuf_lock); ++ port->guest_connected = false; ++ ++ discard_port_data(port); ++ ++ spin_unlock_irq(&port->inbuf_lock); ++ ++ return 0; ++} ++ ++static int port_fops_open(struct inode *inode, struct file *filp) ++{ ++ struct cdev *cdev = inode->i_cdev; ++ struct port *port; ++ ++ port = container_of(cdev, struct port, cdev); ++ filp->private_data = port; + -+ spin_lock_irq(&port->outvq_lock); + /* -+ * Check if the Host has consumed any buffers since we last -+ * sent data (this is only applicable for nonblocking ports). ++ * Don't allow opening of console port devices -- that's done ++ * via /dev/hvc + */ -+ reclaim_consumed_buffers(port); -+ ret = port->outvq_full; -+ spin_unlock_irq(&port->outvq_lock); ++ if (is_console_port(port)) ++ return -ENXIO; + -+ return ret; ++ /* Allow only one process to open a particular port at a time */ ++ spin_lock_irq(&port->inbuf_lock); ++ if (port->guest_connected) { ++ spin_unlock_irq(&port->inbuf_lock); ++ return -EMFILE; ++ } + +- /* Copy across to their buffer and increment offset. */ +- memcpy(buf, in, count); +- in += count; +- in_len -= count; ++ port->guest_connected = true; ++ spin_unlock_irq(&port->inbuf_lock); + +- /* Finished? Re-register buffer so Host will use it again. */ +- if (in_len == 0) +- add_inbuf(); ++ /* Notify host of port being opened */ ++ send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); + +- return count; ++ return 0; } +-/*:*/ - static ssize_t port_fops_read(struct file *filp, char __user *ubuf, -@@ -528,7 +564,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, - return -EAGAIN; +-/*D:320 Console drivers are initialized very early so boot messages can go out, +- * so we do things slightly differently from the generic virtio initialization +- * of the net and block drivers. ++/* ++ * The file operations that we support: programs in the guest can open ++ * a console device, read from it, write to it, poll for data and ++ * close it. The devices are at ++ * /dev/vportp ++ */ ++static const struct file_operations port_fops = { ++ .owner = THIS_MODULE, ++ .open = port_fops_open, ++ .read = port_fops_read, ++ .write = port_fops_write, ++ .poll = port_fops_poll, ++ .release = port_fops_release, ++}; ++ ++/* ++ * The put_chars() callback is pretty straightforward. + * +- * At this stage, the console is output-only. It's too early to set up a +- * virtqueue, so we let the drivers do some boutique early-output thing. 
*/ +-int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)) ++ * We turn the characters into a scatter-gather list, add it to the ++ * output queue and then kick the Host. Then we sit here waiting for ++ * it to finish: inefficient in theory, but in practice ++ * implementations will do it immediately (lguest's Launcher does). ++ */ ++static int put_chars(u32 vtermno, const char *buf, int count) + { +- virtio_cons.put_chars = put_chars; +- return hvc_instantiate(0, 0, &virtio_cons); ++ struct port *port; ++ ++ port = find_port_by_vtermno(vtermno); ++ if (!port) ++ return 0; ++ ++ if (unlikely(early_put_chars)) ++ return early_put_chars(vtermno, buf, count); ++ ++ return send_buf(port, (void *)buf, count); + } - ret = wait_event_interruptible(port->waitqueue, -- wait_is_over(port)); -+ !will_read_block(port)); - if (ret < 0) - return ret; + /* +- * virtio console configuration. This supports: +- * - console resize ++ * get_chars() is the callback from the hvc_console infrastructure ++ * when an interrupt is received. ++ * ++ * We call out to fill_readbuf that gets us the required data from the ++ * buffers that are queued up. + */ +-static void virtcons_apply_config(struct virtio_device *dev) ++static int get_chars(u32 vtermno, char *buf, int count) + { ++ struct port *port; ++ ++ port = find_port_by_vtermno(vtermno); ++ if (!port) ++ return 0; ++ ++ /* If we don't have an input queue yet, we can't get input. */ ++ BUG_ON(!port->in_vq); ++ ++ return fill_readbuf(port, buf, count, false); ++} ++ ++static void resize_console(struct port *port) ++{ ++ struct virtio_device *vdev; + struct winsize ws; + +- if (virtio_has_feature(dev, VIRTIO_CONSOLE_F_SIZE)) { +- dev->config->get(dev, +- offsetof(struct virtio_console_config, cols), +- &ws.ws_col, sizeof(u16)); +- dev->config->get(dev, +- offsetof(struct virtio_console_config, rows), +- &ws.ws_row, sizeof(u16)); +- hvc_resize(hvc, ws); ++ vdev = port->portdev->vdev; ++ if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) { ++ vdev->config->get(vdev, ++ offsetof(struct virtio_console_config, cols), ++ &ws.ws_col, sizeof(u16)); ++ vdev->config->get(vdev, ++ offsetof(struct virtio_console_config, rows), ++ &ws.ws_row, sizeof(u16)); ++ hvc_resize(port->cons.hvc, ws); } -@@ -554,9 +590,22 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, - struct port *port; - char *buf; - ssize_t ret; -+ bool nonblock; + } - port = filp->private_data; +-/* +- * we support only one console, the hvc struct is a global var +- * We set the configuration at this point, since we now have a tty +- */ ++/* We set the configuration at this point, since we now have a tty */ + static int notifier_add_vio(struct hvc_struct *hp, int data) + { ++ struct port *port; ++ ++ port = find_port_by_vtermno(hp->vtermno); ++ if (!port) ++ return -EINVAL; ++ + hp->irq_requested = 1; +- virtcons_apply_config(vdev); ++ resize_console(port); + + return 0; + } +@@ -173,79 +713,797 @@ static void notifier_del_vio(struct hvc_struct *hp, int data) + hp->irq_requested = 0; + } -+ nonblock = filp->f_flags & O_NONBLOCK; +-static void hvc_handle_input(struct virtqueue *vq) ++/* The operations for console ports. 
*/ ++static const struct hv_ops hv_ops = { ++ .get_chars = get_chars, ++ .put_chars = put_chars, ++ .notifier_add = notifier_add_vio, ++ .notifier_del = notifier_del_vio, ++ .notifier_hangup = notifier_del_vio, ++}; + -+ if (will_write_block(port)) { -+ if (nonblock) -+ return -EAGAIN; ++/* ++ * Console drivers are initialized very early so boot messages can go ++ * out, so we do things slightly differently from the generic virtio ++ * initialization of the net and block drivers. ++ * ++ * At this stage, the console is output-only. It's too early to set ++ * up a virtqueue, so we let the drivers do some boutique early-output ++ * thing. ++ */ ++int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)) + { +- if (hvc_poll(hvc)) ++ early_put_chars = put_chars; ++ return hvc_instantiate(0, 0, &hv_ops); ++} ++ ++int init_port_console(struct port *port) ++{ ++ int ret; ++ ++ /* ++ * The Host's telling us this port is a console port. Hook it ++ * up with an hvc console. ++ * ++ * To set up and manage our virtual console, we call ++ * hvc_alloc(). ++ * ++ * The first argument of hvc_alloc() is the virtual console ++ * number. The second argument is the parameter for the ++ * notification mechanism (like irq number). We currently ++ * leave this as zero, virtqueues have implicit notifications. ++ * ++ * The third argument is a "struct hv_ops" containing the ++ * put_chars() get_chars(), notifier_add() and notifier_del() ++ * pointers. The final argument is the output buffer size: we ++ * can do any size, so we put PAGE_SIZE here. ++ */ ++ port->cons.vtermno = pdrvdata.next_vtermno; ++ ++ port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE); ++ if (IS_ERR(port->cons.hvc)) { ++ ret = PTR_ERR(port->cons.hvc); ++ dev_err(port->dev, ++ "error %d allocating hvc for port\n", ret); ++ port->cons.hvc = NULL; ++ return ret; ++ } ++ spin_lock_irq(&pdrvdata_lock); ++ pdrvdata.next_vtermno++; ++ list_add_tail(&port->cons.list, &pdrvdata.consoles); ++ spin_unlock_irq(&pdrvdata_lock); ++ port->guest_connected = true; ++ ++ /* Notify host of port being opened */ ++ send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); ++ ++ return 0; ++} ++ ++static ssize_t show_port_name(struct device *dev, ++ struct device_attribute *attr, char *buffer) ++{ ++ struct port *port; ++ ++ port = dev_get_drvdata(dev); ++ ++ return sprintf(buffer, "%s\n", port->name); ++} ++ ++static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL); ++ ++static struct attribute *port_sysfs_entries[] = { ++ &dev_attr_name.attr, ++ NULL ++}; ++ ++static struct attribute_group port_attribute_group = { ++ .name = NULL, /* put in device directory */ ++ .attrs = port_sysfs_entries, ++}; ++ ++static int debugfs_open(struct inode *inode, struct file *filp) ++{ ++ filp->private_data = inode->i_private; ++ return 0; ++} ++ ++static ssize_t debugfs_read(struct file *filp, char __user *ubuf, ++ size_t count, loff_t *offp) ++{ ++ struct port *port; ++ char *buf; ++ ssize_t ret, out_offset, out_count; ++ ++ out_count = 1024; ++ buf = kmalloc(out_count, GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; ++ ++ port = filp->private_data; ++ out_offset = 0; ++ out_offset += snprintf(buf + out_offset, out_count, ++ "name: %s\n", port->name ? 
port->name : ""); ++ out_offset += snprintf(buf + out_offset, out_count - out_offset, ++ "guest_connected: %d\n", port->guest_connected); ++ out_offset += snprintf(buf + out_offset, out_count - out_offset, ++ "host_connected: %d\n", port->host_connected); ++ out_offset += snprintf(buf + out_offset, out_count - out_offset, ++ "is_console: %s\n", ++ is_console_port(port) ? "yes" : "no"); ++ out_offset += snprintf(buf + out_offset, out_count - out_offset, ++ "console_vtermno: %u\n", port->cons.vtermno); ++ ++ ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset); ++ kfree(buf); ++ return ret; ++} ++ ++static const struct file_operations port_debugfs_ops = { ++ .owner = THIS_MODULE, ++ .open = debugfs_open, ++ .read = debugfs_read, ++}; ++ ++/* Remove all port-specific data. */ ++static int remove_port(struct port *port) ++{ ++ struct port_buffer *buf; ++ ++ spin_lock_irq(&port->portdev->ports_lock); ++ list_del(&port->list); ++ spin_unlock_irq(&port->portdev->ports_lock); ++ ++ if (is_console_port(port)) { ++ spin_lock_irq(&pdrvdata_lock); ++ list_del(&port->cons.list); ++ spin_unlock_irq(&pdrvdata_lock); ++ hvc_remove(port->cons.hvc); ++ } ++ if (port->guest_connected) ++ send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); ++ ++ sysfs_remove_group(&port->dev->kobj, &port_attribute_group); ++ device_destroy(pdrvdata.class, port->dev->devt); ++ cdev_del(&port->cdev); ++ ++ /* Remove unused data this port might have received. */ ++ discard_port_data(port); ++ ++ /* Remove buffers we queued up for the Host to send us data in. */ ++ while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) ++ free_buf(buf); ++ ++ kfree(port->name); ++ ++ debugfs_remove(port->debugfs_file); ++ ++ kfree(port); ++ return 0; ++} ++ ++/* Any private messages that the Host and Guest want to share */ ++static void handle_control_message(struct ports_device *portdev, ++ struct port_buffer *buf) ++{ ++ struct virtio_console_control *cpkt; ++ struct port *port; ++ size_t name_size; ++ int err; ++ ++ cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); ++ ++ port = find_port_by_id(portdev, cpkt->id); ++ if (!port) { ++ /* No valid header at start of buffer. Drop it. */ ++ dev_dbg(&portdev->vdev->dev, ++ "Invalid index %u in control packet\n", cpkt->id); ++ return; ++ } ++ ++ switch (cpkt->event) { ++ case VIRTIO_CONSOLE_CONSOLE_PORT: ++ if (!cpkt->value) ++ break; ++ if (is_console_port(port)) ++ break; ++ ++ init_port_console(port); ++ /* ++ * Could remove the port here in case init fails - but ++ * have to notify the host first. ++ */ ++ break; ++ case VIRTIO_CONSOLE_RESIZE: ++ if (!is_console_port(port)) ++ break; ++ port->cons.hvc->irq_requested = 1; ++ resize_console(port); ++ break; ++ case VIRTIO_CONSOLE_PORT_OPEN: ++ port->host_connected = cpkt->value; ++ wake_up_interruptible(&port->waitqueue); ++ break; ++ case VIRTIO_CONSOLE_PORT_NAME: ++ /* ++ * Skip the size of the header and the cpkt to get the size ++ * of the name that was sent ++ */ ++ name_size = buf->len - buf->offset - sizeof(*cpkt) + 1; ++ ++ port->name = kmalloc(name_size, GFP_KERNEL); ++ if (!port->name) { ++ dev_err(port->dev, ++ "Not enough space to store port name\n"); ++ break; ++ } ++ strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt), ++ name_size - 1); ++ port->name[name_size - 1] = 0; ++ ++ /* ++ * Since we only have one sysfs attribute, 'name', ++ * create it only if we have a name for the port. 
++ */ ++ err = sysfs_create_group(&port->dev->kobj, ++ &port_attribute_group); ++ if (err) ++ dev_err(port->dev, ++ "Error %d creating sysfs device attributes\n", ++ err); ++ ++ break; ++ case VIRTIO_CONSOLE_PORT_REMOVE: ++ /* ++ * Hot unplug the port. We don't decrement nr_ports ++ * since we don't want to deal with extra complexities ++ * of using the lowest-available port id: We can just ++ * pick up the nr_ports number as the id and not have ++ * userspace send it to us. This helps us in two ++ * ways: ++ * ++ * - We don't need to have a 'port_id' field in the ++ * config space when a port is hot-added. This is a ++ * good thing as we might queue up multiple hotplug ++ * requests issued in our workqueue. ++ * ++ * - Another way to deal with this would have been to ++ * use a bitmap of the active ports and select the ++ * lowest non-active port from that map. That ++ * bloats the already tight config space and we ++ * would end up artificially limiting the ++ * max. number of ports to sizeof(bitmap). Right ++ * now we can support 2^32 ports (as the port id is ++ * stored in a u32 type). ++ * ++ */ ++ remove_port(port); ++ break; ++ } ++} ++ ++static void control_work_handler(struct work_struct *work) ++{ ++ struct ports_device *portdev; ++ struct virtqueue *vq; ++ struct port_buffer *buf; ++ unsigned int len; ++ ++ portdev = container_of(work, struct ports_device, control_work); ++ vq = portdev->c_ivq; ++ ++ spin_lock(&portdev->cvq_lock); ++ while ((buf = vq->vq_ops->get_buf(vq, &len))) { ++ spin_unlock(&portdev->cvq_lock); ++ ++ buf->len = len; ++ buf->offset = 0; ++ ++ handle_control_message(portdev, buf); + -+ ret = wait_event_interruptible(port->waitqueue, -+ !will_write_block(port)); -+ if (ret < 0) -+ return ret; ++ spin_lock(&portdev->cvq_lock); ++ if (add_inbuf(portdev->c_ivq, buf) < 0) { ++ dev_warn(&portdev->vdev->dev, ++ "Error adding buffer to queue\n"); ++ free_buf(buf); ++ } + } ++ spin_unlock(&portdev->cvq_lock); ++} + - count = min((size_t)(32 * 1024), count); - - buf = kmalloc(count, GFP_KERNEL); -@@ -569,9 +618,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, - goto free_buf; - } - -- ret = send_buf(port, buf, count); -+ ret = send_buf(port, buf, count, nonblock); ++static void in_intr(struct virtqueue *vq) ++{ ++ struct port *port; ++ unsigned long flags; + -+ if (nonblock && ret > 0) -+ goto out; ++ port = find_port_by_vq(vq->vdev->priv, vq); ++ if (!port) ++ return; + - free_buf: - kfree(buf); -+out: - return ret; - } - -@@ -586,7 +640,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait) - ret = 0; - if (port->inbuf) - ret |= POLLIN | POLLRDNORM; -- if (port->host_connected) -+ if (!will_write_block(port)) - ret |= POLLOUT; - if (!port->host_connected) - ret |= POLLHUP; -@@ -610,6 +664,10 @@ static int port_fops_release(struct inode *inode, struct file *filp) - - spin_unlock_irq(&port->inbuf_lock); - -+ spin_lock_irq(&port->outvq_lock); -+ reclaim_consumed_buffers(port); -+ spin_unlock_irq(&port->outvq_lock); ++ spin_lock_irqsave(&port->inbuf_lock, flags); ++ if (!port->inbuf) ++ port->inbuf = get_inbuf(port); + - return 0; - } - -@@ -638,6 +696,15 @@ static int port_fops_open(struct inode *inode, struct file *filp) - port->guest_connected = true; - spin_unlock_irq(&port->inbuf_lock); - -+ spin_lock_irq(&port->outvq_lock); + /* -+ * There might be a chance that we missed reclaiming a few -+ * buffers in the window of the port getting previously closed -+ * and opening now. 
++ * Don't queue up data when port is closed. This condition ++ * can be reached when a console port is not yet connected (no ++ * tty is spawned) and the host sends out data to console ++ * ports. For generic serial ports, the host won't ++ * (shouldn't) send data till the guest is connected. + */ -+ reclaim_consumed_buffers(port); -+ spin_unlock_irq(&port->outvq_lock); ++ if (!port->guest_connected) ++ discard_port_data(port); + - /* Notify host of port being opened */ - send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); - -@@ -676,9 +743,9 @@ static int put_chars(u32 vtermno, const char *buf, int count) - - port = find_port_by_vtermno(vtermno); - if (!port) -- return 0; -+ return -EPIPE; - -- return send_buf(port, (void *)buf, count); -+ return send_buf(port, (void *)buf, count, false); - } - - /* -@@ -692,9 +759,13 @@ static int get_chars(u32 vtermno, char *buf, int count) - { - struct port *port; - -+ /* If we've not set up the port yet, we have no input to give. */ -+ if (unlikely(early_put_chars)) -+ return 0; ++ spin_unlock_irqrestore(&port->inbuf_lock, flags); + - port = find_port_by_vtermno(vtermno); - if (!port) -- return 0; -+ return -EPIPE; - - /* If we don't have an input queue yet, we can't get input. */ - BUG_ON(!port->in_vq); -@@ -705,22 +776,14 @@ static int get_chars(u32 vtermno, char *buf, int count) - static void resize_console(struct port *port) - { - struct virtio_device *vdev; -- struct winsize ws; - - /* The port could have been hot-unplugged */ -- if (!port) -+ if (!port || !is_console_port(port)) - return; - - vdev = port->portdev->vdev; -- if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) { -- vdev->config->get(vdev, -- offsetof(struct virtio_console_config, cols), -- &ws.ws_col, sizeof(u16)); -- vdev->config->get(vdev, -- offsetof(struct virtio_console_config, rows), -- &ws.ws_row, sizeof(u16)); -- hvc_resize(port->cons.hvc, ws); -- } -+ if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) -+ hvc_resize(port->cons.hvc, port->cons.ws); ++ wake_up_interruptible(&port->waitqueue); ++ ++ if (is_console_port(port) && hvc_poll(port->cons.hvc)) + hvc_kick(); } - /* We set the configuration at this point, since we now have a tty */ -@@ -804,6 +867,13 @@ int init_port_console(struct port *port) - spin_unlock_irq(&pdrvdata_lock); - port->guest_connected = true; - -+ /* -+ * Start using the new console output if this is the first -+ * console to come up. -+ */ -+ if (early_put_chars) -+ early_put_chars = NULL; +-/*D:370 Once we're further in boot, we get probed like any other virtio device. +- * At this stage we set up the output virtqueue. +- * +- * To set up and manage our virtual console, we call hvc_alloc(). Since we +- * never remove the console device we never need this pointer again. ++static void control_intr(struct virtqueue *vq) ++{ ++ struct ports_device *portdev; + - /* Notify host of port being opened */ - send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); - -@@ -859,6 +929,8 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, - out_offset += snprintf(buf + out_offset, out_count - out_offset, - "host_connected: %d\n", port->host_connected); - out_offset += snprintf(buf + out_offset, out_count - out_offset, -+ "outvq_full: %d\n", port->outvq_full); -+ out_offset += snprintf(buf + out_offset, out_count - out_offset, - "is_console: %s\n", - is_console_port(port) ? 
"yes" : "no"); - out_offset += snprintf(buf + out_offset, out_count - out_offset, -@@ -875,6 +947,153 @@ static const struct file_operations port_debugfs_ops = { - .read = debugfs_read, - }; - -+static void set_console_size(struct port *port, u16 rows, u16 cols) ++ portdev = vq->vdev->priv; ++ schedule_work(&portdev->control_work); ++} ++ ++static void config_intr(struct virtio_device *vdev) +{ -+ if (!port || !is_console_port(port)) -+ return; ++ struct ports_device *portdev; + -+ port->cons.ws.ws_row = rows; -+ port->cons.ws.ws_col = cols; ++ portdev = vdev->priv; ++ if (use_multiport(portdev)) { ++ /* Handle port hot-add */ ++ schedule_work(&portdev->config_work); ++ } ++ /* ++ * We'll use this way of resizing only for legacy support. ++ * For newer userspace (VIRTIO_CONSOLE_F_MULTPORT+), use ++ * control messages to indicate console size changes so that ++ * it can be done per-port ++ */ ++ resize_console(find_port_by_id(portdev, 0)); +} + +static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) +{ + struct port_buffer *buf; -+ unsigned int nr_added_bufs; -+ int ret; ++ unsigned int ret; ++ int err; + -+ nr_added_bufs = 0; ++ ret = 0; + do { + buf = alloc_buf(PAGE_SIZE); + if (!buf) + break; + + spin_lock_irq(lock); -+ ret = add_inbuf(vq, buf); -+ if (ret < 0) { ++ err = add_inbuf(vq, buf); ++ if (err < 0) { + spin_unlock_irq(lock); + free_buf(buf); + break; + } -+ nr_added_bufs++; ++ ret++; + spin_unlock_irq(lock); -+ } while (ret > 0); ++ } while (err > 0); + -+ return nr_added_bufs; ++ return ret; +} + +static int add_port(struct ports_device *portdev, u32 id) @@ -423,7 +1393,6 @@ index 48ce834..8c99bf1 100644 + struct port *port; + struct port_buffer *buf; + dev_t devt; -+ unsigned int nr_added_bufs; + int err; + + port = kmalloc(sizeof(*port), GFP_KERNEL); @@ -439,12 +1408,8 @@ index 48ce834..8c99bf1 100644 + port->inbuf = NULL; + port->cons.hvc = NULL; + -+ port->cons.ws.ws_row = port->cons.ws.ws_col = 0; -+ + port->host_connected = port->guest_connected = false; + -+ port->outvq_full = false; -+ + port->in_vq = portdev->in_vqs[port->id]; + port->out_vq = portdev->out_vqs[port->id]; + @@ -469,12 +1434,11 @@ index 48ce834..8c99bf1 100644 + } + + spin_lock_init(&port->inbuf_lock); -+ spin_lock_init(&port->outvq_lock); + init_waitqueue_head(&port->waitqueue); + + /* Fill the in_vq with buffers so the host can send us data. */ -+ nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); -+ if (!nr_added_bufs) { ++ err = fill_queue(port->in_vq, &port->inbuf_lock); ++ if (!err) { + dev_err(port->dev, "Error allocating inbufs\n"); + err = -ENOMEM; + goto free_device; @@ -515,7 +1479,7 @@ index 48ce834..8c99bf1 100644 + return 0; + +free_inbufs: -+ while ((buf = virtqueue_detach_unused_buf(port->in_vq))) ++ while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) + free_buf(buf); +free_device: + device_destroy(pdrvdata.class, port->dev->devt); @@ -524,460 +1488,356 @@ index 48ce834..8c99bf1 100644 +free_port: + kfree(port); +fail: -+ /* The host might want to notify management sw about port add failure */ -+ __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0); + return err; +} + - /* Remove all port-specific data. */ - static int remove_port(struct port *port) - { -@@ -888,7 +1107,18 @@ static int remove_port(struct port *port) - spin_lock_irq(&pdrvdata_lock); - list_del(&port->cons.list); - spin_unlock_irq(&pdrvdata_lock); -+#if 0 ++/* ++ * The workhandler for config-space updates. 
+ * +- * Finally we put our input buffer in the input queue, ready to receive. */ +-static int __devinit virtcons_probe(struct virtio_device *dev) ++ * This is called when ports are hot-added. ++ */ ++static void config_work_handler(struct work_struct *work) ++{ ++ struct virtio_console_config virtconconf; ++ struct ports_device *portdev; ++ struct virtio_device *vdev; ++ int err; ++ ++ portdev = container_of(work, struct ports_device, config_work); ++ ++ vdev = portdev->vdev; ++ vdev->config->get(vdev, ++ offsetof(struct virtio_console_config, nr_ports), ++ &virtconconf.nr_ports, ++ sizeof(virtconconf.nr_ports)); ++ ++ if (portdev->config.nr_ports == virtconconf.nr_ports) { + /* -+ * hvc_remove() not called as removing one hvc port -+ * results in other hvc ports getting frozen. -+ * -+ * Once this is resolved in hvc, this functionality -+ * will be enabled. Till that is done, the -EPIPE -+ * return from get_chars() above will help -+ * hvc_console.c to clean up on ports we remove here. ++ * Port 0 got hot-added. Since we already did all the ++ * other initialisation for it, just tell the Host ++ * that the port is ready if we find the port. In ++ * case the port was hot-removed earlier, we call ++ * add_port to add the port. + */ - hvc_remove(port->cons.hvc); -+#endif - } - if (port->guest_connected) - send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); -@@ -900,6 +1130,8 @@ static int remove_port(struct port *port) - /* Remove unused data this port might have received. */ - discard_port_data(port); - -+ reclaim_consumed_buffers(port); ++ struct port *port; + - /* Remove buffers we queued up for the Host to send us data in. */ - while ((buf = virtqueue_detach_unused_buf(port->in_vq))) - free_buf(buf); -@@ -924,7 +1156,7 @@ static void handle_control_message(struct ports_device *portdev, - cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); - - port = find_port_by_id(portdev, cpkt->id); -- if (!port) { -+ if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) { - /* No valid header at start of buffer. Drop it. */ - dev_dbg(&portdev->vdev->dev, - "Invalid index %u in control packet\n", cpkt->id); -@@ -932,6 +1164,24 @@ static void handle_control_message(struct ports_device *portdev, - } - - switch (cpkt->event) { -+ case VIRTIO_CONSOLE_PORT_ADD: -+ if (port) { -+ dev_dbg(&portdev->vdev->dev, -+ "Port %u already added\n", port->id); ++ port = find_port_by_id(portdev, 0); ++ if (!port) ++ add_port(portdev, 0); ++ else + send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); ++ return; ++ } ++ if (virtconconf.nr_ports > portdev->config.max_nr_ports) { ++ dev_warn(&vdev->dev, ++ "More ports specified (%u) than allowed (%u)", ++ portdev->config.nr_ports + 1, ++ portdev->config.max_nr_ports); ++ return; ++ } ++ if (virtconconf.nr_ports < portdev->config.nr_ports) ++ return; ++ ++ /* Hot-add ports */ ++ while (virtconconf.nr_ports - portdev->config.nr_ports) { ++ err = add_port(portdev, portdev->config.nr_ports); ++ if (err) + break; -+ } -+ if (cpkt->id >= portdev->config.max_nr_ports) { -+ dev_warn(&portdev->vdev->dev, -+ "Request for adding port with out-of-bound id %u, max. supported id: %u\n", -+ cpkt->id, portdev->config.max_nr_ports - 1); -+ break; -+ } -+ add_port(portdev, cpkt->id); -+ break; -+ case VIRTIO_CONSOLE_PORT_REMOVE: -+ remove_port(port); -+ break; - case VIRTIO_CONSOLE_CONSOLE_PORT: - if (!cpkt->value) - break; -@@ -944,15 +1194,34 @@ static void handle_control_message(struct ports_device *portdev, - * have to notify the host first. 
- */ - break; -- case VIRTIO_CONSOLE_RESIZE: -+ case VIRTIO_CONSOLE_RESIZE: { -+ struct { -+ __u16 rows; -+ __u16 cols; -+ } size; -+ - if (!is_console_port(port)) - break; -+ -+ memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt), -+ sizeof(size)); -+ set_console_size(port, size.rows, size.cols); -+ - port->cons.hvc->irq_requested = 1; - resize_console(port); - break; ++ portdev->config.nr_ports++; + } - case VIRTIO_CONSOLE_PORT_OPEN: - port->host_connected = cpkt->value; - wake_up_interruptible(&port->waitqueue); -+ /* -+ * If the host port got closed and the host had any -+ * unconsumed buffers, we'll be able to reclaim them -+ * now. -+ */ -+ spin_lock_irq(&port->outvq_lock); -+ reclaim_consumed_buffers(port); -+ spin_unlock_irq(&port->outvq_lock); - break; - case VIRTIO_CONSOLE_PORT_NAME: - /* -@@ -990,32 +1259,6 @@ static void handle_control_message(struct ports_device *portdev, - kobject_uevent(&port->dev->kobj, KOBJ_CHANGE); - } - break; -- case VIRTIO_CONSOLE_PORT_REMOVE: -- /* -- * Hot unplug the port. We don't decrement nr_ports -- * since we don't want to deal with extra complexities -- * of using the lowest-available port id: We can just -- * pick up the nr_ports number as the id and not have -- * userspace send it to us. This helps us in two -- * ways: -- * -- * - We don't need to have a 'port_id' field in the -- * config space when a port is hot-added. This is a -- * good thing as we might queue up multiple hotplug -- * requests issued in our workqueue. -- * -- * - Another way to deal with this would have been to -- * use a bitmap of the active ports and select the -- * lowest non-active port from that map. That -- * bloats the already tight config space and we -- * would end up artificially limiting the -- * max. number of ports to sizeof(bitmap). Right -- * now we can support 2^32 ports (as the port id is -- * stored in a u32 type). -- * -- */ -- remove_port(port); -- break; - } - } - -@@ -1092,204 +1335,29 @@ static void config_intr(struct virtio_device *vdev) - struct ports_device *portdev; - - portdev = vdev->priv; -- if (use_multiport(portdev)) { -- /* Handle port hot-add */ -- schedule_work(&portdev->config_work); -- } -- /* -- * We'll use this way of resizing only for legacy support. 
-- * For newer userspace (VIRTIO_CONSOLE_F_MULTPORT+), use -- * control messages to indicate console size changes so that -- * it can be done per-port -- */ -- resize_console(find_port_by_id(portdev, 0)); --} - --static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) --{ -- struct port_buffer *buf; -- unsigned int nr_added_bufs; -- int ret; -- -- nr_added_bufs = 0; -- do { -- buf = alloc_buf(PAGE_SIZE); -- if (!buf) -- break; -- -- spin_lock_irq(lock); -- ret = add_inbuf(vq, buf); -- if (ret < 0) { -- spin_unlock_irq(lock); -- free_buf(buf); -- break; -- } -- nr_added_bufs++; -- spin_unlock_irq(lock); -- } while (ret > 0); -- -- return nr_added_bufs; --} -- --static int add_port(struct ports_device *portdev, u32 id) --{ -- char debugfs_name[16]; -- struct port *port; -- struct port_buffer *buf; -- dev_t devt; -- unsigned int nr_added_bufs; -- int err; -- -- port = kmalloc(sizeof(*port), GFP_KERNEL); -- if (!port) { -- err = -ENOMEM; -- goto fail; -- } -- -- port->portdev = portdev; -- port->id = id; -- -- port->name = NULL; -- port->inbuf = NULL; -- port->cons.hvc = NULL; -- -- port->host_connected = port->guest_connected = false; -- -- port->in_vq = portdev->in_vqs[port->id]; -- port->out_vq = portdev->out_vqs[port->id]; -- -- cdev_init(&port->cdev, &port_fops); -- -- devt = MKDEV(portdev->chr_major, id); -- err = cdev_add(&port->cdev, devt, 1); -- if (err < 0) { -- dev_err(&port->portdev->vdev->dev, -- "Error %d adding cdev for port %u\n", err, id); -- goto free_port; -- } -- port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, -- devt, port, "vport%up%u", -- port->portdev->drv_index, id); -- if (IS_ERR(port->dev)) { -- err = PTR_ERR(port->dev); -- dev_err(&port->portdev->vdev->dev, -- "Error %d creating device for port %u\n", -- err, id); -- goto free_cdev; -- } -- -- spin_lock_init(&port->inbuf_lock); -- init_waitqueue_head(&port->waitqueue); -- -- /* Fill the in_vq with buffers so the host can send us data. */ -- nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); -- if (!nr_added_bufs) { -- dev_err(port->dev, "Error allocating inbufs\n"); -- err = -ENOMEM; -- goto free_device; -- } -- -- /* -- * If we're not using multiport support, this has to be a console port -- */ -- if (!use_multiport(port->portdev)) { -- err = init_port_console(port); -- if (err) -- goto free_inbufs; -- } -- -- spin_lock_irq(&portdev->ports_lock); -- list_add_tail(&port->list, &port->portdev->ports); -- spin_unlock_irq(&portdev->ports_lock); -- -- /* -- * Tell the Host we're set so that it can send us various -- * configuration parameters for this port (eg, port name, -- * caching, whether this is a console port, etc.) -- */ -- send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); -- -- if (pdrvdata.debugfs_dir) { -- /* -- * Finally, create the debugfs file that we can use to -- * inspect a port's state at any time -- */ -- sprintf(debugfs_name, "vport%up%u", -- port->portdev->drv_index, id); -- port->debugfs_file = debugfs_create_file(debugfs_name, 0444, -- pdrvdata.debugfs_dir, -- port, -- &port_debugfs_ops); -- } -- return 0; -- --free_inbufs: -- while ((buf = virtqueue_detach_unused_buf(port->in_vq))) -- free_buf(buf); --free_device: -- device_destroy(pdrvdata.class, port->dev->devt); --free_cdev: -- cdev_del(&port->cdev); --free_port: -- kfree(port); --fail: -- return err; --} -- --/* -- * The workhandler for config-space updates. -- * -- * This is called when ports are hot-added. 
-- */ --static void config_work_handler(struct work_struct *work) --{ -- struct virtio_console_multiport_conf virtconconf; -- struct ports_device *portdev; -- struct virtio_device *vdev; -- int err; -+ if (!use_multiport(portdev)) { -+ struct port *port; -+ u16 rows, cols; - -- portdev = container_of(work, struct ports_device, config_work); -+ vdev->config->get(vdev, -+ offsetof(struct virtio_console_config, cols), -+ &cols, sizeof(u16)); -+ vdev->config->get(vdev, -+ offsetof(struct virtio_console_config, rows), -+ &rows, sizeof(u16)); - -- vdev = portdev->vdev; -- vdev->config->get(vdev, -- offsetof(struct virtio_console_multiport_conf, -- nr_ports), -- &virtconconf.nr_ports, -- sizeof(virtconconf.nr_ports)); -+ port = find_port_by_id(portdev, 0); -+ set_console_size(port, rows, cols); - -- if (portdev->config.nr_ports == virtconconf.nr_ports) { - /* -- * Port 0 got hot-added. Since we already did all the -- * other initialisation for it, just tell the Host -- * that the port is ready if we find the port. In -- * case the port was hot-removed earlier, we call -- * add_port to add the port. -+ * We'll use this way of resizing only for legacy -+ * support. For newer userspace -+ * (VIRTIO_CONSOLE_F_MULTPORT+), use control messages -+ * to indicate console size changes so that it can be -+ * done per-port. - */ -- struct port *port; -- -- port = find_port_by_id(portdev, 0); -- if (!port) -- add_port(portdev, 0); -- else -- send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); -- return; -- } -- if (virtconconf.nr_ports > portdev->config.max_nr_ports) { -- dev_warn(&vdev->dev, -- "More ports specified (%u) than allowed (%u)", -- portdev->config.nr_ports + 1, -- portdev->config.max_nr_ports); -- return; -- } -- if (virtconconf.nr_ports < portdev->config.nr_ports) -- return; -- -- /* Hot-add ports */ -- while (virtconconf.nr_ports - portdev->config.nr_ports) { -- err = add_port(portdev, portdev->config.nr_ports); -- if (err) -- break; -- portdev->config.nr_ports++; -+ resize_console(port); - } - } - -@@ -1414,7 +1482,6 @@ static const struct file_operations portdev_fops = { - static int __devinit virtcons_probe(struct virtio_device *vdev) ++} ++ ++static int init_vqs(struct ports_device *portdev) { - struct ports_device *portdev; -- u32 i; +- vq_callback_t *callbacks[] = { hvc_handle_input, NULL}; +- const char *names[] = { "input", "output" }; +- struct virtqueue *vqs[2]; ++ vq_callback_t **io_callbacks; ++ char **io_names; ++ struct virtqueue **vqs; ++ u32 i, j, nr_ports, nr_queues; int err; - bool multiport; -@@ -1443,37 +1510,19 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) +- vdev = dev; ++ nr_ports = portdev->config.max_nr_ports; ++ nr_queues = use_multiport(portdev) ? 
(nr_ports + 1) * 2 : 2; + +- /* This is the scratch page we use to receive console input */ +- inbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); +- if (!inbuf) { ++ vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL); ++ if (!vqs) { + err = -ENOMEM; + goto fail; } ++ io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL); ++ if (!io_callbacks) { ++ err = -ENOMEM; ++ goto free_vqs; ++ } ++ io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL); ++ if (!io_names) { ++ err = -ENOMEM; ++ goto free_callbacks; ++ } ++ portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), ++ GFP_KERNEL); ++ if (!portdev->in_vqs) { ++ err = -ENOMEM; ++ goto free_names; ++ } ++ portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), ++ GFP_KERNEL); ++ if (!portdev->out_vqs) { ++ err = -ENOMEM; ++ goto free_invqs; ++ } ++ ++ /* ++ * For backward compat (newer host but older guest), the host ++ * spawns a console port first and also inits the vqs for port ++ * 0 before others. ++ */ ++ j = 0; ++ io_callbacks[j] = in_intr; ++ io_callbacks[j + 1] = NULL; ++ io_names[j] = "input"; ++ io_names[j + 1] = "output"; ++ j += 2; + ++ if (use_multiport(portdev)) { ++ io_callbacks[j] = control_intr; ++ io_callbacks[j + 1] = NULL; ++ io_names[j] = "control-i"; ++ io_names[j + 1] = "control-o"; ++ ++ for (i = 1; i < nr_ports; i++) { ++ j += 2; ++ io_callbacks[j] = in_intr; ++ io_callbacks[j + 1] = NULL; ++ io_names[j] = "input"; ++ io_names[j + 1] = "output"; ++ } ++ } + /* Find the queues. */ +- /* FIXME: This is why we want to wean off hvc: we do nothing +- * when input comes in. */ +- err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names); ++ err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs, ++ io_callbacks, ++ (const char **)io_names); + if (err) ++ goto free_outvqs; ++ ++ j = 0; ++ portdev->in_vqs[0] = vqs[0]; ++ portdev->out_vqs[0] = vqs[1]; ++ j += 2; ++ if (use_multiport(portdev)) { ++ portdev->c_ivq = vqs[j]; ++ portdev->c_ovq = vqs[j + 1]; ++ ++ for (i = 1; i < nr_ports; i++) { ++ j += 2; ++ portdev->in_vqs[i] = vqs[j]; ++ portdev->out_vqs[i] = vqs[j + 1]; ++ } ++ } ++ kfree(io_callbacks); ++ kfree(io_names); ++ kfree(vqs); ++ ++ return 0; ++ ++free_names: ++ kfree(io_names); ++free_callbacks: ++ kfree(io_callbacks); ++free_outvqs: ++ kfree(portdev->out_vqs); ++free_invqs: ++ kfree(portdev->in_vqs); ++free_vqs: ++ kfree(vqs); ++fail: ++ return err; ++} ++ ++static const struct file_operations portdev_fops = { ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Once we're further in boot, we get probed like any other virtio ++ * device. ++ * ++ * If the host also supports multiple console ports, we check the ++ * config space to see how many ports the host has spawned. We ++ * initialize each port found. ++ */ ++static int __devinit virtcons_probe(struct virtio_device *vdev) ++{ ++ struct ports_device *portdev; ++ u32 i; ++ int err; ++ bool multiport; ++ ++ portdev = kmalloc(sizeof(*portdev), GFP_KERNEL); ++ if (!portdev) { ++ err = -ENOMEM; ++ goto fail; ++ } ++ ++ /* Attach this portdev to this virtio_device, and vice-versa. 
*/ ++ portdev->vdev = vdev; ++ vdev->priv = portdev; ++ ++ spin_lock_irq(&pdrvdata_lock); ++ portdev->drv_index = pdrvdata.index++; ++ spin_unlock_irq(&pdrvdata_lock); ++ ++ portdev->chr_major = register_chrdev(0, "virtio-portsdev", ++ &portdev_fops); ++ if (portdev->chr_major < 0) { ++ dev_err(&vdev->dev, ++ "Error %d registering chrdev for device %u\n", ++ portdev->chr_major, portdev->drv_index); ++ err = portdev->chr_major; + goto free; ++ } - multiport = false; -- portdev->config.nr_ports = 1; - portdev->config.max_nr_ports = 1; --#if 0 /* Multiport is not quite ready yet --RR */ - if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) { - multiport = true; - vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT; +- in_vq = vqs[0]; +- out_vq = vqs[1]; ++ multiport = false; ++ portdev->config.nr_ports = 1; ++ portdev->config.max_nr_ports = 1; ++ if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) { ++ multiport = true; ++ vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT; -- vdev->config->get(vdev, -- offsetof(struct virtio_console_multiport_conf, -- nr_ports), -- &portdev->config.nr_ports, -- sizeof(portdev->config.nr_ports)); -- vdev->config->get(vdev, -- offsetof(struct virtio_console_multiport_conf, -- max_nr_ports), +- /* Start using the new console output. */ +- virtio_cons.get_chars = get_chars; +- virtio_cons.put_chars = put_chars; +- virtio_cons.notifier_add = notifier_add_vio; +- virtio_cons.notifier_del = notifier_del_vio; +- virtio_cons.notifier_hangup = notifier_del_vio; +- +- /* The first argument of hvc_alloc() is the virtual console number, so +- * we use zero. The second argument is the parameter for the +- * notification mechanism (like irq number). We currently leave this +- * as zero, virtqueues have implicit notifications. +- * +- * The third argument is a "struct hv_ops" containing the put_chars() +- * get_chars(), notifier_add() and notifier_del() pointers. +- * The final argument is the output buffer size: we can do any size, +- * so we put PAGE_SIZE here. */ +- hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE); +- if (IS_ERR(hvc)) { +- err = PTR_ERR(hvc); +- goto free_vqs; ++ vdev->config->get(vdev, offsetof(struct virtio_console_config, ++ nr_ports), ++ &portdev->config.nr_ports, ++ sizeof(portdev->config.nr_ports)); + vdev->config->get(vdev, offsetof(struct virtio_console_config, + max_nr_ports), - &portdev->config.max_nr_ports, - sizeof(portdev->config.max_nr_ports)); -- if (portdev->config.nr_ports > portdev->config.max_nr_ports) { -- dev_warn(&vdev->dev, -- "More ports (%u) specified than allowed (%u). Will init %u ports.", -- portdev->config.nr_ports, -- portdev->config.max_nr_ports, -- portdev->config.max_nr_ports); -- -- portdev->config.nr_ports = portdev->config.max_nr_ports; -- } - } - - /* Let the Host know we support multiple ports.*/ - vdev->config->finalize_features(vdev); --#endif - - err = init_vqs(portdev); - if (err < 0) { -@@ -1489,7 +1538,6 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) - - spin_lock_init(&portdev->cvq_lock); - INIT_WORK(&portdev->control_work, &control_work_handler); -- INIT_WORK(&portdev->config_work, &config_work_handler); - - nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); - if (!nr_added_bufs) { -@@ -1498,16 +1546,22 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) - err = -ENOMEM; - goto free_vqs; - } -+ } else { -+ /* -+ * For backward compatibility: Create a console port -+ * if we're running on older host. 
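virtcons_probe() above pulls each config field it needs out of the device's config space with vdev->config->get() plus offsetof() into the packed virtio_console_config layout. Below is a minimal userspace model of the same idea, assuming the field layout declared later in include/linux/virtio_console.h; config_get() and dev_cfg are stand-ins for the config-space accessor and the device, not kernel APIs.

#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdint.h>

/* Packed layout as declared in include/linux/virtio_console.h below. */
struct virtio_console_config {
        uint16_t cols;
        uint16_t rows;
        uint32_t max_nr_ports;
} __attribute__((packed));

/* Stand-in for vdev->config->get(): copy 'len' bytes found at 'offset'. */
static void config_get(const void *cfg, size_t offset, void *buf, size_t len)
{
        memcpy(buf, (const char *)cfg + offset, len);
}

int main(void)
{
        struct virtio_console_config dev_cfg = {
                .cols = 80, .rows = 25, .max_nr_ports = 4,
        };
        uint32_t max_nr_ports;
        uint16_t rows, cols;

        config_get(&dev_cfg, offsetof(struct virtio_console_config, cols),
                   &cols, sizeof(cols));
        config_get(&dev_cfg, offsetof(struct virtio_console_config, rows),
                   &rows, sizeof(rows));
        config_get(&dev_cfg, offsetof(struct virtio_console_config, max_nr_ports),
                   &max_nr_ports, sizeof(max_nr_ports));

        printf("%ux%u console, up to %u ports\n",
               (unsigned)cols, (unsigned)rows, (unsigned)max_nr_ports);
        return 0;
}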
-+ */ -+ add_port(portdev, 0); ++ &portdev->config.max_nr_ports, ++ sizeof(portdev->config.max_nr_ports)); ++ if (portdev->config.nr_ports > portdev->config.max_nr_ports) { ++ dev_warn(&vdev->dev, ++ "More ports (%u) specified than allowed (%u). Will init %u ports.", ++ portdev->config.nr_ports, ++ portdev->config.max_nr_ports, ++ portdev->config.max_nr_ports); ++ ++ portdev->config.nr_ports = portdev->config.max_nr_ports; ++ } ++ } ++ ++ /* Let the Host know we support multiple ports.*/ ++ vdev->config->finalize_features(vdev); ++ ++ err = init_vqs(portdev); ++ if (err < 0) { ++ dev_err(&vdev->dev, "Error %d initializing vqs\n", err); ++ goto free_chrdev; ++ } ++ ++ spin_lock_init(&portdev->ports_lock); ++ INIT_LIST_HEAD(&portdev->ports); ++ ++ if (multiport) { ++ spin_lock_init(&portdev->cvq_lock); ++ INIT_WORK(&portdev->control_work, &control_work_handler); ++ INIT_WORK(&portdev->config_work, &config_work_handler); ++ ++ err = fill_queue(portdev->c_ivq, &portdev->cvq_lock); ++ if (!err) { ++ dev_err(&vdev->dev, ++ "Error allocating buffers for control queue\n"); ++ err = -ENOMEM; ++ goto free_vqs; ++ } } -- for (i = 0; i < portdev->config.nr_ports; i++) -- add_port(portdev, i); -- -- /* Start using the new console output. */ -- early_put_chars = NULL; -+ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, -+ VIRTIO_CONSOLE_DEVICE_READY, 1); +- /* Register the input buffer the first time. */ +- add_inbuf(); ++ for (i = 0; i < portdev->config.nr_ports; i++) ++ add_port(portdev, i); ++ ++ /* Start using the new console output. */ ++ early_put_chars = NULL; return 0; free_vqs: -+ /* The host might want to notify mgmt sw about device add failure */ -+ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, -+ VIRTIO_CONSOLE_DEVICE_READY, 0); vdev->config->del_vqs(vdev); - kfree(portdev->in_vqs); - kfree(portdev->out_vqs); -@@ -1529,7 +1583,6 @@ static void virtcons_remove(struct virtio_device *vdev) - portdev = vdev->priv; - - cancel_work_sync(&portdev->control_work); -- cancel_work_sync(&portdev->config_work); ++ kfree(portdev->in_vqs); ++ kfree(portdev->out_vqs); ++free_chrdev: ++ unregister_chrdev(portdev->chr_major, "virtio-portsdev"); + free: +- kfree(inbuf); ++ kfree(portdev); + fail: + return err; + } - list_for_each_entry_safe(port, port2, &portdev->ports, list) - remove_port(port); -@@ -1556,6 +1609,7 @@ static struct virtio_device_id id_table[] = { ++static void virtcons_remove(struct virtio_device *vdev) ++{ ++ struct ports_device *portdev; ++ struct port *port, *port2; ++ struct port_buffer *buf; ++ unsigned int len; ++ ++ portdev = vdev->priv; ++ ++ cancel_work_sync(&portdev->control_work); ++ cancel_work_sync(&portdev->config_work); ++ ++ list_for_each_entry_safe(port, port2, &portdev->ports, list) ++ remove_port(port); ++ ++ unregister_chrdev(portdev->chr_major, "virtio-portsdev"); ++ ++ while ((buf = portdev->c_ivq->vq_ops->get_buf(portdev->c_ivq, &len))) ++ free_buf(buf); ++ ++ while ((buf = portdev->c_ivq->vq_ops->detach_unused_buf(portdev->c_ivq))) ++ free_buf(buf); ++ ++ vdev->config->del_vqs(vdev); ++ kfree(portdev->in_vqs); ++ kfree(portdev->out_vqs); ++ ++ kfree(portdev); ++} ++ + static struct virtio_device_id id_table[] = { + { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, + { 0 }, +@@ -253,6 +1511,7 @@ static struct virtio_device_id id_table[] = { static unsigned int features[] = { VIRTIO_CONSOLE_F_SIZE, @@ -985,17 +1845,147 @@ index 48ce834..8c99bf1 100644 }; static struct virtio_driver virtio_console = { +@@ -262,14 +1521,41 @@ static struct virtio_driver virtio_console = { + 
.driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virtcons_probe, +- .config_changed = virtcons_apply_config, ++ .remove = virtcons_remove, ++ .config_changed = config_intr, + }; + + static int __init init(void) + { ++ int err; ++ ++ pdrvdata.class = class_create(THIS_MODULE, "virtio-ports"); ++ if (IS_ERR(pdrvdata.class)) { ++ err = PTR_ERR(pdrvdata.class); ++ pr_err("Error %d creating virtio-ports class\n", err); ++ return err; ++ } ++ ++ pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL); ++ if (!pdrvdata.debugfs_dir) { ++ pr_warning("Error %ld creating debugfs dir for virtio-ports\n", ++ PTR_ERR(pdrvdata.debugfs_dir)); ++ } ++ INIT_LIST_HEAD(&pdrvdata.consoles); ++ + return register_virtio_driver(&virtio_console); + } ++ ++static void __exit fini(void) ++{ ++ unregister_virtio_driver(&virtio_console); ++ ++ class_destroy(pdrvdata.class); ++ if (pdrvdata.debugfs_dir) ++ debugfs_remove_recursive(pdrvdata.debugfs_dir); ++} + module_init(init); ++module_exit(fini); + + MODULE_DEVICE_TABLE(virtio, id_table); + MODULE_DESCRIPTION("Virtio console driver"); +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c +index fbd2ecd..9bcfe95 100644 +--- a/drivers/virtio/virtio_ring.c ++++ b/drivers/virtio/virtio_ring.c +@@ -334,6 +334,30 @@ static bool vring_enable_cb(struct virtqueue *_vq) + return true; + } + ++static void *vring_detach_unused_buf(struct virtqueue *_vq) ++{ ++ struct vring_virtqueue *vq = to_vvq(_vq); ++ unsigned int i; ++ void *buf; ++ ++ START_USE(vq); ++ ++ for (i = 0; i < vq->vring.num; i++) { ++ if (!vq->data[i]) ++ continue; ++ /* detach_buf clears data, so grab it now. */ ++ buf = vq->data[i]; ++ detach_buf(vq, i); ++ END_USE(vq); ++ return buf; ++ } ++ /* That should have freed everything. */ ++ BUG_ON(vq->num_free != vq->vring.num); ++ ++ END_USE(vq); ++ return NULL; ++} ++ + irqreturn_t vring_interrupt(int irq, void *_vq) + { + struct vring_virtqueue *vq = to_vvq(_vq); +@@ -360,6 +384,7 @@ static struct virtqueue_ops vring_vq_ops = { + .kick = vring_kick, + .disable_cb = vring_disable_cb, + .enable_cb = vring_enable_cb, ++ .detach_unused_buf = vring_detach_unused_buf, + }; + + struct virtqueue *vring_new_virtqueue(unsigned int num, +@@ -406,8 +431,11 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, + /* Put everything in free lists. */ + vq->num_free = num; + vq->free_head = 0; +- for (i = 0; i < num-1; i++) ++ for (i = 0; i < num-1; i++) { + vq->vring.desc[i].next = i+1; ++ vq->data[i] = NULL; ++ } ++ vq->data[i] = NULL; + + return &vq->vq; + } +diff --git a/include/linux/virtio.h b/include/linux/virtio.h +index 057a2e0..f508c65 100644 +--- a/include/linux/virtio.h ++++ b/include/linux/virtio.h +@@ -51,6 +51,9 @@ struct virtqueue { + * This re-enables callbacks; it returns "false" if there are pending + * buffers in the queue, to detect a possible race between the driver + * checking for more work, and enabling callbacks. ++ * @detach_unused_buf: detach first unused buffer ++ * vq: the struct virtqueue we're talking about. ++ * Returns NULL or the "data" token handed to add_buf + * + * Locking rules are straightforward: the driver is responsible for + * locking. 
No two operations may be invoked simultaneously, with the exception +@@ -71,6 +74,7 @@ struct virtqueue_ops { + + void (*disable_cb)(struct virtqueue *vq); + bool (*enable_cb)(struct virtqueue *vq); ++ void *(*detach_unused_buf)(struct virtqueue *vq); + }; + + /** diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h -index 92228a8..a85064d 100644 +index fe88517..ae4f039 100644 --- a/include/linux/virtio_console.h +++ b/include/linux/virtio_console.h -@@ -12,14 +12,39 @@ +@@ -3,19 +3,45 @@ + #include + #include + #include +-/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so +- * anyone can use the definitions to implement compatible drivers/servers. */ ++/* ++ * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so ++ * anyone can use the definitions to implement compatible drivers/servers. ++ * ++ * Copyright (C) Red Hat, Inc., 2009, 2010 ++ */ /* Feature bits */ #define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */ +#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */ -+ -+#define VIRTIO_CONSOLE_BAD_ID (~(u32)0) struct virtio_console_config { /* colums of the screens */ @@ -1004,6 +1994,8 @@ index 92228a8..a85064d 100644 __u16 rows; + /* max. number of ports this device can hold */ + __u32 max_nr_ports; ++ /* number of ports added so far */ ++ __u32 nr_ports; } __attribute__((packed)); +/* @@ -1017,15 +2009,12 @@ index 92228a8..a85064d 100644 +}; + +/* Some events for control messages */ -+#define VIRTIO_CONSOLE_DEVICE_READY 0 -+#define VIRTIO_CONSOLE_PORT_ADD 1 -+#define VIRTIO_CONSOLE_PORT_REMOVE 2 -+#define VIRTIO_CONSOLE_PORT_READY 3 -+#define VIRTIO_CONSOLE_CONSOLE_PORT 4 -+#define VIRTIO_CONSOLE_RESIZE 5 -+#define VIRTIO_CONSOLE_PORT_OPEN 6 -+#define VIRTIO_CONSOLE_PORT_NAME 7 -+ ++#define VIRTIO_CONSOLE_PORT_READY 0 ++#define VIRTIO_CONSOLE_CONSOLE_PORT 1 ++#define VIRTIO_CONSOLE_RESIZE 2 ++#define VIRTIO_CONSOLE_PORT_OPEN 3 ++#define VIRTIO_CONSOLE_PORT_NAME 4 ++#define VIRTIO_CONSOLE_PORT_REMOVE 5 + #ifdef __KERNEL__ int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)); - #endif /* __KERNEL__ */ diff --git a/virt_console-rollup2.patch b/virt_console-rollup2.patch new file mode 100644 index 0000000..3b1358f --- /dev/null +++ b/virt_console-rollup2.patch @@ -0,0 +1,961 @@ +diff --git a/MAINTAINERS b/MAINTAINERS +index d371022..63c3620 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -2336,12 +2336,6 @@ L: linuxppc-dev@ozlabs.org + S: Odd Fixes + F: drivers/char/hvc_* + +-VIRTIO CONSOLE DRIVER +-M: Amit Shah +-L: virtualization@lists.linux-foundation.org +-S: Maintained +-F: drivers/char/virtio_console.c +- + GSPCA FINEPIX SUBDRIVER + M: Frank Zago + L: linux-media@vger.kernel.org +@@ -5682,6 +5676,13 @@ S: Maintained + F: Documentation/filesystems/vfat.txt + F: fs/fat/ + ++VIRTIO CONSOLE DRIVER ++M: Amit Shah ++L: virtualization@lists.linux-foundation.org ++S: Maintained ++F: drivers/char/virtio_console.c ++F: include/linux/virtio_console.h ++ + VIRTIO HOST (VHOST) + M: "Michael S. 
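The header hunk just above defines the guest/host control channel: a virtio_console_control packet carrying a port id, an event and a value, which send_control_msg()/__send_control_msg() queue on the control-out virtqueue. The sketch below only shows what such a packet looks like before it is queued; the field widths are an assumption about the ABI (the hunk shows only the field names), fake_send() is invented, and the event numbering follows the header above (the rollup patch further below renumbers the constants when DEVICE_READY and PORT_ADD are added).

#include <stdio.h>
#include <stdint.h>

/* Event numbering as in the header hunk above. */
#define VIRTIO_CONSOLE_PORT_READY 0
#define VIRTIO_CONSOLE_PORT_OPEN  3

/* Field widths are an assumption about the ABI; the hunk only names them. */
struct virtio_console_control {
        uint32_t id;        /* port number */
        uint16_t event;     /* one of the VIRTIO_CONSOLE_* events */
        uint16_t value;
};

/* Stand-in for queueing cpkt on c_ovq and kicking the host. */
static void fake_send(const struct virtio_console_control *cpkt)
{
        printf("ctrl: id=%u event=%u value=%u (%zu bytes)\n",
               (unsigned)cpkt->id, (unsigned)cpkt->event,
               (unsigned)cpkt->value, sizeof(*cpkt));
}

int main(void)
{
        struct virtio_console_control ready = {
                .id = 1, .event = VIRTIO_CONSOLE_PORT_READY, .value = 1,
        };
        struct virtio_console_control opened = {
                .id = 1, .event = VIRTIO_CONSOLE_PORT_OPEN, .value = 1,
        };

        fake_send(&ready);      /* port 1 finished add_port() */
        fake_send(&opened);     /* somebody opened the char device */
        return 0;
}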
Tsirkin" + L: kvm@vger.kernel.org +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c +index 213373b..ab29b98 100644 +--- a/drivers/char/virtio_console.c ++++ b/drivers/char/virtio_console.c +@@ -109,7 +109,6 @@ struct ports_device { + * notification + */ + struct work_struct control_work; +- struct work_struct config_work; + + struct list_head ports; + +@@ -159,6 +158,9 @@ struct port { + */ + spinlock_t inbuf_lock; + ++ /* Protect the operations on the out_vq. */ ++ spinlock_t outvq_lock; ++ + /* The IO vqs for this port */ + struct virtqueue *in_vq, *out_vq; + +@@ -184,6 +186,8 @@ struct port { + /* The 'id' to identify the port with the Host */ + u32 id; + ++ bool outvq_full; ++ + /* Is the host device open */ + bool host_connected; + +@@ -373,22 +377,22 @@ out: + return ret; + } + +-static ssize_t send_control_msg(struct port *port, unsigned int event, +- unsigned int value) ++static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, ++ unsigned int event, unsigned int value) + { + struct scatterlist sg[1]; + struct virtio_console_control cpkt; + struct virtqueue *vq; +- int len; ++ unsigned int len; + +- if (!use_multiport(port->portdev)) ++ if (!use_multiport(portdev)) + return 0; + +- cpkt.id = port->id; ++ cpkt.id = port_id; + cpkt.event = event; + cpkt.value = value; + +- vq = port->portdev->c_ovq; ++ vq = portdev->c_ovq; + + sg_init_one(sg, &cpkt, sizeof(cpkt)); + if (vq->vq_ops->add_buf(vq, sg, 1, 0, &cpkt) >= 0) { +@@ -399,15 +403,39 @@ static ssize_t send_control_msg(struct port *port, unsigned int event, + return 0; + } + +-static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) ++static ssize_t send_control_msg(struct port *port, unsigned int event, ++ unsigned int value) ++{ ++ return __send_control_msg(port->portdev, port->id, event, value); ++} ++ ++/* Callers must take the port->outvq_lock */ ++static void reclaim_consumed_buffers(struct port *port) ++{ ++ void *buf; ++ unsigned int len; ++ ++ while ((buf = port->out_vq->vq_ops->get_buf(port->out_vq, &len))) { ++ kfree(buf); ++ port->outvq_full = false; ++ } ++} ++ ++static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count, ++ bool nonblock) + { + struct scatterlist sg[1]; + struct virtqueue *out_vq; + ssize_t ret; ++ unsigned long flags; + unsigned int len; + + out_vq = port->out_vq; + ++ spin_lock_irqsave(&port->outvq_lock, flags); ++ ++ reclaim_consumed_buffers(port); ++ + sg_init_one(sg, in_buf, in_count); + ret = out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, in_buf); + +@@ -415,20 +443,31 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) + out_vq->vq_ops->kick(out_vq); + + if (ret < 0) { +- len = 0; +- goto fail; ++ in_count = 0; ++ goto done; + } + ++ if (ret == 0) ++ port->outvq_full = true; ++ ++ if (nonblock) ++ goto done; ++ + /* + * Wait till the host acknowledges it pushed out the data we +- * sent. Also ensure we return to userspace the number of +- * bytes that were successfully consumed by the host. ++ * sent. This is done for ports in blocking mode or for data ++ * from the hvc_console; the tty operations are performed with ++ * spinlocks held so we can't sleep here. 
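send_buf() in the rollup above now distinguishes blocking and nonblocking callers: it first reclaims buffers the host already consumed, queues the new one, remembers in outvq_full when add_buf() reports the ring has no room left, and only the blocking path spins on get_buf(). The toy below models just that bookkeeping in plain C; the ring size, counters and helper names are invented for the illustration and do not correspond to driver internals.

#include <stdio.h>
#include <stdbool.h>

#define OUT_RING_SLOTS 2          /* tiny ring so the "full" case is easy to hit */

static int in_flight;             /* buffers queued but not yet consumed by host */
static int consumed;              /* buffers the host has consumed meanwhile */
static bool outvq_full;           /* models port->outvq_full */

/* Models reclaim_consumed_buffers(): free what the host already used up. */
static void reclaim_consumed(void)
{
        while (consumed > 0) {
                consumed--;
                in_flight--;
                outvq_full = false;
        }
}

/* Shape of the reworked send_buf(): reclaim, queue, track "full". */
static long toy_send(long count, bool nonblock)
{
        int remaining;

        reclaim_consumed();
        if (in_flight == OUT_RING_SLOTS)
                return 0;                         /* add_buf() would fail */
        in_flight++;
        remaining = OUT_RING_SLOTS - in_flight;
        if (remaining == 0)
                outvq_full = true;                /* next write must wait */
        if (nonblock)
                return count;                     /* reclaimed on a later call */

        /* Blocking path: pretend to spin until the host consumes the buffer. */
        consumed = in_flight;
        reclaim_consumed();
        return count;
}

int main(void)
{
        long n;
        int i;

        for (i = 0; i < 3; i++) {
                n = toy_send(6, true);
                printf("nonblocking write %d: wrote %ld, outvq_full=%d\n",
                       i, n, outvq_full);
        }
        consumed = in_flight;     /* the host catches up */
        n = toy_send(6, true);
        printf("after host consumed: wrote %ld, outvq_full=%d\n", n, outvq_full);
        return 0;
}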
+ */ + while (!out_vq->vq_ops->get_buf(out_vq, &len)) + cpu_relax(); +-fail: +- /* We're expected to return the amount of data we wrote */ +- return len; ++done: ++ spin_unlock_irqrestore(&port->outvq_lock, flags); ++ /* ++ * We're expected to return the amount of data we wrote -- all ++ * of it ++ */ ++ return in_count; + } + + /* +@@ -477,9 +516,28 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count, + } + + /* The condition that must be true for polling to end */ +-static bool wait_is_over(struct port *port) ++static bool will_read_block(struct port *port) ++{ ++ return !port_has_data(port) && port->host_connected; ++} ++ ++static bool will_write_block(struct port *port) + { +- return port_has_data(port) || !port->host_connected; ++ bool ret; ++ ++ if (!port->host_connected) ++ return true; ++ ++ spin_lock_irq(&port->outvq_lock); ++ /* ++ * Check if the Host has consumed any buffers since we last ++ * sent data (this is only applicable for nonblocking ports). ++ */ ++ reclaim_consumed_buffers(port); ++ ret = port->outvq_full; ++ spin_unlock_irq(&port->outvq_lock); ++ ++ return ret; + } + + static ssize_t port_fops_read(struct file *filp, char __user *ubuf, +@@ -502,7 +560,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, + return -EAGAIN; + + ret = wait_event_interruptible(port->waitqueue, +- wait_is_over(port)); ++ !will_read_block(port)); + if (ret < 0) + return ret; + } +@@ -528,9 +586,22 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, + struct port *port; + char *buf; + ssize_t ret; ++ bool nonblock; + + port = filp->private_data; + ++ nonblock = filp->f_flags & O_NONBLOCK; ++ ++ if (will_write_block(port)) { ++ if (nonblock) ++ return -EAGAIN; ++ ++ ret = wait_event_interruptible(port->waitqueue, ++ !will_write_block(port)); ++ if (ret < 0) ++ return ret; ++ } ++ + count = min((size_t)(32 * 1024), count); + + buf = kmalloc(count, GFP_KERNEL); +@@ -543,9 +614,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, + goto free_buf; + } + +- ret = send_buf(port, buf, count); ++ ret = send_buf(port, buf, count, nonblock); ++ ++ if (nonblock && ret > 0) ++ goto out; ++ + free_buf: + kfree(buf); ++out: + return ret; + } + +@@ -560,7 +636,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait) + ret = 0; + if (port->inbuf) + ret |= POLLIN | POLLRDNORM; +- if (port->host_connected) ++ if (!will_write_block(port)) + ret |= POLLOUT; + if (!port->host_connected) + ret |= POLLHUP; +@@ -584,6 +660,10 @@ static int port_fops_release(struct inode *inode, struct file *filp) + + spin_unlock_irq(&port->inbuf_lock); + ++ spin_lock_irq(&port->outvq_lock); ++ reclaim_consumed_buffers(port); ++ spin_unlock_irq(&port->outvq_lock); ++ + return 0; + } + +@@ -612,6 +692,15 @@ static int port_fops_open(struct inode *inode, struct file *filp) + port->guest_connected = true; + spin_unlock_irq(&port->inbuf_lock); + ++ spin_lock_irq(&port->outvq_lock); ++ /* ++ * There might be a chance that we missed reclaiming a few ++ * buffers in the window of the port getting previously closed ++ * and opening now. 
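From userspace the reworked fops behave like an ordinary character device: poll() reports POLLIN when data is buffered, POLLOUT while the out queue still has room and POLLHUP once the host side closes, and with O_NONBLOCK both read() and write() return -EAGAIN instead of sleeping. A small usage sketch follows, assuming a port node named after the vport%up%u scheme used by add_port(); whether /dev/vport0p1 actually exists depends on how the host configured the device.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <poll.h>

int main(void)
{
        /* Example node: add_port() creates "vport%up%u" devices. */
        const char *node = "/dev/vport0p1";
        char buf[256];
        struct pollfd pfd;
        ssize_t n;
        int fd;

        fd = open(node, O_RDWR | O_NONBLOCK);
        if (fd < 0) {
                perror(node);
                return 1;
        }

        pfd.fd = fd;
        pfd.events = POLLIN | POLLOUT;

        if (poll(&pfd, 1, 5000) <= 0) {
                fprintf(stderr, "no host activity\n");
                close(fd);
                return 1;
        }
        if (pfd.revents & POLLHUP)
                fprintf(stderr, "host side is closed\n");

        if (pfd.revents & POLLOUT) {
                n = write(fd, "hello\n", 6);
                if (n < 0 && errno == EAGAIN)
                        fprintf(stderr, "out queue full, try again later\n");
        }
        if (pfd.revents & POLLIN) {
                n = read(fd, buf, sizeof(buf));
                if (n > 0)
                        printf("got %zd bytes from the host\n", n);
        }
        close(fd);
        return 0;
}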
++ */ ++ reclaim_consumed_buffers(port); ++ spin_unlock_irq(&port->outvq_lock); ++ + /* Notify host of port being opened */ + send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); + +@@ -645,14 +734,14 @@ static int put_chars(u32 vtermno, const char *buf, int count) + { + struct port *port; + +- port = find_port_by_vtermno(vtermno); +- if (!port) +- return 0; +- + if (unlikely(early_put_chars)) + return early_put_chars(vtermno, buf, count); + +- return send_buf(port, (void *)buf, count); ++ port = find_port_by_vtermno(vtermno); ++ if (!port) ++ return -EPIPE; ++ ++ return send_buf(port, (void *)buf, count, false); + } + + /* +@@ -666,9 +755,13 @@ static int get_chars(u32 vtermno, char *buf, int count) + { + struct port *port; + ++ /* If we've not set up the port yet, we have no input to give. */ ++ if (unlikely(early_put_chars)) ++ return 0; ++ + port = find_port_by_vtermno(vtermno); + if (!port) +- return 0; ++ return -EPIPE; + + /* If we don't have an input queue yet, we can't get input. */ + BUG_ON(!port->in_vq); +@@ -681,6 +774,10 @@ static void resize_console(struct port *port) + struct virtio_device *vdev; + struct winsize ws; + ++ /* The port could have been hot-unplugged */ ++ if (!port) ++ return; ++ + vdev = port->portdev->vdev; + if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) { + vdev->config->get(vdev, +@@ -774,6 +871,13 @@ int init_port_console(struct port *port) + spin_unlock_irq(&pdrvdata_lock); + port->guest_connected = true; + ++ /* ++ * Start using the new console output if this is the first ++ * console to come up. ++ */ ++ if (early_put_chars) ++ early_put_chars = NULL; ++ + /* Notify host of port being opened */ + send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); + +@@ -829,6 +933,8 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, + out_offset += snprintf(buf + out_offset, out_count - out_offset, + "host_connected: %d\n", port->host_connected); + out_offset += snprintf(buf + out_offset, out_count - out_offset, ++ "outvq_full: %d\n", port->outvq_full); ++ out_offset += snprintf(buf + out_offset, out_count - out_offset, + "is_console: %s\n", + is_console_port(port) ? 
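put_chars() above keeps the boot-time early_put_chars() hook in the fast path and only clears it once init_port_console() has brought up a real console port; after that, a vanished port is reported back to hvc as -EPIPE. Below is a minimal sketch of that function-pointer fallback pattern, with invented names and plain printf() standing in for the two output paths.

#include <stdio.h>
#include <errno.h>

/* Boot-time fallback, cleared once the real console is ready. */
static int early_write(const char *buf, int count)
{
        return printf("[early] %.*s", count, buf);  /* stands in for early_put_chars */
}

static int (*early_hook)(const char *, int) = early_write;
static int console_ready;                           /* models cons.hvc != NULL */

static int real_write(const char *buf, int count)
{
        return printf("[port]  %.*s", count, buf);
}

static int put_chars(const char *buf, int count)
{
        if (early_hook)                 /* early console path */
                return early_hook(buf, count);
        if (!console_ready)             /* port went away: tell the caller */
                return -EPIPE;
        return real_write(buf, count);
}

int main(void)
{
        put_chars("boot message\n", 13);
        console_ready = 1;              /* init_port_console() succeeded ... */
        early_hook = NULL;              /* ... so drop the early hook */
        put_chars("regular output\n", 15);
        return 0;
}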
"yes" : "no"); + out_offset += snprintf(buf + out_offset, out_count - out_offset, +@@ -845,6 +951,140 @@ static const struct file_operations port_debugfs_ops = { + .read = debugfs_read, + }; + ++static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) ++{ ++ struct port_buffer *buf; ++ unsigned int nr_added_bufs; ++ int ret; ++ ++ nr_added_bufs = 0; ++ do { ++ buf = alloc_buf(PAGE_SIZE); ++ if (!buf) ++ break; ++ ++ spin_lock_irq(lock); ++ ret = add_inbuf(vq, buf); ++ if (ret < 0) { ++ spin_unlock_irq(lock); ++ free_buf(buf); ++ break; ++ } ++ nr_added_bufs++; ++ spin_unlock_irq(lock); ++ } while (ret > 0); ++ ++ return nr_added_bufs; ++} ++ ++static int add_port(struct ports_device *portdev, u32 id) ++{ ++ char debugfs_name[16]; ++ struct port *port; ++ struct port_buffer *buf; ++ dev_t devt; ++ unsigned int nr_added_bufs; ++ int err; ++ ++ port = kmalloc(sizeof(*port), GFP_KERNEL); ++ if (!port) { ++ err = -ENOMEM; ++ goto fail; ++ } ++ ++ port->portdev = portdev; ++ port->id = id; ++ ++ port->name = NULL; ++ port->inbuf = NULL; ++ port->cons.hvc = NULL; ++ ++ port->host_connected = port->guest_connected = false; ++ ++ port->outvq_full = false; ++ ++ port->in_vq = portdev->in_vqs[port->id]; ++ port->out_vq = portdev->out_vqs[port->id]; ++ ++ cdev_init(&port->cdev, &port_fops); ++ ++ devt = MKDEV(portdev->chr_major, id); ++ err = cdev_add(&port->cdev, devt, 1); ++ if (err < 0) { ++ dev_err(&port->portdev->vdev->dev, ++ "Error %d adding cdev for port %u\n", err, id); ++ goto free_port; ++ } ++ port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, ++ devt, port, "vport%up%u", ++ port->portdev->drv_index, id); ++ if (IS_ERR(port->dev)) { ++ err = PTR_ERR(port->dev); ++ dev_err(&port->portdev->vdev->dev, ++ "Error %d creating device for port %u\n", ++ err, id); ++ goto free_cdev; ++ } ++ ++ spin_lock_init(&port->inbuf_lock); ++ spin_lock_init(&port->outvq_lock); ++ init_waitqueue_head(&port->waitqueue); ++ ++ /* Fill the in_vq with buffers so the host can send us data. */ ++ nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); ++ if (!nr_added_bufs) { ++ dev_err(port->dev, "Error allocating inbufs\n"); ++ err = -ENOMEM; ++ goto free_device; ++ } ++ ++ /* ++ * If we're not using multiport support, this has to be a console port ++ */ ++ if (!use_multiport(port->portdev)) { ++ err = init_port_console(port); ++ if (err) ++ goto free_inbufs; ++ } ++ ++ spin_lock_irq(&portdev->ports_lock); ++ list_add_tail(&port->list, &port->portdev->ports); ++ spin_unlock_irq(&portdev->ports_lock); ++ ++ /* ++ * Tell the Host we're set so that it can send us various ++ * configuration parameters for this port (eg, port name, ++ * caching, whether this is a console port, etc.) ++ */ ++ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); ++ ++ if (pdrvdata.debugfs_dir) { ++ /* ++ * Finally, create the debugfs file that we can use to ++ * inspect a port's state at any time ++ */ ++ sprintf(debugfs_name, "vport%up%u", ++ port->portdev->drv_index, id); ++ port->debugfs_file = debugfs_create_file(debugfs_name, 0444, ++ pdrvdata.debugfs_dir, ++ port, ++ &port_debugfs_ops); ++ } ++ return 0; ++ ++free_inbufs: ++ while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) ++ free_buf(buf); ++free_device: ++ device_destroy(pdrvdata.class, port->dev->devt); ++free_cdev: ++ cdev_del(&port->cdev); ++free_port: ++ kfree(port); ++fail: ++ return err; ++} ++ + /* Remove all port-specific data. 
*/ + static int remove_port(struct port *port) + { +@@ -858,7 +1098,18 @@ static int remove_port(struct port *port) + spin_lock_irq(&pdrvdata_lock); + list_del(&port->cons.list); + spin_unlock_irq(&pdrvdata_lock); ++#if 0 ++ /* ++ * hvc_remove() not called as removing one hvc port ++ * results in other hvc ports getting frozen. ++ * ++ * Once this is resolved in hvc, this functionality ++ * will be enabled. Till that is done, the -EPIPE ++ * return from get_chars() above will help ++ * hvc_console.c to clean up on ports we remove here. ++ */ + hvc_remove(port->cons.hvc); ++#endif + } + if (port->guest_connected) + send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); +@@ -870,6 +1121,8 @@ static int remove_port(struct port *port) + /* Remove unused data this port might have received. */ + discard_port_data(port); + ++ reclaim_consumed_buffers(port); ++ + /* Remove buffers we queued up for the Host to send us data in. */ + while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) + free_buf(buf); +@@ -894,7 +1147,7 @@ static void handle_control_message(struct ports_device *portdev, + cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); + + port = find_port_by_id(portdev, cpkt->id); +- if (!port) { ++ if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) { + /* No valid header at start of buffer. Drop it. */ + dev_dbg(&portdev->vdev->dev, + "Invalid index %u in control packet\n", cpkt->id); +@@ -902,6 +1155,24 @@ static void handle_control_message(struct ports_device *portdev, + } + + switch (cpkt->event) { ++ case VIRTIO_CONSOLE_PORT_ADD: ++ if (port) { ++ dev_dbg(&portdev->vdev->dev, ++ "Port %u already added\n", port->id); ++ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); ++ break; ++ } ++ if (cpkt->id >= portdev->config.max_nr_ports) { ++ dev_warn(&portdev->vdev->dev, ++ "Request for adding port with out-of-bound id %u, max. supported id: %u\n", ++ cpkt->id, portdev->config.max_nr_ports - 1); ++ break; ++ } ++ add_port(portdev, cpkt->id); ++ break; ++ case VIRTIO_CONSOLE_PORT_REMOVE: ++ remove_port(port); ++ break; + case VIRTIO_CONSOLE_CONSOLE_PORT: + if (!cpkt->value) + break; +@@ -923,6 +1194,14 @@ static void handle_control_message(struct ports_device *portdev, + case VIRTIO_CONSOLE_PORT_OPEN: + port->host_connected = cpkt->value; + wake_up_interruptible(&port->waitqueue); ++ /* ++ * If the host port got closed and the host had any ++ * unconsumed buffers, we'll be able to reclaim them ++ * now. ++ */ ++ spin_lock_irq(&port->outvq_lock); ++ reclaim_consumed_buffers(port); ++ spin_unlock_irq(&port->outvq_lock); + break; + case VIRTIO_CONSOLE_PORT_NAME: + /* +@@ -947,37 +1226,18 @@ static void handle_control_message(struct ports_device *portdev, + */ + err = sysfs_create_group(&port->dev->kobj, + &port_attribute_group); +- if (err) ++ if (err) { + dev_err(port->dev, + "Error %d creating sysfs device attributes\n", + err); +- +- break; +- case VIRTIO_CONSOLE_PORT_REMOVE: +- /* +- * Hot unplug the port. We don't decrement nr_ports +- * since we don't want to deal with extra complexities +- * of using the lowest-available port id: We can just +- * pick up the nr_ports number as the id and not have +- * userspace send it to us. This helps us in two +- * ways: +- * +- * - We don't need to have a 'port_id' field in the +- * config space when a port is hot-added. This is a +- * good thing as we might queue up multiple hotplug +- * requests issued in our workqueue. 
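With the config-space nr_ports field gone, hot plug is driven entirely by the VIRTIO_CONSOLE_PORT_ADD/PORT_REMOVE control messages handled above: an add for an id beyond max_nr_ports is rejected, a duplicate add is answered with another PORT_READY, and a remove tears the port down. The toy dispatcher below mirrors only that decision logic; the event enum and the port table are stand-ins, not the driver's data structures.

#include <stdio.h>
#include <stdbool.h>

#define MAX_NR_PORTS 4u       /* stand-in for portdev->config.max_nr_ports */

/* Toy event codes for the sketch; the real values live in virtio_console.h. */
enum { PORT_ADD, PORT_REMOVE };

static bool ports[MAX_NR_PORTS];   /* true = port exists (models find_port_by_id) */

/* Models the PORT_ADD/PORT_REMOVE arms of handle_control_message(). */
static void handle_event(unsigned int id, int event)
{
        switch (event) {
        case PORT_ADD:
                if (id >= MAX_NR_PORTS) {
                        printf("add: id %u out of bounds (max id %u)\n",
                               id, MAX_NR_PORTS - 1);
                        return;
                }
                if (ports[id]) {
                        printf("add: port %u already there, re-ack PORT_READY\n", id);
                        return;
                }
                ports[id] = true;
                printf("add: created port %u\n", id);
                break;
        case PORT_REMOVE:
                if (id < MAX_NR_PORTS && ports[id]) {
                        ports[id] = false;
                        printf("remove: tore down port %u\n", id);
                }
                break;
        }
}

int main(void)
{
        handle_event(1, PORT_ADD);
        handle_event(1, PORT_ADD);      /* duplicate add */
        handle_event(9, PORT_ADD);      /* out of range */
        handle_event(1, PORT_REMOVE);
        return 0;
}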
+- * +- * - Another way to deal with this would have been to +- * use a bitmap of the active ports and select the +- * lowest non-active port from that map. That +- * bloats the already tight config space and we +- * would end up artificially limiting the +- * max. number of ports to sizeof(bitmap). Right +- * now we can support 2^32 ports (as the port id is +- * stored in a u32 type). +- * +- */ +- remove_port(port); ++ } else { ++ /* ++ * Generate a udev event so that appropriate ++ * symlinks can be created based on udev ++ * rules. ++ */ ++ kobject_uevent(&port->dev->kobj, KOBJ_CHANGE); ++ } + break; + } + } +@@ -1055,10 +1315,7 @@ static void config_intr(struct virtio_device *vdev) + struct ports_device *portdev; + + portdev = vdev->priv; +- if (use_multiport(portdev)) { +- /* Handle port hot-add */ +- schedule_work(&portdev->config_work); +- } ++ + /* + * We'll use this way of resizing only for legacy support. + * For newer userspace (VIRTIO_CONSOLE_F_MULTPORT+), use +@@ -1068,192 +1325,6 @@ static void config_intr(struct virtio_device *vdev) + resize_console(find_port_by_id(portdev, 0)); + } + +-static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) +-{ +- struct port_buffer *buf; +- unsigned int ret; +- int err; +- +- ret = 0; +- do { +- buf = alloc_buf(PAGE_SIZE); +- if (!buf) +- break; +- +- spin_lock_irq(lock); +- err = add_inbuf(vq, buf); +- if (err < 0) { +- spin_unlock_irq(lock); +- free_buf(buf); +- break; +- } +- ret++; +- spin_unlock_irq(lock); +- } while (err > 0); +- +- return ret; +-} +- +-static int add_port(struct ports_device *portdev, u32 id) +-{ +- char debugfs_name[16]; +- struct port *port; +- struct port_buffer *buf; +- dev_t devt; +- int err; +- +- port = kmalloc(sizeof(*port), GFP_KERNEL); +- if (!port) { +- err = -ENOMEM; +- goto fail; +- } +- +- port->portdev = portdev; +- port->id = id; +- +- port->name = NULL; +- port->inbuf = NULL; +- port->cons.hvc = NULL; +- +- port->host_connected = port->guest_connected = false; +- +- port->in_vq = portdev->in_vqs[port->id]; +- port->out_vq = portdev->out_vqs[port->id]; +- +- cdev_init(&port->cdev, &port_fops); +- +- devt = MKDEV(portdev->chr_major, id); +- err = cdev_add(&port->cdev, devt, 1); +- if (err < 0) { +- dev_err(&port->portdev->vdev->dev, +- "Error %d adding cdev for port %u\n", err, id); +- goto free_port; +- } +- port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, +- devt, port, "vport%up%u", +- port->portdev->drv_index, id); +- if (IS_ERR(port->dev)) { +- err = PTR_ERR(port->dev); +- dev_err(&port->portdev->vdev->dev, +- "Error %d creating device for port %u\n", +- err, id); +- goto free_cdev; +- } +- +- spin_lock_init(&port->inbuf_lock); +- init_waitqueue_head(&port->waitqueue); +- +- /* Fill the in_vq with buffers so the host can send us data. */ +- err = fill_queue(port->in_vq, &port->inbuf_lock); +- if (!err) { +- dev_err(port->dev, "Error allocating inbufs\n"); +- err = -ENOMEM; +- goto free_device; +- } +- +- /* +- * If we're not using multiport support, this has to be a console port +- */ +- if (!use_multiport(port->portdev)) { +- err = init_port_console(port); +- if (err) +- goto free_inbufs; +- } +- +- spin_lock_irq(&portdev->ports_lock); +- list_add_tail(&port->list, &port->portdev->ports); +- spin_unlock_irq(&portdev->ports_lock); +- +- /* +- * Tell the Host we're set so that it can send us various +- * configuration parameters for this port (eg, port name, +- * caching, whether this is a console port, etc.) 
+- */ +- send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); +- +- if (pdrvdata.debugfs_dir) { +- /* +- * Finally, create the debugfs file that we can use to +- * inspect a port's state at any time +- */ +- sprintf(debugfs_name, "vport%up%u", +- port->portdev->drv_index, id); +- port->debugfs_file = debugfs_create_file(debugfs_name, 0444, +- pdrvdata.debugfs_dir, +- port, +- &port_debugfs_ops); +- } +- return 0; +- +-free_inbufs: +- while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) +- free_buf(buf); +-free_device: +- device_destroy(pdrvdata.class, port->dev->devt); +-free_cdev: +- cdev_del(&port->cdev); +-free_port: +- kfree(port); +-fail: +- return err; +-} +- +-/* +- * The workhandler for config-space updates. +- * +- * This is called when ports are hot-added. +- */ +-static void config_work_handler(struct work_struct *work) +-{ +- struct virtio_console_config virtconconf; +- struct ports_device *portdev; +- struct virtio_device *vdev; +- int err; +- +- portdev = container_of(work, struct ports_device, config_work); +- +- vdev = portdev->vdev; +- vdev->config->get(vdev, +- offsetof(struct virtio_console_config, nr_ports), +- &virtconconf.nr_ports, +- sizeof(virtconconf.nr_ports)); +- +- if (portdev->config.nr_ports == virtconconf.nr_ports) { +- /* +- * Port 0 got hot-added. Since we already did all the +- * other initialisation for it, just tell the Host +- * that the port is ready if we find the port. In +- * case the port was hot-removed earlier, we call +- * add_port to add the port. +- */ +- struct port *port; +- +- port = find_port_by_id(portdev, 0); +- if (!port) +- add_port(portdev, 0); +- else +- send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); +- return; +- } +- if (virtconconf.nr_ports > portdev->config.max_nr_ports) { +- dev_warn(&vdev->dev, +- "More ports specified (%u) than allowed (%u)", +- portdev->config.nr_ports + 1, +- portdev->config.max_nr_ports); +- return; +- } +- if (virtconconf.nr_ports < portdev->config.nr_ports) +- return; +- +- /* Hot-add ports */ +- while (virtconconf.nr_ports - portdev->config.nr_ports) { +- err = add_port(portdev, portdev->config.nr_ports); +- if (err) +- break; +- portdev->config.nr_ports++; +- } +-} +- + static int init_vqs(struct ports_device *portdev) + { + vq_callback_t **io_callbacks; +@@ -1375,7 +1446,6 @@ static const struct file_operations portdev_fops = { + static int __devinit virtcons_probe(struct virtio_device *vdev) + { + struct ports_device *portdev; +- u32 i; + int err; + bool multiport; + +@@ -1404,29 +1474,15 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) + } + + multiport = false; +- portdev->config.nr_ports = 1; + portdev->config.max_nr_ports = 1; + if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) { + multiport = true; + vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT; + + vdev->config->get(vdev, offsetof(struct virtio_console_config, +- nr_ports), +- &portdev->config.nr_ports, +- sizeof(portdev->config.nr_ports)); +- vdev->config->get(vdev, offsetof(struct virtio_console_config, + max_nr_ports), + &portdev->config.max_nr_ports, + sizeof(portdev->config.max_nr_ports)); +- if (portdev->config.nr_ports > portdev->config.max_nr_ports) { +- dev_warn(&vdev->dev, +- "More ports (%u) specified than allowed (%u). 
Will init %u ports.", +- portdev->config.nr_ports, +- portdev->config.max_nr_ports, +- portdev->config.max_nr_ports); +- +- portdev->config.nr_ports = portdev->config.max_nr_ports; +- } + } + + /* Let the Host know we support multiple ports.*/ +@@ -1442,24 +1498,28 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) + INIT_LIST_HEAD(&portdev->ports); + + if (multiport) { ++ unsigned int nr_added_bufs; ++ + spin_lock_init(&portdev->cvq_lock); + INIT_WORK(&portdev->control_work, &control_work_handler); +- INIT_WORK(&portdev->config_work, &config_work_handler); + +- err = fill_queue(portdev->c_ivq, &portdev->cvq_lock); +- if (!err) { ++ nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); ++ if (!nr_added_bufs) { + dev_err(&vdev->dev, + "Error allocating buffers for control queue\n"); + err = -ENOMEM; + goto free_vqs; + } ++ } else { ++ /* ++ * For backward compatibility: Create a console port ++ * if we're running on older host. ++ */ ++ add_port(portdev, 0); + } + +- for (i = 0; i < portdev->config.nr_ports; i++) +- add_port(portdev, i); +- +- /* Start using the new console output. */ +- early_put_chars = NULL; ++ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, ++ VIRTIO_CONSOLE_DEVICE_READY, 1); + return 0; + + free_vqs: +@@ -1471,6 +1531,9 @@ free_chrdev: + free: + kfree(portdev); + fail: ++ /* The host might want to notify mgmt sw about device add failure */ ++ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, ++ VIRTIO_CONSOLE_DEVICE_READY, 0); + return err; + } + +@@ -1484,7 +1547,6 @@ static void virtcons_remove(struct virtio_device *vdev) + portdev = vdev->priv; + + cancel_work_sync(&portdev->control_work); +- cancel_work_sync(&portdev->config_work); + + list_for_each_entry_safe(port, port2, &portdev->ports, list) + remove_port(port); +diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h +index ae4f039..a85064d 100644 +--- a/include/linux/virtio_console.h ++++ b/include/linux/virtio_console.h +@@ -14,6 +14,8 @@ + #define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */ + #define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */ + ++#define VIRTIO_CONSOLE_BAD_ID (~(u32)0) ++ + struct virtio_console_config { + /* colums of the screens */ + __u16 cols; +@@ -21,8 +23,6 @@ struct virtio_console_config { + __u16 rows; + /* max. 
number of ports this device can hold */ + __u32 max_nr_ports; +- /* number of ports added so far */ +- __u32 nr_ports; + } __attribute__((packed)); + + /* +@@ -36,12 +36,14 @@ struct virtio_console_control { + }; + + /* Some events for control messages */ +-#define VIRTIO_CONSOLE_PORT_READY 0 +-#define VIRTIO_CONSOLE_CONSOLE_PORT 1 +-#define VIRTIO_CONSOLE_RESIZE 2 +-#define VIRTIO_CONSOLE_PORT_OPEN 3 +-#define VIRTIO_CONSOLE_PORT_NAME 4 +-#define VIRTIO_CONSOLE_PORT_REMOVE 5 ++#define VIRTIO_CONSOLE_DEVICE_READY 0 ++#define VIRTIO_CONSOLE_PORT_ADD 1 ++#define VIRTIO_CONSOLE_PORT_REMOVE 2 ++#define VIRTIO_CONSOLE_PORT_READY 3 ++#define VIRTIO_CONSOLE_CONSOLE_PORT 4 ++#define VIRTIO_CONSOLE_RESIZE 5 ++#define VIRTIO_CONSOLE_PORT_OPEN 6 ++#define VIRTIO_CONSOLE_PORT_NAME 7 + + #ifdef __KERNEL__ + int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)); diff --git a/virtqueue-wrappers.patch b/virtqueue-wrappers.patch deleted file mode 100644 index 1767057..0000000 --- a/virtqueue-wrappers.patch +++ /dev/null @@ -1,642 +0,0 @@ -diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c -index e32b24b..83fa09a 100644 ---- a/drivers/block/virtio_blk.c -+++ b/drivers/block/virtio_blk.c -@@ -50,7 +50,7 @@ static void blk_done(struct virtqueue *vq) - unsigned long flags; - - spin_lock_irqsave(&vblk->lock, flags); -- while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) { -+ while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) { - int error; - - switch (vbr->status) { -@@ -158,7 +158,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, - } - } - -- if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) { -+ if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) { - mempool_free(vbr, vblk->pool); - return false; - } -@@ -187,7 +187,7 @@ static void do_virtblk_request(struct request_queue *q) - } - - if (issued) -- vblk->vq->vq_ops->kick(vblk->vq); -+ virtqueue_kick(vblk->vq); - } - - static void virtblk_prepare_flush(struct request_queue *q, struct request *req) -diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c -index 64fe0a7..75f1cbd 100644 ---- a/drivers/char/hw_random/virtio-rng.c -+++ b/drivers/char/hw_random/virtio-rng.c -@@ -32,7 +32,7 @@ static bool busy; - static void random_recv_done(struct virtqueue *vq) - { - /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ -- if (!vq->vq_ops->get_buf(vq, &data_avail)) -+ if (!virtqueue_get_buf(vq, &data_avail)) - return; - - complete(&have_data); -@@ -46,10 +46,10 @@ static void register_buffer(u8 *buf, size_t size) - sg_init_one(&sg, buf, size); - - /* There should always be room for one buffer. 
*/ -- if (vq->vq_ops->add_buf(vq, &sg, 0, 1, buf) < 0) -+ if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0) - BUG(); - -- vq->vq_ops->kick(vq); -+ virtqueue_kick(vq); - } - - static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) -diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c -index 196428c..48ce834 100644 ---- a/drivers/char/virtio_console.c -+++ b/drivers/char/virtio_console.c -@@ -328,7 +328,7 @@ static void *get_inbuf(struct port *port) - unsigned int len; - - vq = port->in_vq; -- buf = vq->vq_ops->get_buf(vq, &len); -+ buf = virtqueue_get_buf(vq, &len); - if (buf) { - buf->len = len; - buf->offset = 0; -@@ -349,8 +349,8 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf) - - sg_init_one(sg, buf->buf, buf->size); - -- ret = vq->vq_ops->add_buf(vq, sg, 0, 1, buf); -- vq->vq_ops->kick(vq); -+ ret = virtqueue_add_buf(vq, sg, 0, 1, buf); -+ virtqueue_kick(vq); - return ret; - } - -@@ -366,7 +366,7 @@ static void discard_port_data(struct port *port) - if (port->inbuf) - buf = port->inbuf; - else -- buf = vq->vq_ops->get_buf(vq, &len); -+ buf = virtqueue_get_buf(vq, &len); - - ret = 0; - while (buf) { -@@ -374,7 +374,7 @@ static void discard_port_data(struct port *port) - ret++; - free_buf(buf); - } -- buf = vq->vq_ops->get_buf(vq, &len); -+ buf = virtqueue_get_buf(vq, &len); - } - port->inbuf = NULL; - if (ret) -@@ -421,9 +421,9 @@ static ssize_t send_control_msg(struct port *port, unsigned int event, - vq = port->portdev->c_ovq; - - sg_init_one(sg, &cpkt, sizeof(cpkt)); -- if (vq->vq_ops->add_buf(vq, sg, 1, 0, &cpkt) >= 0) { -- vq->vq_ops->kick(vq); -- while (!vq->vq_ops->get_buf(vq, &len)) -+ if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) { -+ virtqueue_kick(vq); -+ while (!virtqueue_get_buf(vq, &len)) - cpu_relax(); - } - return 0; -@@ -439,10 +439,10 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) - out_vq = port->out_vq; - - sg_init_one(sg, in_buf, in_count); -- ret = out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, in_buf); -+ ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf); - - /* Tell Host to go! */ -- out_vq->vq_ops->kick(out_vq); -+ virtqueue_kick(out_vq); - - if (ret < 0) { - in_count = 0; -@@ -450,7 +450,7 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) - } - - /* Wait till the host acknowledges it pushed out the data we sent. */ -- while (!out_vq->vq_ops->get_buf(out_vq, &len)) -+ while (!virtqueue_get_buf(out_vq, &len)) - cpu_relax(); - fail: - /* We're expected to return the amount of data we wrote */ -@@ -901,7 +901,7 @@ static int remove_port(struct port *port) - discard_port_data(port); - - /* Remove buffers we queued up for the Host to send us data in. 
*/ -- while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) -+ while ((buf = virtqueue_detach_unused_buf(port->in_vq))) - free_buf(buf); - - kfree(port->name); -@@ -1030,7 +1030,7 @@ static void control_work_handler(struct work_struct *work) - vq = portdev->c_ivq; - - spin_lock(&portdev->cvq_lock); -- while ((buf = vq->vq_ops->get_buf(vq, &len))) { -+ while ((buf = virtqueue_get_buf(vq, &len))) { - spin_unlock(&portdev->cvq_lock); - - buf->len = len; -@@ -1224,7 +1224,7 @@ static int add_port(struct ports_device *portdev, u32 id) - return 0; - - free_inbufs: -- while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) -+ while ((buf = virtqueue_detach_unused_buf(port->in_vq))) - free_buf(buf); - free_device: - device_destroy(pdrvdata.class, port->dev->devt); -@@ -1536,10 +1536,10 @@ static void virtcons_remove(struct virtio_device *vdev) - - unregister_chrdev(portdev->chr_major, "virtio-portsdev"); - -- while ((buf = portdev->c_ivq->vq_ops->get_buf(portdev->c_ivq, &len))) -+ while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) - free_buf(buf); - -- while ((buf = portdev->c_ivq->vq_ops->detach_unused_buf(portdev->c_ivq))) -+ while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) - free_buf(buf); - - vdev->config->del_vqs(vdev); -diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c -index b0577dd..91738d8 100644 ---- a/drivers/net/virtio_net.c -+++ b/drivers/net/virtio_net.c -@@ -119,7 +119,7 @@ static void skb_xmit_done(struct virtqueue *svq) - struct virtnet_info *vi = svq->vdev->priv; - - /* Suppress further interrupts. */ -- svq->vq_ops->disable_cb(svq); -+ virtqueue_disable_cb(svq); - - /* We were probably waiting for more output buffers. */ - netif_wake_queue(vi->dev); -@@ -207,7 +207,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb) - return -EINVAL; - } - -- page = vi->rvq->vq_ops->get_buf(vi->rvq, &len); -+ page = virtqueue_get_buf(vi->rvq, &len); - if (!page) { - pr_debug("%s: rx error: %d buffers missing\n", - skb->dev->name, hdr->mhdr.num_buffers); -@@ -339,7 +339,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp) - - skb_to_sgvec(skb, sg + 1, 0, skb->len); - -- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb); -+ err = virtqueue_add_buf(vi->rvq, sg, 0, 2, skb); - if (err < 0) - dev_kfree_skb(skb); - -@@ -386,7 +386,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp) - - /* chain first in list head */ - first->private = (unsigned long)list; -- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2, -+ err = virtqueue_add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2, - first); - if (err < 0) - give_pages(vi, first); -@@ -406,7 +406,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp) - - sg_init_one(&sg, page_address(page), PAGE_SIZE); - -- err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page); -+ err = virtqueue_add_buf(vi->rvq, &sg, 0, 1, page); - if (err < 0) - give_pages(vi, page); - -@@ -435,7 +435,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp) - } while (err > 0); - if (unlikely(vi->num > vi->max)) - vi->max = vi->num; -- vi->rvq->vq_ops->kick(vi->rvq); -+ virtqueue_kick(vi->rvq); - return !oom; - } - -@@ -444,7 +444,7 @@ static void skb_recv_done(struct virtqueue *rvq) - struct virtnet_info *vi = rvq->vdev->priv; - /* Schedule NAPI, Suppress further interrupts if successful. 
*/ - if (napi_schedule_prep(&vi->napi)) { -- rvq->vq_ops->disable_cb(rvq); -+ virtqueue_disable_cb(rvq); - __napi_schedule(&vi->napi); - } - } -@@ -473,7 +473,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) - - again: - while (received < budget && -- (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) { -+ (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) { - receive_buf(vi->dev, buf, len); - --vi->num; - received++; -@@ -487,9 +487,9 @@ again: - /* Out of packets? */ - if (received < budget) { - napi_complete(napi); -- if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) && -+ if (unlikely(!virtqueue_enable_cb(vi->rvq)) && - napi_schedule_prep(napi)) { -- vi->rvq->vq_ops->disable_cb(vi->rvq); -+ virtqueue_disable_cb(vi->rvq); - __napi_schedule(napi); - goto again; - } -@@ -503,7 +503,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi) - struct sk_buff *skb; - unsigned int len, tot_sgs = 0; - -- while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) { -+ while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) { - pr_debug("Sent skb %p\n", skb); - vi->dev->stats.tx_bytes += skb->len; - vi->dev->stats.tx_packets++; -@@ -559,7 +559,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) - sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr); - - hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; -- return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb); -+ return virtqueue_add_buf(vi->svq, sg, hdr->num_sg, 0, skb); - } - - static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) -@@ -587,7 +587,7 @@ again: - kfree_skb(skb); - return NETDEV_TX_OK; - } -- vi->svq->vq_ops->kick(vi->svq); -+ virtqueue_kick(vi->svq); - - /* Don't wait up for transmitted skbs to be freed. */ - skb_orphan(skb); -@@ -595,12 +595,12 @@ again: - * before it gets out of hand. Naturally, this wastes entries. */ - if (capacity < 2+MAX_SKB_FRAGS) { - netif_stop_queue(dev); -- if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { -+ if (unlikely(!virtqueue_enable_cb(vi->svq))) { - /* More just got used, free them then recheck. */ - capacity += free_old_xmit_skbs(vi); - if (capacity >= 2+MAX_SKB_FRAGS) { - netif_start_queue(dev); -- vi->svq->vq_ops->disable_cb(vi->svq); -+ virtqueue_disable_cb(vi->svq); - } - } - } -@@ -645,7 +645,7 @@ static int virtnet_open(struct net_device *dev) - * now. virtnet_poll wants re-enable the queue, so we disable here. - * We synchronize against interrupts via NAPI_STATE_SCHED */ - if (napi_schedule_prep(&vi->napi)) { -- vi->rvq->vq_ops->disable_cb(vi->rvq); -+ virtqueue_disable_cb(vi->rvq); - __napi_schedule(&vi->napi); - } - return 0; -@@ -682,15 +682,15 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, - sg_set_buf(&sg[i + 1], sg_virt(s), s->length); - sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); - -- BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0); -+ BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0); - -- vi->cvq->vq_ops->kick(vi->cvq); -+ virtqueue_kick(vi->cvq); - - /* - * Spin for a response, the kick causes an ioport write, trapping - * into the hypervisor, so the request should be handled immediately. 
- */ -- while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp)) -+ while (!virtqueue_get_buf(vi->cvq, &tmp)) - cpu_relax(); - - return status == VIRTIO_NET_OK; -@@ -1006,13 +1006,13 @@ static void free_unused_bufs(struct virtnet_info *vi) - { - void *buf; - while (1) { -- buf = vi->svq->vq_ops->detach_unused_buf(vi->svq); -+ buf = virtqueue_detach_unused_buf(vi->svq); - if (!buf) - break; - dev_kfree_skb(buf); - } - while (1) { -- buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq); -+ buf = virtqueue_detach_unused_buf(vi->rvq); - if (!buf) - break; - if (vi->mergeable_rx_bufs || vi->big_packets) -diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c -index bfec7c2..0f1da45 100644 ---- a/drivers/virtio/virtio_balloon.c -+++ b/drivers/virtio/virtio_balloon.c -@@ -75,7 +75,7 @@ static void balloon_ack(struct virtqueue *vq) - struct virtio_balloon *vb; - unsigned int len; - -- vb = vq->vq_ops->get_buf(vq, &len); -+ vb = virtqueue_get_buf(vq, &len); - if (vb) - complete(&vb->acked); - } -@@ -89,9 +89,9 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq) - init_completion(&vb->acked); - - /* We should always be able to add one buffer to an empty queue. */ -- if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0) -+ if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0) - BUG(); -- vq->vq_ops->kick(vq); -+ virtqueue_kick(vq); - - /* When host has read buffer, this completes via balloon_ack */ - wait_for_completion(&vb->acked); -@@ -204,7 +204,7 @@ static void stats_request(struct virtqueue *vq) - struct virtio_balloon *vb; - unsigned int len; - -- vb = vq->vq_ops->get_buf(vq, &len); -+ vb = virtqueue_get_buf(vq, &len); - if (!vb) - return; - vb->need_stats_update = 1; -@@ -221,9 +221,9 @@ static void stats_handle_request(struct virtio_balloon *vb) - - vq = vb->stats_vq; - sg_init_one(&sg, vb->stats, sizeof(vb->stats)); -- if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0) -+ if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0) - BUG(); -- vq->vq_ops->kick(vq); -+ virtqueue_kick(vq); - } - - static void virtballoon_changed(struct virtio_device *vdev) -@@ -314,10 +314,9 @@ static int virtballoon_probe(struct virtio_device *vdev) - * use it to signal us later. - */ - sg_init_one(&sg, vb->stats, sizeof vb->stats); -- if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq, -- &sg, 1, 0, vb) < 0) -+ if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb) < 0) - BUG(); -- vb->stats_vq->vq_ops->kick(vb->stats_vq); -+ virtqueue_kick(vb->stats_vq); - } - - vb->thread = kthread_run(balloon, vb, "vballoon"); -diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c -index 0f90634..0717b5b 100644 ---- a/drivers/virtio/virtio_ring.c -+++ b/drivers/virtio/virtio_ring.c -@@ -155,11 +155,11 @@ static int vring_add_indirect(struct vring_virtqueue *vq, - return head; - } - --static int vring_add_buf(struct virtqueue *_vq, -- struct scatterlist sg[], -- unsigned int out, -- unsigned int in, -- void *data) -+int virtqueue_add_buf(struct virtqueue *_vq, -+ struct scatterlist sg[], -+ unsigned int out, -+ unsigned int in, -+ void *data) - { - struct vring_virtqueue *vq = to_vvq(_vq); - unsigned int i, avail, head, uninitialized_var(prev); -@@ -232,8 +232,9 @@ add_head: - return vq->num_free ? 
vq->vring.num : 0; - return vq->num_free; - } -+EXPORT_SYMBOL_GPL(virtqueue_add_buf); - --static void vring_kick(struct virtqueue *_vq) -+void virtqueue_kick(struct virtqueue *_vq) - { - struct vring_virtqueue *vq = to_vvq(_vq); - START_USE(vq); -@@ -253,6 +254,7 @@ static void vring_kick(struct virtqueue *_vq) - - END_USE(vq); - } -+EXPORT_SYMBOL_GPL(virtqueue_kick); - - static void detach_buf(struct vring_virtqueue *vq, unsigned int head) - { -@@ -284,7 +286,7 @@ static inline bool more_used(const struct vring_virtqueue *vq) - return vq->last_used_idx != vq->vring.used->idx; - } - --static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len) -+void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) - { - struct vring_virtqueue *vq = to_vvq(_vq); - void *ret; -@@ -325,15 +327,17 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len) - END_USE(vq); - return ret; - } -+EXPORT_SYMBOL_GPL(virtqueue_get_buf); - --static void vring_disable_cb(struct virtqueue *_vq) -+void virtqueue_disable_cb(struct virtqueue *_vq) - { - struct vring_virtqueue *vq = to_vvq(_vq); - - vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; - } -+EXPORT_SYMBOL_GPL(virtqueue_disable_cb); - --static bool vring_enable_cb(struct virtqueue *_vq) -+bool virtqueue_enable_cb(struct virtqueue *_vq) - { - struct vring_virtqueue *vq = to_vvq(_vq); - -@@ -351,8 +355,9 @@ static bool vring_enable_cb(struct virtqueue *_vq) - END_USE(vq); - return true; - } -+EXPORT_SYMBOL_GPL(virtqueue_enable_cb); - --static void *vring_detach_unused_buf(struct virtqueue *_vq) -+void *virtqueue_detach_unused_buf(struct virtqueue *_vq) - { - struct vring_virtqueue *vq = to_vvq(_vq); - unsigned int i; -@@ -375,6 +380,7 @@ static void *vring_detach_unused_buf(struct virtqueue *_vq) - END_USE(vq); - return NULL; - } -+EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf); - - irqreturn_t vring_interrupt(int irq, void *_vq) - { -@@ -396,15 +402,6 @@ irqreturn_t vring_interrupt(int irq, void *_vq) - } - EXPORT_SYMBOL_GPL(vring_interrupt); - --static struct virtqueue_ops vring_vq_ops = { -- .add_buf = vring_add_buf, -- .get_buf = vring_get_buf, -- .kick = vring_kick, -- .disable_cb = vring_disable_cb, -- .enable_cb = vring_enable_cb, -- .detach_unused_buf = vring_detach_unused_buf, --}; -- - struct virtqueue *vring_new_virtqueue(unsigned int num, - unsigned int vring_align, - struct virtio_device *vdev, -@@ -429,7 +426,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, - vring_init(&vq->vring, num, pages, vring_align); - vq->vq.callback = callback; - vq->vq.vdev = vdev; -- vq->vq.vq_ops = &vring_vq_ops; - vq->vq.name = name; - vq->notify = notify; - vq->broken = false; -diff --git a/include/linux/virtio.h b/include/linux/virtio.h -index 40d1709..5b0fce0 100644 ---- a/include/linux/virtio.h -+++ b/include/linux/virtio.h -@@ -14,7 +14,6 @@ - * @callback: the function to call when buffers are consumed (can be NULL). - * @name: the name of this virtqueue (mainly for debugging) - * @vdev: the virtio device this queue was created for. -- * @vq_ops: the operations for this virtqueue (see below). - * @priv: a pointer for the virtqueue implementation to use. 
- */ - struct virtqueue { -@@ -22,60 +21,60 @@ struct virtqueue { - void (*callback)(struct virtqueue *vq); - const char *name; - struct virtio_device *vdev; -- struct virtqueue_ops *vq_ops; - void *priv; - }; - - /** -- * virtqueue_ops - operations for virtqueue abstraction layer -- * @add_buf: expose buffer to other end -+ * operations for virtqueue -+ * virtqueue_add_buf: expose buffer to other end - * vq: the struct virtqueue we're talking about. - * sg: the description of the buffer(s). - * out_num: the number of sg readable by other side - * in_num: the number of sg which are writable (after readable ones) - * data: the token identifying the buffer. - * Returns remaining capacity of queue (sg segments) or a negative error. -- * @kick: update after add_buf -+ * virtqueue_kick: update after add_buf - * vq: the struct virtqueue - * After one or more add_buf calls, invoke this to kick the other side. -- * @get_buf: get the next used buffer -+ * virtqueue_get_buf: get the next used buffer - * vq: the struct virtqueue we're talking about. - * len: the length written into the buffer - * Returns NULL or the "data" token handed to add_buf. -- * @disable_cb: disable callbacks -+ * virtqueue_disable_cb: disable callbacks - * vq: the struct virtqueue we're talking about. - * Note that this is not necessarily synchronous, hence unreliable and only - * useful as an optimization. -- * @enable_cb: restart callbacks after disable_cb. -+ * virtqueue_enable_cb: restart callbacks after disable_cb. - * vq: the struct virtqueue we're talking about. - * This re-enables callbacks; it returns "false" if there are pending - * buffers in the queue, to detect a possible race between the driver - * checking for more work, and enabling callbacks. -- * @detach_unused_buf: detach first unused buffer -+ * virtqueue_detach_unused_buf: detach first unused buffer - * vq: the struct virtqueue we're talking about. - * Returns NULL or the "data" token handed to add_buf - * - * Locking rules are straightforward: the driver is responsible for - * locking. No two operations may be invoked simultaneously, with the exception -- * of @disable_cb. -+ * of virtqueue_disable_cb. - * - * All operations can be called in any context. 
-  */
--struct virtqueue_ops {
--	int (*add_buf)(struct virtqueue *vq,
--		       struct scatterlist sg[],
--		       unsigned int out_num,
--		       unsigned int in_num,
--		       void *data);
- 
--	void (*kick)(struct virtqueue *vq);
-+int virtqueue_add_buf(struct virtqueue *vq,
-+		      struct scatterlist sg[],
-+		      unsigned int out_num,
-+		      unsigned int in_num,
-+		      void *data);
- 
--	void *(*get_buf)(struct virtqueue *vq, unsigned int *len);
-+void virtqueue_kick(struct virtqueue *vq);
- 
--	void (*disable_cb)(struct virtqueue *vq);
--	bool (*enable_cb)(struct virtqueue *vq);
--	void *(*detach_unused_buf)(struct virtqueue *vq);
--};
-+void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
-+
-+void virtqueue_disable_cb(struct virtqueue *vq);
-+
-+bool virtqueue_enable_cb(struct virtqueue *vq);
-+
-+void *virtqueue_detach_unused_buf(struct virtqueue *vq);
- 
- /**
-  * virtio_device - representation of a device using virtio
-diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
-index 7eb78ec..dcfbe99 100644
---- a/net/9p/trans_virtio.c
-+++ b/net/9p/trans_virtio.c
-@@ -137,7 +137,7 @@ static void req_done(struct virtqueue *vq)
- 
- 	P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n");
- 
--	while ((rc = chan->vq->vq_ops->get_buf(chan->vq, &len)) != NULL) {
-+	while ((rc = virtqueue_get_buf(chan->vq, &len)) != NULL) {
- 		P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
- 		P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
- 		req = p9_tag_lookup(chan->client, rc->tag);
-@@ -209,13 +209,13 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
- 
- 	req->status = REQ_STATUS_SENT;
- 
--	if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
-+	if (virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
- 		P9_DPRINTK(P9_DEBUG_TRANS,
- 			   "9p debug: virtio rpc add_buf returned failure");
- 		return -EIO;
- 	}
- 
--	chan->vq->vq_ops->kick(chan->vq);
-+	virtqueue_kick(chan->vq);
- 
- 	P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
- 	return 0;
diff --git a/x86-debug-clear-reserved-bits-of-dr6.patch b/x86-debug-clear-reserved-bits-of-dr6.patch
new file mode 100644
index 0000000..1920233
--- /dev/null
+++ b/x86-debug-clear-reserved-bits-of-dr6.patch
@@ -0,0 +1,50 @@
+From: K.Prasad
+Date: Thu, 28 Jan 2010 11:14:01 +0000 (+0530)
+Subject: x86/debug: Clear reserved bits of DR6 in do_debug()
+X-Git-Tag: v2.6.34-rc1~197^2~94
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=40f9249a73f6c251adea492b1c3d19d39e2a9bda
+
+x86/debug: Clear reserved bits of DR6 in do_debug()
+
+Clear the reserved bits from the stored copy of the debug status
+register (DR6).
+This will help ease bitwise operations such as quick testing
+of a debug event's origin.
+
+Signed-off-by: K.Prasad
+Cc: Roland McGrath
+Cc: Jan Kiszka
+Cc: Alan Stern
+Cc: Ingo Molnar
+LKML-Reference: <20100128111401.GB13935@in.ibm.com>
+Signed-off-by: Frederic Weisbecker
+---
+
+diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
+index 8240f76..b81002f 100644
+--- a/arch/x86/include/asm/debugreg.h
++++ b/arch/x86/include/asm/debugreg.h
+@@ -14,6 +14,9 @@
+    which debugging register was responsible for the trap. The other bits
+    are either reserved or not of interest to us. */
+ 
++/* Define reserved bits in DR6 which are always set to 1 */
++#define DR6_RESERVED	(0xFFFF0FF0)
++
+ #define DR_TRAP0	(0x1)		/* db0 */
+ #define DR_TRAP1	(0x2)		/* db1 */
+ #define DR_TRAP2	(0x4)		/* db2 */
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 3339917..1168e44 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -534,6 +534,9 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+ 
+ 	get_debugreg(dr6, 6);
+ 
++	/* Filter out all the reserved bits which are preset to 1 */
++	dr6 &= ~DR6_RESERVED;
++
+ 	/* Catch kmemcheck conditions first of all! */
+ 	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
+ 		return;
diff --git a/x86-debug-send-sigtrap-for-user-icebp.patch b/x86-debug-send-sigtrap-for-user-icebp.patch
new file mode 100644
index 0000000..376fea2
--- /dev/null
+++ b/x86-debug-send-sigtrap-for-user-icebp.patch
@@ -0,0 +1,80 @@
+From: Frederic Weisbecker
+Date: Wed, 30 Jun 2010 13:09:06 +0000 (+0200)
+Subject: x86: Send a SIGTRAP for user icebp traps
+X-Git-Tag: v2.6.35-rc4~2^2~2
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=a1e80fafc9f0742a1776a0490258cb64912411b0
+
+x86: Send a SIGTRAP for user icebp traps
+
+Before we had a generic breakpoint layer, x86 used to send a
+sigtrap for any debug event that happened in userspace,
+except if it was caused by lazy dr7 switches.
+
+Currently we only send such a signal for single step or breakpoint
+events.
+
+However, there are three other kinds of debug exceptions:
+
+- debug register access detected: trigger an exception if the
+  next instruction touches the debug registers. We don't use
+  it.
+- task switch, but we don't use tss.
+- icebp/int01 trap. This instruction (0xf1) is undocumented and
+  generates an int 1 exception. Unlike single step through the TF
+  flag, it doesn't set the single step origin of the exception
+  in dr6.
+
+icebp then used to be reported in userspace using trap signals,
+but this has been incidentally broken with the new breakpoint
+code. Re-enable it. Since this is the only debug event that
+doesn't set anything in dr6, this is all we have to check.
+
+This fixes a regression in Wine where World of Warcraft got broken
+as it uses this for software protection checks. Other apps
+probably do as well.
+
+Reported-and-tested-by: Alexandre Julliard
+Signed-off-by: Frederic Weisbecker
+Cc: Ingo Molnar
+Cc: H. Peter Anvin
+Cc: Thomas Gleixner
+Cc: Prasad
+Cc: 2.6.33.x 2.6.34.x
+---
+
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 142d70c..725ef4d 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -526,6 +526,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
+ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+ {
+ 	struct task_struct *tsk = current;
++	int user_icebp = 0;
+ 	unsigned long dr6;
+ 	int si_code;
+ 
+@@ -534,6 +535,14 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+ 	/* Filter out all the reserved bits which are preset to 1 */
+ 	dr6 &= ~DR6_RESERVED;
+ 
++	/*
++	 * If dr6 has no reason to give us about the origin of this trap,
++	 * then it's very likely the result of an icebp/int01 trap.
++	 * User wants a sigtrap for that.
++	 */
++	if (!dr6 && user_mode(regs))
++		user_icebp = 1;
++
+ 	/* Catch kmemcheck conditions first of all! */
+ 	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
+ 		return;
+@@ -575,7 +584,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+ 		regs->flags &= ~X86_EFLAGS_TF;
+ 	}
+ 	si_code = get_si_code(tsk->thread.debugreg6);
+-	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS))
++	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
+ 		send_sigtrap(tsk, regs, error_code, si_code);
+ 	preempt_conditional_cli(regs);
+ 
diff --git a/xfs-move-aio-completion-after-unwritten-extent-conversion.patch b/xfs-move-aio-completion-after-unwritten-extent-conversion.patch
deleted file mode 100644
index 290be17..0000000
--- a/xfs-move-aio-completion-after-unwritten-extent-conversion.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From: Christoph Hellwig
-Date: Sun, 18 Jul 2010 21:17:10 +0000 (+0000)
-Subject: xfs: move aio completion after unwritten extent conversion
-X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=fb511f2150174b18b28ad54708c1adda0df39b17
-
-xfs: move aio completion after unwritten extent conversion
-
-If we write into an unwritten extent using AIO we need to complete the AIO
-request after the extent conversion has finished. Without that a read could
-race to see see the extent still unwritten and return zeros. For synchronous
-I/O we already take care of that by flushing the xfsconvertd workqueue (which
-might be a bit of overkill).
-
-To do that add iocb and result fields to struct xfs_ioend, so that we can
-call aio_complete from xfs_end_io after the extent conversion has happened.
-Note that we need a new result field as io_error is used for positive errno
-values, while the AIO code can return negative error values and positive
-transfer sizes.
-
-Signed-off-by: Christoph Hellwig
-Reviewed-by: Dave Chinner
-Signed-off-by: Alex Elder
----
-
-diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
-index 95d1e26..13622d5 100644
---- a/fs/xfs/linux-2.6/xfs_aops.c
-+++ b/fs/xfs/linux-2.6/xfs_aops.c
-@@ -265,8 +265,11 @@ xfs_end_io(
- 		xfs_finish_ioend(ioend, 0);
- 		/* ensure we don't spin on blocked ioends */
- 		delay(1);
--	} else
-+	} else {
-+		if (ioend->io_iocb)
-+			aio_complete(ioend->io_iocb, ioend->io_result, 0);
- 		xfs_destroy_ioend(ioend);
-+	}
- }
- 
- /*
-@@ -299,6 +302,8 @@ xfs_alloc_ioend(
- 	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
- 	ioend->io_offset = 0;
- 	ioend->io_size = 0;
-+	ioend->io_iocb = NULL;
-+	ioend->io_result = 0;
- 
- 	INIT_WORK(&ioend->io_work, xfs_end_io);
- 	return ioend;
-@@ -1411,6 +1416,7 @@ xfs_end_io_direct(
- 	bool is_async)
- {
- 	xfs_ioend_t *ioend = iocb->private;
-+	bool complete_aio = is_async;
- 
- 	/*
- 	 * Non-NULL private data means we need to issue a transaction to
-@@ -1436,7 +1442,14 @@ xfs_end_io_direct(
- 	if (ioend->io_type == IO_READ) {
- 		xfs_finish_ioend(ioend, 0);
- 	} else if (private && size > 0) {
--		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
-+		if (is_async) {
-+			ioend->io_iocb = iocb;
-+			ioend->io_result = ret;
-+			complete_aio = false;
-+			xfs_finish_ioend(ioend, 0);
-+		} else {
-+			xfs_finish_ioend(ioend, 1);
-+		}
- 	} else {
- 		/*
- 		 * A direct I/O write ioend starts it's life in unwritten
-@@ -1455,7 +1468,7 @@ xfs_end_io_direct(
- 	 */
- 	iocb->private = NULL;
- 
--	if (is_async)
-+	if (complete_aio)
- 		aio_complete(iocb, ret, 0);
- }
-