generic-poky/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-rar...

Index: linux-2.6.33/drivers/staging/Kconfig
===================================================================
--- linux-2.6.33.orig/drivers/staging/Kconfig
+++ linux-2.6.33/drivers/staging/Kconfig
@@ -141,5 +141,7 @@ source "drivers/staging/netwave/Kconfig"
source "drivers/staging/sm7xx/Kconfig"
+source "drivers/staging/rar_register/Kconfig"
+
endif # !STAGING_EXCLUDE_BUILD
endif # STAGING
Index: linux-2.6.33/drivers/staging/Makefile
===================================================================
--- linux-2.6.33.orig/drivers/staging/Makefile
+++ linux-2.6.33/drivers/staging/Makefile
@@ -38,7 +38,7 @@ obj-$(CONFIG_VT6656) += vt6656/
obj-$(CONFIG_FB_UDL) += udlfb/
obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_VME_BUS) += vme/
-obj-$(CONFIG_RAR_REGISTER) += rar/
+obj-$(CONFIG_RAR_DRIVER) += rar/
obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_RAMZSWAP) += ramzswap/
@@ -52,3 +52,4 @@ obj-$(CONFIG_WAVELAN) += wavelan/
obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan/
obj-$(CONFIG_PCMCIA_NETWAVE) += netwave/
obj-$(CONFIG_FB_SM7XX) += sm7xx/
+obj-$(CONFIG_RAR_REGISTER) += rar_register/
Index: linux-2.6.33/drivers/staging/rar_register/Kconfig
===================================================================
--- /dev/null
+++ linux-2.6.33/drivers/staging/rar_register/Kconfig
@@ -0,0 +1,14 @@
+#
+# RAR register driver configuration
+#
+
+menu "RAR Register Driver"
+
+config RAR_REGISTER
+ tristate "Intel Restricted Access Region Register Driver"
+ default n
+ ---help---
+ This driver allows other kernel drivers access to the
+ contents of the restricted access region registers.
+
+endmenu
Index: linux-2.6.33/drivers/staging/rar_register/Makefile
===================================================================
--- /dev/null
+++ linux-2.6.33/drivers/staging/rar_register/Makefile
@@ -0,0 +1,3 @@
+EXTRA_CFLAGS += -DLITTLE__ENDIAN
+obj-$(CONFIG_RAR_REGISTER) += rar_register.o
+rar_register_driver-objs := rar_register.o
Index: linux-2.6.33/drivers/staging/rar_register/rar_register.c
===================================================================
--- /dev/null
+++ linux-2.6.33/drivers/staging/rar_register/rar_register.c
@@ -0,0 +1,669 @@
+/*
+ * rar_register.c - An Intel Restricted Access Region register driver
+ *
+ * Copyright(c) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ *
+ * -------------------------------------------------------------------
+ *
+ * 20090806 Ossama Othman <ossama.othman@intel.com>
+ * Return zero high address if the upper 22 bits are zero.
+ * Cleaned up checkpatch errors.
+ * Clarified that driver is dealing with bus addresses.
+ *
+ * 20090702 Ossama Othman <ossama.othman@intel.com>
+ * Removed unnecessary include directives
+ * Cleaned up spinlocks.
+ * Cleaned up logging.
+ * Improved invalid parameter checks.
+ * Fixed and simplified RAR address retrieval and RAR locking
+ * code.
+ *
+ * 20090626 Mark Allyn <mark.a.allyn@intel.com>
+ * Initial publish
+ */
+
+#include <linux/rar/rar_register.h>
+#include <linux/rar/memrar.h>
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+
+/* PCI vendor id for controller */
+#define VENDOR_ID 0x8086
+
+/* PCI device id for controller */
+#define DEVICE_ID 0x4110
+
+
+/* === Lincroft Message Bus Interface === */
+/* Message Control Register */
+#define LNC_MCR_OFFSET 0xD0
+
+/* Message Data Register */
+#define LNC_MDR_OFFSET 0xD4
+
+/* Message Opcodes */
+#define LNC_MESSAGE_READ_OPCODE 0xD0
+#define LNC_MESSAGE_WRITE_OPCODE 0xE0
+
+/* Message Write Byte Enables */
+#define LNC_MESSAGE_BYTE_WRITE_ENABLES 0xF
+
+/* B-unit Port */
+#define LNC_BUNIT_PORT 0x3
+
+/* === Lincroft B-Unit Registers - Programmed by IA32 firmware === */
+#define LNC_BRAR0L 0x10
+#define LNC_BRAR0H 0x11
+#define LNC_BRAR1L 0x12
+#define LNC_BRAR1H 0x13
+
+/* Reserved for SeP */
+#define LNC_BRAR2L 0x14
+#define LNC_BRAR2H 0x15
+
+/* Moorestown supports three restricted access regions. */
+#define MRST_NUM_RAR 3
+
+
+/* RAR Bus Address Range */
+struct RAR_address_range {
+ u32 low;
+ u32 high;
+};
+
+/* Structure containing low and high RAR register offsets. */
+struct RAR_offsets {
+ u32 low; /* Register offset for low RAR bus address. */
+ u32 high; /* Register offset for high RAR bus address. */
+};
+
+struct RAR_client {
+ int (*client_callback)(void *client_data);
+ void *customer_data;
+ int client_called;
+ };
+
+DEFINE_SPINLOCK(rar_spinlock_lock);
+DEFINE_SPINLOCK(lnc_reg_lock);
+
+struct RAR_device {
+ unsigned long rar_flags;
+ unsigned long lnc_reg_flags;
+ struct RAR_offsets rar_offsets[MRST_NUM_RAR];
+ struct RAR_address_range rar_addr[MRST_NUM_RAR];
+ struct pci_dev *rar_dev;
+ u32 registered;
+ };
+
+/* this platform has only one rar_device for 3 rar regions */
+struct RAR_device my_rar_device;
+
+/* flag to indicate whether or not this driver is registered;
+ * this is for the entire driver and not just a device */
+int driver_registered;
+
+/* this data is for handling requests from other drivers which arrive
+ * prior to this driver initializing
+ */
+
+struct RAR_client clients[MRST_NUM_RAR];
+int num_clients;
+
+/* prototype for init */
+static int __init rar_init_handler(void);
+static void __exit rar_exit_handler(void);
+
+const struct pci_device_id rar_pci_id_tbl[] = {
+ { PCI_DEVICE(VENDOR_ID, DEVICE_ID) },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, rar_pci_id_tbl);
+
+/*
+ * Function that is activated on the successful probe of the RAR
+ * device (Moorestown host controller).
+ */
+static int rar_probe(struct pci_dev *dev, const struct pci_device_id *id);
+
+/* field for registering driver to PCI device */
+static struct pci_driver rar_pci_driver = {
+ .name = "rar_register",
+ .id_table = rar_pci_id_tbl,
+ .probe = rar_probe
+};
+
+const struct pci_device_id *my_id_table = rar_pci_id_tbl;
+
+/*
+ * This function is used to retrieve RAR info using the Lincroft
+ * message bus interface.
+ */
+static int memrar_get_rar_addr(struct pci_dev *pdev,
+ int offset,
+ u32 *addr)
+{
+ /*
+ * ======== The Lincroft Message Bus Interface ========
+ * Lincroft registers may be obtained from the PCI
+ * (the Host Bridge) using the Lincroft Message Bus
+ * Interface. That message bus interface is generally
+ * comprised of two registers: a control register (MCR, 0xD0)
+ * and a data register (MDR, 0xD4).
+ *
+ * The MCR (message control register) format is the following:
+ * 1. [31:24]: Opcode
+ * 2. [23:16]: Port
+ * 3. [15:8]: Register Offset
+ * 4. [7:4]: Byte Enables (use 0xF to set all of these bits
+ * to 1)
+ * 5. [3:0]: reserved
+ *
+ * Read (0xD0) and write (0xE0) opcodes are written to the
+ * control register when reading and writing to Lincroft
+ * registers, respectively.
+ *
+ * We're interested in registers found in the Lincroft
+ * B-unit. The B-unit port is 0x3.
+ *
+ * The six B-unit RAR register offsets we use are listed
+ * earlier in this file.
+ *
+ * Lastly, writing to the MCR register requires the "Byte
+ * Enables" bits to be set to 1. This may be achieved by
+ * writing 0xF to bits 7:4 (i.e. 0xF << 4).
+ *
+ * The MDR (message data register) format is the following:
+ * 1. [31:0]: Read/Write Data
+ *
+ * Data being read from this register is only available after
+ * writing the appropriate control message to the MCR
+ * register.
+ *
+ * Data being written to this register must be written before
+ * writing the appropriate control message to the MCR
+ * register.
+ */
+
+ int result;
+
+ /* Construct control message */
+ u32 const message =
+ (LNC_MESSAGE_READ_OPCODE << 24)
+ | (LNC_BUNIT_PORT << 16)
+ | (offset << 8)
+ | (LNC_MESSAGE_BYTE_WRITE_ENABLES << 4);
+
+ dev_dbg(&pdev->dev, "Offset for 'get' LNC MSG is %x\n", offset);
+
+ if (addr == 0) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&lnc_reg_lock, my_rar_device.lnc_reg_flags);
+
+ /* Send the control message */
+ result = pci_write_config_dword(pdev,
+ LNC_MCR_OFFSET,
+ message);
+
+ dev_dbg(&pdev->dev,
+ "Result from send ctl register is %x\n",
+ result);
+
+ if (!result) {
+ result = pci_read_config_dword(pdev,
+ LNC_MDR_OFFSET,
+ addr);
+
+ dev_dbg(&pdev->dev,
+ "Result from read data register is %x\n",
+ result);
+
+ dev_dbg(&pdev->dev,
+ "Value read from data register is %x\n",
+ *addr);
+ }
+
+ spin_unlock_irqrestore(&lnc_reg_lock, my_rar_device.lnc_reg_flags);
+
+ return result;
+}
+
+static int memrar_set_rar_addr(struct pci_dev *pdev,
+ int offset,
+ u32 addr)
+{
+ /*
+ * Data being written to this register must be written before
+ * writing the appropriate control message to the MCR
+ * register.
+ *
+ * @note See memrar_get_rar_addr() for a description of the
+ * message bus interface being used here.
+ */
+
+ int result = 0;
+
+ /* Construct control message */
+ u32 const message =
+ (LNC_MESSAGE_WRITE_OPCODE << 24)
+ | (LNC_BUNIT_PORT << 16)
+ | (offset << 8)
+ | (LNC_MESSAGE_BYTE_WRITE_ENABLES << 4);
+
+ if (addr == 0) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&lnc_reg_lock, my_rar_device.lnc_reg_flags);
+
+ dev_dbg(&pdev->dev,
+ "Offset for 'set' LNC MSG is %x\n", offset);
+
+ /* Send the control message */
+ result = pci_write_config_dword(pdev,
+ LNC_MDR_OFFSET,
+ addr);
+
+ dev_dbg(&pdev->dev,
+ "Result from write data register is %x\n",
+ result);
+
+ if (!result) {
+ dev_dbg(&pdev->dev,
+ "Value written to data register is %x\n",
+ addr);
+
+ result = pci_write_config_dword(pdev,
+ LNC_MCR_OFFSET,
+ message);
+
+ dev_dbg(&pdev->dev,
+ "Result from send ctl register is %x\n",
+ result);
+ }
+
+ spin_unlock_irqrestore(&lnc_reg_lock, my_rar_device.lnc_reg_flags);
+
+ return result;
+}
+
+/*
+ * Initialize RAR parameters, such as bus addresses, etc.
+ */
+static int memrar_init_rar_params(struct pci_dev *pdev)
+{
+ struct RAR_offsets const *end = my_rar_device.rar_offsets
+ + MRST_NUM_RAR;
+ struct RAR_offsets const *i;
+ struct pci_dev *my_pdev;
+ unsigned int n = 0;
+ int result = 0;
+
+ /* Retrieve RAR start and end bus addresses. */
+
+ /*
+ * Access the RAR registers through the Lincroft Message Bus
+ * Interface on PCI device: 00:00.0 Host bridge.
+ */
+
+ /* struct pci_dev *pdev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); */
+
+ my_pdev = pci_dev_get(pdev);
+
+ if (my_pdev == NULL) {
+ WARN_ON(1);
+ return -ENODEV;
+ }
+
+ for (i = my_rar_device.rar_offsets; i != end; ++i, ++n) {
+ if (memrar_get_rar_addr(my_pdev,
+ i->low,
+ &(my_rar_device.rar_addr[n].low)) != 0
+ || memrar_get_rar_addr(my_pdev,
+ i->high,
+ &(my_rar_device.rar_addr[n].high))
+ != 0) {
+ result = -1;
+ break;
+ }
+
+ /*
+ * Only the upper 22 bits of the RAR addresses are
+ * stored in their corresponding RAR registers so we
+ * must set the lower 10 bits accordingly.
+ *
+ * The low address has its lower 10 bits cleared, and
+ * the high address has all its lower 10 bits set,
+ * e.g.:
+ *
+ * low = 0x2ffffc00
+ * high = 0x3fffffff
+ *
+ * This is not arbitrary, and is actually how RAR
+ * addressing/configuration works.
+ */
+ my_rar_device.rar_addr[n].low &= 0xfffffc00u;
+
+ /*
+ * Set bits 9:0 if the 1 KiB aligned (the upper 22
+ * bits) high address is non-zero.
+ *
+ * Otherwise set all bits to zero since that indicates
+ * no RAR address is configured.
+ */
+ if ((my_rar_device.rar_addr[n].high & 0xfffffc00u) == 0)
+ my_rar_device.rar_addr[n].high = 0;
+ else
+ my_rar_device.rar_addr[n].high |= 0x3ffu;
+ }
+
+ /* Done accessing the device. */
+ /* pci_dev_put(pdev); */
+
+ if (result == 0) {
+ size_t z;
+ for (z = 0; z != MRST_NUM_RAR; ++z) {
+ /*
+ * "BRAR" refers to the RAR registers in the
+ * Lincroft B-unit.
+ */
+ dev_info(&pdev->dev,
+ "BRAR[%u] bus address range = "
+ "[0x%08x, 0x%08x]\n",
+ z,
+ my_rar_device.rar_addr[z].low,
+ my_rar_device.rar_addr[z].high);
+ }
+ }
+
+ return result;
+}
+
+/*
+ * This function registers the driver with the device subsystem
+ * (PCI, USB, etc.).
+ */
+static int __init rar_init_handler(void)
+{
+ return pci_register_driver(&rar_pci_driver);
+}
+
+static void __exit rar_exit_handler(void)
+{
+ pci_unregister_driver(&rar_pci_driver);
+}
+
+module_init(rar_init_handler);
+module_exit(rar_exit_handler);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel Restricted Access Region Register Driver");
+
+/*
+ * Function that is activated on the successful probe of the RAR device
+ * (Moorestown host controller).
+ */
+int rar_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int error;
+ int counter;
+
+ dev_dbg(&dev->dev,
+ "PCI probe starting\n");
+
+ /* enable the device */
+ error = pci_enable_device(dev);
+ if (error) {
+ dev_err(&dev->dev,
+ "Error enabling RAR register PCI device\n");
+ goto end_function;
+ }
+
+ /* we have only one device; fill in the rar_device structure */
+ my_rar_device.rar_dev = dev;
+ my_rar_device.rar_flags = 0;
+ my_rar_device.lnc_reg_flags = 0;
+ my_rar_device.rar_offsets[0].low = LNC_BRAR0L;
+ my_rar_device.rar_offsets[0].high = LNC_BRAR0H;
+ my_rar_device.rar_offsets[1].low = LNC_BRAR1L;
+ my_rar_device.rar_offsets[1].high = LNC_BRAR1H;
+ my_rar_device.rar_offsets[2].low = LNC_BRAR2L;
+ my_rar_device.rar_offsets[2].high = LNC_BRAR2H;
+ my_rar_device.registered = 1;
+
+ /*
+ * Initialize the RAR parameters, which have to be retrieved
+ * via the message bus interface.
+ */
+ error = memrar_init_rar_params(dev);
+ if (error) {
+ pci_disable_device(dev);
+
+ dev_err(&dev->dev,
+ "Error retrieving RAR addresses\n");
+
+ goto end_function;
+ }
+
+ driver_registered = 1;
+
+ /* now call anyone who has registered (using callbacks) */
+ for (counter = 0; counter < num_clients; counter += 1) {
+ if (!clients[counter].client_called) {
+ error = (*clients[counter].client_callback)(
+ clients[counter].customer_data);
+ clients[counter].client_called = 1;
+ dev_dbg(&my_rar_device.rar_dev->dev,
+ "Callback called for %d\n",
+ counter);
+ }
+ }
+
+end_function:
+
+ return error;
+}
+
+
+/*
+ * The rar_get_address function is used by other device drivers
+ * to obtain RAR address information on a RAR. It takes three
+ * parameters:
+ *
+ * int rar_index
+ * The rar_index is an index to the RAR for which you wish to retrieve
+ * the address information.
+ * Values can be 0, 1, or 2.
+ *
+ * u32 *start_address, u32 *end_address
+ * Pointers through which the low and high bus addresses of the RAR
+ * are returned.
+ *
+ * The function returns 0 upon success, -EINVAL for invalid parameters,
+ * or -ENODEV if there is no RAR facility on this system.
+ */
+int rar_get_address(int rar_index,
+ u32 *start_address,
+ u32 *end_address)
+{
+ int result = -ENODEV;
+
+ if (my_rar_device.registered) {
+ if (start_address == 0
+ || end_address == 0
+ || rar_index >= MRST_NUM_RAR
+ || rar_index < 0) {
+ result = -EINVAL;
+ } else {
+ *start_address = my_rar_device.rar_addr[rar_index].low;
+ *end_address = my_rar_device.rar_addr[rar_index].high;
+ result = 0;
+ }
+ }
+
+ return result;
+}
+EXPORT_SYMBOL(rar_get_address);
+
+/*
+ * The rar_lock function is used by other device drivers to lock a RAR.
+ * Once a RAR is locked, it stays locked until the next system reboot.
+ * The function takes one parameter:
+ *
+ * int rar_index
+ * The rar_index is an index to the RAR that you want to lock.
+ * Values can be 0, 1, or 2.
+ *
+ * The function returns 0 upon success, -EINVAL for an invalid index,
+ * or -ENODEV if there is no RAR facility on this system.
+ */
+int rar_lock(int rar_index)
+{
+ int result = -ENODEV;
+
+ if (rar_index >= MRST_NUM_RAR || rar_index < 0) {
+ result = -EINVAL;
+ goto exit_rar_lock;
+ }
+
+ spin_lock_irqsave(&rar_spinlock_lock, my_rar_device.rar_flags);
+
+ if (my_rar_device.registered) {
+
+ u32 low;
+ u32 high;
+
+ /*
+ * Clear bits 4:0 in low register to lock.
+ * Clear bits 8,4:0 in high register to lock.
+ *
+ * The rest of the lower 10 bits in both registers are
+ * unused so we might as well clear them all.
+ */
+ if (rar_index == RAR_TYPE_VIDEO) {
+ low = my_rar_device.rar_addr[rar_index].low &
+ 0xfffffc00u;
+ high = my_rar_device.rar_addr[rar_index].high &
+ 0xfffffc00u;
+ low |= 0x00000009;
+ high |= 0x00000015;
+ }
+
+ else if (rar_index == RAR_TYPE_AUDIO) {
+ low = my_rar_device.rar_addr[rar_index].low &
+ 0xfffffc00u;
+ high = my_rar_device.rar_addr[rar_index].high &
+ 0xfffffc00u;
+ low |= 0x00000008;
+ high |= 0x00000018;
+ }
+
+ else {
+ low = my_rar_device.rar_addr[rar_index].low &
+ 0xfffffc00u;
+ high = my_rar_device.rar_addr[rar_index].high &
+ 0xfffffc00u;
+ high |= 0x00000018;
+ }
+
+ /*
+ * Now program the register using the Lincroft message
+ * bus interface.
+ */
+ result = memrar_set_rar_addr(my_rar_device.rar_dev,
+ my_rar_device.rar_offsets[rar_index].low,
+ low);
+
+ if (result == 0)
+ result = memrar_set_rar_addr(
+ my_rar_device.rar_dev,
+ my_rar_device.rar_offsets[rar_index].high,
+ high);
+ }
+
+ spin_unlock_irqrestore(&rar_spinlock_lock, my_rar_device.rar_flags);
+
+exit_rar_lock:
+
+ return result;
+}
+EXPORT_SYMBOL(rar_lock);
+
+/* The register_rar function is used by other device drivers
+ * to ensure that this driver is ready. As we cannot be sure of
+ * the compile/execute order of drivers in the kernel, it is
+ * best to give this driver a callback function to call when
+ * it is ready to give out addresses. The callback function
+ * would contain those steps that continue the initialization of
+ * a driver and that do require a valid RAR address. One of those
+ * steps would be to call rar_get_address().
+ * This function returns 0 on success and a negative value on failure.
+ */
+int register_rar(int (*callback)(void *yourparameter), void *yourparameter)
+{
+
+ int result;
+
+ result = 0;
+
+ if (driver_registered) {
+
+ /* if the driver is already registered, then we can simply
+ call the callback right now */
+
+ result = (*callback)(yourparameter);
+ if (result) {
+ dev_dbg(&my_rar_device.rar_dev->dev,
+ "Immediate Callback failed: %x\n",
+ result);
+ } else {
+ dev_dbg(&my_rar_device.rar_dev->dev,
+ "Immediate Callback ran okay\n");
+ }
+
+ return result;
+ }
+
+ else if (num_clients >= MRST_NUM_RAR) {
+ return -ENODEV;
+ }
+
+ else {
+
+ clients[num_clients].client_callback = callback;
+ clients[num_clients].customer_data = yourparameter;
+ clients[num_clients].client_called = 0;
+ num_clients += 1;
+ dev_dbg(&my_rar_device.rar_dev->dev, "Callback registered\n");
+ }
+
+	return result;
+
+}
+EXPORT_SYMBOL(register_rar);
+
+/*
+ Local Variables:
+ c-file-style: "linux"
+ End:
+*/
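
A minimal usage sketch (not part of the patch itself): a dependent kernel driver registers a callback with this driver and retrieves the RAR bus addresses once the callback runs. The names my_rar_ready, my_driver_data and my_driver_init are placeholders, not symbols from the patch.

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/rar/rar_register.h>
	#include <linux/rar/memrar.h>	/* for the RAR_TYPE_* constants */

	static void *my_driver_data;	/* placeholder client context */

	/* Called by rar_register once it can hand out addresses. */
	static int my_rar_ready(void *data)
	{
		u32 low, high;
		int err = rar_get_address(RAR_TYPE_AUDIO, &low, &high);

		if (err)
			return err;	/* no RAR facility on this platform */

		/* ... continue initialization using the [low, high] range ... */

		return rar_lock(RAR_TYPE_AUDIO);
	}

	static int __init my_driver_init(void)
	{
		/* Runs the callback immediately if rar_register has already
		 * probed, otherwise defers it until rar_probe() completes. */
		return register_rar(my_rar_ready, my_driver_data);
	}
	module_init(my_driver_init);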
Index: linux-2.6.33/include/linux/rar/memrar.h
===================================================================
--- /dev/null
+++ linux-2.6.33/include/linux/rar/memrar.h
@@ -0,0 +1,172 @@
+/*
+ * RAR Handler (/dev/memrar) internal driver API.
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ */
+
+
+#ifndef _MEMRAR_H
+#define _MEMRAR_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+
+/*
+ * Constants that specify different kinds of RAR regions that could be
+ * set up.
+ */
+static __u32 const RAR_TYPE_VIDEO; /* 0 */
+static __u32 const RAR_TYPE_AUDIO = 1;
+static __u32 const RAR_TYPE_IMAGE = 2;
+static __u32 const RAR_TYPE_DATA = 3;
+
+/*
+ * @struct RAR_stat
+ *
+ * @brief This structure is used for @c RAR_HANDLER_STAT ioctl and for
+ * @c RAR_get_stat() user space wrapper function.
+ */
+struct RAR_stat {
+ /* Type of RAR memory (e.g., audio vs. video) */
+ __u32 type;
+
+ /*
+ * Total size of RAR memory region.
+ */
+ __u32 capacity;
+
+ /* Size of the largest reservable block. */
+ __u32 largest_block_size;
+};
+
+
+/*
+ * @struct RAR_block_info
+ *
+ * @brief The argument for the @c RAR_HANDLER_RESERVE @c ioctl.
+ *
+ */
+struct RAR_block_info {
+ /* Type of RAR memory (e.g., audio vs. video) */
+ __u32 type;
+
+ /* Requested size of a block to be reserved in RAR. */
+ __u32 size;
+
+ /* Handle that can be used to refer to reserved block. */
+ __u32 handle;
+};
+
+/*
+ * @struct RAR_buffer
+ *
+ * Structure that contains all information related to a given block of
+ * memory in RAR. It is generally only used when retrieving bus
+ * addresses.
+ *
+ * @note This structure is used only by RAR-enabled drivers, and is
+ * not intended to be exposed to the user space.
+ */
+struct RAR_buffer {
+ /* Structure containing base RAR buffer information */
+ struct RAR_block_info info;
+
+ /* Buffer bus address */
+ __u32 bus_address;
+};
+
+
+#define RAR_IOCTL_BASE 0xE0
+
+/* Reserve RAR block. */
+#define RAR_HANDLER_RESERVE _IOWR(RAR_IOCTL_BASE, 0x00, struct RAR_block_info)
+
+/* Release previously reserved RAR block. */
+#define RAR_HANDLER_RELEASE _IOW(RAR_IOCTL_BASE, 0x01, __u32)
+
+/* Get RAR stats. */
+#define RAR_HANDLER_STAT _IOWR(RAR_IOCTL_BASE, 0x02, struct RAR_stat)
+
+
+/* -------------------------------------------------------------- */
+/* Kernel Side RAR Handler Interface */
+/* -------------------------------------------------------------- */
+
+/*
+ * @function rar_reserve
+ *
+ * @brief Reserve RAR buffers.
+ *
+ * This function will reserve buffers in the restricted access regions
+ * of given types.
+ *
+ * @return Number of successfully reserved buffers.
+ * Successful buffer reservations will have the corresponding
+ * @c bus_address field set to a non-zero value in the
+ * given @a buffers vector.
+ */
+extern size_t rar_reserve(struct RAR_buffer *buffers,
+ size_t count);
+
+/*
+ * @function rar_release
+ *
+ * @brief Release RAR buffers retrieved through call to
+ * @c rar_reserve() or @c rar_handle_to_bus().
+ *
+ * This function will release RAR buffers that were retrieved through
+ * a call to @c rar_reserve() or @c rar_handle_to_bus() by
+ * decrementing the reference count. The RAR buffer will be reclaimed
+ * when the reference count drops to zero.
+ *
+ * @return Number of successfully released buffers.
+ * Successful releases will have their handle field set to
+ * zero in the given @a buffers vector.
+ */
+extern size_t rar_release(struct RAR_buffer *buffers,
+ size_t count);
+
+/*
+ * @function rar_handle_to_bus
+ *
+ * @brief Convert a vector of RAR handles to bus addresses.
+ *
+ * This function will retrieve the RAR buffer bus addresses, type and
+ * size corresponding to the RAR handles provided in the @a buffers
+ * vector.
+ *
+ * @return Number of successfully converted buffers.
+ * The bus address will be set to @c 0 for unrecognized
+ * handles.
+ *
+ * @note The reference count for each corresponding buffer in RAR will
+ * be incremented. Call @c rar_release() when done with the
+ * buffers.
+ */
+extern size_t rar_handle_to_bus(struct RAR_buffer *buffers,
+ size_t count);
+
+
+#endif /* _MEMRAR_H */
+
+
+/*
+ Local Variables:
+ c-file-style: "linux"
+ End:
+*/
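
A short sketch of the kernel-side interface declared above (not part of the patch): a RAR-enabled driver reserves a block, programs its hardware with the returned bus address, and releases the block when done. User space reaches the same blocks through the RAR_HANDLER_* ioctls on /dev/memrar. The function name my_reserve_example and the 128 KiB size are illustrative assumptions.

	#include <linux/errno.h>
	#include <linux/rar/memrar.h>

	static int my_reserve_example(void)
	{
		struct RAR_buffer buf = {
			.info = {
				.type = RAR_TYPE_VIDEO,
				.size = 128 * 1024,
			},
		};

		/* rar_reserve() returns the number of buffers it reserved;
		 * on success buf.bus_address holds the RAR bus address and
		 * buf.info.handle identifies the block. */
		if (rar_reserve(&buf, 1) != 1)
			return -ENOMEM;

		/* ... hand buf.bus_address to the authorized device ... */

		rar_release(&buf, 1);
		return 0;
	}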
Index: linux-2.6.33/include/linux/rar/rar_register.h
===================================================================
--- /dev/null
+++ linux-2.6.33/include/linux/rar/rar_register.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2008, 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ */
+
+
+#ifndef _RAR_REGISTER_H
+#define _RAR_REGISTER_H
+
+# include <linux/types.h>
+
+/* The register_rar function is used by other device drivers
+ * to ensure that this driver is ready. As we cannot be sure of
+ * the compile/execute order of drivers in the kernel, it is
+ * best to give this driver a callback function to call when
+ * it is ready to give out addresses. The callback function
+ * would contain those steps that continue the initialization of
+ * a driver and that do require a valid RAR address. One of those
+ * steps would be to call rar_get_address().
+ * This function returns 0 on success and a negative value on failure.
+ */
+int register_rar(int (*callback)(void *yourparameter), void *yourparameter);
+
+/* The rar_get_address function is used by other device drivers
+ * to obtain RAR address information on a RAR. It takes three
+ * parameters:
+ *
+ * int rar_index
+ * The rar_index is an index to the RAR for which you wish to retrieve
+ * the address information.
+ * Values can be 0, 1, or 2.
+ *
+ * u32 *start_address, u32 *end_address
+ * Pointers through which the low and high bus addresses of the RAR
+ * are returned.
+ *
+ * The function returns 0 upon success, -EINVAL for invalid parameters,
+ * or -ENODEV if there is no RAR facility on this system.
+ */
+int rar_get_address(int rar_index,
+ u32 *start_address,
+ u32 *end_address);
+
+
+/* The rar_lock function is used by other device drivers to lock a RAR.
+ * Once a RAR is locked, it stays locked until the next system reboot.
+ * The function takes one parameter:
+ *
+ * int rar_index
+ * The rar_index is an index to the RAR that you want to lock.
+ * Values can be 0, 1, or 2.
+ *
+ * The function returns 0 upon success, -EINVAL for an invalid index,
+ * or -ENODEV if there is no RAR facility on this system.
+ */
+int rar_lock(int rar_index);
+
+
+#endif /* _RAR_REGISTER_H */
+
+
+/*
+ Local Variables:
+ c-file-style: "linux"
+ End:
+*/
Index: linux-2.6.33/drivers/misc/Kconfig
===================================================================
--- linux-2.6.33.orig/drivers/misc/Kconfig
+++ linux-2.6.33/drivers/misc/Kconfig
@@ -249,6 +249,17 @@ config SGI_GRU_DEBUG
This option enables addition debugging code for the SGI GRU driver. If
you are unsure, say N.
+config MRST_RAR_HANDLER
+ tristate "RAR handler driver for Intel Moorestown platform"
+ depends on X86
+ select RAR_REGISTER
+ ---help---
+ This driver provides a memory management interface to
+ restricted access regions available in the Intel Moorestown
+ platform.
+
+ If unsure, say N.
+
config MRST_VIB
tristate "vibrator driver for Intel Moorestown platform"
help
Index: linux-2.6.33/drivers/misc/Makefile
===================================================================
--- linux-2.6.33.orig/drivers/misc/Makefile
+++ linux-2.6.33/drivers/misc/Makefile
@@ -22,6 +22,8 @@ obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfg
obj-$(CONFIG_HP_ILO) += hpilo.o
obj-$(CONFIG_MRST) += intel_mrst.o
obj-$(CONFIG_ISL29003) += isl29003.o
+obj-$(CONFIG_MRST_RAR_HANDLER) += memrar.o
+memrar-y := memrar_allocator.o memrar_handler.o
obj-$(CONFIG_MRST_VIB) += mrst_vib.o
obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
obj-$(CONFIG_DS1682) += ds1682.o
Index: linux-2.6.33/drivers/misc/memrar_allocator.c
===================================================================
--- /dev/null
+++ linux-2.6.33/drivers/misc/memrar_allocator.c
@@ -0,0 +1,374 @@
+/*
+ * memrar_allocator 0.2: An allocator for Intel RAR.
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ *
+ *
+ * ------------------------------------------------------------------
+ *
+ * This simple allocator implementation provides a
+ * malloc()/free()-like interface for reserving space within a
+ * previously reserved block of memory. It is not specific to
+ * any hardware, nor is it coupled with the lower level paging
+ * mechanism.
+ *
+ * The primary goal of this implementation is to provide a means
+ * to partition an arbitrary block of memory without actually
+ * accessing the memory or incurring any hardware side-effects
+ * (e.g. paging). It is, in effect, a bookkeeping mechanism for
+ * buffers.
+ */
+
+
+#include "memrar_allocator.h"
+#include <linux/slab.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+
+struct memrar_allocator *memrar_create_allocator(unsigned long base,
+ size_t capacity,
+ size_t block_size)
+{
+ struct memrar_allocator *allocator = NULL;
+ struct memrar_free_list *first_node = NULL;
+
+ /*
+ * Make sure the base address is aligned on a block_size
+ * boundary.
+ *
+ * @todo Is this necessary?
+ */
+ /* base = ALIGN(base, block_size); */
+
+ /* Validate parameters.
+ *
+ * Make sure we can allocate the entire memory allocator
+ * space. Zero capacity or block size are obviously invalid.
+ */
+ if (base == 0
+ || capacity == 0
+ || block_size == 0
+ || ULONG_MAX - capacity < base
+ || capacity < block_size)
+ return allocator;
+
+ /*
+ * There isn't much point in creating a memory allocator that
+ * is only capable of holding one block but we'll allow it,
+ * and issue a diagnostic.
+ */
+ WARN(capacity < block_size * 2,
+ "memrar: Only one block available to allocator.\n");
+
+ allocator = kmalloc(sizeof(*allocator), GFP_KERNEL);
+
+ if (allocator == NULL)
+ return allocator;
+
+ mutex_init(&allocator->lock);
+ allocator->base = base;
+
+ /* Round the capacity down to a multiple of block_size. */
+ allocator->capacity = (capacity / block_size) * block_size;
+
+ allocator->block_size = block_size;
+
+ allocator->largest_free_area = allocator->capacity;
+
+ /* Initialize the handle and free lists. */
+ INIT_LIST_HEAD(&allocator->handle_list.list);
+ INIT_LIST_HEAD(&allocator->free_list.list);
+
+ first_node = kmalloc(sizeof(*first_node), GFP_KERNEL);
+ if (first_node == NULL) {
+ kfree(allocator);
+ allocator = NULL;
+ } else {
+ /* Full range of blocks is available. */
+ first_node->begin = base;
+ first_node->end = base + allocator->capacity;
+ list_add(&first_node->list,
+ &allocator->free_list.list);
+ }
+
+ return allocator;
+}
+
+void memrar_destroy_allocator(struct memrar_allocator *allocator)
+{
+ /*
+ * Assume that the memory allocator lock isn't held at this
+ * point in time. Caller must ensure that.
+ */
+
+ struct memrar_free_list *pos;
+ struct memrar_free_list *n;
+
+ if (allocator == NULL)
+ return;
+
+ mutex_lock(&allocator->lock);
+
+ /* Reclaim free list resources. */
+ list_for_each_entry_safe(pos,
+ n,
+ &allocator->free_list.list,
+ list) {
+ list_del(&pos->list);
+ kfree(pos);
+ }
+
+ mutex_unlock(&allocator->lock);
+
+ kfree(allocator);
+}
+
+unsigned long memrar_allocator_alloc(struct memrar_allocator *allocator,
+ size_t size)
+{
+ struct memrar_free_list *pos = NULL;
+
+ size_t num_blocks;
+ unsigned long reserved_bytes;
+
+ /*
+ * Address of allocated buffer. We assume that zero is not a
+ * valid address.
+ */
+ unsigned long addr = 0;
+
+ if (allocator == NULL || size == 0)
+ return addr;
+
+ /* Reserve enough blocks to hold the amount of bytes requested. */
+ num_blocks = DIV_ROUND_UP(size, allocator->block_size);
+
+ reserved_bytes = num_blocks * allocator->block_size;
+
+ mutex_lock(&allocator->lock);
+
+ if (reserved_bytes > allocator->largest_free_area) {
+ mutex_unlock(&allocator->lock);
+ return addr;
+ }
+
+ /*
+ * Iterate through the free list to find a suitably sized
+ * range of free contiguous memory blocks.
+ */
+ list_for_each_entry(pos, &allocator->free_list.list, list) {
+ size_t const curr_size = pos->end - pos->begin;
+
+ if (curr_size >= reserved_bytes) {
+ struct memrar_handle *handle = NULL;
+ struct memrar_handle_list * const new_node =
+ kmalloc(sizeof(*new_node), GFP_KERNEL);
+
+ if (new_node == NULL)
+ break;
+
+ list_add(&new_node->list,
+ &allocator->handle_list.list);
+
+ handle = &new_node->handle;
+ handle->end = pos->end;
+ pos->end -= reserved_bytes;
+ handle->begin = pos->end;
+ addr = handle->begin;
+
+ if (curr_size == allocator->largest_free_area)
+ allocator->largest_free_area -=
+ reserved_bytes;
+
+ break;
+ }
+ }
+
+ mutex_unlock(&allocator->lock);
+
+ return addr;
+}
+
+long memrar_allocator_free(struct memrar_allocator *allocator,
+ unsigned long addr)
+{
+ struct list_head *pos = NULL;
+ struct list_head *tmp = NULL;
+ struct memrar_handle_list *handles = NULL;
+ struct memrar_handle *handle = NULL;
+ struct memrar_free_list *new_node = NULL;
+ int result = -ENOMEM;
+
+ if (allocator == NULL)
+ return -EINVAL;
+
+ if (addr == 0)
+ return 0; /* Ignore free(0). */
+
+ mutex_lock(&allocator->lock);
+
+ /* Find the corresponding handle. */
+ list_for_each_entry(handles,
+ &allocator->handle_list.list,
+ list) {
+ if (handles->handle.begin == addr) {
+ handle = &handles->handle;
+ break;
+ }
+ }
+
+ /* No such buffer created by this allocator. */
+ if (handle == NULL) {
+ mutex_unlock(&allocator->lock);
+ return -EFAULT;
+ }
+
+ /*
+ * Coalesce adjacent chunks of memory if possible.
+ *
+ * @note This isn't full blown coalescing since we're only
+ * coalescing at most three chunks of memory.
+ */
+ list_for_each_safe(pos, tmp, &allocator->free_list.list) {
+ /* @todo O(n) performance. Optimize. */
+
+ struct memrar_free_list * const chunk =
+ list_entry(pos,
+ struct memrar_free_list,
+ list);
+
+ struct memrar_free_list * const next =
+ list_entry(pos->next,
+ struct memrar_free_list,
+ list);
+
+ /* Extend size of existing free adjacent chunk. */
+ if (chunk->end == handle->begin) {
+ /*
+ * Chunk "less than" than the one we're
+ * freeing is adjacent.
+ */
+
+ unsigned long new_chunk_size;
+
+ chunk->end = handle->end;
+
+ /*
+ * Now check if next free chunk is adjacent to
+ * the current extended free chunk.
+ */
+ if (pos != pos->next
+ && chunk->end == next->begin) {
+ chunk->end = next->end;
+ list_del(pos->next);
+ kfree(next);
+ }
+
+ new_chunk_size = chunk->end - chunk->begin;
+
+ if (new_chunk_size > allocator->largest_free_area)
+ allocator->largest_free_area =
+ new_chunk_size;
+
+ result = 0;
+ goto exit_memrar_free;
+ } else if (chunk->begin == handle->end) {
+ /*
+ * Chunk "greater than" than the one we're
+ * freeing is adjacent.
+ */
+
+ unsigned long new_chunk_size;
+
+ chunk->begin = handle->begin;
+
+ /*
+ * Now check if next free chunk is adjacent to
+ * the current extended free chunk.
+ */
+ if (pos != pos->next
+ && chunk->begin == next->end) {
+ chunk->begin = next->begin;
+ list_del(pos->next);
+ kfree(next);
+ }
+
+ new_chunk_size = chunk->end - chunk->begin;
+
+ if (new_chunk_size > allocator->largest_free_area)
+ allocator->largest_free_area =
+ new_chunk_size;
+
+ result = 0;
+ goto exit_memrar_free;
+ }
+ }
+
+ /*
+ * Memory being freed is not adjacent to existing free areas
+ * of memory in the allocator. Add a new item to the free list.
+ *
+ * @todo Allocate this free_list node when the buffer itself
+ * is allocated to avoid a potential problem where a new
+ * node cannot be allocated due to lack of available
+ * kernel memory. We can then free this node in the
+ * above coalescing code if it isn't needed.
+ *
+ * @todo While making this change would address potential
+ * memory allocation failure, it would also
+ * unfortunately reduce performance of buffer allocation
+ * provided by this allocator.
+ */
+ new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
+ if (new_node != NULL) {
+ unsigned long new_chunk_size;
+
+ new_node->begin = handle->begin;
+ new_node->end = handle->end;
+ list_add(&new_node->list,
+ &allocator->free_list.list);
+
+ new_chunk_size = handle->end - handle->begin;
+
+ if (new_chunk_size > allocator->largest_free_area)
+ allocator->largest_free_area =
+ new_chunk_size;
+
+ result = 0;
+ }
+
+exit_memrar_free:
+
+ if (result == 0)
+ list_del(&handles->list);
+
+ mutex_unlock(&allocator->lock);
+
+ kfree(handles);
+
+ return result;
+}
+
+
+
+/*
+ Local Variables:
+ c-file-style: "linux"
+ End:
+*/
Index: linux-2.6.33/drivers/misc/memrar_allocator.h
===================================================================
--- /dev/null
+++ linux-2.6.33/drivers/misc/memrar_allocator.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ */
+
+#ifndef MEMRAR_ALLOCATOR_H
+#define MEMRAR_ALLOCATOR_H
+
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+/*
+ * @struct memrar_free_list
+ *
+ * @brief List of available areas of memory.
+ */
+struct memrar_free_list {
+ /* Linked list of free memory allocator blocks. */
+ struct list_head list;
+
+ /* Beginning of available address range. */
+ unsigned long begin;
+
+ /*
+ * End of available address range, one past the end,
+ * i.e. [begin, end).
+ */
+ unsigned long end;
+};
+
+struct memrar_allocator;
+
+/* Structure that describes a chunk of memory reserved by the allocator. */
+struct memrar_handle {
+ /* Beginning of available address range. */
+ unsigned long begin;
+
+ /*
+ * End of available address range, one past the end,
+ * i.e. [begin, end).
+ */
+ unsigned long end;
+};
+
+/*
+ * @struct memrar_handle_list
+ *
+ * @brief List of handles corresponding to allocated blocks of memory.
+ */
+struct memrar_handle_list {
+ /* Linked list of handles corresponding to allocated blocks. */
+ struct list_head list;
+
+ /* Handle for the allocated block of memory. */
+ struct memrar_handle handle;
+};
+
+/*
+ * @struct memrar_allocator
+ *
+ * @brief Encapsulation of the memory allocator state.
+ *
+ * This structure contains all memory allocator state, including the
+ * base address, capacity, free list, lock, etc.
+ */
+struct memrar_allocator {
+ /*
+ * Lock used to synchronize access to the memory allocator
+ * state.
+ */
+ struct mutex lock;
+
+ /* Base (start) address of the memory allocator. */
+ unsigned long base;
+
+ /* Size of the memory allocator in bytes. */
+ size_t capacity;
+
+ /*
+ * The size in bytes of individual blocks within the memory
+ * allocator.
+ */
+ size_t block_size;
+
+ /* Largest free area of memory in the allocator in bytes. */
+ size_t largest_free_area;
+
+ /* List of handles for allocated blocks of memory. */
+ struct memrar_handle_list handle_list;
+
+ /* List of free address ranges. */
+ struct memrar_free_list free_list;
+};
+
+/*
+ * @function memrar_create_allocator
+ *
+ * @brief Create a memory allocator.
+ *
+ * Create a memory allocator with the given capacity and block size.
+ * The capacity will be reduced to be a multiple of the block size, if
+ * necessary.
+ *
+ * @param base Address at which the memory allocator begins.
+ * @param capacity Desired size of the memory allocator. This value
+ * must be larger than the block_size, ideally more
+ * than twice as large since there wouldn't be much
+ * point in using a memory allocator otherwise.
+ * @param block_size The size of individual blocks within the memory
+ * allocator. This value must be smaller than the
+ * capacity.
+ * @return An instance of the memory allocator, if creation succeeds.
+ * @return Zero if creation fails. Failure may occur if not enough
+ * kernel memory exists to create the memrar_allocator
+ * instance itself, or if the capacity and block_size
+ * arguments are not compatible or make sense.
+ */
+struct memrar_allocator *memrar_create_allocator(unsigned long base,
+ size_t capacity,
+ size_t block_size);
+
+/*
+ * Reclaim resources held by the memory allocator. The caller must
+ * explicitly free all memory reserved by memrar_allocator_alloc()
+ * prior to calling this function. Otherwise leaks will occur.
+ */
+void memrar_destroy_allocator(struct memrar_allocator *allocator);
+
+/*
+ * Reserve a chunk of memory of the given size in the memory allocator.
+ */
+unsigned long memrar_allocator_alloc(struct memrar_allocator *allocator,
+ size_t size);
+
+/*
+ * Release a chunk of memory previously reserved from the memory allocator.
+ */
+long memrar_allocator_free(struct memrar_allocator *allocator,
+ unsigned long handle);
+
+#endif /* MEMRAR_ALLOCATOR_H */
+
+
+/*
+ Local Variables:
+ c-file-style: "linux"
+ End:
+*/
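
A small sketch of the allocator interface above (not part of the patch). The base address and sizes are illustrative; with a 4 KiB block size a 12000-byte request is rounded up to three blocks (12288 bytes), and allocator_example is a placeholder name.

	#include "memrar_allocator.h"

	static void allocator_example(unsigned long base)
	{
		/* Partition a 1 MiB region starting at 'base' into 4 KiB blocks. */
		struct memrar_allocator *a =
			memrar_create_allocator(base, 1024 * 1024, 4096);
		unsigned long chunk;

		if (a == NULL)
			return;

		chunk = memrar_allocator_alloc(a, 12000);	/* 3 blocks reserved */
		if (chunk != 0)
			memrar_allocator_free(a, chunk);

		memrar_destroy_allocator(a);
	}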
Index: linux-2.6.33/drivers/misc/memrar_handler.c
===================================================================
--- /dev/null
+++ linux-2.6.33/drivers/misc/memrar_handler.c
@@ -0,0 +1,929 @@
+/*
+ * memrar_handler 1.0: An Intel restricted access region handler device
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ *
+ * -------------------------------------------------------------------
+ *
+ * Moorestown restricted access regions (RAR) provide isolated
+ * areas of main memory that are only accessible by authorized
+ * devices.
+ *
+ * The Intel Moorestown RAR handler module exposes a kernel space
+ * RAR memory management mechanism. It is essentially a
+ * RAR-specific allocator.
+ *
+ * Besides providing RAR buffer management, the RAR handler also
+ * behaves in many ways like an OS virtual memory manager. For
+ * example, the RAR "handles" created by the RAR handler are
+ * analogous to user space virtual addresses.
+ *
+ * RAR memory itself is never accessed directly by the RAR
+ * handler.
+ *
+ * -------------------------------------------------------------------
+ *
+ * TODO
+ *
+ * 1. Split user space interface from core/kernel code, e.g.:
+ * memrar_handler.c -> memrar_core.c, memrar_user.c
+ *
+ * 2. Convert API documentation to Kerneldoc.
+ *
+ * 3. Move memrar_allocator.* to the kernel 'lib' directory since it
+ * is HW neutral.
+ * a. Alternatively, use lib/genalloc.c instead.
+ * b. A kernel port of Doug Lea's malloc() implementation may
+ * also be an option.
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+
+#include <linux/rar/rar_register.h>
+#include <linux/rar/memrar.h>
+
+#include "memrar_allocator.h"
+
+
+#define MEMRAR_VER "1.0"
+
+/*
+ * Moorestown supports three restricted access regions.
+ *
+ * We only care about the first two, video and audio. The third,
+ * reserved for Chaabi and the P-unit, will be handled by their
+ * respective drivers.
+ */
+#define MRST_NUM_RAR 2
+
+/* ---------------- -------------------- ------------------- */
+
+/*
+ * List structure that keeps track of all RAR buffers.
+ */
+struct memrar_buffer_info {
+ /* Linked list of memrar_buffer_info objects. */
+ struct list_head list;
+
+ /* Core RAR buffer information. */
+ struct RAR_buffer buffer;
+
+ /* Reference count */
+ struct kref refcount;
+
+ /*
+ * File handle corresponding to process that reserved the
+ * block of memory in RAR. This will be zero for buffers
+ * allocated by other drivers instead of by a user space
+ * process.
+ */
+ struct file *owner;
+};
+
+/*
+ * Structure that describes the characteristics of a given RAR.
+ */
+struct memrar_rar_info {
+ /* Base bus address of the RAR. */
+ unsigned long base;
+
+ /* Length of the RAR. */
+ unsigned long length;
+
+ /* Virtual address of RAR mapped into kernel. */
+ void __iomem *iobase;
+
+ /*
+ * Allocator associated with the RAR.
+ *
+ * @note The allocator "capacity" may be smaller than the RAR
+ * length if the length is not a multiple of the
+ * configured allocator block size.
+ */
+ struct memrar_allocator *allocator;
+
+ /*
+ * Table that keeps track of all reserved RAR buffers.
+ */
+ struct memrar_buffer_info buffers;
+
+ /*
+ * Lock used to synchronize access to RAR-specific data
+ * structures.
+ */
+ struct mutex lock;
+};
+
+/*
+ * Array of RAR characteristics.
+ */
+static struct memrar_rar_info memrars[MRST_NUM_RAR];
+
+
+/* ---------------- -------------------- ------------------- */
+
+/* Validate RAR type. */
+static inline int memrar_is_valid_rar_type(u32 type)
+{
+ return type == RAR_TYPE_VIDEO || type == RAR_TYPE_AUDIO;
+}
+
+/* Check if an address/handle falls within the given RAR memory range. */
+static inline int memrar_handle_in_range(struct memrar_rar_info *rar,
+ u32 vaddr)
+{
+ unsigned long const iobase = (unsigned long) (rar->iobase);
+ return (vaddr >= iobase && vaddr < iobase + rar->length);
+}
+
+/* Retrieve RAR information associated with the given handle. */
+static struct memrar_rar_info *memrar_get_rar_info(u32 vaddr)
+{
+ int i;
+ for (i = 0; i < MRST_NUM_RAR; ++i) {
+ struct memrar_rar_info * const rar = &memrars[i];
+ if (memrar_handle_in_range(rar, vaddr))
+ return rar;
+ }
+
+ return NULL;
+}
+
+/*
+ * Retrieve bus address from given handle.
+ *
+ * @return Address corresponding to given handle. Zero if handle
+ * is invalid.
+ */
+static unsigned long memrar_get_bus_address(
+ struct memrar_rar_info *rar,
+ u32 vaddr)
+{
+ unsigned long const iobase = (unsigned long) (rar->iobase);
+
+ if (!memrar_handle_in_range(rar, vaddr))
+ return 0;
+
+ /*
+ * An assumption is made that the virtual address offset is
+ * the same as the bus address offset, at least based on the
+ * way this driver is implemented. For example, vaddr + 2 ==
+ * baddr + 2.
+ *
+ * @todo Is that a valid assumption?
+ */
+ return rar->base + (vaddr - iobase);
+}
+
+/*
+ * Retrieve physical address from given handle.
+ *
+ * @return Address corresponding to given handle. Zero if handle
+ * is invalid.
+ */
+static unsigned long memrar_get_physical_address(
+ struct memrar_rar_info *rar,
+ u32 vaddr)
+{
+ /*
+ * @todo This assumes that the bus address and physical
+ * address are the same. That is true for Moorestown
+ * but not necessarily on other platforms. This
+ * deficiency should be addressed at some point.
+ */
+ return memrar_get_bus_address(rar, vaddr);
+}
+
+/*
+ * Core block release code.
+ *
+ * @note This code removes the node from a list. Make sure any list
+ * iteration is performed using list_for_each_safe().
+ */
+static void memrar_release_block_i(struct kref *ref)
+{
+ /*
+ * Last reference is being released. Remove from the table,
+ * and reclaim resources.
+ */
+
+ struct memrar_buffer_info * const node =
+ container_of(ref, struct memrar_buffer_info, refcount);
+
+ struct RAR_block_info * const user_info =
+ &node->buffer.info;
+
+ struct memrar_allocator * const allocator =
+ memrars[user_info->type].allocator;
+
+ list_del(&node->list);
+
+ memrar_allocator_free(allocator, user_info->handle);
+
+ kfree(node);
+}
+
+/*
+ * Initialize RAR parameters, such as bus addresses, etc.
+ */
+static int memrar_init_rar_resources(char const *devname)
+{
+ /* ---- Sanity Checks ----
+ * 1. RAR bus addresses in both Lincroft and Langwell RAR
+ * registers should be the same.
+ * 2. Secure device ID in Langwell RAR registers should be set
+ * appropriately, i.e. only LPE DMA for the audio RAR, and
+ * security for the other Langwell based RAR register. The
+ * video RAR is not accessed from the Langwell side,
+ * meaning its corresponding Langwell RAR should only be
+ * accessible by the security engine.
+ * 3. Audio and video RAR register and RAR access should be
+ * locked. If not, lock them. Except for debugging
+ * purposes, there is no reason for them to be unlocked.
+ *
+ * @todo Should the RAR handler driver even be aware of audio
+ * and video RAR settings?
+ */
+
+ /*
+ * RAR buffer block size.
+ *
+ * We choose it to be the size of a page to simplify the
+ * /dev/memrar mmap() implementation and usage. Otherwise
+ * paging is not involved once an RAR is locked down.
+ */
+ static size_t const RAR_BLOCK_SIZE = PAGE_SIZE;
+
+ int z;
+ int found_rar = 0;
+
+ BUG_ON(MRST_NUM_RAR != ARRAY_SIZE(memrars));
+
+ for (z = 0; z != MRST_NUM_RAR; ++z) {
+ u32 low, high;
+ struct memrar_rar_info * const rar = &memrars[z];
+
+ BUG_ON(!memrar_is_valid_rar_type(z));
+
+ mutex_init(&rar->lock);
+
+ /*
+ * Initialize the process table before we reach any
+ * code that exits on failure since the finalization
+ * code requires an initialized list.
+ */
+ INIT_LIST_HEAD(&rar->buffers.list);
+
+ if (rar_get_address(z, &low, &high) != 0) {
+ /* No RAR is available. */
+ break;
+ } else if (low == 0 || high == 0) {
+ /*
+ * We don't immediately break out of the loop
+ * since the next type of RAR may be enabled.
+ */
+ rar->base = 0;
+ rar->length = 0;
+ rar->iobase = NULL;
+ rar->allocator = NULL;
+ continue;
+ }
+
+ /*
+ * @todo Verify that LNC and LNW RAR register contents
+ * (addresses, security, etc.) are compatible and
+ * consistent.
+ */
+
+ rar->length = high - low + 1;
+
+ /* Claim RAR memory as our own. */
+ if (request_mem_region(low, rar->length, devname) == NULL) {
+ rar->length = 0;
+
+ pr_err("%s: Unable to claim RAR[%d] memory.\n",
+ devname,
+ z);
+ pr_err("%s: RAR[%d] disabled.\n", devname, z);
+
+ /*
+ * Rather than break out of the loop by
+ * returning -EBUSY, for example, we may be
+ * able to claim memory of the next RAR region
+ * as our own.
+ */
+ continue;
+ }
+
+ rar->base = low;
+
+ /*
+ * Now map it into the kernel address space.
+ *
+ * Note that the RAR memory may only be accessed by IA
+ * when debugging. Otherwise attempts to access the
+ * RAR memory when it is locked down will result in
+ * behavior similar to writing to /dev/null and
+ * reading from /dev/zero. This behavior is enforced
+ * by the hardware. Even if we don't access the
+ * memory, mapping it into the kernel provides us with
+ * a convenient RAR handle to physical address mapping.
+ */
+ rar->iobase = ioremap_nocache(rar->base, rar->length);
+ if (rar->iobase == NULL) {
+ pr_err("%s: Unable to map RAR memory.\n",
+ devname);
+ return -ENOMEM;
+ }
+
+ /* Initialize corresponding memory allocator. */
+ rar->allocator = memrar_create_allocator(
+ (unsigned long) rar->iobase,
+ rar->length,
+ RAR_BLOCK_SIZE);
+ if (rar->allocator == NULL)
+ return -1;
+
+ /*
+ * -------------------------------------------------
+ * Make sure all RARs handled by us are locked down.
+ * -------------------------------------------------
+ */
+
+ /* Enable RAR protection on the Lincroft side. */
+ if (0) {
+ /* @todo Enable once LNW A2 is widely available. */
+ rar_lock(z);
+ } else {
+ pr_warning("%s: LNC RAR[%d] no lock sanity check.\n",
+ devname,
+ z);
+ }
+
+ /* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ */
+ /* |||||||||||||||||||||||||||||||||||||||||||||||||| */
+
+ /*
+ * Enable RAR protection on the Langwell side.
+ *
+ * Ideally Langwell side RAR protection should already
+ * have been enabled by the OEM in the SMIP header but
+ * we perform a sanity check, just in case.
+ *
+ * @todo Set appropriate "lock"/"valid" bits in LNW
+ * {LOW,UP}RAR[12] SCCB registers **and** LNW
+ * {LOW,UP}RAR[01] cDMI registers only if a
+ * suitable SDID (i.e. for security or LPE DMA)
+ * is set.
+ */
+ pr_warning("%s: LNW RAR[%d] no lock sanity check.\n",
+ devname,
+ z);
+
+
+ pr_info("%s: BRAR[%d]\n"
+ "\tlow address: 0x%x\n"
+ "\thigh address: 0x%x\n"
+ "\tsize : %u KiB\n",
+ devname,
+ z,
+ low,
+ high,
+ rar->allocator->capacity / 1024);
+
+ found_rar = 1;
+ }
+
+ if (!found_rar) {
+ /*
+ * No RAR support. Don't bother continuing.
+ *
+ * Note that this is not a failure.
+ */
+ pr_info("%s: No Moorestown RAR support available.\n",
+ devname);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/*
+ * Finalize RAR resources.
+ */
+static void memrar_fini_rar_resources(void)
+{
+ int z;
+ struct memrar_buffer_info *pos;
+ struct memrar_buffer_info *tmp;
+
+ /*
+ * @todo Do we need to hold a lock at this point in time?
+ * (module initialization failure or exit?)
+ */
+
+ for (z = MRST_NUM_RAR; z-- != 0; ) {
+ struct memrar_rar_info * const rar = &memrars[z];
+
+ /* Clean up remaining resources. */
+
+ list_for_each_entry_safe(pos,
+ tmp,
+ &rar->buffers.list,
+ list) {
+ kref_put(&pos->refcount, memrar_release_block_i);
+ }
+
+ memrar_destroy_allocator(rar->allocator);
+ rar->allocator = NULL;
+
+ iounmap(rar->iobase);
+ rar->iobase = NULL;
+
+ release_mem_region(rar->base, rar->length);
+ rar->base = 0;
+
+ rar->length = 0;
+ }
+}
+
+static long memrar_reserve_block(struct RAR_buffer *request,
+ struct file *filp)
+{
+ struct RAR_block_info * const rinfo = &request->info;
+ struct RAR_buffer *buffer;
+ struct memrar_buffer_info *buffer_info;
+ u32 handle;
+ struct memrar_rar_info *rar = NULL;
+
+ /* Prevent array overflow. */
+ if (!memrar_is_valid_rar_type(rinfo->type))
+ return -EINVAL;
+
+ rar = &memrars[rinfo->type];
+
+ /* Reserve memory in RAR. */
+ handle = memrar_allocator_alloc(rar->allocator, rinfo->size);
+ if (handle == 0)
+ return -ENOMEM;
+
+ buffer_info = kmalloc(sizeof(*buffer_info), GFP_KERNEL);
+
+ if (buffer_info == NULL) {
+ memrar_allocator_free(rar->allocator, handle);
+ return -ENOMEM;
+ }
+
+ buffer = &buffer_info->buffer;
+ buffer->info.type = rinfo->type;
+ buffer->info.size = rinfo->size;
+
+ /* Memory handle corresponding to the bus address. */
+ buffer->info.handle = handle;
+ buffer->bus_address = memrar_get_bus_address(rar, handle);
+
+ /*
+ * Keep track of owner so that we can later cleanup if
+ * necessary.
+ */
+ buffer_info->owner = filp;
+
+ kref_init(&buffer_info->refcount);
+
+ mutex_lock(&rar->lock);
+ list_add(&buffer_info->list, &rar->buffers.list);
+ mutex_unlock(&rar->lock);
+
+ rinfo->handle = buffer->info.handle;
+ request->bus_address = buffer->bus_address;
+
+ return 0;
+}
+
+static long memrar_release_block(u32 addr)
+{
+ struct memrar_buffer_info *pos;
+ struct memrar_buffer_info *tmp;
+ struct memrar_rar_info * const rar = memrar_get_rar_info(addr);
+ long result = -EINVAL;
+
+ if (rar == NULL)
+ return -EFAULT;
+
+ mutex_lock(&rar->lock);
+
+ /*
+ * Iterate through the buffer list to find the corresponding
+ * buffer to be released.
+ */
+ list_for_each_entry_safe(pos,
+ tmp,
+ &rar->buffers.list,
+ list) {
+ if (addr == pos->buffer.info.handle
+ && memrar_is_valid_rar_type(pos->buffer.info.type)) {
+ kref_put(&pos->refcount, memrar_release_block_i);
+ result = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&rar->lock);
+
+ return result;
+}
+
+static long memrar_get_stat(struct RAR_stat *r)
+{
+ long result = -EINVAL;
+
+ if (likely(r != NULL) && memrar_is_valid_rar_type(r->type)) {
+ struct memrar_allocator * const allocator =
+ memrars[r->type].allocator;
+
+ BUG_ON(allocator == NULL);
+
+ /*
+ * Allocator capacity doesn't change over time. No
+ * need to synchronize.
+ */
+ r->capacity = allocator->capacity;
+
+ mutex_lock(&allocator->lock);
+
+ r->largest_block_size = allocator->largest_free_area;
+
+ mutex_unlock(&allocator->lock);
+
+ result = 0;
+ }
+
+ return result;
+}
+
+static long memrar_ioctl(struct file *filp,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ long result = 0;
+
+ struct RAR_buffer buffer;
+ struct RAR_block_info * const request = &buffer.info;
+ struct RAR_stat rar_info;
+ u32 rar_handle;
+
+ switch (cmd) {
+ case RAR_HANDLER_RESERVE:
+ if (copy_from_user(request,
+ argp,
+ sizeof(*request)))
+ return -EFAULT;
+
+ result = memrar_reserve_block(&buffer, filp);
+ if (result != 0)
+ return result;
+
+		return copy_to_user(argp,
+				    request,
+				    sizeof(*request)) ? -EFAULT : 0;
+
+ case RAR_HANDLER_RELEASE:
+ if (copy_from_user(&rar_handle,
+ argp,
+ sizeof(rar_handle)))
+ return -EFAULT;
+
+ return memrar_release_block(rar_handle);
+
+ case RAR_HANDLER_STAT:
+ if (copy_from_user(&rar_info,
+ argp,
+ sizeof(rar_info)))
+ return -EFAULT;
+
+ /*
+ * Populate the RAR_stat structure based on the RAR
+ * type given by the user
+ */
+ if (memrar_get_stat(&rar_info) != 0)
+ return -EINVAL;
+
+ /*
+ * @todo Do we need to verify destination pointer
+ * "argp" is non-zero? Is that already done by
+ * copy_to_user()?
+ */
+ return copy_to_user(argp,
+ &rar_info,
+ sizeof(rar_info)) ? -EFAULT : 0;
+
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
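+
+/*
+ * Example (documentation only): a minimal user space sketch of the
+ * ioctl interface above.  The RAR type value 0 and the assumption
+ * that the RAR_HANDLER_* commands and structure layouts are exported
+ * through a shared header are illustrative; only the command names
+ * and field usage shown here are taken from memrar_ioctl() itself.
+ *
+ *	int fd = open("/dev/memrar", O_RDWR);
+ *	struct RAR_block_info req = { .type = 0, .size = 4096 };
+ *	struct RAR_stat st = { .type = 0 };
+ *
+ *	if (fd >= 0 && ioctl(fd, RAR_HANDLER_RESERVE, &req) == 0) {
+ *		ioctl(fd, RAR_HANDLER_STAT, &st);
+ *		... use req.handle, e.g. hand it to another driver ...
+ *		ioctl(fd, RAR_HANDLER_RELEASE, &req.handle);
+ *	}
+ *	close(fd);
+ */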
+
+static int memrar_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ size_t const size = vma->vm_end - vma->vm_start;
+
+ /* Users pass the RAR handle as the mmap() offset parameter. */
+ unsigned long const handle = vma->vm_pgoff << PAGE_SHIFT;
+
+ struct memrar_rar_info * const rar = memrar_get_rar_info(handle);
+
+ unsigned long pfn;
+
+	/*
+	 * Reject bogus RAR handles and mappings that would run past
+	 * the end of the RAR.  The handle is a bus address inside the
+	 * RAR, so its offset from the RAR base bounds the usable size.
+	 */
+	if (rar == NULL
+	    || handle == 0
+	    || size > rar->length - (handle - rar->base))
+		return -EINVAL;
+
+ /*
+ * Retrieve physical address corresponding to the RAR handle,
+ * and convert it to a page frame.
+ */
+ pfn = memrar_get_physical_address(rar, handle) >> PAGE_SHIFT;
+
+
+ pr_debug("memrar: mapping RAR range [0x%lx, 0x%lx) into user space.\n",
+ handle,
+ handle + size);
+
+ /*
+ * Map RAR memory into user space. This is really only useful
+ * for debugging purposes since the memory won't be
+	 * accessible, i.e. reads return zero and writes are ignored,
+ * when it is locked down.
+ */
+ if (remap_pfn_range(vma,
+ vma->vm_start,
+ pfn,
+ size,
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ /* vma->vm_ops = &memrar_mem_ops; */
+
+ return 0;
+}
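+
+/*
+ * Example (documentation only): mapping a reserved block into user
+ * space for debugging, assuming "fd" is an open /dev/memrar file
+ * descriptor, "req" holds a handle obtained via RAR_HANDLER_RESERVE,
+ * and the handle is page aligned (all names are illustrative):
+ *
+ *	void *p = mmap(NULL, req.size, PROT_READ | PROT_WRITE,
+ *		       MAP_SHARED, fd, req.handle);
+ *
+ * As noted above, once the RAR is locked down reads through such a
+ * mapping return zero and writes are dropped, so it is only of
+ * diagnostic value.
+ */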
+
+static int memrar_open(struct inode *inode, struct file *filp)
+{
+ /* Nothing to do yet. */
+
+ return 0;
+}
+
+static int memrar_release(struct inode *inode, struct file *filp)
+{
+ /* Free all regions associated with the given file handle. */
+
+ struct memrar_buffer_info *pos;
+ struct memrar_buffer_info *tmp;
+ int z;
+
+ for (z = 0; z != MRST_NUM_RAR; ++z) {
+ struct memrar_rar_info * const rar = &memrars[z];
+
+ mutex_lock(&rar->lock);
+
+ list_for_each_entry_safe(pos,
+ tmp,
+ &rar->buffers.list,
+ list) {
+ if (filp == pos->owner)
+ kref_put(&pos->refcount,
+ memrar_release_block_i);
+ }
+
+ mutex_unlock(&rar->lock);
+ }
+
+ return 0;
+}
+
+/*
+ * @note This function is part of the kernel space memrar driver API.
+ */
+size_t rar_reserve(struct RAR_buffer *buffers, size_t count)
+{
+ struct RAR_buffer * const end =
+ (buffers == NULL ? buffers : buffers + count);
+ struct RAR_buffer *i;
+
+ size_t reserve_count = 0;
+
+ for (i = buffers; i != end; ++i) {
+ if (memrar_reserve_block(i, NULL) == 0)
+ ++reserve_count;
+ else
+ i->bus_address = 0;
+ }
+
+ return reserve_count;
+}
+EXPORT_SYMBOL(rar_reserve);
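+
+/*
+ * Example (documentation only): a minimal sketch of a client kernel
+ * driver reserving one block through this API.  The type value and
+ * size are illustrative and error handling is reduced to the bare
+ * minimum.
+ *
+ *	struct RAR_buffer buf = {
+ *		.info = { .type = 0, .size = 64 * 1024 },
+ *	};
+ *
+ *	if (rar_reserve(&buf, 1) != 1)
+ *		return -ENOMEM;
+ *
+ * On success buf.info.handle and buf.bus_address are valid, and the
+ * buffer stays reserved until a matching rar_release() call.
+ */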
+
+/*
+ * @note This function is part of the kernel space memrar driver API.
+ */
+size_t rar_release(struct RAR_buffer *buffers, size_t count)
+{
+ struct RAR_buffer * const end =
+ (buffers == NULL ? buffers : buffers + count);
+ struct RAR_buffer *i;
+
+ size_t release_count = 0;
+
+ for (i = buffers; i != end; ++i) {
+ u32 * const handle = &i->info.handle;
+ if (memrar_release_block(*handle) == 0) {
+ /*
+ * @todo We assume we should do this each time
+ * the ref count is decremented. Should
+ * we instead only do this when the ref
+ * count has dropped to zero, and the
+ * buffer has been completely
+ * released/unmapped?
+ */
+ *handle = 0;
+ ++release_count;
+ }
+ }
+
+ return release_count;
+}
+EXPORT_SYMBOL(rar_release);
+
+/*
+ * @note This function is part of the kernel space driver API.
+ */
+size_t rar_handle_to_bus(struct RAR_buffer *buffers, size_t count)
+{
+ struct RAR_buffer * const end =
+ (buffers == NULL ? buffers : buffers + count);
+ struct RAR_buffer *i;
+ struct memrar_buffer_info *pos;
+
+ size_t conversion_count = 0;
+
+ /*
+ * Find all bus addresses corresponding to the given handles.
+ *
+ * @todo Not liking this nested loop. Optimize.
+ */
+ for (i = buffers; i != end; ++i) {
+ struct memrar_rar_info * const rar =
+ memrar_get_rar_info(i->info.handle);
+
+ /*
+ * Check if we have a bogus handle, and then continue
+ * with remaining buffers.
+ */
+ if (rar == NULL) {
+ i->bus_address = 0;
+ continue;
+ }
+
+ mutex_lock(&rar->lock);
+
+ list_for_each_entry(pos, &rar->buffers.list, list) {
+ struct RAR_block_info * const user_info =
+ &pos->buffer.info;
+
+ if (i->info.handle >= user_info->handle
+ && i->info.handle < (user_info->handle
+ + user_info->size)) {
+ u32 const offset =
+ i->info.handle - user_info->handle;
+
+ i->info.type = user_info->type;
+ i->info.size = user_info->size - offset;
+ i->bus_address =
+ pos->buffer.bus_address
+ + offset;
+
+ /* Increment the reference count. */
+ kref_get(&pos->refcount);
+
+ ++conversion_count;
+ break;
+ } else {
+ i->bus_address = 0;
+ }
+ }
+
+ mutex_unlock(&rar->lock);
+ }
+
+ return conversion_count;
+}
+EXPORT_SYMBOL(rar_handle_to_bus);
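+
+/*
+ * Example (documentation only): translating a handle received from
+ * user space into a bus address for DMA programming, then dropping
+ * the reference taken by the lookup.  program_dma() is a
+ * hypothetical placeholder for whatever the client driver does with
+ * the bus address; "handle" is assumed to come from a prior
+ * reservation.
+ *
+ *	struct RAR_buffer buf = { .info = { .handle = handle } };
+ *
+ *	if (rar_handle_to_bus(&buf, 1) == 1) {
+ *		program_dma(buf.bus_address, buf.info.size);
+ *		rar_release(&buf, 1);
+ *	}
+ */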
+
+static const struct file_operations memrar_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = memrar_ioctl,
+ .mmap = memrar_mmap,
+ .open = memrar_open,
+ .release = memrar_release,
+};
+
+static struct miscdevice memrar_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR, /* dynamic allocation */
+ .name = "memrar", /* /dev/memrar */
+ .fops = &memrar_fops
+};
+
+static char const banner[] __initdata =
+ KERN_INFO
+ "Intel RAR Handler: " MEMRAR_VER " initialized.\n";
+
+static int __init memrar_init(void)
+{
+ int result = 0;
+
+ printk(banner);
+
+ /*
+ * We initialize the RAR parameters early on so that we can
+ * discontinue memrar device initialization and registration
+ * if suitably configured RARs are not available.
+ */
+ result = memrar_init_rar_resources(memrar_miscdev.name);
+
+ if (result != 0)
+ return result;
+
+ result = misc_register(&memrar_miscdev);
+
+ if (result != 0) {
+ pr_err("%s: misc_register() failed.\n",
+ memrar_miscdev.name);
+
+ /* Clean up resources previously reserved. */
+ memrar_fini_rar_resources();
+ }
+
+ return result;
+}
+
+static void __exit memrar_exit(void)
+{
+	/*
+	 * Deregister the device first so that no new file operations
+	 * can race with the RAR resource teardown below.
+	 */
+	misc_deregister(&memrar_miscdev);
+
+	memrar_fini_rar_resources();
+}
+
+#ifndef MODULE
+/*
+ * The RAR handler must be initialized after the RAR register driver.
+ * Otherwise the RAR handler will always assume no RAR support
+ * exists.
+ */
+late_initcall_sync(memrar_init);
+#else
+module_init(memrar_init);
+#endif /* MODULE */
+
+module_exit(memrar_exit);
+
+
+MODULE_AUTHOR("Ossama Othman <ossama.othman@intel.com>");
+MODULE_DESCRIPTION("Intel Restricted Access Region Handler");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
+MODULE_VERSION(MEMRAR_VER);
+
+
+
+/*
+ Local Variables:
+ c-file-style: "linux"
+ End:
+*/
Index: linux-2.6.33/drivers/staging/rar/Kconfig
===================================================================
--- linux-2.6.33.orig/drivers/staging/rar/Kconfig
+++ linux-2.6.33/drivers/staging/rar/Kconfig
@@ -6,7 +6,7 @@ menu "RAR Register Driver"
#
# Restricted Access Register Manager
#
-config RAR_REGISTER
+config RAR_DRIVER
tristate "Restricted Access Region Register Driver"
default n
---help---
Index: linux-2.6.33/drivers/staging/rar/Makefile
===================================================================
--- linux-2.6.33.orig/drivers/staging/rar/Makefile
+++ linux-2.6.33/drivers/staging/rar/Makefile
@@ -1,2 +1,2 @@
EXTRA_CFLAGS += -DLITTLE__ENDIAN
-obj-$(CONFIG_RAR_REGISTER) += rar_driver.o
+obj-$(CONFIG_RAR_DRIVER) += rar_driver.o