# This patch enables kernel 2.6.26 support until AMD officially supports it.
# It was originally written by Zilvina Valinskas <zilvinas@ilibox.com>
# and adapted for AMD's 8-6 driver by Mario Limonciello <Mario_Limonciello@Dell.com>
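#
# Annotation, not part of the original patch: the bulk of this port converts
# the driver's nopage handlers to the fault interface, because 2.6.26 removed
# .nopage from struct vm_operations_struct. A minimal sketch of the interface
# change, assuming 2.6.26-era kernel headers:
#
#     /* up to 2.6.25 */
#     struct page *(*nopage)(struct vm_area_struct *vma,
#                            unsigned long address, int *type);
#     /* since 2.6.23, the only option from 2.6.26 on */
#     int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
#
# A fault handler reads the faulting address from vmf->virtual_address,
# stores the resolved page in vmf->page and returns 0, or returns
# VM_FAULT_SIGBUS instead of NOPAGE_SIGBUS on error. The patch also switches
# from find_task_by_pid() to find_task_by_vpid(), which replaces it as of
# 2.6.26.
#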
diff -Nur -x '*.orig' -x '*~' fglrx-installer-8.512/lib/modules/fglrx/build_mod/firegl_public.c fglrx-installer-8.512.new/lib/modules/fglrx/build_mod/firegl_public.c
--- fglrx-installer-8.512/lib/modules/fglrx/build_mod/firegl_public.c 2008-07-21 14:32:08.000000000 -0400
+++ fglrx-installer-8.512.new/lib/modules/fglrx/build_mod/firegl_public.c 2008-07-21 14:31:48.000000000 -0400
@@ -24,13 +24,13 @@
// ============================================================
#include <linux/version.h>

-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#error Kernel versions older than 2.6.0 are no longer supported by this module.
-#endif
+#endif

#include <linux/autoconf.h>

-#if !defined(CONFIG_X86_PC)
+#if !defined(CONFIG_X86_PC)
#if !defined(CONFIG_X86_64)
#if !defined(CONFIG_X86_VOYAGER)
#if !defined(CONFIG_X86_NUMAQ)
@@ -62,10 +62,10 @@
* distribution would even include such a kernel patch. */
#ifdef CONFIG_MEM_MIRROR
/* Prevent asm/mm_track.h from being included in subsequent
- * kernel headers as that would redefine CONFIG_MEM_MIRROR. */
+ * kernel headers as that would redefine CONFIG_MEM_MIRROR. */
#ifndef CONFIG_X86_64
#define __I386_MMTRACK_H__
-#define mm_track(ptep)
+#define mm_track(ptep)
#else
#define __X86_64_MMTRACK_H__
#define mm_track_pte(ptep)
@@ -93,7 +93,7 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
-#include <linux/init.h>
+//#include <linux/init.h>
#include <linux/file.h>
#include <linux/pci.h>
#include <linux/wait.h>
@@ -151,8 +151,8 @@
#include "linux/freezer.h"
#endif

-// For 2.6.18 or higher, the UTS_RELEASE is defined in the linux/utsrelease.h.
-#ifndef UTS_RELEASE
+// For 2.6.18 or higher, the UTS_RELEASE is defined in the linux/utsrelease.h.
+#ifndef UTS_RELEASE
#include <linux/utsrelease.h>
#endif

@@ -215,7 +215,7 @@

static void ATI_API_CALL __ke_printstr(const char *str);

-static struct pci_device_id fglrx_pci_table[] =
+static struct pci_device_id fglrx_pci_table[] =
{
#define FGL_ASIC_ID(x) \
{ \
@@ -283,7 +283,7 @@

static int firegl_pat_enabled = 0;
static unsigned long KCL_pat[2];
-static unsigned long KCL_orig_pat[2];
+static unsigned long KCL_orig_pat[2];

static int ATI_API_CALL KCL_enable_pat(void);
static void ATI_API_CALL KCL_disable_pat(void);
@@ -322,7 +322,7 @@
#endif

__ke_ssize_t ip_firegl_read( struct file *filp,
- char *buf,
+ char *buf,
__ke_size_t size,
__ke_loff_t *off_ptr)
{
@@ -334,7 +334,7 @@
}

__ke_ssize_t ip_firegl_write( struct file *filp,
- const char *buf,
+ const char *buf,
__ke_size_t size,
__ke_loff_t *off_ptr)
{
@@ -477,34 +477,34 @@
return firegl_debug_proc_write(file, buffer, count, data);
}

-static int
+static int
firegl_interrupt_open_wrap(
- struct inode *inode,
- struct file *file)
+ struct inode *inode,
+ struct file *file)
{
return firegl_interrupt_open(inode, file);
}

-static int
+static int
firegl_interrupt_release_wrap(
- struct inode *inode,
- struct file *file)
+ struct inode *inode,
+ struct file *file)
{
return firegl_interrupt_release(inode, file);
}

-static ssize_t
+static ssize_t
firegl_interrupt_read_wrap(
- struct file *user_file,
- char __user *user_buf,
- size_t user_buf_size,
+ struct file *user_file,
+ char __user *user_buf,
+ size_t user_buf_size,
loff_t *user_file_pos)
{
return (ssize_t) firegl_interrupt_read(user_file, user_buf, user_buf_size, user_file_pos);
}

-static unsigned int
-firegl_interrupt_poll_wrap(struct file *user_file, poll_table *pt)
+static unsigned int
+firegl_interrupt_poll_wrap(struct file *user_file, poll_table *pt)
{
if(firegl_interrupt_poll(user_file, (__ke_poll_table*)pt))
{
@@ -516,11 +516,11 @@
}
}

-static ssize_t
+static ssize_t
firegl_interrupt_write_wrap(
- struct file *user_file,
- const char __user *user_buf,
- size_t user_buf_size,
+ struct file *user_file,
+ const char __user *user_buf,
+ size_t user_buf_size,
loff_t *user_file_pos)
{
return (ssize_t) firegl_interrupt_write(user_file, user_buf, user_buf_size, user_file_pos);
@@ -530,7 +530,7 @@
* \param func function to be wrapped
* \return None */

-static void
+static void
firegl_smp_func_parameter_wrap(
void *func)
{
@@ -545,7 +545,7 @@
.write = firegl_interrupt_write_wrap
};

-__ke_proc_list_t firegl_proc_list[] =
+__ke_proc_list_t firegl_proc_list[] =
{
{ "name", drm_name_info_wrap, NULL},
{ "mem", drm_mem_info_wrap, NULL},
@@ -613,11 +613,11 @@
{
ent->proc_fops = (struct file_operations*)list->fops;
}
-
+
{
ent->data = (dev->pubdev.signature == FGL_DEVICE_SIGNATURE)? firegl_find_device(minor) : (dev);
}
-
+
list++;
}

@@ -663,7 +663,7 @@
{
remove_proc_entry("dri", NULL);
__KE_DEBUG("remove proc dri. \n");
- }
+ }
return 0;
}

@@ -701,12 +701,12 @@
{
int i;
int count = 0;
-
+
__KE_DEBUG("firegl_stub_getminor: name=\"%s\"\n", name);

- for( i = 0; i < FIREGL_STUB_MAXCARDS; i++ )
+ for( i = 0; i < FIREGL_STUB_MAXCARDS; i++ )
{
- if( !firegl_stub_list[i].fops )
+ if( !firegl_stub_list[i].fops )
{
firegl_stub_list[i].name = name;
firegl_stub_list[i].fops = fops;
@@ -733,16 +733,16 @@
if (minor < 0 || minor >= FIREGL_STUB_MAXCARDS)
{
return -1;
- }
+ }
firegl_proc_cleanup(minor, firegl_stub_root, firegl_stub_list[minor].dev_root, firegl_stub_list[minor].proclist);
firegl_stub_list[minor].name = NULL;
firegl_stub_list[minor].fops = NULL;
firegl_stub_list[minor].proclist = NULL;

- if( minor == (firegl_minors-1) )
+ if( minor == (firegl_minors-1) )
{
unregister_chrdev(DRM_MAJOR, "drm");
- }
+ }
return 0;
}

@@ -766,7 +766,7 @@
return -1;
} else if(err == -EBUSY) {

- // the registering of the module's device has failed
+ // the registering of the module's device has failed
// because there was already some other drm module loaded.
__KE_DEBUG("register_chrdev() failed with -EBUSY\n");
return -1;
@@ -798,7 +798,7 @@
/* Starting from 2.6.14, kernel has new struct defined for pm_message_t,
we have to handle this case separately.
2.6.11/12/13 kernels have pm_message_t defined as int and older kernels
- don't have pm_message_t defined.
+ don't have pm_message_t defined.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
static int fglrx_pci_suspend(struct pci_dev *pdev, pm_message_t pm_event)
@@ -834,9 +834,9 @@

if (!ret)
{
-
- // since privdev->pcidev is acquired in X server, use pdev
- // directly here to allow suspend/resume without X server start.
+
+ // since privdev->pcidev is acquired in X server, use pdev
+ // directly here to allow suspend/resume without X server start.
firegl_pci_save_state((__ke_pci_dev_t*)pdev, privdev);
pci_disable_device(pdev);
PMSG_EVENT(pdev->dev.power.power_state) = state;
@@ -888,13 +888,13 @@
// before pci_set_master!
firegl_pci_restore_state((__ke_pci_dev_t*)pdev, privdev);

- if (pci_enable_device(pdev))
+ if (pci_enable_device(pdev))
{
__KE_ERROR("Cannot enable PCI device.\n");
- }
+ }

pci_set_master(pdev);
-
+
firegl_cail_powerup(privdev);

if (PMSG_EVENT(pdev->dev.power.power_state) == PM_EVENT_SUSPEND)
@@ -905,7 +905,7 @@
return 0;
}

-static struct pci_driver fglrx_pci_driver =
+static struct pci_driver fglrx_pci_driver =
{
.name = "fglrx_pci",
.id_table = fglrx_pci_table,
@@ -961,10 +961,10 @@
{
pid = (struct pci_device_id *) &fglrx_pci_table[i];
pdev = NULL;
- while (( pdev = pci_get_subsys(pid->vendor,
- pid->device,
- PCI_ANY_ID,
- PCI_ANY_ID,
+ while (( pdev = pci_get_subsys(pid->vendor,
+ pid->device,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
pdev)) != NULL)
{
num_of_devices++;
@@ -974,7 +974,7 @@

if (firegl_init_device_heads(num_of_devices))
{
- return -ENOMEM;
+ return -ENOMEM;
}

for (i=0; fglrx_pci_table[i].vendor != 0; i++)
@@ -982,15 +982,15 @@
pid = (struct pci_device_id *) &fglrx_pci_table[i];

pdev = NULL;
- while (( pdev = pci_get_subsys(pid->vendor,
- pid->device,
- PCI_ANY_ID,
- PCI_ANY_ID,
+ while (( pdev = pci_get_subsys(pid->vendor,
+ pid->device,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
pdev)) != NULL)
{
if ((ret_code = firegl_get_dev(pubdev, pdev)))
{
- return ret_code;
+ return ret_code;
}

j++;
@@ -1023,7 +1023,7 @@
// init global vars that are in fact constants
__ke_HZ = HZ;

-#ifdef _KE_SERIAL_DEBUG
+#ifdef _KE_SERIAL_DEBUG
__ke_SetSerialPort();
#endif

@@ -1035,11 +1035,11 @@
return retcode;
}

-#ifdef FIREGL_CF_SUPPORT
+#ifdef FIREGL_CF_SUPPORT
adapter_chain_init();
cf_object_init();
-#endif
-
+#endif
+
// init DRM proc list
drm_proclist = kmalloc((DRM_PROC_ENTRIES + 1) * sizeof(__ke_proc_list_t), GFP_KERNEL);
if ( drm_proclist == NULL )
@@ -1127,7 +1127,7 @@
dev->pubdev.date,
firegl_minors);

-
+
#ifdef FIREGL_POWER_MANAGEMENT
if (pci_register_driver (&fglrx_pci_driver) < 0)
{
@@ -1157,12 +1157,12 @@
{
KCL_disable_pat();
__KE_INFO("Disable PAT\n");
- }
+ }
#endif // FIREGL_USWC_SUPPORT

for (i = 0; i < count; i++)
{
- if ( firegl_stub_unregister(i) )
+ if ( firegl_stub_unregister(i) )
{
__KE_ERROR("Cannot unload module on minor: %d\n", i);
}
@@ -1184,10 +1184,10 @@
dev->pubdev.patchlevel,
dev->pubdev.date);

-#ifdef FIREGL_CF_SUPPORT
+#ifdef FIREGL_CF_SUPPORT
cf_object_cleanup();
- adapter_chain_cleanup();
-#endif // FIREGL_CF_SUPPORT
+ adapter_chain_cleanup();
+#endif // FIREGL_CF_SUPPORT

firegl_private_cleanup (&dev->pubdev);

@@ -1265,18 +1265,18 @@
void ATI_API_CALL __ke_remove_wait_queue(__ke_wait_queue_head_t* queue_head, __ke_wait_queue_t* entry)
{
// current->state = TASK_RUNNING;
- remove_wait_queue((wait_queue_head_t*)(void *)queue_head,
+ remove_wait_queue((wait_queue_head_t*)(void *)queue_head,
(wait_queue_t*)(void *)entry);
}

void ATI_API_CALL __ke_init_waitqueue_head(__ke_wait_queue_head_t* queue_head)
{
- init_waitqueue_head((wait_queue_head_t*)(void *)queue_head);
+ init_waitqueue_head((wait_queue_head_t*)(void *)queue_head);
}

void ATI_API_CALL __ke_wait_event_interruptible(__ke_wait_queue_head_t* queue_head, int condition)
{
- wait_event_interruptible(*((wait_queue_head_t*)(void *)queue_head), condition);
+ wait_event_interruptible(*((wait_queue_head_t*)(void *)queue_head), condition);
}

void ATI_API_CALL __ke_poll_wait(struct file* filp, __ke_wait_queue_head_t* queue_head, __ke_poll_table* pt)
@@ -1287,13 +1287,13 @@
void ATI_API_CALL *__ke_asyncio_alloc_sema()
{
int i;
-
+
for(i=0; i<FIREGL_ASYNCIO_MAX_SEMA; i++)
{
if(fireglAsyncioSemaphoreUsed[i] != 1)
{
fireglAsyncioSemaphoreUsed[i] = 1;
-
+
return &(fireglAsyncioSemaphore[i]);
}
}
@@ -1303,7 +1303,7 @@
void ATI_API_CALL __ke_asyncio_free_sema(struct semaphore *pSema)
{
int i;
-
+
for(i=0; i<FIREGL_ASYNCIO_MAX_SEMA; i++)
{
if( &(fireglAsyncioSemaphore[i]) == pSema )
@@ -1317,15 +1317,15 @@
void ATI_API_CALL __ke_asyncio_init_sema(void)
{
int i;
-
+
for(i=0; i<FIREGL_ASYNCIO_MAX_SEMA; i++)
{
fireglAsyncioSemaphoreUsed[i] = 0;
}
-}
+}

-int ATI_API_CALL __ke_fasync_helper( int fd,
- struct file *filep,
+int ATI_API_CALL __ke_fasync_helper( int fd,
+ struct file *filep,
int mode,
struct fasync_struct **pasync_queue)
{
@@ -1403,33 +1403,33 @@
{
struct task_struct *p;
int process_terminated = 1;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
rcu_read_lock();
-#else
+#else
read_lock(&tasklist_lock);
-#endif
- p = find_task_by_pid( pid );
- if (p)
+#endif
+ p = find_task_by_vpid( pid );
+ if (p)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
- if (p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD)
+ if (p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD)
#else
- if (!(p->flags & PF_EXITING))
+ if (!(p->flags & PF_EXITING))
#endif
{
process_terminated = 0;
}
- }
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
rcu_read_unlock();
-#else
+#else
read_unlock(&tasklist_lock);
-#endif
+#endif
return process_terminated;
}

-/** /brief Call global OS kernel task/thread scheduler
+/** /brief Call global OS kernel task/thread scheduler
* /return Nonzero if a system call was awakened by a signal
*/
int ATI_API_CALL KCL_GetSignalStatus(void)
@@ -1483,15 +1483,15 @@
unblock_all_signals();
}

-#if defined(__i386__)
+#if defined(__i386__)
#ifndef __HAVE_ARCH_CMPXCHG
-static inline
-unsigned long __fgl_cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long prev;
- switch (size) {
- case 1:
+static inline
+unsigned long __fgl_cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
@@ -1602,7 +1602,7 @@
*/
KCL_TYPE_Pid ATI_API_CALL KCL_GetPid(void)
{
- return current->pid;
+ return current->pid;
}

/** /brief Return the current Thread Group ID
@@ -1610,7 +1610,7 @@
*/
KCL_TYPE_Pid ATI_API_CALL KCL_GetTgid(void)
{
- return current->tgid;
+ return current->tgid;
}

/** /brief Return the effective user ID
@@ -1706,7 +1706,7 @@
1
#else
0
-#endif
+#endif
};

/** /brief Check whether a kernel configuration parameter is defined
@@ -1792,13 +1792,13 @@
#if defined(__x86_64__) || defined(__ia64__)
void* ATI_API_CALL __ke_pci_alloc_consistent(__ke_pci_dev_t* dev, int size, void *dma_handle)
{
- return (pci_alloc_consistent( (struct pci_dev*)(void *)dev, size, dma_handle));
+ return (pci_alloc_consistent( (struct pci_dev*)(void *)dev, size, dma_handle));
}

void ATI_API_CALL __ke_pci_free_consistent(__ke_pci_dev_t* dev, int size, unsigned long cpu_addr,
unsigned int dma_handle)
{
- pci_free_consistent( (struct pci_dev*)(void *)dev, size, (void *)cpu_addr,
+ pci_free_consistent( (struct pci_dev*)(void *)dev, size, (void *)cpu_addr,
(unsigned long)dma_handle);
}
#endif // __ia64__
@@ -1822,7 +1822,7 @@

/** \brief This function maps OS independent error conditions to OS defined error codes
* \param errcode OS independent error condition code
- * \return OS kernel defined error code corresponding to the requested error condition
+ * \return OS kernel defined error code corresponding to the requested error condition
*/
int ATI_API_CALL KCL_GetErrorCode(KCL_ENUM_ErrorCode errcode)
{
@@ -1840,7 +1840,7 @@
int ATI_API_CALL firegl_get_user_ptr(u32 *src, void **dst)
{
unsigned long temp;
- int err = get_user(temp, src);
+ int err = get_user(temp, src);
*dst = (void*) temp;
return err;
}
@@ -2019,7 +2019,7 @@
*/
int ATI_API_CALL KCL_AtomicDecAndTest(KCL_TYPE_Atomic* v)
{
- return atomic_dec_and_test((atomic_t*)v);
+ return atomic_dec_and_test((atomic_t*)v);
}

/*****************************************************************************/
@@ -2394,7 +2394,7 @@
struct page *page = NULL;

page = vmalloc_to_page(vmalloc_addr);
- if(page == NULL)
+ if(page == NULL)
{
__KE_ERROR("__ke_vmalloc_to_addr: invalid page!");
return NULL;
@@ -2436,7 +2436,7 @@
retcode = do_munmap(current->mm,
addr,
len);
-#endif
+#endif
up_write(&current->mm->mmap_sem);
return retcode;
}
@@ -2480,10 +2480,10 @@
minus = !minus;
}
else
- {
+ {
ubase = base;
}
-
+
do_div(un, ubase);
return (minus? -un : un);
}
@@ -2513,7 +2513,7 @@
else
{
ubase = base;
- }
+ }

rem = do_div(un, ubase);
return (minus? -rem : rem);
@@ -2544,7 +2544,7 @@
vaddr = (void *) vmap(pages, count);
#else
#ifdef VM_MAP
- vaddr = (void *) vmap(pages, count, VM_MAP, PAGE_KERNEL);
+ vaddr = (void *) vmap(pages, count, VM_MAP, PAGE_KERNEL);
#else
vaddr = (void *) vmap(pages, count, 0, PAGE_KERNEL);
#endif
@@ -2600,7 +2600,7 @@
}
#endif // defined(VM_MAP) || defined(vunmap)

-/** \brief Reserve a memory page
+/** \brief Reserve a memory page
*
* \param pt Kernel logical address of the page
*
@@ -2612,7 +2612,7 @@
SetPageReserved(virt_to_page((unsigned long)pt));
}

-/** \brief Unreserve a memory page
+/** \brief Unreserve a memory page
*
* \param pt Kernel logical address of the page
*
@@ -2624,7 +2624,7 @@
ClearPageReserved(virt_to_page((unsigned long)pt));
}

-/** \brief Lock a memory page
+/** \brief Lock a memory page
*
* \param pt Kernel logical address of the page
*
@@ -2633,14 +2633,14 @@
*/
void ATI_API_CALL KCL_LockMemPage(void* pt)
{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
SetPageReserved(virt_to_page((unsigned long)pt));
#else
lock_page(virt_to_page((unsigned long)pt));
#endif
}

-/** \brief Unlock a memory page
+/** \brief Unlock a memory page
*
* \param pt Kernel logical address of the page
*
@@ -2649,7 +2649,7 @@
*/
void ATI_API_CALL KCL_UnlockMemPage(void* pt)
{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
ClearPageReserved(virt_to_page((unsigned long)pt));
#else
unlock_page(virt_to_page((unsigned long)pt));
@@ -2674,7 +2674,7 @@
return memory->vmptr;
}
#endif
-
+
void* ATI_API_CALL __ke_ioremap(unsigned long offset, unsigned long size)
{
return ioremap(offset, size);
@@ -2724,7 +2724,7 @@
{
/*Some kernel developer removed the export of symbol "flush_tlb_page" on 2.6.25 x86_64 SMP kernel.
Define a simple version here.*/
-#if defined(__x86_64__) && defined(__SMP__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25))
+#if defined(__x86_64__) && defined(__SMP__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25))
on_each_cpu(KCL_flush_tlb_one, &va, 1, 1);
#else
flush_tlb_page(vma, va);
@@ -2884,9 +2884,9 @@
static inline int ptep_clear_flush_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
int ret = 0;
-
+
DBG_ENTER("0x%08X, 0x%08X, 0x%08X->0x%08X", vma, addr, ptep, *ptep);
-
+
if (pte_dirty(*ptep))
{
#ifdef __x86_64__
@@ -2901,19 +2901,19 @@
{
pte_update(vma->vm_mm, addr, ptep);
}
-#endif
+#endif
}

DBG_TRACE("0x%08X->0x%08X", ptep, *ptep);
-
+
// Flush Translation Lookaside Buffers
if (ret)
{
KCL_flush_tlb_onepage(vma,addr);
}
-
+
DBG_LEAVE("%d", ret);
-
+
return ret;
}
#endif
@@ -3279,7 +3279,7 @@
return 0;
#else
return 1;
-#endif
+#endif
#else /* !CONFIG_MTRR */
return 0;
#endif /* !CONFIG_MTRR */
@@ -3305,7 +3305,7 @@

int ATI_API_CALL __ke_has_vmap(void)
{
-// We disable vmap for 2.4.x kernel to work around the big memory( > 4GB ) issue.
+// We disable vmap for 2.4.x kernel to work around the big memory( > 4GB ) issue.
#if defined(VM_MAP) || defined(vunmap)
return 1;
#else
@@ -3372,7 +3372,7 @@
{
struct pci_dev* dev = (struct pci_dev*)pcidev;
return PCI_FUNC(dev->devfn);
-}
+}

__ke_dma_addr_t ATI_API_CALL __ke_pci_map_single (__ke_pci_dev_t *pdev, void *buffer, __ke_size_t size, int direction)
{
@@ -3525,12 +3525,12 @@
return (int)(agpmem->page_count);
}

-void ATI_API_CALL __ke_agp_memory_get_memory(struct _agp_memory* agpmem,
+void ATI_API_CALL __ke_agp_memory_get_memory(struct _agp_memory* agpmem,
unsigned long **memory_ptr)
{
__KE_DEBUG("[%s] agpmem=0x%016lx agpmem->memory=0x%016lx [0]=0x%016x",
- __FUNCTION__,
- (unsigned long)agpmem,
+ __FUNCTION__,
+ (unsigned long)agpmem,
(unsigned long)agpmem->memory,
(agpmem->memory)[0]);

@@ -3539,18 +3539,9 @@

/*****************************************************************************/

-#ifndef NOPAGE_SIGBUS
-#define NOPAGE_SIGBUS 0
-#endif /* !NOPAGE_SIGBUS */
-
typedef struct page mem_map_t;
typedef mem_map_t *vm_nopage_ret_t;

-static __inline__ vm_nopage_ret_t do_vm_nopage(struct vm_area_struct* vma,
- unsigned long address)
-{
- return 0; /* Disallow mremap */
-}

#ifdef __AGP__BUILTIN__
#ifdef __ia64__
@@ -3575,19 +3566,20 @@
return page;
}
}
- return NOPAGE_SIGBUS; /* Disallow mremap */
+ return VM_FAULT_SIGBUS; /* Disallow mremap */
}

#endif /* __ia64__ */
#endif /* __AGP__BUILTIN__ */


-static __inline__ vm_nopage_ret_t do_vm_shm_nopage(struct vm_area_struct* vma,
- unsigned long address)
+static __inline__ int do_vm_shm_nopage(struct vm_area_struct* vma,
+ struct vm_fault *vmf)
{
pgd_t* pgd_p;
pmd_t* pmd_p;
pte_t pte;
+ unsigned long address = (unsigned long)vmf->virtual_address;
unsigned long vma_offset;
unsigned long pte_linear;
mem_map_t* pMmPage;
@@ -3616,7 +3608,7 @@
(unsigned long)__ke_vm_offset(vma));

if (address > vma->vm_end)
- return NOPAGE_SIGBUS; /* address is out of range */
+ return VM_FAULT_SIGBUS; /* address is out of range */

/* Calculate offset into VMA */
vma_offset = address - vma->vm_start;
@@ -3628,7 +3620,7 @@
pte_linear = firegl_get_addr_from_vm(vma);
if (!pte_linear)
{
- return NOPAGE_SIGBUS; /* bad address */
+ return VM_FAULT_SIGBUS; /* bad address */
}
pte_linear += vma_offset;

@@ -3656,7 +3648,9 @@

// __KE_DEBUG3("vm-address 0x%08lx => kernel-page-address 0x%p\n",
// address, page_address(pMmPage));
- return pMmPage;
+ vmf->page = pMmPage;
+
+ return 0;
}

/*
@@ -3665,8 +3659,10 @@
(which is one ore more pages in size)

*/
-static __inline__ vm_nopage_ret_t do_vm_dma_nopage(struct vm_area_struct* vma, unsigned long address)
+static __inline__ int do_vm_dma_nopage(struct vm_area_struct* vma,
+ struct vm_fault *vmf)
{
+ unsigned long address = (unsigned long) vmf->virtual_address;
unsigned long kaddr;
mem_map_t* pMmPage;

@@ -3682,7 +3678,7 @@
kaddr = firegl_get_addr_from_vm(vma);
if (!kaddr)
{
- return NOPAGE_SIGBUS; /* bad address */
+ return VM_FAULT_SIGBUS; /* bad address */
}
kaddr += (address - vma->vm_start);

@@ -3694,19 +3690,23 @@
// with drm_alloc_pages, which marks all pages as reserved. Reserved
// pages' usage count is not decremented by the kernel during unmap!!!
//
- // For kernel >= 2.6.15, We should reenable this, because the VM sub-system
- // will decrement the pages' usage count even for the pages marked as reserved
+ // For kernel >= 2.6.15, We should reenable this, because the VM sub-system
+ // will decrement the pages' usage count even for the pages marked as reserved
// - MC.
get_page(pMmPage); /* inc usage count of page */
#endif

__KE_DEBUG3("vm-address 0x%08lx => kernel-page-address 0x%p\n",
address, page_address(pMmPage));
- return pMmPage;
+ vmf->page = pMmPage;
+
+ return 0;
}

-static __inline__ vm_nopage_ret_t do_vm_kmap_nopage(struct vm_area_struct* vma, unsigned long address)
+static __inline__ int do_vm_kmap_nopage(struct vm_area_struct* vma,
+ struct vm_fault *vmf)
{
+ unsigned long address = (unsigned long) vmf->virtual_address;
unsigned long kaddr;
mem_map_t* pMmPage;

@@ -3716,13 +3716,14 @@
if ((pMmPage = (mem_map_t*) firegl_get_pagetable_page_from_vm(vma)))
{
get_page(pMmPage);
- return pMmPage;
+ vmf->page = pMmPage;
+ return 0;
}

kaddr = firegl_get_addr_from_vm(vma);
if (!kaddr)
{
- return NOPAGE_SIGBUS; /* bad address */
+ return VM_FAULT_SIGBUS; /* bad address */
}
kaddr += (address - vma->vm_start);

@@ -3735,50 +3736,52 @@

__KE_DEBUG3("vm-address 0x%08lx => kernel-page-address 0x%p\n", address, page_address(pMmPage));

- return pMmPage;
+ vmf->page = pMmPage;
+ return 0;
}

-/**
+/**
**
- ** This routine is intented to locate the page table through the
+ ** This routine is intented to locate the page table through the
** pagelist table created earlier in dev-> pcie
**/
-static __inline__ vm_nopage_ret_t do_vm_pcie_nopage(struct vm_area_struct* vma,
- unsigned long address)
+static __inline__ int do_vm_pcie_nopage(struct vm_area_struct* vma,
+ struct vm_fault *vmf)
{

+ unsigned long address = (unsigned long)vmf->virtual_address;
unsigned long vma_offset;
- unsigned long i;
+ unsigned long i;
mem_map_t* pMmPage;
struct firegl_pcie_mem* pciemem;
unsigned long* pagelist;
-
+
drm_device_t *dev = (drm_device_t *)firegl_get_dev_from_vm(vma);
if (dev == NULL)
{
__KE_ERROR("dev is NULL\n");
- return NOPAGE_SIGBUS;
+ return VM_FAULT_SIGBUS;
}

if (address > vma->vm_end)
{
__KE_ERROR("address out of range\n");
- return NOPAGE_SIGBUS; /* address is out of range */
+ return VM_FAULT_SIGBUS; /* address is out of range */
}
pciemem = firegl_get_pciemem_from_addr ( vma, address);
if (pciemem == NULL)
{
__KE_ERROR("No pciemem found! \n");
- return NOPAGE_SIGBUS;
- }
+ return VM_FAULT_SIGBUS;
+ }
pagelist = firegl_get_pagelist_from_vm(vma);

- if (pagelist == NULL)
+ if (pagelist == NULL)
{
__KE_ERROR("No pagelist! \n");
- return NOPAGE_SIGBUS;
+ return VM_FAULT_SIGBUS;
}
-
+
/** Find offset in vma */
vma_offset = address - vma->vm_start;
/** Which entry in the pagelist */
@@ -3790,15 +3793,17 @@
if (page_address(pMmPage) == 0x0)
{
__KE_ERROR("Invalid page address\n");
- return NOPAGE_SIGBUS;
+ return VM_FAULT_SIGBUS;
}
- return pMmPage;
+
+ vmf->page = pMmPage;
+ return 0;
}

-static __inline__ vm_nopage_ret_t do_vm_gart_nopage(struct vm_area_struct* vma,
- unsigned long address)
+static __inline__ int do_vm_gart_nopage(struct vm_area_struct* vma,
+ struct vm_fault *vmf)
{
-
+ unsigned long address = (unsigned long) vmf->virtual_address;
unsigned long page_addr;
unsigned long offset;
struct page *page;
@@ -3806,36 +3811,31 @@
if (address > vma->vm_end)
{
__KE_ERROR("Invalid virtual address\n");
- return NOPAGE_SIGBUS; /* Disallow mremap */
- }
+ return VM_FAULT_SIGBUS; /* Disallow mremap */
+ }

offset = address - vma->vm_start;
-#ifdef FIREGL_CF_SUPPORT
+#ifdef FIREGL_CF_SUPPORT
page_addr = mc_heap_get_page_addr(vma, offset);
#else
page_addr = firegl_cmmqs_get_pageaddr_from_vm(vma, offset);
-#endif
+#endif
if( !page_addr)
{
__KE_ERROR("Invalid page address\n");
- return NOPAGE_SIGBUS; /* Disallow mremap */
+ return VM_FAULT_SIGBUS; /* Disallow mremap */
}
page = virt_to_page(page_addr);
get_page(page);

- return page;
+ vmf->page = page;
+ return 0;
}

-
-
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
-
-static vm_nopage_ret_t vm_nopage(struct vm_area_struct* vma,
- unsigned long address,
- int *type)
+static int vm_nopage(struct vm_area_struct* vma,
+ struct vm_fault *vmf, int type)
{
- if (type) *type = VM_FAULT_MINOR;
- return do_vm_nopage(vma, address);
+ return VM_FAULT_SIGBUS;
}

#ifdef __AGP__BUILTIN__
@@ -3867,12 +3867,10 @@
(which is one or more pages in size)

*/
-static vm_nopage_ret_t vm_shm_nopage(struct vm_area_struct* vma,
- unsigned long address,
- int *type)
+static int vm_shm_nopage(struct vm_area_struct* vma,
+ struct vm_fault *vmf, int type)
{
- if (type) *type = VM_FAULT_MINOR;
- return do_vm_shm_nopage(vma, address);
+ return do_vm_shm_nopage(vma, vmf);
}

/*
@@ -3881,115 +3879,29 @@
(which is one ore more pages in size)

*/
-static vm_nopage_ret_t vm_dma_nopage(struct vm_area_struct* vma,
- unsigned long address,
- int *type)
+static int vm_dma_nopage(struct vm_area_struct* vma,
+ struct vm_fault *vmf, int type)
{
- if (type) *type = VM_FAULT_MINOR;
- return do_vm_dma_nopage(vma, address);
-}
-
-static vm_nopage_ret_t vm_kmap_nopage(struct vm_area_struct* vma,
- unsigned long address,
- int *type)
-{
- if (type) *type = VM_FAULT_MINOR;
- return do_vm_kmap_nopage(vma, address);
-}
-
-static vm_nopage_ret_t vm_pcie_nopage(struct vm_area_struct* vma,
- unsigned long address,
- int *type)
-{
- return do_vm_pcie_nopage(vma, address);
+ return do_vm_dma_nopage(vma, vmf);
}

-static vm_nopage_ret_t vm_gart_nopage(struct vm_area_struct* vma,
- unsigned long address,
- int *type)
+static int vm_kmap_nopage(struct vm_area_struct* vma,
+ struct vm_fault *vmf, int type)
{
- return do_vm_gart_nopage(vma, address);
+ return do_vm_kmap_nopage(vma, vmf);
}

-#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
-
-static vm_nopage_ret_t vm_nopage(struct vm_area_struct* vma,
- unsigned long address,
- int write_access)
+static int vm_pcie_nopage(struct vm_area_struct* vma,
+ struct vm_fault *vmf, int type)
{
- return do_vm_nopage(vma, address);
+ return do_vm_pcie_nopage(vma, vmf);
}

-#ifdef __AGP__BUILTIN__
-#ifdef __ia64__
-
-
-static vm_nopage_ret_t vm_cant_nopage(struct vm_area_struct* vma,
- unsigned long address,
- int write_access)
+static int vm_gart_nopage(struct vm_area_struct* vma,
+ struct vm_fault *vmf, int type)
{
- return do_vm_cant_nopage(vma, address);
+ return do_vm_gart_nopage(vma, vmf);
}
-#endif /* __ia64__ */
-#endif /* __AGP__BUILTIN__ */
-
-/*
-
- This function is called when a page of a mmap()'ed area is not currently
- visible in the specified VMA.
- Return value is the associated physical address for the requested page.
- (If not implemented, then the kernel default routine would allocate a new,
- zeroed page for servicing us)
-
- Possible errors: SIGBUS, OutOfMem
-
- This routine is intended to remap addresses of SHM SAREA
- (which is one or more pages in size)
-
- */
-static vm_nopage_ret_t vm_shm_nopage(struct vm_area_struct* vma,
- unsigned long address,
- int write_access)
-{
- return do_vm_shm_nopage(vma, address);
-}
-
-/*
-
- This routine is intended to remap addresses of a OpenGL context
- (which is one ore more pages in size)
-
-*/
-static vm_nopage_ret_t vm_dma_nopage(struct vm_area_struct* vma,
- unsigned long address,
- int write_access)
-{
- return do_vm_dma_nopage(vma, address);
-}
-
-static vm_nopage_ret_t vm_kmap_nopage(struct vm_area_struct* vma,
- unsigned long address,
- int write_access)
-{
- return do_vm_kmap_nopage(vma, address);
-}
-
-static vm_nopage_ret_t vm_pcie_nopage(struct vm_area_struct* vma,
- unsigned long address,
- int write_access)
-{
- return do_vm_pcie_nopage(vma, address);
-}
-
-static vm_nopage_ret_t vm_gart_nopage(struct vm_area_struct* vma,
- unsigned long address,
- int *type)
-{
- return do_vm_gart_nopage(vma, address);
-}
-
-
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) */

void* ATI_API_CALL __ke_vma_file_priv(struct vm_area_struct* vma)
{
@@ -4040,7 +3952,7 @@
*(buf + i++) = pgprot & _PAGE_DIRTY ? 'd' : '-';
*(buf + i++) = pgprot & _PAGE_PSE ? 'm' : 'k';
*(buf + i++) = pgprot & _PAGE_GLOBAL ? 'g' : 'l';
-#endif /* __i386__ */
+#endif /* __i386__ */
*(buf + i++) = 0;

return buf;
@@ -4069,9 +3981,9 @@
return buf;
}

-char* ATI_API_CALL __ke_vm_phys_addr_str(struct vm_area_struct* vma,
- char* buf,
- unsigned long virtual_addr,
+char* ATI_API_CALL __ke_vm_phys_addr_str(struct vm_area_struct* vma,
+ char* buf,
+ unsigned long virtual_addr,
__ke_dma_addr_t* phys_address)
{
pgd_t* pgd_p;
@@ -4174,7 +4086,7 @@

static struct vm_operations_struct vm_ops =
{
- nopage: ip_vm_nopage,
+ fault: ip_vm_nopage,
open: ip_drm_vm_open,
close: ip_drm_vm_close,
};
@@ -4183,7 +4095,7 @@
#ifdef __ia64__
static struct vm_operations_struct vm_cant_ops =
{
- nopage: ip_vm_cant_nopage,
+ fault: ip_vm_cant_nopage,
open: ip_drm_vm_open,
close: ip_drm_vm_close,
};
@@ -4192,42 +4104,42 @@

static struct vm_operations_struct vm_shm_ops =
{
- nopage: ip_vm_shm_nopage,
+ fault: ip_vm_shm_nopage,
open: ip_drm_vm_open,
close: ip_drm_vm_close,
};

static struct vm_operations_struct vm_pci_bq_ops =
{
- nopage: ip_vm_dma_nopage,
+ fault: ip_vm_dma_nopage,
open: ip_drm_vm_open,
close: ip_drm_vm_close,
};

static struct vm_operations_struct vm_ctx_ops =
{
- nopage: ip_vm_dma_nopage,
+ fault: ip_vm_dma_nopage,
open: ip_drm_vm_open,
close: ip_drm_vm_close,
};

static struct vm_operations_struct vm_pcie_ops =
{
- nopage: ip_vm_pcie_nopage,
+ fault: ip_vm_pcie_nopage,
open: ip_drm_vm_open,
close: ip_drm_vm_close,
};

static struct vm_operations_struct vm_kmap_ops =
{
- nopage: ip_vm_kmap_nopage,
+ fault: ip_vm_kmap_nopage,
open: ip_drm_vm_open,
close: ip_drm_vm_close,
};

static struct vm_operations_struct vm_gart_ops =
{
- nopage: ip_vm_gart_nopage,
+ fault: ip_vm_gart_nopage,
open: ip_drm_vm_open,
close: ip_drm_vm_close,
};
@@ -4236,14 +4148,14 @@
#ifndef __ia64__
static struct vm_operations_struct vm_agp_bq_ops =
{
- nopage: ip_vm_nopage,
+ fault: ip_vm_nopage,
open: ip_drm_vm_open,
close: ip_drm_vm_close,
};
#else
static struct vm_operations_struct vm_cant_agp_bq_ops =
{
- nopage: ip_vm_cant_nopage,
+ fault: ip_vm_cant_nopage,
open: ip_drm_vm_open,
close: ip_drm_vm_close,
};
@@ -4298,7 +4210,7 @@
vma->vm_ops = &vm_ops;
break;

-#ifdef FIREGL_USWC_SUPPORT
+#ifdef FIREGL_USWC_SUPPORT
case __KE_ADPT_REG:
{
#ifdef __ia64__
@@ -4318,7 +4230,7 @@
}
else
{
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
}
}
#endif /* __i386__ */
@@ -4337,7 +4249,7 @@
vma->vm_ops = &vm_ops;
}
break;
-#endif
+#endif

case __KE_SHM:
vma->vm_flags |= VM_SHM | VM_RESERVED; /* Don't swap */
@@ -4371,7 +4283,7 @@

#ifdef __AGP__BUILTIN__
case __KE_AGP:
- // if(dev->agp->cant_use_aperture == 1)
+ // if(dev->agp->cant_use_aperture == 1)
#ifdef __ia64__
{
/*
@@ -4395,9 +4307,9 @@
if( firegl_pat_enabled )
{
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- }
+ }
}
-#endif
+#endif

if (REMAP_PAGE_RANGE(vma,offset))
{
@@ -4414,8 +4326,8 @@
#endif
break;
case __KE_AGP_BQS:
- // if(dev->agp->cant_use_aperture == 1)
-#ifdef __ia64__
+ // if(dev->agp->cant_use_aperture == 1)
+#ifdef __ia64__
{
/*
* On some systems we can't talk to bus dma address from
@@ -4438,9 +4350,9 @@
if( firegl_pat_enabled )
{
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- }
+ }
}
-#endif
+#endif

if (REMAP_PAGE_RANGE(vma,offset))
{
@@ -4469,15 +4381,15 @@
break;

case __KE_GART_USWC:
-#ifdef FIREGL_USWC_SUPPORT
+#ifdef FIREGL_USWC_SUPPORT
if (boot_cpu_data.x86 > 3)
{
if( firegl_pat_enabled )
{
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- }
+ }
}
-#endif
+#endif
// fall through
case __KE_GART_CACHEABLE:
vma->vm_flags |= VM_RESERVED;
@@ -4527,7 +4439,7 @@
#define FIREGL_agp_backend_release _X(agp_backend_release)
#define FIREGL_agp_memory _X(agp_memory)

-unsigned int __ke_firegl_agpgart_inuse = AGPGART_INUSE_NONE;
+unsigned int __ke_firegl_agpgart_inuse = AGPGART_INUSE_NONE;

#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
/*****************************************************************************/
@@ -4642,7 +4554,7 @@
int ATI_API_CALL __ke_agpgart_available(__ke_pci_dev_t *pcidev)
{
drm_agp_module_stub = &drm_agp;
- __ke_firegl_agpgart_inuse = KERNEL26_AGPGART_INUSE;
+ __ke_firegl_agpgart_inuse = KERNEL26_AGPGART_INUSE;
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
firegl_pci_device = (struct pci_dev*)(void*)pcidev;
@@ -4778,7 +4690,7 @@

if (AGP_AVAILABLE(copy_info))
{
- struct agp_kern_info kern;
+ struct agp_kern_info kern;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
if (firegl_agp_bridge == NULL)
@@ -4876,7 +4788,7 @@

int ATI_API_CALL __ke_read_agp_caps_registers(__ke_pci_dev_t* dev, unsigned int *caps)
{
- return -EINVAL;
+ return -EINVAL;
}

int ATI_API_CALL __ke_agp_acquire(__ke_pci_dev_t* dev)
@@ -4913,9 +4825,9 @@

/** \brief Runs a function on all other CPUs
* \param func_to_call function to be called on all other cpus
- * \return None
+ * \return None
*/
-void ATI_API_CALL KCL_CallFuncOnOtherCpus(firegl_void_routine_t func_to_call)
+void ATI_API_CALL KCL_CallFuncOnOtherCpus(firegl_void_routine_t func_to_call)
{
#ifdef CONFIG_SMP
smp_call_function( firegl_smp_func_parameter_wrap, (void*)func_to_call, 0, 1 );
@@ -4926,7 +4838,7 @@

int ATI_API_CALL KCL_is_pat_enabled(void)
{
- return firegl_pat_enabled;
+ return firegl_pat_enabled;
}

static int ATI_API_CALL KCL_has_pat(void)
@@ -4942,7 +4854,7 @@
{
unsigned long cr0=0, cr4=0;
unsigned long flags;
-
+
local_irq_save(flags);
cr0 = read_cr0() | 0x40000000;
write_cr0(cr0);
@@ -4973,7 +4885,7 @@
{
unsigned long cr0 = 0, cr4 = 0;
unsigned long flags;
-
+
local_irq_save(flags);
cr0 = read_cr0() | 0x40000000;
write_cr0(cr0);
@@ -4985,7 +4897,7 @@
write_cr4(cr4 & ~X86_CR4_PGE);
}
__flush_tlb();
-
+
wrmsr(MSR_IA32_CR_PAT, KCL_orig_pat[0], KCL_orig_pat[1]);

cr0 = read_cr0();
@@ -5008,7 +4920,7 @@
__KE_INFO("USWC is disabled in module parameters\n");
return 0;
}
-
+
if (!KCL_has_pat())
{
return 0;
@@ -5018,13 +4930,13 @@

for ( i = 0; i < 2; i++ )
{
- for (j = 0; j < 4; j ++)
+ for (j = 0; j < 4; j ++)
{
if (((KCL_orig_pat[i] >> (j * 8)) & 0xFF) == 1)
- {
+ {
__KE_ERROR("Pat entry %d is already configured\n", (i+1)*(j+1));
return 0;
- }
+ }
}
}

@@ -5096,7 +5008,7 @@
} kasContext_t;

/** \brief KAS context */
-static kasContext_t kasContext;
+static kasContext_t kasContext;

/** \brief Kernel support required to enable KAS */
#if defined(cmpxchg) && \
@@ -5398,7 +5310,7 @@
DBG_TRACE("Interrupt handler returned 0x%08X", ret);

kasSetExecutionLevel(orig_level);
- spin_unlock(&kasContext.lock_ih);
+ spin_unlock(&kasContext.lock_ih);

DBG_LEAVE("%d", ret);
return ret;
@@ -5673,7 +5585,7 @@
#endif
spinlock_t lock; /* OS spinlock object protecting the cache */
unsigned int routine_type; /* Type of routine the cache might be accessed from */
- char name[14]; /* Cache object name (kernel 2.4 restricts its length to 19 chars) */
+ char name[24]; /* Cache object name (kernel 2.4 restricts its length to 19 chars) */
} kasSlabCache_t;

/** \brief Return Slab Cache object size
@@ -5711,7 +5623,8 @@

slabcache_obj->routine_type = access_type;
spin_lock_init(&(slabcache_obj->lock));
- sprintf(slabcache_obj->name, "kas(%08lX)",(unsigned long)slabcache_obj);
+ snprintf(slabcache_obj->name, sizeof(slabcache_obj->name),
+ "kas(%p)", slabcache_obj);

DBG_TRACE("creating slab object '%s'", slabcache_obj->name);

@@ -6627,7 +6540,7 @@

kas_xadd(puiDestination, iAdd, ret, "l");

- return ret + iAdd;
+ return ret + iAdd;
#else
return 0xDEADC0DE; /* To make compiler happy */
#endif
@@ -6710,7 +6623,7 @@
#ifdef FIREGL_CF_SUPPORT

void *ATI_API_CALL KCL_lock_init()
-{
+{
spinlock_t *lock;

lock = kmalloc(sizeof(*lock), GFP_KERNEL);
@@ -6722,7 +6635,7 @@
}

void ATI_API_CALL KCL_lock_deinit(void *plock)
-{
+{
if (plock == NULL)
{
__KE_ERROR("plock is NULL\n");
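#
# Annotation, not part of the original patch: the firegl_public.h hunks below
# update the page-table walking macros to return VM_FAULT_SIGBUS rather than
# a cast of NOPAGE_SIGBUS, matching the fault-handler conversion in
# firegl_public.c above.
#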
diff -Nur -x '*.orig' -x '*~' fglrx-installer-8.512/lib/modules/fglrx/build_mod/firegl_public.h fglrx-installer-8.512.new/lib/modules/fglrx/build_mod/firegl_public.h
--- fglrx-installer-8.512/lib/modules/fglrx/build_mod/firegl_public.h 2008-07-21 14:32:08.000000000 -0400
+++ fglrx-installer-8.512.new/lib/modules/fglrx/build_mod/firegl_public.h 2008-07-21 14:32:14.000000000 -0400
@@ -78,7 +78,7 @@
if (!pgd_present(*(pgd_p))) \
{ \
__KE_ERROR("FATAL ERROR: User queue buffer not present! (pgd)\n"); \
- return (unsigned long)NOPAGE_SIGBUS; /* Something bad happened; generate SIGBUS */ \
+ return VM_FAULT_SIGBUS; /* Something bad happened; generate SIGBUS */ \
/* alternatively we could generate a NOPAGE_OOM "out of memory" */ \
} \
} while(0)
@@ -91,7 +91,7 @@
if (!pud_present(*(pud_p))) \
{ \
__KE_ERROR("FATAL ERROR: User queue buffer not present! (pud)\n"); \
- return (unsigned long)NOPAGE_SIGBUS; /* Something bad happened; generate SIGBUS */ \
+ return VM_FAULT_SIGBUS; /* Something bad happened; generate SIGBUS */ \
/* alternatively we could generate a NOPAGE_OOM "out of memory" */ \
} \
pmd_p = pmd_offset(pud_p, pte_linear); \
@@ -111,7 +111,7 @@
if (!pmd_present(*(pmd_p))) \
{ \
__KE_ERROR("FATAL ERROR: User queue buffer not present! (pmd)\n"); \
- return (unsigned long)NOPAGE_SIGBUS; /* Something bad happened; generate SIGBUS */ \
+ return VM_FAULT_SIGBUS; /* Something bad happened; generate SIGBUS */ \
/* alternatively we could generate a NOPAGE_OOM "out of memory" */ \
} \
} while(0)
@@ -157,7 +157,7 @@
if (!pte_present(pte)) \
{ \
__KE_ERROR("FATAL ERROR: User queue buffer not present! (pte)\n"); \
- return (unsigned long)NOPAGE_SIGBUS; /* Something bad happened; generate SIGBUS */ \
+ return VM_FAULT_SIGBUS; /* Something bad happened; generate SIGBUS */ \
/* alternatively we could generate a NOPAGE_OOM "out of memory" */ \
} \
} while(0)