author     Martin Schlemmer <azarah@gentoo.org>  2002-12-23 02:31:05 +0000
committer  Martin Schlemmer <azarah@gentoo.org>  2002-12-23 02:31:05 +0000
commit     739610b9bd68d8b85b6c50187c799506cd2b1a59 (patch)
tree       ddb2c68dc19acca51784d4ac57c58d72786f95db  /media-video/nvidia-kernel
parent     Removed sparc64 keyword (diff)
add tasklet patch
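
For context, the patch below moves the driver's deferred interrupt work from the old tq_struct/queue_task/mark_bh bottom halves to one tasklet per device, which is what 2.5 kernels require. The following is a minimal sketch of that 2.4/2.5-era tasklet interface, not part of the patch itself; the names my_dev, my_bh() and the surrounding functions are hypothetical.

/*
 * Sketch of the tasklet API the patch switches to.  A tasklet is a
 * softirq-time callback; two instances of the same tasklet never run
 * concurrently, which is why one per device is enough here.
 */
#include <linux/interrupt.h>   /* struct tasklet_struct, tasklet_*() */

struct my_dev {
    struct tasklet_struct tl;  /* one tasklet per device, as in the patch */
};

/* bottom half: runs in softirq context after the hard IRQ returns */
static void my_bh(unsigned long data)
{
    struct my_dev *dev = (struct my_dev *) data;
    /* ... deferred interrupt work for this device ... */
}

static void my_dev_open(struct my_dev *dev)
{
    /* tasklet_init() sets func/data and leaves the tasklet enabled;
     * the patch instead pre-disables it at module init and calls
     * tasklet_enable() once func/data are filled in at open time. */
    tasklet_init(&dev->tl, my_bh, (unsigned long) dev);
}

/* interrupt handler tail: replaces queue_task() + mark_bh(IMMEDIATE_BH) */
static void my_isr_tail(struct my_dev *dev)
{
    tasklet_schedule(&dev->tl);
}

static void my_dev_close(struct my_dev *dev)
{
    tasklet_disable(&dev->tl); /* returns only when no instance is running */
    tasklet_kill(&dev->tl);    /* drop any still-pending schedule */
}
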
Diffstat (limited to 'media-video/nvidia-kernel')
-rw-r--r--  media-video/nvidia-kernel/ChangeLog                                 |    8
-rw-r--r--  media-video/nvidia-kernel/files/NVIDIA_kernel-1.0-4191-2.5-tl.diff  | 1264
-rw-r--r--  media-video/nvidia-kernel/nvidia-kernel-1.0.4191-r1.ebuild          |   25
3 files changed, 1280 insertions(+), 17 deletions(-)
diff --git a/media-video/nvidia-kernel/ChangeLog b/media-video/nvidia-kernel/ChangeLog
index a7da7d1c51b6..888a0729e3ec 100644
--- a/media-video/nvidia-kernel/ChangeLog
+++ b/media-video/nvidia-kernel/ChangeLog
@@ -1,6 +1,10 @@
# ChangeLog for media-video/nvidia-kernel
# Copyright 2002 Gentoo Technologies, Inc.; Distributed under the GPL
-# $Header: /var/cvsroot/gentoo-x86/media-video/nvidia-kernel/ChangeLog,v 1.16 2002/12/18 21:15:07 styx Exp $
+# $Header: /var/cvsroot/gentoo-x86/media-video/nvidia-kernel/ChangeLog,v 1.17 2002/12/23 02:31:05 azarah Exp $
+
+ 23 Dec 2002; Martin Schlemmer <azarah@gentoo.org> nvidia-kernel-1.0.4191-r1.ebuild :
+
+ Add tasklet patch for 2.5 kernels in again.
*nvidia-kernel-1.0.4191-1 (18 Dec 2002)
@@ -11,7 +15,7 @@
well. It's all #ifdef'ed, so it shouldn't interfere with other AA kernels.
Bumping anyway, per vapier's instructions ;)
- 14 Dec 2002; Martin Schlemmer <azarah@gentoo.org> nvidia-kernel-1.0.4191.ebuild:
+ 14 Dec 2002; Martin Schlemmer <azarah@gentoo.org> nvidia-kernel-1.0.4191.ebuild :
As it seems this version need MTRR support for most chipsets, die if its not
detected with a suited message ... This should close bug #12015.
diff --git a/media-video/nvidia-kernel/files/NVIDIA_kernel-1.0-4191-2.5-tl.diff b/media-video/nvidia-kernel/files/NVIDIA_kernel-1.0-4191-2.5-tl.diff
new file mode 100644
index 000000000000..336478d39433
--- /dev/null
+++ b/media-video/nvidia-kernel/files/NVIDIA_kernel-1.0-4191-2.5-tl.diff
@@ -0,0 +1,1264 @@
+diff -ru NVIDIA_kernel-1.0-4191/Makefile NVIDIA_kernel-1.0-4191-2.5-tl/Makefile
+--- NVIDIA_kernel-1.0-4191/Makefile 2002-12-09 21:27:15.000000000 +0100
++++ NVIDIA_kernel-1.0-4191-2.5-tl/Makefile 2002-12-17 19:58:32.000000000 +0100
+@@ -2,7 +2,7 @@
+ # Generated on 'builder3.nvidia.com' on Mon Dec 9 11:49:03 PST 2002
+
+ LINUX_MODULE=nv-linux.o
+-DEFINES=-D__KERNEL__ -DMODULE -D_LOOSE_KERNEL_NAMES -DNTRM -D_GNU_SOURCE -D_LOOSE_KERNEL_NAMES -D__KERNEL__ -DMODULE -DNV_MAJOR_VERSION=1 -DNV_MINOR_VERSION=0 -DNV_PATCHLEVEL=4191 -DNV_UNIX -DNV_LINUX -DNV_INT64_OK -DNVCPU_X86
++DEFINES=-D__KERNEL__ -DMODULE -D_LOOSE_KERNEL_NAMES -DKBUILD_MODNAME="nvidia" -DNTRM -D_GNU_SOURCE -D_LOOSE_KERNEL_NAMES -D__KERNEL__ -DMODULE -DNV_MAJOR_VERSION=1 -DNV_MINOR_VERSION=0 -DNV_PATCHLEVEL=4191 -DNV_UNIX -DNV_LINUX -DNV_INT64_OK -DNVCPU_X86
+ INCLUDES=-I.
+
+ OBJECTS=nv.o os-agp.o os-interface.o os-registry.o
+@@ -22,8 +22,10 @@
+ # this is slightly more brain-dead, but works..
+ ifeq ($(shell if test -d $(KERNDIR)/build; then echo yes; fi),yes)
+ KERNINC=$(KERNDIR)/build/include
++MACHINC=$(KERNDIR)/build/arch/i386/mach-generic
+ else
+ KERNINC=/usr/src/linux/include
++MACHINC=/usr/src/linux/arch/i386/mach-generic
+ endif
+
+ ifeq ($(shell if test -d $(KERNDIR)/kernel; then echo yes; fi),yes)
+@@ -60,6 +62,7 @@
+ INCLUDES += -I$(SYSINCLUDE)
+ else
+ INCLUDES += -I$(KERNINC)
++INCLUDES += -I$(MACHINC)
+ endif
+
+ DEFINES+=$(EXTRA_DEFINES)
+diff -ru NVIDIA_kernel-1.0-4191/nv-linux.h NVIDIA_kernel-1.0-4191-2.5-tl/nv-linux.h
+--- NVIDIA_kernel-1.0-4191/nv-linux.h 2002-12-09 21:27:15.000000000 +0100
++++ NVIDIA_kernel-1.0-4191-2.5-tl/nv-linux.h 2002-12-17 19:59:14.000000000 +0100
+@@ -28,19 +28,16 @@
+ #include <linux/module.h>
+ #include <linux/version.h>
+
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 12)
+-# error This driver does not support 2.2.11 or earlier kernels!
+-#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
+-# define KERNEL_2_2
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
++# error This driver does not support 2.2.x kernels!
+ #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
+-# error This driver does not support 2.3.x development kernels!
++# error This driver does not support 2.3.x kernels!
+ #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
+ # define KERNEL_2_4
+ #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+-# error This driver does not support 2.5.x development kernels!
+ # define KERNEL_2_5
+ #else
+-# error This driver does not support 2.6.x or newer kernels!
++# error This driver does not support your kernel!
+ #endif
+
+ #if defined (__ia64)
+@@ -58,7 +55,6 @@
+ #include <linux/list.h> /* circular linked list */
+ #include <linux/stddef.h> /* NULL, offsetof */
+ #include <linux/wait.h> /* wait queues */
+-#include <linux/tqueue.h> /* struct tq_struct */
+
+ #include <linux/slab.h> /* kmalloc, kfree, etc */
+ #include <linux/vmalloc.h> /* vmalloc, vfree, etc */
+@@ -66,9 +62,15 @@
+ #include <linux/poll.h> /* poll_wait */
+ #include <linux/delay.h> /* mdelay, udelay */
+
++#ifdef KERNEL_2_5
++#include <linux/sched.h> /* suser(), capable() replacement */
++#include <linux/smp_lock.h> /* kernel_locked */
++#include <asm/kmap_types.h> /* page table entry lookup */
++#endif
++
+ #include <linux/pci.h> /* pci_find_class, etc */
+ #include <linux/wrapper.h> /* mem_map_reserve */
+-#include <linux/interrupt.h> /* mark_bh, init_bh, remove_bh */
++#include <linux/interrupt.h> /* tasklets, interrupt helpers */
+ #include <linux/timer.h>
+
+ #include <asm/system.h> /* cli, sli, save_flags */
+@@ -77,14 +79,9 @@
+ #include <asm/page.h> /* PAGE_OFFSET */
+ #include <asm/pgtable.h> /* pte bit definitions */
+
+-#if !defined (KERNEL_2_2)
+ #include <linux/spinlock.h>
+ #include <asm/semaphore.h>
+ #include <linux/highmem.h>
+-#else
+-#include <asm/spinlock.h>
+-#include <asm/semaphore.h>
+-#endif
+
+ #ifdef CONFIG_PROC_FS
+ #include <linux/proc_fs.h>
+@@ -123,28 +120,58 @@
+ for (pos = (head)->next; pos != (head); pos = (pos)->next)
+ #endif
+
+-#if !defined (KERNEL_2_2)
+-# define LINUX_VMA_OFFS(vma) (((vma)->vm_pgoff) << PAGE_SHIFT)
+-# define GET_MODULE_SYMBOL(mod,sym) (const void *) inter_module_get(sym)
+-# define PUT_MODULE_SYMBOL(sym) inter_module_put((char *) sym)
+-# define GET_MAP_NR(phys_page) virt_to_page(__va(phys_page))
+-# define MEM_MAP_READ_COUNT(map_nr) (atomic_read(&(map_nr)->count))
+-# define MEM_MAP_INC_COUNT(map_nr) (atomic_inc(&(map_nr)->count))
+-# define MEM_MAP_DEC_COUNT(map_nr) (atomic_dec(&(map_nr)->count))
+-# define GET_EVENT_QUEUE(nv) ((struct __wait_queue_head *) ((nv)->event_queue))
+-# define VMA_PRIVATE(vma) ((vma)->vm_private_data)
++#define LINUX_VMA_OFFS(vma) (((vma)->vm_pgoff) << PAGE_SHIFT)
++#define GET_MODULE_SYMBOL(mod,sym) (const void *) inter_module_get(sym)
++#define PUT_MODULE_SYMBOL(sym) inter_module_put((char *) sym)
++#define GET_MAP_NR(phys_page) virt_to_page(__va(phys_page))
++#define MEM_MAP_READ_COUNT(map_nr) (atomic_read(&(map_nr)->count))
++#define MEM_MAP_INC_COUNT(map_nr) (atomic_inc(&(map_nr)->count))
++#define MEM_MAP_DEC_COUNT(map_nr) (atomic_dec(&(map_nr)->count))
++#define VMA_PRIVATE(vma) ((vma)->vm_private_data)
++
++#ifdef KERNEL_2_5
++# define NV_DEVICE_NUMBER(_minor) ((kdev_val(_minor)) & 0x0f)
++# define NV_IS_CONTROL_DEVICE(_minor) (((kdev_val(_minor)) & 0xff) == 0xff)
++# define SUSER() capable(CAP_SYS_ADMIN)
++# define REMAP_PAGE_RANGE(a...) remap_page_range(vma, ## a)
++# define CLI() local_irq_disable()
++# define SAVE_FLAGS(x) local_save_flags(x)
++# define RESTORE_FLAGS(x) local_irq_restore(x)
++# define MAY_SLEEP() (!in_interrupt() && !in_atomic())
++# define SMP_NUM_CPUS num_online_cpus()
+ #else
+-# define in_irq() (local_irq_count[smp_processor_id()])
+-# define LINUX_VMA_OFFS(vma) ((vma)->vm_offset)
+-# define GET_MODULE_SYMBOL(mod, sym) (void*) get_module_symbol((mod), (sym))
+-# define PUT_MODULE_SYMBOL(sym)
+-# define GET_MAP_NR(phys_page) MAP_NR(__va(phys_page))
+-# define MEM_MAP_READ_COUNT(map_nr) (atomic_read(&mem_map[map_nr].count))
+-# define MEM_MAP_INC_COUNT(map_nr) (atomic_inc(&mem_map[map_nr].count))
+-# define MEM_MAP_DEC_COUNT(map_nr) (atomic_dec(&mem_map[map_nr].count))
+-# define GET_EVENT_QUEUE(nv) ((struct wait_queue **) &((nv)->event_queue))
+-# define VMA_PRIVATE(vma) ((void*)((vma)->vm_pte))
+-#endif
++# define NV_DEVICE_NUMBER(_minor) ((_minor) & 0x0f)
++# define NV_IS_CONTROL_DEVICE(_minor) (((_minor) & 0xff) == 0xff)
++# define SUSER() suser()
++# define REMAP_PAGE_RANGE(a...) remap_page_range(## a)
++# define CLI() cli()
++# define SAVE_FLAGS(x) save_flags(x)
++# define RESTORE_FLAGS(x) restore_flags(x)
++# define MAY_SLEEP() (!in_interrupt())
++# define SMP_NUM_CPUS smp_num_cpus
++#endif
++
++#ifdef pte_offset_map /* rmap-vm or 2.5 */
++#define PTE_OFFSET(pmd, address, pte) \
++ { \
++ pte_t *pPTE; \
++ pPTE = pte_offset_map(pmd, address); \
++ pte = *pPTE; \
++ pte_unmap(pPTE); \
++ }
++#else
++#ifdef pte_offset_atomic /* aa-vm */
++#define PTE_OFFSET(pmd, address, pte) \
++ { \
++ pte_t *pPTE; \
++ pPTE = pte_offset_atomic(pmd, address); \
++ pte = *pPTE; \
++ pte_kunmap(pPTE); \
++ }
++#else /* !pte_offset_atomic */
++#define PTE_OFFSET(pmd, address, pte) (pte = *pte_offset(pmd, address))
++#endif /* pte_offset_atomic */
++#endif /* pte_offset_map */
+
+ #define NV_PAGE_ALIGN(addr) ( ((addr) + PAGE_SIZE - 1) / PAGE_SIZE)
+ #define NV_MASK_OFFSET(addr) ( (addr) & (PAGE_SIZE - 1) )
+@@ -208,7 +235,8 @@
+ static inline int NV_IRQL_IS_RAISED()
+ {
+ unsigned long int eflags;
+- __save_flags(eflags);
++ /* FIXME: no __save_flags directive under 2.5 */
++ SAVE_FLAGS(eflags);
+ return !(eflags & NV_CPU_INTERRUPT_FLAGS_BIT);
+ }
+
+@@ -265,17 +293,16 @@
+
+ nv_alloc_t *alloc_queue;
+
+- // bottom half interrupt handler info; per device
+- /* keep track of any pending bottom-halves */
+- struct tq_struct *bh;
++ /* tasklets */
++ struct tasklet_struct tl;
++ wait_queue_head_t wq;
++
++ /* bh counter */
+ atomic_t bh_count;
+
+ U032 vblank_notifier;
+ U032 waiting_for_vblank;
+
+- /* queue for for NV's OS events */
+- void *event_queue;
+-
+ /* get a timer callback every second */
+ struct timer_list rc_timer;
+
+diff -ru NVIDIA_kernel-1.0-4191/nv.c NVIDIA_kernel-1.0-4191-2.5-tl/nv.c
+--- NVIDIA_kernel-1.0-4191/nv.c 2002-12-09 21:27:15.000000000 +0100
++++ NVIDIA_kernel-1.0-4191-2.5-tl/nv.c 2002-12-17 19:58:32.000000000 +0100
+@@ -36,24 +36,12 @@
+ // keep track of opened clients and their process id so they
+ // can be free'd up on abnormal close
+ nv_client_t nv_clients[NV_MAX_CLIENTS];
+-struct tq_struct nv_bottom_halves[NV_MAX_CLIENTS];
+-
+
+ #ifdef CONFIG_DEVFS_FS
+ devfs_handle_t nv_dev_handle[NV_MAX_DEVICES];
+ devfs_handle_t nv_ctl_handle;
+ #endif
+
+-/*
+- * pick apart our minor device number
+- * low 3 bits is NV device
+- * if 255, then its the control device
+- */
+-
+-#define NV_DEVICE_NUMBER(_minor) ((_minor) & 0x0f)
+-#define NV_DEVICE_IS_CONTROL_DEVICE(_minor) \
+- (((_minor) & 0xFF) == 0xFF)
+-
+ // #define NV_DBG_MEM 1
+ #undef NV_DBG_MEM
+
+@@ -104,10 +92,6 @@
+ *** EXPORTS to Linux Kernel
+ ***/
+
+-/* linux module interface functions (called by linux kernel) */
+-int init_module(void);
+-void cleanup_module(void);
+-
+ /* nv_kern_ functions, interfaces used by linux kernel */
+ void nv_kern_vma_open(struct vm_area_struct *vma);
+ void nv_kern_vma_release(struct vm_area_struct *vma);
+@@ -142,6 +126,9 @@
+ /* character driver entry points */
+
+ static struct file_operations nv_fops = {
++#ifdef KERNEL_2_5
++ owner: THIS_MODULE,
++#endif
+ poll: nv_kern_poll,
+ ioctl: nv_kern_ioctl,
+ mmap: nv_kern_mmap,
+@@ -565,12 +552,7 @@
+ nv_linux_state_t *nvl;
+ nv_linux_state_t *nv_max_devices;
+
+-#if defined (KERNEL_2_2)
+- proc[DRIVER] = create_proc_entry("driver", flags, &proc_root);
+-#else
+ proc[DRIVER] = proc_root_driver;
+-#endif
+-
+ proc[NVIDIA] = create_proc_entry("nvidia", flags, proc[DRIVER]);
+ proc[AGP] = create_proc_entry("agp", flags, proc[NVIDIA]);
+ proc[CARDS] = create_proc_entry("cards", flags, proc[NVIDIA]);
+@@ -623,14 +605,9 @@
+ static void nvos_proc_remove(void)
+ {
+ #ifdef CONFIG_PROC_FS
+-#if defined (KERNEL_2_2)
+- remove_proc_entry("driver", &proc_root);
+- remove_proc_entry("nv", &proc_root);
+-#else
+ remove_proc_entry("nvidia", proc_root_driver);
+ remove_proc_entry("nv", &proc_root);
+ #endif
+-#endif
+ }
+
+ /*
+@@ -717,12 +694,10 @@
+ /***
+ *** EXPORTS to Linux Kernel
+ ***/
+-
+-int init_module(void)
++static int __init nvidia_module_init(void)
+ {
+- nv_linux_state_t *nvl;
+ int rc;
+- int num_devices;
++ int num_devices, i;
+
+ memset(nv_linux_devices, 0, sizeof(nv_linux_devices));
+ num_devices = nvos_probe_devices();
+@@ -734,14 +709,14 @@
+
+ nv_printf(NV_DBG_ERRORS, "nvidia: loading %s\n", pNVRM_ID);
+
+-#ifdef CONFIG_DEVFS_FS
++#if defined (CONFIG_DEVFS_FS) && !defined (KERNEL_2_5)
+ rc = devfs_register_chrdev(nv_major, "nvidia", &nv_fops);
+ #else
+ rc = register_chrdev(nv_major, "nvidia", &nv_fops);
+ #endif
+
+ if (rc < 0) {
+- nv_printf(NV_DBG_ERRORS, "init_module: register failed\n");
++ nv_printf(NV_DBG_ERRORS, "nvidia_module_init: register failed\n");
+ return rc;
+ }
+
+@@ -749,7 +724,6 @@
+ memset(nv_dev_handle, 0, sizeof(devfs_handle_t) * NV_MAX_DEVICES);
+ do {
+ char name[10];
+- int i;
+
+ nv_ctl_handle = devfs_register(NULL, "nvidiactl",
+ DEVFS_FL_DEFAULT, nv_major, 255,
+@@ -766,23 +740,30 @@
+ } while(0);
+ #endif
+
+- nv_printf(NV_DBG_INFO, "init_module: major number %d\n", nv_major);
++ nv_printf(NV_DBG_INFO, "nvidia_module_init: major number %d\n", nv_major);
+
+- // init all the bottom half structures
+- for (nvl = nv_linux_devices; nvl < nv_linux_devices + NV_MAX_DEVICES; nvl++)
+- {
+- nvl->bh = &nv_bottom_halves[nvl - nv_linux_devices];
+- nvl->bh->routine = nv_kern_bh;
+- nvl->bh->data = (void *) nvl;
+- nvl->bh->sync = 0;
++ /* instantiate tasklets */
++ for (i = 0; i < NV_MAX_DEVICES; i++) {
++ /*
++ * We keep one tasklet per card to avoid latency issues with more
++ * than one device, no two instances of a single tasklet are ever
++ * executed concurrently.
++ */
++ atomic_set(&nv_linux_devices[i].tl.count, 1);
++
++ /*
++ * Initialize the event queue for this device. This only needs to
++ * happen once for every device.
++ */
++ init_waitqueue_head(&nv_linux_devices[i].wq);
+ }
+
+ // init the nvidia control device
+ {
+ nv_state_t *nv_ctl = NV_STATE_PTR(&nv_ctl_device);
+- nv_ctl_device.event_queue = NULL;
+ nv_ctl->os_state = (void *) &nv_ctl_device;
+ nv_lock_init_locks(nv_ctl);
++ init_waitqueue_head(&nv_ctl_device.wq);
+ }
+
+ #ifdef CONFIG_PM
+@@ -810,7 +791,7 @@
+ return 0;
+
+ failed:
+-#ifdef CONFIG_DEVFS_FS
++#if defined (CONFIG_DEVFS_FS) && !defined (KERNEL_2_5)
+ devfs_unregister_chrdev(nv_major, "nvidia");
+ #else
+ unregister_chrdev(nv_major, "nvidia");
+@@ -818,7 +799,7 @@
+ return rc;
+ }
+
+-void cleanup_module(void)
++static void __exit nvidia_module_exit(void)
+ {
+ int rc;
+ nv_linux_state_t *nvl;
+@@ -827,7 +808,7 @@
+ /* remove /proc/driver/nvidia */
+ nvos_proc_remove();
+
+- nv_printf(NV_DBG_INFO, "cleanup_module\n");
++ nv_printf(NV_DBG_INFO, "nvidia_module_exit\n");
+
+ #ifdef CONFIG_PM
+ /* XXX PM egads, is this the right place to do this? */
+@@ -850,20 +831,20 @@
+ continue;
+
+ nv_printf(NV_DBG_ERRORS,
+- "still have vm que at cleanup_module(): 0x%x to 0x%x\n",
++ "still have vm que at nvidia_module_exit(): 0x%x to 0x%x\n",
+ nvl->alloc_queue->vma->vm_start,
+ nvl->alloc_queue->vma->vm_end);
+ }
+ }
+
+-#ifdef CONFIG_DEVFS_FS
++#if defined (CONFIG_DEVFS_FS) && !defined (KERNEL_2_5)
+ rc = devfs_unregister_chrdev(nv_major, "nvidia");
+ #else
+ rc = unregister_chrdev(nv_major, "nvidia");
+ #endif
+
+ if (rc < 0) {
+- nv_printf(NV_DBG_ERRORS, "cleanup_module: unregister nv failed\n");
++ nv_printf(NV_DBG_ERRORS, "nvidia_module_exit: unregister nv failed\n");
+ }
+
+ #ifdef CONFIG_DEVFS_FS
+@@ -877,6 +858,8 @@
+ #endif
+ }
+
++module_init(nvidia_module_init);
++module_exit(nvidia_module_exit);
+
+ /* this is only called when the vmas are duplicated.
+ * this appears to only happen when the process is cloned to create
+@@ -903,7 +886,9 @@
+ nvos_list_page_count(at->page_table, at->num_pages);
+ }
+
++#ifndef KERNEL_2_5
+ MOD_INC_USE_COUNT;
++#endif
+ }
+
+
+@@ -937,18 +922,12 @@
+ }
+ }
+
++#ifndef KERNEL_2_5
+ MOD_DEC_USE_COUNT;
++#endif
+ }
+
+
+-/* at this point, this code just plain won't work with 2.2 kernels.
+- * additionally, only ia64 & the 460GX need a nopage handler, and 2.2 doesn't
+- * work on ia64 anyways. It's expected that at some point other agp chipsets
+- * will work similar to the 460GX (AGP 3.0 spec), so pre-emptively make sure
+- * this works on our standard ia32 driver.
+- */
+-#if !defined(KERNEL_2_2)
+-
+ /* AGP allocations under the 460GX are not mapped to the aperture
+ * addresses by the CPU. This nopage handler will fault on CPU
+ * accesses to AGP memory and map the address to the correct page.
+@@ -1022,14 +1001,11 @@
+
+ return page_ptr;
+ }
+-#endif
+
+ struct vm_operations_struct nv_vm_ops = {
+ nv_kern_vma_open,
+ nv_kern_vma_release, /* "close" */
+-#if !defined(KERNEL_2_2)
+ nv_kern_vma_nopage,
+-#endif
+ };
+
+
+@@ -1060,7 +1036,7 @@
+
+ /* for control device, just jump to its open routine */
+ /* after setting up the private data */
+- if (NV_DEVICE_IS_CONTROL_DEVICE(inode->i_rdev))
++ if (NV_IS_CONTROL_DEVICE(inode->i_rdev))
+ return nv_kern_ctl_open(inode, file);
+
+ /* what device are we talking about? */
+@@ -1071,8 +1047,9 @@
+ goto failed;
+ }
+
+-
++#ifndef KERNEL_2_5
+ MOD_INC_USE_COUNT;
++#endif
+
+ nvl = &nv_linux_devices[devnum];
+ nv = NV_STATE_PTR(nvl);
+@@ -1120,17 +1097,14 @@
+ rc = -EIO;
+ goto failed;
+ }
+-
+-#if !defined (KERNEL_2_2)
+- NV_KMALLOC(nvl->event_queue, sizeof(struct __wait_queue_head));
+- if (nvl->event_queue == NULL)
+- goto failed;
+- memset(nvl->event_queue, 0, sizeof(struct __wait_queue_head));
+-
+- init_waitqueue_head(GET_EVENT_QUEUE(nvl));
+-#else
+- nvl->event_queue = NULL;
+-#endif
++
++ /*
++ * Finalise the tasklet initialisation started in nvidia_module_init and
++ * enable bottom-half processing.
++ */
++ nvl->tl.func = (void *) rm_isr_bh;
++ nvl->tl.data = (unsigned long) nv->pdev;
++ tasklet_enable(&nvl->tl);
+
+ nv->flags |= NV_FLAG_OPEN;
+ }
+@@ -1141,7 +1115,9 @@
+ return rc;
+
+ failed:
++#ifndef KERNEL_2_5
+ MOD_DEC_USE_COUNT;
++#endif
+ nv_unlock_ldata(nv);
+ return rc;
+ }
+@@ -1163,7 +1139,7 @@
+
+ /* for control device, just jump to its open routine */
+ /* after setting up the private data */
+- if (NV_DEVICE_IS_CONTROL_DEVICE(inode->i_rdev))
++ if (NV_IS_CONTROL_DEVICE(inode->i_rdev))
+ return nv_kern_ctl_close(inode, file);
+
+ nv_printf(NV_DBG_INFO, "nv_kern_close on device %d\n", NV_DEVICE_NUMBER(inode->i_rdev));
+@@ -1173,28 +1149,22 @@
+ nv_lock_ldata(nv);
+ if (--nv->usage_count == 0)
+ {
+- int counter = 0;
+-
+- /* turn off interrupts.
+- ** be careful to make sure any pending bottom half gets run
+- ** or disabled before calling rm_shutdown_adapter() since
+- ** it will free up the pdev. This is hard to see on single
+- ** cpu systems, but easy on dual cpu :-)
+- */
++ /*
++ * The usage count for this device has dropped to zero, it can be
++ * safely shut down; the first step is to disable interrupts.
++ */
+ rm_disable_adapter(nv);
+
+- /* give it a moment to allow any bottom half to run */
+-
+-#define MAX_BH_TASKS 10
+- while (NV_ATOMIC_READ(nvl->bh_count) && (counter < MAX_BH_TASKS))
+- {
+- current->state = TASK_INTERRUPTIBLE;
+- schedule_timeout(HZ/50);
+- counter++;
+- }
+-
+- /* free the irq, which may block until any pending interrupts */
+- /* are done being processed. */
++ /*
++ * Disable this device's tasklet to make sure that no bottom-half
++ * will run with an undefined device state.
++ */
++ tasklet_disable(&nvl->tl);
++
++ /*
++ * Free the irq, which may block until any pending interrupts
++ * are done being processed.
++ */
+ free_irq(nv->interrupt_line, (void *) nv);
+
+ rm_shutdown_adapter(nv);
+@@ -1214,12 +1184,6 @@
+ }
+ }
+
+-#if !defined (KERNEL_2_2)
+- /* this only needs to be freed on 2.4 and later kernels */
+- NV_KFREE(nvl->event_queue);
+- nvl->event_queue = NULL;
+-#endif
+-
+ /* leave INIT flag alone so we don't reinit every time */
+ nv->flags &= ~(NV_FLAG_OPEN | NV_FLAG_WAITING);
+ }
+@@ -1228,9 +1192,12 @@
+ /* free up our per file private data */
+ if (file->private_data)
+ NV_KFREE(file->private_data);
++
+ file->private_data = (void *) 0;
+
++#ifndef KERNEL_2_5
+ MOD_DEC_USE_COUNT;
++#endif
+
+ return 0;
+ }
+@@ -1267,7 +1234,6 @@
+ * figure out the range and map it in
+ */
+
+-
+ /* NV reg space */
+ if (IS_REG_OFFSET(nv, LINUX_VMA_OFFS(vma), vma->vm_end - vma->vm_start))
+ {
+@@ -1276,7 +1242,8 @@
+ pages = nv->regs->size / PAGE_SIZE;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+- if (remap_page_range(vma->vm_start,
++
++ if (REMAP_PAGE_RANGE(vma->vm_start,
+ LINUX_VMA_OFFS(vma),
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
+@@ -1295,7 +1262,8 @@
+ pages = nv->fb->size / PAGE_SIZE;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+- if (remap_page_range(vma->vm_start,
++
++ if (REMAP_PAGE_RANGE(vma->vm_start,
+ LINUX_VMA_OFFS(vma),
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
+@@ -1340,9 +1308,13 @@
+ }
+ else
+ {
++#ifdef KERNEL_2_5
++ rm_map_agp_pages(nv, (void **) &vma,
++ at->class, at->priv_data);
++#else
+ rm_map_agp_pages(nv, (void **) &vma->vm_start,
+ at->class, at->priv_data);
+-
++#endif
+ }
+ nvos_list_page_count(at->page_table, at->num_pages);
+ nv_unlock_at(nv);
+@@ -1384,7 +1356,7 @@
+ while (pages--)
+ {
+ page = (unsigned long) at->page_table[i++];
+- if (remap_page_range(start, page, PAGE_SIZE, PAGE_SHARED))
++ if (REMAP_PAGE_RANGE(start, page, PAGE_SIZE, PAGE_SHARED))
+ return -EAGAIN;
+ start += PAGE_SIZE;
+ pos += PAGE_SIZE;
+@@ -1398,8 +1370,10 @@
+
+ vma->vm_file = file;
+
++#ifndef KERNEL_2_5
+ /* just increment usage count, rather than calling vma_open */
+ MOD_INC_USE_COUNT;
++#endif
+
+ return 0;
+ }
+@@ -1425,8 +1399,7 @@
+ return nv_kern_ctl_poll (file, wait);
+
+ // add us to the list
+- poll_wait(file, GET_EVENT_QUEUE(nvl), wait);
+-
++ poll_wait(file, &nvl->wq, wait);
+ nv_lock_ldata(nv);
+
+ // wake the user on any file-specific event, or a general vblank
+@@ -1505,11 +1478,15 @@
+
+ switch (_IOC_NR(cmd))
+ {
++#ifdef KERNEL_2_5
++ /* use rusty's "forced unload" kernel module option */
++#else
+ /* debug tool; zap the module use count so we can unload driver */
+ /* even if it is confused */
+ case _IOC_NR(NV_IOCTL_MODULE_RESET):
+ atomic_set(&__this_module.uc.usecount, 1);
+ break;
++#endif
+
+ /* pass out info about the card */
+ case _IOC_NR(NV_IOCTL_CARD_INFO):
+@@ -1651,8 +1628,7 @@
+ if (need_to_run_bottom_half)
+ {
+ NV_ATOMIC_INC(nvl->bh_count);
+- queue_task(nvl->bh, &tq_immediate);
+- mark_bh(IMMEDIATE_BH);
++ tasklet_schedule(&nvl->tl);
+ }
+ }
+
+@@ -1740,36 +1716,18 @@
+
+ nv_lock_ldata(nv);
+
+-
+ nv->device_number = NV_CONTROL_DEVICE_NUMBER;
+
+ /* save the nv away in file->private_data */
+ NV_HIDE_IN_FILEP(file, nv);
+
++#ifndef KERNEL_2_5
+ MOD_INC_USE_COUNT;
+-
+- /* if this is the first time the control device has been opened,
+- * allocate the wait queue
+- */
+-
+- if (! nvl->event_queue) {
+-
+-#if !defined (KERNEL_2_2)
+- NV_KMALLOC(nvl->event_queue, sizeof(struct __wait_queue_head));
+- if (nvl->event_queue == NULL)
+- return -ENOMEM;
+- memset(nvl->event_queue, 0, sizeof(struct __wait_queue_head));
+-
+- init_waitqueue_head(GET_EVENT_QUEUE(nvl));
+-#else
+- nvl->event_queue = NULL;
+ #endif
+- }
+-
++
+ nv->flags |= NV_FLAG_OPEN + NV_FLAG_CONTROL;
+
+ /* turn off the hotkey occurred bit */
+-
+ nv->flags &= ~NV_FLAG_HOTKEY_OCCURRED;
+
+ nv->usage_count++;
+@@ -1792,16 +1750,10 @@
+ nv_printf(NV_DBG_INFO, "nv_kern_ctl_close\n");
+
+ nv_lock_ldata(nv);
++
+ if (--nv->usage_count == 0)
+- {
+-#if !defined (KERNEL_2_2)
+- nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+- /* this only needs to be freed on 2.4 and later kernels */
+- NV_KFREE(nvl->event_queue);
+- nvl->event_queue = 0;
+-#endif
+ nv->flags = 0;
+- }
++
+ nv_unlock_ldata(nv);
+
+ rm_free_unused_clients(nv, current->pid, (void *) file);
+@@ -1809,9 +1761,12 @@
+ /* free up our per file private data */
+ if (file->private_data)
+ NV_KFREE(file->private_data);
++
+ file->private_data = (void *) 0;
+
++#ifndef KERNEL_2_5
+ MOD_DEC_USE_COUNT;
++#endif
+
+ return 0;
+ }
+@@ -1837,7 +1792,7 @@
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+- poll_wait(file, GET_EVENT_QUEUE(nvl), wait);
++ poll_wait(file, &nvl->wq, wait);
+
+ nv_lock_ldata(nv);
+
+@@ -1867,7 +1822,7 @@
+ nv_ctl_device.nv_state.flags |= NV_FLAG_HOTKEY_OCCURRED;
+ nv_unlock_ldata(&(nv_ctl_device.nv_state));
+
+- wake_up_interruptible(GET_EVENT_QUEUE(&nv_ctl_device));
++ wake_up_interruptible(&nv_ctl_device.wq);
+ }
+
+ struct host_bridge_t {
+@@ -2217,7 +2172,7 @@
+ {
+ pgd_t *pg_dir;
+ pmd_t *pg_mid_dir;
+- pte_t *pte__, pte;
++ pte_t pte;
+
+ #if defined(NVCPU_IA64)
+ if (address > __IA64_UNCACHED_OFFSET)
+@@ -2241,14 +2196,7 @@
+ if (pmd_none(*pg_mid_dir))
+ goto failed;
+
+-#if defined (pte_offset_atomic)
+- pte__ = pte_offset_atomic(pg_mid_dir, address);
+- pte = *pte__;
+- pte_kunmap(pte__);
+-#else
+- pte__ = NULL;
+- pte = *pte_offset(pg_mid_dir, address);
+-#endif
++ PTE_OFFSET(pg_mid_dir, address, pte);
+
+ if (!pte_present(pte))
+ goto failed;
+@@ -2589,7 +2537,7 @@
+
+ nvfp->any_fired_notifiers++;
+
+- wake_up_interruptible(GET_EVENT_QUEUE(nvl));
++ wake_up_interruptible(&nvl->wq);
+ }
+
+ /*
+@@ -2606,7 +2554,7 @@
+ if (nvl->waiting_for_vblank)
+ nvl->vblank_notifier++;
+
+- wake_up_interruptible(GET_EVENT_QUEUE(nvl));
++ wake_up_interruptible(&nvl->wq);
+ }
+
+
+@@ -2657,12 +2605,8 @@
+ if ( (NV_AGP_DISABLED(nv)) && (config & NVOS_AGP_CONFIG_NVAGP) )
+ {
+ /* make sure the user does not have agpgart loaded */
+-#if !defined (KERNEL_2_2)
+ if (inter_module_get("drm_agp")) {
+ inter_module_put("drm_agp");
+-#else
+- if (GET_MODULE_SYMBOL(0, __MODULE_STRING(agp_enable))) {
+-#endif
+ nv_printf(NV_DBG_WARNINGS, "NVRM: not using NVAGP, AGPGART is loaded!!\n");
+ } else
+ status = rm_init_agp(nv);
+diff -ru NVIDIA_kernel-1.0-4191/os-agp.c NVIDIA_kernel-1.0-4191-2.5-tl/os-agp.c
+--- NVIDIA_kernel-1.0-4191/os-agp.c 2002-12-09 21:27:15.000000000 +0100
++++ NVIDIA_kernel-1.0-4191-2.5-tl/os-agp.c 2002-12-17 19:58:32.000000000 +0100
+@@ -48,7 +48,11 @@
+ typedef struct {
+ int (*backend_acquire)(void);
+ void (*backend_release)(void);
++#ifdef KERNEL_2_5
++ int (*copy_info)(agp_kern_info *);
++#else
+ void (*copy_info)(agp_kern_info *);
++#endif
+ agp_memory * (*allocate_memory)(size_t, unsigned int);
+ void (*free_memory)(agp_memory *);
+ int (*bind_memory)(agp_memory *, off_t);
+@@ -59,30 +63,7 @@
+ agp_operations_struct agp_ops;
+ agp_kern_info agpinfo;
+ agp_gart gart;
+-#if !defined (KERNEL_2_2)
+ const drm_agp_t *drm_agp_p;
+-#endif
+-
+-#if defined (KERNEL_2_2)
+- #define GET_AGPGART_SYMBOL(sym, sym_string) \
+- sym = (void*) GET_MODULE_SYMBOL(0, sym_string); \
+- if (sym == NULL) \
+- { \
+- nv_printf(NV_DBG_ERRORS, \
+- "NVRM: AGPGART: unable to retrieve symbol %s\n", \
+- sym_string); \
+- return 1; \
+- }
+-
+- #define AGP_BACKEND_ACQUIRE_SYM __MODULE_STRING(agp_backend_acquire)
+- #define AGP_BACKEND_RELEASE_SYM __MODULE_STRING(agp_backend_release)
+- #define AGP_COPY_INFO_SYM __MODULE_STRING(agp_copy_info)
+- #define AGP_ALLOCATE_MEMORY_SYM __MODULE_STRING(agp_allocate_memory)
+- #define AGP_FREE_MEMORY_SYM __MODULE_STRING(agp_free_memory)
+- #define AGP_BIND_MEMORY_SYM __MODULE_STRING(agp_bind_memory)
+- #define AGP_UNBIND_MEMORY_SYM __MODULE_STRING(agp_unbind_memory)
+- #define AGP_ENABLE_SYM __MODULE_STRING(agp_enable)
+-#endif
+
+ #if defined(CONFIG_MTRR)
+ #define MTRR_DEL(gart) if ((gart).mtrr > 0) mtrr_del((gart).mtrr, 0, 0);
+@@ -92,6 +73,7 @@
+
+ #endif /* AGPGART */
+
++#ifndef AGPGART
+ BOOL KernInitAGP(
+ nv_state_t *nv,
+ VOID **ap_phys_base,
+@@ -99,19 +81,24 @@
+ U032 *apsize
+ )
+ {
+-#ifndef AGPGART
+ return 1;
++}
+ #else
++BOOL KernInitAGP(
++ nv_state_t *nv,
++ VOID **ap_phys_base,
++ VOID **ap_mapped_base,
++ U032 *apsize
++)
++{
+ U032 agp_rate;
+ U032 agp_sba;
+ U032 agp_fw;
+- char* chipset;
+ VOID *bitmap;
+ U032 bitmap_size;
+
+ memset( (void *) &gart, 0, sizeof(agp_gart));
+
+-#if !defined (KERNEL_2_2)
+ if (!(drm_agp_p = inter_module_get_request("drm_agp", "agpgart")))
+ {
+ nv_printf(NV_DBG_ERRORS,
+@@ -133,25 +120,10 @@
+ // harmless (backend_acquire would have already failed and caused us to
+ // bail), so cast the function pointer to avoid compiler warnings.
+ // we may need to revisit this in the future.
+- agp_ops.copy_info = (void (*)(agp_kern_info *)) drm_agp_p->copy_info;
+-
++#ifdef KERNEL_2_5
++ agp_ops.copy_info = (int (*)(agp_kern_info *)) drm_agp_p->copy_info;
+ #else
+-#if defined(CONFIG_KMOD)
+- if ( request_module("agpgart") )
+- {
+- nv_printf(NV_DBG_ERRORS, "NVRM: AGPGART: not loading agpgart.o\n");
+- return 1;
+- }
+-#endif
+-
+- GET_AGPGART_SYMBOL(agp_ops.backend_acquire, AGP_BACKEND_ACQUIRE_SYM);
+- GET_AGPGART_SYMBOL(agp_ops.backend_release, AGP_BACKEND_RELEASE_SYM);
+- GET_AGPGART_SYMBOL(agp_ops.copy_info, AGP_COPY_INFO_SYM);
+- GET_AGPGART_SYMBOL(agp_ops.allocate_memory, AGP_ALLOCATE_MEMORY_SYM);
+- GET_AGPGART_SYMBOL(agp_ops.free_memory, AGP_FREE_MEMORY_SYM);
+- GET_AGPGART_SYMBOL(agp_ops.bind_memory, AGP_BIND_MEMORY_SYM);
+- GET_AGPGART_SYMBOL(agp_ops.unbind_memory, AGP_UNBIND_MEMORY_SYM);
+- GET_AGPGART_SYMBOL(agp_ops.enable, AGP_ENABLE_SYM);
++ agp_ops.copy_info = (void (*)(agp_kern_info *)) drm_agp_p->copy_info;
+ #endif
+
+ /* NOTE: from here down, return an error code of '-1'
+@@ -177,45 +149,15 @@
+ if (rm_read_registry_dword(nv, "NVreg", "EnableAGPFW", &agp_fw) == RM_ERROR)
+ agp_fw = 1;
+ agp_fw &= 0x00000001;
+-
+- (*(agp_ops.copy_info))(&agpinfo);
+
+- switch ( agpinfo.chipset )
+- {
+- case INTEL_GENERIC: chipset = "Intel"; break;
+- case INTEL_LX: chipset = "Intel 440LX"; break;
+- case INTEL_BX: chipset = "Intel 440BX"; break;
+- case INTEL_GX: chipset = "Intel 440GX"; break;
+- case INTEL_I810: chipset = "Intel i810"; break;
+- case INTEL_I840: chipset = "Intel i840"; break;
+-#if !defined (KERNEL_2_2)
+- case INTEL_I815: chipset = "Intel i815"; break;
+-#if !defined(__rh_config_h__)
+- case INTEL_I850: chipset = "Intel i850"; break;
+-#endif
+-#endif
+-#if defined(NVCPU_IA64)
+- case INTEL_460GX: chipset = "Intel 460GX"; break;
+-#endif
+- case VIA_GENERIC: chipset = "VIA"; break;
+- case VIA_VP3: chipset = "VIA VP3"; break;
+- case VIA_MVP3: chipset = "VIA MVP3"; break;
+- case VIA_MVP4: chipset = "VIA MVP4"; break;
+-#if !defined (KERNEL_2_2)
+- case VIA_APOLLO_KX133: chipset = "VIA Apollo KX133"; break;
+- case VIA_APOLLO_KT133: chipset = "VIA Apollo KT133"; break;
+-#endif
+- case VIA_APOLLO_PRO: chipset = "VIA Apollo Pro"; break;
+- case SIS_GENERIC: chipset = "SiS"; break;
+- case AMD_GENERIC: chipset = "AMD"; break;
+- case AMD_IRONGATE: chipset = "AMD Irongate"; break;
+- case ALI_M1541: chipset = "ALi M1541"; break;
+- case ALI_GENERIC: chipset = "ALi"; break;
+- case NOT_SUPPORTED: chipset = "unsupported"; break;
+- default: chipset = "unknown";
++#ifdef KERNEL_2_5
++ if (agp_ops.copy_info(&agpinfo) != 0) {
++ printk("nvidia: chipset not supported by agpgart.o\n");
++ agp_ops.backend_release();
+ }
+-
+- nv_printf(NV_DBG_SETUP, "NVRM: AGPGART: %s chipset\n", chipset);
++#else
++ (*(agp_ops.copy_info))(&agpinfo);
++#endif
+
+ #ifdef CONFIG_MTRR
+ if ((gart.mtrr = mtrr_add(agpinfo.aper_base,
+@@ -306,8 +248,8 @@
+ gart.ready = 1;
+
+ return 0;
+-#endif /* AGPGART */
+ }
++#endif /* AGPGART */
+
+ BOOL KernTeardownAGP(
+ nv_state_t *nv
+@@ -334,9 +276,7 @@
+
+ (*(agp_ops.backend_release))();
+
+-#if !defined (KERNEL_2_2)
+ inter_module_put("drm_agp");
+-#endif
+
+ if (rm_clear_agp_bitmap(nv, &bitmap))
+ {
+@@ -450,13 +390,9 @@
+
+ agp_addr = agpinfo.aper_base + (agp_data->offset << PAGE_SHIFT);
+
+- err = remap_page_range(vma->vm_start, (size_t) agp_addr,
++ err = REMAP_PAGE_RANGE(vma->vm_start, (size_t) agp_addr,
+ agp_data->num_pages << PAGE_SHIFT,
+-#if defined(NVCPU_IA64)
+ vma->vm_page_prot);
+-#else
+- PAGE_SHARED);
+-#endif
+
+ if (err)
+ {
+@@ -480,9 +416,6 @@
+ #endif /* AGPGART */
+ }
+
+-
+-#if !defined(KERNEL_2_2)
+-
+ RM_STATUS
+ KernMapAGPNopage(
+ VOID *address,
+@@ -529,9 +462,6 @@
+ #endif
+ }
+
+-#endif /* !defined(KERNEL_2_2) */
+-
+-
+ RM_STATUS KernFreeAGPPages(
+ nv_state_t *nv,
+ VOID **pAddress,
+@@ -567,5 +497,3 @@
+ return RM_ERROR;
+ #endif
+ }
+-
+-
+diff -ru NVIDIA_kernel-1.0-4191/os-interface.c NVIDIA_kernel-1.0-4191-2.5-tl/os-interface.c
+--- NVIDIA_kernel-1.0-4191/os-interface.c 2002-12-09 21:27:15.000000000 +0100
++++ NVIDIA_kernel-1.0-4191-2.5-tl/os-interface.c 2002-12-17 19:58:32.000000000 +0100
+@@ -31,7 +31,7 @@
+ PHWINFO pDev
+ )
+ {
+- return suser();
++ return SUSER();
+ }
+
+ U032 os_get_page_size(VOID)
+@@ -184,6 +184,11 @@
+ U032 size
+ )
+ {
++ /*
++ * XXX This needs to be !MAY_SLEEP() rather than in_interrupt(); this
++ * requires that quite a bit of locking be rearranged, however, which
++ * is why I'll leave this alone for now.
++ */
+ if (in_interrupt()) {
+ if (size <= KMALLOC_LIMIT) {
+ /*
+@@ -365,7 +370,7 @@
+ if (in_irq() && MilliSeconds > NV_MAX_ISR_MDELAY)
+ return RM_ERROR;
+
+- if (in_interrupt())
++ if (!MAY_SLEEP())
+ {
+ mdelay(MilliSeconds);
+ return RM_OK;
+@@ -669,14 +674,14 @@
+
+ ULONG os_cli(ULONG flags)
+ {
+- save_flags(flags);
+- cli();
++ SAVE_FLAGS(flags);
++ CLI();
+ return flags;
+ }
+
+ ULONG os_sti(ULONG flags)
+ {
+- restore_flags(flags);
++ RESTORE_FLAGS(flags);
+ return flags;
+ }
+
+@@ -796,29 +801,12 @@
+ {
+ void *vaddr;
+
+- if (in_interrupt())
+- {
+- nv_printf(NV_DBG_ERRORS, "trying to map 0x%x to kernel space in interrupt!\n", start);
+- os_dbg_breakpoint();
+- return NULL;
+- }
+-
+ if (mode == NV_MEMORY_DEFAULT) {
+ vaddr = ioremap(start, size_bytes);
+ } else {
+ vaddr = ioremap_nocache(start, size_bytes);
+ }
+
+-#if defined (KERNEL_2_2)
+- if ((vaddr == NULL)) // && (mode == NV_MEMORY_DEFAULT))
+- {
+- unsigned long map_nr = MAP_NR(__va(start));
+- if (map_nr < max_mapnr) {
+- vaddr = __va(start);
+- }
+- }
+-#endif
+-
+ #ifdef DEBUG
+ if (mode == NV_MEMORY_WRITECOMBINED) {
+ nv_printf(NV_DBG_ERRORS,
+@@ -839,16 +827,7 @@
+ U032 size_bytes
+ )
+ {
+-#if defined (KERNEL_2_2)
+- if (MAP_NR(addr) < max_mapnr) {
+- // if we didn't want the memory cached, this isn't necessary
+- // but we shouldn't be in a timing critical piece of code.
+- asm volatile("wbinvd":::"memory");
+- } else
+-#endif
+- {
+- iounmap(addr);
+- }
++ iounmap(addr);
+ }
+
+ VOID* os_map_user_space(
+@@ -858,24 +837,7 @@
+ U032 mode
+ )
+ {
+- int err;
+- unsigned long paddr;
+- void *uaddr = NULL;
+-
+- paddr = nv_get_phys_address((unsigned long)kaddr);
+-
+- uaddr = *priv;
+-
+- /* finally, let's do it! */
+- err = remap_page_range( (size_t) uaddr, paddr, size_bytes,
+- PAGE_SHARED);
+-
+- if (err != 0)
+- {
+- return (void *) NULL;
+- }
+-
+- return uaddr;
++ return NULL;
+ }
+
+ VOID os_unmap_user_space(
+@@ -883,7 +845,7 @@
+ VOID *priv
+ )
+ {
+- // I don't think I need to do anything here...
++ return;
+ }
+
+ VOID* os_map_io_space(
+@@ -894,24 +856,17 @@
+ U032 mode
+ )
+ {
+- int err;
+- void *uaddr = NULL;
++ struct vm_area_struct *vma;
+
+- if (!user)
++ if (user == 0 || priv == NULL || *priv == NULL)
+ return NULL;
+
+- uaddr = *priv;
++ vma = (struct vm_area_struct *) *priv;
+
+- /* finally, let's do it! */
+- err = remap_page_range( (size_t) uaddr, (size_t) start, size_bytes,
+- PAGE_SHARED);
+-
+- if (err != 0)
+- {
+- return (void *) NULL;
+- }
++ if (REMAP_PAGE_RANGE(vma->vm_start, start & PAGE_MASK, size_bytes, PAGE_SHARED))
++ return NULL;
+
+- return uaddr;
++ return (void *)(NV_UINTPTR_T) vma->vm_start;
+ }
+
+ VOID os_unmap_io_space(
+@@ -973,7 +928,7 @@
+
+ U032 os_get_cpu_count()
+ {
+- return smp_num_cpus;
++ return SMP_NUM_CPUS;
+ }
+
+
+@@ -1036,12 +991,10 @@
+ if (sgi_funcs.add_barrier == NULL)
+ {
+ #if defined(TESTING_SWAP)
+-#if !defined (KERNEL_2_2)
+ inter_module_register(ADD_BARRIER_FUNC, THIS_MODULE, sgitest_add_barrier);
+ inter_module_register(REMOVE_BARRIER_FUNC, THIS_MODULE, sgitest_remove_barrier);
+ inter_module_register(SWAP_READY_FUNC, THIS_MODULE, sgitest_swap_ready);
+ #endif
+-#endif
+ sgi_funcs.add_barrier = GET_MODULE_SYMBOL(0, ADD_BARRIER_FUNC);
+ sgi_funcs.remove_barrier = GET_MODULE_SYMBOL(0, REMOVE_BARRIER_FUNC);
+ sgi_funcs.swap_ready = GET_MODULE_SYMBOL(0, SWAP_READY_FUNC);
diff --git a/media-video/nvidia-kernel/nvidia-kernel-1.0.4191-r1.ebuild b/media-video/nvidia-kernel/nvidia-kernel-1.0.4191-r1.ebuild
index 852d5e288665..f6a7db601969 100644
--- a/media-video/nvidia-kernel/nvidia-kernel-1.0.4191-r1.ebuild
+++ b/media-video/nvidia-kernel/nvidia-kernel-1.0.4191-r1.ebuild
@@ -1,6 +1,6 @@
# Copyright 1999-2002 Gentoo Technologies, Inc.
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo-x86/media-video/nvidia-kernel/nvidia-kernel-1.0.4191-r1.ebuild,v 1.1 2002/12/18 21:15:07 styx Exp $
+# $Header: /var/cvsroot/gentoo-x86/media-video/nvidia-kernel/nvidia-kernel-1.0.4191-r1.ebuild,v 1.2 2002/12/23 02:31:05 azarah Exp $
inherit eutils
@@ -52,20 +52,15 @@ src_unpack() {
# these to us, and being so helpful to select which to use.
# This should close bug #9704.
-# local KV_major="`uname -r | cut -d. -f1`"
-# local KV_minor="`uname -r | cut -d. -f2`"
-#
-# cd ${S}
-# if [ "${KV_major}" -eq 2 -a "${KV_minor}" -eq 5 ]
-# then
-# EPATCH_SINGLE_MSG="Applying tasklet patch for kernel 2.5..." \
-# epatch ${FILESDIR}/${NV_PACKAGE}-2.5-tl.diff
-# EPATCH_SINGLE_MSG="Applying page_alloc.c patch..." \
-# epatch ${FILESDIR}/${NV_PACKAGE}-2.5-tl-pa.diff
-# else
-# EPATCH_SINGLE_MSG="Applying page_alloc.c patch..." \
-# epatch ${FILESDIR}/${NV_PACKAGE}-pa.diff
-# fi
+ local KV_major="`uname -r | cut -d. -f1`"
+ local KV_minor="`uname -r | cut -d. -f2`"
+
+ cd ${S}
+ if [ "${KV_major}" -eq 2 -a "${KV_minor}" -eq 5 ]
+ then
+ EPATCH_SINGLE_MSG="Applying tasklet patch for kernel 2.5..." \
+ epatch ${FILESDIR}/${NV_PACKAGE}-2.5-tl.diff
+ fi
# This is a minor patch to make it work with rmap enabled kernels
EPATCH_SINGLE_MSG="Applying rmap compat patch for kernel 2.4..."