void-packages/srcpkgs/nvidia340/files/4.10.0_kernel.patch
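Fix building the nvidia340 kernel modules against Linux 4.10 and newer:
get_user_pages_remote() gained an extra "locked" argument (see the
5b56d49fc31d commit referenced below), the hotcpu notifier used for PAT
setup/teardown is replaced with cpuhp_setup_state() callbacks, and the
fault handler reads vm_fault->address instead of the removed
vm_fault->virtual_address.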

--- kernel/nv-linux.h
+++ kernel/nv-linux.h
@@ -2082,6 +2082,8 @@ static inline NvU64 nv_node_end_pfn(int nid)
* 2016 Dec 14:5b56d49fc31dbb0487e14ead790fc81ca9fb2c99
*/
+#include <linux/version.h>
+
#if defined(NV_GET_USER_PAGES_REMOTE_PRESENT)
#if defined(NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS)
#define NV_GET_USER_PAGES get_user_pages
@@ -2129,8 +2131,13 @@ static inline NvU64 nv_node_end_pfn(int nid)
#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
pages, vmas);
+#else
+ return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
+ pages, vmas, NULL);
+#endif
#endif
--- kernel/nv-pat.c
+++ kernel/nv-pat.c
@@ -203,6 +203,7 @@ void nv_disable_pat_support(void)
}
#if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
static int
nvidia_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
@@ -234,6 +235,34 @@ static struct notifier_block nv_hotcpu_nfb = {
.notifier_call = nvidia_cpu_callback,
.priority = 0
};
+#else
+static int nvidia_cpu_online(unsigned int hcpu)
+{
+    unsigned int cpu = get_cpu();
+    if (cpu == hcpu)
+        nv_setup_pat_entries(NULL);
+    else
+        NV_SMP_CALL_FUNCTION(nv_setup_pat_entries, (void *)(long int)hcpu, 1);
+
+    put_cpu();
+
+    return 0;
+}
+
+static int nvidia_cpu_down_prep(unsigned int hcpu)
+{
+    unsigned int cpu = get_cpu();
+    if (cpu == hcpu)
+        nv_restore_pat_entries(NULL);
+    else
+        NV_SMP_CALL_FUNCTION(nv_restore_pat_entries, (void *)(long int)hcpu, 1);
+
+    put_cpu();
+
+    return 0;
+}
+#endif
+
#endif
int nv_init_pat_support(nv_stack_t *sp)
@@ -255,7 +284,14 @@ int nv_init_pat_support(nv_stack_t *sp)
#if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
if (register_hotcpu_notifier(&nv_hotcpu_nfb) != 0)
+#else
+ /* a positive return is the dynamically allocated state, i.e. success */
+ if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+                       "gpu/nvidia:online",
+                       nvidia_cpu_online,
+                       nvidia_cpu_down_prep) < 0)
+#endif
{
nv_disable_pat_support();
nv_printf(NV_DBG_ERRORS,
@@ -280,7 +316,11 @@ void nv_teardown_pat_support(void)
{
nv_disable_pat_support();
#if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
unregister_hotcpu_notifier(&nv_hotcpu_nfb);
+#else
+ cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
+#endif
#endif
}
}
--- kernel/uvm/nvidia_uvm_lite.c
+++ kernel/uvm/nvidia_uvm_lite.c
@@ -820,7 +820,11 @@ done:
#if defined(NV_VM_OPERATIONS_STRUCT_HAS_FAULT)
int _fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
unsigned long vaddr = (unsigned long)vmf->virtual_address;
+#else
+ unsigned long vaddr = (unsigned long)vmf->address;
+#endif
struct page *page = NULL;
int retval;