summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
Diffstat (limited to 'media-plugins/vdr-analogtv/files/vdr-analogtv-0.9.37-gcc-3.4.diff')
-rw-r--r--media-plugins/vdr-analogtv/files/vdr-analogtv-0.9.37-gcc-3.4.diff86
1 files changed, 86 insertions, 0 deletions
diff --git a/media-plugins/vdr-analogtv/files/vdr-analogtv-0.9.37-gcc-3.4.diff b/media-plugins/vdr-analogtv/files/vdr-analogtv-0.9.37-gcc-3.4.diff
new file mode 100644
index 000000000000..a7e87770f115
--- /dev/null
+++ b/media-plugins/vdr-analogtv/files/vdr-analogtv-0.9.37-gcc-3.4.diff
@@ -0,0 +1,86 @@
+diff -ru analogtv-0.9.37-orig/memcpy.c analogtv-0.9.37/memcpy.c
+--- analogtv-0.9.37-orig/memcpy.c 2005-12-11 16:51:06.713174250 +0100
++++ analogtv-0.9.37/memcpy.c 2005-12-11 16:51:13.665608750 +0100
+@@ -168,9 +168,11 @@
+ /* SSE note: i tried to move 128 bytes a time instead of 64 but it
+ didn't make any measureable difference. i'm using 64 for the sake of
+ simplicity. [MF] */
+-static void * sse_memcpy(void * to, const void * from, size_t len)
++static void * sse_memcpy(void * into, const void * infrom, size_t len)
+ {
+ void *retval;
++ unsigned char* to=(unsigned char*)into;
++ unsigned char* from=(unsigned char*)infrom;
+ size_t i;
+ retval = to;
+
+@@ -211,8 +213,8 @@
+ "movntps %%xmm2, 32(%1)\n"
+ "movntps %%xmm3, 48(%1)\n"
+ :: "r" (from), "r" (to) : "memory");
+- ((const unsigned char *)from)+=64;
+- ((unsigned char *)to)+=64;
++ from+=64;
++ to+=64;
+ }
+ else
+ /*
+@@ -233,8 +235,8 @@
+ "movntps %%xmm2, 32(%1)\n"
+ "movntps %%xmm3, 48(%1)\n"
+ :: "r" (from), "r" (to) : "memory");
+- ((const unsigned char *)from)+=64;
+- ((unsigned char *)to)+=64;
++ from+=64;
++ to+=64;
+ }
+ /* since movntq is weakly-ordered, a "sfence"
+ * is needed to become ordered again. */
+@@ -249,9 +251,11 @@
+ return retval;
+ }
+
+-static void * mmx_memcpy(void * to, const void * from, size_t len)
++static void * mmx_memcpy(void * into, const void * infrom, size_t len)
+ {
+ void *retval;
++ unsigned char* to=(unsigned char*)into;
++ unsigned char* from=(unsigned char*)infrom;
+ size_t i;
+ retval = to;
+
+@@ -288,8 +292,8 @@
+ "movq %%mm6, 48(%1)\n"
+ "movq %%mm7, 56(%1)\n"
+ :: "r" (from), "r" (to) : "memory");
+- ((const unsigned char *)from)+=64;
+- ((unsigned char *)to)+=64;
++ from+=64;
++ to+=64;
+ }
+ __asm__ __volatile__ ("emms":::"memory");
+ }
+@@ -300,9 +304,11 @@
+ return retval;
+ }
+
+-void * mmx2_memcpy(void * to, const void * from, size_t len)
++void * mmx2_memcpy(void * into, const void * infrom, size_t len)
+ {
+ void *retval;
++ unsigned char* to=(unsigned char*)into;
++ unsigned char* from=(unsigned char*)infrom;
+ size_t i;
+ retval = to;
+
+@@ -349,8 +355,8 @@
+ "movntq %%mm6, 48(%1)\n"
+ "movntq %%mm7, 56(%1)\n"
+ :: "r" (from), "r" (to) : "memory");
+- ((const unsigned char *)from)+=64;
+- ((unsigned char *)to)+=64;
++ from+=64;
++ to+=64;
+ }
+ /* since movntq is weakly-ordered, a "sfence"
+ * is needed to become ordered again. */