From 6eee1beac524b5582a6c6de14d9d35a78c1ece74 Mon Sep 17 00:00:00 2001
From: Andrew <16061801+brada4@users.noreply.github.com>
Date: Sun, 24 Feb 2019 20:41:02 +0200
Subject: [PATCH 2/2] move fix to right place

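Commit 1/2 applied the fix to a copy of this file at the repository
root by mistake; delete that stray copy and apply the same change to
kernel/x86_64/dgemv_n_microk_piledriver-4.c.

The fix itself: the asm template of dgemv_kernel_4x8 advances lda4
with addq, but lda4 was passed as an input-only operand ("r" (lda4),
%8).  GCC assumes inputs are left unchanged, so modifying one is
undefined behavior and can miscompile the surrounding code.  Make
lda4 a read-write output ("+r" (lda4), %2) and renumber x, y,
ap[0..3] and alpha to %3..%9 throughout the template.  Also correct
the ymm8 store base that the renumbering missed: with the new
numbering %3 is x, so storing y through -64(%3,%0,8) would clobber
x; use %4 (y) instead.

A minimal sketch of the constraint rule, using a hypothetical
counter rather than code from this kernel: an operand the template
modifies must be declared as a read-write output, never an input.

    void example(void)
    {
        long i = 0;
        /* correct: "+r" tells GCC the asm both reads and writes i */
        __asm__ __volatile__ ("addq $4, %0" : "+r" (i));
        /* wrong: the same addq with : : "r" (i) would modify an
           input operand, which GCC assumes keeps its value */
    }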
---
 dgemv_n_microk_piledriver-4.c               | 247 --------------------
 kernel/x86_64/dgemv_n_microk_piledriver-4.c | 100 ++++----
 2 files changed, 50 insertions(+), 297 deletions(-)
 delete mode 100644 dgemv_n_microk_piledriver-4.c

diff --git a/dgemv_n_microk_piledriver-4.c b/dgemv_n_microk_piledriver-4.c
deleted file mode 100644
index 466931b82..000000000
--- a/dgemv_n_microk_piledriver-4.c
+++ /dev/null
@@ -1,247 +0,0 @@
-/***************************************************************************
-Copyright (c) 2014, The OpenBLAS Project
-All rights reserved.
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-1. Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in
-the documentation and/or other materials provided with the
-distribution.
-3. Neither the name of the OpenBLAS project nor the names of
-its contributors may be used to endorse or promote products
-derived from this software without specific prior written permission.
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*****************************************************************************/
-
-
-
-#define HAVE_KERNEL_4x8 1
-static void dgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLONG lda4, FLOAT *alpha) __attribute__ ((noinline));
-
-static void dgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLONG lda4, FLOAT *alpha)
-{
-
-	BLASLONG register i = 0;
-
-	__asm__  __volatile__
-	(
-	"vzeroupper			 \n\t"
-	"vbroadcastsd    (%3), %%ymm12	 \n\t"	// x0 
-	"vbroadcastsd   8(%3), %%ymm13	 \n\t"	// x1 
-	"vbroadcastsd  16(%3), %%ymm14	 \n\t"	// x2 
-	"vbroadcastsd  24(%3), %%ymm15	 \n\t"	// x3 
-	"vbroadcastsd  32(%3), %%ymm0 	 \n\t"	// x4 
-	"vbroadcastsd  40(%3), %%ymm1 	 \n\t"	// x5 
-	"vbroadcastsd  48(%3), %%ymm2 	 \n\t"	// x6 
-	"vbroadcastsd  56(%3), %%ymm3 	 \n\t"	// x7 
-
-	"vbroadcastsd    (%9), %%ymm6 	 \n\t"	// alpha 
-
-        "testq          $0x04, %1                      \n\t"
-        "jz             2f                     \n\t"
-
-	"vmovupd	(%4,%0,8), %%ymm7	       \n\t"	// 4 * y
-	"vxorpd		%%ymm4 , %%ymm4, %%ymm4        \n\t"
-	"vxorpd		%%ymm5 , %%ymm5, %%ymm5        \n\t"
-
-	"vfmadd231pd   (%5,%0,8), %%ymm12, %%ymm4      \n\t" 
-	"vfmadd231pd   (%6,%0,8), %%ymm13, %%ymm5      \n\t" 
-	"vfmadd231pd   (%7,%0,8), %%ymm14, %%ymm4      \n\t" 
-	"vfmadd231pd   (%8,%0,8), %%ymm15, %%ymm5      \n\t" 
-
-	"vfmadd231pd   (%5,%2,8), %%ymm0 , %%ymm4      \n\t" 
-	"vfmadd231pd   (%6,%2,8), %%ymm1 , %%ymm5      \n\t" 
-	"vfmadd231pd   (%7,%2,8), %%ymm2 , %%ymm4      \n\t" 
-	"vfmadd231pd   (%8,%2,8), %%ymm3 , %%ymm5      \n\t" 
-
-	"vaddpd		%%ymm4 , %%ymm5 , %%ymm5       \n\t"
-	"vmulpd		%%ymm6 , %%ymm5 , %%ymm5       \n\t"
-	"vaddpd		%%ymm7 , %%ymm5 , %%ymm5       \n\t"
-
-
-	"vmovupd  %%ymm5,   (%4,%0,8)		       \n\t"	// 4 * y
-
-        "addq		$4 , %2	  	 	       \n\t"
-        "addq		$4 , %0	  	 	       \n\t"
-	"subq	        $4 , %1			       \n\t"		
-
-        "2:                                   \n\t"
-
-        "cmpq           $0, %1                         \n\t"
-        "je             3f                      \n\t"
-
-
-	".align 16				 \n\t"
-	"1:				 \n\t"
-
-	"vxorpd		%%ymm4 , %%ymm4, %%ymm4        \n\t"
-	"vxorpd		%%ymm5 , %%ymm5, %%ymm5        \n\t"
-	"vmovupd	(%4,%0,8), %%ymm8	       \n\t"	// 4 * y
-	"vmovupd      32(%4,%0,8), %%ymm9	       \n\t"	// 4 * y
-
-	"vfmadd231pd   (%5,%0,8), %%ymm12, %%ymm4      \n\t" 
-	"vfmadd231pd 32(%5,%0,8), %%ymm12, %%ymm5      \n\t" 
-	"vfmadd231pd   (%6,%0,8), %%ymm13, %%ymm4      \n\t" 
-	"vfmadd231pd 32(%6,%0,8), %%ymm13, %%ymm5      \n\t" 
-	"vfmadd231pd   (%7,%0,8), %%ymm14, %%ymm4      \n\t" 
-	"vfmadd231pd 32(%7,%0,8), %%ymm14, %%ymm5      \n\t" 
-	"vfmadd231pd   (%8,%0,8), %%ymm15, %%ymm4      \n\t" 
-	"vfmadd231pd 32(%8,%0,8), %%ymm15, %%ymm5      \n\t" 
-
-	"vfmadd231pd   (%5,%2,8), %%ymm0 , %%ymm4      \n\t" 
-        "addq		$8 , %0	  	 	       \n\t"
-	"vfmadd231pd 32(%5,%2,8), %%ymm0 , %%ymm5      \n\t" 
-	"vfmadd231pd   (%6,%2,8), %%ymm1 , %%ymm4      \n\t" 
-	"vfmadd231pd 32(%6,%2,8), %%ymm1 , %%ymm5      \n\t" 
-	"vfmadd231pd   (%7,%2,8), %%ymm2 , %%ymm4      \n\t" 
-	"vfmadd231pd 32(%7,%2,8), %%ymm2 , %%ymm5      \n\t" 
-	"vfmadd231pd   (%8,%2,8), %%ymm3 , %%ymm4      \n\t" 
-	"vfmadd231pd 32(%8,%2,8), %%ymm3 , %%ymm5      \n\t" 
-
-	"vfmadd231pd     %%ymm6 , %%ymm4 , %%ymm8      \n\t"
-	"vfmadd231pd     %%ymm6 , %%ymm5 , %%ymm9      \n\t"
-
-        "addq		$8 , %2	  	 	      \n\t"
-	"vmovupd  %%ymm8,-64(%3,%0,8)		      \n\t"	// 4 * y
-	"subq	        $8 , %1			      \n\t"		
-	"vmovupd  %%ymm9,-32(%4,%0,8)		      \n\t"	// 4 * y
-
-	"jnz		1b		      \n\t"
-
-        "3:                             \n\t"
-	"vzeroupper			        \n\t"
-
-	:
-          "+r" (i),	// 0	
-	  "+r" (n),  	// 1
-          "+r" (lda4)   // 2
-        : 
-          "r" (x),      // 3
-          "r" (y),      // 4
-          "r" (ap[0]),  // 5
-          "r" (ap[1]),  // 6
-          "r" (ap[2]),  // 7
-          "r" (ap[3]),  // 8
-          "r" (alpha)   // 9
-	: "cc", 
-	  "%xmm0", "%xmm1", 
-	  "%xmm2", "%xmm3", 
-	  "%xmm4", "%xmm5", 
-	  "%xmm6", "%xmm7", 
-	  "%xmm8", "%xmm9", 
-	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
-	  "memory"
-	);
-
-} 
-
-
-
-#define HAVE_KERNEL_4x4 1
-static void dgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT *alpha) __attribute__ ((noinline));
-
-static void dgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT *alpha)
-{
-
-	BLASLONG register i = 0;
-
-	__asm__  __volatile__
-	(
-	"vzeroupper			 \n\t"
-	"vbroadcastsd    (%2), %%ymm12	 \n\t"	// x0 
-	"vbroadcastsd   8(%2), %%ymm13	 \n\t"	// x1 
-	"vbroadcastsd  16(%2), %%ymm14	 \n\t"	// x2 
-	"vbroadcastsd  24(%2), %%ymm15	 \n\t"	// x3 
-
-	"vbroadcastsd    (%8), %%ymm6 	 \n\t"	// alpha 
-
-        "testq          $0x04, %1                      \n\t"
-        "jz             2f                     \n\t"
-
-	"vxorpd		%%ymm4 , %%ymm4, %%ymm4        \n\t"
-	"vxorpd		%%ymm5 , %%ymm5, %%ymm5        \n\t"
-	"vmovupd	(%3,%0,8), %%ymm7	       \n\t"	// 4 * y
-
-	"vfmadd231pd   (%4,%0,8), %%ymm12, %%ymm4      \n\t" 
-	"vfmadd231pd   (%5,%0,8), %%ymm13, %%ymm5      \n\t" 
-	"vfmadd231pd   (%6,%0,8), %%ymm14, %%ymm4      \n\t" 
-	"vfmadd231pd   (%7,%0,8), %%ymm15, %%ymm5      \n\t" 
-
-	"vaddpd		%%ymm4 , %%ymm5 , %%ymm5       \n\t"
-	"vmulpd		%%ymm6 , %%ymm5 , %%ymm5       \n\t"
-	"vaddpd		%%ymm7 , %%ymm5 , %%ymm5       \n\t"
-
-	"vmovupd  %%ymm5,   (%3,%0,8)		       \n\t"	// 4 * y
-
-        "addq		$4 , %0	  	 	       \n\t"
-	"subq	        $4 , %1			       \n\t"		
-
-        "2:                                   \n\t"
-
-        "cmpq           $0, %1                         \n\t"
-        "je             3f                       \n\t"
-
-
-	".align 16				 \n\t"
-	"1:				 \n\t"
-	"vxorpd		%%ymm4 , %%ymm4, %%ymm4        \n\t"
-	"vxorpd		%%ymm5 , %%ymm5, %%ymm5        \n\t"
-	"vmovupd	(%3,%0,8), %%ymm8	       \n\t"	// 4 * y
-	"vmovupd      32(%3,%0,8), %%ymm9	       \n\t"	// 4 * y
-
-	"vfmadd231pd   (%4,%0,8), %%ymm12, %%ymm4      \n\t" 
-	"vfmadd231pd 32(%4,%0,8), %%ymm12, %%ymm5      \n\t" 
-	"vfmadd231pd   (%5,%0,8), %%ymm13, %%ymm4      \n\t" 
-	"vfmadd231pd 32(%5,%0,8), %%ymm13, %%ymm5      \n\t" 
-	"vfmadd231pd   (%6,%0,8), %%ymm14, %%ymm4      \n\t" 
-	"vfmadd231pd 32(%6,%0,8), %%ymm14, %%ymm5      \n\t" 
-	"vfmadd231pd   (%7,%0,8), %%ymm15, %%ymm4      \n\t" 
-	"vfmadd231pd 32(%7,%0,8), %%ymm15, %%ymm5      \n\t" 
-
-	"vfmadd231pd     %%ymm6 , %%ymm4 , %%ymm8      \n\t"
-	"vfmadd231pd     %%ymm6 , %%ymm5 , %%ymm9      \n\t"
-
-	"vmovupd  %%ymm8,   (%3,%0,8)		      \n\t"	// 4 * y
-	"vmovupd  %%ymm9, 32(%3,%0,8)		      \n\t"	// 4 * y
-
-        "addq		$8 , %0	  	 	      \n\t"
-	"subq	        $8 , %1			      \n\t"		
-	"jnz		1b		      \n\t"
-
-        "3:                                    \n\t"
-	"vzeroupper			              \n\t"
-
-	:
-          "+r" (i),	// 0	
-	  "+r" (n)  	// 1
-        : 
-          "r" (x),      // 2
-          "r" (y),      // 3
-          "r" (ap[0]),  // 4
-          "r" (ap[1]),  // 5
-          "r" (ap[2]),  // 6
-          "r" (ap[3]),  // 7
-          "r" (alpha)   // 8
-	: "cc", 
-	  "%xmm4", "%xmm5", 
-	  "%xmm6", "%xmm7", 
-	  "%xmm8", "%xmm9", 
-	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
-	  "memory"
-	);
-
-} 
-
-
diff --git a/kernel/x86_64/dgemv_n_microk_piledriver-4.c b/kernel/x86_64/dgemv_n_microk_piledriver-4.c
index 530780bab..466931b82 100644
--- a/kernel/x86_64/dgemv_n_microk_piledriver-4.c
+++ b/kernel/x86_64/dgemv_n_microk_piledriver-4.c
@@ -38,42 +38,42 @@ static void dgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLO
 	__asm__  __volatile__
 	(
 	"vzeroupper			 \n\t"
-	"vbroadcastsd    (%2), %%ymm12	 \n\t"	// x0 
-	"vbroadcastsd   8(%2), %%ymm13	 \n\t"	// x1 
-	"vbroadcastsd  16(%2), %%ymm14	 \n\t"	// x2 
-	"vbroadcastsd  24(%2), %%ymm15	 \n\t"	// x3 
-	"vbroadcastsd  32(%2), %%ymm0 	 \n\t"	// x4 
-	"vbroadcastsd  40(%2), %%ymm1 	 \n\t"	// x5 
-	"vbroadcastsd  48(%2), %%ymm2 	 \n\t"	// x6 
-	"vbroadcastsd  56(%2), %%ymm3 	 \n\t"	// x7 
+	"vbroadcastsd    (%3), %%ymm12	 \n\t"	// x0 
+	"vbroadcastsd   8(%3), %%ymm13	 \n\t"	// x1 
+	"vbroadcastsd  16(%3), %%ymm14	 \n\t"	// x2 
+	"vbroadcastsd  24(%3), %%ymm15	 \n\t"	// x3 
+	"vbroadcastsd  32(%3), %%ymm0 	 \n\t"	// x4 
+	"vbroadcastsd  40(%3), %%ymm1 	 \n\t"	// x5 
+	"vbroadcastsd  48(%3), %%ymm2 	 \n\t"	// x6 
+	"vbroadcastsd  56(%3), %%ymm3 	 \n\t"	// x7 
 
 	"vbroadcastsd    (%9), %%ymm6 	 \n\t"	// alpha 
 
         "testq          $0x04, %1                      \n\t"
         "jz             2f                     \n\t"
 
-	"vmovupd	(%3,%0,8), %%ymm7	       \n\t"	// 4 * y
+	"vmovupd	(%4,%0,8), %%ymm7	       \n\t"	// 4 * y
 	"vxorpd		%%ymm4 , %%ymm4, %%ymm4        \n\t"
 	"vxorpd		%%ymm5 , %%ymm5, %%ymm5        \n\t"
 
-	"vfmadd231pd   (%4,%0,8), %%ymm12, %%ymm4      \n\t" 
-	"vfmadd231pd   (%5,%0,8), %%ymm13, %%ymm5      \n\t" 
-	"vfmadd231pd   (%6,%0,8), %%ymm14, %%ymm4      \n\t" 
-	"vfmadd231pd   (%7,%0,8), %%ymm15, %%ymm5      \n\t" 
+	"vfmadd231pd   (%5,%0,8), %%ymm12, %%ymm4      \n\t" 
+	"vfmadd231pd   (%6,%0,8), %%ymm13, %%ymm5      \n\t" 
+	"vfmadd231pd   (%7,%0,8), %%ymm14, %%ymm4      \n\t" 
+	"vfmadd231pd   (%8,%0,8), %%ymm15, %%ymm5      \n\t" 
 
-	"vfmadd231pd   (%4,%8,8), %%ymm0 , %%ymm4      \n\t" 
-	"vfmadd231pd   (%5,%8,8), %%ymm1 , %%ymm5      \n\t" 
-	"vfmadd231pd   (%6,%8,8), %%ymm2 , %%ymm4      \n\t" 
-	"vfmadd231pd   (%7,%8,8), %%ymm3 , %%ymm5      \n\t" 
+	"vfmadd231pd   (%5,%2,8), %%ymm0 , %%ymm4      \n\t" 
+	"vfmadd231pd   (%6,%2,8), %%ymm1 , %%ymm5      \n\t" 
+	"vfmadd231pd   (%7,%2,8), %%ymm2 , %%ymm4      \n\t" 
+	"vfmadd231pd   (%8,%2,8), %%ymm3 , %%ymm5      \n\t" 
 
 	"vaddpd		%%ymm4 , %%ymm5 , %%ymm5       \n\t"
 	"vmulpd		%%ymm6 , %%ymm5 , %%ymm5       \n\t"
 	"vaddpd		%%ymm7 , %%ymm5 , %%ymm5       \n\t"
 
 
-	"vmovupd  %%ymm5,   (%3,%0,8)		       \n\t"	// 4 * y
+	"vmovupd  %%ymm5,   (%4,%0,8)		       \n\t"	// 4 * y
 
-        "addq		$4 , %8	  	 	       \n\t"
+        "addq		$4 , %2	  	 	       \n\t"
         "addq		$4 , %0	  	 	       \n\t"
 	"subq	        $4 , %1			       \n\t"		
 
@@ -88,35 +88,35 @@ static void dgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLO
 
 	"vxorpd		%%ymm4 , %%ymm4, %%ymm4        \n\t"
 	"vxorpd		%%ymm5 , %%ymm5, %%ymm5        \n\t"
-	"vmovupd	(%3,%0,8), %%ymm8	       \n\t"	// 4 * y
-	"vmovupd      32(%3,%0,8), %%ymm9	       \n\t"	// 4 * y
-
-	"vfmadd231pd   (%4,%0,8), %%ymm12, %%ymm4      \n\t" 
-	"vfmadd231pd 32(%4,%0,8), %%ymm12, %%ymm5      \n\t" 
-	"vfmadd231pd   (%5,%0,8), %%ymm13, %%ymm4      \n\t" 
-	"vfmadd231pd 32(%5,%0,8), %%ymm13, %%ymm5      \n\t" 
-	"vfmadd231pd   (%6,%0,8), %%ymm14, %%ymm4      \n\t" 
-	"vfmadd231pd 32(%6,%0,8), %%ymm14, %%ymm5      \n\t" 
-	"vfmadd231pd   (%7,%0,8), %%ymm15, %%ymm4      \n\t" 
-	"vfmadd231pd 32(%7,%0,8), %%ymm15, %%ymm5      \n\t" 
-
-	"vfmadd231pd   (%4,%8,8), %%ymm0 , %%ymm4      \n\t" 
+	"vmovupd	(%4,%0,8), %%ymm8	       \n\t"	// 4 * y
+	"vmovupd      32(%4,%0,8), %%ymm9	       \n\t"	// 4 * y
+
+	"vfmadd231pd   (%5,%0,8), %%ymm12, %%ymm4      \n\t" 
+	"vfmadd231pd 32(%5,%0,8), %%ymm12, %%ymm5      \n\t" 
+	"vfmadd231pd   (%6,%0,8), %%ymm13, %%ymm4      \n\t" 
+	"vfmadd231pd 32(%6,%0,8), %%ymm13, %%ymm5      \n\t" 
+	"vfmadd231pd   (%7,%0,8), %%ymm14, %%ymm4      \n\t" 
+	"vfmadd231pd 32(%7,%0,8), %%ymm14, %%ymm5      \n\t" 
+	"vfmadd231pd   (%8,%0,8), %%ymm15, %%ymm4      \n\t" 
+	"vfmadd231pd 32(%8,%0,8), %%ymm15, %%ymm5      \n\t" 
+
+	"vfmadd231pd   (%5,%2,8), %%ymm0 , %%ymm4      \n\t" 
         "addq		$8 , %0	  	 	       \n\t"
-	"vfmadd231pd 32(%4,%8,8), %%ymm0 , %%ymm5      \n\t" 
-	"vfmadd231pd   (%5,%8,8), %%ymm1 , %%ymm4      \n\t" 
-	"vfmadd231pd 32(%5,%8,8), %%ymm1 , %%ymm5      \n\t" 
-	"vfmadd231pd   (%6,%8,8), %%ymm2 , %%ymm4      \n\t" 
-	"vfmadd231pd 32(%6,%8,8), %%ymm2 , %%ymm5      \n\t" 
-	"vfmadd231pd   (%7,%8,8), %%ymm3 , %%ymm4      \n\t" 
-	"vfmadd231pd 32(%7,%8,8), %%ymm3 , %%ymm5      \n\t" 
+	"vfmadd231pd 32(%5,%2,8), %%ymm0 , %%ymm5      \n\t" 
+	"vfmadd231pd   (%6,%2,8), %%ymm1 , %%ymm4      \n\t" 
+	"vfmadd231pd 32(%6,%2,8), %%ymm1 , %%ymm5      \n\t" 
+	"vfmadd231pd   (%7,%2,8), %%ymm2 , %%ymm4      \n\t" 
+	"vfmadd231pd 32(%7,%2,8), %%ymm2 , %%ymm5      \n\t" 
+	"vfmadd231pd   (%8,%2,8), %%ymm3 , %%ymm4      \n\t" 
+	"vfmadd231pd 32(%8,%2,8), %%ymm3 , %%ymm5      \n\t" 
 
 	"vfmadd231pd     %%ymm6 , %%ymm4 , %%ymm8      \n\t"
 	"vfmadd231pd     %%ymm6 , %%ymm5 , %%ymm9      \n\t"
 
-        "addq		$8 , %8	  	 	      \n\t"
+        "addq		$8 , %2	  	 	      \n\t"
 	"vmovupd  %%ymm8,-64(%3,%0,8)		      \n\t"	// 4 * y
 	"subq	        $8 , %1			      \n\t"		
-	"vmovupd  %%ymm9,-32(%3,%0,8)		      \n\t"	// 4 * y
+	"vmovupd  %%ymm9,-32(%4,%0,8)		      \n\t"	// 4 * y
 
 	"jnz		1b		      \n\t"
 
@@ -125,15 +125,15 @@ static void dgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLO
 
 	:
           "+r" (i),	// 0	
-	  "+r" (n)  	// 1
+	  "+r" (n),  	// 1
+          "+r" (lda4)   // 2
         : 
-          "r" (x),      // 2
-          "r" (y),      // 3
-          "r" (ap[0]),  // 4
-          "r" (ap[1]),  // 5
-          "r" (ap[2]),  // 6
-          "r" (ap[3]),  // 7
-          "r" (lda4),   // 8
+          "r" (x),      // 3
+          "r" (y),      // 4
+          "r" (ap[0]),  // 5
+          "r" (ap[1]),  // 6
+          "r" (ap[2]),  // 7
+          "r" (ap[3]),  // 8
           "r" (alpha)   // 9
 	: "cc", 
 	  "%xmm0", "%xmm1",