/* ----------------------------------------------------------------------
 * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
 *
 * $Date:        19. March 2015
 * $Revision:    V.1.4.5
 *
 * Project:      CMSIS DSP Library
 * Title:        arm_mat_mult_fast_q31.c
 *
 * Description:  Q31 matrix multiplication (fast variant).
 *
 * Target Processor: Cortex-M4/Cortex-M3
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   - Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   - Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   - Neither the name of ARM LIMITED nor the names of its contributors
 *     may be used to endorse or promote products derived from this
 *     software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * -------------------------------------------------------------------- */

#include "arm_math.h"

/**
 * @ingroup groupMatrix
 */

/**
 * @addtogroup MatrixMult
 * @{
 */

/**
 * @brief Q31 matrix multiplication (fast variant) for Cortex-M3 and Cortex-M4
 * @param[in]  *pSrcA points to the first input matrix structure
 * @param[in]  *pSrcB points to the second input matrix structure
 * @param[out] *pDst  points to the output matrix structure
 * @return The function returns either
 * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
 *
 * @details
 * <b>Scaling and Overflow Behavior:</b>
 *
 * \par
 * The difference between the function arm_mat_mult_q31() and this fast variant is that
 * the fast variant uses a 32-bit rather than a 64-bit accumulator.
 * The result of each 1.31 x 1.31 multiplication is truncated to
 * 2.30 format. These intermediate results are accumulated in a 32-bit register in 2.30
 * format. Finally, the accumulator is saturated and converted to a 1.31 result.
 *
 * \par
 * The fast version has the same overflow behavior as the standard version but provides
 * less precision since it discards the low 32 bits of each multiplication result.
 * In order to avoid overflows completely, the input signals must be scaled down.
 * Scale down one of the input matrices by log2(numColsA) bits to
 * avoid overflows, as a total of numColsA additions are computed internally for each
 * output element. A usage sketch illustrating this scaling is given at the end of this file.
 *
 * \par
 * See <code>arm_mat_mult_q31()</code> for a slower implementation of this function
 * which uses 64-bit accumulation to provide higher precision.
 */

arm_status arm_mat_mult_fast_q31(
  const arm_matrix_instance_q31 * pSrcA,
  const arm_matrix_instance_q31 * pSrcB,
  arm_matrix_instance_q31 * pDst)
{
  q31_t *pIn1 = pSrcA->pData;                      /* input data matrix pointer A */
  q31_t *pIn2 = pSrcB->pData;                      /* input data matrix pointer B */
  q31_t *pInA = pSrcA->pData;                      /* input data matrix pointer A */
  //  q31_t *pSrcB = pSrcB->pData;                 /* input data matrix pointer B */
  q31_t *pOut = pDst->pData;                       /* output data matrix pointer */
  q31_t *px;                                       /* Temporary output data matrix pointer */
  q31_t sum;                                       /* Accumulator */
  uint16_t numRowsA = pSrcA->numRows;              /* number of rows of input matrix A */
  uint16_t numColsB = pSrcB->numCols;              /* number of columns of input matrix B */
  uint16_t numColsA = pSrcA->numCols;              /* number of columns of input matrix A */
  uint16_t col, i = 0u, j, row = numRowsA, colCnt; /* loop counters */
  arm_status status;                               /* status of matrix multiplication */
  q31_t inA1, inA2, inA3, inA4, inB1, inB2, inB3, inB4;

#ifdef ARM_MATH_MATRIX_CHECK

  /* Check for matrix mismatch condition */
  if((pSrcA->numCols != pSrcB->numRows) ||
     (pSrcA->numRows != pDst->numRows) || (pSrcB->numCols != pDst->numCols))
  {
    /* Set status as ARM_MATH_SIZE_MISMATCH */
    status = ARM_MATH_SIZE_MISMATCH;
  }
  else
#endif /* #ifdef ARM_MATH_MATRIX_CHECK */

  {
    /* The following loop performs the dot-product of each row in pSrcA with each column in pSrcB */
    /* row loop */
    do
    {
      /* Output pointer is set to starting address of the row being processed */
      px = pOut + i;

      /* For every row-wise process, the column loop counter is initialized */
      col = numColsB;

      /* For every row-wise process, the pIn2 pointer is set
       ** to the starting address of the pSrcB data */
      pIn2 = pSrcB->pData;

      j = 0u;

      /* column loop */
      do
      {
        /* Set the variable sum, which acts as the accumulator, to zero */
        sum = 0;

        /* Initialize the pointer pIn1 to point to the starting address of pInA */
        pIn1 = pInA;

        /* Apply loop unrolling and compute 4 MACs simultaneously. */
        colCnt = numColsA >> 2;

        /* matrix multiplication */
        while(colCnt > 0u)
        {
          /* c(m,n) = a(1,1)*b(1,1) + a(1,2) * b(2,1) + .... + a(m,p)*b(p,n) */
          /* Perform the multiply-accumulates */
          inB1 = *pIn2;
          pIn2 += numColsB;

          inA1 = pIn1[0];
          inA2 = pIn1[1];

          inB2 = *pIn2;
          pIn2 += numColsB;

          inB3 = *pIn2;
          pIn2 += numColsB;

          sum = (q31_t) ((((q63_t) sum << 32) + ((q63_t) inA1 * inB1)) >> 32);
          sum = (q31_t) ((((q63_t) sum << 32) + ((q63_t) inA2 * inB2)) >> 32);

          inA3 = pIn1[2];
          inA4 = pIn1[3];

          inB4 = *pIn2;
          pIn2 += numColsB;

          sum = (q31_t) ((((q63_t) sum << 32) + ((q63_t) inA3 * inB3)) >> 32);
          sum = (q31_t) ((((q63_t) sum << 32) + ((q63_t) inA4 * inB4)) >> 32);

          pIn1 += 4u;

          /* Decrement the loop counter */
          colCnt--;
        }

        /* If the number of columns of pSrcA is not a multiple of 4, compute any remaining output samples here.
         ** No loop unrolling is used. */
        colCnt = numColsA % 0x4u;

        while(colCnt > 0u)
        {
          /* c(m,n) = a(1,1)*b(1,1) + a(1,2) * b(2,1) + .... + a(m,p)*b(p,n) */
          /* Perform the multiply-accumulates */
          sum = (q31_t) ((((q63_t) sum << 32) +
                          ((q63_t) * pIn1++ * (*pIn2))) >> 32);
          pIn2 += numColsB;

          /* Decrement the loop counter */
          colCnt--;
        }

        /* Convert the result from 2.30 to 1.31 format and store in destination buffer */
        *px++ = sum << 1;

        /* Update the pointer pIn2 to point to the starting address of the next column */
        j++;
        pIn2 = pSrcB->pData + j;

        /* Decrement the column loop counter */
        col--;

      } while(col > 0u);

      /* Update the pointer pInA to point to the starting address of the next row */
      i = i + numColsB;
      pInA = pInA + numColsA;

      /* Decrement the row loop counter */
      row--;

    } while(row > 0u);

    /* Set status as ARM_MATH_SUCCESS */
    status = ARM_MATH_SUCCESS;
  }

  /* Return to application */
  return (status);
}

/**
 * @} end of MatrixMult group
 */
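
/* ----------------------------------------------------------------------
 * Usage sketch (not part of the library; compiled out via #if 0).
 * A minimal example of the scaling guidance documented above: with
 * numColsA = 2, one operand is pre-scaled down by log2(2) = 1 bit so the
 * 2.30 accumulator cannot overflow. The function name, buffer names and
 * data values below are illustrative assumptions, not part of the
 * CMSIS DSP API; arm_mat_init_q31() and arm_mat_mult_fast_q31() are the
 * library calls (declared in arm_math.h, included above).
 * -------------------------------------------------------------------- */
#if 0
void arm_mat_mult_fast_q31_example(void)
{
  /* 2x2 operands in Q31 format: roughly 0.25 and 0.5 respectively */
  static q31_t srcAData[4] = { 0x20000000, 0x20000000,
                               0x20000000, 0x20000000 };
  static q31_t srcBData[4] = { 0x40000000, 0x40000000,
                               0x40000000, 0x40000000 };
  static q31_t dstData[4];

  arm_matrix_instance_q31 srcA, srcB, dst;
  arm_status status;
  uint32_t k;

  /* For reference, each 1.31 x 1.31 product is truncated to 2.30 by the
   ** fast variant: e.g. 0x40000000 * 0x40000000 = 0x1000000000000000, and
   ** shifting that 64-bit product right by 32 gives 0x10000000, i.e. 0.25
   ** in 2.30 format. */

  /* Scale one operand down by log2(numColsA) = 1 bit to avoid overflow
   ** in the 2.30 accumulator, as recommended in the documentation above */
  for (k = 0u; k < 4u; k++)
  {
    srcAData[k] = srcAData[k] >> 1;
  }

  /* Initialize the matrix instances */
  arm_mat_init_q31(&srcA, 2, 2, srcAData);
  arm_mat_init_q31(&srcB, 2, 2, srcBData);
  arm_mat_init_q31(&dst, 2, 2, dstData);

  /* Returns ARM_MATH_SUCCESS when the matrix dimensions agree */
  status = arm_mat_mult_fast_q31(&srcA, &srcB, &dst);
  (void) status;
}
#endif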