go home Home | Main Page | Topics | Namespace List | Class Hierarchy | Alphabetical List | Data Structures | File List | Namespace Members | Data Fields | Globals | Related Pages
Loading...
Searching...
No Matches
vnl_adjugate_fixed.h
Go to the documentation of this file.
1// This is core/vnl/vnl_adjugate_fixed.h
2#ifndef vnl_adjugate_fixed_h_
3#define vnl_adjugate_fixed_h_
4//:
5// \file
6// \brief Calculates adjugate or cofactor matrix of a small vnl_matrix_fixed.
7// Code is only a small adaptation from Peter Vanroose's vnl_inverse.h
8// adjugate == inverse * det   (since inverse == adjugate / det)
9// cofactor == adjugate^T
10// \author Floris Berendsen
11// \date 18 April 2013
12//
13// \verbatim
14// Code is only a small adaptation from Peter Vanroose's vnl_inverse.h
15// \endverbatim
16
17#include <vnl/vnl_matrix_fixed.h>
18#include <vnl/vnl_vector_fixed.h>
19#include <vnl/vnl_matrix.h>
20#include <vnl/vnl_det.h>
21
22#include <cassert>
23
24//: Calculates adjugate of a small vnl_matrix_fixed (not using svd)
25// This allows you to write e.g.
26//
27// x = vnl_adjugate(A) * b;
28//
29// Note that this function is inlined (except for the call to vnl_det()),
30// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
31// since that one is using svd.
32//
33// \relatesalso vnl_matrix_fixed
34
35template <typename T>
36vnl_matrix_fixed<T, 1, 1>
37vnl_adjugate(const vnl_matrix_fixed<T, 1, 1> & m)
38{
39 return vnl_matrix_fixed<T, 1, 1>(m(0, 0));
40}
41
42//: Calculates adjugate of a small vnl_matrix_fixed (not using svd)
43// This allows you to write e.g.
44//
45// x = vnl_adjugate(A) * b;
46//
47// Note that this function is inlined (except for the call to vnl_det()),
48// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
49// since that one is using svd.
50//
51// \relatesalso vnl_matrix_fixed
52
53template <typename T>
54vnl_matrix_fixed<T, 2, 2>
55vnl_adjugate(const vnl_matrix_fixed<T, 2, 2> & m)
56{
57 T d[4];
58 d[0] = m(1, 1);
59 d[1] = -m(0, 1);
60 d[3] = m(0, 0);
61 d[2] = -m(1, 0);
62 return vnl_matrix_fixed<T, 2, 2>(d);
63}
64
65//: Calculates adjugate of a small vnl_matrix_fixed (not using svd)
66// This allows you to write e.g.
67//
68// x = vnl_adjugate_fixed(A) * b;
69//
70// Note that this function is inlined (except for the call to vnl_det()),
71// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
72// since that one is using svd.
73//
74// \relatesalso vnl_matrix_fixed
75
76template <typename T>
77vnl_matrix_fixed<T, 3, 3>
78vnl_adjugate(const vnl_matrix_fixed<T, 3, 3> & m)
79{
80 T d[9];
81 d[0] = (m(1, 1) * m(2, 2) - m(1, 2) * m(2, 1));
82 d[1] = (m(2, 1) * m(0, 2) - m(2, 2) * m(0, 1));
83 d[2] = (m(0, 1) * m(1, 2) - m(0, 2) * m(1, 1));
84 d[3] = (m(1, 2) * m(2, 0) - m(1, 0) * m(2, 2));
85 d[4] = (m(0, 0) * m(2, 2) - m(0, 2) * m(2, 0));
86 d[5] = (m(1, 0) * m(0, 2) - m(1, 2) * m(0, 0));
87 d[6] = (m(1, 0) * m(2, 1) - m(1, 1) * m(2, 0));
88 d[7] = (m(0, 1) * m(2, 0) - m(0, 0) * m(2, 1));
89 d[8] = (m(0, 0) * m(1, 1) - m(0, 1) * m(1, 0));
90 return vnl_matrix_fixed<T, 3, 3>(d);
91}
92
93//: Calculates adjugate_fixed of a small vnl_matrix_fixed (not using svd)
94// This allows you to write e.g.
95//
96// x = vnl_adjugate_fixed(A) * b;
97//
98// Note that this function is inlined (except for the call to vnl_det()),
99// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
100// since that one is using svd.
101//
102// \relatesalso vnl_matrix_fixed
103
template <typename T>
vnl_matrix_fixed<T, 4, 4>
vnl_adjugate(const vnl_matrix_fixed<T, 4, 4> & m)
{
  // Classical adjugate by explicit cofactor expansion: entry d[4*i+j] is the
  // cofactor of m(j,i), i.e. adj(m) == cof(m)^T, so that m * adj(m) == det(m) * I.
  // Each entry is the signed determinant of a 3x3 minor written out as six
  // three-factor products.
  // NOTE(review): d appears to fill the result row-wise via the
  // vnl_matrix_fixed(const T*) constructor, consistent with the 2x2/3x3
  // overloads -- confirm against vnl_matrix_fixed documentation.
  T d[16];
  // Row 0 of the adjugate: cofactors of column 0 of m.
  d[0] = m(1, 1) * m(2, 2) * m(3, 3) - m(1, 1) * m(2, 3) * m(3, 2) - m(2, 1) * m(1, 2) * m(3, 3) +
         m(2, 1) * m(1, 3) * m(3, 2) + m(3, 1) * m(1, 2) * m(2, 3) - m(3, 1) * m(1, 3) * m(2, 2);
  d[1] = -m(0, 1) * m(2, 2) * m(3, 3) + m(0, 1) * m(2, 3) * m(3, 2) + m(2, 1) * m(0, 2) * m(3, 3) -
         m(2, 1) * m(0, 3) * m(3, 2) - m(3, 1) * m(0, 2) * m(2, 3) + m(3, 1) * m(0, 3) * m(2, 2);
  d[2] = m(0, 1) * m(1, 2) * m(3, 3) - m(0, 1) * m(1, 3) * m(3, 2) - m(1, 1) * m(0, 2) * m(3, 3) +
         m(1, 1) * m(0, 3) * m(3, 2) + m(3, 1) * m(0, 2) * m(1, 3) - m(3, 1) * m(0, 3) * m(1, 2);
  d[3] = -m(0, 1) * m(1, 2) * m(2, 3) + m(0, 1) * m(1, 3) * m(2, 2) + m(1, 1) * m(0, 2) * m(2, 3) -
         m(1, 1) * m(0, 3) * m(2, 2) - m(2, 1) * m(0, 2) * m(1, 3) + m(2, 1) * m(0, 3) * m(1, 2);
  // Row 1 of the adjugate: cofactors of column 1 of m.
  d[4] = -m(1, 0) * m(2, 2) * m(3, 3) + m(1, 0) * m(2, 3) * m(3, 2) + m(2, 0) * m(1, 2) * m(3, 3) -
         m(2, 0) * m(1, 3) * m(3, 2) - m(3, 0) * m(1, 2) * m(2, 3) + m(3, 0) * m(1, 3) * m(2, 2);
  d[5] = m(0, 0) * m(2, 2) * m(3, 3) - m(0, 0) * m(2, 3) * m(3, 2) - m(2, 0) * m(0, 2) * m(3, 3) +
         m(2, 0) * m(0, 3) * m(3, 2) + m(3, 0) * m(0, 2) * m(2, 3) - m(3, 0) * m(0, 3) * m(2, 2);
  d[6] = -m(0, 0) * m(1, 2) * m(3, 3) + m(0, 0) * m(1, 3) * m(3, 2) + m(1, 0) * m(0, 2) * m(3, 3) -
         m(1, 0) * m(0, 3) * m(3, 2) - m(3, 0) * m(0, 2) * m(1, 3) + m(3, 0) * m(0, 3) * m(1, 2);
  d[7] = m(0, 0) * m(1, 2) * m(2, 3) - m(0, 0) * m(1, 3) * m(2, 2) - m(1, 0) * m(0, 2) * m(2, 3) +
         m(1, 0) * m(0, 3) * m(2, 2) + m(2, 0) * m(0, 2) * m(1, 3) - m(2, 0) * m(0, 3) * m(1, 2);
  // Row 2 of the adjugate: cofactors of column 2 of m.
  d[8] = m(1, 0) * m(2, 1) * m(3, 3) - m(1, 0) * m(2, 3) * m(3, 1) - m(2, 0) * m(1, 1) * m(3, 3) +
         m(2, 0) * m(1, 3) * m(3, 1) + m(3, 0) * m(1, 1) * m(2, 3) - m(3, 0) * m(1, 3) * m(2, 1);
  d[9] = -m(0, 0) * m(2, 1) * m(3, 3) + m(0, 0) * m(2, 3) * m(3, 1) + m(2, 0) * m(0, 1) * m(3, 3) -
         m(2, 0) * m(0, 3) * m(3, 1) - m(3, 0) * m(0, 1) * m(2, 3) + m(3, 0) * m(0, 3) * m(2, 1);
  d[10] = m(0, 0) * m(1, 1) * m(3, 3) - m(0, 0) * m(1, 3) * m(3, 1) - m(1, 0) * m(0, 1) * m(3, 3) +
          m(1, 0) * m(0, 3) * m(3, 1) + m(3, 0) * m(0, 1) * m(1, 3) - m(3, 0) * m(0, 3) * m(1, 1);
  d[11] = -m(0, 0) * m(1, 1) * m(2, 3) + m(0, 0) * m(1, 3) * m(2, 1) + m(1, 0) * m(0, 1) * m(2, 3) -
          m(1, 0) * m(0, 3) * m(2, 1) - m(2, 0) * m(0, 1) * m(1, 3) + m(2, 0) * m(0, 3) * m(1, 1);
  // Row 3 of the adjugate: cofactors of column 3 of m.
  d[12] = -m(1, 0) * m(2, 1) * m(3, 2) + m(1, 0) * m(2, 2) * m(3, 1) + m(2, 0) * m(1, 1) * m(3, 2) -
          m(2, 0) * m(1, 2) * m(3, 1) - m(3, 0) * m(1, 1) * m(2, 2) + m(3, 0) * m(1, 2) * m(2, 1);
  d[13] = m(0, 0) * m(2, 1) * m(3, 2) - m(0, 0) * m(2, 2) * m(3, 1) - m(2, 0) * m(0, 1) * m(3, 2) +
          m(2, 0) * m(0, 2) * m(3, 1) + m(3, 0) * m(0, 1) * m(2, 2) - m(3, 0) * m(0, 2) * m(2, 1);
  d[14] = -m(0, 0) * m(1, 1) * m(3, 2) + m(0, 0) * m(1, 2) * m(3, 1) + m(1, 0) * m(0, 1) * m(3, 2) -
          m(1, 0) * m(0, 2) * m(3, 1) - m(3, 0) * m(0, 1) * m(1, 2) + m(3, 0) * m(0, 2) * m(1, 1);
  d[15] = m(0, 0) * m(1, 1) * m(2, 2) - m(0, 0) * m(1, 2) * m(2, 1) - m(1, 0) * m(0, 1) * m(2, 2) +
          m(1, 0) * m(0, 2) * m(2, 1) + m(2, 0) * m(0, 1) * m(1, 2) - m(2, 0) * m(0, 2) * m(1, 1);
  return vnl_matrix_fixed<T, 4, 4>(d);
}
143
144//: Calculates adjugate_fixed of a small vnl_matrix_fixed (not using svd)
145// This allows you to write e.g.
146//
147// x = vnl_adjugate_fixed(A) * b;
148//
149// Note that this function is inlined (except for the call to vnl_det()),
150// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
151// since that one is using svd.
152//
153// \relatesalso vnl_matrix
154
155template <typename T>
156vnl_matrix<T>
157vnl_adjugate_asfixed(const vnl_matrix<T> & m)
158{
159 assert(m.rows() == m.columns());
160 assert(m.rows() <= 4);
161 if (m.rows() == 1)
162 return vnl_matrix<T>(1, 1, T(1) / m(0, 0));
163 else if (m.rows() == 2)
164 return vnl_adjugate(vnl_matrix_fixed<T, 2, 2>(m)).as_ref();
165 else if (m.rows() == 3)
166 return vnl_adjugate(vnl_matrix_fixed<T, 3, 3>(m)).as_ref();
167 else
168 return vnl_adjugate(vnl_matrix_fixed<T, 4, 4>(m)).as_ref();
169}
170
171//: Calculates transpose of the adjugate_fixed of a small vnl_matrix_fixed (not using svd)
172// This allows you to write e.g.
173//
174// x = vnl_cofactor(A) * b;
175//
176// Note that this function is inlined (except for the call to vnl_det()),
177// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
178// since that one is using svd. This is also faster than using
179//
180// x = vnl_adjugate_fixed(A).transpose() * b;
181//
182// \relatesalso vnl_matrix_fixed
183
184template <typename T>
185vnl_matrix_fixed<T, 1, 1>
186vnl_cofactor(const vnl_matrix_fixed<T, 1, 1> & m)
187{
188 return vnl_matrix_fixed<T, 1, 1>(T(1) / m(0, 0));
189}
190
191//: Calculates transpose of the adjugate_fixed of a small vnl_matrix_fixed (not using svd)
192// This allows you to write e.g.
193//
194// x = vnl_cofactor(A) * b;
195//
196// Note that this function is inlined (except for the call to vnl_det()),
197// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
198// since that one is using svd. This is also faster than using
199//
200// x = vnl_adjugate_fixed(A).transpose() * b;
201//
202// \relatesalso vnl_matrix_fixed
203
204template <typename T>
205vnl_matrix_fixed<T, 2, 2>
206vnl_cofactor(const vnl_matrix_fixed<T, 2, 2> & m)
207{
208
209 T d[4];
210 d[0] = m(1, 1);
211 d[2] = -m(0, 1);
212 d[3] = m(0, 0);
213 d[1] = -m(1, 0);
214 return vnl_matrix_fixed<T, 2, 2>(d);
215}
216
217//: Calculates transpose of the adjugate_fixed of a small vnl_matrix_fixed (not using svd)
218// This allows you to write e.g.
219//
220// x = vnl_cofactor(A) * b;
221//
222// Note that this function is inlined (except for the call to vnl_det()),
223// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
224// since that one is using svd. This is also faster than using
225//
226// x = vnl_adjugate_fixed(A).transpose() * b;
227//
228// \relatesalso vnl_matrix_fixed
229
230template <typename T>
231vnl_matrix_fixed<T, 3, 3>
232vnl_cofactor(const vnl_matrix_fixed<T, 3, 3> & m)
233{
234
235 T d[9];
236 d[0] = (m(1, 1) * m(2, 2) - m(1, 2) * m(2, 1));
237 d[3] = (m(2, 1) * m(0, 2) - m(2, 2) * m(0, 1));
238 d[6] = (m(0, 1) * m(1, 2) - m(0, 2) * m(1, 1));
239 d[1] = (m(1, 2) * m(2, 0) - m(1, 0) * m(2, 2));
240 d[4] = (m(0, 0) * m(2, 2) - m(0, 2) * m(2, 0));
241 d[7] = (m(1, 0) * m(0, 2) - m(1, 2) * m(0, 0));
242 d[2] = (m(1, 0) * m(2, 1) - m(1, 1) * m(2, 0));
243 d[5] = (m(0, 1) * m(2, 0) - m(0, 0) * m(2, 1));
244 d[8] = (m(0, 0) * m(1, 1) - m(0, 1) * m(1, 0));
245 return vnl_matrix_fixed<T, 3, 3>(d);
246}
247
248//: Calculates transpose of the adjugate_fixed of a small vnl_matrix_fixed (not using svd)
249// This allows you to write e.g.
250//
251// x = vnl_cofactor(A) * b;
252//
253// Note that this function is inlined (except for the call to vnl_det()),
254// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
255// since that one is using svd. This is also faster than using
256//
257// x = vnl_adjugate_fixed(A).transpose() * b;
258//
259// \relatesalso vnl_matrix_fixed
260
template <typename T>
vnl_matrix_fixed<T, 4, 4>
vnl_cofactor(const vnl_matrix_fixed<T, 4, 4> & m)
{
  // Cofactor matrix by explicit expansion: entry d[4*i+j] is the cofactor of
  // m(i,j), i.e. cof(m) == adj(m)^T.  The expressions are identical to the
  // 4x4 vnl_adjugate above; only the destination indices are transposed
  // (hence the non-sequential assignment order).
  // NOTE(review): d appears to fill the result row-wise via the
  // vnl_matrix_fixed(const T*) constructor, consistent with the 2x2/3x3
  // overloads -- confirm against vnl_matrix_fixed documentation.
  T d[16];
  // Column 0 of the cofactor matrix (row 0 of the adjugate).
  d[0] = m(1, 1) * m(2, 2) * m(3, 3) - m(1, 1) * m(2, 3) * m(3, 2) - m(2, 1) * m(1, 2) * m(3, 3) +
         m(2, 1) * m(1, 3) * m(3, 2) + m(3, 1) * m(1, 2) * m(2, 3) - m(3, 1) * m(1, 3) * m(2, 2);
  d[4] = -m(0, 1) * m(2, 2) * m(3, 3) + m(0, 1) * m(2, 3) * m(3, 2) + m(2, 1) * m(0, 2) * m(3, 3) -
         m(2, 1) * m(0, 3) * m(3, 2) - m(3, 1) * m(0, 2) * m(2, 3) + m(3, 1) * m(0, 3) * m(2, 2);
  d[8] = m(0, 1) * m(1, 2) * m(3, 3) - m(0, 1) * m(1, 3) * m(3, 2) - m(1, 1) * m(0, 2) * m(3, 3) +
         m(1, 1) * m(0, 3) * m(3, 2) + m(3, 1) * m(0, 2) * m(1, 3) - m(3, 1) * m(0, 3) * m(1, 2);
  d[12] = -m(0, 1) * m(1, 2) * m(2, 3) + m(0, 1) * m(1, 3) * m(2, 2) + m(1, 1) * m(0, 2) * m(2, 3) -
          m(1, 1) * m(0, 3) * m(2, 2) - m(2, 1) * m(0, 2) * m(1, 3) + m(2, 1) * m(0, 3) * m(1, 2);
  // Column 1 of the cofactor matrix.
  d[1] = -m(1, 0) * m(2, 2) * m(3, 3) + m(1, 0) * m(2, 3) * m(3, 2) + m(2, 0) * m(1, 2) * m(3, 3) -
         m(2, 0) * m(1, 3) * m(3, 2) - m(3, 0) * m(1, 2) * m(2, 3) + m(3, 0) * m(1, 3) * m(2, 2);
  d[5] = m(0, 0) * m(2, 2) * m(3, 3) - m(0, 0) * m(2, 3) * m(3, 2) - m(2, 0) * m(0, 2) * m(3, 3) +
         m(2, 0) * m(0, 3) * m(3, 2) + m(3, 0) * m(0, 2) * m(2, 3) - m(3, 0) * m(0, 3) * m(2, 2);
  d[9] = -m(0, 0) * m(1, 2) * m(3, 3) + m(0, 0) * m(1, 3) * m(3, 2) + m(1, 0) * m(0, 2) * m(3, 3) -
         m(1, 0) * m(0, 3) * m(3, 2) - m(3, 0) * m(0, 2) * m(1, 3) + m(3, 0) * m(0, 3) * m(1, 2);
  d[13] = m(0, 0) * m(1, 2) * m(2, 3) - m(0, 0) * m(1, 3) * m(2, 2) - m(1, 0) * m(0, 2) * m(2, 3) +
          m(1, 0) * m(0, 3) * m(2, 2) + m(2, 0) * m(0, 2) * m(1, 3) - m(2, 0) * m(0, 3) * m(1, 2);
  // Column 2 of the cofactor matrix.
  d[2] = m(1, 0) * m(2, 1) * m(3, 3) - m(1, 0) * m(2, 3) * m(3, 1) - m(2, 0) * m(1, 1) * m(3, 3) +
         m(2, 0) * m(1, 3) * m(3, 1) + m(3, 0) * m(1, 1) * m(2, 3) - m(3, 0) * m(1, 3) * m(2, 1);
  d[6] = -m(0, 0) * m(2, 1) * m(3, 3) + m(0, 0) * m(2, 3) * m(3, 1) + m(2, 0) * m(0, 1) * m(3, 3) -
         m(2, 0) * m(0, 3) * m(3, 1) - m(3, 0) * m(0, 1) * m(2, 3) + m(3, 0) * m(0, 3) * m(2, 1);
  d[10] = m(0, 0) * m(1, 1) * m(3, 3) - m(0, 0) * m(1, 3) * m(3, 1) - m(1, 0) * m(0, 1) * m(3, 3) +
          m(1, 0) * m(0, 3) * m(3, 1) + m(3, 0) * m(0, 1) * m(1, 3) - m(3, 0) * m(0, 3) * m(1, 1);
  d[14] = -m(0, 0) * m(1, 1) * m(2, 3) + m(0, 0) * m(1, 3) * m(2, 1) + m(1, 0) * m(0, 1) * m(2, 3) -
          m(1, 0) * m(0, 3) * m(2, 1) - m(2, 0) * m(0, 1) * m(1, 3) + m(2, 0) * m(0, 3) * m(1, 1);
  // Column 3 of the cofactor matrix.
  d[3] = -m(1, 0) * m(2, 1) * m(3, 2) + m(1, 0) * m(2, 2) * m(3, 1) + m(2, 0) * m(1, 1) * m(3, 2) -
         m(2, 0) * m(1, 2) * m(3, 1) - m(3, 0) * m(1, 1) * m(2, 2) + m(3, 0) * m(1, 2) * m(2, 1);
  d[7] = m(0, 0) * m(2, 1) * m(3, 2) - m(0, 0) * m(2, 2) * m(3, 1) - m(2, 0) * m(0, 1) * m(3, 2) +
         m(2, 0) * m(0, 2) * m(3, 1) + m(3, 0) * m(0, 1) * m(2, 2) - m(3, 0) * m(0, 2) * m(2, 1);
  d[11] = -m(0, 0) * m(1, 1) * m(3, 2) + m(0, 0) * m(1, 2) * m(3, 1) + m(1, 0) * m(0, 1) * m(3, 2) -
          m(1, 0) * m(0, 2) * m(3, 1) - m(3, 0) * m(0, 1) * m(1, 2) + m(3, 0) * m(0, 2) * m(1, 1);
  d[15] = m(0, 0) * m(1, 1) * m(2, 2) - m(0, 0) * m(1, 2) * m(2, 1) - m(1, 0) * m(0, 1) * m(2, 2) +
          m(1, 0) * m(0, 2) * m(2, 1) + m(2, 0) * m(0, 1) * m(1, 2) - m(2, 0) * m(0, 2) * m(1, 1);
  return vnl_matrix_fixed<T, 4, 4>(d);
}
300
301//: Calculates transpose of the adjugate_fixed of a small vnl_matrix_fixed (not using svd)
302// This allows you to write e.g.
303//
304// x = vnl_cofactor(A) * b;
305//
306// Note that this function is inlined (except for the call to vnl_det()),
307// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
308// since that one is using svd. This is also faster than using
309//
310// x = vnl_adjugate_fixed(A).transpose() * b;
311//
312// \relatesalso vnl_matrix
313
314template <typename T>
315vnl_matrix<T>
316vnl_cofactor(const vnl_matrix<T> & m)
317{
318 assert(m.rows() == m.columns());
319 assert(m.rows() <= 4);
320 if (m.rows() == 1)
321 return vnl_matrix<T>(1, 1, T(1) / m(0, 0));
322 else if (m.rows() == 2)
323 return vnl_cofactor(vnl_matrix_fixed<T, 2, 2>(m)).as_ref();
324 else if (m.rows() == 3)
325 return vnl_cofactor(vnl_matrix_fixed<T, 3, 3>(m)).as_ref();
326 else
327 return vnl_cofactor(vnl_matrix_fixed<T, 4, 4>(m)).as_ref();
328}
329
330
331template <typename T>
332vnl_vector_fixed<T, 3>
333vnl_cofactor_row1(const vnl_vector_fixed<T, 3> & row2, const vnl_vector_fixed<T, 3> & row3)
334{
335 T d[3];
336 d[0] = (row2[1] * row3[2] - row2[2] * row3[1]);
337 d[1] = (row2[2] * row3[0] - row2[0] * row3[2]);
338 d[2] = (row2[0] * row3[1] - row2[1] * row3[0]);
339 return vnl_vector_fixed<T, 3>(d);
340}
341
342#endif // vnl_adjugate_fixed_h_
vnl_matrix< T > vnl_adjugate_asfixed(const vnl_matrix< T > &m)
vnl_matrix_fixed< T, 1, 1 > vnl_cofactor(const vnl_matrix_fixed< T, 1, 1 > &m)
vnl_matrix_fixed< T, 1, 1 > vnl_adjugate(const vnl_matrix_fixed< T, 1, 1 > &m)
vnl_vector_fixed< T, 3 > vnl_cofactor_row1(const vnl_vector_fixed< T, 3 > &row2, const vnl_vector_fixed< T, 3 > &row3)


Generated on 1774142652 for elastix by doxygen 1.15.0 elastix logo