#
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib as mp
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn
import laUtilities as ut
import slideUtilities as sl
import demoUtilities as dm
from matplotlib import animation
from importlib import reload
from datetime import datetime
from IPython.display import Image, display_html, display, Math, HTML;
qr_setting = None
mp.rcParams['animation.html'] = 'jshtml';
Homework 6 is due Friday, 3/24 at 8pm
Remember to submit a readable file and to cite all people you collaborated with and any sources you used
Upcoming office hours
Weekly reading and viewing assignments
Definition. A subspace of $\mathbb{R}^n$ is any set $H$ in $\mathbb{R}^n$ that has three properties:
1. The zero vector is in $H$.
2. For each $\mathbf{u}$ and $\mathbf{v}$ in $H$, the sum $\mathbf{u} + \mathbf{v}$ is in $H$.
3. For each $\mathbf{u}$ in $H$ and each scalar $c$, the vector $c\mathbf{u}$ is in $H$.
Another way of stating properties 2 and 3 is that $H$ is closed under addition and scalar multiplication.
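As a quick sanity check of these closure properties, here is a small numerical sketch (the vectors $\mathbf{v}_1$ and $\mathbf{v}_2$ are the same ones used to draw the figure below): if $\mathbf{u}$ and $\mathbf{w}$ both lie in $H = \operatorname{Span}\{\mathbf{v}_1, \mathbf{v}_2\}$, then $\mathbf{u} + \mathbf{w}$ and any scalar multiple of $\mathbf{u}$ must also lie in $H$, which we can confirm by checking that appending them to the spanning set does not increase its rank.
# numerical check that a span is closed under addition and scalar multiplication
v1 = np.array([-8., 8, 5])
v2 = np.array([3., 2, -2])
u = 2 * v1 - v2              # a vector in H = Span{v1, v2}
w = 0.5 * v1 + 3 * v2        # another vector in H
H = np.column_stack([v1, v2])
for candidate in [u + w, 10 * u]:
    augmented = np.column_stack([H, candidate])
    # True, True: the candidate vectors are already in the span
    print(np.linalg.matrix_rank(augmented) == np.linalg.matrix_rank(H))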
#
fig = ut.three_d_figure((20, 1), fig_desc = 'Span{v1, v2}, a subspace.',
xmin = -10, xmax = 10, ymin = -10, ymax = 10, zmin = -10, zmax = 10, qr = qr_setting)
a1 = [-8.0, 8.0, 5.0]
a2 = [3.0, 2.0, -2.0]
a3 = 2.5 * np.array(a2)
fig.text(a1[0]+.5, a1[1]+.5, a1[2]+.5, r'$\bf v_1$', 'v_1', size=20)
fig.text(a3[0]+.5, a3[1]+.5, a3[2]+.5, r'$\bf v_2$', 'v_2', size=20)
fig.plotSpan(a1, a2,'Green')
fig.plotPoint(a1[0], a1[1], a1[2],'r')
fig.plotPoint(a3[0], a3[1], a3[2],'r')
# fig.plotPoint(a3[0], a3[1], a3[2],'r')
#fig.text(a1[0], a1[1], a1[2], r'$\bf v_1$', 'a_1', size=20)
fig.text(0.1, 0.1, -3, r'$\bf 0$', '0', size=12)
fig.plotPoint(0, 0, 0, 'b')
fig.set_title(r'Span{$v_1, v_2$}, a subspace.')
fig.text(9, -9, -7, r'$\bf H$', 'H', size = 20)
img = fig.save()
Definition. A basis for a subspace $H$ of $\mathbb{R}^n$ is a minimal spanning set for $H$; that is, a linearly independent set in $H$ that spans $H$.
#
fig = ut.three_d_figure((20, 5), fig_desc = 'Bases of H are {a1, a2} or {a1, a3}.',
xmin = -10, xmax = 10, ymin = -10, ymax = 10, zmin = -10, zmax = 10, qr = qr_setting)
a1 = [-8.0, 8.0, 5.0]
a2 = [3.0, 2.0, -2.0]
a3 = 2.5 * np.array(a2)
fig.text(a1[0]+.5, a1[1]+.5, a1[2]+.5, r'$\bf a_1$', 'a_1', size=18)
fig.text(a3[0]+.5, a3[1]+.5, a3[2]+.5, r'$\bf a_3$', 'a_3', size=18)
fig.text(a2[0]+.5, a2[1]+.5, a2[2]+.5, r'$\bf a_2$', 'a_2', size=18)
fig.plotSpan(a1, a2,'Green')
fig.plotPoint(a1[0], a1[1], a1[2],'r')
fig.plotPoint(a3[0], a3[1], a3[2],'r')
fig.plotPoint(a2[0], a2[1], a2[2],'r')
fig.plotLine([[0, 0, 0], a3], 'r', '--')
fig.plotLine([[0, 0, 0], a1], 'r', '--')
# fig.plotPoint(a3[0], a3[1], a3[2],'r')
fig.text(0.1, 0.1, -3, r'$\bf 0$', '0', size=12)
fig.plotPoint(0, 0, 0, 'b')
fig.set_title(r'Bases of $H$ are $\{{\bf a}_1, {\bf a}_2\}$ or $\{{\bf a}_1, {\bf a}_3\}$')
fig.text(9, -9, -7, r'H', 'H', size = 16)
img = fig.save()
So in the example above, a basis for $H$ could be $\{\mathbf{a}_1, \mathbf{a}_2\}$
or $\{\mathbf{a}_1, \mathbf{a}_3\}$.
However, $\{\mathbf{a}_2, \mathbf{a}_3\}$ is not a basis for $H$, because $\mathbf{a}_2$ and $\mathbf{a}_3$ are linearly dependent and span only a line.
And furthermore, $\{\mathbf{a}_1, \mathbf{a}_2, \mathbf{a}_3\}$ is not a basis for $H$, because it is not a linearly independent set.
Definition. The dimension of a nonzero subspace $H$, denoted by $\dim H$, is the number of vectors in any basis for $H$.
The dimension of the zero subspace is defined to be zero.
Definition. The column space of a matrix $A$, written $\operatorname{Col} A$, is the set of all linear combinations of the columns of $A$.
If $A = [\mathbf{a}_1 \cdots \mathbf{a}_n]$, with columns in $\mathbb{R}^m$, then $\operatorname{Col} A$ is the same as $\operatorname{Span}\{\mathbf{a}_1, \dots, \mathbf{a}_n\}$, and hence it is a subspace.
The column space of an $m \times n$ matrix is a subspace of $\mathbb{R}^m$.
In particular, note that $\operatorname{Col} A$ equals $\mathbb{R}^m$ only when the columns of $A$ span $\mathbb{R}^m$. Otherwise, $\operatorname{Col} A$ is only part of $\mathbb{R}^m$.
Definition. The null space of a matrix $A$, written $\operatorname{Nul} A$, is the set of all solutions of the homogeneous equation $A\mathbf{x} = \mathbf{0}$.
In other words: the null space of $A$ is the set of all vectors $\mathbf{x}$ that are mapped to the zero vector by $A$. When $A$ has $n$ columns, a solution $\mathbf{x}$ of $A\mathbf{x} = \mathbf{0}$ is a vector in $\mathbb{R}^n$, so $\operatorname{Nul} A$ is a subspace of $\mathbb{R}^n$.
We can construct a basis for $\operatorname{Nul} A$ by finding a parametric description of the solution set of $A\mathbf{x} = \mathbf{0}$.
We looked at the following example:
We constructed an explicit description of the null space of this matrix, as $\operatorname{Nul} A = \operatorname{Span}\{\mathbf{u}, \mathbf{v}, \mathbf{w}\}$, where $\mathbf{u}$, $\mathbf{v}$, and $\mathbf{w}$ are linearly independent.
That is, $\operatorname{Nul} A$ is the subspace spanned by $\{\mathbf{u}, \mathbf{v}, \mathbf{w}\}$.
Each basis vector corresponds to a free variable in the equation $A\mathbf{x} = \mathbf{0}$. Since this basis contains 3 vectors, the dimension of $A$'s null space (i.e., $\dim \operatorname{Nul} A$) is 3.
In general, to find the dimension of $\operatorname{Nul} A$, simply identify and count the number of free variables in $A\mathbf{x} = \mathbf{0}$.
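As a quick check of this idea in code, here is a sketch using a stand-in matrix with three free variables (an illustrative matrix, not the exact one from the example above). SciPy's `null_space` returns an orthonormal basis for $\operatorname{Nul} A$, so the number of columns it returns equals $\dim \operatorname{Nul} A$.
# dimension of the null space = number of free variables in Ax = 0
from scipy.linalg import null_space

A = np.array([[1., -2., 2., 3., -1.],
              [2., -4., 5., 8., -4.],
              [3., -6., 6., 9., -3.]])   # rank 2, so 5 - 2 = 3 free variables

N = null_space(A)                # columns form an (orthonormal) basis for Nul A
print(N.shape[1])                # 3 -- the dimension of Nul A
print(np.allclose(A @ N, 0))     # True -- every basis vector solves Ax = 0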
To find a basis for the column space, we have an easier starting point.
We know that the column space is the span of the matrix columns.
So, we can choose matrix columns to make up the basis.
The question is: which columns should we choose?
Warmup.
We start with a warmup example.
Suppose we have a matrix $B$ that happens to be in reduced echelon form, with pivots in columns 1, 2, and 5.
Denote the columns of $B$ by $\mathbf{b}_1, \dots, \mathbf{b}_5$.
Note that $\mathbf{b}_3$ and $\mathbf{b}_4$ are each linear combinations of the pivot columns $\mathbf{b}_1$ and $\mathbf{b}_2$.
So any linear combination of $\mathbf{b}_1, \dots, \mathbf{b}_5$ is actually just a combination of $\mathbf{b}_1$, $\mathbf{b}_2$, and $\mathbf{b}_5$.
So $\{\mathbf{b}_1, \mathbf{b}_2, \mathbf{b}_5\}$ spans $\operatorname{Col} B$.
Also, $\mathbf{b}_1$, $\mathbf{b}_2$, and $\mathbf{b}_5$ are linearly independent, because they are columns from an identity matrix.
So: the pivot columns of $B$ form a basis for $\operatorname{Col} B$.
So, for matrices in reduced row echelon form, we have a simple rule for the basis of the column space:
Choose the columns that hold the pivots.
The general case.
Now I'll show that the pivot columns of $A$ form a basis for $\operatorname{Col} A$, for any matrix $A$.
Consider the case where $A\mathbf{x} = \mathbf{0}$ for some nonzero $\mathbf{x}$. This says that there is a linear dependence relation between some of the columns of $A$.
When we row-reduce $A$ to its reduced echelon form $B$, the columns are changed, but the equations $A\mathbf{x} = \mathbf{0}$ and $B\mathbf{x} = \mathbf{0}$ have the same solution set.
This means that the columns of $A$ have exactly the same dependence relationships as the columns of $B$. Specifically:
If some column of $B$ can be written as a combination of other columns of $B$, then the same is true of the corresponding columns of $A$.
If no combination of certain columns of $B$ yields the zero vector, then no combination of the corresponding columns of $A$ yields the zero vector.
In other words:
If some set of columns of $B$ spans the column space of $B$, then the same columns of $A$ span the column space of $A$.
If some set of columns of $B$ is linearly independent, then the same columns of $A$ are linearly independent.
So, if some columns of $B$ are a basis for $\operatorname{Col} B$, then the corresponding columns of $A$ are a basis for $\operatorname{Col} A$.
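Here is a small computational sketch of this fact, using a made-up $4 \times 5$ matrix (an assumption for illustration) whose third and fourth columns are built as combinations of the first two. A column is a pivot column exactly when it is not a linear combination of the columns to its left, i.e., when appending it increases the rank.
# identify the pivot columns of A numerically; they form a basis for Col A
c1 = np.array([1., 0, 2, 3])
c2 = np.array([0., 1, 1, -1])
c5 = np.array([0., 0, 1, 0])
A = np.column_stack([c1, c2, 2*c1 - c2, c1 + c2, c5])   # columns 3 and 4 are dependent

pivot_cols = []
for j in range(A.shape[1]):
    # column j is a pivot column if it increases the rank of the columns before it
    if np.linalg.matrix_rank(A[:, :j+1]) > len(pivot_cols):
        pivot_cols.append(j)

print(pivot_cols)                                        # [0, 1, 4]
basis = A[:, pivot_cols]
print(np.linalg.matrix_rank(basis) == np.linalg.matrix_rank(A))   # True: these columns span Col A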
Example. Consider a matrix $A$ that is not in reduced echelon form.
To find a basis for its column space, we simply need to look at the pivot columns of its reduced row echelon form.
It turns out that $A$ is row equivalent to the matrix $B$ that we considered above; recall that $B$'s pivot columns are columns 1, 2, and 5.
Therefore we can immediately conclude that a basis for $\operatorname{Col} A$ is formed by $A$'s columns 1, 2, and 5.
So a basis for $\operatorname{Col} A$ is the set consisting of the first, second, and fifth columns of $A$.
Theorem. The pivot columns of a matrix $A$ form a basis for the column space of $A$.
Be careful here -- note that the basis is formed from the pivot columns of $A$ itself, not from the pivot columns of the reduced matrix $B$; in general, the columns of an echelon form of $A$ are not even in $\operatorname{Col} A$.
Definition. The rank of a matrix $A$, denoted by $\operatorname{rank} A$, is the dimension of the column space of $A$.
Since the pivot columns of $A$ form a basis for $\operatorname{Col} A$, the rank of $A$ is just the number of pivot columns in $A$. This is also equal to the number of basic variables in the equation $A\mathbf{x} = \mathbf{0}$ (hopefully the term "basic variable" now makes sense!).
Example. Determine the rank of the matrix $A$.
Solution. Reduce $A$ to an echelon form.
The matrix $A$ has 3 pivot columns, so $\operatorname{rank} A = 3$.
Theorem. If a matrix $A$ has $n$ columns, then $\operatorname{rank} A + \dim \operatorname{Nul} A = n$.
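A quick numerical confirmation of this theorem, sketched here on a randomly generated matrix (any matrix would do):
# check that rank A + dim Nul A = n for a random 4 x 7 matrix
from scipy.linalg import null_space

rng = np.random.default_rng(0)
A = rng.integers(-5, 5, size=(4, 7)).astype(float)

rank = np.linalg.matrix_rank(A)
nullity = null_space(A).shape[1]
print(rank, nullity, rank + nullity == A.shape[1])   # the two dimensions sum to n = 7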
[This lecture is based on Prof. Crovella's CS 132 lecture notes and Fast.ai Lecture 8.]
Today's lecture explores the geometry of vectors and matrices. We will examine the concept of orthogonality of two vectors in more detail. This will lead to our second type of matrix factorization, called QR factorization.
To begin, let's look at the geometry of space in a new way, based on the concept of a basis that we defined last time.
A geometric coordinate system gives a unique name to each point in the plane (or in any vector space).
Traditionally, we label points using the standard basis. Remember that the "standard basis" refers to the columns of the identity matrix. For instance, in $\mathbb{R}^2$ the standard basis vectors are:
#
ax = dm.plotSetup(-3,3,-3,3,size=(6,6))
ax.plot([0],[1],'ro',markersize=8)
ax.arrow(0, 0, 1, 0, head_width=0.2, head_length=0.2, length_includes_head = True)
ax.arrow(0, 0, 0, 1, head_width=0.2, head_length=0.2, length_includes_head = True)
ax.text(0.25,1,'(0,1)',size=20)
ax.plot([1],[0],'ro',markersize=8)
ax.text(1.25,0.25,'(1,0)',size=20)
ax.plot([-1],[2],'ro',markersize=8)
ax.text(-1.5,2.25,'(-1,2)',size=20);
In the last lecture we developed the idea of a basis -- a minimal spanning set for a subspace $H$. What happens if we label points using a different basis? Perhaps even a basis whose vectors aren't perpendicular?
Today we'll show that:
a basis for a subspace $H$ provides a coordinate system for $H$.
Theorem. If we are given a basis $\mathcal{B} = \{\mathbf{b}_1, \dots, \mathbf{b}_p\}$ for $H$, then each vector $\mathbf{x}$ in $H$ can be written in only one way as a linear combination of the basis vectors.
Proof. Suppose $\mathcal{B} = \{\mathbf{b}_1, \dots, \mathbf{b}_p\}$ is a basis for $H$, and suppose a vector $\mathbf{x}$ in $H$ can be generated in two ways, say
$$\mathbf{x} = c_1\mathbf{b}_1 + \cdots + c_p\mathbf{b}_p \quad\text{and}\quad \mathbf{x} = d_1\mathbf{b}_1 + \cdots + d_p\mathbf{b}_p.$$
Then, subtracting gives
$$\mathbf{0} = \mathbf{x} - \mathbf{x} = (c_1 - d_1)\mathbf{b}_1 + \cdots + (c_p - d_p)\mathbf{b}_p.$$
Now, since $\mathcal{B}$ is a basis, we know that the vectors $\mathbf{b}_1, \dots, \mathbf{b}_p$ are linearly independent.
So by the definition of linear independence, the weights in the expression above must all be zero. That is, $c_j - d_j = 0$, i.e., $c_j = d_j$, for every $j$.
As a result, the two representations must be the same.
Definition. Suppose the set $\mathcal{B} = \{\mathbf{b}_1, \dots, \mathbf{b}_p\}$ is a basis for the subspace $H$.
For each $\mathbf{x}$ in $H$, the coordinates of $\mathbf{x}$ relative to the basis $\mathcal{B}$ are the weights $c_1, \dots, c_p$ such that $\mathbf{x} = c_1\mathbf{b}_1 + \cdots + c_p\mathbf{b}_p$.
The vector in $\mathbb{R}^p$
$$[\mathbf{x}]_\mathcal{B} = \begin{bmatrix} c_1 \\ \vdots \\ c_p \end{bmatrix}$$
is called the coordinate vector of $\mathbf{x}$ (relative to $\mathcal{B}$), or the $\mathcal{B}$-coordinate vector of $\mathbf{x}$.
Example. In $\mathbb{R}^2$, let's look at the point $\mathbf{x} = (1, 6)$. We will plot this point in the standard basis and also relative to a new basis $\mathcal{B} = \{\mathbf{b}_1, \mathbf{b}_2\}$, where $\mathbf{b}_1 = (1, 0)$ and $\mathbf{b}_2 = (1, 2)$:
# standard basis
xmin = -6.0
xmax = 6.0
ymin = -2.0
ymax = 8.0
b0 = [1, 0]
b1 = [1, 2]
fig = ut.two_d_figure('Dummylabel', xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax, size=(6,5))
for x in np.linspace(ymin, ymax, int(ymax-ymin+1)):
fig.plotLinEqn(0., 1., x, alpha=0.3)
for y in np.linspace(xmin, xmax, int(xmax-xmin+1)):
fig.plotLinEqn(1., 0., y, alpha=0.3)
fig.plotLinEqn(1., 0, 0, color = 'k')
fig.plotLinEqn(0, 1, 0, color = 'k')
fig.plotPoint(0, 0, 'k')
fig.ax.text(0+.1, 0-.1, r'$\bf 0$', size = 12, horizontalalignment='left', verticalalignment='top')
fig.plotPoint(1, 0, 'r')
fig.ax.text(1+.1, 0-.1, r'${\bf e}_1$', size = 12, horizontalalignment='left', verticalalignment='top')
fig.plotPoint(0, 1, 'r')
fig.ax.text(0+.1, 1-.1, r'${\bf e}_2$', size = 12, horizontalalignment='left', verticalalignment='top')
fig.plotPoint(b1[0], b1[1], 'g')
fig.ax.text(b1[0]+.1, b1[1]-.1, r'${\bf b}_2$', size = 12, horizontalalignment='left', verticalalignment='top')
fig.plotPoint(1, 6, 'b')
fig.ax.text(1+.1, 6-.1, r'${\bf x}$', size = 12, horizontalalignment='left', verticalalignment='top')
fig.ax.axis('off')
fig.ax.set_title(r'Standard Basis. $\mathbf{x}$ = (1, 6)', size = 16);
# B-basis
fig = ut.two_d_figure('Dummylabel', xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax, size=(6,5))
m = b1[1]/b1[0]
upper_intercept = ymax - m * xmin
upper_intercept = b1[1] * np.ceil(upper_intercept / b1[1])
lower_intercept = ymin - m * xmax
lower_intercept = b1[1] * np.floor(lower_intercept / b1[1])
for yint in np.linspace(lower_intercept, upper_intercept, int((upper_intercept-lower_intercept)/b1[1])+1):
fig.plotLinEqn(-b1[1], b1[0], yint, color = 'g', alpha=0.3)
for y in np.linspace(ymin, ymax, int(((ymax-ymin)/b1[1])+1)):
fig.plotLinEqn(0., 1., y, color = 'g', alpha=0.3)
fig.plotLinEqn(b1[1], -b1[0], 0, color = 'k')
fig.plotLinEqn(b0[1], -b0[0], 0, color = 'k')
fig.plotPoint(0, 0, 'k')
fig.ax.text(0+.1, 0-.1, r'$\bf 0$', size = 12, horizontalalignment='left', verticalalignment='top')
fig.plotPoint(1, 0, 'g')
fig.ax.text(1+.1, 0-.1, r'${\bf b}_1$', size = 12, horizontalalignment='left', verticalalignment='top')
fig.plotPoint(b1[0], b1[1], 'g')
fig.ax.text(b1[0]+.1, b1[1]-.1, r'${\bf b}_2$', size = 12, horizontalalignment='left', verticalalignment='top')
fig.plotPoint(1, 6, 'b')
fig.ax.text(1+.1, 6-.1, r'${\bf x}$', size = 12, horizontalalignment='left', verticalalignment='top')
fig.ax.axis('off')
fig.ax.set_title(r'B-Basis. $[\mathbf{x}]_\mathcal{B}$ = (-2, 3)', size = 16);
Notice that the location of $\mathbf{x}$ relative to the origin does not change.
However, using the $\mathcal{B}$-basis, $\mathbf{x}$ has different coordinates: the new coordinates are $[\mathbf{x}]_\mathcal{B} = (-2, 3)$.
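We can verify these coordinates numerically by solving the small linear system $c_1\mathbf{b}_1 + c_2\mathbf{b}_2 = \mathbf{x}$ (a quick sketch using the same vectors plotted above):
# solve for the B-coordinates of x = (1, 6) in the basis b1 = (1, 0), b2 = (1, 2)
b1 = np.array([1., 0])
b2 = np.array([1., 2])
x = np.array([1., 6])
B = np.column_stack([b1, b2])
print(np.linalg.solve(B, x))     # [-2.  3.]  i.e., [x]_B = (-2, 3)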
Suppose we are given a particular basis. How do we find the coordinates of a point in that basis?
Let's consider a specific example.
Let $\mathbf{v}_1 = \begin{bmatrix} 3 \\ 6 \\ 2 \end{bmatrix}$, $\mathbf{v}_2 = \begin{bmatrix} -1 \\ 0 \\ 1 \end{bmatrix}$, and $\mathbf{x} = \begin{bmatrix} 3 \\ 12 \\ 7 \end{bmatrix}$.
Then $\mathcal{B} = \{\mathbf{v}_1, \mathbf{v}_2\}$ is a basis for $H = \operatorname{Span}\{\mathbf{v}_1, \mathbf{v}_2\}$, because $\mathbf{v}_1$ and $\mathbf{v}_2$ are linearly independent.
Problem: Determine whether $\mathbf{x}$ is in $H$, and if it is, find the coordinate vector of $\mathbf{x}$ relative to $\mathcal{B}$.
Solution. If $\mathbf{x}$ is in $H$, then the following vector equation is consistent:
$$c_1\mathbf{v}_1 + c_2\mathbf{v}_2 = \mathbf{x}.$$
The scalars $c_1$ and $c_2$, if they exist, are the $\mathcal{B}$-coordinates of $\mathbf{x}$.
Row operations show that
$$\left[\begin{array}{rr|r} 3 & -1 & 3 \\ 6 & 0 & 12 \\ 2 & 1 & 7 \end{array}\right] \sim \left[\begin{array}{rr|r} 1 & 0 & 2 \\ 0 & 1 & 3 \\ 0 & 0 & 0 \end{array}\right].$$
The reduced row echelon form shows that the system is consistent, so $\mathbf{x}$ is in $H$.
Furthermore, it shows that $c_1 = 2$ and $c_2 = 3$,
so $[\mathbf{x}]_\mathcal{B} = \begin{bmatrix} 2 \\ 3 \end{bmatrix}$.
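The same computation can be sketched in code. Here the matrix of basis vectors is $3 \times 2$, so the system is overdetermined; np.linalg.lstsq returns the exact solution because the system is consistent (the vectors below are the ones assumed in this example):
# B-coordinates of x, found by solving the (consistent) overdetermined system
v1 = np.array([3., 6, 2])
v2 = np.array([-1., 0, 1])
x = np.array([3., 12, 7])
B = np.column_stack([v1, v2])
coords, residual, *_ = np.linalg.lstsq(B, x, rcond=None)
print(coords)                    # [2. 3.]  i.e., [x]_B = (2, 3)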
In this example, the basis $\mathcal{B}$ determines a coordinate system on $H$, which can be visualized like this:
#
fig = ut.three_d_figure((22, 1), fig_desc = 'Basis-B coordinate system on the subspace H', figsize = (8, 8),
xmin = -1, xmax = 10, ymin = -1, ymax = 10, zmin = -1, zmax = 14, qr = qr_setting)
v = [3.0, 6, 2]
u = [-1, 0, 1]
#
v = np.array([2, 0.5, 0.5])
u = np.array([1., 2, 1])
fig.text(v[0], v[1]-0.75, v[2]-2, r'$\bf v_1$', 'v1', size=16)
fig.text(u[0]-0.5, u[1], u[2]+2, r'$\bf v_2$', 'v2', size=16)
fig.text(3*u[0]+2*v[0], 3*u[1]+2*v[1], 3*u[2]+2*v[2]+1, r'$\bf 2v_1+3v_2$', '2 v1 + 3 v2', size=16)
# plotting the span of v
fig.plotSpan(u, v, 'Green')
# blue grid lines
fig.plotPoint(0, 0, 0, 'y')
fig.plotPoint(u[0], u[1], u[2], 'b')
fig.plotPoint(2*u[0], 2*u[1], 2*u[2],'b')
fig.plotPoint(3*u[0], 3*u[1], 3*u[2], 'b')
fig.plotLine([[0, 0, 0], 4*u], color='b')
fig.plotLine([v, v+4*u], color='b')
fig.plotLine([2*v, 2*v+3*u], color='b')
fig.plotLine([3*v, 3*v+2.5*u], color='b')
# red grid lines
fig.plotPoint(v[0], v[1], v[2], 'r')
fig.plotPoint(2*v[0], 2*v[1], 2*v[2], 'r')
fig.plotPoint(3*v[0], 3*v[1], 3*v[2], 'r')
fig.plotLine([[0, 0, 0], 3.5*v], color='r')
fig.plotLine([u, u+3.5*v], color='r')
fig.plotLine([2*u, 2*u+3.5*v], color='r')
fig.plotLine([3*u, 3*u+2*v], color='r')
#
fig.plotPoint(3*u[0]+2*v[0], 3*u[1]+2*v[1], 3*u[2]+2*v[2], color='m')
# plotting the axes
fig.plotIntersection([0,0,1,0], [0,1,0,0], color='Black')
fig.plotIntersection([0,0,1,0], [1,0,0,0], color='Black')
fig.plotIntersection([0,1,0,0], [1,0,0,0], color='Black')
fig.set_title('Basis-B coordinate system on the subspace H', size=16)
fig.save()
Important: Don't be confused by the fact that the "coordinate axes" are not perpendicular.
The whole idea here is that they don't need to be.
As long as the independent vectors span the space, there is only one way to express any point in terms of them.
Thus, every point has a unique coordinate vector.
Another important idea is that, although the points in $H$ are themselves vectors in $\mathbb{R}^3$, they are completely determined by their coordinate vectors, which belong to $\mathbb{R}^2$.
That is, the grid in the previous figure makes $H$ "look like" $\mathbb{R}^2$.
The correspondence $\mathbf{x} \mapsto [\mathbf{x}]_\mathcal{B}$ is a one-to-one correspondence between $H$ and $\mathbb{R}^2$ that preserves linear combinations.
In our example, $H$ is isomorphic to $\mathbb{R}^2$.
Definition. When we have a one-to-one correspondence between two subspaces that preserves linear combinations, we call such a correspondence an isomorphism, and we say that one subspace is isomorphic to the other.
In general, if $\mathcal{B} = \{\mathbf{b}_1, \dots, \mathbf{b}_p\}$ is a basis for $H$, then the mapping $\mathbf{x} \mapsto [\mathbf{x}]_\mathcal{B}$ is a one-to-one correspondence that makes $H$ look and act the same as $\mathbb{R}^p$.
This is true even though the vectors in $H$ themselves may have more than $p$ entries.
Today we deepen our study of geometry. We take on more challenging geometric notions that bring in sets of vectors and subspaces. Within this realm, we will focus on orthogonality and a new notion called projection.
First of all, today we'll study the properties of sets of orthogonal vectors. These can be very useful.
Definition. A set of vectors $\{\mathbf{u}_1, \dots, \mathbf{u}_p\}$ in $\mathbb{R}^n$ is said to be an orthogonal set if each pair of distinct vectors from the set is orthogonal, i.e., if $\mathbf{u}_i^T\mathbf{u}_j = 0$ whenever $i \neq j$.
Example. Show that $\{\mathbf{u}_1, \mathbf{u}_2, \mathbf{u}_3\}$ is an orthogonal set, where
$$\mathbf{u}_1 = \begin{bmatrix} 3 \\ 1 \\ 1 \end{bmatrix}, \quad \mathbf{u}_2 = \begin{bmatrix} -1 \\ 2 \\ 1 \end{bmatrix}, \quad \mathbf{u}_3 = \begin{bmatrix} -1/2 \\ -2 \\ 7/2 \end{bmatrix}.$$
Solution. Take the inner product of each pair of distinct vectors:
$$\mathbf{u}_1^T\mathbf{u}_2 = 3(-1) + 1(2) + 1(1) = 0,$$
$$\mathbf{u}_1^T\mathbf{u}_3 = 3(-\tfrac{1}{2}) + 1(-2) + 1(\tfrac{7}{2}) = 0,$$
$$\mathbf{u}_2^T\mathbf{u}_3 = -1(-\tfrac{1}{2}) + 2(-2) + 1(\tfrac{7}{2}) = 0.$$
Each pair of distinct vectors is orthogonal, and so $\{\mathbf{u}_1, \mathbf{u}_2, \mathbf{u}_3\}$ is an orthogonal set.
In three-dimensional space, these vectors describe three lines that we say are mutually perpendicular.
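We can confirm these inner products numerically (the same three vectors are used to draw the figure below):
# check pairwise orthogonality of u1, u2, u3
u1 = np.array([3., 1, 1])
u2 = np.array([-1., 2, 1])
u3 = np.array([-1/2, -2, 7/2])
print(u1 @ u2, u1 @ u3, u2 @ u3)    # 0.0 0.0 0.0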
#
fig = ut.three_d_figure((22, 2), fig_desc = 'An orthogonal set of vectors',
xmin = -3, xmax = 3, ymin = -3, ymax = 3, zmin = -3, zmax = 3,
figsize = (12, 8), qr = qr_setting)
u1 = np.array([3, 1, 1])
u2 = np.array([-1, 2, 1])
u3 = np.array([-1/2, -2, 7/2])
origin = np.array([0, 0, 0])
fig.plotLine([origin, u1], 'r', '--')
fig.plotPoint(u1[0], u1[1], u1[2], 'r')
fig.text(u1[0]+.1, u1[1]+.1, u1[2]+.1, r'$\bf u_1$', 'u1', size=16, color='k')
fig.plotLine([origin, u2], 'r', '--')
fig.plotPoint(u2[0], u2[1], u2[2], 'r')
fig.text(u2[0]+.1, u2[1]+.1, u2[2]+.1, r'$\bf u_2$', 'u2', size=16, color='k')
fig.plotLine([origin, u3], 'r', '--')
fig.plotPoint(u3[0], u3[1], u3[2], 'r')
fig.text(u3[0]+.1, u3[1]+.1, u3[2]+.1, r'$\bf u_3$', 'u3', size=16, color = 'k')
fig.text(origin[0]-.45, origin[1]-.45, origin[2]-.45, r'$\bf 0$', 0, size = 16)
fig.plotPerpSym(origin, u1, u2, 0.5)
fig.plotPerpSym(origin, u3, u2, 0.5)
fig.plotPerpSym(origin, u3, u1, 0.5)
fig.set_title(r'An orthogonal set of vectors in $\mathbb{R}^3$', 'An orthogonal set of vectors in R3', size = 16)
fig.save();
Orthogonal sets are very nice to work with. First of all, we will show that any orthogonal set of nonzero vectors must be linearly independent.
Theorem. If $S = \{\mathbf{u}_1, \dots, \mathbf{u}_p\}$ is an orthogonal set of nonzero vectors in $\mathbb{R}^n$, then $S$ is linearly independent.
Proof. We will prove that there is no linear combination of the vectors in $S$ with nonzero coefficients that yields the zero vector.
It is easier to prove the contrapositive. The contrapositive of "if A then B" is "if not-B then not-A".
That is: our proof strategy will be to show that, for any linear combination of the vectors in $S$,
if the combination equals the zero vector, then all coefficients of the combination must be zero.
Specifically: assume $\mathbf{0} = c_1\mathbf{u}_1 + c_2\mathbf{u}_2 + \cdots + c_p\mathbf{u}_p$ for some scalars $c_1, \dots, c_p$. Then:
$$0 = \mathbf{0}^T\mathbf{u}_1 = (c_1\mathbf{u}_1 + c_2\mathbf{u}_2 + \cdots + c_p\mathbf{u}_p)^T\mathbf{u}_1 = c_1(\mathbf{u}_1^T\mathbf{u}_1) + c_2(\mathbf{u}_2^T\mathbf{u}_1) + \cdots + c_p(\mathbf{u}_p^T\mathbf{u}_1).$$
Because $\mathbf{u}_1$ is orthogonal to $\mathbf{u}_2, \dots, \mathbf{u}_p$, this reduces to $0 = c_1(\mathbf{u}_1^T\mathbf{u}_1)$.
Since $\mathbf{u}_1$ is nonzero, the inner product $\mathbf{u}_1^T\mathbf{u}_1$ is not zero, and so $c_1 = 0$.
We can use the same kind of reasoning to show that $c_2, \dots, c_p$ must all be zero.
In other words, there is no nonzero combination of the $\mathbf{u}_i$'s that yields the zero vector ...
... so $S$ is linearly independent.
Notice that since $S$ is a linearly independent set, it is a basis for the subspace spanned by $S$.
This leads us to a new kind of basis.
Definition. An orthogonal basis for a subspace $W$ of $\mathbb{R}^n$ is a basis for $W$ that is also an orthogonal set.
For example, consider the two vectors $\mathbf{u}_1$ and $\mathbf{u}_2$ shown in the figure below.
Note that $\mathbf{u}_1^T\mathbf{u}_2 = 0$. Hence they form an orthogonal basis for their span.
Here is the subspace $H = \operatorname{Span}\{\mathbf{u}_1, \mathbf{u}_2\}$:
#
fig = ut.three_d_figure((22, 3), fig_desc = 'Orthogonal Basis on the subspace H', figsize = (8, 8),
xmin = -2, xmax = 10, ymin = -1, ymax = 10, zmin = -1, zmax = 10, qr = qr_setting)
v = 1/2 * np.array([-1, 4, 2])
u = 1/3 * np.array([8, 1, 2])
vpos = v + 0.4 * v - 0.5 * u
upos = u - 0.5 * v + 0.15 * u
fig.text(vpos[0], vpos[1], vpos[2], r'$\bf u_2$', 'v', size=16)
fig.text(upos[0], upos[1], upos[2], r'$\bf u_1$', 'u', size=16)
# fig.text(3*u[0]+2*v[0], 3*u[1]+2*v[1], 3*u[2]+2*v[2]+1, r'$\bf 2v_1+3v_2$', '2 v1 + 3 v2', size=16)
# plotting the span of v
fig.plotSpan(u, v, 'Green')
# blue grid lines
fig.plotPoint(0, 0, 0, 'y')
fig.plotPoint(u[0], u[1], u[2], 'b')
fig.plotPoint(2*u[0], 2*u[1], 2*u[2],'b')
fig.plotPoint(3*u[0], 3*u[1], 3*u[2], 'b')
fig.plotLine([[0, 0, 0], 4*u], color='b')
fig.plotLine([v, v+4*u], color='b')
fig.plotLine([2*v, 2*v+3*u], color='b')
fig.plotLine([3*v, 3*v+2.5*u], color='b')
# red grid lines
fig.plotPoint(v[0], v[1], v[2], 'r')
fig.plotPoint(2*v[0], 2*v[1], 2*v[2], 'r')
fig.plotPoint(3*v[0], 3*v[1], 3*v[2], 'r')
fig.plotLine([[0, 0, 0], 3.5*v], color='r')
fig.plotLine([u, u+3.5*v], color='r')
fig.plotLine([2*u, 2*u+3.5*v], color='r')
fig.plotLine([3*u, 3*u+2*v], color='r')
#
# fig.plotPoint(3*u[0]+2*v[0], 3*u[1]+2*v[1], 3*u[2]+2*v[2], color='m')
# plotting the axes
#fig.plotIntersection([0,0,1,0], [0,1,0,0], color='Black')
#fig.plotIntersection([0,0,1,0], [1,0,0,0], color='Black')
#fig.plotIntersection([0,1,0,0], [1,0,0,0], color='Black')
#
fig.plotPerpSym(np.array([0, 0, 0]), v, u, 1)
fig.plotPerpSym(u, v+u, u+u, 1)
fig.plotPerpSym(2*u, v+2*u, 3*u, 1)
#
fig.plotPerpSym(np.array([0, 0, 0])+v, 2*v, v+u, 1)
fig.plotPerpSym(u+v, 2*v+u, v+2*u, 1)
#
fig.set_title(r'Orthogonal Basis on the subspace $H$', 'Orthogonal Basis on the subspace H', size=16)
fig.save()
We have seen that for any subspace $H$, there may be many different sets of vectors that can serve as a basis for $H$.
For example, let's say we have a basis $\mathcal{B} = \{\mathbf{b}_1, \dots, \mathbf{b}_p\}$.
We know that to compute the coordinates of $\mathbf{x}$ in this basis, denoted $[\mathbf{x}]_\mathcal{B}$, we need to solve the linear system
$$c_1\mathbf{b}_1 + \cdots + c_p\mathbf{b}_p = \mathbf{x},$$
or, in matrix form,
$$B\mathbf{c} = \mathbf{x},$$
where $B$ is the matrix whose columns are the basis vectors.
In general, we'd need to perform Gaussian elimination, or matrix inversion, or some other computationally expensive method to do this.
However, an orthogonal basis is a particularly nice basis, because the coordinates of any point can be computed easily and simply. Let's see how.
Theorem. Let $\{\mathbf{u}_1, \dots, \mathbf{u}_p\}$ be an orthogonal basis for a subspace $W$ of $\mathbb{R}^n$. For each $\mathbf{y}$ in $W$, the weights of the linear combination
$$\mathbf{y} = c_1\mathbf{u}_1 + \cdots + c_p\mathbf{u}_p$$
are given by
$$c_j = \frac{\mathbf{y}^T\mathbf{u}_j}{\mathbf{u}_j^T\mathbf{u}_j}, \qquad j = 1, \dots, p.$$
Proof. Let's consider the inner product of $\mathbf{y}$ and one of the basis vectors --- say, $\mathbf{u}_1$.
As we saw in the last proof, the orthogonality of $\{\mathbf{u}_1, \dots, \mathbf{u}_p\}$ means that
$$\mathbf{y}^T\mathbf{u}_1 = (c_1\mathbf{u}_1 + c_2\mathbf{u}_2 + \cdots + c_p\mathbf{u}_p)^T\mathbf{u}_1 = c_1(\mathbf{u}_1^T\mathbf{u}_1).$$
Since $\mathbf{u}_1^T\mathbf{u}_1$ is not zero (why?), the equation above can be solved for $c_1$:
$$c_1 = \frac{\mathbf{y}^T\mathbf{u}_1}{\mathbf{u}_1^T\mathbf{u}_1}.$$
To find any other $c_j$, compute $\mathbf{y}^T\mathbf{u}_j$ in the same way and solve for $c_j$.
Example. The set $S = \{\mathbf{u}_1, \mathbf{u}_2, \mathbf{u}_3\}$ that we saw earlier, i.e.,
$$\mathbf{u}_1 = \begin{bmatrix} 3 \\ 1 \\ 1 \end{bmatrix}, \quad \mathbf{u}_2 = \begin{bmatrix} -1 \\ 2 \\ 1 \end{bmatrix}, \quad \mathbf{u}_3 = \begin{bmatrix} -1/2 \\ -2 \\ 7/2 \end{bmatrix},$$
is an orthogonal basis for $\mathbb{R}^3$.
Let's express the vector $\mathbf{y} = \begin{bmatrix} 6 \\ 1 \\ -8 \end{bmatrix}$ as a linear combination of the vectors in $S$.
That is, let's find $\mathbf{y}$'s coordinates in the basis $S$ --- i.e., in the coordinate system defined by $S$.
#
fig = ut.three_d_figure((22, 4), fig_desc = 'y in an orthogonal basis',
xmin = -3, xmax = 7, ymin = -5, ymax = 5, zmin = -8, zmax = 4,
figsize = (12, 8), qr = qr_setting, equalAxes = False)
u1 = np.array([3, 1, 1])
u2 = np.array([-1, 2, 1])
u3 = np.array([-1/2, -2, 7/2])
origin = np.array([0, 0, 0])
#
fig.plotLine([origin, u1], 'r', '--')
fig.plotPoint(u1[0], u1[1], u1[2], 'r')
fig.text(u1[0]+.1, u1[1]+.1, u1[2]+.1, r'$\bf u_1$', 'u1', size=16, color='k')
#
fig.plotLine([origin, u2], 'r', '--')
fig.plotPoint(u2[0], u2[1], u2[2], 'r')
fig.text(u2[0]+.1, u2[1]+.1, u2[2]+.1, r'$\bf u_2$', 'u2', size=16, color='k')
#
fig.plotLine([origin, u3], 'r', '--')
fig.plotPoint(u3[0], u3[1], u3[2], 'r')
fig.text(u3[0]+.1, u3[1]+.1, u3[2]+.1, r'$\bf u_3$', 'u3', size=16, color = 'k')
#
fig.text(origin[0]-.45, origin[1]-.45, origin[2]-.45, r'$\bf 0$', 0, size = 16)
#
fig.plotPerpSym(origin, u1, u2, 0.5)
fig.plotPerpSym(origin, u3, u2, 0.5)
fig.plotPerpSym(origin, u3, u1, 0.5)
#
y = u1 - 2 * u2 - 2 * u3
# print(y)
fig.plotPoint(y[0], y[1], y[2], 'b')
fig.text(y[0]-2, y[1]+.1, y[2]+.1, r'$\bf y$ = (6, 1, -8)', 'y = (6, 1, -8)', size=16, color = 'b')
fig.text(y[0]-2, y[1]+.1, y[2]-2.5, r'${\bf y} = 1{\bf u}_1 -2 {\bf u}_2 -2 {\bf u}_3$', 'y = (6, 1, -8)', size=16, color = 'b')
#
fig.set_title(r'${\bf y}$ in an Orthogonal Basis', 'y in an Orthogonal Basis', size = 16)
fig.save();
Solution. Compute
$$\mathbf{y}^T\mathbf{u}_1 = 11, \quad \mathbf{y}^T\mathbf{u}_2 = -12, \quad \mathbf{y}^T\mathbf{u}_3 = -33,$$
$$\mathbf{u}_1^T\mathbf{u}_1 = 11, \quad \mathbf{u}_2^T\mathbf{u}_2 = 6, \quad \mathbf{u}_3^T\mathbf{u}_3 = 33/2.$$
So
$$\mathbf{y} = \frac{\mathbf{y}^T\mathbf{u}_1}{\mathbf{u}_1^T\mathbf{u}_1}\,\mathbf{u}_1 + \frac{\mathbf{y}^T\mathbf{u}_2}{\mathbf{u}_2^T\mathbf{u}_2}\,\mathbf{u}_2 + \frac{\mathbf{y}^T\mathbf{u}_3}{\mathbf{u}_3^T\mathbf{u}_3}\,\mathbf{u}_3 = \frac{11}{11}\,\mathbf{u}_1 + \frac{-12}{6}\,\mathbf{u}_2 + \frac{-33}{33/2}\,\mathbf{u}_3 = \mathbf{u}_1 - 2\mathbf{u}_2 - 2\mathbf{u}_3.$$
As a result, $[\mathbf{y}]_S = \begin{bmatrix} 1 \\ -2 \\ -2 \end{bmatrix}$.
Note how much simpler it is to find the coordinates of $\mathbf{y}$ in the orthogonal basis, because each weight $c_j = \mathbf{y}^T\mathbf{u}_j / \mathbf{u}_j^T\mathbf{u}_j$ depends only on $\mathbf{y}$ and $\mathbf{u}_j$: each coordinate takes just two inner products, and no linear system needs to be solved.
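Here is the same computation sketched in code: each weight is a ratio of two inner products, computed independently of the other basis vectors.
# coordinates of y in the orthogonal basis S = {u1, u2, u3}
u1 = np.array([3., 1, 1])
u2 = np.array([-1., 2, 1])
u3 = np.array([-1/2, -2, 7/2])
y = np.array([6., 1, -8])
coords = [float(y @ u) / float(u @ u) for u in (u1, u2, u3)]
print(coords)                    # [1.0, -2.0, -2.0]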